hexsha stringlengths 40 40 | size int64 1 1.03M | ext stringclasses 10 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 3 239 | max_stars_repo_name stringlengths 5 130 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses sequencelengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 239 | max_issues_repo_name stringlengths 5 130 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses sequencelengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 239 | max_forks_repo_name stringlengths 5 130 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses sequencelengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 1 1.03M | avg_line_length float64 1 958k | max_line_length int64 1 1.03M | alphanum_fraction float64 0 1 |
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
79421679e34c0f5d9eabc56c13f5e8ab8d890d33 | 2,869 | py | Python | rdr_service/lib_fhir/fhirclient_1_0_6/models/flag_tests.py | all-of-us/raw-data-repository | d28ad957557587b03ff9c63d55dd55e0508f91d8 | ["BSD-3-Clause"] | 39 | 2017-10-13T19:16:27.000Z | 2021-09-24T16:58:21.000Z | rdr_service/lib_fhir/fhirclient_1_0_6/models/flag_tests.py | all-of-us/raw-data-repository | d28ad957557587b03ff9c63d55dd55e0508f91d8 | ["BSD-3-Clause"] | 312 | 2017-09-08T15:42:13.000Z | 2022-03-23T18:21:40.000Z | rdr_service/lib_fhir/fhirclient_1_0_6/models/flag_tests.py | all-of-us/raw-data-repository | d28ad957557587b03ff9c63d55dd55e0508f91d8 | ["BSD-3-Clause"] | 19 | 2017-09-15T13:58:00.000Z | 2022-02-07T18:33:20.000Z |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Generated from FHIR 1.0.2.7202 on 2016-06-23.
# 2016, SMART Health IT.
import io
import json
import os
import unittest
from . import flag
class FlagTests(unittest.TestCase):
def instantiate_from(self, filename):
datadir = os.environ.get('FHIR_UNITTEST_DATADIR') or ''
with io.open(os.path.join(datadir, filename), 'r', encoding='utf-8') as handle:
js = json.load(handle)
self.assertEqual("Flag", js["resourceType"])
return flag.Flag(js)
def testFlag1(self):
inst = self.instantiate_from("flag-example-encounter.json")
self.assertIsNotNone(inst, "Must have instantiated a Flag instance")
self.implFlag1(inst)
js = inst.as_json()
self.assertEqual("Flag", js["resourceType"])
inst2 = flag.Flag(js)
self.implFlag1(inst2)
def implFlag1(self, inst):
self.assertEqual(inst.category.coding[0].code, "infection")
self.assertEqual(inst.category.coding[0].display, "Infection Control Level")
self.assertEqual(inst.category.coding[0].system, "http://example.org/local")
self.assertEqual(inst.code.coding[0].code, "l3")
self.assertEqual(inst.code.coding[0].display, "Follow Level 3 Protocol")
self.assertEqual(inst.code.coding[0].system, "http://example.org/local/if1")
self.assertEqual(inst.id, "example-encounter")
self.assertEqual(inst.status, "active")
self.assertEqual(inst.text.div, "<div>Follow Infection Control Level 3 Protocol</div>")
self.assertEqual(inst.text.status, "generated")
def testFlag2(self):
inst = self.instantiate_from("flag-example.json")
self.assertIsNotNone(inst, "Must have instantiated a Flag instance")
self.implFlag2(inst)
js = inst.as_json()
self.assertEqual("Flag", js["resourceType"])
inst2 = flag.Flag(js)
self.implFlag2(inst2)
def implFlag2(self, inst):
self.assertEqual(inst.category.coding[0].code, "admin")
self.assertEqual(inst.category.coding[0].display, "Admin")
self.assertEqual(inst.category.coding[0].system, "http://example.org/local")
self.assertEqual(inst.category.text, "admin")
self.assertEqual(inst.code.coding[0].code, "bigdog")
self.assertEqual(inst.code.coding[0].display, "Big dog")
self.assertEqual(inst.code.coding[0].system, "http://example.org/local")
self.assertEqual(inst.code.text, "Patient has a big dog at his home. Always always wear a suit of armor or take other active counter-measures")
self.assertEqual(inst.id, "example")
self.assertEqual(inst.status, "active")
self.assertEqual(inst.text.div, "<div>Large Dog warning for Peter Patient</div>")
self.assertEqual(inst.text.status, "generated")
| 40.985714 | 151 | 0.663646 |
7942168d8c00580f4079567f14d161150bad5402 | 3,542 | py | Python | voipms/api/__init__.py | dtesfai/voipms-python | 1369e0b88569c7009551798d79d16767b46107e0 | ["MIT"] | 12 | 2019-09-08T18:03:52.000Z | 2021-01-30T09:52:41.000Z | voipms/api/__init__.py | dtesfai/voipms-python | 1369e0b88569c7009551798d79d16767b46107e0 | ["MIT"] | null | null | null | voipms/api/__init__.py | dtesfai/voipms-python | 1369e0b88569c7009551798d79d16767b46107e0 | ["MIT"] | 1 | 2021-05-09T15:49:02.000Z | 2021-05-09T15:49:02.000Z |
import os
import json
import requests
from voipms.base.exceptions import VoipException
class Client(object):
def __init__(self, username=None, password=None):
self.username = username or os.environ.get('VOIPMS_ACCOUNT_USER')
self.password = password or os.environ.get('VOIPMS_API_TOKEN')
self.api_base = "https://voip.ms/api/v1/rest.php"
if not self.username or not self.password:
raise VoipException("Credentials are required to create a Client")
self.auth = (self.username, self.password)
self._accounts = None
self._call_detail_records = None
self._dids = None
self._general = None
self._voicemail = None
def request(self, method, auth=None, params=None):
auth = auth or self.auth
params = params or {}  # avoid a shared mutable default argument
params["api_username"] = auth[0]
params["api_password"] = auth[1]
params["method"] = method
params["content_type"] = "json"
response = requests.get(self.api_base, params=params)
data = json.loads(response.text)
if data['status'] and data['status'] != 'success':
err_code = data['status']
raise VoipException(err_code)
return data
@property
def accounts(self):
if self._accounts is None:
from voipms.api.accounts import Accounts
self._accounts = Accounts(self)
return self._accounts
@property
def call_detail_records(self):
if self._call_detail_records is None:
from voipms.api.call_detail_records import CallDetailRecords
self._call_detail_records = CallDetailRecords(self)
return self._call_detail_records
@property
def dids(self):
if self._dids is None:
from voipms.api.dids import DIDs
self._dids = DIDs(self)
return self._dids
@property
def general(self):
if self._general is None:
from voipms.api.general import General
self._general = General(self)
return self._general
@property
def voicemail(self):
if self._voicemail is None:
from voipms.api.voicemail import Voicemail
self._voicemail = Voicemail(self)
return self._voicemail
@property
def balance(self):
return self.general.balance
@property
def ip(self):
return self.general.ip
@property
def transaction_history(self):
return self.general.transaction_history
@property
def countries(self):
return self.general.countries
@property
def languages(self):
return self.general.languages
@property
def subaccount(self):
return self.accounts.subaccount
@property
def registration_status(self):
return self.accounts.registration_status
@property
def billing(self):
return self.call_detail_records.billing
@property
def records(self):
return self.call_detail_records.records
@property
def rates(self):
return self.call_detail_records.rates
@property
def termination_rates(self):
return self.call_detail_records.termination_rates
@property
def search(self):
return self.dids.search
@property
def sms(self):
return self.dids.sms
@property
def messages(self):
return self.voicemail.messages
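# Illustrative usage sketch (added commentary; the credentials below are placeholders and
# may instead be supplied through the VOIPMS_ACCOUNT_USER / VOIPMS_API_TOKEN environment
# variables read in __init__):
#     client = Client(username='[email protected]', password='api-password')
#     data = client.request('getBalance')   # 'getBalance' is assumed to be a valid voip.ms REST method
#     general = client.general              # lazily constructed voipms.api.general.General helper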
| 26.833333 | 79 | 0.616601 |
794216ee6bb51f9ed78b3a4f3a3c931cc797c9e2 | 1,169 | py | Python | paco/defer.py | lmicra/paco | 1e5ef4df317e7cbbcefdf67d8dee28ce90538f3d | ["MIT"] | 208 | 2016-10-24T13:17:08.000Z | 2022-03-12T05:39:21.000Z | paco/defer.py | lmicra/paco | 1e5ef4df317e7cbbcefdf67d8dee28ce90538f3d | ["MIT"] | 39 | 2016-10-24T10:40:21.000Z | 2020-04-22T16:17:51.000Z | paco/defer.py | lmicra/paco | 1e5ef4df317e7cbbcefdf67d8dee28ce90538f3d | ["MIT"] | 14 | 2016-11-29T11:37:34.000Z | 2021-09-30T12:27:00.000Z |
# -*- coding: utf-8 -*-
import asyncio
from .decorator import decorate
from .assertions import assert_corofunction
@decorate
def defer(coro, delay=1):
"""
Returns a coroutine function wrapper that will defer the given coroutine
execution for a certain amount of seconds in a non-blocking way.
This function can be used as decorator.
Arguments:
coro (coroutinefunction): coroutine function to defer.
delay (int/float): number of seconds to defer execution.
Raises:
TypeError: if coro argument is not a coroutine function.
Returns:
coroutinefunction: wrapper that runs the given coroutine after the delay.
Usage::
# Usage as function
await paco.defer(coro, delay=1)
await paco.defer(coro, delay=0.5)
# Usage as decorator
@paco.defer(delay=1)
async def mul_2(num):
return num * 2
await mul_2(2)
# => 4
"""
assert_corofunction(coro=coro)
@asyncio.coroutine
def wrapper(*args, **kw):
# Wait until we're done
yield from asyncio.sleep(delay)
return (yield from coro(*args, **kw))
return wrapper
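# Illustrative, commented-out example (added commentary) showing how the decorator form from
# the docstring can be driven from synchronous code; assumes a running asyncio event loop:
#     @defer(delay=1)
#     async def mul_2(num):
#         return num * 2
#     loop = asyncio.get_event_loop()
#     print(loop.run_until_complete(mul_2(2)))   # prints 4 after roughly one second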
| 23.857143 | 76 | 0.634731 |
79421838f7eb44fb98b926210c175b7ecbd9564a | 261 | py | Python | pynames/orc/__init__.py | imposeren/pynames | 6928af52d77ed51b57386fea2a6f9ec3e1a3ce80 | ["BSD-2-Clause-FreeBSD"] | 19 | 2015-03-28T08:57:04.000Z | 2016-06-18T07:09:04.000Z | pynames/orc/__init__.py | imposeren/pynames | 6928af52d77ed51b57386fea2a6f9ec3e1a3ce80 | ["BSD-2-Clause-FreeBSD"] | 10 | 2015-02-10T15:38:16.000Z | 2016-06-28T04:55:20.000Z | pynames/orc/__init__.py | imposeren/pynames | 6928af52d77ed51b57386fea2a6f9ec3e1a3ce80 | ["BSD-2-Clause-FreeBSD"] | 6 | 2015-02-09T17:41:40.000Z | 2016-06-17T07:19:17.000Z |
# coding: utf-8
import os
from pynames.from_list_generator import FromListGenerator
FIXTURES_DIR = os.path.join(os.path.dirname(__file__), 'fixtures')
class OrcNamesGenerator(FromListGenerator):
SOURCE = os.path.join(FIXTURES_DIR, 'orc_names_list.json')
| 26.1 | 66 | 0.793103 |
7942188b04bdee3b20e42931d4671d600c5f6ae6 | 2,383 | py | Python | uaa_client/authentication.py | 18F/acquisitions.18f.gov | 7ef7091fd65b4b6797ddeb1c1f56def29522c43b | ["CC0-1.0"] | 3 | 2016-11-27T05:02:52.000Z | 2017-01-31T17:36:36.000Z | uaa_client/authentication.py | 18F/acquisitions.18f.gov | 7ef7091fd65b4b6797ddeb1c1f56def29522c43b | ["CC0-1.0"] | 61 | 2016-11-05T00:27:34.000Z | 2017-09-15T23:37:58.000Z | uaa_client/authentication.py | 18F/acquisitions.18f.gov | 7ef7091fd65b4b6797ddeb1c1f56def29522c43b | ["CC0-1.0"] | 2 | 2017-07-14T06:21:26.000Z | 2021-02-14T11:53:05.000Z |
import logging
import requests
import jwt
from django.core.urlresolvers import reverse
from django.contrib.auth.models import User
from django.contrib.auth.backends import ModelBackend
from django.conf import settings
logger = logging.getLogger('uaa_client')
def get_auth_url(request):
if settings.DEBUG and settings.UAA_AUTH_URL == 'fake:':
return request.build_absolute_uri(reverse('fake_uaa_provider:auth'))
return settings.UAA_AUTH_URL
def get_token_url(request):
if settings.DEBUG and settings.UAA_TOKEN_URL == 'fake:':
return request.build_absolute_uri(reverse('fake_uaa_provider:token'))
return settings.UAA_TOKEN_URL
def exchange_code_for_access_token(request, code):
redirect_uri = request.build_absolute_uri(reverse('uaa_client:callback'))
payload = {
'grant_type': 'authorization_code',
'code': code,
'response_type': 'token',
'redirect_uri': redirect_uri,
'client_id': settings.UAA_CLIENT_ID,
'client_secret': settings.UAA_CLIENT_SECRET
}
token_url = get_token_url(request)
token_req = requests.post(token_url, data=payload)
if token_req.status_code != 200:
logger.warn('POST %s returned %s '
'w/ content %s' % (
token_url,
token_req.status_code,
repr(token_req.content)
))
return None
response = token_req.json()
request.session.set_expiry(response['expires_in'])
return response['access_token']
def get_user_by_email(email):
try:
return User.objects.get(email=email)
except User.DoesNotExist:
return None
class UaaBackend(ModelBackend):
'''
Custom auth backend for Cloud Foundry / cloud.gov User Account and
Authentication (UAA) servers.
This inherits from ModelBackend so that the superclass can provide
all authorization methods (e.g. `has_perm()`).
'''
def authenticate(self, uaa_oauth2_code=None, request=None, **kwargs):
if uaa_oauth2_code is None or request is None:
return None
access_token = exchange_code_for_access_token(request, uaa_oauth2_code)
if access_token is None:
return None
user_info = jwt.decode(access_token, verify=False)
return get_user_by_email(user_info['email'])
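# Illustrative Django settings sketch (added commentary; the values are hypothetical, only the
# setting names come from this module and from Django itself):
#     AUTHENTICATION_BACKENDS = ['uaa_client.authentication.UaaBackend']
#     UAA_CLIENT_ID = 'my-client-id'
#     UAA_CLIENT_SECRET = 'my-client-secret'
#     UAA_AUTH_URL = 'https://login.example.gov/oauth/authorize'
#     UAA_TOKEN_URL = 'https://login.example.gov/oauth/token'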
| 30.164557 | 79 | 0.678137 |
794218faef24a145c9ca7d0bddbe722e8a391a90 | 2,279 | py | Python | runtests.py | maccesch/django-moderation | 7b2e5675ce9b308904aa58198e2c5d25097b0317 | ["BSD-3-Clause"] | null | null | null | runtests.py | maccesch/django-moderation | 7b2e5675ce9b308904aa58198e2c5d25097b0317 | ["BSD-3-Clause"] | null | null | null | runtests.py | maccesch/django-moderation | 7b2e5675ce9b308904aa58198e2c5d25097b0317 | ["BSD-3-Clause"] | null | null | null |
#!/usr/bin/env python
import sys
import os
from os.path import dirname, abspath
from optparse import OptionParser
from django.conf import settings, global_settings
import django
# For convenience configure settings if they are not pre-configured or if we
# haven't been provided settings to use by environment variable.
if not settings.configured and not os.environ.get('DJANGO_SETTINGS_MODULE'):
settings.configure(
DATABASES={
'default': {
'ENGINE': 'django.db.backends.sqlite3',
}
},
MIDDLEWARE_CLASSES=(
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
),
INSTALLED_APPS=[
'django.contrib.auth',
'django.contrib.admin',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'moderation',
'tests',
],
SERIALIZATION_MODULES = {},
MEDIA_URL = '/media/',
STATIC_URL = '/static/',
ROOT_URLCONF = 'tests.urls.default',
DJANGO_MODERATION_MODERATORS = (
'[email protected]',
),
DEBUG=True,
SITE_ID=1,
)
django.setup()
from django.test.runner import DiscoverRunner
def runtests(*test_args, **kwargs):
if 'south' in settings.INSTALLED_APPS:
from south.management.commands import patch_for_test_db_setup
patch_for_test_db_setup()
if not test_args:
test_args = ['tests']
parent = dirname(abspath(__file__))
sys.path.insert(0, parent)
test_runner = DiscoverRunner(pattern='*.py', verbosity=kwargs.get('verbosity', 1),
interactive=kwargs.get('interactive', False), failfast=kwargs.get('failfast'))
failures = test_runner.run_tests(test_args)
sys.exit(failures)
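# Illustrative invocations (added commentary, not part of the original script):
#     python runtests.py              # runs the bundled 'tests' package
#     python runtests.py --failfast   # stops at the first failing test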
if __name__ == '__main__':
parser = OptionParser()
parser.add_option('--failfast', action='store_true', default=False, dest='failfast')
(options, args) = parser.parse_args()
runtests(failfast=options.failfast, *args)
| 31.219178 | 111 | 0.638438 |
79421c2822542da24a924913f86e3ebee7b9547b | 39,957 | py | Python | fmriprep/workflows/bold/base.py | pcamach2/fmriprep | c2c5d44498cfae0c2cebe223b00b5a9c95e3121e | ["BSD-3-Clause"] | null | null | null | fmriprep/workflows/bold/base.py | pcamach2/fmriprep | c2c5d44498cfae0c2cebe223b00b5a9c95e3121e | ["BSD-3-Clause"] | null | null | null | fmriprep/workflows/bold/base.py | pcamach2/fmriprep | c2c5d44498cfae0c2cebe223b00b5a9c95e3121e | ["BSD-3-Clause"] | null | null | null |
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
"""
Orchestrating the BOLD-preprocessing workflow
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
.. autofunction:: init_func_preproc_wf
.. autofunction:: init_func_derivatives_wf
"""
import os
import nibabel as nb
from nipype.interfaces.fsl import Split as FSLSplit
from nipype.pipeline import engine as pe
from nipype.interfaces import utility as niu
from niworkflows.engine.workflows import LiterateWorkflow as Workflow
from niworkflows.interfaces.nibabel import ApplyMask
from niworkflows.interfaces.utility import KeySelect
from niworkflows.interfaces.utils import DictMerge
from niworkflows.func.util import init_bold_reference_wf
from ... import config
from ...utils.meepi import combine_meepi_source
from ...interfaces import DerivativesDataSink
from ...interfaces.reports import FunctionalSummary
# BOLD workflows
from .confounds import init_bold_confs_wf, init_carpetplot_wf
from .hmc import init_bold_hmc_wf
from .stc import init_bold_stc_wf
from .t2s import init_bold_t2s_wf
from .registration import init_bold_t1_trans_wf, init_bold_reg_wf
from .resampling import (
init_bold_surf_wf,
init_bold_std_trans_wf,
init_bold_preproc_trans_wf,
)
from .outputs import init_func_derivatives_wf
def init_func_preproc_wf(bold_file):
"""
This workflow controls the functional preprocessing stages of *fMRIPrep*.
Workflow Graph
.. workflow::
:graph2use: orig
:simple_form: yes
from fmriprep.workflows.tests import mock_config
from fmriprep import config
from fmriprep.workflows.bold.base import init_func_preproc_wf
with mock_config():
bold_file = config.execution.bids_dir / 'sub-01' / 'func' \
/ 'sub-01_task-mixedgamblestask_run-01_bold.nii.gz'
wf = init_func_preproc_wf(str(bold_file))
Parameters
----------
bold_file
BOLD series NIfTI file
Inputs
------
bold_file
BOLD series NIfTI file
t1w_preproc
Bias-corrected structural template image
t1w_mask
Mask of the skull-stripped template image
t1w_dseg
Segmentation of preprocessed structural image, including
gray-matter (GM), white-matter (WM) and cerebrospinal fluid (CSF)
t1w_aseg
Segmentation of structural image, done with FreeSurfer.
t1w_aparc
Parcellation of structural image, done with FreeSurfer.
t1w_tpms
List of tissue probability maps in T1w space
template
List of templates to target
anat2std_xfm
List of transform files, collated with templates
std2anat_xfm
List of inverse transform files, collated with templates
subjects_dir
FreeSurfer SUBJECTS_DIR
subject_id
FreeSurfer subject ID
t1w2fsnative_xfm
LTA-style affine matrix translating from T1w to FreeSurfer-conformed subject space
fsnative2t1w_xfm
LTA-style affine matrix translating from FreeSurfer-conformed subject space to T1w
Outputs
-------
bold_t1
BOLD series, resampled to T1w space
bold_mask_t1
BOLD series mask in T1w space
bold_std
BOLD series, resampled to template space
bold_mask_std
BOLD series mask in template space
confounds
TSV of confounds
surfaces
BOLD series, resampled to FreeSurfer surfaces
aroma_noise_ics
Noise components identified by ICA-AROMA
melodic_mix
FSL MELODIC mixing matrix
bold_cifti
BOLD CIFTI image
cifti_variant
combination of target spaces for `bold_cifti`
See Also
--------
* :py:func:`~niworkflows.func.util.init_bold_reference_wf`
* :py:func:`~fmriprep.workflows.bold.stc.init_bold_stc_wf`
* :py:func:`~fmriprep.workflows.bold.hmc.init_bold_hmc_wf`
* :py:func:`~fmriprep.workflows.bold.t2s.init_bold_t2s_wf`
* :py:func:`~fmriprep.workflows.bold.registration.init_bold_t1_trans_wf`
* :py:func:`~fmriprep.workflows.bold.registration.init_bold_reg_wf`
* :py:func:`~fmriprep.workflows.bold.confounds.init_bold_confounds_wf`
* :py:func:`~fmriprep.workflows.bold.confounds.init_ica_aroma_wf`
* :py:func:`~fmriprep.workflows.bold.resampling.init_bold_std_trans_wf`
* :py:func:`~fmriprep.workflows.bold.resampling.init_bold_preproc_trans_wf`
* :py:func:`~fmriprep.workflows.bold.resampling.init_bold_surf_wf`
* :py:func:`~sdcflows.workflows.fmap.init_fmap_wf`
* :py:func:`~sdcflows.workflows.pepolar.init_pepolar_unwarp_wf`
* :py:func:`~sdcflows.workflows.phdiff.init_phdiff_wf`
* :py:func:`~sdcflows.workflows.syn.init_syn_sdc_wf`
* :py:func:`~sdcflows.workflows.unwarp.init_sdc_unwarp_wf`
"""
from sdcflows.workflows.base import init_sdc_estimate_wf, fieldmap_wrangler
ref_file = bold_file
mem_gb = {'filesize': 1, 'resampled': 1, 'largemem': 1}
bold_tlen = 10
multiecho = isinstance(bold_file, list)
# Have some options handy
layout = config.execution.layout
omp_nthreads = config.nipype.omp_nthreads
freesurfer = config.workflow.run_reconall
spaces = config.workflow.spaces
if multiecho:
tes = [layout.get_metadata(echo)['EchoTime'] for echo in bold_file]
ref_file = dict(zip(tes, bold_file))[min(tes)]
if os.path.isfile(ref_file):
bold_tlen, mem_gb = _create_mem_gb(ref_file)
wf_name = _get_wf_name(ref_file)
config.loggers.workflow.debug(
'Creating bold processing workflow for "%s" (%.2f GB / %d TRs). '
'Memory resampled/largemem=%.2f/%.2f GB.',
ref_file, mem_gb['filesize'], bold_tlen, mem_gb['resampled'], mem_gb['largemem'])
sbref_file = None
# Find associated sbref, if possible
entities = layout.parse_file_entities(ref_file)
entities['suffix'] = 'sbref'
entities['extension'] = ['nii', 'nii.gz'] # Overwrite extensions
files = layout.get(return_type='file', **entities)
refbase = os.path.basename(ref_file)
if 'sbref' in config.workflow.ignore:
config.loggers.workflow.info("Single-band reference files ignored.")
elif files and multiecho:
config.loggers.workflow.warning(
"Single-band reference found, but not supported in "
"multi-echo workflows at this time. Ignoring.")
elif files:
sbref_file = files[0]
sbbase = os.path.basename(sbref_file)
if len(files) > 1:
config.loggers.workflow.warning(
"Multiple single-band reference files found for {}; using "
"{}".format(refbase, sbbase))
else:
config.loggers.workflow.info("Using single-band reference file %s.",
sbbase)
else:
config.loggers.workflow.info("No single-band-reference found for %s.",
refbase)
metadata = layout.get_metadata(ref_file)
# Find fieldmaps. Options: (phase1|phase2|phasediff|epi|fieldmap|syn)
fmaps = None
if 'fieldmaps' not in config.workflow.ignore:
fmaps = fieldmap_wrangler(layout, ref_file,
use_syn=config.workflow.use_syn,
force_syn=config.workflow.force_syn)
elif config.workflow.use_syn or config.workflow.force_syn:
# If fieldmaps are not enabled, activate SyN-SDC in unforced (False) mode
fmaps = {'syn': False}
# Short circuits: (True and True and (False or 'TooShort')) == 'TooShort'
run_stc = (bool(metadata.get("SliceTiming")) and
'slicetiming' not in config.workflow.ignore and
(_get_series_len(ref_file) > 4 or "TooShort"))
# Check if MEEPI for T2* coregistration target
if config.workflow.t2s_coreg and not multiecho:
config.loggers.workflow.warning(
"No multiecho BOLD images found for T2* coregistration. "
"Using standard EPI-T1 coregistration.")
config.workflow.t2s_coreg = False
# By default, force-bbr for t2s_coreg unless user specifies otherwise
if config.workflow.t2s_coreg and config.workflow.use_bbr is None:
config.workflow.use_bbr = True
# Build workflow
workflow = Workflow(name=wf_name)
workflow.__postdesc__ = """\
All resamplings can be performed with *a single interpolation
step* by composing all the pertinent transformations (i.e. head-motion
transform matrices, susceptibility distortion correction when available,
and co-registrations to anatomical and output spaces).
Gridded (volumetric) resamplings were performed using `antsApplyTransforms` (ANTs),
configured with Lanczos interpolation to minimize the smoothing
effects of other kernels [@lanczos].
Non-gridded (surface) resamplings were performed using `mri_vol2surf`
(FreeSurfer).
"""
inputnode = pe.Node(niu.IdentityInterface(
fields=['bold_file', 'subjects_dir', 'subject_id',
't1w_preproc', 't1w_mask', 't1w_dseg', 't1w_tpms',
't1w_aseg', 't1w_aparc',
'anat2std_xfm', 'std2anat_xfm', 'template',
't1w2fsnative_xfm', 'fsnative2t1w_xfm']),
name='inputnode')
inputnode.inputs.bold_file = bold_file
if sbref_file is not None:
from niworkflows.interfaces.images import ValidateImage
val_sbref = pe.Node(ValidateImage(in_file=sbref_file), name='val_sbref')
outputnode = pe.Node(niu.IdentityInterface(
fields=['bold_t1', 'bold_t1_ref', 'bold_mask_t1', 'bold_aseg_t1', 'bold_aparc_t1',
'bold_std', 'bold_std_ref', 'bold_mask_std', 'bold_aseg_std', 'bold_aparc_std',
'bold_native', 'bold_cifti', 'cifti_variant', 'cifti_metadata', 'cifti_density',
'surfaces', 'confounds', 'aroma_noise_ics', 'melodic_mix', 'nonaggr_denoised_file',
'confounds_metadata']),
name='outputnode')
# Generate a brain-masked conversion of the t1w
t1w_brain = pe.Node(ApplyMask(), name='t1w_brain')
# BOLD buffer: an identity used as a pointer to either the original BOLD
# or the STC'ed one for further use.
boldbuffer = pe.Node(niu.IdentityInterface(fields=['bold_file']), name='boldbuffer')
summary = pe.Node(
FunctionalSummary(
slice_timing=run_stc,
registration=('FSL', 'FreeSurfer')[freesurfer],
registration_dof=config.workflow.bold2t1w_dof,
pe_direction=metadata.get("PhaseEncodingDirection"),
tr=metadata.get("RepetitionTime")),
name='summary', mem_gb=config.DEFAULT_MEMORY_MIN_GB, run_without_submitting=True)
summary.inputs.dummy_scans = config.workflow.dummy_scans
func_derivatives_wf = init_func_derivatives_wf(
bids_root=layout.root,
cifti_output=config.workflow.cifti_output,
freesurfer=freesurfer,
metadata=metadata,
output_dir=str(config.execution.output_dir),
spaces=spaces,
use_aroma=config.workflow.use_aroma,
)
workflow.connect([
(outputnode, func_derivatives_wf, [
('bold_t1', 'inputnode.bold_t1'),
('bold_t1_ref', 'inputnode.bold_t1_ref'),
('bold_aseg_t1', 'inputnode.bold_aseg_t1'),
('bold_aparc_t1', 'inputnode.bold_aparc_t1'),
('bold_mask_t1', 'inputnode.bold_mask_t1'),
('bold_native', 'inputnode.bold_native'),
('confounds', 'inputnode.confounds'),
('surfaces', 'inputnode.surf_files'),
('aroma_noise_ics', 'inputnode.aroma_noise_ics'),
('melodic_mix', 'inputnode.melodic_mix'),
('nonaggr_denoised_file', 'inputnode.nonaggr_denoised_file'),
('bold_cifti', 'inputnode.bold_cifti'),
('cifti_variant', 'inputnode.cifti_variant'),
('cifti_metadata', 'inputnode.cifti_metadata'),
('cifti_density', 'inputnode.cifti_density'),
('confounds_metadata', 'inputnode.confounds_metadata'),
]),
])
# Generate a tentative boldref
bold_reference_wf = init_bold_reference_wf(omp_nthreads=omp_nthreads)
bold_reference_wf.inputs.inputnode.dummy_scans = config.workflow.dummy_scans
if sbref_file is not None:
workflow.connect([
(val_sbref, bold_reference_wf, [('out_file', 'inputnode.sbref_file')]),
])
# Top-level BOLD splitter
bold_split = pe.Node(FSLSplit(dimension='t'), name='bold_split',
mem_gb=mem_gb['filesize'] * 3)
# HMC on the BOLD
bold_hmc_wf = init_bold_hmc_wf(name='bold_hmc_wf',
mem_gb=mem_gb['filesize'],
omp_nthreads=omp_nthreads)
# calculate BOLD registration to T1w
bold_reg_wf = init_bold_reg_wf(name='bold_reg_wf',
freesurfer=freesurfer,
use_bbr=config.workflow.use_bbr,
bold2t1w_dof=config.workflow.bold2t1w_dof,
mem_gb=mem_gb['resampled'],
omp_nthreads=omp_nthreads,
use_compression=False)
# apply BOLD registration to T1w
bold_t1_trans_wf = init_bold_t1_trans_wf(name='bold_t1_trans_wf',
freesurfer=freesurfer,
use_fieldwarp=bool(fmaps),
multiecho=multiecho,
mem_gb=mem_gb['resampled'],
omp_nthreads=omp_nthreads,
use_compression=False)
# get confounds
bold_confounds_wf = init_bold_confs_wf(
mem_gb=mem_gb['largemem'],
metadata=metadata,
regressors_all_comps=config.workflow.regressors_all_comps,
regressors_fd_th=config.workflow.regressors_fd_th,
regressors_dvars_th=config.workflow.regressors_dvars_th,
name='bold_confounds_wf')
bold_confounds_wf.get_node('inputnode').inputs.t1_transform_flags = [False]
# Apply transforms in 1 shot
# Only use uncompressed output if AROMA is to be run
bold_bold_trans_wf = init_bold_preproc_trans_wf(
mem_gb=mem_gb['resampled'],
omp_nthreads=omp_nthreads,
use_compression=not config.execution.low_mem,
use_fieldwarp=bool(fmaps),
name='bold_bold_trans_wf'
)
bold_bold_trans_wf.inputs.inputnode.name_source = ref_file
# SLICE-TIME CORRECTION (or bypass) #############################################
if run_stc is True: # bool('TooShort') == True, so check True explicitly
bold_stc_wf = init_bold_stc_wf(name='bold_stc_wf', metadata=metadata)
workflow.connect([
(bold_reference_wf, bold_stc_wf, [
('outputnode.skip_vols', 'inputnode.skip_vols')]),
(bold_stc_wf, boldbuffer, [('outputnode.stc_file', 'bold_file')]),
])
if not multiecho:
workflow.connect([
(bold_reference_wf, bold_stc_wf, [
('outputnode.bold_file', 'inputnode.bold_file')])])
else: # for meepi, iterate through stc_wf for all workflows
meepi_echos = boldbuffer.clone(name='meepi_echos')
meepi_echos.iterables = ('bold_file', bold_file)
workflow.connect([
(meepi_echos, bold_stc_wf, [('bold_file', 'inputnode.bold_file')])])
elif not multiecho: # STC is too short or False
# bypass STC from original BOLD to the splitter through boldbuffer
workflow.connect([
(bold_reference_wf, boldbuffer, [('outputnode.bold_file', 'bold_file')])])
else:
# for meepi, iterate over all meepi echos to boldbuffer
boldbuffer.iterables = ('bold_file', bold_file)
# SDC (SUSCEPTIBILITY DISTORTION CORRECTION) or bypass ##########################
bold_sdc_wf = init_sdc_estimate_wf(fmaps, metadata,
omp_nthreads=omp_nthreads,
debug=config.execution.debug)
# MULTI-ECHO EPI DATA #############################################
if multiecho:
from niworkflows.func.util import init_skullstrip_bold_wf
skullstrip_bold_wf = init_skullstrip_bold_wf(name='skullstrip_bold_wf')
inputnode.inputs.bold_file = ref_file # Replace reference w first echo
join_echos = pe.JoinNode(niu.IdentityInterface(fields=['bold_files']),
joinsource=('meepi_echos' if run_stc is True else 'boldbuffer'),
joinfield=['bold_files'],
name='join_echos')
# create optimal combination, adaptive T2* map
bold_t2s_wf = init_bold_t2s_wf(echo_times=tes,
mem_gb=mem_gb['resampled'],
omp_nthreads=omp_nthreads,
t2s_coreg=config.workflow.t2s_coreg,
name='bold_t2smap_wf')
workflow.connect([
(skullstrip_bold_wf, join_echos, [
('outputnode.skull_stripped_file', 'bold_files')]),
(join_echos, bold_t2s_wf, [
('bold_files', 'inputnode.bold_file')]),
])
# MAIN WORKFLOW STRUCTURE #######################################################
workflow.connect([
(inputnode, t1w_brain, [('t1w_preproc', 'in_file'),
('t1w_mask', 'in_mask')]),
# Generate early reference
(inputnode, bold_reference_wf, [('bold_file', 'inputnode.bold_file')]),
# BOLD buffer has slice-time corrected if it was run, original otherwise
(boldbuffer, bold_split, [('bold_file', 'in_file')]),
# HMC
(bold_reference_wf, bold_hmc_wf, [
('outputnode.raw_ref_image', 'inputnode.raw_ref_image'),
('outputnode.bold_file', 'inputnode.bold_file')]),
(bold_reference_wf, summary, [
('outputnode.algo_dummy_scans', 'algo_dummy_scans')]),
# EPI-T1 registration workflow
(inputnode, bold_reg_wf, [
('t1w_dseg', 'inputnode.t1w_dseg'),
# Undefined if --fs-no-reconall, but this is safe
('subjects_dir', 'inputnode.subjects_dir'),
('subject_id', 'inputnode.subject_id'),
('fsnative2t1w_xfm', 'inputnode.fsnative2t1w_xfm')]),
(t1w_brain, bold_reg_wf, [
('out_file', 'inputnode.t1w_brain')]),
(inputnode, bold_t1_trans_wf, [
('bold_file', 'inputnode.name_source'),
('t1w_mask', 'inputnode.t1w_mask'),
('t1w_aseg', 'inputnode.t1w_aseg'),
('t1w_aparc', 'inputnode.t1w_aparc')]),
(t1w_brain, bold_t1_trans_wf, [
('out_file', 'inputnode.t1w_brain')]),
# unused if multiecho, but this is safe
(bold_hmc_wf, bold_t1_trans_wf, [('outputnode.xforms', 'inputnode.hmc_xforms')]),
(bold_reg_wf, bold_t1_trans_wf, [
('outputnode.itk_bold_to_t1', 'inputnode.itk_bold_to_t1')]),
(bold_t1_trans_wf, outputnode, [('outputnode.bold_t1', 'bold_t1'),
('outputnode.bold_t1_ref', 'bold_t1_ref'),
('outputnode.bold_aseg_t1', 'bold_aseg_t1'),
('outputnode.bold_aparc_t1', 'bold_aparc_t1')]),
(bold_reg_wf, summary, [('outputnode.fallback', 'fallback')]),
# SDC (or pass-through workflow)
(t1w_brain, bold_sdc_wf, [
('out_file', 'inputnode.t1w_brain')]),
(bold_reference_wf, bold_sdc_wf, [
('outputnode.ref_image', 'inputnode.epi_file'),
('outputnode.ref_image_brain', 'inputnode.epi_brain'),
('outputnode.bold_mask', 'inputnode.epi_mask')]),
(bold_sdc_wf, bold_t1_trans_wf, [
('outputnode.out_warp', 'inputnode.fieldwarp')]),
(bold_sdc_wf, bold_bold_trans_wf, [
('outputnode.out_warp', 'inputnode.fieldwarp'),
('outputnode.epi_mask', 'inputnode.bold_mask')]),
(bold_sdc_wf, summary, [('outputnode.method', 'distortion_correction')]),
# Connect bold_confounds_wf
(inputnode, bold_confounds_wf, [('t1w_tpms', 'inputnode.t1w_tpms'),
('t1w_mask', 'inputnode.t1w_mask')]),
(bold_hmc_wf, bold_confounds_wf, [
('outputnode.movpar_file', 'inputnode.movpar_file')]),
(bold_reg_wf, bold_confounds_wf, [
('outputnode.itk_t1_to_bold', 'inputnode.t1_bold_xform')]),
(bold_reference_wf, bold_confounds_wf, [
('outputnode.skip_vols', 'inputnode.skip_vols')]),
(bold_confounds_wf, outputnode, [
('outputnode.confounds_file', 'confounds'),
]),
(bold_confounds_wf, outputnode, [
('outputnode.confounds_metadata', 'confounds_metadata'),
]),
# Connect bold_bold_trans_wf
(bold_split, bold_bold_trans_wf, [
('out_files', 'inputnode.bold_file')]),
(bold_hmc_wf, bold_bold_trans_wf, [
('outputnode.xforms', 'inputnode.hmc_xforms')]),
# Summary
(outputnode, summary, [('confounds', 'confounds_file')]),
])
if not config.workflow.t2s_coreg:
workflow.connect([
(bold_sdc_wf, bold_reg_wf, [
('outputnode.epi_brain', 'inputnode.ref_bold_brain')]),
(bold_sdc_wf, bold_t1_trans_wf, [
('outputnode.epi_brain', 'inputnode.ref_bold_brain'),
('outputnode.epi_mask', 'inputnode.ref_bold_mask')]),
])
else:
workflow.connect([
# For t2s_coreg, replace EPI-to-T1w registration inputs
(bold_t2s_wf, bold_reg_wf, [
('outputnode.bold_ref_brain', 'inputnode.ref_bold_brain')]),
(bold_t2s_wf, bold_t1_trans_wf, [
('outputnode.bold_ref_brain', 'inputnode.ref_bold_brain'),
('outputnode.bold_mask', 'inputnode.ref_bold_mask')]),
])
# for standard EPI data, pass along correct file
if not multiecho:
workflow.connect([
(inputnode, func_derivatives_wf, [
('bold_file', 'inputnode.source_file')]),
(bold_bold_trans_wf, bold_confounds_wf, [
('outputnode.bold', 'inputnode.bold'),
('outputnode.bold_mask', 'inputnode.bold_mask')]),
(bold_split, bold_t1_trans_wf, [
('out_files', 'inputnode.bold_split')]),
])
else: # for meepi, create and use optimal combination
workflow.connect([
# update name source for optimal combination
(inputnode, func_derivatives_wf, [
(('bold_file', combine_meepi_source), 'inputnode.source_file')]),
(bold_bold_trans_wf, skullstrip_bold_wf, [
('outputnode.bold', 'inputnode.in_file')]),
(bold_t2s_wf, bold_confounds_wf, [
('outputnode.bold', 'inputnode.bold'),
('outputnode.bold_mask', 'inputnode.bold_mask')]),
(bold_t2s_wf, bold_t1_trans_wf, [
('outputnode.bold', 'inputnode.bold_split')]),
])
if fmaps:
from sdcflows.workflows.outputs import init_sdc_unwarp_report_wf
# Report on BOLD correction
fmap_unwarp_report_wf = init_sdc_unwarp_report_wf()
workflow.connect([
(inputnode, fmap_unwarp_report_wf, [
('t1w_dseg', 'inputnode.in_seg')]),
(bold_reference_wf, fmap_unwarp_report_wf, [
('outputnode.ref_image', 'inputnode.in_pre')]),
(bold_reg_wf, fmap_unwarp_report_wf, [
('outputnode.itk_t1_to_bold', 'inputnode.in_xfm')]),
(bold_sdc_wf, fmap_unwarp_report_wf, [
('outputnode.epi_corrected', 'inputnode.in_post')]),
])
# Overwrite ``out_path_base`` of unwarping DataSinks
for node in fmap_unwarp_report_wf.list_node_names():
if node.split('.')[-1].startswith('ds_'):
fmap_unwarp_report_wf.get_node(node).interface.out_path_base = 'fmriprep'
for node in bold_sdc_wf.list_node_names():
if node.split('.')[-1].startswith('ds_'):
bold_sdc_wf.get_node(node).interface.out_path_base = 'fmriprep'
if 'syn' in fmaps:
sdc_select_std = pe.Node(
KeySelect(fields=['std2anat_xfm']),
name='sdc_select_std', run_without_submitting=True)
sdc_select_std.inputs.key = 'MNI152NLin2009cAsym'
workflow.connect([
(inputnode, sdc_select_std, [('std2anat_xfm', 'std2anat_xfm'),
('template', 'keys')]),
(sdc_select_std, bold_sdc_wf, [('std2anat_xfm', 'inputnode.std2anat_xfm')]),
])
if fmaps.get('syn') is True: # SyN forced
syn_unwarp_report_wf = init_sdc_unwarp_report_wf(
name='syn_unwarp_report_wf', forcedsyn=True)
workflow.connect([
(inputnode, syn_unwarp_report_wf, [
('t1w_dseg', 'inputnode.in_seg')]),
(bold_reference_wf, syn_unwarp_report_wf, [
('outputnode.ref_image', 'inputnode.in_pre')]),
(bold_reg_wf, syn_unwarp_report_wf, [
('outputnode.itk_t1_to_bold', 'inputnode.in_xfm')]),
(bold_sdc_wf, syn_unwarp_report_wf, [
('outputnode.syn_ref', 'inputnode.in_post')]),
])
# Overwrite ``out_path_base`` of unwarping DataSinks
for node in syn_unwarp_report_wf.list_node_names():
if node.split('.')[-1].startswith('ds_'):
syn_unwarp_report_wf.get_node(node).interface.out_path_base = 'fmriprep'
# Map final BOLD mask into T1w space (if required)
nonstd_spaces = set(spaces.get_nonstandard())
if nonstd_spaces.intersection(('T1w', 'anat')):
from niworkflows.interfaces.fixes import (
FixHeaderApplyTransforms as ApplyTransforms
)
boldmask_to_t1w = pe.Node(
ApplyTransforms(interpolation='MultiLabel', float=True),
name='boldmask_to_t1w', mem_gb=0.1
)
workflow.connect([
(bold_reg_wf, boldmask_to_t1w, [
('outputnode.itk_bold_to_t1', 'transforms')]),
(bold_t1_trans_wf, boldmask_to_t1w, [
('outputnode.bold_mask_t1', 'reference_image')]),
(bold_bold_trans_wf if not multiecho else bold_t2s_wf, boldmask_to_t1w, [
('outputnode.bold_mask', 'input_image')]),
(boldmask_to_t1w, outputnode, [
('output_image', 'bold_mask_t1')]),
])
if nonstd_spaces.intersection(('func', 'run', 'bold', 'boldref', 'sbref')):
workflow.connect([
(bold_bold_trans_wf, outputnode, [
('outputnode.bold', 'bold_native')]),
(bold_bold_trans_wf, func_derivatives_wf, [
('outputnode.bold_ref', 'inputnode.bold_native_ref'),
('outputnode.bold_mask', 'inputnode.bold_mask_native')]),
])
if spaces.get_spaces(nonstandard=False, dim=(3,)):
# Apply transforms in 1 shot
# Only use uncompressed output if AROMA is to be run
bold_std_trans_wf = init_bold_std_trans_wf(
freesurfer=freesurfer,
mem_gb=mem_gb['resampled'],
omp_nthreads=omp_nthreads,
spaces=spaces,
name='bold_std_trans_wf',
use_compression=not config.execution.low_mem,
use_fieldwarp=bool(fmaps),
)
workflow.connect([
(inputnode, bold_std_trans_wf, [
('template', 'inputnode.templates'),
('anat2std_xfm', 'inputnode.anat2std_xfm'),
('bold_file', 'inputnode.name_source'),
('t1w_aseg', 'inputnode.bold_aseg'),
('t1w_aparc', 'inputnode.bold_aparc')]),
(bold_hmc_wf, bold_std_trans_wf, [
('outputnode.xforms', 'inputnode.hmc_xforms')]),
(bold_reg_wf, bold_std_trans_wf, [
('outputnode.itk_bold_to_t1', 'inputnode.itk_bold_to_t1')]),
(bold_bold_trans_wf if not multiecho else bold_t2s_wf, bold_std_trans_wf, [
('outputnode.bold_mask', 'inputnode.bold_mask')]),
(bold_sdc_wf, bold_std_trans_wf, [
('outputnode.out_warp', 'inputnode.fieldwarp')]),
(bold_std_trans_wf, outputnode, [('outputnode.bold_std', 'bold_std'),
('outputnode.bold_std_ref', 'bold_std_ref'),
('outputnode.bold_mask_std', 'bold_mask_std')]),
])
if freesurfer:
workflow.connect([
(bold_std_trans_wf, func_derivatives_wf, [
('outputnode.bold_aseg_std', 'inputnode.bold_aseg_std'),
('outputnode.bold_aparc_std', 'inputnode.bold_aparc_std'),
]),
(bold_std_trans_wf, outputnode, [
('outputnode.bold_aseg_std', 'bold_aseg_std'),
('outputnode.bold_aparc_std', 'bold_aparc_std')]),
])
# Xform to 'MNI152NLin2009cAsym' is always computed.
carpetplot_select_std = pe.Node(
KeySelect(fields=['std2anat_xfm'], key='MNI152NLin2009cAsym'),
name='carpetplot_select_std', run_without_submitting=True)
carpetplot_wf = init_carpetplot_wf(
mem_gb=mem_gb['resampled'],
metadata=metadata,
name='carpetplot_wf')
workflow.connect([
(inputnode, carpetplot_select_std, [
('std2anat_xfm', 'std2anat_xfm'),
('template', 'keys')]),
(carpetplot_select_std, carpetplot_wf, [
('std2anat_xfm', 'inputnode.std2anat_xfm')]),
(bold_bold_trans_wf if not multiecho else bold_t2s_wf, carpetplot_wf, [
('outputnode.bold', 'inputnode.bold'),
('outputnode.bold_mask', 'inputnode.bold_mask')]),
(bold_reg_wf, carpetplot_wf, [
('outputnode.itk_t1_to_bold', 'inputnode.t1_bold_xform')]),
(bold_confounds_wf, carpetplot_wf, [
('outputnode.confounds_file', 'inputnode.confounds_file')]),
])
if not multiecho:
workflow.connect([
(bold_split, bold_std_trans_wf, [
('out_files', 'inputnode.bold_split')])
])
else:
split_opt_comb = bold_split.clone(name='split_opt_comb')
workflow.connect([
(bold_t2s_wf, split_opt_comb, [
('outputnode.bold', 'in_file')]),
(split_opt_comb, bold_std_trans_wf, [
('out_files', 'inputnode.bold_split')
])
])
# func_derivatives_wf internally parametrizes over snapshotted spaces.
workflow.connect([
(bold_std_trans_wf, func_derivatives_wf, [
('outputnode.template', 'inputnode.template'),
('outputnode.spatial_reference', 'inputnode.spatial_reference'),
('outputnode.bold_std_ref', 'inputnode.bold_std_ref'),
('outputnode.bold_std', 'inputnode.bold_std'),
('outputnode.bold_mask_std', 'inputnode.bold_mask_std'),
]),
])
if config.workflow.use_aroma: # ICA-AROMA workflow
from .confounds import init_ica_aroma_wf
ica_aroma_wf = init_ica_aroma_wf(
mem_gb=mem_gb['resampled'],
metadata=metadata,
omp_nthreads=omp_nthreads,
use_fieldwarp=bool(fmaps),
err_on_aroma_warn=config.workflow.aroma_err_on_warn,
aroma_melodic_dim=config.workflow.aroma_melodic_dim,
name='ica_aroma_wf')
join = pe.Node(niu.Function(output_names=["out_file"],
function=_to_join),
name='aroma_confounds')
mrg_conf_metadata = pe.Node(niu.Merge(2), name='merge_confound_metadata',
run_without_submitting=True)
mrg_conf_metadata2 = pe.Node(DictMerge(), name='merge_confound_metadata2',
run_without_submitting=True)
workflow.disconnect([
(bold_confounds_wf, outputnode, [
('outputnode.confounds_file', 'confounds'),
]),
(bold_confounds_wf, outputnode, [
('outputnode.confounds_metadata', 'confounds_metadata'),
]),
])
workflow.connect([
(inputnode, ica_aroma_wf, [
('bold_file', 'inputnode.name_source')]),
(bold_hmc_wf, ica_aroma_wf, [
('outputnode.movpar_file', 'inputnode.movpar_file')]),
(bold_reference_wf, ica_aroma_wf, [
('outputnode.skip_vols', 'inputnode.skip_vols')]),
(bold_confounds_wf, join, [
('outputnode.confounds_file', 'in_file')]),
(bold_confounds_wf, mrg_conf_metadata,
[('outputnode.confounds_metadata', 'in1')]),
(ica_aroma_wf, join,
[('outputnode.aroma_confounds', 'join_file')]),
(ica_aroma_wf, mrg_conf_metadata,
[('outputnode.aroma_metadata', 'in2')]),
(mrg_conf_metadata, mrg_conf_metadata2, [('out', 'in_dicts')]),
(ica_aroma_wf, outputnode,
[('outputnode.aroma_noise_ics', 'aroma_noise_ics'),
('outputnode.melodic_mix', 'melodic_mix'),
('outputnode.nonaggr_denoised_file', 'nonaggr_denoised_file')]),
(join, outputnode, [('out_file', 'confounds')]),
(mrg_conf_metadata2, outputnode, [('out_dict', 'confounds_metadata')]),
(bold_std_trans_wf, ica_aroma_wf, [
('outputnode.bold_std', 'inputnode.bold_std'),
('outputnode.bold_mask_std', 'inputnode.bold_mask_std'),
('outputnode.spatial_reference', 'inputnode.spatial_reference')]),
])
# SURFACES ##################################################################################
# Freesurfer
freesurfer_spaces = spaces.get_fs_spaces()
if freesurfer and freesurfer_spaces:
config.loggers.workflow.debug('Creating BOLD surface-sampling workflow.')
bold_surf_wf = init_bold_surf_wf(
mem_gb=mem_gb['resampled'],
surface_spaces=freesurfer_spaces,
medial_surface_nan=config.workflow.medial_surface_nan,
name='bold_surf_wf')
workflow.connect([
(inputnode, bold_surf_wf, [
('t1w_preproc', 'inputnode.t1w_preproc'),
('subjects_dir', 'inputnode.subjects_dir'),
('subject_id', 'inputnode.subject_id'),
('t1w2fsnative_xfm', 'inputnode.t1w2fsnative_xfm')]),
(bold_t1_trans_wf, bold_surf_wf, [('outputnode.bold_t1', 'inputnode.source_file')]),
(bold_surf_wf, outputnode, [('outputnode.surfaces', 'surfaces')]),
(bold_surf_wf, func_derivatives_wf, [
('outputnode.target', 'inputnode.surf_refs')]),
])
# CIFTI output
if config.workflow.cifti_output:
from .resampling import init_bold_grayords_wf
bold_grayords_wf = init_bold_grayords_wf(
grayord_density=config.workflow.cifti_output,
mem_gb=mem_gb['resampled'],
repetition_time=metadata['RepetitionTime'])
workflow.connect([
(inputnode, bold_grayords_wf, [
('subjects_dir', 'inputnode.subjects_dir')]),
(bold_std_trans_wf, bold_grayords_wf, [
('outputnode.bold_std', 'inputnode.bold_std'),
('outputnode.spatial_reference', 'inputnode.spatial_reference')]),
(bold_surf_wf, bold_grayords_wf, [
('outputnode.surfaces', 'inputnode.surf_files'),
('outputnode.target', 'inputnode.surf_refs'),
]),
(bold_grayords_wf, outputnode, [
('outputnode.cifti_bold', 'bold_cifti'),
('outputnode.cifti_variant', 'cifti_variant'),
('outputnode.cifti_metadata', 'cifti_metadata'),
('outputnode.cifti_density', 'cifti_density')]),
])
# REPORTING ############################################################
reportlets_dir = str(config.execution.work_dir / 'reportlets')
ds_report_summary = pe.Node(
DerivativesDataSink(desc='summary', keep_dtype=True),
name='ds_report_summary', run_without_submitting=True,
mem_gb=config.DEFAULT_MEMORY_MIN_GB)
ds_report_validation = pe.Node(
DerivativesDataSink(base_directory=reportlets_dir,
desc='validation', keep_dtype=True),
name='ds_report_validation', run_without_submitting=True,
mem_gb=config.DEFAULT_MEMORY_MIN_GB)
workflow.connect([
(summary, ds_report_summary, [('out_report', 'in_file')]),
(bold_reference_wf, ds_report_validation, [
('outputnode.validation_report', 'in_file')]),
])
# Fill-in datasinks of reportlets seen so far
for node in workflow.list_node_names():
if node.split('.')[-1].startswith('ds_report'):
workflow.get_node(node).inputs.base_directory = reportlets_dir
workflow.get_node(node).inputs.source_file = ref_file
return workflow
def _get_series_len(bold_fname):
from niworkflows.interfaces.registration import _get_vols_to_discard
img = nb.load(bold_fname)
if len(img.shape) < 4:
return 1
skip_vols = _get_vols_to_discard(img)
return img.shape[3] - skip_vols
def _create_mem_gb(bold_fname):
bold_size_gb = os.path.getsize(bold_fname) / (1024**3)
bold_tlen = nb.load(bold_fname).shape[-1]
mem_gb = {
'filesize': bold_size_gb,
'resampled': bold_size_gb * 4,
'largemem': bold_size_gb * (max(bold_tlen / 100, 1.0) + 4),
}
return bold_tlen, mem_gb
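# Worked example (added commentary): a 1 GB BOLD series with 500 volumes gives
# filesize = 1.0 GB, resampled = 4.0 GB and largemem = 1.0 * (max(500 / 100, 1.0) + 4) = 9.0 GB.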
def _get_wf_name(bold_fname):
"""
Derive the workflow name for supplied BOLD file.
>>> _get_wf_name('/completely/made/up/path/sub-01_task-nback_bold.nii.gz')
'func_preproc_task_nback_wf'
>>> _get_wf_name('/completely/made/up/path/sub-01_task-nback_run-01_echo-1_bold.nii.gz')
'func_preproc_task_nback_run_01_echo_1_wf'
"""
from nipype.utils.filemanip import split_filename
fname = split_filename(bold_fname)[1]
fname_nosub = '_'.join(fname.split("_")[1:])
# if 'echo' in fname_nosub:
# fname_nosub = '_'.join(fname_nosub.split("_echo-")[:1]) + "_bold"
name = "func_preproc_" + fname_nosub.replace(
".", "_").replace(" ", "").replace("-", "_").replace("_bold", "_wf")
return name
def _to_join(in_file, join_file):
"""Join two tsv files if the join_file is not ``None``."""
from niworkflows.interfaces.utils import JoinTSVColumns
if join_file is None:
return in_file
res = JoinTSVColumns(in_file=in_file, join_file=join_file).run()
return res.outputs.out_file
| 44.545151 | 99 | 0.6043 |
79421c7f6340dc1804ea3ea76d91db2b2eb8c3e8 | 4,466 | py | Python | lib/googlecloudsdk/command_lib/dataproc/batches/batch_message_factory.py | google-cloud-sdk-unofficial/google-cloud-sdk | 2a48a04df14be46c8745050f98768e30474a1aac | ["Apache-2.0"] | 2 | 2019-11-10T09:17:07.000Z | 2019-12-18T13:44:08.000Z | lib/googlecloudsdk/command_lib/dataproc/batches/batch_message_factory.py | google-cloud-sdk-unofficial/google-cloud-sdk | 2a48a04df14be46c8745050f98768e30474a1aac | ["Apache-2.0"] | null | null | null | lib/googlecloudsdk/command_lib/dataproc/batches/batch_message_factory.py | google-cloud-sdk-unofficial/google-cloud-sdk | 2a48a04df14be46c8745050f98768e30474a1aac | ["Apache-2.0"] | 1 | 2020-07-25T01:40:19.000Z | 2020-07-25T01:40:19.000Z |
# -*- coding: utf-8 -*- #
# Copyright 2021 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Factory class for Batch message."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from googlecloudsdk.command_lib.dataproc.shared_messages import (
environment_config_factory as ecf)
from googlecloudsdk.command_lib.dataproc.shared_messages import (
runtime_config_factory as rcf)
from googlecloudsdk.command_lib.util.args import labels_util
class BatchMessageFactory(object):
"""Factory class for Batch message.
Factory class for configuring argument parser and creating a Batch message
from the parsed arguments.
"""
INVALID_BATCH_TYPE_ERR_MSG = 'Invalid batch job type: {}.'
MISSING_BATCH_ERR_MSG = 'Missing batch job.'
def __init__(self, dataproc, runtime_config_factory_override=None,
environment_config_factory_override=None):
"""Builder class for Batch message.
Batch message factory. Only the flags added in AddArguments are handled.
Users need to provide the batch-job-type-specific message during message
creation.
Args:
dataproc: A api_lib.dataproc.Dataproc instance.
runtime_config_factory_override: Override the default
RuntimeConfigFactory instance.
environment_config_factory_override: Override the default
EnvironmentConfigFactory instance.
"""
self.dataproc = dataproc
# Construct available batch type to keyword mapping.
self._batch2key = {
self.dataproc.messages.SparkBatch: 'sparkBatch',
self.dataproc.messages.SparkRBatch: 'sparkRBatch',
self.dataproc.messages.SparkSqlBatch: 'sparkSqlBatch',
self.dataproc.messages.PySparkBatch: 'pysparkBatch'
}
self.runtime_config_factory = runtime_config_factory_override
if not self.runtime_config_factory:
self.runtime_config_factory = rcf.RuntimeConfigFactory(self.dataproc)
self.environment_config_factory = environment_config_factory_override
if not self.environment_config_factory:
self.environment_config_factory = (
ecf.EnvironmentConfigFactory(self.dataproc))
def GetMessage(self, args, batch_job):
"""Creates a Batch message from given args.
Create a Batch message from the given arguments. Only the arguments added in
AddArguments are handled. Users need to provide the batch-job-type-specific
message during message creation.
Args:
args: Parsed argument.
batch_job: Batch type job instance.
Returns:
A Batch message instance.
Raises:
AttributeError: When batch_job is invalid.
"""
if not batch_job:
raise AttributeError(BatchMessageFactory.MISSING_BATCH_ERR_MSG)
if not isinstance(batch_job, tuple(self._batch2key.keys())):
raise AttributeError(
BatchMessageFactory.INVALID_BATCH_TYPE_ERR_MSG.format(
type(batch_job)))
kwargs = {}
kwargs[self._batch2key[type(batch_job)]] = batch_job
if args.labels:
kwargs['labels'] = labels_util.ParseCreateArgs(
args, self.dataproc.messages.Batch.LabelsValue)
runtime_config = self.runtime_config_factory.GetMessage(args)
if runtime_config:
kwargs['runtimeConfig'] = runtime_config
environment_config = self.environment_config_factory.GetMessage(args)
if environment_config:
kwargs['environmentConfig'] = environment_config
if not kwargs:
return None
return self.dataproc.messages.Batch(**kwargs)
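# Illustrative usage sketch (added commentary; 'dataproc' stands for an api_lib.dataproc.Dataproc
# instance and 'args' for a namespace parsed by a parser configured with AddArguments below):
#     factory = BatchMessageFactory(dataproc)
#     spark_job = dataproc.messages.SparkBatch()   # job-type-specific message; fields omitted here
#     batch = factory.GetMessage(args, spark_job)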
def AddArguments(parser):
"""Adds arguments related to Batch message.
Add Batch arguments to the given parser. Job specific arguments are not
handled, and need to be set during factory instantiation.
Args:
parser: A argument parser.
"""
labels_util.AddCreateLabelsFlags(parser)
_AddDependency(parser)
def _AddDependency(parser):
rcf.AddArguments(parser)
ecf.AddArguments(parser)
| 32.59854 | 76 | 0.745858 |
79421e18fd3cf6ba18f27d26d86dd495776f7e86 | 318 | py | Python | ExerciciosPYTHON/PythonCeV/018.py | Samuel-Melo890/Python-Desafios | 2abc7734d6a6c1f5ab67421f792d6889d93bac94 | ["MIT"] | null | null | null | ExerciciosPYTHON/PythonCeV/018.py | Samuel-Melo890/Python-Desafios | 2abc7734d6a6c1f5ab67421f792d6889d93bac94 | ["MIT"] | 2 | 2022-03-18T16:06:07.000Z | 2022-03-18T16:55:29.000Z | ExerciciosPYTHON/PythonCeV/018.py | Samuel-Melo890/Python-Desafios | 2abc7734d6a6c1f5ab67421f792d6889d93bac94 | ["MIT"] | null | null | null |
print('='*8,'Seno, Cosseno e Tangente','='*8)
a = float(input('Digite o seu angulo:'))
from math import radians, sin, cos, tan
s = sin(radians(a))
c = cos(radians(a))
t = tan(radians(a))
print('''Para o angulo analisado {}, temos:
Seno igual a {:.2f}
Cosseno igual a {:.2f}
Tangente igual a {:.2f}'''.format(a,s,c,t))
| 28.909091 | 45 | 0.638365 |
79421e4821f94d9248c3bb25179f3b423535b8fd | 739 | py | Python | conversor_moeda.py | eduardobaltazarmarfim/PythonC | 8e44b4f191582c73cca6df98120ab142145c4ba1 | ["MIT"] | null | null | null | conversor_moeda.py | eduardobaltazarmarfim/PythonC | 8e44b4f191582c73cca6df98120ab142145c4ba1 | ["MIT"] | null | null | null | conversor_moeda.py | eduardobaltazarmarfim/PythonC | 8e44b4f191582c73cca6df98120ab142145c4ba1 | ["MIT"] | null | null | null |
def retorno():
res=input('Deseja executar o programa novamente?[s/n] ')
if(res=='s' or res=='S'):
verificar()
else:
print('Processo finalizado!')
pass
def cabecalho(texto):
print('-'*30)
print(' '*10+texto+' '*15)
print('-'*30)
pass
def verificar():
try:
cabecalho('Conversor de Moeda')
val=float(input('Digite um valor em R$ '))
dolar=float(input('Digite o valor em U$ '))
except:
print('Dados inseridos são invalidos!')
retorno()
else:
res=val/dolar
print('O valor digitado: R$ {}.e o valor em U$ {:.2f}'.format(val,res))
retorno()
pass
verificar()
| 14.490196 | 79 | 0.507442 |
79421e9b1649e5c4727926e8fbef214b530c5ecb | 452 | py | Python | bblfsh_sonar_checks/checks/java/RSPEC-2235.py | juanjux/sonar-checks | 65e45ca93bcc59bd20a28f169b00ac4de9b7e98f | ["Apache-2.0"] | 3 | 2018-10-02T12:02:37.000Z | 2019-04-04T09:18:37.000Z | bblfsh_sonar_checks/checks/java/RSPEC-2235.py | juanjux/sonar-checks | 65e45ca93bcc59bd20a28f169b00ac4de9b7e98f | ["Apache-2.0"] | 6 | 2018-08-17T14:43:57.000Z | 2019-01-08T12:20:03.000Z | bblfsh_sonar_checks/checks/java/RSPEC-2235.py | juanjux/sonar-checks | 65e45ca93bcc59bd20a28f169b00ac4de9b7e98f | ["Apache-2.0"] | 3 | 2018-08-23T22:35:41.000Z | 2020-04-18T11:29:45.000Z |
import bblfsh_sonar_checks.utils as utils
import bblfsh
def check(uast):
findings = []
catchs = bblfsh.filter(uast, "//CatchClause//SimpleType//Identifier[@Name='IllegalMonitorStateException']")
for c in catchs:
findings.append({"msg": "Don't catch IllegalMonitorStateException",
"pos": c.start_position.line})
return findings
if __name__ == '__main__': utils.run_default_fixture(__file__, check)
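# Illustrative result (added commentary): for a Java file that catches IllegalMonitorStateException
# on line 17, check() would return [{"msg": "Don't catch IllegalMonitorStateException", "pos": 17}]
# (the line number is hypothetical).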
| 26.588235 | 111 | 0.688053 |
79421eef89e378213ffa8f58df01131a81f05f58 | 769 | py | Python | python/tests/test_client.py | yndu13/darabonba-time | a5687a49920b2901252d6d9d2449dfe738888e98 | ["Apache-2.0"] | 1 | 2020-12-08T06:45:25.000Z | 2020-12-08T06:45:25.000Z | python/tests/test_client.py | yndu13/darabonba-time | a5687a49920b2901252d6d9d2449dfe738888e98 | ["Apache-2.0"] | 2 | 2021-01-05T07:14:18.000Z | 2022-02-07T09:32:12.000Z | python/tests/test_client.py | yndu13/darabonba-time | a5687a49920b2901252d6d9d2449dfe738888e98 | ["Apache-2.0"] | 3 | 2021-01-27T08:38:06.000Z | 2021-11-23T03:01:53.000Z |
import unittest
import time
from alibabacloud_darabonba_time.client import Client
class TestClient(unittest.TestCase):
def test_unix(self):
self.assertEqual(10, len(Client.unix()))
def test_utc(self):
self.assertEqual(len('2020-11-24 09:44:45.426237'),
len(Client.utc()))
def test_sleep(self):
t1 = time.time()
Client.sleep(1500)
self.assertTrue(1 < time.time() - t1 < 2)
def test_format(self):
self.assertEqual(len('20060102 15:04 PM'),
len(Client.format('yyyyMMdd hh:mm a')))
week = ['Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday', 'Saturday', 'Sunday']
self.assertIn(Client.format('EEEE'), week)
| 29.576923 | 94 | 0.581274 |
79421f9106f459b0560cfe78dd2cae2402547fc5 | 8,984 | py | Python | upd/vcf_tools.py | bjhall/upd | 180a25bcb22fa54c200c05dbc214d1f84be995a3 | ["MIT"] | 5 | 2019-06-20T17:02:41.000Z | 2022-02-08T08:35:01.000Z | upd/vcf_tools.py | bjhall/upd | 180a25bcb22fa54c200c05dbc214d1f84be995a3 | ["MIT"] | 6 | 2019-06-20T17:36:52.000Z | 2021-02-04T19:07:06.000Z | upd/vcf_tools.py | bjhall/upd | 180a25bcb22fa54c200c05dbc214d1f84be995a3 | ["MIT"] | 1 | 2021-12-24T07:16:05.000Z | 2021-12-24T07:16:05.000Z |
import logging
import gzip
import re
from codecs import (open, getreader)
from pprint import pprint as pp
LOG = logging.getLogger(__name__)
def open_file(filename):
"""Open a file and return a iterable with lines"""
if filename.endswith('.gz'):
LOG.info(f"{filename} is zipped")
handle = getreader('utf-8')(gzip.open(filename), errors='replace')
else:
handle = open(filename, mode='r', encoding='utf-8', errors='replace')
return handle
class Variant(object):
"""Implements a Variant class for VCF variants
gt_types: 0=HOM_REF, 1=HET, 3=HOM_ALT, 2=other
"""
def __init__(self, variant_line):
super(Variant, self).__init__()
self.variant_line = variant_line
self.CHROM = None
self.POS = None
self.INFO = {}
self.ALT = None
self.is_snp = True
self.gt_quals = []
self.gt_types = []
self._initialize()
def _initialize(self):
splitted_line = self.variant_line.split('\t')
self.CHROM = splitted_line[0]
self.POS = int(splitted_line[1])
self.ALT = splitted_line[4].split(',')
if len(splitted_line[3]) != len(splitted_line[4]):
self.is_snp = False
self.INFO = self._build_info(splitted_line[7])
if not len(splitted_line) > 8:
return
self.gt_quals, self.gt_types = self._build_gt(splitted_line)
def _build_gt(self, var_info):
"""Build the genotype information
        Collapse the FORMAT field (column index 8) with each individual's genotype
        information, then retrieve the genotype call and genotype quality
        Args:
            var_info (list): A split variant line
Returns:
gt_quals, gt_types (list)
"""
        gt_map = {'0/0': 0, '0/1': 1, '1/1': 3}
gt_types = []
gt_quals = []
form = var_info[8].split(':')
for ind_info in var_info[9:]:
gt_info = dict(zip(form, ind_info.split(':')))
gq = 0
try:
gq = int(gt_info.get('GQ',0))
except Exception as err:
pass
gt_quals.append(gq)
genotype = gt_info.get('GT','./.')
if not genotype in gt_map:
gt_types.append(2)
continue
gt_types.append(gt_map[genotype])
return gt_quals, gt_types
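    # Worked example for _build_gt above (editor's note): with FORMAT "GT:GQ"
    # and a sample entry "0/1:99", gt_info becomes {"GT": "0/1", "GQ": "99"},
    # so gt_quals collects 99 and gt_types collects 1 (HET) via gt_map; any
    # call not present in gt_map (e.g. "./.") is recorded as 2.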
def _build_info(self, info):
"""Build a info dictionary from a info str and set self.INFO
Args:
info (str): Raw vcf info string
"""
info_dict = {}
if info == '.':
return info_dict
for value in info.split(';'):
vals = value.split('=')
if not len(vals) == 2:
info_dict[vals[0]] = True
continue
info_dict[vals[0]] = vals[1]
return info_dict
def __str__(self):
return self.variant_line
def __repr__(self):
return f"{self.CHROM}:{self.POS}:{self.gt_types}"
class HREC(object):
"""VCF header record"""
def __init__(self, hid, number=None, htype=None, description=None):
super(HREC, self).__init__()
self.id = hid
self.number = number
self.type = htype
self.description = description
def info(self):
"""Return the header info in a dictionary"""
return {
'ID': self.id,
'Number': self.number,
'Type': self.type,
'Description': self.description
}
class Vcf(object):
"""Implements a simple vcf parser that mimics parts of cyvcf2.VCF"""
def __init__(self, variant_file):
super(Vcf, self).__init__()
self.variant_file = iter(variant_file)
self.raw_header = []
self.samples = []
self._current_variant = None
self._header_keys = set()
self._initialize()
def _initialize(self):
self._parse_header()
def _parse_header(self):
"""docstring for _parse_header"""
line = '#'
while line.startswith('#'):
line = next(self.variant_file)
line = line.rstrip()
if line.startswith('#'):
self.raw_header.append(line)
if not line.startswith('##'):
splitted_line = line.split('\t')
if not len(splitted_line) > 9:
raise SyntaxError("No individuals in VCF")
self.samples = splitted_line[9:]
self._current_variant = line
def contains(self, key):
"""Check if the header contains key"""
if not self._header_keys:
for rec in self.header_iter():
self._header_keys.add(rec.id)
return key in self._header_keys
def header_iter(self):
"""Iterates over the header lines
Creates header records (HREC) for each of the INFO headers
Yields:
header_record (HREC)
"""
info_pattern = re.compile(r'''\#\#INFO=<
ID=(?P<id>[^,]+),
Number=(?P<number>-?\d+|\.|[AGR]),
Type=(?P<type>Integer|Float|Flag|Character|String),
Description="(?P<desc>[^"]*)"
>''', re.VERBOSE
)
for header in self.raw_header:
if header.startswith('##INFO'):
match = info_pattern.match(header)
if not match:
raise SyntaxError(f"One of the INFO lines is malformed:{header}")
header_record = HREC(match.group('id'), match.group('number'),
match.group('type'), match.group('desc'))
yield header_record
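    # Example of a header line accepted by the pattern above (editor's note):
    #   ##INFO=<ID=CSQ,Number=.,Type=String,Description="Consequence annotations from Ensembl VEP">
    # yields an HREC with id "CSQ", number "." and type "String".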
    def __next__(self):
        # Stop once the buffered line is exhausted; otherwise the last record
        # of the file would be dropped when the underlying read raises.
        if not self._current_variant:
            raise StopIteration
        current_variant = Variant(self._current_variant)
        self._current_variant = next(self.variant_file, '').rstrip()
        return current_variant
def __iter__(self):
return self
def __repr__(self):
return f"{self.__class__.__name__} ({self.samples})"
def check_samples(sids, proband, mother, father):
"""Check if proband, mother and father exists in vcf
Args:
sids (list): List of sample ids in VCF
proband (str): ID of proband in VCF
mother (str): ID of mother in VCF
father (str): ID of father in VCF
Returns:
        bool: True if all samples exist in the VCF
"""
if not all(elem in sids for elem in [proband, mother, father]):
return False
return True
def get_vcf(vcf_path, proband, mother, father):
"""Check and open a VCF
Args:
vcf_path (str)
proband (str): ID of proband in VCF
mother (str): ID of mother in VCF
father (str): ID of father in VCF
Returns:
vcf_reader (Vcf)
"""
vcf_handle = open_file(vcf_path)
vcf_reader = Vcf(vcf_handle)
if not check_samples(vcf_reader.samples, proband, mother, father):
raise SyntaxError("At least one of the given sample IDs do not exist in the VCF header")
return vcf_reader
def get_header_desc(reader, header_id):
"""Get description field of an header field ID
Args:
reader (cyvcf2.VCF)
header_id (str)
Returns:
str: Information from a vcf header description
"""
for rec in reader.header_iter():
d = rec.info()
if d.get('ID') == header_id:
return d.get('Description')
return None
def parse_CSQ_header(reader):
"""Parse the order of VEP fields from the vcf header
Args:
reader (Vcf)
Returns:
csq_format (list(str)): A list with the VEP header
"""
csq_str = get_header_desc(reader, "CSQ")
if not csq_str:
raise ValueError("CSQ header field missing. The VCF need to be annotated with VEP")
_, csq_format_str = csq_str.split('Format: ')
csq_format_str = csq_format_str.rstrip('"')
csq_format = csq_format_str.split('|')
return csq_format
def get_pop_AF(variant, vep_fields, af_tag):
"""Extract population frequency from VEP annotations.
Args:
variant (Variant)
vep_fields (list): Description of VEP annotation
af_tag (str): Name of AF field to parse
Returns:
freq (float): The annotated frequency, returns 0 if no data
"""
freq = 0
if vep_fields:
vep_data = variant.INFO['CSQ']
first_vep_str = vep_data.split(',')[0]
data = first_vep_str.split('|')
for i in range(len(data)):
if vep_fields[i] == af_tag:
freq = data[i]
else:
freq = variant.INFO.get(af_tag)
return float(freq or 0)
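# Illustrative usage sketch (editor's addition, not part of the original
# module). The command-line arguments and the "MAX_AF" tag are placeholders;
# the __main__ guard keeps normal imports of this module unaffected.
if __name__ == "__main__":
    import sys
    if len(sys.argv) == 5:
        vcf_path, proband, mother, father = sys.argv[1:5]
        reader = get_vcf(vcf_path, proband, mother, father)
        vep_fields = parse_CSQ_header(reader) if reader.contains("CSQ") else None
        for variant in reader:
            # get_pop_AF falls back to 0 when the frequency tag is absent
            print(variant.CHROM, variant.POS, get_pop_AF(variant, vep_fields, "MAX_AF"))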
| 29.650165 | 96 | 0.551647 |
79421f9252f11f73f1bc94d309eb57b41fdae8b0 | 311 | py | Python | data/multilingual/Tibt.DZO/Sun-ExtA_12/pdf_to_json_test_Tibt.DZO_Sun-ExtA_12.py | antoinecarme/pdf_to_json_tests | d57a024fde862e698d916a1178f285883d7a3b2f | [
"BSD-3-Clause"
] | 1 | 2021-09-19T19:47:35.000Z | 2021-09-19T19:47:35.000Z | data/multilingual/Tibt.DZO/Sun-ExtA_12/pdf_to_json_test_Tibt.DZO_Sun-ExtA_12.py | antoinecarme/pdf_to_json_tests | d57a024fde862e698d916a1178f285883d7a3b2f | [
"BSD-3-Clause"
] | null | null | null | data/multilingual/Tibt.DZO/Sun-ExtA_12/pdf_to_json_test_Tibt.DZO_Sun-ExtA_12.py | antoinecarme/pdf_to_json_tests | d57a024fde862e698d916a1178f285883d7a3b2f | [
"BSD-3-Clause"
] | null | null | null | import pdf_to_json as p2j
import json
url = "file:data/multilingual/Tibt.DZO/Sun-ExtA_12/udhr_Tibt.DZO_Sun-ExtA_12.pdf"
lConverter = p2j.pdf_to_json.pdf_to_json_converter()
lConverter.mImageHashOnly = True
lDict = lConverter.convert(url)
print(json.dumps(lDict, indent=4, ensure_ascii=False, sort_keys=True))
| 31.1 | 81 | 0.810289 |
79422042847793b99e8df17f8fa43fb4cb523d69 | 625 | py | Python | python_modules/dagster/dagster_tests/core_tests/test_log_capture.py | bitdotioinc/dagster | 4fe395a37b206b1a48b956fa5dd72bf698104cca | [
"Apache-2.0"
] | 1 | 2021-04-27T19:49:59.000Z | 2021-04-27T19:49:59.000Z | python_modules/dagster/dagster_tests/core_tests/test_log_capture.py | bitdotioinc/dagster | 4fe395a37b206b1a48b956fa5dd72bf698104cca | [
"Apache-2.0"
] | 7 | 2022-03-16T06:55:04.000Z | 2022-03-18T07:03:25.000Z | python_modules/dagster/dagster_tests/core_tests/test_log_capture.py | bitdotioinc/dagster | 4fe395a37b206b1a48b956fa5dd72bf698104cca | [
"Apache-2.0"
] | null | null | null | from __future__ import print_function
import sys
import pytest
from dagster.core.execution.compute_logs import (
mirror_stream_to_file,
should_disable_io_stream_redirect,
)
from dagster.utils.test import get_temp_file_name
@pytest.mark.skipif(
should_disable_io_stream_redirect(), reason="compute logs disabled for win / py3.6+"
)
def test_capture():
with get_temp_file_name() as capture_filepath:
with mirror_stream_to_file(sys.stdout, capture_filepath):
print("HELLO")
with open(capture_filepath, "r") as capture_stream:
assert "HELLO" in capture_stream.read()
| 26.041667 | 88 | 0.7456 |
7942211f9f06c445427327ae6e15eb95b7ae4584 | 1,604 | py | Python | server/api/services/tasks.py | JBris/dolphin_segmentation | b1d22293720c15038d9c521aed8e7b258d8409aa | [
"MIT"
] | 1 | 2021-05-09T05:40:53.000Z | 2021-05-09T05:40:53.000Z | server/api/services/tasks.py | JBris/dolphin_segmentation | b1d22293720c15038d9c521aed8e7b258d8409aa | [
"MIT"
] | null | null | null | server/api/services/tasks.py | JBris/dolphin_segmentation | b1d22293720c15038d9c521aed8e7b258d8409aa | [
"MIT"
] | null | null | null | import os
import json
from pathlib import Path
from decouple import config
class Tasks:
task_dir = config('TASK_DIR', default = '/home/app/system/tasks')
def create_file(self, task_id, url, data):
task_dict = { "id": task_id, "url": url, "status": "created" }
for key in data: task_dict[key] = data[key]
with open(f"{self.task_dir}/{data['name']}.json", 'w') as f:
try: json.dump(task_dict, f)
except: return False
return task_dict
def read_file(self, name):
with open(f"{self.task_dir}/{name}.json", 'r') as f:
try: return json.load(f)
except ValueError: return False
def write_file(self, data):
with open(f"{self.task_dir}/{data['name']}.json", 'w') as f:
try:
json.dump(data, f)
return data
except: return False
def process_directory(self, path):
path = Path(path)
files = path.glob('*')
file_list = []
for file in files:
classified_file = self.classify_file(str(file), file.stem, file.suffix.lower()[1:])
if classified_file: file_list.append(classified_file)
return file_list
def classify_file(self, path, name, ext):
if not os.path.isfile(path): return False
if ext != "json": return False
with open(path, 'r') as f:
try: data = json.load(f)
except ValueError: return False
return {"type": "json", "file": path, "name": name, "status": data.get("status").lower(), "id": data.get("id") }
| 34.12766 | 121 | 0.571696 |
794221ab76148f1fa88826d381702cb079686bbf | 14,319 | py | Python | model-optimizer/mo/middle/passes/infer_test.py | shinh/dldt | 693ab4e79a428e0801f17f4511b129a3fa8f4a62 | [
"Apache-2.0"
] | 1 | 2021-02-20T21:48:36.000Z | 2021-02-20T21:48:36.000Z | model-optimizer/mo/middle/passes/infer_test.py | erinpark33/dldt | edd86d090592f7779f4dbb2681546e1f4e81284f | [
"Apache-2.0"
] | null | null | null | model-optimizer/mo/middle/passes/infer_test.py | erinpark33/dldt | edd86d090592f7779f4dbb2681546e1f4e81284f | [
"Apache-2.0"
] | 1 | 2021-02-19T01:06:12.000Z | 2021-02-19T01:06:12.000Z | """
Copyright (c) 2018-2019 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import unittest
import numpy as np
from mo.front.common.partial_infer.concat import concat_infer
from mo.graph.graph import Node
from mo.middle.passes.infer import override_placeholder_shapes, partial_infer
from mo.utils.error import Error
from mo.utils.unittest.graph import build_graph
nodes_attributes = {'node_1': {'type': 'Identity', 'value': None, 'kind': 'op'},
'node_1_data': {'value': None, 'kind': 'data', 'data_type': None},
'node_2': {'type': 'Identity', 'value': None, 'kind': 'op'},
'concat': {'type': 'Concat', 'value': None, 'kind': 'op'},
'node_3': {'type': 'Identity', 'value': None, 'kind': 'op'},
'node_3_data': {'value': None, 'kind': 'data', 'data_type': None},
# Placeholders
'placeholder_1': {'shape': None, 'type': 'Input', 'kind': 'op', 'op': 'Placeholder'},
'placeholder_1_data': {'value': None, 'shape': None, 'kind': 'data', 'data_type': None},
'placeholder_2': {'shape': None, 'type': 'Input', 'kind': 'op', 'op': 'Placeholder'},
'pl_1': {'type': 'Placeholder', 'kind': 'op', 'op': 'Placeholder'},
'pl_1_data': {'value': None, 'kind': 'data', 'data_type': None},
'pl_2': {'type': 'Placeholder', 'kind': 'op', 'op': 'Placeholder'},
'pl_2_data': {'value': None, 'kind': 'data', 'data_type': None},
'placeholder_2_data': {'value': None, 'shape': None, 'kind': 'data', 'data_type': None},
# ScaleShift layer
'scaleshift_1': {'type': 'ScaleShift', 'kind': 'op', 'op': 'ScaleShift'},
'scaleshift_1_w': {'value': None, 'shape': None, 'kind': 'data'},
'scaleshift_1_b': {'value': None, 'shape': None, 'kind': 'data'},
'scaleshift_1_data': {'value': None, 'shape': None, 'kind': 'data'},
# Mul op
'mul_1': {'type': None, 'kind': 'op', 'op': 'Mul'},
'mul_1_w': {'value': None, 'shape': None, 'kind': 'data'},
'mul_1_data': {'value': None, 'shape': None, 'kind': 'data'},
'op_output': { 'kind': 'op', 'op': 'OpOutput', 'infer': lambda x: None}
}
class TestInferPass(unittest.TestCase):
def test_override_placeholder_shapes(self):
"""
Test for overriding shape in placeholder by shape from user_shapes.
"""
graph = build_graph(nodes_attributes,
[('node_1', 'node_2'),
('node_2', 'op_output')
],
{'node_2': {'shape': None},
'node_1': {'shape': np.array([1, 3, 227, 227]), 'op': 'Placeholder'}
},
nodes_with_edges_only=True)
ph_shape = np.array([1, 3, 224, 224])
user_dict = {'node_1': [{'shape': ph_shape}]}
override_placeholder_shapes(graph, user_dict)
res_shape = graph.node['node_1']['shape']
self.assertTrue(np.array_equal(ph_shape, res_shape))
def test_override_placeholder_no_shape(self):
"""
Test for case when user_shapes is not defined.
"""
graph = build_graph(nodes_attributes,
[('node_1', 'node_2'),
('node_2', 'op_output')
],
{'node_2': {'shape': None, 'op': 'Placeholder'},
'node_1': {'shape': np.array([1, 3, 227, 227]), 'op': 'Placeholder'}
},
nodes_with_edges_only=True)
out = override_placeholder_shapes(graph, None)
res_shape = graph.node['node_1']['shape']
placeholder_shape = np.array([1, 3, 227, 227])
self.assertIsNone(out)
self.assertTrue(np.array_equal(placeholder_shape, res_shape))
    def test_override_placeholder_shapes_no_rewrite(self):
"""
Test for case when user_shapes is not None, but it shouldn't rewrite shapes.
"""
graph = build_graph(nodes_attributes,
[('node_1', 'node_2'),
('node_2', 'op_output')
],
{'node_2': {'shape': None},
'node_1': {'shape': np.array([1, 3, 227, 227]), 'op': 'Placeholder'}
},
nodes_with_edges_only=True)
node_1_shape = np.array([1, 3, 227, 227])
user_dict = {'some_node': [{'shape': np.zeros((3))}]}
override_placeholder_shapes(graph, user_dict)
res_shape = graph.node['node_1']['shape']
self.assertTrue(np.array_equal(node_1_shape, res_shape))
def test_override_placeholder_shapes_dict(self):
graph = build_graph(nodes_attributes,
[('node_1', 'node_2'),
('node_2', 'op_output')
],
{'node_2': {'shape': None, 'op': 'Placeholder'},
'node_1': {'shape': np.array([1, 3, 227, 227]), 'op': 'Placeholder'}
},
nodes_with_edges_only=True)
placeholder_shape = np.array([1, 3, 224, 224])
user_shapes = {
'node_1': [{'shape': placeholder_shape}],
'node_2': [{'shape': placeholder_shape}],
}
override_placeholder_shapes(graph, user_shapes)
res_shape = graph.node['node_1']['shape']
res_shape2 = graph.node['node_2']['shape']
self.assertTrue(np.array_equal(placeholder_shape, res_shape))
self.assertTrue(np.array_equal(placeholder_shape, res_shape2))
nodes = {
'placeholder_1': {'name': 'placeholder_1', 'shape': [1, 2, 3, 4], 'type': 'Placeholder', 'value': None,
'kind': 'op', 'op': 'Placeholder'},
'placeholder_2': {'name': 'placeholder_2', 'shape': [5, 6, 7, 8], 'type': 'Placeholder', 'value': None,
'kind': 'op', 'op': 'Placeholder'},
'1': {'name': 'node_1', 'type': 'Identity', 'value': None, 'kind': 'op'},
'2': {'name': 'node_2', 'type': 'Identity', 'value': None, 'kind': 'op'},
'3': {'name': 'concat', 'type': 'Identity', 'value': None, 'kind': 'op'},
'4': {'name': 'output', 'type': 'SoftMax', 'value': None, 'kind': 'op'}
}
edges = [
('placeholder_1', '1'),
('1', '3'),
('placeholder_2', '2'),
('2', '3'),
('3', '4')
]
def test_override_placeholder_shapes_batch_is_not_set(self):
"""
Test case when batch is not set. (shapes shouldn't change)
"""
graph = build_graph(self.nodes, self.edges)
shapes = {}
batch = None
override_placeholder_shapes(graph, shapes, batch)
res_shape_1 = graph.node['placeholder_1']['shape']
res_shape_2 = graph.node['placeholder_2']['shape']
self.assertTrue(np.array_equal(self.nodes['placeholder_1']['shape'], res_shape_1))
self.assertTrue(np.array_equal(self.nodes['placeholder_2']['shape'], res_shape_2))
def test_override_placeholder_shapes_real_inputs_and_batch(self):
"""
Test case when batch is set and shapes should overwrite by user shapes.
"""
graph = build_graph(self.nodes, self.edges)
shapes = {'placeholder_1': [{'shape': np.array([1, 2, 3, 4])}],
'placeholder_2': [{'shape': np.array([1, 5, 6, 7])}]}
batch = 4
override_placeholder_shapes(graph, shapes, batch)
res_shape_1 = graph.node['placeholder_1']['shape']
res_shape_2 = graph.node['placeholder_2']['shape']
self.assertTrue(np.array_equal(res_shape_1, np.array([4, 2, 3, 4])))
self.assertTrue(np.array_equal(res_shape_2, np.array([4, 5, 6, 7])))
def test_override_placeholder_shapes_real_inputs_and_batch_2(self):
"""
Test case when batch is set, but shapes in user_shapes is None.
"""
graph = build_graph(self.nodes, self.edges)
shapes = {'placeholder_1': [{'shape': None}], 'placeholder_2': [{'shape': None}]}
batch = 4
graph.node['placeholder_2']['shape'] = np.array([1, 2, 3, 4])
graph.node['placeholder_2']['shape'] = np.array([1, 5, 6, 7])
override_placeholder_shapes(graph, shapes, batch)
np.testing.assert_array_equal(graph.node['placeholder_1']['shape'], np.array([4, 2, 3, 4]))
np.testing.assert_array_equal(graph.node['placeholder_2']['shape'], np.array([4, 5, 6, 7]))
def test_partial_infer(self):
graph = build_graph(nodes_attributes,
[('node_1', 'concat'),
('node_2', 'concat'),
('concat', 'node_3'),
('node_3', 'op_output')
],
{'node_3': {'kind': 'data', 'shape': None, 'infer': None},
'node_1': {'kind': 'data', 'shape': np.array([1, 3, 227, 227]), 'infer': None},
'node_2': {'kind': 'data', 'shape': np.array([1, 3, 227, 227]), 'infer': None},
'concat': {'kind': 'op', 'axis': 2, 'infer': concat_infer}
},
nodes_with_edges_only=True)
start_node = 'concat'
partial_infer(graph, start_node)
node = Node(graph, start_node)
self.assertTrue(node.is_partial_inferred)
self.assertTrue(node.out_node().is_partial_inferred)
# check if previous nodes are not inferred
node = Node(graph, start_node)
while True:
# collect nodes in a list
if isinstance(node.in_nodes(), list):
in_nodes = node.in_nodes()
else:
in_nodes = [y for x, y in node.in_nodes().items()]
# check parents and find next parent
for n in in_nodes:
if 'embedded_input_' not in n.id:
node = n
self.assertFalse(n.has('is_partial_inferred'))
if not len(in_nodes):
break
def test_partial_infer_no_shape(self):
graph = build_graph(nodes_attributes,
[('node_1', 'node_2'),
('node_2', 'op_output')
],
{'node_2': {'shape': None, 'infer': None},
'node_1': {'shape': None, 'infer': None}
},
nodes_with_edges_only=True)
self.assertRaises(Error, partial_infer, graph, 'node_1')
def test_partial_infer_cycle(self):
graph = build_graph(nodes_attributes,
[('node_1', 'concat'),
('node_2', 'concat'),
('concat', 'node_3'),
('node_3', 'concat'),
('node_3', 'op_output')
],
{'node_3': {'kind': 'data', 'shape': None, 'infer': None},
'node_1': {'kind': 'data', 'shape': np.array([1, 3, 227, 227]), 'infer': None},
'node_2': {'kind': 'data', 'shape': np.array([1, 3, 227, 227]), 'infer': None},
'concat': {'kind': 'op', 'axis': 2, 'infer': concat_infer}
},
nodes_with_edges_only=True)
start_node = 'concat'
self.assertRaises(Error, partial_infer, graph, start_node)
class CycleTest(unittest.TestCase):
def test_is_not_fully_inferred_param(self):
        # Node that has is_not_fully_inferred=True
graph = build_graph(nodes_attributes,
[('node_1', 'concat'),
('node_2', 'concat'),
('concat', 'node_3'),
('node_3', 'op_output')
],
{'node_3': {'kind': 'data', 'shape': None, 'infer': None},
'node_1': {'kind': 'data', 'shape': np.array([1, 3, 227, 227]), 'infer': None},
'node_2': {'kind': 'data', 'shape': np.array([1, 3, 227, 227]), 'infer': None},
'concat': {'kind': 'op', 'axis': 2, 'infer': concat_infer, 'is_not_fully_inferred': True}
},
nodes_with_edges_only=True)
start_node = 'concat'
try:
partial_infer(graph, start_node)
except Error:
self.fail("Unexpected Error raised")
node = Node(graph, start_node)
self.assertTrue(node.is_partial_inferred)
self.assertTrue(node.out_node().is_partial_inferred)
def test_for_is_cyclic1(self):
# Test for case of cyclic graph without is_cyclic attrs
graph = build_graph(nodes_attributes,
[('node_1', 'node_1_data'),
('node_1_data', 'node_3'),
('node_3', 'node_3_data'),
('node_3_data', 'node_1')],
nodes_with_edges_only=True)
with self.assertRaisesRegex(Error, 'Graph contains a cycle. Can not proceed.*'):
partial_infer(graph)
| 48.704082 | 118 | 0.502898 |
794221af10a6b547bc96b4b0d0757e70beadaf5b | 5,418 | py | Python | Demo/scripts/eqfix.py | AtjonTV/Python-1.4 | 2a80562c5a163490f444181cb75ca1b3089759ec | [
"Unlicense",
"TCL",
"DOC",
"AAL",
"X11"
] | null | null | null | Demo/scripts/eqfix.py | AtjonTV/Python-1.4 | 2a80562c5a163490f444181cb75ca1b3089759ec | [
"Unlicense",
"TCL",
"DOC",
"AAL",
"X11"
] | null | null | null | Demo/scripts/eqfix.py | AtjonTV/Python-1.4 | 2a80562c5a163490f444181cb75ca1b3089759ec | [
"Unlicense",
"TCL",
"DOC",
"AAL",
"X11"
] | null | null | null | #! /usr/local/bin/python
# Fix Python source files to use the new equality test operator, i.e.,
# if x = y: ...
# is changed to
# if x == y: ...
# The script correctly tokenizes the Python program to reliably
# distinguish between assignments and equality tests.
#
# Command line arguments are files or directories to be processed.
# Directories are searched recursively for files whose name looks
# like a python module.
# Symbolic links are always ignored (except as explicit directory
# arguments). Of course, the original file is kept as a back-up
# (with a "~" attached to its name).
# It complains about binaries (files containing null bytes)
# and about files that are ostensibly not Python files: if the first
# line starts with '#!' and does not contain the string 'python'.
#
# Changes made are reported to stdout in a diff-like format.
#
# Undoubtedly you can do this using find and sed or perl, but this is
# a nice example of Python code that recurses down a directory tree
# and uses regular expressions. Also note several subtleties like
# preserving the file's mode and avoiding to even write a temp file
# when no changes are needed for a file.
#
# NB: by changing only the function fixline() you can turn this
# into a program for a different change to Python programs...
import sys
import regex
import os
from stat import *
import string
err = sys.stderr.write
dbg = err
rep = sys.stdout.write
def main():
bad = 0
if not sys.argv[1:]: # No arguments
err('usage: ' + sys.argv[0] + ' file-or-directory ...\n')
sys.exit(2)
for arg in sys.argv[1:]:
if os.path.isdir(arg):
if recursedown(arg): bad = 1
elif os.path.islink(arg):
err(arg + ': will not process symbolic links\n')
bad = 1
else:
if fix(arg): bad = 1
sys.exit(bad)
ispythonprog = regex.compile('^[a-zA-Z0-9_]+\.py$')
def ispython(name):
return ispythonprog.match(name) >= 0
def recursedown(dirname):
dbg('recursedown(' + `dirname` + ')\n')
bad = 0
try:
names = os.listdir(dirname)
except os.error, msg:
err(dirname + ': cannot list directory: ' + `msg` + '\n')
return 1
names.sort()
subdirs = []
for name in names:
if name in (os.curdir, os.pardir): continue
fullname = os.path.join(dirname, name)
if os.path.islink(fullname): pass
elif os.path.isdir(fullname):
subdirs.append(fullname)
elif ispython(name):
if fix(fullname): bad = 1
for fullname in subdirs:
if recursedown(fullname): bad = 1
return bad
def fix(filename):
## dbg('fix(' + `filename` + ')\n')
try:
f = open(filename, 'r')
except IOError, msg:
err(filename + ': cannot open: ' + `msg` + '\n')
return 1
head, tail = os.path.split(filename)
tempname = os.path.join(head, '@' + tail)
g = None
# If we find a match, we rewind the file and start over but
# now copy everything to a temp file.
lineno = 0
while 1:
line = f.readline()
if not line: break
lineno = lineno + 1
if g is None and '\0' in line:
# Check for binary files
err(filename + ': contains null bytes; not fixed\n')
f.close()
return 1
if lineno == 1 and g is None and line[:2] == '#!':
# Check for non-Python scripts
words = string.split(line[2:])
if words and regex.search('[pP]ython', words[0]) < 0:
msg = filename + ': ' + words[0]
msg = msg + ' script; not fixed\n'
err(msg)
f.close()
return 1
while line[-2:] == '\\\n':
nextline = f.readline()
if not nextline: break
line = line + nextline
lineno = lineno + 1
newline = fixline(line)
if newline != line:
if g is None:
try:
g = open(tempname, 'w')
except IOError, msg:
f.close()
err(tempname+': cannot create: '+\
`msg`+'\n')
return 1
f.seek(0)
lineno = 0
rep(filename + ':\n')
continue # restart from the beginning
rep(`lineno` + '\n')
rep('< ' + line)
rep('> ' + newline)
if g is not None:
g.write(newline)
# End of file
f.close()
if not g: return 0 # No changes
# Finishing touch -- move files
# First copy the file's mode to the temp file
try:
statbuf = os.stat(filename)
os.chmod(tempname, statbuf[ST_MODE] & 07777)
except os.error, msg:
err(tempname + ': warning: chmod failed (' + `msg` + ')\n')
# Then make a backup of the original file as filename~
try:
os.rename(filename, filename + '~')
except os.error, msg:
err(filename + ': warning: backup failed (' + `msg` + ')\n')
# Now move the temp file to the original file
try:
os.rename(tempname, filename)
except os.error, msg:
err(filename + ': rename failed (' + `msg` + ')\n')
return 1
	# Return success
return 0
from tokenize import tokenprog
match = {'if':':', 'elif':':', 'while':':', 'return':'\n', \
'(':')', '[':']', '{':'}', '`':'`'}
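# Note on the table above (editor's addition): each opening token maps to the
# token that closes it.  fixline() keeps a stack of expected closers and only
# rewrites '=' to '==' while that stack is non-empty.  For example, in
#   if a[i] = f(x):
# the stack holds [':'] after 'if' (and briefly [':', ']'] inside the
# subscript), so the '=' is rewritten, whereas a top-level "a = b" statement
# is left untouched.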
def fixline(line):
# Quick check for easy case
if '=' not in line: return line
i, n = 0, len(line)
stack = []
while i < n:
j = tokenprog.match(line, i)
if j < 0:
# A bad token; forget about the rest of this line
print '(Syntax error:)'
print line,
return line
a, b = tokenprog.regs[3] # Location of the token proper
token = line[a:b]
i = i+j
if stack and token == stack[-1]:
del stack[-1]
elif match.has_key(token):
stack.append(match[token])
elif token == '=' and stack:
line = line[:a] + '==' + line[b:]
i, n = a + len('=='), len(line)
elif token == '==' and not stack:
print '(Warning: \'==\' at top level:)'
print line,
return line
main()
| 27.09 | 70 | 0.638612 |
794221c1f04a453aebf4e5c8139d0e642e66248f | 1,545 | py | Python | conftest.py | eric-bonfadini/dask | c2278fece0d4fb4af1e63b6ca26e6a90f63b0fc3 | [
"BSD-3-Clause"
] | null | null | null | conftest.py | eric-bonfadini/dask | c2278fece0d4fb4af1e63b6ca26e6a90f63b0fc3 | [
"BSD-3-Clause"
] | null | null | null | conftest.py | eric-bonfadini/dask | c2278fece0d4fb4af1e63b6ca26e6a90f63b0fc3 | [
"BSD-3-Clause"
] | null | null | null | import pytest
# The doctests in these files fail due to either:
# - Non-required dependencies not being installed
# - Imported doctests due to pulling the docstrings from other packages
# (e.g. `numpy`). No need to run these doctests.
collect_ignore = [
"dask/bytes/hdfs3.py",
"dask/bytes/pyarrow.py",
"dask/bytes/s3.py",
"dask/array/ghost.py",
"dask/array/fft.py",
"dask/dataframe/io/io.py",
"dask/dataframe/io/parquet/arrow.py",
"dask/dot.py",
]
collect_ignore_glob = []
try:
import numpy # noqa: F401
except ImportError:
collect_ignore_glob.append("dask/array/*")
try:
import pandas # noqa: F401
except ImportError:
collect_ignore_glob.append("dask/dataframe/*")
try:
import scipy # noqa: F401
except ImportError:
collect_ignore.append("dask/array/stats.py")
try:
import pyarrow # noqa: F401
except ImportError:
collect_ignore.append("dask/dataframe/io/orc/arrow.py")
try:
import tiledb # noqa: F401
except ImportError:
collect_ignore.append("dask/array/tiledb_io.py")
try:
import sqlalchemy # noqa: F401
except ImportError:
collect_ignore.append("dask/dataframe/io/sql.py")
def pytest_addoption(parser):
parser.addoption("--runslow", action="store_true", help="run slow tests")
def pytest_runtest_setup(item):
if "slow" in item.keywords and not item.config.getoption("--runslow"):
pytest.skip("need --runslow option to run")
pytest.register_assert_rewrite(
"dask.array.utils", "dask.dataframe.utils", "dask.bag.utils"
)
| 24.919355 | 77 | 0.702265 |
794221c8987fd441f8e5e4242a3f1e09c2a96911 | 17,469 | py | Python | coremltools/converters/mil/mil/ops/defs/elementwise_binary.py | tonybove-apple/coremltools | 22a8877beec7bad136ba5612d5aacd8e323ecdfc | [
"BSD-3-Clause"
] | 2,740 | 2017-10-03T23:19:01.000Z | 2022-03-30T15:16:39.000Z | coremltools/converters/mil/mil/ops/defs/elementwise_binary.py | tonybove-apple/coremltools | 22a8877beec7bad136ba5612d5aacd8e323ecdfc | [
"BSD-3-Clause"
] | 1,057 | 2017-10-05T22:47:01.000Z | 2022-03-31T23:51:15.000Z | coremltools/converters/mil/mil/ops/defs/elementwise_binary.py | tonybove-apple/coremltools | 22a8877beec7bad136ba5612d5aacd8e323ecdfc | [
"BSD-3-Clause"
] | 510 | 2017-10-04T19:22:28.000Z | 2022-03-31T12:16:52.000Z | # Copyright (c) 2020, Apple Inc. All rights reserved.
#
# Use of this source code is governed by a BSD-3-clause license that can be
# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause
import numpy as np
import operator
from ._op_reqs import *
from ._utils import promoted_primitive_type, broadcast_shapes
class elementwise_binary(Operation):
"""
Elementwise Binary Op Superclass
"""
input_spec = InputSpec(x=ScalarOrTensorInputType(), y=ScalarOrTensorInputType(),)
def __init__(self, **kwargs):
super(elementwise_binary, self).__init__(**kwargs)
def type_inference(self):
typea = self.x.sym_type
typeb = self.y.sym_type
primitive_type = promoted_primitive_type(typea, typeb)
if primitive_type is None:
raise ValueError("Incompatible primitive types in broadcast operation")
primitive_type = self.get_dtype(primitive_type)
# broadcast
if not types.is_tensor(typea) and not types.is_tensor(typeb):
# both typea and typeb are not tensors
return primitive_type
if types.is_tensor(typea) and not types.is_tensor(typeb):
# a is tensor, b is not
return types.tensor(primitive_type, typea.get_shape())
if not types.is_tensor(typea) and types.is_tensor(typeb):
# a is not tensor, b is
return types.tensor(primitive_type, typeb.get_shape())
# both a, b are tensors
shapea = list(typea.get_shape())
shapeb = list(typeb.get_shape())
ret_shape = broadcast_shapes(shapea, shapeb)
return types.tensor(primitive_type, ret_shape)
@precondition(allow=VALUE)
def value_inference(self):
return self._cast_check_value_inferene(self.x.val, self.y.val)
def get_operator(self):
"""
All subclasses have to implement this.
"""
raise NotImplementedError()
def get_dtype(self, promoted_dtype):
"""
Override if output primitive type is different from input types
(e.g., less, greater)
"""
return promoted_dtype
def _cast_check_value_inferene(self, a, b):
"""
If one of the input is tensor, cast the result to tensor.
"""
to_cast = any([isinstance(x, np.ndarray) for x in [a, b]])
result = self.get_operator()(a, b)
return result if not to_cast else np.array(result)
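# A worked example of the broadcast rule in elementwise_binary.type_inference
# above (editor's note): for x of shape [2, 1, 4] and y of shape [3, 1], the
# shapes are right-aligned and broadcast to [2, 3, 4]; the output primitive
# type is the promoted input type, except for the comparison/logical ops
# below, whose get_dtype() override forces bool.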
"""
Elementwise Binary Op Implementation(s)
"""
@register_op(doc_str="")
class add(elementwise_binary):
"""
Return ``x + y`` element-wise with
`broadcasting <https://docs.scipy.org/doc/numpy/user/basics.broadcasting.html>`_.
Parameters
----------
x: <\*,T> (Required)
* Shape must be compatible with ``y`` in broadcast.
y: <\*,T> (Required)
* Shape must be compatible with ``x`` in broadcast.
Returns
-------
<\*,T>
Attributes
----------
T: fp32
"""
def __init__(self, **kwargs):
super(add, self).__init__(**kwargs)
def get_operator(self):
return operator.add
@register_op(doc_str="")
class equal(elementwise_binary):
"""
Return the truth value of ``x == y`` element-wise with
`broadcasting <https://docs.scipy.org/doc/numpy/user/basics.broadcasting.html>`_
(``1`` for true, ``0`` for false in numeric domain).
Parameters
----------
x: <\*,T> (Required)
* Shape must be compatible with ``y`` in broadcast.
y: <\*,T> (Required)
* Shape must be compatible with ``x`` in broadcast.
Returns
-------
<\*, bool>
* A boolean tensor with the same shape as the inputs.
Attributes
----------
T: fp32
"""
def __init__(self, **kwargs):
super(equal, self).__init__(**kwargs)
def get_operator(self):
return np.equal
def get_dtype(self, promoted_dtype):
return types.bool
@register_op(doc_str="")
class floor_div(elementwise_binary):
"""
Return ``x / y`` element-wise with
`broadcasting <https://docs.scipy.org/doc/numpy/user/basics.broadcasting.html>`_,
rounded towards negative infinity.
Parameters
----------
x: tensor<\*, T> (Required)
* Shape must be compatible with ``y`` in broadcast.
y: tensor<\*, T> (Required)
* Shape must be compatible with ``x`` in broadcast.
Returns
-------
tensor<\*, T>
* A tensor of the same type and shape as the inputs.
Attributes
----------
T: fp32
"""
def __init__(self, **kwargs):
super(floor_div, self).__init__(**kwargs)
def get_operator(self):
return operator.floordiv
@register_op(doc_str="")
class greater(elementwise_binary):
"""
Return the truth value of ``x > y`` element-wise with
`broadcasting <https://docs.scipy.org/doc/numpy/user/basics.broadcasting.html>`_
(``1`` for true, ``0`` for false in numeric domain).
Parameters
----------
x: tensor<\*, T> (Required)
* Shape must be compatible with ``y`` in broadcast.
y: tensor<\*, T> (Required)
* Shape must be compatible with ``x`` in broadcast.
Returns
-------
tensor<\*, bool>
* A boolean tensor with the same shape as the inputs.
Attributes
----------
T: fp32
"""
def __init__(self, **kwargs):
super(greater, self).__init__(**kwargs)
def get_operator(self):
return operator.gt
def get_dtype(self, promoted_dtype):
return types.bool
@register_op(doc_str="")
class greater_equal(elementwise_binary):
"""
Return the truth value of ``x >= y`` element-wise with
`broadcasting <https://docs.scipy.org/doc/numpy/user/basics.broadcasting.html>`_
(``1`` for true, ``0`` for false in numeric domain).
Parameters
----------
x: tensor<\*, T> (Required)
* Shape must be compatible with ``y`` in broadcast.
y: tensor<\*, T> (Required)
* Shape must be compatible with ``x`` in broadcast.
Returns
-------
tensor<\*?, bool>
* A boolean tensor with the same shape as the inputs.
Attributes
----------
T: fp32
"""
def __init__(self, **kwargs):
super(greater_equal, self).__init__(**kwargs)
def get_operator(self):
return operator.ge
def get_dtype(self, promoted_dtype):
return types.bool
@register_op(doc_str="")
class less(elementwise_binary):
"""
Return the truth value of ``x < y`` element-wise with
`broadcasting <https://docs.scipy.org/doc/numpy/user/basics.broadcasting.html>`_
(``1`` for true, ``0`` for false in numeric domain).
Parameters
----------
x: tensor<\*, T> (Required)
* Shape must be compatible with ``y`` in broadcast.
y: tensor<\*, T> (Required)
* Shape must be compatible with ``x`` in broadcast.
Returns
-------
tensor<\*?, bool>
* A boolean tensor with the same shape as the inputs.
Attributes
----------
T: fp32
"""
def __init__(self, **kwargs):
super(less, self).__init__(**kwargs)
def get_operator(self):
return operator.lt
def get_dtype(self, promoted_dtype):
return types.bool
@register_op(doc_str="")
class less_equal(elementwise_binary):
"""
Return the truth value of ``x <= y`` element-wise with
`broadcasting <https://docs.scipy.org/doc/numpy/user/basics.broadcasting.html>`_
(``1`` for true, ``0`` for false in numeric domain).
Parameters
----------
x: tensor<\*, T> (Required)
* Shape must be compatible with ``y`` in broadcast.
y: tensor<\*, T> (Required)
* Shape must be compatible with ``x`` in broadcast.
Returns
-------
tensor<\*?, bool>
* A boolean tensor with the same shape as the inputs.
Attributes
----------
T: fp32
"""
def __init__(self, **kwargs):
super(less_equal, self).__init__(**kwargs)
def get_operator(self):
return operator.le
def get_dtype(self, promoted_dtype):
return types.bool
@register_op(doc_str="")
class logical_and(elementwise_binary):
"""
Return the truth value of ``x AND y`` element-wise with
`broadcasting <https://docs.scipy.org/doc/numpy/user/basics.broadcasting.html>`_
(``1`` for true, ``0`` for false in numeric domain). A numeric value ``t`` is
evaluated to true if ``t != 0``.
Parameters
----------
x: tensor<\*, T> (Required)
* Shape must be compatible with ``y`` in broadcast.
y: tensor<\*, T> (Required)
* Shape must be compatible with ``x`` in broadcast.
Returns
-------
tensor<\*?, bool>
* A boolean tensor with the same shape as the inputs.
Attributes
----------
T: fp32
"""
def __init__(self, **kwargs):
super(logical_and, self).__init__(**kwargs)
def get_operator(self):
return np.logical_and
def get_dtype(self, promoted_dtype):
return types.bool
@register_op(doc_str="")
class logical_or(elementwise_binary):
"""
Return the truth value of ``x OR y`` element-wise with
`broadcasting <https://docs.scipy.org/doc/numpy/user/basics.broadcasting.html>`_
(``1`` for true, ``0`` for false in numeric domain). A numeric value ``t`` is
evaluated to true if ``t != 0``.
Parameters
----------
x: tensor<\*, T> (Required)
* Shape must be compatible with ``y`` in broadcast.
y: tensor<\*, T> (Required)
* Shape must be compatible with ``x`` in broadcast.
Returns
-------
tensor<\*?, bool>
* A boolean tensor with the same shape as the inputs.
Attributes
----------
T: fp32
"""
def __init__(self, **kwargs):
super(logical_or, self).__init__(**kwargs)
def get_operator(self):
return np.logical_or
def get_dtype(self, promoted_dtype):
return types.bool
@register_op(doc_str="")
class logical_xor(elementwise_binary):
"""
Return the truth value of ``x XOR y`` element-wise with
`broadcasting <https://docs.scipy.org/doc/numpy/user/basics.broadcasting.html>`_
(``1`` for true, ``0`` for false in numeric domain). A numeric value ``t`` is
evaluated to true if ``t != 0``.
Parameters
----------
x: tensor<\*, T> (Required)
* Shape must be compatible with ``y`` in broadcast.
y: tensor<\*, T> (Required)
* Shape must be compatible with ``x`` in broadcast.
Returns
-------
tensor<\*?, bool>
* A boolean tensor with the same shape as the inputs.
Attributes
----------
T: fp32
"""
def __init__(self, **kwargs):
super(logical_xor, self).__init__(**kwargs)
def get_operator(self):
return np.logical_xor
def get_dtype(self, promoted_dtype):
return types.bool
@register_op(doc_str="")
class maximum(elementwise_binary):
"""
Return ``x > y ? x : y`` element-wise with
`broadcasting <https://docs.scipy.org/doc/numpy/user/basics.broadcasting.html>`_.
Parameters
----------
x: tensor<\*, T> (Required)
* Shape must be compatible with ``y`` in broadcast.
y: tensor<\*, T> (Required)
* Shape must be compatible with ``x`` in broadcast.
Returns
-------
    tensor<\*?, T>
        * A tensor with the same type and shape as the inputs.
Attributes
----------
T: fp32
"""
def __init__(self, **kwargs):
super(maximum, self).__init__(**kwargs)
def get_operator(self):
return np.maximum
@register_op(doc_str="")
class minimum(elementwise_binary):
"""
Return ``x > y ? y : x`` element-wise with
`broadcasting <https://docs.scipy.org/doc/numpy/user/basics.broadcasting.html>`_.
Parameters
----------
x: tensor<\*, T> (Required)
* Shape must be compatible with ``y`` in broadcast.
y: tensor<\*, T> (Required)
* Shape must be compatible with ``x`` in broadcast.
Returns
-------
    tensor<\*?, T>
        * A tensor with the same type and shape as the inputs.
Attributes
----------
T: fp32
"""
def __init__(self, **kwargs):
super(minimum, self).__init__(**kwargs)
def get_operator(self):
return np.minimum
@register_op(doc_str="")
class mod(elementwise_binary):
"""
Return ``x % y`` element-wise with
`broadcasting <https://docs.scipy.org/doc/numpy/user/basics.broadcasting.html>`_.
Parameters
----------
x: tensor<\*, T> (Required)
* Shape must be compatible with ``y`` in broadcast.
y: tensor<\*, T> (Required)
* Shape must be compatible with ``x`` in broadcast.
Returns
-------
    tensor<\*?, T>
        * A tensor with the same type and shape as the inputs.
Attributes
----------
T: fp32
"""
def __init__(self, **kwargs):
super(mod, self).__init__(**kwargs)
def get_operator(self):
return operator.mod
@register_op(doc_str="")
class mul(elementwise_binary):
"""
Return ``x * y`` element-wise with
`broadcasting <https://docs.scipy.org/doc/numpy/user/basics.broadcasting.html>`_.
Parameters
----------
x: tensor<\*, T> (Required)
* Shape must be compatible with ``y`` in broadcast.
y: tensor<\*, T> (Required)
* Shape must be compatible with ``x`` in broadcast.
Returns
-------
    tensor<\*?, T>
        * A tensor with the same type and shape as the inputs.
Attributes
----------
T: fp32
"""
def __init__(self, **kwargs):
super(mul, self).__init__(**kwargs)
def get_operator(self):
return operator.mul
@register_op(doc_str="")
class not_equal(elementwise_binary):
"""
Return the truth value of ``x != y`` element-wise with
`broadcasting <https://docs.scipy.org/doc/numpy/user/basics.broadcasting.html>`_
(``1`` for true, ``0`` for false in numeric domain).
Parameters
----------
x: tensor<\*, T> (Required)
* Shape must be compatible with ``y`` in broadcast.
y: tensor<\*, T> (Required)
* Shape must be compatible with ``x`` in broadcast.
Returns
-------
tensor<\*?, bool>
* A boolean tensor with the same shape as the inputs.
Attributes
----------
T: fp32
"""
def __init__(self, **kwargs):
super(not_equal, self).__init__(**kwargs)
def get_operator(self):
return operator.ne
def get_dtype(self, promoted_dtype):
return types.bool
@register_op(doc_str="")
class real_div(elementwise_binary):
"""
Return ``x / y`` element-wise with
`broadcasting <https://docs.scipy.org/doc/numpy/user/basics.broadcasting.html>`_.
Parameters
----------
x: tensor<\*, T> (Required)
* Shape must be compatible with ``y`` in broadcast.
y: tensor<\*, T> (Required)
* Shape must be compatible with ``x`` in broadcast.
Returns
-------
tensor<\*?, T>
* A tensor with the same type and shape as the inputs.
Attributes
----------
T: fp32
"""
def __init__(self, **kwargs):
# TODO(rdar://79925291): Allow int32 input to floor_div
from coremltools.converters.mil.mil import Builder as mb
from coremltools.converters.mil.mil import types
accepted_types = [types.fp32, types.fp16]
for input_name in ["x", "y"]:
if kwargs[input_name].dtype not in accepted_types:
kwargs[input_name] = mb.cast(x=kwargs[input_name], dtype="fp32")
super(real_div, self).__init__(**kwargs)
def get_operator(self):
return operator.truediv
@register_op(doc_str="")
class pow(elementwise_binary):
"""
Return ``x ^ y`` element-wise with
`broadcasting <https://docs.scipy.org/doc/numpy/user/basics.broadcasting.html>`_.
Parameters
----------
x: tensor<\*, T> (Required)
* Shape must be compatible with ``y`` in broadcast.
y: tensor<\*, T> (Required)
* Shape must be compatible with ``x`` in broadcast.
Returns
-------
    tensor<\*?, T>
        * A tensor with the same type and shape as the inputs.
Attributes
----------
T: fp32
"""
def __init__(self, **kwargs):
super(pow, self).__init__(**kwargs)
def get_operator(self):
return operator.pow
@register_op(doc_str="")
class sub(elementwise_binary):
"""
Return ``x - y`` element-wise with
`broadcasting <https://docs.scipy.org/doc/numpy/user/basics.broadcasting.html>`_.
Parameters
----------
x: tensor<\*, T> (Required)
* Shape must be compatible with ``y`` in broadcast.
y: tensor<\*, T> (Required)
* Shape must be compatible with ``x`` in broadcast.
Returns
-------
    tensor<\*?, T>
        * A tensor with the same type and shape as the inputs.
Attributes
----------
T: fp32
"""
def __init__(self, **kwargs):
super(sub, self).__init__(**kwargs)
def get_operator(self):
return operator.sub
| 25.61437 | 85 | 0.584063 |
794221f0a13e475705f428fb5e577ff5049dc297 | 11,779 | py | Python | deriva/config/annotation_config.py | emirdad/deriva-py | 3d5302af0ff15be53df3b71a671c529a2ce10050 | [
"Apache-2.0"
] | 3 | 2018-11-18T19:33:53.000Z | 2019-10-03T18:27:49.000Z | deriva/config/annotation_config.py | emirdad/deriva-py | 3d5302af0ff15be53df3b71a671c529a2ce10050 | [
"Apache-2.0"
] | 81 | 2017-06-13T18:46:47.000Z | 2022-01-13T01:16:33.000Z | Lib/site-packages/deriva/config/annotation_config.py | fochoao/cpython | 3dc84b260e5bced65ebc2c45c40c8fa65f9b5aa9 | [
"bzip2-1.0.6",
"0BSD"
] | 4 | 2018-06-25T18:23:33.000Z | 2021-01-15T19:38:52.000Z | import sys
import json
import re
from deriva.core import ErmrestCatalog, AttrDict, ermrest_model, get_credential
from deriva.config.base_config import BaseSpec, BaseSpecList, ConfigUtil, ConfigBaseCLI
if sys.version_info > (3,):
unicode = str
MY_VERSION = 0.99
class NoForeignKeyError(ValueError):
pass
class AttrSpecList(BaseSpecList):
SPEC_TYPES = ["catalog_annotations", "schema_annotations", "table_annotations", "column_annotations",
"foreign_key_annotations"]
def __init__(self, known_attrs, specdict, strict=False):
self.ignore_unmanaged = False
self.managed_annotations = self.annotation_list(known_attrs.get(u'managed'))
if self.managed_annotations is None:
raise ValueError("No 'managed' attribute list")
if known_attrs.get(u'ignore_all_unmanaged'):
self.ignore_unmanaged = True
self.ignored_annotations = self.annotation_list(known_attrs.get(u'ignored'))
if self.ignored_annotations is None:
self.ignored_annotations = []
# dictlist = dictlist + [{"managed_annotations": self.managed_annotations}, {"ignored_annotations": self.ignored_annotations}, {"ignore_all_unmanaged": self.ignore_unmanaged}]
BaseSpecList.__init__(self, AttrSpec, specdict, strict)
def annotation_list(self, orig_list):
if orig_list is None:
return None
new = []
for item in orig_list:
new.append(unicode(item))
return new
def add_list(self, dictlist):
for d in dictlist:
if len(d) > 0:
s = AttrSpec(d, self.managed_annotations, self.ignore_unmanaged, self.ignored_annotations)
self.add_spec(s)
class AttrSpec(BaseSpec):
def __init__(self, specdict, managed_annotations, ignore_unmanaged, ignored_annotations):
BaseSpec.__init__(self, specdict, ["uri", "value"], "attributes", ignore_unmanaged)
self.ignore_unmanaged = ignore_unmanaged
self.managed_annotations = managed_annotations
self.ignored_annotations = ignored_annotations
self.known_annotations = self.managed_annotations + self.ignored_annotations
self.validate_annotation()
def validate_annotation(self):
return self.specdict.get("uri") in self.managed_annotations
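# Minimal sketch of the JSON config consumed by AttrConfig below (editor's
# note, inferred from the parsing code in this module; per-spec matching
# fields handled by BaseSpec, such as schema/table selectors, are omitted
# here and the annotation URI shown is only an example):
#
# {
#   "known_attributes": {
#     "managed": ["tag:isrd.isi.edu,2016:visible-columns"],
#     "ignored": [],
#     "ignore_all_unmanaged": true
#   },
#   "ignored_schema_patterns": ["^_.*"],
#   "catalog_annotations": [
#     {"uri": "tag:isrd.isi.edu,2016:visible-columns", "value": {}}
#   ],
#   "schema_annotations": [],
#   "table_annotations": [],
#   "column_annotations": [],
#   "foreign_key_annotations": []
# }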
class AttrConfig:
def __init__(self, server, catalog_id, config_file, credentials, verbose=False, schema_name=None, table_name=None):
self.config = json.load(open(config_file))
self.ignored_schema_patterns = []
ip = self.config.get("ignored_schema_patterns")
if ip is not None:
for p in ip:
self.ignored_schema_patterns.append(re.compile(p))
self.known_attrs = self.config.get(u'known_attributes')
self.managed_annotations = self.known_attrs.get(u'managed')
self.known_annotations = self.managed_annotations
self.all_annotations = self.known_annotations
self.ignored_annotations = self.known_attrs.get(u'ignored')
if self.ignored_annotations is not None:
self.all_annotations = self.all_annotations + self.ignored_annotations
self.ignore_unmanaged = self.known_attrs.get(u'ignore_all_unmanaged')
self.annotation_specs = dict()
for key in AttrSpecList.SPEC_TYPES:
self.annotation_specs[key] = self.make_speclist(key)
self.server = server
self.catalog_id = catalog_id
self.verbose = verbose
old_catalog = ErmrestCatalog('https', self.server, self.catalog_id, credentials)
self.saved_toplevel_config = ConfigUtil.find_toplevel_node(old_catalog.getCatalogModel(), schema_name,
table_name)
self.catalog = ErmrestCatalog('https', self.server, self.catalog_id, credentials)
self.toplevel_config = ConfigUtil.find_toplevel_node(self.catalog.getCatalogModel(), schema_name, table_name)
def make_speclist(self, name):
d = self.config.get(unicode(name))
if d is None:
d = [dict()]
return AttrSpecList(self.known_attrs, d)
def find_best_schema_specs(self, schema_name):
specs = dict()
for key in self.managed_annotations:
specs[key] = self.annotation_specs["schema_annotations"].find_best_schema_spec(schema_name, key=key)
return specs
def find_best_table_specs(self, schema_name, table_name):
specs = dict()
for key in self.managed_annotations:
specs[key] = self.annotation_specs["table_annotations"].find_best_table_spec(schema_name, table_name,
key=key)
return specs
def find_best_fkey_specs(self, fkey):
specs = dict()
for key in self.managed_annotations:
specs[key] = self.annotation_specs["foreign_key_annotations"].find_best_foreign_key_spec(fkey.table.schema.name,
fkey.table.name,
fkey.names,
key=key)
return specs
def find_best_column_specs(self, schema_name, table_name, column_name):
specs = dict()
for key in self.managed_annotations:
specs[key] = self.annotation_specs["column_annotations"].find_best_column_spec(schema_name, table_name,
column_name, key=key)
return specs
def node_name(self, node):
if isinstance(node, ermrest_model.Schema):
return "schema {s}".format(s=str(node.name))
if isinstance(node, ermrest_model.Table):
return "table {s}.{t}".format(s=str(node.schema.name), t=str(node.name))
if isinstance(node, ermrest_model.Column):
return "column {s}.{t}.{c}".format(s=str(node.table.schema.name), t=str(node.table.name), c=str(node.name))
if isinstance(node, ermrest_model.ForeignKey):
return "foreign key {n}".format(n=str(node.names))
return str("unknown node type {t}".format(t=type(node)))
def set_node_annotations(self, node, specs, saved_node):
if specs is None:
if not self.ignore_unmanaged:
if self.verbose:
print("{n}: clearing annotations".format(n=self.node_name(node)))
node.annotations.clear()
return
for k in self.managed_annotations:
s = specs.get(k)
if s is not None and u'value' in s:
if self.verbose:
print("{n}: setting {k} to {v}".format(n=self.node_name(node), k=k, v=s[u'value']))
node.annotations[k] = s[u'value']
elif k in node.annotations:
if self.verbose:
print("{n}: clearing {k}".format(n=self.node_name(node), k=k))
node.annotations.pop(k)
if not self.ignore_unmanaged:
for k in node.annotations.keys():
if k not in self.all_annotations:
raise ValueError("annotation key {k} is neither managed nor ignored".format(k=k))
def set_table_annotations(self, table, saved_table):
self.set_node_annotations(table, self.find_best_table_specs(table.schema.name, table.name), saved_table)
for column in table.column_definitions:
self.set_column_annotations(column, self.find_named_column(saved_table, column.name))
for fkey in table.foreign_keys:
self.set_fkey_annotations(fkey, self.find_corresponding_fkey(saved_table, fkey))
def find_corresponding_fkey(self, table, base_fkey):
if table is None:
return None
if base_fkey.names is None or len(base_fkey.names) == 0:
return None
names = base_fkey.names[0]
if len(names) != 2:
return None
for fkey in table.foreign_keys:
if fkey is not None and fkey.names is not None and len(fkey.names) > 0:
for n in fkey.names:
if len(n) == 2 and n[0] == names[0] and n[1] == names[1]:
return fkey
return None
def find_named_column(self, table, column_name):
if table is None:
return None
for column in table.column_definitions:
if column.name == column_name:
return column
return None
def find_named_schema(self, catalog, schema_name):
if catalog is None or catalog.schemas is None:
return None
return catalog.schemas.get(schema_name)
def find_named_table(self, schema, table_name):
if schema is None:
return None
if schema.tables is None:
return None
return schema.tables.get(table_name)
def set_fkey_annotations(self, fkey, saved_fkey):
self.set_node_annotations(fkey, self.find_best_fkey_specs(fkey), saved_fkey)
def set_column_annotations(self, column, saved_column):
self.set_node_annotations(column, self.find_best_column_specs(column.table.schema.name, column.table.name, column.name),
saved_column)
def set_schema_annotations(self, schema, saved_schema):
for pat in self.ignored_schema_patterns:
if pat.match(schema.name) is not None:
print("ignoring schema {s}".format(s=schema.name))
return
specs = self.find_best_schema_specs(schema.name)
self.set_node_annotations(schema, specs, saved_schema)
for table in schema.tables.values():
self.set_table_annotations(table, self.find_named_table(saved_schema, table.name))
def set_catalog_annotations(self):
specs = dict()
for key in self.managed_annotations:
specs[key] = self.annotation_specs["catalog_annotations"].find_catalog_spec(key)
self.set_node_annotations(self.toplevel_config, specs, self.saved_toplevel_config)
for schema in self.toplevel_config.schemas.values():
self.set_schema_annotations(schema, self.find_named_schema(self.saved_toplevel_config, schema.name))
def set_attributes(self):
if isinstance(self.toplevel_config, ermrest_model.Model):
self.set_catalog_annotations()
elif isinstance(self.toplevel_config, ermrest_model.Schema):
self.set_schema_annotations(self.toplevel_config, self.saved_toplevel_config)
elif isinstance(self.toplevel_config, ermrest_model.Table):
self.set_table_annotations(self.toplevel_config, self.saved_toplevel_config)
else:
raise ValueError("toplevel config is a {t}".format(t=str(type(self.toplevel_config))))
def apply_annotations(self):
self.toplevel_config.apply(self.saved_toplevel_config)
def main():
cli = ConfigBaseCLI("annotation config tool", None, version=MY_VERSION)
args = cli.parse_cli()
table_name = cli.get_table_arg(args)
schema_names = cli.get_schema_arg_list(args)
credentials = get_credential(args.host, args.credential_file)
for schema in schema_names:
attr_config = AttrConfig(args.host, args.catalog, args.config_file, credentials, args.verbose or args.debug,
schema, table_name)
attr_config.set_attributes()
if not args.dryrun:
attr_config.apply_annotations()
if __name__ == '__main__':
sys.exit(main())
| 46.011719 | 183 | 0.635623 |
7942220f52a92bf1eb3d618be1b46cd5111663c6 | 15,198 | py | Python | main.py | sarah-antillia/EfficientDet-VegeFruits | 32fdf5598064f88d2d6599273fb913d891a435f3 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | main.py | sarah-antillia/EfficientDet-VegeFruits | 32fdf5598064f88d2d6599273fb913d891a435f3 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | main.py | sarah-antillia/EfficientDet-VegeFruits | 32fdf5598064f88d2d6599273fb913d891a435f3 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | # Copyright 2020 Google Research. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""The main training script."""
import multiprocessing
import os
# <added date="2021/0810"> arai
os.environ['TF_FORCE_GPU_ALLOW_GROWTH'] = 'true'
# </added>
from absl import app
from absl import flags
from absl import logging
import numpy as np
import tensorflow.compat.v1 as tf
import dataloader
import det_model_fn
import hparams_config
import utils
import pprint
flags.DEFINE_string(
'tpu',
default=None,
help='The Cloud TPU to use for training. This should be either the name '
'used when creating the Cloud TPU, or a grpc://ip.address.of.tpu:8470 '
'url.')
flags.DEFINE_string(
'gcp_project',
default=None,
help='Project name for the Cloud TPU-enabled project. If not specified, we '
'will attempt to automatically detect the GCE project from metadata.')
flags.DEFINE_string(
'tpu_zone',
default=None,
help='GCE zone where the Cloud TPU is located in. If not specified, we '
'will attempt to automatically detect the GCE project from metadata.')
flags.DEFINE_string('eval_name', default=None, help='Eval job name')
flags.DEFINE_enum('strategy', None, ['tpu', 'gpus', ''],
'Training: gpus for multi-gpu, if None, use TF default.')
flags.DEFINE_bool('use_fake_data', False, 'Use fake input.')
flags.DEFINE_bool(
'use_xla', False,
'Use XLA even if strategy is not tpu. If strategy is tpu, always use XLA, '
'and this flag has no effect.')
flags.DEFINE_string('model_dir', None, 'Location of model_dir')
flags.DEFINE_string(
'backbone_ckpt', '', 'Location of the ResNet50 checkpoint to use for model '
'initialization.')
flags.DEFINE_string('ckpt', None,
'Start training from this EfficientDet checkpoint.')
flags.DEFINE_string(
'hparams', '', 'Comma separated k=v pairs of hyperparameters or a module'
' containing attributes to use as hyperparameters.')
flags.DEFINE_integer(
'num_cores', default=8, help='Number of TPU cores for training')
flags.DEFINE_bool('use_spatial_partition', False, 'Use spatial partition.')
flags.DEFINE_integer(
'num_cores_per_replica',
default=2,
help='Number of TPU cores per replica when using spatial partition.')
flags.DEFINE_multi_integer(
'input_partition_dims', [1, 2, 1, 1],
'A list that describes the partition dims for all the tensors.')
flags.DEFINE_integer('train_batch_size', 64, 'global training batch size')
flags.DEFINE_integer('eval_batch_size', 1, 'global evaluation batch size')
flags.DEFINE_integer('eval_samples', 5000, 'Number of samples for eval.')
flags.DEFINE_integer('iterations_per_loop', 1000,
'Number of iterations per TPU training loop')
flags.DEFINE_integer('save_checkpoints_steps', 1000,
'Number of iterations per checkpoint save')
flags.DEFINE_string(
'train_file_pattern', None,
'Glob for training data files (e.g., COCO train - minival set)')
flags.DEFINE_string('val_file_pattern', None,
'Glob for evaluation tfrecords (e.g., COCO val2017 set)')
flags.DEFINE_string(
'val_json_file', None,
'COCO validation JSON containing golden bounding boxes. If None, use the '
'ground truth from the dataloader. Ignored if testdev_dir is not None.')
flags.DEFINE_string('testdev_dir', None,
                    'COCO testdev dir. If not None, ignore val_json_file.')
flags.DEFINE_integer('num_examples_per_epoch', 120000,
'Number of examples in one epoch')
flags.DEFINE_integer('num_epochs', None, 'Number of epochs for training')
flags.DEFINE_string('mode', 'train',
'Mode to run: train or eval (default: train)')
flags.DEFINE_string('model_name', 'efficientdet-d1', 'Model name.')
flags.DEFINE_bool('eval_after_train', False, 'Run one eval after the '
'training finishes.')
flags.DEFINE_bool('profile', False, 'Profile training performance.')
flags.DEFINE_integer(
'tf_random_seed', None, 'Sets the TF graph seed for deterministic execution'
' across runs (for debugging).')
# For Eval mode
flags.DEFINE_integer('min_eval_interval', 180,
'Minimum seconds between evaluations.')
flags.DEFINE_integer(
'eval_timeout', None,
'Maximum seconds between checkpoints before evaluation terminates.')
# for train_and_eval mode
flags.DEFINE_bool(
'run_epoch_in_child_process', False,
    'This option helps to mitigate a CPU memory leak. If True, every epoch is '
    'run in a separate process for train and eval and memory will be cleared. '
    'Drawback: need to kill 2 processes if training needs to be interrupted.')
FLAGS = flags.FLAGS
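# Illustrative invocation (paths and values below are placeholders, not taken
# from this repository):
#   python main.py --mode=train_and_eval --model_name=efficientdet-d1 \
#     --train_file_pattern=./train-*.tfrecord \
#     --val_file_pattern=./valid-*.tfrecord \
#     --model_dir=./models --train_batch_size=8 --num_epochs=100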
def main(_):
if FLAGS.strategy == 'tpu':
tf.disable_eager_execution()
tpu_cluster_resolver = tf.distribute.cluster_resolver.TPUClusterResolver(
FLAGS.tpu, zone=FLAGS.tpu_zone, project=FLAGS.gcp_project)
tpu_grpc_url = tpu_cluster_resolver.get_master()
tf.Session.reset(tpu_grpc_url)
else:
tpu_cluster_resolver = None
# Check data path
if FLAGS.mode in ('train', 'train_and_eval'):
if FLAGS.train_file_pattern is None:
raise RuntimeError('Must specify --train_file_pattern for train.')
if FLAGS.mode in ('eval', 'train_and_eval'):
if FLAGS.val_file_pattern is None:
raise RuntimeError('Must specify --val_file_pattern for eval.')
# Parse and override hparams
config = hparams_config.get_detection_config(FLAGS.model_name)
config.override(FLAGS.hparams)
if FLAGS.num_epochs: # NOTE: remove this flag after updating all docs.
config.num_epochs = FLAGS.num_epochs
# Parse image size in case it is in string format.
config.image_size = utils.parse_image_size(config.image_size)
  # The following is for spatial partitioning. `features` has one tensor while
  # `labels` has 4 + (`max_level` - `min_level` + 1) * 2 tensors. The input
  # partition is performed on `features` and all partitionable tensors of
  # `labels`; see the partition logic below.
  # In the TPUEstimator context, `shard` and `replica` mean the same thing;
  # following the API, both terms are used below.
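  # For example, with the defaults --num_cores_per_replica=2 and
  # --input_partition_dims=[1, 2, 1, 1], each input is split across two cores
  # along its height dimension, and the per-level box/cls targets are
  # partitioned the same way whenever their spatial dims divide evenly;
  # otherwise those targets are left unpartitioned.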
if FLAGS.use_spatial_partition:
# Checks input_partition_dims agrees with num_cores_per_replica.
if FLAGS.num_cores_per_replica != np.prod(FLAGS.input_partition_dims):
raise RuntimeError('--num_cores_per_replica must be a product of array'
'elements in --input_partition_dims.')
labels_partition_dims = {
'mean_num_positives': None,
'source_ids': None,
'groundtruth_data': None,
'image_scales': None,
'image_masks': None,
}
# The Input Partition Logic: We partition only the partition-able tensors.
feat_sizes = utils.get_feat_sizes(
config.get('image_size'), config.get('max_level'))
for level in range(config.get('min_level'), config.get('max_level') + 1):
def _can_partition(spatial_dim):
partitionable_index = np.where(
spatial_dim % np.array(FLAGS.input_partition_dims) == 0)
return len(partitionable_index[0]) == len(FLAGS.input_partition_dims)
spatial_dim = feat_sizes[level]
if _can_partition(spatial_dim['height']) and _can_partition(
spatial_dim['width']):
labels_partition_dims['box_targets_%d' %
level] = FLAGS.input_partition_dims
labels_partition_dims['cls_targets_%d' %
level] = FLAGS.input_partition_dims
else:
labels_partition_dims['box_targets_%d' % level] = None
labels_partition_dims['cls_targets_%d' % level] = None
num_cores_per_replica = FLAGS.num_cores_per_replica
input_partition_dims = [FLAGS.input_partition_dims, labels_partition_dims]
num_shards = FLAGS.num_cores // num_cores_per_replica
else:
num_cores_per_replica = None
input_partition_dims = None
num_shards = FLAGS.num_cores
params = dict(
config.as_dict(),
model_name=FLAGS.model_name,
iterations_per_loop=FLAGS.iterations_per_loop,
model_dir=FLAGS.model_dir,
num_shards=num_shards,
num_examples_per_epoch=FLAGS.num_examples_per_epoch,
strategy=FLAGS.strategy,
backbone_ckpt=FLAGS.backbone_ckpt,
ckpt=FLAGS.ckpt,
val_json_file=FLAGS.val_json_file,
testdev_dir=FLAGS.testdev_dir,
profile=FLAGS.profile,
mode=FLAGS.mode)
config_proto = tf.ConfigProto(
allow_soft_placement=True, log_device_placement=False)
if FLAGS.strategy != 'tpu':
if FLAGS.use_xla:
config_proto.graph_options.optimizer_options.global_jit_level = (
tf.OptimizerOptions.ON_1)
config_proto.gpu_options.allow_growth = True
model_dir = FLAGS.model_dir
model_fn_instance = det_model_fn.get_model_fn(FLAGS.model_name)
max_instances_per_image = config.max_instances_per_image
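  # Eval runs for ceil(eval_samples / eval_batch_size) steps so the whole eval
  # set is covered; if eval_samples is unset, evaluation runs until the input
  # is exhausted.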
if FLAGS.eval_samples:
eval_steps = int((FLAGS.eval_samples + FLAGS.eval_batch_size - 1) //
FLAGS.eval_batch_size)
else:
eval_steps = None
total_examples = int(config.num_epochs * FLAGS.num_examples_per_epoch)
train_steps = total_examples // FLAGS.train_batch_size
#logging.info(params)
pprint.pprint(params)
if not tf.io.gfile.exists(model_dir):
tf.io.gfile.makedirs(model_dir)
config_file = os.path.join(model_dir, 'config.yaml')
if not tf.io.gfile.exists(config_file):
tf.io.gfile.GFile(config_file, 'w').write(str(config))
train_input_fn = dataloader.InputReader(
FLAGS.train_file_pattern,
is_training=True,
use_fake_data=FLAGS.use_fake_data,
max_instances_per_image=max_instances_per_image)
eval_input_fn = dataloader.InputReader(
FLAGS.val_file_pattern,
is_training=False,
use_fake_data=FLAGS.use_fake_data,
max_instances_per_image=max_instances_per_image)
if FLAGS.strategy == 'tpu':
tpu_config = tf.estimator.tpu.TPUConfig(
FLAGS.iterations_per_loop if FLAGS.strategy == 'tpu' else 1,
num_cores_per_replica=num_cores_per_replica,
input_partition_dims=input_partition_dims,
per_host_input_for_training=tf.estimator.tpu.InputPipelineConfig
.PER_HOST_V2)
run_config = tf.estimator.tpu.RunConfig(
cluster=tpu_cluster_resolver,
model_dir=model_dir,
log_step_count_steps=FLAGS.iterations_per_loop,
session_config=config_proto,
tpu_config=tpu_config,
save_checkpoints_steps=FLAGS.save_checkpoints_steps,
tf_random_seed=FLAGS.tf_random_seed,
)
# TPUEstimator can do both train and eval.
train_est = tf.estimator.tpu.TPUEstimator(
model_fn=model_fn_instance,
train_batch_size=FLAGS.train_batch_size,
eval_batch_size=FLAGS.eval_batch_size,
config=run_config,
params=params)
eval_est = train_est
else:
strategy = None
if FLAGS.strategy == 'gpus':
strategy = tf.distribute.MirroredStrategy()
run_config = tf.estimator.RunConfig(
model_dir=model_dir,
train_distribute=strategy,
log_step_count_steps=FLAGS.iterations_per_loop,
session_config=config_proto,
save_checkpoints_steps=FLAGS.save_checkpoints_steps,
tf_random_seed=FLAGS.tf_random_seed,
)
def get_estimator(global_batch_size):
params['num_shards'] = getattr(strategy, 'num_replicas_in_sync', 1)
params['batch_size'] = global_batch_size // params['num_shards']
return tf.estimator.Estimator(
model_fn=model_fn_instance, config=run_config, params=params)
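    # get_estimator() converts the global batch size into a per-replica batch
    # size by dividing by the number of replicas in sync (1 when no
    # distribution strategy is used).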
    # train and eval need different estimators due to different batch sizes.
train_est = get_estimator(FLAGS.train_batch_size)
eval_est = get_estimator(FLAGS.eval_batch_size)
# start train/eval flow.
if FLAGS.mode == 'train':
train_est.train(input_fn=train_input_fn, max_steps=train_steps)
if FLAGS.eval_after_train:
eval_est.evaluate(input_fn=eval_input_fn, steps=eval_steps)
elif FLAGS.mode == 'eval':
# Run evaluation when there's a new checkpoint
for ckpt in tf.train.checkpoints_iterator(
FLAGS.model_dir,
min_interval_secs=FLAGS.min_eval_interval,
timeout=FLAGS.eval_timeout):
logging.info('Starting to evaluate.')
try:
eval_results = eval_est.evaluate(
eval_input_fn, steps=eval_steps, name=FLAGS.eval_name)
# Terminate eval job when final checkpoint is reached.
try:
current_step = int(os.path.basename(ckpt).split('-')[1])
except IndexError:
logging.info('%s has no global step info: stop!', ckpt)
break
utils.archive_ckpt(eval_results, eval_results['AP'], ckpt)
if current_step >= train_steps:
logging.info('Eval finished step %d/%d', current_step, train_steps)
break
except tf.errors.NotFoundError:
        # The checkpoint might have been deleted by the time eval finished.
        # We simply skip such cases.
logging.info('Checkpoint %s no longer exists, skipping.', ckpt)
elif FLAGS.mode == 'train_and_eval':
ckpt = tf.train.latest_checkpoint(FLAGS.model_dir)
try:
step = int(os.path.basename(ckpt).split('-')[1])
current_epoch = (
step * FLAGS.train_batch_size // FLAGS.num_examples_per_epoch)
logging.info('found ckpt at step %d (epoch %d)', step, current_epoch)
except (IndexError, TypeError):
logging.info('Folder %s has no ckpt with valid step.', FLAGS.model_dir)
current_epoch = 0
def run_train_and_eval(e):
print('\n =====> Starting training, epoch: %d.' % e)
train_est.train(
input_fn=train_input_fn,
max_steps=e * FLAGS.num_examples_per_epoch // FLAGS.train_batch_size)
print('\n =====> Starting evaluation, epoch: %d.' % e)
eval_results = eval_est.evaluate(input_fn=eval_input_fn, steps=eval_steps)
ckpt = tf.train.latest_checkpoint(FLAGS.model_dir)
utils.archive_ckpt(eval_results, eval_results['AP'], ckpt)
epochs_per_cycle = 1 # higher number has less graph construction overhead.
for e in range(current_epoch + 1, config.num_epochs + 1, epochs_per_cycle):
if FLAGS.run_epoch_in_child_process:
p = multiprocessing.Process(target=run_train_and_eval, args=(e,))
p.start()
p.join()
if p.exitcode != 0:
return p.exitcode
else:
tf.compat.v1.reset_default_graph()
run_train_and_eval(e)
else:
logging.info('Invalid mode: %s', FLAGS.mode)
if __name__ == '__main__':
app.run(main)
| 40.745308 | 80 | 0.702527 |
7942235e19c568723d60f85d2e64156e249b90d2 | 1,605 | py | Python | ppline/cli/__main__.py | 5x12/ppline | a4f7bd9aae0752a8abe7c4580c808792bb044ff6 | [
"MIT"
] | 9 | 2021-08-11T13:38:22.000Z | 2022-01-14T15:32:45.000Z | ppline/cli/__main__.py | 5x12/ppline | a4f7bd9aae0752a8abe7c4580c808792bb044ff6 | [
"MIT"
] | null | null | null | ppline/cli/__main__.py | 5x12/ppline | a4f7bd9aae0752a8abe7c4580c808792bb044ff6 | [
"MIT"
] | null | null | null | import argparse
import os
import pathlib
from typing import Optional
from ppline.easy.command import Run
def _arg_project_dir(arg):
expanded = os.path.expanduser(arg)
if os.path.isdir(expanded):
return os.path.abspath(os.path.normpath(expanded))
else:
raise Exception(f'Argument {arg} must be an existing directory.')
def _arg_config_file(arg):
if pathlib.Path(arg).suffix not in ['.yml', '.yaml']:
raise Exception(f'Argument {arg} must be a .yml/.yaml file.')
    if not os.path.exists(arg):
        raise Exception(f'Cannot find {arg} in your directory')
return arg
parser = argparse.ArgumentParser()
parser.add_argument('--trigger_class', '-tc', default=None, help='Path to the class to trigger, in the form "to/the/file.py:TestClass" (TestClass should have a __call__ method)')
parser.add_argument('--project_dir', '-p', type=_arg_project_dir, default=None, help="Path to the project root where the configuration .yml/.yaml file is stored")
parser.add_argument('--config_file', '-f', type=_arg_config_file, default=None, help='Name of pipeline configuration .yml/.yaml file')
parser.add_argument('-gitlab', action='store_true', help='Specify if you want to generate .gitlab-ci file.yaml')
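# Illustrative usage (file and class names below are placeholders):
#   python -m ppline.cli --config_file pipeline.yml --project_dir . -gitlab
#   python -m ppline.cli --trigger_class to/the/file.py:TestClass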
# req_grp = parser.add_argument_group(title='Required')
# req_grp.add_argument('--config_file', type=_arg_config_file, help='Name of pipeline config .yml file.', required=True)
# args = parser.parse_args()
args = parser.parse_args()
Run(config_file=args.config_file, project_dir=args.project_dir, trigger_class=args.trigger_class, gitlab=args.gitlab)
| 45.857143 | 177 | 0.738941 |
794223f06b5eaf3832eeeeaac9acc8c4c2a6e067 | 178,083 | py | Python | ckan/tests/logic/action/test_get.py | devansh-srivastav/ckan-1 | 44a29cf85bcf14b24ff97f147b8a1e6cf6c51a93 | [
"BSD-3-Clause"
] | null | null | null | ckan/tests/logic/action/test_get.py | devansh-srivastav/ckan-1 | 44a29cf85bcf14b24ff97f147b8a1e6cf6c51a93 | [
"BSD-3-Clause"
] | null | null | null | ckan/tests/logic/action/test_get.py | devansh-srivastav/ckan-1 | 44a29cf85bcf14b24ff97f147b8a1e6cf6c51a93 | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
import datetime
import re
import copy
import pytest
from ckan import model
import ckan.logic as logic
import ckan.logic.schema as schema
import ckan.tests.factories as factories
import ckan.tests.helpers as helpers
from ckan import __version__
from ckan.lib.search.common import SearchError
@pytest.mark.usefixtures("non_clean_db")
class TestPackageShow(object):
def test_package_show(self):
# simple dataset, simple checks
dataset1 = factories.Dataset()
dataset2 = helpers.call_action("package_show", id=dataset1["id"])
assert dataset2["name"] == dataset1["name"]
missing_keys = set(("title", "groups")) - set(dataset2.keys())
assert not missing_keys, missing_keys
def test_package_show_with_full_dataset(self):
# an full dataset
org = factories.Organization()
group = factories.Group()
dataset1 = factories.Dataset(
resources=[
{
"url": "http://example.com/image.png",
"format": "png",
"name": "Image 1",
}
],
tags=[{"name": factories.Tag.stub().name}],
extras=[{"key": "subject", "value": "science"}],
groups=[{"id": group["id"]}],
owner_org=org["id"],
)
dataset2 = helpers.call_action("package_show", id=dataset1["id"])
# checking the whole dataset is a bit brittle as a test, but it
# documents what the package_dict is clearly and tracks how it changes
# as CKAN changes over time.
# fix values which change every time you run this test
def replace_uuid(dict_, key):
assert key in dict_
dict_[key] = "<SOME-UUID>"
def replace_datetime(dict_, key):
assert key in dict_
dict_[key] = "2019-05-24T15:52:30.123456"
def replace_number_suffix(dict_, key):
# e.g. "Test Dataset 23" -> "Test Dataset "
assert key in dict_
dict_[key] = re.sub(r"\d+$", "num", dict_[key])
replace_uuid(dataset2, "id")
replace_uuid(dataset2, "creator_user_id")
replace_uuid(dataset2, "owner_org")
replace_number_suffix(dataset2, "name")
replace_datetime(dataset2, "metadata_created")
replace_datetime(dataset2, "metadata_modified")
replace_datetime(dataset2["resources"][0], "metadata_modified")
replace_uuid(dataset2["groups"][0], "id")
replace_number_suffix(dataset2["groups"][0], "name")
replace_number_suffix(dataset2["groups"][0], "title")
replace_number_suffix(dataset2["groups"][0], "display_name")
replace_uuid(dataset2["organization"], "id")
replace_number_suffix(dataset2["organization"], "name")
replace_number_suffix(dataset2["organization"], "title")
replace_datetime(dataset2["organization"], "created")
replace_uuid(dataset2["resources"][0], "id")
replace_uuid(dataset2["resources"][0], "package_id")
replace_number_suffix(dataset2["resources"][0], "name")
replace_datetime(dataset2["resources"][0], "created")
replace_uuid(dataset2["tags"][0], "id")
assert dataset2 == {
"author": dataset1["author"],
"author_email": dataset1["author_email"],
"creator_user_id": "<SOME-UUID>",
"extras": dataset1["extras"],
"groups": [
{
"description": group["description"],
"display_name": group["display_name"],
"id": "<SOME-UUID>",
"image_display_url": group["image_display_url"],
"name": group["name"],
"title": group["title"],
}
],
"id": "<SOME-UUID>",
"isopen": dataset1["isopen"],
"license_id": dataset1["license_id"],
"license_title": dataset1["license_title"],
"maintainer": dataset1["maintainer"],
"maintainer_email": dataset1["maintainer_email"],
"metadata_created": "2019-05-24T15:52:30.123456",
"metadata_modified": "2019-05-24T15:52:30.123456",
"name": dataset1["name"],
"notes": dataset1["notes"],
"num_resources": dataset1["num_resources"],
"num_tags": dataset1["num_tags"],
"organization": {
"approval_status": org["approval_status"],
"created": "2019-05-24T15:52:30.123456",
"description": org["description"],
"id": "<SOME-UUID>",
"image_url": org["image_url"],
"is_organization": org["is_organization"],
"name": org["name"],
"state": org["state"],
"title": org["title"],
"type": org["type"],
},
"owner_org": "<SOME-UUID>",
"private": dataset1["private"],
"relationships_as_object": dataset1["relationships_as_object"],
"relationships_as_subject": dataset1["relationships_as_subject"],
"resources": [
{
"cache_last_updated": None,
"cache_url": dataset1["resources"][0]["cache_url"],
"created": "2019-05-24T15:52:30.123456",
"description": dataset1["resources"][0]["description"],
"format": dataset1["resources"][0]["format"],
"hash": "",
"id": "<SOME-UUID>",
"last_modified": dataset1["resources"][0]["last_modified"],
"metadata_modified": "2019-05-24T15:52:30.123456",
"mimetype": dataset1["resources"][0]["mimetype"],
"mimetype_inner": None,
"name": "Image num",
"package_id": "<SOME-UUID>",
"position": dataset1["resources"][0]["position"],
"resource_type": dataset1["resources"][0]["resource_type"],
"size": dataset1["resources"][0]["size"],
"state": dataset1["resources"][0]["state"],
"url": dataset1["resources"][0]["url"],
"url_type": dataset1["resources"][0]["url_type"],
}
],
"state": dataset1["state"],
"tags": [
{
"display_name": dataset1["tags"][0]["display_name"],
"id": "<SOME-UUID>",
"name": dataset1["tags"][0]["name"],
"state": dataset1["tags"][0]["state"],
"vocabulary_id": dataset1["tags"][0]["vocabulary_id"],
}
],
"title": dataset1["title"],
"type": dataset1["type"],
"url": dataset1["url"],
"version": dataset1["version"],
}
def test_package_show_with_custom_schema(self):
dataset1 = factories.Dataset()
from ckan.logic.schema import default_show_package_schema
custom_schema = default_show_package_schema()
def foo(key, data, errors, context): # noqa
data[key] = "foo"
custom_schema["new_field"] = [foo]
dataset2 = helpers.call_action(
"package_show",
id=dataset1["id"],
context={"schema": custom_schema},
)
assert dataset2["new_field"] == "foo"
def test_package_show_with_custom_schema_return_default_schema(self):
dataset1 = factories.Dataset()
from ckan.logic.schema import default_show_package_schema
custom_schema = default_show_package_schema()
def foo(key, data, errors, context): # noqa
data[key] = "foo"
custom_schema["new_field"] = [foo]
dataset2 = helpers.call_action(
"package_show",
id=dataset1["id"],
use_default_schema=True,
context={"schema": custom_schema},
)
assert "new_field" not in dataset2
@pytest.mark.usefixtures("clean_db")
class TestGroupList(object):
def test_group_list(self):
group1 = factories.Group()
group2 = factories.Group()
group_list = helpers.call_action("group_list")
assert sorted(group_list) == sorted(
[g["name"] for g in [group1, group2]]
)
def test_group_list_in_presence_of_organizations(self):
"""
Getting the group_list should only return groups of type 'group' (not
organizations).
"""
group1 = factories.Group()
group2 = factories.Group()
factories.Organization()
factories.Organization()
group_list = helpers.call_action("group_list")
assert sorted(group_list) == sorted(
[g["name"] for g in [group1, group2]]
)
def test_group_list_in_presence_of_custom_group_types(self):
"""Getting the group_list shouldn't return custom group types."""
group1 = factories.Group()
group2 = factories.Group()
factories.Group(type="custom")
group_list = helpers.call_action("group_list")
assert sorted(group_list) == sorted(
[g["name"] for g in [group1, group2]]
)
def test_group_list_return_custom_group(self):
"""
Getting the group_list with a type defined should only return
groups of that type.
"""
group1 = factories.Group(type="custom")
group2 = factories.Group(type="custom")
factories.Group()
factories.Group()
group_list = helpers.call_action("group_list", type="custom")
assert sorted(group_list) == sorted(
[g["name"] for g in [group1, group2]]
)
def test_group_list_sort_by_package_count(self):
factories.Group(name="aa")
factories.Group(name="bb")
factories.Dataset(groups=[{"name": "aa"}, {"name": "bb"}])
factories.Dataset(groups=[{"name": "bb"}])
group_list = helpers.call_action("group_list", sort="package_count")
assert sorted(group_list) == sorted(["bb", "aa"])
def test_group_list_sort_by_package_count_ascending(self):
factories.Group(name="aa")
factories.Group(name="bb")
factories.Dataset(groups=[{"name": "aa"}, {"name": "bb"}])
factories.Dataset(groups=[{"name": "aa"}])
group_list = helpers.call_action(
"group_list", sort="package_count asc"
)
assert group_list == ["bb", "aa"]
def test_group_list_sort_default(self):
factories.Group(name="zz", title="aa")
factories.Group(name="yy", title="bb")
group_list = helpers.call_action("group_list")
assert group_list == ["zz", "yy"]
@pytest.mark.ckan_config("ckan.default_group_sort", "name")
def test_group_list_sort_from_config(self):
factories.Group(name="zz", title="aa")
factories.Group(name="yy", title="bb")
group_list = helpers.call_action("group_list")
assert group_list == ["yy", "zz"]
def eq_expected(self, expected_dict, result_dict):
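        # Helper: fail if result_dict has any key not present in expected_dict,
        # or if any expected value differs from the result.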
superfluous_keys = set(result_dict) - set(expected_dict)
assert not superfluous_keys, "Did not expect key: %s" % " ".join(
("%s=%s" % (k, result_dict[k]) for k in superfluous_keys)
)
for key in expected_dict:
assert (
expected_dict[key] == result_dict[key]
), "%s=%s should be %s" % (
key,
result_dict[key],
expected_dict[key],
)
def test_group_list_all_fields(self):
group = factories.Group()
group_list = helpers.call_action("group_list", all_fields=True)
expected_group = dict(group)
for field in ("users", "tags", "extras", "groups"):
del expected_group[field]
assert group_list[0] == expected_group
assert "extras" not in group_list[0]
assert "tags" not in group_list[0]
assert "groups" not in group_list[0]
assert "users" not in group_list[0]
assert "datasets" not in group_list[0]
def _create_bulk_groups(self, name, count):
groups = [
model.Group(name="{}_{}".format(name, i)) for i in range(count)
]
model.Session.add_all(groups)
model.repo.commit_and_remove()
def test_limit_default(self):
self._create_bulk_groups("group_default", 1010)
results = helpers.call_action("group_list")
assert len(results) == 1000 # i.e. default value
@pytest.mark.ckan_config("ckan.group_and_organization_list_max", "5")
def test_limit_configured(self):
self._create_bulk_groups("group_default", 7)
results = helpers.call_action("group_list")
assert len(results) == 5 # i.e. configured limit
def test_all_fields_limit_default(self):
self._create_bulk_groups("org_all_fields_default", 30)
results = helpers.call_action("group_list", all_fields=True)
assert len(results) == 25 # i.e. default value
@pytest.mark.ckan_config(
"ckan.group_and_organization_list_all_fields_max", "5"
)
def test_all_fields_limit_configured(self):
self._create_bulk_groups("org_all_fields_default", 30)
results = helpers.call_action("group_list", all_fields=True)
assert len(results) == 5 # i.e. configured limit
def test_group_list_extras_returned(self):
group = factories.Group(extras=[{"key": "key1", "value": "val1"}])
group_list = helpers.call_action(
"group_list", all_fields=True, include_extras=True
)
assert group_list[0]["extras"] == group["extras"]
assert group_list[0]["extras"][0]["key"] == "key1"
def test_group_list_users_returned(self):
user = factories.User()
group = factories.Group(
users=[{"name": user["name"], "capacity": "admin"}]
)
group_list = helpers.call_action(
"group_list", all_fields=True, include_users=True
)
assert group_list[0]["users"] == group["users"]
assert group_list[0]["users"][0]["name"] == group["users"][0]["name"]
# NB there is no test_group_list_tags_returned because tags are not in the
# group_create schema (yet)
def test_group_list_groups_returned(self):
parent_group = factories.Group(tags=[{"name": "river"}])
child_group = factories.Group(
groups=[{"name": parent_group["name"]}], tags=[{"name": "river"}]
)
group_list = helpers.call_action(
"group_list", all_fields=True, include_groups=True
)
child_group_returned = group_list[0]
if group_list[0]["name"] == child_group["name"]:
child_group_returned, _ = group_list
else:
child_group_returned, _ = group_list[::-1]
expected_parent_group = dict(parent_group)
assert [g["name"] for g in child_group_returned["groups"]] == [
expected_parent_group["name"]
]
def test_group_list_limit(self):
group1 = factories.Group(title="aa")
group2 = factories.Group(title="bb")
group3 = factories.Group(title="cc")
group_names = [g["name"] for g in [group1, group2, group3]]
group_list = helpers.call_action("group_list", limit=1)
assert len(group_list) == 1
assert group_list[0] == group_names[0]
def test_group_list_offset(self):
group1 = factories.Group(title="aa")
group2 = factories.Group(title="bb")
group3 = factories.Group(title="cc")
group_names = [g["name"] for g in [group1, group2, group3]]
group_list = helpers.call_action("group_list", offset=2)
assert len(group_list) == 1
        # group_list returns a sorted result, which is not necessarily the
        # order of creation.
assert group_list[0] == group_names[2]
def test_group_list_limit_and_offset(self):
factories.Group(title="aa")
group2 = factories.Group(title="bb")
factories.Group(title="cc")
group_list = helpers.call_action("group_list", offset=1, limit=1)
assert len(group_list) == 1
assert group_list[0] == group2["name"]
def test_group_list_limit_as_string(self):
factories.Group(name="aa")
factories.Group(name="bb")
group_list = helpers.call_action("group_list", limit="1")
assert len(group_list) == 1
def test_group_list_wrong_limit(self):
with pytest.raises(logic.ValidationError):
helpers.call_action("group_list", limit="a")
def test_group_list_wrong_offset(self):
with pytest.raises(logic.ValidationError):
helpers.call_action("group_list", offset="-2")
@pytest.mark.usefixtures("clean_db", "clean_index")
class TestGroupShow(object):
def test_group_show(self):
group = factories.Group(user=factories.User())
group_dict = helpers.call_action(
"group_show", id=group["id"], include_datasets=True
)
group_dict.pop("packages", None)
assert group_dict == group
def test_group_show_error_not_found(self):
with pytest.raises(logic.NotFound):
helpers.call_action("group_show", id="does_not_exist")
def test_group_show_error_for_organization(self):
org = factories.Organization()
with pytest.raises(logic.NotFound):
helpers.call_action("group_show", id=org["id"])
def test_group_show_packages_returned(self):
user_name = helpers.call_action("get_site_user")["name"]
group = factories.Group(user=factories.User())
datasets = [
{"name": "dataset_1", "groups": [{"name": group["name"]}]},
{"name": "dataset_2", "groups": [{"name": group["name"]}]},
]
for dataset in datasets:
helpers.call_action(
"package_create", context={"user": user_name}, **dataset
)
group_dict = helpers.call_action(
"group_show", id=group["id"], include_datasets=True
)
assert len(group_dict["packages"]) == 2
assert group_dict["package_count"] == 2
def test_group_show_packages_returned_for_view(self):
user_name = helpers.call_action("get_site_user")["name"]
group = factories.Group(user=factories.User())
datasets = [
{"name": "dataset_1", "groups": [{"name": group["name"]}]},
{"name": "dataset_2", "groups": [{"name": group["name"]}]},
]
for dataset in datasets:
helpers.call_action(
"package_create", context={"user": user_name}, **dataset
)
group_dict = helpers.call_action(
"group_show",
id=group["id"],
include_datasets=True,
context={"for_view": True},
)
assert len(group_dict["packages"]) == 2
assert group_dict["package_count"] == 2
def test_group_show_no_packages_returned(self):
user_name = helpers.call_action("get_site_user")["name"]
group = factories.Group(user=factories.User())
datasets = [
{"name": "dataset_1", "groups": [{"name": group["name"]}]},
{"name": "dataset_2", "groups": [{"name": group["name"]}]},
]
for dataset in datasets:
helpers.call_action(
"package_create", context={"user": user_name}, **dataset
)
group_dict = helpers.call_action(
"group_show", id=group["id"], include_datasets=False
)
assert "packages" not in group_dict
assert group_dict["package_count"] == 2
def test_group_show_does_not_show_private_datasets(self):
"""group_show() should never show private datasets.
If a dataset is a private member of an organization and also happens to
be a member of a group, group_show() should not return the dataset as
part of the group dict, even if the user calling group_show() is a
member or admin of the group or the organization or is a sysadmin.
"""
org_member = factories.User()
org = factories.Organization(user=org_member)
private_dataset = factories.Dataset(
user=org_member, owner_org=org["name"], private=True
)
group = factories.Group()
# Add the private dataset to the group.
helpers.call_action(
"member_create",
id=group["id"],
object=private_dataset["id"],
object_type="package",
capacity="public",
)
# Create a member user and an admin user of the group.
group_member = factories.User()
helpers.call_action(
"member_create",
id=group["id"],
object=group_member["id"],
object_type="user",
capacity="member",
)
group_admin = factories.User()
helpers.call_action(
"member_create",
id=group["id"],
object=group_admin["id"],
object_type="user",
capacity="admin",
)
# Create a user who isn't a member of any group or organization.
non_member = factories.User()
sysadmin = factories.Sysadmin()
# None of the users should see the dataset when they call group_show().
for user in (
org_member,
group_member,
group_admin,
non_member,
sysadmin,
None,
):
if user is None:
context = None # No user logged-in.
else:
context = {"user": user["name"]}
group = helpers.call_action(
"group_show",
id=group["id"],
include_datasets=True,
context=context,
)
assert private_dataset["id"] not in [
dataset["id"] for dataset in group["packages"]
], "group_show() should never show private datasets"
@pytest.mark.ckan_config("ckan.search.rows_max", "2")
def test_package_limit_configured(self):
group = factories.Group()
for _ in range(3):
factories.Dataset(groups=[{"id": group["id"]}])
id = group["id"]
results = helpers.call_action("group_show", id=id, include_datasets=1)
assert len(results["packages"]) == 2 # i.e. ckan.search.rows_max
@pytest.mark.usefixtures("clean_db")
class TestOrganizationList(object):
def test_organization_list(self):
org1 = factories.Organization()
org2 = factories.Organization()
org_list = helpers.call_action("organization_list")
assert sorted(org_list) == sorted([g["name"] for g in [org1, org2]])
def test_organization_list_in_presence_of_groups(self):
"""
Getting the organization_list only returns organization group
types.
"""
org1 = factories.Organization()
org2 = factories.Organization()
factories.Group()
factories.Group()
org_list = helpers.call_action("organization_list")
assert sorted(org_list) == sorted([g["name"] for g in [org1, org2]])
def test_organization_list_in_presence_of_custom_group_types(self):
"""
Getting the organization_list only returns organization group
types.
"""
org1 = factories.Organization()
org2 = factories.Organization()
factories.Group(type="custom")
factories.Group(type="custom")
org_list = helpers.call_action("organization_list")
assert sorted(org_list) == sorted([g["name"] for g in [org1, org2]])
def test_organization_list_return_custom_organization_type(self):
"""
Getting the org_list with a type defined should only return
orgs of that type.
"""
factories.Organization()
org2 = factories.Organization(type="custom_org")
factories.Group(type="custom")
factories.Group(type="custom")
org_list = helpers.call_action("organization_list", type="custom_org")
assert sorted(org_list) == sorted(
[g["name"] for g in [org2]]
), "{}".format(org_list)
def _create_bulk_orgs(self, name, count):
from ckan import model
orgs = [
model.Group(
name="{}_{}".format(name, i),
is_organization=True,
type="organization",
)
for i in range(count)
]
model.Session.add_all(orgs)
model.repo.commit_and_remove()
def test_limit_default(self):
self._create_bulk_orgs("org_default", 1010)
results = helpers.call_action("organization_list")
assert len(results) == 1000 # i.e. default value
@pytest.mark.ckan_config("ckan.group_and_organization_list_max", "5")
def test_limit_configured(self):
self._create_bulk_orgs("org_default", 7)
results = helpers.call_action("organization_list")
assert len(results) == 5 # i.e. configured limit
@pytest.mark.ckan_config("ckan.group_and_organization_list_max", "5")
def test_limit_with_custom_max_limit(self):
self._create_bulk_orgs("org_default", 5)
results = helpers.call_action("organization_list", limit=2)
assert len(results) == 2
def test_all_fields_limit_default(self):
self._create_bulk_orgs("org_all_fields_default", 30)
results = helpers.call_action("organization_list", all_fields=True)
assert len(results) == 25 # i.e. default value
@pytest.mark.ckan_config(
"ckan.group_and_organization_list_all_fields_max", "5"
)
def test_all_fields_limit_with_custom_max_limit(self):
self._create_bulk_orgs("org_all_fields_default", 5)
results = helpers.call_action(
"organization_list", all_fields=True, limit=2
)
assert len(results) == 2
@pytest.mark.ckan_config(
"ckan.group_and_organization_list_all_fields_max", "5"
)
def test_all_fields_limit_configured(self):
self._create_bulk_orgs("org_all_fields_default", 30)
results = helpers.call_action("organization_list", all_fields=True)
assert len(results) == 5 # i.e. configured limit
@pytest.mark.usefixtures("non_clean_db")
class TestOrganizationShow(object):
def test_organization_show(self):
org = factories.Organization()
org_dict = helpers.call_action(
"organization_show", id=org["id"], include_datasets=True
)
org_dict.pop("packages", None)
assert org_dict == org
def test_organization_show_error_not_found(self):
with pytest.raises(logic.NotFound):
helpers.call_action("organization_show", id="does_not_exist")
def test_organization_show_error_for_group(self):
group = factories.Group()
with pytest.raises(logic.NotFound):
helpers.call_action("organization_show", id=group["id"])
def test_organization_show_packages_returned(self):
user_name = helpers.call_action("get_site_user")["name"]
org = factories.Organization()
datasets = [
{"name": factories.Dataset.stub().name, "owner_org": org["name"]},
{"name": factories.Dataset.stub().name, "owner_org": org["name"]},
]
for dataset in datasets:
helpers.call_action(
"package_create", context={"user": user_name}, **dataset
)
org_dict = helpers.call_action(
"organization_show", id=org["id"], include_datasets=True
)
assert len(org_dict["packages"]) == 2
assert org_dict["package_count"] == 2
def test_organization_show_private_packages_not_returned(self):
user_name = helpers.call_action("get_site_user")["name"]
org = factories.Organization()
dataset1 = factories.Dataset.stub().name
datasets = [
{"name": dataset1, "owner_org": org["name"]},
{
"name": factories.Dataset.stub().name,
"owner_org": org["name"],
"private": True,
},
]
for dataset in datasets:
helpers.call_action(
"package_create", context={"user": user_name}, **dataset
)
org_dict = helpers.call_action(
"organization_show", id=org["id"], include_datasets=True
)
assert len(org_dict["packages"]) == 1
assert org_dict["packages"][0]["name"] == dataset1
assert org_dict["package_count"] == 1
@pytest.mark.ckan_config("ckan.search.rows_max", "2")
def test_package_limit_configured(self):
org = factories.Organization()
for _ in range(3):
factories.Dataset(owner_org=org["id"])
id = org["id"]
results = helpers.call_action(
"organization_show", id=id, include_datasets=1
)
assert len(results["packages"]) == 2 # i.e. ckan.search.rows_max
@pytest.mark.usefixtures("clean_db")
class TestUserList(object):
def test_user_list_default_values(self):
# we need to set fullname because user_list by default sorts by
# display_name
user = factories.User(fullname="Guido")
got_users = helpers.call_action("user_list")
# There is one default user
assert len(got_users) == 2
got_user = got_users[0]
assert got_user["id"] == user["id"]
assert got_user["name"] == user["name"]
assert got_user["fullname"] == user["fullname"]
assert got_user["display_name"] == user["display_name"]
assert got_user["created"] == user["created"]
assert got_user["about"] == user["about"]
assert got_user["sysadmin"] == user["sysadmin"]
assert got_user["number_created_packages"] == 0
assert "password" not in got_user
assert "reset_key" not in got_user
assert "apikey" not in got_user
assert "email" not in got_user
assert "datasets" not in got_user
def test_user_list_edits(self):
# we need to set fullname because user_list by default sorts by
# display_name
user = factories.User(fullname="Guido")
dataset = factories.Dataset(user=user)
dataset["title"] = "Edited title"
helpers.call_action(
"package_update", context={"user": user["name"]}, **dataset
)
got_users = helpers.call_action("user_list")
# There is one default user
assert len(got_users) == 2
got_user = got_users[0]
assert got_user["number_created_packages"] == 1
def test_user_list_excludes_deleted_users(self):
# we need to set fullname because user_list by default sorts by
# display_name
user = factories.User(fullname="Guido")
factories.User(state="deleted")
got_users = helpers.call_action("user_list")
# There is one default user
assert len(got_users) == 2
assert got_users[0]["name"] == user["name"]
def test_user_list_not_all_fields(self):
# we need to set fullname because user_list by default sorts by
# display_name
user = factories.User(fullname="Guido")
got_users = helpers.call_action("user_list", all_fields=False)
# There is one default user
assert len(got_users) == 2
got_user = got_users[0]
assert got_user == user["name"]
def test_user_list_return_query(self):
user_a = factories.User(email="[email protected]")
query = helpers.call_action(
"user_list",
{"return_query": True},
email="[email protected]"
)
user = query.one()
expected = ["name", "fullname", "about", "email"]
for prop in expected:
assert user_a[prop] == getattr(user, prop), prop
def test_user_list_filtered_by_email(self):
user_a = factories.User(email="[email protected]")
factories.User(email="[email protected]")
got_users = helpers.call_action(
"user_list", email="[email protected]", all_fields=False
)
assert len(got_users) == 1
got_user = got_users[0]
assert got_user == user_a["name"]
def test_user_list_order_by_default(self):
default_user = helpers.call_action("get_site_user", ignore_auth=True)
users = [
factories.User(fullname="Xander Bird", name="bird_x"),
factories.User(fullname="Max Hankins", name="hankins_m"),
factories.User(fullname="", name="morgan_w"),
factories.User(fullname="Kathy Tillman", name="tillman_k"),
]
expected_names = [
u["name"]
for u in [
users[3], # Kathy Tillman
users[1], # Max Hankins
users[2], # morgan_w
users[0], # Xander Bird
]
]
got_users = helpers.call_action("user_list")
got_names = [
u["name"] for u in got_users if u["name"] != default_user["name"]
]
assert got_names == expected_names
def test_user_list_order_by_fullname_only(self):
default_user = helpers.call_action("get_site_user", ignore_auth=True)
users = [
factories.User(fullname="Xander Bird", name="bird_x"),
factories.User(fullname="Max Hankins", name="hankins_m"),
factories.User(fullname="", name="morgan_w"),
factories.User(fullname="Kathy Tillman", name="tillman_k"),
]
expected_fullnames = sorted([u["fullname"] for u in users])
got_users = helpers.call_action("user_list", order_by="fullname")
got_fullnames = [
u["fullname"]
for u in got_users
if u["name"] != default_user["name"]
]
assert got_fullnames == expected_fullnames
def test_user_list_order_by_created_datasets(self):
default_user = helpers.call_action("get_site_user", ignore_auth=True)
users = [
factories.User(fullname="Xander Bird", name="bird_x"),
factories.User(fullname="Max Hankins", name="hankins_m"),
factories.User(fullname="Kathy Tillman", name="tillman_k"),
]
datasets = [
factories.Dataset(user=users[1]),
factories.Dataset(user=users[1]),
]
for dataset in datasets:
dataset["title"] = "Edited title"
helpers.call_action(
"package_update", context={"user": users[1]["name"]}, **dataset
)
expected_names = [
u["name"]
for u in [
users[0], # 0 packages created
users[2], # 0 packages created
users[1], # 2 packages created
]
]
got_users = helpers.call_action(
"user_list", order_by="number_created_packages"
)
got_names = [
u["name"] for u in got_users if u["name"] != default_user["name"]
]
assert got_names == expected_names
def test_user_list_order_by_edits(self):
with pytest.raises(logic.ValidationError):
helpers.call_action("user_list", order_by="edits")
@pytest.mark.usefixtures("non_clean_db")
class TestUserShow(object):
def test_user_show_default_values(self):
user = factories.User()
got_user = helpers.call_action("user_show", id=user["id"])
assert got_user["id"] == user["id"]
assert got_user["name"] == user["name"]
assert got_user["fullname"] == user["fullname"]
assert got_user["display_name"] == user["display_name"]
assert got_user["created"] == user["created"]
assert got_user["about"] == user["about"]
assert got_user["sysadmin"] == user["sysadmin"]
assert got_user["number_created_packages"] == 0
assert "password" not in got_user
assert "reset_key" not in got_user
assert "apikey" not in got_user
assert "email" not in got_user
assert "datasets" not in got_user
assert "password_hash" not in got_user
def test_user_show_keep_email(self):
user = factories.User()
got_user = helpers.call_action(
"user_show", context={"keep_email": True}, id=user["id"]
)
assert got_user["email"] == user["email"]
assert "apikey" not in got_user
assert "password" not in got_user
assert "reset_key" not in got_user
def test_user_show_keep_apikey(self):
user = factories.User()
got_user = helpers.call_action(
"user_show", context={"keep_apikey": True}, id=user["id"]
)
assert "email" not in got_user
assert got_user["apikey"] == user["apikey"]
assert "password" not in got_user
assert "reset_key" not in got_user
def test_user_show_normal_user_no_password_hash(self):
user = factories.User()
got_user = helpers.call_action(
"user_show", id=user["id"], include_password_hash=True
)
assert "password_hash" not in got_user
def test_user_show_for_myself(self):
user = factories.User()
got_user = helpers.call_action(
"user_show", context={"user": user["name"]}, id=user["id"]
)
assert got_user["email"] == user["email"]
assert got_user["apikey"] == user["apikey"]
assert "password" not in got_user
assert "reset_key" not in got_user
def test_user_show_sysadmin_values(self):
user = factories.User()
sysadmin = factories.User(sysadmin=True)
got_user = helpers.call_action(
"user_show", context={"user": sysadmin["name"]}, id=user["id"]
)
assert got_user["email"] == user["email"]
assert got_user["apikey"] == user["apikey"]
assert "password" not in got_user
assert "reset_key" not in got_user
def test_user_show_sysadmin_password_hash(self):
user = factories.User(password="TestPassword1")
sysadmin = factories.User(sysadmin=True)
got_user = helpers.call_action(
"user_show",
context={"user": sysadmin["name"]},
id=user["id"],
include_password_hash=True,
)
assert got_user["email"] == user["email"]
assert got_user["apikey"] == user["apikey"]
assert "password_hash" in got_user
assert "password" not in got_user
assert "reset_key" not in got_user
def test_user_show_include_datasets(self):
user = factories.User()
dataset = factories.Dataset(user=user)
got_user = helpers.call_action(
"user_show", include_datasets=True, id=user["id"]
)
assert len(got_user["datasets"]) == 1
assert got_user["datasets"][0]["name"] == dataset["name"]
def test_user_show_include_datasets_excludes_draft_and_private(self):
user = factories.User()
org = factories.Organization(user=user)
dataset = factories.Dataset(user=user)
factories.Dataset(user=user, state="deleted")
factories.Dataset(user=user, state="draft")
factories.Dataset(user=user, private=True, owner_org=org["name"])
got_user = helpers.call_action(
"user_show", include_datasets=True, id=user["id"]
)
assert len(got_user["datasets"]) == 1
assert got_user["datasets"][0]["name"] == dataset["name"]
assert got_user["number_created_packages"] == 1
def test_user_show_include_datasets_includes_draft_myself(self):
# a user viewing his own user should see the draft and private datasets
user = factories.User()
org = factories.Organization(user=user)
factories.Dataset(user=user)
dataset_deleted = factories.Dataset(user=user, state="deleted")
factories.Dataset(user=user, state="draft")
factories.Dataset(user=user, private=True, owner_org=org["name"])
got_user = helpers.call_action(
"user_show",
context={"user": user["name"]},
include_datasets=True,
id=user["id"],
)
assert len(got_user["datasets"]) == 3
datasets_got = set([user_["name"] for user_ in got_user["datasets"]])
assert dataset_deleted["name"] not in datasets_got
assert got_user["number_created_packages"] == 3
def test_user_show_include_datasets_includes_draft_sysadmin(self):
# sysadmin should see the draft and private datasets
user = factories.User()
sysadmin = factories.Sysadmin()
org = factories.Organization(user=user)
factories.Dataset(user=user)
dataset_deleted = factories.Dataset(user=user, state="deleted")
factories.Dataset(user=user, state="draft")
factories.Dataset(user=user, private=True, owner_org=org["name"])
got_user = helpers.call_action(
"user_show",
context={"user": sysadmin["name"]},
include_datasets=True,
id=user["id"],
)
assert len(got_user["datasets"]) == 3
datasets_got = set([user_["name"] for user_ in got_user["datasets"]])
assert dataset_deleted["name"] not in datasets_got
assert got_user["number_created_packages"] == 3
def test_user_show_for_myself_without_passing_id(self):
user = factories.User()
got_user = helpers.call_action(
"user_show", context={"user": user["name"]}
)
assert got_user["name"] == user["name"]
assert got_user["email"] == user["email"]
assert got_user["apikey"] == user["apikey"]
assert "password" not in got_user
assert "reset_key" not in got_user
@pytest.mark.usefixtures("clean_db", "clean_index")
class TestCurrentPackageList(object):
def test_current_package_list(self):
"""
Test current_package_list_with_resources with no parameters
"""
user = factories.User()
factories.Dataset(user=user)
factories.Dataset(user=user)
current_package_list = helpers.call_action(
"current_package_list_with_resources"
)
assert len(current_package_list) == 2
def test_current_package_list_limit_param(self):
"""
Test current_package_list_with_resources with limit parameter
"""
user = factories.User()
factories.Dataset(user=user)
dataset2 = factories.Dataset(user=user)
current_package_list = helpers.call_action(
"current_package_list_with_resources", limit=1
)
assert len(current_package_list) == 1
assert current_package_list[0]["name"] == dataset2["name"]
def test_current_package_list_offset_param(self):
"""
Test current_package_list_with_resources with offset parameter
"""
user = factories.User()
dataset1 = factories.Dataset(user=user)
factories.Dataset(user=user)
current_package_list = helpers.call_action(
"current_package_list_with_resources", offset=1
)
assert len(current_package_list) == 1
assert current_package_list[0]["name"] == dataset1["name"]
    def test_current_package_list_private_datasets_anonymous_user(self):
"""
Test current_package_list_with_resources with an anonymous user and
a private dataset
"""
user = factories.User()
org = factories.Organization(user=user)
factories.Dataset(
user=user, owner_org=org["name"], private=True
)
factories.Dataset(user=user)
current_package_list = helpers.call_action(
"current_package_list_with_resources", context={}
)
assert len(current_package_list) == 1
def test_current_package_list_private_datasets_sysadmin_user(self):
"""
Test current_package_list_with_resources with a sysadmin user and a
private dataset
"""
user = factories.User()
org = factories.Organization(user=user)
factories.Dataset(
user=user, owner_org=org["name"], private=True
)
factories.Dataset(user=user)
sysadmin = factories.Sysadmin()
current_package_list = helpers.call_action(
"current_package_list_with_resources",
context={"user": sysadmin["name"]},
)
assert len(current_package_list) == 2
@pytest.mark.usefixtures("clean_db", "clean_index")
class TestPackageAutocomplete(object):
def test_package_autocomplete_match_name(self):
pkg = factories.Dataset(name="warandpeace")
result = helpers.call_action("package_autocomplete", q="war")
assert result[0]["name"] == pkg["name"]
assert result[0]["title"] == pkg["title"]
assert result[0]["match_field"] == "name"
assert result[0]["match_displayed"] == pkg["name"]
def test_package_autocomplete_match_title(self):
pkg = factories.Dataset(title="A Wonderful Story")
result = helpers.call_action("package_autocomplete", q="won")
assert result[0]["name"] == pkg["name"]
assert result[0]["title"] == pkg["title"]
assert result[0]["match_field"] == "title"
assert (
result[0]["match_displayed"]
== f"A Wonderful Story ({pkg['name']})"
)
def test_package_autocomplete_does_not_return_private_datasets(self):
user = factories.User()
org = factories.Organization(user=user)
factories.Dataset(
user=user, owner_org=org["name"], title="Some public stuff"
)
factories.Dataset(
user=user,
owner_org=org["name"],
private=True,
title="Some private stuff",
)
package_list = helpers.call_action(
"package_autocomplete", context={"ignore_auth": False}, q="some"
)
assert len(package_list) == 1
def test_package_autocomplete_does_return_private_datasets_from_my_org(
self,
):
user = factories.User()
org = factories.Organization(
users=[{"name": user["name"], "capacity": "member"}]
)
factories.Dataset(
user=user, owner_org=org["id"], title="Some public stuff"
)
factories.Dataset(
user=user,
owner_org=org["id"],
private=True,
title="Some private stuff",
)
package_list = helpers.call_action(
"package_autocomplete",
context={"user": user["name"], "ignore_auth": False},
q="some",
)
assert len(package_list) == 2
def test_package_autocomplete_works_for_the_middle_part_of_title(self):
factories.Dataset(title="Some public stuff")
factories.Dataset(title="Some random stuff")
package_list = helpers.call_action("package_autocomplete", q="bli")
assert len(package_list) == 1
package_list = helpers.call_action("package_autocomplete", q="tuf")
assert len(package_list) == 2
@pytest.mark.usefixtures("clean_db", "clean_index")
class TestPackageSearch(object):
def test_search(self):
factories.Dataset(title="Rivers")
factories.Dataset(title="Lakes") # decoy
search_result = helpers.call_action("package_search", q="rivers")
assert search_result["results"][0]["title"] == "Rivers"
assert search_result["count"] == 1
def test_search_fl(self):
d1 = factories.Dataset(title="Rivers", name="test_ri")
factories.Dataset(title="Lakes")
search_result = helpers.call_action(
"package_search", q="rivers", fl=["title", "name"]
)
assert search_result["results"] == [
{"title": "Rivers", "name": "test_ri"}
]
search_result = helpers.call_action(
"package_search", q="rivers", fl="title,name"
)
assert search_result["results"] == [
{"title": "Rivers", "name": "test_ri"}
]
search_result = helpers.call_action(
"package_search", q="rivers", fl=["id"]
)
assert search_result["results"] == [{"id": d1["id"]}]
def test_search_all(self):
factories.Dataset(title="Rivers")
factories.Dataset(title="Lakes")
search_result = helpers.call_action("package_search") # no q
assert search_result["count"] == 2
def test_bad_action_parameter(self):
with pytest.raises(SearchError):
helpers.call_action("package_search", weird_param=1)
def test_bad_solr_parameter(self):
with pytest.raises(SearchError):
helpers.call_action("package_search", sort="metadata_modified")
# SOLR doesn't like that we didn't specify 'asc' or 'desc'
# SOLR error is 'Missing sort order' or 'Missing_sort_order',
# depending on the solr version.
def _create_bulk_datasets(self, name, count):
from ckan import model
pkgs = [
model.Package(name="{}_{}".format(name, i)) for i in range(count)
]
model.Session.add_all(pkgs)
model.repo.commit_and_remove()
def test_rows_returned_default(self):
self._create_bulk_datasets("rows_default", 11)
results = logic.get_action("package_search")({}, {})
assert len(results["results"]) == 10 # i.e. 'rows' default value
@pytest.mark.ckan_config("ckan.search.rows_max", "3")
def test_rows_returned_limited(self):
self._create_bulk_datasets("rows_limited", 5)
results = logic.get_action("package_search")({}, {"rows": "15"})
assert len(results["results"]) == 3 # i.e. ckan.search.rows_max
def test_facets(self):
org = factories.Organization(name="test-org-facet", title="Test Org")
factories.Dataset(owner_org=org["id"])
factories.Dataset(owner_org=org["id"])
data_dict = {"facet.field": ["organization"]}
search_result = helpers.call_action("package_search", **data_dict)
assert search_result["count"] == 2
assert search_result["search_facets"] == {
"organization": {
"items": [
{
"count": 2,
"display_name": "Test Org",
"name": "test-org-facet",
}
],
"title": "organization",
}
}
def test_facet_limit(self):
group1 = factories.Group(name="test-group-fl1", title="Test Group 1")
group2 = factories.Group(name="test-group-fl2", title="Test Group 2")
factories.Dataset(
groups=[{"name": group1["name"]}, {"name": group2["name"]}]
)
factories.Dataset(groups=[{"name": group1["name"]}])
factories.Dataset()
data_dict = {"facet.field": ["groups"], "facet.limit": 1}
search_result = helpers.call_action("package_search", **data_dict)
assert len(search_result["search_facets"]["groups"]["items"]) == 1
assert search_result["search_facets"] == {
"groups": {
"items": [
{
"count": 2,
"display_name": "Test Group 1",
"name": "test-group-fl1",
}
],
"title": "groups",
}
}
def test_facet_no_limit(self):
group1 = factories.Group()
group2 = factories.Group()
factories.Dataset(
groups=[{"name": group1["name"]}, {"name": group2["name"]}]
)
factories.Dataset(groups=[{"name": group1["name"]}])
factories.Dataset()
data_dict = {"facet.field": ["groups"], "facet.limit": -1} # no limit
search_result = helpers.call_action("package_search", **data_dict)
assert len(search_result["search_facets"]["groups"]["items"]) == 2
def test_sort(self):
factories.Dataset(name="test0")
factories.Dataset(name="test1")
factories.Dataset(name="test2")
search_result = helpers.call_action(
"package_search", sort="metadata_created desc"
)
result_names = [result["name"] for result in search_result["results"]]
assert result_names == ["test2", "test1", "test0"]
@pytest.mark.ckan_config(
"ckan.search.default_package_sort", "metadata_created asc"
)
def test_sort_default_from_config(self):
factories.Dataset(name="test0")
factories.Dataset(name="test1")
factories.Dataset(name="test2")
search_result = helpers.call_action("package_search")
result_names = [result["name"] for result in search_result["results"]]
assert result_names == ["test0", "test1", "test2"]
def test_package_search_on_resource_name(self):
"""
package_search() should allow searching on resource name field.
"""
resource_name = "resource_abc"
factories.Resource(name=resource_name)
search_result = helpers.call_action("package_search", q="resource_abc")
assert (
search_result["results"][0]["resources"][0]["name"]
== resource_name
)
def test_package_search_excludes_private_and_drafts(self):
"""
package_search() with no options should not return private and draft
datasets.
"""
user = factories.User()
org = factories.Organization(user=user)
dataset = factories.Dataset(user=user)
factories.Dataset(user=user, state="deleted")
factories.Dataset(user=user, state="draft")
factories.Dataset(user=user, private=True, owner_org=org["name"])
results = helpers.call_action("package_search")["results"]
assert len(results) == 1
assert results[0]["name"] == dataset["name"]
def test_package_search_with_fq_excludes_private(self):
"""
package_search() with fq capacity:private should not return private
and draft datasets.
"""
user = factories.User()
org = factories.Organization(user=user)
factories.Dataset(user=user)
factories.Dataset(user=user, state="deleted")
factories.Dataset(user=user, state="draft")
factories.Dataset(user=user, private=True, owner_org=org["name"])
fq = "capacity:private"
results = helpers.call_action("package_search", fq=fq)["results"]
assert len(results) == 0
def test_package_search_with_fq_excludes_drafts(self):
"""
A sysadmin user can't use fq drafts to get draft datasets. Nothing is
returned.
"""
user = factories.User()
other_user = factories.User()
org = factories.Organization(user=user)
factories.Dataset(user=user, name="dataset")
factories.Dataset(user=other_user, name="other-dataset")
factories.Dataset(user=user, state="deleted", name="deleted-dataset")
factories.Dataset(user=user, state="draft", name="draft-dataset")
factories.Dataset(
user=other_user, state="draft", name="other-draft-dataset"
)
factories.Dataset(
user=user,
private=True,
owner_org=org["name"],
name="private-dataset",
)
fq = "state:draft"
results = helpers.call_action("package_search", fq=fq)["results"]
assert len(results) == 0
def test_package_search_with_include_drafts_option_excludes_drafts_for_anon_user(
self,
):
"""
        An anonymous user can't use include_drafts to get draft datasets.
"""
user = factories.User()
org = factories.Organization(user=user)
dataset = factories.Dataset(user=user)
factories.Dataset(user=user, state="deleted")
draft_dataset = factories.Dataset(user=user, state="draft")
factories.Dataset(user=user, private=True, owner_org=org["name"])
results = logic.get_action("package_search")(
{"user": ""}, {"include_drafts": True}
)["results"]
assert len(results) == 1
assert results[0]["name"] != draft_dataset["name"]
assert results[0]["name"] == dataset["name"]
def test_package_search_with_include_drafts_option_includes_drafts_for_sysadmin(
self,
):
"""
A sysadmin can use the include_drafts option to get draft datasets for
all users.
"""
user = factories.User()
other_user = factories.User()
sysadmin = factories.Sysadmin()
org = factories.Organization(user=user)
dataset = factories.Dataset(user=user)
factories.Dataset(user=user, state="deleted")
draft_dataset = factories.Dataset(user=user, state="draft")
other_draft_dataset = factories.Dataset(user=other_user, state="draft")
factories.Dataset(user=user, private=True, owner_org=org["name"])
results = logic.get_action("package_search")(
{"user": sysadmin["name"]}, {"include_drafts": True}
)["results"]
assert len(results) == 3
names = [r["name"] for r in results]
assert draft_dataset["name"] in names
assert other_draft_dataset["name"] in names
assert dataset["name"] in names
def test_package_search_with_include_drafts_false_option_doesnot_include_drafts_for_sysadmin(
self,
):
"""
A sysadmin with include_drafts option set to `False` will not get
drafts returned in results.
"""
user = factories.User()
other_user = factories.User()
sysadmin = factories.Sysadmin()
org = factories.Organization(user=user)
dataset = factories.Dataset(user=user)
factories.Dataset(user=user, state="deleted")
draft_dataset = factories.Dataset(user=user, state="draft")
other_draft_dataset = factories.Dataset(user=other_user, state="draft")
factories.Dataset(user=user, private=True, owner_org=org["name"])
results = logic.get_action("package_search")(
{"user": sysadmin["name"]}, {"include_drafts": False}
)["results"]
assert len(results) == 1
names = [r["name"] for r in results]
assert draft_dataset["name"] not in names
assert other_draft_dataset["name"] not in names
assert dataset["name"] in names
def test_package_search_with_include_drafts_option_includes_drafts_for_user(
self,
):
"""
The include_drafts option will include draft datasets for the
authorized user, but not drafts for other users.
"""
user = factories.User()
other_user = factories.User()
org = factories.Organization(user=user)
dataset = factories.Dataset(user=user, name="dataset")
other_dataset = factories.Dataset(
user=other_user, name="other-dataset"
)
factories.Dataset(user=user, state="deleted", name="deleted-dataset")
draft_dataset = factories.Dataset(
user=user, state="draft", name="draft-dataset"
)
other_draft_dataset = factories.Dataset(
user=other_user, state="draft", name="other-draft-dataset"
)
factories.Dataset(
user=user,
private=True,
owner_org=org["name"],
name="private-dataset",
)
results = logic.get_action("package_search")(
{"user": user["name"]}, {"include_drafts": True}
)["results"]
assert len(results) == 3
names = [r["name"] for r in results]
assert draft_dataset["name"] in names
assert other_draft_dataset["name"] not in names
assert dataset["name"] in names
assert other_dataset["name"] in names
def test_package_search_with_fq_for_create_user_id_will_include_datasets_for_other_users(
self,
):
"""
A normal user can use the fq creator_user_id to get active datasets
(but not draft) for another user.
"""
user = factories.User()
other_user = factories.User()
org = factories.Organization(user=user)
dataset = factories.Dataset(user=user, name="dataset")
other_dataset = factories.Dataset(
user=other_user, name="other-dataset"
)
factories.Dataset(user=user, state="deleted", name="deleted-dataset")
draft_dataset = factories.Dataset(
user=user, state="draft", name="draft-dataset"
)
other_draft_dataset = factories.Dataset(
user=other_user, state="draft", name="other-draft-dataset"
)
factories.Dataset(
user=user,
private=True,
owner_org=org["name"],
name="private-dataset",
)
fq = "creator_user_id:{0}".format(other_user["id"])
results = logic.get_action("package_search")(
{"user": user["name"]}, {"fq": fq}
)["results"]
assert len(results) == 1
names = [r["name"] for r in results]
assert draft_dataset["name"] not in names
assert other_draft_dataset["name"] not in names
assert dataset["name"] not in names
assert other_dataset["name"] in names
def test_package_search_with_fq_for_create_user_id_will_not_include_drafts_for_other_users(
self,
):
"""
A normal user can't use fq creator_user_id and drafts to get draft
datasets for another user.
"""
user = factories.User()
other_user = factories.User()
org = factories.Organization(user=user)
factories.Dataset(user=user, name="dataset")
factories.Dataset(user=other_user, name="other-dataset")
factories.Dataset(user=user, state="deleted", name="deleted-dataset")
factories.Dataset(user=user, state="draft", name="draft-dataset")
factories.Dataset(
user=other_user, state="draft", name="other-draft-dataset"
)
factories.Dataset(
user=user,
private=True,
owner_org=org["name"],
name="private-dataset",
)
fq = "(creator_user_id:{0} AND +state:draft)".format(other_user["id"])
results = logic.get_action("package_search")(
{"user": user["name"]}, {"fq": fq, "include_drafts": True}
)["results"]
assert len(results) == 0
def test_package_search_with_fq_for_creator_user_id_and_drafts_and_include_drafts_option_will_not_include_drafts_for_other_user(
self,
):
"""
A normal user can't use fq creator_user_id and drafts and the
include_drafts option to get draft datasets for another user.
"""
user = factories.User()
other_user = factories.User()
org = factories.Organization(user=user)
factories.Dataset(user=user, name="dataset")
factories.Dataset(user=other_user, name="other-dataset")
factories.Dataset(user=user, state="deleted", name="deleted-dataset")
factories.Dataset(user=user, state="draft", name="draft-dataset")
factories.Dataset(
user=other_user, state="draft", name="other-draft-dataset"
)
factories.Dataset(
user=user,
private=True,
owner_org=org["name"],
name="private-dataset",
)
fq = "(creator_user_id:{0} AND +state:draft)".format(other_user["id"])
results = logic.get_action("package_search")(
{"user": user["name"]}, {"fq": fq, "include_drafts": True}
)["results"]
assert len(results) == 0
def test_package_search_with_fq_for_creator_user_id_and_include_drafts_option_will_not_include_drafts_for_other_user(
self,
):
"""
A normal user can't use fq creator_user_id and the include_drafts
option to get draft datasets for another user.
"""
user = factories.User()
other_user = factories.User()
org = factories.Organization(user=user)
factories.Dataset(user=user, name="dataset")
other_dataset = factories.Dataset(
user=other_user, name="other-dataset"
)
factories.Dataset(user=user, state="deleted", name="deleted-dataset")
factories.Dataset(user=user, state="draft", name="draft-dataset")
other_draft_dataset = factories.Dataset(
user=other_user, state="draft", name="other-draft-dataset"
)
factories.Dataset(
user=user,
private=True,
owner_org=org["name"],
name="private-dataset",
)
fq = "creator_user_id:{0}".format(other_user["id"])
results = logic.get_action("package_search")(
{"user": user["name"]}, {"fq": fq, "include_drafts": True}
)["results"]
names = [r["name"] for r in results]
assert len(results) == 1
assert other_dataset["name"] in names
assert other_draft_dataset["name"] not in names
def test_package_search_with_fq_for_create_user_id_will_include_drafts_for_other_users_for_sysadmin(
self,
):
"""
Sysadmins can use fq to get draft datasets for another user.
"""
user = factories.User()
sysadmin = factories.Sysadmin()
other_user = factories.User()
org = factories.Organization(user=user)
dataset = factories.Dataset(user=user, name="dataset")
factories.Dataset(user=other_user, name="other-dataset")
factories.Dataset(user=user, state="deleted", name="deleted-dataset")
draft_dataset = factories.Dataset(
user=user, state="draft", name="draft-dataset"
)
factories.Dataset(
user=other_user, state="draft", name="other-draft-dataset"
)
factories.Dataset(
user=user,
private=True,
owner_org=org["name"],
name="private-dataset",
)
fq = "(creator_user_id:{0} AND +state:draft)".format(user["id"])
results = logic.get_action("package_search")(
{"user": sysadmin["name"]}, {"fq": fq}
)["results"]
names = [r["name"] for r in results]
assert len(results) == 1
assert dataset["name"] not in names
assert draft_dataset["name"] in names
def test_package_search_private_with_include_private(self):
"""
package_search() can return private datasets when
`include_private=True`
"""
user = factories.User()
org = factories.Organization(user=user)
factories.Dataset(user=user, state="deleted")
factories.Dataset(user=user, state="draft")
private_dataset = factories.Dataset(
user=user, private=True, owner_org=org["name"]
)
results = logic.get_action("package_search")(
{"user": user["name"]}, {"include_private": True}
)["results"]
assert [r["name"] for r in results] == [private_dataset["name"]]
@pytest.mark.parametrize("remove_deleted_setting", [True, False])
def test_package_search_private_with_include_private_wont_show_other_orgs_private(
self, remove_deleted_setting
):
with helpers.changed_config("ckan.search.remove_deleted_packages", remove_deleted_setting):
user = factories.User()
user2 = factories.User()
factories.Organization(user=user)
org2 = factories.Organization(user=user2)
# create a deleted dataset if we expect them to be indexed
factories.Dataset(
user=user2,
private=True,
owner_org=org2["name"],
state="active" if remove_deleted_setting else "deleted",
)
# include deleted datasets if we expect them to be indexed
results = logic.get_action("package_search")(
{"user": user["name"]},
{"include_private": True, "include_deleted": not remove_deleted_setting},
)["results"]
assert [r["name"] for r in results] == []
@pytest.mark.parametrize("remove_deleted_setting", [True, False])
def test_package_search_private_with_include_private_syadmin(self, remove_deleted_setting):
with helpers.changed_config("ckan.search.remove_deleted_packages", remove_deleted_setting):
user = factories.User()
sysadmin = factories.Sysadmin()
org = factories.Organization(user=user)
# create a deleted dataset if we expect them to be indexed
private_dataset = factories.Dataset(
user=user,
private=True,
owner_org=org["name"],
state="active" if remove_deleted_setting else "deleted",
)
# include deleted datasets if we expect them to be indexed
results = logic.get_action("package_search")(
{"user": sysadmin["name"]},
{"include_private": True, "include_deleted": not remove_deleted_setting}
)["results"]
assert [r["name"] for r in results] == [private_dataset["name"]]
def test_package_works_without_user_in_context(self):
"""
        package_search() should work even if the user isn't in the context
        (e.g. the ckanext-showcase tests).
"""
logic.get_action("package_search")({}, dict(q="anything"))
def test_local_parameters_not_supported(self):
with pytest.raises(SearchError):
helpers.call_action(
"package_search", q='{!child of="content_type:parentDoc"}'
)
@pytest.mark.ckan_config("ckan.plugins", "example_idatasetform")
@pytest.mark.usefixtures("clean_db", "with_plugins")
class TestPackageAutocompleteWithDatasetForm(object):
def test_custom_schema_returned(self):
dataset1 = factories.Dataset(custom_text="foo")
query = helpers.call_action(
"package_search", q="id:{0}".format(dataset1["id"])
)
assert query["results"][0]["id"] == dataset1["id"]
assert query["results"][0]["custom_text"] == "foo"
def test_custom_schema_not_returned(self):
dataset1 = factories.Dataset(custom_text="foo")
query = helpers.call_action(
"package_search",
q="id:{0}".format(dataset1["id"]),
use_default_schema=True,
)
assert query["results"][0]["id"] == dataset1["id"]
assert "custom_text" not in query["results"][0]
assert query["results"][0]["extras"][0]["key"] == "custom_text"
assert query["results"][0]["extras"][0]["value"] == "foo"
@pytest.mark.usefixtures("clean_db", "clean_index")
class TestUserAutocomplete(object):
def test_autocomplete(self):
factories.Sysadmin(name="autocompletesysadmin")
factories.User(name="autocompleteuser")
result = helpers.call_action("user_autocomplete", q="sysadmin")
assert len(result) == 1
user = result.pop()
assert set(user.keys()) == set(["id", "name", "fullname"])
assert user["name"] == "autocompletesysadmin"
def test_autocomplete_multiple(self):
factories.Sysadmin(name="autocompletesysadmin")
factories.User(name="autocompleteuser")
result = helpers.call_action("user_autocomplete", q="compl")
assert len(result) == 2
def test_autocomplete_limit(self):
factories.Sysadmin(name="autocompletesysadmin")
factories.User(name="autocompleteuser")
result = helpers.call_action("user_autocomplete", q="compl", limit=1)
assert len(result) == 1
@pytest.mark.usefixtures("clean_db", "clean_index")
class TestFormatAutocomplete:
def test_missing_param(self):
with pytest.raises(logic.ValidationError):
helpers.call_action("format_autocomplete")
def test_autocomplete(self):
result = helpers.call_action("format_autocomplete", q="cs")
assert result == []
factories.Resource(format="CSV")
result = helpers.call_action("format_autocomplete", q="cs")
assert result == ["csv"]
@pytest.mark.usefixtures("clean_db")
class TestBadLimitQueryParameters(object):
"""test class for #1258 non-int query parameters cause 500 errors
Test that validation errors are raised when calling actions with
bad parameters.
"""
def test_activity_list_actions(self):
actions = [
"user_activity_list",
"package_activity_list",
"group_activity_list",
"organization_activity_list",
"recently_changed_packages_activity_list",
"current_package_list_with_resources",
]
for action in actions:
with pytest.raises(logic.ValidationError):
helpers.call_action(
action,
id="test_user",
limit="not_an_int",
offset="not_an_int",
)
with pytest.raises(logic.ValidationError):
helpers.call_action(
action, id="test_user", limit=-1, offset=-1
)
def test_package_search_facet_field_is_json(self):
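        # 'facet.field' is expected to be a list (it may also be supplied as a
        # JSON-encoded string); a plain non-JSON string like "notjson" should
        # fail validation.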
kwargs = {"facet.field": "notjson"}
with pytest.raises(logic.ValidationError):
helpers.call_action("package_search", **kwargs)
@pytest.mark.usefixtures("clean_db")
class TestOrganizationListForUser(object):
"""Functional tests for the organization_list_for_user() action function."""
def test_when_user_is_not_a_member_of_any_organizations(self):
"""
When the user isn't a member of any organizations (in any capacity)
organization_list_for_user() should return an empty list.
"""
user = factories.User()
context = {"user": user["name"]}
# Create an organization so we can test that it does not get returned.
factories.Organization()
organizations = helpers.call_action(
"organization_list_for_user", context=context
)
assert organizations == []
def test_when_user_is_an_admin_of_one_organization(self):
"""
When the user is an admin of one organization
organization_list_for_user() should return a list of just that one
organization.
"""
user = factories.User()
context = {"user": user["name"]}
organization = factories.Organization()
# Create a second organization just so we can test that it does not get
# returned.
factories.Organization()
helpers.call_action(
"member_create",
id=organization["id"],
object=user["id"],
object_type="user",
capacity="admin",
)
organizations = helpers.call_action(
"organization_list_for_user", context=context
)
assert len(organizations) == 1
assert organizations[0]["id"] == organization["id"]
def test_when_user_is_an_admin_of_three_organizations(self):
"""
When the user is an admin of three organizations
organization_list_for_user() should return a list of all three
organizations.
"""
user = factories.User()
context = {"user": user["name"]}
organization_1 = factories.Organization()
organization_2 = factories.Organization()
organization_3 = factories.Organization()
        # Create another organization just so we can test that it does not get
        # returned.
factories.Organization()
# Make the user an admin of all three organizations:
for organization in (organization_1, organization_2, organization_3):
helpers.call_action(
"member_create",
id=organization["id"],
object=user["id"],
object_type="user",
capacity="admin",
)
organizations = helpers.call_action(
"organization_list_for_user", context=context
)
assert len(organizations) == 3
ids = [organization["id"] for organization in organizations]
for organization in (organization_1, organization_2, organization_3):
assert organization["id"] in ids
def test_when_permissions_extend_to_sub_organizations(self):
"""
        When the user is an admin of a parent organization,
        organization_list_for_user() should also return its sub-organizations,
        because admin permissions extend down the organization hierarchy.
"""
user = factories.User()
context = {"user": user["name"]}
user["capacity"] = "admin"
top_organization = factories.Organization(users=[user])
middle_organization = factories.Organization(users=[user])
bottom_organization = factories.Organization()
# Create another organization just so we can test that it does not get
# returned.
factories.Organization()
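        # Link the organizations into a hierarchy (bottom under middle,
        # middle under top) so that the user's admin rights on the parent
        # organizations are expected to extend to the bottom one.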
helpers.call_action(
"member_create",
id=bottom_organization["id"],
object=middle_organization["id"],
object_type="group",
capacity="parent",
)
helpers.call_action(
"member_create",
id=middle_organization["id"],
object=top_organization["id"],
object_type="group",
capacity="parent",
)
organizations = helpers.call_action(
"organization_list_for_user", context=context
)
assert len(organizations) == 3
org_ids = set(org["id"] for org in organizations)
assert bottom_organization["id"] in org_ids
def test_does_return_members(self):
"""
By default organization_list_for_user() should return organizations
that the user is just a member (not an admin) of.
"""
user = factories.User()
context = {"user": user["name"]}
organization = factories.Organization()
helpers.call_action(
"member_create",
id=organization["id"],
object=user["id"],
object_type="user",
capacity="member",
)
organizations = helpers.call_action(
"organization_list_for_user", context=context
)
assert [org["id"] for org in organizations] == [organization["id"]]
def test_does_return_editors(self):
"""
By default organization_list_for_user() should return organizations
that the user is just an editor (not an admin) of.
"""
user = factories.User()
context = {"user": user["name"]}
organization = factories.Organization()
helpers.call_action(
"member_create",
id=organization["id"],
object=user["id"],
object_type="user",
capacity="editor",
)
organizations = helpers.call_action(
"organization_list_for_user", context=context
)
assert [org["id"] for org in organizations] == [organization["id"]]
def test_editor_permission(self):
"""
organization_list_for_user() should return organizations that the user
is an editor of if passed a permission that belongs to the editor role.
"""
user = factories.User()
context = {"user": user["name"]}
organization = factories.Organization()
helpers.call_action(
"member_create",
id=organization["id"],
object=user["id"],
object_type="user",
capacity="editor",
)
organizations = helpers.call_action(
"organization_list_for_user",
permission="create_dataset",
context=context,
)
assert [org["id"] for org in organizations] == [organization["id"]]
def test_member_permission(self):
"""
organization_list_for_user() should return organizations that the user
is a member of if passed a permission that belongs to the member role.
"""
user = factories.User()
context = {"user": user["name"]}
organization = factories.Organization()
helpers.call_action(
"member_create",
id=organization["id"],
object=user["id"],
object_type="user",
capacity="member",
)
organizations = helpers.call_action(
"organization_list_for_user", permission="read", context=context
)
assert [org["id"] for org in organizations] == [organization["id"]]
def test_invalid_permission(self):
"""
organization_list_for_user() should return an empty list if passed a
non-existent or invalid permission.
Note that we test this with a user who is an editor of one organization.
If the user was an admin of the organization then it would return that
organization - admins have all permissions, including permissions that
don't exist.
"""
user = factories.User()
context = {"user": user["name"]}
organization = factories.Organization()
factories.Organization()
helpers.call_action(
"member_create",
id=organization["id"],
object=user["id"],
object_type="user",
capacity="editor",
)
for permission in ("", " ", "foo", 27.3, 5, True, False, None):
organizations = helpers.call_action(
"organization_list_for_user",
permission=permission,
context=context,
)
assert organizations == []
def test_that_it_does_not_return_groups(self):
"""
organization_list_for_user() should not return groups that the user is
a member, editor or admin of.
"""
user = factories.User()
context = {"user": user["name"]}
group_1 = factories.Group()
group_2 = factories.Group()
group_3 = factories.Group()
helpers.call_action(
"member_create",
id=group_1["id"],
object=user["id"],
object_type="user",
capacity="member",
)
helpers.call_action(
"member_create",
id=group_2["id"],
object=user["id"],
object_type="user",
capacity="editor",
)
helpers.call_action(
"member_create",
id=group_3["id"],
object=user["id"],
object_type="user",
capacity="admin",
)
organizations = helpers.call_action(
"organization_list_for_user", context=context
)
assert organizations == []
def test_that_it_does_not_return_previous_memberships(self):
"""
        organization_list_for_user() should not return organizations that the
        user was previously an admin of but has since been removed from.
"""
user = factories.User()
context = {"user": user["name"]}
organization = factories.Organization()
# Make the user an admin of the organization.
helpers.call_action(
"member_create",
id=organization["id"],
object=user["id"],
object_type="user",
capacity="admin",
)
# Remove the user from the organization.
helpers.call_action(
"member_delete",
id=organization["id"],
object=user["id"],
object_type="user",
)
organizations = helpers.call_action(
"organization_list_for_user", context=context
)
assert organizations == []
def test_when_user_is_sysadmin(self):
"""
When the user is a sysadmin organization_list_for_user() should just
return all organizations, even if the user is not a member of them.
"""
user = factories.Sysadmin()
context = {"user": user["name"]}
organization = factories.Organization()
organizations = helpers.call_action(
"organization_list_for_user", context=context
)
assert [org["id"] for org in organizations] == [organization["id"]]
def test_that_it_does_not_return_deleted_organizations(self):
"""
organization_list_for_user() should not return deleted organizations
that the user was an admin of.
"""
user = factories.User()
context = {"user": user["name"]}
organization = factories.Organization()
# Make the user an admin of the organization.
helpers.call_action(
"member_create",
id=organization["id"],
object=user["id"],
object_type="user",
capacity="admin",
)
# Delete the organization.
helpers.call_action(
"organization_delete", id=organization["id"], context=context
)
organizations = helpers.call_action(
"organization_list_for_user", context=context
)
assert organizations == []
def test_with_no_authorized_user(self):
"""
organization_list_for_user() should return an empty list if there's no
authorized user. Users who aren't logged-in don't have any permissions.
"""
# Create an organization so we can test that it doesn't get returned.
factories.Organization()
organizations = helpers.call_action("organization_list_for_user")
assert organizations == []
def test_organization_list_for_user_returns_all_roles(self):
user1 = factories.User()
user2 = factories.User()
user3 = factories.User()
org1 = factories.Organization(
users=[
{"name": user1["name"], "capacity": "admin"},
{"name": user2["name"], "capacity": "editor"},
]
)
org2 = factories.Organization(
users=[
{"name": user1["name"], "capacity": "member"},
{"name": user2["name"], "capacity": "member"},
]
)
org3 = factories.Organization(
users=[{"name": user1["name"], "capacity": "editor"}]
)
org_list_for_user1 = helpers.call_action(
"organization_list_for_user", id=user1["id"]
)
assert sorted([org["id"] for org in org_list_for_user1]) == sorted(
[org1["id"], org2["id"], org3["id"]]
)
org_list_for_user2 = helpers.call_action(
"organization_list_for_user", id=user2["id"]
)
assert sorted([org["id"] for org in org_list_for_user2]) == sorted(
[org1["id"], org2["id"]]
)
org_list_for_user3 = helpers.call_action(
"organization_list_for_user", id=user3["id"]
)
assert org_list_for_user3 == []
@pytest.mark.ckan_config("ckan.plugins", "image_view")
@pytest.mark.usefixtures("non_clean_db", "with_plugins")
class TestShowResourceView(object):
def test_resource_view_show(self):
resource = factories.Resource()
resource_view = {
"resource_id": resource["id"],
"view_type": "image_view",
"title": "View",
"description": "A nice view",
"image_url": "url",
}
new_view = helpers.call_action("resource_view_create", **resource_view)
result = helpers.call_action("resource_view_show", id=new_view["id"])
result.pop("id")
result.pop("package_id")
assert result == resource_view
def test_resource_view_show_id_missing(self):
with pytest.raises(logic.ValidationError):
helpers.call_action("resource_view_show")
def test_resource_view_show_id_not_found(self):
with pytest.raises(logic.NotFound):
helpers.call_action("resource_view_show", id="does_not_exist")
class TestGetHelpShow(object):
def test_help_show_basic(self):
function_name = "package_search"
result = helpers.call_action("help_show", name=function_name)
function = logic.get_action(function_name)
assert result == function.__doc__
def test_help_show_no_docstring(self):
function_name = "package_search"
function = logic.get_action(function_name)
actual_docstring = function.__doc__
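        # Temporarily remove the action's docstring so help_show has nothing
        # to return, then restore it afterwards so other tests are unaffected.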
function.__doc__ = None
result = helpers.call_action("help_show", name=function_name)
function.__doc__ = actual_docstring
assert result is None
def test_help_show_not_found(self):
function_name = "unknown_action"
with pytest.raises(logic.NotFound):
helpers.call_action("help_show", name=function_name)
@pytest.mark.usefixtures("non_clean_db")
class TestConfigOptionShow(object):
@pytest.mark.ckan_config("ckan.site_title", "My Test CKAN")
def test_config_option_show_in_config_not_in_db(self):
"""config_option_show returns value from config when value on in
system_info table."""
title = helpers.call_action(
"config_option_show", key="ckan.site_title"
)
assert title == "My Test CKAN"
@pytest.mark.ckan_config("ckan.site_title", "My Test CKAN")
def test_config_option_show_in_config_and_in_db(self):
"""config_option_show returns value from db when value is in both
config and system_info table."""
params = {"ckan.site_title": "Test site title"}
helpers.call_action("config_option_update", **params)
title = helpers.call_action(
"config_option_show", key="ckan.site_title"
)
assert title == "Test site title"
@pytest.mark.ckan_config("ckan.not.editable", "My non editable option")
def test_config_option_show_not_whitelisted_key(self):
"""config_option_show raises exception if key is not a whitelisted
config option."""
with pytest.raises(logic.ValidationError):
helpers.call_action("config_option_show", key="ckan.not.editable")
class TestConfigOptionList(object):
def test_config_option_list(self):
"""config_option_list returns whitelisted config option keys"""
keys = helpers.call_action("config_option_list")
schema_keys = list(schema.update_configuration_schema().keys())
assert keys == schema_keys
def remove_pseudo_users(user_list):
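    """Remove CKAN's pseudo users ('logged_in' and 'visitor') from
    ``user_list``, modifying the list in place."""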
pseudo_users = set(("logged_in", "visitor"))
user_list[:] = [
user for user in user_list if user["name"] not in pseudo_users
]
@pytest.mark.usefixtures("non_clean_db")
class TestTagShow(object):
def test_tag_show_for_free_tag(self):
tag = factories.Tag.stub().name
dataset = factories.Dataset(tags=[{"name": tag}])
tag_in_dataset = dataset["tags"][0]
tag_shown = helpers.call_action("tag_show", id=tag)
assert tag_shown["name"] == tag
assert tag_shown["display_name"] == tag
assert tag_shown["id"] == tag_in_dataset["id"]
assert tag_shown["vocabulary_id"] is None
assert "packages" not in tag_shown
@pytest.mark.usefixtures("clean_index")
def test_tag_show_with_datasets(self):
tag = factories.Tag.stub().name
dataset = factories.Dataset(tags=[{"name": tag}])
tag_shown = helpers.call_action(
"tag_show", id=tag, include_datasets=True
)
assert [d["name"] for d in tag_shown["packages"]] == [dataset["name"]]
def test_tag_show_not_found(self):
with pytest.raises(logic.NotFound):
helpers.call_action("tag_show", id=factories.Tag.stub().name)
@pytest.mark.usefixtures("clean_db")
def test_tag_show_for_flexible_tag(self):
# A 'flexible' tag is one with spaces, some punctuation
# and foreign characters in its name
dataset = factories.Dataset(tags=[{"name": "Flexible. \u30a1"}])
tag_shown = helpers.call_action(
"tag_show", id="Flexible. \u30a1", include_datasets=True
)
assert tag_shown["name"] == "Flexible. \u30a1"
assert tag_shown["display_name"] == "Flexible. \u30a1"
assert [d["name"] for d in tag_shown["packages"]] == [dataset["name"]]
def test_tag_show_for_vocab_tag(self):
tag = factories.Tag.stub().name
vocab = factories.Vocabulary(tags=[dict(name=tag)])
dataset = factories.Dataset(tags=vocab["tags"])
tag_in_dataset = dataset["tags"][0]
tag_shown = helpers.call_action(
"tag_show",
id=tag,
vocabulary_id=vocab["id"],
include_datasets=True,
)
assert tag_shown["name"] == tag
assert tag_shown["display_name"] == tag
assert tag_shown["id"] == tag_in_dataset["id"]
assert tag_shown["vocabulary_id"] == vocab["id"]
assert [d["name"] for d in tag_shown["packages"]] == [dataset["name"]]
@pytest.mark.usefixtures("clean_db")
class TestTagList(object):
def test_tag_list(self):
tag = factories.Tag.stub().name
tag2 = factories.Tag.stub().name
factories.Dataset(tags=[{"name": tag}, {"name": tag2}])
factories.Dataset(tags=[{"name": tag2}])
tag_list = helpers.call_action("tag_list")
assert set(tag_list) == set((tag, tag2))
def test_tag_list_all_fields(self):
factories.Dataset(tags=[{"name": "acid-rain"}])
tag_list = helpers.call_action("tag_list", all_fields=True)
assert tag_list[0]["name"] == "acid-rain"
assert tag_list[0]["display_name"] == "acid-rain"
assert "packages" not in tag_list
def test_tag_list_with_flexible_tag(self):
# A 'flexible' tag is one with spaces, punctuation (apart from commas)
# and foreign characters in its name
flexible_tag = "Flexible. \u30a1"
factories.Dataset(tags=[{"name": flexible_tag}])
tag_list = helpers.call_action("tag_list", all_fields=True)
assert tag_list[0]["name"] == flexible_tag
def test_tag_list_with_vocab(self):
vocab = factories.Vocabulary(
tags=[dict(name="acid-rain"), dict(name="pollution")]
)
tag_list = helpers.call_action("tag_list", vocabulary_id=vocab["id"])
assert set(tag_list) == set(("acid-rain", "pollution"))
def test_tag_list_vocab_not_found(self):
with pytest.raises(logic.NotFound):
helpers.call_action("tag_list", vocabulary_id="does-not-exist")
@pytest.mark.usefixtures("clean_db")
class TestMembersList(object):
def test_dataset_delete_marks_membership_of_group_as_deleted(self):
sysadmin = factories.Sysadmin()
group = factories.Group()
dataset = factories.Dataset(groups=[{"name": group["name"]}])
context = {"user": sysadmin["name"]}
group_members = helpers.call_action(
"member_list", context, id=group["id"], object_type="package"
)
assert len(group_members) == 1
assert group_members[0][0] == dataset["id"]
assert group_members[0][1] == "package"
helpers.call_action("package_delete", context, id=dataset["id"])
group_members = helpers.call_action(
"member_list", context, id=group["id"], object_type="package"
)
assert len(group_members) == 0
def test_dataset_delete_marks_membership_of_org_as_deleted(self):
sysadmin = factories.Sysadmin()
org = factories.Organization()
dataset = factories.Dataset(owner_org=org["id"])
context = {"user": sysadmin["name"]}
org_members = helpers.call_action(
"member_list", context, id=org["id"], object_type="package"
)
assert len(org_members) == 1
assert org_members[0][0] == dataset["id"]
assert org_members[0][1] == "package"
helpers.call_action("package_delete", context, id=dataset["id"])
org_members = helpers.call_action(
"member_list", context, id=org["id"], object_type="package"
)
assert len(org_members) == 0
def test_user_delete_marks_membership_of_group_as_deleted(self):
sysadmin = factories.Sysadmin()
group = factories.Group()
user = factories.User()
context = {"user": sysadmin["name"]}
member_dict = {
"username": user["id"],
"id": group["id"],
"role": "member",
}
helpers.call_action("group_member_create", context, **member_dict)
group_members = helpers.call_action(
"member_list",
context,
id=group["id"],
object_type="user",
capacity="member",
)
assert len(group_members) == 1
assert group_members[0][0] == user["id"]
assert group_members[0][1] == "user"
helpers.call_action("user_delete", context, id=user["id"])
group_members = helpers.call_action(
"member_list",
context,
id=group["id"],
object_type="user",
capacity="member",
)
assert len(group_members) == 0
def test_user_delete_marks_membership_of_org_as_deleted(self):
sysadmin = factories.Sysadmin()
org = factories.Organization()
user = factories.User()
context = {"user": sysadmin["name"]}
member_dict = {
"username": user["id"],
"id": org["id"],
"role": "member",
}
helpers.call_action(
"organization_member_create", context, **member_dict
)
org_members = helpers.call_action(
"member_list",
context,
id=org["id"],
object_type="user",
capacity="member",
)
assert len(org_members) == 1
assert org_members[0][0] == user["id"]
assert org_members[0][1] == "user"
helpers.call_action("user_delete", context, id=user["id"])
org_members = helpers.call_action(
"member_list",
context,
id=org["id"],
object_type="user",
capacity="member",
)
assert len(org_members) == 0
@pytest.mark.usefixtures("non_clean_db")
class TestFollow(object):
def test_followee_list(self):
group1 = factories.Group(title="Finance")
group2 = factories.Group(title="Environment")
factories.Group(title="Education")
user = factories.User()
context = {"user": user["name"]}
helpers.call_action("follow_group", context, id=group1["id"])
helpers.call_action("follow_group", context, id=group2["id"])
followee_list = helpers.call_action(
"followee_list", context, id=user["name"]
)
assert len(followee_list) == 2
assert sorted([f["display_name"] for f in followee_list]) == [
"Environment",
"Finance",
]
def test_followee_list_with_q(self):
group1 = factories.Group(title="Finance")
group2 = factories.Group(title="Environment")
factories.Group(title="Education")
user = factories.User()
context = {"user": user["name"]}
helpers.call_action("follow_group", context, id=group1["id"])
helpers.call_action("follow_group", context, id=group2["id"])
followee_list = helpers.call_action(
"followee_list", context, id=user["name"], q="E"
)
assert len(followee_list) == 1
assert followee_list[0]["display_name"] == "Environment"
class TestStatusShow(object):
@pytest.mark.ckan_config("ckan.plugins", "stats")
@pytest.mark.usefixtures("clean_db", "with_plugins")
def test_status_show(self):
status = helpers.call_action("status_show")
assert status["ckan_version"] == __version__
assert status["site_url"] == "http://test.ckan.net"
assert status["site_title"] == "CKAN"
assert status["site_description"] == ""
assert status["locale_default"] == "en"
assert type(status["extensions"]) == list
assert status["extensions"] == ["stats"]
class TestJobList(helpers.FunctionalRQTestBase):
def test_all_queues(self):
"""
Test getting jobs from all queues.
"""
job1 = self.enqueue()
job2 = self.enqueue()
job3 = self.enqueue(queue="my_queue")
jobs = helpers.call_action("job_list")
assert len(jobs) == 3
assert {job["id"] for job in jobs} == {job1.id, job2.id, job3.id}
def test_specific_queues(self):
"""
Test getting jobs from specific queues.
"""
self.enqueue()
job2 = self.enqueue(queue="q2")
job3 = self.enqueue(queue="q3")
job4 = self.enqueue(queue="q3")
jobs = helpers.call_action("job_list", queues=["q2"])
assert len(jobs) == 1
assert jobs[0]["id"] == job2.id
jobs = helpers.call_action("job_list", queues=["q2", "q3"])
assert len(jobs) == 3
assert {job["id"] for job in jobs} == {job2.id, job3.id, job4.id}
class TestJobShow(helpers.FunctionalRQTestBase):
def test_existing_job(self):
"""
Test showing an existing job.
"""
job = self.enqueue(queue="my_queue", title="Title")
d = helpers.call_action("job_show", id=job.id)
assert d["id"] == job.id
assert d["title"] == "Title"
assert d["queue"] == "my_queue"
assert _seconds_since_timestamp(d["created"], "%Y-%m-%dT%H:%M:%S") < 10
def test_not_existing_job(self):
"""
        Test showing a non-existent job.
"""
with pytest.raises(logic.NotFound):
helpers.call_action("job_show", id="does-not-exist")
def _seconds_since_timestamp(timestamp, format_):
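    """Return the number of seconds elapsed since ``timestamp``, a string
    parsed with the given strptime ``format_``."""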
dt = datetime.datetime.strptime(timestamp, format_)
now = datetime.datetime.utcnow()
assert now > dt # we assume timestamp is not in the future
return (now - dt).total_seconds()
@pytest.mark.usefixtures("non_clean_db")
class TestActivityShow(object):
def test_simple_without_data(self):
dataset = factories.Dataset()
user = factories.User()
activity = factories.Activity(
user_id=user["id"],
object_id=dataset["id"],
activity_type="new package",
data={"package": copy.deepcopy(dataset), "actor": "Mr Someone"},
)
activity_shown = helpers.call_action(
"activity_show", id=activity["id"], include_data=False
)
assert activity_shown["user_id"] == user["id"]
assert (
_seconds_since_timestamp(
activity_shown["timestamp"], "%Y-%m-%dT%H:%M:%S.%f"
)
< 10
)
assert activity_shown["object_id"] == dataset["id"]
assert activity_shown["data"] == {"package": {"title": dataset["title"]}}
assert activity_shown["activity_type"] == "new package"
def test_simple_with_data(self):
dataset = factories.Dataset()
user = factories.User()
activity = factories.Activity(
user_id=user["id"],
object_id=dataset["id"],
activity_type="new package",
data={"package": copy.deepcopy(dataset), "actor": "Mr Someone"},
)
activity_shown = helpers.call_action(
"activity_show", id=activity["id"], include_data=True
)
assert activity_shown["user_id"] == user["id"]
assert (
_seconds_since_timestamp(
activity_shown["timestamp"], "%Y-%m-%dT%H:%M:%S.%f"
)
< 10
)
assert activity_shown["object_id"] == dataset["id"]
assert activity_shown["data"] == {
"package": dataset,
"actor": "Mr Someone",
}
assert activity_shown["activity_type"] == "new package"
def _clear_activities():
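    """Delete all activity and activity-detail rows so that tests start from
    an empty activity stream."""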
from ckan import model
model.Session.query(model.ActivityDetail).delete()
model.Session.query(model.Activity).delete()
model.Session.flush()
@pytest.mark.usefixtures("clean_db")
class TestPackageActivityList(object):
def test_create_dataset(self):
user = factories.User()
dataset = factories.Dataset(user=user)
activities = helpers.call_action(
"package_activity_list", id=dataset["id"]
)
assert [activity["activity_type"] for activity in activities] == [
"new package"
]
assert activities[0]["user_id"] == user["id"]
assert activities[0]["object_id"] == dataset["id"]
assert activities[0]["data"]["package"]["title"] == dataset["title"]
assert "extras" not in activities[0]["data"]["package"]
def test_change_dataset(self):
user = factories.User()
_clear_activities()
dataset = factories.Dataset(user=user)
original_title = dataset["title"]
dataset["title"] = "Dataset with changed title"
helpers.call_action(
"package_update", context={"user": user["name"]}, **dataset
)
activities = helpers.call_action(
"package_activity_list", id=dataset["id"]
)
assert [activity["activity_type"] for activity in activities] == [
"changed package",
"new package",
]
assert activities[0]["user_id"] == user["id"]
assert activities[0]["object_id"] == dataset["id"]
assert activities[0]["data"]["package"]["title"] == dataset["title"]
assert (
activities[0]["data"]["package"]["title"]
== "Dataset with changed title"
)
        # the earlier 'new package' activity still records the original title
assert activities[1]["activity_type"] == "new package"
assert activities[1]["data"]["package"]["title"] == original_title
def test_change_dataset_add_extra(self):
user = factories.User()
dataset = factories.Dataset(user=user)
_clear_activities()
dataset["extras"].append(dict(key="rating", value="great"))
helpers.call_action(
"package_update", context={"user": user["name"]}, **dataset
)
activities = helpers.call_action(
"package_activity_list", id=dataset["id"]
)
assert [activity["activity_type"] for activity in activities] == [
"changed package"
]
assert activities[0]["user_id"] == user["id"]
assert activities[0]["object_id"] == dataset["id"]
assert activities[0]["data"]["package"]["title"] == dataset["title"]
assert "extras" not in activities[0]["data"]["package"]
def test_change_dataset_change_extra(self):
user = factories.User()
dataset = factories.Dataset(
user=user, extras=[dict(key="rating", value="great")]
)
_clear_activities()
dataset["extras"][0] = dict(key="rating", value="ok")
helpers.call_action(
"package_update", context={"user": user["name"]}, **dataset
)
activities = helpers.call_action(
"package_activity_list", id=dataset["id"]
)
assert [activity["activity_type"] for activity in activities] == [
"changed package"
]
assert activities[0]["user_id"] == user["id"]
assert activities[0]["object_id"] == dataset["id"]
assert activities[0]["data"]["package"]["title"] == dataset["title"]
assert "extras" not in activities[0]["data"]["package"]
def test_change_dataset_delete_extra(self):
user = factories.User()
dataset = factories.Dataset(
user=user, extras=[dict(key="rating", value="great")]
)
_clear_activities()
dataset["extras"] = []
helpers.call_action(
"package_update", context={"user": user["name"]}, **dataset
)
activities = helpers.call_action(
"package_activity_list", id=dataset["id"]
)
assert [activity["activity_type"] for activity in activities] == [
"changed package"
]
assert activities[0]["user_id"] == user["id"]
assert activities[0]["object_id"] == dataset["id"]
assert activities[0]["data"]["package"]["title"] == dataset["title"]
assert "extras" not in activities[0]["data"]["package"]
def test_change_dataset_add_resource(self):
user = factories.User()
dataset = factories.Dataset(user=user)
_clear_activities()
factories.Resource(package_id=dataset["id"], user=user)
activities = helpers.call_action(
"package_activity_list", id=dataset["id"]
)
assert [activity["activity_type"] for activity in activities] == [
"changed package"
]
assert activities[0]["user_id"] == user["id"]
assert activities[0]["object_id"] == dataset["id"]
assert activities[0]["data"]["package"]["title"] == dataset["title"]
# NB the detail is not included - that is only added in by
# activity_list_to_html()
def test_change_dataset_change_resource(self):
user = factories.User()
dataset = factories.Dataset(
user=user,
resources=[dict(url="https://example.com/foo.csv", format="csv")],
)
_clear_activities()
dataset["resources"][0]["format"] = "pdf"
helpers.call_action(
"package_update", context={"user": user["name"]}, **dataset
)
activities = helpers.call_action(
"package_activity_list", id=dataset["id"]
)
assert [activity["activity_type"] for activity in activities] == [
"changed package"
]
assert activities[0]["user_id"] == user["id"]
assert activities[0]["object_id"] == dataset["id"]
assert activities[0]["data"]["package"]["title"] == dataset["title"]
def test_change_dataset_delete_resource(self):
user = factories.User()
dataset = factories.Dataset(
user=user,
resources=[dict(url="https://example.com/foo.csv", format="csv")],
)
_clear_activities()
dataset["resources"] = []
helpers.call_action(
"package_update", context={"user": user["name"]}, **dataset
)
activities = helpers.call_action(
"package_activity_list", id=dataset["id"]
)
assert [activity["activity_type"] for activity in activities] == [
"changed package"
]
assert activities[0]["user_id"] == user["id"]
assert activities[0]["object_id"] == dataset["id"]
assert activities[0]["data"]["package"]["title"] == dataset["title"]
def test_change_dataset_add_tag(self):
user = factories.User()
dataset = factories.Dataset(user=user)
_clear_activities()
dataset["tags"].append(dict(name="checked"))
helpers.call_action(
"package_update", context={"user": user["name"]}, **dataset
)
activities = helpers.call_action(
"package_activity_list", id=dataset["id"]
)
assert [activity["activity_type"] for activity in activities] == [
"changed package"
]
assert activities[0]["user_id"] == user["id"]
assert activities[0]["object_id"] == dataset["id"]
assert activities[0]["data"]["package"]["title"] == dataset["title"]
def test_delete_tag_from_dataset(self):
user = factories.User()
dataset = factories.Dataset(user=user, tags=[dict(name="checked")])
_clear_activities()
dataset["tags"] = []
helpers.call_action(
"package_update", context={"user": user["name"]}, **dataset
)
activities = helpers.call_action(
"package_activity_list", id=dataset["id"]
)
assert [activity["activity_type"] for activity in activities] == [
"changed package"
]
assert activities[0]["user_id"] == user["id"]
assert activities[0]["object_id"] == dataset["id"]
assert activities[0]["data"]["package"]["title"] == dataset["title"]
def test_delete_dataset(self):
user = factories.User()
dataset = factories.Dataset(user=user)
_clear_activities()
helpers.call_action(
"package_delete", context={"user": user["name"]}, **dataset
)
activities = helpers.call_action(
"package_activity_list", id=dataset["id"]
)
assert [activity["activity_type"] for activity in activities] == [
"deleted package"
]
assert activities[0]["user_id"] == user["id"]
assert activities[0]["object_id"] == dataset["id"]
assert activities[0]["data"]["package"]["title"] == dataset["title"]
def test_private_dataset_has_no_activity(self):
user = factories.User()
org = factories.Organization(user=user)
_clear_activities()
dataset = factories.Dataset(
private=True, owner_org=org["id"], user=user
)
dataset["tags"] = []
helpers.call_action(
"package_update", context={"user": user["name"]}, **dataset
)
activities = helpers.call_action(
"package_activity_list", id=dataset["id"]
)
assert [activity["activity_type"] for activity in activities] == []
def test_private_dataset_delete_has_no_activity(self):
user = factories.User()
org = factories.Organization(user=user)
_clear_activities()
dataset = factories.Dataset(
private=True, owner_org=org["id"], user=user
)
helpers.call_action(
"package_delete", context={"user": user["name"]}, **dataset
)
activities = helpers.call_action(
"package_activity_list", id=dataset["id"]
)
assert [activity["activity_type"] for activity in activities] == []
def _create_bulk_types_activities(self, types):
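        """Create one activity of each type in ``types`` for a fresh dataset,
        directly via the model, and return the dataset's id."""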
dataset = factories.Dataset()
from ckan import model
user = factories.User()
objs = [
model.Activity(
user_id=user["id"],
object_id=dataset["id"],
activity_type=activity_type,
data=None,
)
for activity_type in types
]
model.Session.add_all(objs)
model.repo.commit_and_remove()
return dataset["id"]
def test_error_bad_search(self):
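        # activity_types and exclude_activity_types are mutually exclusive, so
        # a ValidationError is expected regardless of the id supplied (the
        # builtin ``id`` below is effectively a placeholder).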
with pytest.raises(logic.ValidationError):
helpers.call_action(
"package_activity_list",
id=id,
activity_types=["new package"],
exclude_activity_types=["deleted package"],
)
def test_activity_types_filter(self):
types = [
"new package",
"changed package",
"deleted package",
"changed package",
"new package",
]
id = self._create_bulk_types_activities(types)
activities_new = helpers.call_action(
"package_activity_list", id=id, activity_types=["new package"]
)
assert len(activities_new) == 2
activities_not_new = helpers.call_action(
"package_activity_list",
id=id,
exclude_activity_types=["new package"],
)
assert len(activities_not_new) == 3
activities_delete = helpers.call_action(
"package_activity_list", id=id, activity_types=["deleted package"]
)
assert len(activities_delete) == 1
activities_not_deleted = helpers.call_action(
"package_activity_list",
id=id,
exclude_activity_types=["deleted package"],
)
assert len(activities_not_deleted) == 4
def _create_bulk_package_activities(self, count):
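        """Create ``count`` bare activity rows for a fresh dataset via the
        model and return the dataset's id (used to test list limits)."""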
dataset = factories.Dataset()
from ckan import model
user = factories.User()
objs = [
model.Activity(
user_id=user["id"],
object_id=dataset["id"],
activity_type=None,
data=None,
)
for _ in range(count)
]
model.Session.add_all(objs)
model.repo.commit_and_remove()
return dataset["id"]
def test_limit_default(self):
id = self._create_bulk_package_activities(35)
results = helpers.call_action("package_activity_list", id=id)
assert len(results) == 31 # i.e. default value
@pytest.mark.ckan_config("ckan.activity_list_limit", "5")
def test_limit_configured(self):
id = self._create_bulk_package_activities(7)
results = helpers.call_action("package_activity_list", id=id)
assert len(results) == 5 # i.e. ckan.activity_list_limit
@pytest.mark.ckan_config("ckan.activity_list_limit", "5")
@pytest.mark.ckan_config("ckan.activity_list_limit_max", "7")
def test_limit_hits_max(self):
id = self._create_bulk_package_activities(9)
results = helpers.call_action(
"package_activity_list", id=id, limit="9"
)
assert len(results) == 7 # i.e. ckan.activity_list_limit_max
def test_normal_user_doesnt_see_hidden_activities(self):
# activity is 'hidden' because dataset is created by site_user
dataset = factories.Dataset()
activities = helpers.call_action(
"package_activity_list", id=dataset["id"]
)
assert [activity["activity_type"] for activity in activities] == []
def test_sysadmin_user_doesnt_see_hidden_activities_by_default(self):
# activity is 'hidden' because dataset is created by site_user
dataset = factories.Dataset()
activities = helpers.call_action(
"package_activity_list", id=dataset["id"]
)
assert [activity["activity_type"] for activity in activities] == []
def test_sysadmin_user_can_include_hidden_activities(self):
# activity is 'hidden' because dataset is created by site_user
dataset = factories.Dataset()
activities = helpers.call_action(
"package_activity_list",
include_hidden_activity=True,
id=dataset["id"],
)
assert [activity["activity_type"] for activity in activities] == [
"new package"
]
@pytest.mark.usefixtures("clean_db")
class TestUserActivityList(object):
def test_create_user(self):
user = factories.User()
activities = helpers.call_action("user_activity_list", id=user["id"])
assert [activity["activity_type"] for activity in activities] == [
"new user"
]
assert activities[0]["user_id"] == user["id"]
assert activities[0]["object_id"] == user["id"]
def test_create_dataset(self):
user = factories.User()
_clear_activities()
dataset = factories.Dataset(user=user)
activities = helpers.call_action("user_activity_list", id=user["id"])
assert [activity["activity_type"] for activity in activities] == [
"new package"
]
assert activities[0]["user_id"] == user["id"]
assert activities[0]["object_id"] == dataset["id"]
assert activities[0]["data"]["package"]["title"] == dataset["title"]
def test_dataset_changed_by_another_user(self):
user = factories.User()
another_user = factories.Sysadmin()
dataset = factories.Dataset(user=user)
_clear_activities()
dataset["extras"].append(dict(key="rating", value="great"))
helpers.call_action(
"package_update", context={"user": another_user["name"]}, **dataset
)
# the user might have created the dataset, but a change by another
# user does not show on the user's activity stream
activities = helpers.call_action("user_activity_list", id=user["id"])
assert [activity["activity_type"] for activity in activities] == []
def test_change_dataset_add_extra(self):
user = factories.User()
dataset = factories.Dataset(user=user)
_clear_activities()
dataset["extras"].append(dict(key="rating", value="great"))
helpers.call_action(
"package_update", context={"user": user["name"]}, **dataset
)
activities = helpers.call_action("user_activity_list", id=user["id"])
assert [activity["activity_type"] for activity in activities] == [
"changed package"
]
assert activities[0]["user_id"] == user["id"]
assert activities[0]["object_id"] == dataset["id"]
assert activities[0]["data"]["package"]["title"] == dataset["title"]
def test_change_dataset_add_tag(self):
user = factories.User()
dataset = factories.Dataset(user=user)
_clear_activities()
dataset["tags"].append(dict(name="checked"))
helpers.call_action(
"package_update", context={"user": user["name"]}, **dataset
)
activities = helpers.call_action("user_activity_list", id=user["id"])
assert [activity["activity_type"] for activity in activities] == [
"changed package"
]
assert activities[0]["user_id"] == user["id"]
assert activities[0]["object_id"] == dataset["id"]
assert activities[0]["data"]["package"]["title"] == dataset["title"]
def test_create_group(self):
user = factories.User()
_clear_activities()
group = factories.Group(user=user)
activities = helpers.call_action("user_activity_list", id=user["id"])
assert [activity["activity_type"] for activity in activities] == [
"new group"
]
assert activities[0]["user_id"] == user["id"]
assert activities[0]["object_id"] == group["id"]
assert activities[0]["data"]["group"]["title"] == group["title"]
def test_delete_group_using_group_delete(self):
user = factories.User()
group = factories.Group(user=user)
_clear_activities()
helpers.call_action(
"group_delete", context={"user": user["name"]}, **group
)
activities = helpers.call_action("user_activity_list", id=user["id"])
assert [activity["activity_type"] for activity in activities] == [
"deleted group"
]
assert activities[0]["user_id"] == user["id"]
assert activities[0]["object_id"] == group["id"]
assert activities[0]["data"]["group"]["title"] == group["title"]
def test_delete_group_by_updating_state(self):
user = factories.User()
group = factories.Group(user=user)
_clear_activities()
group["state"] = "deleted"
helpers.call_action(
"group_update", context={"user": user["name"]}, **group
)
activities = helpers.call_action("user_activity_list", id=user["id"])
assert [activity["activity_type"] for activity in activities] == [
"deleted group"
]
assert activities[0]["user_id"] == user["id"]
assert activities[0]["object_id"] == group["id"]
assert activities[0]["data"]["group"]["title"] == group["title"]
def test_create_organization(self):
user = factories.User()
_clear_activities()
org = factories.Organization(user=user)
activities = helpers.call_action("user_activity_list", id=user["id"])
assert [activity["activity_type"] for activity in activities] == [
"new organization"
]
assert activities[0]["user_id"] == user["id"]
assert activities[0]["object_id"] == org["id"]
assert activities[0]["data"]["group"]["title"] == org["title"]
def test_delete_org_using_organization_delete(self):
user = factories.User()
org = factories.Organization(user=user)
_clear_activities()
helpers.call_action(
"organization_delete", context={"user": user["name"]}, **org
)
activities = helpers.call_action("user_activity_list", id=user["id"])
assert [activity["activity_type"] for activity in activities] == [
"deleted organization"
]
assert activities[0]["user_id"] == user["id"]
assert activities[0]["object_id"] == org["id"]
assert activities[0]["data"]["group"]["title"] == org["title"]
def test_delete_org_by_updating_state(self):
user = factories.User()
org = factories.Organization(user=user)
_clear_activities()
org["state"] = "deleted"
helpers.call_action(
"organization_update", context={"user": user["name"]}, **org
)
activities = helpers.call_action("user_activity_list", id=user["id"])
assert [activity["activity_type"] for activity in activities] == [
"deleted organization"
]
assert activities[0]["user_id"] == user["id"]
assert activities[0]["object_id"] == org["id"]
assert activities[0]["data"]["group"]["title"] == org["title"]
def _create_bulk_user_activities(self, count):
from ckan import model
user = factories.User()
objs = [
model.Activity(
user_id=user["id"],
object_id=None,
activity_type=None,
data=None,
)
for _ in range(count)
]
model.Session.add_all(objs)
model.repo.commit_and_remove()
return user["id"]
def test_limit_default(self):
id = self._create_bulk_user_activities(35)
results = helpers.call_action("user_activity_list", id=id)
assert len(results) == 31 # i.e. default value
@pytest.mark.ckan_config("ckan.activity_list_limit", "5")
def test_limit_configured(self):
id = self._create_bulk_user_activities(7)
results = helpers.call_action("user_activity_list", id=id)
assert len(results) == 5 # i.e. ckan.activity_list_limit
@pytest.mark.ckan_config("ckan.activity_list_limit", "5")
@pytest.mark.ckan_config("ckan.activity_list_limit_max", "7")
def test_limit_hits_max(self):
id = self._create_bulk_user_activities(9)
results = helpers.call_action("user_activity_list", id=id, limit="9")
assert len(results) == 7 # i.e. ckan.activity_list_limit_max
@pytest.mark.usefixtures("clean_db")
class TestGroupActivityList(object):
def test_create_group(self):
user = factories.User()
group = factories.Group(user=user)
activities = helpers.call_action("group_activity_list", id=group["id"])
assert [activity["activity_type"] for activity in activities] == [
"new group"
]
assert activities[0]["user_id"] == user["id"]
assert activities[0]["object_id"] == group["id"]
assert activities[0]["data"]["group"]["title"] == group["title"]
def test_change_group(self):
user = factories.User()
_clear_activities()
group = factories.Group(user=user)
original_title = group["title"]
group["title"] = "Group with changed title"
helpers.call_action(
"group_update", context={"user": user["name"]}, **group
)
activities = helpers.call_action("group_activity_list", id=group["id"])
assert [activity["activity_type"] for activity in activities] == [
"changed group",
"new group",
]
assert activities[0]["user_id"] == user["id"]
assert activities[0]["object_id"] == group["id"]
assert (
activities[0]["data"]["group"]["title"]
== "Group with changed title"
)
# the old group still has the old title
assert activities[1]["activity_type"] == "new group"
assert activities[1]["data"]["group"]["title"] == original_title
def test_create_dataset(self):
user = factories.User()
group = factories.Group(user=user)
_clear_activities()
dataset = factories.Dataset(groups=[{"id": group["id"]}], user=user)
activities = helpers.call_action("group_activity_list", id=group["id"])
assert [activity["activity_type"] for activity in activities] == [
"new package"
]
assert activities[0]["user_id"] == user["id"]
assert activities[0]["object_id"] == dataset["id"]
assert activities[0]["data"]["package"]["title"] == dataset["title"]
def test_change_dataset(self):
user = factories.User()
group = factories.Group(user=user)
_clear_activities()
dataset = factories.Dataset(groups=[{"id": group["id"]}], user=user)
original_title = dataset["title"]
dataset["title"] = "Dataset with changed title"
helpers.call_action(
"package_update", context={"user": user["name"]}, **dataset
)
activities = helpers.call_action("group_activity_list", id=group["id"])
assert [activity["activity_type"] for activity in activities] == [
"changed package",
"new package",
]
assert activities[0]["user_id"] == user["id"]
assert activities[0]["object_id"] == dataset["id"]
assert activities[0]["data"]["package"]["title"] == dataset["title"]
# the old dataset still has the old title
assert activities[1]["activity_type"] == "new package"
assert activities[1]["data"]["package"]["title"] == original_title
def test_change_dataset_add_extra(self):
user = factories.User()
group = factories.Group(user=user)
dataset = factories.Dataset(groups=[{"id": group["id"]}], user=user)
_clear_activities()
dataset["extras"].append(dict(key="rating", value="great"))
helpers.call_action(
"package_update", context={"user": user["name"]}, **dataset
)
activities = helpers.call_action("group_activity_list", id=group["id"])
assert [activity["activity_type"] for activity in activities] == [
"changed package"
]
assert activities[0]["user_id"] == user["id"]
assert activities[0]["object_id"] == dataset["id"]
assert activities[0]["data"]["package"]["title"] == dataset["title"]
def test_change_dataset_add_tag(self):
user = factories.User()
group = factories.Group(user=user)
dataset = factories.Dataset(groups=[{"id": group["id"]}], user=user)
_clear_activities()
dataset["tags"].append(dict(name="checked"))
helpers.call_action(
"package_update", context={"user": user["name"]}, **dataset
)
activities = helpers.call_action("group_activity_list", id=group["id"])
assert [activity["activity_type"] for activity in activities] == [
"changed package"
]
assert activities[0]["user_id"] == user["id"]
assert activities[0]["object_id"] == dataset["id"]
assert activities[0]["data"]["package"]["title"] == dataset["title"]
def test_delete_dataset(self):
user = factories.User()
group = factories.Group(user=user)
dataset = factories.Dataset(groups=[{"id": group["id"]}], user=user)
_clear_activities()
helpers.call_action(
"package_delete", context={"user": user["name"]}, **dataset
)
activities = helpers.call_action("group_activity_list", id=group["id"])
assert [activity["activity_type"] for activity in activities] == [
"deleted package"
]
assert activities[0]["user_id"] == user["id"]
assert activities[0]["object_id"] == dataset["id"]
assert activities[0]["data"]["package"]["title"] == dataset["title"]
def test_change_dataset_that_used_to_be_in_the_group(self):
user = factories.User()
group = factories.Group(user=user)
dataset = factories.Dataset(groups=[{"id": group["id"]}], user=user)
# remove the dataset from the group
dataset["groups"] = []
helpers.call_action(
"package_update", context={"user": user["name"]}, **dataset
)
_clear_activities()
# edit the dataset
dataset["title"] = "Dataset with changed title"
helpers.call_action(
"package_update", context={"user": user["name"]}, **dataset
)
# dataset change should not show up in its former group
activities = helpers.call_action("group_activity_list", id=group["id"])
assert [activity["activity_type"] for activity in activities] == []
def test_delete_dataset_that_used_to_be_in_the_group(self):
user = factories.User()
group = factories.Group(user=user)
dataset = factories.Dataset(groups=[{"id": group["id"]}], user=user)
# remove the dataset from the group
dataset["groups"] = []
dataset["title"] = "Dataset with changed title"
helpers.call_action(
"package_update", context={"user": user["name"]}, **dataset
)
_clear_activities()
helpers.call_action(
"package_delete", context={"user": user["name"]}, **dataset
)
# NOTE:
# ideally the dataset's deletion would not show up in its old group
# but it can't be helped without _group_activity_query getting very
# complicated
activities = helpers.call_action("group_activity_list", id=group["id"])
assert [activity["activity_type"] for activity in activities] == [
"deleted package"
]
assert activities[0]["user_id"] == user["id"]
assert activities[0]["object_id"] == dataset["id"]
assert activities[0]["data"]["package"]["title"] == dataset["title"]
def _create_bulk_group_activities(self, count):
group = factories.Group()
from ckan import model
user = factories.User()
objs = [
model.Activity(
user_id=user["id"],
object_id=group["id"],
activity_type=None,
data=None,
)
for _ in range(count)
]
model.Session.add_all(objs)
model.repo.commit_and_remove()
return group["id"]
def test_limit_default(self):
id = self._create_bulk_group_activities(35)
results = helpers.call_action("group_activity_list", id=id)
assert len(results) == 31 # i.e. default value
@pytest.mark.ckan_config("ckan.activity_list_limit", "5")
def test_limit_configured(self):
id = self._create_bulk_group_activities(7)
results = helpers.call_action("group_activity_list", id=id)
assert len(results) == 5 # i.e. ckan.activity_list_limit
@pytest.mark.ckan_config("ckan.activity_list_limit", "5")
@pytest.mark.ckan_config("ckan.activity_list_limit_max", "7")
def test_limit_hits_max(self):
id = self._create_bulk_group_activities(9)
results = helpers.call_action("group_activity_list", id=id, limit="9")
assert len(results) == 7 # i.e. ckan.activity_list_limit_max
def test_normal_user_doesnt_see_hidden_activities(self):
# activity is 'hidden' because group is created by site_user
group = factories.Group()
activities = helpers.call_action("group_activity_list", id=group["id"])
assert [activity["activity_type"] for activity in activities] == []
def test_sysadmin_user_doesnt_see_hidden_activities_by_default(self):
# activity is 'hidden' because group is created by site_user
group = factories.Group()
activities = helpers.call_action("group_activity_list", id=group["id"])
assert [activity["activity_type"] for activity in activities] == []
def test_sysadmin_user_can_include_hidden_activities(self):
# activity is 'hidden' because group is created by site_user
group = factories.Group()
activities = helpers.call_action(
"group_activity_list", include_hidden_activity=True, id=group["id"]
)
assert [activity["activity_type"] for activity in activities] == [
"new group"
]
@pytest.mark.usefixtures("clean_db")
class TestOrganizationActivityList(object):
def test_create_organization(self):
user = factories.User()
org = factories.Organization(user=user)
activities = helpers.call_action(
"organization_activity_list", id=org["id"]
)
assert [activity["activity_type"] for activity in activities] == [
"new organization"
]
assert activities[0]["user_id"] == user["id"]
assert activities[0]["object_id"] == org["id"]
assert activities[0]["data"]["group"]["title"] == org["title"]
def test_change_organization(self):
user = factories.User()
_clear_activities()
org = factories.Organization(user=user)
original_title = org["title"]
org["title"] = "Organization with changed title"
helpers.call_action(
"organization_update", context={"user": user["name"]}, **org
)
activities = helpers.call_action(
"organization_activity_list", id=org["id"]
)
assert [activity["activity_type"] for activity in activities] == [
"changed organization",
"new organization",
]
assert activities[0]["user_id"] == user["id"]
assert activities[0]["object_id"] == org["id"]
assert (
activities[0]["data"]["group"]["title"]
== "Organization with changed title"
)
# the old org still has the old title
assert activities[1]["activity_type"] == "new organization"
assert activities[1]["data"]["group"]["title"] == original_title
def test_create_dataset(self):
user = factories.User()
org = factories.Organization(user=user)
_clear_activities()
dataset = factories.Dataset(owner_org=org["id"], user=user)
activities = helpers.call_action(
"organization_activity_list", id=org["id"]
)
assert [activity["activity_type"] for activity in activities] == [
"new package"
]
assert activities[0]["user_id"] == user["id"]
assert activities[0]["object_id"] == dataset["id"]
assert activities[0]["data"]["package"]["title"] == dataset["title"]
def test_change_dataset(self):
user = factories.User()
org = factories.Organization(user=user)
_clear_activities()
dataset = factories.Dataset(owner_org=org["id"], user=user)
original_title = dataset["title"]
dataset["title"] = "Dataset with changed title"
helpers.call_action(
"package_update", context={"user": user["name"]}, **dataset
)
activities = helpers.call_action(
"organization_activity_list", id=org["id"]
)
assert [activity["activity_type"] for activity in activities] == [
"changed package",
"new package",
]
assert activities[0]["user_id"] == user["id"]
assert activities[0]["object_id"] == dataset["id"]
assert activities[0]["data"]["package"]["title"] == dataset["title"]
# the old dataset still has the old title
assert activities[1]["activity_type"] == "new package"
assert activities[1]["data"]["package"]["title"] == original_title
def test_change_dataset_add_tag(self):
user = factories.User()
org = factories.Organization(user=user)
dataset = factories.Dataset(owner_org=org["id"], user=user)
_clear_activities()
dataset["tags"].append(dict(name="checked"))
helpers.call_action(
"package_update", context={"user": user["name"]}, **dataset
)
activities = helpers.call_action(
"organization_activity_list", id=org["id"]
)
assert [activity["activity_type"] for activity in activities] == [
"changed package"
]
assert activities[0]["user_id"] == user["id"]
assert activities[0]["object_id"] == dataset["id"]
assert activities[0]["data"]["package"]["title"] == dataset["title"]
def test_delete_dataset(self):
user = factories.User()
org = factories.Organization(user=user)
dataset = factories.Dataset(owner_org=org["id"], user=user)
_clear_activities()
helpers.call_action(
"package_delete", context={"user": user["name"]}, **dataset
)
activities = helpers.call_action(
"organization_activity_list", id=org["id"]
)
assert [activity["activity_type"] for activity in activities] == [
"deleted package"
]
assert activities[0]["user_id"] == user["id"]
assert activities[0]["object_id"] == dataset["id"]
assert activities[0]["data"]["package"]["title"] == dataset["title"]
def test_change_dataset_that_used_to_be_in_the_org(self):
user = factories.User()
org = factories.Organization(user=user)
org2 = factories.Organization(user=user)
dataset = factories.Dataset(owner_org=org["id"], user=user)
# remove the dataset from the org
dataset["owner_org"] = org2["id"]
helpers.call_action(
"package_update", context={"user": user["name"]}, **dataset
)
_clear_activities()
# edit the dataset
dataset["title"] = "Dataset with changed title"
helpers.call_action(
"package_update", context={"user": user["name"]}, **dataset
)
# dataset change should not show up in its former group
activities = helpers.call_action(
"organization_activity_list", id=org["id"]
)
assert [activity["activity_type"] for activity in activities] == []
def test_delete_dataset_that_used_to_be_in_the_org(self):
user = factories.User()
org = factories.Organization(user=user)
org2 = factories.Organization(user=user)
dataset = factories.Dataset(owner_org=org["id"], user=user)
# remove the dataset from the group
dataset["owner_org"] = org2["id"]
helpers.call_action(
"package_update", context={"user": user["name"]}, **dataset
)
_clear_activities()
dataset["title"] = "Dataset with changed title"
helpers.call_action(
"package_delete", context={"user": user["name"]}, **dataset
)
# dataset deletion should not show up in its former org
activities = helpers.call_action(
"organization_activity_list", id=org["id"]
)
assert [activity["activity_type"] for activity in activities] == []
def _create_bulk_org_activities(self, count):
org = factories.Organization()
from ckan import model
user = factories.User()
objs = [
model.Activity(
user_id=user["id"],
object_id=org["id"],
activity_type=None,
data=None,
)
for _ in range(count)
]
model.Session.add_all(objs)
model.repo.commit_and_remove()
return org["id"]
def test_limit_default(self):
id = self._create_bulk_org_activities(35)
results = helpers.call_action("organization_activity_list", id=id)
assert len(results) == 31 # i.e. default value
@pytest.mark.ckan_config("ckan.activity_list_limit", "5")
def test_limit_configured(self):
id = self._create_bulk_org_activities(7)
results = helpers.call_action("organization_activity_list", id=id)
assert len(results) == 5 # i.e. ckan.activity_list_limit
@pytest.mark.ckan_config("ckan.activity_list_limit", "5")
@pytest.mark.ckan_config("ckan.activity_list_limit_max", "7")
def test_limit_hits_max(self):
id = self._create_bulk_org_activities(9)
results = helpers.call_action(
"organization_activity_list", id=id, limit="9"
)
assert len(results) == 7 # i.e. ckan.activity_list_limit_max
def test_normal_user_doesnt_see_hidden_activities(self):
# activity is 'hidden' because org is created by site_user
org = factories.Organization()
activities = helpers.call_action(
"organization_activity_list", id=org["id"]
)
assert [activity["activity_type"] for activity in activities] == []
def test_sysadmin_user_doesnt_see_hidden_activities_by_default(self):
# activity is 'hidden' because org is created by site_user
org = factories.Organization()
activities = helpers.call_action(
"organization_activity_list", id=org["id"]
)
assert [activity["activity_type"] for activity in activities] == []
def test_sysadmin_user_can_include_hidden_activities(self):
# activity is 'hidden' because org is created by site_user
org = factories.Organization()
activities = helpers.call_action(
"organization_activity_list",
include_hidden_activity=True,
id=org["id"],
)
assert [activity["activity_type"] for activity in activities] == [
"new organization"
]
@pytest.mark.usefixtures("clean_db")
class TestRecentlyChangedPackagesActivityList(object):
def test_create_dataset(self):
user = factories.User()
        dataset = factories.Dataset(user=user)
        activities = helpers.call_action(
            "recently_changed_packages_activity_list", id=dataset["id"]
        )
        assert [activity["activity_type"] for activity in activities] == [
            "new package"
        ]
        assert activities[0]["user_id"] == user["id"]
        assert activities[0]["object_id"] == dataset["id"]
        assert activities[0]["data"]["package"]["title"] == dataset["title"]
def test_change_dataset(self):
user = factories.User()
org = factories.Organization(user=user)
_clear_activities()
dataset = factories.Dataset(owner_org=org["id"], user=user)
original_title = dataset["title"]
dataset["title"] = "Dataset with changed title"
helpers.call_action(
"package_update", context={"user": user["name"]}, **dataset
)
activities = helpers.call_action(
"recently_changed_packages_activity_list", id=org["id"]
)
assert [activity["activity_type"] for activity in activities] == [
"changed package",
"new package",
]
assert activities[0]["user_id"] == user["id"]
assert activities[0]["object_id"] == dataset["id"]
assert activities[0]["data"]["package"]["title"] == dataset["title"]
# the old dataset still has the old title
assert activities[1]["activity_type"] == "new package"
assert activities[1]["data"]["package"]["title"] == original_title
def test_change_dataset_add_extra(self):
user = factories.User()
org = factories.Organization(user=user)
dataset = factories.Dataset(owner_org=org["id"], user=user)
_clear_activities()
dataset["extras"].append(dict(key="rating", value="great"))
helpers.call_action(
"package_update", context={"user": user["name"]}, **dataset
)
activities = helpers.call_action(
"recently_changed_packages_activity_list", id=org["id"]
)
assert [activity["activity_type"] for activity in activities] == [
"changed package"
]
assert activities[0]["user_id"] == user["id"]
assert activities[0]["object_id"] == dataset["id"]
assert activities[0]["data"]["package"]["title"] == dataset["title"]
def test_change_dataset_add_tag(self):
user = factories.User()
org = factories.Organization(user=user)
dataset = factories.Dataset(owner_org=org["id"], user=user)
_clear_activities()
dataset["tags"].append(dict(name="checked"))
helpers.call_action(
"package_update", context={"user": user["name"]}, **dataset
)
activities = helpers.call_action(
"recently_changed_packages_activity_list", id=org["id"]
)
assert [activity["activity_type"] for activity in activities] == [
"changed package"
]
assert activities[0]["user_id"] == user["id"]
assert activities[0]["object_id"] == dataset["id"]
assert activities[0]["data"]["package"]["title"] == dataset["title"]
def test_delete_dataset(self):
user = factories.User()
org = factories.Organization(user=user)
dataset = factories.Dataset(owner_org=org["id"], user=user)
_clear_activities()
helpers.call_action(
"package_delete", context={"user": user["name"]}, **dataset
)
activities = helpers.call_action(
"organization_activity_list", id=org["id"]
)
assert [activity["activity_type"] for activity in activities] == [
"deleted package"
]
assert activities[0]["user_id"] == user["id"]
assert activities[0]["object_id"] == dataset["id"]
assert activities[0]["data"]["package"]["title"] == dataset["title"]
def _create_bulk_package_activities(self, count):
from ckan import model
user = factories.User()
objs = [
model.Activity(
user_id=user["id"],
object_id=None,
activity_type="new_package",
data=None,
)
for _ in range(count)
]
model.Session.add_all(objs)
model.repo.commit_and_remove()
def test_limit_default(self):
self._create_bulk_package_activities(35)
results = helpers.call_action(
"recently_changed_packages_activity_list"
)
assert len(results) == 31 # i.e. default value
@pytest.mark.ckan_config("ckan.activity_list_limit", "5")
def test_limit_configured(self):
self._create_bulk_package_activities(7)
results = helpers.call_action(
"recently_changed_packages_activity_list"
)
assert len(results) == 5 # i.e. ckan.activity_list_limit
@pytest.mark.ckan_config("ckan.activity_list_limit", "5")
@pytest.mark.ckan_config("ckan.activity_list_limit_max", "7")
def test_limit_hits_max(self):
self._create_bulk_package_activities(9)
results = helpers.call_action(
"recently_changed_packages_activity_list", limit="9"
)
assert len(results) == 7 # i.e. ckan.activity_list_limit_max
@pytest.mark.usefixtures("clean_db")
class TestDashboardActivityList(object):
def test_create_user(self):
user = factories.User()
activities = helpers.call_action(
"dashboard_activity_list", context={"user": user["id"]}
)
assert [activity["activity_type"] for activity in activities] == [
"new user"
]
assert activities[0]["user_id"] == user["id"]
assert activities[0]["object_id"] == user["id"]
# user's own activities are always marked ``'is_new': False``
assert not activities[0]["is_new"]
def test_create_dataset(self):
user = factories.User()
_clear_activities()
dataset = factories.Dataset(user=user)
activities = helpers.call_action(
"dashboard_activity_list", context={"user": user["id"]}
)
assert [activity["activity_type"] for activity in activities] == [
"new package"
]
assert activities[0]["user_id"] == user["id"]
assert activities[0]["object_id"] == dataset["id"]
assert activities[0]["data"]["package"]["title"] == dataset["title"]
# user's own activities are always marked ``'is_new': False``
assert not activities[0]["is_new"]
def test_create_group(self):
user = factories.User()
_clear_activities()
group = factories.Group(user=user)
activities = helpers.call_action(
"dashboard_activity_list", context={"user": user["id"]}
)
assert [activity["activity_type"] for activity in activities] == [
"new group"
]
assert activities[0]["user_id"] == user["id"]
assert activities[0]["object_id"] == group["id"]
assert activities[0]["data"]["group"]["title"] == group["title"]
# user's own activities are always marked ``'is_new': False``
assert not activities[0]["is_new"]
def test_create_organization(self):
user = factories.User()
_clear_activities()
org = factories.Organization(user=user)
activities = helpers.call_action(
"dashboard_activity_list", context={"user": user["id"]}
)
assert [activity["activity_type"] for activity in activities] == [
"new organization"
]
assert activities[0]["user_id"] == user["id"]
assert activities[0]["object_id"] == org["id"]
assert activities[0]["data"]["group"]["title"] == org["title"]
# user's own activities are always marked ``'is_new': False``
assert not activities[0]["is_new"]
def _create_bulk_package_activities(self, count):
user = factories.User()
from ckan import model
objs = [
model.Activity(
user_id=user["id"],
object_id=None,
activity_type=None,
data=None,
)
for _ in range(count)
]
model.Session.add_all(objs)
model.repo.commit_and_remove()
return user["id"]
def test_limit_default(self):
id = self._create_bulk_package_activities(35)
results = helpers.call_action(
"dashboard_activity_list", context={"user": id}
)
assert len(results) == 31 # i.e. default value
@pytest.mark.ckan_config("ckan.activity_list_limit", "5")
def test_limit_configured(self):
id = self._create_bulk_package_activities(7)
results = helpers.call_action(
"dashboard_activity_list", context={"user": id}
)
assert len(results) == 5 # i.e. ckan.activity_list_limit
@pytest.mark.ckan_config("ckan.activity_list_limit", "5")
@pytest.mark.ckan_config("ckan.activity_list_limit_max", "7")
def test_limit_hits_max(self):
id = self._create_bulk_package_activities(9)
results = helpers.call_action(
"dashboard_activity_list", limit="9", context={"user": id}
)
assert len(results) == 7 # i.e. ckan.activity_list_limit_max
@pytest.mark.usefixtures("clean_db")
class TestDashboardNewActivities(object):
def test_users_own_activities(self):
# a user's own activities are not shown as "new"
user = factories.User()
dataset = factories.Dataset(user=user)
dataset["title"] = "Dataset with changed title"
helpers.call_action(
"package_update", context={"user": user["name"]}, **dataset
)
helpers.call_action(
"package_delete", context={"user": user["name"]}, **dataset
)
group = factories.Group(user=user)
group["title"] = "Group with changed title"
helpers.call_action(
"group_update", context={"user": user["name"]}, **group
)
helpers.call_action(
"group_delete", context={"user": user["name"]}, **group
)
new_activities = helpers.call_action(
"dashboard_activity_list", context={"user": user["id"]}
)
assert [activity["is_new"] for activity in new_activities] == [
False
] * 7
new_activities_count = helpers.call_action(
"dashboard_new_activities_count", context={"user": user["id"]}
)
assert new_activities_count == 0
def test_activities_by_a_followed_user(self):
user = factories.User()
followed_user = factories.User()
helpers.call_action(
"follow_user", context={"user": user["name"]}, **followed_user
)
_clear_activities()
dataset = factories.Dataset(user=followed_user)
dataset["title"] = "Dataset with changed title"
helpers.call_action(
"package_update",
context={"user": followed_user["name"]},
**dataset,
)
helpers.call_action(
"package_delete",
context={"user": followed_user["name"]},
**dataset,
)
group = factories.Group(user=followed_user)
group["title"] = "Group with changed title"
helpers.call_action(
"group_update", context={"user": followed_user["name"]}, **group
)
helpers.call_action(
"group_delete", context={"user": followed_user["name"]}, **group
)
activities = helpers.call_action(
"dashboard_activity_list", context={"user": user["id"]}
)
assert [
activity["activity_type"] for activity in activities[::-1]
] == [
"new package",
"changed package",
"deleted package",
"new group",
"changed group",
"deleted group",
]
assert [activity["is_new"] for activity in activities] == [True] * 6
assert (
helpers.call_action(
"dashboard_new_activities_count", context={"user": user["id"]}
)
== 6
)
def test_activities_on_a_followed_dataset(self):
user = factories.User()
another_user = factories.Sysadmin()
_clear_activities()
dataset = factories.Dataset(user=another_user)
helpers.call_action(
"follow_dataset", context={"user": user["name"]}, **dataset
)
dataset["title"] = "Dataset with changed title"
helpers.call_action(
"package_update", context={"user": another_user["name"]}, **dataset
)
activities = helpers.call_action(
"dashboard_activity_list", context={"user": user["id"]}
)
assert [
(activity["activity_type"], activity["is_new"])
for activity in activities[::-1]
] == [
("new package", True),
# NB The 'new package' activity is in our activity stream and shows
# as "new" even though it occurred before we followed it. This is
# known & intended design.
("changed package", True),
]
assert (
helpers.call_action(
"dashboard_new_activities_count", context={"user": user["id"]}
)
== 2
)
def test_activities_on_a_followed_group(self):
user = factories.User()
another_user = factories.Sysadmin()
_clear_activities()
group = factories.Group(user=user)
helpers.call_action(
"follow_group", context={"user": user["name"]}, **group
)
group["title"] = "Group with changed title"
helpers.call_action(
"group_update", context={"user": another_user["name"]}, **group
)
activities = helpers.call_action(
"dashboard_activity_list", context={"user": user["id"]}
)
assert [
(activity["activity_type"], activity["is_new"])
for activity in activities[::-1]
] == [
("new group", False), # False because user did this one herself
("changed group", True),
]
assert (
helpers.call_action(
"dashboard_new_activities_count", context={"user": user["id"]}
)
== 1
)
def test_activities_on_a_dataset_in_a_followed_group(self):
user = factories.User()
another_user = factories.Sysadmin()
group = factories.Group(user=user)
helpers.call_action(
"follow_group", context={"user": user["name"]}, **group
)
_clear_activities()
dataset = factories.Dataset(
groups=[{"name": group["name"]}], user=another_user
)
dataset["title"] = "Dataset with changed title"
helpers.call_action(
"package_update", context={"user": another_user["name"]}, **dataset
)
activities = helpers.call_action(
"dashboard_activity_list", context={"user": user["id"]}
)
assert [
(activity["activity_type"], activity["is_new"])
for activity in activities[::-1]
] == [("new package", True), ("changed package", True)]
assert (
helpers.call_action(
"dashboard_new_activities_count", context={"user": user["id"]}
)
== 2
)
def test_activities_on_a_dataset_in_a_followed_org(self):
user = factories.User()
another_user = factories.Sysadmin()
org = factories.Organization(user=user)
helpers.call_action(
"follow_group", context={"user": user["name"]}, **org
)
_clear_activities()
dataset = factories.Dataset(owner_org=org["id"], user=another_user)
dataset["title"] = "Dataset with changed title"
helpers.call_action(
"package_update", context={"user": another_user["name"]}, **dataset
)
activities = helpers.call_action(
"dashboard_activity_list", context={"user": user["id"]}
)
assert [
(activity["activity_type"], activity["is_new"])
for activity in activities[::-1]
] == [("new package", True), ("changed package", True)]
assert (
helpers.call_action(
"dashboard_new_activities_count", context={"user": user["id"]}
)
== 2
)
def test_activities_that_should_not_show(self):
user = factories.User()
_clear_activities()
# another_user does some activity unconnected with user
another_user = factories.Sysadmin()
group = factories.Group(user=another_user)
dataset = factories.Dataset(
groups=[{"name": group["name"]}], user=another_user
)
dataset["title"] = "Dataset with changed title"
helpers.call_action(
"package_update", context={"user": another_user["name"]}, **dataset
)
activities = helpers.call_action(
"dashboard_activity_list", context={"user": user["id"]}
)
assert [
(activity["activity_type"], activity["is_new"])
for activity in activities[::-1]
] == []
assert (
helpers.call_action(
"dashboard_new_activities_count", context={"user": user["id"]}
)
== 0
)
@pytest.mark.ckan_config("ckan.activity_list_limit", "5")
def test_maximum_number_of_new_activities(self):
"""Test that the new activities count does not go higher than 5, even
if there are more than 5 new activities from the user's followers."""
user = factories.User()
another_user = factories.Sysadmin()
dataset = factories.Dataset()
helpers.call_action(
"follow_dataset", context={"user": user["name"]}, **dataset
)
for n in range(0, 7):
dataset["notes"] = "Updated {n} times".format(n=n)
helpers.call_action(
"package_update",
context={"user": another_user["name"]},
**dataset,
)
assert (
helpers.call_action(
"dashboard_new_activities_count", context={"user": user["id"]}
)
== 5
)
@pytest.mark.usefixtures("non_clean_db")
class TestApiToken(object):
@pytest.mark.parametrize("num_tokens", [0, 1, 2, 5])
def test_token_list(self, num_tokens):
from ckan.lib.api_token import decode
user = factories.User()
ids = []
for _ in range(num_tokens):
data = helpers.call_action(
"api_token_create",
context={"model": model, "user": user["name"]},
user=user["name"],
name="token-name",
)
token = data["token"]
ids.append(decode(token)["jti"])
tokens = helpers.call_action(
"api_token_list",
context={"model": model, "user": user["name"]},
user=user["name"],
)
assert sorted([t["id"] for t in tokens]) == sorted(ids)
@pytest.mark.usefixtures("non_clean_db")
@pytest.mark.ckan_config("ckan.auth.allow_dataset_collaborators", False)
def test_package_collaborator_list_when_config_disabled():
dataset = factories.Dataset()
with pytest.raises(logic.ValidationError):
helpers.call_action("package_collaborator_list", id=dataset["id"])
@pytest.mark.usefixtures("clean_db")
@pytest.mark.ckan_config("ckan.auth.allow_dataset_collaborators", True)
class TestPackageMemberList(object):
def test_list(self):
dataset = factories.Dataset()
user1 = factories.User()
capacity1 = "editor"
user2 = factories.User()
capacity2 = "member"
helpers.call_action(
"package_collaborator_create",
id=dataset["id"],
user_id=user1["id"],
capacity=capacity1,
)
helpers.call_action(
"package_collaborator_create",
id=dataset["id"],
user_id=user2["id"],
capacity=capacity2,
)
members = helpers.call_action(
"package_collaborator_list", id=dataset["id"]
)
assert len(members) == 2
assert members[0]["package_id"] == dataset["id"]
assert members[0]["user_id"] == user1["id"]
assert members[0]["capacity"] == capacity1
assert members[1]["package_id"] == dataset["id"]
assert members[1]["user_id"] == user2["id"]
assert members[1]["capacity"] == capacity2
def test_list_with_capacity(self):
dataset = factories.Dataset()
user1 = factories.User()
capacity1 = "editor"
user2 = factories.User()
capacity2 = "member"
helpers.call_action(
"package_collaborator_create",
id=dataset["id"],
user_id=user1["id"],
capacity=capacity1,
)
helpers.call_action(
"package_collaborator_create",
id=dataset["id"],
user_id=user2["id"],
capacity=capacity2,
)
members = helpers.call_action(
"package_collaborator_list", id=dataset["id"], capacity="member"
)
assert len(members) == 1
assert members[0]["package_id"] == dataset["id"]
assert members[0]["user_id"] == user2["id"]
assert members[0]["capacity"] == capacity2
def test_list_dataset_not_found(self):
with pytest.raises(logic.NotFound):
helpers.call_action("package_collaborator_list", id="xxx")
def test_list_wrong_capacity(self):
dataset = factories.Dataset()
user = factories.User()
capacity = "unknown"
with pytest.raises(logic.ValidationError):
helpers.call_action(
"package_collaborator_list",
id=dataset["id"],
user_id=user["id"],
capacity=capacity,
)
def test_list_for_user(self):
dataset1 = factories.Dataset()
dataset2 = factories.Dataset()
user = factories.User()
capacity1 = "editor"
capacity2 = "member"
helpers.call_action(
"package_collaborator_create",
id=dataset1["id"],
user_id=user["id"],
capacity=capacity1,
)
helpers.call_action(
"package_collaborator_create",
id=dataset2["id"],
user_id=user["id"],
capacity=capacity2,
)
datasets = helpers.call_action(
"package_collaborator_list_for_user", id=user["id"]
)
assert len(datasets) == 2
assert datasets[0]["package_id"] == dataset1["id"]
assert datasets[0]["capacity"] == capacity1
assert datasets[1]["package_id"] == dataset2["id"]
assert datasets[1]["capacity"] == capacity2
def test_list_for_user_with_capacity(self):
dataset1 = factories.Dataset()
dataset2 = factories.Dataset()
user = factories.User()
capacity1 = "editor"
capacity2 = "member"
helpers.call_action(
"package_collaborator_create",
id=dataset1["id"],
user_id=user["id"],
capacity=capacity1,
)
helpers.call_action(
"package_collaborator_create",
id=dataset2["id"],
user_id=user["id"],
capacity=capacity2,
)
datasets = helpers.call_action(
"package_collaborator_list_for_user",
id=user["id"],
capacity="editor",
)
assert len(datasets) == 1
assert datasets[0]["package_id"] == dataset1["id"]
assert datasets[0]["capacity"] == capacity1
def test_list_for_user_user_not_found(self):
with pytest.raises(logic.NotAuthorized):
helpers.call_action("package_collaborator_list_for_user", id="xxx")
def test_list_for_user_wrong_capacity(self):
user = factories.User()
capacity = "unknown"
with pytest.raises(logic.ValidationError):
helpers.call_action(
"package_collaborator_list_for_user",
id=user["id"],
capacity=capacity,
)
@pytest.mark.usefixtures("clean_db", "clean_index")
@pytest.mark.ckan_config("ckan.auth.allow_dataset_collaborators", True)
class TestCollaboratorsSearch(object):
def test_search_results_editor(self):
org = factories.Organization()
dataset1 = factories.Dataset(
name="test1", private=True, owner_org=org["id"]
)
dataset2 = factories.Dataset(name="test2")
user = factories.User()
context = {"user": user["name"], "ignore_auth": False}
results = helpers.call_action(
"package_search", context=context, q="*:*", include_private=True
)
assert results["count"] == 1
assert results["results"][0]["id"] == dataset2["id"]
helpers.call_action(
"package_collaborator_create",
id=dataset1["id"],
user_id=user["id"],
capacity="editor",
)
results = helpers.call_action(
"package_search",
context=context,
q="*:*",
include_private=True,
sort="name asc",
)
assert results["count"] == 2
assert results["results"][0]["id"] == dataset1["id"]
assert results["results"][1]["id"] == dataset2["id"]
def test_search_results_member(self):
org = factories.Organization()
dataset1 = factories.Dataset(
name="test1", private=True, owner_org=org["id"]
)
dataset2 = factories.Dataset(name="test2")
user = factories.User()
context = {"user": user["name"], "ignore_auth": False}
results = helpers.call_action(
"package_search", context=context, q="*:*", include_private=True
)
assert results["count"] == 1
assert results["results"][0]["id"] == dataset2["id"]
helpers.call_action(
"package_collaborator_create",
id=dataset1["id"],
user_id=user["id"],
capacity="member",
)
results = helpers.call_action(
"package_search",
context=context,
q="*:*",
include_private=True,
sort="name asc",
)
assert results["count"] == 2
assert results["results"][0]["id"] == dataset1["id"]
assert results["results"][1]["id"] == dataset2["id"]
@pytest.mark.usefixtures("clean_db")
class TestResourceSearch(object):
def test_required_fields(self):
with pytest.raises(logic.ValidationError):
helpers.call_action("resource_search")
helpers.call_action("resource_search", query="name:*")
def test_base_search(self):
factories.Resource(name="one")
factories.Resource(name="two")
result = helpers.call_action("resource_search", query="name:three")
assert not result["count"]
result = helpers.call_action("resource_search", query="name:one")
assert result["count"] == 1
result = helpers.call_action("resource_search", query="name:")
assert result["count"] == 2
def test_date_search(self):
res = factories.Resource()
result = helpers.call_action(
"resource_search", query="created:" + res["created"]
)
assert result["count"] == 1
def test_number_search(self):
factories.Resource(size=10)
result = helpers.call_action("resource_search", query="size:10")
assert result["count"] == 1
def test_resource_search_across_multiple_fields(self):
factories.Resource(description="indexed resource", format="json")
result = helpers.call_action(
"resource_search", query=["description:index", "format:json"]
)
assert result["count"] == 1
resource = result["results"][0]
assert "index" in resource["description"].lower()
assert "json" in resource["format"].lower()
def test_resource_search_test_percentage_is_escaped(self):
factories.Resource(description="indexed resource", format="json")
result = helpers.call_action(
"resource_search", query="description:index%"
)
assert result == {"count": 0, "results": []}
@pytest.mark.usefixtures("non_clean_db")
class TestUserPluginExtras(object):
def test_returned_if_sysadmin_and_include_plugin_extras_only(self):
sysadmin = factories.Sysadmin()
user = factories.User(plugin_extras={"plugin1": {"key1": "value1"}})
context = {"user": sysadmin["name"], "ignore_auth": False}
user = helpers.call_action(
"user_show",
context=context,
id=user["id"],
include_plugin_extras=True,
)
assert user["plugin_extras"] == {"plugin1": {"key1": "value1"}}
context = {"user": sysadmin["name"], "ignore_auth": False}
user = helpers.call_action("user_show", context=context, id=user["id"])
assert "plugin_extras" not in user
context = {"user": user["name"], "ignore_auth": False}
user = helpers.call_action(
"user_show",
context=context,
id=user["id"],
include_plugin_extras=True,
)
assert "plugin_extras" not in user
@pytest.mark.usefixtures("non_clean_db")
class TestGroupPackageShow:
def test_group_package_show(self):
group = factories.Group()
factories.Dataset()
pkg = factories.Dataset(groups=[{"id": group["id"]}])
group_packages = helpers.call_action(
"group_package_show", id=group["id"]
)
assert len(group_packages) == 1
assert group_packages[0]["name"] == pkg["name"]
@pytest.mark.usefixtures("non_clean_db")
class TestGetSiteUser:
def test_get_site_user_not_authorized(self, ckan_config):
with pytest.raises(logic.NotAuthorized):
helpers.call_auth("get_site_user", {"model": model, "user": ""})
assert helpers.call_auth(
"get_site_user", {"model": model, "user": "", "ignore_auth": True}
)
@pytest.mark.usefixtures("clean_db", "clean_index")
class TestPackageList:
@pytest.mark.usefixtures("app")
def test_package_list(self):
pkg1 = factories.Dataset()
pkg2 = factories.Dataset()
packages = helpers.call_action("package_list")
assert len(packages) == 2
assert set(packages) == {pkg1["name"], pkg2["name"]}
def test_package_list_private(self):
org = factories.Organization()
pkg1 = factories.Dataset()
factories.Dataset(private=True, owner_org=org["id"])
packages = helpers.call_action("package_list")
assert packages == [pkg1["name"]]
| 35.369017 | 132 | 0.595441 |
7942244ee6750fcfaa6511d8ffb37d49cd85d18d | 12,905 | py | Python | pyramid_webassets/__init__.py | Deimos/pyramid_webassets | 6ef5febbdda4b8fd9f9055e973cf6daf07f32268 | [
"MIT"
] | 18 | 2015-08-15T09:23:00.000Z | 2021-01-23T15:23:56.000Z | pyramid_webassets/__init__.py | Deimos/pyramid_webassets | 6ef5febbdda4b8fd9f9055e973cf6daf07f32268 | [
"MIT"
] | 9 | 2015-01-14T19:18:01.000Z | 2018-11-03T20:40:50.000Z | pyramid_webassets/__init__.py | Deimos/pyramid_webassets | 6ef5febbdda4b8fd9f9055e973cf6daf07f32268 | [
"MIT"
] | 10 | 2015-02-13T19:58:12.000Z | 2021-05-10T13:26:50.000Z | from contextlib import closing
from os import path, makedirs
import fileinput
import json
import six
from pyramid.path import AssetResolver
from pyramid.settings import asbool, truthy
from pyramid.threadlocal import get_current_request
from webassets import Bundle
from webassets import __version__ as webassets_version
from webassets.env import Environment, Resolver
from webassets.exceptions import BundleError
from webassets.loaders import YAMLLoader
from zope.interface import Interface
USING_WEBASSETS_CONTEXT = webassets_version > (0, 9)
falsy = frozenset(('f', 'false', 'n', 'no', 'off', '0'))
booly = frozenset(list(truthy) + list(falsy))
auto_booly = frozenset(('true', 'false'))
def maybebool(value):
'''
If `value` is a string type, attempts to convert it to a boolean
if it looks like it might be one, otherwise returns the value
unchanged. The difference between this and
:func:`pyramid.settings.asbool` is how non-bools are handled: this
returns the original value, whereas `asbool` returns False.
'''
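    # Illustrative behaviour, given the truthy/falsy sets defined above:
    #   maybebool('false') -> False, maybebool('yes') -> True,
    #   maybebool('auto')  -> 'auto' (returned unchanged),
    # whereas pyramid.settings.asbool('auto') would return False.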
if isinstance(value, six.string_types) and value.lower() in booly:
return asbool(value) # pragma: no cover
return value
def text(value):
if type(value) is six.binary_type:
return value.decode('utf-8')
else:
return value
class PyramidResolver(Resolver):
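    """Webassets resolver that understands Pyramid asset specifications.
    Sources and outputs given as ``package:subpath`` specs are resolved to
    absolute filesystem paths via :class:`pyramid.path.AssetResolver`, and URLs
    are generated with ``request.static_url()`` when a request is active,
    falling back to the default webassets behaviour otherwise.
    """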
def __init__(self):
super(PyramidResolver, self).__init__()
self.resolver = AssetResolver(None)
def _split_spec(self, item):
if ':' in item:
package, subpath = item.split(':', 1)
return (package, subpath)
else:
return (None, item)
def _resolve_spec(self, spec):
package, subpath = self._split_spec(spec)
try:
pkgpath = self.resolver.resolve(package + ':').abspath()
except ImportError as e:
raise BundleError(e)
else:
return path.join(pkgpath, subpath)
def search_for_source(self, ctx, item):
package, subpath = self._split_spec(item)
if package is None:
if USING_WEBASSETS_CONTEXT:
return super(PyramidResolver, self).search_for_source(
ctx,
item
)
else: # pragma: no cover
return super(PyramidResolver, self).search_for_source(
item
)
else:
pkgpath = self._resolve_spec(package + ':')
return self.consider_single_directory(pkgpath, subpath)
def resolve_source_to_url(self, ctx, filepath, item):
request = get_current_request()
# Use the filepath to reconstruct the item without globs
package, _ = self._split_spec(item)
if package is not None:
pkgdir = self._resolve_spec(package + ':')
if filepath.startswith(pkgdir):
item = '{}:{}'.format(package, filepath[len(pkgdir):])
# Attempt to resolve the filepath as passed (but after versioning).
# If this fails, it may be because the static route was registered
# with an asset spec. In this case, the original item may also be
# an asset spec contained therein, so try to resolve that.
if request is not None:
for attempt in (filepath, item):
try:
return request.static_url(attempt)
except ValueError:
pass
if USING_WEBASSETS_CONTEXT:
return super(PyramidResolver, self).resolve_source_to_url(
ctx,
filepath,
item
)
else: # pragma: no cover
return super(PyramidResolver, self).resolve_source_to_url(
filepath,
item
)
def resolve_output_to_path(self, ctx, target, bundle):
package, filepath = self._split_spec(target)
if package is not None:
pkgpath = self._resolve_spec(package + ':')
target = path.join(pkgpath, filepath)
if USING_WEBASSETS_CONTEXT:
return super(PyramidResolver, self).resolve_output_to_path(
ctx,
target,
bundle
)
else: # pragma: no cover
return super(PyramidResolver, self).resolve_output_to_path(
target,
bundle
)
def resolve_output_to_url(self, ctx, item):
request = get_current_request()
if not path.isabs(item):
if ':' not in item:
if 'asset_base' in ctx.config:
if ctx.config['asset_base'].endswith(':'):
item = ctx.config['asset_base'] + item
else:
item = path.join(ctx.config['asset_base'], item)
else:
item = path.join(ctx.directory, item)
if ':' in item:
filepath = self._resolve_spec(item)
else:
filepath = item
if request is not None:
for attempt in (filepath, item):
try:
                    return request.static_url(attempt)
except ValueError:
pass
if USING_WEBASSETS_CONTEXT:
return super(PyramidResolver, self).resolve_output_to_url(
ctx,
filepath
)
else: # pragma: no cover
return super(PyramidResolver, self).resolve_output_to_url(filepath)
class LegacyPyramidResolver(PyramidResolver): # pragma: no cover
def __init__(self, env):
Resolver.__init__(self, env)
self.resolver = AssetResolver(None)
def search_for_source(self, *args):
return PyramidResolver.search_for_source(self, self.env, *args)
def resolve_source_to_url(self, *args):
return PyramidResolver.resolve_source_to_url(self, self.env, *args)
def resolve_output_to_path(self, *args):
return PyramidResolver.resolve_output_to_path(self, self.env, *args)
def resolve_output_to_url(self, *args):
return PyramidResolver.resolve_output_to_url(self, self.env, *args)
class Environment(Environment):
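    """webassets ``Environment`` wired, via ``resolver_class``, to the
    Pyramid-aware resolvers defined above. It intentionally shadows the
    imported webassets ``Environment`` so the rest of this module
    instantiates the subclass.
    """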
@property
def resolver_class(self):
if USING_WEBASSETS_CONTEXT:
return PyramidResolver
else: # pragma: no cover
return LegacyPyramidResolver
class IWebAssetsEnvironment(Interface):
pass
def add_webasset(config, name, bundle):
asset_env = get_webassets_env(config)
asset_env.register(name, bundle)
def get_webassets_env(config):
return config.registry.queryUtility(IWebAssetsEnvironment)
def get_webassets_env_from_settings(settings, prefix='webassets'):
"""This function will take all webassets.* parameters, and
call the ``Environment()`` constructor with kwargs passed in.
The only two parameters that are not passed as keywords are:
* base_dir
* base_url
which are passed in positionally.
Read the ``WebAssets`` docs for ``Environment`` for more details.
"""
# Make a dictionary of the webassets.* elements...
kwargs = {} # assets settings
cut_prefix = len(prefix) + 1
for k in settings:
if k.startswith(prefix):
val = settings[k]
if isinstance(val, six.string_types):
if val.lower() in auto_booly:
val = asbool(val)
elif val.lower().startswith('json:') and k[cut_prefix:] != 'manifest':
val = json.loads(val[5:])
kwargs[k[cut_prefix:]] = val
if 'base_dir' not in kwargs:
raise Exception("You need to provide webassets.base_dir in your configuration")
if 'base_url' not in kwargs:
raise Exception("You need to provide webassets.base_url in your configuration")
asset_dir = kwargs.pop('base_dir')
asset_url = kwargs.pop('base_url')
if ':' in asset_dir:
try:
resolved_dir = AssetResolver(None).resolve(asset_dir).abspath()
except ImportError:
pass
else:
# Store the original asset spec to use later
kwargs['asset_base'] = asset_dir
asset_dir = resolved_dir
if not asset_url.startswith('/'):
if six.moves.urllib.parse.urlparse(asset_url).scheme == '':
asset_url = '/' + asset_url
if 'debug' in kwargs:
kwargs['debug'] = maybebool(kwargs['debug'])
if 'cache' in kwargs:
cache = kwargs['cache'] = maybebool(kwargs['cache'])
if cache and isinstance(cache, six.string_types) and not path.isdir(cache):
makedirs(cache)
# 'updater' is just passed in...
if 'auto_build' in kwargs:
kwargs['auto_build'] = maybebool(kwargs['auto_build'])
if 'jst_compiler' in kwargs:
kwargs['JST_COMPILER'] = kwargs.pop('jst_compiler')
if 'jst_namespace' in kwargs:
kwargs['JST_NAMESPACE'] = kwargs.pop('jst_namespace')
if 'manifest' in kwargs:
kwargs['manifest'] = maybebool(kwargs['manifest'])
if 'url_expire' in kwargs:
kwargs['url_expire'] = maybebool(kwargs['url_expire'])
if 'static_view' in kwargs:
kwargs['static_view'] = asbool(kwargs['static_view'])
else:
kwargs['static_view'] = False
if 'cache_max_age' in kwargs:
kwargs['cache_max_age'] = int(kwargs.pop('cache_max_age'))
else:
kwargs['cache_max_age'] = None
if 'load_path' in kwargs:
# force load_path to be an array and split on whitespace
if not isinstance(kwargs['load_path'], list):
kwargs['load_path'] = kwargs['load_path'].split()
paths = kwargs.pop('paths', None)
if 'bundles' in kwargs:
if isinstance(kwargs['bundles'], six.string_types):
kwargs['bundles'] = kwargs['bundles'].split()
bundles = kwargs.pop('bundles', None)
assets_env = Environment(asset_dir, asset_url, **kwargs)
if paths is not None:
for map_path, map_url in json.loads(paths).items():
assets_env.append_path(map_path, map_url)
def yaml_stream(fname, mode):
if path.exists(fname):
return open(fname, mode)
else:
return assets_env.resolver.resolver.resolve(fname).stream()
if isinstance(bundles, list):
fnames = reversed(bundles)
fin = fileinput.input(fnames, openhook=yaml_stream)
with closing(fin):
lines = [text(line).rstrip() for line in fin]
yamlin = six.StringIO('\n'.join(lines))
loader = YAMLLoader(yamlin)
result = loader.load_bundles()
assets_env.register(result)
elif isinstance(bundles, dict):
assets_env.register(bundles)
return assets_env
def get_webassets_env_from_request(request):
""" Get the webassets environment in the registry from the request. """
return request.registry.queryUtility(IWebAssetsEnvironment)
def add_setting(config, key, value):
env = config.registry.queryUtility(IWebAssetsEnvironment)
env.config[key] = value
def add_path(config, path, url):
env = config.registry.queryUtility(IWebAssetsEnvironment)
env.append_path(path, url)
def assets(request, *args, **kwargs):
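    """Resolve each positional argument to a registered bundle (falling back to
    treating it as a literal file path), wrap the results in an ad-hoc
    ``Bundle`` built with the given keyword arguments, and return its URLs for
    the environment attached to this request.
    """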
env = get_webassets_env_from_request(request)
result = []
for f in args:
try:
result.append(env[f])
except KeyError:
result.append(f)
bundle = Bundle(*result, **kwargs)
if USING_WEBASSETS_CONTEXT:
with bundle.bind(env):
urls = bundle.urls()
else: # pragma: no cover
urls = bundle.urls(env=env)
return urls
def add_assets_global(event):
event['webassets'] = assets
def includeme(config):
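    """Pyramid inclusion hook: build the webassets ``Environment`` from the
    ``webassets.*`` settings, register it as a utility, expose the ``webassets``
    helper to templates via a ``BeforeRender`` subscriber, add the
    ``add_webasset``, ``get_webassets_env``, ``add_webassets_setting`` and
    ``add_webassets_path`` directives, optionally serve ``base_dir`` as a
    static view, and attach ``webassets_env`` and ``webassets`` to the request.
    """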
config.add_subscriber(add_assets_global, 'pyramid.events.BeforeRender')
settings = config.registry.settings
assets_env = get_webassets_env_from_settings(settings)
config.registry.registerUtility(assets_env, IWebAssetsEnvironment)
config.add_directive('add_webasset', add_webasset)
config.add_directive('get_webassets_env', get_webassets_env)
config.add_directive('add_webassets_setting', add_setting)
config.add_directive('add_webassets_path', add_path)
if assets_env.config['static_view']:
config.add_static_view(
settings['webassets.base_url'],
settings['webassets.base_dir'],
cache_max_age=assets_env.config['cache_max_age']
)
config.add_static_view(
path.join(assets_env.url, 'webassets-external'),
path.join(assets_env.directory, 'webassets-external'),
cache_max_age=assets_env.config['cache_max_age']
)
config.add_request_method(get_webassets_env_from_request,
'webassets_env', reify=True)
config.add_request_method(assets, 'webassets', reify=True)
| 32.343358 | 87 | 0.624952 |
794225d18205bb037c15a253b0ea1b1f600abcba | 5,754 | py | Python | rl_sandbox/examples/pybullet/hopper/sac_fr_lstm_experiment.py | chanb/rl_sandbox_public | e55f954a29880f83a5b0c3358badda4d900f1564 | [
"MIT"
] | 14 | 2020-11-09T22:05:37.000Z | 2022-02-11T12:41:33.000Z | rl_sandbox/examples/pybullet/hopper/sac_fr_lstm_experiment.py | chanb/rl_sandbox_public | e55f954a29880f83a5b0c3358badda4d900f1564 | [
"MIT"
] | null | null | null | rl_sandbox/examples/pybullet/hopper/sac_fr_lstm_experiment.py | chanb/rl_sandbox_public | e55f954a29880f83a5b0c3358badda4d900f1564 | [
"MIT"
] | null | null | null | import argparse
import numpy as np
import torch
import rl_sandbox.constants as c
import rl_sandbox.transforms.general_transforms as gt
from rl_sandbox.agents.random_agents import UniformContinuousAgent
from rl_sandbox.buffers.wrappers.torch_buffer import TorchBuffer
from rl_sandbox.envs.wrappers.action_repeat import ActionRepeatWrapper
from rl_sandbox.envs.wrappers.augment_action import AugmentActionWrapper
from rl_sandbox.envs.wrappers.frame_stack import FrameStackWrapper
from rl_sandbox.envs.wrappers.renderer import GymRenderer
from rl_sandbox.train.train_sac import train_sac
from rl_sandbox.model_architectures.actor_critics.fully_connected_soft_actor_critic import LSTMSquashedGaussianSAC
from rl_sandbox.model_architectures.layers_definition import VALUE_BASED_LINEAR_LAYERS
# This is for script run
parser = argparse.ArgumentParser()
parser.add_argument('--seed', type=int, required=True, help="Random seed")
args = parser.parse_args()
seed = args.seed
obs_dim = 15
action_dim = 3
min_action = -np.ones(action_dim)
max_action = np.ones(action_dim)
device = torch.device("cuda:0")
# device = torch.device(c.CPU)
action_repeat = 1
num_frames = 1
hidden_state_dim = 128
memory_size = 1000000
max_total_steps = 1000000
experiment_setting = {
# Auxiliary Tasks
c.AUXILIARY_TASKS: {},
# Buffer
c.BUFFER_PREPROCESSING: gt.AsType(),
c.BUFFER_SETTING: {
c.KWARGS: {
c.MEMORY_SIZE: memory_size,
c.OBS_DIM: (obs_dim,),
c.H_STATE_DIM: (hidden_state_dim * 2,),
c.ACTION_DIM: (action_dim,),
c.REWARD_DIM: (1,),
c.INFOS: {c.MEAN: ((action_dim,), np.float32),
c.VARIANCE: ((action_dim,), np.float32),
c.ENTROPY: ((action_dim,), np.float32),
c.LOG_PROB: ((1,), np.float32),
c.VALUE: ((1,), np.float32),
c.DISCOUNTING: ((1,), np.float32)},
c.BURN_IN_WINDOW: 19,
c.PADDING_FIRST: True,
c.CHECKPOINT_INTERVAL: 0,
c.CHECKPOINT_PATH: None,
},
c.STORAGE_TYPE: c.RAM,
c.BUFFER_WRAPPERS: [
{
c.WRAPPER: TorchBuffer,
c.KWARGS: {},
},
],
},
# Environment
c.ACTION_DIM: action_dim,
c.CLIP_ACTION: True,
c.ENV_SETTING: {
c.ENV_BASE: {
# c.ENV_NAME: "Hopper-v2"
c.ENV_NAME: "HopperBulletEnv-v0"
},
c.ENV_TYPE: c.GYM,
c.ENV_WRAPPERS: [
{
c.WRAPPER: GymRenderer,
c.KWARGS: {},
},
# {
# c.WRAPPER: AugmentActionWrapper,
# c.KWARGS: {
# c.ACTION_DIM: action_dim,
# }
# },
{
c.WRAPPER: ActionRepeatWrapper,
c.KWARGS: {
c.ACTION_REPEAT: action_repeat,
c.DISCOUNT_FACTOR: 1.,
c.ENABLE_DISCOUNTING: False,
}
},
{
c.WRAPPER: FrameStackWrapper,
c.KWARGS: {
c.NUM_FRAMES: num_frames,
}
}
]
},
c.MIN_ACTION: min_action,
c.MAX_ACTION: max_action,
c.OBS_DIM: obs_dim,
# Evaluation
c.EVALUATION_FREQUENCY: 5000,
c.EVALUATION_RENDER: False,
c.EVALUATION_RETURNS: [],
c.NUM_EVALUATION_EPISODES: 5,
# Exploration
c.EXPLORATION_STEPS: 1000,
c.EXPLORATION_STRATEGY: UniformContinuousAgent(min_action,
max_action,
np.random.RandomState(seed)),
# General
c.DEVICE: device,
c.SEED: seed,
# Load
c.LOAD_MODEL: False,
# Logging
c.PRINT_INTERVAL: 5000,
c.SAVE_INTERVAL: 50000,
c.LOG_INTERVAL: 1,
# Model
c.MODEL_SETTING: {
c.MODEL_ARCHITECTURE: LSTMSquashedGaussianSAC,
c.KWARGS: {
c.OBS_DIM: obs_dim,
c.HIDDEN_STATE_DIM: hidden_state_dim,
c.ACTION_DIM: action_dim,
c.SHARED_LAYERS: VALUE_BASED_LINEAR_LAYERS(in_dim=obs_dim),
c.INITIAL_ALPHA: 1.,
c.DEVICE: device,
c.NORMALIZE_OBS: False,
c.NORMALIZE_VALUE: False,
},
},
c.OPTIMIZER_SETTING: {
c.POLICY: {
c.OPTIMIZER: torch.optim.Adam,
c.KWARGS: {
c.LR: 3e-4,
},
},
c.QS: {
c.OPTIMIZER: torch.optim.Adam,
c.KWARGS: {
c.LR: 3e-4,
},
},
c.ALPHA: {
c.OPTIMIZER: torch.optim.Adam,
c.KWARGS: {
c.LR: 3e-4,
},
},
},
c.EVALUATION_PREPROCESSING: gt.Identity(),
c.TRAIN_PREPROCESSING: gt.Identity(),
# SAC
c.ACCUM_NUM_GRAD: 1,
c.ACTOR_UPDATE_INTERVAL: 1,
c.BATCH_SIZE: 256,
c.BUFFER_WARMUP: 1000,
c.GAMMA: 0.99,
c.LEARN_ALPHA: True,
c.MAX_GRAD_NORM: 1e10,
c.NUM_GRADIENT_UPDATES: 1,
c.NUM_PREFETCH: 1,
c.REWARD_SCALING: 1.,
c.STEPS_BETWEEN_UPDATE: 1,
c.TARGET_ENTROPY: -action_dim,
c.TARGET_UPDATE_INTERVAL: 5000,
c.TAU: 1.,
c.UPDATE_NUM: 0,
# Progress Tracking
c.CUM_EPISODE_LENGTHS: [0],
c.CURR_EPISODE: 1,
c.NUM_UPDATES: 0,
c.RETURNS: [],
# Save
c.SAVE_PATH: f"../results/pybullet/hopper/gt-sac-fr-lstm-reg_q_targ/{seed}",
# c.SAVE_PATH: None,
# train parameters
c.MAX_TOTAL_STEPS: max_total_steps,
c.TRAIN_RENDER: False,
}
train_sac(experiment_config=experiment_setting)
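# Usage note (illustrative, not part of the original script): the only required
# command-line argument is the seed, so assuming this file is saved as e.g.
# gt_sac_hopper.py (hypothetical name), a run would look like
#
#     python gt_sac_hopper.py --seed 42
#
# Checkpoints and returns are written under c.SAVE_PATH, i.e.
# ../results/pybullet/hopper/gt-sac-fr-lstm-reg_q_targ/<seed> for this config.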
| 27.5311 | 114 | 0.567431 |
794226326f7f791bb6770c1660668b5743e09659 | 383 | py | Python | tests/xoto3/utils/cm_test.py | xoeye/xoto3 | ef91cde3cce81e1ded311389358271d5c8eba02b | [
"MIT"
] | 16 | 2020-05-23T15:23:38.000Z | 2022-03-18T19:28:37.000Z | tests/xoto3/utils/cm_test.py | xoeye/xoto3 | ef91cde3cce81e1ded311389358271d5c8eba02b | [
"MIT"
] | 9 | 2020-08-19T23:08:36.000Z | 2021-10-06T17:16:35.000Z | tests/xoto3/utils/cm_test.py | xoeye/xoto3 | ef91cde3cce81e1ded311389358271d5c8eba02b | [
"MIT"
] | 2 | 2020-12-12T08:23:53.000Z | 2021-09-03T20:25:54.000Z | from contextlib import contextmanager
from xoto3.utils.cm import xf_cm
@contextmanager
def yield_3():
print("generating a 3")
yield 3
print("cleaning that 3 right on up")
def test_transform_context_manager():
def add_one(x: int):
return x + 1
yield_4 = xf_cm(add_one)(yield_3())
with yield_4 as actually_four:
assert actually_four == 4
| 18.238095 | 40 | 0.684073 |
7942264ef3dda42be1549e119ad750faf49fe9a5 | 2,756 | py | Python | src/pykeen/datasets/nations/__init__.py | nhutnamhcmus/pykeen | 62d4f075fbd39135d6a5c8677d95e1ac46f8318f | [
"MIT"
] | 88 | 2018-10-14T16:28:38.000Z | 2020-06-22T08:03:15.000Z | src/pykeen/datasets/nations/__init__.py | nhutnamhcmus/pykeen | 62d4f075fbd39135d6a5c8677d95e1ac46f8318f | [
"MIT"
] | 42 | 2018-10-10T18:05:56.000Z | 2020-06-09T09:19:27.000Z | src/pykeen/datasets/nations/__init__.py | nhutnamhcmus/pykeen | 62d4f075fbd39135d6a5c8677d95e1ac46f8318f | [
"MIT"
] | 19 | 2019-02-15T17:36:46.000Z | 2020-03-28T11:03:41.000Z | # -*- coding: utf-8 -*-
"""Get triples from the Nations dataset."""
import pathlib
from docdata import parse_docdata
from ..base import PathDataset
from ..literal_base import NumericPathDataset
from ...triples import TriplesNumericLiteralsFactory
__all__ = [
"NATIONS_TRAIN_PATH",
"NATIONS_TEST_PATH",
"NATIONS_VALIDATE_PATH",
"NATIONS_LITERALS_PATH",
"Nations",
"NationsLiteral",
]
HERE = pathlib.Path(__file__).resolve().parent
NATIONS_TRAIN_PATH = HERE.joinpath("train.txt")
NATIONS_TEST_PATH = HERE.joinpath("test.txt")
NATIONS_VALIDATE_PATH = HERE.joinpath("valid.txt")
NATIONS_LITERALS_PATH = HERE.joinpath("literals.txt")
@parse_docdata
class Nations(PathDataset):
"""The Nations dataset.
---
name: Nations
statistics:
entities: 14
relations: 55
training: 1592
testing: 201
validation: 199
triples: 1992
citation:
author: Zhenfeng Lei
year: 2017
github: ZhenfengLei/KGDatasets
"""
def __init__(self, create_inverse_triples: bool = False, **kwargs):
"""Initialize the Nations dataset.
:param create_inverse_triples: Should inverse triples be created? Defaults to false.
:param kwargs: keyword arguments passed to :class:`pykeen.datasets.base.PathDataset`.
"""
super().__init__(
training_path=NATIONS_TRAIN_PATH,
testing_path=NATIONS_TEST_PATH,
validation_path=NATIONS_VALIDATE_PATH,
create_inverse_triples=create_inverse_triples,
**kwargs,
)
@parse_docdata
class NationsLiteral(NumericPathDataset):
"""The Nations dataset with literals.
---
name: NationsL
statistics:
entities: 14
relations: 55
training: 1592
testing: 201
validation: 199
triples: 1992
literal_relations: 2
literal_triples: 26
citation:
author: Hoyt
year: 2020
github: pykeen/pykeen
"""
training: TriplesNumericLiteralsFactory
def __init__(self, create_inverse_triples: bool = False, **kwargs):
"""Initialize the Nations dataset with literals.
:param create_inverse_triples: Should inverse triples be created? Defaults to false.
:param kwargs: keyword arguments passed to :class:`pykeen.datasets.base.PathDataset`.
"""
super().__init__(
training_path=NATIONS_TRAIN_PATH,
testing_path=NATIONS_TEST_PATH,
validation_path=NATIONS_VALIDATE_PATH,
literals_path=NATIONS_LITERALS_PATH,
create_inverse_triples=create_inverse_triples,
**kwargs,
)
if __name__ == "__main__":
Nations().summarize()
| 26.247619 | 93 | 0.658926 |
794226eba8905b6ad571a59ff22592140a4e3305 | 1,170 | py | Python | gen_data2.py | tsnik/RandomGraphModelsComparison | f5ecfcfb0f267f3988edae08522de18ec036b6f7 | [
"MIT"
] | null | null | null | gen_data2.py | tsnik/RandomGraphModelsComparison | f5ecfcfb0f267f3988edae08522de18ec036b6f7 | [
"MIT"
] | null | null | null | gen_data2.py | tsnik/RandomGraphModelsComparison | f5ecfcfb0f267f3988edae08522de18ec036b6f7 | [
"MIT"
] | null | null | null | import pickle
from generators.GaiKapadiaGenerator import GaiKapadiaGeneratorHetero2, GaiKapadiaGenerator
from simulation import monte_carlo_orig
from utills import gen_banks
# Homogeneous
for num in range(1, 7):
banks = gen_banks(100)
data = {"banks": banks}
simulations = {}
x = [0.01 * n for n in range(100)]
mean = []
var = []
for i in x:
print(i)
simulations[i] = monte_carlo_orig(banks, 100, GaiKapadiaGenerator, i)
data["simulations"] = simulations
with open('data\\GaiKapadia\\simulation' + str(num) + '.pickle', 'wb') as f:
pickle.dump(data, f)
# Heterogeneous
for num in range(1, 7):
banks = gen_banks(100)
data = {"banks": banks}
simulations = {}
x = [0.01 * n for n in range(100)]
mean = []
var = []
for i in x:
print(i)
simulations[i] = monte_carlo_orig(banks, 100, GaiKapadiaGeneratorHetero2, i)
data["simulations"] = simulations
with open('data\\GaiKapadiaHetero\\simulation' + str(num) + '.pickle', 'wb') as f:
pickle.dump(data, f)
| 29.25 | 90 | 0.579487 |
794226f315b3ca7f7f401d85507eaff657454055 | 15,001 | py | Python | brats_example.py | mcogswellsri/dare_brain_demo | 9f4c4298dabbc1511c1bba37903f742ad1b0a7b1 | [
"Apache-2.0"
] | null | null | null | brats_example.py | mcogswellsri/dare_brain_demo | 9f4c4298dabbc1511c1bba37903f742ad1b0a7b1 | [
"Apache-2.0"
] | null | null | null | brats_example.py | mcogswellsri/dare_brain_demo | 9f4c4298dabbc1511c1bba37903f742ad1b0a7b1 | [
"Apache-2.0"
] | null | null | null | import tempfile
import argparse
import os
import os.path as pth
import uuid
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
import nibabel as nib
import skimage.transform
import skimage.io
import matplotlib
import matplotlib.pyplot as plt
from matplotlib.colors import ListedColormap
import colorsys
import seaborn as sns
import scipy.spatial.distance as distance
gray_cmap = matplotlib.cm.get_cmap('gray')
import sys
sys.path.append('./monai_model/')
from data_utils import load_data
modality_to_idx = {
't1': 0,
't2': 1,
't2flair': 2,
't1ce': 3,
}
class BraTSExample:
def __init__(self, params):
parser = argparse.ArgumentParser()
args = parser.parse_args()
self.tmpdir = self.make_single_example_dataset_dir(params.image_dir)
args.val_data = self.tmpdir
args.max_examples = 1
args.split = None
args.model = params.model
self.model = params.model
val_ds, val_loader = load_data(args, include_train=False, vis=True)
self.val_ds = val_ds
assert len(val_ds) == 1
# includes the pre-processing pipeline
self.val_loader = val_loader
# after pre-processing
self.example = next(iter(self.val_loader))
self.shape = self.example['image'].shape
self.slice_ids = list(map(str, range(0, self.shape[-1], 2)))
self.modality_to_idx = {
't1': 0,
't2': 1,
't2flair': 2,
't1ce': 3,
}
self.cache_dir = tempfile.mkdtemp()
self.pred = None
self.pred_thresh = 0.5
self.random_hash = pth.basename(params.image_dir.rstrip('/'))
self.MAX_CONST = 2000
def make_single_example_dataset_dir(self, example_dir):
tmpdir = tempfile.mkdtemp()
assert example_dir.endswith('/'), 'needed to get the right dname'
dname = pth.basename(pth.dirname(example_dir))
os.symlink(example_dir, pth.join(tmpdir, dname))
print(example_dir, pth.join(tmpdir, dname))
return tmpdir
def list_slices(self):
return self.slice_ids
def image_fname(self, img_id, modality):
slice_idx = int(img_id)
modality_idx = self.modality_to_idx[modality]
fname = f'slice_{modality}_{slice_idx}_{self.random_hash}.png'
fname = pth.join(self.cache_dir, fname)
if not pth.exists(fname):
slice = self.example['vis_image'][0, modality_idx, :, :, slice_idx]
plt.imsave(fname, slice, cmap=gray_cmap)
return fname
def gt_fname(self, slice_id, label):
slice_idx = int(slice_id)
label_idx = {
'ET': 0,
'TC': 1,
'WT': 2,
}[label]
fname = f'gt_{slice_idx}_{label}_{self.random_hash}.png'
fname = pth.join(self.cache_dir, fname)
if not pth.exists(fname):
ground_truth = self.example['seg'][0].cpu().numpy()
plt.imsave(fname, seg_cmap(ground_truth[label_idx, :, :, slice_idx], label_idx, colorscheme='gt'))
return fname
def pred_fname(self, slice_id, label, colorscheme='pred'):
slice_idx = int(slice_id)
label_idx = {
'ET': 0,
'TC': 1,
'WT': 2,
'CSF': 3,
'GM': 4,
'WM': 5,
}[label]
if label_idx > 2:
assert self.model == '2d_unet'
fname = f'pred_{slice_idx}_{label}_{self.random_hash}.png'
fname = pth.join(self.cache_dir, fname)
if not pth.exists(fname):
seg = self.get_y_pred(slice_idx, label_idx)
plt.imsave(fname, seg_cmap(seg, label_idx, colorscheme))
return fname
def counter_fname(self, slice_id, label, counter_input, modality, colorscheme='counterfactual'):
slice_idx = int(slice_id)
label_idx = {
'ET': 0,
'TC': 1,
'WT': 2,
}[label]
fname = f'counter_inpaint_{slice_idx}_{label}_{modality}_{self.random_hash}.png'
fname = pth.join(self.cache_dir, fname)
if not pth.exists(fname):
if self.model == 'unet':
raise Exception('not supported')
elif self.model == '2d_att_unet':
key = slice_id
img, counter_slice_id = counter_input
assert counter_slice_id == slice_id
seg = self.predict_slice_attunet(img)
seg = seg[label_idx]
elif self.model == '2d_unet':
key = slice_id
img, counter_slice_id = counter_input
assert counter_slice_id == slice_id
seg = self.predict_slice_2dunet(img)
seg = seg[label_idx]
plt.imsave(fname, seg_cmap(seg, label_idx, colorscheme))
return fname
def predict_slice_2dunet(self, img):
if self.model != '2d_unet':
raise Exception('Not using the right model. Should use the unet '
'from demo_unet/')
img = img.to(self.unet_2d.device)
logits = self.unet_2d.unet(img)
probs = logits # TODO: why? F.sigmoid(logits)
probs = probs[0] # remove batch dimension
# 'ET', 'TC', 'WT', 'CSF', 'GM', 'WM'
seg = (probs > 0.5)
return seg.cpu().numpy()
def predict_slice_attunet(self, img):
if self.model != '2d_att_unet':
raise Exception('Not using a 2d segmentation model. Predict as '
'one volume during initialization.')
img = img.to(self.attunet.device)
probs = self.attunet.unet(img)
probs = probs[0] # remove batch dimension
# 'Background', 'NCR/NET' , 'ED' , 'ET'
seg = (probs > 0.3)
# 'NCR/NET' , 'ED' , 'ET'
# l1, l2, l4
seg = seg[1:]
# see https://www.med.upenn.edu/cbica/brats2020/data.html and for mapping
# reverse conversion:
# l2 = WT - TC
# l1 = TC - ET
# l4 = ET
# forward conversion:
# ET = l4
# TC = l1 + ET
# WT = l2 + TC
# indices in seg after removing background dim:
# l4 - 2
# l2 - 1
# l1 - 0
ET = seg[2]
TC = seg[0] + ET
WT = seg[1] + TC
# [3, 240, 240] w/ first 3 ET,TC,WT in 1-hot float32
seg = torch.stack([ET, TC, WT])
seg = seg.to(torch.float).to('cpu').numpy()
# then, select the right modality to create [240, 240]... pass that to seg_cmap
return seg
def counterfactual(self, slice_id, label, counter_input):
slice_idx = int(slice_id)
label_idx = {
'ET': 0,
'TC': 1,
'WT': 2,
}[label]
# ground truth segmentation
gt = self.example['seg'][0].cpu().numpy()
gt = gt[label_idx, :, :, slice_idx]
img_shape = gt.shape
gt = gt.flatten()
# predicted segmentation
        # TODO: fix the root problem... going into eval mode results in weird
        # outputs... but there is stochasticity in the network, so fix the seed
        # here to keep repeated predictions reproducible.
torch.random.manual_seed(8)
pred = self.get_y_pred(slice_idx, label_idx)
pred = pred.flatten()
# counterfactual segmentation
img, counter_slice_id = counter_input
assert counter_slice_id == slice_id
if self.model == 'unet':
raise Exception('not supported')
elif self.model == '2d_att_unet':
counter = self.predict_slice_attunet(img)
elif self.model == '2d_unet':
counter = self.predict_slice_2dunet(img)
counter = counter[label_idx]
counter = counter.flatten()
assert set(list(np.unique(gt)) + list(np.unique(pred)) + list(np.unique(counter))) <= {0., 1.}
if gt.sum() != 0:
before_dice = distance.dice(gt, pred)
after_dice = distance.dice(gt, counter)
diff = after_dice - before_dice
else:
before_dice = 0
after_dice = 0
diff = 0
cmap = plt.cm.get_cmap('coolwarm')
norm = matplotlib.colors.Normalize(vmin=-1., vmax=1., clip=True)
mapper = plt.cm.ScalarMappable(norm=norm, cmap=cmap)
diff_rgba = mapper.to_rgba(diff)
stats = {
'before_dice': before_dice,
'after_dice': after_dice,
'diff_dice': diff,
'diff_color': matplotlib.colors.rgb2hex(diff_rgba),
}
# there's no spatial information, but still show as a (uniform) spatial heatmap
heatmap = np.ones(img_shape)
# diff in [-1, 1]
heatmap *= diff
# center at 0.5 in [0, 1] to meet imsave expectations
heatmap = (heatmap + 1) / 2
fname = f'counter_heatmap_{slice_idx}_{label}_{self.random_hash}.png'
fname = pth.join(self.cache_dir, fname)
# plot with diverging map centered at 0.5
plt.imsave(fname, heatmap, vmin=0, vmax=1, cmap=cmap)
return fname, stats
def predict(self):
if self.model != 'unet':
raise Exception('Not using a 3d segmentation model. Predict as '
'slices are requested instead of once beforehand '
'as performed here.')
self.net.eval()
x, y, batch = prepare_batch(self.example)
y_pred = self.net(x)
y_pred_probs = torch.sigmoid(y_pred)
pred_probs = y_pred_probs.cpu().detach().numpy()
preds = (pred_probs > self.pred_thresh).astype(pred_probs.dtype)
self.pred_probs = pred_probs
self.preds = preds
if 'seg' in self.example:
self.example['y'] = self.example['seg']
self.example['y_pred'] = (y_pred_probs > self.pred_thresh).to(y_pred_probs)
self.example['y_pred_probs'] = y_pred_probs
def get_y_pred(self, slice_idx, label_idx=None):
if self.model == 'unet':
# TODO: should be self.preds, but make sure this doesn't mess up caching
if self.pred is None:
self.predict()
seg = self.preds[0, :, :, :, slice_idx]
elif self.model == '2d_att_unet':
img = self.example['vis_image'][:, :, :, :, slice_idx]
seg = self.predict_slice_attunet(img)
elif self.model == '2d_unet':
img = self.example['vis_image'][:, :, :, :, slice_idx]
seg = self.predict_slice_2dunet(img)
if label_idx is not None:
seg = seg[label_idx]
return seg
def get_box_from_tumor(self, slice_id, tumor_type):
label_idx = {
'ET': 0,
'TC': 1,
'WT': 2,
}[tumor_type]
slice_idx = int(slice_id)
ground_truth = self.example['seg'][0].cpu().numpy()
ground_truth = ground_truth[label_idx, :, :, slice_idx]
# no label
if (ground_truth == 0).all():
return None
col_inds = np.where(ground_truth.sum(axis=0))[0]
x = col_inds.min()
w = (col_inds.max() + 1) - x
row_inds = np.where(ground_truth.sum(axis=1))[0]
y = row_inds.min()
h = (row_inds.max() + 1) - y
img_height, img_width = ground_truth.shape
box = [x/img_width, y/img_height, w/img_width, h/img_height]
return box
def preprocess_for_inpainter(self, slice_id, modality):
slice_idx = int(slice_id)
eps = 1e-5
slice_size = (256, 256)
nifti_file = self.val_ds.data[0][modality]
try:
vol = nib.load(nifti_file)
except nib.filebasedimages.ImageFileError as e:
warnings.warn(str(e))
vol = vol.get_data()
# normalize by constant
img_max = self.MAX_CONST
vol = np.clip(vol, 0, self.MAX_CONST)
if vol.max() > img_max:
warnings.warn(f'img max is {vol.max()}, but normalizing by {img_max}')
# normalize by individual max
#img_max = vol.max() + eps
img = vol[:, :, slice_idx]
img = img.astype(np.float)
img = img / img_max
img = skimage.transform.resize(img, slice_size)
assert len(img.shape) == 2, 'assumes single channel for now'
img = np.tile(img[:, :, None], 3)
return img
#img = (img * 255).astype(np.uint8)
#fname = f'in_inpaint_{slice_idx}_{self.random_hash}.png'
#fname = pth.join(self.cache_dir, fname)
#skimage.io.imsave(fname, img)
#return fname
def deprocess_from_inpainter(self, inpainted_img):
img = inpainted_img.astype(np.float)
img = img * self.MAX_CONST
return img
# https://stackoverflow.com/questions/37765197/darken-or-lighten-a-color-in-matplotlib
def scale_lightness(rgb, scale_l):
# scale_l from 0 to 2
# convert rgb to hls
h, l, s = colorsys.rgb_to_hls(*rgb)
# manipulate h, l, s values and return as rgb
return colorsys.hls_to_rgb(h, min(1, l * scale_l), s = s)
def get_seg_color(label_idx, colorscheme, with_alpha=True):
# custom colormap with different color for each label
scale = {
'gt': 0.7,
'pred': 1.0,
'counterfactual': 1.3,
}[colorscheme]
# ET, TC, WT
if label_idx == 0:
# c69255
color = (198/256, 146/256, 85/256)
elif label_idx == 1:
# a291e1
color = (162/256, 145/256, 225/256)
if colorscheme == 'counterfactual':
scale = 1.15
elif label_idx == 2:
# 5ea5c5
color = (94/256, 165/256, 197/256)
# CSF, GM, WM
elif label_idx == 3:
# DB162F
color = (219/256, 22/256, 47/256)
scale = scale + .3
elif label_idx == 4:
# 383961
color = (56/256, 57/256, 97/256)
scale = scale + .3
elif label_idx == 5:
# 38423B
color = (56/256, 66/256, 59/256)
scale = scale + .3
color = scale_lightness(color, scale)
if with_alpha:
color += (1,)
return color
def seg_cmap(seg, label_idx, colorscheme):
assert set(np.unique(seg)).issubset(set([0., 1.]))
color = get_seg_color(label_idx, colorscheme)
colors = np.array([
[0, 0, 0, 0],
color,
])
cmap = ListedColormap(colors)
# normal color map
#cmap = matplotlib.cm.get_cmap('viridis')
img = cmap(seg)
# find background mask
mask = (seg == 0)
mask = mask[:, :, None].repeat(4, axis=2)
mask[:, :, :3] = False
# set background alpha to 0
img[mask] = 0.
return img
def prepare_batch(batch):
batch = dict(batch)
#for k, tensor in batch.items():
# if torch.is_tensor(tensor):
# batch[k] = tensor.to(device)
x = batch['image']
if "seg" in batch:
seg = batch["seg"]
else:
seg = None
return x, seg, batch
| 32.399568 | 110 | 0.562496 |
794227215472f302fed33acd7bb2817b778bbb19 | 4,458 | py | Python | model/AAConv2d.py | BenQLange/AttentionAugmentedConvLSTM | d8419b7a628b02ac49e8450deb3d60450c7b2d6b | [
"MIT"
] | 30 | 2020-10-06T04:28:16.000Z | 2022-03-26T16:36:31.000Z | model/AAConv2d.py | BenQLange/AttentionAugmentedConvLSTM | d8419b7a628b02ac49e8450deb3d60450c7b2d6b | [
"MIT"
] | null | null | null | model/AAConv2d.py | BenQLange/AttentionAugmentedConvLSTM | d8419b7a628b02ac49e8450deb3d60450c7b2d6b | [
"MIT"
] | 11 | 2020-10-16T08:33:21.000Z | 2021-11-14T14:21:46.000Z | """
Pytorch implementation of Attention Augmented Convolution.
Developed by: Myeongjun Kim (not us)
Source code: https://github.com/leaderj1001/Attention-Augmented-Conv2d
"""
import torch
import torch.nn as nn
import torch.nn.functional as F
use_cuda = torch.cuda.is_available()
device = torch.device("cuda" if use_cuda else "cpu")
class AAConv2d(nn.Module):
def __init__(self, in_channels, out_channels, kernel_size, dk, dv, Nh, relative=True):
super(AAConv2d, self).__init__()
self.in_channels = in_channels
self.out_channels = out_channels
self.kernel_size = kernel_size
self.dk = (out_channels*dk)//1000
self.dv = (out_channels*dv)//1000
self.Nh = Nh
self.relative = relative
self.conv_out = nn.Conv2d(self.in_channels, self.out_channels - self.dv, self.kernel_size, padding=1)
self.qkv_conv = nn.Conv2d(self.in_channels, 2 * self.dk + self.dv, kernel_size=1)
self.attn_out = nn.Conv2d(self.dv, self.dv, 1)
def forward(self, x):
batch, _, height, width = x.size()
conv_out = self.conv_out(x)
flat_q, flat_k, flat_v, q, k, v = self.compute_flat_qkv(x, self.dk, self.dv, self.Nh)
logits = torch.matmul(flat_q.transpose(2, 3), flat_k)
if self.relative:
h_rel_logits, w_rel_logits = self.relative_logits(q)
logits += h_rel_logits
logits += w_rel_logits
weights = F.softmax(logits, dim=-1)
attn_out = torch.matmul(weights, flat_v.transpose(2, 3))
attn_out = torch.reshape(attn_out, (batch, self.Nh, self.dv // self.Nh, height, width))
attn_out = self.combine_heads_2d(attn_out)
attn_out = self.attn_out(attn_out)
return torch.cat((conv_out, attn_out), dim=1)
def compute_flat_qkv(self, x, dk, dv, Nh):
N, _, H, W = x.size()
qkv = self.qkv_conv(x)
q, k, v = torch.split(qkv, [dk, dk, dv], dim=1)
q = self.split_heads_2d(q, Nh)
k = self.split_heads_2d(k, Nh)
v = self.split_heads_2d(v, Nh)
dkh = dk // Nh
q *= dkh ** -0.5
flat_q = torch.reshape(q, (N, Nh, dk // Nh, H * W))
flat_k = torch.reshape(k, (N, Nh, dk // Nh, H * W))
flat_v = torch.reshape(v, (N, Nh, dv // Nh, H * W))
return flat_q, flat_k, flat_v, q, k, v
def split_heads_2d(self, x, Nh):
batch, channels, height, width = x.size()
ret_shape = (batch, Nh, channels // Nh, height, width)
split = torch.reshape(x, ret_shape)
return split
def combine_heads_2d(self, x):
batch, Nh, dv, H, W = x.size()
ret_shape = (batch, Nh * dv, H, W)
return torch.reshape(x, ret_shape)
def relative_logits(self, q):
B, Nh, dk, H, W = q.size()
q = torch.transpose(q, 2, 4).transpose(2, 3)
key_rel_w = nn.Parameter(torch.randn((2 * W - 1, dk), requires_grad=True)).to(device)
rel_logits_w = self.relative_logits_1d(q, key_rel_w, H, W, Nh, "w")
key_rel_h = nn.Parameter(torch.randn((2 * H - 1, dk), requires_grad=True)).to(device)
rel_logits_h = self.relative_logits_1d(torch.transpose(q, 2, 3), key_rel_h, W, H, Nh, "h")
return rel_logits_h, rel_logits_w
def relative_logits_1d(self, q, rel_k, H, W, Nh, case):
rel_logits = torch.einsum('bhxyd,md->bhxym', q, rel_k)
rel_logits = torch.reshape(rel_logits, (-1, Nh * H, W, 2 * W - 1))
rel_logits = self.rel_to_abs(rel_logits)
rel_logits = torch.reshape(rel_logits, (-1, Nh, H, W, W))
rel_logits = torch.unsqueeze(rel_logits, dim=3)
rel_logits = rel_logits.repeat((1, 1, 1, H, 1, 1))
if case == "w":
rel_logits = torch.transpose(rel_logits, 3, 4)
elif case == "h":
rel_logits = torch.transpose(rel_logits, 2, 4).transpose(4, 5).transpose(3, 5)
rel_logits = torch.reshape(rel_logits, (-1, Nh, H * W, H * W))
return rel_logits
def rel_to_abs(self, x):
B, Nh, L, _ = x.size()
col_pad = torch.zeros((B, Nh, L, 1)).to(device)
x = torch.cat((x, col_pad), dim=3)
flat_x = torch.reshape(x, (B, Nh, L * 2 * L))
flat_pad = torch.zeros((B, Nh, L - 1)).to(device)
flat_x_padded = torch.cat((flat_x, flat_pad), dim=2)
final_x = torch.reshape(flat_x_padded, (B, Nh, L + 1, 2 * L - 1))
final_x = final_x[:, :, :L, L - 1:]
return final_x
| 38.431034 | 109 | 0.598026 |
79422756098ce2971200da36efd707c502167e63 | 22,407 | py | Python | django/forms/forms.py | Miserlou/django | 35ddeee45573de57ae3c791bf36496b4a7028ddf | [
"BSD-3-Clause"
] | 1 | 2016-03-07T15:37:48.000Z | 2016-03-07T15:37:48.000Z | django/forms/forms.py | erdem/django | 76d5daa60f90d3692b3ff3b7f5054e4bc7c1f374 | [
"BSD-3-Clause"
] | null | null | null | django/forms/forms.py | erdem/django | 76d5daa60f90d3692b3ff3b7f5054e4bc7c1f374 | [
"BSD-3-Clause"
] | null | null | null | """
Form classes
"""
from __future__ import absolute_import, unicode_literals
import copy
from django.core.exceptions import ValidationError
from django.forms.fields import Field, FileField
from django.forms.util import flatatt, ErrorDict, ErrorList
from django.forms.widgets import Media, media_property, TextInput, Textarea
from django.utils.datastructures import SortedDict
from django.utils.html import conditional_escape, format_html
from django.utils.encoding import StrAndUnicode, smart_unicode, force_unicode
from django.utils.safestring import mark_safe
__all__ = ('BaseForm', 'Form')
NON_FIELD_ERRORS = '__all__'
def pretty_name(name):
"""Converts 'first_name' to 'First name'"""
if not name:
return ''
return name.replace('_', ' ').capitalize()
def get_declared_fields(bases, attrs, with_base_fields=True):
"""
Create a list of form field instances from the passed in 'attrs', plus any
similar fields on the base classes (in 'bases'). This is used by both the
    Form and ModelForm metaclasses.
If 'with_base_fields' is True, all fields from the bases are used.
Otherwise, only fields in the 'declared_fields' attribute on the bases are
used. The distinction is useful in ModelForm subclassing.
Also integrates any additional media definitions
"""
fields = [(field_name, attrs.pop(field_name)) for field_name, obj in attrs.items() if isinstance(obj, Field)]
fields.sort(key=lambda x: x[1].creation_counter)
# If this class is subclassing another Form, add that Form's fields.
# Note that we loop over the bases in *reverse*. This is necessary in
# order to preserve the correct order of fields.
if with_base_fields:
for base in bases[::-1]:
if hasattr(base, 'base_fields'):
fields = base.base_fields.items() + fields
else:
for base in bases[::-1]:
if hasattr(base, 'declared_fields'):
fields = base.declared_fields.items() + fields
return SortedDict(fields)
class DeclarativeFieldsMetaclass(type):
"""
Metaclass that converts Field attributes to a dictionary called
'base_fields', taking into account parent class 'base_fields' as well.
"""
def __new__(cls, name, bases, attrs):
attrs['base_fields'] = get_declared_fields(bases, attrs)
new_class = super(DeclarativeFieldsMetaclass,
cls).__new__(cls, name, bases, attrs)
if 'media' not in attrs:
new_class.media = media_property(new_class)
return new_class
class BaseForm(StrAndUnicode):
# This is the main implementation of all the Form logic. Note that this
# class is different than Form. See the comments by the Form class for more
# information. Any improvements to the form API should be made to *this*
# class, not to the Form class.
def __init__(self, data=None, files=None, auto_id='id_%s', prefix=None,
initial=None, error_class=ErrorList, label_suffix=':',
empty_permitted=False):
self.is_bound = data is not None or files is not None
self.data = data or {}
self.files = files or {}
self.auto_id = auto_id
self.prefix = prefix
self.initial = initial or {}
self.error_class = error_class
self.label_suffix = label_suffix
self.empty_permitted = empty_permitted
self._errors = None # Stores the errors after clean() has been called.
self._changed_data = None
# The base_fields class attribute is the *class-wide* definition of
# fields. Because a particular *instance* of the class might want to
# alter self.fields, we create self.fields here by copying base_fields.
# Instances should always modify self.fields; they should not modify
# self.base_fields.
self.fields = copy.deepcopy(self.base_fields)
def __unicode__(self):
return self.as_table()
def __iter__(self):
for name in self.fields:
yield self[name]
def __getitem__(self, name):
"Returns a BoundField with the given name."
try:
field = self.fields[name]
except KeyError:
raise KeyError('Key %r not found in Form' % name)
return BoundField(self, field, name)
def _get_errors(self):
"Returns an ErrorDict for the data provided for the form"
if self._errors is None:
self.full_clean()
return self._errors
errors = property(_get_errors)
def is_valid(self):
"""
Returns True if the form has no errors. Otherwise, False. If errors are
being ignored, returns False.
"""
return self.is_bound and not bool(self.errors)
def add_prefix(self, field_name):
"""
Returns the field name with a prefix appended, if this Form has a
prefix set.
Subclasses may wish to override.
"""
return self.prefix and ('%s-%s' % (self.prefix, field_name)) or field_name
def add_initial_prefix(self, field_name):
"""
Add a 'initial' prefix for checking dynamic initial values
"""
return 'initial-%s' % self.add_prefix(field_name)
def _html_output(self, normal_row, error_row, row_ender, help_text_html, errors_on_separate_row):
"Helper function for outputting HTML. Used by as_table(), as_ul(), as_p()."
top_errors = self.non_field_errors() # Errors that should be displayed above all fields.
output, hidden_fields = [], []
for name, field in self.fields.items():
html_class_attr = ''
bf = self[name]
bf_errors = self.error_class([conditional_escape(error) for error in bf.errors]) # Escape and cache in local variable.
if bf.is_hidden:
if bf_errors:
top_errors.extend(['(Hidden field %s) %s' % (name, force_unicode(e)) for e in bf_errors])
hidden_fields.append(unicode(bf))
else:
                # Create a 'class="..."' attribute if the row should have any
# CSS classes applied.
css_classes = bf.css_classes()
if css_classes:
html_class_attr = ' class="%s"' % css_classes
if errors_on_separate_row and bf_errors:
output.append(error_row % force_unicode(bf_errors))
if bf.label:
label = conditional_escape(force_unicode(bf.label))
# Only add the suffix if the label does not end in
# punctuation.
if self.label_suffix:
if label[-1] not in ':?.!':
label = format_html('{0}{1}', label, self.label_suffix)
label = bf.label_tag(label) or ''
else:
label = ''
if field.help_text:
help_text = help_text_html % force_unicode(field.help_text)
else:
help_text = ''
output.append(normal_row % {
'errors': force_unicode(bf_errors),
'label': force_unicode(label),
'field': unicode(bf),
'help_text': help_text,
'html_class_attr': html_class_attr
})
if top_errors:
output.insert(0, error_row % force_unicode(top_errors))
if hidden_fields: # Insert any hidden fields in the last row.
str_hidden = ''.join(hidden_fields)
if output:
last_row = output[-1]
# Chop off the trailing row_ender (e.g. '</td></tr>') and
# insert the hidden fields.
if not last_row.endswith(row_ender):
# This can happen in the as_p() case (and possibly others
# that users write): if there are only top errors, we may
# not be able to conscript the last row for our purposes,
# so insert a new, empty row.
last_row = (normal_row % {'errors': '', 'label': '',
'field': '', 'help_text':'',
'html_class_attr': html_class_attr})
output.append(last_row)
output[-1] = last_row[:-len(row_ender)] + str_hidden + row_ender
else:
# If there aren't any rows in the output, just append the
# hidden fields.
output.append(str_hidden)
return mark_safe('\n'.join(output))
def as_table(self):
"Returns this form rendered as HTML <tr>s -- excluding the <table></table>."
return self._html_output(
normal_row = '<tr%(html_class_attr)s><th>%(label)s</th><td>%(errors)s%(field)s%(help_text)s</td></tr>',
error_row = '<tr><td colspan="2">%s</td></tr>',
row_ender = '</td></tr>',
help_text_html = '<br /><span class="helptext">%s</span>',
errors_on_separate_row = False)
def as_ul(self):
"Returns this form rendered as HTML <li>s -- excluding the <ul></ul>."
return self._html_output(
normal_row = '<li%(html_class_attr)s>%(errors)s%(label)s %(field)s%(help_text)s</li>',
error_row = '<li>%s</li>',
row_ender = '</li>',
help_text_html = ' <span class="helptext">%s</span>',
errors_on_separate_row = False)
def as_p(self):
"Returns this form rendered as HTML <p>s."
return self._html_output(
normal_row = '<p%(html_class_attr)s>%(label)s %(field)s%(help_text)s</p>',
error_row = '%s',
row_ender = '</p>',
help_text_html = ' <span class="helptext">%s</span>',
errors_on_separate_row = True)
def non_field_errors(self):
"""
Returns an ErrorList of errors that aren't associated with a particular
field -- i.e., from Form.clean(). Returns an empty ErrorList if there
are none.
"""
return self.errors.get(NON_FIELD_ERRORS, self.error_class())
def _raw_value(self, fieldname):
"""
Returns the raw_value for a particular field name. This is just a
convenient wrapper around widget.value_from_datadict.
"""
field = self.fields[fieldname]
prefix = self.add_prefix(fieldname)
return field.widget.value_from_datadict(self.data, self.files, prefix)
def full_clean(self):
"""
Cleans all of self.data and populates self._errors and
self.cleaned_data.
"""
self._errors = ErrorDict()
if not self.is_bound: # Stop further processing.
return
self.cleaned_data = {}
# If the form is permitted to be empty, and none of the form data has
# changed from the initial data, short circuit any validation.
if self.empty_permitted and not self.has_changed():
return
self._clean_fields()
self._clean_form()
self._post_clean()
if self._errors:
del self.cleaned_data
def _clean_fields(self):
for name, field in self.fields.items():
# value_from_datadict() gets the data from the data dictionaries.
# Each widget type knows how to retrieve its own data, because some
# widgets split data over several HTML fields.
value = field.widget.value_from_datadict(self.data, self.files, self.add_prefix(name))
try:
if isinstance(field, FileField):
initial = self.initial.get(name, field.initial)
value = field.clean(value, initial)
else:
value = field.clean(value)
self.cleaned_data[name] = value
if hasattr(self, 'clean_%s' % name):
value = getattr(self, 'clean_%s' % name)()
self.cleaned_data[name] = value
except ValidationError as e:
self._errors[name] = self.error_class(e.messages)
if name in self.cleaned_data:
del self.cleaned_data[name]
def _clean_form(self):
try:
self.cleaned_data = self.clean()
except ValidationError as e:
self._errors[NON_FIELD_ERRORS] = self.error_class(e.messages)
def _post_clean(self):
"""
An internal hook for performing additional cleaning after form cleaning
is complete. Used for model validation in model forms.
"""
pass
def clean(self):
"""
Hook for doing any extra form-wide cleaning after Field.clean() been
called on every field. Any ValidationError raised by this method will
not be associated with a particular field; it will have a special-case
association with the field named '__all__'.
"""
return self.cleaned_data
def has_changed(self):
"""
Returns True if data differs from initial.
"""
return bool(self.changed_data)
def _get_changed_data(self):
if self._changed_data is None:
self._changed_data = []
# XXX: For now we're asking the individual widgets whether or not the
# data has changed. It would probably be more efficient to hash the
# initial data, store it in a hidden field, and compare a hash of the
# submitted data, but we'd need a way to easily get the string value
# for a given field. Right now, that logic is embedded in the render
# method of each widget.
for name, field in self.fields.items():
prefixed_name = self.add_prefix(name)
data_value = field.widget.value_from_datadict(self.data, self.files, prefixed_name)
if not field.show_hidden_initial:
initial_value = self.initial.get(name, field.initial)
else:
initial_prefixed_name = self.add_initial_prefix(name)
hidden_widget = field.hidden_widget()
initial_value = hidden_widget.value_from_datadict(
self.data, self.files, initial_prefixed_name)
if field.widget._has_changed(initial_value, data_value):
self._changed_data.append(name)
return self._changed_data
changed_data = property(_get_changed_data)
def _get_media(self):
"""
Provide a description of all media required to render the widgets on this form
"""
media = Media()
for field in self.fields.values():
media = media + field.widget.media
return media
media = property(_get_media)
def is_multipart(self):
"""
Returns True if the form needs to be multipart-encoded, i.e. it has
FileInput. Otherwise, False.
"""
for field in self.fields.values():
if field.widget.needs_multipart_form:
return True
return False
def hidden_fields(self):
"""
Returns a list of all the BoundField objects that are hidden fields.
Useful for manual form layout in templates.
"""
return [field for field in self if field.is_hidden]
def visible_fields(self):
"""
Returns a list of BoundField objects that aren't hidden fields.
The opposite of the hidden_fields() method.
"""
return [field for field in self if not field.is_hidden]
class Form(BaseForm):
"A collection of Fields, plus their associated data."
# This is a separate class from BaseForm in order to abstract the way
# self.fields is specified. This class (Form) is the one that does the
# fancy metaclass stuff purely for the semantic sugar -- it allows one
# to define a form using declarative syntax.
# BaseForm itself has no way of designating self.fields.
__metaclass__ = DeclarativeFieldsMetaclass
class BoundField(StrAndUnicode):
"A Field plus data"
def __init__(self, form, field, name):
self.form = form
self.field = field
self.name = name
self.html_name = form.add_prefix(name)
self.html_initial_name = form.add_initial_prefix(name)
self.html_initial_id = form.add_initial_prefix(self.auto_id)
if self.field.label is None:
self.label = pretty_name(name)
else:
self.label = self.field.label
self.help_text = field.help_text or ''
def __unicode__(self):
"""Renders this field as an HTML widget."""
if self.field.show_hidden_initial:
return self.as_widget() + self.as_hidden(only_initial=True)
return self.as_widget()
def __iter__(self):
"""
Yields rendered strings that comprise all widgets in this BoundField.
This really is only useful for RadioSelect widgets, so that you can
iterate over individual radio buttons in a template.
"""
for subwidget in self.field.widget.subwidgets(self.html_name, self.value()):
yield subwidget
def __len__(self):
return len(list(self.__iter__()))
def __getitem__(self, idx):
return list(self.__iter__())[idx]
def _errors(self):
"""
Returns an ErrorList for this field. Returns an empty ErrorList
if there are none.
"""
return self.form.errors.get(self.name, self.form.error_class())
errors = property(_errors)
def as_widget(self, widget=None, attrs=None, only_initial=False):
"""
Renders the field by rendering the passed widget, adding any HTML
attributes passed as attrs. If no widget is specified, then the
field's default widget will be used.
"""
if not widget:
widget = self.field.widget
attrs = attrs or {}
auto_id = self.auto_id
if auto_id and 'id' not in attrs and 'id' not in widget.attrs:
if not only_initial:
attrs['id'] = auto_id
else:
attrs['id'] = self.html_initial_id
if not only_initial:
name = self.html_name
else:
name = self.html_initial_name
return widget.render(name, self.value(), attrs=attrs)
def as_text(self, attrs=None, **kwargs):
"""
Returns a string of HTML for representing this as an <input type="text">.
"""
return self.as_widget(TextInput(), attrs, **kwargs)
def as_textarea(self, attrs=None, **kwargs):
"Returns a string of HTML for representing this as a <textarea>."
return self.as_widget(Textarea(), attrs, **kwargs)
def as_hidden(self, attrs=None, **kwargs):
"""
Returns a string of HTML for representing this as an <input type="hidden">.
"""
return self.as_widget(self.field.hidden_widget(), attrs, **kwargs)
def _data(self):
"""
Returns the data for this BoundField, or None if it wasn't given.
"""
return self.field.widget.value_from_datadict(self.form.data, self.form.files, self.html_name)
data = property(_data)
def value(self):
"""
Returns the value for this BoundField, using the initial value if
the form is not bound or the data otherwise.
"""
if not self.form.is_bound:
data = self.form.initial.get(self.name, self.field.initial)
if callable(data):
data = data()
else:
data = self.field.bound_data(
self.data, self.form.initial.get(self.name, self.field.initial)
)
return self.field.prepare_value(data)
def label_tag(self, contents=None, attrs=None):
"""
Wraps the given contents in a <label>, if the field has an ID attribute.
contents should be 'mark_safe'd to avoid HTML escaping. If contents
aren't given, uses the field's HTML-escaped label.
If attrs are given, they're used as HTML attributes on the <label> tag.
"""
contents = contents or conditional_escape(self.label)
widget = self.field.widget
id_ = widget.attrs.get('id') or self.auto_id
if id_:
attrs = attrs and flatatt(attrs) or ''
contents = format_html('<label for="{0}"{1}>{2}</label>',
widget.id_for_label(id_), attrs, contents
)
return mark_safe(contents)
def css_classes(self, extra_classes=None):
"""
Returns a string of space-separated CSS classes for this field.
"""
if hasattr(extra_classes, 'split'):
extra_classes = extra_classes.split()
extra_classes = set(extra_classes or [])
if self.errors and hasattr(self.form, 'error_css_class'):
extra_classes.add(self.form.error_css_class)
if self.field.required and hasattr(self.form, 'required_css_class'):
extra_classes.add(self.form.required_css_class)
return ' '.join(extra_classes)
def _is_hidden(self):
"Returns True if this BoundField's widget is hidden."
return self.field.widget.is_hidden
is_hidden = property(_is_hidden)
def _auto_id(self):
"""
Calculates and returns the ID attribute for this BoundField, if the
associated Form has specified auto_id. Returns an empty string otherwise.
"""
auto_id = self.form.auto_id
if auto_id and '%s' in smart_unicode(auto_id):
return smart_unicode(auto_id) % self.html_name
elif auto_id:
return self.html_name
return ''
auto_id = property(_auto_id)
def _id_for_label(self):
"""
Wrapper around the field widget's `id_for_label` method.
Useful, for example, for focusing on this field regardless of whether
it has a single widget or a MutiWidget.
"""
widget = self.field.widget
id_ = widget.attrs.get('id') or self.auto_id
return widget.id_for_label(id_)
id_for_label = property(_id_for_label)
| 40.228007 | 130 | 0.604945 |
794228278bf05eab939e7a221c05b24af49ad009 | 2,457 | py | Python | cinder/api/views/qos_specs.py | aarunsai81/netapp | 8f0f7bf9be7f4d9fb9c3846bfc639c90a05f86ba | [
"Apache-2.0"
] | 11 | 2015-08-25T13:11:18.000Z | 2020-10-15T11:29:20.000Z | cinder/api/views/qos_specs.py | aarunsai81/netapp | 8f0f7bf9be7f4d9fb9c3846bfc639c90a05f86ba | [
"Apache-2.0"
] | 5 | 2018-01-25T11:31:56.000Z | 2019-05-06T23:13:35.000Z | cinder/api/views/qos_specs.py | aarunsai81/netapp | 8f0f7bf9be7f4d9fb9c3846bfc639c90a05f86ba | [
"Apache-2.0"
] | 11 | 2015-02-20T18:48:24.000Z | 2021-01-30T20:26:18.000Z | # Copyright (C) 2013 eBay Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from cinder.api import common
class ViewBuilder(common.ViewBuilder):
"""Model QoS specs API responses as a python dictionary."""
_collection_name = "qos-specs"
def __init__(self):
"""Initialize view builder."""
super(ViewBuilder, self).__init__()
def summary_list(self, request, qos_specs, qos_count=None):
"""Show a list of qos_specs without many details."""
return self._list_view(self.detail, request, qos_specs, qos_count)
def summary(self, request, qos_spec):
"""Generic, non-detailed view of a qos_specs."""
return self.detail(request, qos_spec)
def detail(self, request, qos_spec):
"""Detailed view of a single qos_spec."""
# TODO(zhiteng) Add associations to detailed view
return {
'qos_specs': {
'id': qos_spec.id,
'name': qos_spec.name,
'consumer': qos_spec.consumer,
'specs': qos_spec.specs
},
'links': self._get_links(request,
qos_spec.id),
}
def associations(self, request, associates):
"""View of qos specs associations."""
return {
'qos_associations': associates
}
def _list_view(self, func, request, qos_specs, qos_count=None):
"""Provide a view for a list of qos_specs."""
specs_list = [func(request, specs)['qos_specs'] for specs in qos_specs]
specs_links = self._get_collection_links(request, qos_specs,
self._collection_name,
qos_count)
specs_dict = dict(qos_specs=specs_list)
if specs_links:
specs_dict['qos_specs_links'] = specs_links
return specs_dict
| 36.671642 | 79 | 0.612536 |
7942285e7af8c95021d52f3ee8e6210da7da4976 | 6,380 | py | Python | onmt/inference/search.py | Dan-hbd/NMTGMinor | 84e59ac8391ee78852d7c71afc60c3c8b8e3d44d | [
"MIT"
] | 75 | 2019-05-02T10:37:39.000Z | 2022-02-13T17:53:24.000Z | onmt/inference/search.py | Dan-hbd/NMTGMinor | 84e59ac8391ee78852d7c71afc60c3c8b8e3d44d | [
"MIT"
] | 11 | 2018-11-08T16:52:51.000Z | 2021-09-23T15:01:14.000Z | onmt/inference/search.py | Dan-hbd/NMTGMinor | 84e59ac8391ee78852d7c71afc60c3c8b8e3d44d | [
"MIT"
] | 34 | 2018-06-04T14:20:01.000Z | 2022-01-26T08:10:05.000Z | # Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the LICENSE file in
# the root directory of this source tree. An additional grant of patent rights
# can be found in the PATENTS file in the same directory.
import torch
import onmt
class Search(object):
def __init__(self, tgt_dict):
# self.pad = onmt.constants.PAD
# self.unk = onmt.constants.UNK
# self.eos = onmt.constants.EOS
# self.bos = onmt.constants.BOS
self.vocab_size = tgt_dict.size()
self.scores_buf = None
self.indices_buf = None
self.beams_buf = None
def _init_buffers(self, t):
if self.scores_buf is None:
self.scores_buf = t.new()
self.indices_buf = torch.LongTensor().to(device=t.device)
def step(self, step, lprobs, scores, beam_size):
"""Take a single search step.
Args:
step: the current search step, starting at 0
lprobs: (bsz x input_beam_size x vocab_size)
the model's log-probabilities over the vocabulary at the current step
scores: (bsz x input_beam_size x step)
the historical model scores of each hypothesis up to this point
Return: A tuple of (scores, indices, beams) where:
scores: (bsz x output_beam_size)
the scores of the chosen elements; output_beam_size can be
larger than input_beam_size, e.g., we may return
2*input_beam_size to account for EOS
indices: (bsz x output_beam_size)
the indices of the chosen elements
beams: (bsz x output_beam_size)
the hypothesis ids of the chosen elements, in the range [0, input_beam_size)
"""
raise NotImplementedError
def set_src_lengths(self, src_lengths):
self.src_lengths = src_lengths
class BeamSearch(Search):
def __init__(self, tgt_dict):
super().__init__(tgt_dict)
def step(self, step, lprobs, scores, initial_score=None, **kwargs):
super()._init_buffers(lprobs)
# batch size first, then beam size
bsz, beam_size, vocab_size = lprobs.size()
if step == 0:
# at the first step all hypotheses are equally likely, so use
# only the first beam
if initial_score is None or torch.sum(initial_score).item() == 0:
lprobs = lprobs[:, ::beam_size, :].contiguous()
else:
lprobs.add_(initial_score.unsqueeze(-1))
# if we don't do this, the first beam will contain top K of exactly the same thing ...
else:
# make probs contain cumulative scores for each hypothesis
lprobs.add_(scores[:, :, step - 1].unsqueeze(-1))
# here lprobs should be (bsz, beam_size, V) (in streaming, bsz should be 1)
torch.topk(
lprobs.view(bsz, -1), # after view, it should be (bsz, beam_size x V)
k=min(
# Take the best 2 x beam_size predictions. We'll choose the first
# beam_size of these which don't predict eos to continue with.
beam_size * 2,
lprobs.view(bsz, -1).size(1) - beam_size, # -beam_size so we never select pad (beam_size times)
),
out=(self.scores_buf, self.indices_buf),
)
# torch.div(self.indices_buf, vocab_size, out=self.beams_buf)
# beams_buf helps us know where the origin of each
self.beams_buf = torch.true_divide(self.indices_buf, vocab_size).long()
# indices: the word indices in the vocabulary
self.indices_buf.fmod_(vocab_size)
return self.scores_buf, self.indices_buf, self.beams_buf
class DiverseBeamSearch(Search):
"""Diverse Beam Search.
See "Diverse Beam Search: Decoding Diverse Solutions from Neural Sequence
Models" for details.
We only implement the Hamming Diversity penalty here, which performed best
in the original paper.
"""
def __init__(self, tgt_dict, num_groups, diversity_strength):
super().__init__(tgt_dict)
self.num_groups = num_groups
self.diversity_strength = -diversity_strength
self.diversity_buf = None
self.beam = BeamSearch(tgt_dict)
def step(self, step, lprobs, scores):
super()._init_buffers(lprobs)
bsz, beam_size, vocab_size = lprobs.size()
if beam_size % self.num_groups != 0:
raise ValueError(
'DiverseBeamSearch requires --beam to be divisible by the number of groups'
)
group_size = beam_size // self.num_groups
# initialize diversity penalty
if self.diversity_buf is None:
self.diversity_buf = lprobs.new()
torch.zeros(lprobs[:, 0, :].size(), out=self.diversity_buf)
scores_G, indices_G, beams_G = [], [], []
for g in range(self.num_groups):
lprobs_g = lprobs[:, g::self.num_groups, :]
scores_g = scores[:, g::self.num_groups, :] if step > 0 else None
# apply diversity penalty
if g > 0:
lprobs_g = torch.add(lprobs_g, self.diversity_strength, self.diversity_buf.unsqueeze(1))
else:
lprobs_g = lprobs_g.contiguous()
scores_buf, indices_buf, beams_buf = self.beam.step(step, lprobs_g, scores_g)
beams_buf.mul_(self.num_groups).add_(g)
scores_G.append(scores_buf.clone())
indices_G.append(indices_buf.clone())
beams_G.append(beams_buf.clone())
# update diversity penalty
self.diversity_buf.scatter_add_(
1,
indices_buf,
self.diversity_buf.new_ones(indices_buf.size())
)
# interleave results from different groups
self.scores_buf = torch.stack(scores_G, dim=2, out=self.scores_buf).view(bsz, -1)
self.indices_buf = torch.stack(indices_G, dim=2, out=self.indices_buf).view(bsz, -1)
self.beams_buf = torch.stack(beams_G, dim=2, out=self.beams_buf).view(bsz, -1)
return self.scores_buf, self.indices_buf, self.beams_buf
| 38.666667 | 112 | 0.614734 |
79422c05fdd619a3ad6566c6b19510aa2d729af3 | 6,700 | py | Python | tensorflow_probability/python/distributions/exponential.py | jakee417/probability-1 | ae7117f37ac441bc7a888167ea23e5e620c5bcde | [
"Apache-2.0"
] | 3,670 | 2018-02-14T03:29:40.000Z | 2022-03-30T01:19:52.000Z | tensorflow_probability/python/distributions/exponential.py | jakee417/probability-1 | ae7117f37ac441bc7a888167ea23e5e620c5bcde | [
"Apache-2.0"
] | 1,395 | 2018-02-24T02:28:49.000Z | 2022-03-31T16:12:06.000Z | tensorflow_probability/python/distributions/exponential.py | jakee417/probability-1 | ae7117f37ac441bc7a888167ea23e5e620c5bcde | [
"Apache-2.0"
] | 1,135 | 2018-02-14T01:51:10.000Z | 2022-03-28T02:24:11.000Z | # Copyright 2018 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""The Exponential distribution class."""
# Dependency imports
import numpy as np
import tensorflow.compat.v2 as tf
from tensorflow_probability.python.bijectors import softplus as softplus_bijector
from tensorflow_probability.python.distributions import gamma
from tensorflow_probability.python.internal import distribution_util
from tensorflow_probability.python.internal import dtype_util
from tensorflow_probability.python.internal import parameter_properties
from tensorflow_probability.python.internal import prefer_static as ps
from tensorflow_probability.python.internal import samplers
from tensorflow_probability.python.internal import tensor_util
__all__ = [
'Exponential',
]
class Exponential(gamma.Gamma):
"""Exponential distribution.
The Exponential distribution is parameterized by an event `rate` parameter.
#### Mathematical Details
The probability density function (pdf) is,
```none
pdf(x; lambda, x > 0) = exp(-lambda x) / Z
Z = 1 / lambda
```
  where `rate = lambda` and `Z` is the normalizing constant.
The Exponential distribution is a special case of the Gamma distribution,
i.e.,
```python
Exponential(rate) = Gamma(concentration=1., rate)
```
The Exponential distribution uses a `rate` parameter, or "inverse scale",
which can be intuited as,
```none
X ~ Exponential(rate=1)
Y = X / rate
```
"""
def __init__(self,
rate,
force_probs_to_zero_outside_support=False,
validate_args=False,
allow_nan_stats=True,
name='Exponential'):
"""Construct Exponential distribution with parameter `rate`.
Args:
rate: Floating point tensor, equivalent to `1 / mean`. Must contain only
positive values.
force_probs_to_zero_outside_support: Python `bool`. When `True`, negative
and non-integer values are evaluated "strictly": `cdf` returns
`0`, `sf` returns `1`, and `log_cdf` and `log_sf` correspond. When
`False`, the implementation is free to save computation (and TF graph
size) by evaluating something that matches the Exponential cdf at
non-negative values `x` but produces an unrestricted result on
other inputs. In the case of Exponential distribution, the `cdf`
formula in this case happens to be the continuous function
`1 - exp(rate * value)`.
Note that this function is not itself a cdf function.
Default value: `False`.
validate_args: Python `bool`, default `False`. When `True` distribution
parameters are checked for validity despite possibly degrading runtime
performance. When `False` invalid inputs may silently render incorrect
outputs.
allow_nan_stats: Python `bool`, default `True`. When `True`, statistics
(e.g., mean, mode, variance) use the value "`NaN`" to indicate the
result is undefined. When `False`, an exception is raised if one or
more of the statistic's batch members are undefined.
name: Python `str` name prefixed to Ops created by this class.
"""
parameters = dict(locals())
# Even though all statistics of are defined for valid inputs, this is not
# true in the parent class "Gamma." Therefore, passing
# allow_nan_stats=True
# through to the parent class results in unnecessary asserts.
with tf.name_scope(name) as name:
self._rate = tensor_util.convert_nonref_to_tensor(
rate,
name='rate',
dtype=dtype_util.common_dtype([rate], dtype_hint=tf.float32))
super(Exponential, self).__init__(
concentration=1.,
rate=self._rate,
allow_nan_stats=allow_nan_stats,
validate_args=validate_args,
force_probs_to_zero_outside_support=(
force_probs_to_zero_outside_support),
name=name)
self._parameters = parameters
@classmethod
def _parameter_properties(cls, dtype, num_classes=None):
# pylint: disable=g-long-lambda
return dict(
rate=parameter_properties.ParameterProperties(
default_constraining_bijector_fn=(
lambda: softplus_bijector.Softplus(low=dtype_util.eps(dtype)))))
# pylint: enable=g-long-lambda
@property
def rate(self):
return self._rate
def _cdf(self, value):
cdf = -tf.math.expm1(-self.rate * value)
# Set cdf = 0 when value is less than 0.
return distribution_util.extend_cdf_outside_support(value, cdf, low=0.)
def _log_survival_function(self, value):
rate = tf.convert_to_tensor(self._rate)
log_sf = self._log_prob(value, rate=rate) - tf.math.log(rate)
if self.force_probs_to_zero_outside_support:
# Set log_survival_function = 0 when value is less than 0.
log_sf = tf.where(value < 0., tf.zeros_like(log_sf), log_sf)
return log_sf
def _sample_n(self, n, seed=None):
rate = tf.convert_to_tensor(self.rate)
shape = ps.concat([[n], ps.shape(rate)], 0)
# Uniform variates must be sampled from the open-interval `(0, 1)` rather
# than `[0, 1)`. To do so, we use
# `np.finfo(dtype_util.as_numpy_dtype(self.dtype)).tiny`
# because it is the smallest, positive, "normal" number. A "normal" number
# is such that the mantissa has an implicit leading 1. Normal, positive
# numbers x, y have the reasonable property that, `x + y >= max(x, y)`. In
# this case, a subnormal number (i.e., np.nextafter) can cause us to sample
# 0.
sampled = samplers.uniform(
shape,
minval=np.finfo(dtype_util.as_numpy_dtype(self.dtype)).tiny,
maxval=1.,
seed=seed,
dtype=self.dtype)
return -tf.math.log(sampled) / rate
def _quantile(self, value):
return -tf.math.log1p(-value) / self.rate
def _default_event_space_bijector(self):
return softplus_bijector.Softplus(validate_args=self.validate_args)
@classmethod
def _maximum_likelihood_parameters(cls, value):
return {'rate': 1. / tf.reduce_mean(value, axis=0)}
| 37.853107 | 81 | 0.691343 |
79422c2db485f52d5c53a57e57ce272f710c5518 | 8,317 | py | Python | spikeforest/spikeforestwidgets/unitwaveformswidget.py | mhhennig/spikeforest | 5b4507ead724af3de0be5d48a3b23aaedb0be170 | [
"Apache-2.0"
] | 1 | 2021-09-23T01:07:19.000Z | 2021-09-23T01:07:19.000Z | spikeforest/spikeforestwidgets/unitwaveformswidget.py | mhhennig/spikeforest | 5b4507ead724af3de0be5d48a3b23aaedb0be170 | [
"Apache-2.0"
] | null | null | null | spikeforest/spikeforestwidgets/unitwaveformswidget.py | mhhennig/spikeforest | 5b4507ead724af3de0be5d48a3b23aaedb0be170 | [
"Apache-2.0"
] | 1 | 2021-09-23T01:07:21.000Z | 2021-09-23T01:07:21.000Z | import numpy as np
from matplotlib import pyplot as plt
import vdomr as vd
import time
from spikeforest import spikewidgets as sw
class UnitWaveformsWidget(vd.Component):
def __init__(self, *, recording, sorting, max_num_spikes_per_unit=20, snippet_len=100):
vd.Component.__init__(self)
self._widgets = [
UnitWaveformWidget(
recording=recording,
sorting=sorting,
unit_id=id,
average_waveform=None,
max_num_spikes_per_unit=max_num_spikes_per_unit,
snippet_len=snippet_len
)
for id in sorting.get_unit_ids()
]
vd.devel.loadBootstrap()
def setSelectedUnitIds(self, ids):
ids = set(ids)
for W in self._widgets:
W.setSelected(W.unitId() in ids)
def render(self):
box_style = dict(float='left')
boxes = [
vd.div(W, style=box_style)
for W in self._widgets
]
div = vd.div(boxes)
return div
class UnitWaveformWidget(vd.Component):
def __init__(self, *, recording, sorting, unit_id, average_waveform=None, show_average=True, max_num_spikes_per_unit=20, snippet_len=100):
vd.Component.__init__(self)
self._plot = _UnitWaveformPlot(
recording=recording,
sorting=sorting,
unit_id=unit_id,
average_waveform=average_waveform,
show_average=show_average,
max_num_spikes_per_unit=max_num_spikes_per_unit,
snippet_len=snippet_len
)
self._plot_div = vd.components.LazyDiv(self._plot)
self._unit_id = unit_id
self._selected = False
def setSelected(self, val):
if self._selected == val:
return
self._selected = val
self.refresh()
def unitId(self):
return self._unit_id
def render(self):
style0 = {'border': 'solid 1px black', 'margin': '5px'}
style1 = {}
if self._selected:
style1['background-color'] = 'yellow'
return vd.div(
vd.p('Unit {}'.format(self._unit_id), style={'text-align': 'center'}),
vd.div(self._plot_div, style=style0),
style=style1
)
class _UnitWaveformPlot(vd.components.Pyplot):
def __init__(self, *, recording, sorting, unit_id, average_waveform, show_average, max_num_spikes_per_unit, snippet_len):
vd.components.Pyplot.__init__(self)
self._recording = recording
self._sorting = sorting
self._unit_id = unit_id
self._max_num_spikes_per_unit = max_num_spikes_per_unit
self._average_waveform = average_waveform
self._show_average = show_average
self._snippet_len = snippet_len
def plot(self):
# W=sw.UnitWaveformsWidget(recording=self._recording,sorting=self._sorting,unit_ids=[self._unit_id],width=5,height=5)
# W.plot()
plot_unit_waveform(
recording=self._recording,
sorting=self._sorting,
unit_id=self._unit_id,
average_waveform=self._average_waveform,
show_average=self._show_average,
max_num_spikes_per_unit=self._max_num_spikes_per_unit,
snippet_len=self._snippet_len
)
def _compute_minimum_gap(x):
a = np.sort(np.unique(x))
if len(a) <= 1:
return 1
return np.min(np.diff(a))
def _plot_spike_shapes(*, representative_waveforms=None, average_waveform=None, show_average, channel_locations=None, ylim=None, max_representatives=None, color='blue', title=''):
if average_waveform is None:
if representative_waveforms is None:
raise Exception('You must provide either average_waveform, representative waveforms, or both')
average_waveform = np.mean(representative_waveforms, axis=2)
M = average_waveform.shape[0] # number of channels
T = average_waveform.shape[1] # number of timepoints
if ylim is None:
ylim = [average_waveform.min(), average_waveform.max()]
yrange = ylim[1] - ylim[0]
if channel_locations is None:
channel_locations = np.zeros((M, 2))
for m in range(M):
channel_locations[m, :] = [0, -m]
if channel_locations.shape[1] > 2:
channel_locations = channel_locations[:, -2:]
xmin = np.min(channel_locations[:, 0])
xmax = np.max(channel_locations[:, 0])
ymin = np.min(channel_locations[:, 1])
ymax = np.max(channel_locations[:, 1])
xgap = _compute_minimum_gap(channel_locations[:, 0])
ygap = _compute_minimum_gap(channel_locations[:, 1])
xvals = np.linspace(-xgap * 0.8 / 2, xgap * 0.8 / 2, T)
yscale = 1 / (yrange / 2) * ygap / 2 * 0.4
ax = plt.axes([0, 0, 1, 1], frameon=False)
ax.get_xaxis().set_visible(False)
ax.get_yaxis().set_visible(False)
ax.get_xaxis().set_ticks([])
ax.get_yaxis().set_ticks([])
if representative_waveforms is not None:
if max_representatives is not None:
W0 = representative_waveforms
if W0.shape[2] > max_representatives:
indices = np.random.choice(range(W0.shape[2]), size=max_representatives, replace=False)
representative_waveforms = W0[:, :, indices]
L = representative_waveforms.shape[2]
# for j in range(L):
# XX = np.zeros((T, M))
# YY = np.zeros((T, M))
# for m in range(M):
# loc = channel_locations[m, -2:]
# XX[:, m] = loc[0] + xvals
# YY[:, m] = loc[1] + (representative_waveforms[m, :, j] - representative_waveforms[m, 0, j])*yscale
# color=(np.random.uniform(0,1), np.random.uniform(0,1), np.random.uniform(0,1))
# plt.plot(XX, YY, color=color, alpha=0.3)
XX = np.zeros((T, M, L))
YY = np.zeros((T, M, L))
for m in range(M):
loc = channel_locations[m, -2:]
for j in range(L):
XX[:, m, j] = loc[0] + xvals
YY[:, m, j] = loc[1] + (representative_waveforms[m, :, j] - representative_waveforms[m, 0, j]) * yscale
XX = XX.reshape(T, M * L)
YY = YY.reshape(T, M * L)
plt.plot(XX, YY, color=(0.5, 0.5, 0.5), alpha=0.5)
if show_average:
XX = np.zeros((T, M))
YY = np.zeros((T, M))
for m in range(M):
loc = channel_locations[m, -2:]
XX[:, m] = loc[0] + xvals
YY[:, m] = loc[1] + (average_waveform[m, :] - average_waveform[m, 0]) * yscale
plt.plot(XX, YY, color)
plt.xlim(xmin - xgap / 2, xmax + xgap / 2)
plt.ylim(ymin - ygap / 2, ymax + ygap / 2)
# plt.gca().set_axis_off()
if title:
plt.title(title, color='gray')
def _get_random_spike_waveforms(*, recording, sorting, unit, max_num, channels, snippet_len):
st = sorting.get_unit_spike_train(unit_id=unit)
num_events = len(st)
if num_events > max_num:
event_indices = np.random.choice(range(num_events), size=max_num, replace=False)
else:
event_indices = range(num_events)
spikes = recording.get_snippets(reference_frames=st[event_indices].astype(int), snippet_len=snippet_len, channel_ids=channels)
if len(spikes) > 0:
spikes = np.dstack(tuple(spikes))
else:
spikes = np.zeros((recording.get_num_channels(), snippet_len, 0))
return spikes
def plot_unit_waveform(*, recording, sorting, unit_id, max_num_spikes_per_unit, average_waveform, show_average, channel_ids=None, snippet_len=100, title=''):
if not channel_ids:
channel_ids = recording.get_channel_ids()
M = len(channel_ids)
channel_locations = np.zeros((M, 2))
for ii, ch in enumerate(channel_ids):
loc = recording.get_channel_property(ch, 'location')
channel_locations[ii, :] = loc[-2:]
spikes = _get_random_spike_waveforms(recording=recording, sorting=sorting, unit=unit_id, max_num=max_num_spikes_per_unit, channels=channel_ids, snippet_len=snippet_len)
# if not title:
# title='Unit {}'.format(int(unit_id))
_plot_spike_shapes(representative_waveforms=spikes, channel_locations=channel_locations, average_waveform=average_waveform, show_average=show_average, title=title)
| 38.151376 | 179 | 0.621137 |
79422c2ea05a3e109616ee3fb82e1ee907b49d7e | 5,542 | py | Python | web.py | legoktm/mass-rename | 3f9b923c53b7ff66ffbd2a2052528f6f3a623386 | [
"MIT"
] | 1 | 2021-07-28T11:41:06.000Z | 2021-07-28T11:41:06.000Z | web.py | legoktm/mass-rename | 3f9b923c53b7ff66ffbd2a2052528f6f3a623386 | [
"MIT"
] | null | null | null | web.py | legoktm/mass-rename | 3f9b923c53b7ff66ffbd2a2052528f6f3a623386 | [
"MIT"
] | null | null | null | #!/data/project/legobot/python/bin/python
"""
Copyright (C) 2013 Legoktm
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
IN THE SOFTWARE.
"""
print "Content-type: text/html\n\n"
import cgitb
cgitb.enable()
import cgi
import os
import requests
import subprocess
import hashlib
environ = {'INSTANCEPROJECT': 'tools',
'LOGNAME': 'local-legobot',
'USER': 'local-legobot',
'PATH': '/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/X11R6/bin',
'HOME': '/data/project/legobot/',
'LANG': 'en_US.UTF-8',
'TERM': 'xterm-256color',
'SHELL': '/bin/bash',
'SHLVL': '1',
'PYWIKIBOT2_DIR': '/data/project/legobot/.pywikibot/',
'SUDO_USER': 'legoktm',
'USERNAME': 'local-legobot',
'SUDO_UID': '2552',
'INSTANCENAME': 'tools-login',
'_': '/usr/bin/python',
'SUDO_COMMAND': '/bin/bash',
'SUDO_GID': '500',
'OLDPWD': '/data/project/legobot/cgi-bin',
'NODE_PATH': '/usr/lib/nodejs:/usr/share/javascript',
'PWD': '/data/project/legobot/cgi-bin/massrename',
'MAIL': '/var/mail/local-legobot',
}
def TUSC(username, password, lang, project):
headers = {'User-agent': 'Commons mass-rename tool by User:Legoktm'}
# http://tools.wikimedia.de/~magnus/tusc.php?check=1&botmode=1&user=USERNAME&language=LANGUAGE&project=PROJECT&password=TUSC_PASSWORD
params = {'check': 1,
'botmode': 1,
'user': username,
'language': lang,
'project': project,
'password': password,
}
url = 'http://tools-webproxy/tusc/tusc.php'
r = requests.post(url, params, headers=headers)
try:
if int(r.text) == 1:
return True
except Exception:
pass
return False
def authusers(username):
    # Compare the username against each line of the whitelist file; iterating
    # over the raw string would yield single characters, so split into lines.
    with open(os.path.expanduser('~/.whitelist')) as f:
        lines = f.read().splitlines()
    for line in lines:
        if line.strip() == username:
            return True
    return False
def wrap(thing):
return '<html><head><title>The Super Rename Tool!</title></head><body>{0}</body></html>'.format(thing)
def start():
form = """<form action="/legobot/cgi-bin/massrename/web.py" method="post">
<input type="text" name="cat" placeholder="Category:Blah"><br />
<input type="text" name="find" placeholder="Thing to find"><br />
<input type="text" name="replace" placeholder="Thing to replace it with"><br />
<input type="text" name="reason" placeholder="Move reason"><br />
<input type="text" name="username" placeholder="TUSC: username"><br />
<input type="password" name="password" placeholder="TUSC: password"><br />
<input type="text" name="lang" placeholder="TUSC: language"><br />
<input type="text" name="project" placeholder="TUSC: project"><br />
<button type="submit">Go!</button>
</form>
"""
return wrap(form)
def tuscfailure():
return wrap('Could not authenticate with TUSC.')
def notanadmin():
return wrap('Unfortunately this tool can only be used by Commons administrators.')
def jobnotsent():
return wrap('There was an error sending your job. Please contact Legoktm with details of what you were trying '
'to do.')
def succeeded(val):
return wrap('Your job has been sent. Legobot will process it shortly. If there are any issues, please let Legoktm '
'know, and provide the ID for your job, which is: ' + val + '.')
form = cgi.FieldStorage()
if not 'cat' in form:
print start()
quit()
cat = form['cat'].value
username = form['username'].value
password = form['password'].value
lang = form['lang'].value
project = form['project'].value
find = form['find'].value
replace = form['replace'].value
reason = form['reason'].value
summary = reason + ". On behalf of " + username
t = TUSC(username, password, lang, project)
if not t:
print tuscfailure()
quit()
if not authusers(username):
print notanadmin()
quit()
h = hashlib.md5()
h.update(username)
h.update(cat)
h.update(find)
h.update(replace)
val = h.hexdigest()
#we all good?
#try:
x = subprocess.check_output('qsub -N {0} -l h_vmem=256M -j y -o $HOME/renamelogs/{0} /data/project/legobot/cgi-bin/massrename/rename.py "{1}" "{2}" "{3}" "{4}"'.format(
val, cat, find, replace, summary), stderr=subprocess.STDOUT, shell=True, env=environ)
#except subprocess.CalledProcessError:
# print jobnotsent()
# quit()
print succeeded(val) | 35.075949 | 168 | 0.64742 |
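
# --- Note on the job handoff (added) -------------------------------------------
# The md5 digest of (username + cat + find + replace) computed above serves as
# both the SGE job name (-N) and the log file name under $HOME/renamelogs/, and
# it is echoed back to the user by succeeded(val) as the reference ID, so a
# submitted rename job can be traced end to end.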
79422cca9687d15a9ddafc5d3c7c022091906451 | 2,607 | py | Python | allhub/repos/pages.py | srinivasreddy/allhub | ff20858c9984da5c4edd5043c39eed3b6d5d693d | [
"Apache-2.0"
] | 2 | 2019-10-07T15:46:33.000Z | 2019-11-26T04:30:39.000Z | allhub/repos/pages.py | srinivasreddy/allhub | ff20858c9984da5c4edd5043c39eed3b6d5d693d | [
"Apache-2.0"
] | 1 | 2020-03-09T14:44:04.000Z | 2020-03-09T14:44:04.000Z | allhub/repos/pages.py | srinivasreddy/allhub | ff20858c9984da5c4edd5043c39eed3b6d5d693d | [
"Apache-2.0"
] | 2 | 2019-10-08T05:22:37.000Z | 2019-10-08T06:20:47.000Z | from allhub.response import Response
class PagesSiteMixin:
def pages_site_info(self, owner, repo):
url = "/repos/{owner}/{repo}/pages".format(owner=owner, repo=repo)
self.response = Response(self.get(url), "PagesInfo")
return self.response.transform()
def enable_pages_site(self, owner, repo, source_branch, source_path):
params = {"source": {"branch": source_branch, "path": source_path}}
url = "/repos/{owner}/{repo}/pages".format(owner=owner, repo=repo)
self.response = Response(
self.post(
url,
params=params,
**{"Accept": "application/vnd.github.switcheroo-preview+json"},
),
"PagesInfo",
)
return self.response.transform()
def disable_pages_site(self, owner, repo):
url = "/repos/{owner}/{repo}/pages".format(owner=owner, repo=repo)
self.response = Response(
self.delete(
url, **{"Accept": "application/vnd.github.switcheroo-preview+json"}
),
"",
)
return self.response.status_code == 204
def update_pages_site(self, owner, repo, cname, source):
params = {"cname": cname, "source": source}
url = "/repos/{owner}/{repo}/pages".format(owner=owner, repo=repo)
self.response = Response(
self.put(
url,
params=params,
**{"Accept": "application/vnd.github.switcheroo-preview+json"},
),
"PagesInfo",
)
return self.response.status_code == 204
def request_pages_build(self, owner, repo):
url = "/repos/{owner}/{repo}/pages/builds".format(owner=owner, repo=repo)
self.response = Response(self.post(url), "PagesInfo")
return self.response.transform()
def pages_build(self, owner, repo):
url = "/repos/{owner}/{repo}/pages/builds".format(owner=owner, repo=repo)
self.response = Response(self.get(url), "PagesInfo")
return self.response.transform()
def latest_pages_build(self, owner, repo):
url = "/repos/{owner}/{repo}/pages/builds/latest".format(owner=owner, repo=repo)
self.response = Response(self.get(url), "PagesInfo")
return self.response.transform()
def get_pages_build(self, owner, repo, build_id):
url = "/repos/{owner}/{repo}/pages/builds/{build_id}".format(
owner=owner, repo=repo, build_id=build_id
)
self.response = Response(self.get(url), "PagesInfo")
return self.response.transform()
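
# --- Illustrative usage (added sketch) ------------------------------------------
# This mixin assumes it is combined with a client class that provides
# `get`/`post`/`put`/`delete` helpers, as the other mixins in this package do.
# With such a client instance (hypothetical `client` below), enabling and
# inspecting GitHub Pages might look like:
#
# client.enable_pages_site("octocat", "hello-world", "main", "/docs")
# info = client.pages_site_info("octocat", "hello-world")
# latest = client.latest_pages_build("octocat", "hello-world")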
| 38.910448 | 88 | 0.593402 |
79422cfeb94a6a0b53c7ab6d45eeb59103a47e94 | 1,755 | py | Python | demo.py | Minyus/MegaDepth | 89baf59897951b45361def8fecdc156a79d98ee2 | [
"MIT"
] | null | null | null | demo.py | Minyus/MegaDepth | 89baf59897951b45361def8fecdc156a79d98ee2 | [
"MIT"
] | null | null | null | demo.py | Minyus/MegaDepth | 89baf59897951b45361def8fecdc156a79d98ee2 | [
"MIT"
] | null | null | null | import torch
import sys
from torch.autograd import Variable
import numpy as np
# from options.train_options import TrainOptions
# opt = TrainOptions().parse() # set CUDA_VISIBLE_DEVICES before import torch
from options.test_options import TestOptions
opt = TestOptions().parse() # set CUDA_VISIBLE_DEVICES before import torch
from data.data_loader import CreateDataLoader
from models.models import create_model
from skimage import io
from skimage.transform import resize
img_path = 'demo.jpg'
model = create_model(opt)
input_height = 384
input_width = 512
def test_simple(model):
total_loss =0
toal_count = 0
print("============================= TEST ============================")
model.switch_to_eval()
img = np.float32(io.imread(img_path))/255.0
img = resize(img, (input_height, input_width), order = 1)
input_img = torch.from_numpy( np.transpose(img, (2,0,1)) ).contiguous().float()
input_img = input_img.unsqueeze(0)
input_images = Variable(input_img.cuda() )
pred_log_depth = model.netG.forward(input_images)
pred_log_depth = torch.squeeze(pred_log_depth)
pred_depth = torch.exp(pred_log_depth)
# visualize prediction using inverse depth, so that we don't need sky segmentation (if you want to use RGB map for visualization, \
# you have to run semantic segmentation to mask the sky first since the depth of sky is random from CNN)
pred_inv_depth = 1/pred_depth
pred_inv_depth = pred_inv_depth.data.cpu().numpy()
# you might also use percentile for better visualization
pred_inv_depth = pred_inv_depth/np.amax(pred_inv_depth)
io.imsave('demo.png', pred_inv_depth)
# print(pred_inv_depth.shape)
sys.exit()
test_simple(model)
print("We are done")
| 31.909091 | 135 | 0.720798 |
79422d2f969e76edaea0c5a1dbe2a3cfcbb95d6a | 7,492 | py | Python | anytask/courses/migrations/0009_auto__add_field_course_max_tasks_withoout_score_per_student.py | AnnaSvalova/anytask | f814b43c496f67a2efe2a150873a1ae32ad97449 | [
"MIT"
] | 1 | 2018-12-03T05:48:43.000Z | 2018-12-03T05:48:43.000Z | anytask/courses/migrations/0009_auto__add_field_course_max_tasks_withoout_score_per_student.py | AnnaSvalova/anytask | f814b43c496f67a2efe2a150873a1ae32ad97449 | [
"MIT"
] | null | null | null | anytask/courses/migrations/0009_auto__add_field_course_max_tasks_withoout_score_per_student.py | AnnaSvalova/anytask | f814b43c496f67a2efe2a150873a1ae32ad97449 | [
"MIT"
] | 1 | 2021-09-18T22:38:20.000Z | 2021-09-18T22:38:20.000Z | # encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'Course.max_tasks_withoout_score_per_student'
db.add_column('courses_course', 'max_tasks_withoout_score_per_student', self.gf('django.db.models.fields.IntegerField')(default=0, null=True, blank=True), keep_default=False)
def backwards(self, orm):
# Deleting field 'Course.max_tasks_withoout_score_per_student'
db.delete_column('courses_course', 'max_tasks_withoout_score_per_student')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2012, 8, 15, 19, 4, 13, 943963)'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2012, 8, 15, 19, 4, 13, 943858)'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'courses.course': {
'Meta': {'object_name': 'Course'},
'added_time': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'auto_now_add': 'True', 'blank': 'True'}),
'days_drop_from_blacklist': ('django.db.models.fields.IntegerField', [], {'default': '0', 'null': 'True', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['groups.Group']", 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'max_days_without_score': ('django.db.models.fields.IntegerField', [], {'default': '0', 'null': 'True', 'blank': 'True'}),
'max_tasks_withoout_score_per_student': ('django.db.models.fields.IntegerField', [], {'default': '0', 'null': 'True', 'blank': 'True'}),
'max_users_per_task': ('django.db.models.fields.IntegerField', [], {'default': '0', 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '256', 'db_index': 'True'}),
'students': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'course_students_set'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['auth.User']"}),
'take_policy': ('django.db.models.fields.IntegerField', [], {'default': '0', 'max_length': '1', 'null': 'True', 'db_index': 'True', 'blank': 'True'}),
'teachers': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'course_teachers_set'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['auth.User']"}),
'type': ('django.db.models.fields.IntegerField', [], {'default': '0', 'max_length': '1', 'null': 'True', 'db_index': 'True', 'blank': 'True'}),
'update_time': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'auto_now': 'True', 'blank': 'True'}),
'year': ('django.db.models.fields.related.ForeignKey', [], {'default': '2012', 'to': "orm['years.Year']"})
},
'groups.group': {
'Meta': {'unique_together': "(('year', 'name'),)", 'object_name': 'Group'},
'added_time': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '256', 'blank': 'True'}),
'students': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'}),
'update_time': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'auto_now': 'True', 'blank': 'True'}),
'year': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['years.Year']", 'blank': 'True'})
},
'years.year': {
'Meta': {'object_name': 'Year'},
'added_time': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'start_year': ('django.db.models.fields.IntegerField', [], {'unique': 'True', 'db_index': 'True'}),
'update_time': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'auto_now': 'True', 'blank': 'True'})
}
}
complete_apps = ['courses']
| 79.702128 | 206 | 0.579952 |
79422d5130b37f3b9dc077649969bccddeae1566 | 6,139 | py | Python | wordpress/models.py | observermedia/django-wordpress-rest | f0d96891d8ac5a69c8ba90e044876e756fad1bfe | [
"MIT"
] | 9 | 2015-08-10T23:12:27.000Z | 2020-02-15T08:35:09.000Z | wordpress/models.py | observermedia/django-wordpress-rest | f0d96891d8ac5a69c8ba90e044876e756fad1bfe | [
"MIT"
] | 1 | 2016-01-06T19:16:08.000Z | 2016-01-06T19:16:08.000Z | wordpress/models.py | observermedia/django-wordpress-rest | f0d96891d8ac5a69c8ba90e044876e756fad1bfe | [
"MIT"
] | 5 | 2016-02-29T09:29:58.000Z | 2020-06-23T16:10:58.000Z | from __future__ import unicode_literals
import collections
from django.db import models
from django.utils.translation import ugettext_lazy as _
from django.core.urlresolvers import reverse
from jsonfield import JSONField
class DateTracking(models.Model):
"""
An abstract model that adds tracking fields for creation and modification dates
"""
created_date = models.DateTimeField(blank=False, null=False, auto_now_add=True)
updated_date = models.DateTimeField(blank=False, null=False, auto_now=True)
class Meta:
abstract = True
class WordPressIDs(models.Model):
"""
An abstract model that adds basic WordPress API IDs
"""
site_id = models.IntegerField(blank=False, null=False,
help_text=_("The site ID on Wordpress.com"))
wp_id = models.IntegerField(blank=False, null=False,
help_text=_("The object ID on Wordpress.com"))
class Meta:
abstract = True
unique_together = ("wp_id", "site_id")
class Category(WordPressIDs, DateTracking, models.Model):
name = models.CharField(max_length=1000, blank=False, null=False)
slug = models.SlugField(max_length=1000, blank=False, null=False, unique=True)
description = models.TextField(blank=True, null=False)
post_count = models.IntegerField(blank=False, null=False)
parent_wp_id = models.IntegerField(blank=True, null=True)
class Meta:
verbose_name_plural = "categories"
def __unicode__(self):
return "{}: {}".format(self.pk, self.name)
class Tag(WordPressIDs, DateTracking, models.Model):
name = models.CharField(max_length=1000, blank=False, null=False)
slug = models.SlugField(max_length=1000, blank=False, null=False, unique=True)
description = models.TextField(blank=True, null=False)
post_count = models.IntegerField(blank=False, null=False)
def get_absolute_url(self):
return reverse('tag', kwargs={"tag_slug": self.slug})
def __unicode__(self):
return "{}: {}".format(self.pk, self.name)
class Author(WordPressIDs, DateTracking, models.Model):
login = models.CharField(max_length=255, blank=False, null=False)
email = models.CharField(max_length=1000, blank=False, null=False)
name = models.CharField(max_length=1000, blank=False, null=False)
nice_name = models.CharField(max_length=1000, blank=False, null=False)
url = models.CharField(max_length=1000, blank=False, null=False)
avatar_url = models.CharField(max_length=1000, blank=False, null=False)
profile_url = models.CharField(max_length=1000, blank=False, null=False)
def __unicode__(self):
return self.name
class Media(WordPressIDs, DateTracking, models.Model):
url = models.CharField(max_length=1000, blank=False, null=False,
help_text=_("The full URL to the media file"))
guid = models.CharField(max_length=1000, blank=True, null=True, db_index=True)
uploaded_date = models.DateTimeField(blank=False, null=False)
post_ID = models.IntegerField(blank=True, null=True,
help_text=_("ID of the post this media is attached to"))
file_name = models.CharField(max_length=500, blank=True, null=True)
file_extension = models.CharField(max_length=10, blank=True, null=True)
mime_type = models.CharField(max_length=200, blank=True, null=True)
width = models.IntegerField(blank=True, null=True)
height = models.IntegerField(blank=True, null=True)
title = models.TextField(blank=True, null=True)
caption = models.TextField(blank=True, null=True)
description = models.TextField(blank=True, null=True)
alt = models.TextField(blank=True, null=True)
exif = JSONField(load_kwargs={'object_pairs_hook': collections.OrderedDict})
def __unicode__(self):
return "{}: {}".format(self.pk, self.url)
class Post(WordPressIDs, DateTracking, models.Model):
author = models.ForeignKey("Author", blank=True, null=True)
post_date = models.DateTimeField(blank=False, null=False)
modified = models.DateTimeField(blank=False, null=False,
help_text=_("The post's most recent update time"))
title = models.TextField(blank=True, null=True)
url = models.CharField(max_length=1000, blank=False, null=False,
help_text=_("The full permalink URL to the post"))
short_url = models.CharField(max_length=1000, blank=False, null=False,
help_text=_("The wp.me short URL"))
content = models.TextField(blank=True, null=True)
excerpt = models.TextField(blank=True, null=True)
slug = models.SlugField(max_length=200, blank=True, null=True, db_index=True)
guid = models.CharField(max_length=1000, blank=True, null=True, db_index=True)
status = models.CharField(max_length=20, blank=True, null=True)
sticky = models.BooleanField(default=False,
help_text=_("Show this post at the top of the chronological list, even if old."))
password = models.CharField(max_length=1000, blank=True, null=True)
parent = JSONField(load_kwargs={'object_pairs_hook': collections.OrderedDict}, blank=True, null=True)
post_type = models.CharField(max_length=20, blank=True, null=True)
likes_enabled = models.NullBooleanField()
sharing_enabled = models.NullBooleanField()
like_count = models.IntegerField(blank=True, null=True)
global_ID = models.CharField(max_length=1000)
featured_image = models.CharField(max_length=1000)
post_thumbnail = JSONField(blank=True, null=True, load_kwargs={'object_pairs_hook': collections.OrderedDict})
attachments = models.ManyToManyField("Media", blank=True)
format = models.CharField(max_length=20)
menu_order = models.IntegerField(blank=True, null=True)
tags = models.ManyToManyField("Tag", blank=True)
categories = models.ManyToManyField("Category", blank=True)
metadata = JSONField(load_kwargs={'object_pairs_hook': collections.OrderedDict})
def __unicode__(self):
return "{}: {}".format(self.pk, self.slug)
| 46.157895 | 114 | 0.702395 |
79422f7dc973f48283af23bf792f0aad18d1fa05 | 1,155 | py | Python | django_dashboard/routers.py | keepexploring/smartbiogas | ca663435b05666113e3c0cb55e6f087c61497208 | [
"MIT"
] | null | null | null | django_dashboard/routers.py | keepexploring/smartbiogas | ca663435b05666113e3c0cb55e6f087c61497208 | [
"MIT"
] | 10 | 2017-11-24T12:15:40.000Z | 2022-02-10T06:41:32.000Z | django_dashboard/routers.py | keepexploring/smartbiogas | ca663435b05666113e3c0cb55e6f087c61497208 | [
"MIT"
] | null | null | null | class Dashboard_Router(object):
""" A router to control all database operations on models in
the dashboard application """
def db_for_read(self, model, **hints):
"""
        Send reads for django_dashboard models to the 'data' database.
"""
if model._meta.app_label == 'django_dashboard':
return 'data'
return None
def db_for_write(self, model, **hints):
"""
        Send writes for django_dashboard models to the 'data' database.
"""
if model._meta.app_label == 'django_dashboard':
return 'data'
return None
def allow_relation(self, obj1, obj2, **hints):
"""
        Allow relations when either object belongs to the django_dashboard app.
"""
if obj1._meta.app_label == 'django_dashboard' or \
obj2._meta.app_label == 'django_dashboard':
return True
return None
def allow_migrate(self, db, app_label, model_name=None, **hints):
"""
        Ensure django_dashboard models are only migrated to the 'data'
        database.
"""
if app_label == 'django_dashboard':
return db == 'data'
return None | 31.216216 | 69 | 0.575758 |
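
# --- Illustrative wiring (added sketch) -------------------------------------------
# For Django to consult this router it must be listed in settings.py; the dotted
# path below assumes the app package is importable as `django_dashboard` and that
# a 'data' alias exists in DATABASES alongside 'default':
#
# DATABASE_ROUTERS = ['django_dashboard.routers.Dashboard_Router']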
79422ff0f4c8118b2719b183fc11414047c664ee | 256 | py | Python | utils/scraper/translation.py | Nuzair46/Galactic-Empire-Bot | 2f2d2681d0a130a87dc9cff6ad5feba0af8b6d66 | [
"MIT"
] | null | null | null | utils/scraper/translation.py | Nuzair46/Galactic-Empire-Bot | 2f2d2681d0a130a87dc9cff6ad5feba0af8b6d66 | [
"MIT"
] | null | null | null | utils/scraper/translation.py | Nuzair46/Galactic-Empire-Bot | 2f2d2681d0a130a87dc9cff6ad5feba0af8b6d66 | [
"MIT"
] | null | null | null | import googletrans
from googletrans import Translator
class language():
def translateto(text, lang):
translator = Translator()
final = translator.translate(text, dest = lang)
language = googletrans.LANGUAGES
return final.text,language[final.src]
| 25.6 | 49 | 0.773438 |
794230752a697bf28da367ab9bb3af8bd728e8ab | 1,594 | py | Python | Python/paint-house.py | RideGreg/LeetCode | b70818b1e6947bf29519a24f78816e022ebab59e | [
"MIT"
] | 1 | 2022-01-30T06:55:28.000Z | 2022-01-30T06:55:28.000Z | Python/paint-house.py | RideGreg/LeetCode | b70818b1e6947bf29519a24f78816e022ebab59e | [
"MIT"
] | null | null | null | Python/paint-house.py | RideGreg/LeetCode | b70818b1e6947bf29519a24f78816e022ebab59e | [
"MIT"
] | 1 | 2021-12-31T03:56:39.000Z | 2021-12-31T03:56:39.000Z | # Time: O(n)
# Space: O(1)
# 256
# There is a row of n houses; each house can be painted with one of three colors: red, blue, or green.
# The cost of painting each house with a certain color is different. You have to paint all the houses
# such that no two adjacent houses have the same color.
#
# The cost of painting each house with a certain color is represented by a n x 3 cost matrix. For example,
# costs[0][0] is the cost of painting house 0 with color red; costs[1][2] is the cost of
# painting house 1 with color green, and so on... Find the minimum cost to paint all houses.
class Solution(object):
def minCost(self, costs):
"""
:type costs: List[List[int]]
:rtype: int
"""
ans = [0] * 3
for c in costs:
ans = [c[0] + min(ans[1], ans[2]),
c[1] + min(ans[2], ans[0]),
c[2] + min(ans[0], ans[1])]
return min(ans)
# Time: O(n)
# Space: O(n)
class Solution2(object): # modified input list
def minCost(self, costs):
"""
:type costs: List[List[int]]
:rtype: int
"""
if not costs:
return 0
n = len(costs)
for i in range(1, n):
costs[i][0] += min(costs[i - 1][1], costs[i - 1][2])
costs[i][1] += min(costs[i - 1][0], costs[i - 1][2])
costs[i][2] += min(costs[i - 1][0], costs[i - 1][1])
return min(costs[n - 1])
print(Solution().minCost([[14,2,11],[11,14,5],[14,3,10]])) # 10 = blue 2 + green 5 + blue 3
print(Solution().minCost([[1,2,3],[1,4,6]])) # 3
| 33.208333 | 106 | 0.550188 |
794231b561427ba75a1adc54c4758a74b68fbb0d | 4,716 | py | Python | api/metricsdata_test.py | liamnewmarch/chromium-dashboard | 0e10b98554a85bd8edb7c61f8fa0494821fe005f | [
"Apache-2.0"
] | 450 | 2015-01-31T03:16:15.000Z | 2022-03-25T14:03:26.000Z | api/metricsdata_test.py | nirajbhutada/chromium-dashboard | 6fcec787cad34350b50806f7ffde4238942d7ec3 | [
"Apache-2.0"
] | 1,336 | 2015-01-16T21:41:33.000Z | 2022-03-30T16:28:24.000Z | api/metricsdata_test.py | Fmichi50/chromium-dashboard | e77396a8cc5a5f10f46ff9b9456f6aa0b2dc94d8 | [
"Apache-2.0"
] | 314 | 2015-01-28T00:17:45.000Z | 2022-03-28T05:09:46.000Z |
# Copyright 2020 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import testing_config # Must be imported first
import datetime
import mock
import flask
# from google.appengine.api import users
from framework import users
from api import metricsdata
from internals import models
test_app = flask.Flask(__name__)
class MetricsFunctionTests(testing_config.CustomTestCase):
def setUp(self):
self.datapoint = models.StableInstance(
day_percentage=0.0123456789, date=datetime.date.today(),
bucket_id=1, property_name='prop')
def test_truncate_day_percentage(self):
updated_datapoint = metricsdata._truncate_day_percentage(self.datapoint)
self.assertEqual(0.01234568, updated_datapoint.day_percentage)
def test_is_googler__anon(self):
testing_config.sign_out()
user = users.get_current_user()
self.assertFalse(metricsdata._is_googler(user))
def test_is_googler__nongoogler(self):
testing_config.sign_in('[email protected]', 111)
user = users.get_current_user()
self.assertFalse(metricsdata._is_googler(user))
def test_is_googler__googler(self):
testing_config.sign_in('[email protected]', 111)
user = users.get_current_user()
self.assertTrue(metricsdata._is_googler(user))
def test_clean_data__no_op(self):
testing_config.sign_in('[email protected]', 111)
datapoints = [self.datapoint]
updated_datapoints = metricsdata._clean_data(datapoints)
self.assertEqual(0.0123456789, list(updated_datapoints)[0].day_percentage)
def test_clean_data__clean_datapoints(self):
testing_config.sign_out()
datapoints = [self.datapoint]
updated_datapoints = metricsdata._clean_data(datapoints)
self.assertEqual(0.01234568, list(updated_datapoints)[0].day_percentage)
class PopularityTimelineHandlerTests(testing_config.CustomTestCase):
def setUp(self):
self.handler = metricsdata.PopularityTimelineHandler()
self.datapoint = models.StableInstance(
day_percentage=0.0123456789, date=datetime.date.today(),
bucket_id=1, property_name='prop')
self.datapoint.put()
def tearDown(self):
self.datapoint.key.delete()
def test_make_query(self):
actual_query = self.handler.make_query(1)
self.assertEqual(actual_query.kind, models.StableInstance._get_kind())
def test_get_template_data__bad_bucket(self):
url = '/data/timeline/csspopularity?bucket_id=not-a-number'
with test_app.test_request_context(url):
actual = self.handler.get_template_data()
self.assertEqual([], actual)
def test_get_template_data__normal(self):
testing_config.sign_out()
url = '/data/timeline/csspopularity?bucket_id=1'
with test_app.test_request_context(url):
actual_datapoints = self.handler.get_template_data()
self.assertEqual(1, len(actual_datapoints))
self.assertEqual(0.01234568, actual_datapoints[0]['day_percentage'])
# TODO(jrobbins): Test for metricsdata.FeatureHandler.
class FeatureBucketsHandlerTest(testing_config.CustomTestCase):
def setUp(self):
self.handler = metricsdata.FeatureBucketsHandler()
self.prop_1 = models.CssPropertyHistogram(
bucket_id=1, property_name='b prop')
self.prop_1.put()
self.prop_2 = models.CssPropertyHistogram(
bucket_id=2, property_name='a prop')
self.prop_2.put()
self.prop_3 = models.FeatureObserverHistogram(
bucket_id=3, property_name='b feat')
self.prop_3.put()
self.prop_4 = models.FeatureObserverHistogram(
bucket_id=4, property_name='a feat')
self.prop_4.put()
def tearDown(self):
self.prop_1.key.delete()
self.prop_2.key.delete()
self.prop_3.key.delete()
self.prop_4.key.delete()
def test_get_template_data__css(self):
with test_app.test_request_context('/data/blink/cssprops'):
actual_buckets = self.handler.get_template_data('cssprops')
self.assertEqual(
[(2, 'a prop'), (1, 'b prop')],
actual_buckets)
def test_get_template_data__js(self):
with test_app.test_request_context('/data/blink/features'):
actual_buckets = self.handler.get_template_data('features')
self.assertEqual(
[(4, 'a feat'), (3, 'b feat')],
actual_buckets)
| 33.211268 | 78 | 0.744275 |
7942329c36169eed3489b27254df102ad7c629c8 | 7,259 | py | Python | sdk/python/pulumi_google_native/jobs/v4/tenant.py | AaronFriel/pulumi-google-native | 75d1cda425e33d4610348972cd70bddf35f1770d | [
"Apache-2.0"
] | 44 | 2021-04-18T23:00:48.000Z | 2022-02-14T17:43:15.000Z | sdk/python/pulumi_google_native/jobs/v4/tenant.py | AaronFriel/pulumi-google-native | 75d1cda425e33d4610348972cd70bddf35f1770d | [
"Apache-2.0"
] | 354 | 2021-04-16T16:48:39.000Z | 2022-03-31T17:16:39.000Z | sdk/python/pulumi_google_native/jobs/v4/tenant.py | AaronFriel/pulumi-google-native | 75d1cda425e33d4610348972cd70bddf35f1770d | [
"Apache-2.0"
] | 8 | 2021-04-24T17:46:51.000Z | 2022-01-05T10:40:21.000Z | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
__all__ = ['TenantArgs', 'Tenant']
@pulumi.input_type
class TenantArgs:
def __init__(__self__, *,
external_id: pulumi.Input[str],
name: Optional[pulumi.Input[str]] = None,
project: Optional[pulumi.Input[str]] = None):
"""
The set of arguments for constructing a Tenant resource.
:param pulumi.Input[str] external_id: Client side tenant identifier, used to uniquely identify the tenant. The maximum number of allowed characters is 255.
:param pulumi.Input[str] name: Required during tenant update. The resource name for a tenant. This is generated by the service when a tenant is created. The format is "projects/{project_id}/tenants/{tenant_id}", for example, "projects/foo/tenants/bar".
"""
pulumi.set(__self__, "external_id", external_id)
if name is not None:
pulumi.set(__self__, "name", name)
if project is not None:
pulumi.set(__self__, "project", project)
@property
@pulumi.getter(name="externalId")
def external_id(self) -> pulumi.Input[str]:
"""
Client side tenant identifier, used to uniquely identify the tenant. The maximum number of allowed characters is 255.
"""
return pulumi.get(self, "external_id")
@external_id.setter
def external_id(self, value: pulumi.Input[str]):
pulumi.set(self, "external_id", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
Required during tenant update. The resource name for a tenant. This is generated by the service when a tenant is created. The format is "projects/{project_id}/tenants/{tenant_id}", for example, "projects/foo/tenants/bar".
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@property
@pulumi.getter
def project(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "project")
@project.setter
def project(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "project", value)
class Tenant(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
external_id: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
project: Optional[pulumi.Input[str]] = None,
__props__=None):
"""
Creates a new tenant entity.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] external_id: Client side tenant identifier, used to uniquely identify the tenant. The maximum number of allowed characters is 255.
:param pulumi.Input[str] name: Required during tenant update. The resource name for a tenant. This is generated by the service when a tenant is created. The format is "projects/{project_id}/tenants/{tenant_id}", for example, "projects/foo/tenants/bar".
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: TenantArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
Creates a new tenant entity.
:param str resource_name: The name of the resource.
:param TenantArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(TenantArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
external_id: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
project: Optional[pulumi.Input[str]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = TenantArgs.__new__(TenantArgs)
if external_id is None and not opts.urn:
raise TypeError("Missing required property 'external_id'")
__props__.__dict__["external_id"] = external_id
__props__.__dict__["name"] = name
__props__.__dict__["project"] = project
super(Tenant, __self__).__init__(
'google-native:jobs/v4:Tenant',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'Tenant':
"""
Get an existing Tenant resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = TenantArgs.__new__(TenantArgs)
__props__.__dict__["external_id"] = None
__props__.__dict__["name"] = None
return Tenant(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="externalId")
def external_id(self) -> pulumi.Output[str]:
"""
Client side tenant identifier, used to uniquely identify the tenant. The maximum number of allowed characters is 255.
"""
return pulumi.get(self, "external_id")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
Required during tenant update. The resource name for a tenant. This is generated by the service when a tenant is created. The format is "projects/{project_id}/tenants/{tenant_id}", for example, "projects/foo/tenants/bar".
"""
return pulumi.get(self, "name")
| 43.467066 | 260 | 0.642375 |
79423389242ae0c8a41f0df6bb6087f409852cf2 | 14,095 | py | Python | python/ray/remote_function.py | yorickvanzweeden/ray | 79a9d6d5171071190f531121be310fb4d203cf11 | [
"Apache-2.0"
] | null | null | null | python/ray/remote_function.py | yorickvanzweeden/ray | 79a9d6d5171071190f531121be310fb4d203cf11 | [
"Apache-2.0"
] | 27 | 2021-08-07T07:07:24.000Z | 2022-03-26T07:05:43.000Z | python/ray/remote_function.py | yorickvanzweeden/ray | 79a9d6d5171071190f531121be310fb4d203cf11 | [
"Apache-2.0"
] | null | null | null | import logging
import inspect
from functools import wraps
from ray import cloudpickle as pickle
from ray._raylet import PythonFunctionDescriptor
from ray import cross_language, Language
from ray._private.client_mode_hook import client_mode_convert_function
from ray._private.client_mode_hook import client_mode_should_convert
from ray.util.placement_group import (
PlacementGroup,
check_placement_group_index,
get_current_placement_group,
)
import ray._private.signature
import ray._private.runtime_env as runtime_support
from ray.util.tracing.tracing_helper import (_tracing_task_invocation,
_inject_tracing_into_function)
# Default parameters for remote functions.
DEFAULT_REMOTE_FUNCTION_CPUS = 1
DEFAULT_REMOTE_FUNCTION_NUM_RETURN_VALS = 1
DEFAULT_REMOTE_FUNCTION_MAX_CALLS = 0
# Normal tasks may be retried on failure this many times.
# TODO(swang): Allow this to be set globally for an application.
DEFAULT_REMOTE_FUNCTION_NUM_TASK_RETRIES = 3
logger = logging.getLogger(__name__)
class RemoteFunction:
"""A remote function.
This is a decorated function. It can be used to spawn tasks.
Attributes:
_language: The target language.
_function: The original function.
_function_descriptor: The function descriptor. This is not defined
until the remote function is first invoked because that is when the
function is pickled, and the pickled function is used to compute
the function descriptor.
_function_name: The module and function name.
_num_cpus: The default number of CPUs to use for invocations of this
remote function.
_num_gpus: The default number of GPUs to use for invocations of this
remote function.
_memory: The heap memory request for this task.
_object_store_memory: The object store memory request for this task.
_resources: The default custom resource requirements for invocations of
this remote function.
_num_returns: The default number of return values for invocations
of this remote function.
_max_calls: The number of times a worker can execute this function
before exiting.
_decorator: An optional decorator that should be applied to the remote
function invocation (as opposed to the function execution) before
invoking the function. The decorator must return a function that
takes in two arguments ("args" and "kwargs"). In most cases, it
should call the function that was passed into the decorator and
return the resulting ObjectRefs. For an example, see
"test_decorated_function" in "python/ray/tests/test_basic.py".
_function_signature: The function signature.
_last_export_session_and_job: A pair of the last exported session
and job to help us to know whether this function was exported.
This is an imperfect mechanism used to determine if we need to
export the remote function again. It is imperfect in the sense that
the actor class definition could be exported multiple times by
different workers.
"""
def __init__(self, language, function, function_descriptor, num_cpus,
num_gpus, memory, object_store_memory, resources,
accelerator_type, num_returns, max_calls, max_retries):
if inspect.iscoroutinefunction(function):
            raise ValueError("'async def' should not be used for remote "
                             "tasks. You can wrap the async function with "
                             "`asyncio.get_event_loop().run_until_complete(f())`. "
                             "See more at docs.ray.io/async_api.html")
self._language = language
self._function = _inject_tracing_into_function(function)
self._function_name = (function.__module__ + "." + function.__name__)
self._function_descriptor = function_descriptor
self._is_cross_language = language != Language.PYTHON
self._num_cpus = (DEFAULT_REMOTE_FUNCTION_CPUS
if num_cpus is None else num_cpus)
self._num_gpus = num_gpus
self._memory = memory
if object_store_memory is not None:
raise NotImplementedError(
"setting object_store_memory is not implemented for tasks")
self._object_store_memory = None
self._resources = resources
self._accelerator_type = accelerator_type
self._num_returns = (DEFAULT_REMOTE_FUNCTION_NUM_RETURN_VALS
if num_returns is None else num_returns)
self._max_calls = (DEFAULT_REMOTE_FUNCTION_MAX_CALLS
if max_calls is None else max_calls)
self._max_retries = (DEFAULT_REMOTE_FUNCTION_NUM_TASK_RETRIES
if max_retries is None else max_retries)
self._decorator = getattr(function, "__ray_invocation_decorator__",
None)
self._function_signature = ray._private.signature.extract_signature(
self._function)
self._last_export_session_and_job = None
# Override task.remote's signature and docstring
@wraps(function)
def _remote_proxy(*args, **kwargs):
return self._remote(args=args, kwargs=kwargs)
self.remote = _remote_proxy
def __call__(self, *args, **kwargs):
raise TypeError("Remote functions cannot be called directly. Instead "
f"of running '{self._function_name}()', "
f"try '{self._function_name}.remote()'.")
def options(self,
args=None,
kwargs=None,
num_returns=None,
num_cpus=None,
num_gpus=None,
memory=None,
object_store_memory=None,
accelerator_type=None,
resources=None,
max_retries=None,
placement_group="default",
placement_group_bundle_index=-1,
placement_group_capture_child_tasks=None,
runtime_env=None,
override_environment_variables=None,
name=""):
"""Configures and overrides the task invocation parameters.
The arguments are the same as those that can be passed to
:obj:`ray.remote`.
Examples:
.. code-block:: python
@ray.remote(num_gpus=1, max_calls=1, num_returns=2)
def f():
return 1, 2
# Task f will require 2 gpus instead of 1.
g = f.options(num_gpus=2, max_calls=None)
"""
func_cls = self
class FuncWrapper:
def remote(self, *args, **kwargs):
return func_cls._remote(
args=args,
kwargs=kwargs,
num_returns=num_returns,
num_cpus=num_cpus,
num_gpus=num_gpus,
memory=memory,
object_store_memory=object_store_memory,
accelerator_type=accelerator_type,
resources=resources,
max_retries=max_retries,
placement_group=placement_group,
placement_group_bundle_index=placement_group_bundle_index,
placement_group_capture_child_tasks=(
placement_group_capture_child_tasks),
runtime_env=runtime_env,
override_environment_variables=(
override_environment_variables),
name=name)
return FuncWrapper()
@_tracing_task_invocation
def _remote(self,
args=None,
kwargs=None,
num_returns=None,
num_cpus=None,
num_gpus=None,
memory=None,
object_store_memory=None,
accelerator_type=None,
resources=None,
max_retries=None,
placement_group="default",
placement_group_bundle_index=-1,
placement_group_capture_child_tasks=None,
runtime_env=None,
override_environment_variables=None,
name=""):
"""Submit the remote function for execution."""
if client_mode_should_convert():
return client_mode_convert_function(
self,
args,
kwargs,
num_returns=num_returns,
num_cpus=num_cpus,
num_gpus=num_gpus,
memory=memory,
object_store_memory=object_store_memory,
accelerator_type=accelerator_type,
resources=resources,
max_retries=max_retries,
placement_group=placement_group,
placement_group_bundle_index=placement_group_bundle_index,
placement_group_capture_child_tasks=(
placement_group_capture_child_tasks),
runtime_env=runtime_env,
override_environment_variables=override_environment_variables,
name=name)
worker = ray.worker.global_worker
worker.check_connected()
# If this function was not exported in this session and job, we need to
# export this function again, because the current GCS doesn't have it.
if not self._is_cross_language and \
self._last_export_session_and_job != \
worker.current_session_and_job:
# There is an interesting question here. If the remote function is
# used by a subsequent driver (in the same script), should the
# second driver pickle the function again? If yes, then the remote
# function definition can differ in the second driver (e.g., if
# variables in its closure have changed). We probably want the
# behavior of the remote function in the second driver to be
# independent of whether or not the function was invoked by the
# first driver. This is an argument for repickling the function,
# which we do here.
self._pickled_function = pickle.dumps(self._function)
self._function_descriptor = PythonFunctionDescriptor.from_function(
self._function, self._pickled_function)
self._last_export_session_and_job = worker.current_session_and_job
worker.function_actor_manager.export(self)
kwargs = {} if kwargs is None else kwargs
args = [] if args is None else args
if num_returns is None:
num_returns = self._num_returns
if max_retries is None:
max_retries = self._max_retries
if placement_group_capture_child_tasks is None:
placement_group_capture_child_tasks = (
worker.should_capture_child_tasks_in_placement_group)
if placement_group == "default":
if placement_group_capture_child_tasks:
placement_group = get_current_placement_group()
else:
placement_group = PlacementGroup.empty()
if not placement_group:
placement_group = PlacementGroup.empty()
check_placement_group_index(placement_group,
placement_group_bundle_index)
resources = ray._private.utils.resources_from_resource_arguments(
self._num_cpus, self._num_gpus, self._memory,
self._object_store_memory, self._resources, self._accelerator_type,
num_cpus, num_gpus, memory, object_store_memory, resources,
accelerator_type)
if runtime_env:
runtime_env_dict = runtime_support.RuntimeEnvDict(
runtime_env).get_parsed_dict()
else:
runtime_env_dict = {}
if override_environment_variables:
logger.warning("override_environment_variables is deprecated and "
"will be removed in Ray 1.5. Please use "
".options(runtime_env={'env_vars': {...}}).remote()"
"instead.")
def invocation(args, kwargs):
if self._is_cross_language:
list_args = cross_language.format_args(worker, args, kwargs)
elif not args and not kwargs and not self._function_signature:
list_args = []
else:
list_args = ray._private.signature.flatten_args(
self._function_signature, args, kwargs)
if worker.mode == ray.worker.LOCAL_MODE:
assert not self._is_cross_language, \
"Cross language remote function " \
"cannot be executed locally."
object_refs = worker.core_worker.submit_task(
self._language,
self._function_descriptor,
list_args,
name,
num_returns,
resources,
max_retries,
placement_group.id,
placement_group_bundle_index,
placement_group_capture_child_tasks,
worker.debugger_breakpoint,
runtime_env_dict,
override_environment_variables=override_environment_variables
or dict())
# Reset worker's debug context from the last "remote" command
# (which applies only to this .remote call).
worker.debugger_breakpoint = b""
if len(object_refs) == 1:
return object_refs[0]
elif len(object_refs) > 1:
return object_refs
if self._decorator is not None:
invocation = self._decorator(invocation)
return invocation(args, kwargs)
| 43.236196 | 79 | 0.614899 |
794234085b4fc411d6780bb2e8768a95f03ea14f | 10,610 | py | Python | pygsp/learning.py | dataronio/pygsp | 0f35fbf2623d32060fe2f709715a88a818528e26 | [
"BSD-3-Clause"
] | 341 | 2015-09-07T16:59:41.000Z | 2022-03-23T20:27:55.000Z | pygsp/learning.py | dataronio/pygsp | 0f35fbf2623d32060fe2f709715a88a818528e26 | [
"BSD-3-Clause"
] | 96 | 2017-04-16T04:38:57.000Z | 2021-11-02T18:36:41.000Z | pygsp/learning.py | dataronio/pygsp | 0f35fbf2623d32060fe2f709715a88a818528e26 | [
"BSD-3-Clause"
] | 95 | 2016-03-25T14:35:28.000Z | 2022-02-08T18:54:29.000Z | # -*- coding: utf-8 -*-
r"""
The :mod:`pygsp.learning` module provides functions to solve learning problems.
Semi-supervised learning
========================
These functions help solve a semi-supervised learning problem, i.e., a
problem where only some values of a graph signal are known and the others
must be inferred.
.. autosummary::
regression_tikhonov
classification_tikhonov
classification_tikhonov_simplex
"""
import numpy as np
from scipy import sparse
def _import_pyunlocbox():
try:
from pyunlocbox import functions, solvers
except Exception as e:
raise ImportError('Cannot import pyunlocbox, which is needed to solve '
'this optimization problem. Try to install it with '
'pip (or conda) install pyunlocbox. '
'Original exception: {}'.format(e))
return functions, solvers
def _to_logits(x):
logits = np.zeros([len(x), np.max(x)+1])
logits[range(len(x)), x] = 1
return logits
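# Illustration (added note, not part of the original module): _to_logits maps
# integer class labels to one-hot rows, e.g. _to_logits(np.array([0, 2, 1]))
# returns [[1, 0, 0], [0, 0, 1], [0, 1, 0]].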
def classification_tikhonov_simplex(G, y, M, tau=0.1, **kwargs):
r"""Solve a classification problem on graph via Tikhonov minimization
with simple constraints.
The function first transforms :math:`y` in logits :math:`Y`, then solves
.. math:: \operatorname*{arg min}_X \| M X - Y \|_2^2 + \tau \ tr(X^T L X)
\text{ s.t. } sum(X) = 1 \text{ and } X >= 0,
where :math:`X` and :math:`Y` are logits.
Parameters
----------
G : :class:`pygsp.graphs.Graph`
y : array, length G.n_vertices
Measurements.
M : array of boolean, length G.n_vertices
Masking vector.
tau : float
Regularization parameter.
kwargs : dict
Parameters for :func:`pyunlocbox.solvers.solve`.
Returns
-------
logits : array, length G.n_vertices
The logits :math:`X`.
Examples
--------
>>> from pygsp import graphs, learning
>>> import matplotlib.pyplot as plt
>>>
>>> G = graphs.Logo()
>>> G.estimate_lmax()
Create a ground truth signal:
>>> signal = np.zeros(G.n_vertices)
>>> signal[G.info['idx_s']] = 1
>>> signal[G.info['idx_p']] = 2
Construct a measurement signal from a binary mask:
>>> rng = np.random.default_rng(42)
>>> mask = rng.uniform(0, 1, G.n_vertices) > 0.5
>>> measures = signal.copy()
>>> measures[~mask] = np.nan
Solve the classification problem by reconstructing the signal:
>>> recovery = learning.classification_tikhonov_simplex(
... G, measures, mask, tau=0.1, verbosity='NONE')
Plot the results.
Note that we recover the class with ``np.argmax(recovery, axis=1)``.
>>> prediction = np.argmax(recovery, axis=1)
>>> fig, ax = plt.subplots(2, 3, sharey=True, figsize=(10, 6))
>>> _ = G.plot(signal, ax=ax[0, 0], title='Ground truth')
>>> _ = G.plot(measures, ax=ax[0, 1], title='Measurements')
>>> _ = G.plot(prediction, ax=ax[0, 2], title='Recovered class')
>>> _ = G.plot(recovery[:, 0], ax=ax[1, 0], title='Logit 0')
>>> _ = G.plot(recovery[:, 1], ax=ax[1, 1], title='Logit 1')
>>> _ = G.plot(recovery[:, 2], ax=ax[1, 2], title='Logit 2')
>>> _ = fig.tight_layout()
"""
functions, solvers = _import_pyunlocbox()
if tau <= 0:
raise ValueError('Tau should be greater than 0.')
y = y.copy()
y[M == False] = 0
Y = _to_logits(y.astype(int))
Y[M == False, :] = 0
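    # Descriptive note (added for clarity): proj_simplex below projects each row
    # of the logit matrix onto the probability simplex (non-negative entries
    # summing to 1) using the usual sort-and-threshold scheme, with a bisection
    # search for the pivot index that determines the threshold `lam`.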
def proj_simplex(y):
d = y.shape[1]
a = np.ones(d)
idx = np.argsort(y)
def evalpL(y, k, idx):
return np.sum(y[idx[k:]] - y[idx[k]]) - 1
def bisectsearch(idx, y):
idxL, idxH = 0, d-1
L = evalpL(y, idxL, idx)
H = evalpL(y, idxH, idx)
if L < 0:
return idxL
while (idxH-idxL) > 1:
iMid = int((idxL + idxH) / 2)
M = evalpL(y, iMid, idx)
if M > 0:
idxL, L = iMid, M
else:
idxH, H = iMid, M
return idxH
def proj(idx, y):
k = bisectsearch(idx, y)
lam = (np.sum(y[idx[k:]]) - 1) / (d - k)
return np.maximum(0, y - lam)
x = np.empty_like(y)
for i in range(len(y)):
x[i] = proj(idx[i], y[i])
# x = np.stack(map(proj, idx, y))
return x
def smooth_eval(x):
xTLx = np.sum(x * (G.L.dot(x)))
e = M * ((M * x.T) - Y.T)
l2 = np.sum(e * e)
return tau * xTLx + l2
def smooth_grad(x):
return 2 * ((M * (M * x.T - Y.T)).T + tau * G.L * x)
f1 = functions.func()
f1._eval = smooth_eval
f1._grad = smooth_grad
f2 = functions.func()
f2._eval = lambda x: 0 # Indicator functions evaluate to zero.
f2._prox = lambda x, step: proj_simplex(x)
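    # Note (added for clarity): the gradient of the smooth term is
    # 2*(M*(M*x - Y)) + 2*tau*L*x, whose Lipschitz constant is bounded by
    # 2*(1 + tau*lmax); the step size below is the inverse of that bound.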
step = 0.5 / (1 + tau * G.lmax)
solver = solvers.forward_backward(step=step)
ret = solvers.solve([f1, f2], Y.copy(), solver, **kwargs)
return ret['sol']
def classification_tikhonov(G, y, M, tau=0):
r"""Solve a classification problem on graph via Tikhonov minimization.
The function first transforms :math:`y` in logits :math:`Y`, then solves
.. math:: \operatorname*{arg min}_X \| M X - Y \|_2^2 + \tau \ tr(X^T L X)
if :math:`\tau > 0`, and
.. math:: \operatorname*{arg min}_X tr(X^T L X) \ \text{ s. t. } \ Y = M X
otherwise, where :math:`X` and :math:`Y` are logits.
The function returns the maximum of the logits.
Parameters
----------
G : :class:`pygsp.graphs.Graph`
y : array, length G.n_vertices
Measurements.
M : array of boolean, length G.n_vertices
Masking vector.
tau : float
Regularization parameter.
Returns
-------
logits : array, length G.n_vertices
The logits :math:`X`.
Examples
--------
>>> from pygsp import graphs, learning
>>> import matplotlib.pyplot as plt
>>>
>>> G = graphs.Logo()
Create a ground truth signal:
>>> signal = np.zeros(G.n_vertices)
>>> signal[G.info['idx_s']] = 1
>>> signal[G.info['idx_p']] = 2
Construct a measurement signal from a binary mask:
>>> rng = np.random.default_rng(42)
>>> mask = rng.uniform(0, 1, G.n_vertices) > 0.5
>>> measures = signal.copy()
>>> measures[~mask] = np.nan
Solve the classification problem by reconstructing the signal:
>>> recovery = learning.classification_tikhonov(G, measures, mask, tau=0)
Plot the results.
Note that we recover the class with ``np.argmax(recovery, axis=1)``.
>>> prediction = np.argmax(recovery, axis=1)
>>> fig, ax = plt.subplots(2, 3, sharey=True, figsize=(10, 6))
>>> _ = G.plot(signal, ax=ax[0, 0], title='Ground truth')
>>> _ = G.plot(measures, ax=ax[0, 1], title='Measurements')
>>> _ = G.plot(prediction, ax=ax[0, 2], title='Recovered class')
>>> _ = G.plot(recovery[:, 0], ax=ax[1, 0], title='Logit 0')
>>> _ = G.plot(recovery[:, 1], ax=ax[1, 1], title='Logit 1')
>>> _ = G.plot(recovery[:, 2], ax=ax[1, 2], title='Logit 2')
>>> _ = fig.tight_layout()
"""
y = y.copy()
y[M == False] = 0
Y = _to_logits(y.astype(int))
return regression_tikhonov(G, Y, M, tau)
def regression_tikhonov(G, y, M, tau=0):
r"""Solve a regression problem on graph via Tikhonov minimization.
The function solves
.. math:: \operatorname*{arg min}_x \| M x - y \|_2^2 + \tau \ x^T L x
if :math:`\tau > 0`, and
.. math:: \operatorname*{arg min}_x x^T L x \ \text{ s. t. } \ y = M x
otherwise.
Parameters
----------
G : :class:`pygsp.graphs.Graph`
y : array, length G.n_vertices
Measurements.
M : array of boolean, length G.n_vertices
Masking vector.
tau : float
Regularization parameter.
Returns
-------
x : array, length G.n_vertices
Recovered values :math:`x`.
Examples
--------
>>> from pygsp import graphs, filters, learning
>>> import matplotlib.pyplot as plt
>>>
>>> G = graphs.Sensor(N=100, seed=42)
>>> G.estimate_lmax()
Create a smooth ground truth signal:
>>> filt = lambda x: 1 / (1 + 10*x)
>>> filt = filters.Filter(G, filt)
>>> rng = np.random.default_rng(42)
>>> signal = filt.analyze(rng.normal(size=G.n_vertices))
Construct a measurement signal from a binary mask:
>>> mask = rng.uniform(0, 1, G.n_vertices) > 0.5
>>> measures = signal.copy()
>>> measures[~mask] = np.nan
Solve the regression problem by reconstructing the signal:
>>> recovery = learning.regression_tikhonov(G, measures, mask, tau=0)
Plot the results:
>>> fig, (ax1, ax2, ax3) = plt.subplots(1, 3, sharey=True, figsize=(10, 3))
>>> limits = [signal.min(), signal.max()]
>>> _ = G.plot(signal, ax=ax1, limits=limits, title='Ground truth')
>>> _ = G.plot(measures, ax=ax2, limits=limits, title='Measures')
>>> _ = G.plot(recovery, ax=ax3, limits=limits, title='Recovery')
>>> _ = fig.tight_layout()
"""
if tau > 0:
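        # Implementation note (added for clarity): for tau > 0 the optimality
        # condition is the linear system (diag(M) + tau*L) x = M*y, solved below
        # by conjugate gradient (sparse L) or a dense solve; the tau == 0 branch
        # further down keeps the measured values and solves L_uu x_u = W_ul y_l
        # for the unknown ones.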
y = y.copy()
y[M == False] = 0
if sparse.issparse(G.L):
def Op(x):
return (M * x.T).T + tau * (G.L.dot(x))
LinearOp = sparse.linalg.LinearOperator([G.N, G.N], Op)
if y.ndim > 1:
sol = np.empty(shape=y.shape)
res = np.empty(shape=y.shape[1])
for i in range(y.shape[1]):
sol[:, i], res[i] = sparse.linalg.cg(
LinearOp, y[:, i])
else:
sol, res = sparse.linalg.cg(LinearOp, y)
# TODO: do something with the residual...
return sol
else:
            # Creating this matrix may be problematic in terms of memory.
# Consider using an operator instead...
if type(G.L).__module__ == np.__name__:
LinearOp = np.diag(M*1) + tau * G.L
return np.linalg.solve(LinearOp, M * y)
else:
if np.prod(M.shape) != G.n_vertices:
raise ValueError("M should be of size [G.n_vertices,]")
indl = M
indu = (M == False)
Luu = G.L[indu, :][:, indu]
Wul = - G.L[indu, :][:, indl]
if sparse.issparse(G.L):
sol_part = sparse.linalg.spsolve(Luu, Wul.dot(y[indl]))
else:
sol_part = np.linalg.solve(Luu, np.matmul(Wul, y[indl]))
sol = y.copy()
sol[indu] = sol_part
return sol
| 28.598383 | 79 | 0.55344 |
79423433cdcc39041c7fd83b1754e656cc596c82 | 3,178 | py | Python | backend/api/models.py | AndyPaPaLeu/Disfactory | 4afc370ae6b0d526891fce2b1fe0b9c687309ed1 | [
"MIT"
] | null | null | null | backend/api/models.py | AndyPaPaLeu/Disfactory | 4afc370ae6b0d526891fce2b1fe0b9c687309ed1 | [
"MIT"
] | null | null | null | backend/api/models.py | AndyPaPaLeu/Disfactory | 4afc370ae6b0d526891fce2b1fe0b9c687309ed1 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
import uuid
from django.conf import settings
from django.contrib.gis.db import models
from django.contrib.gis.geos import Point
from django.contrib.postgres.fields import JSONField
class Factory(models.Model):
"""Factories that are potential to be illegal."""
# List of fact_type & status
factory_type_list = [
("1","金屬"),
("2-1","沖床、銑床、車床、鏜孔"),
("2-2", "焊接、鑄造、熱處理"),
("2-3", "金屬表面處理、噴漆"),
("3", "塑膠加工、射出"),
("4", "橡膠加工"),
("5", "非金屬礦物(石材)"),
("6", "食品"),
("7", "皮革"),
("8", "紡織"),
("9", "其他")
]
status_list = [
("D","已舉報"),
("F","資料不齊"),
("A","待審核")
]
# All Features
id = models.UUIDField(
primary_key=True,
default=uuid.uuid4,
editable=False,
verbose_name="ID",
)
lat = models.FloatField()
lng = models.FloatField()
point = models.PointField(srid=settings.POSTGIS_SRID)
landcode = models.CharField(max_length=50, blank=True, null=True)
name = models.CharField(max_length=50, blank=True, null=True)
factory_type = models.CharField(max_length=3, choices=factory_type_list, default="9")
status = models.CharField(max_length=1, choices=status_list, default="A")
status_time = models.DateTimeField(auto_now_add=True)
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
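    # save() is overridden below so that `point` is always rebuilt from the
    # submitted lat/lng (WGS84, SRID 4326) and reprojected to the site-wide
    # POSTGIS_SRID before the row is written.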
def save(self, *args, **kwargs):
self.point = Point(self.lng, self.lat, srid=4326)
self.point.transform(settings.POSTGIS_SRID)
super(Factory, self).save(*args, **kwargs)
class ReportRecord(models.Model):
    """Report records sent by users.
    `ReportRecord` will be queried in advance by admins from
    Citizen of the Earth, Taiwan. They will filter the most recent
    records out every few weeks to catch the bad guys.
"""
id = models.AutoField(primary_key=True)
factory = models.ForeignKey("Factory", on_delete=models.PROTECT)
user_ip = models.GenericIPAddressField(default="192.168.0.1", blank=True, null=True)
action_type = models.CharField(max_length=10) # PUT, POST
action_body = JSONField() # request body
created_at = models.DateTimeField(auto_now_add=True)
contact = models.CharField(max_length=64, blank=True, null=True)
others = models.CharField(max_length=1024, blank=True)
class Image(models.Model):
    """Images of factories that are uploaded by users."""
id = models.UUIDField(
primary_key=True,
default=uuid.uuid4,
editable=False,
)
factory = models.ForeignKey(
"Factory",
on_delete=models.PROTECT,
related_name="images",
blank=True,
null=True,
)
report_record = models.ForeignKey(
"ReportRecord",
on_delete=models.PROTECT,
blank=True,
null=True,
)
image_path = models.URLField(max_length=256) # get from Imgur
created_at = models.DateTimeField(auto_now_add=True)
# the DB saving time
orig_time = models.DateTimeField(blank=True, null=True)
# the actual photo taken time
| 30.854369 | 89 | 0.636249 |
794234572a920a864b13e67d733c2043997abdbd | 28,376 | py | Python | venv/Lib/site-packages/PyOpenGL-3.0.1/src/get_gl_extensions.py | temelkirci/Motion_Editor | a8b8d4c4d2dcc9be28385600f56066cef92a38ad | [
"MIT"
] | 1 | 2022-03-02T17:07:20.000Z | 2022-03-02T17:07:20.000Z | venv/Lib/site-packages/PyOpenGL-3.0.1/src/get_gl_extensions.py | temelkirci/RealTime_6DOF_Motion_Editor | a8b8d4c4d2dcc9be28385600f56066cef92a38ad | [
"MIT"
] | null | null | null | venv/Lib/site-packages/PyOpenGL-3.0.1/src/get_gl_extensions.py | temelkirci/RealTime_6DOF_Motion_Editor | a8b8d4c4d2dcc9be28385600f56066cef92a38ad | [
"MIT"
] | null | null | null | #! /usr/bin/env python
"""Script to download OpenGL extensions header and produce wrappers
This script is the mechanism that allows for automatically
wrapping new extensions with basic ctypes-level functionality.
It doesn't do everything, but it approximates the same level
of access as GLEW or pyglet will give you.
The script also downloads the extension specification into .txt
files that sit next to the same-named .py files, the purpose of
this is to allow developers to grep through the source files
looking for common features, such as references to the glGet*
tables.
glGetBoolean/glGetInteger/glGetFloat/glGetDouble
A CSV file in this directory controls registration of
constants for use with glGet, the format of the file is
a tab-delimited format with the constants in the first
column. Records are as follows:
For no registration (e.g. when you need a custom function):
CONSTANT
For a static size:
CONSTANT (1,)
For dynamic dictionary-based lookup of size based on another
parameter:
CONSTANT pname LOOKUPCONSTANT (1,) LOOKUPCONSTANT (4,) ...
Note that only constants which appear in a downloadable
specification (.txt file) under a New Tokens header with a note
that they can be passed to glGetBoolean will be so registered.
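Illustrative rows for the CSV described above (example values only, not
taken from the shipped file):
    GL_MAX_TEXTURE_SIZE<tab>(1,)
    GL_DEPTH_RANGE<tab>(2,)
i.e. the constant name in the first column, then its static size tuple.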
"""
import urllib, os, sys, re, string, traceback, logging, textwrap
EXTENSION_HEADER_SOURCE = 'http://www.opengl.org/registry/api/glext.h'
#ROOT_EXTENSION_SOURCE = 'http://oss.sgi.com/projects/ogl-sample/registry/'
ROOT_EXTENSION_SOURCE = 'http://www.opengl.org/registry/specs/'
AUTOGENERATION_SENTINEL = """### DO NOT EDIT above the line "END AUTOGENERATED SECTION" below!"""
AUTOGENERATION_SENTINEL_END = """### END AUTOGENERATED SECTION"""
if not os.path.isdir( os.path.join('..','OpenGL','GL') ):
raise RuntimeError( """Only run this script with the src directory as the current working directory""" )
log = logging.getLogger( 'autogen' )
def download( url ):
"""Download the given url, informing the user of what we're doing"""
sys.stderr.write( 'Download: %r\n'%(url,))
file = urllib.urlopen( url )
return file.read()
registry = {}
def nameToPath( name ):
return os.path.join( * name.split( '_',2 ) )
def nameToPathMinusGL( name ):
return "/".join( name.split( '_',2 )[1:] )
def indent( text, indent='\t' ):
return "\n".join([
'%s%s'%(indent,line)
for line in text.splitlines()
])
class Helper( object ):
root = ROOT_EXTENSION_SOURCE
def __getitem__( self, key ):
item = getattr( self, key, None )
if item is None:
raise KeyError( key )
if callable( item ):
return item()
else:
return item
class Function( Helper ):
def __init__( self, returnType, name, signature, dll='platform.GL' ):
"""Parse definition into our various elements"""
self.returnType = self.parseReturnType(returnType)
self.name = name
try:
self.argTypes, self.argNames = self.parseArguments( signature )
except Exception, err:
log.error( """Error parsing arguments for %s %s: %s""", name, signature, err )
self.argTypes, self.argNames = (), ()
## self.pysignature = '(%s)'%(
## ", ".join([
## item.split()[-1].strip('*')
## for item in signature[1:-1].split( ',' )
## if item.strip().strip('*')
## ])
## )
self.dll = dll
findName = re.compile( '[a-zA-z0-9]*$' )
def parseReturnType( self, returnType ):
return self.cTypeToPyType( returnType )
def parseArguments( self, signature ):
"""Parse a C argument-type declaration into a ctypes-style argTypes and argNames"""
signature = signature.strip()[1:-1]
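        # Example (illustrative only): a signature such as
        #   "(GLenum pname, const GLfloat *params)"
        # yields argTypes ['constants.GLenum', 'arrays.GLfloatArray'] and
        # argNames ['pname', 'params'] after cTypeToPyType conversion below.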
# first and easiest case is a void call...
if not signature.strip() or signature.strip() == 'void':
return (), ()
types, names = [], []
for item in signature.split( ',' ):
item = item.strip()
nameMatch = self.findName.search( item )
if not nameMatch:
raise ValueError( item )
name = nameMatch.group(0)
rest = item[:nameMatch.start(0)].strip()
types.append( self.cTypeToPyType( rest ) )
names.append( name )
return types, names
def cTypeToPyType( self, base ):
"""Given a C declared type for an argument/return type, get Python/ctypes version"""
base = base.strip()
if base.endswith( 'const' ):
return self.cTypeToPyType( base[:-5] )
elif base.startswith( 'const' ):
return self.cTypeToPyType( base[5:] )
elif base.endswith( '*' ):
new = self.cTypeToPyType( base[:-1] )
if new == 'constants.GLvoid':
return 'ctypes.c_void_p'
elif self.CTYPE_TO_ARRAY_TYPE.has_key( new ):
return 'arrays.%s'%(self.CTYPE_TO_ARRAY_TYPE[new])
elif new in ( 'arrays.GLcharArray','arrays.GLcharARBArray'):
# can't have a pointer to these...
return 'ctypes.POINTER( ctypes.POINTER( constants.GLchar ))'
elif new in ( 'constants.GLcharARB',):
return 'ctypes.POINTER( ctypes.c_char_p )'
else:
log.warn( 'Unconverted pointer type in %s: %r', self.name, new )
return 'ctypes.POINTER(%s)'%(new)
else:
return 'constants.%s'%(base,)
def errorReturn( self ):
return '0'
def declaration( self ):
"""Produce a declaration for this function in ctypes format"""
dll = self.dll
returnType = self.returnType
if self.argTypes:
argTypes = '(%s,)'%( ','.join(self.argTypes))
else:
argTypes = '()'
if self.argNames:
argNames = '(%s,)'%( ','.join([repr(x) for x in self.argNames]))
else:
argNames = '()'
arguments = ', '.join([
'%(type)s(%(name)s)'%locals()
for (type,name) in [
(type.split('.',1)[1],name)
for type,name in zip( self.argTypes,self.argNames )
]
])
name = self.name
if returnType.strip() in ('constants.GLvoid', 'constants.void'):
returnType = pyReturn = 'None'
else:
pyReturn = self.returnType
log.info( 'returnType %s -> %s', self.returnType, pyReturn )
doc = '%(name)s(%(arguments)s) -> %(pyReturn)s'%locals()
return self.TEMPLATE%locals()
TEMPLATE = """%(name)s = platform.createExtensionFunction(
%(name)r,dll=%(dll)s,
extension=EXTENSION_NAME,
resultType=%(returnType)s,
argTypes=%(argTypes)s,
doc=%(doc)r,
argNames=%(argNames)s,
deprecated=_DEPRECATED,
)
"""
CTYPE_TO_ARRAY_TYPE = {
'constants.GLfloat': 'GLfloatArray',
'constants.float': 'GLfloatArray',
'constants.GLclampf': 'GLclampfArray',
'constants.GLdouble': 'GLdoubleArray',
'constants.double': 'GLdoubleArray',
'constants.int': 'GLintArray',
'constants.GLint': 'GLintArray',
'constants.GLuint': 'GLuintArray',
'constants.unsigned int':'GLuintArray',
'constants.unsigned char': 'GLbyteArray',
'constants.uint': 'GLuintArray',
'constants.GLshort': 'GLshortArray',
'constants.GLushort': 'GLushortArray',
'constants.short unsigned int':'GLushortArray',
'constants.GLubyte': 'GLubyteArray',
'constants.GLbool': 'GLbooleanArray',
'constants.GLboolean': 'GLbooleanArray',
'arrays.GLbooleanArray': 'GLbooleanArray',
'constants.GLbyte': 'GLbyteArray',
'constants.char': 'GLbyteArray',
'constants.gleDouble': 'GLdoubleArray',
'constants.GLchar': 'GLcharArray',
'constants.GLcharARB': 'GLcharARBArray',
'constants.GLhalfNV': 'GLushortArray',
'constants.GLhandle': 'GLuintArray',
'constants.GLhandleARB': 'GLuintArray',
'constants.GLenum': 'GLuintArray',
# following should all have special sub-classes that enforce dimensions
'constants.gleDouble * 4': 'GLdoubleArray',
'constants.gleDouble * 3': 'GLdoubleArray',
'constants.gleDouble * 2': 'GLdoubleArray',
'constants.c_float * 3': 'GLfloatArray',
'constants.gleDouble * 3 * 2': 'GLdoubleArray',
'constants.GLsizei': 'GLsizeiArray',
'constants.GLint64': 'GLint64Array',
'constants.GLint64EXT': 'GLint64Array',
'constants.GLuint64': 'GLuint64Array',
'constants.GLuint64EXT': 'GLuint64Array',
}
# Don't know how Tarn got the api_versions, short of manually entering them...
WRAPPER_TEMPLATE = """'''OpenGL extension %(owner)s.%(module)s
Automatically generated by the get_gl_extensions script, do not edit!
'''
from OpenGL import platform, constants, constant, arrays
from OpenGL import extensions
from OpenGL.GL import glget
import ctypes
EXTENSION_NAME = %(constantModule)r
_DEPRECATED = %(deprecatedFlag)r
%(constants)s
%(declarations)s%(deprecated)s
"""
INIT_TEMPLATE = """
def glInit%(camelModule)s%(owner)s():
'''Return boolean indicating whether this extension is available'''
return extensions.hasGLExtension( EXTENSION_NAME )
"""
FINAL_MODULE_TEMPLATE = """'''OpenGL extension %(owner)s.%(module)s
This module customises the behaviour of the
OpenGL.raw.%(prefix)s.%(owner)s.%(module)s to provide a more
Python-friendly API
%(overview)sThe official definition of this extension is available here:
%(root)s%(owner)s/%(module)s.txt
'''
from OpenGL import platform, constants, constant, arrays
from OpenGL import extensions, wrapper
from OpenGL.GL import glget
import ctypes
from OpenGL.raw.%(prefix)s.%(owner)s.%(module)s import *
"""
class Module( Helper ):
targetDirectory = os.path.join( '..','OpenGL')
rawTargetDirectory = os.path.join( '..','OpenGL','raw')
prefix = 'GL'
defineFinder = re.compile( r'\#define[ \t]+([a-zA-Z0-9_]+)[ \t]*(0x[0-9a-fA-F]+)' )
functionFinder = re.compile( r'GLAPI[ \t]+(.*?)[ \t]+APIENTRY[ \t]+([a-zA-Z0-9_]+)[ \t]*\(' )
signatureFinderTemplate = r'typedef[ \t]+%(returnTypeRE)s[ \t]+\(APIENTRYP[ \t]+PFN%(nameUpper)sPROC\)[ \t]*(\(.*?\))[;]'
typeDefFinder = re.compile( r'typedef[ \t]+(([a-zA-Z0-9_]+[ \t]*)+);' )
RAW_MODULE_TEMPLATE = WRAPPER_TEMPLATE + INIT_TEMPLATE
def __init__( self, name, segments, header ):
log.info( 'name: %r', name )
if not name.startswith( 'GL_' ):
name = 'GL_'+name
self.name = name
self.segments = segments
self.header = header
try:
self.prefix, self.owner, self.module = name.split('_',2)
self.sentinelConstant = '%s_%s'%(self.owner,self.module)
except ValueError:
if name.endswith( 'SGIX' ):
self.prefix = "GL"
self.owner = 'SGIX'
self.module = name[3:-4]
self.sentinelConstant = '%s%s'%(self.module,self.owner)
else:
log.error( """Unable to parse module name: %s""", name )
raise
if self.module[0] in string.digits:
self.module = 'GL_%s'%(self.module,)
self.camelModule = "".join([x.title() for x in self.module.split('_')])
self.rawModule = self.module
# XXX need to figure this out better
self.rawOwner = self.owner
while self.owner and self.owner[0] in string.digits:
self.owner = self.owner[1:]
self.rawPathName = os.path.join( self.rawTargetDirectory, self.prefix, self.owner, self.module+'.py' )
self.pathName = os.path.join( self.targetDirectory, self.prefix, self.owner, self.module+'.py' )
self.findFunctions()
self.constantModule = '%(prefix)s_%(owner)s_%(rawModule)s'%self
if self.rawModule.endswith( '_DEPRECATED' ):
self.constantModule = self.constantModule[:-len('_DEPRECATED')]
self.deprecatedFlag = True
else:
self.deprecatedFlag = False
specification = self.getSpecification()
self.overview = ''
if self.header.includeOverviews:
for title,section in specification.blocks( specification.source ):
if title.startswith( 'Overview' ):
self.overview = 'Overview (from the spec)\n%s\n\n'%(
indent( section.replace('\xd4','O').replace('\xd5','O') )
)
break
def shouldReplace( self ):
"""Should we replace the given filename?"""
filename = self.pathName
if not os.path.isfile(
filename
):
return True
else:
hasLines = 0
for line in open( filename ):
if line.strip() == AUTOGENERATION_SENTINEL_END.strip():
return True
hasLines = 1
if not hasLines:
return True
return False
def findFunctions( self ):
"""Find all of our function definitions"""
result = []
for segment in self.segments:
for match in self.functionFinder.finditer(segment):
returnType, name = match.groups()
nameUpper = re.escape(name.upper())
returnTypeRE = re.escape( returnType )
signatureFinder = re.compile( self.signatureFinderTemplate%locals() )
try:
signature = signatureFinder.search( segment ).group(1)
result.append( Function( returnType, name, signature ))
except AttributeError:
log.warn(
"Couldn't find signature for function %s %s",
returnType,name,
)
self.functions = result
def declarations( self ):
"""
DECLARE_VOID_EXT(glPointParameterfARB, (GLenum pname, GLfloat param), (pname, param))
DECLARE_VOID_EXT(glPointParameterfvARB, (GLenum pname, const GLfloat* param), (pname, param))
"""
result = []
for function in self.functions:
result.append( function.declaration() )
return "\n".join( result )
def functionNames( self ):
"""
"glPointParameterfARB",
"glPointParameterfvARB",
"""
result = []
for function in self.functions:
result.append( '"%s",'%(function.name,))
return "\n".join(result)
def swigFunctionDeclarations( self ):
"""
void glPointParameterfARB(GLenum pname, GLfloat param);
DOC(glPointParameterfARB, "glPointParameterfARB(pname, param) -> None")
void glPointParameterfvARB(GLenum pname, const GLfloat* param);
DOC(glPointParameterfvARB, "glPointParameterfvARB(pname, param) -> None")
"""
result = []
for segment in self.segments:
for match in self.typeDefFinder.finditer( segment ):
result.append( match.group(0))
for function in self.functions:
result.append( '%(returnType)s %(name)s%(signature)s;'%function )
result.append( 'DOC(%(name)s, "%(name)s%(pysignature)s")'%function )
return "\n".join( result )
def constants( self ):
"""Retrieve constants from the segments
This is, of course, all heuristically done :)
"""
result = []
glGets = self.getSpecification().glGetConstants()
glGetSizes = self.header.glGetSizes
for segment in self.segments:
for match in self.defineFinder.finditer( segment ):
name,value = match.groups()
value = int(value,0)
result.append( '%(name)s = constant.Constant( %(name)r, 0x%(value)X )'%locals() )
if glGets.has_key( name ) or glGetSizes.has_key( name ):
size = glGetSizes.get( name, [] )
if len(size) == 0: # not yet specified...
glGetSizes[ name ] = []
elif len(size) == 1: # static size...
size = size[0]
result.append(
"""glget.addGLGetConstant( %(name)s, %(size)s )"""%locals()
)
else:
# param name, then (key,value) for rest of elements
param = size[0]
rest = size[1:]
set = {}
while rest:
current = rest[:2]
del rest[:2]
if len(current) == 2:
set[current[0]] = current[1]
else:
                                log.warn(
                                    """Incorrect format for glGet constant %s (uneven set of values)""",
name,
)
size = '{ %s }'%(
','.join([
'%s : %s'%(
key,value
)
                                for (key,value) in set.items()
])
)
result.append(
"""glget.addGLGetConstant( %(name)s, %(size)s, %(param)r )"""%locals()
)
return '\n'.join(result)
SPEC_EXCEPTIONS = {
# different URLs... grr...
'3DFX/multisample': 'http://oss.sgi.com/projects/ogl-sample/registry/3DFX/3dfx_multisample.txt',
#'EXT/color_matrix': 'http://oss.sgi.com/projects/ogl-sample/registry/SGI/color_matrix.txt',
#'EXT/texture_cube_map': 'http://oss.sgi.com/projects/ogl-sample/registry/ARB/texture_cube_map.txt',
'SGIS/fog_function': 'http://oss.sgi.com/projects/ogl-sample/registry/SGIS/fog_func.txt',
}
def getSpecification( self ):
"""Retrieve our specification document...
Retrieves the .txt file which defines this specification,
allowing us to review the document locally in order to provide
a reasonable wrapping of it...
"""
specFile = os.path.splitext( self.pathName )[0] + '.txt'
specURLFragment = nameToPathMinusGL(self.name)
if self.SPEC_EXCEPTIONS.has_key( specURLFragment ):
specURL = self.SPEC_EXCEPTIONS[ specURLFragment ]
else:
specURL = '%s/%s.txt'%(
ROOT_EXTENSION_SOURCE,
specURLFragment,
)
if not os.path.isfile( specFile ):
try:
data = download(specURL)
except Exception, err:
log.warn( """Failure downloading specification %s: %s""", specURL, err )
data = ""
else:
try:
open(specFile,'w').write( data )
except IOError, err:
pass
else:
data = open( specFile ).read()
if 'Error 404' in data:
log.info( """Spec 404: %s""", specURL)
data = ''
return Specification( data )
def process( self ):
"""(re)Wrap the given module"""
# first the raw wrapped API...
directory = os.path.dirname(self.rawPathName)
try:
os.makedirs( directory )
except os.error:
pass
if not os.path.isfile( os.path.join(directory, '__init__.py')):
open( os.path.join(directory, '__init__.py'),'w').write(
'''"""OpenGL Extensions"""'''
)
current = ''
toWrite = self.RAW_MODULE_TEMPLATE % self
try:
current = open( self.rawPathName, 'r').read()
except Exception, err:
pass
if current.strip() != toWrite.strip():
fh = open( self.rawPathName, 'w')
fh.write( toWrite )
fh.close()
if self.shouldReplace( ):
# now the final module with any included custom code...
toWrite = FINAL_MODULE_TEMPLATE % self
current = ''
try:
current = open( self.pathName, 'r').read()
except Exception, err:
pass
else:
found = current.rfind( '\n'+AUTOGENERATION_SENTINEL_END )
                if found >= 0:
if current[:found].strip() == toWrite.strip():
# we aren't going to change anything...
return False
found += len( '\n' + AUTOGENERATION_SENTINEL_END )
current = current[found:]
else:
current = ''
try:
fh = open( self.pathName, 'w')
except IOError, err:
log.warn( "Unable to create module for %r %s", self.name, err )
return False
else:
fh.write( toWrite )
fh.write( AUTOGENERATION_SENTINEL_END )
fh.write( current )
fh.close()
return True
return False
def deprecated( self ):
"""Produce import line for deprecated functions if appropriate"""
name = self.name + '_DEPRECATED'
if self.header.registry.get( name ):
return '''# import legacy entry points to allow checking for bool(entryPoint)
from OpenGL.raw.%(prefix)s.%(owner)s.%(module)s_DEPRECATED import *'''%self
return ''
class VersionModule( Module ):
"""Module representing an OpenGL version's extension to the spec"""
targetDirectory = os.path.join( '..','OpenGL')
rawTargetDirectory = os.path.join( '..','OpenGL','raw')
prefix = 'GL'
RAW_MODULE_TEMPLATE = WRAPPER_TEMPLATE
def getSpecification( self ):
"""Retrieve our specification document...
Retrieves the .txt file which defines this specification,
allowing us to review the document locally in order to provide
a reasonable wrapping of it...
"""
return Specification( '' )
class Specification( object ):
"""Parser for parsing OpenGL specifications for interesting information
"""
def __init__( self, source ):
"""Store the source text for the specification"""
self.source = source
def blocks( self, data ):
"""Retrieve the set of all blocks"""
data = data.splitlines()
title = []
block = []
for line in data:
if line and line.lstrip() == line:
if block:
yield "\n".join(title), textwrap.dedent( "\n".join(block) )
title = [ ]
block = [ ]
title.append( line )
else:
block.append( line )
if block:
yield "\n".join(title), textwrap.dedent( "\n".join(block) )
def constantBlocks( self ):
"""Retrieve the set of constant blocks"""
for title,block in self.blocks( self.source ):
if title and title.startswith( 'New Tokens' ):
yield block
def glGetConstants( self ):
"""Retrieve the set of constants which pass to glGet* functions"""
table = {}
for block in self.constantBlocks():
for title, section in self.blocks( block ):
for possible in (
'GetBooleanv','GetIntegerv','<pname> of Get'
):
if possible in title:
for line in section.splitlines():
line = line.strip().split()
if len(line) == 2:
constant,value = line
table['GL_%s'%(constant,)] = value
break
return table
class Header( object ):
"""Manages the overall header source
registry -- registry of extensions/versions found with the
header segments that define them...
includeOverviews -- if True, include the specification's
        overviews in the individual extensions
"""
registry = None
includeOverviews = True
def getFile( self ):
"""Load or download the source of the glext.h header"""
if not os.path.isfile( 'glext.h' ):
data = download( EXTENSION_HEADER_SOURCE )
open( 'glext.h', 'w').write( data )
else:
data = open( 'glext.h' ).read()
return data
def getRegistry( self ):
"""Retrieve a parsed registry of extensions/versions
This uses accidents of the header definition to produce
the results, but the header is fairly consistent...
returns { name: segments} to pass to Module init
"""
if self.registry:
return self.registry
file = self.getFile()
index = file.find( '#define GL_GLEXT_VERSION' )
file = file[index:]
extensions = file.split( '\n#ifndef ' )[1:]
for item in extensions:
name, definition = item.split( None, 1 )
definition = '#ifndef '+item
registry.setdefault( name, []).append( definition )
self.registry = registry
return registry
def iterModules( self ):
"""Yield each Module( name, segments ) for all extensions
extensions do *not* include the GL core versions...
"""
items = self.getRegistry().items()
items.sort()
for name, segments in items:
if name in ('APIENTRY','APIENTRYP','GLAPI'):
continue
if not name.startswith( 'GL_VERSION' ):
yield Module( name, segments, header=self )
else:
yield VersionModule( name, segments, header=self )
def iterVersions( self ):
"""Yield each Version( name, segments ) for all versions"""
items = self.getRegistry().items()
items.sort()
for name, segments in items:
if name.startswith( 'GL_VERSION' ):
yield Version( name, segments )
def autoGenerate( self ):
"""Autogenerate all Modules in this header"""
new = {}
total = count = 0
for module in self.iterModules():
if module.process( ):
new[module.constantModule] = module
count += 1
total += 1
return total, count
def constantSections( self ):
"""Print the constant sections for all modules"""
for module in self.iterModules():
module.getSpecification()
for constant, value in module.getSpecification().glGetConstants().items():
#print title
print constant
glGetSizes = {}
def loadGLGetSizes( self ):
"""Load manually-generated table of glGet* sizes"""
table = self.glGetSizes
try:
lines = [
line.split('\t')
for line in open( 'glgetsizes.csv' ).read().splitlines()
]
except IOError, err:
pass
else:
for line in lines:
if line and line[0]:
table[line[0].strip('"')] = [
v for v in [
v.strip('"') for v in line[1:]
]
if v
]
def saveGLGetSizes( self ):
"""Save out sorted list of glGet sizes to disk"""
items = self.glGetSizes.items()
items.sort()
data = "\n".join([
'%s\t%s'%(
key,"\t".join(value)
)
for (key,value) in items
])
open( 'glgetsizes.csv','w').write( data )
if __name__ == "__main__":
logging.basicConfig()
log.setLevel( logging.WARN )
header = Header()
header.loadGLGetSizes()
total,count = Header().autoGenerate()
print '%s total %s replaced'%(total,count)
header.saveGLGetSizes()
#header.constantSections()
| 39.301939 | 125 | 0.551593 |
794235c04f65aa169640bcbddcf291f837503be7 | 1,080 | py | Python | adventofcode/2016/03.py | hacktoolkit/code_challenges | d71f8362496a72963a53abba7bcc9dd4d35a2920 | [
"MIT"
] | 10 | 2015-01-31T09:04:45.000Z | 2022-01-08T04:09:48.000Z | adventofcode/2016/03.py | hacktoolkit/code_challenges | d71f8362496a72963a53abba7bcc9dd4d35a2920 | [
"MIT"
] | 3 | 2016-05-16T07:37:01.000Z | 2016-05-18T14:14:16.000Z | adventofcode/2016/03.py | hacktoolkit/code_challenges | d71f8362496a72963a53abba7bcc9dd4d35a2920 | [
"MIT"
] | 6 | 2015-02-06T06:00:00.000Z | 2020-02-13T16:13:48.000Z | from utils import (
InputConfig,
ingest,
transpose,
)
INPUT_FILE = '03.in'
EXPECTED_ANSWERS = (983, 1836, )
def main():
solution = Solution()
answers = (solution.solve1(), solution.solve2(), )
print(answers)
assert(answers == EXPECTED_ANSWERS)
class Solution:
def __init__(self):
self.data = ingest(INPUT_FILE, InputConfig(as_table=True, cell_func=int))
def solve1(self):
valid_triangles = 0
for values in self.data:
a, b, c = sorted(values)
if a + b > c:
valid_triangles += 1
answer = valid_triangles
return answer
def solve2(self):
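        # Part 2 of the puzzle reads triangles column-wise: transpose the
        # 3-column table, flatten it, and treat each consecutive group of
        # three values as candidate triangle sides.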
matrix = transpose(self.data)
values = [
col
for row in matrix
for col in row
]
valid_triangles = 0
for i in range(0, len(values), 3):
a, b, c = sorted(values[i:i+3])
if a + b > c:
valid_triangles += 1
answer = valid_triangles
return answer
if __name__ == '__main__':
main()
| 19.636364 | 81 | 0.539815 |
79423650e93c04dd2815f8685ef1ba379328ae87 | 10,530 | py | Python | tests/core/ssl/test_ssl.py | vito-jwt/chia-blockchain | cd0a1c28e3c41ac2650ab7b3ef6a041e0c7f87b2 | [
"Apache-2.0"
] | 6 | 2021-05-21T10:29:32.000Z | 2021-07-10T12:51:34.000Z | tests/core/ssl/test_ssl.py | Mateus-dang/chia-blockchain | 2d2693496591b0b786461d16929b99a980d2528f | [
"Apache-2.0"
] | 28 | 2021-07-13T21:07:14.000Z | 2022-03-29T21:10:38.000Z | tests/core/ssl/test_ssl.py | Mateus-dang/chia-blockchain | 2d2693496591b0b786461d16929b99a980d2528f | [
"Apache-2.0"
] | 2 | 2021-04-20T16:37:10.000Z | 2021-04-24T04:59:36.000Z | import asyncio
import aiohttp
import pytest
from chia.protocols.shared_protocol import protocol_version
from chia.server.outbound_message import NodeType
from chia.server.server import ChiaServer, ssl_context_for_client
from chia.server.ws_connection import WSChiaConnection
from chia.ssl.create_ssl import generate_ca_signed_cert
from chia.types.peer_info import PeerInfo
from chia.util.block_tools import test_constants
from chia.util.ints import uint16
from tests.setup_nodes import (
bt,
self_hostname,
setup_farmer_harvester,
setup_introducer,
setup_simulators_and_wallets,
setup_timelord,
)
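# Test helper (descriptive comment added for clarity): open a WebSocket to the
# given server using the supplied SSL context and attempt the Chia protocol
# handshake; return True only if the handshake succeeds, i.e. the certificate
# is accepted by the server.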
async def establish_connection(server: ChiaServer, dummy_port: int, ssl_context) -> bool:
timeout = aiohttp.ClientTimeout(total=10)
session = aiohttp.ClientSession(timeout=timeout)
try:
incoming_queue: asyncio.Queue = asyncio.Queue()
url = f"wss://{self_hostname}:{server._port}/ws"
ws = await session.ws_connect(url, autoclose=False, autoping=True, ssl=ssl_context)
wsc = WSChiaConnection(
NodeType.FULL_NODE,
ws,
server._port,
server.log,
True,
False,
self_hostname,
incoming_queue,
lambda x, y: x,
None,
100,
30,
)
handshake = await wsc.perform_handshake(server._network_id, protocol_version, dummy_port, NodeType.FULL_NODE)
await session.close()
return handshake
except Exception:
await session.close()
return False
class TestSSL:
@pytest.fixture(scope="function")
async def harvester_farmer(self):
async for _ in setup_farmer_harvester(test_constants):
yield _
@pytest.fixture(scope="function")
async def wallet_node(self):
async for _ in setup_simulators_and_wallets(1, 1, {}):
yield _
@pytest.fixture(scope="function")
async def introducer(self):
async for _ in setup_introducer(21233):
yield _
@pytest.fixture(scope="function")
async def timelord(self):
async for _ in setup_timelord(21236, 21237, False, test_constants, bt):
yield _
@pytest.mark.asyncio
async def test_public_connections(self, wallet_node):
full_nodes, wallets = wallet_node
full_node_api = full_nodes[0]
server_1: ChiaServer = full_node_api.full_node.server
wallet_node, server_2 = wallets[0]
success = await server_2.start_client(PeerInfo(self_hostname, uint16(server_1._port)), None)
assert success is True
@pytest.mark.asyncio
async def test_farmer(self, harvester_farmer):
harvester_api, farmer_api = harvester_farmer
farmer_server = farmer_api.farmer.server
# Create valid cert (valid meaning signed with private CA)
priv_crt = farmer_server._private_key_path.parent / "valid.crt"
priv_key = farmer_server._private_key_path.parent / "valid.key"
generate_ca_signed_cert(
farmer_server.ca_private_crt_path.read_bytes(),
farmer_server.ca_private_key_path.read_bytes(),
priv_crt,
priv_key,
)
ssl_context = ssl_context_for_client(
farmer_server.ca_private_crt_path, farmer_server.ca_private_crt_path, priv_crt, priv_key
)
connected = await establish_connection(farmer_server, 12312, ssl_context)
assert connected is True
# Create not authenticated cert
pub_crt = farmer_server._private_key_path.parent / "non_valid.crt"
pub_key = farmer_server._private_key_path.parent / "non_valid.key"
generate_ca_signed_cert(
farmer_server.chia_ca_crt_path.read_bytes(), farmer_server.chia_ca_key_path.read_bytes(), pub_crt, pub_key
)
ssl_context = ssl_context_for_client(
farmer_server.chia_ca_crt_path, farmer_server.chia_ca_crt_path, pub_crt, pub_key
)
connected = await establish_connection(farmer_server, 12312, ssl_context)
assert connected is False
ssl_context = ssl_context_for_client(
farmer_server.ca_private_crt_path, farmer_server.ca_private_crt_path, pub_crt, pub_key
)
connected = await establish_connection(farmer_server, 12312, ssl_context)
assert connected is False
@pytest.mark.asyncio
async def test_full_node(self, wallet_node):
full_nodes, wallets = wallet_node
full_node_api = full_nodes[0]
full_node_server = full_node_api.full_node.server
# Create not authenticated cert
pub_crt = full_node_server._private_key_path.parent / "p2p.crt"
pub_key = full_node_server._private_key_path.parent / "p2p.key"
generate_ca_signed_cert(
full_node_server.chia_ca_crt_path.read_bytes(),
full_node_server.chia_ca_key_path.read_bytes(),
pub_crt,
pub_key,
)
ssl_context = ssl_context_for_client(
full_node_server.chia_ca_crt_path, full_node_server.chia_ca_crt_path, pub_crt, pub_key
)
connected = await establish_connection(full_node_server, 12312, ssl_context)
assert connected is True
@pytest.mark.asyncio
async def test_wallet(self, wallet_node):
full_nodes, wallets = wallet_node
wallet_node, wallet_server = wallets[0]
# Wallet should not accept incoming connections
pub_crt = wallet_server._private_key_path.parent / "p2p.crt"
pub_key = wallet_server._private_key_path.parent / "p2p.key"
generate_ca_signed_cert(
wallet_server.chia_ca_crt_path.read_bytes(), wallet_server.chia_ca_key_path.read_bytes(), pub_crt, pub_key
)
ssl_context = ssl_context_for_client(
wallet_server.chia_ca_crt_path, wallet_server.chia_ca_crt_path, pub_crt, pub_key
)
connected = await establish_connection(wallet_server, 12312, ssl_context)
assert connected is False
# Not even signed by private cert
priv_crt = wallet_server._private_key_path.parent / "valid.crt"
priv_key = wallet_server._private_key_path.parent / "valid.key"
generate_ca_signed_cert(
wallet_server.ca_private_crt_path.read_bytes(),
wallet_server.ca_private_key_path.read_bytes(),
priv_crt,
priv_key,
)
ssl_context = ssl_context_for_client(
wallet_server.ca_private_crt_path, wallet_server.ca_private_crt_path, priv_crt, priv_key
)
connected = await establish_connection(wallet_server, 12312, ssl_context)
assert connected is False
@pytest.mark.asyncio
async def test_harvester(self, harvester_farmer):
harvester, farmer_api = harvester_farmer
harvester_server = harvester.server
# harvester should not accept incoming connections
pub_crt = harvester_server._private_key_path.parent / "p2p.crt"
pub_key = harvester_server._private_key_path.parent / "p2p.key"
generate_ca_signed_cert(
harvester_server.chia_ca_crt_path.read_bytes(),
harvester_server.chia_ca_key_path.read_bytes(),
pub_crt,
pub_key,
)
ssl_context = ssl_context_for_client(
harvester_server.chia_ca_crt_path, harvester_server.chia_ca_crt_path, pub_crt, pub_key
)
connected = await establish_connection(harvester_server, 12312, ssl_context)
assert connected is False
# Not even signed by private cert
priv_crt = harvester_server._private_key_path.parent / "valid.crt"
priv_key = harvester_server._private_key_path.parent / "valid.key"
generate_ca_signed_cert(
harvester_server.ca_private_crt_path.read_bytes(),
harvester_server.ca_private_key_path.read_bytes(),
priv_crt,
priv_key,
)
ssl_context = ssl_context_for_client(
harvester_server.ca_private_crt_path, harvester_server.ca_private_crt_path, priv_crt, priv_key
)
connected = await establish_connection(harvester_server, 12312, ssl_context)
assert connected is False
@pytest.mark.asyncio
async def test_introducer(self, introducer):
introducer_api, introducer_server = introducer
# Create not authenticated cert
pub_crt = introducer_server.chia_ca_key_path.parent / "p2p.crt"
pub_key = introducer_server.chia_ca_key_path.parent / "p2p.key"
generate_ca_signed_cert(
introducer_server.chia_ca_crt_path.read_bytes(),
introducer_server.chia_ca_key_path.read_bytes(),
pub_crt,
pub_key,
)
ssl_context = ssl_context_for_client(
introducer_server.chia_ca_crt_path, introducer_server.chia_ca_crt_path, pub_crt, pub_key
)
connected = await establish_connection(introducer_server, 12312, ssl_context)
assert connected is True
@pytest.mark.asyncio
async def test_timelord(self, timelord):
timelord_api, timelord_server = timelord
# timelord should not accept incoming connections
pub_crt = timelord_server._private_key_path.parent / "p2p.crt"
pub_key = timelord_server._private_key_path.parent / "p2p.key"
generate_ca_signed_cert(
timelord_server.chia_ca_crt_path.read_bytes(),
timelord_server.chia_ca_key_path.read_bytes(),
pub_crt,
pub_key,
)
ssl_context = ssl_context_for_client(
timelord_server.chia_ca_crt_path, timelord_server.chia_ca_crt_path, pub_crt, pub_key
)
connected = await establish_connection(timelord_server, 12312, ssl_context)
assert connected is False
# Not even signed by private cert
priv_crt = timelord_server._private_key_path.parent / "valid.crt"
priv_key = timelord_server._private_key_path.parent / "valid.key"
generate_ca_signed_cert(
timelord_server.ca_private_crt_path.read_bytes(),
timelord_server.ca_private_key_path.read_bytes(),
priv_crt,
priv_key,
)
ssl_context = ssl_context_for_client(
timelord_server.ca_private_crt_path, timelord_server.ca_private_crt_path, priv_crt, priv_key
)
connected = await establish_connection(timelord_server, 12312, ssl_context)
assert connected is False
| 40.344828 | 118 | 0.688034 |
794236ef9da41238d051fa544bb382526350f649 | 43,898 | py | Python | files_sdk/models/user.py | Files-com/files-sdk-python | 84cedc9be099cd9e4db6249ef7a9d60595487090 | [
"MIT"
] | 14 | 2020-08-05T15:48:06.000Z | 2021-08-18T13:13:39.000Z | files_sdk/models/user.py | Files-com/files-sdk-python | 84cedc9be099cd9e4db6249ef7a9d60595487090 | [
"MIT"
] | 4 | 2020-10-30T14:49:25.000Z | 2021-09-29T17:11:53.000Z | files_sdk/models/user.py | Files-com/files-sdk-python | 84cedc9be099cd9e4db6249ef7a9d60595487090 | [
"MIT"
] | null | null | null | import builtins
import datetime
from files_sdk.api import Api
from files_sdk.list_obj import ListObj
from files_sdk.exceptions import InvalidParameterError, MissingParameterError, NotImplementedError
class User:
default_attributes = {
'id': None, # int64 - User ID
'username': None, # string - User's username
'admin_group_ids': None, # array - List of group IDs of which this user is an administrator
'allowed_ips': None, # string - A list of allowed IPs if applicable. Newline delimited
'attachments_permission': None, # boolean - DEPRECATED: Can the user create Bundles (aka Share Links)? Use the bundle permission instead.
'api_keys_count': None, # int64 - Number of api keys associated with this user
'authenticate_until': None, # date-time - Scheduled Date/Time at which user will be deactivated
'authentication_method': None, # string - How is this user authenticated?
'avatar_url': None, # string - URL holding the user's avatar
'billing_permission': None, # boolean - Allow this user to perform operations on the account, payments, and invoices?
'bypass_site_allowed_ips': None, # boolean - Allow this user to skip site-wide IP blacklists?
'bypass_inactive_disable': None, # boolean - Exempt this user from being disabled based on inactivity?
'created_at': None, # date-time - When this user was created
'dav_permission': None, # boolean - Can the user connect with WebDAV?
'disabled': None, # boolean - Is user disabled? Disabled users cannot log in, and do not count for billing purposes. Users can be automatically disabled after an inactivity period via a Site setting.
'email': None, # email - User email address
'ftp_permission': None, # boolean - Can the user access with FTP/FTPS?
'group_ids': None, # string - Comma-separated list of group IDs of which this user is a member
'header_text': None, # string - Text to display to the user in the header of the UI
'language': None, # string - Preferred language
'last_login_at': None, # date-time - User's last login time
'last_protocol_cipher': None, # string - The last protocol and cipher used
'lockout_expires': None, # date-time - Time in the future that the user will no longer be locked out if applicable
'name': None, # string - User's full name
'company': None, # string - User's company
'notes': None, # string - Any internal notes on the user
'notification_daily_send_time': None, # int64 - Hour of the day at which daily notifications should be sent. Can be in range 0 to 23
'office_integration_enabled': None, # boolean - Enable integration with Office for the web?
'password_set_at': None, # date-time - Last time the user's password was set
'password_validity_days': None, # int64 - Number of days to allow user to use the same password
'public_keys_count': None, # int64 - Number of public keys associated with this user
'receive_admin_alerts': None, # boolean - Should the user receive admin alerts such a certificate expiration notifications and overages?
'require_2fa': None, # string - 2FA required setting
'active_2fa': None, # boolean - Is 2fa active for the user?
'require_password_change': None, # boolean - Is a password change required upon next user login?
'password_expired': None, # boolean - Is user's password expired?
'restapi_permission': None, # boolean - Can this user access the REST API?
        'self_managed': None, # boolean - Does this user manage its own credentials or is it a shared/bot user?
'sftp_permission': None, # boolean - Can the user access with SFTP?
'site_admin': None, # boolean - Is the user an administrator for this site?
'skip_welcome_screen': None, # boolean - Skip Welcome page in the UI?
'ssl_required': None, # string - SSL required setting
'sso_strategy_id': None, # int64 - SSO (Single Sign On) strategy ID for the user, if applicable.
'subscribe_to_newsletter': None, # boolean - Is the user subscribed to the newsletter?
'externally_managed': None, # boolean - Is this user managed by a SsoStrategy?
'time_zone': None, # string - User time zone
'type_of_2fa': None, # string - Type(s) of 2FA methods in use. Will be either `sms`, `totp`, `u2f`, `yubi`, or multiple values sorted alphabetically and joined by an underscore.
'user_root': None, # string - Root folder for FTP (and optionally SFTP if the appropriate site-wide setting is set.) Note that this is not used for API, Desktop, or Web interface.
'avatar_file': None, # file - An image file for your user avatar.
'avatar_delete': None, # boolean - If true, the avatar will be deleted.
'change_password': None, # string - Used for changing a password on an existing user.
'change_password_confirmation': None, # string - Optional, but if provided, we will ensure that it matches the value sent in `change_password`.
'grant_permission': None, # string - Permission to grant on the user root. Can be blank or `full`, `read`, `write`, `list`, or `history`.
'group_id': None, # int64 - Group ID to associate this user with.
        'imported_password_hash': None, # string - Pre-calculated hash of the user's password. If supplied, this will be used to authenticate the user on first login. Supported hash methods are MD5, SHA1, and SHA256.
'password': None, # string - User password.
'password_confirmation': None, # string - Optional, but if provided, we will ensure that it matches the value sent in `password`.
'announcements_read': None, # boolean - Signifies that the user has read all the announcements in the UI.
}
def __init__(self, attributes=None, options=None):
if not isinstance(attributes, dict):
attributes = {}
if not isinstance(options, dict):
options = {}
self.set_attributes(attributes)
self.options = options
def set_attributes(self, attributes):
for (attribute, default_value) in User.default_attributes.items():
setattr(self, attribute, attributes.get(attribute, default_value))
def get_attributes(self):
return {k: getattr(self, k, None) for k in User.default_attributes if getattr(self, k, None) is not None}
# Unlock user who has been locked out due to failed logins
def unlock(self, params = None):
if not isinstance(params, dict):
params = {}
if hasattr(self, "id") and self.id:
params['id'] = self.id
else:
raise MissingParameterError("Current object doesn't have a id")
if "id" not in params:
raise MissingParameterError("Parameter missing: id")
if "id" in params and not isinstance(params["id"], int):
raise InvalidParameterError("Bad parameter: id must be an int")
response, _options = Api.send_request("POST", "/users/{id}/unlock".format(id=params['id']), params, self.options)
return response.data
# Resend user welcome email
def resend_welcome_email(self, params = None):
if not isinstance(params, dict):
params = {}
if hasattr(self, "id") and self.id:
params['id'] = self.id
else:
raise MissingParameterError("Current object doesn't have a id")
if "id" not in params:
raise MissingParameterError("Parameter missing: id")
if "id" in params and not isinstance(params["id"], int):
raise InvalidParameterError("Bad parameter: id must be an int")
response, _options = Api.send_request("POST", "/users/{id}/resend_welcome_email".format(id=params['id']), params, self.options)
return response.data
# Trigger 2FA Reset process for user who has lost access to their existing 2FA methods
def user_2fa_reset(self, params = None):
if not isinstance(params, dict):
params = {}
if hasattr(self, "id") and self.id:
params['id'] = self.id
else:
raise MissingParameterError("Current object doesn't have a id")
if "id" not in params:
raise MissingParameterError("Parameter missing: id")
if "id" in params and not isinstance(params["id"], int):
raise InvalidParameterError("Bad parameter: id must be an int")
response, _options = Api.send_request("POST", "/users/{id}/2fa/reset".format(id=params['id']), params, self.options)
return response.data
# Parameters:
# avatar_file - file - An image file for your user avatar.
# avatar_delete - boolean - If true, the avatar will be deleted.
# change_password - string - Used for changing a password on an existing user.
# change_password_confirmation - string - Optional, but if provided, we will ensure that it matches the value sent in `change_password`.
# email - string - User's email.
# grant_permission - string - Permission to grant on the user root. Can be blank or `full`, `read`, `write`, `list`, or `history`.
# group_id - int64 - Group ID to associate this user with.
# group_ids - string - A list of group ids to associate this user with. Comma delimited.
    # imported_password_hash - string - Pre-calculated hash of the user's password. If supplied, this will be used to authenticate the user on first login. Supported hash methods are MD5, SHA1, and SHA256.
# password - string - User password.
# password_confirmation - string - Optional, but if provided, we will ensure that it matches the value sent in `password`.
# announcements_read - boolean - Signifies that the user has read all the announcements in the UI.
# allowed_ips - string - A list of allowed IPs if applicable. Newline delimited
# attachments_permission - boolean - DEPRECATED: Can the user create Bundles (aka Share Links)? Use the bundle permission instead.
# authenticate_until - string - Scheduled Date/Time at which user will be deactivated
# authentication_method - string - How is this user authenticated?
# billing_permission - boolean - Allow this user to perform operations on the account, payments, and invoices?
# bypass_inactive_disable - boolean - Exempt this user from being disabled based on inactivity?
# bypass_site_allowed_ips - boolean - Allow this user to skip site-wide IP blacklists?
# dav_permission - boolean - Can the user connect with WebDAV?
# disabled - boolean - Is user disabled? Disabled users cannot log in, and do not count for billing purposes. Users can be automatically disabled after an inactivity period via a Site setting.
# ftp_permission - boolean - Can the user access with FTP/FTPS?
# header_text - string - Text to display to the user in the header of the UI
# language - string - Preferred language
# notification_daily_send_time - int64 - Hour of the day at which daily notifications should be sent. Can be in range 0 to 23
# name - string - User's full name
# company - string - User's company
# notes - string - Any internal notes on the user
# office_integration_enabled - boolean - Enable integration with Office for the web?
# password_validity_days - int64 - Number of days to allow user to use the same password
# receive_admin_alerts - boolean - Should the user receive admin alerts such a certificate expiration notifications and overages?
# require_password_change - boolean - Is a password change required upon next user login?
# restapi_permission - boolean - Can this user access the REST API?
    # self_managed - boolean - Does this user manage its own credentials or is it a shared/bot user?
# sftp_permission - boolean - Can the user access with SFTP?
# site_admin - boolean - Is the user an administrator for this site?
# skip_welcome_screen - boolean - Skip Welcome page in the UI?
# ssl_required - string - SSL required setting
# sso_strategy_id - int64 - SSO (Single Sign On) strategy ID for the user, if applicable.
# subscribe_to_newsletter - boolean - Is the user subscribed to the newsletter?
# require_2fa - string - 2FA required setting
# time_zone - string - User time zone
# user_root - string - Root folder for FTP (and optionally SFTP if the appropriate site-wide setting is set.) Note that this is not used for API, Desktop, or Web interface.
# username - string - User's username
def update(self, params = None):
if not isinstance(params, dict):
params = {}
if hasattr(self, "id") and self.id:
params['id'] = self.id
else:
raise MissingParameterError("Current object doesn't have a id")
if "id" not in params:
raise MissingParameterError("Parameter missing: id")
if "id" in params and not isinstance(params["id"], int):
raise InvalidParameterError("Bad parameter: id must be an int")
if "change_password" in params and not isinstance(params["change_password"], str):
raise InvalidParameterError("Bad parameter: change_password must be an str")
if "change_password_confirmation" in params and not isinstance(params["change_password_confirmation"], str):
raise InvalidParameterError("Bad parameter: change_password_confirmation must be an str")
if "email" in params and not isinstance(params["email"], str):
raise InvalidParameterError("Bad parameter: email must be an str")
if "grant_permission" in params and not isinstance(params["grant_permission"], str):
raise InvalidParameterError("Bad parameter: grant_permission must be an str")
if "group_id" in params and not isinstance(params["group_id"], int):
raise InvalidParameterError("Bad parameter: group_id must be an int")
if "group_ids" in params and not isinstance(params["group_ids"], str):
raise InvalidParameterError("Bad parameter: group_ids must be an str")
if "imported_password_hash" in params and not isinstance(params["imported_password_hash"], str):
raise InvalidParameterError("Bad parameter: imported_password_hash must be an str")
if "password" in params and not isinstance(params["password"], str):
raise InvalidParameterError("Bad parameter: password must be an str")
if "password_confirmation" in params and not isinstance(params["password_confirmation"], str):
raise InvalidParameterError("Bad parameter: password_confirmation must be an str")
if "allowed_ips" in params and not isinstance(params["allowed_ips"], str):
raise InvalidParameterError("Bad parameter: allowed_ips must be an str")
if "authenticate_until" in params and not isinstance(params["authenticate_until"], str):
raise InvalidParameterError("Bad parameter: authenticate_until must be an str")
if "authentication_method" in params and not isinstance(params["authentication_method"], str):
raise InvalidParameterError("Bad parameter: authentication_method must be an str")
if "header_text" in params and not isinstance(params["header_text"], str):
raise InvalidParameterError("Bad parameter: header_text must be an str")
if "language" in params and not isinstance(params["language"], str):
raise InvalidParameterError("Bad parameter: language must be an str")
if "notification_daily_send_time" in params and not isinstance(params["notification_daily_send_time"], int):
raise InvalidParameterError("Bad parameter: notification_daily_send_time must be an int")
if "name" in params and not isinstance(params["name"], str):
raise InvalidParameterError("Bad parameter: name must be an str")
if "company" in params and not isinstance(params["company"], str):
raise InvalidParameterError("Bad parameter: company must be an str")
if "notes" in params and not isinstance(params["notes"], str):
raise InvalidParameterError("Bad parameter: notes must be an str")
if "password_validity_days" in params and not isinstance(params["password_validity_days"], int):
raise InvalidParameterError("Bad parameter: password_validity_days must be an int")
if "ssl_required" in params and not isinstance(params["ssl_required"], str):
raise InvalidParameterError("Bad parameter: ssl_required must be an str")
if "sso_strategy_id" in params and not isinstance(params["sso_strategy_id"], int):
raise InvalidParameterError("Bad parameter: sso_strategy_id must be an int")
if "require_2fa" in params and not isinstance(params["require_2fa"], str):
raise InvalidParameterError("Bad parameter: require_2fa must be an str")
if "time_zone" in params and not isinstance(params["time_zone"], str):
raise InvalidParameterError("Bad parameter: time_zone must be an str")
if "user_root" in params and not isinstance(params["user_root"], str):
raise InvalidParameterError("Bad parameter: user_root must be an str")
if "username" in params and not isinstance(params["username"], str):
raise InvalidParameterError("Bad parameter: username must be an str")
response, _options = Api.send_request("PATCH", "/users/{id}".format(id=params['id']), params, self.options)
return response.data
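    # A minimal usage sketch for the instance-level update above, assuming `user` is a User
    # object that has already been fetched; the attribute values shown are placeholders.
    #
    #   user.update({"name": "Jane Doe", "company": "Example Corp"})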
def delete(self, params = None):
if not isinstance(params, dict):
params = {}
if hasattr(self, "id") and self.id:
params['id'] = self.id
else:
raise MissingParameterError("Current object doesn't have a id")
if "id" not in params:
raise MissingParameterError("Parameter missing: id")
if "id" in params and not isinstance(params["id"], int):
raise InvalidParameterError("Bad parameter: id must be an int")
response, _options = Api.send_request("DELETE", "/users/{id}".format(id=params['id']), params, self.options)
return response.data
def destroy(self, params = None):
        return self.delete(params)
def save(self):
if hasattr(self, "id") and self.id:
self.update(self.get_attributes())
else:
new_obj = create(self.get_attributes(), self.options)
self.set_attributes(new_obj.get_attributes())
# Parameters:
# cursor - string - Used for pagination. Send a cursor value to resume an existing list from the point at which you left off. Get a cursor from an existing list via the X-Files-Cursor-Next header.
# per_page - int64 - Number of records to show per page. (Max: 10,000, 1,000 or less is recommended).
# sort_by - object - If set, sort records by the specified field in either 'asc' or 'desc' direction (e.g. sort_by[last_login_at]=desc). Valid fields are `authenticate_until`, `email`, `last_desktop_login_at`, `last_login_at`, `username`, `company`, `name`, `site_admin`, `receive_admin_alerts`, `password_validity_days`, `ssl_required` or `not_site_admin`.
# filter - object - If set, return records where the specified field is equal to the supplied value. Valid fields are `username`, `email`, `company`, `site_admin`, `password_validity_days`, `ssl_required`, `last_login_at`, `authenticate_until` or `not_site_admin`.
# filter_gt - object - If set, return records where the specified field is greater than the supplied value. Valid fields are `username`, `email`, `company`, `site_admin`, `password_validity_days`, `ssl_required`, `last_login_at`, `authenticate_until` or `not_site_admin`.
# filter_gteq - object - If set, return records where the specified field is greater than or equal to the supplied value. Valid fields are `username`, `email`, `company`, `site_admin`, `password_validity_days`, `ssl_required`, `last_login_at`, `authenticate_until` or `not_site_admin`.
# filter_like - object - If set, return records where the specified field is equal to the supplied value. Valid fields are `username`, `email`, `company`, `site_admin`, `password_validity_days`, `ssl_required`, `last_login_at`, `authenticate_until` or `not_site_admin`.
# filter_lt - object - If set, return records where the specified field is less than the supplied value. Valid fields are `username`, `email`, `company`, `site_admin`, `password_validity_days`, `ssl_required`, `last_login_at`, `authenticate_until` or `not_site_admin`.
# filter_lteq - object - If set, return records where the specified field is less than or equal to the supplied value. Valid fields are `username`, `email`, `company`, `site_admin`, `password_validity_days`, `ssl_required`, `last_login_at`, `authenticate_until` or `not_site_admin`.
# ids - string - comma-separated list of User IDs
# q[username] - string - List users matching username.
# q[email] - string - List users matching email.
# q[notes] - string - List users matching notes field.
# q[admin] - string - If `true`, list only admin users.
# q[allowed_ips] - string - If set, list only users with overridden allowed IP setting.
# q[password_validity_days] - string - If set, list only users with overridden password validity days setting.
# q[ssl_required] - string - If set, list only users with overridden SSL required setting.
# search - string - Searches for partial matches of name, username, or email.
def list(params = None, options = None):
if not isinstance(params, dict):
params = {}
if not isinstance(options, dict):
options = {}
if "cursor" in params and not isinstance(params["cursor"], str):
raise InvalidParameterError("Bad parameter: cursor must be an str")
if "per_page" in params and not isinstance(params["per_page"], int):
raise InvalidParameterError("Bad parameter: per_page must be an int")
if "sort_by" in params and not isinstance(params["sort_by"], dict):
raise InvalidParameterError("Bad parameter: sort_by must be an dict")
if "filter" in params and not isinstance(params["filter"], dict):
raise InvalidParameterError("Bad parameter: filter must be an dict")
if "filter_gt" in params and not isinstance(params["filter_gt"], dict):
raise InvalidParameterError("Bad parameter: filter_gt must be an dict")
if "filter_gteq" in params and not isinstance(params["filter_gteq"], dict):
raise InvalidParameterError("Bad parameter: filter_gteq must be an dict")
if "filter_like" in params and not isinstance(params["filter_like"], dict):
raise InvalidParameterError("Bad parameter: filter_like must be an dict")
if "filter_lt" in params and not isinstance(params["filter_lt"], dict):
raise InvalidParameterError("Bad parameter: filter_lt must be an dict")
if "filter_lteq" in params and not isinstance(params["filter_lteq"], dict):
raise InvalidParameterError("Bad parameter: filter_lteq must be an dict")
if "ids" in params and not isinstance(params["ids"], str):
raise InvalidParameterError("Bad parameter: ids must be an str")
if "search" in params and not isinstance(params["search"], str):
raise InvalidParameterError("Bad parameter: search must be an str")
return ListObj(User,"GET", "/users", params, options)
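# A minimal sketch of calling list() with pagination and a filter, assuming the returned
# ListObj is iterable; the per_page value and the company filter are placeholders.
#
#   for user in list({"per_page": 100, "filter": {"company": "Example Corp"}}):
#       print(user.username)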
def all(params = None, options = None):
    return list(params, options)
# Parameters:
# id (required) - int64 - User ID.
def find(id, params = None, options = None):
if not isinstance(params, dict):
params = {}
if not isinstance(options, dict):
options = {}
params["id"] = id
if "id" in params and not isinstance(params["id"], int):
raise InvalidParameterError("Bad parameter: id must be an int")
if "id" not in params:
raise MissingParameterError("Parameter missing: id")
response, options = Api.send_request("GET", "/users/{id}".format(id=params['id']), params, options)
return User(response.data, options)
def get(id, params = None, options = None):
    return find(id, params, options)
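# A minimal sketch of fetching a single user by ID with find()/get(); 1234 is a placeholder ID.
#
#   user = find(1234)
#   print(user.username)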
# Parameters:
# avatar_file - file - An image file for your user avatar.
# avatar_delete - boolean - If true, the avatar will be deleted.
# change_password - string - Used for changing a password on an existing user.
# change_password_confirmation - string - Optional, but if provided, we will ensure that it matches the value sent in `change_password`.
# email - string - User's email.
# grant_permission - string - Permission to grant on the user root. Can be blank or `full`, `read`, `write`, `list`, or `history`.
# group_id - int64 - Group ID to associate this user with.
# group_ids - string - A list of group ids to associate this user with. Comma delimited.
# imported_password_hash - string - Pre-calculated hash of the user's password. If supplied, this will be used to authenticate the user on first login. Supported hash methods are MD5, SHA1, and SHA256.
# password - string - User password.
# password_confirmation - string - Optional, but if provided, we will ensure that it matches the value sent in `password`.
# announcements_read - boolean - Signifies that the user has read all the announcements in the UI.
# allowed_ips - string - A list of allowed IPs if applicable. Newline delimited
# attachments_permission - boolean - DEPRECATED: Can the user create Bundles (aka Share Links)? Use the bundle permission instead.
# authenticate_until - string - Scheduled Date/Time at which user will be deactivated
# authentication_method - string - How is this user authenticated?
# billing_permission - boolean - Allow this user to perform operations on the account, payments, and invoices?
# bypass_inactive_disable - boolean - Exempt this user from being disabled based on inactivity?
# bypass_site_allowed_ips - boolean - Allow this user to skip site-wide IP blacklists?
# dav_permission - boolean - Can the user connect with WebDAV?
# disabled - boolean - Is user disabled? Disabled users cannot log in, and do not count for billing purposes. Users can be automatically disabled after an inactivity period via a Site setting.
# ftp_permission - boolean - Can the user access with FTP/FTPS?
# header_text - string - Text to display to the user in the header of the UI
# language - string - Preferred language
# notification_daily_send_time - int64 - Hour of the day at which daily notifications should be sent. Can be in range 0 to 23
# name - string - User's full name
# company - string - User's company
# notes - string - Any internal notes on the user
# office_integration_enabled - boolean - Enable integration with Office for the web?
# password_validity_days - int64 - Number of days to allow user to use the same password
# receive_admin_alerts - boolean - Should the user receive admin alerts such as certificate expiration notifications and overages?
# require_password_change - boolean - Is a password change required upon next user login?
# restapi_permission - boolean - Can this user access the REST API?
# self_managed - boolean - Does this user manage its own credentials or is it a shared/bot user?
# sftp_permission - boolean - Can the user access with SFTP?
# site_admin - boolean - Is the user an administrator for this site?
# skip_welcome_screen - boolean - Skip Welcome page in the UI?
# ssl_required - string - SSL required setting
# sso_strategy_id - int64 - SSO (Single Sign On) strategy ID for the user, if applicable.
# subscribe_to_newsletter - boolean - Is the user subscribed to the newsletter?
# require_2fa - string - 2FA required setting
# time_zone - string - User time zone
# user_root - string - Root folder for FTP (and optionally SFTP if the appropriate site-wide setting is set.) Note that this is not used for API, Desktop, or Web interface.
# username - string - User's username
def create(params = None, options = None):
if not isinstance(params, dict):
params = {}
if not isinstance(options, dict):
options = {}
if "change_password" in params and not isinstance(params["change_password"], str):
raise InvalidParameterError("Bad parameter: change_password must be an str")
if "change_password_confirmation" in params and not isinstance(params["change_password_confirmation"], str):
raise InvalidParameterError("Bad parameter: change_password_confirmation must be an str")
if "email" in params and not isinstance(params["email"], str):
raise InvalidParameterError("Bad parameter: email must be an str")
if "grant_permission" in params and not isinstance(params["grant_permission"], str):
raise InvalidParameterError("Bad parameter: grant_permission must be an str")
if "group_id" in params and not isinstance(params["group_id"], int):
raise InvalidParameterError("Bad parameter: group_id must be an int")
if "group_ids" in params and not isinstance(params["group_ids"], str):
raise InvalidParameterError("Bad parameter: group_ids must be an str")
if "imported_password_hash" in params and not isinstance(params["imported_password_hash"], str):
raise InvalidParameterError("Bad parameter: imported_password_hash must be an str")
if "password" in params and not isinstance(params["password"], str):
raise InvalidParameterError("Bad parameter: password must be an str")
if "password_confirmation" in params and not isinstance(params["password_confirmation"], str):
raise InvalidParameterError("Bad parameter: password_confirmation must be an str")
if "allowed_ips" in params and not isinstance(params["allowed_ips"], str):
raise InvalidParameterError("Bad parameter: allowed_ips must be an str")
if "authenticate_until" in params and not isinstance(params["authenticate_until"], str):
raise InvalidParameterError("Bad parameter: authenticate_until must be an str")
if "authentication_method" in params and not isinstance(params["authentication_method"], str):
raise InvalidParameterError("Bad parameter: authentication_method must be an str")
if "header_text" in params and not isinstance(params["header_text"], str):
raise InvalidParameterError("Bad parameter: header_text must be an str")
if "language" in params and not isinstance(params["language"], str):
raise InvalidParameterError("Bad parameter: language must be an str")
if "notification_daily_send_time" in params and not isinstance(params["notification_daily_send_time"], int):
raise InvalidParameterError("Bad parameter: notification_daily_send_time must be an int")
if "name" in params and not isinstance(params["name"], str):
raise InvalidParameterError("Bad parameter: name must be an str")
if "company" in params and not isinstance(params["company"], str):
raise InvalidParameterError("Bad parameter: company must be an str")
if "notes" in params and not isinstance(params["notes"], str):
raise InvalidParameterError("Bad parameter: notes must be an str")
if "password_validity_days" in params and not isinstance(params["password_validity_days"], int):
raise InvalidParameterError("Bad parameter: password_validity_days must be an int")
if "ssl_required" in params and not isinstance(params["ssl_required"], str):
raise InvalidParameterError("Bad parameter: ssl_required must be an str")
if "sso_strategy_id" in params and not isinstance(params["sso_strategy_id"], int):
raise InvalidParameterError("Bad parameter: sso_strategy_id must be an int")
if "require_2fa" in params and not isinstance(params["require_2fa"], str):
raise InvalidParameterError("Bad parameter: require_2fa must be an str")
if "time_zone" in params and not isinstance(params["time_zone"], str):
raise InvalidParameterError("Bad parameter: time_zone must be an str")
if "user_root" in params and not isinstance(params["user_root"], str):
raise InvalidParameterError("Bad parameter: user_root must be an str")
if "username" in params and not isinstance(params["username"], str):
raise InvalidParameterError("Bad parameter: username must be an str")
response, options = Api.send_request("POST", "/users", params, options)
return User(response.data, options)
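# A hypothetical sketch of creating a user with create(); every field value below is a
# placeholder and a real site may require additional fields.
#
#   new_user = create({"username": "jdoe", "email": "jdoe@example.com", "password": "a-strong-password"})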
# Unlock user who has been locked out due to failed logins
def unlock(id, params = None, options = None):
if not isinstance(params, dict):
params = {}
if not isinstance(options, dict):
options = {}
params["id"] = id
if "id" in params and not isinstance(params["id"], int):
raise InvalidParameterError("Bad parameter: id must be an int")
if "id" not in params:
raise MissingParameterError("Parameter missing: id")
response, _options = Api.send_request("POST", "/users/{id}/unlock".format(id=params['id']), params, options)
return response.data
# Resend user welcome email
def resend_welcome_email(id, params = None, options = None):
if not isinstance(params, dict):
params = {}
if not isinstance(options, dict):
options = {}
params["id"] = id
if "id" in params and not isinstance(params["id"], int):
raise InvalidParameterError("Bad parameter: id must be an int")
if "id" not in params:
raise MissingParameterError("Parameter missing: id")
response, _options = Api.send_request("POST", "/users/{id}/resend_welcome_email".format(id=params['id']), params, options)
return response.data
# Trigger 2FA Reset process for user who has lost access to their existing 2FA methods
def user_2fa_reset(id, params = None, options = None):
if not isinstance(params, dict):
params = {}
if not isinstance(options, dict):
options = {}
params["id"] = id
if "id" in params and not isinstance(params["id"], int):
raise InvalidParameterError("Bad parameter: id must be an int")
if "id" not in params:
raise MissingParameterError("Parameter missing: id")
response, _options = Api.send_request("POST", "/users/{id}/2fa/reset".format(id=params['id']), params, options)
return response.data
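# Hypothetical sketches of the admin actions above; 1234 is a placeholder user ID.
#
#   unlock(1234)
#   resend_welcome_email(1234)
#   user_2fa_reset(1234)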
# Parameters:
# avatar_file - file - An image file for your user avatar.
# avatar_delete - boolean - If true, the avatar will be deleted.
# change_password - string - Used for changing a password on an existing user.
# change_password_confirmation - string - Optional, but if provided, we will ensure that it matches the value sent in `change_password`.
# email - string - User's email.
# grant_permission - string - Permission to grant on the user root. Can be blank or `full`, `read`, `write`, `list`, or `history`.
# group_id - int64 - Group ID to associate this user with.
# group_ids - string - A list of group ids to associate this user with. Comma delimited.
# imported_password_hash - string - Pre-calculated hash of the user's password. If supplied, this will be used to authenticate the user on first login. Supported hash methods are MD5, SHA1, and SHA256.
# password - string - User password.
# password_confirmation - string - Optional, but if provided, we will ensure that it matches the value sent in `password`.
# announcements_read - boolean - Signifies that the user has read all the announcements in the UI.
# allowed_ips - string - A list of allowed IPs if applicable. Newline delimited
# attachments_permission - boolean - DEPRECATED: Can the user create Bundles (aka Share Links)? Use the bundle permission instead.
# authenticate_until - string - Scheduled Date/Time at which user will be deactivated
# authentication_method - string - How is this user authenticated?
# billing_permission - boolean - Allow this user to perform operations on the account, payments, and invoices?
# bypass_inactive_disable - boolean - Exempt this user from being disabled based on inactivity?
# bypass_site_allowed_ips - boolean - Allow this user to skip site-wide IP blacklists?
# dav_permission - boolean - Can the user connect with WebDAV?
# disabled - boolean - Is user disabled? Disabled users cannot log in, and do not count for billing purposes. Users can be automatically disabled after an inactivity period via a Site setting.
# ftp_permission - boolean - Can the user access with FTP/FTPS?
# header_text - string - Text to display to the user in the header of the UI
# language - string - Preferred language
# notification_daily_send_time - int64 - Hour of the day at which daily notifications should be sent. Can be in range 0 to 23
# name - string - User's full name
# company - string - User's company
# notes - string - Any internal notes on the user
# office_integration_enabled - boolean - Enable integration with Office for the web?
# password_validity_days - int64 - Number of days to allow user to use the same password
# receive_admin_alerts - boolean - Should the user receive admin alerts such as certificate expiration notifications and overages?
# require_password_change - boolean - Is a password change required upon next user login?
# restapi_permission - boolean - Can this user access the REST API?
# self_managed - boolean - Does this user manage its own credentials or is it a shared/bot user?
# sftp_permission - boolean - Can the user access with SFTP?
# site_admin - boolean - Is the user an administrator for this site?
# skip_welcome_screen - boolean - Skip Welcome page in the UI?
# ssl_required - string - SSL required setting
# sso_strategy_id - int64 - SSO (Single Sign On) strategy ID for the user, if applicable.
# subscribe_to_newsletter - boolean - Is the user subscribed to the newsletter?
# require_2fa - string - 2FA required setting
# time_zone - string - User time zone
# user_root - string - Root folder for FTP (and optionally SFTP if the appropriate site-wide setting is set.) Note that this is not used for API, Desktop, or Web interface.
# username - string - User's username
def update(id, params = None, options = None):
if not isinstance(params, dict):
params = {}
if not isinstance(options, dict):
options = {}
params["id"] = id
if "id" in params and not isinstance(params["id"], int):
raise InvalidParameterError("Bad parameter: id must be an int")
if "change_password" in params and not isinstance(params["change_password"], str):
raise InvalidParameterError("Bad parameter: change_password must be an str")
if "change_password_confirmation" in params and not isinstance(params["change_password_confirmation"], str):
raise InvalidParameterError("Bad parameter: change_password_confirmation must be an str")
if "email" in params and not isinstance(params["email"], str):
raise InvalidParameterError("Bad parameter: email must be an str")
if "grant_permission" in params and not isinstance(params["grant_permission"], str):
raise InvalidParameterError("Bad parameter: grant_permission must be an str")
if "group_id" in params and not isinstance(params["group_id"], int):
raise InvalidParameterError("Bad parameter: group_id must be an int")
if "group_ids" in params and not isinstance(params["group_ids"], str):
raise InvalidParameterError("Bad parameter: group_ids must be an str")
if "imported_password_hash" in params and not isinstance(params["imported_password_hash"], str):
raise InvalidParameterError("Bad parameter: imported_password_hash must be an str")
if "password" in params and not isinstance(params["password"], str):
raise InvalidParameterError("Bad parameter: password must be an str")
if "password_confirmation" in params and not isinstance(params["password_confirmation"], str):
raise InvalidParameterError("Bad parameter: password_confirmation must be an str")
if "allowed_ips" in params and not isinstance(params["allowed_ips"], str):
raise InvalidParameterError("Bad parameter: allowed_ips must be an str")
if "authenticate_until" in params and not isinstance(params["authenticate_until"], str):
raise InvalidParameterError("Bad parameter: authenticate_until must be an str")
if "authentication_method" in params and not isinstance(params["authentication_method"], str):
raise InvalidParameterError("Bad parameter: authentication_method must be an str")
if "header_text" in params and not isinstance(params["header_text"], str):
raise InvalidParameterError("Bad parameter: header_text must be an str")
if "language" in params and not isinstance(params["language"], str):
raise InvalidParameterError("Bad parameter: language must be an str")
if "notification_daily_send_time" in params and not isinstance(params["notification_daily_send_time"], int):
raise InvalidParameterError("Bad parameter: notification_daily_send_time must be an int")
if "name" in params and not isinstance(params["name"], str):
raise InvalidParameterError("Bad parameter: name must be an str")
if "company" in params and not isinstance(params["company"], str):
raise InvalidParameterError("Bad parameter: company must be an str")
if "notes" in params and not isinstance(params["notes"], str):
raise InvalidParameterError("Bad parameter: notes must be an str")
if "password_validity_days" in params and not isinstance(params["password_validity_days"], int):
raise InvalidParameterError("Bad parameter: password_validity_days must be an int")
if "ssl_required" in params and not isinstance(params["ssl_required"], str):
raise InvalidParameterError("Bad parameter: ssl_required must be an str")
if "sso_strategy_id" in params and not isinstance(params["sso_strategy_id"], int):
raise InvalidParameterError("Bad parameter: sso_strategy_id must be an int")
if "require_2fa" in params and not isinstance(params["require_2fa"], str):
raise InvalidParameterError("Bad parameter: require_2fa must be an str")
if "time_zone" in params and not isinstance(params["time_zone"], str):
raise InvalidParameterError("Bad parameter: time_zone must be an str")
if "user_root" in params and not isinstance(params["user_root"], str):
raise InvalidParameterError("Bad parameter: user_root must be an str")
if "username" in params and not isinstance(params["username"], str):
raise InvalidParameterError("Bad parameter: username must be an str")
if "id" not in params:
raise MissingParameterError("Parameter missing: id")
response, options = Api.send_request("PATCH", "/users/{id}".format(id=params['id']), params, options)
return User(response.data, options)
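# A minimal sketch of the id-based update(); the ID and the field value are placeholders.
#
#   updated = update(1234, {"require_password_change": True})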
def delete(id, params = None, options = None):
if not isinstance(params, dict):
params = {}
if not isinstance(options, dict):
options = {}
params["id"] = id
if "id" in params and not isinstance(params["id"], int):
raise InvalidParameterError("Bad parameter: id must be an int")
if "id" not in params:
raise MissingParameterError("Parameter missing: id")
response, _options = Api.send_request("DELETE", "/users/{id}".format(id=params['id']), params, options)
return response.data
def destroy(id, params = None, options = None):
    return delete(id, params, options)
def new(*args, **kwargs):
return User(*args, **kwargs) | 72.438944 | 359 | 0.70477 |
79423789704070c6a4de993938326a49817b4d08 | 3,173 | py | Python | cron_read_data_from_aggregate.py | HadiOfBBG/pegasusrises | 0a2df78eac955d3966030bdca1731bf0aa63510c | [
"Apache-2.0"
] | null | null | null | cron_read_data_from_aggregate.py | HadiOfBBG/pegasusrises | 0a2df78eac955d3966030bdca1731bf0aa63510c | [
"Apache-2.0"
] | null | null | null | cron_read_data_from_aggregate.py | HadiOfBBG/pegasusrises | 0a2df78eac955d3966030bdca1731bf0aa63510c | [
"Apache-2.0"
] | null | null | null | import csv
from collections import defaultdict
import StringIO
from google.appengine.ext import blobstore
from google.appengine.ext.webapp import blobstore_handlers
from jinja_template import JinjaTemplating
from google.appengine.ext import db
from google.appengine.api import memcache
from questions_details_from_google_sheets import QuestionsDetailsFromGoogleSheet
from models.pegasus_model import BbgDemoModel
from models.questions import Questions
from save_data_into_pegasus_db import SaveDataIntoPegasusDatabase
from urllib2 import Request, urlopen, URLError
import json
import ast
from xmltodict import *
import xmltodict
class CronToReadDataFromAggregate(JinjaTemplating):
def get(self):
self.response.out.write("Cron way of reading pegasus database")
return
# self.getFormIdsGeneratedByAggregate()
def post(self):
self.getFormIdsGeneratedByAggregate()
    # This function gets the ID of the form to retrieve data from and also calls the function that requests the data.
def getFormIdsGeneratedByAggregate(self):
        # Here we are supposed to query for all form IDs, then loop through them and request the IDs of the data submitted on each form.
        # For Pegasus A, it is most likely going to be one form.
# self.response.out.write('You are here to read data right?')
# return
form_id = 'pegasusDemoQuestionnaire'
num_of_form_ids = '1000'
self.getIdsOfDataSubmissions(form_id, num_of_form_ids)
return
def getIdsOfDataSubmissions(self, form_id,num_of_form_ids):
# uuid:64802bb2-383c-476d-a7aa-95db88bfb734
request = Request('https://pegasusodk.appspot.com/view/submissionList?formId=' + form_id + '&numEntries=' + num_of_form_ids)
try:
response = urlopen(request)
data_submissions = response.read()
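            # Parse the Aggregate XML response into a dict so the submission IDs can be read
            # from idChunk -> idList -> id below.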
converting_form_ids_in_xml_to_json = xmltodict.parse(data_submissions)
list_of_submissions_ids = converting_form_ids_in_xml_to_json['idChunk']['idList']['id']
# self.response.out.write(converting_form_ids_in_xml_to_json['idChunk']['idList']['id'])
# return
for submission_id in list_of_submissions_ids:
self.response.out.write(submission_id)
self.response.out.write("\n")
# return
# self.getDataSubmittedUsingSubmissionID(form_id, submission_id)
return
except URLError, e:
self.response.out.write('No submissions IDs retrieved. Got an error code:')
def getDataSubmittedUsingSubmissionID(self,form_id,submission_id):
request = Request('https://pegasusodk.appspot.com/formid[@version=null and @uiVersion=null]/topElement[@key=idvalue]')
try:
response = urlopen(request)
data_associated_with_submission_id = response.read()
json_form_of_data_submitted = xmltodict.parse(data_associated_with_submission_id)
self.response.out.write(json_form_of_data_submitted)
except URLError, e:
self.response.out.write('No submissions retrieved. Got an error code:')
| 37.329412 | 137 | 0.716987 |
794237b6ddd85963136bd45e3b6cf1b288f9a8d7 | 2,212 | py | Python | vspk/v6/fetchers/nucosremarkingpolicytables_fetcher.py | axxyhtrx/vspk-python | 4495882c6bcbb1ef51b14b9f4dc7efe46476ff50 | [
"BSD-3-Clause"
] | 19 | 2016-03-07T12:34:22.000Z | 2020-06-11T11:09:02.000Z | vspk/v6/fetchers/nucosremarkingpolicytables_fetcher.py | axxyhtrx/vspk-python | 4495882c6bcbb1ef51b14b9f4dc7efe46476ff50 | [
"BSD-3-Clause"
] | 40 | 2016-06-13T15:36:54.000Z | 2020-11-10T18:14:43.000Z | vspk/v6/fetchers/nucosremarkingpolicytables_fetcher.py | axxyhtrx/vspk-python | 4495882c6bcbb1ef51b14b9f4dc7efe46476ff50 | [
"BSD-3-Clause"
] | 15 | 2016-06-10T22:06:01.000Z | 2020-12-15T18:37:42.000Z | # -*- coding: utf-8 -*-
#
# Copyright (c) 2015, Alcatel-Lucent Inc, 2017 Nokia
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from bambou import NURESTFetcher
class NUCOSRemarkingPolicyTablesFetcher(NURESTFetcher):
""" Represents a NUCOSRemarkingPolicyTables fetcher
Notes:
This fetcher enables to fetch NUCOSRemarkingPolicyTable objects.
See:
bambou.NURESTFetcher
"""
@classmethod
def managed_class(cls):
""" Return NUCOSRemarkingPolicyTable class that is managed.
Returns:
.NUCOSRemarkingPolicyTable: the managed class
"""
from .. import NUCOSRemarkingPolicyTable
return NUCOSRemarkingPolicyTable
| 41.735849 | 86 | 0.738246 |
794238faa1c31f6b769a87f3377822b2f8827ddc | 55,195 | py | Python | tests/test_modeling_tf_common.py | wilcoln/transformers | 6331d4fe59e85840bb5693837e791f4caedcd53b | [
"Apache-2.0"
] | null | null | null | tests/test_modeling_tf_common.py | wilcoln/transformers | 6331d4fe59e85840bb5693837e791f4caedcd53b | [
"Apache-2.0"
] | null | null | null | tests/test_modeling_tf_common.py | wilcoln/transformers | 6331d4fe59e85840bb5693837e791f4caedcd53b | [
"Apache-2.0"
] | null | null | null | # coding=utf-8
# Copyright 2019 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import inspect
import os
import random
import tempfile
import unittest
from importlib import import_module
from typing import List, Tuple
from transformers import is_tf_available
from transformers.testing_utils import _tf_gpu_memory_limit, is_pt_tf_cross_test, require_tf, slow
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import (
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_MASKED_LM_MAPPING,
TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
TF_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING,
TF_MODEL_FOR_PRETRAINING_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
TFSharedEmbeddings,
tf_top_k_top_p_filtering,
)
if _tf_gpu_memory_limit is not None:
gpus = tf.config.list_physical_devices("GPU")
for gpu in gpus:
# Restrict TensorFlow to only allocate x GB of memory on the GPUs
try:
tf.config.set_logical_device_configuration(
gpu, [tf.config.LogicalDeviceConfiguration(memory_limit=_tf_gpu_memory_limit)]
)
logical_gpus = tf.config.list_logical_devices("GPU")
print("Logical GPUs", logical_gpus)
except RuntimeError as e:
# Virtual devices must be set before GPUs have been initialized
print(e)
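# Return a copy of the config with every "*_range" / "*_std" attribute set to zero so that
# weight initialization is deterministic in the tests below.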
def _config_zero_init(config):
configs_no_init = copy.deepcopy(config)
for key in configs_no_init.__dict__.keys():
if "_range" in key or "_std" in key:
setattr(configs_no_init, key, 0.0)
return configs_no_init
@require_tf
class TFModelTesterMixin:
model_tester = None
all_model_classes = ()
all_generative_model_classes = ()
test_resize_embeddings = True
is_encoder_decoder = False
def _prepare_for_class(self, inputs_dict, model_class, return_labels=False) -> dict:
inputs_dict = copy.deepcopy(inputs_dict)
if model_class in TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING.values():
inputs_dict = {
k: tf.tile(tf.expand_dims(v, 1), (1, self.model_tester.num_choices) + (1,) * (v.ndim - 1))
if isinstance(v, tf.Tensor) and v.ndim > 0
else v
for k, v in inputs_dict.items()
}
if return_labels:
if model_class in TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING.values():
inputs_dict["labels"] = tf.ones(self.model_tester.batch_size, dtype=tf.int32)
elif model_class in TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING.values():
inputs_dict["start_positions"] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32)
inputs_dict["end_positions"] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32)
elif model_class in TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING.values():
inputs_dict["labels"] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32)
elif model_class in TF_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING.values():
inputs_dict["next_sentence_label"] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32)
elif model_class in [
*TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING.values(),
*TF_MODEL_FOR_CAUSAL_LM_MAPPING.values(),
*TF_MODEL_FOR_MASKED_LM_MAPPING.values(),
*TF_MODEL_FOR_PRETRAINING_MAPPING.values(),
*TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING.values(),
]:
inputs_dict["labels"] = tf.zeros(
(self.model_tester.batch_size, self.model_tester.seq_length), dtype=tf.int32
)
return inputs_dict
def test_initialization(self):
pass
def test_save_load(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
model = model_class(config)
outputs = model(self._prepare_for_class(inputs_dict, model_class))
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(tmpdirname, saved_model=False)
model = model_class.from_pretrained(tmpdirname)
after_outputs = model(self._prepare_for_class(inputs_dict, model_class))
self.assert_outputs_same(after_outputs, outputs)
def test_graph_mode(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
inputs = self._prepare_for_class(inputs_dict, model_class)
model = model_class(config)
@tf.function
def run_in_graph_mode():
return model(inputs)
outputs = run_in_graph_mode()
self.assertIsNotNone(outputs)
def test_forward_signature(self):
config, _ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
model = model_class(config)
signature = inspect.signature(model.call)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
arg_names = [*signature.parameters.keys()]
if model.config.is_encoder_decoder:
expected_arg_names = [
"input_ids",
"attention_mask",
"decoder_input_ids",
"decoder_attention_mask",
]
expected_arg_names.extend(
["head_mask", "decoder_head_mask", "encoder_outputs"]
if "head_mask" and "decoder_head_mask" in arg_names
else ["encoder_outputs"]
)
self.assertListEqual(arg_names[: len(expected_arg_names)], expected_arg_names)
else:
expected_arg_names = ["input_ids"]
self.assertListEqual(arg_names[:1], expected_arg_names)
def test_saved_model_creation(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
config.output_hidden_states = False
config.output_attentions = False
if hasattr(config, "use_cache"):
config.use_cache = False
model_class = self.all_model_classes[0]
class_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
model = model_class(config)
model(class_inputs_dict)
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(tmpdirname, saved_model=True)
saved_model_dir = os.path.join(tmpdirname, "saved_model")
self.assertTrue(os.path.exists(saved_model_dir))
@slow
def test_saved_model_creation_extended(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
config.output_hidden_states = True
config.output_attentions = True
if hasattr(config, "use_cache"):
config.use_cache = True
for model_class in self.all_model_classes:
class_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
model = model_class(config)
model(class_inputs_dict)
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(tmpdirname, saved_model=True)
saved_model_dir = os.path.join(tmpdirname, "saved_model")
self.assertTrue(os.path.exists(saved_model_dir))
@slow
def test_saved_model_with_hidden_states_output(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
config.output_hidden_states = True
for model_class in self.all_model_classes:
class_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
            # A saved model is always executed in graph mode. Since PR #8777 was merged, the
            # booleans used in graph mode are always the ones from the config, so we update the
            # use_cache property, if it exists, to keep the booleans consistent with the inputs.
if "use_cache" in class_inputs_dict:
config.use_cache = class_inputs_dict.pop("use_cache")
model = model_class(config)
num_out = len(model(class_inputs_dict))
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(tmpdirname)
saved_model_dir = os.path.join(tmpdirname, "saved_model")
model = tf.keras.models.load_model(saved_model_dir)
outputs = model(class_inputs_dict)
if self.is_encoder_decoder:
output = outputs["encoder_hidden_states"] if isinstance(outputs, dict) else outputs[-1]
else:
output = outputs["hidden_states"] if isinstance(outputs, dict) else outputs[-1]
hidden_states = [t.numpy() for t in output]
self.assertEqual(len(outputs), num_out)
expected_num_layers = getattr(
self.model_tester, "expected_num_hidden_layers", self.model_tester.num_hidden_layers + 1
)
self.assertEqual(len(hidden_states), expected_num_layers)
self.assertListEqual(
list(hidden_states[0].shape[-2:]),
[self.model_tester.seq_length, self.model_tester.hidden_size],
)
@slow
def test_saved_model_with_attentions_output(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
config.output_attentions = True
encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", self.model_tester.seq_length)
encoder_key_length = getattr(self.model_tester, "key_length", encoder_seq_length)
for model_class in self.all_model_classes:
class_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
            # A saved model is always executed in graph mode. Since PR #8777 was merged, the
            # booleans used in graph mode are always the ones from the config, so we update the
            # use_cache property, if it exists, to keep the booleans consistent with the inputs.
if "use_cache" in class_inputs_dict:
config.use_cache = class_inputs_dict.pop("use_cache")
model = model_class(config)
num_out = len(model(class_inputs_dict))
with tempfile.TemporaryDirectory() as tmpdirname:
saved_model_dir = os.path.join(tmpdirname, "saved_model")
model.save_pretrained(saved_model_dir)
model = tf.keras.models.load_model(saved_model_dir)
outputs = model(class_inputs_dict)
if self.is_encoder_decoder:
output = outputs["encoder_attentions"] if isinstance(outputs, dict) else outputs[-1]
else:
output = outputs["attentions"] if isinstance(outputs, dict) else outputs[-1]
attentions = [t.numpy() for t in output]
self.assertEqual(len(outputs), num_out)
self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
self.assertListEqual(
list(attentions[0].shape[-3:]),
[self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length],
)
def test_keras_save_load(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
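        # Collect every keras-serializable *MainLayer class defined in the modules of the model
        # classes under test, so each main layer can be wrapped in a plain Keras model below.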
tf_main_layer_classes = set(
module_member
for model_class in self.all_model_classes
for module in (import_module(model_class.__module__),)
for module_member_name in dir(module)
if module_member_name.endswith("MainLayer")
for module_member in (getattr(module, module_member_name),)
if isinstance(module_member, type)
and tf.keras.layers.Layer in module_member.__bases__
and getattr(module_member, "_keras_serializable", False)
)
for main_layer_class in tf_main_layer_classes:
# T5MainLayer needs an embed_tokens parameter when called without the inputs_embeds parameter
if "T5" in main_layer_class.__name__:
                # Take the same values as in TFT5ModelTester for this shared layer
shared = TFSharedEmbeddings(99, 32, name="shared")
config.use_cache = inputs_dict.pop("use_cache", None)
main_layer = main_layer_class(config, embed_tokens=shared)
else:
main_layer = main_layer_class(config)
symbolic_inputs = {
name: tf.keras.Input(tensor.shape[1:], dtype=tensor.dtype) for name, tensor in inputs_dict.items()
}
model = tf.keras.Model(symbolic_inputs, outputs=main_layer(symbolic_inputs))
outputs = model(inputs_dict)
with tempfile.TemporaryDirectory() as tmpdirname:
filepath = os.path.join(tmpdirname, "keras_model.h5")
model.save(filepath)
if "T5" in main_layer_class.__name__:
model = tf.keras.models.load_model(
filepath,
custom_objects={
main_layer_class.__name__: main_layer_class,
"TFSharedEmbeddings": TFSharedEmbeddings,
},
)
else:
model = tf.keras.models.load_model(
filepath, custom_objects={main_layer_class.__name__: main_layer_class}
)
assert isinstance(model, tf.keras.Model)
after_outputs = model(inputs_dict)
self.assert_outputs_same(after_outputs, outputs)
def assert_outputs_same(self, after_outputs, outputs):
# Make sure we don't have nans
if isinstance(after_outputs, tf.Tensor):
out_1 = after_outputs.numpy()
elif isinstance(after_outputs, dict):
out_1 = after_outputs[list(after_outputs.keys())[0]].numpy()
else:
out_1 = after_outputs[0].numpy()
out_2 = outputs[0].numpy()
self.assertEqual(out_1.shape, out_2.shape)
out_1 = out_1[~np.isnan(out_1)]
out_2 = out_2[~np.isnan(out_2)]
max_diff = np.amax(np.abs(out_1 - out_2))
self.assertLessEqual(max_diff, 1e-5)
@is_pt_tf_cross_test
def test_pt_tf_model_equivalence(self):
import torch
import transformers
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
pt_model_class_name = model_class.__name__[2:] # Skip the "TF" at the beginning
pt_model_class = getattr(transformers, pt_model_class_name)
config.output_hidden_states = True
tf_model = model_class(config)
pt_model = pt_model_class(config)
# Check we can load pt model in tf and vice-versa with model => model functions
tf_model = transformers.load_pytorch_model_in_tf2_model(
tf_model, pt_model, tf_inputs=self._prepare_for_class(inputs_dict, model_class)
)
pt_model = transformers.load_tf2_model_in_pytorch_model(pt_model, tf_model)
            # Check predictions on first output (logits/hidden-states) are close enough given low-level computational differences
pt_model.eval()
pt_inputs_dict = {}
for name, key in self._prepare_for_class(inputs_dict, model_class).items():
if type(key) == bool:
pt_inputs_dict[name] = key
else:
pt_inputs_dict[name] = torch.from_numpy(key.numpy()).to(torch.long)
# need to rename encoder-decoder "inputs" for PyTorch
if "inputs" in pt_inputs_dict and self.is_encoder_decoder:
pt_inputs_dict["input_ids"] = pt_inputs_dict.pop("inputs")
with torch.no_grad():
pto = pt_model(**pt_inputs_dict)
tfo = tf_model(self._prepare_for_class(inputs_dict, model_class), training=False)
tf_hidden_states = tfo[0].numpy()
pt_hidden_states = pto[0].numpy()
tf_nans = np.copy(np.isnan(tf_hidden_states))
pt_nans = np.copy(np.isnan(pt_hidden_states))
pt_hidden_states[tf_nans] = 0
tf_hidden_states[tf_nans] = 0
pt_hidden_states[pt_nans] = 0
tf_hidden_states[pt_nans] = 0
max_diff = np.amax(np.abs(tf_hidden_states - pt_hidden_states))
self.assertLessEqual(max_diff, 4e-2)
# Check we can load pt model in tf and vice-versa with checkpoint => model functions
with tempfile.TemporaryDirectory() as tmpdirname:
pt_checkpoint_path = os.path.join(tmpdirname, "pt_model.bin")
torch.save(pt_model.state_dict(), pt_checkpoint_path)
tf_model = transformers.load_pytorch_checkpoint_in_tf2_model(tf_model, pt_checkpoint_path)
tf_checkpoint_path = os.path.join(tmpdirname, "tf_model.h5")
tf_model.save_weights(tf_checkpoint_path)
pt_model = transformers.load_tf2_checkpoint_in_pytorch_model(pt_model, tf_checkpoint_path)
            # Check predictions on first output (logits/hidden-states) are close enough given low-level computational differences
pt_model.eval()
pt_inputs_dict = {}
for name, key in self._prepare_for_class(inputs_dict, model_class).items():
if type(key) == bool:
key = np.array(key, dtype=bool)
pt_inputs_dict[name] = torch.from_numpy(key).to(torch.long)
else:
pt_inputs_dict[name] = torch.from_numpy(key.numpy()).to(torch.long)
# need to rename encoder-decoder "inputs" for PyTorch
if "inputs" in pt_inputs_dict and self.is_encoder_decoder:
pt_inputs_dict["input_ids"] = pt_inputs_dict.pop("inputs")
with torch.no_grad():
pto = pt_model(**pt_inputs_dict)
tfo = tf_model(self._prepare_for_class(inputs_dict, model_class))
tfo = tfo[0].numpy()
pto = pto[0].numpy()
tf_nans = np.copy(np.isnan(tfo))
pt_nans = np.copy(np.isnan(pto))
pto[tf_nans] = 0
tfo[tf_nans] = 0
pto[pt_nans] = 0
tfo[pt_nans] = 0
max_diff = np.amax(np.abs(tfo - pto))
self.assertLessEqual(max_diff, 4e-2)
def test_train_pipeline_custom_model(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
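        # Collect every keras-serializable *MainLayer class from the model modules, as in
        # test_keras_save_load above, so the main layer can be trained inside a custom Keras pipeline.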
tf_main_layer_classes = set(
module_member
for model_class in self.all_model_classes
for module in (import_module(model_class.__module__),)
for module_member_name in dir(module)
if module_member_name.endswith("MainLayer")
for module_member in (getattr(module, module_member_name),)
if isinstance(module_member, type)
and tf.keras.layers.Layer in module_member.__bases__
and getattr(module_member, "_keras_serializable", False)
)
for main_layer_class in tf_main_layer_classes:
# T5MainLayer needs an embed_tokens parameter when called without the inputs_embeds parameter
if "T5" in main_layer_class.__name__:
                # Take the same values as in TFT5ModelTester for this shared layer
shared = TFSharedEmbeddings(self.model_tester.vocab_size, self.model_tester.hidden_size, name="shared")
config.use_cache = False
main_layer = main_layer_class(config, embed_tokens=shared)
del inputs_dict["use_cache"]
else:
main_layer = main_layer_class(config)
symbolic_inputs = {
name: tf.keras.Input(tensor.shape[1:], dtype=tensor.dtype) for name, tensor in inputs_dict.items()
}
if hasattr(self.model_tester, "num_labels"):
num_labels = self.model_tester.num_labels
else:
num_labels = 2
X = tf.data.Dataset.from_tensor_slices(
(inputs_dict, np.ones((self.model_tester.batch_size, self.model_tester.seq_length, num_labels, 1)))
).batch(1)
hidden_states = main_layer(symbolic_inputs)[0]
outputs = tf.keras.layers.Dense(num_labels, activation="softmax", name="outputs")(hidden_states)
model = tf.keras.models.Model(inputs=symbolic_inputs, outputs=[outputs])
model.compile(loss="binary_crossentropy", optimizer="adam", metrics=["binary_accuracy"])
model.fit(X, epochs=1)
with tempfile.TemporaryDirectory() as tmpdirname:
filepath = os.path.join(tmpdirname, "keras_model.h5")
model.save(filepath)
if "T5" in main_layer_class.__name__:
model = tf.keras.models.load_model(
filepath,
custom_objects={
main_layer_class.__name__: main_layer_class,
"TFSharedEmbeddings": TFSharedEmbeddings,
},
)
else:
model = tf.keras.models.load_model(
filepath, custom_objects={main_layer_class.__name__: main_layer_class}
)
assert isinstance(model, tf.keras.Model)
model(inputs_dict)
def test_compile_tf_model(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
max_input = getattr(self.model_tester, "max_position_embeddings", 512)
optimizer = tf.keras.optimizers.Adam(learning_rate=3e-5, epsilon=1e-08, clipnorm=1.0)
loss = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)
metric = tf.keras.metrics.SparseCategoricalAccuracy("accuracy")
for model_class in self.all_model_classes:
if self.is_encoder_decoder:
input_ids = {
"decoder_input_ids": tf.keras.Input(
batch_shape=(2, max_input),
name="decoder_input_ids",
dtype="int32",
),
"input_ids": tf.keras.Input(batch_shape=(2, max_input), name="input_ids", dtype="int32"),
}
elif model_class in TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING.values():
input_ids = tf.keras.Input(batch_shape=(4, 2, max_input), name="input_ids", dtype="int32")
else:
input_ids = tf.keras.Input(batch_shape=(2, max_input), name="input_ids", dtype="int32")
# Prepare our model
model = model_class(config)
model(self._prepare_for_class(inputs_dict, model_class)) # Model must be called before saving.
# Let's load it from the disk to be sure we can use pretrained weights
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(tmpdirname, saved_model=False)
model = model_class.from_pretrained(tmpdirname)
outputs_dict = model(input_ids)
hidden_states = outputs_dict[0]
# Add a dense layer on top to test integration with other keras modules
outputs = tf.keras.layers.Dense(2, activation="softmax", name="outputs")(hidden_states)
# Compile extended model
extended_model = tf.keras.Model(inputs=[input_ids], outputs=[outputs])
extended_model.compile(optimizer=optimizer, loss=loss, metrics=[metric])
def test_keyword_and_dict_args(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
model = model_class(config)
inputs = self._prepare_for_class(inputs_dict, model_class)
outputs_dict = model(inputs)
inputs_keywords = copy.deepcopy(self._prepare_for_class(inputs_dict, model_class))
input_ids = inputs_keywords.pop("input_ids", None)
outputs_keywords = model(input_ids, **inputs_keywords)
output_dict = outputs_dict[0].numpy()
output_keywords = outputs_keywords[0].numpy()
self.assertLess(np.sum(np.abs(output_dict - output_keywords)), 1e-6)
def test_attention_outputs(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
config.return_dict = True
decoder_seq_length = getattr(self.model_tester, "decoder_seq_length", self.model_tester.seq_length)
encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", self.model_tester.seq_length)
decoder_key_length = getattr(self.model_tester, "key_length", decoder_seq_length)
encoder_key_length = getattr(self.model_tester, "key_length", encoder_seq_length)
def check_decoder_attentions_output(outputs):
out_len = len(outputs)
self.assertEqual(out_len % 2, 0)
decoder_attentions = outputs.decoder_attentions
self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers)
self.assertListEqual(
list(decoder_attentions[0].shape[-3:]),
[self.model_tester.num_attention_heads, decoder_seq_length, decoder_key_length],
)
def check_encoder_attentions_output(outputs):
attentions = [
t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions)
]
self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
self.assertListEqual(
list(attentions[0].shape[-3:]),
[self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length],
)
for model_class in self.all_model_classes:
inputs_dict["output_attentions"] = True
inputs_dict["use_cache"] = False
config.output_hidden_states = False
model = model_class(config)
outputs = model(self._prepare_for_class(inputs_dict, model_class))
out_len = len(outputs)
self.assertEqual(config.output_hidden_states, False)
check_encoder_attentions_output(outputs)
if self.is_encoder_decoder:
model = model_class(config)
outputs = model(self._prepare_for_class(inputs_dict, model_class))
self.assertEqual(config.output_hidden_states, False)
check_decoder_attentions_output(outputs)
# Check that output attentions can also be changed via the config
del inputs_dict["output_attentions"]
config.output_attentions = True
model = model_class(config)
outputs = model(self._prepare_for_class(inputs_dict, model_class))
self.assertEqual(config.output_hidden_states, False)
check_encoder_attentions_output(outputs)
# Check attention is always last and order is fine
inputs_dict["output_attentions"] = True
config.output_hidden_states = True
model = model_class(config)
outputs = model(self._prepare_for_class(inputs_dict, model_class))
self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1), len(outputs))
self.assertEqual(model.config.output_hidden_states, True)
check_encoder_attentions_output(outputs)
def test_hidden_states_output(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
def check_hidden_states_output(config, inputs_dict, model_class):
model = model_class(config)
outputs = model(self._prepare_for_class(inputs_dict, model_class))
expected_num_layers = getattr(
self.model_tester, "expected_num_hidden_layers", self.model_tester.num_hidden_layers + 1
)
if model.config.is_encoder_decoder:
encoder_hidden_states = outputs.encoder_hidden_states
decoder_hidden_states = outputs.decoder_hidden_states
self.assertEqual(config.output_attentions, False)
self.assertEqual(len(encoder_hidden_states), expected_num_layers)
self.assertListEqual(
list(encoder_hidden_states[0].shape[-2:]),
[self.model_tester.seq_length, self.model_tester.hidden_size],
)
self.assertEqual(len(decoder_hidden_states), expected_num_layers)
self.assertListEqual(
list(decoder_hidden_states[0].shape[-2:]),
[self.model_tester.seq_length, self.model_tester.hidden_size],
)
else:
hidden_states = outputs.hidden_states
self.assertEqual(config.output_attentions, False)
self.assertEqual(len(hidden_states), expected_num_layers)
self.assertListEqual(
list(hidden_states[0].shape[-2:]),
[self.model_tester.seq_length, self.model_tester.hidden_size],
)
for model_class in self.all_model_classes:
inputs_dict["output_hidden_states"] = True
check_hidden_states_output(config, inputs_dict, model_class)
del inputs_dict["output_hidden_states"]
config.output_hidden_states = True
check_hidden_states_output(config, inputs_dict, model_class)
def test_model_common_attributes(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
list_lm_models = (
list(TF_MODEL_FOR_CAUSAL_LM_MAPPING.values())
+ list(TF_MODEL_FOR_MASKED_LM_MAPPING.values())
+ list(TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING.values())
)
for model_class in self.all_model_classes:
model = model_class(config)
assert isinstance(model.get_input_embeddings(), tf.keras.layers.Layer)
if model_class in list_lm_models:
x = model.get_output_embeddings()
assert isinstance(x, tf.keras.layers.Layer)
name = model.get_bias()
assert isinstance(name, dict)
for k, v in name.items():
assert isinstance(v, tf.Variable)
else:
x = model.get_output_embeddings()
assert x is None
name = model.get_bias()
assert name is None
def test_determinism(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
model = model_class(config)
first, second = (
model(self._prepare_for_class(inputs_dict, model_class), training=False)[0],
model(self._prepare_for_class(inputs_dict, model_class), training=False)[0],
)
out_1 = first.numpy()
out_2 = second.numpy()
out_1 = out_1[~np.isnan(out_1)]
out_2 = out_2[~np.isnan(out_2)]
max_diff = np.amax(np.abs(out_1 - out_2))
self.assertLessEqual(max_diff, 1e-5)
def test_model_outputs_equivalence(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
def check_equivalence(model, tuple_inputs, dict_inputs, additional_kwargs={}):
tuple_output = model(tuple_inputs, return_dict=False, **additional_kwargs)
dict_output = model(dict_inputs, return_dict=True, **additional_kwargs).to_tuple()
def recursive_check(tuple_object, dict_object):
if isinstance(tuple_object, (List, Tuple)):
for tuple_iterable_value, dict_iterable_value in zip(tuple_object, dict_object):
recursive_check(tuple_iterable_value, dict_iterable_value)
elif tuple_object is None:
return
else:
self.assertTrue(
all(tf.equal(tuple_object, dict_object)),
msg=f"Tuple and dict output are not equal. Difference: {tf.math.reduce_max(tf.abs(tuple_object - dict_object))}",
)
recursive_check(tuple_output, dict_output)
for model_class in self.all_model_classes:
model = model_class(config)
tuple_inputs = self._prepare_for_class(inputs_dict, model_class)
dict_inputs = self._prepare_for_class(inputs_dict, model_class)
check_equivalence(model, tuple_inputs, dict_inputs)
tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
check_equivalence(model, tuple_inputs, dict_inputs)
tuple_inputs = self._prepare_for_class(inputs_dict, model_class)
dict_inputs = self._prepare_for_class(inputs_dict, model_class)
check_equivalence(model, tuple_inputs, dict_inputs, {"output_hidden_states": True})
tuple_inputs = self._prepare_for_class(inputs_dict, model_class)
dict_inputs = self._prepare_for_class(inputs_dict, model_class)
check_equivalence(model, tuple_inputs, dict_inputs, {"output_attentions": True})
tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
check_equivalence(model, tuple_inputs, dict_inputs, {"output_hidden_states": True})
tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
check_equivalence(model, tuple_inputs, dict_inputs, {"output_attentions": True})
tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
check_equivalence(
model, tuple_inputs, dict_inputs, {"output_hidden_states": True, "output_attentions": True}
)
def _get_embeds(self, wte, input_ids):
# ^^ In our TF models, the input_embeddings can take slightly different forms,
# so we try a few of them.
# We used to fall back to just synthetically creating a dummy tensor of ones:
try:
x = wte(input_ids, mode="embedding")
except Exception:
try:
x = wte([input_ids], mode="embedding")
except Exception:
try:
x = wte([input_ids, None, None, None], mode="embedding")
except Exception:
if hasattr(self.model_tester, "embedding_size"):
x = tf.ones(
input_ids.shape + [self.model_tester.embedding_size],
dtype=tf.dtypes.float32,
)
else:
x = tf.ones(
input_ids.shape + [self.model_tester.hidden_size],
dtype=tf.dtypes.float32,
)
return x
def test_inputs_embeds(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
model = model_class(config)
inputs = copy.deepcopy(self._prepare_for_class(inputs_dict, model_class))
if not self.is_encoder_decoder:
input_ids = inputs["input_ids"]
del inputs["input_ids"]
else:
encoder_input_ids = inputs["input_ids"]
decoder_input_ids = inputs.get("decoder_input_ids", encoder_input_ids)
del inputs["input_ids"]
inputs.pop("decoder_input_ids", None)
wte = model.get_input_embeddings()
if not self.is_encoder_decoder:
inputs["inputs_embeds"] = self._get_embeds(wte, input_ids)
else:
inputs["inputs_embeds"] = self._get_embeds(wte, encoder_input_ids)
inputs["decoder_inputs_embeds"] = self._get_embeds(wte, decoder_input_ids)
model(inputs)
def test_numpy_arrays_inputs(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
def prepare_numpy_arrays(inputs_dict):
inputs_np_dict = {}
for k, v in inputs_dict.items():
if tf.is_tensor(v):
inputs_np_dict[k] = v.numpy()
else:
inputs_np_dict[k] = np.array(k)
return inputs_np_dict
for model_class in self.all_model_classes:
model = model_class(config)
inputs = self._prepare_for_class(inputs_dict, model_class)
inputs_np = prepare_numpy_arrays(inputs)
model(inputs_np)
def test_resize_token_embeddings(self):
if not self.test_resize_embeddings:
return
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
def _get_word_embedding_weight(model, embedding_layer):
if hasattr(embedding_layer, "word_embeddings"):
return embedding_layer.word_embeddings
elif hasattr(embedding_layer, "weight"):
return embedding_layer.weight
elif hasattr(embedding_layer, "decoder"):
return embedding_layer.decoder
else:
# Here we build the word embedding weights if they don't exist yet.
# Then we retry getting the attribute once it has been built.
model(model.dummy_inputs)
if hasattr(embedding_layer, "word_embeddings"):
return embedding_layer.word_embeddings
elif hasattr(embedding_layer, "weight"):
return embedding_layer.weight
elif hasattr(embedding_layer, "decoder"):
return embedding_layer.decoder
else:
return None
for model_class in self.all_model_classes:
for size in [config.vocab_size - 10, config.vocab_size + 10, None]:
# build the embeddings
model = model_class(config=config)
old_input_embeddings = _get_word_embedding_weight(model, model.get_input_embeddings())
old_bias = model.get_bias()
old_output_embeddings = _get_word_embedding_weight(model, model.get_output_embeddings())
# reshape the embeddings
model.resize_token_embeddings(size)
new_input_embeddings = _get_word_embedding_weight(model, model.get_input_embeddings())
new_bias = model.get_bias()
new_output_embeddings = _get_word_embedding_weight(model, model.get_output_embeddings())
# check that the resized embeddings size matches the desired size.
assert_size = size if size is not None else config.vocab_size
self.assertEqual(new_input_embeddings.shape[0], assert_size)
# check that weights remain the same after resizing
models_equal = True
for p1, p2 in zip(old_input_embeddings.value(), new_input_embeddings.value()):
if tf.math.reduce_sum(tf.math.abs(p1 - p2)) > 0:
models_equal = False
self.assertTrue(models_equal)
if old_bias is not None and new_bias is not None:
for old_weight, new_weight in zip(old_bias.values(), new_bias.values()):
self.assertEqual(new_weight.shape[0], assert_size)
models_equal = True
for p1, p2 in zip(old_weight.value(), new_weight.value()):
if tf.math.reduce_sum(tf.math.abs(p1 - p2)) > 0:
models_equal = False
self.assertTrue(models_equal)
if old_output_embeddings is not None and new_output_embeddings is not None:
self.assertEqual(new_output_embeddings.shape[0], assert_size)
self.assertEqual(new_output_embeddings.shape[1], old_output_embeddings.shape[1])
models_equal = True
for p1, p2 in zip(old_output_embeddings.value(), new_output_embeddings.value()):
if tf.math.reduce_sum(tf.math.abs(p1 - p2)) > 0:
models_equal = False
self.assertTrue(models_equal)
def test_lm_head_model_random_no_beam_search_generate(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
input_ids = inputs_dict["input_ids"]
# iterate over all generative models
for model_class in self.all_generative_model_classes:
model = model_class(config)
if config.bos_token_id is None:
# if bos token id is not defined, the model needs input_ids
with self.assertRaises(AssertionError):
model.generate(do_sample=True, max_length=5)
# num_return_sequences = 1
self._check_generated_ids(model.generate(input_ids, do_sample=True))
else:
# num_return_sequences = 1
self._check_generated_ids(model.generate(do_sample=True, max_length=5))
with self.assertRaises(AssertionError):
# generating multiple sequences with greedy search (no sampling, no beams)
# is not allowed, as it would always generate the same sequences
model.generate(input_ids, do_sample=False, num_return_sequences=2)
# num_return_sequences > 1, sample
self._check_generated_ids(model.generate(input_ids, do_sample=True, num_return_sequences=2))
# check bad words tokens language generation
# create a list of bad words with one token and a list with two tokens
bad_words_ids = [self._generate_random_bad_tokens(1, model), self._generate_random_bad_tokens(2, model)]
output_tokens = model.generate(
input_ids, do_sample=True, bad_words_ids=bad_words_ids, num_return_sequences=2
)
# only count generated tokens
generated_ids = output_tokens[:, input_ids.shape[-1] :]
self.assertFalse(self._check_match_tokens(generated_ids.numpy().tolist(), bad_words_ids))
def test_lm_head_model_random_beam_search_generate(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
input_ids = inputs_dict["input_ids"]
for model_class in self.all_generative_model_classes:
model = model_class(config)
if config.bos_token_id is None:
# if bos token id is not defined, the model needs input_ids, num_return_sequences = 1
self._check_generated_ids(model.generate(input_ids, do_sample=True, num_beams=2))
else:
# num_return_sequences = 1
self._check_generated_ids(model.generate(do_sample=True, max_length=5, num_beams=2))
with self.assertRaises(AssertionError):
# generating more return sequences than there are beams is not possible
model.generate(input_ids, do_sample=False, num_return_sequences=3, num_beams=2)
# num_return_sequences > 1, sample
self._check_generated_ids(
model.generate(
input_ids,
do_sample=True,
num_beams=2,
num_return_sequences=2,
)
)
# num_return_sequences > 1, greedy
self._check_generated_ids(model.generate(input_ids, do_sample=False, num_beams=2, num_return_sequences=2))
# check bad words tokens language generation
# create a list of bad words with one token and a list with two tokens
bad_words_ids = [self._generate_random_bad_tokens(1, model), self._generate_random_bad_tokens(2, model)]
output_tokens = model.generate(
input_ids, do_sample=False, bad_words_ids=bad_words_ids, num_beams=2, num_return_sequences=2
)
# only count generated tokens
generated_ids = output_tokens[:, input_ids.shape[-1] :]
self.assertFalse(self._check_match_tokens(generated_ids.numpy().tolist(), bad_words_ids))
def test_loss_computation(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
model = model_class(config)
if getattr(model, "compute_loss", None):
# The number of elements in the loss should be the same as the number of elements in the label
prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True)
added_label = prepared_for_class[
sorted(list(prepared_for_class.keys() - inputs_dict.keys()), reverse=True)[0]
]
loss_size = tf.size(added_label)
if model.__class__ in TF_MODEL_FOR_CAUSAL_LM_MAPPING.values():
# if the loss is the causal lm loss, labels are shifted, so that one label per batch
# is cut
loss_size = loss_size - self.model_tester.batch_size
# Test that the model correctly computes the loss with kwargs
prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True)
input_ids = prepared_for_class.pop("input_ids")
loss = model(input_ids, **prepared_for_class)[0]
self.assertEqual(loss.shape, [loss_size])
# Test that the model correctly computes the loss with a dict
prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True)
loss = model(prepared_for_class)[0]
self.assertEqual(loss.shape, [loss_size])
# Test that the model correctly computes the loss with a tuple
prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True)
# Get keys that were added with the _prepare_for_class function
label_keys = prepared_for_class.keys() - inputs_dict.keys()
signature = inspect.signature(model.call).parameters
signature_names = list(signature.keys())
# Create a dictionary holding the location of the tensors in the tuple
tuple_index_mapping = {0: "input_ids"}
for label_key in label_keys:
label_key_index = signature_names.index(label_key)
tuple_index_mapping[label_key_index] = label_key
sorted_tuple_index_mapping = sorted(tuple_index_mapping.items())
# Initialize a list with their default values, update the values and convert to a tuple
list_input = []
for name in signature_names:
if name != "kwargs":
list_input.append(signature[name].default)
for index, value in sorted_tuple_index_mapping:
list_input[index] = prepared_for_class[value]
tuple_input = tuple(list_input)
# Send to model
loss = model(tuple_input[:-1])[0]
self.assertEqual(loss.shape, [loss_size])
def _generate_random_bad_tokens(self, num_bad_tokens, model):
# special tokens cannot be bad tokens
special_tokens = []
if model.config.bos_token_id is not None:
special_tokens.append(model.config.bos_token_id)
if model.config.pad_token_id is not None:
special_tokens.append(model.config.pad_token_id)
if model.config.eos_token_id is not None:
special_tokens.append(model.config.eos_token_id)
# create random bad tokens that are not special tokens
bad_tokens = []
while len(bad_tokens) < num_bad_tokens:
token = tf.squeeze(ids_tensor((1, 1), self.model_tester.vocab_size), 0).numpy()[0]
if token not in special_tokens:
bad_tokens.append(token)
return bad_tokens
def _check_generated_ids(self, output_ids):
for token_id in output_ids[0].numpy().tolist():
self.assertGreaterEqual(token_id, 0)
self.assertLess(token_id, self.model_tester.vocab_size)
def _check_match_tokens(self, generated_ids, bad_words_ids):
# for all bad word tokens
for bad_word_ids in bad_words_ids:
# for all slices in batch
for generated_ids_slice in generated_ids:
# for all word idx
for i in range(len(bad_word_ids), len(generated_ids_slice)):
# if tokens match
if generated_ids_slice[i - len(bad_word_ids) : i] == bad_word_ids:
return True
return False
def ids_tensor(shape, vocab_size, rng=None, name=None, dtype=None):
"""Creates a random int32 tensor of the shape within the vocab size."""
if rng is None:
rng = random.Random()
total_dims = 1
for dim in shape:
total_dims *= dim
values = []
for _ in range(total_dims):
values.append(rng.randint(0, vocab_size - 1))
output = tf.constant(values, shape=shape, dtype=dtype if dtype is not None else tf.int32)
return output
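# NOTE (added example, not part of the original test utilities): a minimal sketch of how
# `ids_tensor` above can be used to build a fake batch of token ids; the shape and vocab
# size below are arbitrary illustration values.
def _example_ids_tensor_usage():
    # batch of 2 sequences, 7 tokens each, ids drawn from a vocabulary of size 99
    return ids_tensor((2, 7), vocab_size=99)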
@require_tf
class UtilsFunctionsTest(unittest.TestCase):
# tests whether the top_k_top_p_filtering function behaves as expected
def test_top_k_top_p_filtering(self):
logits = tf.convert_to_tensor(
[
[
8.2220991, # 3rd highest value; idx. 0
-0.5620044,
5.23229752,
4.0386393,
-6.8798378,
-0.54785802,
-3.2012153,
2.92777176,
1.88171953,
7.35341276, # 5th highest value; idx. 9
8.43207833, # 2nd highest value; idx. 10
-9.85711836,
-5.96209236,
-1.13039161,
-7.1115294,
-0.8369633,
-5.3186408,
7.06427407,
0.81369344,
-0.82023817,
-5.9179796,
0.58813443,
-6.99778438,
4.71551189,
-0.18771637,
7.44020759, # 4th highest value; idx. 25
9.38450987, # 1st highest value; idx. 26
2.12662941,
-9.32562038,
2.35652522,
], # cumulative prob of 5 highest values <= 0.6
[
0.58425518,
4.53139238,
-5.57510464,
-6.28030699,
-7.19529503,
-4.02122551,
1.39337037,
-6.06707057,
1.59480517,
-9.643119,
0.03907799,
0.67231762,
-8.88206726,
6.27115922, # 4th highest value; idx. 13
2.28520723,
4.82767506,
4.30421368,
8.8275313, # 2nd highest value; idx. 17
5.44029958, # 5th highest value; idx. 18
-4.4735794,
7.38579536, # 3rd highest value; idx. 20
-2.91051663,
2.61946077,
-2.5674762,
-9.48959302,
-4.02922645,
-1.35416918,
9.67702323, # 1st highest value; idx. 27
-5.89478553,
1.85370467,
], # cumulative prob of 5 highest values <= 0.6
],
dtype=tf.float32,
)
non_inf_expected_idx = tf.convert_to_tensor(
[[0, 0], [0, 9], [0, 10], [0, 25], [0, 26], [1, 13], [1, 17], [1, 18], [1, 20], [1, 27]],
dtype=tf.int32,
) # expected non filtered idx as noted above
non_inf_expected_output = tf.convert_to_tensor(
[8.222099, 7.3534126, 8.432078, 7.4402075, 9.38451, 6.271159, 8.827531, 5.4402995, 7.3857956, 9.677023],
dtype=tf.float32,
) # expected non filtered values as noted above
output = tf_top_k_top_p_filtering(logits, top_k=10, top_p=0.6, min_tokens_to_keep=4)
non_inf_output = output[output != -float("inf")]
non_inf_idx = tf.cast(
tf.where(tf.not_equal(output, tf.constant(-float("inf"), dtype=tf.float32))),
dtype=tf.int32,
)
tf.debugging.assert_near(non_inf_output, non_inf_expected_output, rtol=1e-12)
tf.debugging.assert_equal(non_inf_idx, non_inf_expected_idx)
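# NOTE (added illustration, not part of the original test file): a minimal sketch of the
# filtering call exercised above, applied to a small random logits batch.
def _example_top_k_top_p():
    logits = tf.random.uniform((1, 20), minval=-5.0, maxval=5.0)
    return tf_top_k_top_p_filtering(logits, top_k=5, top_p=0.9, min_tokens_to_keep=1)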
| 46.460438 | 137 | 0.615726 |
79423937c6050f53dbd09a0b249ad9f0ca672f6d | 2,463 | py | Python | pygenetic/Statistics.py | QuailAutomation/pygenetic | 93b0240a1942b882df30b53d856a87becca1d7ec | [
"MIT"
] | 2 | 2020-05-30T05:13:37.000Z | 2021-03-15T19:54:28.000Z | pygenetic/Statistics.py | QuailAutomation/pygenetic | 93b0240a1942b882df30b53d856a87becca1d7ec | [
"MIT"
] | 1 | 2021-06-19T20:30:25.000Z | 2021-06-19T20:30:25.000Z | pygenetic/Statistics.py | QuailAutomation/pygenetic | 93b0240a1942b882df30b53d856a87becca1d7ec | [
"MIT"
] | 2 | 2020-08-02T20:52:50.000Z | 2021-02-07T15:52:15.000Z | import matplotlib
import os
if 'TRAVIS' in os.environ:
print('Warning: no DISPLAY environment variable found. Using matplotlib non-interactive Agg backend')
matplotlib.use('Agg')
import matplotlib.pyplot as plt
class Statistics:
"""
Class to generate Statistics on operation of Genetic Algorithm
Instance Members:
-----------------
statistic_dict : A dictionary storing different statistics mapped to list storing each generation data
Stats stored are best-fitness, worst-fitness, avg-fitness, diversity and mutation-rate
"""
def __init__(self):
self.statistic_dict = {'best-fitness':[],'worst-fitness':[],'avg-fitness':[],'diversity':[],'mutation_rate':[]}
def add_statistic(self,statistic,value):
"""
Appends a value to specified statistic, usually called after each iteration
Parameters :
------------
statistic : The statistic for which the value is relevant and is to be appended
value : The value to be appended
"""
if statistic in self.statistic_dict:
self.statistic_dict[statistic].append(value)
else:
self.statistic_dict[statistic] = [value]
def plot(self):
"""
Generates a line graph for each statistic to display change over iterations
"""
fig,ax = plt.subplots()
ax.set_xlabel('Generation')
ax.set_ylabel('Statistic')
for statistic in self.statistic_dict:
ax.plot(range(1,len(self.statistic_dict[statistic])+1),self.statistic_dict[statistic],label=statistic)
fig.legend(loc='upper left')
return fig
def plot_statistics(self,statistics):
"""
Generates a line graph for list of specified statistics to display change over iterations
Parameters :
----------
statistics : A list of statistic names whose variation is to be shown
"""
fig,ax = plt.subplots()
ax.set_xlabel('Generation')
ax.set_ylabel('Statistic')
for statistic in statistics:
ax.plot(range(1,len(self.statistic_dict[statistic])+1),self.statistic_dict[statistic],label=statistic)
fig.legend(loc='upper left')
return fig
def plot_statistic(self,statistic):
"""
Generates a line graph for specified statistic to display change over iterations
Parameters :
----------
statistic : The statistic name whose variation is to be shown
"""
fig,ax = plt.subplots()
ax.set_xlabel('Generation')
ax.set_ylabel('Statistic')
ax.plot(range(1,len(self.statistic_dict[statistic])+1),self.statistic_dict[statistic],label=statistic)
fig.legend(loc='upper left')
return fig
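# Example (added sketch, not part of the original module): typical use inside a GA loop;
# the fitness numbers below are made up purely for illustration.
def _example_usage():
    stats = Statistics()
    for best, worst in [(3.0, 0.5), (3.4, 0.9), (3.9, 1.2)]:
        stats.add_statistic('best-fitness', best)
        stats.add_statistic('worst-fitness', worst)
    return stats.plot_statistics(['best-fitness', 'worst-fitness'])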
| 29.321429 | 113 | 0.725132 |
79423995f1a3d63eb992419866556d65dfa1230a | 13,981 | py | Python | tests/test_binder_file_field.py | MLewiDev/django-binder | 3f55f18169ed5536305f7c0c2e257962346d30f4 | [
"MIT"
] | null | null | null | tests/test_binder_file_field.py | MLewiDev/django-binder | 3f55f18169ed5536305f7c0c2e257962346d30f4 | [
"MIT"
] | null | null | null | tests/test_binder_file_field.py | MLewiDev/django-binder | 3f55f18169ed5536305f7c0c2e257962346d30f4 | [
"MIT"
] | null | null | null | from os.path import basename
from io import BytesIO
from PIL import Image
from tempfile import NamedTemporaryFile
from django.test import TestCase, Client
from django.core.files.base import ContentFile
from django.core.files.uploadedfile import SimpleUploadedFile
from django.contrib.auth.models import User
from django.db import connection
from binder.json import jsonloads
from .testapp.models import Zoo
from .utils import temp_imagefile
JPG_CONTENT = b'\xff\xd8\xff\xe0\x00\x10JFIF\x00\x01\x01\x00\x00\x01\x00\x01\x00\x00\xff\xdb\x00C\x00\x08\x06\x06\x07\x06\x05\x08\x07\x07\x07\t\t\x08\n\x0c\x14\r\x0c\x0b\x0b\x0c\x19\x12\x13\x0f\x14\x1d\x1a\x1f\x1e\x1d\x1a\x1c\x1c $.\' ",#\x1c\x1c(7),01444\x1f\'9=82<.342\xff\xdb\x00C\x01\t\t\t\x0c\x0b\x0c\x18\r\r\x182!\x1c!22222222222222222222222222222222222222222222222222\xff\xc0\x00\x11\x08\x00\x01\x00\x01\x03\x01"\x00\x02\x11\x01\x03\x11\x01\xff\xc4\x00\x1f\x00\x00\x01\x05\x01\x01\x01\x01\x01\x01\x00\x00\x00\x00\x00\x00\x00\x00\x01\x02\x03\x04\x05\x06\x07\x08\t\n\x0b\xff\xc4\x00\xb5\x10\x00\x02\x01\x03\x03\x02\x04\x03\x05\x05\x04\x04\x00\x00\x01}\x01\x02\x03\x00\x04\x11\x05\x12!1A\x06\x13Qa\x07"q\x142\x81\x91\xa1\x08#B\xb1\xc1\x15R\xd1\xf0$3br\x82\t\n\x16\x17\x18\x19\x1a%&\'()*456789:CDEFGHIJSTUVWXYZcdefghijstuvwxyz\x83\x84\x85\x86\x87\x88\x89\x8a\x92\x93\x94\x95\x96\x97\x98\x99\x9a\xa2\xa3\xa4\xa5\xa6\xa7\xa8\xa9\xaa\xb2\xb3\xb4\xb5\xb6\xb7\xb8\xb9\xba\xc2\xc3\xc4\xc5\xc6\xc7\xc8\xc9\xca\xd2\xd3\xd4\xd5\xd6\xd7\xd8\xd9\xda\xe1\xe2\xe3\xe4\xe5\xe6\xe7\xe8\xe9\xea\xf1\xf2\xf3\xf4\xf5\xf6\xf7\xf8\xf9\xfa\xff\xc4\x00\x1f\x01\x00\x03\x01\x01\x01\x01\x01\x01\x01\x01\x01\x00\x00\x00\x00\x00\x00\x01\x02\x03\x04\x05\x06\x07\x08\t\n\x0b\xff\xc4\x00\xb5\x11\x00\x02\x01\x02\x04\x04\x03\x04\x07\x05\x04\x04\x00\x01\x02w\x00\x01\x02\x03\x11\x04\x05!1\x06\x12AQ\x07aq\x13"2\x81\x08\x14B\x91\xa1\xb1\xc1\t#3R\xf0\x15br\xd1\n\x16$4\xe1%\xf1\x17\x18\x19\x1a&\'()*56789:CDEFGHIJSTUVWXYZcdefghijstuvwxyz\x82\x83\x84\x85\x86\x87\x88\x89\x8a\x92\x93\x94\x95\x96\x97\x98\x99\x9a\xa2\xa3\xa4\xa5\xa6\xa7\xa8\xa9\xaa\xb2\xb3\xb4\xb5\xb6\xb7\xb8\xb9\xba\xc2\xc3\xc4\xc5\xc6\xc7\xc8\xc9\xca\xd2\xd3\xd4\xd5\xd6\xd7\xd8\xd9\xda\xe2\xe3\xe4\xe5\xe6\xe7\xe8\xe9\xea\xf2\xf3\xf4\xf5\xf6\xf7\xf8\xf9\xfa\xff\xda\x00\x0c\x03\x01\x00\x02\x11\x03\x11\x00?\x00\xb5E\x14W\xc6\x9eq\xff\xd9'
PNG_CONTENT = b'\x89PNG\r\n\x1a\n\x00\x00\x00\rIHDR\x00\x00\x00\x01\x00\x00\x00\x01\x08\x02\x00\x00\x00\x90wS\xde\x00\x00\x00\x0cIDATx\x9cc0*K\x01\x00\x01\xea\x01\r1\x93\xfe`\x00\x00\x00\x00IEND\xaeB`\x82'
JPG_HASH = '7f6262521ea97a0dca86703b5fc90d648303f877'
PNG_HASH = '1888ce8ba1019738482c8dc3e30bea871b4e47e7'
class BinderFileFieldTest(TestCase):
def setUp(self):
super().setUp()
u = User(username='testuser', is_active=True, is_superuser=True)
u.set_password('test')
u.save()
self.client = Client()
r = self.client.login(username='testuser', password='test')
self.assertTrue(r)
def test_save(self):
zoo = Zoo(name='Apenheul')
zoo.binder_picture = ContentFile(JPG_CONTENT, name='pic.jpg')
self.assertEqual(zoo.binder_picture.content_type, 'image/jpeg')
self.assertEqual(zoo.binder_picture.content_hash, JPG_HASH)
zoo.save()
zoo2 = Zoo.objects.get(pk=zoo.pk)
self.assertEqual(zoo2.binder_picture.content_type, 'image/jpeg')
self.assertEqual(zoo2.binder_picture.content_hash, JPG_HASH)
def test_post(self):
filename = 'pic.jpg'
zoo = Zoo(name='Apenheul')
zoo.save()
response = self.client.post('/zoo/%s/binder_picture/' % zoo.id, data={
'file': ContentFile(JPG_CONTENT, name=filename),
})
self.assertEqual(response.status_code, 200)
content = jsonloads(response.content)
# Remove once Django 3 lands with: https://docs.djangoproject.com/en/3.1/howto/custom-file-storage/#django.core.files.storage.get_alternative_name
zoo.refresh_from_db()
filename = basename(zoo.binder_picture.name) # Without folders foo/bar/
self.assertEqual(
content['data']['binder_picture'],
'/zoo/{}/binder_picture/?h={}&content_type=image/jpeg&filename={}'.format(zoo.pk, JPG_HASH, filename),
)
response = self.client.get('/zoo/{}/'.format(zoo.pk))
self.assertEqual(response.status_code, 200)
data = jsonloads(response.content)
self.assertEqual(
data['data']['binder_picture'],
'/zoo/{}/binder_picture/?h={}&content_type=image/jpeg&filename={}'.format(zoo.pk, JPG_HASH, filename),
)
def test_post_no_extension(self):
filename = 'foobar'
zoo = Zoo(name='Apenheul')
zoo.save()
response = self.client.post('/zoo/%s/binder_picture/' % zoo.id, data={
'file': ContentFile('foobar', name=filename),
})
self.assertEqual(response.status_code, 400)
# # Remove once Django 3 lands with: https://docs.djangoproject.com/en/3.1/howto/custom-file-storage/#django.core.files.storage.get_alternative_name
# zoo.refresh_from_db()
# filename = basename(zoo.binder_picture.name) # Without folders foo/bar/
#
# self.assertEqual(
# content['data']['binder_picture'],
# '/zoo/{}/binder_picture/?h={}&content_type=&filename={}'.format(zoo.pk, HASH, filename),
# )
#
# response = self.client.get('/zoo/{}/'.format(zoo.pk))
#
# self.assertEqual(response.status_code, 200)
# data = jsonloads(response.content)
# self.assertEqual(
# data['data']['binder_picture'],
# '/zoo/{}/binder_picture/?h={}&content_type=&filename={}'.format(zoo.pk, HASH, filename),
# )
def test_post_with_long_filename(self):
filename = 'this_is_an_extremely_long_filename_which_should_be_over_200_chars_but_under_400_and_im_running_out_of_things_to_say_and_i_guess_we_just_keep_going_and_im_now_in_poznan_working_onsite_perhaps_thats_interesting_and_just_ordered_pizza_for_lunch.jpg'
zoo = Zoo(name='Apenheul')
zoo.save()
response = self.client.post('/zoo/%s/binder_picture/' % zoo.id, data={
'file': ContentFile(JPG_CONTENT, name=filename),
})
self.assertEqual(response.status_code, 200)
content = jsonloads(response.content)
# Remove once Django 3 lands with: https://docs.djangoproject.com/en/3.1/howto/custom-file-storage/#django.core.files.storage.get_alternative_name
zoo.refresh_from_db()
filename = basename(zoo.binder_picture.name) # Without folders foo/bar/
self.assertEqual(
content['data']['binder_picture'],
'/zoo/{}/binder_picture/?h={}&content_type=image/jpeg&filename={}'.format(zoo.pk, JPG_HASH, filename),
)
response = self.client.get('/zoo/{}/'.format(zoo.pk))
self.assertEqual(response.status_code, 200)
data = jsonloads(response.content)
self.assertEqual(
data['data']['binder_picture'],
'/zoo/{}/binder_picture/?h={}&content_type=image/jpeg&filename={}'.format(zoo.pk, JPG_HASH, filename),
)
def test_get(self):
filename = 'pic.jpg'
zoo = Zoo(name='Apenheul')
zoo.binder_picture = ContentFile(JPG_CONTENT, name=filename)
zoo.save()
response = self.client.get('/zoo/{}/'.format(zoo.pk))
self.assertEqual(response.status_code, 200)
data = jsonloads(response.content)
# Remove once Django 3 lands with: https://docs.djangoproject.com/en/3.1/howto/custom-file-storage/#django.core.files.storage.get_alternative_name
zoo.refresh_from_db()
filename = basename(zoo.binder_picture.name) # Without folders foo/bar/
self.assertEqual(
data['data']['binder_picture'],
'/zoo/{}/binder_picture/?h={}&content_type=image/jpeg&filename={}'.format(zoo.pk, JPG_HASH, filename),
)
def test_setting_blank(self):
zoo = Zoo(name='Apenheul')
zoo.binder_picture = ''
zoo.save()
response = self.client.get('/zoo/{}/'.format(zoo.pk))
self.assertEqual(response.status_code, 200)
data = jsonloads(response.content)
self.assertIsNone(data['data']['binder_picture'])
def test_upgrade_from_normal_file_field_with_existing_data(self):
filename = 'pic.jpg'
zoo = Zoo(name='Apenheul')
zoo.save()
with open(filename, 'wb+') as file:
file.write(JPG_CONTENT)
with connection.cursor() as cur:
# Update db directly to mimic existing records.
cur.execute("UPDATE {} set binder_picture='{}'".format(zoo._meta.db_table, file.name))
response = self.client.get('/zoo/{}/'.format(zoo.pk))
self.assertEqual(response.status_code, 200)
data = jsonloads(response.content)
# Remove once Django 3 lands with: https://docs.djangoproject.com/en/3.1/howto/custom-file-storage/#django.core.files.storage.get_alternative_name
zoo.refresh_from_db()
filename = zoo.binder_picture.name
self.assertEqual(
data['data']['binder_picture'],
'/zoo/{}/binder_picture/?h={}&content_type=image/jpeg&filename={}'.format(zoo.pk, JPG_HASH, filename),
)
def test_reusing_same_file_for_multiple_fields(self):
with BytesIO() as bytesio:
im = Image.new('RGBA', (50,100))
im.save(bytesio, format='png')
bytesio.seek(0)
test_image = SimpleUploadedFile('test.png', bytesio.read())
zoo1 = Zoo(name='Apenheul', django_picture=test_image)
zoo1.save()
zoo2 = Zoo(name='Apenheul', django_picture=test_image)
zoo2.save()
zoo3 = Zoo(name='Apenheul', binder_picture=test_image)
zoo3.save()
zoo4 = Zoo(name='Apenheul', binder_picture=test_image)
zoo4.save()
# I've seen this happen a few times, where a file exists in the db but not on disk.
def test_non_existing_file_on_disk(self):
zoo = Zoo(name='Apenheul')
zoo.save()
with connection.cursor() as cur:
# Update db directly to mimic record without existing file
cur.execute("UPDATE {} set binder_picture='non-exisiting-pic.jpg'".format(zoo._meta.db_table))
response = self.client.get('/zoo/{}/'.format(zoo.pk))
self.assertEqual(response.status_code, 200)
data = jsonloads(response.content)
self.assertEqual(
data['data']['binder_picture'],
'/zoo/{}/binder_picture/?h={}&content_type=image/jpeg&filename={}'.format(zoo.pk, '', 'non-exisiting-pic.jpg'),
)
def test_post_image_doesnt_leave_unclosed_file(self):
zoo = Zoo(name='Apenheul')
zoo.save()
# This construction of assertRaises wrapped around assertWarns is here to make sure
# that no warning is triggered. It works because assertWarns raises an AssertionError
# when the expected warning does not occur. Basically a `self.assertNotWarns`.
with self.assertRaises(AssertionError) as cm:
with self.assertWarns(ResourceWarning) as cm2:
response = self.client.post('/zoo/%s/binder_picture_custom_extensions/' % zoo.id, data={
'file': ContentFile(PNG_CONTENT, name='foobar.png'),
})
self.assertEqual(str(cm.exception), 'ResourceWarning not triggered')
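# NOTE (added illustration, not part of the original test suite): the assertRaises/assertWarns
# nesting above is effectively a hand-rolled "assertNotWarns". A reusable helper could look
# like this sketch; the name and signature are hypothetical.
def assert_not_warns(test_case, warning_cls, func, *args, **kwargs):
    import warnings
    with warnings.catch_warnings(record=True) as caught:
        warnings.simplefilter('always')
        result = func(*args, **kwargs)
    test_case.assertFalse(
        any(issubclass(w.category, warning_cls) for w in caught),
        'unexpected {} triggered'.format(warning_cls.__name__),
    )
    return result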
class BinderFileFieldBlankNotNullableTest(TestCase):
def setUp(self):
super().setUp()
u = User(username='testuser', is_active=True, is_superuser=True)
u.set_password('test')
u.save()
self.client = Client()
r = self.client.login(username='testuser', password='test')
self.assertTrue(r)
def test_setting_blank(self):
zoo = Zoo(name='Apenheul')
zoo.django_picture_not_null = ''
zoo.binder_picture_not_null = ''
zoo.save()
response = self.client.get('/zoo/{}/'.format(zoo.pk))
self.assertEqual(response.status_code, 200)
data = jsonloads(response.content)
self.assertIsNone(data['data']['django_picture_not_null'])
self.assertIsNone(data['data']['binder_picture_not_null'])
# When a file field is blank=True and null=False, Django will convert the
# None to empty string.
def test_deleting(self):
zoo = Zoo(name='Apenheul')
zoo.django_picture_not_null = ContentFile(JPG_CONTENT, name='pic.jpg')
zoo.binder_picture_not_null = ContentFile(JPG_CONTENT, name='pic.jpg')
zoo.save()
zoo.django_picture_not_null.delete()
zoo.binder_picture_not_null.delete()
zoo.refresh_from_db()
self.assertEqual('', zoo.django_picture_not_null)
self.assertEqual('', zoo.binder_picture_not_null)
class BinderFileFieldAllowedExtensionTest(TestCase):
def setUp(self):
super().setUp()
u = User(username='testuser', is_active=True, is_superuser=True)
u.set_password('test')
u.save()
self.client = Client()
r = self.client.login(username='testuser', password='test')
self.assertTrue(r)
def test_post_allowed_extension_fail(self):
zoo = Zoo(name='Apenheul')
zoo.save()
response = self.client.post('/zoo/%s/binder_picture_custom_extensions/' % zoo.id, data={
'file': ContentFile(JPG_CONTENT, name='foobar.jpg'),
})
self.assertEqual(response.status_code, 400)
def test_post_without_extension_fails(self):
zoo = Zoo(name='Apenheul')
zoo.save()
response = self.client.post('/zoo/%s/binder_picture_custom_extensions/' % zoo.id, data={
'file': ContentFile(PNG_CONTENT, name='foobar'),
})
self.assertEqual(response.status_code, 400)
content = jsonloads(response.content)
self.assertEqual(content['code'], 'FileTypeIncorrect')
self.assertEqual(content['allowed_types'], [{"extension": "png"}])
def test_post_allowed_extension_success(self):
for filename in ['foobar.png', 'foobar.PNG', 'foobar.Png', 'foobar.pNg', 'foobar.pnG']:
with self.subTest(filename=filename):
zoo = Zoo(name='Apenheul')
zoo.save()
response = self.client.post('/zoo/%s/binder_picture_custom_extensions/' % zoo.id, data={
'file': ContentFile(PNG_CONTENT, name=filename),
})
self.assertEqual(response.status_code, 200)
content = jsonloads(response.content)
# Remove once Django 3 lands with: https://docs.djangoproject.com/en/3.1/howto/custom-file-storage/#django.core.files.storage.get_alternative_name
zoo.refresh_from_db()
filename = basename(zoo.binder_picture_custom_extensions.name) # Without folders foo/bar/
self.assertEqual(
content['data']['binder_picture_custom_extensions'],
'/zoo/{}/binder_picture_custom_extensions/?h={}&content_type=image/png&filename={}'.format(zoo.pk, PNG_HASH, filename),
)
response = self.client.get('/zoo/{}/'.format(zoo.pk))
self.assertEqual(response.status_code, 200)
data = jsonloads(response.content)
self.assertEqual(
data['data']['binder_picture_custom_extensions'],
'/zoo/{}/binder_picture_custom_extensions/?h={}&content_type=image/png&filename={}'.format(zoo.pk, PNG_HASH, filename),
)
| 41.859281 | 1,876 | 0.733066 |
794239d23f13b3f3024830568a3d632d3b5fed1f | 3,883 | py | Python | docker/pythonpath_dev/superset_config.py | 7vikpeculiar/superset | 800ced5e257d5d83d6dbe4ced0e7318ac40d026f | [
"Apache-2.0"
] | 2 | 2021-12-21T15:57:16.000Z | 2022-01-31T02:22:02.000Z | docker/pythonpath_dev/superset_config.py | 7vikpeculiar/superset | 800ced5e257d5d83d6dbe4ced0e7318ac40d026f | [
"Apache-2.0"
] | 10 | 2022-01-05T01:31:07.000Z | 2022-03-16T01:09:46.000Z | docker/pythonpath_dev/superset_config.py | 7vikpeculiar/superset | 800ced5e257d5d83d6dbe4ced0e7318ac40d026f | [
"Apache-2.0"
] | 2 | 2021-12-21T13:41:18.000Z | 2021-12-26T22:16:43.000Z | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
# This file is included in the final Docker image and SHOULD be overridden when
# deploying the image to prod. Settings configured here are intended for use in local
# development environments. Also note that superset_config_docker.py is imported
# as a final step as a means to override "defaults" configured here
#
import logging
import os
from datetime import timedelta
from typing import Optional
from cachelib.file import FileSystemCache
from celery.schedules import crontab
logger = logging.getLogger()
def get_env_variable(var_name: str, default: Optional[str] = None) -> str:
"""Get the environment variable or raise exception."""
try:
return os.environ[var_name]
except KeyError:
if default is not None:
return default
else:
error_msg = "The environment variable {} was missing, abort...".format(
var_name
)
raise EnvironmentError(error_msg)
DATABASE_DIALECT = get_env_variable("DATABASE_DIALECT")
DATABASE_USER = get_env_variable("DATABASE_USER")
DATABASE_PASSWORD = get_env_variable("DATABASE_PASSWORD")
DATABASE_HOST = get_env_variable("DATABASE_HOST")
DATABASE_PORT = get_env_variable("DATABASE_PORT")
DATABASE_DB = get_env_variable("DATABASE_DB")
# The SQLAlchemy connection string.
SQLALCHEMY_DATABASE_URI = "%s://%s:%s@%s:%s/%s" % (
DATABASE_DIALECT,
DATABASE_USER,
DATABASE_PASSWORD,
DATABASE_HOST,
DATABASE_PORT,
DATABASE_DB,
)
REDIS_HOST = get_env_variable("REDIS_HOST")
REDIS_PORT = get_env_variable("REDIS_PORT")
REDIS_CELERY_DB = get_env_variable("REDIS_CELERY_DB", "0")
REDIS_RESULTS_DB = get_env_variable("REDIS_RESULTS_DB", "1")
RESULTS_BACKEND = FileSystemCache("/app/superset_home/sqllab")
class CeleryConfig(object):
BROKER_URL = f"redis://{REDIS_HOST}:{REDIS_PORT}/{REDIS_CELERY_DB}"
CELERY_IMPORTS = ("superset.sql_lab",)
CELERY_RESULT_BACKEND = f"redis://{REDIS_HOST}:{REDIS_PORT}/{REDIS_RESULTS_DB}"
CELERYD_LOG_LEVEL = "DEBUG"
CELERYD_PREFETCH_MULTIPLIER = 1
CELERY_ACKS_LATE = False
CELERYBEAT_SCHEDULE = {
"reports.scheduler": {
"task": "reports.scheduler",
"schedule": crontab(minute="*", hour="*"),
},
"reports.prune_log": {
"task": "reports.prune_log",
"schedule": crontab(minute=10, hour=0),
},
}
CELERY_CONFIG = CeleryConfig
FEATURE_FLAGS = {"ALERT_REPORTS": True}
ALERT_REPORTS_NOTIFICATION_DRY_RUN = True
WEBDRIVER_BASEURL = "http://superset:8088/"
# The base URL for the email report hyperlinks.
WEBDRIVER_BASEURL_USER_FRIENDLY = WEBDRIVER_BASEURL
SQLLAB_CTAS_NO_LIMIT = True
#
# Optionally import superset_config_docker.py (which will have been included on
# the PYTHONPATH) in order to allow for local settings to be overridden
#
try:
import superset_config_docker
from superset_config_docker import * # noqa
logger.info(
f"Loaded your Docker configuration at " f"[{superset_config_docker.__file__}]"
)
except ImportError:
logger.info("Using default Docker config...")
| 33.765217 | 86 | 0.731136 |
79423affaf4f467800c3768d6919e02a95c6b675 | 1,471 | py | Python | concrete_settings/contrib/sources/yaml_source.py | coordt/concrete-settings | b444c3f1f8cdbe30135c1978876215e04ebc7622 | [
"MIT"
] | 5 | 2020-04-25T12:18:33.000Z | 2021-03-26T18:51:33.000Z | concrete_settings/contrib/sources/yaml_source.py | coordt/concrete-settings | b444c3f1f8cdbe30135c1978876215e04ebc7622 | [
"MIT"
] | 13 | 2019-03-20T10:42:39.000Z | 2021-07-07T08:01:05.000Z | concrete_settings/contrib/sources/yaml_source.py | coordt/concrete-settings | b444c3f1f8cdbe30135c1978876215e04ebc7622 | [
"MIT"
] | 3 | 2020-04-25T08:53:29.000Z | 2021-07-06T19:15:52.000Z | from typing import Any, Tuple, Union, Type
from concrete_settings.exceptions import ConcreteSettingsError
from concrete_settings.sources import FileSource, register_source, NotFound
@register_source
class YamlSource(FileSource):
extensions = ['.yml', '.yaml']
def __init__(self, path):
try:
import yaml # noqa: F401 # imported but unused
except ImportError as e:
raise ConcreteSettingsError(
f'YAML source is not available for `{path}` '
'due to error importing `yaml` package.\n'
'Perhaps you have forgotten to install PyYAML?'
) from e
super().__init__(path)
self._data = None
def read(self, setting, parents: Tuple[str, ...] = ()) -> Union[Type[NotFound], Any]:
if self._data is None:
self._data = self._read_file(self.path)
d = self._data
for key in parents:
d = d[key]
val = d.get(setting.name, NotFound)
return val
@staticmethod
def _read_file(path):
import yaml
try:
with open(path) as f:
raw_data = f.read()
return yaml.safe_load(raw_data) or {}
except FileNotFoundError as e:
raise ConcreteSettingsError(f"Source file {path} was not found") from e
except yaml.YAMLError as e:
raise ConcreteSettingsError(f"Error parsing YAML from {path}: {e}") from e
| 31.297872 | 89 | 0.598912 |
79423b25723d7790a6454c86821d0f4225d6f44f | 4,928 | py | Python | dialite/_base.py | flexxui/dialite | 9a62d34c0b8eb7b61b527bcf82e7ba0e4808d866 | [
"BSD-2-Clause"
] | 13 | 2018-02-26T14:28:57.000Z | 2022-03-22T16:22:31.000Z | dialite/_base.py | flexxui/dialite | 9a62d34c0b8eb7b61b527bcf82e7ba0e4808d866 | [
"BSD-2-Clause"
] | 4 | 2018-10-29T19:44:44.000Z | 2020-09-07T12:11:14.000Z | dialite/_base.py | flexxui/dialite | 9a62d34c0b8eb7b61b527bcf82e7ba0e4808d866 | [
"BSD-2-Clause"
] | 4 | 2018-08-09T09:56:05.000Z | 2020-06-08T06:10:20.000Z | from __future__ import absolute_import, division, print_function
import os
import subprocess
import sys
import time
import webbrowser
from . import logger
if sys.version_info < (3,): # pragma: no cover
input = raw_input # noqa
class BaseApp(object):
"""The base app class. Acts as a placeholder to define the API
that subclasses must implement.
"""
def works(self):
raise NotImplementedError() # to test whether the app actually works
def fail(self, title, message):
raise NotImplementedError()
def warn(self, title, message):
raise NotImplementedError()
def inform(self, title, message):
raise NotImplementedError()
def ask_ok(self, title, message):
raise NotImplementedError()
def ask_retry(self, title, message):
raise NotImplementedError()
def ask_yesno(self, title, message):
raise NotImplementedError()
class TerminalApp(BaseApp):
"""An application classes that uses input()."""
def works(self):
return hastty()
def fail(self, title, message):
logger.error("%s: %s" % (title, message))
def warn(self, title, message):
logger.warning("%s: %s" % (title, message))
def inform(self, title, message):
logger.info("%s: %s" % (title, message))
def ask_ok(self, title, message):
text = "%s: %s" % (title, message)
text += "\nconfirm ([y]/n)? "
return self._ask_yes_no(text)
def ask_retry(self, title, message):
text = "%s: %s" % (title, message)
text += "\nretry ([y]/n)? "
return self._ask_yes_no(text)
def ask_yesno(self, title, message):
text = "%s: %s" % (title, message)
text += "\nanswer ([y]/n)? "
return self._ask_yes_no(text)
def _ask_yes_no(self, text, default="y"):
while True:
res = input(text) or default
if res.lower() in ("y", "yes"):
return True
elif res.lower() in ("n", "no"):
return False
else:
print("invalid answer")
class StubApp(BaseApp):
"""A stub application class for platforms that we do not support, and
where no tty is available. Calls to warn() and inform() are only logged; anything
else fails.
"""
def works(self):
return True
def _error(self, kind, title, message):
# Show error in browser, because user may not be able to see exception
show_error_via_browser()
# Close program
t = "Cannot show %s-dialog on platform %s.\n %s: %s"
sys.exit(t % (kind, sys.platform, title, message))
def fail(self, title, message):
logger.error("FAIL %s: %s" % (title, message))
def warn(self, title, message):
logger.warning("WARN %s: %s" % (title, message))
def inform(self, title, message):
logger.info("INFO %s: %s" % (title, message))
def ask_ok(self, title, message):
self._error("CONFIRM", title, message)
def ask_retry(self, title, message):
self._error("RETRY", title, message)
def ask_yesno(self, title, message):
self._error("YESNO", title, message)
def check_output(*args, **kwargs):
"""Call a subprocess, return return-code and stdout.
When *this* process exits, kills the subprocess.
"""
kwargs["stdout"] = subprocess.PIPE
kwargs["stderr"] = subprocess.STDOUT
p = subprocess.Popen(*args, **kwargs)
try:
while p.poll() is None:
time.sleep(0.002)
return p.poll(), p.stdout.read().decode("utf-8", "ignore")
finally:
if p.poll() is None: # pragma: no cover
p.kill()
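# Example (added, not part of dialite): how check_output() above can be used; the command
# here is only an illustration and assumes a POSIX-like `echo` executable.
def _example_check_output():
    code, output = check_output(['echo', 'hello'])
    return code == 0 and 'hello' in output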
def test_call(*args, **kwargs):
"""Test whether a subprocess call succeeds."""
try:
subprocess.check_output(*args, **kwargs)
return True
except Exception:
return False
def hastty():
"""Whether (it looks like) a tty is available."""
try:
return sys.stdin and sys.stdin.isatty()
except Exception: # pragma: no cover
return False # i.e. no isatty method?
error_html = """
<html><body>
Dialite error:<br/>
Could not show dialog on this platform, and cannot fallback to a tty.
</body></html>
""".lstrip()
def show_error_via_browser():
# Select file to write html log to
dir = os.path.expanduser("~")
for name in ("Desktop", "desktop"):
if os.path.isdir(os.path.join(dir, name)):
dir = os.path.join(dir, name)
break
filename = os.path.join(dir, "dialite_error.html")
# Write file
try:
with open(filename, "wb") as f:
f.write(error_html.encode("utf-8"))
except Exception: # pragma: no cover
return # no user directory, or rights to write there?
# Open it in a browser
try:
webbrowser.open(filename)
except Exception: # pragma: no cover
return # no browser?
| 27.377778 | 78 | 0.600649 |
79423b6b092fd9a8cdcc056dbc6bd72767799c75 | 881 | py | Python | tests/generators/ios/test_conversion.py | brianleungwh/signals | d28d2722d681d390ebd21cd668d0b19f2f184451 | [
"MIT"
] | 3 | 2016-02-04T22:58:03.000Z | 2017-12-15T13:37:47.000Z | tests/generators/ios/test_conversion.py | brianleungwh/signals | d28d2722d681d390ebd21cd668d0b19f2f184451 | [
"MIT"
] | 37 | 2015-08-28T20:17:23.000Z | 2021-12-13T19:48:49.000Z | tests/generators/ios/test_conversion.py | brianleungwh/signals | d28d2722d681d390ebd21cd668d0b19f2f184451 | [
"MIT"
] | 6 | 2016-01-12T18:51:27.000Z | 2016-10-19T10:32:45.000Z | import unittest
from signals.generators.ios.conversion import python_to_objc_variable, sanitize_field_name, get_proper_name
class ConversionTestCase(unittest.TestCase):
def test_python_to_objc_variable(self):
self.assertEqual(python_to_objc_variable("verbose_description"), "verboseDescription")
self.assertEqual(python_to_objc_variable("verbose_description", capitalize_first=True), "VerboseDescription")
def test_sanitize_field_name(self):
self.assertEqual(sanitize_field_name("description"), "theDescription")
self.assertEqual(sanitize_field_name("messages"), "messages")
def test_get_proper_name(self):
self.assertEqual(get_proper_name("unread_count"), "unreadCount")
self.assertEqual(get_proper_name("unread_count", capitalize_first=True), "UnreadCount")
self.assertEqual(get_proper_name("id"), "theID")
| 48.944444 | 117 | 0.777526 |
79423b73098cb09753e7797851719ad8248837d8 | 9,369 | py | Python | hrf/trfx_semi.py | thu-spmi/semi-EBM | 393e3ea3566dd60c48872a5c573a335e8e802707 | [
"Apache-2.0"
] | 2 | 2021-09-18T14:21:24.000Z | 2021-12-20T03:39:13.000Z | hrf/trfx_semi.py | thu-spmi/semi-EBM | 393e3ea3566dd60c48872a5c573a335e8e802707 | [
"Apache-2.0"
] | null | null | null | hrf/trfx_semi.py | thu-spmi/semi-EBM | 393e3ea3566dd60c48872a5c573a335e8e802707 | [
"Apache-2.0"
] | 1 | 2021-09-12T07:02:23.000Z | 2021-09-12T07:02:23.000Z | import numpy as np
import time
from collections import OrderedDict
from base import seq, log
from . import trfx
from .trfx import DefaultOps
class Config(trfx.Config):
def __init__(self, data):
super().__init__(data)
self.train_batch_size = 1000
self.full_batch_size = 100
self.inter_alpha = 100
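# NOTE (added, not part of the original file): judging from TRF.update()/train() below,
# `train_batch_size` and `full_batch_size` set how many unlabeled vs. fully-labeled sequences
# are consumed per step, and `inter_alpha` scales the weight of the fully-labeled data term,
# e.g. (hypothetical usage):
#     config = Config(data)
#     config.inter_alpha = 10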
class TRF(trfx.TRF):
def __init__(self, config, data_x, data_full,
logdir, device='/gpu:0', name='trf'):
super().__init__(config, data_x, logdir, device, name)
self.data_full = data_full
self.data_x = data_x
def update(self, data_list, sample_list, data_full_list=None):
if data_full_list is None:
return super().update(data_list, sample_list)
# compute the scalars
data_scalar = np.ones(len(data_list)) / len(data_list)
sample_len = np.array([len(x) for x in sample_list])
sample_facter = np.array(self.config.pi_true[self.config.min_len:]) / \
np.array(self.config.pi_0[self.config.min_len:])
sample_scalar = sample_facter[sample_len - self.config.min_len] / len(sample_list)
# update word phi
if not self.config.fix_trf_model:
with self.time_recoder.recode('update_word'):
self.phi_word.update(data_list, data_scalar, sample_list, sample_scalar,
learning_rate=self.cur_lr_word)
if not self.config.fix_crf_model:
data_full_scalar = self.config.inter_alpha * np.ones(len(data_full_list)) / len(data_full_list)
data_part_list = data_list + sample_list + data_full_list
data_part_scalar = -np.concatenate([data_scalar, -sample_scalar, -data_full_scalar], axis=0)
# forward-backward for data
data_part_list_x = [s.x[0] for s in data_part_list]
with self.time_recoder.recode('update_marginal_data'):
data_fp_logps_list, logzs_data = self.marginal_logps(data_part_list_x)
with self.time_recoder.recode('update_tag'):
self.phi_tag.update(data_full_list, data_full_scalar, data_part_list, data_part_scalar,
data_fp_logps_list=None,
sample_fp_logps_list=data_fp_logps_list,
learning_rate=self.cur_lr_tag)
with self.time_recoder.recode('update_mix'):
self.phi_mix.update(data_full_list, data_full_scalar, data_part_list, data_part_scalar,
data_fp_logps_list=None,
sample_fp_logps_list=data_fp_logps_list,
learning_rate=self.cur_lr_mix)
# update zeta
with self.time_recoder.recode('update_logz'):
self.norm_const.update(sample_list, learning_rate=self.cur_lr_logz)
logz1 = self.get_true_logz(self.config.min_len)[0]
self.norm_const.set_logz1(logz1)
# update simulator
with self.time_recoder.recode('update_simulater'):
self.sampler.update(seq.get_x(sample_list))
# update dbg info
self.sample_cur_pi.fill(0)
for x in sample_list:
self.sample_cur_pi[len(x)] += 1
self.sample_acc_count += self.sample_cur_pi
self.sample_cur_pi /= self.sample_cur_pi.sum()
dbg_info = dict()
dbg_info['logz1'] = logz1
acc_pi = self.sample_acc_count / np.sum(self.sample_acc_count)
dbg_info['pi_dist'] = np.arccos(np.dot(acc_pi, self.config.pi_0) /
np.linalg.norm(acc_pi) / np.linalg.norm(self.config.pi_0))
return dbg_info
def train(self, print_per_epoch=0.1, operation=None):
# initialize
self.initialize()
if self.exist_model():
self.restore()
if self.config.load_crf_model is not None:
self.restore_crf(self.config.load_crf_model)
if self.config.load_trf_model is not None:
self.restore_trf(self.config.load_trf_model)
train_list = self.data.datas[0]
valid_list = self.data.datas[1]
print('[TRF] [Train]...')
time_beginning = time.time()
model_train_nll = []
# model_train_nll_phi = []
# model_q_nll = []
# model_kl_dist = []
self.data.train_batch_size = self.config.train_batch_size
self.data.is_shuffle = True
self.data_full.train_batch_size = self.config.full_batch_size
self.data_full.is_shuffle = True
epoch_step_num = self.data.get_epoch_step_num()
print('[TRF] epoch_step_num={}'.format(epoch_step_num))
print('[TRF] train_list={}'.format(len(train_list)))
print('[TRF] valid_list={}'.format(len(valid_list)))
last_epoch = 0
epoch = 0
print_next_epoch = 0
for step, (data_seqs, data_full_seqs) in enumerate(zip(self.data, self.data_full)):
###########################
# extra operations
###########################
if operation is not None:
operation.run(step, epoch)
if int(self.data.get_cur_epoch()) > last_epoch:
self.save()
last_epoch = int(self.data.get_cur_epoch())
if epoch >= self.config.max_epoch:
print('[TRF] train stop!')
self.save()
# operation.perform(step, epoch)
break
# update epochs
epoch = self.data.get_cur_epoch()
# update training information
self.training_info['trained_step'] += 1
self.training_info['trained_epoch'] = self.data.get_cur_epoch()
self.training_info['trained_time'] = (time.time() - time_beginning) / 60
# draw samples
with self.time_recoder.recode('sample'):
sample_seqs = self.draw(self.config.sample_batch_size)
# update parameters
with self.time_recoder.recode('update'):
# learning rate
self.cur_lr_word = self.config.lr_word.get_lr(step+1, epoch)
self.cur_lr_tag = self.config.lr_tag.get_lr(step+1, epoch)
self.cur_lr_mix = self.config.lr_mix.get_lr(step+1, epoch)
self.cur_lr_logz = self.config.lr_logz.get_lr(step+1, epoch)
# update
update_info = self.update(data_seqs, sample_seqs, data_full_seqs)
# evaluate the nll
with self.time_recoder.recode('eval_train_nll'):
nll_train = self.eval(data_seqs)[0]
model_train_nll.append(nll_train)
# model_train_nll_phi.append(self.eval(data_seqs, is_norm=False)[0])
# model_kl_dist.append(self.eval(sample_seqs)[0] - self.mcmc.eval(sample_seqs)[0])
if epoch >= print_next_epoch:
print_next_epoch = epoch + print_per_epoch
time_since_beg = (time.time() - time_beginning) / 60
# with self.time_recoder.recode('eval'):
# model_valid_nll = self.eval(valid_list)[0]
info = OrderedDict()
info['step'] = step
info['epoch'] = epoch
info['time'] = time_since_beg
info['lr_tag'] = '{:.2e}'.format(self.cur_lr_tag)
info['lr_mix'] = '{:.2e}'.format(self.cur_lr_mix)
info['lr_word'] = '{:.2e}'.format(self.cur_lr_word)
info['lr_logz'] = '{:.2e}'.format(self.cur_lr_logz)
info['lj_rate'] = self.sampler.lj_rate
info['mv_rate'] = self.sampler.mv_rate
info['logz1'] = self.update_global_norm()
info.update(update_info)
info['train'] = np.mean(model_train_nll[-epoch_step_num:])
# info['train_phi'] = np.mean(model_train_nll_phi[-100:])
# info['valid'] = model_valid_nll
# info['auxil'] = np.mean(model_q_nll[-epoch_step_num:])
# info['kl_dist'] = np.mean(model_kl_dist[-epoch_step_num:])
x_list = seq.get_x(sample_seqs)
info['kl_dist'] = np.mean(-self.get_logpxs(x_list, for_eval=False)) - self.sampler.eval(x_list)[0]
##########
true_logz = None
if self.config.max_len <= 5:
true_logz = np.array(self.get_true_logz())
sa_logz = np.array(self.norm_const.get_logz())
self.norm_const.set_logz(true_logz)
true_nll_train = self.eval(train_list)[0]
self.norm_const.set_logz(sa_logz)
info['true_train'] = true_nll_train
log.print_line(info)
print('[end]')
# self.debug_logz()
# write time
f = self.write_files.get('time')
f.write('step={} epoch={:.3f} time={:.2f} '.format(step, epoch, time_since_beg))
f.write(' '.join(['{}={:.2f}'.format(x[0], x[1]) for x in self.time_recoder.time_recoder.items()]) + '\n')
f.flush()
# write zeta, logz, pi
self.write_log_zeta(step, true_logz) | 41.64 | 122 | 0.572847 |
79423baecf69f422facb64d13574e8c5929ae599 | 649 | py | Python | focalloss.py | shiqiuwang/Hardness_aware_Sample_Distillation | b70823e9e180ed61a02e1eb73bbe9a3c2897050f | [
"Apache-2.0",
"MIT"
] | null | null | null | focalloss.py | shiqiuwang/Hardness_aware_Sample_Distillation | b70823e9e180ed61a02e1eb73bbe9a3c2897050f | [
"Apache-2.0",
"MIT"
] | null | null | null | focalloss.py | shiqiuwang/Hardness_aware_Sample_Distillation | b70823e9e180ed61a02e1eb73bbe9a3c2897050f | [
"Apache-2.0",
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
'''
@Time : 2021/7/22 15:14
@Author : Qiushi Wang
@FileName: focalloss.py
@Software: PyCharm
'''
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
import numpy as np
class FocalLoss(nn.Module):
def __init__(self, gamma=0.0, alpha=1.0):
super(FocalLoss, self).__init__()
self.gamma = gamma
self.alpha = alpha
        self.criterion = nn.CrossEntropyLoss(reduction='none')  # keep per-sample losses ('reduce=False' is deprecated)
def forward(self, inputs, targets):
loss = self.criterion(inputs, targets)
return torch.mul(loss, torch.pow((1 - torch.exp(-1*loss)), self.gamma))
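# Usage sketch (illustrative addition, not part of the original module): since
# CrossEntropyLoss without reduction returns the per-sample value -log(p_t),
# torch.exp(-loss) recovers p_t, so the multiplier in forward() is the focal
# term (1 - p_t)**gamma of Lin et al.; note that self.alpha is stored but not
# applied. Batch size, class count, and gamma below are arbitrary assumptions.
if __name__ == "__main__":
    criterion = FocalLoss(gamma=2.0)
    logits = torch.randn(4, 10)           # 4 samples, 10 classes
    targets = torch.tensor([1, 0, 3, 9])  # ground-truth class indices
    per_sample = criterion(logits, targets)
    print(per_sample.shape)               # torch.Size([4]): one loss per sample
    print(per_sample.mean())              # reduce manually if a scalar is needed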
| 23.178571 | 79 | 0.659476 |
79423ca1c81c8146bb8277bb6988e95d942949ad | 2,475 | py | Python | Geometry/HGCalCommonData/test/python/testHGCalParametersDD4Hep_cfg.py | gputtley/cmssw | c1ef8454804e4ebea8b65f59c4a952a6c94fde3b | [
"Apache-2.0"
] | 2 | 2017-09-29T13:32:51.000Z | 2019-01-31T00:40:58.000Z | Geometry/HGCalCommonData/test/python/testHGCalParametersDD4Hep_cfg.py | gputtley/cmssw | c1ef8454804e4ebea8b65f59c4a952a6c94fde3b | [
"Apache-2.0"
] | 8 | 2020-03-20T23:18:36.000Z | 2020-05-27T11:00:06.000Z | Geometry/HGCalCommonData/test/python/testHGCalParametersDD4Hep_cfg.py | gputtley/cmssw | c1ef8454804e4ebea8b65f59c4a952a6c94fde3b | [
"Apache-2.0"
] | 3 | 2017-06-07T15:22:28.000Z | 2019-02-28T20:48:30.000Z | import FWCore.ParameterSet.Config as cms
process = cms.Process("HGCalParametersTest")
process.load("SimGeneral.HepPDTESSource.pdt_cfi")
process.load("Geometry.HGCalCommonData.hgcalParametersInitialization_cfi")
process.load('FWCore.MessageService.MessageLogger_cfi')
if hasattr(process,'MessageLogger'):
process.MessageLogger.categories.append('HGCalGeom')
process.DDDetectorESProducer = cms.ESSource("DDDetectorESProducer",
confGeomXMLFiles = cms.FileInPath('Geometry/HGCalCommonData/data/dd4hep/cms-test-ddhgcal-algorithm.xml'),
appendToDataLabel = cms.string('')
)
process.DDCompactViewESProducer = cms.ESProducer("DDCompactViewESProducer",
appendToDataLabel = cms.string('')
)
process.load("IOMC.RandomEngine.IOMC_cff")
process.RandomNumberGeneratorService.generator.initialSeed = 456789
process.source = cms.Source("EmptySource")
process.maxEvents = cms.untracked.PSet(
input = cms.untracked.int32(1)
)
process.generator = cms.EDProducer("FlatRandomEGunProducer",
PGunParameters = cms.PSet(
PartID = cms.vint32(14),
MinEta = cms.double(-3.5),
MaxEta = cms.double(3.5),
MinPhi = cms.double(-3.14159265359),
MaxPhi = cms.double(3.14159265359),
MinE = cms.double(9.99),
MaxE = cms.double(10.01)
),
AddAntiParticle = cms.bool(False),
Verbosity = cms.untracked.int32(0),
firstRun = cms.untracked.uint32(1)
)
process.hgcalEEParametersInitialize.fromDD4Hep = cms.bool(True)
process.hgcalHESiParametersInitialize.fromDD4Hep = cms.bool(True)
process.hgcalHEScParametersInitialize.fromDD4Hep = cms.bool(True)
process.testEE = cms.EDAnalyzer("HGCalParameterTester",
Name = cms.untracked.string("HGCalEESensitive"),
Mode = cms.untracked.int32(1)
# Mode = cms.untracked.int32(0)
)
process.testHESil = process.testEE.clone(
Name = cms.untracked.string("HGCalHESiliconSensitive")
)
process.testHESci = process.testEE.clone(
Name = cms.untracked.string("HGCalHEScintillatorSensitive"),
Mode = cms.untracked.int32(2)
)
#process.p1 = cms.Path(process.generator*process.testEE*process.testHESil*process.testHESci)
process.p1 = cms.Path(process.generator*process.testEE*process.testHESil)
| 38.076923 | 149 | 0.671111 |
79423d510370578dd70fc8b4700deedf2dbede26 | 3,995 | py | Python | Tests/test_PopGen_GenePop_EasyController.py | adamnovak/biopython | 92772dd6add33e0b87ab593841f924f0f6f16090 | [
"PostgreSQL"
] | 5 | 2016-03-09T03:41:23.000Z | 2022-01-24T12:34:44.000Z | Tests/test_PopGen_GenePop_EasyController.py | adamnovak/biopython | 92772dd6add33e0b87ab593841f924f0f6f16090 | [
"PostgreSQL"
] | null | null | null | Tests/test_PopGen_GenePop_EasyController.py | adamnovak/biopython | 92772dd6add33e0b87ab593841f924f0f6f16090 | [
"PostgreSQL"
] | 6 | 2016-10-24T11:27:13.000Z | 2020-02-26T16:35:01.000Z | # Copyright 2009 by Tiago Antao <[email protected]>. All rights reserved.
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
import os
import unittest
from Bio import MissingExternalDependencyError
from Bio.PopGen.GenePop.EasyController import EasyController
#Tests genepop related code for the easy controller. Note: this requires genepop
#test_PopGen_GenePop_nodepend tests code that does not require genepop
found = False
for path in os.environ['PATH'].split(os.pathsep):
try:
for filename in os.listdir(path):
if filename.startswith('Genepop'):
found = True
except os.error:
pass # Path doesn't exist - correct to pass
if not found:
raise MissingExternalDependencyError(
"Install GenePop if you want to use Bio.PopGen.GenePop.")
cur_dir = os.path.abspath(".") # Tests directory
class AppTest(unittest.TestCase):
"""Tests genepop execution via biopython using EasyController.
"""
def setUp(self):
#Genepop likes to be on the directory where the file is.
os.chdir("PopGen")
self.ctrl = EasyController("big.gen")
def tearDown(self):
os.chdir(cur_dir)
def test_basic_info(self):
"""Test basic info.
"""
pops, loci = self.ctrl.get_basic_info()
self.assertEqual(len(pops), 10)
self.assertEqual(len(loci), 37)
def test_get_heterozygosity_info(self):
"""Test heterozygosity info.
"""
hz_info = self.ctrl.get_heterozygosity_info(0, "Locus2")
self.assertEqual(hz_info[1], 24)
self.assertEqual(hz_info[3], 7)
def test_get_alleles(self):
"""Test get alleles.
"""
#Returns keys of a dict, so order is Python implementation dependent
self.assertEqual(set(self.ctrl.get_alleles(0, "Locus3")), set([3, 20]))
def test_get_alleles_all_pops(self):
"""Test get alleles for all populations.
"""
self.assertEqual(self.ctrl.get_alleles_all_pops("Locus4"), [1, 3])
def test_get_fis(self):
"""Test get Fis.
"""
alleles, overall = self.ctrl.get_fis(0, "Locus2")
self.assertEqual(alleles[3][0], 55)
self.assertEqual(overall[0], 62)
def test_get_allele_frequency(self):
"""Test allele frequency.
"""
tot_genes, alleles = self.ctrl.get_allele_frequency(0, "Locus2")
self.assertEqual(tot_genes, 62)
self.assertTrue(abs(alleles[20] - 0.113) < 0.05)
def test_get_genotype_count(self):
"""Test genotype count.
"""
self.assertEqual(len(self.ctrl.get_genotype_count(0, "Locus2")), 3)
def test_estimate_nm(self):
"""Test Nm estimation.
"""
nms = self.ctrl.estimate_nm()
self.assertEqual(nms[0], 28.0)
#These tests are frequently failing, possibly due to a Genepop problem.
# def test_get_avg_fst_pair_locus(self):
# """Test get average Fst for pairwise pops on a locus.
# """
# self.assertEqual(len(self.ctrl.get_avg_fst_pair_locus("Locus4")), 45)
#
# def test_get_avg_fst_pair(self):
# """Test get pairwise Fst.
# """
# pop_fis = self.ctrl.get_avg_fst_pair()
# self.assertEqual(len(pop_fis), 45)
def test_get_avg_fis(self):
"""Test average Fis.
"""
self.ctrl.get_avg_fis()
def test_get_multilocus_f_stats(self):
"""Test multilocus F stats.
"""
mf = self.ctrl.get_multilocus_f_stats()
self.assertEqual(len(mf), 3)
self.assertTrue(mf[0]<0.1)
def test_get_f_stats(self):
"""Test F stats.
"""
fs = self.ctrl.get_f_stats("Locus2")
self.assertEqual(len(fs), 5)
self.assertTrue(fs[0]<0)
if __name__ == "__main__":
runner = unittest.TextTestRunner(verbosity = 2)
unittest.main(testRunner=runner)
| 31.456693 | 79 | 0.64005 |
79423f0f0cc8795100f645d43b43906c99f3a99e | 2,410 | py | Python | pptremoteagent.py | gigibu5/python-ppt-remote | e19b2711a8c76e19d5175f622904eab8b11387b5 | [
"MIT"
] | null | null | null | pptremoteagent.py | gigibu5/python-ppt-remote | e19b2711a8c76e19d5175f622904eab8b11387b5 | [
"MIT"
] | null | null | null | pptremoteagent.py | gigibu5/python-ppt-remote | e19b2711a8c76e19d5175f622904eab8b11387b5 | [
"MIT"
] | null | null | null | #!/usr/bin/python3
# This is a script to remote control PowerPoint presentations on Windows from your smartphone.
# Usage:
# * Run pptremoteserver.py on a server accessible from the internet
# * Run pptremoteagent.py on the computer where you have PowerPoint running
# * Open the pptremoteserver's IP address with your smartphone's browser and control the slideshow
#
# Notes:
# * By default pptremoteagent.py polls localhost:8080. To have it poll a different address, run:
# pptremoteagent.py -s <serverip>:<serverport>
# * You will need to install the following Python 3 modules:
# On server: flask
# On agent: keyboard, pywin32, requests
from win32gui import GetWindowText, GetForegroundWindow
import time, requests, sys, getopt, keyboard
REQUESTS_CONNECT_TIMEOUT = 5
REQUESTS_READ_TIMEOUT = 5
COMMANDS = ['next', 'back', 'stop']
def getcommand (p_server):
try:
r = requests.get('http://' + p_server + '/command', timeout=(REQUESTS_CONNECT_TIMEOUT, REQUESTS_READ_TIMEOUT))
    except requests.exceptions.RequestException:
print('WARNING: unable to access command server')
return(None)
if r.status_code != requests.codes.ok:
return (None)
rjson = r.json()
    if rjson is not None:
rcmd = rjson['command']
if rcmd in COMMANDS:
return(rcmd)
return(None)
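# Illustrative note (not part of this script): pptremoteserver.py is not shown
# here, but getcommand() above only requires that GET /command return a JSON
# object with a "command" key whose value is one of COMMANDS. A hypothetical
# minimal Flask server satisfying that contract could look like:
#
#     from flask import Flask, jsonify
#
#     app = Flask(__name__)
#     pending = {'command': None}
#
#     @app.route('/command')
#     def command():
#         cmd, pending['command'] = pending['command'], None  # hand out once
#         return jsonify(command=cmd)
#
#     @app.route('/send/<cmd>')
#     def send(cmd):
#         pending['command'] = cmd if cmd in ('next', 'back', 'stop') else None
#         return 'ok'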
def main(argv):
#initialize variables for command line arguments
arg_server = '127.0.0.1:8080'
#get command line arguments
try:
opts, args = getopt.getopt(argv, 's:')
except getopt.GetoptError:
sys.exit(2)
for opt, arg in opts:
if opt == '-s':
arg_server = arg
print ('INFO: Polling server %s' % arg_server)
while (True):
time.sleep(1)
wintext = GetWindowText(GetForegroundWindow())
if wintext.startswith('PowerPointova diaprojekcija - ['):
cmd = getcommand (arg_server)
            if cmd is not None:
if cmd == 'next':
keyboard.send('space')
elif cmd == 'back':
keyboard.send('backspace')
elif cmd == 'stop':
keyboard.send('escape')
if __name__ == '__main__':
main(sys.argv[1:])
| 31.710526 | 119 | 0.582988 |
79423f4557e53addb8df59cd788e9718c14cb853 | 3,903 | py | Python | spec/API_specification/signatures/utility_functions.py | cnpryer/array-api | 02fa9237eab3258120778baec12cd38cfd309ee3 | [
"MIT"
] | null | null | null | spec/API_specification/signatures/utility_functions.py | cnpryer/array-api | 02fa9237eab3258120778baec12cd38cfd309ee3 | [
"MIT"
] | null | null | null | spec/API_specification/signatures/utility_functions.py | cnpryer/array-api | 02fa9237eab3258120778baec12cd38cfd309ee3 | [
"MIT"
] | null | null | null | from ._types import Optional, Tuple, Union, array
def all(x: array, /, *, axis: Optional[Union[int, Tuple[int, ...]]] = None, keepdims: bool = False) -> array:
"""
Tests whether all input array elements evaluate to ``True`` along a specified axis.
.. note::
Positive infinity, negative infinity, and NaN must evaluate to ``True``.
.. note::
If ``x`` is an empty array or the size of the axis (dimension) along which to evaluate elements is zero, the test result must be ``True``.
Parameters
----------
x: array
input array.
axis: Optional[Union[int, Tuple[int, ...]]]
axis or axes along which to perform a logical AND reduction. By default, a logical AND reduction must be performed over the entire array. If a tuple of integers, logical AND reductions must be performed over multiple axes. A valid ``axis`` must be an integer on the interval ``[-N, N)``, where ``N`` is the rank (number of dimensions) of ``x``. If an ``axis`` is specified as a negative integer, the function must determine the axis along which to perform a reduction by counting backward from the last dimension (where ``-1`` refers to the last dimension). If provided an invalid ``axis``, the function must raise an exception. Default: ``None``.
keepdims: bool
If ``True``, the reduced axes (dimensions) must be included in the result as singleton dimensions, and, accordingly, the result must be compatible with the input array (see :ref:`broadcasting`). Otherwise, if ``False``, the reduced axes (dimensions) must not be included in the result. Default: ``False``.
Returns
-------
out: array
if a logical AND reduction was performed over the entire array, the returned array must be a zero-dimensional array containing the test result; otherwise, the returned array must be a non-zero-dimensional array containing the test results. The returned array must have a data type of ``bool``.
"""
def any(x: array, /, *, axis: Optional[Union[int, Tuple[int, ...]]] = None, keepdims: bool = False) -> array:
"""
Tests whether any input array element evaluates to ``True`` along a specified axis.
.. note::
Positive infinity, negative infinity, and NaN must evaluate to ``True``.
.. note::
If ``x`` is an empty array or the size of the axis (dimension) along which to evaluate elements is zero, the test result must be ``False``.
Parameters
----------
x: array
input array.
axis: Optional[Union[int, Tuple[int, ...]]]
axis or axes along which to perform a logical OR reduction. By default, a logical OR reduction must be performed over the entire array. If a tuple of integers, logical OR reductions must be performed over multiple axes. A valid ``axis`` must be an integer on the interval ``[-N, N)``, where ``N`` is the rank (number of dimensions) of ``x``. If an ``axis`` is specified as a negative integer, the function must determine the axis along which to perform a reduction by counting backward from the last dimension (where ``-1`` refers to the last dimension). If provided an invalid ``axis``, the function must raise an exception. Default: ``None``.
keepdims: bool
If ``True``, the reduced axes (dimensions) must be included in the result as singleton dimensions, and, accordingly, the result must be compatible with the input array (see :ref:`broadcasting`). Otherwise, if ``False``, the reduced axes (dimensions) must not be included in the result. Default: ``False``.
Returns
-------
out: array
if a logical OR reduction was performed over the entire array, the returned array must be a zero-dimensional array containing the test result; otherwise, the returned array must be a non-zero-dimensional array containing the test results. The returned array must have a data type of ``bool``.
"""
__all__ = ['all', 'any']
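# Illustrative only (not part of the specification): NumPy's ``all``/``any``
# reductions follow the same ``axis``/``keepdims`` semantics described above.
#
#     >>> import numpy as np
#     >>> x = np.array([[True, False], [True, True]])
#     >>> np.all(x, axis=1)                  # reduce over the last axis
#     array([False,  True])
#     >>> np.any(x, axis=0, keepdims=True)   # reduced axis kept as a size-1 dim
#     array([[ True,  True]])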
| 72.277778 | 655 | 0.694081 |
79423f52837badbf12ad9812152743d647d1c9c4 | 25 | py | Python | keras2cpp/__init__.py | AlessandroFasse/keras2cpp | b30ec20f49ab1d4211e56e94ff83c3da27519d56 | [
"MIT"
] | null | null | null | keras2cpp/__init__.py | AlessandroFasse/keras2cpp | b30ec20f49ab1d4211e56e94ff83c3da27519d56 | [
"MIT"
] | null | null | null | keras2cpp/__init__.py | AlessandroFasse/keras2cpp | b30ec20f49ab1d4211e56e94ff83c3da27519d56 | [
"MIT"
] | null | null | null | from ._keras2cpp import * | 25 | 25 | 0.8 |
79423fb83c5b9ea7f0048619f44f226757d9238f | 1,012 | py | Python | conf/settings.py | gustavohenrique/django-splinter-example | 19f2b66649dd48cdc4e7784baa792abbc9393987 | [
"MIT"
] | 2 | 2019-07-11T18:05:09.000Z | 2021-11-15T09:52:49.000Z | conf/settings.py | gustavohenrique/django-splinter-example | 19f2b66649dd48cdc4e7784baa792abbc9393987 | [
"MIT"
] | null | null | null | conf/settings.py | gustavohenrique/django-splinter-example | 19f2b66649dd48cdc4e7784baa792abbc9393987 | [
"MIT"
] | null | null | null | # coding: utf-8
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
SECRET_KEY = 'a4fc$s%ytp6$)=u+%r!^%pecjuqit&ar$ue8#y5n6z4q0axl3c'
DEBUG = True
TEMPLATE_DEBUG = False
ALLOWED_HOSTS = ['*']
INSTALLED_APPS = (
'django.contrib.contenttypes',
'django.contrib.staticfiles',
'poll'
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'conf.urls'
WSGI_APPLICATION = 'conf.wsgi.application'
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = False
USE_L10N = False
USE_TZ = True
STATIC_URL = '/static/'
| 20.653061 | 65 | 0.710474 |
79423fe80095752239b0f5ce51f3fafc5efaa381 | 507 | py | Python | ABC_C/ABC167_C.py | ryosuke0825/atcoder_python | 185cdbe7db44ecca1aaf357858d16d31ce515ddb | [
"MIT"
] | null | null | null | ABC_C/ABC167_C.py | ryosuke0825/atcoder_python | 185cdbe7db44ecca1aaf357858d16d31ce515ddb | [
"MIT"
] | null | null | null | ABC_C/ABC167_C.py | ryosuke0825/atcoder_python | 185cdbe7db44ecca1aaf357858d16d31ce515ddb | [
"MIT"
] | null | null | null | N, M, X = map(int, input().split())
CA = []
for _ in range(N):
tmp = list(map(int, input().split()))
CA.append(tmp)
ans = -1
for i in range(2**N):  # try every subset of the N rows as a bitmask
tmp_cost = 0
tmp_X = [0]*(M+1)
for j in range(N):
if ((i >> j) & 1):
for k in range(M+1):
tmp_X[k] += CA[j][k]
for j in range(1, M+1):
if tmp_X[j] < X:
break
    else:  # for-else: no break above, so every one of the M totals reached X
if ans == -1:
ans = tmp_X[0]
else:
ans = min(ans, tmp_X[0])
print(ans)
| 21.125 | 41 | 0.424063 |
79424208c0dc911f46ec133ae22b5ce56e4ac7c9 | 9,335 | py | Python | tensorflow_probability/python/distributions/normal.py | TheCaffeineDev/probability | 7aa13647c57fe621eadc2b7ad3020817aa8b9ba5 | [
"Apache-2.0"
] | null | null | null | tensorflow_probability/python/distributions/normal.py | TheCaffeineDev/probability | 7aa13647c57fe621eadc2b7ad3020817aa8b9ba5 | [
"Apache-2.0"
] | null | null | null | tensorflow_probability/python/distributions/normal.py | TheCaffeineDev/probability | 7aa13647c57fe621eadc2b7ad3020817aa8b9ba5 | [
"Apache-2.0"
] | null | null | null | # Copyright 2018 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""The Normal (Gaussian) distribution class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# Dependency imports
import numpy as np
import tensorflow.compat.v2 as tf
from tensorflow_probability.python.bijectors import identity as identity_bijector
from tensorflow_probability.python.distributions import distribution
from tensorflow_probability.python.distributions import kullback_leibler
from tensorflow_probability.python.internal import assert_util
from tensorflow_probability.python.internal import dtype_util
from tensorflow_probability.python.internal import prefer_static
from tensorflow_probability.python.internal import reparameterization
from tensorflow_probability.python.internal import special_math
from tensorflow_probability.python.internal import tensor_util
__all__ = [
'Normal',
]
class Normal(distribution.Distribution):
"""The Normal distribution with location `loc` and `scale` parameters.
#### Mathematical details
The probability density function (pdf) is,
```none
pdf(x; mu, sigma) = exp(-0.5 (x - mu)**2 / sigma**2) / Z
Z = (2 pi sigma**2)**0.5
```
  where `loc = mu` is the mean, `scale = sigma` is the std. deviation, and `Z`
is the normalization constant.
The Normal distribution is a member of the [location-scale family](
https://en.wikipedia.org/wiki/Location-scale_family), i.e., it can be
constructed as,
```none
X ~ Normal(loc=0, scale=1)
Y = loc + scale * X
```
#### Examples
Examples of initialization of one or a batch of distributions.
```python
import tensorflow_probability as tfp
tfd = tfp.distributions
# Define a single scalar Normal distribution.
dist = tfd.Normal(loc=0., scale=3.)
# Evaluate the cdf at 1, returning a scalar.
dist.cdf(1.)
# Define a batch of two scalar valued Normals.
# The first has mean 1 and standard deviation 11, the second 2 and 22.
dist = tfd.Normal(loc=[1, 2.], scale=[11, 22.])
# Evaluate the pdf of the first distribution on 0, and the second on 1.5,
# returning a length two tensor.
dist.prob([0, 1.5])
# Get 3 samples, returning a 3 x 2 tensor.
dist.sample([3])
```
Arguments are broadcast when possible.
```python
# Define a batch of two scalar valued Normals.
# Both have mean 1, but different standard deviations.
dist = tfd.Normal(loc=1., scale=[11, 22.])
# Evaluate the pdf of both distributions on the same point, 3.0,
# returning a length 2 tensor.
dist.prob(3.0)
```
"""
def __init__(self,
loc,
scale,
validate_args=False,
allow_nan_stats=True,
name='Normal'):
"""Construct Normal distributions with mean and stddev `loc` and `scale`.
The parameters `loc` and `scale` must be shaped in a way that supports
broadcasting (e.g. `loc + scale` is a valid operation).
Args:
loc: Floating point tensor; the means of the distribution(s).
scale: Floating point tensor; the stddevs of the distribution(s).
Must contain only positive values.
validate_args: Python `bool`, default `False`. When `True` distribution
parameters are checked for validity despite possibly degrading runtime
performance. When `False` invalid inputs may silently render incorrect
outputs.
allow_nan_stats: Python `bool`, default `True`. When `True`,
statistics (e.g., mean, mode, variance) use the value "`NaN`" to
indicate the result is undefined. When `False`, an exception is raised
if one or more of the statistic's batch members are undefined.
name: Python `str` name prefixed to Ops created by this class.
Raises:
TypeError: if `loc` and `scale` have different `dtype`.
"""
parameters = dict(locals())
with tf.name_scope(name) as name:
dtype = dtype_util.common_dtype([loc, scale], dtype_hint=tf.float32)
self._loc = tensor_util.convert_nonref_to_tensor(
loc, dtype=dtype, name='loc')
self._scale = tensor_util.convert_nonref_to_tensor(
scale, dtype=dtype, name='scale')
super(Normal, self).__init__(
dtype=dtype,
reparameterization_type=reparameterization.FULLY_REPARAMETERIZED,
validate_args=validate_args,
allow_nan_stats=allow_nan_stats,
parameters=parameters,
name=name)
@staticmethod
def _param_shapes(sample_shape):
return dict(
zip(('loc', 'scale'),
([tf.convert_to_tensor(sample_shape, dtype=tf.int32)] * 2)))
@classmethod
def _params_event_ndims(cls):
return dict(loc=0, scale=0)
@property
def loc(self):
"""Distribution parameter for the mean."""
return self._loc
@property
def scale(self):
"""Distribution parameter for standard deviation."""
return self._scale
def _batch_shape_tensor(self, loc=None, scale=None):
return prefer_static.broadcast_shape(
prefer_static.shape(self.loc if loc is None else loc),
prefer_static.shape(self.scale if scale is None else scale))
def _batch_shape(self):
return tf.broadcast_static_shape(self.loc.shape, self.scale.shape)
def _event_shape_tensor(self):
return tf.constant([], dtype=tf.int32)
def _event_shape(self):
return tf.TensorShape([])
def _sample_n(self, n, seed=None):
loc = tf.convert_to_tensor(self.loc)
scale = tf.convert_to_tensor(self.scale)
shape = tf.concat([[n], self._batch_shape_tensor(loc=loc, scale=scale)],
axis=0)
sampled = tf.random.normal(
shape=shape, mean=0., stddev=1., dtype=self.dtype, seed=seed)
return sampled * scale + loc
def _log_prob(self, x):
scale = tf.convert_to_tensor(self.scale)
log_unnormalized = -0.5 * tf.math.squared_difference(
x / scale, self.loc / scale)
log_normalization = tf.constant(
0.5 * np.log(2. * np.pi), dtype=self.dtype) + tf.math.log(scale)
return log_unnormalized - log_normalization
def _log_cdf(self, x):
return special_math.log_ndtr(self._z(x))
def _cdf(self, x):
return special_math.ndtr(self._z(x))
def _log_survival_function(self, x):
return special_math.log_ndtr(-self._z(x))
def _survival_function(self, x):
return special_math.ndtr(-self._z(x))
def _entropy(self):
log_normalization = tf.constant(
0.5 * np.log(2. * np.pi), dtype=self.dtype) + tf.math.log(self.scale)
entropy = 0.5 + log_normalization
return entropy * tf.ones_like(self.loc)
def _mean(self):
return self.loc * tf.ones_like(self.scale)
def _quantile(self, p):
return special_math.ndtri(p) * self.scale + self.loc
def _stddev(self):
return self.scale * tf.ones_like(self.loc)
_mode = _mean
def _z(self, x, scale=None):
"""Standardize input `x` to a unit normal."""
with tf.name_scope('standardize'):
return (x - self.loc) / (self.scale if scale is None else scale)
def _default_event_space_bijector(self):
return identity_bijector.Identity(validate_args=self.validate_args)
def _parameter_control_dependencies(self, is_init):
assertions = []
if is_init:
try:
self._batch_shape()
except ValueError:
raise ValueError(
'Arguments `loc` and `scale` must have compatible shapes; '
'loc.shape={}, scale.shape={}.'.format(
self.loc.shape, self.scale.shape))
# We don't bother checking the shapes in the dynamic case because
# all member functions access both arguments anyway.
if not self.validate_args:
assert not assertions # Should never happen.
return []
if is_init != tensor_util.is_ref(self.scale):
assertions.append(assert_util.assert_positive(
self.scale, message='Argument `scale` must be positive.'))
return assertions
@kullback_leibler.RegisterKL(Normal, Normal)
def _kl_normal_normal(a, b, name=None):
"""Calculate the batched KL divergence KL(a || b) with a and b Normal.
Args:
a: instance of a Normal distribution object.
b: instance of a Normal distribution object.
name: Name to use for created operations.
Default value: `None` (i.e., `'kl_normal_normal'`).
Returns:
kl_div: Batchwise KL(a || b)
"""
with tf.name_scope(name or 'kl_normal_normal'):
b_scale = tf.convert_to_tensor(b.scale) # We'll read it thrice.
diff_log_scale = tf.math.log(a.scale) - tf.math.log(b_scale)
return (
0.5 * tf.math.squared_difference(a.loc / b_scale, b.loc / b_scale) +
0.5 * tf.math.expm1(2. * diff_log_scale) -
diff_log_scale)
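# For reference: the expression above is an algebraically equivalent grouping of
# the standard closed form for the KL divergence between two univariate normals,
# a = N(mu_a, sigma_a**2) and b = N(mu_b, sigma_b**2):
#
#   KL(a || b) = log(sigma_b / sigma_a)
#                + (sigma_a**2 + (mu_a - mu_b)**2) / (2 * sigma_b**2)
#                - 1/2
#
# since 0.5 * expm1(2 * (log(sigma_a) - log(sigma_b))) equals
# 0.5 * (sigma_a**2 / sigma_b**2 - 1).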
| 33.102837 | 81 | 0.683985 |
794242c24b3a0025891c247d822546c028a62428 | 74,621 | py | Python | certbot/cli.py | daramousk/certbot | 082040afb4c6542445ee8437a3dea61171706a80 | [
"Apache-2.0"
] | null | null | null | certbot/cli.py | daramousk/certbot | 082040afb4c6542445ee8437a3dea61171706a80 | [
"Apache-2.0"
] | null | null | null | certbot/cli.py | daramousk/certbot | 082040afb4c6542445ee8437a3dea61171706a80 | [
"Apache-2.0"
] | null | null | null | """Certbot command line argument & config processing."""
# pylint: disable=too-many-lines
from __future__ import print_function
import argparse
import copy
import glob
import logging.handlers
import sys
import configargparse
import six
import zope.component
import zope.interface
from zope.interface import interfaces as zope_interfaces
from acme import challenges
# pylint: disable=unused-import, no-name-in-module
from acme.magic_typing import Any, Dict, Optional
# pylint: enable=unused-import, no-name-in-module
import certbot
import certbot.plugins.enhancements as enhancements
import certbot.plugins.selection as plugin_selection
from certbot import constants
from certbot import crypto_util
from certbot import errors
from certbot import hooks
from certbot import interfaces
from certbot import util
from certbot.compat import os
from certbot.display import util as display_util
from certbot.plugins import disco as plugins_disco
logger = logging.getLogger(__name__)
# Global, to save us from a lot of argument passing within the scope of this module
helpful_parser = None # type: Optional[HelpfulArgumentParser]
# For help strings, figure out how the user ran us.
# When invoked from letsencrypt-auto, sys.argv[0] is something like:
# "/home/user/.local/share/certbot/bin/certbot"
# Note that this won't work if the user set VENV_PATH or XDG_DATA_HOME before
# running letsencrypt-auto (and sudo stops us from seeing if they did), so it
# should only be used for purposes where inability to detect letsencrypt-auto
# fails safely
LEAUTO = "letsencrypt-auto"
if "CERTBOT_AUTO" in os.environ:
# if we're here, this is probably going to be certbot-auto, unless the
# user saved the script under a different name
LEAUTO = os.path.basename(os.environ["CERTBOT_AUTO"])
old_path_fragment = os.path.join(".local", "share", "letsencrypt")
new_path_prefix = os.path.abspath(os.path.join(os.sep, "opt",
"eff.org", "certbot", "venv"))
if old_path_fragment in sys.argv[0] or sys.argv[0].startswith(new_path_prefix):
cli_command = LEAUTO
else:
cli_command = "certbot"
# Argparse's help formatting has a lot of unhelpful peculiarities, so we want
# to replace as much of it as we can...
# This is the stub to include in help generated by argparse
SHORT_USAGE = """
{0} [SUBCOMMAND] [options] [-d DOMAIN] [-d DOMAIN] ...
Certbot can obtain and install HTTPS/TLS/SSL certificates. By default,
it will attempt to use a webserver both for obtaining and installing the
certificate. """.format(cli_command)
# This section is used for --help and --help all ; it needs information
# about installed plugins to be fully formatted
COMMAND_OVERVIEW = """The most common SUBCOMMANDS and flags are:
obtain, install, and renew certificates:
(default) run Obtain & install a certificate in your current webserver
certonly Obtain or renew a certificate, but do not install it
renew Renew all previously obtained certificates that are near expiry
enhance Add security enhancements to your existing configuration
-d DOMAINS Comma-separated list of domains to obtain a certificate for
%s
--standalone Run a standalone webserver for authentication
%s
--webroot Place files in a server's webroot folder for authentication
--manual Obtain certificates interactively, or using shell script hooks
-n Run non-interactively
--test-cert Obtain a test certificate from a staging server
--dry-run Test "renew" or "certonly" without saving any certificates to disk
manage certificates:
certificates Display information about certificates you have from Certbot
revoke Revoke a certificate (supply --cert-path or --cert-name)
delete Delete a certificate
manage your account:
register Create an ACME account
unregister Deactivate an ACME account
update_account Update an ACME account
--agree-tos Agree to the ACME server's Subscriber Agreement
-m EMAIL Email address for important account notifications
"""
# This is the short help for certbot --help, where we disable argparse
# altogether
HELP_AND_VERSION_USAGE = """
More detailed help:
-h, --help [TOPIC] print this message, or detailed help on a topic;
the available TOPICS are:
all, automation, commands, paths, security, testing, or any of the
subcommands or plugins (certonly, renew, install, register, nginx,
apache, standalone, webroot, etc.)
-h all print a detailed help page including all topics
--version print the version number
"""
# These argparse parameters should be removed when detecting defaults.
ARGPARSE_PARAMS_TO_REMOVE = ("const", "nargs", "type",)
# These sets are used when to help detect options set by the user.
EXIT_ACTIONS = set(("help", "version",))
ZERO_ARG_ACTIONS = set(("store_const", "store_true",
"store_false", "append_const", "count",))
# Maps a config option to a set of config options that may have modified it.
# This dictionary is used recursively, so if A modifies B and B modifies C,
# it is determined that C was modified by the user if A was modified.
VAR_MODIFIERS = {"account": set(("server",)),
"renew_hook": set(("deploy_hook",)),
"server": set(("dry_run", "staging",)),
"webroot_map": set(("webroot_path",))}
def report_config_interaction(modified, modifiers):
"""Registers config option interaction to be checked by set_by_cli.
This function can be called by during the __init__ or
add_parser_arguments methods of plugins to register interactions
between config options.
:param modified: config options that can be modified by modifiers
:type modified: iterable or str (string_types)
:param modifiers: config options that modify modified
:type modifiers: iterable or str (string_types)
"""
if isinstance(modified, six.string_types):
modified = (modified,)
if isinstance(modifiers, six.string_types):
modifiers = (modifiers,)
for var in modified:
VAR_MODIFIERS.setdefault(var, set()).update(modifiers)
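# Example (illustrative): the "webroot_map"/"webroot_path" relationship listed in
# VAR_MODIFIERS above could equivalently be registered by a plugin at parser
# setup time with:
#
#     report_config_interaction("webroot_map", "webroot_path")
#
# after which set_by_cli("webroot_map") also returns True whenever the user
# explicitly set --webroot-path.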
def possible_deprecation_warning(config):
"A deprecation warning for users with the old, not-self-upgrading letsencrypt-auto."
if cli_command != LEAUTO:
return
if config.no_self_upgrade:
# users setting --no-self-upgrade might be hanging on a client version like 0.3.0
# or 0.5.0 which is the new script, but doesn't set CERTBOT_AUTO; they don't
# need warnings
return
if "CERTBOT_AUTO" not in os.environ:
logger.warning("You are running with an old copy of letsencrypt-auto"
" that does not receive updates, and is less reliable than more"
" recent versions. The letsencrypt client has also been renamed"
" to Certbot. We recommend upgrading to the latest certbot-auto"
" script, or using native OS packages.")
logger.debug("Deprecation warning circumstances: %s / %s", sys.argv[0], os.environ)
class _Default(object):
"""A class to use as a default to detect if a value is set by a user"""
def __bool__(self):
return False
def __eq__(self, other):
return isinstance(other, _Default)
def __hash__(self):
return id(_Default)
def __nonzero__(self):
return self.__bool__()
def set_by_cli(var):
"""
Return True if a particular config variable has been set by the user
(CLI or config file) including if the user explicitly set it to the
default. Returns False if the variable was assigned a default value.
"""
detector = set_by_cli.detector # type: ignore
if detector is None and helpful_parser is not None:
# Setup on first run: `detector` is a weird version of config in which
# the default value of every attribute is wrangled to be boolean-false
plugins = plugins_disco.PluginsRegistry.find_all()
# reconstructed_args == sys.argv[1:], or whatever was passed to main()
reconstructed_args = helpful_parser.args + [helpful_parser.verb]
detector = set_by_cli.detector = prepare_and_parse_args( # type: ignore
plugins, reconstructed_args, detect_defaults=True)
# propagate plugin requests: eg --standalone modifies config.authenticator
detector.authenticator, detector.installer = ( # type: ignore
plugin_selection.cli_plugin_requests(detector))
if not isinstance(getattr(detector, var), _Default):
logger.debug("Var %s=%s (set by user).", var, getattr(detector, var))
return True
for modifier in VAR_MODIFIERS.get(var, []):
if set_by_cli(modifier):
logger.debug("Var %s=%s (set by user).",
var, VAR_MODIFIERS.get(var, []))
return True
return False
# static housekeeping var
# function attributes are not supported by mypy
# https://github.com/python/mypy/issues/2087
set_by_cli.detector = None # type: ignore
def has_default_value(option, value):
"""Does option have the default value?
If the default value of option is not known, False is returned.
:param str option: configuration variable being considered
:param value: value of the configuration variable named option
:returns: True if option has the default value, otherwise, False
:rtype: bool
"""
if helpful_parser is not None:
return (option in helpful_parser.defaults and
helpful_parser.defaults[option] == value)
return False
def option_was_set(option, value):
"""Was option set by the user or does it differ from the default?
:param str option: configuration variable being considered
:param value: value of the configuration variable named option
:returns: True if the option was set, otherwise, False
:rtype: bool
"""
return set_by_cli(option) or not has_default_value(option, value)
def argparse_type(variable):
"""Return our argparse type function for a config variable (default: str)"""
# pylint: disable=protected-access
if helpful_parser is not None:
for action in helpful_parser.parser._actions:
if action.type is not None and action.dest == variable:
return action.type
return str
def read_file(filename, mode="rb"):
"""Returns the given file's contents.
:param str filename: path to file
:param str mode: open mode (see `open`)
:returns: absolute path of filename and its contents
:rtype: tuple
:raises argparse.ArgumentTypeError: File does not exist or is not readable.
"""
try:
filename = os.path.abspath(filename)
with open(filename, mode) as the_file:
contents = the_file.read()
return filename, contents
except IOError as exc:
raise argparse.ArgumentTypeError(exc.strerror)
def flag_default(name):
"""Default value for CLI flag."""
# XXX: this is an internal housekeeping notion of defaults before
# argparse has been set up; it is not accurate for all flags. Call it
# with caution. Plugin defaults are missing, and some things are using
# defaults defined in this file, not in constants.py :(
return copy.deepcopy(constants.CLI_DEFAULTS[name])
def config_help(name, hidden=False):
"""Extract the help message for an `.IConfig` attribute."""
# pylint: disable=no-member
if hidden:
return argparse.SUPPRESS
field = interfaces.IConfig.__getitem__(name) # type: zope.interface.interface.Attribute # pylint: disable=no-value-for-parameter
return field.__doc__
class HelpfulArgumentGroup(object):
"""Emulates an argparse group for use with HelpfulArgumentParser.
This class is used in the add_group method of HelpfulArgumentParser.
Command line arguments can be added to the group, but help
suppression and default detection is applied by
HelpfulArgumentParser when necessary.
"""
def __init__(self, helpful_arg_parser, topic):
self._parser = helpful_arg_parser
self._topic = topic
def add_argument(self, *args, **kwargs):
"""Add a new command line argument to the argument group."""
self._parser.add(self._topic, *args, **kwargs)
class CustomHelpFormatter(argparse.HelpFormatter):
"""This is a clone of ArgumentDefaultsHelpFormatter, with bugfixes.
In particular we fix https://bugs.python.org/issue28742
"""
def _get_help_string(self, action):
helpstr = action.help
if '%(default)' not in action.help and '(default:' not in action.help:
if action.default != argparse.SUPPRESS:
defaulting_nargs = [argparse.OPTIONAL, argparse.ZERO_OR_MORE]
if action.option_strings or action.nargs in defaulting_nargs:
helpstr += ' (default: %(default)s)'
return helpstr
# The attributes here are:
# short: a string that will be displayed by "certbot -h commands"
# opts: a string that heads the section of flags with which this command is documented,
# both for "certbot -h SUBCOMMAND" and "certbot -h all"
# usage: an optional string that overrides the header of "certbot -h SUBCOMMAND"
VERB_HELP = [
("run (default)", {
"short": "Obtain/renew a certificate, and install it",
"opts": "Options for obtaining & installing certificates",
"usage": SHORT_USAGE.replace("[SUBCOMMAND]", ""),
"realname": "run"
}),
("certonly", {
"short": "Obtain or renew a certificate, but do not install it",
"opts": "Options for modifying how a certificate is obtained",
"usage": ("\n\n certbot certonly [options] [-d DOMAIN] [-d DOMAIN] ...\n\n"
"This command obtains a TLS/SSL certificate without installing it anywhere.")
}),
("renew", {
"short": "Renew all certificates (or one specified with --cert-name)",
"opts": ("The 'renew' subcommand will attempt to renew all"
" certificates (or more precisely, certificate lineages) you have"
" previously obtained if they are close to expiry, and print a"
" summary of the results. By default, 'renew' will reuse the options"
" used to create obtain or most recently successfully renew each"
" certificate lineage. You can try it with `--dry-run` first. For"
" more fine-grained control, you can renew individual lineages with"
" the `certonly` subcommand. Hooks are available to run commands"
" before and after renewal; see"
" https://certbot.eff.org/docs/using.html#renewal for more"
" information on these."),
"usage": "\n\n certbot renew [--cert-name CERTNAME] [options]\n\n"
}),
("certificates", {
"short": "List certificates managed by Certbot",
"opts": "List certificates managed by Certbot",
"usage": ("\n\n certbot certificates [options] ...\n\n"
"Print information about the status of certificates managed by Certbot.")
}),
("delete", {
"short": "Clean up all files related to a certificate",
"opts": "Options for deleting a certificate",
"usage": "\n\n certbot delete --cert-name CERTNAME\n\n"
}),
("revoke", {
"short": "Revoke a certificate specified with --cert-path or --cert-name",
"opts": "Options for revocation of certificates",
"usage": "\n\n certbot revoke [--cert-path /path/to/fullchain.pem | "
"--cert-name example.com] [options]\n\n"
}),
("register", {
"short": "Register for account with Let's Encrypt / other ACME server",
"opts": "Options for account registration",
"usage": "\n\n certbot register --email [email protected] [options]\n\n"
}),
("update_account", {
"short": "Update existing account with Let's Encrypt / other ACME server",
"opts": "Options for account modification",
"usage": "\n\n certbot update_account --email [email protected] [options]\n\n"
}),
("unregister", {
"short": "Irrevocably deactivate your account",
"opts": "Options for account deactivation.",
"usage": "\n\n certbot unregister [options]\n\n"
}),
("install", {
"short": "Install an arbitrary certificate in a server",
"opts": "Options for modifying how a certificate is deployed",
"usage": "\n\n certbot install --cert-path /path/to/fullchain.pem "
" --key-path /path/to/private-key [options]\n\n"
}),
("config_changes", {
"short": "Show changes that Certbot has made to server configurations",
"opts": "Options for controlling which changes are displayed",
"usage": "\n\n certbot config_changes --num NUM [options]\n\n"
}),
("rollback", {
"short": "Roll back server conf changes made during certificate installation",
"opts": "Options for rolling back server configuration changes",
"usage": "\n\n certbot rollback --checkpoints 3 [options]\n\n"
}),
("plugins", {
"short": "List plugins that are installed and available on your system",
"opts": 'Options for for the "plugins" subcommand',
"usage": "\n\n certbot plugins [options]\n\n"
}),
("update_symlinks", {
"short": "Recreate symlinks in your /etc/letsencrypt/live/ directory",
"opts": ("Recreates certificate and key symlinks in {0}, if you changed them by hand "
"or edited a renewal configuration file".format(
os.path.join(flag_default("config_dir"), "live"))),
"usage": "\n\n certbot update_symlinks [options]\n\n"
}),
("enhance", {
"short": "Add security enhancements to your existing configuration",
"opts": ("Helps to harden the TLS configuration by adding security enhancements "
"to already existing configuration."),
"usage": "\n\n certbot enhance [options]\n\n"
}),
]
# VERB_HELP is a list in order to preserve order, but a dict is sometimes useful
VERB_HELP_MAP = dict(VERB_HELP)
class HelpfulArgumentParser(object):
"""Argparse Wrapper.
This class wraps argparse, adding the ability to make --help less
verbose, and request help on specific subcategories at a time, eg
'certbot --help security' for security options.
"""
def __init__(self, args, plugins, detect_defaults=False):
from certbot import main
self.VERBS = {
"auth": main.certonly,
"certonly": main.certonly,
"config_changes": main.config_changes,
"run": main.run,
"install": main.install,
"plugins": main.plugins_cmd,
"register": main.register,
"update_account": main.update_account,
"unregister": main.unregister,
"renew": main.renew,
"revoke": main.revoke,
"rollback": main.rollback,
"everything": main.run,
"update_symlinks": main.update_symlinks,
"certificates": main.certificates,
"delete": main.delete,
"enhance": main.enhance,
}
# Get notification function for printing
try:
self.notify = zope.component.getUtility(
interfaces.IDisplay).notification
except zope_interfaces.ComponentLookupError:
self.notify = display_util.NoninteractiveDisplay(
sys.stdout).notification
# List of topics for which additional help can be provided
HELP_TOPICS = ["all", "security", "paths", "automation", "testing"]
HELP_TOPICS += list(self.VERBS) + self.COMMANDS_TOPICS + ["manage"]
plugin_names = list(plugins)
self.help_topics = HELP_TOPICS + plugin_names + [None] # type: ignore
self.detect_defaults = detect_defaults
self.args = args
if self.args and self.args[0] == 'help':
self.args[0] = '--help'
self.determine_verb()
help1 = self.prescan_for_flag("-h", self.help_topics)
help2 = self.prescan_for_flag("--help", self.help_topics)
if isinstance(help1, bool) and isinstance(help2, bool):
self.help_arg = help1 or help2
else:
self.help_arg = help1 if isinstance(help1, six.string_types) else help2
short_usage = self._usage_string(plugins, self.help_arg)
self.visible_topics = self.determine_help_topics(self.help_arg)
# elements are added by .add_group()
self.groups = {} # type: Dict[str, argparse._ArgumentGroup]
# elements are added by .parse_args()
self.defaults = {} # type: Dict[str, Any]
self.parser = configargparse.ArgParser(
prog="certbot",
usage=short_usage,
formatter_class=CustomHelpFormatter,
args_for_setting_config_path=["-c", "--config"],
default_config_files=flag_default("config_files"),
config_arg_help_message="path to config file (default: {0})".format(
" and ".join(flag_default("config_files"))))
# This is the only way to turn off overly verbose config flag documentation
self.parser._add_config_file_help = False # pylint: disable=protected-access
# Help that are synonyms for --help subcommands
COMMANDS_TOPICS = ["command", "commands", "subcommand", "subcommands", "verbs"]
def _list_subcommands(self):
longest = max(len(v) for v in VERB_HELP_MAP)
text = "The full list of available SUBCOMMANDS is:\n\n"
for verb, props in sorted(VERB_HELP):
doc = props.get("short", "")
text += '{0:<{length}} {1}\n'.format(verb, doc, length=longest)
text += "\nYou can get more help on a specific subcommand with --help SUBCOMMAND\n"
return text
def _usage_string(self, plugins, help_arg):
"""Make usage strings late so that plugins can be initialised late
:param plugins: all discovered plugins
:param help_arg: False for none; True for --help; "TOPIC" for --help TOPIC
:rtype: str
:returns: a short usage string for the top of --help TOPIC)
"""
if "nginx" in plugins:
nginx_doc = "--nginx Use the Nginx plugin for authentication & installation"
else:
nginx_doc = "(the certbot nginx plugin is not installed)"
if "apache" in plugins:
apache_doc = "--apache Use the Apache plugin for authentication & installation"
else:
apache_doc = "(the certbot apache plugin is not installed)"
usage = SHORT_USAGE
if help_arg is True:
self.notify(usage + COMMAND_OVERVIEW % (apache_doc, nginx_doc) + HELP_AND_VERSION_USAGE)
sys.exit(0)
elif help_arg in self.COMMANDS_TOPICS:
self.notify(usage + self._list_subcommands())
sys.exit(0)
elif help_arg == "all":
# if we're doing --help all, the OVERVIEW is part of the SHORT_USAGE at
# the top; if we're doing --help someothertopic, it's OT so it's not
usage += COMMAND_OVERVIEW % (apache_doc, nginx_doc)
else:
custom = VERB_HELP_MAP.get(help_arg, {}).get("usage", None)
usage = custom if custom else usage
return usage
def remove_config_file_domains_for_renewal(self, parsed_args):
"""Make "certbot renew" safe if domains are set in cli.ini."""
# Works around https://github.com/certbot/certbot/issues/4096
if self.verb == "renew":
for source, flags in self.parser._source_to_settings.items(): # pylint: disable=protected-access
if source.startswith("config_file") and "domains" in flags:
parsed_args.domains = _Default() if self.detect_defaults else []
def parse_args(self):
"""Parses command line arguments and returns the result.
:returns: parsed command line arguments
:rtype: argparse.Namespace
"""
parsed_args = self.parser.parse_args(self.args)
parsed_args.func = self.VERBS[self.verb]
parsed_args.verb = self.verb
self.remove_config_file_domains_for_renewal(parsed_args)
if self.detect_defaults:
return parsed_args
self.defaults = dict((key, copy.deepcopy(self.parser.get_default(key)))
for key in vars(parsed_args))
# Do any post-parsing homework here
if self.verb == "renew":
if parsed_args.force_interactive:
raise errors.Error(
"{0} cannot be used with renew".format(
constants.FORCE_INTERACTIVE_FLAG))
parsed_args.noninteractive_mode = True
if parsed_args.force_interactive and parsed_args.noninteractive_mode:
raise errors.Error(
"Flag for non-interactive mode and {0} conflict".format(
constants.FORCE_INTERACTIVE_FLAG))
if parsed_args.staging or parsed_args.dry_run:
self.set_test_server(parsed_args)
if parsed_args.csr:
self.handle_csr(parsed_args)
if parsed_args.must_staple:
parsed_args.staple = True
if parsed_args.validate_hooks:
hooks.validate_hooks(parsed_args)
if parsed_args.allow_subset_of_names:
if any(util.is_wildcard_domain(d) for d in parsed_args.domains):
raise errors.Error("Using --allow-subset-of-names with a"
" wildcard domain is not supported.")
if parsed_args.hsts and parsed_args.auto_hsts:
raise errors.Error(
"Parameters --hsts and --auto-hsts cannot be used simultaneously.")
possible_deprecation_warning(parsed_args)
return parsed_args
def set_test_server(self, parsed_args):
"""We have --staging/--dry-run; perform sanity check and set config.server"""
if parsed_args.server not in (flag_default("server"), constants.STAGING_URI):
conflicts = ["--staging"] if parsed_args.staging else []
conflicts += ["--dry-run"] if parsed_args.dry_run else []
raise errors.Error("--server value conflicts with {0}".format(
" and ".join(conflicts)))
parsed_args.server = constants.STAGING_URI
if parsed_args.dry_run:
if self.verb not in ["certonly", "renew"]:
raise errors.Error("--dry-run currently only works with the "
"'certonly' or 'renew' subcommands (%r)" % self.verb)
parsed_args.break_my_certs = parsed_args.staging = True
if glob.glob(os.path.join(parsed_args.config_dir, constants.ACCOUNTS_DIR, "*")):
# The user has a prod account, but might not have a staging
# one; we don't want to start trying to perform interactive registration
parsed_args.tos = True
parsed_args.register_unsafely_without_email = True
def handle_csr(self, parsed_args):
"""Process a --csr flag."""
if parsed_args.verb != "certonly":
raise errors.Error("Currently, a CSR file may only be specified "
"when obtaining a new or replacement "
"via the certonly command. Please try the "
"certonly command instead.")
if parsed_args.allow_subset_of_names:
raise errors.Error("--allow-subset-of-names cannot be used with --csr")
csrfile, contents = parsed_args.csr[0:2]
typ, csr, domains = crypto_util.import_csr_file(csrfile, contents)
# This is not necessary for webroot to work, however,
# obtain_certificate_from_csr requires parsed_args.domains to be set
for domain in domains:
add_domains(parsed_args, domain)
if not domains:
# TODO: add CN to domains instead:
raise errors.Error(
"Unfortunately, your CSR %s needs to have a SubjectAltName for every domain"
% parsed_args.csr[0])
parsed_args.actual_csr = (csr, typ)
csr_domains = set([d.lower() for d in domains])
config_domains = set(parsed_args.domains)
if csr_domains != config_domains:
raise errors.ConfigurationError(
"Inconsistent domain requests:\nFrom the CSR: {0}\nFrom command line/config: {1}"
.format(", ".join(csr_domains), ", ".join(config_domains)))
def determine_verb(self):
"""Determines the verb/subcommand provided by the user.
This function works around some of the limitations of argparse.
"""
if "-h" in self.args or "--help" in self.args:
# all verbs double as help arguments; don't get them confused
self.verb = "help"
return
for i, token in enumerate(self.args):
if token in self.VERBS:
verb = token
if verb == "auth":
verb = "certonly"
if verb == "everything":
verb = "run"
self.verb = verb
self.args.pop(i)
return
self.verb = "run"
def prescan_for_flag(self, flag, possible_arguments):
"""Checks cli input for flags.
Check for a flag, which accepts a fixed set of possible arguments, in
the command line; we will use this information to configure argparse's
help correctly. Return the flag's argument, if it has one that matches
the sequence @possible_arguments; otherwise return whether the flag is
present.
"""
if flag not in self.args:
return False
pos = self.args.index(flag)
try:
nxt = self.args[pos + 1]
if nxt in possible_arguments:
return nxt
except IndexError:
pass
return True
def add(self, topics, *args, **kwargs):
"""Add a new command line argument.
:param topics: str or [str] help topic(s) this should be listed under,
or None for options that don't fit under a specific
topic which will only be shown in "--help all" output.
The first entry determines where the flag lives in the
"--help all" output (None -> "optional arguments").
:param list *args: the names of this argument flag
:param dict **kwargs: various argparse settings for this argument
"""
if isinstance(topics, list):
# if this flag can be listed in multiple sections, try to pick the one
# that the user has asked for help about
topic = self.help_arg if self.help_arg in topics else topics[0]
else:
topic = topics # there's only one
if self.detect_defaults:
kwargs = self.modify_kwargs_for_default_detection(**kwargs)
if self.visible_topics[topic]:
if topic in self.groups:
group = self.groups[topic]
group.add_argument(*args, **kwargs)
else:
self.parser.add_argument(*args, **kwargs)
else:
kwargs["help"] = argparse.SUPPRESS
self.parser.add_argument(*args, **kwargs)
def modify_kwargs_for_default_detection(self, **kwargs):
"""Modify an arg so we can check if it was set by the user.
Changes the parameters given to argparse when adding an argument
so we can properly detect if the value was set by the user.
:param dict kwargs: various argparse settings for this argument
:returns: a modified versions of kwargs
:rtype: dict
"""
action = kwargs.get("action", None)
if action not in EXIT_ACTIONS:
kwargs["action"] = ("store_true" if action in ZERO_ARG_ACTIONS else
"store")
kwargs["default"] = _Default()
for param in ARGPARSE_PARAMS_TO_REMOVE:
kwargs.pop(param, None)
return kwargs
def add_deprecated_argument(self, argument_name, num_args):
"""Adds a deprecated argument with the name argument_name.
Deprecated arguments are not shown in the help. If they are used
on the command line, a warning is shown stating that the
argument is deprecated and no other action is taken.
:param str argument_name: Name of deprecated argument.
        :param int num_args: Number of arguments the option takes.
"""
util.add_deprecated_argument(
self.parser.add_argument, argument_name, num_args)
def add_group(self, topic, verbs=(), **kwargs):
"""Create a new argument group.
This method must be called once for every topic, however, calls
to this function are left next to the argument definitions for
clarity.
:param str topic: Name of the new argument group.
:param str verbs: List of subcommands that should be documented as part of
this help group / topic
:returns: The new argument group.
:rtype: `HelpfulArgumentGroup`
"""
if self.visible_topics[topic]:
self.groups[topic] = self.parser.add_argument_group(topic, **kwargs)
if self.help_arg:
for v in verbs:
self.groups[topic].add_argument(v, help=VERB_HELP_MAP[v]["short"])
return HelpfulArgumentGroup(self, topic)
def add_plugin_args(self, plugins):
"""
Let each of the plugins add its own command line arguments, which
may or may not be displayed as help topics.
"""
for name, plugin_ep in six.iteritems(plugins):
parser_or_group = self.add_group(name,
description=plugin_ep.long_description)
plugin_ep.plugin_cls.inject_parser_options(parser_or_group, name)
def determine_help_topics(self, chosen_topic):
"""
The user may have requested help on a topic, return a dict of which
topics to display. @chosen_topic has prescan_for_flag's return type
:returns: dict
"""
# topics maps each topic to whether it should be documented by
# argparse on the command line
if chosen_topic == "auth":
chosen_topic = "certonly"
if chosen_topic == "everything":
chosen_topic = "run"
if chosen_topic == "all":
# Addition of condition closes #6209 (removal of duplicate route53 option).
return dict([(t, True) if t != 'certbot-route53:auth' else (t, False)
for t in self.help_topics])
elif not chosen_topic:
return dict([(t, False) for t in self.help_topics])
return dict([(t, t == chosen_topic) for t in self.help_topics])
def _add_all_groups(helpful):
helpful.add_group("automation", description="Flags for automating execution & other tweaks")
helpful.add_group("security", description="Security parameters & server settings")
helpful.add_group("testing",
description="The following flags are meant for testing and integration purposes only.")
helpful.add_group("paths", description="Flags for changing execution paths & servers")
helpful.add_group("manage",
description="Various subcommands and flags are available for managing your certificates:",
verbs=["certificates", "delete", "renew", "revoke", "update_symlinks"])
# VERBS
for verb, docs in VERB_HELP:
name = docs.get("realname", verb)
helpful.add_group(name, description=docs["opts"])
def prepare_and_parse_args(plugins, args, detect_defaults=False): # pylint: disable=too-many-statements
"""Returns parsed command line arguments.
:param .PluginsRegistry plugins: available plugins
:param list args: command line arguments with the program name removed
:returns: parsed command line arguments
:rtype: argparse.Namespace
"""
# pylint: disable=too-many-statements
helpful = HelpfulArgumentParser(args, plugins, detect_defaults)
_add_all_groups(helpful)
# --help is automatically provided by argparse
helpful.add(
None, "-v", "--verbose", dest="verbose_count", action="count",
default=flag_default("verbose_count"), help="This flag can be used "
"multiple times to incrementally increase the verbosity of output, "
"e.g. -vvv.")
helpful.add(
None, "-t", "--text", dest="text_mode", action="store_true",
default=flag_default("text_mode"), help=argparse.SUPPRESS)
helpful.add(
None, "--max-log-backups", type=nonnegative_int,
default=flag_default("max_log_backups"),
help="Specifies the maximum number of backup logs that should "
"be kept by Certbot's built in log rotation. Setting this "
"flag to 0 disables log rotation entirely, causing "
"Certbot to always append to the same log file.")
helpful.add(
[None, "automation", "run", "certonly", "enhance"],
"-n", "--non-interactive", "--noninteractive",
dest="noninteractive_mode", action="store_true",
default=flag_default("noninteractive_mode"),
help="Run without ever asking for user input. This may require "
"additional command line flags; the client will try to explain "
"which ones are required if it finds one missing")
helpful.add(
[None, "register", "run", "certonly", "enhance"],
constants.FORCE_INTERACTIVE_FLAG, action="store_true",
default=flag_default("force_interactive"),
help="Force Certbot to be interactive even if it detects it's not "
"being run in a terminal. This flag cannot be used with the "
"renew subcommand.")
helpful.add(
[None, "run", "certonly", "certificates", "enhance"],
"-d", "--domains", "--domain", dest="domains",
metavar="DOMAIN", action=_DomainsAction,
default=flag_default("domains"),
help="Domain names to apply. For multiple domains you can use "
"multiple -d flags or enter a comma separated list of domains "
"as a parameter. The first domain provided will be the "
"subject CN of the certificate, and all domains will be "
"Subject Alternative Names on the certificate. "
"The first domain will also be used in "
"some software user interfaces and as the file paths for the "
"certificate and related material unless otherwise "
"specified or you already have a certificate with the same "
"name. In the case of a name collision it will append a number "
"like 0001 to the file path name. (default: Ask)")
helpful.add(
[None, "run", "certonly", "register"],
"--eab-kid", dest="eab_kid",
metavar="EAB_KID",
help="Key Identifier for External Account Binding"
)
helpful.add(
[None, "run", "certonly", "register"],
"--eab-hmac-key", dest="eab_hmac_key",
metavar="EAB_HMAC_KEY",
help="HMAC key for External Account Binding"
)
helpful.add(
[None, "run", "certonly", "manage", "delete", "certificates",
"renew", "enhance"], "--cert-name", dest="certname",
metavar="CERTNAME", default=flag_default("certname"),
help="Certificate name to apply. This name is used by Certbot for housekeeping "
"and in file paths; it doesn't affect the content of the certificate itself. "
"To see certificate names, run 'certbot certificates'. "
"When creating a new certificate, specifies the new certificate's name. "
"(default: the first provided domain or the name of an existing "
"certificate on your system for the same domains)")
helpful.add(
[None, "testing", "renew", "certonly"],
"--dry-run", action="store_true", dest="dry_run",
default=flag_default("dry_run"),
help="Perform a test run of the client, obtaining test (invalid) certificates"
" but not saving them to disk. This can currently only be used"
" with the 'certonly' and 'renew' subcommands. \nNote: Although --dry-run"
" tries to avoid making any persistent changes on a system, it "
" is not completely side-effect free: if used with webserver authenticator plugins"
" like apache and nginx, it makes and then reverts temporary config changes"
" in order to obtain test certificates, and reloads webservers to deploy and then"
" roll back those changes. It also calls --pre-hook and --post-hook commands"
" if they are defined because they may be necessary to accurately simulate"
" renewal. --deploy-hook commands are not called.")
helpful.add(
["register", "automation"], "--register-unsafely-without-email", action="store_true",
default=flag_default("register_unsafely_without_email"),
help="Specifying this flag enables registering an account with no "
"email address. This is strongly discouraged, because in the "
"event of key loss or account compromise you will irrevocably "
"lose access to your account. You will also be unable to receive "
"notice about impending expiration or revocation of your "
"certificates. Updates to the Subscriber Agreement will still "
"affect you, and will be effective 14 days after posting an "
"update to the web site.")
# TODO: When `certbot register --update-registration` is fully deprecated,
# delete following helpful.add
helpful.add(
"register", "--update-registration", action="store_true",
default=flag_default("update_registration"), dest="update_registration",
help=argparse.SUPPRESS)
helpful.add(
["register", "update_account", "unregister", "automation"], "-m", "--email",
default=flag_default("email"),
help=config_help("email"))
helpful.add(["register", "update_account", "automation"], "--eff-email", action="store_true",
default=flag_default("eff_email"), dest="eff_email",
help="Share your e-mail address with EFF")
helpful.add(["register", "update_account", "automation"], "--no-eff-email",
action="store_false", default=flag_default("eff_email"), dest="eff_email",
help="Don't share your e-mail address with EFF")
helpful.add(
["automation", "certonly", "run"],
"--keep-until-expiring", "--keep", "--reinstall",
dest="reinstall", action="store_true", default=flag_default("reinstall"),
help="If the requested certificate matches an existing certificate, always keep the "
"existing one until it is due for renewal (for the "
"'run' subcommand this means reinstall the existing certificate). (default: Ask)")
helpful.add(
"automation", "--expand", action="store_true", default=flag_default("expand"),
help="If an existing certificate is a strict subset of the requested names, "
"always expand and replace it with the additional names. (default: Ask)")
helpful.add(
"automation", "--version", action="version",
version="%(prog)s {0}".format(certbot.__version__),
help="show program's version number and exit")
helpful.add(
["automation", "renew"],
"--force-renewal", "--renew-by-default", dest="renew_by_default",
action="store_true", default=flag_default("renew_by_default"),
help="If a certificate "
"already exists for the requested domains, renew it now, "
"regardless of whether it is near expiry. (Often "
"--keep-until-expiring is more appropriate). Also implies "
"--expand.")
helpful.add(
"automation", "--renew-with-new-domains", dest="renew_with_new_domains",
action="store_true", default=flag_default("renew_with_new_domains"),
help="If a "
"certificate already exists for the requested certificate name "
"but does not match the requested domains, renew it now, "
"regardless of whether it is near expiry.")
helpful.add(
"automation", "--reuse-key", dest="reuse_key",
action="store_true", default=flag_default("reuse_key"),
help="When renewing, use the same private key as the existing "
"certificate.")
helpful.add(
["automation", "renew", "certonly"],
"--allow-subset-of-names", action="store_true",
default=flag_default("allow_subset_of_names"),
help="When performing domain validation, do not consider it a failure "
"if authorizations can not be obtained for a strict subset of "
"the requested domains. This may be useful for allowing renewals for "
"multiple domains to succeed even if some domains no longer point "
"at this system. This option cannot be used with --csr.")
helpful.add(
"automation", "--agree-tos", dest="tos", action="store_true",
default=flag_default("tos"),
help="Agree to the ACME Subscriber Agreement (default: Ask)")
helpful.add(
["unregister", "automation"], "--account", metavar="ACCOUNT_ID",
default=flag_default("account"),
help="Account ID to use")
helpful.add(
"automation", "--duplicate", dest="duplicate", action="store_true",
default=flag_default("duplicate"),
help="Allow making a certificate lineage that duplicates an existing one "
"(both can be renewed in parallel)")
helpful.add(
"automation", "--os-packages-only", action="store_true",
default=flag_default("os_packages_only"),
help="(certbot-auto only) install OS package dependencies and then stop")
helpful.add(
"automation", "--no-self-upgrade", action="store_true",
default=flag_default("no_self_upgrade"),
help="(certbot-auto only) prevent the certbot-auto script from"
" upgrading itself to newer released versions (default: Upgrade"
" automatically)")
helpful.add(
"automation", "--no-bootstrap", action="store_true",
default=flag_default("no_bootstrap"),
help="(certbot-auto only) prevent the certbot-auto script from"
" installing OS-level dependencies (default: Prompt to install "
" OS-wide dependencies, but exit if the user says 'No')")
helpful.add(
"automation", "--no-permissions-check", action="store_true",
default=flag_default("no_permissions_check"),
help="(certbot-auto only) skip the check on the file system"
" permissions of the certbot-auto script")
helpful.add(
["automation", "renew", "certonly", "run"],
"-q", "--quiet", dest="quiet", action="store_true",
default=flag_default("quiet"),
help="Silence all output except errors. Useful for automation via cron."
" Implies --non-interactive.")
# overwrites server, handled in HelpfulArgumentParser.parse_args()
helpful.add(["testing", "revoke", "run"], "--test-cert", "--staging",
dest="staging", action="store_true", default=flag_default("staging"),
help="Use the staging server to obtain or revoke test (invalid) certificates; equivalent"
" to --server " + constants.STAGING_URI)
helpful.add(
"testing", "--debug", action="store_true", default=flag_default("debug"),
help="Show tracebacks in case of errors, and allow certbot-auto "
"execution on experimental platforms")
helpful.add(
[None, "certonly", "run"], "--debug-challenges", action="store_true",
default=flag_default("debug_challenges"),
help="After setting up challenges, wait for user input before "
"submitting to CA")
helpful.add(
"testing", "--no-verify-ssl", action="store_true",
help=config_help("no_verify_ssl"),
default=flag_default("no_verify_ssl"))
helpful.add(
["testing", "standalone", "manual"], "--http-01-port", type=int,
dest="http01_port",
default=flag_default("http01_port"), help=config_help("http01_port"))
helpful.add(
["testing", "standalone"], "--http-01-address",
dest="http01_address",
default=flag_default("http01_address"), help=config_help("http01_address"))
helpful.add(
["testing", "nginx"], "--https-port", type=int,
default=flag_default("https_port"),
help=config_help("https_port"))
helpful.add(
"testing", "--break-my-certs", action="store_true",
default=flag_default("break_my_certs"),
help="Be willing to replace or renew valid certificates with invalid "
"(testing/staging) certificates")
helpful.add(
"security", "--rsa-key-size", type=int, metavar="N",
default=flag_default("rsa_key_size"), help=config_help("rsa_key_size"))
helpful.add(
"security", "--must-staple", action="store_true",
dest="must_staple", default=flag_default("must_staple"),
help=config_help("must_staple"))
helpful.add(
["security", "enhance"],
"--redirect", action="store_true", dest="redirect",
default=flag_default("redirect"),
help="Automatically redirect all HTTP traffic to HTTPS for the newly "
"authenticated vhost. (default: Ask)")
helpful.add(
"security", "--no-redirect", action="store_false", dest="redirect",
default=flag_default("redirect"),
help="Do not automatically redirect all HTTP traffic to HTTPS for the newly "
"authenticated vhost. (default: Ask)")
helpful.add(
["security", "enhance"],
"--hsts", action="store_true", dest="hsts", default=flag_default("hsts"),
        help="Add the Strict-Transport-Security header to every HTTP response."
             " Forcing the browser to always use SSL for the domain."
             " Defends against SSL Stripping.")
helpful.add(
"security", "--no-hsts", action="store_false", dest="hsts",
default=flag_default("hsts"), help=argparse.SUPPRESS)
helpful.add(
["security", "enhance"],
"--uir", action="store_true", dest="uir", default=flag_default("uir"),
help='Add the "Content-Security-Policy: upgrade-insecure-requests"'
' header to every HTTP response. Forcing the browser to use'
' https:// for every http:// resource.')
helpful.add(
"security", "--no-uir", action="store_false", dest="uir", default=flag_default("uir"),
help=argparse.SUPPRESS)
helpful.add(
"security", "--staple-ocsp", action="store_true", dest="staple",
default=flag_default("staple"),
help="Enables OCSP Stapling. A valid OCSP response is stapled to"
" the certificate that the server offers during TLS.")
helpful.add(
"security", "--no-staple-ocsp", action="store_false", dest="staple",
default=flag_default("staple"), help=argparse.SUPPRESS)
helpful.add(
"security", "--strict-permissions", action="store_true",
default=flag_default("strict_permissions"),
help="Require that all configuration files are owned by the current "
"user; only needed if your config is somewhere unsafe like /tmp/")
helpful.add(
["manual", "standalone", "certonly", "renew"],
"--preferred-challenges", dest="pref_challs",
action=_PrefChallAction, default=flag_default("pref_challs"),
        help='A sorted, comma delimited list of the preferred challenges to '
'use during authorization with the most preferred challenge '
'listed first (Eg, "dns" or "http,dns"). '
'Not all plugins support all challenges. See '
'https://certbot.eff.org/docs/using.html#plugins for details. '
'ACME Challenges are versioned, but if you pick "http" rather '
'than "http-01", Certbot will select the latest version '
'automatically.')
helpful.add(
"renew", "--pre-hook",
help="Command to be run in a shell before obtaining any certificates."
" Intended primarily for renewal, where it can be used to temporarily"
" shut down a webserver that might conflict with the standalone"
" plugin. This will only be called if a certificate is actually to be"
" obtained/renewed. When renewing several certificates that have"
" identical pre-hooks, only the first will be executed.")
helpful.add(
"renew", "--post-hook",
help="Command to be run in a shell after attempting to obtain/renew"
" certificates. Can be used to deploy renewed certificates, or to"
" restart any servers that were stopped by --pre-hook. This is only"
" run if an attempt was made to obtain/renew a certificate. If"
" multiple renewed certificates have identical post-hooks, only"
" one will be run.")
helpful.add("renew", "--renew-hook",
action=_RenewHookAction, help=argparse.SUPPRESS)
helpful.add(
"renew", "--no-random-sleep-on-renew", action="store_false",
default=flag_default("random_sleep_on_renew"), dest="random_sleep_on_renew",
help=argparse.SUPPRESS)
helpful.add(
"renew", "--deploy-hook", action=_DeployHookAction,
help='Command to be run in a shell once for each successfully'
' issued certificate. For this command, the shell variable'
' $RENEWED_LINEAGE will point to the config live subdirectory'
' (for example, "/etc/letsencrypt/live/example.com") containing'
' the new certificates and keys; the shell variable'
' $RENEWED_DOMAINS will contain a space-delimited list of'
' renewed certificate domains (for example, "example.com'
        ' www.example.com")')
helpful.add(
"renew", "--disable-hook-validation",
action="store_false", dest="validate_hooks",
default=flag_default("validate_hooks"),
help="Ordinarily the commands specified for"
" --pre-hook/--post-hook/--deploy-hook will be checked for"
" validity, to see if the programs being run are in the $PATH,"
" so that mistakes can be caught early, even when the hooks"
" aren't being run just yet. The validation is rather"
" simplistic and fails if you use more advanced shell"
" constructs, so you can use this switch to disable it."
" (default: False)")
helpful.add(
"renew", "--no-directory-hooks", action="store_false",
default=flag_default("directory_hooks"), dest="directory_hooks",
help="Disable running executables found in Certbot's hook directories"
" during renewal. (default: False)")
helpful.add(
"renew", "--disable-renew-updates", action="store_true",
default=flag_default("disable_renew_updates"), dest="disable_renew_updates",
help="Disable automatic updates to your server configuration that"
" would otherwise be done by the selected installer plugin, and triggered"
" when the user executes \"certbot renew\", regardless of if the certificate"
" is renewed. This setting does not apply to important TLS configuration"
" updates.")
helpful.add(
"renew", "--no-autorenew", action="store_false",
default=flag_default("autorenew"), dest="autorenew",
help="Disable auto renewal of certificates.")
helpful.add_deprecated_argument("--agree-dev-preview", 0)
helpful.add_deprecated_argument("--dialog", 0)
# Deprecation of tls-sni-01 related cli flags
    # TODO: remove these flags completely in a few releases
class _DeprecatedTLSSNIAction(util._ShowWarning): # pylint: disable=protected-access
def __call__(self, parser, namespace, values, option_string=None):
super(_DeprecatedTLSSNIAction, self).__call__(parser, namespace, values, option_string)
namespace.https_port = values
helpful.add(
["testing", "standalone", "apache", "nginx"], "--tls-sni-01-port",
type=int, action=_DeprecatedTLSSNIAction, help=argparse.SUPPRESS)
helpful.add_deprecated_argument("--tls-sni-01-address", 1)
# Populate the command line parameters for new style enhancements
enhancements.populate_cli(helpful.add)
_create_subparsers(helpful)
_paths_parser(helpful)
# _plugins_parsing should be the last thing to act upon the main
# parser (--help should display plugin-specific options last)
_plugins_parsing(helpful, plugins)
if not detect_defaults:
global helpful_parser # pylint: disable=global-statement
helpful_parser = helpful
return helpful.parse_args()
def _create_subparsers(helpful):
helpful.add("config_changes", "--num", type=int, default=flag_default("num"),
help="How many past revisions you want to be displayed")
from certbot.client import sample_user_agent # avoid import loops
helpful.add(
None, "--user-agent", default=flag_default("user_agent"),
help='Set a custom user agent string for the client. User agent strings allow '
'the CA to collect high level statistics about success rates by OS, '
'plugin and use case, and to know when to deprecate support for past Python '
"versions and flags. If you wish to hide this information from the Let's "
'Encrypt server, set this to "". '
'(default: {0}). The flags encoded in the user agent are: '
'--duplicate, --force-renew, --allow-subset-of-names, -n, and '
'whether any hooks are set.'.format(sample_user_agent()))
helpful.add(
None, "--user-agent-comment", default=flag_default("user_agent_comment"),
type=_user_agent_comment_type,
help="Add a comment to the default user agent string. May be used when repackaging Certbot "
"or calling it from another tool to allow additional statistical data to be collected."
" Ignored if --user-agent is set. (Example: Foo-Wrapper/1.0)")
helpful.add("certonly",
"--csr", default=flag_default("csr"), type=read_file,
help="Path to a Certificate Signing Request (CSR) in DER or PEM format."
" Currently --csr only works with the 'certonly' subcommand.")
helpful.add("revoke",
"--reason", dest="reason",
choices=CaseInsensitiveList(sorted(constants.REVOCATION_REASONS,
key=constants.REVOCATION_REASONS.get)),
action=_EncodeReasonAction, default=flag_default("reason"),
help="Specify reason for revoking certificate. (default: unspecified)")
helpful.add("revoke",
"--delete-after-revoke", action="store_true",
default=flag_default("delete_after_revoke"),
help="Delete certificates after revoking them, along with all previous and later "
"versions of those certificates.")
helpful.add("revoke",
"--no-delete-after-revoke", action="store_false",
dest="delete_after_revoke",
default=flag_default("delete_after_revoke"),
help="Do not delete certificates after revoking them. This "
"option should be used with caution because the 'renew' "
"subcommand will attempt to renew undeleted revoked "
"certificates.")
helpful.add("rollback",
"--checkpoints", type=int, metavar="N",
default=flag_default("rollback_checkpoints"),
help="Revert configuration N number of checkpoints.")
helpful.add("plugins",
"--init", action="store_true", default=flag_default("init"),
help="Initialize plugins.")
helpful.add("plugins",
"--prepare", action="store_true", default=flag_default("prepare"),
help="Initialize and prepare plugins.")
helpful.add("plugins",
"--authenticators", action="append_const", dest="ifaces",
default=flag_default("ifaces"),
const=interfaces.IAuthenticator, help="Limit to authenticator plugins only.")
helpful.add("plugins",
"--installers", action="append_const", dest="ifaces",
default=flag_default("ifaces"),
const=interfaces.IInstaller, help="Limit to installer plugins only.")
class CaseInsensitiveList(list):
"""A list that will ignore case when searching.
This class is passed to the `choices` argument of `argparse.add_arguments`
through the `helpful` wrapper. It is necessary due to special handling of
command line arguments by `set_by_cli` in which the `type_func` is not applied."""
def __contains__(self, element):
return super(CaseInsensitiveList, self).__contains__(element.lower())
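# Illustrative sketch (not part of the original source): membership checks
# lowercase the candidate, so a list built from lowercase choices -- which is
# how it is used for the revoke --reason flag above -- matches any casing
# given on the command line:
#
#     reasons = CaseInsensitiveList(["unspecified", "keycompromise"])
#     "KeyCompromise" in reasons   # -> True
#     "missing" in reasons         # -> False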
def _paths_parser(helpful):
add = helpful.add
verb = helpful.verb
if verb == "help":
verb = helpful.help_arg
cph = "Path to where certificate is saved (with auth --csr), installed from, or revoked."
sections = ["paths", "install", "revoke", "certonly", "manage"]
if verb == "certonly":
add(sections, "--cert-path", type=os.path.abspath,
default=flag_default("auth_cert_path"), help=cph)
elif verb == "revoke":
add(sections, "--cert-path", type=read_file, required=False, help=cph)
else:
add(sections, "--cert-path", type=os.path.abspath, help=cph)
section = "paths"
if verb in ("install", "revoke"):
section = verb
# revoke --key-path reads a file, install --key-path takes a string
add(section, "--key-path",
type=((verb == "revoke" and read_file) or os.path.abspath),
help="Path to private key for certificate installation "
"or revocation (if account key is missing)")
default_cp = None
if verb == "certonly":
default_cp = flag_default("auth_chain_path")
add(["paths", "install"], "--fullchain-path", default=default_cp, type=os.path.abspath,
help="Accompanying path to a full certificate chain (certificate plus chain).")
add("paths", "--chain-path", default=default_cp, type=os.path.abspath,
help="Accompanying path to a certificate chain.")
add("paths", "--config-dir", default=flag_default("config_dir"),
help=config_help("config_dir"))
add("paths", "--work-dir", default=flag_default("work_dir"),
help=config_help("work_dir"))
add("paths", "--logs-dir", default=flag_default("logs_dir"),
help="Logs directory.")
add("paths", "--server", default=flag_default("server"),
help=config_help("server"))
def _plugins_parsing(helpful, plugins):
# It's nuts, but there are two "plugins" topics. Somehow this works
helpful.add_group(
"plugins", description="Plugin Selection: Certbot client supports an "
"extensible plugins architecture. See '%(prog)s plugins' for a "
"list of all installed plugins and their names. You can force "
"a particular plugin by setting options provided below. Running "
"--help <plugin_name> will list flags specific to that plugin.")
helpful.add("plugins", "--configurator", default=flag_default("configurator"),
help="Name of the plugin that is both an authenticator and an installer."
" Should not be used together with --authenticator or --installer. "
"(default: Ask)")
helpful.add("plugins", "-a", "--authenticator", default=flag_default("authenticator"),
help="Authenticator plugin name.")
helpful.add("plugins", "-i", "--installer", default=flag_default("installer"),
help="Installer plugin name (also used to find domains).")
helpful.add(["plugins", "certonly", "run", "install", "config_changes"],
"--apache", action="store_true", default=flag_default("apache"),
help="Obtain and install certificates using Apache")
helpful.add(["plugins", "certonly", "run", "install", "config_changes"],
"--nginx", action="store_true", default=flag_default("nginx"),
help="Obtain and install certificates using Nginx")
helpful.add(["plugins", "certonly"], "--standalone", action="store_true",
default=flag_default("standalone"),
help='Obtain certificates using a "standalone" webserver.')
helpful.add(["plugins", "certonly"], "--manual", action="store_true",
default=flag_default("manual"),
help="Provide laborious manual instructions for obtaining a certificate")
helpful.add(["plugins", "certonly"], "--webroot", action="store_true",
default=flag_default("webroot"),
help="Obtain certificates by placing files in a webroot directory.")
helpful.add(["plugins", "certonly"], "--dns-cloudflare", action="store_true",
default=flag_default("dns_cloudflare"),
help=("Obtain certificates using a DNS TXT record (if you are "
"using Cloudflare for DNS)."))
helpful.add(["plugins", "certonly"], "--dns-cloudxns", action="store_true",
default=flag_default("dns_cloudxns"),
help=("Obtain certificates using a DNS TXT record (if you are "
"using CloudXNS for DNS)."))
helpful.add(["plugins", "certonly"], "--dns-digitalocean", action="store_true",
default=flag_default("dns_digitalocean"),
help=("Obtain certificates using a DNS TXT record (if you are "
"using DigitalOcean for DNS)."))
helpful.add(["plugins", "certonly"], "--dns-dnsimple", action="store_true",
default=flag_default("dns_dnsimple"),
help=("Obtain certificates using a DNS TXT record (if you are "
"using DNSimple for DNS)."))
helpful.add(["plugins", "certonly"], "--dns-dnsmadeeasy", action="store_true",
default=flag_default("dns_dnsmadeeasy"),
help=("Obtain certificates using a DNS TXT record (if you are "
"using DNS Made Easy for DNS)."))
helpful.add(["plugins", "certonly"], "--dns-gehirn", action="store_true",
default=flag_default("dns_gehirn"),
                help=("Obtain certificates using a DNS TXT record "
                      "(if you are using Gehirn Infrastructure Service for DNS)."))
helpful.add(["plugins", "certonly"], "--dns-google", action="store_true",
default=flag_default("dns_google"),
help=("Obtain certificates using a DNS TXT record (if you are "
"using Google Cloud DNS)."))
helpful.add(["plugins", "certonly"], "--dns-linode", action="store_true",
default=flag_default("dns_linode"),
help=("Obtain certificates using a DNS TXT record (if you are "
"using Linode for DNS)."))
helpful.add(["plugins", "certonly"], "--dns-luadns", action="store_true",
default=flag_default("dns_luadns"),
help=("Obtain certificates using a DNS TXT record (if you are "
"using LuaDNS for DNS)."))
helpful.add(["plugins", "certonly"], "--dns-nsone", action="store_true",
default=flag_default("dns_nsone"),
help=("Obtain certificates using a DNS TXT record (if you are "
"using NS1 for DNS)."))
helpful.add(["plugins", "certonly"], "--dns-ovh", action="store_true",
default=flag_default("dns_ovh"),
help=("Obtain certificates using a DNS TXT record (if you are "
"using OVH for DNS)."))
helpful.add(["plugins", "certonly"], "--dns-rfc2136", action="store_true",
default=flag_default("dns_rfc2136"),
help="Obtain certificates using a DNS TXT record (if you are using BIND for DNS).")
helpful.add(["plugins", "certonly"], "--dns-route53", action="store_true",
default=flag_default("dns_route53"),
help=("Obtain certificates using a DNS TXT record (if you are using Route53 for "
"DNS)."))
helpful.add(["plugins", "certonly"], "--dns-sakuracloud", action="store_true",
default=flag_default("dns_sakuracloud"),
help=("Obtain certificates using a DNS TXT record "
"(if you are using Sakura Cloud for DNS)."))
    # things should not be reordered past/pre this comment:
# plugins_group should be displayed in --help before plugin
# specific groups (so that plugins_group.description makes sense)
helpful.add_plugin_args(plugins)
class _EncodeReasonAction(argparse.Action):
"""Action class for parsing revocation reason."""
def __call__(self, parser, namespace, reason, option_string=None):
"""Encodes the reason for certificate revocation."""
code = constants.REVOCATION_REASONS[reason.lower()]
setattr(namespace, self.dest, code)
class _DomainsAction(argparse.Action):
"""Action class for parsing domains."""
def __call__(self, parser, namespace, domain, option_string=None):
"""Just wrap add_domains in argparseese."""
add_domains(namespace, domain)
def add_domains(args_or_config, domains):
"""Registers new domains to be used during the current client run.
Domains are not added to the list of requested domains if they have
already been registered.
:param args_or_config: parsed command line arguments
:type args_or_config: argparse.Namespace or
configuration.NamespaceConfig
    :param str domains: one or more comma separated domains
:returns: domains after they have been normalized and validated
:rtype: `list` of `str`
"""
validated_domains = []
for domain in domains.split(","):
domain = util.enforce_domain_sanity(domain.strip())
validated_domains.append(domain)
if domain not in args_or_config.domains:
args_or_config.domains.append(domain)
return validated_domains
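# Illustrative sketch (not part of the original source; the values are made
# up): given a namespace that already tracks requested domains,
#
#     ns = argparse.Namespace(domains=["example.com"])
#     add_domains(ns, "example.com, www.example.com")
#
# would return the sanity-checked pair ["example.com", "www.example.com"]
# and append only the new "www.example.com" entry to ns.domains.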
class _PrefChallAction(argparse.Action):
"""Action class for parsing preferred challenges."""
def __call__(self, parser, namespace, pref_challs, option_string=None):
try:
challs = parse_preferred_challenges(pref_challs.split(","))
except errors.Error as error:
raise argparse.ArgumentError(self, str(error))
namespace.pref_challs.extend(challs)
def parse_preferred_challenges(pref_challs):
"""Translate and validate preferred challenges.
:param pref_challs: list of preferred challenge types
:type pref_challs: `list` of `str`
:returns: validated list of preferred challenge types
:rtype: `list` of `str`
:raises errors.Error: if pref_challs is invalid
"""
aliases = {"dns": "dns-01", "http": "http-01", "tls-sni": "tls-sni-01"}
challs = [c.strip() for c in pref_challs]
challs = [aliases.get(c, c) for c in challs]
    # Drop tls-sni-01 from the list and generate a deprecation warning
    # TODO: remove this option completely in a few releases
if "tls-sni-01" in challs:
logger.warning('TLS-SNI-01 support is deprecated. This value is being dropped from the '
'setting of --preferred-challenges and future versions of Certbot will '
'error if it is included.')
challs = [chall for chall in challs if chall != "tls-sni-01"]
unrecognized = ", ".join(name for name in challs
if name not in challenges.Challenge.TYPES)
if unrecognized:
raise errors.Error(
"Unrecognized challenges: {0}".format(unrecognized))
return challs
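# Illustrative sketch (not part of the original source): short aliases are
# expanded and the deprecated tls-sni-01 entry is dropped with a warning, so
#
#     parse_preferred_challenges(["http", "dns-01"])   # -> ["http-01", "dns-01"]
#     parse_preferred_challenges(["tls-sni", "http"])  # -> ["http-01"] (plus warning)
#
# while an unrecognized name raises errors.Error.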
def _user_agent_comment_type(value):
if "(" in value or ")" in value:
raise argparse.ArgumentTypeError("may not contain parentheses")
return value
class _DeployHookAction(argparse.Action):
"""Action class for parsing deploy hooks."""
def __call__(self, parser, namespace, values, option_string=None):
renew_hook_set = namespace.deploy_hook != namespace.renew_hook
if renew_hook_set and namespace.renew_hook != values:
raise argparse.ArgumentError(
self, "conflicts with --renew-hook value")
namespace.deploy_hook = namespace.renew_hook = values
class _RenewHookAction(argparse.Action):
"""Action class for parsing renew hooks."""
def __call__(self, parser, namespace, values, option_string=None):
deploy_hook_set = namespace.deploy_hook is not None
if deploy_hook_set and namespace.deploy_hook != values:
raise argparse.ArgumentError(
self, "conflicts with --deploy-hook value")
namespace.renew_hook = values
def nonnegative_int(value):
"""Converts value to an int and checks that it is not negative.
    This function should be used as the type parameter for argparse
arguments.
:param str value: value provided on the command line
:returns: integer representation of value
:rtype: int
:raises argparse.ArgumentTypeError: if value isn't a non-negative integer
"""
try:
int_value = int(value)
except ValueError:
raise argparse.ArgumentTypeError("value must be an integer")
if int_value < 0:
raise argparse.ArgumentTypeError("value must be non-negative")
return int_value
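# Illustrative note (not part of the original source): this is meant to be
# passed as an argparse "type" callable, e.g.
#
#     parser.add_argument("--max-log-backups", type=nonnegative_int)
#
# so a value like "-1" is rejected with an ArgumentTypeError before the rest
# of the CLI ever sees it (--max-log-backups above is wired up this way).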
| 45.639755 | 134 | 0.639887 |
7942442203076f3ef9bf083b8ec144db80f0c5f7 | 119 | py | Python | azure_monitor/src/azure_monitor/version.py | hectorhdzg/opentelemetry-azure-monitor-python | f57679d80f259181486a1124f0d6b71012d4826b | [
"MIT"
] | null | null | null | azure_monitor/src/azure_monitor/version.py | hectorhdzg/opentelemetry-azure-monitor-python | f57679d80f259181486a1124f0d6b71012d4826b | [
"MIT"
] | null | null | null | azure_monitor/src/azure_monitor/version.py | hectorhdzg/opentelemetry-azure-monitor-python | f57679d80f259181486a1124f0d6b71012d4826b | [
"MIT"
] | 1 | 2020-07-30T13:31:44.000Z | 2020-07-30T13:31:44.000Z | # Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
__version__ = "0.4.dev0"
| 29.75 | 59 | 0.756303 |
794244479cde3a398065451c51f81014624c3a16 | 1,481 | py | Python | third_party/mosquitto/test/broker/03-publish-dollar-v5.py | HowJMay/simple-tangle-accelerator | d79bfda23a0fcf67d5a7f9e66f02efa3e73ba381 | [
"MIT"
] | null | null | null | third_party/mosquitto/test/broker/03-publish-dollar-v5.py | HowJMay/simple-tangle-accelerator | d79bfda23a0fcf67d5a7f9e66f02efa3e73ba381 | [
"MIT"
] | null | null | null | third_party/mosquitto/test/broker/03-publish-dollar-v5.py | HowJMay/simple-tangle-accelerator | d79bfda23a0fcf67d5a7f9e66f02efa3e73ba381 | [
"MIT"
] | 1 | 2021-05-04T16:09:27.000Z | 2021-05-04T16:09:27.000Z | #!/usr/bin/env python3
# Test whether a PUBLISH to $ topics QoS 1 results in the expected PUBACK packet.
from mosq_test_helper import *
mid = 1
def helper(topic, reason_code):
global mid
publish_packet = mosq_test.gen_publish(topic, qos=1, mid=mid, payload="message", proto_ver=5)
if reason_code == 0:
puback_packet = mosq_test.gen_puback(mid, proto_ver=5)
else:
puback_packet = mosq_test.gen_puback(mid, proto_ver=5, reason_code=reason_code)
sock = mosq_test.do_client_connect(connect_packet, connack_packet, port=port)
mosq_test.do_send_receive(sock, publish_packet, puback_packet, "puback%d"%(mid))
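# Illustrative note (not part of the original test): each call such as
# helper("$SYS/broker/uptime", mqtt5_rc.MQTT_RC_NOT_AUTHORIZED) publishes one
# QoS 1 message to the given $-prefixed topic and asserts that the broker
# answers with a PUBACK carrying that MQTT v5 reason code (or a plain PUBACK
# when the expected code is 0).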
rc = 1
keepalive = 60
connect_packet = mosq_test.gen_connect("pub-test", keepalive=keepalive, proto_ver=5)
connack_packet = mosq_test.gen_connack(rc=0, proto_ver=5)
port = mosq_test.get_port()
broker = mosq_test.start_broker(filename=os.path.basename(__file__), port=port)
try:
sock = mosq_test.do_client_connect(connect_packet, connack_packet, port=port)
helper("$SYS/broker/uptime", mqtt5_rc.MQTT_RC_NOT_AUTHORIZED)
helper("$SYS/broker/connection/me", mqtt5_rc.MQTT_RC_NOT_AUTHORIZED)
helper("$SYS/broker/connection/me/state", mqtt5_rc.MQTT_RC_NO_MATCHING_SUBSCRIBERS)
helper("$share/share/topic", mqtt5_rc.MQTT_RC_NOT_AUTHORIZED)
rc = 0
sock.close()
finally:
broker.terminate()
broker.wait()
(stdo, stde) = broker.communicate()
if rc:
print(stde.decode('utf-8'))
exit(rc)
| 31.510638 | 97 | 0.735989 |
7942448d45ee3fb70d1bdf5b147ccbb936c57314 | 140 | py | Python | tcsstamp/__version__.py | rhuygen/tcsstamp | 61fbf80210dc49b05028dad5edc3b816ed4f0973 | [
"MIT"
] | null | null | null | tcsstamp/__version__.py | rhuygen/tcsstamp | 61fbf80210dc49b05028dad5edc3b816ed4f0973 | [
"MIT"
] | null | null | null | tcsstamp/__version__.py | rhuygen/tcsstamp | 61fbf80210dc49b05028dad5edc3b816ed4f0973 | [
"MIT"
] | null | null | null | """
The one and only place for the version number.
This file is used by
"""
VERSION = (0, 3, 7)
__version__ = '.'.join(map(str, VERSION))
| 15.555556 | 46 | 0.65 |
794244d0fb94f4613871c46cc34e89661efa0174 | 25,223 | py | Python | keystone/tests/unit/core.py | maestro-hybrid-cloud/keystone | a597a86b854215835a4d54885daeb161d7b0efb8 | [
"Apache-2.0"
] | null | null | null | keystone/tests/unit/core.py | maestro-hybrid-cloud/keystone | a597a86b854215835a4d54885daeb161d7b0efb8 | [
"Apache-2.0"
] | null | null | null | keystone/tests/unit/core.py | maestro-hybrid-cloud/keystone | a597a86b854215835a4d54885daeb161d7b0efb8 | [
"Apache-2.0"
] | null | null | null | # Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import
import atexit
import datetime
import functools
import logging
import os
import re
import shutil
import socket
import sys
import uuid
import warnings
import fixtures
from oslo_config import cfg
from oslo_config import fixture as config_fixture
from oslo_log import fixture as log_fixture
from oslo_log import log
from oslo_utils import timeutils
import oslotest.base as oslotest
from oslotest import mockpatch
from paste.deploy import loadwsgi
import six
from sqlalchemy import exc
from testtools import testcase
# NOTE(ayoung)
# environment.use_eventlet must run before any of the code that will
# call the eventlet monkeypatching.
from keystone.common import environment # noqa
environment.use_eventlet()
from keystone import auth
from keystone.common import config as common_cfg
from keystone.common import dependency
from keystone.common import kvs
from keystone.common.kvs import core as kvs_core
from keystone.common import sql
from keystone import config
from keystone import exception
from keystone import notifications
from keystone.policy.backends import rules
from keystone.server import common
from keystone.tests.unit import ksfixtures
from keystone.version import controllers
from keystone.version import service
config.configure()
LOG = log.getLogger(__name__)
PID = six.text_type(os.getpid())
TESTSDIR = os.path.dirname(os.path.abspath(__file__))
TESTCONF = os.path.join(TESTSDIR, 'config_files')
ROOTDIR = os.path.normpath(os.path.join(TESTSDIR, '..', '..', '..'))
VENDOR = os.path.join(ROOTDIR, 'vendor')
ETCDIR = os.path.join(ROOTDIR, 'etc')
def _calc_tmpdir():
env_val = os.environ.get('KEYSTONE_TEST_TEMP_DIR')
if not env_val:
return os.path.join(TESTSDIR, 'tmp', PID)
return os.path.join(env_val, PID)
TMPDIR = _calc_tmpdir()
CONF = cfg.CONF
log.register_options(CONF)
rules.init()
IN_MEM_DB_CONN_STRING = 'sqlite://'
TIME_FORMAT = '%Y-%m-%dT%H:%M:%S.%fZ'
exception._FATAL_EXCEPTION_FORMAT_ERRORS = True
os.makedirs(TMPDIR)
atexit.register(shutil.rmtree, TMPDIR)
class dirs(object):
@staticmethod
def root(*p):
return os.path.join(ROOTDIR, *p)
@staticmethod
def etc(*p):
return os.path.join(ETCDIR, *p)
@staticmethod
def tests(*p):
return os.path.join(TESTSDIR, *p)
@staticmethod
def tmp(*p):
return os.path.join(TMPDIR, *p)
@staticmethod
def tests_conf(*p):
return os.path.join(TESTCONF, *p)
# keystone.common.sql.initialize() for testing.
DEFAULT_TEST_DB_FILE = dirs.tmp('test.db')
class EggLoader(loadwsgi.EggLoader):
_basket = {}
def find_egg_entry_point(self, object_type, name=None):
egg_key = '%s:%s' % (object_type, name)
egg_ep = self._basket.get(egg_key)
if not egg_ep:
egg_ep = super(EggLoader, self).find_egg_entry_point(
object_type, name=name)
self._basket[egg_key] = egg_ep
return egg_ep
# NOTE(dstanek): class paths were remove from the keystone-paste.ini in
# favor of using entry points. This caused tests to slow to a crawl
# since we reload the application object for each RESTful test. This
# monkey-patching adds caching to paste deploy's egg lookup.
loadwsgi.EggLoader = EggLoader
@atexit.register
def remove_test_databases():
db = dirs.tmp('test.db')
if os.path.exists(db):
os.unlink(db)
pristine = dirs.tmp('test.db.pristine')
if os.path.exists(pristine):
os.unlink(pristine)
def generate_paste_config(extension_name):
# Generate a file, based on keystone-paste.ini, that is named:
# extension_name.ini, and includes extension_name in the pipeline
with open(dirs.etc('keystone-paste.ini'), 'r') as f:
contents = f.read()
new_contents = contents.replace(' service_v3',
' %s service_v3' % (extension_name))
new_paste_file = dirs.tmp(extension_name + '.ini')
with open(new_paste_file, 'w') as f:
f.write(new_contents)
return new_paste_file
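# Illustrative note (not part of the original source; "example_ext" is a
# made-up name): generate_paste_config("example_ext") copies
# keystone-paste.ini, turns every " service_v3" pipeline entry into
# " example_ext service_v3", and writes the result to
# <TMPDIR>/example_ext.ini, which remove_generated_paste_config() later
# deletes.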
def remove_generated_paste_config(extension_name):
# Remove the generated paste config file, named extension_name.ini
paste_file_to_remove = dirs.tmp(extension_name + '.ini')
os.remove(paste_file_to_remove)
def skip_if_cache_disabled(*sections):
"""This decorator is used to skip a test if caching is disabled.
Caching can be disabled either globally or for a specific section.
In the code fragment::
@skip_if_cache_is_disabled('assignment', 'token')
def test_method(*args):
...
The method test_method would be skipped if caching is disabled globally via
the `enabled` option in the `cache` section of the configuration or if
the `caching` option is set to false in either `assignment` or `token`
sections of the configuration. This decorator can be used with no
arguments to only check global caching.
If a specified configuration section does not define the `caching` option,
this decorator makes the same assumption as the `should_cache_fn` in
keystone.common.cache that caching should be enabled.
"""
def wrapper(f):
@functools.wraps(f)
def inner(*args, **kwargs):
if not CONF.cache.enabled:
raise testcase.TestSkipped('Cache globally disabled.')
for s in sections:
conf_sec = getattr(CONF, s, None)
if conf_sec is not None:
if not getattr(conf_sec, 'caching', True):
raise testcase.TestSkipped('%s caching disabled.' % s)
return f(*args, **kwargs)
return inner
return wrapper
def skip_if_no_multiple_domains_support(f):
"""Decorator to skip tests for identity drivers limited to one domain."""
@functools.wraps(f)
def wrapper(*args, **kwargs):
test_obj = args[0]
if not test_obj.identity_api.multiple_domains_supported:
raise testcase.TestSkipped('No multiple domains support')
return f(*args, **kwargs)
return wrapper
class UnexpectedExit(Exception):
pass
def new_ref():
"""Populates a ref with attributes common to some API entities."""
return {
'id': uuid.uuid4().hex,
'name': uuid.uuid4().hex,
'description': uuid.uuid4().hex,
'enabled': True}
def new_region_ref():
ref = new_ref()
# Region doesn't have name or enabled.
del ref['name']
del ref['enabled']
ref['parent_region_id'] = None
return ref
def new_service_ref():
ref = new_ref()
ref['type'] = uuid.uuid4().hex
return ref
def new_endpoint_ref(service_id, interface='public', default_region_id=None,
**kwargs):
ref = new_ref()
del ref['enabled'] # enabled is optional
ref['interface'] = interface
ref['service_id'] = service_id
ref['url'] = 'https://' + uuid.uuid4().hex + '.com'
ref['region_id'] = default_region_id
ref.update(kwargs)
return ref
def new_domain_ref():
ref = new_ref()
return ref
def new_project_ref(domain_id=None, parent_id=None, is_domain=False):
ref = new_ref()
ref['domain_id'] = domain_id
ref['parent_id'] = parent_id
ref['is_domain'] = is_domain
return ref
def new_user_ref(domain_id, project_id=None):
ref = new_ref()
ref['domain_id'] = domain_id
ref['email'] = uuid.uuid4().hex
ref['password'] = uuid.uuid4().hex
if project_id:
ref['default_project_id'] = project_id
return ref
def new_group_ref(domain_id):
ref = new_ref()
ref['domain_id'] = domain_id
return ref
def new_credential_ref(user_id, project_id=None, cred_type=None):
ref = dict()
ref['id'] = uuid.uuid4().hex
ref['user_id'] = user_id
if cred_type == 'ec2':
ref['type'] = 'ec2'
ref['blob'] = uuid.uuid4().hex
else:
ref['type'] = 'cert'
ref['blob'] = uuid.uuid4().hex
if project_id:
ref['project_id'] = project_id
return ref
def new_role_ref():
ref = new_ref()
# Roles don't have a description or the enabled flag
del ref['description']
del ref['enabled']
return ref
def new_policy_ref():
ref = new_ref()
ref['blob'] = uuid.uuid4().hex
ref['type'] = uuid.uuid4().hex
return ref
def new_trust_ref(trustor_user_id, trustee_user_id, project_id=None,
impersonation=None, expires=None, role_ids=None,
role_names=None, remaining_uses=None,
allow_redelegation=False):
ref = dict()
ref['id'] = uuid.uuid4().hex
ref['trustor_user_id'] = trustor_user_id
ref['trustee_user_id'] = trustee_user_id
ref['impersonation'] = impersonation or False
ref['project_id'] = project_id
ref['remaining_uses'] = remaining_uses
ref['allow_redelegation'] = allow_redelegation
if isinstance(expires, six.string_types):
ref['expires_at'] = expires
elif isinstance(expires, dict):
ref['expires_at'] = (
timeutils.utcnow() + datetime.timedelta(**expires)
).strftime(TIME_FORMAT)
elif expires is None:
pass
else:
raise NotImplementedError('Unexpected value for "expires"')
role_ids = role_ids or []
role_names = role_names or []
if role_ids or role_names:
ref['roles'] = []
for role_id in role_ids:
ref['roles'].append({'id': role_id})
for role_name in role_names:
ref['roles'].append({'name': role_name})
return ref
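# Illustrative sketch (not part of the original source; the IDs are
# placeholders): "expires" may be a pre-formatted string or a timedelta-style
# dict, e.g.
#
#     new_trust_ref(trustor_id, trustee_id, expires={'days': 1},
#                   role_names=['admin'])
#
# which sets 'expires_at' one day from now (in TIME_FORMAT) and builds a
# roles list of [{'name': 'admin'}].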
class BaseTestCase(oslotest.BaseTestCase):
"""Light weight base test class.
This is a placeholder that will eventually go away once the
setup/teardown in TestCase is properly trimmed down to the bare
essentials. This is really just a play to speed up the tests by
eliminating unnecessary work.
"""
def setUp(self):
super(BaseTestCase, self).setUp()
self.useFixture(mockpatch.PatchObject(sys, 'exit',
side_effect=UnexpectedExit))
self.useFixture(log_fixture.get_logging_handle_error_fixture())
warnings.filterwarnings('error', category=DeprecationWarning,
module='^keystone\\.')
warnings.simplefilter('error', exc.SAWarning)
self.addCleanup(warnings.resetwarnings)
def cleanup_instance(self, *names):
"""Create a function suitable for use with self.addCleanup.
:returns: a callable that uses a closure to delete instance attributes
"""
def cleanup():
for name in names:
# TODO(dstanek): remove this 'if' statement once
# load_backend in test_backend_ldap is only called once
# per test
if hasattr(self, name):
delattr(self, name)
return cleanup
class TestCase(BaseTestCase):
def config_files(self):
return []
def config_overrides(self):
# NOTE(morganfainberg): enforce config_overrides can only ever be
# called a single time.
assert self.__config_overrides_called is False
self.__config_overrides_called = True
signing_certfile = 'examples/pki/certs/signing_cert.pem'
signing_keyfile = 'examples/pki/private/signing_key.pem'
self.config_fixture.config(group='oslo_policy',
policy_file=dirs.etc('policy.json'))
self.config_fixture.config(
# TODO(morganfainberg): Make Cache Testing a separate test case
# in tempest, and move it out of the base unit tests.
group='cache',
backend='dogpile.cache.memory',
enabled=True,
proxies=['oslo_cache.testing.CacheIsolatingProxy'])
self.config_fixture.config(
group='catalog',
driver='templated',
template_file=dirs.tests('default_catalog.templates'))
self.config_fixture.config(
group='kvs',
backends=[
('keystone.tests.unit.test_kvs.'
'KVSBackendForcedKeyMangleFixture'),
'keystone.tests.unit.test_kvs.KVSBackendFixture'])
self.config_fixture.config(group='revoke', driver='kvs')
self.config_fixture.config(
group='signing', certfile=signing_certfile,
keyfile=signing_keyfile,
ca_certs='examples/pki/certs/cacert.pem')
self.config_fixture.config(group='token', driver='kvs')
self.config_fixture.config(
group='saml', certfile=signing_certfile, keyfile=signing_keyfile)
self.config_fixture.config(
default_log_levels=[
'amqp=WARN',
'amqplib=WARN',
'boto=WARN',
'qpid=WARN',
'sqlalchemy=WARN',
'suds=INFO',
'oslo.messaging=INFO',
'iso8601=WARN',
'requests.packages.urllib3.connectionpool=WARN',
'routes.middleware=INFO',
'stevedore.extension=INFO',
'keystone.notifications=INFO',
'keystone.common._memcache_pool=INFO',
'keystone.common.ldap=INFO',
])
self.auth_plugin_config_override()
def auth_plugin_config_override(self, methods=None, **method_classes):
if methods is not None:
self.config_fixture.config(group='auth', methods=methods)
common_cfg.setup_authentication()
if method_classes:
self.config_fixture.config(group='auth', **method_classes)
def _assert_config_overrides_called(self):
assert self.__config_overrides_called is True
def setUp(self):
super(TestCase, self).setUp()
self.__config_overrides_called = False
self.addCleanup(CONF.reset)
self.config_fixture = self.useFixture(config_fixture.Config(CONF))
self.addCleanup(delattr, self, 'config_fixture')
self.config(self.config_files())
# NOTE(morganfainberg): mock the auth plugin setup to use the config
# fixture which automatically unregisters options when performing
# cleanup.
def mocked_register_auth_plugin_opt(conf, opt):
self.config_fixture.register_opt(opt, group='auth')
self.useFixture(mockpatch.PatchObject(
common_cfg, '_register_auth_plugin_opt',
new=mocked_register_auth_plugin_opt))
self.config_overrides()
# NOTE(morganfainberg): ensure config_overrides has been called.
self.addCleanup(self._assert_config_overrides_called)
self.useFixture(fixtures.FakeLogger(level=logging.DEBUG))
# NOTE(morganfainberg): This code is a copy from the oslo-incubator
# log module. This is not in a function or otherwise available to use
# without having a CONF object to setup logging. This should help to
# reduce the log size by limiting what we log (similar to how Keystone
# would run under mod_wsgi or eventlet).
for pair in CONF.default_log_levels:
mod, _sep, level_name = pair.partition('=')
logger = logging.getLogger(mod)
logger.setLevel(level_name)
self.useFixture(ksfixtures.Cache())
# Clear the registry of providers so that providers from previous
# tests aren't used.
self.addCleanup(dependency.reset)
self.addCleanup(kvs.INMEMDB.clear)
# Ensure Notification subscriptions and resource types are empty
self.addCleanup(notifications.clear_subscribers)
self.addCleanup(notifications.reset_notifier)
# Reset the auth-plugin registry
self.addCleanup(self.clear_auth_plugin_registry)
self.addCleanup(setattr, controllers, '_VERSIONS', [])
def config(self, config_files):
sql.initialize()
CONF(args=[], project='keystone', default_config_files=config_files)
def load_backends(self):
"""Initializes each manager and assigns them to an attribute."""
# TODO(blk-u): Shouldn't need to clear the registry here, but some
# tests call load_backends multiple times. These should be fixed to
# only call load_backends once.
dependency.reset()
# TODO(morganfainberg): Shouldn't need to clear the registry here, but
# some tests call load_backends multiple times. Since it is not
# possible to re-configure a backend, we need to clear the list. This
# should eventually be removed once testing has been cleaned up.
kvs_core.KEY_VALUE_STORE_REGISTRY.clear()
self.clear_auth_plugin_registry()
drivers, _unused = common.setup_backends(
load_extra_backends_fn=self.load_extra_backends)
for manager_name, manager in drivers.items():
setattr(self, manager_name, manager)
self.addCleanup(self.cleanup_instance(*list(drivers.keys())))
def load_extra_backends(self):
"""Override to load managers that aren't loaded by default.
This is useful to load managers initialized by extensions. No extra
backends are loaded by default.
:return: dict of name -> manager
"""
return {}
def load_fixtures(self, fixtures):
"""Hacky basic and naive fixture loading based on a python module.
Expects that the various APIs into the various services are already
defined on `self`.
"""
# NOTE(dstanek): create a list of attribute names to be removed
# from this instance during cleanup
fixtures_to_cleanup = []
# TODO(termie): doing something from json, probably based on Django's
# loaddata will be much preferred.
if (hasattr(self, 'identity_api') and
hasattr(self, 'assignment_api') and
hasattr(self, 'resource_api')):
for domain in fixtures.DOMAINS:
try:
rv = self.resource_api.create_domain(domain['id'], domain)
except exception.Conflict:
rv = self.resource_api.get_domain(domain['id'])
except exception.NotImplemented:
rv = domain
attrname = 'domain_%s' % domain['id']
setattr(self, attrname, rv)
fixtures_to_cleanup.append(attrname)
for tenant in fixtures.TENANTS:
if hasattr(self, 'tenant_%s' % tenant['id']):
try:
# This will clear out any roles on the project as well
self.resource_api.delete_project(tenant['id'])
except exception.ProjectNotFound:
pass
rv = self.resource_api.create_project(
tenant['id'], tenant)
attrname = 'tenant_%s' % tenant['id']
setattr(self, attrname, rv)
fixtures_to_cleanup.append(attrname)
for role in fixtures.ROLES:
try:
rv = self.role_api.create_role(role['id'], role)
except exception.Conflict:
rv = self.role_api.get_role(role['id'])
attrname = 'role_%s' % role['id']
setattr(self, attrname, rv)
fixtures_to_cleanup.append(attrname)
for user in fixtures.USERS:
user_copy = user.copy()
tenants = user_copy.pop('tenants')
try:
existing_user = getattr(self, 'user_%s' % user['id'], None)
if existing_user is not None:
self.identity_api.delete_user(existing_user['id'])
except exception.UserNotFound:
pass
# For users, the manager layer will generate the ID
user_copy = self.identity_api.create_user(user_copy)
# Our tests expect that the password is still in the user
# record so that they can reference it, so put it back into
# the dict returned.
user_copy['password'] = user['password']
for tenant_id in tenants:
try:
self.assignment_api.add_user_to_project(
tenant_id, user_copy['id'])
except exception.Conflict:
pass
# Use the ID from the fixture as the attribute name, so
# that our tests can easily reference each user dict, while
# the ID in the dict will be the real public ID.
attrname = 'user_%s' % user['id']
setattr(self, attrname, user_copy)
fixtures_to_cleanup.append(attrname)
self.addCleanup(self.cleanup_instance(*fixtures_to_cleanup))
def _paste_config(self, config):
if not config.startswith('config:'):
test_path = os.path.join(TESTSDIR, config)
etc_path = os.path.join(ROOTDIR, 'etc', config)
for path in [test_path, etc_path]:
if os.path.exists('%s-paste.ini' % path):
return 'config:%s-paste.ini' % path
return config
def loadapp(self, config, name='main'):
return service.loadapp(self._paste_config(config), name=name)
def clear_auth_plugin_registry(self):
auth.controllers.AUTH_METHODS.clear()
auth.controllers.AUTH_PLUGINS_LOADED = False
def assertCloseEnoughForGovernmentWork(self, a, b, delta=3):
"""Asserts that two datetimes are nearly equal within a small delta.
:param delta: Maximum allowable time delta, defined in seconds.
"""
msg = '%s != %s within %s delta' % (a, b, delta)
self.assertTrue(abs(a - b).seconds <= delta, msg)
def assertNotEmpty(self, l):
self.assertTrue(len(l))
def assertRaisesRegexp(self, expected_exception, expected_regexp,
callable_obj, *args, **kwargs):
"""Asserts that the message in a raised exception matches a regexp."""
try:
callable_obj(*args, **kwargs)
except expected_exception as exc_value:
if isinstance(expected_regexp, six.string_types):
expected_regexp = re.compile(expected_regexp)
if isinstance(exc_value.args[0], unicode):
if not expected_regexp.search(unicode(exc_value)):
raise self.failureException(
'"%s" does not match "%s"' %
(expected_regexp.pattern, unicode(exc_value)))
else:
if not expected_regexp.search(str(exc_value)):
raise self.failureException(
'"%s" does not match "%s"' %
(expected_regexp.pattern, str(exc_value)))
else:
if hasattr(expected_exception, '__name__'):
excName = expected_exception.__name__
else:
excName = str(expected_exception)
raise self.failureException("%s not raised" % excName)
@property
def ipv6_enabled(self):
if socket.has_ipv6:
sock = None
try:
sock = socket.socket(socket.AF_INET6)
# NOTE(Mouad): Try to bind to IPv6 loopback ip address.
sock.bind(("::1", 0))
return True
except socket.error:
pass
finally:
if sock:
sock.close()
return False
def skip_if_no_ipv6(self):
if not self.ipv6_enabled:
raise self.skipTest("IPv6 is not enabled in the system")
def skip_if_env_not_set(self, env_var):
if not os.environ.get(env_var):
self.skipTest('Env variable %s is not set.' % env_var)
class SQLDriverOverrides(object):
"""A mixin for consolidating sql-specific test overrides."""
def config_overrides(self):
super(SQLDriverOverrides, self).config_overrides()
# SQL specific driver overrides
self.config_fixture.config(group='catalog', driver='sql')
self.config_fixture.config(group='identity', driver='sql')
self.config_fixture.config(group='policy', driver='sql')
self.config_fixture.config(group='revoke', driver='sql')
self.config_fixture.config(group='token', driver='sql')
self.config_fixture.config(group='trust', driver='sql')
| 35.031944 | 79 | 0.630377 |
79424548d33de4aa405f63c3a67ad80562f4d292 | 158 | py | Python | pistis/__init__.py | mbhall88/metis | 8c5e5834f053f276ae937cde4e28b82fe8e1611e | ["MIT"] | 6 | 2018-08-27T15:18:54.000Z | 2021-07-17T04:17:20.000Z | pistis/__init__.py | mbhall88/metis | 8c5e5834f053f276ae937cde4e28b82fe8e1611e | ["MIT"] | 1 | 2018-04-17T21:33:34.000Z | 2018-04-19T12:50:19.000Z | pistis/__init__.py | mbhall88/pistis | 8c5e5834f053f276ae937cde4e28b82fe8e1611e | ["MIT"] | null | null | null | # -*- coding: utf-8 -*-
"""Top-level package for pistis."""
__author__ = """Michael Benjamin Hall"""
__email__ = '[email protected]'
__version__ = '0.1.0'
| 19.75 | 40 | 0.64557 |
7942462ea10d087e3bf1663780b2f3165ea22b10 | 922 | py | Python | sorts/radix_sort.py | jenia90/Python | 696fb4a681ad9e4d84e0d2b894daf449a3e30b24 | ["MIT"] | 14 | 2020-10-03T05:43:48.000Z | 2021-11-01T21:02:26.000Z | sorts/radix_sort.py | jenia90/Python | 696fb4a681ad9e4d84e0d2b894daf449a3e30b24 | ["MIT"] | 2 | 2020-10-19T17:42:36.000Z | 2020-10-19T18:05:18.000Z | sorts/radix_sort.py | jenia90/Python | 696fb4a681ad9e4d84e0d2b894daf449a3e30b24 | ["MIT"] | 12 | 2020-10-03T05:44:19.000Z | 2022-01-16T05:37:54.000Z | from __future__ import annotations
def radix_sort(list_of_ints: list[int]) -> list[int]:
"""
    >>> radix_sort(list(range(15))) == sorted(range(15))
    True
    >>> radix_sort(list(reversed(range(15)))) == sorted(range(15))
    True
    >>> radix_sort([1, 100, 10, 1000]) == sorted([1, 100, 10, 1000])
    True
"""
RADIX = 10
placement = 1
max_digit = max(list_of_ints)
while placement <= max_digit:
# declare and initialize empty buckets
buckets = [list() for _ in range(RADIX)]
# split list_of_ints between the buckets
for i in list_of_ints:
tmp = int((i / placement) % RADIX)
buckets[tmp].append(i)
# put each buckets' contents into list_of_ints
a = 0
for b in range(RADIX):
for i in buckets[b]:
list_of_ints[a] = i
a += 1
# move to next
placement *= RADIX
return list_of_ints
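# --- Editor's illustrative usage (not part of the original module) ---
# A direct call on a small unsorted list; the expected result is shown in the
# trailing comment.
if __name__ == "__main__":
    print(radix_sort([170, 45, 75, 90, 802, 24, 2, 66]))  # [2, 24, 45, 66, 75, 90, 170, 802]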
| 28.8125 | 58 | 0.571584 |
7942463ec6a5c0d6b1032f58303bed69ffbb5d35 | 2,089 | py | Python | testing/testing-analysis-regress_field.py | jenfly/atmos | c0a733b78749098d8cc2caaaacee245e6aeeac07 | ["MIT"] | 16 | 2015-10-08T06:14:35.000Z | 2020-02-12T02:47:33.000Z | testing/testing-analysis-regress_field.py | jenfly/atmos | c0a733b78749098d8cc2caaaacee245e6aeeac07 | ["MIT"] | null | null | null | testing/testing-analysis-regress_field.py | jenfly/atmos | c0a733b78749098d8cc2caaaacee245e6aeeac07 | ["MIT"] | 3 | 2018-10-16T07:58:14.000Z | 2021-09-17T06:39:00.000Z | import sys
sys.path.append('/home/jwalker/dynamics/python/atmos-tools')
sys.path.append('/home/jwalker/dynamics/python/atmos-read')
import xray
import numpy as np
import matplotlib.pyplot as plt
import matplotlib as mpl
import collections
import pandas as pd
import atmos as atm
import utils # monsoon-onset
mpl.rcParams['font.size'] = 10
# ----------------------------------------------------------------------
onset_nm = 'CHP_MFC'
years = np.arange(1979, 2015)
datadir = atm.homedir() + 'datastore/merra/analysis/'
varnms = ['U200', 'T200']
datafiles = collections.OrderedDict()
filestr = datadir + 'merra_%s_dailyrel_%s_%d.nc'
for nm in varnms:
datafiles[nm] = [filestr % (nm, onset_nm, yr) for yr in years]
# ----------------------------------------------------------------------
# Read data
data = collections.OrderedDict()
for nm in datafiles:
var, onset, retreat = utils.load_dailyrel(datafiles[nm])
data[nm] = var
# ----------------------------------------------------------------------
# Test atm.regress_field
day = -60
# 1-d timeseries
var = data['U200'].sel(dayrel=day)
ts = atm.mean_over_geobox(var, 10, 30, 60, 100)
ts_reg = atm.Linreg(onset, ts)
ts_reg2 = atm.regress_field(ts, onset)
print(ts_reg.r, ts_reg2.r.values)
print(ts_reg.slope, ts_reg2.m.values)
print(ts_reg.p, ts_reg2.p.values)
# x-y data
regdays = [-60, -30, 0, 30, 60]
plotdays = [-60, -30]
clev_r = np.arange(-1.0, 1.01, 0.05)
for nm in varnms:
print(nm)
var = data[nm].sel(dayrel=regdays)
reg_daily = atm.regress_field(var, onset, axis=0)
for day in plotdays:
reg = reg_daily.sel(dayrel=day)
title = '%s day %d vs. Onset ' % (var.name, day)
cint_m = atm.cinterval(reg.m)
clev_m = atm.clevels(reg.m, cint_m, symmetric=True)
plt.figure(figsize=(11, 8))
plt.subplot(1, 2, 1)
atm.contourf_latlon(reg['r'], clev=clev_r, cmap='RdBu_r')
plt.title(title + ' - Corr Coeff')
plt.subplot(1, 2, 2)
atm.contourf_latlon(reg['m'], clev=clev_m, cmap='RdBu_r')
plt.title(title + ' - Reg Coeff')
| 29.013889 | 72 | 0.598372 |
79424770b1f8337bb2cf586e89d8c1a8121df1ba | 5,860 | py | Python | terrascript/openstack/r.py | vutsalsinghal/python-terrascript | 3b9fb5ad77453d330fb0cd03524154a342c5d5dc | ["BSD-2-Clause"] | null | null | null | terrascript/openstack/r.py | vutsalsinghal/python-terrascript | 3b9fb5ad77453d330fb0cd03524154a342c5d5dc | ["BSD-2-Clause"] | null | null | null | terrascript/openstack/r.py | vutsalsinghal/python-terrascript | 3b9fb5ad77453d330fb0cd03524154a342c5d5dc | ["BSD-2-Clause"] | null | null | null | # terrascript/openstack/r.py
import terrascript
class openstack_blockstorage_quotaset_v2(terrascript.Resource):
pass
class openstack_blockstorage_quotaset_v3(terrascript.Resource):
pass
class openstack_blockstorage_volume_v1(terrascript.Resource):
pass
class openstack_blockstorage_volume_v2(terrascript.Resource):
pass
class openstack_blockstorage_volume_v3(terrascript.Resource):
pass
class openstack_blockstorage_volume_attach_v2(terrascript.Resource):
pass
class openstack_blockstorage_volume_attach_v3(terrascript.Resource):
pass
class openstack_compute_flavor_v2(terrascript.Resource):
pass
class openstack_compute_flavor_access_v2(terrascript.Resource):
pass
class openstack_compute_instance_v2(terrascript.Resource):
pass
class openstack_compute_interface_attach_v2(terrascript.Resource):
pass
class openstack_compute_keypair_v2(terrascript.Resource):
pass
class openstack_compute_secgroup_v2(terrascript.Resource):
pass
class openstack_compute_servergroup_v2(terrascript.Resource):
pass
class openstack_compute_floatingip_v2(terrascript.Resource):
pass
class openstack_compute_floatingip_associate_v2(terrascript.Resource):
pass
class openstack_compute_volume_attach_v2(terrascript.Resource):
pass
class openstack_containerinfra_clustertemplate_v1(terrascript.Resource):
pass
class openstack_containerinfra_cluster_v1(terrascript.Resource):
pass
class openstack_db_instance_v1(terrascript.Resource):
pass
class openstack_db_user_v1(terrascript.Resource):
pass
class openstack_db_configuration_v1(terrascript.Resource):
pass
class openstack_db_database_v1(terrascript.Resource):
pass
class openstack_dns_recordset_v2(terrascript.Resource):
pass
class openstack_dns_zone_v2(terrascript.Resource):
pass
class openstack_fw_firewall_v1(terrascript.Resource):
pass
class openstack_fw_policy_v1(terrascript.Resource):
pass
class openstack_fw_rule_v1(terrascript.Resource):
pass
class openstack_identity_endpoint_v3(terrascript.Resource):
pass
class openstack_identity_project_v3(terrascript.Resource):
pass
class openstack_identity_role_v3(terrascript.Resource):
pass
class openstack_identity_role_assignment_v3(terrascript.Resource):
pass
class openstack_identity_service_v3(terrascript.Resource):
pass
class openstack_identity_user_v3(terrascript.Resource):
pass
class openstack_identity_application_credential_v3(terrascript.Resource):
pass
class openstack_images_image_v2(terrascript.Resource):
pass
class openstack_lb_member_v1(terrascript.Resource):
pass
class openstack_lb_monitor_v1(terrascript.Resource):
pass
class openstack_lb_pool_v1(terrascript.Resource):
pass
class openstack_lb_vip_v1(terrascript.Resource):
pass
class openstack_lb_loadbalancer_v2(terrascript.Resource):
pass
class openstack_lb_listener_v2(terrascript.Resource):
pass
class openstack_lb_pool_v2(terrascript.Resource):
pass
class openstack_lb_member_v2(terrascript.Resource):
pass
class openstack_lb_monitor_v2(terrascript.Resource):
pass
class openstack_lb_l7policy_v2(terrascript.Resource):
pass
class openstack_lb_l7rule_v2(terrascript.Resource):
pass
class openstack_networking_floatingip_v2(terrascript.Resource):
pass
class openstack_networking_floatingip_associate_v2(terrascript.Resource):
pass
class openstack_networking_network_v2(terrascript.Resource):
pass
class openstack_networking_port_v2(terrascript.Resource):
pass
class openstack_networking_rbac_policy_v2(terrascript.Resource):
pass
class openstack_networking_port_secgroup_associate_v2(terrascript.Resource):
pass
class openstack_networking_qos_bandwidth_limit_rule_v2(terrascript.Resource):
pass
class openstack_networking_qos_dscp_marking_rule_v2(terrascript.Resource):
pass
class openstack_networking_qos_minimum_bandwidth_rule_v2(terrascript.Resource):
pass
class openstack_networking_qos_policy_v2(terrascript.Resource):
pass
class openstack_networking_router_v2(terrascript.Resource):
pass
class openstack_networking_router_interface_v2(terrascript.Resource):
pass
class openstack_networking_router_route_v2(terrascript.Resource):
pass
class openstack_networking_secgroup_v2(terrascript.Resource):
pass
class openstack_networking_secgroup_rule_v2(terrascript.Resource):
pass
class openstack_networking_subnet_v2(terrascript.Resource):
pass
class openstack_networking_subnet_route_v2(terrascript.Resource):
pass
class openstack_networking_subnetpool_v2(terrascript.Resource):
pass
class openstack_networking_addressscope_v2(terrascript.Resource):
pass
class openstack_networking_trunk_v2(terrascript.Resource):
pass
class openstack_objectstorage_container_v1(terrascript.Resource):
pass
class openstack_objectstorage_object_v1(terrascript.Resource):
pass
class openstack_objectstorage_tempurl_v1(terrascript.Resource):
pass
class openstack_vpnaas_ipsec_policy_v2(terrascript.Resource):
pass
class openstack_vpnaas_service_v2(terrascript.Resource):
pass
class openstack_vpnaas_ike_policy_v2(terrascript.Resource):
pass
class openstack_vpnaas_endpoint_group_v2(terrascript.Resource):
pass
class openstack_vpnaas_site_connection_v2(terrascript.Resource):
pass
class openstack_sharedfilesystem_securityservice_v2(terrascript.Resource):
pass
class openstack_sharedfilesystem_sharenetwork_v2(terrascript.Resource):
pass
class openstack_sharedfilesystem_share_v2(terrascript.Resource):
pass
class openstack_sharedfilesystem_share_access_v2(terrascript.Resource):
pass
class openstack_keymanager_secret_v1(terrascript.Resource):
pass
class openstack_keymanager_container_v1(terrascript.Resource):
pass
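# --- Editor's illustrative note (not part of the generated module) ---
# Each class above is a thin terrascript.Resource subclass, so (assuming the
# usual python-terrascript workflow) a resource is declared by instantiating
# one of these classes with a label plus its Terraform arguments and adding it
# to a terrascript configuration object before dumping the JSON document that
# `terraform` consumes.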
| 23.629032 | 79 | 0.832253 |
79424802a8f944657e12fc180a6c7395ea8b623c | 5,994 | py | Python | Transformer/SLT_transformer/test.py | Seunghoon-Yi/Paper_review-PyTorch | 44728cc9c3eee6c0146b0cff8a46099e789dfabc | ["MIT"] | 2 | 2021-08-12T13:05:37.000Z | 2021-12-30T08:25:18.000Z | Transformer/SLT_transformer/test.py | Seunghoon-Yi/Paper_review-PyTorch | 44728cc9c3eee6c0146b0cff8a46099e789dfabc | ["MIT"] | null | null | null | Transformer/SLT_transformer/test.py | Seunghoon-Yi/Paper_review-PyTorch | 44728cc9c3eee6c0146b0cff8a46099e789dfabc | ["MIT"] | null | null | null | from model import SLT_Transformer
from dataloader import Vocab_tokenizer, get_loader
from sklearn.utils import shuffle
import pandas as pd
import os
import numpy as np
import torch
import torch.nn as nn
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
def eval(Model, iterator, metric, data_tokenizer, trial):   # No gradient updates, no optimizer step and no clipping
Model.eval()
epoch_loss = 0
with torch.no_grad():
total_len = len(iterator)
test_sentence = []
GT_sentence = []
for i, (features, glosses, translations) in enumerate(iterator):
src, orth, trg = \
features.to(device), glosses.to(device), translations.to(device)
predict_translation, predict_gloss = Model(src, trg[:, :-1])
for tokens in predict_translation:
# Get argmax of tokens, bring it back to CPU.
tokens = torch.argmax(tokens, dim = 1).to(dtype = torch.long, device = torch.device("cpu"))
tokens = tokens.numpy()
# make string, append it to test_sentence
itos = data_tokenizer.stringnize(tokens)
pred_string = ' '.join(itos)
test_sentence.append(pred_string)
for tokens in trg:
tokens = tokens.to(dtype=torch.long, device=torch.device("cpu"))
tokens = tokens.numpy()
# make string, append it to test_sentence
itos = data_tokenizer.stringnize(tokens[1:])
GT_string = ' '.join(itos)
GT_sentence.append(GT_string)
translation_dim = predict_translation.shape[-1]
gloss_dim = predict_gloss.shape[-1]
# Predictions
predict_translation = predict_translation.contiguous().view(-1, translation_dim)
predict_gloss = predict_gloss.contiguous().view(-1, gloss_dim)
# GTs
orth = orth.contiguous().view(-1)
orth = orth.type(torch.LongTensor).to(device)
trg = trg[:, 1:].contiguous().view(-1)
trg = trg.type(torch.LongTensor).to(device)
loss_translation = metric(predict_translation, trg)
loss_gloss = metric(predict_gloss, orth)
loss = loss_translation
epoch_loss += loss.item()
with open(f"./bestmodel/TestPred_trial_{trial}.txt", "w", -1, "utf-8") as f:
f.write('\n'.join(test_sentence))
f.close()
with open(f"./bestmodel/TestGT_trial_{trial}.txt", "w", -1, "utf-8") as f:
f.write('\n'.join(GT_sentence))
f.close()
return epoch_loss / len(iterator)
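# --- Editor's illustrative sketch (not part of the original script) ---
# The greedy-decoding step used inside eval(), shown in isolation: argmax over
# the vocabulary dimension turns a (seq_len, vocab_size) block of logits into
# one predicted token id per position, ready for the tokenizer's stringnize().
def _greedy_decode_demo():
    logits = torch.randn(7, 100)             # fake (seq_len, vocab_size) logits
    token_ids = torch.argmax(logits, dim=1)  # one id per target position
    return token_ids.to(dtype=torch.long, device=torch.device("cpu")).numpy()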
def count_parameters(model):
return sum(p.numel() for p in model.parameters() if p.requires_grad)
def main():
base_path = 'C:/Users/Siryu_sci/2021-MLVU/SLT_project/'
train_data = pd.read_csv(base_path + "PHOENIX-2014-T.train.corpus.csv", delimiter='|')
val_data = pd.read_csv(base_path + "PHOENIX-2014-T.dev.corpus.csv", delimiter='|')
test_data = pd.read_csv(base_path + "PHOENIX-2014-T.test.corpus.csv", delimiter='|')
Traindata = pd.concat([train_data, val_data])
max_len = 55
# Define the tokenizer. data : translation, orth : gloss
data_tokenizer = Vocab_tokenizer(freq_th=1, max_len = max_len)
orth_tokenizer = Vocab_tokenizer(freq_th=1, max_len = 60)
data_tokenizer.build_vocab(Traindata.translation)
orth_tokenizer.build_vocab(Traindata.orth)
#print(orth_tokenizer.stoi)
targets = data_tokenizer.numericalize(Traindata.translation)
glosses = orth_tokenizer.numericalize(Traindata.orth)
labels = Traindata.name.to_numpy()
print("Translation : ", targets.shape, len(data_tokenizer),
"\n", "Glosses : ", glosses.shape, len(orth_tokenizer)) # (7615, 300) 2948
############################# Split them into Train and dev set #############################
labels, targets, glosses = shuffle(labels, targets, glosses, random_state=42)
train_labels, train_glosses, train_translations = labels[:7115], glosses[:7115], targets[:7115]
val_labels, val_glosses, val_translations = labels[7115:], glosses[7115:], targets[7115:]
test_labels = test_data.name.to_numpy()
test_glosses = orth_tokenizer.numericalize(test_data.orth)
test_translations = data_tokenizer.numericalize(test_data.translation)
BATCH_SIZE = 8
train_loader, train_dataset, pad_idx = get_loader(base_path, train_labels, train_glosses,
train_translations, n_workers=2, BS=BATCH_SIZE, transform=None)
val_loader, val_dataset, pad_idx = get_loader(base_path, val_labels, val_glosses,
val_translations, n_workers=2, BS=BATCH_SIZE, transform=None)
test_loader, test_dataset, pad_idx = get_loader(base_path, test_labels, test_glosses,
test_translations, n_workers=2, BS=BATCH_SIZE, transform=None)
N_tokens = len(data_tokenizer) # Since we're only training the model on the training dataset!
N_glosses = len(orth_tokenizer)
######################### Define the model and auxiliary functions #########################
Transformer = SLT_Transformer(N_glosses, N_tokens, pad_idx, pad_idx, device=device).cuda()
criterion = nn.CrossEntropyLoss().cuda()
print(f'The model has {count_parameters(Transformer):,} trainable parameters')
Transformer.load_state_dict(torch.load('B4_n2_d512_gloss_pool.pt'))
total_loss = 0
N_trial = 5
for i in range(N_trial):
test_loss = eval(Transformer, test_loader, criterion, data_tokenizer, i)
print(test_loss) ; total_loss+=test_loss
print("average loss : ", total_loss/N_trial)
if __name__ == "__main__":
main() | 42.814286 | 116 | 0.621455 |
7942484b439e4cca8ca6a11c5ce73641d21bf282 | 596 | py | Python | pixel_test.py | ChrisProgramming2018/ReinforcementLearning | d5895d989249978a0b24fa0488433950daeb31bd | ["MIT"] | null | null | null | pixel_test.py | ChrisProgramming2018/ReinforcementLearning | d5895d989249978a0b24fa0488433950daeb31bd | ["MIT"] | null | null | null | pixel_test.py | ChrisProgramming2018/ReinforcementLearning | d5895d989249978a0b24fa0488433950daeb31bd | ["MIT"] | null | null | null | import numpy as np
import robosuite as suite
from model_cnn import CNNStemNetwork
env_name = 'SawyerPickPlace'
env = suite.make(env_name,
has_renderer=False,
ignore_done=True,
use_camera_obs=True,
has_offscreen_renderer=True,
camera_height=84,
camera_width=84,
render_collision_mesh=False,
render_visual_mesh=True,
camera_name='agentview',
use_object_obs=False,
camera_depth=True,
reward_shaping=True,)
state = env.reset()
print(state)
print(state["image"])
print(state["image"].shape)
| 18.060606 | 36 | 0.666107 |
79424b80c3637649defad3c6cc964dd7b919e9c3 | 233 | py | Python | configs/dcn/cascade_mask_rcnn_x101_32x4d_fpn_dconv_c3-c5_1x_coco.py | yypurpose/mmdetection | ec6bfd96eae0af047c623f3d1ec31b0b3f1f4a6c | ["Apache-2.0"] | null | null | null | configs/dcn/cascade_mask_rcnn_x101_32x4d_fpn_dconv_c3-c5_1x_coco.py | yypurpose/mmdetection | ec6bfd96eae0af047c623f3d1ec31b0b3f1f4a6c | ["Apache-2.0"] | null | null | null | configs/dcn/cascade_mask_rcnn_x101_32x4d_fpn_dconv_c3-c5_1x_coco.py | yypurpose/mmdetection | ec6bfd96eae0af047c623f3d1ec31b0b3f1f4a6c | ["Apache-2.0"] | null | null | null | _base_ = '../cascade_rcnn/cascade_mask_rcnn_x101_32x4d_fpn_1x_coco.py'
model = dict(
backbone=dict(
dcn=dict(type='DCN', deform_groups=1, fallback_on_stride=False),
stage_with_dcn=(False, True, True, True)))
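# --- Editor's illustrative note (not part of the original config) ---
# Only the keys that differ from the inherited _base_ config are restated here;
# mmdetection merges this `model` dict into the base cascade mask R-CNN
# definition, switching the last three backbone stages (c3-c5, per the file
# name) to deformable convolutions.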
| 38.833333 | 73 | 0.699571 |
79424cbcc2b860ef06e391f7ca542b48fd6590ce | 4,527 | py | Python | tensorflow/tools/docs/tf_doctest.py | yolman230/tensorflow | 8180678e1b71f9e4326b9d84987d78232000bac2 | ["Apache-2.0"] | 2 | 2019-12-10T11:03:50.000Z | 2020-07-22T22:04:14.000Z | tensorflow/tools/docs/tf_doctest.py | yolman230/tensorflow | 8180678e1b71f9e4326b9d84987d78232000bac2 | ["Apache-2.0"] | 2 | 2021-08-25T15:53:30.000Z | 2022-02-10T01:40:41.000Z | tensorflow/tools/docs/tf_doctest.py | yolman230/tensorflow | 8180678e1b71f9e4326b9d84987d78232000bac2 | ["Apache-2.0"] | 1 | 2019-12-10T05:27:13.000Z | 2019-12-10T05:27:13.000Z | # Lint as: python3
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Run doctests for tensorflow."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import sys
from absl import flags
from absl.testing import absltest
import numpy as np
import tensorflow.compat.v2 as tf
from tensorflow.tools.docs import tf_doctest_lib
# We put doctest after absltest so that it picks up the unittest monkeypatch.
# Otherwise doctest tests aren't runnable at all.
import doctest # pylint: disable=g-bad-import-order
tf.compat.v1.enable_v2_behavior()
FLAGS = flags.FLAGS
flags.DEFINE_string('module', None, 'A specific module to run doctest on.')
flags.DEFINE_boolean('list', None,
'List all the modules in the core package imported.')
flags.DEFINE_string('file', None, 'A specific file to run doctest on.')
flags.mark_flags_as_mutual_exclusive(['module', 'file'])
flags.mark_flags_as_mutual_exclusive(['list', 'file'])
PACKAGE = 'tensorflow.python.'
def find_modules():
"""Finds all the modules in the core package imported.
Returns:
A list containing all the modules in tensorflow.python.
"""
tf_modules = []
for name, module in sys.modules.items():
if name.startswith(PACKAGE):
tf_modules.append(module)
return tf_modules
def filter_on_submodules(all_modules, submodule):
"""Filters all the modules based on the module flag.
The module flag has to be relative to the core package imported.
For example, if `submodule=keras.layers` then, this function will return
all the modules in the submodule.
Args:
all_modules: All the modules in the core package.
submodule: Submodule to filter from all the modules.
Returns:
All the modules in the submodule.
"""
filtered_modules = [
mod for mod in all_modules
if PACKAGE + submodule in mod.__name__
]
return filtered_modules
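# --- Editor's illustrative sketch (not part of the original file) ---
# filter_on_submodules keeps any module whose __name__ contains
# PACKAGE + submodule; a stand-in namespace object is enough to demonstrate it.
def _filter_on_submodules_demo():
  import types
  keras_mod = types.SimpleNamespace(__name__=PACKAGE + 'keras.layers.core')
  other_mod = types.SimpleNamespace(__name__='some.other.module')
  return filter_on_submodules([keras_mod, other_mod], 'keras.layers')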
def get_module_and_inject_docstring(file_path):
"""Replaces the docstring of the module with the changed file's content.
Args:
file_path: Path to the file
Returns:
A list containing the module changed by the file.
"""
file_path = os.path.abspath(file_path)
mod_index = file_path.find(PACKAGE.replace('.', os.sep))
file_mod_name, _ = os.path.splitext(file_path[mod_index:])
file_module = sys.modules[file_mod_name.replace(os.sep, '.')]
with open(file_path, 'r') as f:
content = f.read()
file_module.__doc__ = content
return [file_module]
class TfTestCase(tf.test.TestCase):
def set_up(self, test):
self.setUp()
def tear_down(self, test):
self.tearDown()
def load_tests(unused_loader, tests, unused_ignore):
"""Loads all the tests in the docstrings and runs them."""
tf_modules = find_modules()
if FLAGS.module:
tf_modules = filter_on_submodules(tf_modules, FLAGS.module)
if FLAGS.list:
print('**************************************************')
for mod in tf_modules:
print(mod.__name__)
print('**************************************************')
return tests
if FLAGS.file:
tf_modules = get_module_and_inject_docstring(FLAGS.file)
for module in tf_modules:
testcase = TfTestCase()
tests.addTests(
doctest.DocTestSuite(
module,
test_finder=doctest.DocTestFinder(exclude_empty=False),
extraglobs={
'tf': tf,
'np': np,
'os': os
},
setUp=testcase.set_up,
tearDown=testcase.tear_down,
checker=tf_doctest_lib.TfDoctestOutputChecker(),
optionflags=(doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE
| doctest.IGNORE_EXCEPTION_DETAIL
| doctest.DONT_ACCEPT_BLANKLINE),
))
return tests
if __name__ == '__main__':
absltest.main()
| 28.118012 | 80 | 0.674398 |
79424d2fb1ce244159d7a3b0c533ef36236d4262 | 7,650 | py | Python | sdk/python/pulumi_azure_native/network/v20200401/get_local_network_gateway.py | pulumi-bot/pulumi-azure-native | f7b9490b5211544318e455e5cceafe47b628e12c | ["Apache-2.0"] | null | null | null | sdk/python/pulumi_azure_native/network/v20200401/get_local_network_gateway.py | pulumi-bot/pulumi-azure-native | f7b9490b5211544318e455e5cceafe47b628e12c | ["Apache-2.0"] | null | null | null | sdk/python/pulumi_azure_native/network/v20200401/get_local_network_gateway.py | pulumi-bot/pulumi-azure-native | f7b9490b5211544318e455e5cceafe47b628e12c | ["Apache-2.0"] | null | null | null | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
from . import outputs
__all__ = [
'GetLocalNetworkGatewayResult',
'AwaitableGetLocalNetworkGatewayResult',
'get_local_network_gateway',
]
@pulumi.output_type
class GetLocalNetworkGatewayResult:
"""
A common class for general resource information.
"""
def __init__(__self__, bgp_settings=None, etag=None, fqdn=None, gateway_ip_address=None, id=None, local_network_address_space=None, location=None, name=None, provisioning_state=None, resource_guid=None, tags=None, type=None):
if bgp_settings and not isinstance(bgp_settings, dict):
raise TypeError("Expected argument 'bgp_settings' to be a dict")
pulumi.set(__self__, "bgp_settings", bgp_settings)
if etag and not isinstance(etag, str):
raise TypeError("Expected argument 'etag' to be a str")
pulumi.set(__self__, "etag", etag)
if fqdn and not isinstance(fqdn, str):
raise TypeError("Expected argument 'fqdn' to be a str")
pulumi.set(__self__, "fqdn", fqdn)
if gateway_ip_address and not isinstance(gateway_ip_address, str):
raise TypeError("Expected argument 'gateway_ip_address' to be a str")
pulumi.set(__self__, "gateway_ip_address", gateway_ip_address)
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if local_network_address_space and not isinstance(local_network_address_space, dict):
raise TypeError("Expected argument 'local_network_address_space' to be a dict")
pulumi.set(__self__, "local_network_address_space", local_network_address_space)
if location and not isinstance(location, str):
raise TypeError("Expected argument 'location' to be a str")
pulumi.set(__self__, "location", location)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if provisioning_state and not isinstance(provisioning_state, str):
raise TypeError("Expected argument 'provisioning_state' to be a str")
pulumi.set(__self__, "provisioning_state", provisioning_state)
if resource_guid and not isinstance(resource_guid, str):
raise TypeError("Expected argument 'resource_guid' to be a str")
pulumi.set(__self__, "resource_guid", resource_guid)
if tags and not isinstance(tags, dict):
raise TypeError("Expected argument 'tags' to be a dict")
pulumi.set(__self__, "tags", tags)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
@property
@pulumi.getter(name="bgpSettings")
def bgp_settings(self) -> Optional['outputs.BgpSettingsResponse']:
"""
Local network gateway's BGP speaker settings.
"""
return pulumi.get(self, "bgp_settings")
@property
@pulumi.getter
def etag(self) -> str:
"""
A unique read-only string that changes whenever the resource is updated.
"""
return pulumi.get(self, "etag")
@property
@pulumi.getter
def fqdn(self) -> Optional[str]:
"""
FQDN of local network gateway.
"""
return pulumi.get(self, "fqdn")
@property
@pulumi.getter(name="gatewayIpAddress")
def gateway_ip_address(self) -> Optional[str]:
"""
IP address of local network gateway.
"""
return pulumi.get(self, "gateway_ip_address")
@property
@pulumi.getter
def id(self) -> Optional[str]:
"""
Resource ID.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter(name="localNetworkAddressSpace")
def local_network_address_space(self) -> Optional['outputs.AddressSpaceResponse']:
"""
Local network site address space.
"""
return pulumi.get(self, "local_network_address_space")
@property
@pulumi.getter
def location(self) -> Optional[str]:
"""
Resource location.
"""
return pulumi.get(self, "location")
@property
@pulumi.getter
def name(self) -> str:
"""
Resource name.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> str:
"""
The provisioning state of the local network gateway resource.
"""
return pulumi.get(self, "provisioning_state")
@property
@pulumi.getter(name="resourceGuid")
def resource_guid(self) -> str:
"""
The resource GUID property of the local network gateway resource.
"""
return pulumi.get(self, "resource_guid")
@property
@pulumi.getter
def tags(self) -> Optional[Mapping[str, str]]:
"""
Resource tags.
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter
def type(self) -> str:
"""
Resource type.
"""
return pulumi.get(self, "type")
class AwaitableGetLocalNetworkGatewayResult(GetLocalNetworkGatewayResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetLocalNetworkGatewayResult(
bgp_settings=self.bgp_settings,
etag=self.etag,
fqdn=self.fqdn,
gateway_ip_address=self.gateway_ip_address,
id=self.id,
local_network_address_space=self.local_network_address_space,
location=self.location,
name=self.name,
provisioning_state=self.provisioning_state,
resource_guid=self.resource_guid,
tags=self.tags,
type=self.type)
def get_local_network_gateway(local_network_gateway_name: Optional[str] = None,
resource_group_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetLocalNetworkGatewayResult:
"""
A common class for general resource information.
:param str local_network_gateway_name: The name of the local network gateway.
:param str resource_group_name: The name of the resource group.
"""
__args__ = dict()
__args__['localNetworkGatewayName'] = local_network_gateway_name
__args__['resourceGroupName'] = resource_group_name
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure-native:network/v20200401:getLocalNetworkGateway', __args__, opts=opts, typ=GetLocalNetworkGatewayResult).value
return AwaitableGetLocalNetworkGatewayResult(
bgp_settings=__ret__.bgp_settings,
etag=__ret__.etag,
fqdn=__ret__.fqdn,
gateway_ip_address=__ret__.gateway_ip_address,
id=__ret__.id,
local_network_address_space=__ret__.local_network_address_space,
location=__ret__.location,
name=__ret__.name,
provisioning_state=__ret__.provisioning_state,
resource_guid=__ret__.resource_guid,
tags=__ret__.tags,
type=__ret__.type)
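# --- Editor's illustrative usage sketch (not generated code); the gateway and
# resource-group names below are placeholders, not real resources.
def _example_usage():
    result = get_local_network_gateway(
        local_network_gateway_name="example-lng",
        resource_group_name="example-rg")
    pulumi.export("gateway_ip_address", result.gateway_ip_address)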
| 36.428571 | 229 | 0.652418 |
79424eb4899625bb1d3174966210aac2fb2111ec | 541 | py | Python | bigbrain/bigbrain/oauth2/tags/customfilters.py | slhill/bigbrain | 69dcf775c450c867c8c65c1b05e06cb6d3a7e5d4 | ["Apache-2.0"] | null | null | null | bigbrain/bigbrain/oauth2/tags/customfilters.py | slhill/bigbrain | 69dcf775c450c867c8c65c1b05e06cb6d3a7e5d4 | ["Apache-2.0"] | null | null | null | bigbrain/bigbrain/oauth2/tags/customfilters.py | slhill/bigbrain | 69dcf775c450c867c8c65c1b05e06cb6d3a7e5d4 | ["Apache-2.0"] | null | null | null | from google.appengine.ext import webapp
import json
register = webapp.template.create_template_register()
def gql_json_parser(query_obj):
result = []
for entry in query_obj:
result.append(dict([(p, unicode(getattr(entry, p))) for p in entry.properties()]))
return result
@register.filter(name='make_json')
def make_json(thing):
try:
return json.dumps(thing)
except Exception:
return json.dumps(gql_json_parser(thing))
@register.filter(name='strip_benchmark')
def strip_benchmark(name):
return name[10:]
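# --- Editor's illustrative note (not part of the original module) ---
# Once this register is loaded by the webapp template engine, the filters are
# applied from a Django-style template, e.g.:
#     {{ results|make_json }}
#     {{ benchmark.name|strip_benchmark }}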
| 24.590909 | 90 | 0.731978 |
79424f1e8410408076d00010566f2bd8e4c9c1d6 | 748 | py | Python | steps/oci-compute-step-instances-start/step.py | Bryxxit/relay-integrations-relay-oci-compute | d5d7f00317b9a58fecef9969660d50291ec4eb6b | ["Apache-2.0"] | null | null | null | steps/oci-compute-step-instances-start/step.py | Bryxxit/relay-integrations-relay-oci-compute | d5d7f00317b9a58fecef9969660d50291ec4eb6b | ["Apache-2.0"] | null | null | null | steps/oci-compute-step-instances-start/step.py | Bryxxit/relay-integrations-relay-oci-compute | d5d7f00317b9a58fecef9969660d50291ec4eb6b | ["Apache-2.0"] | 2 | 2021-01-12T17:09:00.000Z | 2021-01-13T12:57:32.000Z | #!/usr/bin/env python
import oci
from relay_sdk import Interface, Dynamic as D
relay = Interface()
config = {
"user": relay.get(D.oci.connection.userOCID),
"key_content": relay.get(D.oci.connection.keyContent),
"fingerprint": relay.get(D.oci.connection.fingerprint),
"tenancy": relay.get(D.oci.connection.tenancy),
"region": relay.get(D.oci.region)
}
from oci.config import validate_config
validate_config(config)
# initialize the ComputeClient
compute = oci.core.ComputeClient(config)
instanceIDs = relay.get(D.oci.instanceIDs)
if not instanceIDs:
print("No instance IDs found")
exit(0)
print('Starting instances: {}'.format(instanceIDs))
for instanceID in instanceIDs:
compute.instance_action(instanceID,"START")
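# --- Editor's illustrative note (not part of the original step) ---
# The same ComputeClient call can issue other lifecycle actions, for example:
#     compute.instance_action(instanceID, "STOP")
#     compute.instance_action(instanceID, "SOFTRESET")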
| 24.933333 | 59 | 0.744652 |
79425005707466e3d2dcaef4913ae1b768d5d323 | 2,818 | py | Python | samples.py | smartsheet-samples/python-snippets | 04951c2ca8ae1a97386bdd3fa6e010f2845e1421 | ["MIT"] | 3 | 2018-05-14T14:08:33.000Z | 2021-07-05T16:01:00.000Z | samples.py | smartsheet-samples/python-snippets | 04951c2ca8ae1a97386bdd3fa6e010f2845e1421 | ["MIT"] | null | null | null | samples.py | smartsheet-samples/python-snippets | 04951c2ca8ae1a97386bdd3fa6e010f2845e1421 | ["MIT"] | 2 | 2019-04-12T16:59:19.000Z | 2021-09-03T12:13:23.000Z | # Install the smartsheet sdk with the command: pip install smartsheet-python-sdk
import smartsheet
import logging
# TODO: Set your API access token here, or leave as None and set as environment variable "SMARTSHEET_ACCESS_TOKEN"
access_token = None
# Download an image in a cell
def download_cell_image(client, sheet_id, row_id, column_id, default_filename):
# Get desired row
row = client.Sheets.get_row(sheet_id, row_id)
cell = row.get_column(column_id)
image = cell.image
filename = getattr(image, 'alt_text', default_filename)
# Obtain a temporary image URL
imageUrl = ss_client.models.ImageUrl( { "imageId": image.id } )
response = ss_client.Images.get_image_urls([imageUrl])
url = response.image_urls[0].url
# Download the image
import requests
response = requests.get(url)
if response.status_code == 200:
with open(filename, 'wb') as f:
f.write(response.content)
# Set column definition to a list of contacts
def add_column_contacts(ss_client, sheet_id, column_id, emails):
column = ss_client.models.Column()
column.type = 'CONTACT_LIST'
contacts = []
for email in emails:
contact_option = ss_client.models.contact_option.ContactOption()
contact_option.email = email
contacts.append(contact_option)
column.contact_options = contacts
ss_client.Sheets.update_column(sheet_id, column_id, column)
return None
# Set sheet link
# Value from source will be visible in dest
def set_sheet_link(ss_client, source_sheet_id, source_row_id, source_column_id, dest_sheet_id, dest_row_id, dest_column_id):
cell_link = ss_client.models.CellLink()
cell_link.sheet_id = source_sheet_id
cell_link.row_id = source_row_id
cell_link.column_id = source_column_id
cell = ss_client.models.Cell()
cell.column_id = dest_column_id
cell.value = None
cell.link_in_from_cell = cell_link
row = ss_client.models.Row()
row.id = dest_row_id
row.cells.append(cell)
rows = []
rows.append(row)
ss_client.Sheets.update_rows(dest_sheet_id, rows)
return None
print('Starting ...')
# Initialize client
ss_client = smartsheet.Smartsheet(access_token)
# Make sure we don't miss any error
ss_client.errors_as_exceptions(True)
# setup logging
logging.basicConfig(filename='samples.log', level=logging.DEBUG)
# Add your test calls here
sheet_id = 5370997298751364
row_id = 6483985534609284
column_id = 2009176927954820
sheet_id2 = 6903887367038852
row_id2 = 6144655761926020
column_id2 = 7262773366286212
download_cell_image(ss_client, sheet_id, row_id, column_id, "save.jpg")
# add_column_contacts(ss_client, sheet_id, column_id, ['[email protected]'])
# set_sheet_link(ss_client, sheet_id2, row_id2, column_id2, sheet_id, row_id, column_id)
print('Done')
| 29.978723 | 124 | 0.742725 |
79425119b157d84c715d6604792d8a4658441608 | 1,048 | py | Python | UE4Parse/Assets/Objects/Decompress.py | MinshuG/pyUE4Parse | 96cda8132ff423bd36be20828025c2c1c0a7e406 | [
"MIT"
] | 13 | 2021-06-09T09:21:00.000Z | 2022-03-30T22:13:24.000Z | UE4Parse/Assets/Objects/Decompress.py | MinshuG/pyUE4Parse | 96cda8132ff423bd36be20828025c2c1c0a7e406 | [
"MIT"
] | 3 | 2021-09-04T22:23:02.000Z | 2022-03-04T09:45:45.000Z | UE4Parse/Assets/Objects/Decompress.py | MinshuG/pyUE4Parse | 96cda8132ff423bd36be20828025c2c1c0a7e406 | [
"MIT"
] | 6 | 2021-09-02T10:28:21.000Z | 2022-03-30T22:13:37.000Z |
def Decompress(buffer: bytes, method, decompressSize = 0) -> bytes:
if method == "Oodle":
from UE4Parse.Oodle import Decompress as OoDecompress
result = OoDecompress(buffer=buffer, decompressLength=decompressSize)
assert len(result) == decompressSize
return result
elif method == "Gzip":
from gzip import decompress as gDecompress
result = gDecompress(buffer)
assert len(result) == decompressSize
return result
elif method == "Zlib":
from zlib import decompress as zDecompress
result = zDecompress(buffer, bufsize=decompressSize)
assert len(result) == decompressSize
return result
elif method == "LZ4":
from lz4.frame import LZ4FrameDecompressor
lz4Decompress = LZ4FrameDecompressor().decompress
result = lz4Decompress(buffer, max_length=decompressSize)
assert len(result) == decompressSize
return result
else:
raise NotImplementedError("Unknown Compression Method " + str(method))
| 38.814815 | 78 | 0.674618 |
7942514027cd4e07d0a0d41f220087ae70649244 | 53,763 | py | Python | plugins/modules/oci_database_autonomous_database.py | rishimahajan/oci-ansible-collection | 3a0cfd55347466fcd8ab257d3d7c41d58f5469ab | [
"Apache-2.0"
] | null | null | null | plugins/modules/oci_database_autonomous_database.py | rishimahajan/oci-ansible-collection | 3a0cfd55347466fcd8ab257d3d7c41d58f5469ab | [
"Apache-2.0"
] | null | null | null | plugins/modules/oci_database_autonomous_database.py | rishimahajan/oci-ansible-collection | 3a0cfd55347466fcd8ab257d3d7c41d58f5469ab | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/python
# Copyright (c) 2017, 2020 Oracle and/or its affiliates.
# This software is made available to you under the terms of the GPL 3.0 license or the Apache 2.0 license.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# Apache License v2.0
# See LICENSE.TXT for details.
# GENERATED FILE - DO NOT EDIT - MANUAL CHANGES WILL BE OVERWRITTEN
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {
"metadata_version": "1.1",
"status": ["preview"],
"supported_by": "community",
}
DOCUMENTATION = """
---
module: oci_database_autonomous_database
short_description: Manage an AutonomousDatabase resource in Oracle Cloud Infrastructure
description:
- This module allows the user to create, update and delete an AutonomousDatabase resource in Oracle Cloud Infrastructure
- For I(state=present), creates a new Autonomous Database.
- "This resource has the following action operations in the M(oci_autonomous_database_actions) module: autonomous_database_manual_refresh,
deregister_autonomous_database_data_safe, disable_autonomous_database_operations_insights, enable_autonomous_database_operations_insights, fail_over,
generate_autonomous_database_wallet, register_autonomous_database_data_safe, restart, restore, rotate_autonomous_database_encryption_key, start, stop,
switchover."
version_added: "2.9"
author: Oracle (@oracle)
options:
compartment_id:
description:
- The L(OCID,https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm) of the compartment of the Autonomous Database.
- Required for create using I(state=present).
- Required for update when environment variable C(OCI_USE_NAME_AS_IDENTIFIER) is set.
- Required for delete when environment variable C(OCI_USE_NAME_AS_IDENTIFIER) is set.
type: str
db_name:
description:
- The database name. The name must begin with an alphabetic character and can contain a maximum of 14 alphanumeric characters. Special characters
are not permitted. The database name must be unique in the tenancy.
- Required for create using I(state=present).
- This parameter is updatable.
type: str
cpu_core_count:
description:
- The number of OCPU cores to be made available to the database.
- Required for create using I(state=present).
- This parameter is updatable.
type: int
db_workload:
description:
- "The Autonomous Database workload type. The following values are valid:"
- "- OLTP - indicates an Autonomous Transaction Processing database
- DW - indicates an Autonomous Data Warehouse database
- AJD - indicates an Autonomous JSON Database"
- This parameter is updatable.
type: str
choices:
- "OLTP"
- "DW"
- "AJD"
data_storage_size_in_tbs:
description:
- The size, in terabytes, of the data volume that will be created and attached to the database. This storage can later be scaled up if needed.
- Required for create using I(state=present).
- This parameter is updatable.
type: int
is_free_tier:
description:
- Indicates if this is an Always Free resource. The default value is false. Note that Always Free Autonomous Databases have 1 CPU and 20GB of
memory. For Always Free databases, memory and CPU cannot be scaled.
- This parameter is updatable.
type: bool
admin_password:
description:
- "The password must be between 12 and 30 characters long, and must contain at least 1 uppercase, 1 lowercase, and 1 numeric character. It cannot
contain the double quote symbol (\\") or the username \\"admin\\", regardless of casing."
- This parameter is updatable.
type: str
display_name:
description:
- The user-friendly name for the Autonomous Database. The name does not have to be unique.
- Required for create, update, delete when environment variable C(OCI_USE_NAME_AS_IDENTIFIER) is set.
- This parameter is updatable when C(OCI_USE_NAME_AS_IDENTIFIER) is not set.
type: str
aliases: ["name"]
license_model:
description:
- The Oracle license model that applies to the Oracle Autonomous Database. Bring your own license (BYOL) allows you to apply your current on-
premises Oracle software licenses to equivalent, highly automated Oracle PaaS and IaaS services in the cloud.
License Included allows you to subscribe to new Oracle Database software licenses and the Database service.
Note that when provisioning an Autonomous Database on L(dedicated Exadata
infrastructure,https://docs.cloud.oracle.com/Content/Database/Concepts/adbddoverview.htm), this attribute must be null because the attribute is
already set at the
Autonomous Exadata Infrastructure level. When using L(shared Exadata
infrastructure,https://docs.cloud.oracle.com/Content/Database/Concepts/adboverview.htm#AEI), if a value is not specified, the system will supply
the value of `BRING_YOUR_OWN_LICENSE`.
- This parameter is updatable.
type: str
choices:
- "LICENSE_INCLUDED"
- "BRING_YOUR_OWN_LICENSE"
is_preview_version_with_service_terms_accepted:
description:
- If set to `TRUE`, indicates that an Autonomous Database preview version is being provisioned, and that the preview version's terms of service have
been accepted. Note that preview version software is only available for databases on L(shared Exadata
infrastructure,https://docs.cloud.oracle.com/Content/Database/Concepts/adboverview.htm#AEI).
type: bool
is_auto_scaling_enabled:
description:
- Indicates if auto scaling is enabled for the Autonomous Database OCPU core count. The default value is `FALSE`.
- This parameter is updatable.
type: bool
is_dedicated:
description:
- True if the database is on L(dedicated Exadata infrastructure,https://docs.cloud.oracle.com/Content/Database/Concepts/adbddoverview.htm).
type: bool
autonomous_container_database_id:
description:
- The Autonomous Container Database L(OCID,https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm).
type: str
is_access_control_enabled:
description:
- Indicates if the database-level access control is enabled.
If disabled, database access is defined by the network security rules.
If enabled, database access is restricted to the IP addresses defined by the rules specified with the `whitelistedIps` property. While specifying
`whitelistedIps` rules is optional,
if database-level access control is enabled and no rules are specified, the database will become inaccessible. The rules can be added later using
the `UpdateAutonomousDatabase` API operation or edit option in console.
When creating a database clone, the desired access control setting should be specified. By default, database-level access control will be disabled
for the clone.
- This property is applicable only to Autonomous Databases on the Exadata Cloud@Customer platform.
- This parameter is updatable.
type: bool
whitelisted_ips:
description:
- The client IP access control list (ACL). This feature is available for autonomous databases on L(shared Exadata
infrastructure,https://docs.cloud.oracle.com/Content/Database/Concepts/adboverview.htm#AEI) and on Exadata Cloud@Customer.
Only clients connecting from an IP address included in the ACL may access the Autonomous Database instance.
- "For shared Exadata infrastructure, this is an array of CIDR (Classless Inter-Domain Routing) notations for a subnet or VCN OCID.
              Use a semicolon (;) as a delimiter between the VCN-specific subnets or IPs.
Example: `[\\"1.1.1.1\\",\\"1.1.1.0/24\\",\\"ocid1.vcn.oc1.sea.<unique_id>\\",\\"ocid1.vcn.oc1.sea.<unique_id1>;1.1.1.1\\",\\"ocid1.vcn.oc1.sea.<u
nique_id2>;1.1.0.0/16\\"]`
For Exadata Cloud@Customer, this is an array of IP addresses or CIDR (Classless Inter-Domain Routing) notations.
Example: `[\\"1.1.1.1\\",\\"1.1.1.0/24\\",\\"1.1.2.25\\"]`"
- For an update operation, if you want to delete all the IPs in the ACL, use an array with a single empty string entry.
- This parameter is updatable.
type: list
is_data_guard_enabled:
description:
- Indicates whether the Autonomous Database has Data Guard enabled.
- This parameter is updatable.
type: bool
subnet_id:
description:
- The L(OCID,https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm) of the subnet the resource is associated with.
- "**Subnet Restrictions:**
- For bare metal DB systems and for single node virtual machine DB systems, do not use a subnet that overlaps with 192.168.16.16/28.
- For Exadata and virtual machine 2-node RAC systems, do not use a subnet that overlaps with 192.168.128.0/20.
- For Autonomous Database, setting this will disable public secure access to the database."
- These subnets are used by the Oracle Clusterware private interconnect on the database instance.
Specifying an overlapping subnet will cause the private interconnect to malfunction.
This restriction applies to both the client subnet and the backup subnet.
- This parameter is updatable.
type: str
nsg_ids:
description:
- "A list of the L(OCIDs,https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm) of the network security groups (NSGs) that this
resource belongs to. Setting this to an empty array after the list is created removes the resource from all NSGs. For more information about NSGs,
see L(Security Rules,https://docs.cloud.oracle.com/Content/Network/Concepts/securityrules.htm).
**NsgIds restrictions:**
- Autonomous Databases with private access require at least 1 Network Security Group (NSG). The nsgIds array cannot be empty."
- This parameter is updatable.
type: list
private_endpoint_label:
description:
- The private endpoint label for the resource. Setting this to an empty string, after the private endpoint database gets created, will change the
same private endpoint database to the public endpoint database.
- This parameter is updatable.
type: str
freeform_tags:
description:
- Free-form tags for this resource. Each tag is a simple key-value pair with no predefined name, type, or namespace.
For more information, see L(Resource Tags,https://docs.cloud.oracle.com/Content/General/Concepts/resourcetags.htm).
- "Example: `{\\"Department\\": \\"Finance\\"}`"
- This parameter is updatable.
type: dict
defined_tags:
description:
- Defined tags for this resource. Each key is predefined and scoped to a namespace.
For more information, see L(Resource Tags,https://docs.cloud.oracle.com/Content/General/Concepts/resourcetags.htm).
- This parameter is updatable.
type: dict
db_version:
description:
- A valid Oracle Database version for Autonomous Database.
- This parameter is updatable.
type: str
source:
description:
- "The source of the database: Use `NONE` for creating a new Autonomous Database. Use `DATABASE` for creating a new Autonomous Database by cloning
an existing Autonomous Database."
- "For Autonomous Databases on L(shared Exadata infrastructure,https://docs.cloud.oracle.com/Content/Database/Concepts/adboverview.htm#AEI), the
following cloning options are available: Use `BACKUP_FROM_ID` for creating a new Autonomous Database from a specified backup. Use
`BACKUP_FROM_TIMESTAMP` for creating a point-in-time Autonomous Database clone using backups. For more information, see L(Cloning an Autonomous
Database,https://docs.cloud.oracle.com/Content/Database/Tasks/adbcloning.htm)."
type: str
choices:
- "DATABASE"
- "CLONE_TO_REFRESHABLE"
- "BACKUP_FROM_ID"
- "BACKUP_FROM_TIMESTAMP"
- "NONE"
default: "NONE"
source_id:
description:
- The L(OCID,https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm) of the source Autonomous Database that you will clone to create
a new Autonomous Database.
- Required when source is one of ['DATABASE', 'CLONE_TO_REFRESHABLE']
type: str
clone_type:
description:
- The Autonomous Database clone type.
- Required when source is one of ['BACKUP_FROM_TIMESTAMP', 'DATABASE', 'BACKUP_FROM_ID']
type: str
choices:
- "FULL"
- "METADATA"
refreshable_mode:
description:
- The refresh mode of the clone. AUTOMATIC indicates that the clone is automatically being refreshed with data from the source Autonomous Database.
- This parameter is updatable.
- Applicable when source is 'CLONE_TO_REFRESHABLE'
type: str
choices:
- "AUTOMATIC"
- "MANUAL"
autonomous_database_backup_id:
description:
- The L(OCID,https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm) of the source Autonomous Database Backup that you will clone to
create a new Autonomous Database.
- Required when source is 'BACKUP_FROM_ID'
type: str
autonomous_database_id:
description:
- The L(OCID,https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm) of the source Autonomous Database that you will clone to create
a new Autonomous Database.
- Required for update using I(state=present) when environment variable C(OCI_USE_NAME_AS_IDENTIFIER) is not set.
- Required for delete using I(state=absent) when environment variable C(OCI_USE_NAME_AS_IDENTIFIER) is not set.
- Required when source is 'BACKUP_FROM_TIMESTAMP'
type: str
aliases: ["id"]
timestamp:
description:
- The timestamp specified for the point-in-time clone of the source Autonomous Database. The timestamp must be in the past.
- Required when source is 'BACKUP_FROM_TIMESTAMP'
type: str
is_refreshable_clone:
description:
- Indicates whether the Autonomous Database is a refreshable clone.
- This parameter is updatable.
type: bool
open_mode:
description:
- The `DATABASE OPEN` mode. You can open the database in `READ_ONLY` or `READ_WRITE` mode.
- This parameter is updatable.
type: str
choices:
- "READ_ONLY"
- "READ_WRITE"
permission_level:
description:
- The Autonomous Database permission level. Restricted mode allows access only to admin users.
- This parameter is updatable.
type: str
choices:
- "RESTRICTED"
- "UNRESTRICTED"
state:
description:
- The state of the AutonomousDatabase.
- Use I(state=present) to create or update an AutonomousDatabase.
- Use I(state=absent) to delete an AutonomousDatabase.
type: str
required: false
default: 'present'
choices: ["present", "absent"]
extends_documentation_fragment: [ oracle.oci.oracle, oracle.oci.oracle_creatable_resource, oracle.oci.oracle_wait_options ]
"""
EXAMPLES = """
- name: Create autonomous_database
oci_database_autonomous_database:
compartment_id: ocid.compartment.oc1..<unique_ID>
display_name: example_autonomous_database
db_name: adatabasedb1
admin_password: <password>
cpu_core_count: 8
data_storage_size_in_tbs: 1
- name: Update autonomous_database using name (when environment variable OCI_USE_NAME_AS_IDENTIFIER is set)
oci_database_autonomous_database:
cpu_core_count: 20
display_name: example_autonomous_database
- name: Update autonomous_database using name (when environment variable OCI_USE_NAME_AS_IDENTIFIER is set)
oci_database_autonomous_database:
autonomous_database_id: ocid1.autonomousdatabase.oc1.iad.Example
cpu_core_count: 20
- name: Update autonomous_database
oci_database_autonomous_database:
autonomous_database_id: ocid1.autonomousdatabase.oc1.iad.Example
- name: Delete autonomous_database
oci_database_autonomous_database:
autonomous_database_id: ocid1.autonomousdatabase.oc1.iad.Example
state: absent
- name: Delete autonomous_database using name (when environment variable OCI_USE_NAME_AS_IDENTIFIER is set)
oci_database_autonomous_database:
compartment_id: ocid.compartment.oc1..<unique_ID>
display_name: example_autonomous_database
state: absent
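# Editor's illustrative addition (not part of the generated examples): cloning an
# existing Autonomous Database, using only options documented above.
- name: Create autonomous_database clone from an existing database
  oci_database_autonomous_database:
    compartment_id: ocid.compartment.oc1..<unique_ID>
    display_name: example_autonomous_database_clone
    db_name: adatabaseclone1
    admin_password: <password>
    cpu_core_count: 8
    data_storage_size_in_tbs: 1
    source: DATABASE
    source_id: ocid1.autonomousdatabase.oc1.iad.Example
    clone_type: FULL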
"""
RETURN = """
autonomous_database:
description:
- Details of the AutonomousDatabase resource acted upon by the current operation
returned: on success
type: complex
contains:
id:
description:
- The L(OCID,https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm) of the Autonomous Database.
returned: on success
type: string
sample: ocid1.resource.oc1..xxxxxxEXAMPLExxxxxx
compartment_id:
description:
- The L(OCID,https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm) of the compartment.
returned: on success
type: string
sample: ocid1.compartment.oc1..xxxxxxEXAMPLExxxxxx
lifecycle_state:
description:
- The current state of the Autonomous Database.
returned: on success
type: string
sample: PROVISIONING
lifecycle_details:
description:
- Information about the current lifecycle state.
returned: on success
type: string
sample: lifecycle_details_example
db_name:
description:
- The database name.
returned: on success
type: string
sample: db_name_example
is_free_tier:
description:
- Indicates if this is an Always Free resource. The default value is false. Note that Always Free Autonomous Databases have 1 CPU and 20GB of
memory. For Always Free databases, memory and CPU cannot be scaled.
returned: on success
type: bool
sample: true
system_tags:
description:
- System tags for this resource. Each key is predefined and scoped to a namespace.
For more information, see L(Resource Tags,https://docs.cloud.oracle.com/Content/General/Concepts/resourcetags.htm).
returned: on success
type: dict
sample: {}
time_reclamation_of_free_autonomous_database:
description:
- The date and time the Always Free database will be stopped because of inactivity. If this time is reached without any database activity, the
database will automatically be put into the STOPPED state.
returned: on success
type: string
sample: 2013-10-20T19:20:30+01:00
time_deletion_of_free_autonomous_database:
description:
- The date and time the Always Free database will be automatically deleted because of inactivity. If the database is in the STOPPED state and
without activity until this time, it will be deleted.
returned: on success
type: string
sample: 2013-10-20T19:20:30+01:00
backup_config:
description:
- ""
returned: on success
type: complex
contains:
manual_backup_bucket_name:
description:
- Name of L(Object Storage,https://docs.cloud.oracle.com/Content/Object/Concepts/objectstorageoverview.htm) bucket to use for storing
manual backups.
returned: on success
type: string
sample: manual_backup_bucket_name_example
manual_backup_type:
description:
- The manual backup destination type.
returned: on success
type: string
sample: NONE
cpu_core_count:
description:
- The number of OCPU cores to be made available to the database.
returned: on success
type: int
sample: 56
data_storage_size_in_tbs:
description:
- The quantity of data in the database, in terabytes.
returned: on success
type: int
sample: 56
infrastructure_type:
description:
- The infrastructure type this resource belongs to.
returned: on success
type: string
sample: CLOUD
is_dedicated:
description:
- True if the database uses L(dedicated Exadata infrastructure,https://docs.cloud.oracle.com/Content/Database/Concepts/adbddoverview.htm).
returned: on success
type: bool
sample: true
autonomous_container_database_id:
description:
- The Autonomous Container Database L(OCID,https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm).
returned: on success
type: string
sample: ocid1.autonomouscontainerdatabase.oc1..xxxxxxEXAMPLExxxxxx
time_created:
description:
- The date and time the Autonomous Database was created.
returned: on success
type: string
sample: 2013-10-20T19:20:30+01:00
display_name:
description:
- The user-friendly name for the Autonomous Database. The name does not have to be unique.
returned: on success
type: string
sample: display_name_example
service_console_url:
description:
- The URL of the Service Console for the Autonomous Database.
returned: on success
type: string
sample: service_console_url_example
connection_strings:
description:
- The connection string used to connect to the Autonomous Database. The username for the Service Console is ADMIN. Use the password you entered
when creating the Autonomous Database for the password value.
returned: on success
type: complex
contains:
high:
description:
- The High database service provides the highest level of resources to each SQL statement resulting in the highest performance, but
supports the fewest number of concurrent SQL statements.
returned: on success
type: string
sample: high_example
medium:
description:
                        - The Medium database service provides a lower level of resources to each SQL statement, potentially resulting in a lower level of
                          performance, but supports more concurrent SQL statements.
returned: on success
type: string
sample: medium_example
low:
description:
                        - The Low database service provides the lowest level of resources to each SQL statement, but supports the largest number
                          of concurrent SQL statements.
returned: on success
type: string
sample: low_example
dedicated:
description:
                        - The database service provides the lowest level of resources to each SQL statement, but supports the largest number of
                          concurrent SQL statements.
returned: on success
type: string
sample: dedicated_example
all_connection_strings:
description:
- Returns all connection strings that can be used to connect to the Autonomous Database.
For more information, please see L(Predefined Database Service Names for Autonomous Transaction
Processing,https://docs.oracle.com/en/cloud/paas/atp-cloud/atpug/connect-predefined.html#GUID-9747539B-FD46-44F1-8FF8-F5AC650F15BE)
returned: on success
type: dict
sample: {}
connection_urls:
description:
- ""
returned: on success
type: complex
contains:
sql_dev_web_url:
description:
- Oracle SQL Developer Web URL.
returned: on success
type: string
sample: sql_dev_web_url_example
apex_url:
description:
- Oracle Application Express (APEX) URL.
returned: on success
type: string
sample: apex_url_example
machine_learning_user_management_url:
description:
- Oracle Machine Learning user management URL.
returned: on success
type: string
sample: machine_learning_user_management_url_example
license_model:
description:
- The Oracle license model that applies to the Oracle Autonomous Database. Bring your own license (BYOL) allows you to apply your current on-
premises Oracle software licenses to equivalent, highly automated Oracle PaaS and IaaS services in the cloud.
License Included allows you to subscribe to new Oracle Database software licenses and the Database service.
Note that when provisioning an Autonomous Database on L(dedicated Exadata
infrastructure,https://docs.cloud.oracle.com/Content/Database/Concepts/adbddoverview.htm), this attribute must be null because the attribute
is already set at the
Autonomous Exadata Infrastructure level. When using L(shared Exadata
infrastructure,https://docs.cloud.oracle.com/Content/Database/Concepts/adboverview.htm#AEI), if a value is not specified, the system will
supply the value of `BRING_YOUR_OWN_LICENSE`.
returned: on success
type: string
sample: LICENSE_INCLUDED
used_data_storage_size_in_tbs:
description:
- The amount of storage that has been used, in terabytes.
returned: on success
type: int
sample: 56
freeform_tags:
description:
- Free-form tags for this resource. Each tag is a simple key-value pair with no predefined name, type, or namespace.
For more information, see L(Resource Tags,https://docs.cloud.oracle.com/Content/General/Concepts/resourcetags.htm).
- "Example: `{\\"Department\\": \\"Finance\\"}`"
returned: on success
type: dict
sample: {'Department': 'Finance'}
defined_tags:
description:
- Defined tags for this resource. Each key is predefined and scoped to a namespace.
For more information, see L(Resource Tags,https://docs.cloud.oracle.com/Content/General/Concepts/resourcetags.htm).
returned: on success
type: dict
sample: {'Operations': {'CostCenter': 'US'}}
subnet_id:
description:
- The L(OCID,https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm) of the subnet the resource is associated with.
- "**Subnet Restrictions:**
- For bare metal DB systems and for single node virtual machine DB systems, do not use a subnet that overlaps with 192.168.16.16/28.
- For Exadata and virtual machine 2-node RAC systems, do not use a subnet that overlaps with 192.168.128.0/20.
- For Autonomous Database, setting this will disable public secure access to the database."
- These subnets are used by the Oracle Clusterware private interconnect on the database instance.
Specifying an overlapping subnet will cause the private interconnect to malfunction.
This restriction applies to both the client subnet and the backup subnet.
returned: on success
type: string
sample: ocid1.subnet.oc1..xxxxxxEXAMPLExxxxxx
nsg_ids:
description:
- "A list of the L(OCIDs,https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm) of the network security groups (NSGs) that this
resource belongs to. Setting this to an empty array after the list is created removes the resource from all NSGs. For more information about
NSGs, see L(Security Rules,https://docs.cloud.oracle.com/Content/Network/Concepts/securityrules.htm).
**NsgIds restrictions:**
- Autonomous Databases with private access require at least 1 Network Security Group (NSG). The nsgIds array cannot be empty."
returned: on success
type: list
sample: []
private_endpoint:
description:
- The private endpoint for the resource.
returned: on success
type: string
sample: private_endpoint_example
private_endpoint_label:
description:
                - The private endpoint label for the resource. Setting this to an empty string after the private endpoint database is created
                  changes it to a public endpoint database.
returned: on success
type: string
sample: private_endpoint_label_example
private_endpoint_ip:
description:
                - The private endpoint IP address for the resource.
returned: on success
type: string
sample: private_endpoint_ip_example
db_version:
description:
- A valid Oracle Database version for Autonomous Database.
returned: on success
type: string
sample: db_version_example
is_preview:
description:
- Indicates if the Autonomous Database version is a preview version.
returned: on success
type: bool
sample: true
db_workload:
description:
- "The Autonomous Database workload type. The following values are valid:"
- "- OLTP - indicates an Autonomous Transaction Processing database
- DW - indicates an Autonomous Data Warehouse database
- AJD - indicates an Autonomous JSON Database"
returned: on success
type: string
sample: OLTP
is_access_control_enabled:
description:
- Indicates if the database-level access control is enabled.
If disabled, database access is defined by the network security rules.
If enabled, database access is restricted to the IP addresses defined by the rules specified with the `whitelistedIps` property. While
specifying `whitelistedIps` rules is optional,
if database-level access control is enabled and no rules are specified, the database will become inaccessible. The rules can be added later
                  using the `UpdateAutonomousDatabase` API operation or the edit option in the console.
When creating a database clone, the desired access control setting should be specified. By default, database-level access control will be
disabled for the clone.
- This property is applicable only to Autonomous Databases on the Exadata Cloud@Customer platform.
returned: on success
type: bool
sample: true
whitelisted_ips:
description:
- The client IP access control list (ACL). This feature is available for autonomous databases on L(shared Exadata
infrastructure,https://docs.cloud.oracle.com/Content/Database/Concepts/adboverview.htm#AEI) and on Exadata Cloud@Customer.
Only clients connecting from an IP address included in the ACL may access the Autonomous Database instance.
- "For shared Exadata infrastructure, this is an array of CIDR (Classless Inter-Domain Routing) notations for a subnet or VCN OCID.
Use a semicolon (;) as a deliminator between the VCN-specific subnets or IPs.
Example: `[\\"1.1.1.1\\",\\"1.1.1.0/24\\",\\"ocid1.vcn.oc1.sea.<unique_id>\\",\\"ocid1.vcn.oc1.sea.<unique_id1>;1.1.1.1\\",\\"ocid1.vcn.oc1.se
a.<unique_id2>;1.1.0.0/16\\"]`
For Exadata Cloud@Customer, this is an array of IP addresses or CIDR (Classless Inter-Domain Routing) notations.
Example: `[\\"1.1.1.1\\",\\"1.1.1.0/24\\",\\"1.1.2.25\\"]`"
- For an update operation, if you want to delete all the IPs in the ACL, use an array with a single empty string entry.
returned: on success
type: list
sample: []
is_auto_scaling_enabled:
description:
- Indicates if auto scaling is enabled for the Autonomous Database CPU core count.
returned: on success
type: bool
sample: true
data_safe_status:
description:
- Status of the Data Safe registration for this Autonomous Database.
returned: on success
type: string
sample: REGISTERING
operations_insights_status:
description:
- Status of Operations Insights for this Autonomous Database.
returned: on success
type: string
sample: ENABLING
time_maintenance_begin:
description:
- The date and time when maintenance will begin.
returned: on success
type: string
sample: 2013-10-20T19:20:30+01:00
time_maintenance_end:
description:
- The date and time when maintenance will end.
returned: on success
type: string
sample: 2013-10-20T19:20:30+01:00
is_refreshable_clone:
description:
- Indicates whether the Autonomous Database is a refreshable clone.
returned: on success
type: bool
sample: true
time_of_last_refresh:
description:
- The date and time when last refresh happened.
returned: on success
type: string
sample: 2013-10-20T19:20:30+01:00
time_of_last_refresh_point:
description:
- The refresh point timestamp (UTC). The refresh point is the time to which the database was most recently refreshed. Data created after the
refresh point is not included in the refresh.
returned: on success
type: string
sample: 2013-10-20T19:20:30+01:00
time_of_next_refresh:
description:
                - The date and time of the next refresh.
returned: on success
type: string
sample: 2013-10-20T19:20:30+01:00
open_mode:
description:
- The `DATABASE OPEN` mode. You can open the database in `READ_ONLY` or `READ_WRITE` mode.
returned: on success
type: string
sample: READ_ONLY
refreshable_status:
description:
- The refresh status of the clone. REFRESHING indicates that the clone is currently being refreshed with data from the source Autonomous
Database.
returned: on success
type: string
sample: REFRESHING
refreshable_mode:
description:
- The refresh mode of the clone. AUTOMATIC indicates that the clone is automatically being refreshed with data from the source Autonomous
Database.
returned: on success
type: string
sample: AUTOMATIC
source_id:
description:
- The L(OCID,https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm) of the source Autonomous Database that was cloned to create
the current Autonomous Database.
returned: on success
type: string
sample: ocid1.source.oc1..xxxxxxEXAMPLExxxxxx
permission_level:
description:
- The Autonomous Database permission level. Restricted mode allows access only to admin users.
returned: on success
type: string
sample: RESTRICTED
time_of_last_switchover:
description:
- The timestamp of the last switchover operation for the Autonomous Database.
returned: on success
type: string
sample: 2013-10-20T19:20:30+01:00
time_of_last_failover:
description:
- The timestamp of the last failover operation.
returned: on success
type: string
sample: 2013-10-20T19:20:30+01:00
is_data_guard_enabled:
description:
- Indicates whether the Autonomous Database has Data Guard enabled.
returned: on success
type: bool
sample: true
failed_data_recovery_in_seconds:
description:
- Indicates the number of seconds of data loss for a Data Guard failover.
returned: on success
type: int
sample: 56
standby_db:
description:
- ""
returned: on success
type: complex
contains:
lag_time_in_seconds:
description:
- The amount of time, in seconds, that the data of the standby database lags the data of the primary database. Can be used to determine
the potential data loss in the event of a failover.
returned: on success
type: int
sample: 56
lifecycle_state:
description:
- The current state of the Autonomous Database.
returned: on success
type: string
sample: PROVISIONING
lifecycle_details:
description:
- Additional information about the current lifecycle state.
returned: on success
type: string
sample: lifecycle_details_example
available_upgrade_versions:
description:
- List of Oracle Database versions available for a database upgrade. If there are no version upgrades available, this list is empty.
returned: on success
type: list
sample: []
key_store_id:
description:
- The L(OCID,https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm) of the key store.
returned: on success
type: string
sample: ocid1.keystore.oc1..xxxxxxEXAMPLExxxxxx
key_store_wallet_name:
description:
- The wallet name for Oracle Key Vault.
returned: on success
type: string
sample: key_store_wallet_name_example
sample: {
"id": "ocid1.resource.oc1..xxxxxxEXAMPLExxxxxx",
"compartment_id": "ocid1.compartment.oc1..xxxxxxEXAMPLExxxxxx",
"lifecycle_state": "PROVISIONING",
"lifecycle_details": "lifecycle_details_example",
"db_name": "db_name_example",
"is_free_tier": true,
"system_tags": {},
"time_reclamation_of_free_autonomous_database": "2013-10-20T19:20:30+01:00",
"time_deletion_of_free_autonomous_database": "2013-10-20T19:20:30+01:00",
"backup_config": {
"manual_backup_bucket_name": "manual_backup_bucket_name_example",
"manual_backup_type": "NONE"
},
"cpu_core_count": 56,
"data_storage_size_in_tbs": 56,
"infrastructure_type": "CLOUD",
"is_dedicated": true,
"autonomous_container_database_id": "ocid1.autonomouscontainerdatabase.oc1..xxxxxxEXAMPLExxxxxx",
"time_created": "2013-10-20T19:20:30+01:00",
"display_name": "display_name_example",
"service_console_url": "service_console_url_example",
"connection_strings": {
"high": "high_example",
"medium": "medium_example",
"low": "low_example",
"dedicated": "dedicated_example",
"all_connection_strings": {}
},
"connection_urls": {
"sql_dev_web_url": "sql_dev_web_url_example",
"apex_url": "apex_url_example",
"machine_learning_user_management_url": "machine_learning_user_management_url_example"
},
"license_model": "LICENSE_INCLUDED",
"used_data_storage_size_in_tbs": 56,
"freeform_tags": {'Department': 'Finance'},
"defined_tags": {'Operations': {'CostCenter': 'US'}},
"subnet_id": "ocid1.subnet.oc1..xxxxxxEXAMPLExxxxxx",
"nsg_ids": [],
"private_endpoint": "private_endpoint_example",
"private_endpoint_label": "private_endpoint_label_example",
"private_endpoint_ip": "private_endpoint_ip_example",
"db_version": "db_version_example",
"is_preview": true,
"db_workload": "OLTP",
"is_access_control_enabled": true,
"whitelisted_ips": [],
"is_auto_scaling_enabled": true,
"data_safe_status": "REGISTERING",
"operations_insights_status": "ENABLING",
"time_maintenance_begin": "2013-10-20T19:20:30+01:00",
"time_maintenance_end": "2013-10-20T19:20:30+01:00",
"is_refreshable_clone": true,
"time_of_last_refresh": "2013-10-20T19:20:30+01:00",
"time_of_last_refresh_point": "2013-10-20T19:20:30+01:00",
"time_of_next_refresh": "2013-10-20T19:20:30+01:00",
"open_mode": "READ_ONLY",
"refreshable_status": "REFRESHING",
"refreshable_mode": "AUTOMATIC",
"source_id": "ocid1.source.oc1..xxxxxxEXAMPLExxxxxx",
"permission_level": "RESTRICTED",
"time_of_last_switchover": "2013-10-20T19:20:30+01:00",
"time_of_last_failover": "2013-10-20T19:20:30+01:00",
"is_data_guard_enabled": true,
"failed_data_recovery_in_seconds": 56,
"standby_db": {
"lag_time_in_seconds": 56,
"lifecycle_state": "PROVISIONING",
"lifecycle_details": "lifecycle_details_example"
},
"available_upgrade_versions": [],
"key_store_id": "ocid1.keystore.oc1..xxxxxxEXAMPLExxxxxx",
"key_store_wallet_name": "key_store_wallet_name_example"
}
"""
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.oracle.oci.plugins.module_utils import (
oci_common_utils,
oci_wait_utils,
)
from ansible_collections.oracle.oci.plugins.module_utils.oci_resource_utils import (
OCIResourceHelperBase,
get_custom_class,
)
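# The OCI Python SDK is an optional dependency: the try/except below only records its
# availability in HAS_OCI_PY_SDK, and main() fails with a clear error message when the
# SDK cannot be imported.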
try:
from oci.work_requests import WorkRequestClient
from oci.database import DatabaseClient
from oci.database.models import CreateAutonomousDatabaseBase
from oci.database.models import UpdateAutonomousDatabaseDetails
HAS_OCI_PY_SDK = True
except ImportError:
HAS_OCI_PY_SDK = False
class AutonomousDatabaseHelperGen(OCIResourceHelperBase):
"""Supported operations: create, update, get, list and delete"""
def __init__(self, *args, **kwargs):
super(AutonomousDatabaseHelperGen, self).__init__(*args, **kwargs)
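        # Lifecycle operations on Autonomous Databases are tracked as work requests, so a
        # WorkRequestClient is built from the same configuration as the database client and
        # used as the waiter client for create, update and delete.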
self.work_request_client = WorkRequestClient(
self.client._config, **self.client._kwargs
)
def get_module_resource_id_param(self):
return "autonomous_database_id"
def get_module_resource_id(self):
return self.module.params.get("autonomous_database_id")
def get_get_fn(self):
return self.client.get_autonomous_database
def get_resource(self):
return oci_common_utils.call_with_backoff(
self.client.get_autonomous_database,
autonomous_database_id=self.module.params.get("autonomous_database_id"),
)
def get_required_kwargs_for_list(self):
required_list_method_params = [
"compartment_id",
]
return dict(
(param, self.module.params[param]) for param in required_list_method_params
)
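    # Optional list filters are forwarded only when the user actually set them and,
    # unless matching by display name, only when key_by is unset or names the parameter,
    # so unrelated inputs do not narrow the lookup for an existing resource.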
def get_optional_kwargs_for_list(self):
optional_list_method_params = (
["autonomous_container_database_id", "display_name"]
if self._use_name_as_identifier()
else [
"autonomous_container_database_id",
"db_workload",
"db_version",
"is_free_tier",
"display_name",
"is_refreshable_clone",
"is_data_guard_enabled",
]
)
return dict(
(param, self.module.params[param])
for param in optional_list_method_params
if self.module.params.get(param) is not None
and (
self._use_name_as_identifier()
or (
not self.module.params.get("key_by")
or param in self.module.params.get("key_by")
)
)
)
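    # Listing merges the required and optional filters and retrieves every matching
    # Autonomous Database via list_autonomous_databases.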
def list_resources(self):
required_kwargs = self.get_required_kwargs_for_list()
optional_kwargs = self.get_optional_kwargs_for_list()
kwargs = oci_common_utils.merge_dicts(required_kwargs, optional_kwargs)
return oci_common_utils.list_all_resources(
self.client.list_autonomous_databases, **kwargs
)
def get_create_model_class(self):
return CreateAutonomousDatabaseBase
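    # Create, update and delete all go through the work-request waiter: the API call is
    # issued and the module then waits for the associated work request to reach a
    # completed state before returning.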
def create_resource(self):
create_details = self.get_create_model()
return oci_wait_utils.call_and_wait(
call_fn=self.client.create_autonomous_database,
call_fn_args=(),
call_fn_kwargs=dict(create_autonomous_database_details=create_details,),
waiter_type=oci_wait_utils.WORK_REQUEST_WAITER_KEY,
operation=oci_common_utils.CREATE_OPERATION_KEY,
waiter_client=self.work_request_client,
resource_helper=self,
wait_for_states=oci_common_utils.get_work_request_completed_states(),
)
def get_update_model_class(self):
return UpdateAutonomousDatabaseDetails
def update_resource(self):
update_details = self.get_update_model()
return oci_wait_utils.call_and_wait(
call_fn=self.client.update_autonomous_database,
call_fn_args=(),
call_fn_kwargs=dict(
autonomous_database_id=self.module.params.get("autonomous_database_id"),
update_autonomous_database_details=update_details,
),
waiter_type=oci_wait_utils.WORK_REQUEST_WAITER_KEY,
operation=oci_common_utils.UPDATE_OPERATION_KEY,
waiter_client=self.work_request_client,
resource_helper=self,
wait_for_states=oci_common_utils.get_work_request_completed_states(),
)
def delete_resource(self):
return oci_wait_utils.call_and_wait(
call_fn=self.client.delete_autonomous_database,
call_fn_args=(),
call_fn_kwargs=dict(
autonomous_database_id=self.module.params.get("autonomous_database_id"),
),
waiter_type=oci_wait_utils.WORK_REQUEST_WAITER_KEY,
operation=oci_common_utils.DELETE_OPERATION_KEY,
waiter_client=self.work_request_client,
resource_helper=self,
wait_for_states=oci_common_utils.get_work_request_completed_states(),
)
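# Hand-written customizations, if any, live in AutonomousDatabaseHelperCustom (resolved
# via get_custom_class); ResourceHelper lists it first so those overrides take precedence
# over the generated defaults.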
AutonomousDatabaseHelperCustom = get_custom_class("AutonomousDatabaseHelperCustom")
class ResourceHelper(AutonomousDatabaseHelperCustom, AutonomousDatabaseHelperGen):
pass
def main():
module_args = oci_common_utils.get_common_arg_spec(
supports_create=True, supports_wait=True
)
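    # Module-specific parameters are layered on top of the common OCI argument spec;
    # most of them map directly onto fields of the create/update detail models.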
module_args.update(
dict(
compartment_id=dict(type="str"),
db_name=dict(type="str"),
cpu_core_count=dict(type="int"),
db_workload=dict(type="str", choices=["OLTP", "DW", "AJD"]),
data_storage_size_in_tbs=dict(type="int"),
is_free_tier=dict(type="bool"),
admin_password=dict(type="str", no_log=True),
display_name=dict(aliases=["name"], type="str"),
license_model=dict(
type="str", choices=["LICENSE_INCLUDED", "BRING_YOUR_OWN_LICENSE"]
),
is_preview_version_with_service_terms_accepted=dict(type="bool"),
is_auto_scaling_enabled=dict(type="bool"),
is_dedicated=dict(type="bool"),
autonomous_container_database_id=dict(type="str"),
is_access_control_enabled=dict(type="bool"),
whitelisted_ips=dict(type="list"),
is_data_guard_enabled=dict(type="bool"),
subnet_id=dict(type="str"),
nsg_ids=dict(type="list"),
private_endpoint_label=dict(type="str"),
freeform_tags=dict(type="dict"),
defined_tags=dict(type="dict"),
db_version=dict(type="str"),
source=dict(
type="str",
default="NONE",
choices=[
"DATABASE",
"CLONE_TO_REFRESHABLE",
"BACKUP_FROM_ID",
"BACKUP_FROM_TIMESTAMP",
"NONE",
],
),
source_id=dict(type="str"),
clone_type=dict(type="str", choices=["FULL", "METADATA"]),
refreshable_mode=dict(type="str", choices=["AUTOMATIC", "MANUAL"]),
autonomous_database_backup_id=dict(type="str"),
autonomous_database_id=dict(aliases=["id"], type="str"),
timestamp=dict(type="str"),
is_refreshable_clone=dict(type="bool"),
open_mode=dict(type="str", choices=["READ_ONLY", "READ_WRITE"]),
permission_level=dict(type="str", choices=["RESTRICTED", "UNRESTRICTED"]),
state=dict(type="str", default="present", choices=["present", "absent"]),
)
)
module = AnsibleModule(argument_spec=module_args, supports_check_mode=True)
if not HAS_OCI_PY_SDK:
module.fail_json(msg="oci python sdk required for this module.")
resource_helper = ResourceHelper(
module=module,
resource_type="autonomous_database",
service_client_class=DatabaseClient,
namespace="database",
)
result = dict(changed=False)
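    # Dispatch on the requested state: "absent" deletes the database (by OCID or, when
    # configured, by display name) and "present" updates an existing database or creates
    # a new one.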
if resource_helper.is_delete_using_name():
result = resource_helper.delete_using_name()
elif resource_helper.is_delete():
result = resource_helper.delete()
elif resource_helper.is_update_using_name():
result = resource_helper.update_using_name()
elif resource_helper.is_update():
result = resource_helper.update()
elif resource_helper.is_create():
result = resource_helper.create()
module.exit_json(**result)
if __name__ == "__main__":
main()
| 47.917112 | 160 | 0.626156 |
794252279fa1684065e1622fa47ab7b08a979b08 | 96,851 | py | Python | gaugette/fonts/verdana_24.py | wsiffer/Google-Bartender | 37018d3efe33a84074a6dccbce9e82f20ef3c923 | [
"MIT"
] | 6 | 2020-07-30T00:21:29.000Z | 2022-03-16T23:31:09.000Z | gaugette/fonts/verdana_24.py | antndeb/Google-Bartender | 37018d3efe33a84074a6dccbce9e82f20ef3c923 | [
"MIT"
] | null | null | null | gaugette/fonts/verdana_24.py | antndeb/Google-Bartender | 37018d3efe33a84074a6dccbce9e82f20ef3c923 | [
"MIT"
] | 1 | 2022-03-16T23:39:29.000Z | 2022-03-16T23:39:29.000Z | # coding=utf-8
# Module verdana_24
# generated from Verdana 18pt
name = "Verdana 24"
start_char = '!'
end_char = chr(127)
char_height = 24
space_width = 12
gap_width = 3
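# Glyph data layout: each character occupies char_height (24) rows, top to bottom; every
# row packs the glyph's pixels MSB-first into ceil(width / 8) bytes, and the "@offset"
# comments record the starting byte index of each glyph within this tuple.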
bitmaps = (
# @0 '!' (2 pixels wide)
0x00, #
0xC0, # OO
0xC0, # OO
0xC0, # OO
0xC0, # OO
0xC0, # OO
0xC0, # OO
0xC0, # OO
0xC0, # OO
0xC0, # OO
0xC0, # OO
0xC0, # OO
0xC0, # OO
0xC0, # OO
0x00, #
0x00, #
0x00, #
0xC0, # OO
0xC0, # OO
0x00, #
0x00, #
0x00, #
0x00, #
0x00, #
# @24 '"' (7 pixels wide)
0xC6, # OO OO
0xC6, # OO OO
0xC6, # OO OO
0xC6, # OO OO
0xC6, # OO OO
0xC6, # OO OO
0xC6, # OO OO
0x00, #
0x00, #
0x00, #
0x00, #
0x00, #
0x00, #
0x00, #
0x00, #
0x00, #
0x00, #
0x00, #
0x00, #
0x00, #
0x00, #
0x00, #
0x00, #
0x00, #
# @48 '#' (15 pixels wide)
0x00, 0x00, #
0x02, 0x08, # O O
0x02, 0x08, # O O
0x04, 0x10, # O O
0x04, 0x10, # O O
0x04, 0x10, # O O
0x7F, 0xFE, # OOOOOOOOOOOOOO
0x04, 0x10, # O O
0x08, 0x20, # O O
0x08, 0x20, # O O
0x08, 0x20, # O O
0x08, 0x20, # O O
0xFF, 0xFC, # OOOOOOOOOOOOOO
0x10, 0x40, # O O
0x10, 0x40, # O O
0x10, 0x40, # O O
0x10, 0x40, # O O
0x20, 0x80, # O O
0x20, 0x80, # O O
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
# @96 '$' (11 pixels wide)
0x00, 0x00, #
0x04, 0x00, # O
0x04, 0x00, # O
0x04, 0x00, # O
0x1F, 0x80, # OOOOOO
0x7F, 0xC0, # OOOOOOOOO
0xE4, 0x40, # OOO O O
0xC4, 0x00, # OO O
0xC4, 0x00, # OO O
0xE4, 0x00, # OOO O
0x7C, 0x00, # OOOOO
0x3F, 0x80, # OOOOOOO
0x07, 0xC0, # OOOOO
0x04, 0x60, # O OO
0x04, 0x60, # O OO
0x04, 0x60, # O OO
0xC4, 0xE0, # OO O OOO
0xFF, 0xC0, # OOOOOOOOOO
0x7F, 0x00, # OOOOOOO
0x04, 0x00, # O
0x04, 0x00, # O
0x04, 0x00, # O
0x04, 0x00, # O
0x00, 0x00, #
# @144 '%' (22 pixels wide)
0x00, 0x00, 0x00, #
0x3E, 0x03, 0x00, # OOOOO OO
0x7F, 0x02, 0x00, # OOOOOOO O
0xE3, 0x86, 0x00, # OOO OOO OO
0xC1, 0x84, 0x00, # OO OO O
0xC1, 0x8C, 0x00, # OO OO OO
0xC1, 0x88, 0x00, # OO OO O
0xC1, 0x98, 0x00, # OO OO OO
0xE3, 0x98, 0x00, # OOO OOO OO
0x7F, 0x31, 0xF0, # OOOOOOO OO OOOOO
0x3E, 0x33, 0xF8, # OOOOO OO OOOOOOO
0x00, 0x67, 0x1C, # OO OOO OOO
0x00, 0x66, 0x0C, # OO OO OO
0x00, 0x46, 0x0C, # O OO OO
0x00, 0xC6, 0x0C, # OO OO OO
0x00, 0x86, 0x0C, # O OO OO
0x01, 0x87, 0x1C, # OO OOO OOO
0x01, 0x03, 0xF8, # O OOOOOOO
0x03, 0x01, 0xF0, # OO OOOOO
0x00, 0x00, 0x00, #
0x00, 0x00, 0x00, #
0x00, 0x00, 0x00, #
0x00, 0x00, 0x00, #
0x00, 0x00, 0x00, #
# @216 '&' (17 pixels wide)
0x00, 0x00, 0x00, #
0x1F, 0x00, 0x00, # OOOOO
0x3F, 0x80, 0x00, # OOOOOOO
0x71, 0xC0, 0x00, # OOO OOO
0x60, 0xC0, 0x00, # OO OO
0x60, 0xC0, 0x00, # OO OO
0x60, 0xC0, 0x00, # OO OO
0x39, 0x80, 0x00, # OOO OO
0x1F, 0x0C, 0x00, # OOOOO OO
0x3E, 0x0C, 0x00, # OOOOO OO
0x63, 0x8C, 0x00, # OO OOO OO
0x41, 0xCC, 0x00, # O OOO OO
0xC0, 0xE8, 0x00, # OO OOO O
0xC0, 0x78, 0x00, # OO OOOO
0xC0, 0x38, 0x00, # OO OOO
0xE0, 0x3C, 0x00, # OOO OOOO
0x70, 0x6E, 0x00, # OOO OO OOO
0x3F, 0xC7, 0x00, # OOOOOOOO OOO
0x1F, 0x03, 0x80, # OOOOO OOO
0x00, 0x00, 0x00, #
0x00, 0x00, 0x00, #
0x00, 0x00, 0x00, #
0x00, 0x00, 0x00, #
0x00, 0x00, 0x00, #
# @288 ''' (2 pixels wide)
0xC0, # OO
0xC0, # OO
0xC0, # OO
0xC0, # OO
0xC0, # OO
0xC0, # OO
0xC0, # OO
0x00, #
0x00, #
0x00, #
0x00, #
0x00, #
0x00, #
0x00, #
0x00, #
0x00, #
0x00, #
0x00, #
0x00, #
0x00, #
0x00, #
0x00, #
0x00, #
0x00, #
# @312 '(' (7 pixels wide)
0x0E, # OOO
0x1C, # OOO
0x18, # OO
0x30, # OO
0x30, # OO
0x60, # OO
0x60, # OO
0xE0, # OOO
0xC0, # OO
0xC0, # OO
0xC0, # OO
0xC0, # OO
0xC0, # OO
0xC0, # OO
0xC0, # OO
0xC0, # OO
0xE0, # OOO
0x60, # OO
0x60, # OO
0x30, # OO
0x30, # OO
0x18, # OO
0x1C, # OOO
0x0E, # OOO
# @336 ')' (7 pixels wide)
0xE0, # OOO
0x70, # OOO
0x30, # OO
0x18, # OO
0x18, # OO
0x0C, # OO
0x0C, # OO
0x0E, # OOO
0x06, # OO
0x06, # OO
0x06, # OO
0x06, # OO
0x06, # OO
0x06, # OO
0x06, # OO
0x06, # OO
0x0E, # OOO
0x0C, # OO
0x0C, # OO
0x18, # OO
0x18, # OO
0x30, # OO
0x70, # OOO
0xE0, # OOO
# @360 '*' (12 pixels wide)
0x06, 0x00, # OO
0x06, 0x00, # OO
0xC6, 0x30, # OO OO OO
0x76, 0xE0, # OOO OO OOO
0x1F, 0x80, # OOOOOO
0x06, 0x00, # OO
0x1F, 0x80, # OOOOOO
0x76, 0xE0, # OOO OO OOO
0xC6, 0x30, # OO OO OO
0x06, 0x00, # OO
0x06, 0x00, # OO
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
# @408 '+' (14 pixels wide)
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x03, 0x00, # OO
0x03, 0x00, # OO
0x03, 0x00, # OO
0x03, 0x00, # OO
0x03, 0x00, # OO
0x03, 0x00, # OO
0xFF, 0xFC, # OOOOOOOOOOOOOO
0xFF, 0xFC, # OOOOOOOOOOOOOO
0x03, 0x00, # OO
0x03, 0x00, # OO
0x03, 0x00, # OO
0x03, 0x00, # OO
0x03, 0x00, # OO
0x03, 0x00, # OO
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
# @456 ',' (5 pixels wide)
0x00, #
0x00, #
0x00, #
0x00, #
0x00, #
0x00, #
0x00, #
0x00, #
0x00, #
0x00, #
0x00, #
0x00, #
0x00, #
0x00, #
0x00, #
0x00, #
0x38, # OOO
0x30, # OO
0x70, # OOO
0x70, # OOO
0x60, # OO
0x60, # OO
0xC0, # OO
0xC0, # OO
# @480 '-' (7 pixels wide)
0x00, #
0x00, #
0x00, #
0x00, #
0x00, #
0x00, #
0x00, #
0x00, #
0x00, #
0x00, #
0xFE, # OOOOOOO
0xFE, # OOOOOOO
0x00, #
0x00, #
0x00, #
0x00, #
0x00, #
0x00, #
0x00, #
0x00, #
0x00, #
0x00, #
0x00, #
0x00, #
# @504 '.' (3 pixels wide)
0x00, #
0x00, #
0x00, #
0x00, #
0x00, #
0x00, #
0x00, #
0x00, #
0x00, #
0x00, #
0x00, #
0x00, #
0x00, #
0x00, #
0x00, #
0x00, #
0xE0, # OOO
0xE0, # OOO
0xE0, # OOO
0x00, #
0x00, #
0x00, #
0x00, #
0x00, #
# @528 '/' (10 pixels wide)
0x00, 0xC0, # OO
0x01, 0x80, # OO
0x01, 0x80, # OO
0x01, 0x80, # OO
0x03, 0x00, # OO
0x03, 0x00, # OO
0x03, 0x00, # OO
0x06, 0x00, # OO
0x06, 0x00, # OO
0x06, 0x00, # OO
0x0C, 0x00, # OO
0x0C, 0x00, # OO
0x0C, 0x00, # OO
0x18, 0x00, # OO
0x18, 0x00, # OO
0x18, 0x00, # OO
0x30, 0x00, # OO
0x30, 0x00, # OO
0x30, 0x00, # OO
0x60, 0x00, # OO
0x60, 0x00, # OO
0x60, 0x00, # OO
0xC0, 0x00, # OO
0x00, 0x00, #
# @576 '0' (11 pixels wide)
0x00, 0x00, #
0x1F, 0x00, # OOOOO
0x3F, 0x80, # OOOOOOO
0x71, 0xC0, # OOO OOO
0x60, 0xC0, # OO OO
0xC0, 0x60, # OO OO
0xC0, 0x60, # OO OO
0xC0, 0x60, # OO OO
0xC0, 0x60, # OO OO
0xC0, 0x60, # OO OO
0xC0, 0x60, # OO OO
0xC0, 0x60, # OO OO
0xC0, 0x60, # OO OO
0xC0, 0x60, # OO OO
0xC0, 0x60, # OO OO
0x60, 0xC0, # OO OO
0x71, 0xC0, # OOO OOO
0x3F, 0x80, # OOOOOOO
0x1F, 0x00, # OOOOO
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
# @624 '1' (10 pixels wide)
0x00, 0x00, #
0x0C, 0x00, # OO
0x1C, 0x00, # OOO
0xFC, 0x00, # OOOOOO
0xFC, 0x00, # OOOOOO
0x0C, 0x00, # OO
0x0C, 0x00, # OO
0x0C, 0x00, # OO
0x0C, 0x00, # OO
0x0C, 0x00, # OO
0x0C, 0x00, # OO
0x0C, 0x00, # OO
0x0C, 0x00, # OO
0x0C, 0x00, # OO
0x0C, 0x00, # OO
0x0C, 0x00, # OO
0x0C, 0x00, # OO
0xFF, 0xC0, # OOOOOOOOOO
0xFF, 0xC0, # OOOOOOOOOO
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
# @672 '2' (11 pixels wide)
0x00, 0x00, #
0x3E, 0x00, # OOOOO
0xFF, 0x80, # OOOOOOOOO
0xC1, 0x80, # OO OO
0x80, 0xC0, # O OO
0x00, 0xC0, # OO
0x00, 0xC0, # OO
0x00, 0xC0, # OO
0x01, 0xC0, # OOO
0x01, 0x80, # OO
0x03, 0x00, # OO
0x07, 0x00, # OOO
0x0E, 0x00, # OOO
0x1C, 0x00, # OOO
0x38, 0x00, # OOO
0x70, 0x00, # OOO
0xE0, 0x00, # OOO
0xFF, 0xE0, # OOOOOOOOOOO
0xFF, 0xE0, # OOOOOOOOOOO
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
# @720 '3' (11 pixels wide)
0x00, 0x00, #
0x3F, 0x00, # OOOOOO
0xFF, 0xC0, # OOOOOOOOOO
0xE0, 0xE0, # OOO OOO
0x80, 0x60, # O OO
0x00, 0x60, # OO
0x00, 0x60, # OO
0x00, 0xC0, # OO
0x0F, 0x00, # OOOO
0x0F, 0x80, # OOOOO
0x00, 0xC0, # OO
0x00, 0x60, # OO
0x00, 0x60, # OO
0x00, 0x60, # OO
0x00, 0x60, # OO
0x80, 0xE0, # O OOO
0xE1, 0xC0, # OOO OOO
0xFF, 0x80, # OOOOOOOOO
0x3F, 0x00, # OOOOOO
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
# @768 '4' (14 pixels wide)
0x00, 0x00, #
0x00, 0x60, # OO
0x00, 0xE0, # OOO
0x01, 0xE0, # OOOO
0x03, 0xE0, # OOOOO
0x07, 0x60, # OOO OO
0x0E, 0x60, # OOO OO
0x1C, 0x60, # OOO OO
0x38, 0x60, # OOO OO
0x30, 0x60, # OO OO
0x60, 0x60, # OO OO
0xC0, 0x60, # OO OO
0xFF, 0xFC, # OOOOOOOOOOOOOO
0xFF, 0xFC, # OOOOOOOOOOOOOO
0x00, 0x60, # OO
0x00, 0x60, # OO
0x00, 0x60, # OO
0x00, 0x60, # OO
0x00, 0x60, # OO
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
# @816 '5' (11 pixels wide)
0x00, 0x00, #
0x7F, 0xE0, # OOOOOOOOOO
0x7F, 0xE0, # OOOOOOOOOO
0x60, 0x00, # OO
0x60, 0x00, # OO
0x60, 0x00, # OO
0x60, 0x00, # OO
0x60, 0x00, # OO
0x7F, 0x00, # OOOOOOO
0x7F, 0xC0, # OOOOOOOOO
0x01, 0xC0, # OOO
0x00, 0x60, # OO
0x00, 0x60, # OO
0x00, 0x60, # OO
0x00, 0x60, # OO
0x00, 0xE0, # OOO
0xC1, 0xC0, # OO OOO
0xFF, 0x80, # OOOOOOOOO
0x7F, 0x00, # OOOOOOO
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
# @864 '6' (11 pixels wide)
0x00, 0x00, #
0x07, 0x80, # OOOO
0x1F, 0x80, # OOOOOO
0x38, 0x00, # OOO
0x70, 0x00, # OOO
0x60, 0x00, # OO
0x60, 0x00, # OO
0xC0, 0x00, # OO
0xDF, 0x00, # OO OOOOO
0xFF, 0xC0, # OOOOOOOOOO
0xE0, 0xC0, # OOO OO
0xC0, 0x60, # OO OO
0xC0, 0x60, # OO OO
0xC0, 0x60, # OO OO
0xC0, 0x60, # OO OO
0x60, 0x60, # OO OO
0x70, 0xC0, # OOO OO
0x3F, 0x80, # OOOOOOO
0x1F, 0x00, # OOOOO
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
# @912 '7' (12 pixels wide)
0x00, 0x00, #
0xFF, 0xF0, # OOOOOOOOOOOO
0xFF, 0xF0, # OOOOOOOOOOOO
0x00, 0x30, # OO
0x00, 0x30, # OO
0x00, 0x60, # OO
0x00, 0x60, # OO
0x00, 0xC0, # OO
0x00, 0xC0, # OO
0x01, 0x80, # OO
0x03, 0x80, # OOO
0x03, 0x00, # OO
0x07, 0x00, # OOO
0x06, 0x00, # OO
0x0E, 0x00, # OOO
0x0C, 0x00, # OO
0x1C, 0x00, # OOO
0x18, 0x00, # OO
0x38, 0x00, # OOO
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
# @960 '8' (11 pixels wide)
0x00, 0x00, #
0x1F, 0x00, # OOOOO
0x7F, 0xC0, # OOOOOOOOO
0x60, 0xE0, # OO OOO
0xC0, 0x60, # OO OO
0xC0, 0x60, # OO OO
0xC0, 0x60, # OO OO
0xE0, 0xC0, # OOO OO
0x79, 0x80, # OOOO OO
0x3F, 0x00, # OOOOOO
0x63, 0x80, # OO OOO
0x60, 0xC0, # OO OO
0xC0, 0x60, # OO OO
0xC0, 0x60, # OO OO
0xC0, 0x60, # OO OO
0xE0, 0x60, # OOO OO
0x70, 0xC0, # OOO OO
0x7F, 0x80, # OOOOOOOO
0x1F, 0x00, # OOOOO
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
# @1008 '9' (11 pixels wide)
0x00, 0x00, #
0x1F, 0x00, # OOOOO
0x3F, 0x80, # OOOOOOO
0x61, 0xC0, # OO OOO
0xE0, 0xC0, # OOO OO
0xC0, 0x60, # OO OO
0xC0, 0x60, # OO OO
0xC0, 0x60, # OO OO
0xC0, 0x60, # OO OO
0x60, 0xE0, # OO OOO
0x7F, 0xE0, # OOOOOOOOOO
0x1F, 0x60, # OOOOO OO
0x00, 0x60, # OO
0x00, 0xC0, # OO
0x00, 0xC0, # OO
0x01, 0xC0, # OOO
0x23, 0x80, # O OOO
0x3F, 0x00, # OOOOOO
0x3E, 0x00, # OOOOO
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
# @1056 ':' (3 pixels wide)
0x00, #
0x00, #
0x00, #
0x00, #
0x00, #
0x00, #
0xE0, # OOO
0xE0, # OOO
0xE0, # OOO
0x00, #
0x00, #
0x00, #
0x00, #
0x00, #
0x00, #
0x00, #
0xE0, # OOO
0xE0, # OOO
0xE0, # OOO
0x00, #
0x00, #
0x00, #
0x00, #
0x00, #
# @1080 ';' (5 pixels wide)
0x00, #
0x00, #
0x00, #
0x00, #
0x00, #
0x00, #
0x70, # OOO
0x70, # OOO
0x70, # OOO
0x00, #
0x00, #
0x00, #
0x00, #
0x00, #
0x00, #
0x00, #
0x38, # OOO
0x30, # OO
0x70, # OOO
0x70, # OOO
0x60, # OO
0x60, # OO
0xC0, # OO
0xC0, # OO
# @1104 '<' (14 pixels wide)
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x0C, # OO
0x00, 0x3C, # OOOO
0x01, 0xF0, # OOOOO
0x07, 0xC0, # OOOOO
0x1F, 0x00, # OOOOO
0x78, 0x00, # OOOO
0xE0, 0x00, # OOO
0x78, 0x00, # OOOO
0x1F, 0x00, # OOOOO
0x07, 0xC0, # OOOOO
0x01, 0xF0, # OOOOO
0x00, 0x3C, # OOOO
0x00, 0x0C, # OO
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
# @1152 '=' (14 pixels wide)
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0xFF, 0xFC, # OOOOOOOOOOOOOO
0xFF, 0xFC, # OOOOOOOOOOOOOO
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0xFF, 0xFC, # OOOOOOOOOOOOOO
0xFF, 0xFC, # OOOOOOOOOOOOOO
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
# @1200 '>' (14 pixels wide)
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0xC0, 0x00, # OO
0xF0, 0x00, # OOOO
0x3E, 0x00, # OOOOO
0x0F, 0x80, # OOOOO
0x03, 0xE0, # OOOOO
0x00, 0x78, # OOOO
0x00, 0x1C, # OOO
0x00, 0x78, # OOOO
0x03, 0xE0, # OOOOO
0x0F, 0x80, # OOOOO
0x3E, 0x00, # OOOOO
0xF0, 0x00, # OOOO
0xC0, 0x00, # OO
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
# @1248 '?' (10 pixels wide)
0x00, 0x00, #
0x7E, 0x00, # OOOOOO
0xFF, 0x80, # OOOOOOOOO
0x81, 0xC0, # O OOO
0x00, 0xC0, # OO
0x00, 0xC0, # OO
0x00, 0xC0, # OO
0x01, 0xC0, # OOO
0x01, 0x80, # OO
0x07, 0x00, # OOO
0x1E, 0x00, # OOOO
0x18, 0x00, # OO
0x18, 0x00, # OO
0x18, 0x00, # OO
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x18, 0x00, # OO
0x18, 0x00, # OO
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
# @1296 '@' (20 pixels wide)
0x00, 0x00, 0x00, #
0x01, 0xFC, 0x00, # OOOOOOO
0x07, 0xFF, 0x00, # OOOOOOOOOOO
0x1E, 0x07, 0x80, # OOOO OOOO
0x38, 0x01, 0xC0, # OOO OOO
0x30, 0xF6, 0xE0, # OO OOOO OO OOO
0x61, 0xFE, 0x60, # OO OOOOOOOO OO
0x63, 0x86, 0x70, # OO OOO OO OOO
0xC7, 0x06, 0x30, # OO OOO OO OO
0xC6, 0x06, 0x30, # OO OO OO OO
0xC6, 0x06, 0x30, # OO OO OO OO
0xC6, 0x06, 0x30, # OO OO OO OO
0xC6, 0x06, 0x30, # OO OO OO OO
0xC6, 0x06, 0x30, # OO OO OO OO
0x63, 0x0E, 0x60, # OO OO OOO OO
0x63, 0xFF, 0xE0, # OO OOOOOOOOOOOOO
0x31, 0xF7, 0xE0, # OO OOOOO OOOOOO
0x38, 0x00, 0x00, # OOO
0x1E, 0x00, 0x00, # OOOO
0x07, 0xFE, 0x00, # OOOOOOOOOO
0x01, 0xFE, 0x00, # OOOOOOOO
0x00, 0x00, 0x00, #
0x00, 0x00, 0x00, #
0x00, 0x00, 0x00, #
# @1368 'A' (16 pixels wide)
0x00, 0x00, #
0x03, 0xC0, # OOOO
0x03, 0xC0, # OOOO
0x03, 0xC0, # OOOO
0x06, 0x60, # OO OO
0x06, 0x60, # OO OO
0x0E, 0x30, # OOO OO
0x0C, 0x30, # OO OO
0x0C, 0x30, # OO OO
0x18, 0x18, # OO OO
0x18, 0x18, # OO OO
0x18, 0x18, # OO OO
0x3F, 0xFC, # OOOOOOOOOOOO
0x3F, 0xFC, # OOOOOOOOOOOO
0x30, 0x0C, # OO OO
0x60, 0x06, # OO OO
0x60, 0x06, # OO OO
0x60, 0x06, # OO OO
0xC0, 0x03, # OO OO
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
# @1416 'B' (13 pixels wide)
0x00, 0x00, #
0xFF, 0xC0, # OOOOOOOOOO
0xFF, 0xE0, # OOOOOOOOOOO
0xC0, 0x70, # OO OOO
0xC0, 0x30, # OO OO
0xC0, 0x30, # OO OO
0xC0, 0x30, # OO OO
0xC0, 0x60, # OO OO
0xFF, 0xC0, # OOOOOOOOOO
0xFF, 0xE0, # OOOOOOOOOOO
0xC0, 0x70, # OO OOO
0xC0, 0x18, # OO OO
0xC0, 0x18, # OO OO
0xC0, 0x18, # OO OO
0xC0, 0x18, # OO OO
0xC0, 0x38, # OO OOO
0xC0, 0x70, # OO OOO
0xFF, 0xE0, # OOOOOOOOOOO
0xFF, 0xC0, # OOOOOOOOOO
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
# @1464 'C' (15 pixels wide)
0x00, 0x00, #
0x03, 0xF8, # OOOOOOO
0x1F, 0xFE, # OOOOOOOOOOOO
0x3C, 0x0E, # OOOO OOO
0x70, 0x02, # OOO O
0x60, 0x00, # OO
0x60, 0x00, # OO
0xC0, 0x00, # OO
0xC0, 0x00, # OO
0xC0, 0x00, # OO
0xC0, 0x00, # OO
0xC0, 0x00, # OO
0xC0, 0x00, # OO
0xE0, 0x00, # OOO
0x60, 0x00, # OO
0x70, 0x02, # OOO O
0x3C, 0x0E, # OOOO OOO
0x1F, 0xFE, # OOOOOOOOOOOO
0x07, 0xF0, # OOOOOOO
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
# @1512 'D' (16 pixels wide)
0x00, 0x00, #
0xFF, 0xC0, # OOOOOOOOOO
0xFF, 0xF0, # OOOOOOOOOOOO
0xC0, 0x78, # OO OOOO
0xC0, 0x1C, # OO OOO
0xC0, 0x0E, # OO OOO
0xC0, 0x06, # OO OO
0xC0, 0x03, # OO OO
0xC0, 0x03, # OO OO
0xC0, 0x03, # OO OO
0xC0, 0x03, # OO OO
0xC0, 0x03, # OO OO
0xC0, 0x03, # OO OO
0xC0, 0x06, # OO OO
0xC0, 0x06, # OO OO
0xC0, 0x1C, # OO OOO
0xC0, 0x78, # OO OOOO
0xFF, 0xF0, # OOOOOOOOOOOO
0xFF, 0xC0, # OOOOOOOOOO
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
# @1560 'E' (12 pixels wide)
0x00, 0x00, #
0xFF, 0xF0, # OOOOOOOOOOOO
0xFF, 0xF0, # OOOOOOOOOOOO
0xC0, 0x00, # OO
0xC0, 0x00, # OO
0xC0, 0x00, # OO
0xC0, 0x00, # OO
0xC0, 0x00, # OO
0xFF, 0xF0, # OOOOOOOOOOOO
0xFF, 0xF0, # OOOOOOOOOOOO
0xC0, 0x00, # OO
0xC0, 0x00, # OO
0xC0, 0x00, # OO
0xC0, 0x00, # OO
0xC0, 0x00, # OO
0xC0, 0x00, # OO
0xC0, 0x00, # OO
0xFF, 0xF0, # OOOOOOOOOOOO
0xFF, 0xF0, # OOOOOOOOOOOO
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
# @1608 'F' (12 pixels wide)
0x00, 0x00, #
0xFF, 0xF0, # OOOOOOOOOOOO
0xFF, 0xF0, # OOOOOOOOOOOO
0xC0, 0x00, # OO
0xC0, 0x00, # OO
0xC0, 0x00, # OO
0xC0, 0x00, # OO
0xC0, 0x00, # OO
0xFF, 0xE0, # OOOOOOOOOOO
0xFF, 0xE0, # OOOOOOOOOOO
0xC0, 0x00, # OO
0xC0, 0x00, # OO
0xC0, 0x00, # OO
0xC0, 0x00, # OO
0xC0, 0x00, # OO
0xC0, 0x00, # OO
0xC0, 0x00, # OO
0xC0, 0x00, # OO
0xC0, 0x00, # OO
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
# @1656 'G' (16 pixels wide)
0x00, 0x00, #
0x03, 0xF8, # OOOOOOO
0x0F, 0xFF, # OOOOOOOOOOOO
0x3C, 0x07, # OOOO OOO
0x30, 0x01, # OO O
0x60, 0x00, # OO
0x60, 0x00, # OO
0xC0, 0x00, # OO
0xC0, 0x00, # OO
0xC0, 0x00, # OO
0xC0, 0x7F, # OO OOOOOOO
0xC0, 0x7F, # OO OOOOOOO
0xC0, 0x03, # OO OO
0x60, 0x03, # OO OO
0x60, 0x03, # OO OO
0x70, 0x03, # OOO OO
0x3C, 0x07, # OOOO OOO
0x0F, 0xFF, # OOOOOOOOOOOO
0x03, 0xF8, # OOOOOOO
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
# @1704 'H' (14 pixels wide)
0x00, 0x00, #
0xC0, 0x0C, # OO OO
0xC0, 0x0C, # OO OO
0xC0, 0x0C, # OO OO
0xC0, 0x0C, # OO OO
0xC0, 0x0C, # OO OO
0xC0, 0x0C, # OO OO
0xC0, 0x0C, # OO OO
0xFF, 0xFC, # OOOOOOOOOOOOOO
0xFF, 0xFC, # OOOOOOOOOOOOOO
0xC0, 0x0C, # OO OO
0xC0, 0x0C, # OO OO
0xC0, 0x0C, # OO OO
0xC0, 0x0C, # OO OO
0xC0, 0x0C, # OO OO
0xC0, 0x0C, # OO OO
0xC0, 0x0C, # OO OO
0xC0, 0x0C, # OO OO
0xC0, 0x0C, # OO OO
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
# @1752 'I' (6 pixels wide)
0x00, #
0xFC, # OOOOOO
0xFC, # OOOOOO
0x30, # OO
0x30, # OO
0x30, # OO
0x30, # OO
0x30, # OO
0x30, # OO
0x30, # OO
0x30, # OO
0x30, # OO
0x30, # OO
0x30, # OO
0x30, # OO
0x30, # OO
0x30, # OO
0xFC, # OOOOOO
0xFC, # OOOOOO
0x00, #
0x00, #
0x00, #
0x00, #
0x00, #
# @1776 'J' (8 pixels wide)
0x00, #
0x3F, # OOOOOO
0x3F, # OOOOOO
0x03, # OO
0x03, # OO
0x03, # OO
0x03, # OO
0x03, # OO
0x03, # OO
0x03, # OO
0x03, # OO
0x03, # OO
0x03, # OO
0x03, # OO
0x03, # OO
0x03, # OO
0x87, # O OOO
0xFE, # OOOOOOO
0xFC, # OOOOOO
0x00, #
0x00, #
0x00, #
0x00, #
0x00, #
# @1800 'K' (15 pixels wide)
0x00, 0x00, #
0xC0, 0x0C, # OO OO
0xC0, 0x18, # OO OO
0xC0, 0x30, # OO OO
0xC0, 0x60, # OO OO
0xC1, 0xC0, # OO OOO
0xC3, 0x80, # OO OOO
0xC7, 0x00, # OO OOO
0xCE, 0x00, # OO OOO
0xDC, 0x00, # OO OOO
0xFE, 0x00, # OOOOOOO
0xE7, 0x00, # OOO OOO
0xC3, 0x80, # OO OOO
0xC1, 0xC0, # OO OOO
0xC0, 0xE0, # OO OOO
0xC0, 0x70, # OO OOO
0xC0, 0x38, # OO OOO
0xC0, 0x1C, # OO OOO
0xC0, 0x0E, # OO OOO
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
# @1848 'L' (11 pixels wide)
0x00, 0x00, #
0xC0, 0x00, # OO
0xC0, 0x00, # OO
0xC0, 0x00, # OO
0xC0, 0x00, # OO
0xC0, 0x00, # OO
0xC0, 0x00, # OO
0xC0, 0x00, # OO
0xC0, 0x00, # OO
0xC0, 0x00, # OO
0xC0, 0x00, # OO
0xC0, 0x00, # OO
0xC0, 0x00, # OO
0xC0, 0x00, # OO
0xC0, 0x00, # OO
0xC0, 0x00, # OO
0xC0, 0x00, # OO
0xFF, 0xE0, # OOOOOOOOOOO
0xFF, 0xE0, # OOOOOOOOOOO
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
# @1896 'M' (16 pixels wide)
0x00, 0x00, #
0xE0, 0x07, # OOO OOO
0xF0, 0x0F, # OOOO OOOO
0xF0, 0x0F, # OOOO OOOO
0xD8, 0x1B, # OO OO OO OO
0xD8, 0x1B, # OO OO OO OO
0xCC, 0x33, # OO OO OO OO
0xCC, 0x33, # OO OO OO OO
0xCE, 0x63, # OO OOO OO OO
0xC6, 0x63, # OO OO OO OO
0xC7, 0xC3, # OO OOOOO OO
0xC3, 0xC3, # OO OOOO OO
0xC3, 0x83, # OO OOO OO
0xC1, 0x83, # OO OO OO
0xC0, 0x03, # OO OO
0xC0, 0x03, # OO OO
0xC0, 0x03, # OO OO
0xC0, 0x03, # OO OO
0xC0, 0x03, # OO OO
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
# @1944 'N' (14 pixels wide)
0x00, 0x00, #
0xF0, 0x0C, # OOOO OO
0xF0, 0x0C, # OOOO OO
0xF8, 0x0C, # OOOOO OO
0xDC, 0x0C, # OO OOO OO
0xDC, 0x0C, # OO OOO OO
0xCE, 0x0C, # OO OOO OO
0xC6, 0x0C, # OO OO OO
0xC7, 0x0C, # OO OOO OO
0xC3, 0x0C, # OO OO OO
0xC3, 0x8C, # OO OOO OO
0xC1, 0xCC, # OO OOO OO
0xC1, 0xCC, # OO OOO OO
0xC0, 0xEC, # OO OOO OO
0xC0, 0x6C, # OO OO OO
0xC0, 0x7C, # OO OOOOO
0xC0, 0x3C, # OO OOOO
0xC0, 0x3C, # OO OOOO
0xC0, 0x1C, # OO OOO
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
# @1992 'O' (17 pixels wide)
0x00, 0x00, 0x00, #
0x07, 0xF0, 0x00, # OOOOOOO
0x1F, 0xFC, 0x00, # OOOOOOOOOOO
0x3C, 0x1E, 0x00, # OOOO OOOO
0x70, 0x07, 0x00, # OOO OOO
0x60, 0x03, 0x00, # OO OO
0xE0, 0x03, 0x80, # OOO OOO
0xC0, 0x01, 0x80, # OO OO
0xC0, 0x01, 0x80, # OO OO
0xC0, 0x01, 0x80, # OO OO
0xC0, 0x01, 0x80, # OO OO
0xC0, 0x01, 0x80, # OO OO
0xC0, 0x01, 0x80, # OO OO
0xE0, 0x03, 0x80, # OOO OOO
0x60, 0x03, 0x00, # OO OO
0x70, 0x07, 0x00, # OOO OOO
0x3C, 0x1E, 0x00, # OOOO OOOO
0x1F, 0xFC, 0x00, # OOOOOOOOOOO
0x07, 0xF0, 0x00, # OOOOOOO
0x00, 0x00, 0x00, #
0x00, 0x00, 0x00, #
0x00, 0x00, 0x00, #
0x00, 0x00, 0x00, #
0x00, 0x00, 0x00, #
# @2064 'P' (11 pixels wide)
0x00, 0x00, #
0xFF, 0x00, # OOOOOOOO
0xFF, 0x80, # OOOOOOOOO
0xC1, 0xC0, # OO OOO
0xC0, 0x60, # OO OO
0xC0, 0x60, # OO OO
0xC0, 0x60, # OO OO
0xC0, 0x60, # OO OO
0xC0, 0xE0, # OO OOO
0xC1, 0xC0, # OO OOO
0xFF, 0x80, # OOOOOOOOO
0xFE, 0x00, # OOOOOOO
0xC0, 0x00, # OO
0xC0, 0x00, # OO
0xC0, 0x00, # OO
0xC0, 0x00, # OO
0xC0, 0x00, # OO
0xC0, 0x00, # OO
0xC0, 0x00, # OO
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
# @2112 'Q' (17 pixels wide)
0x00, 0x00, 0x00, #
0x07, 0xF0, 0x00, # OOOOOOO
0x1F, 0xFC, 0x00, # OOOOOOOOOOO
0x3C, 0x1E, 0x00, # OOOO OOOO
0x70, 0x07, 0x00, # OOO OOO
0x60, 0x03, 0x00, # OO OO
0xE0, 0x03, 0x80, # OOO OOO
0xC0, 0x01, 0x80, # OO OO
0xC0, 0x01, 0x80, # OO OO
0xC0, 0x01, 0x80, # OO OO
0xC0, 0x01, 0x80, # OO OO
0xC0, 0x01, 0x80, # OO OO
0xC0, 0x01, 0x80, # OO OO
0xE0, 0x03, 0x00, # OOO OO
0x60, 0x03, 0x00, # OO OO
0x70, 0x07, 0x00, # OOO OOO
0x3C, 0x1E, 0x00, # OOOO OOOO
0x1F, 0xFC, 0x00, # OOOOOOOOOOO
0x07, 0xF0, 0x00, # OOOOOOO
0x00, 0x30, 0x00, # OO
0x00, 0x30, 0x00, # OO
0x00, 0x18, 0x00, # OO
0x00, 0x1F, 0x80, # OOOOOO
0x00, 0x0F, 0x80, # OOOOO
# @2184 'R' (14 pixels wide)
0x00, 0x00, #
0xFF, 0x00, # OOOOOOOO
0xFF, 0xC0, # OOOOOOOOOO
0xC0, 0xC0, # OO OO
0xC0, 0x60, # OO OO
0xC0, 0x60, # OO OO
0xC0, 0x60, # OO OO
0xC0, 0x60, # OO OO
0xC0, 0xE0, # OO OOO
0xC1, 0xC0, # OO OOO
0xFF, 0x80, # OOOOOOOOO
0xFF, 0x00, # OOOOOOOO
0xC3, 0x80, # OO OOO
0xC1, 0x80, # OO OO
0xC0, 0xC0, # OO OO
0xC0, 0x60, # OO OO
0xC0, 0x70, # OO OOO
0xC0, 0x38, # OO OOO
0xC0, 0x1C, # OO OOO
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
# @2232 'S' (13 pixels wide)
0x00, 0x00, #
0x1F, 0xC0, # OOOOOOO
0x3F, 0xF0, # OOOOOOOOOO
0x70, 0x70, # OOO OOO
0xC0, 0x10, # OO O
0xC0, 0x00, # OO
0xC0, 0x00, # OO
0xE0, 0x00, # OOO
0x78, 0x00, # OOOO
0x3F, 0xC0, # OOOOOOOO
0x0F, 0xF0, # OOOOOOOO
0x00, 0x78, # OOOO
0x00, 0x18, # OO
0x00, 0x18, # OO
0x00, 0x18, # OO
0x80, 0x18, # O OO
0xE0, 0x70, # OOO OOO
0xFF, 0xE0, # OOOOOOOOOOO
0x3F, 0x80, # OOOOOOO
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
# @2280 'T' (14 pixels wide)
0x00, 0x00, #
0xFF, 0xFC, # OOOOOOOOOOOOOO
0xFF, 0xFC, # OOOOOOOOOOOOOO
0x03, 0x00, # OO
0x03, 0x00, # OO
0x03, 0x00, # OO
0x03, 0x00, # OO
0x03, 0x00, # OO
0x03, 0x00, # OO
0x03, 0x00, # OO
0x03, 0x00, # OO
0x03, 0x00, # OO
0x03, 0x00, # OO
0x03, 0x00, # OO
0x03, 0x00, # OO
0x03, 0x00, # OO
0x03, 0x00, # OO
0x03, 0x00, # OO
0x03, 0x00, # OO
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
# @2328 'U' (14 pixels wide)
0x00, 0x00, #
0xC0, 0x0C, # OO OO
0xC0, 0x0C, # OO OO
0xC0, 0x0C, # OO OO
0xC0, 0x0C, # OO OO
0xC0, 0x0C, # OO OO
0xC0, 0x0C, # OO OO
0xC0, 0x0C, # OO OO
0xC0, 0x0C, # OO OO
0xC0, 0x0C, # OO OO
0xC0, 0x0C, # OO OO
0xC0, 0x0C, # OO OO
0xC0, 0x0C, # OO OO
0xC0, 0x0C, # OO OO
0xC0, 0x0C, # OO OO
0x60, 0x18, # OO OO
0x70, 0x38, # OOO OOO
0x3F, 0xF0, # OOOOOOOOOO
0x0F, 0xC0, # OOOOOO
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
# @2376 'V' (16 pixels wide)
0x00, 0x00, #
0xC0, 0x03, # OO OO
0x60, 0x06, # OO OO
0x60, 0x06, # OO OO
0x60, 0x06, # OO OO
0x30, 0x0C, # OO OO
0x30, 0x0C, # OO OO
0x30, 0x0C, # OO OO
0x18, 0x18, # OO OO
0x18, 0x18, # OO OO
0x18, 0x18, # OO OO
0x0C, 0x30, # OO OO
0x0C, 0x30, # OO OO
0x0E, 0x70, # OOO OOO
0x06, 0x60, # OO OO
0x06, 0x60, # OO OO
0x03, 0xC0, # OOOO
0x03, 0xC0, # OOOO
0x03, 0xC0, # OOOO
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
# @2424 'W' (22 pixels wide)
0x00, 0x00, 0x00, #
0xC0, 0x30, 0x0C, # OO OO OO
0xC0, 0x78, 0x0C, # OO OOOO OO
0x60, 0x78, 0x18, # OO OOOO OO
0x60, 0x78, 0x18, # OO OOOO OO
0x60, 0x78, 0x18, # OO OOOO OO
0x70, 0xCC, 0x38, # OOO OO OO OOO
0x30, 0xCC, 0x30, # OO OO OO OO
0x30, 0xCC, 0x30, # OO OO OO OO
0x30, 0xCC, 0x30, # OO OO OO OO
0x38, 0x86, 0x70, # OOO O OO OOO
0x19, 0x86, 0x60, # OO OO OO OO
0x19, 0x86, 0x60, # OO OO OO OO
0x19, 0x86, 0x60, # OO OO OO OO
0x0D, 0x03, 0xE0, # OO O OOOOO
0x0F, 0x03, 0xC0, # OOOO OOOO
0x0F, 0x03, 0xC0, # OOOO OOOO
0x0F, 0x03, 0xC0, # OOOO OOOO
0x06, 0x01, 0xC0, # OO OOO
0x00, 0x00, 0x00, #
0x00, 0x00, 0x00, #
0x00, 0x00, 0x00, #
0x00, 0x00, 0x00, #
0x00, 0x00, 0x00, #
# @2496 'X' (14 pixels wide)
0x00, 0x00, #
0xE0, 0x1C, # OOO OOO
0x60, 0x18, # OO OO
0x30, 0x30, # OO OO
0x38, 0x70, # OOO OOO
0x18, 0x60, # OO OO
0x0C, 0xC0, # OO OO
0x0E, 0xC0, # OOO OO
0x07, 0x80, # OOOO
0x07, 0x80, # OOOO
0x07, 0x80, # OOOO
0x07, 0x80, # OOOO
0x0C, 0xC0, # OO OO
0x0C, 0xE0, # OO OOO
0x18, 0x60, # OO OO
0x38, 0x70, # OOO OOO
0x30, 0x30, # OO OO
0x60, 0x18, # OO OO
0xE0, 0x1C, # OOO OOO
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
# @2544 'Y' (14 pixels wide)
0x00, 0x00, #
0xE0, 0x1C, # OOO OOO
0x60, 0x18, # OO OO
0x70, 0x38, # OOO OOO
0x30, 0x30, # OO OO
0x18, 0x60, # OO OO
0x1C, 0xE0, # OOO OOO
0x0C, 0xC0, # OO OO
0x0F, 0xC0, # OOOOOO
0x07, 0x80, # OOOO
0x03, 0x00, # OO
0x03, 0x00, # OO
0x03, 0x00, # OO
0x03, 0x00, # OO
0x03, 0x00, # OO
0x03, 0x00, # OO
0x03, 0x00, # OO
0x03, 0x00, # OO
0x03, 0x00, # OO
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
# @2592 'Z' (14 pixels wide)
0x00, 0x00, #
0xFF, 0xFC, # OOOOOOOOOOOOOO
0xFF, 0xFC, # OOOOOOOOOOOOOO
0x00, 0x1C, # OOO
0x00, 0x38, # OOO
0x00, 0x70, # OOO
0x00, 0x60, # OO
0x00, 0xC0, # OO
0x01, 0x80, # OO
0x03, 0x80, # OOO
0x07, 0x00, # OOO
0x06, 0x00, # OO
0x0C, 0x00, # OO
0x18, 0x00, # OO
0x38, 0x00, # OOO
0x70, 0x00, # OOO
0xE0, 0x00, # OOO
0xFF, 0xFC, # OOOOOOOOOOOOOO
0xFF, 0xFC, # OOOOOOOOOOOOOO
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
# @2640 '[' (6 pixels wide)
0xFC, # OOOOOO
0xFC, # OOOOOO
0xC0, # OO
0xC0, # OO
0xC0, # OO
0xC0, # OO
0xC0, # OO
0xC0, # OO
0xC0, # OO
0xC0, # OO
0xC0, # OO
0xC0, # OO
0xC0, # OO
0xC0, # OO
0xC0, # OO
0xC0, # OO
0xC0, # OO
0xC0, # OO
0xC0, # OO
0xC0, # OO
0xC0, # OO
0xC0, # OO
0xFC, # OOOOOO
0xFC, # OOOOOO
# @2664 '\' (10 pixels wide)
0xC0, 0x00, # OO
0x60, 0x00, # OO
0x60, 0x00, # OO
0x60, 0x00, # OO
0x30, 0x00, # OO
0x30, 0x00, # OO
0x30, 0x00, # OO
0x18, 0x00, # OO
0x18, 0x00, # OO
0x18, 0x00, # OO
0x0C, 0x00, # OO
0x0C, 0x00, # OO
0x0C, 0x00, # OO
0x06, 0x00, # OO
0x06, 0x00, # OO
0x06, 0x00, # OO
0x03, 0x00, # OO
0x03, 0x00, # OO
0x03, 0x00, # OO
0x01, 0x80, # OO
0x01, 0x80, # OO
0x01, 0x80, # OO
0x00, 0xC0, # OO
0x00, 0x00, #
# @2712 ']' (6 pixels wide)
0xFC, # OOOOOO
0xFC, # OOOOOO
0x0C, # OO
0x0C, # OO
0x0C, # OO
0x0C, # OO
0x0C, # OO
0x0C, # OO
0x0C, # OO
0x0C, # OO
0x0C, # OO
0x0C, # OO
0x0C, # OO
0x0C, # OO
0x0C, # OO
0x0C, # OO
0x0C, # OO
0x0C, # OO
0x0C, # OO
0x0C, # OO
0x0C, # OO
0x0C, # OO
0xFC, # OOOOOO
0xFC, # OOOOOO
# @2736 '^' (15 pixels wide)
0x00, 0x00, #
0x03, 0x80, # OOO
0x03, 0x80, # OOO
0x06, 0xC0, # OO OO
0x0C, 0x60, # OO OO
0x1C, 0x70, # OOO OOO
0x18, 0x30, # OO OO
0x30, 0x18, # OO OO
0x60, 0x0C, # OO OO
0xE0, 0x0E, # OOO OOO
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
# @2784 '_' (15 pixels wide)
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0xFF, 0xFE, # OOOOOOOOOOOOOOO
0x00, 0x00, #
0x00, 0x00, #
# @2832 '`' (5 pixels wide)
0xE0, # OOO
0x70, # OOO
0x30, # OO
0x18, # OO
0x00, #
0x00, #
0x00, #
0x00, #
0x00, #
0x00, #
0x00, #
0x00, #
0x00, #
0x00, #
0x00, #
0x00, #
0x00, #
0x00, #
0x00, #
0x00, #
0x00, #
0x00, #
0x00, #
0x00, #
# @2856 'a' (11 pixels wide)
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x3F, 0x80, # OOOOOOO
0x7F, 0xC0, # OOOOOOOOO
0x40, 0xE0, # O OOO
0x00, 0x60, # OO
0x0F, 0xE0, # OOOOOOO
0x3F, 0xE0, # OOOOOOOOO
0x70, 0x60, # OOO OO
0xC0, 0x60, # OO OO
0xC0, 0x60, # OO OO
0xC0, 0x60, # OO OO
0xE1, 0xE0, # OOO OOOO
0x7F, 0xE0, # OOOOOOOOOO
0x3E, 0x60, # OOOOO OO
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
# @2904 'b' (12 pixels wide)
0xC0, 0x00, # OO
0xC0, 0x00, # OO
0xC0, 0x00, # OO
0xC0, 0x00, # OO
0xC0, 0x00, # OO
0xC0, 0x00, # OO
0xCF, 0x80, # OO OOOOO
0xFF, 0xC0, # OOOOOOOOOO
0xF0, 0xE0, # OOOO OOO
0xC0, 0x70, # OO OOO
0xC0, 0x30, # OO OO
0xC0, 0x30, # OO OO
0xC0, 0x30, # OO OO
0xC0, 0x30, # OO OO
0xC0, 0x30, # OO OO
0xC0, 0x60, # OO OO
0xE0, 0xE0, # OOO OOO
0xFF, 0xC0, # OOOOOOOOOO
0xDF, 0x00, # OO OOOOO
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
# @2952 'c' (11 pixels wide)
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x0F, 0x80, # OOOOO
0x3F, 0xE0, # OOOOOOOOO
0x70, 0x60, # OOO OO
0x60, 0x20, # OO O
0xC0, 0x00, # OO
0xC0, 0x00, # OO
0xC0, 0x00, # OO
0xC0, 0x00, # OO
0xC0, 0x00, # OO
0x60, 0x20, # OO O
0x70, 0x60, # OOO OO
0x3F, 0xE0, # OOOOOOOOO
0x0F, 0x80, # OOOOO
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
# @3000 'd' (12 pixels wide)
0x00, 0x30, # OO
0x00, 0x30, # OO
0x00, 0x30, # OO
0x00, 0x30, # OO
0x00, 0x30, # OO
0x00, 0x30, # OO
0x0F, 0xB0, # OOOOO OO
0x3F, 0xF0, # OOOOOOOOOO
0x70, 0x30, # OOO OO
0x60, 0x30, # OO OO
0xC0, 0x30, # OO OO
0xC0, 0x30, # OO OO
0xC0, 0x30, # OO OO
0xC0, 0x30, # OO OO
0xC0, 0x30, # OO OO
0xE0, 0x30, # OOO OO
0x70, 0xF0, # OOO OOOO
0x3F, 0xF0, # OOOOOOOOOO
0x1F, 0x30, # OOOOO OO
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
# @3048 'e' (12 pixels wide)
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x0F, 0x80, # OOOOO
0x3F, 0xE0, # OOOOOOOOO
0x70, 0x60, # OOO OO
0x60, 0x30, # OO OO
0xC0, 0x30, # OO OO
0xFF, 0xF0, # OOOOOOOOOOOO
0xFF, 0xF0, # OOOOOOOOOOOO
0xC0, 0x00, # OO
0xC0, 0x00, # OO
0x60, 0x10, # OO O
0x70, 0x70, # OOO OOO
0x3F, 0xF0, # OOOOOOOOOO
0x0F, 0xC0, # OOOOOO
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
# @3096 'f' (8 pixels wide)
0x1F, # OOOOO
0x3F, # OOOOOO
0x30, # OO
0x60, # OO
0x60, # OO
0x60, # OO
0xFE, # OOOOOOO
0xFE, # OOOOOOO
0x60, # OO
0x60, # OO
0x60, # OO
0x60, # OO
0x60, # OO
0x60, # OO
0x60, # OO
0x60, # OO
0x60, # OO
0x60, # OO
0x60, # OO
0x00, #
0x00, #
0x00, #
0x00, #
0x00, #
# @3120 'g' (12 pixels wide)
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x0F, 0xB0, # OOOOO OO
0x3F, 0xF0, # OOOOOOOOOO
0x70, 0x70, # OOO OOO
0x60, 0x30, # OO OO
0xC0, 0x30, # OO OO
0xC0, 0x30, # OO OO
0xC0, 0x30, # OO OO
0xC0, 0x30, # OO OO
0xC0, 0x30, # OO OO
0xE0, 0x30, # OOO OO
0x70, 0xF0, # OOO OOOO
0x3F, 0xF0, # OOOOOOOOOO
0x1F, 0x30, # OOOOO OO
0x00, 0x30, # OO
0x00, 0x30, # OO
0x60, 0xE0, # OO OOO
0x7F, 0xC0, # OOOOOOOOO
0x7F, 0x80, # OOOOOOOO
# @3168 'h' (11 pixels wide)
0xC0, 0x00, # OO
0xC0, 0x00, # OO
0xC0, 0x00, # OO
0xC0, 0x00, # OO
0xC0, 0x00, # OO
0xC0, 0x00, # OO
0xCF, 0x80, # OO OOOOO
0xFF, 0xC0, # OOOOOOOOOO
0xF0, 0xE0, # OOOO OOO
0xC0, 0x60, # OO OO
0xC0, 0x60, # OO OO
0xC0, 0x60, # OO OO
0xC0, 0x60, # OO OO
0xC0, 0x60, # OO OO
0xC0, 0x60, # OO OO
0xC0, 0x60, # OO OO
0xC0, 0x60, # OO OO
0xC0, 0x60, # OO OO
0xC0, 0x60, # OO OO
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
# @3216 'i' (2 pixels wide)
0x00, #
0xC0, # OO
0xC0, # OO
0x00, #
0x00, #
0x00, #
0xC0, # OO
0xC0, # OO
0xC0, # OO
0xC0, # OO
0xC0, # OO
0xC0, # OO
0xC0, # OO
0xC0, # OO
0xC0, # OO
0xC0, # OO
0xC0, # OO
0xC0, # OO
0xC0, # OO
0x00, #
0x00, #
0x00, #
0x00, #
0x00, #
# @3240 'j' (7 pixels wide)
0x00, #
0x06, # OO
0x06, # OO
0x00, #
0x00, #
0x00, #
0x3E, # OOOOO
0x3E, # OOOOO
0x06, # OO
0x06, # OO
0x06, # OO
0x06, # OO
0x06, # OO
0x06, # OO
0x06, # OO
0x06, # OO
0x06, # OO
0x06, # OO
0x06, # OO
0x06, # OO
0x06, # OO
0x0E, # OOO
0xFC, # OOOOOO
0xF8, # OOOOO
# @3264 'k' (12 pixels wide)
0xC0, 0x00, # OO
0xC0, 0x00, # OO
0xC0, 0x00, # OO
0xC0, 0x00, # OO
0xC0, 0x00, # OO
0xC0, 0x00, # OO
0xC0, 0x60, # OO OO
0xC0, 0xC0, # OO OO
0xC1, 0x80, # OO OO
0xC3, 0x00, # OO OO
0xC6, 0x00, # OO OO
0xCC, 0x00, # OO OO
0xDC, 0x00, # OO OOO
0xEE, 0x00, # OOO OOO
0xC7, 0x00, # OO OOO
0xC3, 0x80, # OO OOO
0xC1, 0xC0, # OO OOO
0xC0, 0xE0, # OO OOO
0xC0, 0x70, # OO OOO
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
# @3312 'l' (2 pixels wide)
0xC0, # OO
0xC0, # OO
0xC0, # OO
0xC0, # OO
0xC0, # OO
0xC0, # OO
0xC0, # OO
0xC0, # OO
0xC0, # OO
0xC0, # OO
0xC0, # OO
0xC0, # OO
0xC0, # OO
0xC0, # OO
0xC0, # OO
0xC0, # OO
0xC0, # OO
0xC0, # OO
0xC0, # OO
0x00, #
0x00, #
0x00, #
0x00, #
0x00, #
# @3336 'm' (18 pixels wide)
0x00, 0x00, 0x00, #
0x00, 0x00, 0x00, #
0x00, 0x00, 0x00, #
0x00, 0x00, 0x00, #
0x00, 0x00, 0x00, #
0x00, 0x00, 0x00, #
0xCF, 0x0F, 0x00, # OO OOOO OOOO
0xFF, 0xBF, 0x80, # OOOOOOOOO OOOOOOO
0xF1, 0xF1, 0xC0, # OOOO OOOOO OOO
0xC0, 0xC0, 0xC0, # OO OO OO
0xC0, 0xC0, 0xC0, # OO OO OO
0xC0, 0xC0, 0xC0, # OO OO OO
0xC0, 0xC0, 0xC0, # OO OO OO
0xC0, 0xC0, 0xC0, # OO OO OO
0xC0, 0xC0, 0xC0, # OO OO OO
0xC0, 0xC0, 0xC0, # OO OO OO
0xC0, 0xC0, 0xC0, # OO OO OO
0xC0, 0xC0, 0xC0, # OO OO OO
0xC0, 0xC0, 0xC0, # OO OO OO
0x00, 0x00, 0x00, #
0x00, 0x00, 0x00, #
0x00, 0x00, 0x00, #
0x00, 0x00, 0x00, #
0x00, 0x00, 0x00, #
# @3408 'n' (11 pixels wide)
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0xCF, 0x80, # OO OOOOO
0xFF, 0xC0, # OOOOOOOOOO
0xF0, 0xE0, # OOOO OOO
0xC0, 0x60, # OO OO
0xC0, 0x60, # OO OO
0xC0, 0x60, # OO OO
0xC0, 0x60, # OO OO
0xC0, 0x60, # OO OO
0xC0, 0x60, # OO OO
0xC0, 0x60, # OO OO
0xC0, 0x60, # OO OO
0xC0, 0x60, # OO OO
0xC0, 0x60, # OO OO
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
# @3456 'o' (13 pixels wide)
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x0F, 0x80, # OOOOO
0x3F, 0xE0, # OOOOOOOOO
0x70, 0x70, # OOO OOO
0x60, 0x30, # OO OO
0xC0, 0x18, # OO OO
0xC0, 0x18, # OO OO
0xC0, 0x18, # OO OO
0xC0, 0x18, # OO OO
0xC0, 0x18, # OO OO
0x60, 0x30, # OO OO
0x70, 0x70, # OOO OOO
0x3F, 0xE0, # OOOOOOOOO
0x0F, 0x80, # OOOOO
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
# @3504 'p' (12 pixels wide)
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0xCF, 0x80, # OO OOOOO
0xFF, 0xC0, # OOOOOOOOOO
0xF0, 0xE0, # OOOO OOO
0xC0, 0x70, # OO OOO
0xC0, 0x30, # OO OO
0xC0, 0x30, # OO OO
0xC0, 0x30, # OO OO
0xC0, 0x30, # OO OO
0xC0, 0x30, # OO OO
0xC0, 0x60, # OO OO
0xC0, 0xE0, # OO OOO
0xFF, 0xC0, # OOOOOOOOOO
0xDF, 0x00, # OO OOOOO
0xC0, 0x00, # OO
0xC0, 0x00, # OO
0xC0, 0x00, # OO
0xC0, 0x00, # OO
0xC0, 0x00, # OO
# @3552 'q' (12 pixels wide)
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x0F, 0xB0, # OOOOO OO
0x3F, 0xF0, # OOOOOOOOOO
0x70, 0x70, # OOO OOO
0x60, 0x30, # OO OO
0xC0, 0x30, # OO OO
0xC0, 0x30, # OO OO
0xC0, 0x30, # OO OO
0xC0, 0x30, # OO OO
0xC0, 0x30, # OO OO
0xE0, 0x30, # OOO OO
0x70, 0xF0, # OOO OOOO
0x3F, 0xF0, # OOOOOOOOOO
0x1F, 0x30, # OOOOO OO
0x00, 0x30, # OO
0x00, 0x30, # OO
0x00, 0x30, # OO
0x00, 0x30, # OO
0x00, 0x30, # OO
# @3600 'r' (8 pixels wide)
0x00, #
0x00, #
0x00, #
0x00, #
0x00, #
0x00, #
0xCF, # OO OOOO
0xDF, # OO OOOOO
0xF0, # OOOO
0xC0, # OO
0xC0, # OO
0xC0, # OO
0xC0, # OO
0xC0, # OO
0xC0, # OO
0xC0, # OO
0xC0, # OO
0xC0, # OO
0xC0, # OO
0x00, #
0x00, #
0x00, #
0x00, #
0x00, #
# @3624 's' (11 pixels wide)
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x1F, 0x80, # OOOOOO
0x7F, 0xC0, # OOOOOOOOO
0xE0, 0x40, # OOO O
0xC0, 0x00, # OO
0xC0, 0x00, # OO
0x7C, 0x00, # OOOOO
0x3F, 0x80, # OOOOOOO
0x03, 0xE0, # OOOOO
0x00, 0x60, # OO
0x80, 0x60, # O OO
0xE0, 0xE0, # OOO OOO
0xFF, 0xC0, # OOOOOOOOOO
0x3F, 0x00, # OOOOOO
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
# @3672 't' (7 pixels wide)
0x00, #
0x00, #
0x60, # OO
0x60, # OO
0x60, # OO
0x60, # OO
0xFE, # OOOOOOO
0xFE, # OOOOOOO
0x60, # OO
0x60, # OO
0x60, # OO
0x60, # OO
0x60, # OO
0x60, # OO
0x60, # OO
0x60, # OO
0x70, # OOO
0x3E, # OOOOO
0x1E, # OOOO
0x00, #
0x00, #
0x00, #
0x00, #
0x00, #
# @3696 'u' (11 pixels wide)
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0xC0, 0x60, # OO OO
0xC0, 0x60, # OO OO
0xC0, 0x60, # OO OO
0xC0, 0x60, # OO OO
0xC0, 0x60, # OO OO
0xC0, 0x60, # OO OO
0xC0, 0x60, # OO OO
0xC0, 0x60, # OO OO
0xC0, 0x60, # OO OO
0xC0, 0x60, # OO OO
0xE1, 0xE0, # OOO OOOO
0x7F, 0xE0, # OOOOOOOOOO
0x3E, 0x60, # OOOOO OO
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
# @3744 'v' (12 pixels wide)
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0xC0, 0x30, # OO OO
0x60, 0x60, # OO OO
0x60, 0x60, # OO OO
0x60, 0x60, # OO OO
0x30, 0xC0, # OO OO
0x30, 0xC0, # OO OO
0x39, 0xC0, # OOO OOO
0x19, 0x80, # OO OO
0x19, 0x80, # OO OO
0x0F, 0x00, # OOOO
0x0F, 0x00, # OOOO
0x0F, 0x00, # OOOO
0x06, 0x00, # OO
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
# @3792 'w' (18 pixels wide)
0x00, 0x00, 0x00, #
0x00, 0x00, 0x00, #
0x00, 0x00, 0x00, #
0x00, 0x00, 0x00, #
0x00, 0x00, 0x00, #
0x00, 0x00, 0x00, #
0xC0, 0xC0, 0xC0, # OO OO OO
0xC0, 0xE0, 0xC0, # OO OOO OO
0x61, 0xE1, 0x80, # OO OOOO OO
0x61, 0xE1, 0x80, # OO OOOO OO
0x61, 0x31, 0x80, # OO O OO OO
0x73, 0x31, 0x80, # OOO OO OO OO
0x33, 0x33, 0x00, # OO OO OO OO
0x32, 0x1B, 0x00, # OO O OO OO
0x36, 0x1B, 0x00, # OO OO OO OO
0x1E, 0x1A, 0x00, # OOOO OO O
0x1C, 0x0E, 0x00, # OOO OOO
0x1C, 0x0E, 0x00, # OOO OOO
0x1C, 0x0E, 0x00, # OOO OOO
0x00, 0x00, 0x00, #
0x00, 0x00, 0x00, #
0x00, 0x00, 0x00, #
0x00, 0x00, 0x00, #
0x00, 0x00, 0x00, #
# @3864 'x' (12 pixels wide)
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0xE0, 0x70, # OOO OOO
0x70, 0xE0, # OOO OOO
0x30, 0xC0, # OO OO
0x19, 0x80, # OO OO
0x1F, 0x80, # OOOOOO
0x0F, 0x00, # OOOO
0x06, 0x00, # OO
0x0F, 0x00, # OOOO
0x1F, 0x80, # OOOOOO
0x19, 0x80, # OO OO
0x30, 0xC0, # OO OO
0x70, 0xE0, # OOO OOO
0xE0, 0x70, # OOO OOO
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
# @3912 'y' (12 pixels wide)
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0xC0, 0x30, # OO OO
0x60, 0x60, # OO OO
0x60, 0x60, # OO OO
0x70, 0xE0, # OOO OOO
0x30, 0xC0, # OO OO
0x30, 0xC0, # OO OO
0x19, 0x80, # OO OO
0x19, 0x80, # OO OO
0x1F, 0x80, # OOOOOO
0x0F, 0x00, # OOOO
0x0F, 0x00, # OOOO
0x06, 0x00, # OO
0x06, 0x00, # OO
0x0E, 0x00, # OOO
0x0C, 0x00, # OO
0x0C, 0x00, # OO
0x18, 0x00, # OO
0x18, 0x00, # OO
# @3960 'z' (11 pixels wide)
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0xFF, 0xE0, # OOOOOOOOOOO
0xFF, 0xE0, # OOOOOOOOOOO
0x00, 0xE0, # OOO
0x01, 0xC0, # OOO
0x03, 0x80, # OOO
0x07, 0x00, # OOO
0x0E, 0x00, # OOO
0x1C, 0x00, # OOO
0x38, 0x00, # OOO
0x70, 0x00, # OOO
0xE0, 0x00, # OOO
0xFF, 0xE0, # OOOOOOOOOOO
0xFF, 0xE0, # OOOOOOOOOOO
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
# @4008 '{' (11 pixels wide)
0x01, 0xE0, # OOOO
0x03, 0xE0, # OOOOO
0x07, 0x00, # OOO
0x06, 0x00, # OO
0x06, 0x00, # OO
0x06, 0x00, # OO
0x06, 0x00, # OO
0x06, 0x00, # OO
0x06, 0x00, # OO
0x0C, 0x00, # OO
0x1C, 0x00, # OOO
0xF0, 0x00, # OOOO
0xF0, 0x00, # OOOO
0x1C, 0x00, # OOO
0x0C, 0x00, # OO
0x06, 0x00, # OO
0x06, 0x00, # OO
0x06, 0x00, # OO
0x06, 0x00, # OO
0x06, 0x00, # OO
0x06, 0x00, # OO
0x07, 0x00, # OOO
0x03, 0xE0, # OOOOO
0x01, 0xE0, # OOOO
# @4056 '|' (2 pixels wide)
0xC0, # OO
0xC0, # OO
0xC0, # OO
0xC0, # OO
0xC0, # OO
0xC0, # OO
0xC0, # OO
0xC0, # OO
0xC0, # OO
0xC0, # OO
0xC0, # OO
0xC0, # OO
0xC0, # OO
0xC0, # OO
0xC0, # OO
0xC0, # OO
0xC0, # OO
0xC0, # OO
0xC0, # OO
0xC0, # OO
0xC0, # OO
0xC0, # OO
0xC0, # OO
0xC0, # OO
# @4080 '}' (11 pixels wide)
0xF0, 0x00, # OOOO
0xF8, 0x00, # OOOOO
0x1C, 0x00, # OOO
0x0C, 0x00, # OO
0x0C, 0x00, # OO
0x0C, 0x00, # OO
0x0C, 0x00, # OO
0x0C, 0x00, # OO
0x0C, 0x00, # OO
0x06, 0x00, # OO
0x07, 0x00, # OOO
0x01, 0xE0, # OOOO
0x01, 0xE0, # OOOO
0x07, 0x00, # OOO
0x06, 0x00, # OO
0x0C, 0x00, # OO
0x0C, 0x00, # OO
0x0C, 0x00, # OO
0x0C, 0x00, # OO
0x0C, 0x00, # OO
0x0C, 0x00, # OO
0x1C, 0x00, # OOO
0xF8, 0x00, # OOOOO
0xF0, 0x00, # OOOO
# @4128 '~' (15 pixels wide)
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x3C, 0x06, # OOOO OO
0x7E, 0x06, # OOOOOO OO
0x63, 0x06, # OO OO OO
0xC1, 0x8C, # OO OO OO
0xC0, 0xFC, # OO OOOOOO
0xC0, 0x78, # OO OOOO
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
# @4176 '°' (9 pixels wide)
0x00, 0x00, #
0x3E, 0x00, # OOOOO
0x7F, 0x00, # OOOOOOO
0xE3, 0x80, # OOO OOO
0xC1, 0x80, # OO OO
0xC1, 0x80, # OO OO
0xC1, 0x80, # OO OO
0xE3, 0x80, # OOO OOO
0x7F, 0x00, # OOOOOOO
0x3E, 0x00, # OOOOO
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
)
descriptors = (
(2,0),# !
(7,24),# "
(15,48),# #
(11,96),# $
(22,144),# %
(17,216),# &
(2,288),# '
(7,312),# (
(7,336),# )
(12,360),# *
(14,408),# +
(5,456),# ,
(7,480),# -
(3,504),# .
(10,528),# /
(11,576),# 0
(10,624),# 1
(11,672),# 2
(11,720),# 3
(14,768),# 4
(11,816),# 5
(11,864),# 6
(12,912),# 7
(11,960),# 8
(11,1008),# 9
(3,1056),# :
(5,1080),# ;
(14,1104),# <
(14,1152),# =
(14,1200),# >
(10,1248),# ?
(20,1296),# @
(16,1368),# A
(13,1416),# B
(15,1464),# C
(16,1512),# D
(12,1560),# E
(12,1608),# F
(16,1656),# G
(14,1704),# H
(6,1752),# I
(8,1776),# J
(15,1800),# K
(11,1848),# L
(16,1896),# M
(14,1944),# N
(17,1992),# O
(11,2064),# P
(17,2112),# Q
(14,2184),# R
(13,2232),# S
(14,2280),# T
(14,2328),# U
(16,2376),# V
(22,2424),# W
(14,2496),# X
(14,2544),# Y
(14,2592),# Z
(6,2640),# [
(10,2664),# \
(6,2712),# ]
(15,2736),# ^
(15,2784),# _
(5,2832),# `
(11,2856),# a
(12,2904),# b
(11,2952),# c
(12,3000),# d
(12,3048),# e
(8,3096),# f
(12,3120),# g
(11,3168),# h
(2,3216),# i
(7,3240),# j
(12,3264),# k
(2,3312),# l
(18,3336),# m
(11,3408),# n
(13,3456),# o
(12,3504),# p
(12,3552),# q
(8,3600),# r
(11,3624),# s
(7,3672),# t
(11,3696),# u
(12,3744),# v
(18,3792),# w
(12,3864),# x
(12,3912),# y
(11,3960),# z
(11,4008),# {
(2,4056),# |
(11,4080),# }
(15,4128),# ~
(9,4176),# °
)
kerning = (
(2,2,2,2,2,2,2,2,1,2,2,1,2,2,0,2,2,2,2,2,2,2,2,2,2,2,1,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,1,2,2,0,1,2,2,2,2,2,2,2,2,2,0,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,),
(7,7,6,7,7,6,7,6,7,7,1,2,0,4,1,7,7,7,7,3,6,6,7,7,7,7,6,0,0,7,7,6,3,7,6,7,7,7,6,7,7,5,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,4,0,7,5,7,3,3,3,7,3,7,7,5,7,7,7,7,3,7,3,7,4,7,7,7,7,7,7,7,2,7,7,0,7,),
(15,15,14,15,15,14,15,14,12,13,11,10,11,12,10,15,12,13,13,14,14,14,13,15,15,15,14,13,11,15,13,14,12,15,14,15,15,15,14,15,13,11,15,15,15,15,15,15,15,15,15,13,15,13,14,13,13,13,15,13,13,12,0,12,13,15,14,14,14,15,14,15,15,13,15,15,15,15,14,15,14,15,12,15,15,15,15,15,15,15,14,15,13,14,15,),
(11,10,10,11,10,11,10,11,7,7,9,9,9,11,9,11,10,11,11,11,11,11,8,11,10,11,9,9,11,11,8,11,10,11,11,11,11,11,10,11,10,11,11,11,11,11,11,11,11,11,11,6,11,8,9,9,7,11,11,8,7,7,6,6,11,11,11,11,11,10,11,11,11,8,11,11,11,11,11,11,11,11,11,10,11,10,10,10,10,11,10,11,7,11,10,),
(22,16,22,22,19,22,20,22,18,16,22,20,22,22,20,22,21,22,22,22,22,22,19,22,20,22,20,22,22,22,19,22,21,22,22,22,22,22,22,22,21,22,22,22,22,22,22,22,22,22,22,16,22,18,19,20,16,22,22,18,18,20,7,17,22,22,22,22,22,21,22,22,22,17,22,22,22,22,22,22,22,22,22,21,22,20,21,20,20,22,22,22,18,22,18,),
(17,10,15,16,14,14,15,16,13,14,14,16,14,17,15,14,17,17,16,14,16,14,15,15,15,17,16,14,14,16,14,14,17,17,14,17,17,17,14,17,17,17,17,17,17,17,14,17,14,17,16,11,14,11,12,17,11,17,17,11,13,14,2,12,15,17,14,14,14,16,14,17,17,12,17,17,17,17,14,17,14,17,16,14,15,13,14,17,13,17,14,17,13,14,13,),
(2,2,1,2,2,1,2,1,2,2,0,0,0,0,0,2,2,2,2,0,1,1,2,2,2,2,1,0,0,2,2,1,0,2,1,2,2,2,1,2,2,0,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,0,0,2,0,2,0,0,0,2,0,2,2,0,2,2,2,2,0,2,0,2,0,2,2,2,2,2,2,2,0,2,2,0,2,),
(6,7,2,3,4,3,7,3,7,5,2,7,2,4,6,3,4,5,5,2,5,3,6,4,4,4,7,2,2,3,5,2,3,6,3,6,6,6,3,6,6,4,6,6,6,6,3,6,3,6,4,6,6,6,6,6,6,6,7,7,7,2,5,7,3,7,2,2,2,4,6,7,6,7,7,7,3,3,2,7,2,3,3,4,3,3,3,3,4,3,2,7,7,2,4,),
(7,6,7,7,7,7,6,7,3,7,7,5,7,7,5,7,6,7,7,7,7,7,4,7,7,7,6,7,7,7,5,7,6,7,7,7,7,7,7,7,6,7,7,7,7,7,7,7,7,7,7,4,7,5,5,6,4,7,7,4,4,7,4,3,7,7,7,7,7,7,7,7,7,5,7,7,7,7,7,7,7,7,7,7,7,6,7,6,6,7,7,7,4,7,7,),
(12,12,8,12,12,10,12,12,10,12,7,7,7,9,7,12,11,12,12,10,11,12,12,11,12,12,11,7,12,9,12,12,8,12,12,12,12,12,12,12,12,10,12,12,12,12,12,12,12,12,11,12,12,11,12,11,11,12,12,11,8,11,0,10,11,12,11,11,11,11,11,12,12,9,12,12,12,12,11,12,11,12,12,11,12,11,11,10,11,11,7,12,9,10,11,),
(14,8,10,13,12,13,12,14,9,9,14,9,14,11,10,14,10,9,8,14,8,14,8,13,13,11,9,14,8,8,11,14,11,14,14,14,14,14,14,14,12,8,14,14,14,14,14,14,14,14,10,8,14,11,12,9,8,9,14,10,10,8,0,9,12,14,14,14,14,13,14,14,14,9,14,14,14,14,14,14,14,14,14,13,14,12,13,11,12,9,14,14,9,14,8,),
(4,0,2,5,0,4,3,5,2,0,0,3,0,5,3,4,4,5,5,0,5,4,2,4,3,5,3,0,0,5,1,3,4,5,3,5,5,5,3,5,4,5,5,5,5,5,3,5,3,5,5,0,4,0,1,4,0,5,5,0,2,0,3,0,5,5,4,4,4,4,4,5,5,2,5,5,5,5,4,5,4,5,5,4,5,1,2,4,1,5,0,5,2,0,0,),
(7,0,3,6,5,6,5,7,2,2,7,2,7,4,3,7,3,2,0,7,0,7,1,6,6,4,2,7,0,0,4,7,4,7,7,7,7,7,7,7,5,1,7,7,7,7,7,7,7,7,3,1,7,4,5,2,1,2,7,3,3,0,0,2,5,7,7,7,7,6,7,7,7,2,7,7,7,7,7,7,7,7,7,6,7,5,6,4,5,2,7,7,2,7,0,),
(3,0,1,3,0,2,1,3,0,0,0,2,0,3,1,2,3,3,3,0,3,2,1,2,1,3,2,0,0,3,0,1,3,3,1,3,3,3,1,3,3,3,3,3,3,3,1,3,1,3,3,0,2,0,0,3,0,3,3,0,0,0,0,0,3,3,2,2,2,2,2,3,3,0,3,3,3,3,2,3,2,3,3,2,3,0,0,3,0,3,0,3,0,0,0,),
(9,10,7,8,9,8,10,7,10,9,6,5,6,7,2,8,9,9,9,6,8,7,9,8,8,8,7,6,7,8,9,7,4,9,7,9,9,9,7,9,9,7,9,9,9,9,8,9,8,9,8,9,9,9,9,9,9,9,10,10,10,7,3,10,6,10,6,6,6,8,6,10,9,6,10,10,8,8,6,8,6,8,7,8,8,8,8,8,8,8,6,10,10,6,9,),
(11,11,11,11,11,11,11,11,7,11,11,8,11,10,8,11,10,10,10,11,10,11,9,11,11,11,10,11,11,11,10,11,9,11,11,11,11,11,11,11,9,10,11,11,11,11,11,11,11,11,11,9,11,9,10,8,9,10,11,9,8,11,0,7,11,11,11,11,11,11,11,11,11,9,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,8,11,11,),
(10,6,8,10,6,8,8,9,6,6,6,9,6,10,8,8,10,10,10,6,10,8,8,9,8,10,9,6,6,10,7,8,10,10,7,10,10,10,6,10,10,10,10,10,10,10,7,10,7,10,10,6,8,6,6,10,6,10,10,5,6,6,0,5,9,10,8,8,8,9,8,10,10,5,10,10,10,10,8,10,8,10,10,8,9,6,7,10,6,10,6,10,6,6,6,),
(11,10,9,11,10,9,10,10,7,10,8,10,8,11,9,10,11,11,11,8,11,10,9,10,10,11,10,8,10,11,9,10,11,11,10,11,11,11,10,11,11,11,11,11,11,11,10,11,10,11,11,9,10,9,9,11,8,11,11,8,7,9,0,7,10,11,9,9,9,10,9,11,11,8,11,11,11,11,9,11,9,11,11,10,10,10,10,11,10,11,8,11,7,8,10,),
(11,11,11,11,11,11,11,11,8,10,11,8,11,10,8,11,11,11,11,11,10,11,10,11,11,11,10,11,11,11,11,11,10,11,11,11,11,11,11,11,10,10,11,11,11,11,11,11,11,11,11,10,11,10,10,9,10,10,11,10,8,9,0,8,11,11,11,11,11,11,11,11,11,9,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,8,11,11,),
(14,11,14,11,11,14,12,14,10,11,11,10,11,11,11,14,11,11,11,14,11,14,11,14,11,11,10,13,14,11,11,14,12,14,14,14,14,14,14,14,12,11,14,14,14,14,14,14,14,14,11,11,14,11,11,11,11,11,14,10,11,11,0,10,14,14,14,14,14,13,14,14,14,9,14,14,14,14,14,14,14,14,12,13,14,12,12,11,11,11,14,14,11,14,11,),
(11,11,11,10,10,11,11,11,10,11,11,8,11,10,8,11,9,11,11,11,10,11,11,11,9,10,8,11,11,10,11,11,10,11,11,11,11,11,11,11,11,10,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,10,11,10,0,10,11,11,11,11,11,10,11,11,11,6,11,11,11,11,11,11,11,11,11,10,11,9,10,8,9,10,11,11,11,11,10,),
(11,9,11,10,9,11,9,11,8,9,11,8,11,10,8,11,9,10,11,11,10,11,9,11,9,10,8,11,11,10,9,11,10,11,11,11,11,11,11,11,9,10,11,11,11,11,11,11,11,11,11,9,11,9,9,9,9,10,11,8,9,10,0,8,11,11,11,11,11,10,11,11,11,6,11,11,11,11,11,11,11,11,11,10,11,9,10,8,9,10,11,11,9,11,8,),
(12,12,10,11,12,11,12,10,11,12,9,7,9,9,6,11,12,12,12,8,11,11,12,12,12,11,10,8,10,11,12,10,7,12,11,12,12,12,10,12,12,10,12,12,12,12,11,12,11,12,12,12,12,12,12,12,12,12,12,11,12,9,0,11,9,12,9,9,9,11,9,12,12,9,12,12,11,11,9,11,9,11,10,11,11,11,11,11,11,11,8,12,12,8,12,),
(11,11,11,11,11,11,11,11,8,10,10,8,10,10,8,11,11,11,11,11,10,11,10,11,11,11,10,10,11,11,11,11,10,11,11,11,11,11,11,11,10,10,11,11,11,11,11,11,11,11,11,10,11,10,10,9,10,10,11,10,8,8,0,8,11,11,11,11,11,11,11,11,11,9,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,8,11,11,),
(11,11,11,11,11,11,11,11,7,11,11,7,11,9,7,11,10,10,10,11,10,11,9,11,11,11,10,11,11,11,10,11,9,11,11,11,11,11,11,11,9,9,11,11,11,11,11,11,11,11,11,9,11,9,10,8,9,9,11,9,8,11,0,7,10,11,11,11,11,11,11,11,11,9,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,8,11,11,),
(3,3,2,3,3,2,3,3,0,3,0,2,0,3,1,3,3,3,3,1,3,3,1,3,3,3,2,0,3,3,0,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,0,3,1,2,3,0,3,3,1,0,2,0,0,3,3,2,2,2,3,2,3,3,1,3,3,3,3,2,3,2,3,3,3,3,3,3,3,3,3,0,3,0,1,3,),
(4,4,3,5,4,4,4,5,2,4,0,3,0,5,3,4,4,5,5,2,5,4,2,4,4,5,3,0,4,5,1,4,4,5,4,5,5,5,4,5,4,5,5,5,5,5,4,5,4,5,5,0,4,2,3,4,1,5,5,2,2,3,3,0,5,5,4,4,4,4,4,5,5,2,5,5,5,5,4,5,4,5,5,4,5,4,4,4,4,5,0,5,2,2,4,),
(14,14,13,14,14,13,14,14,10,11,8,12,7,14,12,14,14,14,14,10,14,13,11,14,14,14,13,5,10,14,11,13,13,14,13,14,14,14,13,14,14,14,14,14,14,14,14,14,14,14,14,8,14,12,13,13,11,14,14,12,10,11,0,9,14,14,13,13,13,14,13,14,14,12,14,14,14,14,13,14,13,14,14,14,14,14,14,14,14,14,9,14,10,8,14,),
(14,7,11,14,14,14,12,14,9,14,8,9,7,11,11,14,10,12,10,14,13,14,10,14,14,14,13,11,14,10,11,14,12,14,14,14,14,14,14,14,12,8,14,14,14,14,14,14,14,14,13,8,14,11,12,11,10,12,14,11,10,14,0,9,14,14,14,14,14,13,14,14,14,9,14,14,14,14,14,14,14,14,14,13,14,13,13,12,13,12,11,14,10,14,13,),
(14,7,13,12,11,13,12,14,9,9,14,9,14,11,10,14,10,9,7,14,10,14,8,13,12,11,9,14,11,5,11,14,11,14,14,14,14,14,14,14,12,8,14,14,14,14,14,14,14,14,9,8,14,10,11,9,8,9,14,10,10,11,0,9,12,14,14,14,14,13,14,14,14,9,14,14,14,14,14,14,14,14,13,13,14,12,13,10,12,9,14,14,8,14,9,),
(10,10,9,10,10,9,10,10,7,9,7,5,7,7,5,10,10,10,10,7,9,10,9,10,10,10,9,6,9,10,10,9,6,10,10,10,10,10,10,10,9,7,10,10,10,10,10,10,10,10,10,9,10,9,9,8,9,9,10,9,7,8,0,7,9,10,8,8,8,10,8,10,10,8,10,10,10,10,8,10,8,10,9,10,10,10,10,10,10,10,5,10,7,7,10,),
(20,19,20,20,20,20,19,20,16,20,20,17,20,19,17,20,18,19,19,20,19,20,16,20,20,20,19,20,20,19,17,20,18,20,20,20,20,20,20,20,18,19,20,20,20,20,20,20,20,20,20,16,20,18,18,17,16,19,20,17,16,20,5,15,20,20,20,20,20,20,20,20,20,18,20,20,20,20,20,20,20,20,20,20,20,19,20,19,19,20,20,20,16,20,20,),
(16,12,14,15,12,15,14,15,12,12,13,15,13,16,14,14,16,16,15,14,15,14,14,15,14,16,15,13,14,15,13,14,16,16,14,16,16,16,14,16,16,16,16,16,16,16,14,16,14,16,15,10,14,10,11,16,10,16,16,10,12,13,1,11,15,16,14,15,14,15,15,16,16,11,16,16,16,16,14,16,15,16,15,14,15,12,13,16,12,16,14,16,12,14,12,),
(13,12,13,12,12,13,12,13,9,11,13,10,13,12,10,13,12,12,13,13,12,13,11,13,12,12,11,13,13,12,12,13,12,13,13,13,13,13,13,13,11,12,13,13,13,13,13,13,13,13,13,11,13,11,11,11,11,12,13,11,10,11,0,9,13,13,13,13,13,12,13,13,13,10,13,13,13,13,13,13,13,13,13,12,13,12,12,12,12,12,13,13,10,13,12,),
(15,15,13,15,15,15,15,15,13,15,9,13,8,15,13,14,15,15,15,9,15,14,15,15,15,15,13,8,3,15,15,14,14,15,14,15,15,15,14,15,15,15,15,15,15,15,14,15,14,15,15,15,15,14,15,14,14,15,15,14,13,11,0,13,15,15,14,15,14,14,15,15,15,10,15,15,15,15,14,15,15,15,15,14,15,11,12,14,11,15,10,15,13,3,15,),
(16,15,16,16,16,16,15,16,12,16,16,11,16,13,12,16,14,14,14,16,15,16,12,16,16,16,15,16,16,15,13,16,14,16,16,16,16,16,16,16,14,13,16,16,16,16,16,16,16,16,16,12,16,14,14,12,12,13,16,13,12,16,1,11,15,16,16,16,16,16,16,16,16,14,16,16,16,16,16,16,16,16,16,16,16,15,16,15,15,16,16,16,12,16,16,),
(12,12,10,12,12,10,12,12,11,12,6,11,5,12,10,12,12,12,12,10,12,12,12,11,12,12,11,9,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,11,12,12,0,11,11,12,11,11,11,11,11,12,12,7,12,12,12,12,11,12,11,12,12,11,12,11,11,12,11,12,8,12,12,11,11,),
(12,12,7,11,11,10,12,11,11,12,5,7,5,9,6,11,9,12,12,9,11,11,12,11,11,11,10,8,11,7,12,11,8,12,11,12,12,12,11,12,12,10,12,12,12,12,11,12,11,12,10,12,12,12,12,12,12,12,12,11,12,11,0,11,10,12,10,10,10,10,10,12,12,7,12,12,11,11,10,11,10,11,11,11,11,10,10,9,10,4,7,12,12,10,11,),
(16,16,16,16,16,16,16,16,14,16,16,14,16,16,14,16,16,16,16,16,16,16,16,16,16,16,14,16,16,16,16,16,15,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,15,16,15,15,16,16,15,13,12,1,14,16,16,16,16,16,15,16,16,16,11,16,16,16,16,16,16,16,16,16,15,16,14,15,15,14,16,16,16,13,16,16,),
(14,14,14,14,14,14,14,14,13,14,14,13,14,14,12,14,14,14,14,14,14,14,14,14,14,14,13,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,13,14,14,0,13,14,14,14,14,14,14,14,14,14,12,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,),
(6,6,4,6,5,4,6,5,5,6,4,5,4,6,4,4,6,6,6,4,6,4,6,5,4,6,5,4,4,6,6,4,6,6,4,6,6,6,4,6,6,6,6,6,6,6,4,6,4,6,6,6,6,6,6,6,6,6,6,5,6,4,0,5,5,6,4,4,4,5,4,6,6,2,6,6,6,6,4,6,4,6,6,5,5,4,4,6,4,6,4,6,6,4,5,),
(8,8,8,8,8,8,8,8,7,8,8,6,8,8,6,8,8,8,8,8,8,8,8,8,8,8,7,8,8,8,8,8,7,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,7,8,8,0,7,8,8,8,8,8,8,8,8,8,6,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,),
(15,14,13,14,12,12,14,14,13,13,8,14,8,15,13,12,15,15,14,10,14,12,14,13,13,15,14,8,11,14,13,12,15,15,11,15,15,15,11,15,15,15,15,15,15,15,11,15,11,15,14,14,14,14,14,15,14,15,15,13,14,8,0,13,13,15,12,12,12,14,12,15,15,10,15,15,15,15,12,15,12,15,14,12,13,10,12,15,10,15,10,15,14,10,12,),
(11,4,9,11,5,9,9,10,7,2,5,10,4,11,9,9,11,11,11,2,11,9,9,10,9,11,10,2,2,11,8,9,11,11,8,11,11,11,7,11,11,11,11,11,11,11,8,11,8,11,11,5,9,5,7,11,5,11,11,5,7,2,0,6,10,11,9,9,9,10,9,11,11,6,11,11,11,11,9,11,9,11,11,9,10,7,8,11,6,11,6,11,7,2,2,),
(16,16,16,16,16,16,16,16,15,16,16,15,16,16,14,16,16,16,16,16,16,16,16,16,16,16,15,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,15,16,16,1,15,16,16,16,16,16,16,16,16,16,14,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,),
(14,14,14,14,14,14,14,14,13,14,14,13,14,14,12,14,14,14,14,14,14,14,14,14,14,14,13,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,13,14,14,0,13,14,14,14,14,14,14,14,14,14,12,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,),
(17,17,17,17,17,17,17,17,13,17,17,13,17,15,14,17,16,16,16,17,16,17,14,17,17,17,16,17,17,17,15,17,15,17,17,17,17,17,17,17,15,15,17,17,17,17,17,17,17,17,17,14,17,15,16,14,14,15,17,15,13,17,2,12,17,17,17,17,17,17,17,17,17,15,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,13,17,17,),
(11,11,10,11,11,10,11,11,8,11,9,6,9,8,6,11,11,11,11,9,10,11,9,11,11,11,10,8,11,11,10,11,7,11,11,11,11,11,11,11,9,7,11,11,11,11,11,11,11,11,11,9,11,10,10,9,9,9,11,9,8,10,0,7,10,11,10,10,10,11,10,11,11,9,11,11,11,11,10,11,10,11,11,11,11,11,11,11,11,11,7,11,8,9,11,),
(17,17,17,17,17,17,17,17,17,17,17,17,17,15,17,17,16,16,16,17,16,17,14,17,17,17,17,17,17,17,15,17,15,17,17,17,17,17,17,17,15,15,17,17,17,17,17,17,17,17,17,14,17,15,16,14,14,15,17,15,17,17,13,12,16,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,),
(14,11,12,13,11,11,12,13,10,11,9,13,9,14,12,11,14,14,13,9,13,11,12,12,12,14,13,8,11,13,11,11,14,14,11,14,14,14,11,14,14,14,14,14,14,14,11,14,11,14,13,10,11,10,10,14,9,14,14,9,10,10,0,9,12,14,11,11,11,13,11,14,14,9,14,14,14,14,11,14,11,14,13,11,12,11,11,14,11,14,9,14,10,9,11,),
(13,12,13,12,12,13,12,13,10,12,13,10,13,12,10,13,12,12,13,13,12,13,12,13,12,12,10,13,13,12,12,13,12,13,13,13,13,13,13,13,12,12,13,13,13,13,13,13,13,13,13,12,13,11,12,11,11,12,13,11,10,10,0,10,13,13,13,13,13,12,13,13,13,8,13,13,13,13,13,13,13,13,13,12,13,11,12,10,11,12,13,13,10,13,12,),
(14,14,8,9,13,12,14,11,13,14,8,9,8,11,7,12,11,14,14,8,13,11,14,13,12,11,9,8,8,8,14,9,8,14,11,14,14,14,10,14,14,12,14,14,14,14,11,14,11,14,12,14,14,14,14,14,14,14,14,13,14,8,0,13,8,14,8,8,8,12,8,14,14,9,14,14,8,8,8,8,8,8,8,13,8,8,8,8,8,8,9,14,14,8,13,),
(14,14,14,14,14,14,14,14,13,14,14,11,14,13,11,14,14,14,14,14,13,14,14,14,14,14,13,14,14,14,14,14,12,14,14,14,14,14,14,14,14,13,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,13,14,14,0,13,14,14,14,14,14,14,14,14,14,12,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,),
(16,16,13,14,15,14,16,14,15,15,13,11,13,13,9,14,15,15,15,12,15,14,16,15,15,14,13,12,13,14,15,13,10,16,14,16,16,16,14,16,16,14,16,16,16,16,14,16,14,16,15,16,16,16,16,16,16,16,16,15,16,13,1,15,13,16,13,13,13,14,13,16,16,12,16,16,14,14,13,14,13,14,13,14,14,14,14,14,14,14,12,16,16,12,15,),
(22,22,20,21,21,20,22,20,21,22,20,17,20,19,16,21,21,22,22,19,21,20,22,21,21,21,20,19,20,21,22,20,18,22,20,22,22,22,20,22,22,20,22,22,22,22,21,22,21,22,21,22,22,22,22,22,22,22,22,21,22,20,7,21,19,22,20,20,20,21,20,22,22,19,22,22,21,21,20,21,20,21,20,21,21,21,21,21,21,21,19,22,22,19,21,),
(14,14,12,13,12,12,14,13,13,13,9,13,9,14,12,11,14,14,13,11,13,11,14,12,12,14,13,9,11,13,13,11,14,14,11,14,14,14,11,14,14,14,14,14,14,14,11,14,11,14,13,14,14,14,14,14,14,14,14,13,14,9,0,13,12,14,11,12,11,13,12,14,14,9,14,14,14,14,11,14,12,14,13,12,12,10,11,14,10,14,10,14,14,11,12,),
(14,14,10,11,13,12,14,11,13,13,8,9,8,11,7,12,13,13,13,8,13,11,14,12,12,11,10,8,10,11,13,10,8,14,11,14,14,14,11,14,14,12,14,14,14,14,11,14,11,14,12,14,14,14,14,14,14,14,14,13,14,9,0,13,9,14,9,9,9,12,9,14,14,9,14,14,11,11,9,11,9,11,10,12,11,11,11,11,11,11,8,14,14,8,13,),
(14,14,12,14,14,13,14,13,13,14,8,13,8,14,12,13,14,14,14,7,14,12,14,13,13,14,13,7,9,14,14,12,14,14,12,14,14,14,12,14,14,14,14,14,14,14,12,14,12,14,14,14,14,14,14,14,14,14,14,13,14,9,0,13,13,14,12,12,12,13,12,14,14,9,14,14,14,14,12,14,12,14,14,13,13,11,11,14,11,14,9,14,14,8,14,),
(6,6,2,2,4,3,6,3,6,2,2,6,2,3,6,3,2,4,4,2,5,2,6,3,3,3,6,2,2,2,5,2,2,6,2,6,6,6,2,6,6,4,6,6,6,6,2,6,2,6,3,6,6,6,6,6,6,6,6,6,6,2,2,6,2,6,2,2,2,4,5,6,6,6,6,6,2,2,2,6,2,2,2,2,2,2,2,2,3,2,2,6,6,2,4,),
(8,4,6,8,5,7,8,8,9,5,6,10,6,8,10,7,8,8,8,7,8,7,6,7,6,8,10,6,7,8,5,7,8,8,7,8,8,8,6,8,8,8,8,8,8,8,7,8,7,8,8,3,7,3,4,8,3,8,10,2,10,5,9,5,8,8,7,7,7,7,9,8,8,10,8,8,8,8,7,10,7,8,8,7,8,4,5,8,7,8,6,10,10,7,5,),
(6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,),
(15,12,11,15,14,13,13,15,10,14,9,10,8,12,10,15,11,11,11,13,14,15,9,13,14,14,13,12,15,12,10,15,12,15,15,15,15,15,15,15,13,9,15,15,15,15,15,15,15,15,13,9,15,12,13,10,10,9,15,12,11,15,0,10,13,15,14,14,14,14,14,15,15,11,15,15,15,15,14,15,14,15,15,14,15,14,14,12,14,13,11,15,10,14,13,),
(13,8,0,10,0,0,13,12,13,3,1,14,8,12,14,4,5,4,4,1,4,4,3,4,4,12,14,1,1,1,5,0,0,2,0,0,3,3,0,1,9,7,0,4,0,1,0,4,4,1,2,1,1,0,0,1,1,1,15,8,11,0,15,10,4,3,4,3,3,7,14,4,13,11,3,13,0,4,2,15,5,7,4,8,4,3,0,3,11,4,10,15,12,0,6,),
(5,5,0,0,5,4,5,3,3,4,0,0,0,2,0,4,5,5,5,0,4,3,4,4,4,2,0,0,0,0,5,2,0,5,3,5,5,5,3,5,4,2,5,5,5,5,3,5,3,5,4,4,5,4,4,4,4,4,5,4,4,0,0,3,0,5,0,0,0,4,0,5,4,0,5,5,0,0,0,0,0,0,0,4,0,0,0,0,0,0,0,5,4,0,5,),
(11,9,11,11,11,11,9,11,7,11,11,10,11,11,9,11,11,11,11,11,11,11,9,11,11,11,10,11,11,11,8,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,5,11,8,9,11,7,11,11,8,7,11,0,6,11,11,11,11,11,10,11,11,11,8,11,11,11,11,11,11,11,11,11,10,11,10,10,11,10,11,11,11,7,11,10,),
(12,9,12,12,11,12,10,12,7,11,12,9,12,11,9,12,10,11,11,12,11,12,8,12,11,11,10,12,12,11,9,12,10,12,12,12,12,12,12,12,10,11,12,12,12,12,12,12,12,12,11,6,12,9,10,9,7,11,12,9,8,12,0,7,12,12,12,12,12,11,12,12,12,8,12,12,12,12,12,12,12,12,12,11,12,11,11,9,11,11,12,12,7,12,10,),
(11,9,9,11,11,11,9,11,7,11,5,9,4,11,9,11,11,11,11,9,11,11,8,11,11,11,10,8,11,11,8,11,10,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,5,11,9,9,10,7,11,11,8,7,11,0,6,11,11,10,11,10,11,11,11,11,9,11,11,11,11,10,11,11,11,11,11,11,10,11,10,10,11,7,11,7,10,11,),
(12,12,12,12,12,12,12,12,12,12,12,11,12,12,10,12,12,12,12,12,12,12,12,12,12,12,11,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,0,12,12,12,12,12,12,12,12,12,12,10,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,),
(12,9,12,12,11,12,10,12,8,11,12,10,12,12,10,12,12,12,12,12,12,12,9,12,11,12,10,12,12,12,9,12,11,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,6,12,9,10,11,7,12,12,9,8,12,0,7,12,12,12,12,12,11,12,12,12,9,12,12,12,12,12,12,12,12,12,11,12,11,11,11,11,12,12,12,8,12,11,),
(8,8,6,7,7,6,8,7,8,6,3,3,3,5,2,7,4,6,6,4,7,7,8,7,7,7,6,3,3,7,7,6,3,8,7,8,8,8,7,8,8,6,8,8,8,8,7,8,7,8,7,8,8,8,8,8,8,8,8,8,8,5,0,8,6,8,5,5,5,7,5,8,8,5,8,8,7,7,5,7,5,7,6,7,7,7,7,7,7,7,3,8,8,3,7,),
(12,12,12,12,12,12,12,12,9,12,12,11,12,12,11,12,12,12,12,12,12,12,10,12,12,12,11,12,12,12,9,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,6,12,10,11,12,9,12,12,10,10,12,11,7,12,12,12,12,12,12,12,12,12,10,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,10,12,12,),
(11,9,11,11,11,11,9,11,7,11,11,10,11,11,9,11,11,11,11,11,11,11,9,11,11,11,10,11,11,11,8,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,5,11,8,9,11,7,11,11,8,7,11,0,6,11,11,11,11,11,10,11,11,11,8,11,11,11,11,11,11,11,11,11,10,11,10,10,11,10,11,11,11,7,11,10,),
(2,2,2,2,2,2,2,2,1,2,2,1,2,2,0,2,2,2,2,2,2,2,2,2,2,2,1,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,1,2,2,0,1,2,2,2,2,2,2,2,2,2,0,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,),
(7,7,7,7,7,7,7,7,6,7,7,6,7,7,6,7,7,7,7,7,7,7,7,7,7,7,6,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,6,7,7,7,6,7,7,7,7,7,7,7,7,7,6,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,),
(12,11,10,11,11,10,11,11,8,9,7,11,7,12,10,11,12,12,11,7,11,10,10,11,11,12,11,6,9,11,9,10,12,12,10,12,12,12,10,12,12,12,12,12,12,12,11,12,11,12,11,6,11,9,10,12,8,12,12,9,8,8,0,7,10,12,9,9,9,11,9,12,12,9,12,12,12,12,9,12,9,12,11,11,11,11,11,12,11,12,7,12,8,7,11,),
(2,2,2,2,2,2,2,2,2,2,2,1,2,2,0,2,2,2,2,2,2,2,2,2,2,2,1,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,0,2,2,2,2,2,2,2,2,2,2,0,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,),
(18,16,18,18,18,18,16,18,14,18,18,17,18,18,16,18,18,18,18,18,18,18,16,18,18,18,17,18,18,18,15,18,18,18,18,18,18,18,18,18,18,18,18,18,18,18,18,18,18,18,18,12,18,15,16,18,14,18,18,15,14,18,3,13,18,18,18,18,18,17,18,18,18,15,18,18,18,18,18,18,18,18,18,17,18,17,17,18,17,18,18,18,14,18,17,),
(11,9,11,11,11,11,9,11,7,11,11,10,11,11,9,11,11,11,11,11,11,11,9,11,11,11,10,11,11,11,8,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,5,11,8,9,11,7,11,11,8,7,11,0,6,11,11,11,11,11,10,11,11,11,8,11,11,11,11,11,11,11,11,11,10,11,10,10,11,10,11,11,11,7,11,10,),
(13,9,13,12,12,13,11,13,8,12,13,10,13,12,10,13,11,12,12,13,12,13,9,13,12,12,11,13,13,12,10,13,11,13,13,13,13,13,13,13,11,12,13,13,13,13,13,13,13,13,12,7,13,10,11,10,8,12,13,9,9,12,0,8,13,13,13,13,13,12,13,13,13,9,13,13,13,13,13,13,13,13,13,12,13,11,12,10,11,12,13,13,8,13,11,),
(12,9,12,12,11,12,10,12,7,11,12,9,12,11,9,12,10,11,11,12,11,12,8,12,11,11,10,12,12,11,9,12,10,12,12,12,12,12,12,12,10,11,12,12,12,12,12,12,12,12,11,6,12,9,10,9,7,11,12,9,8,12,2,7,12,12,12,12,12,11,12,12,12,8,12,12,12,12,12,12,12,12,12,11,12,11,11,9,11,11,12,12,7,12,10,),
(12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,10,12,12,12,12,12,12,12,9,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,6,12,10,11,12,9,12,12,10,12,12,12,7,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,),
(8,8,7,8,8,7,8,8,4,7,2,3,2,5,3,8,4,2,2,5,7,8,0,8,8,8,7,2,4,8,1,7,4,8,8,8,8,8,8,8,6,2,8,8,8,8,8,8,8,8,8,2,8,6,7,4,5,2,8,6,4,6,0,3,7,8,6,6,6,8,6,8,8,6,8,8,8,8,6,8,6,8,7,8,8,8,8,8,8,8,3,8,4,2,8,),
(11,9,9,11,10,11,9,11,7,10,6,9,6,11,9,11,10,11,11,11,11,11,8,11,10,11,9,8,11,11,8,11,10,11,11,11,11,11,10,11,10,11,11,11,11,11,11,11,11,11,11,5,11,8,8,9,6,11,11,7,7,9,0,6,11,11,11,11,11,10,11,11,11,8,11,11,11,11,11,11,11,11,11,10,11,9,10,9,9,11,9,11,7,11,10,),
(7,7,6,7,7,6,7,7,3,6,3,6,3,7,5,7,7,7,7,4,7,7,5,7,7,7,6,3,3,7,4,6,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,3,7,5,6,7,4,7,7,5,3,5,0,2,6,7,5,5,5,7,5,7,7,5,7,7,7,7,5,7,5,7,7,7,7,7,7,7,7,7,3,7,3,3,7,),
(11,11,11,11,11,11,11,11,7,11,11,10,11,11,9,11,11,11,11,11,11,11,9,11,11,11,10,11,11,11,8,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,5,11,9,10,11,8,11,11,9,7,11,0,6,11,11,11,11,11,11,11,11,11,9,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,7,11,11,),
(12,12,11,12,12,11,12,11,8,11,10,7,10,9,6,12,8,8,8,10,11,11,5,12,12,12,11,10,11,12,7,11,8,12,11,12,12,12,11,12,10,8,12,12,12,12,12,12,12,12,12,6,12,10,11,8,9,8,12,10,8,11,0,7,10,12,10,10,10,12,10,12,12,10,12,12,12,12,10,12,10,12,11,12,12,12,12,12,12,12,10,12,8,10,12,),
(18,18,17,18,18,17,18,18,14,17,17,14,17,15,13,18,15,15,15,17,17,18,13,18,18,18,17,17,17,18,14,17,15,18,18,18,18,18,18,18,16,15,18,18,18,18,18,18,18,18,18,12,18,16,17,15,15,15,18,16,14,17,3,13,17,18,17,17,17,18,17,18,18,16,18,18,18,18,17,18,17,18,17,18,18,18,18,18,18,18,17,18,14,17,18,),
(12,12,11,12,12,11,12,11,8,10,9,11,9,12,10,12,12,12,11,8,11,11,10,12,12,12,11,8,10,12,9,11,12,12,11,12,12,12,11,12,12,12,12,12,12,12,12,12,12,12,12,6,12,10,11,12,9,12,12,10,8,9,0,7,10,12,9,9,9,12,9,12,12,10,12,12,12,12,9,12,9,12,11,12,12,12,12,12,12,12,8,12,8,8,12,),
(12,12,11,12,12,11,12,11,8,11,10,7,10,9,6,12,8,8,8,10,11,11,5,12,12,12,11,10,11,12,7,11,8,12,11,12,12,12,11,12,10,8,12,12,12,12,12,12,12,12,12,6,12,10,11,8,9,8,12,10,8,11,6,7,10,12,10,10,10,12,10,12,12,10,12,12,12,12,10,12,10,12,11,12,12,12,12,12,12,12,10,12,8,10,12,),
(11,11,10,11,11,10,11,11,7,11,9,10,9,11,9,11,11,11,11,9,11,11,9,11,11,11,10,8,11,11,8,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,5,11,9,10,11,8,11,11,9,7,10,0,6,10,11,10,10,10,11,10,11,11,9,11,11,11,11,10,11,10,11,11,11,11,11,11,11,11,11,8,11,7,9,11,),
(11,11,6,7,9,8,11,8,11,8,6,11,6,8,11,8,7,9,9,6,10,7,11,8,8,8,11,5,7,7,10,7,7,11,7,11,11,11,7,11,11,9,11,11,11,11,7,11,7,11,8,11,11,11,11,11,11,11,11,11,11,6,8,11,7,11,6,7,6,9,10,11,11,11,11,11,7,7,6,11,7,7,7,7,7,7,7,7,8,7,5,11,11,6,9,),
(2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,),
(11,6,11,9,6,11,9,11,6,6,11,6,11,8,7,11,7,7,6,11,6,11,6,11,8,8,6,11,8,6,8,11,9,11,11,11,11,11,11,11,9,6,11,11,11,11,11,11,11,11,6,6,11,7,8,7,5,7,11,7,7,7,6,6,10,11,11,11,11,10,11,11,11,6,11,11,11,11,11,11,11,11,10,10,11,9,10,7,9,7,11,11,5,11,6,),
(15,8,14,15,15,14,13,15,10,15,15,10,15,12,11,15,11,10,11,14,14,15,9,14,15,15,14,14,15,11,12,15,12,15,15,15,15,15,15,15,13,9,15,15,15,15,15,15,15,15,14,9,15,12,13,10,11,10,15,12,11,15,0,10,14,15,15,15,15,14,15,15,15,10,15,15,15,15,15,15,15,15,15,14,15,14,14,13,14,10,14,15,11,14,14,),
(9,9,8,9,9,8,9,9,6,8,3,4,2,6,4,9,9,9,9,6,8,9,8,9,9,9,8,4,8,9,9,8,5,9,9,9,9,9,9,9,8,6,9,9,9,9,9,9,9,9,9,8,9,8,8,7,8,8,9,8,7,7,0,6,8,9,7,7,7,9,7,9,9,7,9,9,9,9,7,9,7,9,8,9,9,9,9,9,9,9,4,9,7,6,9,),
)
# End of font
| 36.111484 | 292 | 0.417497 |
79425453dcbb4c0d5645db8be440c0f6d4e0ef17 | 168,302 | py | Python | Lib/test/test_buffer.py | tai271828/RustPython | 9fb70707c4803f9f6d79dd4c8077bd16f0a9be45 | [
"CC-BY-4.0",
"MIT"
] | 11,058 | 2018-05-29T07:40:06.000Z | 2022-03-31T11:38:42.000Z | Lib/test/test_buffer.py | tai271828/RustPython | 9fb70707c4803f9f6d79dd4c8077bd16f0a9be45 | [
"CC-BY-4.0",
"MIT"
] | 2,105 | 2018-06-01T10:07:16.000Z | 2022-03-31T14:56:42.000Z | Lib/test/test_buffer.py | tai271828/RustPython | 9fb70707c4803f9f6d79dd4c8077bd16f0a9be45 | [
"CC-BY-4.0",
"MIT"
] | 914 | 2018-07-27T09:36:14.000Z | 2022-03-31T19:56:34.000Z | #
# The ndarray object from _testbuffer.c is a complete implementation of
# a PEP-3118 buffer provider. It is independent from NumPy's ndarray
# and the tests don't require NumPy.
#
# If NumPy is present, some tests check both ndarray implementations
# against each other.
#
# Most ndarray tests also check that memoryview(ndarray) behaves in
# the same way as the original. Thus, a substantial part of the
# memoryview tests is now in this module.
#
# Written and designed by Stefan Krah for Python 3.3.
#
import contextlib
import unittest
from test import support
from itertools import permutations, product
from random import randrange, sample, choice
import warnings
import sys, array, io, os
from decimal import Decimal
from fractions import Fraction
try:
from _testbuffer import *
except ImportError:
ndarray = None
try:
import struct
except ImportError:
struct = None
try:
import ctypes
except ImportError:
ctypes = None
try:
with support.EnvironmentVarGuard() as os.environ, \
warnings.catch_warnings():
from numpy import ndarray as numpy_array
except ImportError:
numpy_array = None
SHORT_TEST = True
# ======================================================================
# Random lists by format specifier
# ======================================================================
# Native format chars and their ranges.
NATIVE = {
'?':0, 'c':0, 'b':0, 'B':0,
'h':0, 'H':0, 'i':0, 'I':0,
'l':0, 'L':0, 'n':0, 'N':0,
'f':0, 'd':0, 'P':0
}
# NumPy does not have 'n' or 'N':
if numpy_array:
del NATIVE['n']
del NATIVE['N']
if struct:
try:
# Add "qQ" if present in native mode.
struct.pack('Q', 2**64-1)
NATIVE['q'] = 0
NATIVE['Q'] = 0
except struct.error:
pass
# Standard format chars and their ranges.
STANDARD = {
'?':(0, 2), 'c':(0, 1<<8),
'b':(-(1<<7), 1<<7), 'B':(0, 1<<8),
'h':(-(1<<15), 1<<15), 'H':(0, 1<<16),
'i':(-(1<<31), 1<<31), 'I':(0, 1<<32),
'l':(-(1<<31), 1<<31), 'L':(0, 1<<32),
'q':(-(1<<63), 1<<63), 'Q':(0, 1<<64),
'f':(-(1<<63), 1<<63), 'd':(-(1<<1023), 1<<1023)
}
def native_type_range(fmt):
"""Return range of a native type."""
if fmt == 'c':
lh = (0, 256)
elif fmt == '?':
lh = (0, 2)
elif fmt == 'f':
lh = (-(1<<63), 1<<63)
elif fmt == 'd':
lh = (-(1<<1023), 1<<1023)
else:
for exp in (128, 127, 64, 63, 32, 31, 16, 15, 8, 7):
try:
struct.pack(fmt, (1<<exp)-1)
break
except struct.error:
pass
lh = (-(1<<exp), 1<<exp) if exp & 1 else (0, 1<<exp)
return lh
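# Added note (not part of the original test suite): on a typical platform the
# probing above yields, for example, native_type_range('b') == (-128, 128) and
# native_type_range('H') == (0, 65536); the upper bound is exclusive, which
# matches how randrange() consumes these pairs below.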
fmtdict = {
'':NATIVE,
'@':NATIVE,
'<':STANDARD,
'>':STANDARD,
'=':STANDARD,
'!':STANDARD
}
if struct:
for fmt in fmtdict['@']:
fmtdict['@'][fmt] = native_type_range(fmt)
MEMORYVIEW = NATIVE.copy()
ARRAY = NATIVE.copy()
for k in NATIVE:
if not k in "bBhHiIlLfd":
del ARRAY[k]
BYTEFMT = NATIVE.copy()
for k in NATIVE:
if not k in "Bbc":
del BYTEFMT[k]
fmtdict['m'] = MEMORYVIEW
fmtdict['@m'] = MEMORYVIEW
fmtdict['a'] = ARRAY
fmtdict['b'] = BYTEFMT
fmtdict['@b'] = BYTEFMT
# Capabilities of the test objects:
MODE = 0
MULT = 1
cap = { # format chars # multiplier
'ndarray': (['', '@', '<', '>', '=', '!'], ['', '1', '2', '3']),
'array': (['a'], ['']),
'numpy': ([''], ['']),
'memoryview': (['@m', 'm'], ['']),
'bytefmt': (['@b', 'b'], ['']),
}
def randrange_fmt(mode, char, obj):
"""Return random item for a type specified by a mode and a single
format character."""
x = randrange(*fmtdict[mode][char])
if char == 'c':
x = bytes([x])
if obj == 'numpy' and x == b'\x00':
# http://projects.scipy.org/numpy/ticket/1925
x = b'\x01'
if char == '?':
x = bool(x)
if char == 'f' or char == 'd':
x = struct.pack(char, x)
x = struct.unpack(char, x)[0]
return x
def gen_item(fmt, obj):
"""Return single random item."""
mode, chars = fmt.split('#')
x = []
for c in chars:
x.append(randrange_fmt(mode, c, obj))
return x[0] if len(x) == 1 else tuple(x)
def gen_items(n, fmt, obj):
"""Return a list of random items (or a scalar)."""
if n == 0:
return gen_item(fmt, obj)
lst = [0] * n
for i in range(n):
lst[i] = gen_item(fmt, obj)
return lst
def struct_items(n, obj):
mode = choice(cap[obj][MODE])
xfmt = mode + '#'
fmt = mode.strip('amb')
nmemb = randrange(2, 10) # number of struct members
for _ in range(nmemb):
char = choice(tuple(fmtdict[mode]))
multiplier = choice(cap[obj][MULT])
xfmt += (char * int(multiplier if multiplier else 1))
fmt += (multiplier + char)
items = gen_items(n, xfmt, obj)
item = gen_item(xfmt, obj)
return fmt, items, item
def randitems(n, obj='ndarray', mode=None, char=None):
"""Return random format, items, item."""
if mode is None:
mode = choice(cap[obj][MODE])
if char is None:
char = choice(tuple(fmtdict[mode]))
multiplier = choice(cap[obj][MULT])
fmt = mode + '#' + char * int(multiplier if multiplier else 1)
items = gen_items(n, fmt, obj)
item = gen_item(fmt, obj)
fmt = mode.strip('amb') + multiplier + char
return fmt, items, item
def iter_mode(n, obj='ndarray'):
"""Iterate through supported mode/char combinations."""
for mode in cap[obj][MODE]:
for char in fmtdict[mode]:
yield randitems(n, obj, mode, char)
def iter_format(nitems, testobj='ndarray'):
"""Yield (format, items, item) for all possible modes and format
characters plus one random compound format string."""
for t in iter_mode(nitems, testobj):
yield t
if testobj != 'ndarray':
return
yield struct_items(nitems, testobj)
def is_byte_format(fmt):
return 'c' in fmt or 'b' in fmt or 'B' in fmt
def is_memoryview_format(fmt):
"""format suitable for memoryview"""
x = len(fmt)
return ((x == 1 or (x == 2 and fmt[0] == '@')) and
fmt[x-1] in MEMORYVIEW)
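# Added illustration (not part of the original test suite): only a single
# native format character, optionally prefixed with '@', qualifies.
assert is_memoryview_format('B')
assert is_memoryview_format('@L')
assert not is_memoryview_format('<i')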
NON_BYTE_FORMAT = [c for c in fmtdict['@'] if not is_byte_format(c)]
# ======================================================================
# Multi-dimensional tolist(), slicing and slice assignments
# ======================================================================
def atomp(lst):
"""Tuple items (representing structs) are regarded as atoms."""
return not isinstance(lst, list)
def listp(lst):
return isinstance(lst, list)
def prod(lst):
"""Product of list elements."""
if len(lst) == 0:
return 0
x = lst[0]
for v in lst[1:]:
x *= v
return x
def strides_from_shape(ndim, shape, itemsize, layout):
"""Calculate strides of a contiguous array. Layout is 'C' or
'F' (Fortran)."""
if ndim == 0:
return ()
if layout == 'C':
strides = list(shape[1:]) + [itemsize]
for i in range(ndim-2, -1, -1):
strides[i] *= strides[i+1]
else:
strides = [itemsize] + list(shape[:-1])
for i in range(1, ndim):
strides[i] *= strides[i-1]
return strides
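# Added sketch (not part of the original tests): a worked example of the
# stride computation above for a (2, 3) array of 8-byte items.
assert strides_from_shape(2, (2, 3), 8, 'C') == [24, 8]   # row-major
assert strides_from_shape(2, (2, 3), 8, 'F') == [8, 16]   # column-major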
def _ca(items, s):
"""Convert flat item list to the nested list representation of a
multidimensional C array with shape 's'."""
if atomp(items):
return items
if len(s) == 0:
return items[0]
lst = [0] * s[0]
stride = len(items) // s[0] if s[0] else 0
for i in range(s[0]):
start = i*stride
lst[i] = _ca(items[start:start+stride], s[1:])
return lst
def _fa(items, s):
"""Convert flat item list to the nested list representation of a
multidimensional Fortran array with shape 's'."""
if atomp(items):
return items
if len(s) == 0:
return items[0]
lst = [0] * s[0]
stride = s[0]
for i in range(s[0]):
lst[i] = _fa(items[i::stride], s[1:])
return lst
def carray(items, shape):
if listp(items) and not 0 in shape and prod(shape) != len(items):
raise ValueError("prod(shape) != len(items)")
return _ca(items, shape)
def farray(items, shape):
if listp(items) and not 0 in shape and prod(shape) != len(items):
raise ValueError("prod(shape) != len(items)")
return _fa(items, shape)
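# Added illustration (not part of the original tests): the same six items
# arranged as a 2x3 nested list in C (row-major) and Fortran (column-major)
# order.
assert carray([1, 2, 3, 4, 5, 6], (2, 3)) == [[1, 2, 3], [4, 5, 6]]
assert farray([1, 2, 3, 4, 5, 6], (2, 3)) == [[1, 3, 5], [2, 4, 6]]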
def indices(shape):
"""Generate all possible tuples of indices."""
iterables = [range(v) for v in shape]
return product(*iterables)
def getindex(ndim, ind, strides):
"""Convert multi-dimensional index to the position in the flat list."""
ret = 0
for i in range(ndim):
ret += strides[i] * ind[i]
return ret
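# Added illustration: with strides (3, 1), the multi-index (1, 2) maps to
# flat position 1*3 + 2*1 == 5.
assert getindex(2, (1, 2), (3, 1)) == 5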
def transpose(src, shape):
"""Transpose flat item list that is regarded as a multi-dimensional
matrix defined by shape: dest...[k][j][i] = src[i][j][k]... """
if not shape:
return src
ndim = len(shape)
sstrides = strides_from_shape(ndim, shape, 1, 'C')
dstrides = strides_from_shape(ndim, shape[::-1], 1, 'C')
dest = [0] * len(src)
for ind in indices(shape):
fr = getindex(ndim, ind, sstrides)
to = getindex(ndim, ind[::-1], dstrides)
dest[to] = src[fr]
return dest
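# Added illustration (not part of the original tests): transposing the flat
# form of a 2x3 matrix gives the flat form of the corresponding 3x2 matrix.
assert transpose([1, 2, 3, 4, 5, 6], (2, 3)) == [1, 4, 2, 5, 3, 6]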
def _flatten(lst):
"""flatten list"""
if lst == []:
return lst
if atomp(lst):
return [lst]
return _flatten(lst[0]) + _flatten(lst[1:])
def flatten(lst):
"""flatten list or return scalar"""
if atomp(lst): # scalar
return lst
return _flatten(lst)
def slice_shape(lst, slices):
"""Get the shape of lst after slicing: slices is a list of slice
objects."""
if atomp(lst):
return []
return [len(lst[slices[0]])] + slice_shape(lst[0], slices[1:])
def multislice(lst, slices):
"""Multi-dimensional slicing: slices is a list of slice objects."""
if atomp(lst):
return lst
return [multislice(sublst, slices[1:]) for sublst in lst[slices[0]]]
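# Added illustration: slicing both dimensions of a 3x3 nested list with
# step 2 keeps the four corner elements.
assert multislice([[1, 2, 3], [4, 5, 6], [7, 8, 9]],
                  [slice(0, 3, 2), slice(0, 3, 2)]) == [[1, 3], [7, 9]]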
def m_assign(llst, rlst, lslices, rslices):
"""Multi-dimensional slice assignment: llst and rlst are the operands,
lslices and rslices are lists of slice objects. llst and rlst must
have the same structure.
For a two-dimensional example, this is not implemented in Python:
llst[0:3:2, 0:3:2] = rlst[1:3:1, 1:3:1]
Instead we write:
lslices = [slice(0,3,2), slice(0,3,2)]
rslices = [slice(1,3,1), slice(1,3,1)]
multislice_assign(llst, rlst, lslices, rslices)
"""
if atomp(rlst):
return rlst
rlst = [m_assign(l, r, lslices[1:], rslices[1:])
for l, r in zip(llst[lslices[0]], rlst[rslices[0]])]
llst[lslices[0]] = rlst
return llst
def cmp_structure(llst, rlst, lslices, rslices):
"""Compare the structure of llst[lslices] and rlst[rslices]."""
lshape = slice_shape(llst, lslices)
rshape = slice_shape(rlst, rslices)
if (len(lshape) != len(rshape)):
return -1
for i in range(len(lshape)):
if lshape[i] != rshape[i]:
return -1
if lshape[i] == 0:
return 0
return 0
def multislice_assign(llst, rlst, lslices, rslices):
"""Return llst after assigning: llst[lslices] = rlst[rslices]"""
if cmp_structure(llst, rlst, lslices, rslices) < 0:
raise ValueError("lvalue and rvalue have different structures")
return m_assign(llst, rlst, lslices, rslices)
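# Added sketch of the example described in the m_assign() docstring, using
# throwaway names _llst/_rlst (not part of the original test suite):
# llst[0:3:2, 0:3:2] = rlst[1:3:1, 1:3:1] copies the lower-right 2x2 block
# of rlst onto the four corners of llst.
_llst = [[0, 0, 0], [0, 0, 0], [0, 0, 0]]
_rlst = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
multislice_assign(_llst, _rlst,
                  [slice(0, 3, 2), slice(0, 3, 2)],
                  [slice(1, 3, 1), slice(1, 3, 1)])
assert _llst == [[5, 0, 6], [0, 0, 0], [8, 0, 9]]
del _llst, _rlst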
# ======================================================================
# Random structures
# ======================================================================
#
# PEP-3118 is very permissive with respect to the contents of a
# Py_buffer. In particular:
#
# - shape can be zero
# - strides can be any integer, including zero
# - offset can point to any location in the underlying
# memory block, provided that it is a multiple of
# itemsize.
#
# The functions in this section test and verify random structures
# in full generality. A structure is valid iff it fits in the
# underlying memory block.
#
# The structure 't' (short for 'tuple') is fully defined by:
#
# t = (memlen, itemsize, ndim, shape, strides, offset)
#
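# For example (added note, not from the original source): the structure
# t = (24, 8, 1, (3,), (8,), 0) describes three contiguous 8-byte items
# that exactly fill a 24-byte memory block starting at offset 0.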
def verify_structure(memlen, itemsize, ndim, shape, strides, offset):
"""Verify that the parameters represent a valid array within
the bounds of the allocated memory:
char *mem: start of the physical memory block
memlen: length of the physical memory block
offset: (char *)buf - mem
"""
if offset % itemsize:
return False
if offset < 0 or offset+itemsize > memlen:
return False
if any(v % itemsize for v in strides):
return False
if ndim <= 0:
return ndim == 0 and not shape and not strides
if 0 in shape:
return True
imin = sum(strides[j]*(shape[j]-1) for j in range(ndim)
if strides[j] <= 0)
imax = sum(strides[j]*(shape[j]-1) for j in range(ndim)
if strides[j] > 0)
return 0 <= offset+imin and offset+imax+itemsize <= memlen
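# Added illustration: the example structure from the comment block above is
# accepted, while shifting its offset by one itemsize pushes the last item
# past the end of the 24-byte block.
assert verify_structure(24, 8, 1, (3,), (8,), 0)
assert not verify_structure(24, 8, 1, (3,), (8,), 8)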
def get_item(lst, indices):
for i in indices:
lst = lst[i]
return lst
def memory_index(indices, t):
"""Location of an item in the underlying memory."""
memlen, itemsize, ndim, shape, strides, offset = t
p = offset
for i in range(ndim):
p += strides[i]*indices[i]
return p
def is_overlapping(t):
"""The structure 't' is overlapping if at least one memory location
is visited twice while iterating through all possible tuples of
indices."""
memlen, itemsize, ndim, shape, strides, offset = t
visited = 1<<memlen
for ind in indices(shape):
i = memory_index(ind, t)
bit = 1<<i
if visited & bit:
return True
visited |= bit
return False
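# Added illustration: a zero stride makes every index visit the same byte,
# so the structure overlaps itself; an ordinary contiguous stride does not.
assert is_overlapping((8, 8, 1, (2,), (0,), 0))
assert not is_overlapping((16, 8, 1, (2,), (8,), 0))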
def rand_structure(itemsize, valid, maxdim=5, maxshape=16, shape=()):
"""Return random structure:
(memlen, itemsize, ndim, shape, strides, offset)
If 'valid' is true, the returned structure is valid, otherwise invalid.
If 'shape' is given, use that instead of creating a random shape.
"""
if not shape:
ndim = randrange(maxdim+1)
if (ndim == 0):
if valid:
return itemsize, itemsize, ndim, (), (), 0
else:
nitems = randrange(1, 16+1)
memlen = nitems * itemsize
offset = -itemsize if randrange(2) == 0 else memlen
return memlen, itemsize, ndim, (), (), offset
minshape = 2
n = randrange(100)
if n >= 95 and valid:
minshape = 0
elif n >= 90:
minshape = 1
shape = [0] * ndim
for i in range(ndim):
shape[i] = randrange(minshape, maxshape+1)
else:
ndim = len(shape)
maxstride = 5
n = randrange(100)
zero_stride = True if n >= 95 and n & 1 else False
strides = [0] * ndim
strides[ndim-1] = itemsize * randrange(-maxstride, maxstride+1)
if not zero_stride and strides[ndim-1] == 0:
strides[ndim-1] = itemsize
for i in range(ndim-2, -1, -1):
maxstride *= shape[i+1] if shape[i+1] else 1
if zero_stride:
strides[i] = itemsize * randrange(-maxstride, maxstride+1)
else:
strides[i] = ((1,-1)[randrange(2)] *
itemsize * randrange(1, maxstride+1))
imin = imax = 0
if not 0 in shape:
imin = sum(strides[j]*(shape[j]-1) for j in range(ndim)
if strides[j] <= 0)
imax = sum(strides[j]*(shape[j]-1) for j in range(ndim)
if strides[j] > 0)
nitems = imax - imin
if valid:
offset = -imin * itemsize
memlen = offset + (imax+1) * itemsize
else:
memlen = (-imin + imax) * itemsize
offset = -imin-itemsize if randrange(2) == 0 else memlen
return memlen, itemsize, ndim, shape, strides, offset
def randslice_from_slicelen(slicelen, listlen):
"""Create a random slice of len slicelen that fits into listlen."""
maxstart = listlen - slicelen
start = randrange(maxstart+1)
maxstep = (listlen - start) // slicelen if slicelen else 1
step = randrange(1, maxstep+1)
stop = start + slicelen * step
s = slice(start, stop, step)
_, _, _, control = slice_indices(s, listlen)
if control != slicelen:
raise RuntimeError
return s
def randslice_from_shape(ndim, shape):
"""Create two sets of slices for an array x with shape 'shape'
such that shapeof(x[lslices]) == shapeof(x[rslices])."""
lslices = [0] * ndim
rslices = [0] * ndim
for n in range(ndim):
l = shape[n]
slicelen = randrange(1, l+1) if l > 0 else 0
lslices[n] = randslice_from_slicelen(slicelen, l)
rslices[n] = randslice_from_slicelen(slicelen, l)
return tuple(lslices), tuple(rslices)
def rand_aligned_slices(maxdim=5, maxshape=16):
"""Create (lshape, rshape, tuple(lslices), tuple(rslices)) such that
shapeof(x[lslices]) == shapeof(y[rslices]), where x is an array
with shape 'lshape' and y is an array with shape 'rshape'."""
ndim = randrange(1, maxdim+1)
minshape = 2
n = randrange(100)
if n >= 95:
minshape = 0
elif n >= 90:
minshape = 1
all_random = True if randrange(100) >= 80 else False
lshape = [0]*ndim; rshape = [0]*ndim
lslices = [0]*ndim; rslices = [0]*ndim
for n in range(ndim):
small = randrange(minshape, maxshape+1)
big = randrange(minshape, maxshape+1)
if big < small:
big, small = small, big
# Create a slice that fits the smaller value.
if all_random:
start = randrange(-small, small+1)
stop = randrange(-small, small+1)
step = (1,-1)[randrange(2)] * randrange(1, small+2)
s_small = slice(start, stop, step)
_, _, _, slicelen = slice_indices(s_small, small)
else:
slicelen = randrange(1, small+1) if small > 0 else 0
s_small = randslice_from_slicelen(slicelen, small)
# Create a slice of the same length for the bigger value.
s_big = randslice_from_slicelen(slicelen, big)
if randrange(2) == 0:
rshape[n], lshape[n] = big, small
rslices[n], lslices[n] = s_big, s_small
else:
rshape[n], lshape[n] = small, big
rslices[n], lslices[n] = s_small, s_big
return lshape, rshape, tuple(lslices), tuple(rslices)
def randitems_from_structure(fmt, t):
"""Return a list of random items for structure 't' with format
    'fmt'."""
memlen, itemsize, _, _, _, _ = t
return gen_items(memlen//itemsize, '#'+fmt, 'numpy')
def ndarray_from_structure(items, fmt, t, flags=0):
"""Return ndarray from the tuple returned by rand_structure()"""
memlen, itemsize, ndim, shape, strides, offset = t
return ndarray(items, shape=shape, strides=strides, format=fmt,
offset=offset, flags=ND_WRITABLE|flags)
def numpy_array_from_structure(items, fmt, t):
"""Return numpy_array from the tuple returned by rand_structure()"""
memlen, itemsize, ndim, shape, strides, offset = t
buf = bytearray(memlen)
for j, v in enumerate(items):
struct.pack_into(fmt, buf, j*itemsize, v)
return numpy_array(buffer=buf, shape=shape, strides=strides,
dtype=fmt, offset=offset)
# ======================================================================
# memoryview casts
# ======================================================================
def cast_items(exporter, fmt, itemsize, shape=None):
"""Interpret the raw memory of 'exporter' as a list of items with
size 'itemsize'. If shape=None, the new structure is assumed to
be 1-D with n * itemsize = bytelen. If shape is given, the usual
constraint for contiguous arrays prod(shape) * itemsize = bytelen
applies. On success, return (items, shape). If the constraints
cannot be met, return (None, None). If a chunk of bytes is interpreted
as NaN as a result of float conversion, return ('nan', None)."""
bytelen = exporter.nbytes
if shape:
if prod(shape) * itemsize != bytelen:
return None, shape
elif shape == []:
if exporter.ndim == 0 or itemsize != bytelen:
return None, shape
else:
n, r = divmod(bytelen, itemsize)
shape = [n]
if r != 0:
return None, shape
mem = exporter.tobytes()
byteitems = [mem[i:i+itemsize] for i in range(0, len(mem), itemsize)]
items = []
for v in byteitems:
item = struct.unpack(fmt, v)[0]
if item != item:
return 'nan', shape
items.append(item)
return (items, shape) if shape != [] else (items[0], shape)
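# Added sketch (guarded on the optional struct module, not part of the
# original tests): reinterpreting four raw bytes as two little-endian
# unsigned 16-bit integers.
if struct:
    assert cast_items(memoryview(b'\x01\x00\x02\x00'), '<H', 2) == ([1, 2], [2])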
def gencastshapes():
"""Generate shapes to test casting."""
for n in range(32):
yield [n]
ndim = randrange(4, 6)
minshape = 1 if randrange(100) > 80 else 2
yield [randrange(minshape, 5) for _ in range(ndim)]
ndim = randrange(2, 4)
minshape = 1 if randrange(100) > 80 else 2
yield [randrange(minshape, 5) for _ in range(ndim)]
# ======================================================================
# Actual tests
# ======================================================================
def genslices(n):
"""Generate all possible slices for a single dimension."""
return product(range(-n, n+1), range(-n, n+1), range(-n, n+1))
def genslices_ndim(ndim, shape):
"""Generate all possible slice tuples for 'shape'."""
iterables = [genslices(shape[n]) for n in range(ndim)]
return product(*iterables)
def rslice(n, allow_empty=False):
"""Generate random slice for a single dimension of length n.
If zero=True, the slices may be empty, otherwise they will
be non-empty."""
minlen = 0 if allow_empty or n == 0 else 1
slicelen = randrange(minlen, n+1)
return randslice_from_slicelen(slicelen, n)
def rslices(n, allow_empty=False):
"""Generate random slices for a single dimension."""
for _ in range(5):
yield rslice(n, allow_empty)
def rslices_ndim(ndim, shape, iterations=5):
"""Generate random slice tuples for 'shape'."""
# non-empty slices
for _ in range(iterations):
yield tuple(rslice(shape[n]) for n in range(ndim))
# possibly empty slices
for _ in range(iterations):
yield tuple(rslice(shape[n], allow_empty=True) for n in range(ndim))
# invalid slices
yield tuple(slice(0,1,0) for _ in range(ndim))
def rpermutation(iterable, r=None):
pool = tuple(iterable)
r = len(pool) if r is None else r
yield tuple(sample(pool, r))
def ndarray_print(nd):
"""Print ndarray for debugging."""
try:
x = nd.tolist()
except (TypeError, NotImplementedError):
x = nd.tobytes()
if isinstance(nd, ndarray):
offset = nd.offset
flags = nd.flags
else:
offset = 'unknown'
flags = 'unknown'
print("ndarray(%s, shape=%s, strides=%s, suboffsets=%s, offset=%s, "
"format='%s', itemsize=%s, flags=%s)" %
(x, nd.shape, nd.strides, nd.suboffsets, offset,
nd.format, nd.itemsize, flags))
sys.stdout.flush()
ITERATIONS = 100
MAXDIM = 5
MAXSHAPE = 10
if SHORT_TEST:
ITERATIONS = 10
MAXDIM = 3
MAXSHAPE = 4
genslices = rslices
genslices_ndim = rslices_ndim
permutations = rpermutation
@unittest.skipUnless(struct, 'struct module required for this test.')
@unittest.skipUnless(ndarray, 'ndarray object required for this test')
class TestBufferProtocol(unittest.TestCase):
def setUp(self):
# The suboffsets tests need sizeof(void *).
self.sizeof_void_p = get_sizeof_void_p()
def verify(self, result, *, obj,
itemsize, fmt, readonly,
ndim, shape, strides,
lst, sliced=False, cast=False):
# Verify buffer contents against expected values.
if shape:
expected_len = prod(shape)*itemsize
else:
if not fmt: # array has been implicitly cast to unsigned bytes
expected_len = len(lst)
else: # ndim = 0
expected_len = itemsize
# Reconstruct suboffsets from strides. Support for slicing
# could be added, but is currently only needed for test_getbuf().
suboffsets = ()
if result.suboffsets:
self.assertGreater(ndim, 0)
suboffset0 = 0
for n in range(1, ndim):
if shape[n] == 0:
break
if strides[n] <= 0:
suboffset0 += -strides[n] * (shape[n]-1)
suboffsets = [suboffset0] + [-1 for v in range(ndim-1)]
# Not correct if slicing has occurred in the first dimension.
stride0 = self.sizeof_void_p
if strides[0] < 0:
stride0 = -stride0
strides = [stride0] + list(strides[1:])
self.assertIs(result.obj, obj)
self.assertEqual(result.nbytes, expected_len)
self.assertEqual(result.itemsize, itemsize)
self.assertEqual(result.format, fmt)
self.assertIs(result.readonly, readonly)
self.assertEqual(result.ndim, ndim)
self.assertEqual(result.shape, tuple(shape))
if not (sliced and suboffsets):
self.assertEqual(result.strides, tuple(strides))
self.assertEqual(result.suboffsets, tuple(suboffsets))
if isinstance(result, ndarray) or is_memoryview_format(fmt):
rep = result.tolist() if fmt else result.tobytes()
self.assertEqual(rep, lst)
if not fmt: # array has been cast to unsigned bytes,
return # the remaining tests won't work.
# PyBuffer_GetPointer() is the definition how to access an item.
# If PyBuffer_GetPointer(indices) is correct for all possible
# combinations of indices, the buffer is correct.
#
# Also test tobytes() against the flattened 'lst', with all items
# packed to bytes.
if not cast: # casts chop up 'lst' in different ways
b = bytearray()
buf_err = None
for ind in indices(shape):
try:
item1 = get_pointer(result, ind)
item2 = get_item(lst, ind)
if isinstance(item2, tuple):
x = struct.pack(fmt, *item2)
else:
x = struct.pack(fmt, item2)
b.extend(x)
except BufferError:
buf_err = True # re-exporter does not provide full buffer
break
self.assertEqual(item1, item2)
if not buf_err:
# test tobytes()
self.assertEqual(result.tobytes(), b)
# test hex()
m = memoryview(result)
h = "".join("%02x" % c for c in b)
self.assertEqual(m.hex(), h)
# lst := expected multi-dimensional logical representation
# flatten(lst) := elements in C-order
ff = fmt if fmt else 'B'
flattened = flatten(lst)
# Rules for 'A': if the array is already contiguous, return
# the array unaltered. Otherwise, return a contiguous 'C'
# representation.
for order in ['C', 'F', 'A']:
expected = result
if order == 'F':
if not is_contiguous(result, 'A') or \
is_contiguous(result, 'C'):
# For constructing the ndarray, convert the
# flattened logical representation to Fortran order.
trans = transpose(flattened, shape)
expected = ndarray(trans, shape=shape, format=ff,
flags=ND_FORTRAN)
else: # 'C', 'A'
if not is_contiguous(result, 'A') or \
is_contiguous(result, 'F') and order == 'C':
# The flattened list is already in C-order.
expected = ndarray(flattened, shape=shape, format=ff)
contig = get_contiguous(result, PyBUF_READ, order)
self.assertEqual(contig.tobytes(), b)
self.assertTrue(cmp_contig(contig, expected))
if ndim == 0:
continue
nmemb = len(flattened)
ro = 0 if readonly else ND_WRITABLE
### See comment in test_py_buffer_to_contiguous for an
### explanation why these tests are valid.
# To 'C'
contig = py_buffer_to_contiguous(result, 'C', PyBUF_FULL_RO)
self.assertEqual(len(contig), nmemb * itemsize)
initlst = [struct.unpack_from(fmt, contig, n*itemsize)
for n in range(nmemb)]
if len(initlst[0]) == 1:
initlst = [v[0] for v in initlst]
y = ndarray(initlst, shape=shape, flags=ro, format=fmt)
self.assertEqual(memoryview(y), memoryview(result))
contig_bytes = memoryview(result).tobytes()
self.assertEqual(contig_bytes, contig)
contig_bytes = memoryview(result).tobytes(order=None)
self.assertEqual(contig_bytes, contig)
contig_bytes = memoryview(result).tobytes(order='C')
self.assertEqual(contig_bytes, contig)
# To 'F'
contig = py_buffer_to_contiguous(result, 'F', PyBUF_FULL_RO)
self.assertEqual(len(contig), nmemb * itemsize)
initlst = [struct.unpack_from(fmt, contig, n*itemsize)
for n in range(nmemb)]
if len(initlst[0]) == 1:
initlst = [v[0] for v in initlst]
y = ndarray(initlst, shape=shape, flags=ro|ND_FORTRAN,
format=fmt)
self.assertEqual(memoryview(y), memoryview(result))
contig_bytes = memoryview(result).tobytes(order='F')
self.assertEqual(contig_bytes, contig)
# To 'A'
contig = py_buffer_to_contiguous(result, 'A', PyBUF_FULL_RO)
self.assertEqual(len(contig), nmemb * itemsize)
initlst = [struct.unpack_from(fmt, contig, n*itemsize)
for n in range(nmemb)]
if len(initlst[0]) == 1:
initlst = [v[0] for v in initlst]
f = ND_FORTRAN if is_contiguous(result, 'F') else 0
y = ndarray(initlst, shape=shape, flags=f|ro, format=fmt)
self.assertEqual(memoryview(y), memoryview(result))
contig_bytes = memoryview(result).tobytes(order='A')
self.assertEqual(contig_bytes, contig)
if is_memoryview_format(fmt):
try:
m = memoryview(result)
except BufferError: # re-exporter does not provide full information
return
ex = result.obj if isinstance(result, memoryview) else result
def check_memoryview(m, expected_readonly=readonly):
self.assertIs(m.obj, ex)
self.assertEqual(m.nbytes, expected_len)
self.assertEqual(m.itemsize, itemsize)
self.assertEqual(m.format, fmt)
self.assertEqual(m.readonly, expected_readonly)
self.assertEqual(m.ndim, ndim)
self.assertEqual(m.shape, tuple(shape))
if not (sliced and suboffsets):
self.assertEqual(m.strides, tuple(strides))
self.assertEqual(m.suboffsets, tuple(suboffsets))
n = 1 if ndim == 0 else len(lst)
self.assertEqual(len(m), n)
rep = result.tolist() if fmt else result.tobytes()
self.assertEqual(rep, lst)
self.assertEqual(m, result)
check_memoryview(m)
with m.toreadonly() as mm:
check_memoryview(mm, expected_readonly=True)
m.tobytes() # Releasing mm didn't release m
def verify_getbuf(self, orig_ex, ex, req, sliced=False):
def simple_fmt(ex):
return ex.format == '' or ex.format == 'B'
def match(req, flag):
return ((req&flag) == flag)
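        # match() checks that all bits of 'flag' are set in 'req'.  Compound
        # requests imply their components, e.g. PyBUF_FULL includes the
        # PyBUF_STRIDES bits, so match(PyBUF_FULL, PyBUF_STRIDES) is true.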
if (# writable request to read-only exporter
(ex.readonly and match(req, PyBUF_WRITABLE)) or
# cannot match explicit contiguity request
(match(req, PyBUF_C_CONTIGUOUS) and not ex.c_contiguous) or
(match(req, PyBUF_F_CONTIGUOUS) and not ex.f_contiguous) or
(match(req, PyBUF_ANY_CONTIGUOUS) and not ex.contiguous) or
# buffer needs suboffsets
(not match(req, PyBUF_INDIRECT) and ex.suboffsets) or
# buffer without strides must be C-contiguous
(not match(req, PyBUF_STRIDES) and not ex.c_contiguous) or
# PyBUF_SIMPLE|PyBUF_FORMAT and PyBUF_WRITABLE|PyBUF_FORMAT
(not match(req, PyBUF_ND) and match(req, PyBUF_FORMAT))):
self.assertRaises(BufferError, ndarray, ex, getbuf=req)
return
if isinstance(ex, ndarray) or is_memoryview_format(ex.format):
lst = ex.tolist()
else:
nd = ndarray(ex, getbuf=PyBUF_FULL_RO)
lst = nd.tolist()
# The consumer may have requested default values or a NULL format.
ro = False if match(req, PyBUF_WRITABLE) else ex.readonly
fmt = ex.format
itemsize = ex.itemsize
ndim = ex.ndim
if not match(req, PyBUF_FORMAT):
# itemsize refers to the original itemsize before the cast.
# The equality product(shape) * itemsize = len still holds.
# The equality calcsize(format) = itemsize does _not_ hold.
fmt = ''
lst = orig_ex.tobytes() # Issue 12834
if not match(req, PyBUF_ND):
ndim = 1
shape = orig_ex.shape if match(req, PyBUF_ND) else ()
strides = orig_ex.strides if match(req, PyBUF_STRIDES) else ()
nd = ndarray(ex, getbuf=req)
self.verify(nd, obj=ex,
itemsize=itemsize, fmt=fmt, readonly=ro,
ndim=ndim, shape=shape, strides=strides,
lst=lst, sliced=sliced)
def test_ndarray_getbuf(self):
requests = (
# distinct flags
PyBUF_INDIRECT, PyBUF_STRIDES, PyBUF_ND, PyBUF_SIMPLE,
PyBUF_C_CONTIGUOUS, PyBUF_F_CONTIGUOUS, PyBUF_ANY_CONTIGUOUS,
# compound requests
PyBUF_FULL, PyBUF_FULL_RO,
PyBUF_RECORDS, PyBUF_RECORDS_RO,
PyBUF_STRIDED, PyBUF_STRIDED_RO,
PyBUF_CONTIG, PyBUF_CONTIG_RO,
)
# items and format
items_fmt = (
([True if x % 2 else False for x in range(12)], '?'),
([1,2,3,4,5,6,7,8,9,10,11,12], 'b'),
([1,2,3,4,5,6,7,8,9,10,11,12], 'B'),
([(2**31-x) if x % 2 else (-2**31+x) for x in range(12)], 'l')
)
# shape, strides, offset
structure = (
([], [], 0),
([1,3,1], [], 0),
([12], [], 0),
([12], [-1], 11),
([6], [2], 0),
([6], [-2], 11),
([3, 4], [], 0),
([3, 4], [-4, -1], 11),
([2, 2], [4, 1], 4),
([2, 2], [-4, -1], 8)
)
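        # Each entry is (shape, strides, offset) with strides and offset given
        # in items; both are scaled by the itemsize below.  E.g. ([12], [-1], 11)
        # views the 12-element buffer in reverse, ([6], [2], 0) takes every
        # other item, and ([2, 2], [4, 1], 4) selects a 2x2 sub-block.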
# ndarray creation flags
ndflags = (
0, ND_WRITABLE, ND_FORTRAN, ND_FORTRAN|ND_WRITABLE,
ND_PIL, ND_PIL|ND_WRITABLE
)
# flags that can actually be used as flags
real_flags = (0, PyBUF_WRITABLE, PyBUF_FORMAT,
PyBUF_WRITABLE|PyBUF_FORMAT)
for items, fmt in items_fmt:
itemsize = struct.calcsize(fmt)
for shape, strides, offset in structure:
strides = [v * itemsize for v in strides]
offset *= itemsize
for flags in ndflags:
if strides and (flags&ND_FORTRAN):
continue
if not shape and (flags&ND_PIL):
continue
_items = items if shape else items[0]
ex1 = ndarray(_items, format=fmt, flags=flags,
shape=shape, strides=strides, offset=offset)
ex2 = ex1[::-2] if shape else None
m1 = memoryview(ex1)
if ex2:
m2 = memoryview(ex2)
if ex1.ndim == 0 or (ex1.ndim == 1 and shape and strides):
self.assertEqual(m1, ex1)
if ex2 and ex2.ndim == 1 and shape and strides:
self.assertEqual(m2, ex2)
for req in requests:
for bits in real_flags:
self.verify_getbuf(ex1, ex1, req|bits)
self.verify_getbuf(ex1, m1, req|bits)
if ex2:
self.verify_getbuf(ex2, ex2, req|bits,
sliced=True)
self.verify_getbuf(ex2, m2, req|bits,
sliced=True)
items = [1,2,3,4,5,6,7,8,9,10,11,12]
# ND_GETBUF_FAIL
ex = ndarray(items, shape=[12], flags=ND_GETBUF_FAIL)
self.assertRaises(BufferError, ndarray, ex)
# Request complex structure from a simple exporter. In this
# particular case the test object is not PEP-3118 compliant.
base = ndarray([9], [1])
ex = ndarray(base, getbuf=PyBUF_SIMPLE)
self.assertRaises(BufferError, ndarray, ex, getbuf=PyBUF_WRITABLE)
self.assertRaises(BufferError, ndarray, ex, getbuf=PyBUF_ND)
self.assertRaises(BufferError, ndarray, ex, getbuf=PyBUF_STRIDES)
self.assertRaises(BufferError, ndarray, ex, getbuf=PyBUF_C_CONTIGUOUS)
self.assertRaises(BufferError, ndarray, ex, getbuf=PyBUF_F_CONTIGUOUS)
self.assertRaises(BufferError, ndarray, ex, getbuf=PyBUF_ANY_CONTIGUOUS)
nd = ndarray(ex, getbuf=PyBUF_SIMPLE)
# Issue #22445: New precise contiguity definition.
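        # Shapes that contain a zero, or have at most one dimension greater
        # than 1, are treated as both C- and Fortran-contiguous under the
        # stricter definition, whatever the strides in the degenerate
        # dimensions happen to be.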
for shape in [1,12,1], [7,0,7]:
for order in 0, ND_FORTRAN:
ex = ndarray(items, shape=shape, flags=order|ND_WRITABLE)
self.assertTrue(is_contiguous(ex, 'F'))
self.assertTrue(is_contiguous(ex, 'C'))
for flags in requests:
nd = ndarray(ex, getbuf=flags)
self.assertTrue(is_contiguous(nd, 'F'))
self.assertTrue(is_contiguous(nd, 'C'))
def test_ndarray_exceptions(self):
nd = ndarray([9], [1])
ndm = ndarray([9], [1], flags=ND_VAREXPORT)
# Initialization of a new ndarray or mutation of an existing array.
for c in (ndarray, nd.push, ndm.push):
# Invalid types.
self.assertRaises(TypeError, c, {1,2,3})
self.assertRaises(TypeError, c, [1,2,'3'])
self.assertRaises(TypeError, c, [1,2,(3,4)])
self.assertRaises(TypeError, c, [1,2,3], shape={3})
self.assertRaises(TypeError, c, [1,2,3], shape=[3], strides={1})
self.assertRaises(TypeError, c, [1,2,3], shape=[3], offset=[])
self.assertRaises(TypeError, c, [1], shape=[1], format={})
self.assertRaises(TypeError, c, [1], shape=[1], flags={})
self.assertRaises(TypeError, c, [1], shape=[1], getbuf={})
# ND_FORTRAN flag is only valid without strides.
self.assertRaises(TypeError, c, [1], shape=[1], strides=[1],
flags=ND_FORTRAN)
# ND_PIL flag is only valid with ndim > 0.
self.assertRaises(TypeError, c, [1], shape=[], flags=ND_PIL)
# Invalid items.
self.assertRaises(ValueError, c, [], shape=[1])
self.assertRaises(ValueError, c, ['XXX'], shape=[1], format="L")
# Invalid combination of items and format.
self.assertRaises(struct.error, c, [1000], shape=[1], format="B")
self.assertRaises(ValueError, c, [1,(2,3)], shape=[2], format="B")
self.assertRaises(ValueError, c, [1,2,3], shape=[3], format="QL")
# Invalid ndim.
n = ND_MAX_NDIM+1
self.assertRaises(ValueError, c, [1]*n, shape=[1]*n)
# Invalid shape.
self.assertRaises(ValueError, c, [1], shape=[-1])
self.assertRaises(ValueError, c, [1,2,3], shape=['3'])
self.assertRaises(OverflowError, c, [1], shape=[2**128])
# prod(shape) * itemsize != len(items)
self.assertRaises(ValueError, c, [1,2,3,4,5], shape=[2,2], offset=3)
# Invalid strides.
self.assertRaises(ValueError, c, [1,2,3], shape=[3], strides=['1'])
self.assertRaises(OverflowError, c, [1], shape=[1],
strides=[2**128])
# Invalid combination of strides and shape.
self.assertRaises(ValueError, c, [1,2], shape=[2,1], strides=[1])
# Invalid combination of strides and format.
self.assertRaises(ValueError, c, [1,2,3,4], shape=[2], strides=[3],
format="L")
# Invalid offset.
self.assertRaises(ValueError, c, [1,2,3], shape=[3], offset=4)
self.assertRaises(ValueError, c, [1,2,3], shape=[1], offset=3,
format="L")
# Invalid format.
self.assertRaises(ValueError, c, [1,2,3], shape=[3], format="")
self.assertRaises(struct.error, c, [(1,2,3)], shape=[1],
format="@#$")
# Striding out of the memory bounds.
items = [1,2,3,4,5,6,7,8,9,10]
self.assertRaises(ValueError, c, items, shape=[2,3],
strides=[-3, -2], offset=5)
# Constructing consumer: format argument invalid.
self.assertRaises(TypeError, c, bytearray(), format="Q")
# Constructing original base object: getbuf argument invalid.
self.assertRaises(TypeError, c, [1], shape=[1], getbuf=PyBUF_FULL)
# Shape argument is mandatory for original base objects.
self.assertRaises(TypeError, c, [1])
# PyBUF_WRITABLE request to read-only provider.
self.assertRaises(BufferError, ndarray, b'123', getbuf=PyBUF_WRITABLE)
# ND_VAREXPORT can only be specified during construction.
nd = ndarray([9], [1], flags=ND_VAREXPORT)
self.assertRaises(ValueError, nd.push, [1], [1], flags=ND_VAREXPORT)
# Invalid operation for consumers: push/pop
nd = ndarray(b'123')
self.assertRaises(BufferError, nd.push, [1], [1])
self.assertRaises(BufferError, nd.pop)
# ND_VAREXPORT not set: push/pop fail with exported buffers
nd = ndarray([9], [1])
nd.push([1], [1])
m = memoryview(nd)
self.assertRaises(BufferError, nd.push, [1], [1])
self.assertRaises(BufferError, nd.pop)
m.release()
nd.pop()
# Single remaining buffer: pop fails
self.assertRaises(BufferError, nd.pop)
del nd
# get_pointer()
self.assertRaises(TypeError, get_pointer, {}, [1,2,3])
self.assertRaises(TypeError, get_pointer, b'123', {})
nd = ndarray(list(range(100)), shape=[1]*100)
self.assertRaises(ValueError, get_pointer, nd, [5])
nd = ndarray(list(range(12)), shape=[3,4])
self.assertRaises(ValueError, get_pointer, nd, [2,3,4])
self.assertRaises(ValueError, get_pointer, nd, [3,3])
self.assertRaises(ValueError, get_pointer, nd, [-3,3])
self.assertRaises(OverflowError, get_pointer, nd, [1<<64,3])
# tolist() needs format
ex = ndarray([1,2,3], shape=[3], format='L')
nd = ndarray(ex, getbuf=PyBUF_SIMPLE)
self.assertRaises(ValueError, nd.tolist)
# memoryview_from_buffer()
ex1 = ndarray([1,2,3], shape=[3], format='L')
ex2 = ndarray(ex1)
nd = ndarray(ex2)
self.assertRaises(TypeError, nd.memoryview_from_buffer)
nd = ndarray([(1,)*200], shape=[1], format='L'*200)
self.assertRaises(TypeError, nd.memoryview_from_buffer)
n = ND_MAX_NDIM
nd = ndarray(list(range(n)), shape=[1]*n)
self.assertRaises(ValueError, nd.memoryview_from_buffer)
# get_contiguous()
nd = ndarray([1], shape=[1])
self.assertRaises(TypeError, get_contiguous, 1, 2, 3, 4, 5)
self.assertRaises(TypeError, get_contiguous, nd, "xyz", 'C')
self.assertRaises(OverflowError, get_contiguous, nd, 2**64, 'C')
self.assertRaises(TypeError, get_contiguous, nd, PyBUF_READ, 961)
self.assertRaises(UnicodeEncodeError, get_contiguous, nd, PyBUF_READ,
'\u2007')
self.assertRaises(ValueError, get_contiguous, nd, PyBUF_READ, 'Z')
self.assertRaises(ValueError, get_contiguous, nd, 255, 'A')
# cmp_contig()
nd = ndarray([1], shape=[1])
self.assertRaises(TypeError, cmp_contig, 1, 2, 3, 4, 5)
self.assertRaises(TypeError, cmp_contig, {}, nd)
self.assertRaises(TypeError, cmp_contig, nd, {})
# is_contiguous()
nd = ndarray([1], shape=[1])
self.assertRaises(TypeError, is_contiguous, 1, 2, 3, 4, 5)
self.assertRaises(TypeError, is_contiguous, {}, 'A')
self.assertRaises(TypeError, is_contiguous, nd, 201)
def test_ndarray_linked_list(self):
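        # With ND_VAREXPORT the ndarray keeps a linked list of exported
        # buffers.  Releasing a memoryview unlinks the corresponding node, and
        # this must work regardless of the order in which the views are
        # released -- hence the loop over all permutations.  Once only the
        # single base buffer is left, pop() refuses to remove it.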
for perm in permutations(range(5)):
m = [0]*5
nd = ndarray([1,2,3], shape=[3], flags=ND_VAREXPORT)
m[0] = memoryview(nd)
for i in range(1, 5):
nd.push([1,2,3], shape=[3])
m[i] = memoryview(nd)
for i in range(5):
m[perm[i]].release()
self.assertRaises(BufferError, nd.pop)
del nd
def test_ndarray_format_scalar(self):
# ndim = 0: scalar
for fmt, scalar, _ in iter_format(0):
itemsize = struct.calcsize(fmt)
nd = ndarray(scalar, shape=(), format=fmt)
self.verify(nd, obj=None,
itemsize=itemsize, fmt=fmt, readonly=True,
ndim=0, shape=(), strides=(),
lst=scalar)
def test_ndarray_format_shape(self):
# ndim = 1, shape = [n]
nitems = randrange(1, 10)
for fmt, items, _ in iter_format(nitems):
itemsize = struct.calcsize(fmt)
for flags in (0, ND_PIL):
nd = ndarray(items, shape=[nitems], format=fmt, flags=flags)
self.verify(nd, obj=None,
itemsize=itemsize, fmt=fmt, readonly=True,
ndim=1, shape=(nitems,), strides=(itemsize,),
lst=items)
def test_ndarray_format_strides(self):
# ndim = 1, strides
nitems = randrange(1, 30)
for fmt, items, _ in iter_format(nitems):
itemsize = struct.calcsize(fmt)
for step in range(-5, 5):
if step == 0:
continue
shape = [len(items[::step])]
strides = [step*itemsize]
offset = itemsize*(nitems-1) if step < 0 else 0
for flags in (0, ND_PIL):
nd = ndarray(items, shape=shape, strides=strides,
format=fmt, offset=offset, flags=flags)
self.verify(nd, obj=None,
itemsize=itemsize, fmt=fmt, readonly=True,
ndim=1, shape=shape, strides=strides,
lst=items[::step])
def test_ndarray_fortran(self):
items = [1,2,3,4,5,6,7,8,9,10,11,12]
ex = ndarray(items, shape=(3, 4), strides=(1, 3))
nd = ndarray(ex, getbuf=PyBUF_F_CONTIGUOUS|PyBUF_FORMAT)
self.assertEqual(nd.tolist(), farray(items, (3, 4)))
def test_ndarray_multidim(self):
for ndim in range(5):
shape_t = [randrange(2, 10) for _ in range(ndim)]
nitems = prod(shape_t)
for shape in permutations(shape_t):
fmt, items, _ = randitems(nitems)
itemsize = struct.calcsize(fmt)
for flags in (0, ND_PIL):
if ndim == 0 and flags == ND_PIL:
continue
# C array
nd = ndarray(items, shape=shape, format=fmt, flags=flags)
strides = strides_from_shape(ndim, shape, itemsize, 'C')
lst = carray(items, shape)
self.verify(nd, obj=None,
itemsize=itemsize, fmt=fmt, readonly=True,
ndim=ndim, shape=shape, strides=strides,
lst=lst)
if is_memoryview_format(fmt):
# memoryview: reconstruct strides
ex = ndarray(items, shape=shape, format=fmt)
nd = ndarray(ex, getbuf=PyBUF_CONTIG_RO|PyBUF_FORMAT)
                        self.assertEqual(nd.strides, ())
mv = nd.memoryview_from_buffer()
self.verify(mv, obj=None,
itemsize=itemsize, fmt=fmt, readonly=True,
ndim=ndim, shape=shape, strides=strides,
lst=lst)
# Fortran array
nd = ndarray(items, shape=shape, format=fmt,
flags=flags|ND_FORTRAN)
strides = strides_from_shape(ndim, shape, itemsize, 'F')
lst = farray(items, shape)
self.verify(nd, obj=None,
itemsize=itemsize, fmt=fmt, readonly=True,
ndim=ndim, shape=shape, strides=strides,
lst=lst)
def test_ndarray_index_invalid(self):
# not writable
nd = ndarray([1], shape=[1])
self.assertRaises(TypeError, nd.__setitem__, 1, 8)
mv = memoryview(nd)
self.assertEqual(mv, nd)
self.assertRaises(TypeError, mv.__setitem__, 1, 8)
# cannot be deleted
nd = ndarray([1], shape=[1], flags=ND_WRITABLE)
self.assertRaises(TypeError, nd.__delitem__, 1)
mv = memoryview(nd)
self.assertEqual(mv, nd)
self.assertRaises(TypeError, mv.__delitem__, 1)
# overflow
nd = ndarray([1], shape=[1], flags=ND_WRITABLE)
self.assertRaises(OverflowError, nd.__getitem__, 1<<64)
self.assertRaises(OverflowError, nd.__setitem__, 1<<64, 8)
mv = memoryview(nd)
self.assertEqual(mv, nd)
self.assertRaises(IndexError, mv.__getitem__, 1<<64)
self.assertRaises(IndexError, mv.__setitem__, 1<<64, 8)
# format
items = [1,2,3,4,5,6,7,8]
nd = ndarray(items, shape=[len(items)], format="B", flags=ND_WRITABLE)
self.assertRaises(struct.error, nd.__setitem__, 2, 300)
self.assertRaises(ValueError, nd.__setitem__, 1, (100, 200))
mv = memoryview(nd)
self.assertEqual(mv, nd)
self.assertRaises(ValueError, mv.__setitem__, 2, 300)
self.assertRaises(TypeError, mv.__setitem__, 1, (100, 200))
items = [(1,2), (3,4), (5,6)]
nd = ndarray(items, shape=[len(items)], format="LQ", flags=ND_WRITABLE)
self.assertRaises(ValueError, nd.__setitem__, 2, 300)
self.assertRaises(struct.error, nd.__setitem__, 1, (b'\x001', 200))
def test_ndarray_index_scalar(self):
# scalar
nd = ndarray(1, shape=(), flags=ND_WRITABLE)
mv = memoryview(nd)
self.assertEqual(mv, nd)
x = nd[()]; self.assertEqual(x, 1)
x = nd[...]; self.assertEqual(x.tolist(), nd.tolist())
x = mv[()]; self.assertEqual(x, 1)
x = mv[...]; self.assertEqual(x.tolist(), nd.tolist())
self.assertRaises(TypeError, nd.__getitem__, 0)
self.assertRaises(TypeError, mv.__getitem__, 0)
self.assertRaises(TypeError, nd.__setitem__, 0, 8)
self.assertRaises(TypeError, mv.__setitem__, 0, 8)
self.assertEqual(nd.tolist(), 1)
self.assertEqual(mv.tolist(), 1)
nd[()] = 9; self.assertEqual(nd.tolist(), 9)
mv[()] = 9; self.assertEqual(mv.tolist(), 9)
nd[...] = 5; self.assertEqual(nd.tolist(), 5)
mv[...] = 5; self.assertEqual(mv.tolist(), 5)
def test_ndarray_index_null_strides(self):
ex = ndarray(list(range(2*4)), shape=[2, 4], flags=ND_WRITABLE)
nd = ndarray(ex, getbuf=PyBUF_CONTIG)
# Sub-views are only possible for full exporters.
self.assertRaises(BufferError, nd.__getitem__, 1)
# Same for slices.
self.assertRaises(BufferError, nd.__getitem__, slice(3,5,1))
def test_ndarray_index_getitem_single(self):
# getitem
for fmt, items, _ in iter_format(5):
nd = ndarray(items, shape=[5], format=fmt)
for i in range(-5, 5):
self.assertEqual(nd[i], items[i])
self.assertRaises(IndexError, nd.__getitem__, -6)
self.assertRaises(IndexError, nd.__getitem__, 5)
if is_memoryview_format(fmt):
mv = memoryview(nd)
self.assertEqual(mv, nd)
for i in range(-5, 5):
self.assertEqual(mv[i], items[i])
self.assertRaises(IndexError, mv.__getitem__, -6)
self.assertRaises(IndexError, mv.__getitem__, 5)
# getitem with null strides
for fmt, items, _ in iter_format(5):
ex = ndarray(items, shape=[5], flags=ND_WRITABLE, format=fmt)
nd = ndarray(ex, getbuf=PyBUF_CONTIG|PyBUF_FORMAT)
for i in range(-5, 5):
self.assertEqual(nd[i], items[i])
if is_memoryview_format(fmt):
mv = nd.memoryview_from_buffer()
self.assertIs(mv.__eq__(nd), NotImplemented)
for i in range(-5, 5):
self.assertEqual(mv[i], items[i])
# getitem with null format
items = [1,2,3,4,5]
ex = ndarray(items, shape=[5])
nd = ndarray(ex, getbuf=PyBUF_CONTIG_RO)
for i in range(-5, 5):
self.assertEqual(nd[i], items[i])
# getitem with null shape/strides/format
items = [1,2,3,4,5]
ex = ndarray(items, shape=[5])
nd = ndarray(ex, getbuf=PyBUF_SIMPLE)
for i in range(-5, 5):
self.assertEqual(nd[i], items[i])
def test_ndarray_index_setitem_single(self):
# assign single value
for fmt, items, single_item in iter_format(5):
nd = ndarray(items, shape=[5], format=fmt, flags=ND_WRITABLE)
for i in range(5):
items[i] = single_item
nd[i] = single_item
self.assertEqual(nd.tolist(), items)
self.assertRaises(IndexError, nd.__setitem__, -6, single_item)
self.assertRaises(IndexError, nd.__setitem__, 5, single_item)
if not is_memoryview_format(fmt):
continue
nd = ndarray(items, shape=[5], format=fmt, flags=ND_WRITABLE)
mv = memoryview(nd)
self.assertEqual(mv, nd)
for i in range(5):
items[i] = single_item
mv[i] = single_item
self.assertEqual(mv.tolist(), items)
self.assertRaises(IndexError, mv.__setitem__, -6, single_item)
self.assertRaises(IndexError, mv.__setitem__, 5, single_item)
# assign single value: lobject = robject
for fmt, items, single_item in iter_format(5):
nd = ndarray(items, shape=[5], format=fmt, flags=ND_WRITABLE)
for i in range(-5, 4):
items[i] = items[i+1]
nd[i] = nd[i+1]
self.assertEqual(nd.tolist(), items)
if not is_memoryview_format(fmt):
continue
nd = ndarray(items, shape=[5], format=fmt, flags=ND_WRITABLE)
mv = memoryview(nd)
self.assertEqual(mv, nd)
for i in range(-5, 4):
items[i] = items[i+1]
mv[i] = mv[i+1]
self.assertEqual(mv.tolist(), items)
def test_ndarray_index_getitem_multidim(self):
shape_t = (2, 3, 5)
nitems = prod(shape_t)
for shape in permutations(shape_t):
fmt, items, _ = randitems(nitems)
for flags in (0, ND_PIL):
# C array
nd = ndarray(items, shape=shape, format=fmt, flags=flags)
lst = carray(items, shape)
for i in range(-shape[0], shape[0]):
self.assertEqual(lst[i], nd[i].tolist())
for j in range(-shape[1], shape[1]):
self.assertEqual(lst[i][j], nd[i][j].tolist())
for k in range(-shape[2], shape[2]):
self.assertEqual(lst[i][j][k], nd[i][j][k])
# Fortran array
nd = ndarray(items, shape=shape, format=fmt,
flags=flags|ND_FORTRAN)
lst = farray(items, shape)
for i in range(-shape[0], shape[0]):
self.assertEqual(lst[i], nd[i].tolist())
for j in range(-shape[1], shape[1]):
self.assertEqual(lst[i][j], nd[i][j].tolist())
                        for k in range(-shape[2], shape[2]):
self.assertEqual(lst[i][j][k], nd[i][j][k])
def test_ndarray_sequence(self):
nd = ndarray(1, shape=())
self.assertRaises(TypeError, eval, "1 in nd", locals())
mv = memoryview(nd)
self.assertEqual(mv, nd)
self.assertRaises(TypeError, eval, "1 in mv", locals())
for fmt, items, _ in iter_format(5):
nd = ndarray(items, shape=[5], format=fmt)
for i, v in enumerate(nd):
self.assertEqual(v, items[i])
self.assertTrue(v in nd)
if is_memoryview_format(fmt):
mv = memoryview(nd)
for i, v in enumerate(mv):
self.assertEqual(v, items[i])
self.assertTrue(v in mv)
def test_ndarray_slice_invalid(self):
items = [1,2,3,4,5,6,7,8]
# rvalue is not an exporter
xl = ndarray(items, shape=[8], flags=ND_WRITABLE)
ml = memoryview(xl)
self.assertRaises(TypeError, xl.__setitem__, slice(0,8,1), items)
self.assertRaises(TypeError, ml.__setitem__, slice(0,8,1), items)
# rvalue is not a full exporter
xl = ndarray(items, shape=[8], flags=ND_WRITABLE)
ex = ndarray(items, shape=[8], flags=ND_WRITABLE)
xr = ndarray(ex, getbuf=PyBUF_ND)
self.assertRaises(BufferError, xl.__setitem__, slice(0,8,1), xr)
# zero step
nd = ndarray(items, shape=[8], format="L", flags=ND_WRITABLE)
mv = memoryview(nd)
self.assertRaises(ValueError, nd.__getitem__, slice(0,1,0))
self.assertRaises(ValueError, mv.__getitem__, slice(0,1,0))
nd = ndarray(items, shape=[2,4], format="L", flags=ND_WRITABLE)
mv = memoryview(nd)
self.assertRaises(ValueError, nd.__getitem__,
(slice(0,1,1), slice(0,1,0)))
self.assertRaises(ValueError, nd.__getitem__,
(slice(0,1,0), slice(0,1,1)))
self.assertRaises(TypeError, nd.__getitem__, "@%$")
self.assertRaises(TypeError, nd.__getitem__, ("@%$", slice(0,1,1)))
self.assertRaises(TypeError, nd.__getitem__, (slice(0,1,1), {}))
# memoryview: not implemented
self.assertRaises(NotImplementedError, mv.__getitem__,
(slice(0,1,1), slice(0,1,0)))
self.assertRaises(TypeError, mv.__getitem__, "@%$")
# differing format
xl = ndarray(items, shape=[8], format="B", flags=ND_WRITABLE)
xr = ndarray(items, shape=[8], format="b")
ml = memoryview(xl)
mr = memoryview(xr)
self.assertRaises(ValueError, xl.__setitem__, slice(0,1,1), xr[7:8])
self.assertEqual(xl.tolist(), items)
self.assertRaises(ValueError, ml.__setitem__, slice(0,1,1), mr[7:8])
self.assertEqual(ml.tolist(), items)
# differing itemsize
xl = ndarray(items, shape=[8], format="B", flags=ND_WRITABLE)
        xr = ndarray(items, shape=[8], format="L")
ml = memoryview(xl)
mr = memoryview(xr)
self.assertRaises(ValueError, xl.__setitem__, slice(0,1,1), xr[7:8])
self.assertEqual(xl.tolist(), items)
self.assertRaises(ValueError, ml.__setitem__, slice(0,1,1), mr[7:8])
self.assertEqual(ml.tolist(), items)
# differing ndim
xl = ndarray(items, shape=[2, 4], format="b", flags=ND_WRITABLE)
xr = ndarray(items, shape=[8], format="b")
ml = memoryview(xl)
mr = memoryview(xr)
self.assertRaises(ValueError, xl.__setitem__, slice(0,1,1), xr[7:8])
self.assertEqual(xl.tolist(), [[1,2,3,4], [5,6,7,8]])
self.assertRaises(NotImplementedError, ml.__setitem__, slice(0,1,1),
mr[7:8])
# differing shape
xl = ndarray(items, shape=[8], format="b", flags=ND_WRITABLE)
xr = ndarray(items, shape=[8], format="b")
ml = memoryview(xl)
mr = memoryview(xr)
self.assertRaises(ValueError, xl.__setitem__, slice(0,2,1), xr[7:8])
self.assertEqual(xl.tolist(), items)
self.assertRaises(ValueError, ml.__setitem__, slice(0,2,1), mr[7:8])
self.assertEqual(ml.tolist(), items)
# _testbuffer.c module functions
self.assertRaises(TypeError, slice_indices, slice(0,1,2), {})
self.assertRaises(TypeError, slice_indices, "###########", 1)
self.assertRaises(ValueError, slice_indices, slice(0,1,0), 4)
x = ndarray(items, shape=[8], format="b", flags=ND_PIL)
self.assertRaises(TypeError, x.add_suboffsets)
ex = ndarray(items, shape=[8], format="B")
x = ndarray(ex, getbuf=PyBUF_SIMPLE)
self.assertRaises(TypeError, x.add_suboffsets)
def test_ndarray_slice_zero_shape(self):
items = [1,2,3,4,5,6,7,8,9,10,11,12]
x = ndarray(items, shape=[12], format="L", flags=ND_WRITABLE)
y = ndarray(items, shape=[12], format="L")
x[4:4] = y[9:9]
self.assertEqual(x.tolist(), items)
ml = memoryview(x)
mr = memoryview(y)
self.assertEqual(ml, x)
        self.assertEqual(mr, y)
ml[4:4] = mr[9:9]
self.assertEqual(ml.tolist(), items)
x = ndarray(items, shape=[3, 4], format="L", flags=ND_WRITABLE)
y = ndarray(items, shape=[4, 3], format="L")
x[1:2, 2:2] = y[1:2, 3:3]
self.assertEqual(x.tolist(), carray(items, [3, 4]))
def test_ndarray_slice_multidim(self):
shape_t = (2, 3, 5)
ndim = len(shape_t)
nitems = prod(shape_t)
for shape in permutations(shape_t):
fmt, items, _ = randitems(nitems)
itemsize = struct.calcsize(fmt)
for flags in (0, ND_PIL):
nd = ndarray(items, shape=shape, format=fmt, flags=flags)
lst = carray(items, shape)
for slices in rslices_ndim(ndim, shape):
listerr = None
try:
sliced = multislice(lst, slices)
except Exception as e:
listerr = e.__class__
nderr = None
try:
ndsliced = nd[slices]
except Exception as e:
nderr = e.__class__
if nderr or listerr:
self.assertIs(nderr, listerr)
else:
self.assertEqual(ndsliced.tolist(), sliced)
def test_ndarray_slice_redundant_suboffsets(self):
shape_t = (2, 3, 5, 2)
ndim = len(shape_t)
nitems = prod(shape_t)
for shape in permutations(shape_t):
fmt, items, _ = randitems(nitems)
itemsize = struct.calcsize(fmt)
nd = ndarray(items, shape=shape, format=fmt)
nd.add_suboffsets()
ex = ndarray(items, shape=shape, format=fmt)
ex.add_suboffsets()
mv = memoryview(ex)
lst = carray(items, shape)
for slices in rslices_ndim(ndim, shape):
listerr = None
try:
sliced = multislice(lst, slices)
except Exception as e:
listerr = e.__class__
nderr = None
try:
ndsliced = nd[slices]
except Exception as e:
nderr = e.__class__
if nderr or listerr:
self.assertIs(nderr, listerr)
else:
self.assertEqual(ndsliced.tolist(), sliced)
def test_ndarray_slice_assign_single(self):
for fmt, items, _ in iter_format(5):
for lslice in genslices(5):
for rslice in genslices(5):
for flags in (0, ND_PIL):
f = flags|ND_WRITABLE
nd = ndarray(items, shape=[5], format=fmt, flags=f)
ex = ndarray(items, shape=[5], format=fmt, flags=f)
mv = memoryview(ex)
lsterr = None
diff_structure = None
lst = items[:]
try:
lval = lst[lslice]
rval = lst[rslice]
lst[lslice] = lst[rslice]
diff_structure = len(lval) != len(rval)
except Exception as e:
lsterr = e.__class__
nderr = None
try:
nd[lslice] = nd[rslice]
except Exception as e:
nderr = e.__class__
if diff_structure: # ndarray cannot change shape
self.assertIs(nderr, ValueError)
else:
self.assertEqual(nd.tolist(), lst)
self.assertIs(nderr, lsterr)
if not is_memoryview_format(fmt):
continue
mverr = None
try:
mv[lslice] = mv[rslice]
except Exception as e:
mverr = e.__class__
if diff_structure: # memoryview cannot change shape
self.assertIs(mverr, ValueError)
else:
self.assertEqual(mv.tolist(), lst)
self.assertEqual(mv, nd)
self.assertIs(mverr, lsterr)
self.verify(mv, obj=ex,
itemsize=nd.itemsize, fmt=fmt, readonly=False,
ndim=nd.ndim, shape=nd.shape, strides=nd.strides,
lst=nd.tolist())
def test_ndarray_slice_assign_multidim(self):
shape_t = (2, 3, 5)
ndim = len(shape_t)
nitems = prod(shape_t)
for shape in permutations(shape_t):
fmt, items, _ = randitems(nitems)
for flags in (0, ND_PIL):
for _ in range(ITERATIONS):
lslices, rslices = randslice_from_shape(ndim, shape)
nd = ndarray(items, shape=shape, format=fmt,
flags=flags|ND_WRITABLE)
lst = carray(items, shape)
listerr = None
try:
result = multislice_assign(lst, lst, lslices, rslices)
except Exception as e:
listerr = e.__class__
nderr = None
try:
nd[lslices] = nd[rslices]
except Exception as e:
nderr = e.__class__
if nderr or listerr:
self.assertIs(nderr, listerr)
else:
self.assertEqual(nd.tolist(), result)
def test_ndarray_random(self):
# construction of valid arrays
for _ in range(ITERATIONS):
for fmt in fmtdict['@']:
itemsize = struct.calcsize(fmt)
t = rand_structure(itemsize, True, maxdim=MAXDIM,
maxshape=MAXSHAPE)
self.assertTrue(verify_structure(*t))
items = randitems_from_structure(fmt, t)
x = ndarray_from_structure(items, fmt, t)
xlist = x.tolist()
mv = memoryview(x)
if is_memoryview_format(fmt):
mvlist = mv.tolist()
self.assertEqual(mvlist, xlist)
if t[2] > 0:
# ndim > 0: test against suboffsets representation.
y = ndarray_from_structure(items, fmt, t, flags=ND_PIL)
ylist = y.tolist()
self.assertEqual(xlist, ylist)
mv = memoryview(y)
if is_memoryview_format(fmt):
self.assertEqual(mv, y)
mvlist = mv.tolist()
self.assertEqual(mvlist, ylist)
if numpy_array:
shape = t[3]
if 0 in shape:
continue # http://projects.scipy.org/numpy/ticket/1910
z = numpy_array_from_structure(items, fmt, t)
self.verify(x, obj=None,
itemsize=z.itemsize, fmt=fmt, readonly=False,
ndim=z.ndim, shape=z.shape, strides=z.strides,
lst=z.tolist())
def test_ndarray_random_invalid(self):
# exceptions during construction of invalid arrays
for _ in range(ITERATIONS):
for fmt in fmtdict['@']:
itemsize = struct.calcsize(fmt)
t = rand_structure(itemsize, False, maxdim=MAXDIM,
maxshape=MAXSHAPE)
self.assertFalse(verify_structure(*t))
items = randitems_from_structure(fmt, t)
nderr = False
try:
x = ndarray_from_structure(items, fmt, t)
except Exception as e:
nderr = e.__class__
self.assertTrue(nderr)
if numpy_array:
numpy_err = False
try:
y = numpy_array_from_structure(items, fmt, t)
except Exception as e:
numpy_err = e.__class__
if 0: # http://projects.scipy.org/numpy/ticket/1910
self.assertTrue(numpy_err)
def test_ndarray_random_slice_assign(self):
# valid slice assignments
for _ in range(ITERATIONS):
for fmt in fmtdict['@']:
itemsize = struct.calcsize(fmt)
lshape, rshape, lslices, rslices = \
rand_aligned_slices(maxdim=MAXDIM, maxshape=MAXSHAPE)
tl = rand_structure(itemsize, True, shape=lshape)
tr = rand_structure(itemsize, True, shape=rshape)
self.assertTrue(verify_structure(*tl))
self.assertTrue(verify_structure(*tr))
litems = randitems_from_structure(fmt, tl)
ritems = randitems_from_structure(fmt, tr)
xl = ndarray_from_structure(litems, fmt, tl)
xr = ndarray_from_structure(ritems, fmt, tr)
xl[lslices] = xr[rslices]
xllist = xl.tolist()
xrlist = xr.tolist()
ml = memoryview(xl)
mr = memoryview(xr)
self.assertEqual(ml.tolist(), xllist)
self.assertEqual(mr.tolist(), xrlist)
if tl[2] > 0 and tr[2] > 0:
# ndim > 0: test against suboffsets representation.
yl = ndarray_from_structure(litems, fmt, tl, flags=ND_PIL)
yr = ndarray_from_structure(ritems, fmt, tr, flags=ND_PIL)
yl[lslices] = yr[rslices]
yllist = yl.tolist()
yrlist = yr.tolist()
self.assertEqual(xllist, yllist)
self.assertEqual(xrlist, yrlist)
ml = memoryview(yl)
mr = memoryview(yr)
self.assertEqual(ml.tolist(), yllist)
self.assertEqual(mr.tolist(), yrlist)
if numpy_array:
if 0 in lshape or 0 in rshape:
continue # http://projects.scipy.org/numpy/ticket/1910
zl = numpy_array_from_structure(litems, fmt, tl)
zr = numpy_array_from_structure(ritems, fmt, tr)
zl[lslices] = zr[rslices]
if not is_overlapping(tl) and not is_overlapping(tr):
# Slice assignment of overlapping structures
# is undefined in NumPy.
self.verify(xl, obj=None,
itemsize=zl.itemsize, fmt=fmt, readonly=False,
ndim=zl.ndim, shape=zl.shape,
strides=zl.strides, lst=zl.tolist())
self.verify(xr, obj=None,
itemsize=zr.itemsize, fmt=fmt, readonly=False,
ndim=zr.ndim, shape=zr.shape,
strides=zr.strides, lst=zr.tolist())
def test_ndarray_re_export(self):
items = [1,2,3,4,5,6,7,8,9,10,11,12]
nd = ndarray(items, shape=[3,4], flags=ND_PIL)
ex = ndarray(nd)
self.assertTrue(ex.flags & ND_PIL)
self.assertIs(ex.obj, nd)
self.assertEqual(ex.suboffsets, (0, -1))
self.assertFalse(ex.c_contiguous)
self.assertFalse(ex.f_contiguous)
self.assertFalse(ex.contiguous)
def test_ndarray_zero_shape(self):
# zeros in shape
for flags in (0, ND_PIL):
nd = ndarray([1,2,3], shape=[0], flags=flags)
mv = memoryview(nd)
self.assertEqual(mv, nd)
self.assertEqual(nd.tolist(), [])
self.assertEqual(mv.tolist(), [])
nd = ndarray([1,2,3], shape=[0,3,3], flags=flags)
self.assertEqual(nd.tolist(), [])
nd = ndarray([1,2,3], shape=[3,0,3], flags=flags)
self.assertEqual(nd.tolist(), [[], [], []])
nd = ndarray([1,2,3], shape=[3,3,0], flags=flags)
self.assertEqual(nd.tolist(),
[[[], [], []], [[], [], []], [[], [], []]])
def test_ndarray_zero_strides(self):
# zero strides
for flags in (0, ND_PIL):
nd = ndarray([1], shape=[5], strides=[0], flags=flags)
mv = memoryview(nd)
self.assertEqual(mv, nd)
self.assertEqual(nd.tolist(), [1, 1, 1, 1, 1])
self.assertEqual(mv.tolist(), [1, 1, 1, 1, 1])
def test_ndarray_offset(self):
nd = ndarray(list(range(20)), shape=[3], offset=7)
self.assertEqual(nd.offset, 7)
self.assertEqual(nd.tolist(), [7,8,9])
def test_ndarray_memoryview_from_buffer(self):
for flags in (0, ND_PIL):
nd = ndarray(list(range(3)), shape=[3], flags=flags)
m = nd.memoryview_from_buffer()
self.assertEqual(m, nd)
def test_ndarray_get_pointer(self):
for flags in (0, ND_PIL):
nd = ndarray(list(range(3)), shape=[3], flags=flags)
for i in range(3):
self.assertEqual(nd[i], get_pointer(nd, [i]))
def test_ndarray_tolist_null_strides(self):
ex = ndarray(list(range(20)), shape=[2,2,5])
nd = ndarray(ex, getbuf=PyBUF_ND|PyBUF_FORMAT)
self.assertEqual(nd.tolist(), ex.tolist())
m = memoryview(ex)
self.assertEqual(m.tolist(), ex.tolist())
def test_ndarray_cmp_contig(self):
self.assertFalse(cmp_contig(b"123", b"456"))
x = ndarray(list(range(12)), shape=[3,4])
y = ndarray(list(range(12)), shape=[4,3])
self.assertFalse(cmp_contig(x, y))
x = ndarray([1], shape=[1], format="B")
self.assertTrue(cmp_contig(x, b'\x01'))
self.assertTrue(cmp_contig(b'\x01', x))
def test_ndarray_hash(self):
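        # Hashing follows the memoryview rules: writable exporters (such as
        # array.array below) cannot be hashed, and for read-only exporters
        # hash(nd) must equal hash(nd.tobytes()) whatever the shape, memory
        # order or suboffsets.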
a = array.array('L', [1,2,3])
nd = ndarray(a)
self.assertRaises(ValueError, hash, nd)
# one-dimensional
b = bytes(list(range(12)))
nd = ndarray(list(range(12)), shape=[12])
self.assertEqual(hash(nd), hash(b))
# C-contiguous
nd = ndarray(list(range(12)), shape=[3,4])
self.assertEqual(hash(nd), hash(b))
nd = ndarray(list(range(12)), shape=[3,2,2])
self.assertEqual(hash(nd), hash(b))
# Fortran contiguous
b = bytes(transpose(list(range(12)), shape=[4,3]))
nd = ndarray(list(range(12)), shape=[3,4], flags=ND_FORTRAN)
self.assertEqual(hash(nd), hash(b))
b = bytes(transpose(list(range(12)), shape=[2,3,2]))
nd = ndarray(list(range(12)), shape=[2,3,2], flags=ND_FORTRAN)
self.assertEqual(hash(nd), hash(b))
# suboffsets
b = bytes(list(range(12)))
nd = ndarray(list(range(12)), shape=[2,2,3], flags=ND_PIL)
self.assertEqual(hash(nd), hash(b))
# non-byte formats
nd = ndarray(list(range(12)), shape=[2,2,3], format='L')
self.assertEqual(hash(nd), hash(nd.tobytes()))
def test_py_buffer_to_contiguous(self):
# The requests are used in _testbuffer.c:py_buffer_to_contiguous
# to generate buffers without full information for testing.
requests = (
# distinct flags
PyBUF_INDIRECT, PyBUF_STRIDES, PyBUF_ND, PyBUF_SIMPLE,
# compound requests
PyBUF_FULL, PyBUF_FULL_RO,
PyBUF_RECORDS, PyBUF_RECORDS_RO,
PyBUF_STRIDED, PyBUF_STRIDED_RO,
PyBUF_CONTIG, PyBUF_CONTIG_RO,
)
# no buffer interface
self.assertRaises(TypeError, py_buffer_to_contiguous, {}, 'F',
PyBUF_FULL_RO)
# scalar, read-only request
nd = ndarray(9, shape=(), format="L", flags=ND_WRITABLE)
for order in ['C', 'F', 'A']:
for request in requests:
b = py_buffer_to_contiguous(nd, order, request)
self.assertEqual(b, nd.tobytes())
# zeros in shape
nd = ndarray([1], shape=[0], format="L", flags=ND_WRITABLE)
for order in ['C', 'F', 'A']:
for request in requests:
b = py_buffer_to_contiguous(nd, order, request)
self.assertEqual(b, b'')
nd = ndarray(list(range(8)), shape=[2, 0, 7], format="L",
flags=ND_WRITABLE)
for order in ['C', 'F', 'A']:
for request in requests:
b = py_buffer_to_contiguous(nd, order, request)
self.assertEqual(b, b'')
### One-dimensional arrays are trivial, since Fortran and C order
### are the same.
# one-dimensional
for f in [0, ND_FORTRAN]:
nd = ndarray([1], shape=[1], format="h", flags=f|ND_WRITABLE)
ndbytes = nd.tobytes()
for order in ['C', 'F', 'A']:
for request in requests:
b = py_buffer_to_contiguous(nd, order, request)
self.assertEqual(b, ndbytes)
nd = ndarray([1, 2, 3], shape=[3], format="b", flags=f|ND_WRITABLE)
ndbytes = nd.tobytes()
for order in ['C', 'F', 'A']:
for request in requests:
b = py_buffer_to_contiguous(nd, order, request)
self.assertEqual(b, ndbytes)
# one-dimensional, non-contiguous input
nd = ndarray([1, 2, 3], shape=[2], strides=[2], flags=ND_WRITABLE)
ndbytes = nd.tobytes()
for order in ['C', 'F', 'A']:
for request in [PyBUF_STRIDES, PyBUF_FULL]:
b = py_buffer_to_contiguous(nd, order, request)
self.assertEqual(b, ndbytes)
nd = nd[::-1]
ndbytes = nd.tobytes()
for order in ['C', 'F', 'A']:
for request in requests:
try:
b = py_buffer_to_contiguous(nd, order, request)
except BufferError:
continue
self.assertEqual(b, ndbytes)
###
### Multi-dimensional arrays:
###
### The goal here is to preserve the logical representation of the
### input array but change the physical representation if necessary.
###
### _testbuffer example:
### ====================
###
### C input array:
### --------------
### >>> nd = ndarray(list(range(12)), shape=[3, 4])
### >>> nd.tolist()
### [[0, 1, 2, 3],
### [4, 5, 6, 7],
### [8, 9, 10, 11]]
###
### Fortran output:
### ---------------
### >>> py_buffer_to_contiguous(nd, 'F', PyBUF_FULL_RO)
### >>> b'\x00\x04\x08\x01\x05\t\x02\x06\n\x03\x07\x0b'
###
### The return value corresponds to this input list for
### _testbuffer's ndarray:
### >>> nd = ndarray([0,4,8,1,5,9,2,6,10,3,7,11], shape=[3,4],
### flags=ND_FORTRAN)
### >>> nd.tolist()
### [[0, 1, 2, 3],
### [4, 5, 6, 7],
### [8, 9, 10, 11]]
###
### The logical array is the same, but the values in memory are now
### in Fortran order.
###
### NumPy example:
### ==============
### _testbuffer's ndarray takes lists to initialize the memory.
### Here's the same sequence in NumPy:
###
### C input:
### --------
### >>> nd = ndarray(buffer=bytearray(list(range(12))),
### shape=[3, 4], dtype='B')
### >>> nd
### array([[ 0, 1, 2, 3],
### [ 4, 5, 6, 7],
### [ 8, 9, 10, 11]], dtype=uint8)
###
### Fortran output:
### ---------------
### >>> fortran_buf = nd.tostring(order='F')
### >>> fortran_buf
### b'\x00\x04\x08\x01\x05\t\x02\x06\n\x03\x07\x0b'
###
### >>> nd = ndarray(buffer=fortran_buf, shape=[3, 4],
### dtype='B', order='F')
###
### >>> nd
### array([[ 0, 1, 2, 3],
### [ 4, 5, 6, 7],
### [ 8, 9, 10, 11]], dtype=uint8)
###
# multi-dimensional, contiguous input
lst = list(range(12))
for f in [0, ND_FORTRAN]:
nd = ndarray(lst, shape=[3, 4], flags=f|ND_WRITABLE)
if numpy_array:
na = numpy_array(buffer=bytearray(lst),
shape=[3, 4], dtype='B',
order='C' if f == 0 else 'F')
# 'C' request
if f == ND_FORTRAN: # 'F' to 'C'
x = ndarray(transpose(lst, [4, 3]), shape=[3, 4],
flags=ND_WRITABLE)
expected = x.tobytes()
else:
expected = nd.tobytes()
for request in requests:
try:
b = py_buffer_to_contiguous(nd, 'C', request)
except BufferError:
continue
self.assertEqual(b, expected)
# Check that output can be used as the basis for constructing
# a C array that is logically identical to the input array.
y = ndarray([v for v in b], shape=[3, 4], flags=ND_WRITABLE)
self.assertEqual(memoryview(y), memoryview(nd))
if numpy_array:
self.assertEqual(b, na.tostring(order='C'))
# 'F' request
if f == 0: # 'C' to 'F'
x = ndarray(transpose(lst, [3, 4]), shape=[4, 3],
flags=ND_WRITABLE)
else:
x = ndarray(lst, shape=[3, 4], flags=ND_WRITABLE)
expected = x.tobytes()
for request in [PyBUF_FULL, PyBUF_FULL_RO, PyBUF_INDIRECT,
PyBUF_STRIDES, PyBUF_ND]:
try:
b = py_buffer_to_contiguous(nd, 'F', request)
except BufferError:
continue
self.assertEqual(b, expected)
# Check that output can be used as the basis for constructing
# a Fortran array that is logically identical to the input array.
y = ndarray([v for v in b], shape=[3, 4], flags=ND_FORTRAN|ND_WRITABLE)
self.assertEqual(memoryview(y), memoryview(nd))
if numpy_array:
self.assertEqual(b, na.tostring(order='F'))
# 'A' request
if f == ND_FORTRAN:
x = ndarray(lst, shape=[3, 4], flags=ND_WRITABLE)
expected = x.tobytes()
else:
expected = nd.tobytes()
for request in [PyBUF_FULL, PyBUF_FULL_RO, PyBUF_INDIRECT,
PyBUF_STRIDES, PyBUF_ND]:
try:
b = py_buffer_to_contiguous(nd, 'A', request)
except BufferError:
continue
self.assertEqual(b, expected)
# Check that output can be used as the basis for constructing
# an array with order=f that is logically identical to the input
# array.
y = ndarray([v for v in b], shape=[3, 4], flags=f|ND_WRITABLE)
self.assertEqual(memoryview(y), memoryview(nd))
if numpy_array:
self.assertEqual(b, na.tostring(order='A'))
# multi-dimensional, non-contiguous input
nd = ndarray(list(range(12)), shape=[3, 4], flags=ND_WRITABLE|ND_PIL)
# 'C'
b = py_buffer_to_contiguous(nd, 'C', PyBUF_FULL_RO)
self.assertEqual(b, nd.tobytes())
y = ndarray([v for v in b], shape=[3, 4], flags=ND_WRITABLE)
self.assertEqual(memoryview(y), memoryview(nd))
# 'F'
b = py_buffer_to_contiguous(nd, 'F', PyBUF_FULL_RO)
x = ndarray(transpose(lst, [3, 4]), shape=[4, 3], flags=ND_WRITABLE)
self.assertEqual(b, x.tobytes())
y = ndarray([v for v in b], shape=[3, 4], flags=ND_FORTRAN|ND_WRITABLE)
self.assertEqual(memoryview(y), memoryview(nd))
# 'A'
b = py_buffer_to_contiguous(nd, 'A', PyBUF_FULL_RO)
self.assertEqual(b, nd.tobytes())
y = ndarray([v for v in b], shape=[3, 4], flags=ND_WRITABLE)
self.assertEqual(memoryview(y), memoryview(nd))
def test_memoryview_construction(self):
items_shape = [(9, []), ([1,2,3], [3]), (list(range(2*3*5)), [2,3,5])]
# NumPy style, C-contiguous:
for items, shape in items_shape:
# From PEP-3118 compliant exporter:
ex = ndarray(items, shape=shape)
m = memoryview(ex)
self.assertTrue(m.c_contiguous)
self.assertTrue(m.contiguous)
ndim = len(shape)
strides = strides_from_shape(ndim, shape, 1, 'C')
lst = carray(items, shape)
self.verify(m, obj=ex,
itemsize=1, fmt='B', readonly=True,
ndim=ndim, shape=shape, strides=strides,
lst=lst)
# From memoryview:
m2 = memoryview(m)
self.verify(m2, obj=ex,
itemsize=1, fmt='B', readonly=True,
ndim=ndim, shape=shape, strides=strides,
lst=lst)
# PyMemoryView_FromBuffer(): no strides
nd = ndarray(ex, getbuf=PyBUF_CONTIG_RO|PyBUF_FORMAT)
self.assertEqual(nd.strides, ())
m = nd.memoryview_from_buffer()
self.verify(m, obj=None,
itemsize=1, fmt='B', readonly=True,
ndim=ndim, shape=shape, strides=strides,
lst=lst)
# PyMemoryView_FromBuffer(): no format, shape, strides
nd = ndarray(ex, getbuf=PyBUF_SIMPLE)
self.assertEqual(nd.format, '')
self.assertEqual(nd.shape, ())
self.assertEqual(nd.strides, ())
m = nd.memoryview_from_buffer()
lst = [items] if ndim == 0 else items
self.verify(m, obj=None,
itemsize=1, fmt='B', readonly=True,
ndim=1, shape=[ex.nbytes], strides=(1,),
lst=lst)
# NumPy style, Fortran contiguous:
for items, shape in items_shape:
# From PEP-3118 compliant exporter:
ex = ndarray(items, shape=shape, flags=ND_FORTRAN)
m = memoryview(ex)
self.assertTrue(m.f_contiguous)
self.assertTrue(m.contiguous)
ndim = len(shape)
strides = strides_from_shape(ndim, shape, 1, 'F')
lst = farray(items, shape)
self.verify(m, obj=ex,
itemsize=1, fmt='B', readonly=True,
ndim=ndim, shape=shape, strides=strides,
lst=lst)
# From memoryview:
m2 = memoryview(m)
self.verify(m2, obj=ex,
itemsize=1, fmt='B', readonly=True,
ndim=ndim, shape=shape, strides=strides,
lst=lst)
# PIL style:
for items, shape in items_shape[1:]:
# From PEP-3118 compliant exporter:
ex = ndarray(items, shape=shape, flags=ND_PIL)
m = memoryview(ex)
ndim = len(shape)
lst = carray(items, shape)
self.verify(m, obj=ex,
itemsize=1, fmt='B', readonly=True,
ndim=ndim, shape=shape, strides=ex.strides,
lst=lst)
# From memoryview:
m2 = memoryview(m)
self.verify(m2, obj=ex,
itemsize=1, fmt='B', readonly=True,
ndim=ndim, shape=shape, strides=ex.strides,
lst=lst)
# Invalid number of arguments:
self.assertRaises(TypeError, memoryview, b'9', 'x')
# Not a buffer provider:
self.assertRaises(TypeError, memoryview, {})
# Non-compliant buffer provider:
ex = ndarray([1,2,3], shape=[3])
nd = ndarray(ex, getbuf=PyBUF_SIMPLE)
self.assertRaises(BufferError, memoryview, nd)
nd = ndarray(ex, getbuf=PyBUF_CONTIG_RO|PyBUF_FORMAT)
self.assertRaises(BufferError, memoryview, nd)
# ndim > 64
nd = ndarray([1]*128, shape=[1]*128, format='L')
self.assertRaises(ValueError, memoryview, nd)
self.assertRaises(ValueError, nd.memoryview_from_buffer)
self.assertRaises(ValueError, get_contiguous, nd, PyBUF_READ, 'C')
self.assertRaises(ValueError, get_contiguous, nd, PyBUF_READ, 'F')
self.assertRaises(ValueError, get_contiguous, nd[::-1], PyBUF_READ, 'C')
def test_memoryview_cast_zero_shape(self):
# Casts are undefined if buffer is multidimensional and shape
# contains zeros. These arrays are regarded as C-contiguous by
# Numpy and PyBuffer_GetContiguous(), so they are not caught by
# the test for C-contiguity in memory_cast().
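        # Illustrative check of that premise (assuming NumPy is available):
        # >>> import numpy as np
        # >>> np.empty((0, 3, 3), dtype='B').flags['C_CONTIGUOUS']
        # True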
items = [1,2,3]
        for shape in ([0,3,3], [3,0,3], [3,3,0]):
ex = ndarray(items, shape=shape)
self.assertTrue(ex.c_contiguous)
msrc = memoryview(ex)
self.assertRaises(TypeError, msrc.cast, 'c')
# Monodimensional empty view can be cast (issue #19014).
for fmt, _, _ in iter_format(1, 'memoryview'):
msrc = memoryview(b'')
m = msrc.cast(fmt)
self.assertEqual(m.tobytes(), b'')
self.assertEqual(m.tolist(), [])
check_sizeof = support.check_sizeof
def test_memoryview_sizeof(self):
check = self.check_sizeof
vsize = support.calcvobjsize
base_struct = 'Pnin 2P2n2i5P P'
per_dim = '3n'
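        # base_struct spells out the fixed fields of a memoryview object for
        # support.calcvobjsize; '3n' accounts for the three Py_ssize_t entries
        # (shape, strides, suboffsets) that the managed buffer stores per
        # dimension.  These format strings mirror the current CPython layout
        # and need updating if memoryobject.c changes.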
items = list(range(8))
check(memoryview(b''), vsize(base_struct + 1 * per_dim))
a = ndarray(items, shape=[2, 4], format="b")
check(memoryview(a), vsize(base_struct + 2 * per_dim))
a = ndarray(items, shape=[2, 2, 2], format="b")
check(memoryview(a), vsize(base_struct + 3 * per_dim))
def test_memoryview_struct_module(self):
class INT(object):
def __init__(self, val):
self.val = val
def __int__(self):
return self.val
class IDX(object):
def __init__(self, val):
self.val = val
def __index__(self):
return self.val
def f(): return 7
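        # The values below deliberately mix objects that struct.pack_into()
        # accepts with objects it must reject.  For every format, the
        # memoryview assignment m[1] = v has to agree with struct: either
        # both succeed with the same result or both raise.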
values = [INT(9), IDX(9),
2.2+3j, Decimal("-21.1"), 12.2, Fraction(5, 2),
[1,2,3], {4,5,6}, {7:8}, (), (9,),
True, False, None, NotImplemented,
b'a', b'abc', bytearray(b'a'), bytearray(b'abc'),
'a', 'abc', r'a', r'abc',
f, lambda x: x]
for fmt, items, item in iter_format(10, 'memoryview'):
ex = ndarray(items, shape=[10], format=fmt, flags=ND_WRITABLE)
nd = ndarray(items, shape=[10], format=fmt, flags=ND_WRITABLE)
m = memoryview(ex)
struct.pack_into(fmt, nd, 0, item)
m[0] = item
self.assertEqual(m[0], nd[0])
itemsize = struct.calcsize(fmt)
if 'P' in fmt:
continue
for v in values:
struct_err = None
try:
struct.pack_into(fmt, nd, itemsize, v)
except struct.error:
struct_err = struct.error
mv_err = None
try:
m[1] = v
except (TypeError, ValueError) as e:
mv_err = e.__class__
if struct_err or mv_err:
self.assertIsNot(struct_err, None)
self.assertIsNot(mv_err, None)
else:
self.assertEqual(m[1], nd[1])
def test_memoryview_cast_zero_strides(self):
# Casts are undefined if strides contains zeros. These arrays are
# (sometimes!) regarded as C-contiguous by Numpy, but not by
# PyBuffer_GetContiguous().
ex = ndarray([1,2,3], shape=[3], strides=[0])
self.assertFalse(ex.c_contiguous)
msrc = memoryview(ex)
self.assertRaises(TypeError, msrc.cast, 'c')
def test_memoryview_cast_invalid(self):
# invalid format
for sfmt in NON_BYTE_FORMAT:
sformat = '@' + sfmt if randrange(2) else sfmt
ssize = struct.calcsize(sformat)
for dfmt in NON_BYTE_FORMAT:
dformat = '@' + dfmt if randrange(2) else dfmt
dsize = struct.calcsize(dformat)
ex = ndarray(list(range(32)), shape=[32//ssize], format=sformat)
msrc = memoryview(ex)
self.assertRaises(TypeError, msrc.cast, dfmt, [32//dsize])
for sfmt, sitems, _ in iter_format(1):
ex = ndarray(sitems, shape=[1], format=sfmt)
msrc = memoryview(ex)
for dfmt, _, _ in iter_format(1):
if not is_memoryview_format(dfmt):
self.assertRaises(ValueError, msrc.cast, dfmt,
[32//dsize])
else:
if not is_byte_format(sfmt) and not is_byte_format(dfmt):
self.assertRaises(TypeError, msrc.cast, dfmt,
[32//dsize])
# invalid shape
size_h = struct.calcsize('h')
size_d = struct.calcsize('d')
ex = ndarray(list(range(2*2*size_d)), shape=[2,2,size_d], format='h')
msrc = memoryview(ex)
self.assertRaises(TypeError, msrc.cast, shape=[2,2,size_h], format='d')
ex = ndarray(list(range(120)), shape=[1,2,3,4,5])
m = memoryview(ex)
# incorrect number of args
self.assertRaises(TypeError, m.cast)
self.assertRaises(TypeError, m.cast, 1, 2, 3)
# incorrect dest format type
self.assertRaises(TypeError, m.cast, {})
# incorrect dest format
self.assertRaises(ValueError, m.cast, "X")
self.assertRaises(ValueError, m.cast, "@X")
self.assertRaises(ValueError, m.cast, "@XY")
# dest format not implemented
self.assertRaises(ValueError, m.cast, "=B")
self.assertRaises(ValueError, m.cast, "!L")
self.assertRaises(ValueError, m.cast, "<P")
self.assertRaises(ValueError, m.cast, ">l")
self.assertRaises(ValueError, m.cast, "BI")
self.assertRaises(ValueError, m.cast, "xBI")
# src format not implemented
ex = ndarray([(1,2), (3,4)], shape=[2], format="II")
m = memoryview(ex)
self.assertRaises(NotImplementedError, m.__getitem__, 0)
self.assertRaises(NotImplementedError, m.__setitem__, 0, 8)
self.assertRaises(NotImplementedError, m.tolist)
# incorrect shape type
ex = ndarray(list(range(120)), shape=[1,2,3,4,5])
m = memoryview(ex)
self.assertRaises(TypeError, m.cast, "B", shape={})
# incorrect shape elements
ex = ndarray(list(range(120)), shape=[2*3*4*5])
m = memoryview(ex)
self.assertRaises(OverflowError, m.cast, "B", shape=[2**64])
self.assertRaises(ValueError, m.cast, "B", shape=[-1])
self.assertRaises(ValueError, m.cast, "B", shape=[2,3,4,5,6,7,-1])
self.assertRaises(ValueError, m.cast, "B", shape=[2,3,4,5,6,7,0])
self.assertRaises(TypeError, m.cast, "B", shape=[2,3,4,5,6,7,'x'])
# N-D -> N-D cast
ex = ndarray(list([9 for _ in range(3*5*7*11)]), shape=[3,5,7,11])
m = memoryview(ex)
self.assertRaises(TypeError, m.cast, "I", shape=[2,3,4,5])
# cast with ndim > 64
nd = ndarray(list(range(128)), shape=[128], format='I')
m = memoryview(nd)
self.assertRaises(ValueError, m.cast, 'I', [1]*128)
# view->len not a multiple of itemsize
ex = ndarray(list([9 for _ in range(3*5*7*11)]), shape=[3*5*7*11])
m = memoryview(ex)
self.assertRaises(TypeError, m.cast, "I", shape=[2,3,4,5])
# product(shape) * itemsize != buffer size
ex = ndarray(list([9 for _ in range(3*5*7*11)]), shape=[3*5*7*11])
m = memoryview(ex)
self.assertRaises(TypeError, m.cast, "B", shape=[2,3,4,5])
# product(shape) * itemsize overflow
nd = ndarray(list(range(128)), shape=[128], format='I')
m1 = memoryview(nd)
nd = ndarray(list(range(128)), shape=[128], format='B')
m2 = memoryview(nd)
if sys.maxsize == 2**63-1:
self.assertRaises(TypeError, m1.cast, 'B',
[7, 7, 73, 127, 337, 92737, 649657])
self.assertRaises(ValueError, m1.cast, 'B',
[2**20, 2**20, 2**10, 2**10, 2**3])
self.assertRaises(ValueError, m2.cast, 'I',
[2**20, 2**20, 2**10, 2**10, 2**1])
else:
self.assertRaises(TypeError, m1.cast, 'B',
[1, 2147483647])
self.assertRaises(ValueError, m1.cast, 'B',
[2**10, 2**10, 2**5, 2**5, 2**1])
self.assertRaises(ValueError, m2.cast, 'I',
[2**10, 2**10, 2**5, 2**3, 2**1])
def test_memoryview_cast(self):
bytespec = (
('B', lambda ex: list(ex.tobytes())),
('b', lambda ex: [x-256 if x > 127 else x for x in list(ex.tobytes())]),
('c', lambda ex: [bytes(chr(x), 'latin-1') for x in list(ex.tobytes())]),
)
def iter_roundtrip(ex, m, items, fmt):
srcsize = struct.calcsize(fmt)
for bytefmt, to_bytelist in bytespec:
m2 = m.cast(bytefmt)
lst = to_bytelist(ex)
self.verify(m2, obj=ex,
itemsize=1, fmt=bytefmt, readonly=False,
ndim=1, shape=[31*srcsize], strides=(1,),
lst=lst, cast=True)
m3 = m2.cast(fmt)
self.assertEqual(m3, ex)
lst = ex.tolist()
self.verify(m3, obj=ex,
itemsize=srcsize, fmt=fmt, readonly=False,
ndim=1, shape=[31], strides=(srcsize,),
lst=lst, cast=True)
# cast from ndim = 0 to ndim = 1
srcsize = struct.calcsize('I')
ex = ndarray(9, shape=[], format='I')
destitems, destshape = cast_items(ex, 'B', 1)
m = memoryview(ex)
m2 = m.cast('B')
self.verify(m2, obj=ex,
itemsize=1, fmt='B', readonly=True,
ndim=1, shape=destshape, strides=(1,),
lst=destitems, cast=True)
# cast from ndim = 1 to ndim = 0
destsize = struct.calcsize('I')
ex = ndarray([9]*destsize, shape=[destsize], format='B')
destitems, destshape = cast_items(ex, 'I', destsize, shape=[])
m = memoryview(ex)
m2 = m.cast('I', shape=[])
self.verify(m2, obj=ex,
itemsize=destsize, fmt='I', readonly=True,
ndim=0, shape=(), strides=(),
lst=destitems, cast=True)
# array.array: roundtrip to/from bytes
for fmt, items, _ in iter_format(31, 'array'):
ex = array.array(fmt, items)
m = memoryview(ex)
iter_roundtrip(ex, m, items, fmt)
# ndarray: roundtrip to/from bytes
for fmt, items, _ in iter_format(31, 'memoryview'):
ex = ndarray(items, shape=[31], format=fmt, flags=ND_WRITABLE)
m = memoryview(ex)
iter_roundtrip(ex, m, items, fmt)
def test_memoryview_cast_1D_ND(self):
# Cast between C-contiguous buffers. At least one buffer must
# be 1D, at least one format must be 'c', 'b' or 'B'.
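        # Illustrative sketch of such a cast (little-endian build assumed):
        # >>> m = memoryview(b'\x01\x00\x02\x00\x03\x00\x04\x00')
        # >>> m.cast('h', shape=[2, 2]).tolist()
        # [[1, 2], [3, 4]]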
for _tshape in gencastshapes():
for char in fmtdict['@']:
# Casts to _Bool are undefined if the source contains values
# other than 0 or 1.
if char == "?":
continue
tfmt = ('', '@')[randrange(2)] + char
tsize = struct.calcsize(tfmt)
n = prod(_tshape) * tsize
obj = 'memoryview' if is_byte_format(tfmt) else 'bytefmt'
for fmt, items, _ in iter_format(n, obj):
size = struct.calcsize(fmt)
shape = [n] if n > 0 else []
tshape = _tshape + [size]
ex = ndarray(items, shape=shape, format=fmt)
m = memoryview(ex)
titems, tshape = cast_items(ex, tfmt, tsize, shape=tshape)
if titems is None:
self.assertRaises(TypeError, m.cast, tfmt, tshape)
continue
if titems == 'nan':
continue # NaNs in lists are a recipe for trouble.
# 1D -> ND
nd = ndarray(titems, shape=tshape, format=tfmt)
m2 = m.cast(tfmt, shape=tshape)
ndim = len(tshape)
strides = nd.strides
lst = nd.tolist()
self.verify(m2, obj=ex,
itemsize=tsize, fmt=tfmt, readonly=True,
ndim=ndim, shape=tshape, strides=strides,
lst=lst, cast=True)
# ND -> 1D
m3 = m2.cast(fmt)
m4 = m2.cast(fmt, shape=shape)
ndim = len(shape)
strides = ex.strides
lst = ex.tolist()
self.verify(m3, obj=ex,
itemsize=size, fmt=fmt, readonly=True,
ndim=ndim, shape=shape, strides=strides,
lst=lst, cast=True)
self.verify(m4, obj=ex,
itemsize=size, fmt=fmt, readonly=True,
ndim=ndim, shape=shape, strides=strides,
lst=lst, cast=True)
if ctypes:
# format: "T{>l:x:>d:y:}"
class BEPoint(ctypes.BigEndianStructure):
_fields_ = [("x", ctypes.c_long), ("y", ctypes.c_double)]
point = BEPoint(100, 200.1)
m1 = memoryview(point)
m2 = m1.cast('B')
self.assertEqual(m2.obj, point)
self.assertEqual(m2.itemsize, 1)
self.assertIs(m2.readonly, False)
self.assertEqual(m2.ndim, 1)
self.assertEqual(m2.shape, (m2.nbytes,))
self.assertEqual(m2.strides, (1,))
self.assertEqual(m2.suboffsets, ())
x = ctypes.c_double(1.2)
m1 = memoryview(x)
m2 = m1.cast('c')
self.assertEqual(m2.obj, x)
self.assertEqual(m2.itemsize, 1)
self.assertIs(m2.readonly, False)
self.assertEqual(m2.ndim, 1)
self.assertEqual(m2.shape, (m2.nbytes,))
self.assertEqual(m2.strides, (1,))
self.assertEqual(m2.suboffsets, ())
def test_memoryview_tolist(self):
# Most tolist() tests are in self.verify() etc.
a = array.array('h', list(range(-6, 6)))
m = memoryview(a)
self.assertEqual(m, a)
self.assertEqual(m.tolist(), a.tolist())
a = a[2::3]
m = m[2::3]
self.assertEqual(m, a)
self.assertEqual(m.tolist(), a.tolist())
ex = ndarray(list(range(2*3*5*7*11)), shape=[11,2,7,3,5], format='L')
m = memoryview(ex)
self.assertEqual(m.tolist(), ex.tolist())
ex = ndarray([(2, 5), (7, 11)], shape=[2], format='lh')
m = memoryview(ex)
self.assertRaises(NotImplementedError, m.tolist)
ex = ndarray([b'12345'], shape=[1], format="s")
m = memoryview(ex)
self.assertRaises(NotImplementedError, m.tolist)
ex = ndarray([b"a",b"b",b"c",b"d",b"e",b"f"], shape=[2,3], format='s')
m = memoryview(ex)
self.assertRaises(NotImplementedError, m.tolist)
def test_memoryview_repr(self):
m = memoryview(bytearray(9))
r = m.__repr__()
self.assertTrue(r.startswith("<memory"))
m.release()
r = m.__repr__()
self.assertTrue(r.startswith("<released"))
def test_memoryview_sequence(self):
for fmt in ('d', 'f'):
inf = float(3e400)
ex = array.array(fmt, [1.0, inf, 3.0])
m = memoryview(ex)
self.assertIn(1.0, m)
self.assertIn(5e700, m)
self.assertIn(3.0, m)
ex = ndarray(9.0, [], format='f')
m = memoryview(ex)
self.assertRaises(TypeError, eval, "9.0 in m", locals())
@contextlib.contextmanager
def assert_out_of_bounds_error(self, dim):
with self.assertRaises(IndexError) as cm:
yield
self.assertEqual(str(cm.exception),
"index out of bounds on dimension %d" % (dim,))
def test_memoryview_index(self):
# ndim = 0
ex = ndarray(12.5, shape=[], format='d')
m = memoryview(ex)
self.assertEqual(m[()], 12.5)
self.assertEqual(m[...], m)
self.assertEqual(m[...], ex)
self.assertRaises(TypeError, m.__getitem__, 0)
ex = ndarray((1,2,3), shape=[], format='iii')
m = memoryview(ex)
self.assertRaises(NotImplementedError, m.__getitem__, ())
# range
ex = ndarray(list(range(7)), shape=[7], flags=ND_WRITABLE)
m = memoryview(ex)
self.assertRaises(IndexError, m.__getitem__, 2**64)
self.assertRaises(TypeError, m.__getitem__, 2.0)
self.assertRaises(TypeError, m.__getitem__, 0.0)
# out of bounds
self.assertRaises(IndexError, m.__getitem__, -8)
self.assertRaises(IndexError, m.__getitem__, 8)
# multi-dimensional
ex = ndarray(list(range(12)), shape=[3,4], flags=ND_WRITABLE)
m = memoryview(ex)
self.assertEqual(m[0, 0], 0)
self.assertEqual(m[2, 0], 8)
self.assertEqual(m[2, 3], 11)
self.assertEqual(m[-1, -1], 11)
self.assertEqual(m[-3, -4], 0)
# out of bounds
for index in (3, -4):
with self.assert_out_of_bounds_error(dim=1):
m[index, 0]
for index in (4, -5):
with self.assert_out_of_bounds_error(dim=2):
m[0, index]
self.assertRaises(IndexError, m.__getitem__, (2**64, 0))
self.assertRaises(IndexError, m.__getitem__, (0, 2**64))
self.assertRaises(TypeError, m.__getitem__, (0, 0, 0))
self.assertRaises(TypeError, m.__getitem__, (0.0, 0.0))
# Not implemented: multidimensional sub-views
self.assertRaises(NotImplementedError, m.__getitem__, ())
self.assertRaises(NotImplementedError, m.__getitem__, 0)
def test_memoryview_assign(self):
# ndim = 0
ex = ndarray(12.5, shape=[], format='f', flags=ND_WRITABLE)
m = memoryview(ex)
m[()] = 22.5
self.assertEqual(m[()], 22.5)
m[...] = 23.5
self.assertEqual(m[()], 23.5)
self.assertRaises(TypeError, m.__setitem__, 0, 24.7)
# read-only
ex = ndarray(list(range(7)), shape=[7])
m = memoryview(ex)
self.assertRaises(TypeError, m.__setitem__, 2, 10)
# range
ex = ndarray(list(range(7)), shape=[7], flags=ND_WRITABLE)
m = memoryview(ex)
self.assertRaises(IndexError, m.__setitem__, 2**64, 9)
self.assertRaises(TypeError, m.__setitem__, 2.0, 10)
self.assertRaises(TypeError, m.__setitem__, 0.0, 11)
# out of bounds
self.assertRaises(IndexError, m.__setitem__, -8, 20)
self.assertRaises(IndexError, m.__setitem__, 8, 25)
# pack_single() success:
for fmt in fmtdict['@']:
if fmt == 'c' or fmt == '?':
continue
ex = ndarray([1,2,3], shape=[3], format=fmt, flags=ND_WRITABLE)
m = memoryview(ex)
i = randrange(-3, 3)
m[i] = 8
self.assertEqual(m[i], 8)
self.assertEqual(m[i], ex[i])
ex = ndarray([b'1', b'2', b'3'], shape=[3], format='c',
flags=ND_WRITABLE)
m = memoryview(ex)
m[2] = b'9'
self.assertEqual(m[2], b'9')
ex = ndarray([True, False, True], shape=[3], format='?',
flags=ND_WRITABLE)
m = memoryview(ex)
m[1] = True
self.assertIs(m[1], True)
# pack_single() exceptions:
nd = ndarray([b'x'], shape=[1], format='c', flags=ND_WRITABLE)
m = memoryview(nd)
self.assertRaises(TypeError, m.__setitem__, 0, 100)
ex = ndarray(list(range(120)), shape=[1,2,3,4,5], flags=ND_WRITABLE)
m1 = memoryview(ex)
for fmt, _range in fmtdict['@'].items():
if (fmt == '?'): # PyObject_IsTrue() accepts anything
continue
if fmt == 'c': # special case tested above
continue
m2 = m1.cast(fmt)
lo, hi = _range
if fmt == 'd' or fmt == 'f':
lo, hi = -2**1024, 2**1024
if fmt != 'P': # PyLong_AsVoidPtr() accepts negative numbers
self.assertRaises(ValueError, m2.__setitem__, 0, lo-1)
self.assertRaises(TypeError, m2.__setitem__, 0, "xyz")
self.assertRaises(ValueError, m2.__setitem__, 0, hi)
# invalid item
m2 = m1.cast('c')
self.assertRaises(ValueError, m2.__setitem__, 0, b'\xff\xff')
# format not implemented
ex = ndarray(list(range(1)), shape=[1], format="xL", flags=ND_WRITABLE)
m = memoryview(ex)
self.assertRaises(NotImplementedError, m.__setitem__, 0, 1)
ex = ndarray([b'12345'], shape=[1], format="s", flags=ND_WRITABLE)
m = memoryview(ex)
self.assertRaises(NotImplementedError, m.__setitem__, 0, 1)
# multi-dimensional
ex = ndarray(list(range(12)), shape=[3,4], flags=ND_WRITABLE)
m = memoryview(ex)
m[0,1] = 42
self.assertEqual(ex[0][1], 42)
m[-1,-1] = 43
self.assertEqual(ex[2][3], 43)
# errors
for index in (3, -4):
with self.assert_out_of_bounds_error(dim=1):
m[index, 0] = 0
for index in (4, -5):
with self.assert_out_of_bounds_error(dim=2):
m[0, index] = 0
self.assertRaises(IndexError, m.__setitem__, (2**64, 0), 0)
self.assertRaises(IndexError, m.__setitem__, (0, 2**64), 0)
self.assertRaises(TypeError, m.__setitem__, (0, 0, 0), 0)
self.assertRaises(TypeError, m.__setitem__, (0.0, 0.0), 0)
# Not implemented: multidimensional sub-views
self.assertRaises(NotImplementedError, m.__setitem__, 0, [2, 3])
def test_memoryview_slice(self):
ex = ndarray(list(range(12)), shape=[12], flags=ND_WRITABLE)
m = memoryview(ex)
# zero step
self.assertRaises(ValueError, m.__getitem__, slice(0,2,0))
self.assertRaises(ValueError, m.__setitem__, slice(0,2,0),
bytearray([1,2]))
# 0-dim slicing (identity function)
self.assertRaises(NotImplementedError, m.__getitem__, ())
# multidimensional slices
ex = ndarray(list(range(12)), shape=[12], flags=ND_WRITABLE)
m = memoryview(ex)
self.assertRaises(NotImplementedError, m.__getitem__,
(slice(0,2,1), slice(0,2,1)))
self.assertRaises(NotImplementedError, m.__setitem__,
(slice(0,2,1), slice(0,2,1)), bytearray([1,2]))
# invalid slice tuple
self.assertRaises(TypeError, m.__getitem__, (slice(0,2,1), {}))
self.assertRaises(TypeError, m.__setitem__, (slice(0,2,1), {}),
bytearray([1,2]))
# rvalue is not an exporter
self.assertRaises(TypeError, m.__setitem__, slice(0,1,1), [1])
# non-contiguous slice assignment
for flags in (0, ND_PIL):
ex1 = ndarray(list(range(12)), shape=[12], strides=[-1], offset=11,
flags=ND_WRITABLE|flags)
ex2 = ndarray(list(range(24)), shape=[12], strides=[2], flags=flags)
m1 = memoryview(ex1)
m2 = memoryview(ex2)
ex1[2:5] = ex1[2:5]
m1[2:5] = m2[2:5]
self.assertEqual(m1, ex1)
self.assertEqual(m2, ex2)
ex1[1:3][::-1] = ex2[0:2][::1]
m1[1:3][::-1] = m2[0:2][::1]
self.assertEqual(m1, ex1)
self.assertEqual(m2, ex2)
ex1[4:1:-2][::-1] = ex1[1:4:2][::1]
m1[4:1:-2][::-1] = m1[1:4:2][::1]
self.assertEqual(m1, ex1)
self.assertEqual(m2, ex2)
def test_memoryview_array(self):
def cmptest(testcase, a, b, m, singleitem):
for i, _ in enumerate(a):
ai = a[i]
mi = m[i]
testcase.assertEqual(ai, mi)
a[i] = singleitem
if singleitem != ai:
testcase.assertNotEqual(a, m)
testcase.assertNotEqual(a, b)
else:
testcase.assertEqual(a, m)
testcase.assertEqual(a, b)
m[i] = singleitem
testcase.assertEqual(a, m)
testcase.assertEqual(b, m)
a[i] = ai
m[i] = mi
for n in range(1, 5):
for fmt, items, singleitem in iter_format(n, 'array'):
for lslice in genslices(n):
for rslice in genslices(n):
a = array.array(fmt, items)
b = array.array(fmt, items)
m = memoryview(b)
self.assertEqual(m, a)
self.assertEqual(m.tolist(), a.tolist())
self.assertEqual(m.tobytes(), a.tobytes())
self.assertEqual(len(m), len(a))
cmptest(self, a, b, m, singleitem)
array_err = None
have_resize = None
try:
al = a[lslice]
ar = a[rslice]
a[lslice] = a[rslice]
have_resize = len(al) != len(ar)
except Exception as e:
array_err = e.__class__
m_err = None
try:
m[lslice] = m[rslice]
except Exception as e:
m_err = e.__class__
if have_resize: # memoryview cannot change shape
self.assertIs(m_err, ValueError)
elif m_err or array_err:
self.assertIs(m_err, array_err)
else:
self.assertEqual(m, a)
self.assertEqual(m.tolist(), a.tolist())
self.assertEqual(m.tobytes(), a.tobytes())
cmptest(self, a, b, m, singleitem)
def test_memoryview_compare_special_cases(self):
a = array.array('L', [1, 2, 3])
b = array.array('L', [1, 2, 7])
# Ordering comparisons raise:
v = memoryview(a)
w = memoryview(b)
for attr in ('__lt__', '__le__', '__gt__', '__ge__'):
self.assertIs(getattr(v, attr)(w), NotImplemented)
self.assertIs(getattr(a, attr)(v), NotImplemented)
# Released views compare equal to themselves:
v = memoryview(a)
v.release()
self.assertEqual(v, v)
self.assertNotEqual(v, a)
self.assertNotEqual(a, v)
v = memoryview(a)
w = memoryview(a)
w.release()
self.assertNotEqual(v, w)
self.assertNotEqual(w, v)
# Operand does not implement the buffer protocol:
v = memoryview(a)
self.assertNotEqual(v, [1, 2, 3])
# NaNs
nd = ndarray([(0, 0)], shape=[1], format='l x d x', flags=ND_WRITABLE)
nd[0] = (-1, float('nan'))
self.assertNotEqual(memoryview(nd), nd)
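        # (nan != nan, so the element-wise comparison above can never report
        # equality once a NaN has been stored in the buffer.)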
# Depends on issue #15625: the struct module does not understand 'u'.
a = array.array('u', 'xyz')
v = memoryview(a)
self.assertNotEqual(a, v)
self.assertNotEqual(v, a)
# Some ctypes format strings are unknown to the struct module.
if ctypes:
# format: "T{>l:x:>l:y:}"
class BEPoint(ctypes.BigEndianStructure):
_fields_ = [("x", ctypes.c_long), ("y", ctypes.c_long)]
point = BEPoint(100, 200)
a = memoryview(point)
b = memoryview(point)
self.assertNotEqual(a, b)
self.assertNotEqual(a, point)
self.assertNotEqual(point, a)
self.assertRaises(NotImplementedError, a.tolist)
def test_memoryview_compare_ndim_zero(self):
nd1 = ndarray(1729, shape=[], format='@L')
nd2 = ndarray(1729, shape=[], format='L', flags=ND_WRITABLE)
v = memoryview(nd1)
w = memoryview(nd2)
self.assertEqual(v, w)
self.assertEqual(w, v)
self.assertEqual(v, nd2)
self.assertEqual(nd2, v)
self.assertEqual(w, nd1)
self.assertEqual(nd1, w)
self.assertFalse(v.__ne__(w))
self.assertFalse(w.__ne__(v))
w[()] = 1728
self.assertNotEqual(v, w)
self.assertNotEqual(w, v)
self.assertNotEqual(v, nd2)
self.assertNotEqual(nd2, v)
self.assertNotEqual(w, nd1)
self.assertNotEqual(nd1, w)
self.assertFalse(v.__eq__(w))
self.assertFalse(w.__eq__(v))
nd = ndarray(list(range(12)), shape=[12], flags=ND_WRITABLE|ND_PIL)
ex = ndarray(list(range(12)), shape=[12], flags=ND_WRITABLE|ND_PIL)
m = memoryview(ex)
self.assertEqual(m, nd)
m[9] = 100
self.assertNotEqual(m, nd)
# struct module: equal
nd1 = ndarray((1729, 1.2, b'12345'), shape=[], format='Lf5s')
nd2 = ndarray((1729, 1.2, b'12345'), shape=[], format='hf5s',
flags=ND_WRITABLE)
v = memoryview(nd1)
w = memoryview(nd2)
self.assertEqual(v, w)
self.assertEqual(w, v)
self.assertEqual(v, nd2)
self.assertEqual(nd2, v)
self.assertEqual(w, nd1)
self.assertEqual(nd1, w)
# struct module: not equal
nd1 = ndarray((1729, 1.2, b'12345'), shape=[], format='Lf5s')
nd2 = ndarray((-1729, 1.2, b'12345'), shape=[], format='hf5s',
flags=ND_WRITABLE)
v = memoryview(nd1)
w = memoryview(nd2)
self.assertNotEqual(v, w)
self.assertNotEqual(w, v)
self.assertNotEqual(v, nd2)
self.assertNotEqual(nd2, v)
self.assertNotEqual(w, nd1)
self.assertNotEqual(nd1, w)
self.assertEqual(v, nd1)
self.assertEqual(w, nd2)
def test_memoryview_compare_ndim_one(self):
# contiguous
nd1 = ndarray([-529, 576, -625, 676, -729], shape=[5], format='@h')
nd2 = ndarray([-529, 576, -625, 676, 729], shape=[5], format='@h')
v = memoryview(nd1)
w = memoryview(nd2)
self.assertEqual(v, nd1)
self.assertEqual(w, nd2)
self.assertNotEqual(v, nd2)
self.assertNotEqual(w, nd1)
self.assertNotEqual(v, w)
# contiguous, struct module
nd1 = ndarray([-529, 576, -625, 676, -729], shape=[5], format='<i')
nd2 = ndarray([-529, 576, -625, 676, 729], shape=[5], format='>h')
v = memoryview(nd1)
w = memoryview(nd2)
self.assertEqual(v, nd1)
self.assertEqual(w, nd2)
self.assertNotEqual(v, nd2)
self.assertNotEqual(w, nd1)
self.assertNotEqual(v, w)
# non-contiguous
nd1 = ndarray([-529, -625, -729], shape=[3], format='@h')
nd2 = ndarray([-529, 576, -625, 676, -729], shape=[5], format='@h')
v = memoryview(nd1)
w = memoryview(nd2)
self.assertEqual(v, nd2[::2])
self.assertEqual(w[::2], nd1)
self.assertEqual(v, w[::2])
self.assertEqual(v[::-1], w[::-2])
# non-contiguous, struct module
nd1 = ndarray([-529, -625, -729], shape=[3], format='!h')
nd2 = ndarray([-529, 576, -625, 676, -729], shape=[5], format='<l')
v = memoryview(nd1)
w = memoryview(nd2)
self.assertEqual(v, nd2[::2])
self.assertEqual(w[::2], nd1)
self.assertEqual(v, w[::2])
self.assertEqual(v[::-1], w[::-2])
# non-contiguous, suboffsets
nd1 = ndarray([-529, -625, -729], shape=[3], format='@h')
nd2 = ndarray([-529, 576, -625, 676, -729], shape=[5], format='@h',
flags=ND_PIL)
v = memoryview(nd1)
w = memoryview(nd2)
self.assertEqual(v, nd2[::2])
self.assertEqual(w[::2], nd1)
self.assertEqual(v, w[::2])
self.assertEqual(v[::-1], w[::-2])
# non-contiguous, suboffsets, struct module
nd1 = ndarray([-529, -625, -729], shape=[3], format='h 0c')
nd2 = ndarray([-529, 576, -625, 676, -729], shape=[5], format='> h',
flags=ND_PIL)
v = memoryview(nd1)
w = memoryview(nd2)
self.assertEqual(v, nd2[::2])
self.assertEqual(w[::2], nd1)
self.assertEqual(v, w[::2])
self.assertEqual(v[::-1], w[::-2])
def test_memoryview_compare_zero_shape(self):
# zeros in shape
nd1 = ndarray([900, 961], shape=[0], format='@h')
nd2 = ndarray([-900, -961], shape=[0], format='@h')
v = memoryview(nd1)
w = memoryview(nd2)
self.assertEqual(v, nd1)
self.assertEqual(w, nd2)
self.assertEqual(v, nd2)
self.assertEqual(w, nd1)
self.assertEqual(v, w)
# zeros in shape, struct module
nd1 = ndarray([900, 961], shape=[0], format='= h0c')
nd2 = ndarray([-900, -961], shape=[0], format='@ i')
v = memoryview(nd1)
w = memoryview(nd2)
self.assertEqual(v, nd1)
self.assertEqual(w, nd2)
self.assertEqual(v, nd2)
self.assertEqual(w, nd1)
self.assertEqual(v, w)
def test_memoryview_compare_zero_strides(self):
# zero strides
nd1 = ndarray([900, 900, 900, 900], shape=[4], format='@L')
nd2 = ndarray([900], shape=[4], strides=[0], format='L')
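        # strides=[0] makes nd2 present its single item 900 at every index,
        # so it compares equal to the four-element nd1.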
v = memoryview(nd1)
w = memoryview(nd2)
self.assertEqual(v, nd1)
self.assertEqual(w, nd2)
self.assertEqual(v, nd2)
self.assertEqual(w, nd1)
self.assertEqual(v, w)
# zero strides, struct module
nd1 = ndarray([(900, 900)]*4, shape=[4], format='@ Li')
nd2 = ndarray([(900, 900)], shape=[4], strides=[0], format='!L h')
v = memoryview(nd1)
w = memoryview(nd2)
self.assertEqual(v, nd1)
self.assertEqual(w, nd2)
self.assertEqual(v, nd2)
self.assertEqual(w, nd1)
self.assertEqual(v, w)
def test_memoryview_compare_random_formats(self):
# random single character native formats
n = 10
for char in fmtdict['@m']:
fmt, items, singleitem = randitems(n, 'memoryview', '@', char)
for flags in (0, ND_PIL):
nd = ndarray(items, shape=[n], format=fmt, flags=flags)
m = memoryview(nd)
self.assertEqual(m, nd)
nd = nd[::-3]
m = memoryview(nd)
self.assertEqual(m, nd)
# random formats
n = 10
for _ in range(100):
fmt, items, singleitem = randitems(n)
for flags in (0, ND_PIL):
nd = ndarray(items, shape=[n], format=fmt, flags=flags)
m = memoryview(nd)
self.assertEqual(m, nd)
nd = nd[::-3]
m = memoryview(nd)
self.assertEqual(m, nd)
def test_memoryview_compare_multidim_c(self):
# C-contiguous, different values
nd1 = ndarray(list(range(-15, 15)), shape=[3, 2, 5], format='@h')
nd2 = ndarray(list(range(0, 30)), shape=[3, 2, 5], format='@h')
v = memoryview(nd1)
w = memoryview(nd2)
self.assertEqual(v, nd1)
self.assertEqual(w, nd2)
self.assertNotEqual(v, nd2)
self.assertNotEqual(w, nd1)
self.assertNotEqual(v, w)
# C-contiguous, different values, struct module
nd1 = ndarray([(0, 1, 2)]*30, shape=[3, 2, 5], format='=f q xxL')
nd2 = ndarray([(-1.2, 1, 2)]*30, shape=[3, 2, 5], format='< f 2Q')
v = memoryview(nd1)
w = memoryview(nd2)
self.assertEqual(v, nd1)
self.assertEqual(w, nd2)
self.assertNotEqual(v, nd2)
self.assertNotEqual(w, nd1)
self.assertNotEqual(v, w)
# C-contiguous, different shape
nd1 = ndarray(list(range(30)), shape=[2, 3, 5], format='L')
nd2 = ndarray(list(range(30)), shape=[3, 2, 5], format='L')
v = memoryview(nd1)
w = memoryview(nd2)
self.assertEqual(v, nd1)
self.assertEqual(w, nd2)
self.assertNotEqual(v, nd2)
self.assertNotEqual(w, nd1)
self.assertNotEqual(v, w)
# C-contiguous, different shape, struct module
nd1 = ndarray([(0, 1, 2)]*21, shape=[3, 7], format='! b B xL')
nd2 = ndarray([(0, 1, 2)]*21, shape=[7, 3], format='= Qx l xxL')
v = memoryview(nd1)
w = memoryview(nd2)
self.assertEqual(v, nd1)
self.assertEqual(w, nd2)
self.assertNotEqual(v, nd2)
self.assertNotEqual(w, nd1)
self.assertNotEqual(v, w)
# C-contiguous, different format, struct module
nd1 = ndarray(list(range(30)), shape=[2, 3, 5], format='L')
nd2 = ndarray(list(range(30)), shape=[2, 3, 5], format='l')
v = memoryview(nd1)
w = memoryview(nd2)
self.assertEqual(v, nd1)
self.assertEqual(w, nd2)
self.assertEqual(v, nd2)
self.assertEqual(w, nd1)
self.assertEqual(v, w)
def test_memoryview_compare_multidim_fortran(self):
# Fortran-contiguous, different values
nd1 = ndarray(list(range(-15, 15)), shape=[5, 2, 3], format='@h',
flags=ND_FORTRAN)
nd2 = ndarray(list(range(0, 30)), shape=[5, 2, 3], format='@h',
flags=ND_FORTRAN)
v = memoryview(nd1)
w = memoryview(nd2)
self.assertEqual(v, nd1)
self.assertEqual(w, nd2)
self.assertNotEqual(v, nd2)
self.assertNotEqual(w, nd1)
self.assertNotEqual(v, w)
# Fortran-contiguous, different values, struct module
nd1 = ndarray([(2**64-1, -1)]*6, shape=[2, 3], format='=Qq',
flags=ND_FORTRAN)
nd2 = ndarray([(-1, 2**64-1)]*6, shape=[2, 3], format='=qQ',
flags=ND_FORTRAN)
v = memoryview(nd1)
w = memoryview(nd2)
self.assertEqual(v, nd1)
self.assertEqual(w, nd2)
self.assertNotEqual(v, nd2)
self.assertNotEqual(w, nd1)
self.assertNotEqual(v, w)
# Fortran-contiguous, different shape
nd1 = ndarray(list(range(-15, 15)), shape=[2, 3, 5], format='l',
flags=ND_FORTRAN)
nd2 = ndarray(list(range(-15, 15)), shape=[3, 2, 5], format='l',
flags=ND_FORTRAN)
v = memoryview(nd1)
w = memoryview(nd2)
self.assertEqual(v, nd1)
self.assertEqual(w, nd2)
self.assertNotEqual(v, nd2)
self.assertNotEqual(w, nd1)
self.assertNotEqual(v, w)
# Fortran-contiguous, different shape, struct module
nd1 = ndarray(list(range(-15, 15)), shape=[2, 3, 5], format='0ll',
flags=ND_FORTRAN)
nd2 = ndarray(list(range(-15, 15)), shape=[3, 2, 5], format='l',
flags=ND_FORTRAN)
v = memoryview(nd1)
w = memoryview(nd2)
self.assertEqual(v, nd1)
self.assertEqual(w, nd2)
self.assertNotEqual(v, nd2)
self.assertNotEqual(w, nd1)
self.assertNotEqual(v, w)
# Fortran-contiguous, different format, struct module
nd1 = ndarray(list(range(30)), shape=[5, 2, 3], format='@h',
flags=ND_FORTRAN)
nd2 = ndarray(list(range(30)), shape=[5, 2, 3], format='@b',
flags=ND_FORTRAN)
v = memoryview(nd1)
w = memoryview(nd2)
self.assertEqual(v, nd1)
self.assertEqual(w, nd2)
self.assertEqual(v, nd2)
self.assertEqual(w, nd1)
self.assertEqual(v, w)
def test_memoryview_compare_multidim_mixed(self):
# mixed C/Fortran contiguous
lst1 = list(range(-15, 15))
lst2 = transpose(lst1, [3, 2, 5])
nd1 = ndarray(lst1, shape=[3, 2, 5], format='@l')
nd2 = ndarray(lst2, shape=[3, 2, 5], format='l', flags=ND_FORTRAN)
v = memoryview(nd1)
w = memoryview(nd2)
self.assertEqual(v, nd1)
self.assertEqual(w, nd2)
self.assertEqual(v, w)
# mixed C/Fortran contiguous, struct module
lst1 = [(-3.3, -22, b'x')]*30
lst1[5] = (-2.2, -22, b'x')
lst2 = transpose(lst1, [3, 2, 5])
nd1 = ndarray(lst1, shape=[3, 2, 5], format='d b c')
nd2 = ndarray(lst2, shape=[3, 2, 5], format='d h c', flags=ND_FORTRAN)
v = memoryview(nd1)
w = memoryview(nd2)
self.assertEqual(v, nd1)
self.assertEqual(w, nd2)
self.assertEqual(v, w)
# different values, non-contiguous
ex1 = ndarray(list(range(40)), shape=[5, 8], format='@I')
nd1 = ex1[3:1:-1, ::-2]
ex2 = ndarray(list(range(40)), shape=[5, 8], format='I')
nd2 = ex2[1:3:1, ::-2]
v = memoryview(nd1)
w = memoryview(nd2)
self.assertEqual(v, nd1)
self.assertEqual(w, nd2)
self.assertNotEqual(v, nd2)
self.assertNotEqual(w, nd1)
self.assertNotEqual(v, w)
# same values, non-contiguous, struct module
ex1 = ndarray([(2**31-1, -2**31)]*22, shape=[11, 2], format='=ii')
nd1 = ex1[3:1:-1, ::-2]
ex2 = ndarray([(2**31-1, -2**31)]*22, shape=[11, 2], format='>ii')
nd2 = ex2[1:3:1, ::-2]
v = memoryview(nd1)
w = memoryview(nd2)
self.assertEqual(v, nd1)
self.assertEqual(w, nd2)
self.assertEqual(v, nd2)
self.assertEqual(w, nd1)
self.assertEqual(v, w)
# different shape
ex1 = ndarray(list(range(30)), shape=[2, 3, 5], format='b')
nd1 = ex1[1:3:, ::-2]
nd2 = ndarray(list(range(30)), shape=[3, 2, 5], format='b')
nd2 = ex2[1:3:, ::-2]
v = memoryview(nd1)
w = memoryview(nd2)
self.assertEqual(v, nd1)
self.assertEqual(w, nd2)
self.assertNotEqual(v, nd2)
self.assertNotEqual(w, nd1)
self.assertNotEqual(v, w)
# different shape, struct module
ex1 = ndarray(list(range(30)), shape=[2, 3, 5], format='B')
nd1 = ex1[1:3:, ::-2]
nd2 = ndarray(list(range(30)), shape=[3, 2, 5], format='b')
nd2 = ex2[1:3:, ::-2]
v = memoryview(nd1)
w = memoryview(nd2)
self.assertEqual(v, nd1)
self.assertEqual(w, nd2)
self.assertNotEqual(v, nd2)
self.assertNotEqual(w, nd1)
self.assertNotEqual(v, w)
# different format, struct module
ex1 = ndarray([(2, b'123')]*30, shape=[5, 3, 2], format='b3s')
nd1 = ex1[1:3:, ::-2]
nd2 = ndarray([(2, b'123')]*30, shape=[5, 3, 2], format='i3s')
nd2 = ex2[1:3:, ::-2]
v = memoryview(nd1)
w = memoryview(nd2)
self.assertEqual(v, nd1)
self.assertEqual(w, nd2)
self.assertNotEqual(v, nd2)
self.assertNotEqual(w, nd1)
self.assertNotEqual(v, w)
def test_memoryview_compare_multidim_zero_shape(self):
# zeros in shape
nd1 = ndarray(list(range(30)), shape=[0, 3, 2], format='i')
nd2 = ndarray(list(range(30)), shape=[5, 0, 2], format='@i')
v = memoryview(nd1)
w = memoryview(nd2)
self.assertEqual(v, nd1)
self.assertEqual(w, nd2)
self.assertNotEqual(v, nd2)
self.assertNotEqual(w, nd1)
self.assertNotEqual(v, w)
# zeros in shape, struct module
nd1 = ndarray(list(range(30)), shape=[0, 3, 2], format='i')
nd2 = ndarray(list(range(30)), shape=[5, 0, 2], format='@i')
v = memoryview(nd1)
w = memoryview(nd2)
self.assertEqual(v, nd1)
self.assertEqual(w, nd2)
self.assertNotEqual(v, nd2)
self.assertNotEqual(w, nd1)
self.assertNotEqual(v, w)
def test_memoryview_compare_multidim_zero_strides(self):
# zero strides
nd1 = ndarray([900]*80, shape=[4, 5, 4], format='@L')
nd2 = ndarray([900], shape=[4, 5, 4], strides=[0, 0, 0], format='L')
v = memoryview(nd1)
w = memoryview(nd2)
self.assertEqual(v, nd1)
self.assertEqual(w, nd2)
self.assertEqual(v, nd2)
self.assertEqual(w, nd1)
self.assertEqual(v, w)
self.assertEqual(v.tolist(), w.tolist())
# zero strides, struct module
nd1 = ndarray([(1, 2)]*10, shape=[2, 5], format='=lQ')
nd2 = ndarray([(1, 2)], shape=[2, 5], strides=[0, 0], format='<lQ')
v = memoryview(nd1)
w = memoryview(nd2)
self.assertEqual(v, nd1)
self.assertEqual(w, nd2)
self.assertEqual(v, nd2)
self.assertEqual(w, nd1)
self.assertEqual(v, w)
def test_memoryview_compare_multidim_suboffsets(self):
# suboffsets
ex1 = ndarray(list(range(40)), shape=[5, 8], format='@I')
nd1 = ex1[3:1:-1, ::-2]
ex2 = ndarray(list(range(40)), shape=[5, 8], format='I', flags=ND_PIL)
nd2 = ex2[1:3:1, ::-2]
v = memoryview(nd1)
w = memoryview(nd2)
self.assertEqual(v, nd1)
self.assertEqual(w, nd2)
self.assertNotEqual(v, nd2)
self.assertNotEqual(w, nd1)
self.assertNotEqual(v, w)
# suboffsets, struct module
ex1 = ndarray([(2**64-1, -1)]*40, shape=[5, 8], format='=Qq',
flags=ND_WRITABLE)
ex1[2][7] = (1, -2)
nd1 = ex1[3:1:-1, ::-2]
ex2 = ndarray([(2**64-1, -1)]*40, shape=[5, 8], format='>Qq',
flags=ND_PIL|ND_WRITABLE)
ex2[2][7] = (1, -2)
nd2 = ex2[1:3:1, ::-2]
v = memoryview(nd1)
w = memoryview(nd2)
self.assertEqual(v, nd1)
self.assertEqual(w, nd2)
self.assertEqual(v, nd2)
self.assertEqual(w, nd1)
self.assertEqual(v, w)
# suboffsets, different shape
ex1 = ndarray(list(range(30)), shape=[2, 3, 5], format='b',
flags=ND_PIL)
nd1 = ex1[1:3:, ::-2]
nd2 = ndarray(list(range(30)), shape=[3, 2, 5], format='b')
nd2 = ex2[1:3:, ::-2]
v = memoryview(nd1)
w = memoryview(nd2)
self.assertEqual(v, nd1)
self.assertEqual(w, nd2)
self.assertNotEqual(v, nd2)
self.assertNotEqual(w, nd1)
self.assertNotEqual(v, w)
# suboffsets, different shape, struct module
ex1 = ndarray([(2**8-1, -1)]*40, shape=[2, 3, 5], format='Bb',
flags=ND_PIL|ND_WRITABLE)
nd1 = ex1[1:2:, ::-2]
ex2 = ndarray([(2**8-1, -1)]*40, shape=[3, 2, 5], format='Bb')
nd2 = ex2[1:2:, ::-2]
v = memoryview(nd1)
w = memoryview(nd2)
self.assertEqual(v, nd1)
self.assertEqual(w, nd2)
self.assertNotEqual(v, nd2)
self.assertNotEqual(w, nd1)
self.assertNotEqual(v, w)
# suboffsets, different format
ex1 = ndarray(list(range(30)), shape=[5, 3, 2], format='i', flags=ND_PIL)
nd1 = ex1[1:3:, ::-2]
ex2 = ndarray(list(range(30)), shape=[5, 3, 2], format='@I', flags=ND_PIL)
nd2 = ex2[1:3:, ::-2]
v = memoryview(nd1)
w = memoryview(nd2)
self.assertEqual(v, nd1)
self.assertEqual(w, nd2)
self.assertEqual(v, nd2)
self.assertEqual(w, nd1)
self.assertEqual(v, w)
# suboffsets, different format, struct module
ex1 = ndarray([(b'hello', b'', 1)]*27, shape=[3, 3, 3], format='5s0sP',
flags=ND_PIL|ND_WRITABLE)
ex1[1][2][2] = (b'sushi', b'', 1)
nd1 = ex1[1:3:, ::-2]
ex2 = ndarray([(b'hello', b'', 1)]*27, shape=[3, 3, 3], format='5s0sP',
flags=ND_PIL|ND_WRITABLE)
ex1[1][2][2] = (b'sushi', b'', 1)
nd2 = ex2[1:3:, ::-2]
v = memoryview(nd1)
w = memoryview(nd2)
self.assertEqual(v, nd1)
self.assertEqual(w, nd2)
self.assertNotEqual(v, nd2)
self.assertNotEqual(w, nd1)
self.assertNotEqual(v, w)
# initialize mixed C/Fortran + suboffsets
lst1 = list(range(-15, 15))
lst2 = transpose(lst1, [3, 2, 5])
nd1 = ndarray(lst1, shape=[3, 2, 5], format='@l', flags=ND_PIL)
nd2 = ndarray(lst2, shape=[3, 2, 5], format='l', flags=ND_FORTRAN|ND_PIL)
v = memoryview(nd1)
w = memoryview(nd2)
self.assertEqual(v, nd1)
self.assertEqual(w, nd2)
self.assertEqual(v, w)
# initialize mixed C/Fortran + suboffsets, struct module
lst1 = [(b'sashimi', b'sliced', 20.05)]*30
lst1[11] = (b'ramen', b'spicy', 9.45)
lst2 = transpose(lst1, [3, 2, 5])
nd1 = ndarray(lst1, shape=[3, 2, 5], format='< 10p 9p d', flags=ND_PIL)
nd2 = ndarray(lst2, shape=[3, 2, 5], format='> 10p 9p d',
flags=ND_FORTRAN|ND_PIL)
v = memoryview(nd1)
w = memoryview(nd2)
self.assertEqual(v, nd1)
self.assertEqual(w, nd2)
self.assertEqual(v, w)
def test_memoryview_compare_not_equal(self):
# items not equal
for byteorder in ['=', '<', '>', '!']:
x = ndarray([2**63]*120, shape=[3,5,2,2,2], format=byteorder+'Q')
y = ndarray([2**63]*120, shape=[3,5,2,2,2], format=byteorder+'Q',
flags=ND_WRITABLE|ND_FORTRAN)
y[2][3][1][1][1] = 1
a = memoryview(x)
b = memoryview(y)
self.assertEqual(a, x)
self.assertEqual(b, y)
self.assertNotEqual(a, b)
self.assertNotEqual(a, y)
self.assertNotEqual(b, x)
x = ndarray([(2**63, 2**31, 2**15)]*120, shape=[3,5,2,2,2],
format=byteorder+'QLH')
y = ndarray([(2**63, 2**31, 2**15)]*120, shape=[3,5,2,2,2],
format=byteorder+'QLH', flags=ND_WRITABLE|ND_FORTRAN)
y[2][3][1][1][1] = (1, 1, 1)
a = memoryview(x)
b = memoryview(y)
self.assertEqual(a, x)
self.assertEqual(b, y)
self.assertNotEqual(a, b)
self.assertNotEqual(a, y)
self.assertNotEqual(b, x)
def test_memoryview_check_released(self):
a = array.array('d', [1.1, 2.2, 3.3])
m = memoryview(a)
m.release()
# PyMemoryView_FromObject()
self.assertRaises(ValueError, memoryview, m)
# memoryview.cast()
self.assertRaises(ValueError, m.cast, 'c')
# getbuffer()
self.assertRaises(ValueError, ndarray, m)
# memoryview.tolist()
self.assertRaises(ValueError, m.tolist)
# memoryview.tobytes()
self.assertRaises(ValueError, m.tobytes)
# sequence
self.assertRaises(ValueError, eval, "1.0 in m", locals())
# subscript
self.assertRaises(ValueError, m.__getitem__, 0)
# assignment
self.assertRaises(ValueError, m.__setitem__, 0, 1)
for attr in ('obj', 'nbytes', 'readonly', 'itemsize', 'format', 'ndim',
'shape', 'strides', 'suboffsets', 'c_contiguous',
'f_contiguous', 'contiguous'):
self.assertRaises(ValueError, m.__getattribute__, attr)
# richcompare
b = array.array('d', [1.1, 2.2, 3.3])
m1 = memoryview(a)
m2 = memoryview(b)
self.assertEqual(m1, m2)
m1.release()
self.assertNotEqual(m1, m2)
self.assertNotEqual(m1, a)
self.assertEqual(m1, m1)
def test_memoryview_tobytes(self):
# Many implicit tests are already in self.verify().
t = (-529, 576, -625, 676, -729)
nd = ndarray(t, shape=[5], format='@h')
m = memoryview(nd)
self.assertEqual(m, nd)
self.assertEqual(m.tobytes(), nd.tobytes())
nd = ndarray([t], shape=[1], format='>hQiLl')
m = memoryview(nd)
self.assertEqual(m, nd)
self.assertEqual(m.tobytes(), nd.tobytes())
nd = ndarray([t for _ in range(12)], shape=[2,2,3], format='=hQiLl')
m = memoryview(nd)
self.assertEqual(m, nd)
self.assertEqual(m.tobytes(), nd.tobytes())
nd = ndarray([t for _ in range(120)], shape=[5,2,2,3,2],
format='<hQiLl')
m = memoryview(nd)
self.assertEqual(m, nd)
self.assertEqual(m.tobytes(), nd.tobytes())
# Unknown formats are handled: tobytes() purely depends on itemsize.
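        # (Even a struct layout the struct module cannot parse can therefore be
        # serialized byte-for-byte, as the ctypes example below shows.)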
if ctypes:
# format: "T{>l:x:>l:y:}"
class BEPoint(ctypes.BigEndianStructure):
_fields_ = [("x", ctypes.c_long), ("y", ctypes.c_long)]
point = BEPoint(100, 200)
a = memoryview(point)
self.assertEqual(a.tobytes(), bytes(point))
def test_memoryview_get_contiguous(self):
# Many implicit tests are already in self.verify().
# no buffer interface
self.assertRaises(TypeError, get_contiguous, {}, PyBUF_READ, 'F')
# writable request to read-only object
self.assertRaises(BufferError, get_contiguous, b'x', PyBUF_WRITE, 'C')
# writable request to non-contiguous object
nd = ndarray([1, 2, 3], shape=[2], strides=[2])
self.assertRaises(BufferError, get_contiguous, nd, PyBUF_WRITE, 'A')
# scalar, read-only request from read-only exporter
nd = ndarray(9, shape=(), format="L")
for order in ['C', 'F', 'A']:
m = get_contiguous(nd, PyBUF_READ, order)
self.assertEqual(m, nd)
self.assertEqual(m[()], 9)
# scalar, read-only request from writable exporter
nd = ndarray(9, shape=(), format="L", flags=ND_WRITABLE)
for order in ['C', 'F', 'A']:
m = get_contiguous(nd, PyBUF_READ, order)
self.assertEqual(m, nd)
self.assertEqual(m[()], 9)
# scalar, writable request
for order in ['C', 'F', 'A']:
nd[()] = 9
m = get_contiguous(nd, PyBUF_WRITE, order)
self.assertEqual(m, nd)
self.assertEqual(m[()], 9)
m[()] = 10
self.assertEqual(m[()], 10)
self.assertEqual(nd[()], 10)
# zeros in shape
nd = ndarray([1], shape=[0], format="L", flags=ND_WRITABLE)
for order in ['C', 'F', 'A']:
m = get_contiguous(nd, PyBUF_READ, order)
self.assertRaises(IndexError, m.__getitem__, 0)
self.assertEqual(m, nd)
self.assertEqual(m.tolist(), [])
nd = ndarray(list(range(8)), shape=[2, 0, 7], format="L",
flags=ND_WRITABLE)
for order in ['C', 'F', 'A']:
m = get_contiguous(nd, PyBUF_READ, order)
self.assertEqual(ndarray(m).tolist(), [[], []])
# one-dimensional
nd = ndarray([1], shape=[1], format="h", flags=ND_WRITABLE)
for order in ['C', 'F', 'A']:
m = get_contiguous(nd, PyBUF_WRITE, order)
self.assertEqual(m, nd)
self.assertEqual(m.tolist(), nd.tolist())
nd = ndarray([1, 2, 3], shape=[3], format="b", flags=ND_WRITABLE)
for order in ['C', 'F', 'A']:
m = get_contiguous(nd, PyBUF_WRITE, order)
self.assertEqual(m, nd)
self.assertEqual(m.tolist(), nd.tolist())
# one-dimensional, non-contiguous
nd = ndarray([1, 2, 3], shape=[2], strides=[2], flags=ND_WRITABLE)
for order in ['C', 'F', 'A']:
m = get_contiguous(nd, PyBUF_READ, order)
self.assertEqual(m, nd)
self.assertEqual(m.tolist(), nd.tolist())
self.assertRaises(TypeError, m.__setitem__, 1, 20)
self.assertEqual(m[1], 3)
self.assertEqual(nd[1], 3)
nd = nd[::-1]
for order in ['C', 'F', 'A']:
m = get_contiguous(nd, PyBUF_READ, order)
self.assertEqual(m, nd)
self.assertEqual(m.tolist(), nd.tolist())
self.assertRaises(TypeError, m.__setitem__, 1, 20)
self.assertEqual(m[1], 1)
self.assertEqual(nd[1], 1)
# multi-dimensional, contiguous input
nd = ndarray(list(range(12)), shape=[3, 4], flags=ND_WRITABLE)
for order in ['C', 'A']:
m = get_contiguous(nd, PyBUF_WRITE, order)
self.assertEqual(ndarray(m).tolist(), nd.tolist())
self.assertRaises(BufferError, get_contiguous, nd, PyBUF_WRITE, 'F')
m = get_contiguous(nd, PyBUF_READ, order)
self.assertEqual(ndarray(m).tolist(), nd.tolist())
nd = ndarray(list(range(12)), shape=[3, 4],
flags=ND_WRITABLE|ND_FORTRAN)
for order in ['F', 'A']:
m = get_contiguous(nd, PyBUF_WRITE, order)
self.assertEqual(ndarray(m).tolist(), nd.tolist())
self.assertRaises(BufferError, get_contiguous, nd, PyBUF_WRITE, 'C')
m = get_contiguous(nd, PyBUF_READ, order)
self.assertEqual(ndarray(m).tolist(), nd.tolist())
# multi-dimensional, non-contiguous input
nd = ndarray(list(range(12)), shape=[3, 4], flags=ND_WRITABLE|ND_PIL)
for order in ['C', 'F', 'A']:
self.assertRaises(BufferError, get_contiguous, nd, PyBUF_WRITE,
order)
m = get_contiguous(nd, PyBUF_READ, order)
self.assertEqual(ndarray(m).tolist(), nd.tolist())
# flags
nd = ndarray([1,2,3,4,5], shape=[3], strides=[2])
m = get_contiguous(nd, PyBUF_READ, 'C')
self.assertTrue(m.c_contiguous)
def test_memoryview_serializing(self):
# C-contiguous
size = struct.calcsize('i')
a = array.array('i', [1,2,3,4,5])
m = memoryview(a)
buf = io.BytesIO(m)
b = bytearray(5*size)
buf.readinto(b)
self.assertEqual(m.tobytes(), b)
# C-contiguous, multi-dimensional
size = struct.calcsize('L')
nd = ndarray(list(range(12)), shape=[2,3,2], format="L")
m = memoryview(nd)
buf = io.BytesIO(m)
b = bytearray(2*3*2*size)
buf.readinto(b)
self.assertEqual(m.tobytes(), b)
# Fortran contiguous, multi-dimensional
#size = struct.calcsize('L')
#nd = ndarray(list(range(12)), shape=[2,3,2], format="L",
# flags=ND_FORTRAN)
#m = memoryview(nd)
#buf = io.BytesIO(m)
#b = bytearray(2*3*2*size)
#buf.readinto(b)
#self.assertEqual(m.tobytes(), b)
def test_memoryview_hash(self):
# bytes exporter
b = bytes(list(range(12)))
m = memoryview(b)
self.assertEqual(hash(b), hash(m))
# C-contiguous
mc = m.cast('c', shape=[3,4])
self.assertEqual(hash(mc), hash(b))
# non-contiguous
mx = m[::-2]
b = bytes(list(range(12))[::-2])
self.assertEqual(hash(mx), hash(b))
# Fortran contiguous
nd = ndarray(list(range(30)), shape=[3,2,5], flags=ND_FORTRAN)
m = memoryview(nd)
self.assertEqual(hash(m), hash(nd))
# multi-dimensional slice
nd = ndarray(list(range(30)), shape=[3,2,5])
x = nd[::2, ::, ::-1]
m = memoryview(x)
self.assertEqual(hash(m), hash(x))
# multi-dimensional slice with suboffsets
nd = ndarray(list(range(30)), shape=[2,5,3], flags=ND_PIL)
x = nd[::2, ::, ::-1]
m = memoryview(x)
self.assertEqual(hash(m), hash(x))
# equality-hash invariant
x = ndarray(list(range(12)), shape=[12], format='B')
a = memoryview(x)
y = ndarray(list(range(12)), shape=[12], format='b')
b = memoryview(y)
self.assertEqual(a, b)
self.assertEqual(hash(a), hash(b))
# non-byte formats
nd = ndarray(list(range(12)), shape=[2,2,3], format='L')
m = memoryview(nd)
self.assertRaises(ValueError, m.__hash__)
nd = ndarray(list(range(-6, 6)), shape=[2,2,3], format='h')
m = memoryview(nd)
self.assertRaises(ValueError, m.__hash__)
nd = ndarray(list(range(12)), shape=[2,2,3], format='= L')
m = memoryview(nd)
self.assertRaises(ValueError, m.__hash__)
nd = ndarray(list(range(-6, 6)), shape=[2,2,3], format='< h')
m = memoryview(nd)
self.assertRaises(ValueError, m.__hash__)
def test_memoryview_release(self):
# Create re-exporter from getbuffer(memoryview), then release the view.
a = bytearray([1,2,3])
m = memoryview(a)
nd = ndarray(m) # re-exporter
self.assertRaises(BufferError, m.release)
del nd
m.release()
a = bytearray([1,2,3])
m = memoryview(a)
nd1 = ndarray(m, getbuf=PyBUF_FULL_RO, flags=ND_REDIRECT)
nd2 = ndarray(nd1, getbuf=PyBUF_FULL_RO, flags=ND_REDIRECT)
self.assertIs(nd2.obj, m)
self.assertRaises(BufferError, m.release)
del nd1, nd2
m.release()
# chained views
a = bytearray([1,2,3])
m1 = memoryview(a)
m2 = memoryview(m1)
nd = ndarray(m2) # re-exporter
m1.release()
self.assertRaises(BufferError, m2.release)
del nd
m2.release()
a = bytearray([1,2,3])
m1 = memoryview(a)
m2 = memoryview(m1)
nd1 = ndarray(m2, getbuf=PyBUF_FULL_RO, flags=ND_REDIRECT)
nd2 = ndarray(nd1, getbuf=PyBUF_FULL_RO, flags=ND_REDIRECT)
self.assertIs(nd2.obj, m2)
m1.release()
self.assertRaises(BufferError, m2.release)
del nd1, nd2
m2.release()
# Allow changing layout while buffers are exported.
nd = ndarray([1,2,3], shape=[3], flags=ND_VAREXPORT)
m1 = memoryview(nd)
nd.push([4,5,6,7,8], shape=[5]) # mutate nd
m2 = memoryview(nd)
x = memoryview(m1)
self.assertEqual(x.tolist(), m1.tolist())
y = memoryview(m2)
self.assertEqual(y.tolist(), m2.tolist())
self.assertEqual(y.tolist(), nd.tolist())
m2.release()
y.release()
nd.pop() # pop the current view
self.assertEqual(x.tolist(), nd.tolist())
del nd
m1.release()
x.release()
# If multiple memoryviews share the same managed buffer, implicit
# release() in the context manager's __exit__() method should still
# work.
def catch22(b):
with memoryview(b) as m2:
pass
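        # Releasing the inner view on __exit__ must leave the outer view
        # usable, as checked right after each catch22() call below.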
x = bytearray(b'123')
with memoryview(x) as m1:
catch22(m1)
self.assertEqual(m1[0], ord(b'1'))
x = ndarray(list(range(12)), shape=[2,2,3], format='l')
y = ndarray(x, getbuf=PyBUF_FULL_RO, flags=ND_REDIRECT)
z = ndarray(y, getbuf=PyBUF_FULL_RO, flags=ND_REDIRECT)
self.assertIs(z.obj, x)
with memoryview(z) as m:
catch22(m)
self.assertEqual(m[0:1].tolist(), [[[0, 1, 2], [3, 4, 5]]])
# Test garbage collection.
for flags in (0, ND_REDIRECT):
x = bytearray(b'123')
with memoryview(x) as m1:
del x
y = ndarray(m1, getbuf=PyBUF_FULL_RO, flags=flags)
with memoryview(y) as m2:
del y
z = ndarray(m2, getbuf=PyBUF_FULL_RO, flags=flags)
with memoryview(z) as m3:
del z
catch22(m3)
catch22(m2)
catch22(m1)
self.assertEqual(m1[0], ord(b'1'))
self.assertEqual(m2[1], ord(b'2'))
self.assertEqual(m3[2], ord(b'3'))
del m3
del m2
del m1
x = bytearray(b'123')
with memoryview(x) as m1:
del x
y = ndarray(m1, getbuf=PyBUF_FULL_RO, flags=flags)
with memoryview(y) as m2:
del y
z = ndarray(m2, getbuf=PyBUF_FULL_RO, flags=flags)
with memoryview(z) as m3:
del z
catch22(m1)
catch22(m2)
catch22(m3)
self.assertEqual(m1[0], ord(b'1'))
self.assertEqual(m2[1], ord(b'2'))
self.assertEqual(m3[2], ord(b'3'))
del m1, m2, m3
# memoryview.release() fails if the view has exported buffers.
x = bytearray(b'123')
with self.assertRaises(BufferError):
with memoryview(x) as m:
ex = ndarray(m)
m[0] == ord(b'1')
def test_memoryview_redirect(self):
nd = ndarray([1.0 * x for x in range(12)], shape=[12], format='d')
a = array.array('d', [1.0 * x for x in range(12)])
for x in (nd, a):
y = ndarray(x, getbuf=PyBUF_FULL_RO, flags=ND_REDIRECT)
z = ndarray(y, getbuf=PyBUF_FULL_RO, flags=ND_REDIRECT)
m = memoryview(z)
self.assertIs(y.obj, x)
self.assertIs(z.obj, x)
self.assertIs(m.obj, x)
self.assertEqual(m, x)
self.assertEqual(m, y)
self.assertEqual(m, z)
self.assertEqual(m[1:3], x[1:3])
self.assertEqual(m[1:3], y[1:3])
self.assertEqual(m[1:3], z[1:3])
del y, z
self.assertEqual(m[1:3], x[1:3])
def test_memoryview_from_static_exporter(self):
fmt = 'B'
lst = [0,1,2,3,4,5,6,7,8,9,10,11]
# exceptions
self.assertRaises(TypeError, staticarray, 1, 2, 3)
# view.obj==x
x = staticarray()
y = memoryview(x)
self.verify(y, obj=x,
itemsize=1, fmt=fmt, readonly=True,
ndim=1, shape=[12], strides=[1],
lst=lst)
for i in range(12):
self.assertEqual(y[i], i)
del x
del y
x = staticarray()
y = memoryview(x)
del y
del x
x = staticarray()
y = ndarray(x, getbuf=PyBUF_FULL_RO)
z = ndarray(y, getbuf=PyBUF_FULL_RO)
m = memoryview(z)
self.assertIs(y.obj, x)
self.assertIs(m.obj, z)
self.verify(m, obj=z,
itemsize=1, fmt=fmt, readonly=True,
ndim=1, shape=[12], strides=[1],
lst=lst)
del x, y, z, m
x = staticarray()
y = ndarray(x, getbuf=PyBUF_FULL_RO, flags=ND_REDIRECT)
z = ndarray(y, getbuf=PyBUF_FULL_RO, flags=ND_REDIRECT)
m = memoryview(z)
self.assertIs(y.obj, x)
self.assertIs(z.obj, x)
self.assertIs(m.obj, x)
self.verify(m, obj=x,
itemsize=1, fmt=fmt, readonly=True,
ndim=1, shape=[12], strides=[1],
lst=lst)
del x, y, z, m
# view.obj==NULL
x = staticarray(legacy_mode=True)
y = memoryview(x)
self.verify(y, obj=None,
itemsize=1, fmt=fmt, readonly=True,
ndim=1, shape=[12], strides=[1],
lst=lst)
for i in range(12):
self.assertEqual(y[i], i)
del x
del y
x = staticarray(legacy_mode=True)
y = memoryview(x)
del y
del x
x = staticarray(legacy_mode=True)
y = ndarray(x, getbuf=PyBUF_FULL_RO)
z = ndarray(y, getbuf=PyBUF_FULL_RO)
m = memoryview(z)
self.assertIs(y.obj, None)
self.assertIs(m.obj, z)
self.verify(m, obj=z,
itemsize=1, fmt=fmt, readonly=True,
ndim=1, shape=[12], strides=[1],
lst=lst)
del x, y, z, m
x = staticarray(legacy_mode=True)
y = ndarray(x, getbuf=PyBUF_FULL_RO, flags=ND_REDIRECT)
z = ndarray(y, getbuf=PyBUF_FULL_RO, flags=ND_REDIRECT)
m = memoryview(z)
# Clearly setting view.obj==NULL is inferior, since it
# messes up the redirection chain:
self.assertIs(y.obj, None)
self.assertIs(z.obj, y)
self.assertIs(m.obj, y)
self.verify(m, obj=y,
itemsize=1, fmt=fmt, readonly=True,
ndim=1, shape=[12], strides=[1],
lst=lst)
del x, y, z, m
def test_memoryview_getbuffer_undefined(self):
# getbufferproc does not adhere to the new documentation
nd = ndarray([1,2,3], [3], flags=ND_GETBUF_FAIL|ND_GETBUF_UNDEFINED)
self.assertRaises(BufferError, memoryview, nd)
def test_issue_7385(self):
x = ndarray([1,2,3], shape=[3], flags=ND_GETBUF_FAIL)
self.assertRaises(BufferError, memoryview, x)
if __name__ == "__main__":
unittest.main()
| 38.042948 | 88 | 0.512109 |
794254654f00f6fd4b817c6016e9d9c1874f374e | 224 | py | Python | espaloma/mm/__init__.py | jstaker7/espaloma | d80d280acd608dc04c93966afe15cc3cb74f65a8 | [
"MIT"
] | null | null | null | espaloma/mm/__init__.py | jstaker7/espaloma | d80d280acd608dc04c93966afe15cc3cb74f65a8 | [
"MIT"
] | null | null | null | espaloma/mm/__init__.py | jstaker7/espaloma | d80d280acd608dc04c93966afe15cc3cb74f65a8 | [
"MIT"
] | null | null | null | import espaloma
import espaloma.mm
import espaloma.mm.angle
import espaloma.mm.bond
import espaloma.mm.energy
import espaloma.mm.functional
import espaloma.mm.geometry
import espaloma.mm.nonbonded
import espaloma.mm.torsion
| 22.4 | 29 | 0.852679 |
794254acb1899534fd14c75d1c2a9d6d0668358b | 2,189 | py | Python | runtime/image_classification/models/vgg16/gpus=4_straight/stage2.py | vibhatha/pipedream | af6b811f5d01a68e9eb91065e5242fc1a075f279 | [
"MIT"
] | null | null | null | runtime/image_classification/models/vgg16/gpus=4_straight/stage2.py | vibhatha/pipedream | af6b811f5d01a68e9eb91065e5242fc1a075f279 | [
"MIT"
] | null | null | null | runtime/image_classification/models/vgg16/gpus=4_straight/stage2.py | vibhatha/pipedream | af6b811f5d01a68e9eb91065e5242fc1a075f279 | [
"MIT"
] | null | null | null | # Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import torch
class Stage2(torch.nn.Module):
def __init__(self):
super(Stage2, self).__init__()
self.layer1 = torch.nn.Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
self.layer2 = torch.nn.ReLU(inplace=True)
self.layer3 = torch.nn.Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
self.layer4 = torch.nn.ReLU(inplace=True)
self.layer5 = torch.nn.MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)
self.layer6 = torch.nn.Conv2d(256, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
self.layer7 = torch.nn.ReLU(inplace=True)
self.layer8 = torch.nn.Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
self.layer9 = torch.nn.ReLU(inplace=True)
self.layer10 = torch.nn.Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
self.layer11 = torch.nn.ReLU(inplace=True)
self._initialize_weights()
def forward(self, input0):
out0 = input0.clone()
out1 = self.layer1(out0)
out2 = self.layer2(out1)
out3 = self.layer3(out2)
out4 = self.layer4(out3)
out5 = self.layer5(out4)
out6 = self.layer6(out5)
out7 = self.layer7(out6)
out8 = self.layer8(out7)
out9 = self.layer9(out8)
out10 = self.layer10(out9)
out11 = self.layer11(out10)
return out11
def _initialize_weights(self):
for m in self.modules():
if isinstance(m, torch.nn.Conv2d):
torch.nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
if m.bias is not None:
torch.nn.init.constant_(m.bias, 0)
elif isinstance(m, torch.nn.BatchNorm2d):
torch.nn.init.constant_(m.weight, 1)
torch.nn.init.constant_(m.bias, 0)
elif isinstance(m, torch.nn.Linear):
torch.nn.init.normal_(m.weight, 0, 0.01)
torch.nn.init.constant_(m.bias, 0)
| 43.78 | 106 | 0.582458 |
794254cc0c48cf27ffad58761d0ddc2b0742ac55 | 7,227 | py | Python | refnx/analysis/test/NISTModels.py | brotwasme/refnx2019 | 8b62f7668d196c0ec443b47ea669573417a682e6 | [
"BSD-3-Clause"
] | null | null | null | refnx/analysis/test/NISTModels.py | brotwasme/refnx2019 | 8b62f7668d196c0ec443b47ea669573417a682e6 | [
"BSD-3-Clause"
] | null | null | null | refnx/analysis/test/NISTModels.py | brotwasme/refnx2019 | 8b62f7668d196c0ec443b47ea669573417a682e6 | [
"BSD-3-Clause"
] | null | null | null | import os
import numpy as np
from numpy import exp, sin, cos, arctan, array, pi
from numpy.testing import assert_allclose, assert_
from refnx.analysis import (CurveFitter, Objective, Parameter, Parameters,
Model)
thisdir, thisfile = os.path.split(__file__)
NIST_DIR = os.path.join(thisdir, 'NIST_STRD')
def ndig(a, b):
"precision for NIST values"
return np.round(-np.log10((np.abs(np.abs(a) - np.abs(b)) +
1.e-15) /
np.abs(b)))
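# e.g. ndig(1.0000001, 1.0) -> 7.0: roughly the number of agreeing
# significant digits between a certified and a fitted value.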
def read_params(params):
return np.array(params)
def Bennett5(x, b):
b = read_params(b)
return b[0] * (b[1] + x)**(-1 / b[2])
def BoxBOD(x, b):
b = read_params(b)
return b[0] * (1 - exp(-b[1] * x))
def Chwirut(x, b):
b = read_params(b)
return exp(-b[0] * x) / (b[1] + b[2] * x)
def DanWood(x, b):
b = read_params(b)
return b[0] * x**b[1]
def ENSO(x, b):
b = read_params(b)
return (b[0] +
(b[1] * cos(2 * pi * x / 12) + b[2] * sin(2 * pi * x / 12) +
b[4] * cos(2 * pi * x / b[3]) + b[5] * sin(2 * pi * x / b[3]) +
b[7] * cos(2 * pi * x / b[6]) + b[8] * sin(2 * pi * x / b[6])))
def Eckerle4(x, b):
b = read_params(b)
return (b[0] / b[1]) * exp(-0.5 * ((x - b[2]) / b[1])**2)
def Gauss(x, b):
b = read_params(b)
return b[0] * exp(-b[1] * x) + (b[2] * exp(-(x - b[3])**2 / b[4]**2) +
b[5] * exp(-(x - b[6])**2 / b[7]**2))
def Hahn1(x, b):
b = read_params(b)
return ((b[0] + b[1] * x + b[2] * x**2 + b[3] * x**3) /
(1 + b[4] * x + b[5] * x**2 + b[6] * x**3))
def Kirby(x, b):
b = read_params(b)
return (b[0] + b[1] * x + b[2] * x**2) / (1 + b[3] * x + b[4] * x**2)
def Lanczos(x, b):
b = read_params(b)
return (b[0] * exp(-b[1] * x) +
b[2] * exp(-b[3] * x) +
b[4] * exp(-b[5] * x))
def MGH09(x, b):
b = read_params(b)
return b[0] * (x**2 + x * b[1]) / (x**2 + x * b[2] + b[3])
def MGH10(x, b):
b = read_params(b)
return b[0] * exp(b[1] / (x + b[2]))
def MGH17(x, b):
b = read_params(b)
return b[0] + b[1] * exp(-x * b[3]) + b[2] * exp(-x * b[4])
def Misra1a(x, b):
b = read_params(b)
return b[0] * (1 - exp(-b[1] * x))
def Misra1b(x, b):
b = read_params(b)
return b[0] * (1 - (1 + 0.5 * b[1] * x)**(-2))
def Misra1c(x, b):
b = read_params(b)
return b[0] * (1 - (1 + 2 * b[1] * x)**(-.5))
def Misra1d(x, b):
b = read_params(b)
return b[0] * b[1] * x * ((1 + b[1] * x)**(-1))
def Nelson(x, b):
b = read_params(b)
x1 = x[:, 0]
x2 = x[:, 1]
return b[0] - b[1] * x1 * exp(-b[2] * x2)
def Rat42(x, b):
b = read_params(b)
return b[0] / (1 + exp(b[1] - b[2] * x))
def Rat43(x, b):
b = read_params(b)
return b[0] / ((1 + exp(b[1] - b[2] * x))**(1 / b[3]))
def Roszman1(x, b):
b = read_params(b)
return b[0] - b[1] * x - arctan(b[2] / (x - b[3])) / pi
def Thurber(x, b):
b = read_params(b)
return ((b[0] + b[1] * x + b[2] * x**2 + b[3] * x**3) /
(1 + b[4] * x + b[5] * x**2 + b[6] * x**3))
# Model name fcn, #fitting params, dim of x
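# e.g. NIST_Models['Misra1a'] == (Misra1a, 2, 1): the Misra1a model has two
# fitting parameters and a one-dimensional x.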
NIST_Models = {'Bennett5': (Bennett5, 3, 1),
'BoxBOD': (BoxBOD, 2, 1),
'Chwirut1': (Chwirut, 3, 1),
'Chwirut2': (Chwirut, 3, 1),
'DanWood': (DanWood, 2, 1),
'ENSO': (ENSO, 9, 1),
'Eckerle4': (Eckerle4, 3, 1),
'Gauss1': (Gauss, 8, 1),
'Gauss2': (Gauss, 8, 1),
'Gauss3': (Gauss, 8, 1),
'Hahn1': (Hahn1, 7, 1),
'Kirby2': (Kirby, 5, 1),
'Lanczos1': (Lanczos, 6, 1),
'Lanczos2': (Lanczos, 6, 1),
'Lanczos3': (Lanczos, 6, 1),
'MGH09': (MGH09, 4, 1),
'MGH10': (MGH10, 3, 1),
'MGH17': (MGH17, 5, 1),
'Misra1a': (Misra1a, 2, 1),
'Misra1b': (Misra1b, 2, 1),
'Misra1c': (Misra1c, 2, 1),
'Misra1d': (Misra1d, 2, 1),
'Nelson': (Nelson, 3, 2),
'Rat42': (Rat42, 3, 1),
'Rat43': (Rat43, 4, 1),
'Roszman1': (Roszman1, 4, 1),
'Thurber': (Thurber, 7, 1)}
def NIST_runner(dataset, method='least_squares', chi_atol=1e-5,
val_rtol=1e-2, err_rtol=5e-3):
NIST_dataset = ReadNistData(dataset)
x, y = (NIST_dataset['x'], NIST_dataset['y'])
if dataset == 'Nelson':
y = np.log(y)
params = NIST_dataset['start']
fitfunc = NIST_Models[dataset][0]
model = Model(params, fitfunc)
objective = Objective(model, (x, y))
fitter = CurveFitter(objective)
result = fitter.fit(method=method)
assert_allclose(objective.chisqr(),
NIST_dataset['sum_squares'],
atol=chi_atol)
certval = NIST_dataset['cert_values']
assert_allclose(result.x, certval, rtol=val_rtol)
if 'stderr' in result:
certerr = NIST_dataset['cert_stderr']
assert_allclose(result.stderr, certerr, rtol=err_rtol)
def ReadNistData(dataset, start='start2'):
"""
NIST STRD data is in a simple, fixed format with line numbers being
significant!
"""
with open(os.path.join(NIST_DIR, "%s.dat" % dataset), 'r') as finp:
lines = [l[:-1] for l in finp.readlines()]
model_lines = lines[30:39]
param_lines = lines[40:58]
data_lines = lines[60:]
words = model_lines[1].strip().split()
nparams = int(words[0])
start1 = np.zeros(nparams)
start2 = np.zeros(nparams)
certval = np.zeros(nparams)
certerr = np.zeros(nparams)
for i, text in enumerate(param_lines[:nparams]):
[s1, s2, val, err] = [float(x) for x in text.split('=')[1].split()]
start1[i] = s1
start2[i] = s2
certval[i] = val
certerr[i] = err
for t in param_lines[nparams:]:
t = t.strip()
if ':' not in t:
continue
val = float(t.split(':')[1])
if t.startswith('Residual Sum of Squares'):
sum_squares = val
elif t.startswith('Residual Standard Deviation'):
std_dev = val
elif t.startswith('Degrees of Freedom'):
nfree = int(val)
elif t.startswith('Number of Observations'):
ndata = int(val)
y, x = [], []
for d in data_lines:
vals = [float(i) for i in d.strip().split()]
y.append(vals[0])
if len(vals) > 2:
x.append(vals[1:])
else:
x.append(vals[1])
y = array(y)
x = array(x)
params = Parameters()
for i in range(nparams):
pname = 'p%i' % (i + 1)
if start == 'start2':
pval = start2[i]
elif start == 'start1':
pval = start1[i]
p = Parameter(pval, name=pname, vary=True)
params.append(p)
out = {'y': y, 'x': x, 'nparams': nparams, 'ndata': ndata,
'nfree': nfree, 'start': params, 'sum_squares': sum_squares,
'std_dev': std_dev, 'cert_values': certval,
'cert_stderr': certerr}
return out
| 26.569853 | 76 | 0.479314 |
794254e8d9a6a8b9392368c0aaec5786677332d9 | 1,031 | py | Python | Python/yolo-cards-detection-augmenter/main.py | ablaszkiewicz/university | dd477b78a6adddbadc404a2082daa9c92c846a86 | [
"MIT"
] | null | null | null | Python/yolo-cards-detection-augmenter/main.py | ablaszkiewicz/university | dd477b78a6adddbadc404a2082daa9c92c846a86 | [
"MIT"
] | null | null | null | Python/yolo-cards-detection-augmenter/main.py | ablaszkiewicz/university | dd477b78a6adddbadc404a2082daa9c92c846a86 | [
"MIT"
] | 1 | 2021-04-08T14:54:10.000Z | 2021-04-08T14:54:10.000Z | from image_operations import *
import glob
import os
iterations = 70
# objects_files = glob.glob('dataset_original/*')
objects_files = ["9_H", "10_H", "J_H", "Q_H", "K_H", "A_H",
"9_S", "10_S", "J_S", "Q_S", "K_S", "A_S",
"9_C", "10_C", "J_C", "Q_C", "K_C", "A_C",
"9_D", "10_D", "J_D", "Q_D", "K_D", "A_D"]
objects = [os.path.splitext(os.path.basename(path))[0] for path in objects_files]
backgrounds_files = glob.glob('backgrounds/*')
backgrounds = [os.path.splitext(os.path.basename(path))[0] for path in backgrounds_files]
print(objects)
total_length = len(backgrounds) * iterations
counter = 0
for background_file_name in backgrounds:
for i in range(iterations):
image = ImageAugmenter(objects, background_file_name, 0)
image.save(i)
counter += 1
print("Progress:", counter, "/", total_length)
# for i in range(54):
# # if i == 53:
# # print(i)
# # continue
# # print(i, '\\n', end='', sep='')
# print(i)
| 30.323529 | 89 | 0.592629 |
7942552ed4d335b34094039e4fb7443652d0a7b4 | 337 | py | Python | common/SoupUtils.py | loveflycforever/TypeY | a002dc46be1d6f458abfbc27c3e5763ae739487f | [
"MIT"
] | null | null | null | common/SoupUtils.py | loveflycforever/TypeY | a002dc46be1d6f458abfbc27c3e5763ae739487f | [
"MIT"
] | null | null | null | common/SoupUtils.py | loveflycforever/TypeY | a002dc46be1d6f458abfbc27c3e5763ae739487f | [
"MIT"
] | null | null | null | from bs4 import BeautifulSoup
def strippedTagString(tag):
return tag.get_text().replace('\n', '').replace(' ', '')
def selectStrippedStrings(soup, selector, index=0):
return soup.select(selector)[index].stripped_strings
def selectFindAll(soup, selector, tag, index=0):
return soup.select(selector)[index].find_all(tag)
| 24.071429 | 60 | 0.727003 |
794256a008fbdd15ed6a60be1202abd34c7713c2 | 700 | py | Python | Trees/DFS_PostExample.py | yabur/LeetCode_Practice | d002dedf8f6694b9d313c8facf0d39e688decb15 | [
"MIT"
] | null | null | null | Trees/DFS_PostExample.py | yabur/LeetCode_Practice | d002dedf8f6694b9d313c8facf0d39e688decb15 | [
"MIT"
] | null | null | null | Trees/DFS_PostExample.py | yabur/LeetCode_Practice | d002dedf8f6694b9d313c8facf0d39e688decb15 | [
"MIT"
] | null | null | null | class TreeNode:
    def __init__(self, name, space):
        """ Constructor
        Args:
            name (string): label of the node
            space (int): disk space used by this node itself
        """
        self.name = name
        self.space = space
        self.children = []  # child nodes
    # PostOrder Example
    # Amount of disk usage the subtree rooted at `node` uses
    def helper(self, node):
        if node is None:
            return 0
        du = node.space  # pre order: start with the node's own space
        for child in node.children:
            du = du + self.helper(child)  # recurse into each child first
        return du  # post order result: own space plus all descendants
if __name__ == '__main__':
    root = TreeNode('root', 5)
    for name, space in [('a', 6), ('b', 3), ('c', 67)]:
        root.children.append(TreeNode(name, space))
    print(root.helper(root))  # 5 + 6 + 3 + 67 = 81
| 21.212121 | 50 | 0.464286 |
794258c92765b9f28ae980a50eb26f2acf6bd136 | 258 | py | Python | main/service/crypto_stats_service.py | gbdevw/python-fastapi-and-r | 06992857575519e1d1e7ffe72090758fb201b698 | [
"Apache-2.0"
] | 1 | 2021-12-03T05:27:23.000Z | 2021-12-03T05:27:23.000Z | main/service/crypto_stats_service.py | gbdevw/python-fastapi-and-r | 06992857575519e1d1e7ffe72090758fb201b698 | [
"Apache-2.0"
] | null | null | null | main/service/crypto_stats_service.py | gbdevw/python-fastapi-and-r | 06992857575519e1d1e7ffe72090758fb201b698 | [
"Apache-2.0"
] | 1 | 2021-12-11T06:55:32.000Z | 2021-12-11T06:55:32.000Z | from main.entities.crypto_stats import CryptoStats
class CryptoStatsService:
"""Interface for the CryptoStatsService : Service which provides crypto stats"""
async def get_crypto_stats (self, product_id: str = 'btc-usd') -> CryptoStats:
pass | 43 | 84 | 0.751938 |