blob_id (stringlengths 40) | directory_id (stringlengths 40) | path (stringlengths 3–616) | content_id (stringlengths 40) | detected_licenses (sequencelengths 0–112) | license_type (stringclasses, 2 values) | repo_name (stringlengths 5–115) | snapshot_id (stringlengths 40) | revision_id (stringlengths 40) | branch_name (stringclasses, 777 values) | visit_date (timestamp[us], 2015-08-06 10:31:46 – 2023-09-06 10:44:38) | revision_date (timestamp[us], 1970-01-01 02:38:32 – 2037-05-03 13:00:00) | committer_date (timestamp[us], 1970-01-01 02:38:32 – 2023-09-06 01:08:06) | github_id (int64, 4.92k – 681M, nullable) | star_events_count (int64, 0 – 209k) | fork_events_count (int64, 0 – 110k) | gha_license_id (stringclasses, 22 values) | gha_event_created_at (timestamp[us], 2012-06-04 01:52:49 – 2023-09-14 21:59:50, nullable) | gha_created_at (timestamp[us], 2008-05-22 07:58:19 – 2023-08-21 12:35:19, nullable) | gha_language (stringclasses, 149 values) | src_encoding (stringclasses, 26 values) | language (stringclasses, 1 value) | is_vendor (bool, 2 classes) | is_generated (bool, 2 classes) | length_bytes (int64, 3 – 10.2M) | extension (stringclasses, 188 values) | content (stringlengths 3 – 10.2M) | authors (sequencelengths 1) | author_id (stringlengths 1–132) |
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
556c12041b5bfbc25611e4d336cb5c75bf26346e | 088f76ed195918dcf35fe77d5832a75987cd183c | /modulos/db/migrations/0001_initial.py | cd415d70496dec49cf4ae7c400d93f41e5e01696 | [] | no_license | diegofer/alliance | 0b25c8771425c32bb2fe0a9930c69ce23ebdacf3 | 2810f3faf06cc21253e5db485e5980ffa6eeb585 | refs/heads/master | 2021-01-25T03:20:14.340425 | 2013-10-10T06:57:20 | 2013-10-10T06:57:20 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,639 | py | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models


class Migration(SchemaMigration):

    def forwards(self, orm):
        # Adding model 'Region'
        db.create_table(u'db_region', (
            (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('nombre', self.gf('django.db.models.fields.CharField')(max_length=40)),
            ('path', self.gf('modulos.django_google_maps.fields.PathField')()),
            ('center', self.gf('modulos.django_google_maps.fields.GeoLocationField')(max_length=100)),
            ('zoom', self.gf('django.db.models.fields.CharField')(max_length=3)),
            ('user', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['usuarios.Usuario'])),
        ))
        db.send_create_signal(u'db', ['Region'])

    def backwards(self, orm):
        # Deleting model 'Region'
        db.delete_table(u'db_region')

    models = {
        u'auth.group': {
            'Meta': {'object_name': 'Group'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
            'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
        },
        u'auth.permission': {
            'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
            'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        u'contenttypes.contenttype': {
            'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        u'db.region': {
            'Meta': {'object_name': 'Region'},
            'center': ('modulos.django_google_maps.fields.GeoLocationField', [], {'max_length': '100'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'nombre': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
            'path': ('modulos.django_google_maps.fields.PathField', [], {}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['usuarios.Usuario']"}),
            'zoom': ('django.db.models.fields.CharField', [], {'max_length': '3'})
        },
        u'usuarios.usuario': {
            'Meta': {'object_name': 'Usuario'},
            'ambito': ('django.db.models.fields.CharField', [], {'max_length': '1'}),
            'email': ('django.db.models.fields.EmailField', [], {'unique': 'True', 'max_length': '255', 'db_index': 'True'}),
            'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'is_admin': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'user_padre': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
            'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
            'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'})
        }
    }

    complete_apps = ['db'] | [
"[email protected]"
] | |
1bbdf5c26fea911af2c6b0458b250f758bbb9475 | cf0d8d989da051a81afc60d9f4986c50c1462fb7 | /python高级/09迭代器和生成器/t03_gen_func.py | 84dca9e3fa12480dccad8bebaf5e585e7a7999cd | [] | no_license | pankypan/PythonNotes | 6a8da81a0e79f8bdc757f8493985321ef7873b44 | 48660b00b3b65cca409e61d34c32a024702d5a6e | refs/heads/master | 2023-04-29T01:51:12.930856 | 2021-05-13T00:43:33 | 2021-05-13T00:43:33 | 274,271,807 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 860 | py | # Generator function: any function that contains the yield keyword
def gen_func():
    yield 1
    yield 2
    yield 3


def fib(index):
    if index <= 2:
        return 1
    else:
        return fib(index - 1) + fib(index - 2)


def fib2(index):
    re_list = []
    n, a, b = 0, 0, 1
    while n < index:
        re_list.append(b)
        a, b = b, a + b
        n += 1
    return re_list


def gen_fib(index):
    n, a, b = 0, 0, 1
    while n < index:
        yield b
        a, b = b, a + b
        n += 1


for data in gen_fib(10):
    print(data)
# print (gen_fib(10))

# Fibonacci sequence: 0 1 1 2 3 5 8
# Lazy evaluation: generators make deferred (on-demand) computation possible


def func():
    return 1


if __name__ == "__main__":
    # The generator object is created when Python compiles the bytecode,
    gen = gen_func()
    for value in gen:
        print(value)
    # re = func()
    # pass
| [
"[email protected]"
] | |
2ab74c1ba61579a3956135b2dfce975ebe9e3e83 | c659ce50198ddab51dc6e105523d74c09f25face | /graph-analyzer/app/io/__init__.py | 4a6ac2824c0098e08b35a9a02cf9050f06555a4c | [] | no_license | YanzheL/deeparcher | a9c4e150fecbe7413e75bf1c710c169e0b052a2e | 85ae0de666ce05c41205748aeef40099e0a5116c | refs/heads/dev | 2023-06-23T22:34:05.864953 | 2021-02-09T05:48:34 | 2021-02-09T05:48:34 | 248,565,834 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 171 | py | from .cugraph import to_cugraph
from .dot import from_dot
from .dump import merge_attributes, dump_attributes
from .pb import to_pb_object, from_pb_object, to_pb, from_pb
| [
"[email protected]"
] | |
ff9efbbad2390741fe268885986710d2b4db69f2 | 90dfecb740ebb354c56a1542945384b9b03eacf0 | /supplier/api/serializers.py | 8142392929fa410c7ffa642c2f2e37ed9d2ce931 | [] | no_license | sujatakhadka111/cycleEcommerce | 2cb688b77da916280792ed005580c8c1163a65ff | 0da3771a9c247b2d24bcd30ec12bd47a7f8f21fd | refs/heads/master | 2023-05-05T21:35:59.744465 | 2021-05-26T02:46:40 | 2021-05-26T02:46:40 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 954 | py | from rest_framework import serializers
from supplier.models import Supplier, Category, Cycle, Gallery


class CategorySerializer(serializers.ModelSerializer):
    class Meta:
        model = Category
        fields = '__all__'


class SupplierSerializer(serializers.ModelSerializer):
    class Meta:
        model = Supplier
        fields = '__all__'


class GallerySerializer(serializers.ModelSerializer):
    class Meta:
        model = Gallery
        fields = '__all__'


class CycleSerializer(serializers.ModelSerializer):
    class Meta:
        model = Cycle
        fields = '__all__'


class CycleDetailSerializer(serializers.ModelSerializer):
    supplier = SupplierSerializer()
    category = CategorySerializer()
    gallery = GallerySerializer(read_only=True, many=True, source='gallery_set')

    class Meta:
        model = Cycle
        fields = ('supplier', 'category', 'gallery', 'name', 'slug', 'image', 'description', 'price',)
| [
"[email protected]"
] | |
6f07744254b0ab7acf4036bfef15f375bf52dbf4 | 2f09e893c3a21f4a17c95b99446d1efbf0b109f7 | /huaytools/utils/__init__.py | 9579bb4861c4c663a68bc19f1bb4b42632973cbb | [
"MIT"
] | permissive | knight134/huaytools | b19f0078e724963415c63d60218ae3cc624f598a | cbecd6771c05f8241e756a7619047589397b16d3 | refs/heads/master | 2020-04-24T18:30:27.732740 | 2018-05-27T13:51:24 | 2018-05-27T13:51:24 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,949 | py | """"""
import os
import sys
import pickle
import logging
from six.moves import urllib
from .bunch import Bunch, bunchify, unbunchify
from .time import *
def maybe_mkdirs(path, is_file=False, exist_ok=True):
"""递归创建文件夹
Args:
path (str): 待创建的路径,递归创建
is_file (bool): 是否为文件路径
exist_ok (bool): 默认为 True
Examples:
>>> maybe_mkdirs('D:/Tmp/a/b/')
'D:/Tmp/a/b/'
>>> maybe_mkdirs('D:/Tmp/a/b/c.txt')
'D:/Tmp/a/b/c.txt'
>>> maybe_mkdirs('D:/Tmp/a/b/c', is_file=True) # 假设 c 是一个无后缀文件
'D:/Tmp/a/b/c'
Returns:
str
"""
if is_file:
dirs, filename = os.path.split(path)
os.makedirs(dirs, exist_ok=exist_ok)
else:
os.makedirs(path, exist_ok=exist_ok)
return path
def save_to_pickle(obj, filepath):
"""
保存到 pickle 文件
Args:
obj: 需要保存的对象
filepath(str): 文件名
Returns:
None
"""
filepath = maybe_mkdirs(filepath, is_file=True)
with open(filepath, 'wb') as f:
pickle.dump(obj, f)
def load_from_pickle(filepath):
"""
从 pickle 加载对象
Args:
filepath(str): 文件名
Returns:
"""
with open(filepath) as f:
return pickle.load(f)
def set_logging_basic_config(**kwargs):
"""
快速设置 logging.basicConfig
Args can be specified:
filename: Specifies that a FileHandler be created, using the specified
filename, rather than a StreamHandler.
filemode: Specifies the mode to open the file, if filename is specified
(if filemode is unspecified, it defaults to 'a').
format: Use the specified format string for the handler.
datefmt: Use the specified date/time format.
style: If a format string is specified, use this to specify the
type of format string (possible values '%', '{', '$', for
%-formatting, :meth:`str.format` and :class:`string.Template`
- defaults to '%').
level: Set the root logger level to the specified level.
stream: Use the specified stream to initialize the StreamHandler. Note
that this argument is incompatible with 'filename' - if both
are present, 'stream' is ignored.
handlers: If specified, this should be an iterable of already created
handlers, which will be added to the root handler. Any handler
in the list which does not have a formatter assigned will be
assigned the formatter created in this function.
Returns:
None
"""
if 'format' not in kwargs:
kwargs['format'] = '[%(name)s] : %(asctime)s : %(levelname)s : %(message)s'
if 'level' not in kwargs:
kwargs['level'] = logging.INFO
logging.basicConfig(**kwargs)
def get_filepath_recursive(dirpath, abspath=True, recursive=True):
"""获取目录下所有文件名 (默认递归)
该函数主要用于需要一次性处理大量**相似文件**的情况
该函数主要利用 `os.walk(path)` 实现,
该函数会递归遍历 path 下的所有文件夹,并返回一个生成器
Args:
dirpath (str): 文件夹路径
abspath (bool): 是否返回绝对路径
recursive (bool): 是否递归
Examples:
>>> fs_gen = get_filepath_recursive('D:/Tmp')
Returns:
list
"""
fs = []
if recursive:
for root, _, files in os.walk(dirpath):
if abspath:
fs.extend((os.path.join(root, file) for file in files))
else:
fs.extend(files)
else:
if abspath:
fs.extend((os.path.join(dirpath, file) for file in os.listdir(dirpath)))
else:
fs.extend(os.listdir(dirpath))
return fs
def maybe_download(url, to_path='D:/Tmp', filename=None, expected_byte=None):
"""下载文件到指定目录
Args:
url (str): 文件下载路径
to_path (str): 下载到本地路径
filename (str): 重命名文件
expected_byte (int): 文件预期大小
Returns:
str: filepath
Examples:
>>> url = 'http://mattmahoney.net/dc/bbb.zip'
>>> filepath = maybe_download(url, filename='b.zip')
>>> fp = maybe_download(url, to_path='D:/Tmp/b', expected_byte=45370)
"""
if filename is not None:
filepath = os.path.join(maybe_mkdirs(to_path), filename)
else:
_, filename = os.path.split(url)
filepath = os.path.join(maybe_mkdirs(to_path), filename)
if not os.path.exists(filepath):
urllib.request.urlretrieve(url, filepath)
logging.info('File is downloading.')
if expected_byte is not None:
file_size = os.stat(filepath).st_size
if file_size != expected_byte:
logging.info('File has been damage, please download it manually.')
else:
logging.info('File is ready.')
return filepath
def cycle_iter(iterator):
"""
无限循环迭代器
Args:
iterator (Iterable): 可迭代对象
Examples:
>>> it = cycle_iter([1, 2, 3])
>>> for _ in range(4):
... print(next(it))
1
2
3
1
"""
# while True:
# yield from iter(iterator)
from itertools import cycle
return cycle(iterator)
def system_is_windows():
"""
If the system is windows, return True
Examples:
>>> if system_is_windows():
... print("Windows")
Windows
"""
import platform
return platform.system() == "Windows"
is_windows_system = system_is_windows()
def get_logger(name=None, fname=None, mode='a', level=logging.INFO, stream=None,
fmt="[%(name)s] : %(asctime)s : %(levelname)s : %(message)s"):
"""创建一个 logger
默认 log to console,如果同时指定了 fname,还会将日志输出到文件
Examples:
>>> logger = get_logger("Test", stream=sys.stdout, fmt="[%(name)s] : %(levelname)s : %(message)s")
>>> logger.info("test")
[Test] : INFO : test
"""
logger = logging.Logger(name)
logger.setLevel(level)
fmt = logging.Formatter(fmt)
ch = logging.StreamHandler(stream)
ch.setFormatter(fmt)
logger.addHandler(ch)
if fname is not None:
fh = logging.FileHandler(fname, mode)
fh.setFormatter(fmt)
logger.addHandler(fh)
return logger
def to_unicode(txt, encoding='utf8', errors='strict'):
"""Convert text to unicode.
Args:
txt:
encoding:
errors:
Returns:
str
"""
if sys.version_info[0] >= 3:
unicode = str
if isinstance(txt, unicode):
return txt
return unicode(txt, encoding, errors=errors)
| [
"[email protected]"
] | |
4e7589cb121d96ff365d9ff81907befbb70cd588 | 6be29c75fe23bf38ac2df4125242e767fb37d41c | /utils/update_dependencies.py | 61e6dd1e408145cd15a7b25ccb672cfe3f3ca2dc | [
"Apache-2.0"
] | permissive | Laxman-SM/plaso | 579c7954b2622368427740e2b5687bf2efe249e7 | bec7b974ec9c2967be58fc704afca936591e46d3 | refs/heads/master | 2021-01-22T05:32:59.383909 | 2017-05-26T04:15:29 | 2017-05-26T04:15:29 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 17,694 | py | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""Script to update the dependencies in various configuration files."""
import os
import sys
# Change PYTHONPATH to include dependencies.
sys.path.insert(0, u'.')
import utils.dependencies # pylint: disable=wrong-import-position
class DependencyFileWriter(object):
"""Dependency file writer."""
def __init__(self, dependency_helper):
"""Initializes a dependency file writer.
Args:
dependency_helper (DependencyHelper): dependency helper.
"""
super(DependencyFileWriter, self).__init__()
self._dependency_helper = dependency_helper
class AppveyorYmlWriter(DependencyFileWriter):
"""Appveyor.yml file writer."""
_PATH = os.path.join(u'appveyor.yml')
_VERSION_PYWIN32 = u'220'
_VERSION_WMI = u'1.4.9'
_VERSION_SQLITE = u'3180000'
_DOWNLOAD_PIP = (
u' - ps: (new-object net.webclient).DownloadFile('
u'\'https://bootstrap.pypa.io/get-pip.py\', '
u'\'C:\\Projects\\get-pip.py\')')
_DOWNLOAD_PYWIN32 = (
u' - ps: (new-object net.webclient).DownloadFile('
u'\'https://github.com/log2timeline/l2tbinaries/raw/master/win32/'
u'pywin32-{0:s}.win32-py2.7.exe\', '
u'\'C:\\Projects\\pywin32-{0:s}.win32-py2.7.exe\')').format(
_VERSION_PYWIN32)
_DOWNLOAD_WMI = (
u' - ps: (new-object net.webclient).DownloadFile('
u'\'https://github.com/log2timeline/l2tbinaries/raw/master/win32/'
u'WMI-{0:s}.win32.exe\', \'C:\\Projects\\WMI-{0:s}.win32.exe\')').format(
_VERSION_WMI)
_INSTALL_PIP = (
u' - cmd: "%PYTHON%\\\\python.exe C:\\\\Projects\\\\get-pip.py"')
_INSTALL_PYWIN32 = (
u' - cmd: "%PYTHON%\\\\Scripts\\\\easy_install.exe '
u'C:\\\\Projects\\\\pywin32-{0:s}.win32-py2.7.exe"').format(
_VERSION_PYWIN32)
_INSTALL_WMI = (
u' - cmd: "%PYTHON%\\\\Scripts\\\\easy_install.exe '
u'C:\\\\Projects\\\\WMI-{0:s}.win32.exe"').format(_VERSION_WMI)
_DOWNLOAD_SQLITE = (
u' - ps: (new-object net.webclient).DownloadFile('
u'\'https://www.sqlite.org/2017/sqlite-dll-win32-x86-{0:s}.zip\', '
u'\'C:\\Projects\\sqlite-dll-win32-x86-{0:s}.zip\')').format(
_VERSION_SQLITE)
_EXTRACT_SQLITE = (
u' - ps: $Output = Invoke-Expression -Command '
u'"& \'C:\\\\Program Files\\\\7-Zip\\\\7z.exe\' -y '
u'-oC:\\\\Projects\\\\ x C:\\\\Projects\\\\'
u'sqlite-dll-win32-x86-{0:s}.zip 2>&1"').format(_VERSION_SQLITE)
_INSTALL_SQLITE = (
u' - cmd: copy C:\\Projects\\sqlite3.dll C:\\Python27\\DLLs\\')
_DOWNLOAD_L2TDEVTOOLS = (
u' - cmd: git clone https://github.com/log2timeline/l2tdevtools.git && '
u'move l2tdevtools ..\\')
_FILE_HEADER = [
u'environment:',
u' matrix:',
u' - PYTHON: "C:\\\\Python27"',
u'',
u'install:',
(u' - cmd: \'"C:\\Program Files\\Microsoft SDKs\\Windows\\v7.1\\Bin\\'
u'SetEnv.cmd" /x86 /release\''),
_DOWNLOAD_PIP,
_DOWNLOAD_PYWIN32,
_DOWNLOAD_WMI,
_INSTALL_PIP,
_INSTALL_PYWIN32,
_INSTALL_WMI,
_DOWNLOAD_L2TDEVTOOLS,
_DOWNLOAD_SQLITE,
_EXTRACT_SQLITE,
_INSTALL_SQLITE]
_L2TDEVTOOLS_UPDATE = (
u' - cmd: mkdir dependencies && set PYTHONPATH=..\\l2tdevtools && '
u'"%PYTHON%\\\\python.exe" ..\\l2tdevtools\\tools\\update.py '
u'--download-directory dependencies --machine-type x86 '
u'--msi-targetdir "%PYTHON%" {0:s}')
_FILE_FOOTER = [
u'',
u'build: off',
u'',
u'test_script:',
u' - "%PYTHON%\\\\python.exe run_tests.py"',
u'']
def Write(self):
"""Writes an appveyor.yml file."""
file_content = []
file_content.extend(self._FILE_HEADER)
dependencies = self._dependency_helper.GetL2TBinaries()
dependencies.extend([u'funcsigs', u'mock', u'pbr'])
dependencies = u' '.join(dependencies)
l2tdevtools_update = self._L2TDEVTOOLS_UPDATE.format(dependencies)
file_content.append(l2tdevtools_update)
file_content.extend(self._FILE_FOOTER)
file_content = u'\n'.join(file_content)
file_content = file_content.encode(u'utf-8')
with open(self._PATH, 'wb') as file_object:
file_object.write(file_content)
class DPKGControlWriter(DependencyFileWriter):
"""Dpkg control file writer."""
_PATH = os.path.join(u'config', u'dpkg', u'control')
_PROJECT_NAME = u'plaso'
_MAINTAINER = (
u'Log2Timeline maintainers <[email protected]>')
_FILE_HEADER = [
u'Source: {0:s}'.format(_PROJECT_NAME),
u'Section: python',
u'Priority: extra',
u'Maintainer: {0:s}'.format(_MAINTAINER),
(u'Build-Depends: debhelper (>= 7), python-all (>= 2.7~), '
u'python-setuptools'),
u'Standards-Version: 3.9.5',
u'X-Python-Version: >= 2.7',
u'Homepage: https://github.com/log2timeline/plaso',
u'',
u'Package: plaso-data',
u'Architecture: all',
u'Depends: ${misc:Depends}',
u'Description: Data files for plaso (log2timeline)',
u' Plaso (log2timeline) is a framework to create super timelines. Its',
u' purpose is to extract timestamps from various files found on typical',
u' computer systems and aggregate them.',
u'']
_PYTHON2_PACKAGE_HEADER = [
u'Package: python-{0:s}'.format(_PROJECT_NAME),
u'Architecture: all']
_FILE_DESCRIPTION = [
u'Description: Python bindings for plaso (log2timeline)',
u' Plaso (log2timeline) is a framework to create super timelines. Its',
u' purpose is to extract timestamps from various files found on typical',
u' computer systems and aggregate them.',
u'']
_FILE_FOOTER = [
u'Package: plaso-tools',
u'Architecture: all',
(u'Depends: python-plaso, python (>= 2.7~), ${python:Depends}, '
u'${misc:Depends}'),
u'Description: Tools for plaso (log2timeline)',
u' Plaso (log2timeline) is a framework to create super timelines. Its',
u' purpose is to extract timestamps from various files found on typical',
u' computer systems and aggregate them.',
u'']
def Write(self):
"""Writes a dpkg control file."""
file_content = []
file_content.extend(self._FILE_HEADER)
file_content.extend(self._PYTHON2_PACKAGE_HEADER)
dependencies = self._dependency_helper.GetDPKGDepends()
dependencies.extend([u'${python:Depends}', u'${misc:Depends}'])
dependencies = u', '.join(dependencies)
file_content.append((
u'Depends: plaso-data, {0:s}, ${{python:Depends}}, '
u'${{misc:Depends}}').format(dependencies))
file_content.extend(self._FILE_DESCRIPTION)
file_content.extend(self._FILE_FOOTER)
file_content = u'\n'.join(file_content)
file_content = file_content.encode(u'utf-8')
with open(self._PATH, 'wb') as file_object:
file_object.write(file_content)
class GIFTCOPRInstallScriptWriter(DependencyFileWriter):
"""Class to help write the gift_copr_install.sh file."""
_PATH = os.path.join(u'config', u'linux', u'gift_copr_install.sh')
_FILE_HEADER = [
u'#!/usr/bin/env bash',
u'set -e',
u'',
u'# Dependencies for running Plaso, alphabetized, one per line.',
(u'# This should not include packages only required for testing or '
u'development.')]
_ADDITIONAL_DEPENDENCIES = [
u'',
u'# Additional dependencies for running Plaso tests, alphabetized,',
u'# one per line.',
u'TEST_DEPENDENCIES="python-mock";',
u'',
u'# Additional dependencies for doing Plaso development, alphabetized,',
u'# one per line.',
u'DEVELOPMENT_DEPENDENCIES="python-sphinx',
u' pylint";',
u'',
u'# Additional dependencies for doing Plaso debugging, alphabetized,',
u'# one per line.']
_FILE_FOOTER = [
u'',
u'sudo dnf install dnf-plugins-core',
u'sudo dnf copr enable @gift/dev',
u'sudo dnf install -y ${PLASO_DEPENDENCIES}',
u'',
u'if [[ "$*" =~ "include-debug" ]]; then',
u' sudo dnf install -y ${DEBUG_DEPENDENCIES}',
u'fi',
u'',
u'if [[ "$*" =~ "include-development" ]]; then',
u' sudo dnf install -y ${DEVELOPMENT_DEPENDENCIES}',
u'fi',
u'',
u'if [[ "$*" =~ "include-test" ]]; then',
u' sudo dnf install -y ${TEST_DEPENDENCIES}',
u'fi',
u'']
def Write(self):
"""Writes a gift_copr_install.sh file."""
file_content = []
file_content.extend(self._FILE_HEADER)
dependencies = self._dependency_helper.GetRPMRequires(exclude_version=True)
libyal_dependencies = []
for index, dependency in enumerate(dependencies):
if index == 0:
file_content.append(u'PLASO_DEPENDENCIES="{0:s}'.format(dependency))
elif index + 1 == len(dependencies):
file_content.append(u' {0:s}";'.format(dependency))
else:
file_content.append(u' {0:s}'.format(dependency))
if dependency.startswith(u'lib') and dependency.endswith(u'-python'):
dependency, _, _ = dependency.partition(u'-')
libyal_dependencies.append(dependency)
file_content.extend(self._ADDITIONAL_DEPENDENCIES)
for index, dependency in enumerate(libyal_dependencies):
if index == 0:
file_content.append(u'DEBUG_DEPENDENCIES="{0:s}-debuginfo'.format(
dependency))
elif index + 1 == len(libyal_dependencies):
file_content.append(u' {0:s}-debuginfo";'.format(
dependency))
else:
file_content.append(u' {0:s}-debuginfo'.format(
dependency))
file_content.extend(self._FILE_FOOTER)
file_content = u'\n'.join(file_content)
file_content = file_content.encode(u'utf-8')
with open(self._PATH, 'wb') as file_object:
file_object.write(file_content)
class GIFTPPAInstallScriptWriter(DependencyFileWriter):
"""Class to help write the gift_ppa_install.sh file."""
_PATH = os.path.join(u'config', u'linux', u'gift_ppa_install.sh')
_FILE_HEADER = [
u'#!/usr/bin/env bash',
u'set -e',
u'',
u'# Dependencies for running Plaso, alphabetized, one per line.',
(u'# This should not include packages only required for testing or '
u'development.')]
_ADDITIONAL_DEPENDENCIES = [
u'',
u'# Additional dependencies for running Plaso tests, alphabetized,',
u'# one per line.',
u'TEST_DEPENDENCIES="python-mock";',
u'',
u'# Additional dependencies for doing Plaso development, alphabetized,',
u'# one per line.',
u'DEVELOPMENT_DEPENDENCIES="python-sphinx',
u' pylint";',
u'',
u'# Additional dependencies for doing Plaso debugging, alphabetized,',
u'# one per line.']
_FILE_FOOTER = [
u'',
u'sudo add-apt-repository ppa:gift/dev -y',
u'sudo apt-get update -q',
u'sudo apt-get install -y ${PLASO_DEPENDENCIES}',
u'',
u'if [[ "$*" =~ "include-debug" ]]; then',
u' sudo apt-get install -y ${DEBUG_DEPENDENCIES}',
u'fi',
u'',
u'if [[ "$*" =~ "include-development" ]]; then',
u' sudo apt-get install -y ${DEVELOPMENT_DEPENDENCIES}',
u'fi',
u'',
u'if [[ "$*" =~ "include-test" ]]; then',
u' sudo apt-get install -y ${TEST_DEPENDENCIES}',
u'fi',
u'']
def Write(self):
"""Writes a gift_ppa_install.sh file."""
file_content = []
file_content.extend(self._FILE_HEADER)
dependencies = self._dependency_helper.GetDPKGDepends(exclude_version=True)
libyal_dependencies = []
for index, dependency in enumerate(dependencies):
if index == 0:
file_content.append(u'PLASO_DEPENDENCIES="{0:s}'.format(dependency))
elif index + 1 == len(dependencies):
file_content.append(u' {0:s}";'.format(dependency))
else:
file_content.append(u' {0:s}'.format(dependency))
if dependency.startswith(u'lib') and dependency.endswith(u'-python'):
dependency, _, _ = dependency.partition(u'-')
libyal_dependencies.append(dependency)
file_content.extend(self._ADDITIONAL_DEPENDENCIES)
for index, dependency in enumerate(libyal_dependencies):
if index == 0:
file_content.append(u'DEBUG_DEPENDENCIES="{0:s}-dbg'.format(dependency))
else:
file_content.append(u' {0:s}-dbg'.format(dependency))
file_content.append(u' {0:s}-python-dbg'.format(
dependency))
file_content.append(u' python-guppy";')
file_content.extend(self._FILE_FOOTER)
file_content = u'\n'.join(file_content)
file_content = file_content.encode(u'utf-8')
with open(self._PATH, 'wb') as file_object:
file_object.write(file_content)
class RequirementsWriter(DependencyFileWriter):
"""Requirements.txt file writer."""
_PATH = u'requirements.txt'
_FILE_HEADER = [
u'pip >= 7.0.0',
u'pytest',
u'mock']
def Write(self):
"""Writes a requirements.txt file."""
file_content = []
file_content.extend(self._FILE_HEADER)
dependencies = self._dependency_helper.GetInstallRequires()
for dependency in dependencies:
file_content.append(u'{0:s}'.format(dependency))
file_content = u'\n'.join(file_content)
file_content = file_content.encode(u'utf-8')
with open(self._PATH, 'wb') as file_object:
file_object.write(file_content)
class SetupCfgWriter(DependencyFileWriter):
"""Setup.cfg file writer."""
_PATH = u'setup.cfg'
_MAINTAINER = (
u'Log2Timeline maintainers <[email protected]>')
_FILE_HEADER = [
u'[sdist]',
u'template = MANIFEST.in',
u'manifest = MANIFEST',
u'',
u'[sdist_test_data]',
u'template = MANIFEST.test_data.in',
u'manifest = MANIFEST.test_data',
u'',
u'[bdist_rpm]',
u'release = 1',
u'packager = {0:s}'.format(_MAINTAINER),
u'doc_files = ACKNOWLEDGEMENTS',
u' AUTHORS',
u' LICENSE',
u' README',
u'build_requires = python-setuptools']
def Write(self):
"""Writes a setup.cfg file."""
file_content = []
file_content.extend(self._FILE_HEADER)
dependencies = self._dependency_helper.GetRPMRequires()
for index, dependency in enumerate(dependencies):
if index == 0:
file_content.append(u'requires = {0:s}'.format(dependency))
else:
file_content.append(u' {0:s}'.format(dependency))
file_content = u'\n'.join(file_content)
file_content = file_content.encode(u'utf-8')
with open(self._PATH, 'wb') as file_object:
file_object.write(file_content)
class TravisBeforeInstallScriptWriter(DependencyFileWriter):
"""Travis-CI install.sh file writer."""
_PATH = os.path.join(u'config', u'travis', u'install.sh')
_FILE_HEADER = [
u'#!/bin/bash',
u'#',
u'# Script to set up Travis-CI test VM.',
u'',
(u'COVERALL_DEPENDENCIES="python-coverage python-coveralls '
u'python-docopt";'),
u'']
_FILE_FOOTER = [
u'',
u'# Exit on error.',
u'set -e;',
u'',
u'if test ${TRAVIS_OS_NAME} = "osx";',
u'then',
u'\tgit clone https://github.com/log2timeline/l2tdevtools.git;',
u'',
u'\tmv l2tdevtools ../;',
u'\tmkdir dependencies;',
u'',
(u'\tPYTHONPATH=../l2tdevtools ../l2tdevtools/tools/update.py '
u'--download-directory=dependencies ${L2TBINARIES_DEPENDENCIES} '
u'${L2TBINARIES_TEST_DEPENDENCIES};'),
u'',
u'elif test ${TRAVIS_OS_NAME} = "linux";',
u'then',
u'\tsudo rm -f /etc/apt/sources.list.d/travis_ci_zeromq3-source.list;',
u'',
u'\tsudo add-apt-repository ppa:gift/dev -y;',
u'\tsudo apt-get update -q;',
u'\t# Only install the Python 2 dependencies.',
(u'\t# Also see: https://docs.travis-ci.com/user/languages/python/'
u'#Travis-CI-Uses-Isolated-virtualenvs'),
(u'\tsudo apt-get install -y ${COVERALL_DEPENDENCIES} '
u'${PYTHON2_DEPENDENCIES} ${PYTHON2_TEST_DEPENDENCIES};'),
u'fi',
u'']
def Write(self):
"""Writes an install.sh file."""
file_content = []
file_content.extend(self._FILE_HEADER)
dependencies = self._dependency_helper.GetL2TBinaries()
dependencies = u' '.join(dependencies)
file_content.append(u'L2TBINARIES_DEPENDENCIES="{0:s}";'.format(
dependencies))
file_content.append(u'')
file_content.append(
u'L2TBINARIES_TEST_DEPENDENCIES="funcsigs mock pbr";')
file_content.append(u'')
dependencies = self._dependency_helper.GetDPKGDepends(exclude_version=True)
dependencies = u' '.join(dependencies)
file_content.append(u'PYTHON2_DEPENDENCIES="{0:s}";'.format(dependencies))
file_content.append(u'')
file_content.append(u'PYTHON2_TEST_DEPENDENCIES="python-mock";')
file_content.extend(self._FILE_FOOTER)
file_content = u'\n'.join(file_content)
file_content = file_content.encode(u'utf-8')
with open(self._PATH, 'wb') as file_object:
file_object.write(file_content)
if __name__ == u'__main__':
helper = utils.dependencies.DependencyHelper()
for writer_class in (
AppveyorYmlWriter, DPKGControlWriter, GIFTCOPRInstallScriptWriter,
GIFTPPAInstallScriptWriter, RequirementsWriter, SetupCfgWriter,
TravisBeforeInstallScriptWriter):
writer = writer_class(helper)
writer.Write()
| [
"[email protected]"
] | |
64d1bc524c1d29a597ca9ba46e50836150601d42 | fb0f6646b2a7972454453907fbdc656b7471f55f | /p179_reversed_for01.py | 2562de9f1fb0f0a924ae944b4944cb4778b638d9 | [] | no_license | woojin97318/python_basic | 6497d5c85369746edfe8ca79ad7f3f47c871ee66 | 97e9a322a08f1483bf35dc03507ac36af2bf1ddb | refs/heads/master | 2023-07-15T03:06:05.716623 | 2021-08-25T03:46:48 | 2021-08-25T03:46:48 | 399,681,125 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 114 | py | # Reverse (descending) for loop
for i in range(4, 0 - 1, -1):
    # Print the current loop variable.
    print("현재 반복 변수: {}".format(i)) | [
"[email protected]"
] | |
4d753dfde319985d5ae4884af6dac0cbd1ef5e73 | 487ce91881032c1de16e35ed8bc187d6034205f7 | /codes/CodeJamCrawler/16_0_4_neat/16_0_4_aminoacid_fractalArt.py | 4489c1d81b9dac58e47b59005b9f5a80872dbaee | [] | no_license | DaHuO/Supergraph | 9cd26d8c5a081803015d93cf5f2674009e92ef7e | c88059dc66297af577ad2b8afa4e0ac0ad622915 | refs/heads/master | 2021-06-14T16:07:52.405091 | 2016-08-21T13:39:13 | 2016-08-21T13:39:13 | 49,829,508 | 2 | 0 | null | 2021-03-19T21:55:46 | 2016-01-17T18:23:00 | Python | UTF-8 | Python | false | false | 474 | py |
def fractalArt(K, C, S, inp):
    if S == K:
        print 'Case #%d:' % (inp + 1),
        for i in xrange(1, S + 1):
            print i,
        print
    elif 2 * S <= K:
        print 'Case #%d:' % (inp + 1), 'IMPOSSIBLE'
    else:
        print 'Case #%d:' % (inp + 1),
        for i in xrange(2, 2 + S):
            print i,


if __name__ == '__main__':
    for i in xrange(input()):
        K, C, S = map(int, raw_input().split())
        fractalArt(K, C, S, i)
| [
"[[email protected]]"
] | |
f0673b96822fd829a04400d223f9b5677c8fe4b1 | ec3e57d2c4de3522585176300366d4a74a971b8b | /0x16-api_advanced/1-top_ten.py | 70185f62bb9482ea23c602d5779d7844238172b3 | [] | no_license | moncada92/holberton-system_engineering-devops | 562657ebaea2a26fa0c3f874b5e88e7267c73528 | f40d3eb6fecbcf031e42b43afb716ac63d3b86a3 | refs/heads/master | 2020-12-23T03:37:03.172160 | 2020-10-08T01:27:14 | 2020-10-08T01:27:14 | 237,020,173 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 620 | py | #!/usr/bin/python3
'''top ten in reddit'''
import requests


def top_ten(subreddit):
    '''get the top 10'''
    url = 'https://www.reddit.com/r/{}/hot.json'.format(subreddit)
    agent = "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 \
        (KHTML, like Gecko; Google Web Preview) \
        Chrome/27.0.1453 Safari/537.36"
    headers = {"User-Agent": agent}
    response = requests.get(url, headers=headers).json()
    if 'error' in response:
        print('None')
        return
    _top = response['data']['children']
    for i, top in enumerate(_top[:10], 1):
        print(top['data']['title'])
| [
"[email protected]"
] | |
f971ede530c630222865d8708042fb42c083b737 | 32226e72c8cbaa734b2bdee081c2a2d4d0322702 | /experiments/murtaza/vae/sawyer_torque_vae_td3.py | ae5d625df3f2c46a97b1dc2a24db2631b6b239fa | [
"MIT"
] | permissive | Asap7772/rail-rl-franka-eval | 2b1cbad7adae958b3b53930a837df8a31ab885dc | 4bf99072376828193d05b53cf83c7e8f4efbd3ba | refs/heads/master | 2022-11-15T07:08:33.416025 | 2020-07-12T22:05:32 | 2020-07-12T22:05:32 | 279,155,722 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,127 | py | from sawyer_control.sawyer_reaching import SawyerXYZReachingImgMultitaskEnv
from railrl.launchers.launcher_util import run_experiment
from railrl.launchers.arglauncher import run_variants
import railrl.misc.hyperparameter as hyp
from railrl.torch.vae.relabeled_vae_experiment import experiment

if __name__ == "__main__":
    vae_paths = {
        "16": "/home/mdalal/Documents/railrl-private/data/local/05-14-sawyer-torque-vae-train-16/05-14-sawyer_torque_vae_train_16_2018_05_14_21_48_53_0000--s-32499/itr_1000.pkl",
        "32": "/home/mdalal/Documents/railrl-private/data/local/05-14-sawyer-torque-vae-train-32/05-14-sawyer_torque_vae_train_32_2018_05_14_21_49_34_0000--s-13212/itr_1000.pkl",
        "64": "/home/mdalal/Documents/railrl-private/data/local/05-14-sawyer-torque-vae-train-64/05-14-sawyer_torque_vae_train_64_2018_05_14_22_08_58_0000--s-19762/itr_1000.pkl",
    }
    use_gpu = True
    variant = dict(
        algo_kwargs=dict(
            num_epochs=50,
            num_steps_per_epoch=1000,
            num_steps_per_eval=500,
            tau=1e-2,
            batch_size=128,
            max_path_length=100,
            discount=0.95,
        ),
        env_kwargs=dict(
            action_mode='torque',
            reward='norm',
            update_hz=100,
        ),
        replay_kwargs=dict(
            fraction_goals_are_rollout_goals=0.2,
            fraction_goals_are_env_goals=0.5,
        ),
        algorithm='TD3',
        normalize=False,
        rdim=16,
        render=False,
        env=SawyerXYZReachingImgMultitaskEnv,
        use_env_goals=True,
        vae_paths=vae_paths,
        wrap_mujoco_env=False,
        do_state_based_exp=False,
        exploration_noise=0.1,
        snapshot_mode='last',
        mode='here_no_doodad',
        use_gpu=use_gpu,
    )
    n_seeds = 1
    search_space = {
        'exploration_type': [
            'ou',
        ],
        'algo_kwargs.num_updates_per_env_step': [3],
        'algo_kwargs.discount': [0.98],
        'replay_kwargs.fraction_goals_are_env_goals': [0, 0.5],  # 0.0 is normal, 0.5 means half goals are resampled from env
        'replay_kwargs.fraction_goals_are_rollout_goals': [0.2],  # [0.2, 1.0], # 1.0 is normal, 0.2 is (future, k=4) HER
        'exploration_noise': [0.25],
        'algo_kwargs.reward_scale': [1e-4],  # use ~1e-4 for VAE experiments
        'training_mode': ['train', ],
        'testing_mode': ['test', ],
        'rdim': [16, 32, 64],  # Sweep only for VAE experiments
        'seedid': range(n_seeds),
        'hidden_sizes': [[100, 100]],
    }
    # run_variants(experiment, sweeper.iterate_hyperparameters(), run_id=10)
    sweeper = hyp.DeterministicHyperparameterSweeper(
        search_space, default_parameters=variant,
    )
    for variant in sweeper.iterate_hyperparameters():
        n_seeds = 1
        exp_prefix = 'test'
        mode = 'here_no_doodad'
        for i in range(n_seeds):
            run_experiment(
                experiment,
                mode=mode,
                exp_prefix=exp_prefix,
                variant=variant,
                use_gpu=use_gpu,
            ) | [
"[email protected]"
] | |
ec52c4722e197827169f4edb78d23a75beb1cda9 | c71e5115b895065d2abe4120799ffc28fa729086 | /procon-archive/atcoder.jp/abc170/abc170_a/Main.py | 16f4853ff2370c03e22ec9b47f568e64193acb68 | [] | no_license | ken0105/competitive-programming | eb82f92a7b7ad0db601ea341c1441de6c6165064 | f918f85a0ea6dfbe9cac3ef835f80503bb16a75d | refs/heads/master | 2023-06-05T09:55:25.264731 | 2021-06-29T14:38:20 | 2021-06-29T14:38:20 | 328,328,825 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 173 | py | import math
if __name__ == "__main__":
n = list(map(int,input().split()))
for i in range(len(n)):
if n[i] == 0:
print(i + 1)
exit()
| [
"[email protected]"
] | |
3ebe3d0d69e0fb55f8d199180ad06e9cf03bca59 | e9d2ab28bd23021aef1e478439e290d13dd5ff58 | /python/EXAMPLES/projects/GUI_SIMPLE/p6_gui_calculate_WORKED/index.py | 557d6ef504415e12830cc9d8f1ebaad3750d3131 | [] | no_license | zlodiak/lessons | cb2177203760200672cf4eec546330d9b1a87f7f | f9a08a51c142d37cd8c4b2d50ba5925898b1acf6 | refs/heads/master | 2020-12-24T08:30:30.018325 | 2016-09-17T09:27:19 | 2016-09-17T09:27:19 | 29,296,093 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,449 | py | from tkinter import *
root = Tk()
root.geometry("500x500")
label1 = Label(root, text = "Число 1", bg = "red", fg = "white")
label1.pack(side = LEFT, anchor=NW)
input1 = Entry(root, width = 20, bd = 3)
input1.pack(side = LEFT, anchor=NW)
label2 = Label(root, text = "Число 2", bg = "red", fg = "white")
label2.pack(side = LEFT, anchor=NW)
input2 = Entry(root, width = 20, bd = 3)
input2.pack(side = LEFT, anchor=NW)
var=IntVar()
var.set(1)
rad0 = Radiobutton(root,text="сложить",
variable=var,value=0)
rad1 = Radiobutton(root,text="вычесть",
variable=var,value=1)
rad2 = Radiobutton(root,text="умножить",
variable=var,value=2)
rad3 = Radiobutton(root,text="поделить",
variable=var,value=3)
rad0.pack()
rad1.pack()
rad2.pack()
rad3.pack()
def calculate():
    i1 = int(input1.get())
    i2 = int(input2.get())
    operation = var.get()
    if operation == 0:
        resultat = i1 + i2
    elif operation == 1:
        resultat = i1 - i2
    elif operation == 2:
        resultat = i1 * i2
    else:
        resultat = i1 / i2
    result.configure(text = resultat, fg = 'blue')
button = Button(root, text = 'выполнить действие', command = calculate)
button.pack()
result = Label(root, text = 'result', fg = 'red')
result.pack()
root.mainloop()
| [
"[email protected]"
] | |
6f89807c5c4b792b3ba95fad0c3b1187097b3c86 | 470b46ff2e28f5f7fc4ecd3629980fbfd13a6313 | /programmers/x만큼 간격이 있는 n개의 숫자.py | 2c2a67471edb7e22400647499464326623e3e484 | [] | no_license | jihoonyou/problem-solving | 18c3ff05ae6c37e0c41cc755ffc7377a93bd02a6 | b7e5500ac16ff1b4736954298d13e8a5e1ab8193 | refs/heads/master | 2021-06-12T11:37:49.894072 | 2021-04-22T17:08:27 | 2021-04-22T17:08:27 | 181,782,842 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 253 | py | '''
x만큼 간격이 있는 n개의 숫자 (n numbers spaced x apart)
https://programmers.co.kr/learn/courses/30/lessons/12954
'''


def solution(x, n):
    answer = []
    start = 0
    while n != 0:
        start += x
        answer.append(start)
        n -= 1
    return answer | [
"[email protected]"
] | |
88988fae9222f7680a67577b6a9d0720c5253a5b | 034974504fabd1ee4101bf11ec310173200891b9 | /src/python/strelka/scanners/scan_vb.py | 20eac318118590eba034ed168cd4aaa0f0fea7e9 | [
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | jshlbrd/strelka | 9826591eb53cc5e46887d925996c38fdbec81dbe | 98c89afcc42d8f025e60f201ee9826b6086b5828 | refs/heads/master | 2020-04-11T04:36:02.620498 | 2019-06-11T19:43:44 | 2019-06-11T19:43:44 | 161,518,186 | 2 | 0 | NOASSERTION | 2018-12-12T16:51:38 | 2018-12-12T16:51:37 | null | UTF-8 | Python | false | false | 2,492 | py | import pygments
from pygments import formatters
from pygments import lexers

from strelka import strelka


class ScanVb(strelka.Scanner):
    """Collects metadata from Visual Basic script files.

    Attributes:
        lexer: Pygments lexer ('vbnet') used to parse the file.
    """
    def init(self):
        self.lexer = lexers.get_lexer_by_name('vbnet')

    def scan(self, data, file, options, expire_at):
        highlight = pygments.highlight(
            data,
            self.lexer,
            formatters.RawTokenFormatter(),
        )
        highlight_list = highlight.split(b'\n')

        ordered_highlights = []
        for hl in highlight_list:
            split_highlight = hl.split(b'\t')
            if len(split_highlight) == 2:
                token = split_highlight[0].decode()
                value = split_highlight[1].decode().strip('\'"').strip()
                highlight_entry = {'token': token, 'value': value}
                if highlight_entry['value']:
                    ordered_highlights.append(highlight_entry)

        self.event.setdefault('tokens', [])
        self.event.setdefault('comments', [])
        self.event.setdefault('functions', [])
        self.event.setdefault('names', [])
        self.event.setdefault('operators', [])
        self.event.setdefault('strings', [])

        position = 0
        while position < len(ordered_highlights):
            ohlp = ordered_highlights[position]
            if ohlp['token'] not in self.event['tokens']:
                self.event['tokens'].append(ohlp['token'])
            if ohlp['token'] == 'Token.Comment':
                if ohlp['value'] not in self.event['comments']:
                    self.event['comments'].append(ohlp['value'])
            elif ohlp['token'] == 'Token.Name.Function':
                if ohlp['value'] not in self.event['functions']:
                    self.event['functions'].append(ohlp['value'])
            elif ohlp['token'] == 'Token.Name':
                if ohlp['value'] not in self.event['names']:
                    self.event['names'].append(ohlp['value'])
            elif ohlp['token'] == 'Token.Operator':
                if ohlp['value'] not in self.event['operators']:
                    self.event['operators'].append(ohlp['value'])
            elif ohlp['token'] == 'Token.Literal.String':
                if ohlp['value'] not in self.event['strings']:
                    self.event['strings'].append(ohlp['value'])
            position += 1
| [
"[email protected]"
] | |
1c6011eaee23f8c7e8fadbca79b231bce0ba83f5 | 32c56293475f49c6dd1b0f1334756b5ad8763da9 | /google-cloud-sdk/lib/third_party/kubernetes/client/apis/batch_v1beta1_api.py | 2dbffc221638de25db7f2c5ef9e2cd6a03e6adfc | [
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0",
"MIT"
] | permissive | bopopescu/socialliteapp | b9041f17f8724ee86f2ecc6e2e45b8ff6a44b494 | 85bb264e273568b5a0408f733b403c56373e2508 | refs/heads/master | 2022-11-20T03:01:47.654498 | 2020-02-01T20:29:43 | 2020-02-01T20:29:43 | 282,403,750 | 0 | 0 | MIT | 2020-07-25T08:31:59 | 2020-07-25T08:31:59 | null | UTF-8 | Python | false | false | 92,421 | py | # coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen
https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.14.4
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import sys
import os
import re
# python 2 and python 3 compatibility library
from six import iteritems
from ..api_client import ApiClient
class BatchV1beta1Api(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
Ref: https://github.com/swagger-api/swagger-codegen
"""
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
def create_namespaced_cron_job(self, namespace, body, **kwargs):
"""
create a CronJob
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_namespaced_cron_job(namespace, body,
async_req=True)
>>> result = thread.get()
:param async_req bool
:param str namespace: object name and auth scope, such as for teams and
projects (required)
:param V1beta1CronJob body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should
not be persisted. An invalid or unrecognized dryRun directive will
result in an error response and no further processing of the request.
Valid values are: - All: all dry run stages will be processed
:param str field_manager: fieldManager is a name associated with the
actor or entity that is making these changes. The value must be less
than or 128 characters long, and only contain printable characters, as
defined by https://golang.org/pkg/unicode/#IsPrint.
:return: V1beta1CronJob
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.create_namespaced_cron_job_with_http_info(
namespace, body, **kwargs)
else:
(data) = self.create_namespaced_cron_job_with_http_info(
namespace, body, **kwargs)
return data
def create_namespaced_cron_job_with_http_info(self, namespace, body,
**kwargs):
"""
create a CronJob
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_namespaced_cron_job_with_http_info(namespace,
body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str namespace: object name and auth scope, such as for teams and
projects (required)
:param V1beta1CronJob body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should
not be persisted. An invalid or unrecognized dryRun directive will
result in an error response and no further processing of the request.
Valid values are: - All: all dry run stages will be processed
:param str field_manager: fieldManager is a name associated with the
actor or entity that is making these changes. The value must be less
than or 128 characters long, and only contain printable characters, as
defined by https://golang.org/pkg/unicode/#IsPrint.
:return: V1beta1CronJob
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['namespace', 'body', 'pretty', 'dry_run', 'field_manager']
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError("Got an unexpected keyword argument '%s'"
' to method create_namespaced_cron_job' % key)
params[key] = val
del params['kwargs']
# verify the required parameter 'namespace' is set
if ('namespace' not in params) or (params['namespace'] is None):
raise ValueError(
'Missing the required parameter `namespace` when calling `create_namespaced_cron_job`'
)
# verify the required parameter 'body' is set
if ('body' not in params) or (params['body'] is None):
raise ValueError(
'Missing the required parameter `body` when calling `create_namespaced_cron_job`'
)
collection_formats = {}
path_params = {}
if 'namespace' in params:
path_params['namespace'] = params['namespace']
query_params = []
if 'pretty' in params:
query_params.append(('pretty', params['pretty']))
if 'dry_run' in params:
query_params.append(('dryRun', params['dry_run']))
if 'field_manager' in params:
query_params.append(('fieldManager', params['field_manager']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['*/*'])
# Authentication setting
auth_settings = ['BearerToken']
return self.api_client.call_api(
'/apis/batch/v1beta1/namespaces/{namespace}/cronjobs',
'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1beta1CronJob',
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def delete_collection_namespaced_cron_job(self, namespace, **kwargs):
"""
delete collection of CronJob
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_collection_namespaced_cron_job(namespace,
async_req=True)
>>> result = thread.get()
:param async_req bool
:param str namespace: object name and auth scope, such as for teams and
projects (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str _continue: The continue option should be set when retrieving
more results from the server. Since this value is server defined,
clients may only use the continue value from a previous query result
with identical query parameters (except for the value of continue) and
the server may reject a continue value it does not recognize. If the
specified continue value is no longer valid whether due to expiration
(generally five to fifteen minutes) or a configuration change on the
server, the server will respond with a 410 ResourceExpired error
together with a continue token. If the client needs a consistent list,
it must restart their list without the continue field. Otherwise, the
client may send another list request with the token received with the
410 error, the server will respond with a list starting from the next
key, but from the latest snapshot, which is inconsistent from the
previous list results - objects that are created, modified, or deleted
after the first list request will be included in the response, as long
as their keys are after the \"next key\". This field is not supported
when watch is true. Clients may start a watch from the last
resourceVersion value returned by the server and not miss any
modifications.
:param str field_selector: A selector to restrict the list of returned
objects by their fields. Defaults to everything.
:param str label_selector: A selector to restrict the list of returned
objects by their labels. Defaults to everything.
:param int limit: limit is a maximum number of responses to return for a
list call. If more items exist, the server will set the `continue` field
on the list metadata to a value that can be used with the same initial
query to retrieve the next set of results. Setting a limit may return
fewer than the requested amount of items (up to zero items) in the event
all requested objects are filtered out and clients should only use the
presence of the continue field to determine whether more results are
available. Servers may choose not to support the limit argument and will
return all of the available results. If limit is specified and the
continue field is empty, clients may assume that no more results are
available. This field is not supported if watch is true. The server
guarantees that the objects returned when using continue will be
identical to issuing a single list call without a limit - that is, no
objects created, modified, or deleted after the first request is issued
will be included in any subsequent continued requests. This is sometimes
referred to as a consistent snapshot, and ensures that a client that is
using limit to receive smaller chunks of a very large result can ensure
they see all possible objects. If objects are updated during a chunked
list the version of the object that was present at the time the first
list result was calculated is returned.
:param str resource_version: When specified with a watch call, shows
changes that occur after that particular version of a resource. Defaults
to changes from the beginning of history. When specified for list: - if
unset, then the result is returned from remote storage based on
quorum-read flag; - if it's 0, then we simply return what we currently
have in cache, no guarantee; - if set to non zero, then the result is at
least as fresh as given rv.
:param int timeout_seconds: Timeout for the list/watch call. This limits
the duration of the call, regardless of any activity or inactivity.
:param bool watch: Watch for changes to the described resources and
return them as a stream of add, update, and remove notifications.
Specify resourceVersion.
:return: V1Status
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.delete_collection_namespaced_cron_job_with_http_info(
namespace, **kwargs)
else:
(data) = self.delete_collection_namespaced_cron_job_with_http_info(
namespace, **kwargs)
return data
def delete_collection_namespaced_cron_job_with_http_info(
self, namespace, **kwargs):
"""
delete collection of CronJob
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread =
api.delete_collection_namespaced_cron_job_with_http_info(namespace,
async_req=True)
>>> result = thread.get()
:param async_req bool
:param str namespace: object name and auth scope, such as for teams and
projects (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str _continue: The continue option should be set when retrieving
more results from the server. Since this value is server defined,
clients may only use the continue value from a previous query result
with identical query parameters (except for the value of continue) and
the server may reject a continue value it does not recognize. If the
specified continue value is no longer valid whether due to expiration
(generally five to fifteen minutes) or a configuration change on the
server, the server will respond with a 410 ResourceExpired error
together with a continue token. If the client needs a consistent list,
it must restart their list without the continue field. Otherwise, the
client may send another list request with the token received with the
410 error, the server will respond with a list starting from the next
key, but from the latest snapshot, which is inconsistent from the
previous list results - objects that are created, modified, or deleted
after the first list request will be included in the response, as long
as their keys are after the \"next key\". This field is not supported
when watch is true. Clients may start a watch from the last
resourceVersion value returned by the server and not miss any
modifications.
:param str field_selector: A selector to restrict the list of returned
objects by their fields. Defaults to everything.
:param str label_selector: A selector to restrict the list of returned
objects by their labels. Defaults to everything.
:param int limit: limit is a maximum number of responses to return for a
list call. If more items exist, the server will set the `continue` field
on the list metadata to a value that can be used with the same initial
query to retrieve the next set of results. Setting a limit may return
fewer than the requested amount of items (up to zero items) in the event
all requested objects are filtered out and clients should only use the
presence of the continue field to determine whether more results are
available. Servers may choose not to support the limit argument and will
return all of the available results. If limit is specified and the
continue field is empty, clients may assume that no more results are
available. This field is not supported if watch is true. The server
guarantees that the objects returned when using continue will be
identical to issuing a single list call without a limit - that is, no
objects created, modified, or deleted after the first request is issued
will be included in any subsequent continued requests. This is sometimes
referred to as a consistent snapshot, and ensures that a client that is
using limit to receive smaller chunks of a very large result can ensure
they see all possible objects. If objects are updated during a chunked
list the version of the object that was present at the time the first
list result was calculated is returned.
:param str resource_version: When specified with a watch call, shows
changes that occur after that particular version of a resource. Defaults
to changes from the beginning of history. When specified for list: - if
unset, then the result is returned from remote storage based on
quorum-read flag; - if it's 0, then we simply return what we currently
have in cache, no guarantee; - if set to non zero, then the result is at
least as fresh as given rv.
:param int timeout_seconds: Timeout for the list/watch call. This limits
the duration of the call, regardless of any activity or inactivity.
:param bool watch: Watch for changes to the described resources and
return them as a stream of add, update, and remove notifications.
Specify resourceVersion.
:return: V1Status
If the method is called asynchronously,
returns the request thread.
"""
all_params = [
'namespace', 'pretty', '_continue', 'field_selector', 'label_selector',
'limit', 'resource_version', 'timeout_seconds', 'watch'
]
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError("Got an unexpected keyword argument '%s'"
' to method delete_collection_namespaced_cron_job' %
key)
params[key] = val
del params['kwargs']
# verify the required parameter 'namespace' is set
if ('namespace' not in params) or (params['namespace'] is None):
raise ValueError(
'Missing the required parameter `namespace` when calling `delete_collection_namespaced_cron_job`'
)
collection_formats = {}
path_params = {}
if 'namespace' in params:
path_params['namespace'] = params['namespace']
query_params = []
if 'pretty' in params:
query_params.append(('pretty', params['pretty']))
if '_continue' in params:
query_params.append(('continue', params['_continue']))
if 'field_selector' in params:
query_params.append(('fieldSelector', params['field_selector']))
if 'label_selector' in params:
query_params.append(('labelSelector', params['label_selector']))
if 'limit' in params:
query_params.append(('limit', params['limit']))
if 'resource_version' in params:
query_params.append(('resourceVersion', params['resource_version']))
if 'timeout_seconds' in params:
query_params.append(('timeoutSeconds', params['timeout_seconds']))
if 'watch' in params:
query_params.append(('watch', params['watch']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['*/*'])
# Authentication setting
auth_settings = ['BearerToken']
return self.api_client.call_api(
'/apis/batch/v1beta1/namespaces/{namespace}/cronjobs',
'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1Status',
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
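    # Usage sketch (not part of the generated client; names and namespaces are
    # illustrative, and it assumes a client/cluster combination that still
    # serves batch/v1beta1): delete every CronJob in a namespace that matches
    # a label selector.
    #
    #   from kubernetes import client, config
    #   config.load_kube_config()
    #   batch = client.BatchV1beta1Api()
    #   status = batch.delete_collection_namespaced_cron_job(
    #       namespace="default", label_selector="app=reports")
    #   print(status.status)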
def delete_namespaced_cron_job(self, name, namespace, **kwargs):
"""
delete a CronJob
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_namespaced_cron_job(name, namespace,
async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the CronJob (required)
:param str namespace: object name and auth scope, such as for teams and
projects (required)
:param str pretty: If 'true', then the output is pretty printed.
:param V1DeleteOptions body:
:param str dry_run: When present, indicates that modifications should
not be persisted. An invalid or unrecognized dryRun directive will
result in an error response and no further processing of the request.
Valid values are: - All: all dry run stages will be processed
:param int grace_period_seconds: The duration in seconds before the
object should be deleted. Value must be a non-negative integer. The
value zero indicates delete immediately. If this value is nil, the
default grace period for the specified type will be used. Defaults to a
per object value if not specified. Zero means delete immediately.
:param bool orphan_dependents: Deprecated: please use the
PropagationPolicy; this field will be deprecated in 1.7. Should the
dependent objects be orphaned. If true/false, the \"orphan\" finalizer
will be added to/removed from the object's finalizers list. Either this
field or PropagationPolicy may be set, but not both.
:param str propagation_policy: Whether and how garbage collection will
be performed. Either this field or OrphanDependents may be set, but not
both. The default policy is decided by the existing finalizer set in the
metadata.finalizers and the resource-specific default policy. Acceptable
values are: 'Orphan' - orphan the dependents; 'Background' - allow the
garbage collector to delete the dependents in the background;
'Foreground' - a cascading policy that deletes all dependents in the
foreground.
:return: V1Status
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.delete_namespaced_cron_job_with_http_info(
name, namespace, **kwargs)
else:
(data) = self.delete_namespaced_cron_job_with_http_info(
name, namespace, **kwargs)
return data
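    # Usage sketch (illustrative; `batch` is a configured BatchV1beta1Api as in
    # the sketch above): delete a single CronJob and let the garbage collector
    # remove its Jobs in the background.
    #
    #   opts = client.V1DeleteOptions(propagation_policy="Background")
    #   batch.delete_namespaced_cron_job(
    #       name="nightly-report", namespace="default", body=opts)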
def delete_namespaced_cron_job_with_http_info(self, name, namespace,
**kwargs):
"""
delete a CronJob
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_namespaced_cron_job_with_http_info(name,
namespace, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the CronJob (required)
:param str namespace: object name and auth scope, such as for teams and
projects (required)
:param str pretty: If 'true', then the output is pretty printed.
:param V1DeleteOptions body:
:param str dry_run: When present, indicates that modifications should
not be persisted. An invalid or unrecognized dryRun directive will
result in an error response and no further processing of the request.
Valid values are: - All: all dry run stages will be processed
:param int grace_period_seconds: The duration in seconds before the
object should be deleted. Value must be a non-negative integer. The
value zero indicates delete immediately. If this value is nil, the
default grace period for the specified type will be used. Defaults to a
per object value if not specified. Zero means delete immediately.
:param bool orphan_dependents: Deprecated: please use the
PropagationPolicy; this field will be deprecated in 1.7. Should the
dependent objects be orphaned. If true/false, the \"orphan\" finalizer
will be added to/removed from the object's finalizers list. Either this
field or PropagationPolicy may be set, but not both.
:param str propagation_policy: Whether and how garbage collection will
be performed. Either this field or OrphanDependents may be set, but not
both. The default policy is decided by the existing finalizer set in the
metadata.finalizers and the resource-specific default policy. Acceptable
values are: 'Orphan' - orphan the dependents; 'Background' - allow the
garbage collector to delete the dependents in the background;
'Foreground' - a cascading policy that deletes all dependents in the
foreground.
:return: V1Status
If the method is called asynchronously,
returns the request thread.
"""
all_params = [
'name', 'namespace', 'pretty', 'body', 'dry_run',
'grace_period_seconds', 'orphan_dependents', 'propagation_policy'
]
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError("Got an unexpected keyword argument '%s'"
' to method delete_namespaced_cron_job' % key)
params[key] = val
del params['kwargs']
# verify the required parameter 'name' is set
if ('name' not in params) or (params['name'] is None):
raise ValueError(
'Missing the required parameter `name` when calling `delete_namespaced_cron_job`'
)
# verify the required parameter 'namespace' is set
if ('namespace' not in params) or (params['namespace'] is None):
raise ValueError(
'Missing the required parameter `namespace` when calling `delete_namespaced_cron_job`'
)
collection_formats = {}
path_params = {}
if 'name' in params:
path_params['name'] = params['name']
if 'namespace' in params:
path_params['namespace'] = params['namespace']
query_params = []
if 'pretty' in params:
query_params.append(('pretty', params['pretty']))
if 'dry_run' in params:
query_params.append(('dryRun', params['dry_run']))
if 'grace_period_seconds' in params:
query_params.append(
('gracePeriodSeconds', params['grace_period_seconds']))
if 'orphan_dependents' in params:
query_params.append(('orphanDependents', params['orphan_dependents']))
if 'propagation_policy' in params:
query_params.append(('propagationPolicy', params['propagation_policy']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['*/*'])
# Authentication setting
auth_settings = ['BearerToken']
return self.api_client.call_api(
'/apis/batch/v1beta1/namespaces/{namespace}/cronjobs/{name}',
'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1Status',
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_api_resources(self, **kwargs):
"""
get available resources
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_api_resources(async_req=True)
>>> result = thread.get()
:param async_req bool
:return: V1APIResourceList
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_api_resources_with_http_info(**kwargs)
else:
(data) = self.get_api_resources_with_http_info(**kwargs)
return data
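    # Usage sketch (illustrative; `batch` as above): discover which resources
    # the batch/v1beta1 group serves, e.g. to confirm that `cronjobs` is
    # available before relying on it.
    #
    #   resources = batch.get_api_resources()
    #   assert "cronjobs" in [r.name for r in resources.resources]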
def get_api_resources_with_http_info(self, **kwargs):
"""
get available resources
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_api_resources_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool
:return: V1APIResourceList
If the method is called asynchronously,
returns the request thread.
"""
all_params = []
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError("Got an unexpected keyword argument '%s'"
' to method get_api_resources' % key)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
# Authentication setting
auth_settings = ['BearerToken']
return self.api_client.call_api(
'/apis/batch/v1beta1/',
'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1APIResourceList',
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def list_cron_job_for_all_namespaces(self, **kwargs):
"""
list or watch objects of kind CronJob
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_cron_job_for_all_namespaces(async_req=True)
>>> result = thread.get()
:param async_req bool
:param str _continue: The continue option should be set when retrieving
more results from the server. Since this value is server defined,
clients may only use the continue value from a previous query result
with identical query parameters (except for the value of continue) and
the server may reject a continue value it does not recognize. If the
specified continue value is no longer valid whether due to expiration
(generally five to fifteen minutes) or a configuration change on the
server, the server will respond with a 410 ResourceExpired error
together with a continue token. If the client needs a consistent list,
it must restart its list without the continue field. Otherwise, the
client may send another list request with the token received with the
410 error, in which case the server will respond with a list starting
from the next key, but from the latest snapshot, which is inconsistent
with the previous list results - objects that are created, modified, or
deleted
after the first list request will be included in the response, as long
as their keys are after the \"next key\". This field is not supported
when watch is true. Clients may start a watch from the last
resourceVersion value returned by the server and not miss any
modifications.
:param str field_selector: A selector to restrict the list of returned
objects by their fields. Defaults to everything.
:param str label_selector: A selector to restrict the list of returned
objects by their labels. Defaults to everything.
:param int limit: limit is a maximum number of responses to return for a
list call. If more items exist, the server will set the `continue` field
on the list metadata to a value that can be used with the same initial
query to retrieve the next set of results. Setting a limit may return
fewer than the requested number of items (up to zero items) in the event
all requested objects are filtered out and clients should only use the
presence of the continue field to determine whether more results are
available. Servers may choose not to support the limit argument and will
return all of the available results. If limit is specified and the
continue field is empty, clients may assume that no more results are
available. This field is not supported if watch is true. The server
guarantees that the objects returned when using continue will be
identical to issuing a single list call without a limit - that is, no
objects created, modified, or deleted after the first request is issued
will be included in any subsequent continued requests. This is sometimes
referred to as a consistent snapshot, and ensures that a client that is
using limit to receive smaller chunks of a very large result can ensure
they see all possible objects. If objects are updated during a chunked
list, the version of the object that was present at the time the first
list result was calculated is returned.
:param str pretty: If 'true', then the output is pretty printed.
:param str resource_version: When specified with a watch call, shows
changes that occur after that particular version of a resource. Defaults
to changes from the beginning of history. When specified for list: - if
unset, then the result is returned from remote storage based on
quorum-read flag; - if it's 0, then we simply return what we currently
have in cache, no guarantee; - if set to non-zero, then the result is at
least as fresh as given rv.
:param int timeout_seconds: Timeout for the list/watch call. This limits
the duration of the call, regardless of any activity or inactivity.
:param bool watch: Watch for changes to the described resources and
return them as a stream of add, update, and remove notifications.
Specify resourceVersion.
:return: V1beta1CronJobList
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.list_cron_job_for_all_namespaces_with_http_info(**kwargs)
else:
(data) = self.list_cron_job_for_all_namespaces_with_http_info(**kwargs)
return data
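    # Usage sketch (illustrative; the label selector is an assumption): list
    # CronJobs across all namespaces and print their schedules.
    #
    #   cron_jobs = batch.list_cron_job_for_all_namespaces(
    #       label_selector="team=data")
    #   for cj in cron_jobs.items:
    #       print(cj.metadata.namespace, cj.metadata.name, cj.spec.schedule)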
def list_cron_job_for_all_namespaces_with_http_info(self, **kwargs):
"""
list or watch objects of kind CronJob
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread =
api.list_cron_job_for_all_namespaces_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool
:param str _continue: The continue option should be set when retrieving
more results from the server. Since this value is server defined,
clients may only use the continue value from a previous query result
with identical query parameters (except for the value of continue) and
the server may reject a continue value it does not recognize. If the
specified continue value is no longer valid whether due to expiration
(generally five to fifteen minutes) or a configuration change on the
server, the server will respond with a 410 ResourceExpired error
together with a continue token. If the client needs a consistent list,
it must restart its list without the continue field. Otherwise, the
client may send another list request with the token received with the
410 error, in which case the server will respond with a list starting
from the next key, but from the latest snapshot, which is inconsistent
with the previous list results - objects that are created, modified, or
deleted
after the first list request will be included in the response, as long
as their keys are after the \"next key\". This field is not supported
when watch is true. Clients may start a watch from the last
resourceVersion value returned by the server and not miss any
modifications.
:param str field_selector: A selector to restrict the list of returned
objects by their fields. Defaults to everything.
:param str label_selector: A selector to restrict the list of returned
objects by their labels. Defaults to everything.
:param int limit: limit is a maximum number of responses to return for a
list call. If more items exist, the server will set the `continue` field
on the list metadata to a value that can be used with the same initial
query to retrieve the next set of results. Setting a limit may return
fewer than the requested number of items (up to zero items) in the event
all requested objects are filtered out and clients should only use the
presence of the continue field to determine whether more results are
available. Servers may choose not to support the limit argument and will
return all of the available results. If limit is specified and the
continue field is empty, clients may assume that no more results are
available. This field is not supported if watch is true. The server
guarantees that the objects returned when using continue will be
identical to issuing a single list call without a limit - that is, no
objects created, modified, or deleted after the first request is issued
will be included in any subsequent continued requests. This is sometimes
referred to as a consistent snapshot, and ensures that a client that is
using limit to receive smaller chunks of a very large result can ensure
they see all possible objects. If objects are updated during a chunked
list, the version of the object that was present at the time the first
list result was calculated is returned.
:param str pretty: If 'true', then the output is pretty printed.
:param str resource_version: When specified with a watch call, shows
changes that occur after that particular version of a resource. Defaults
to changes from the beginning of history. When specified for list: - if
unset, then the result is returned from remote storage based on
quorum-read flag; - if it's 0, then we simply return what we currently
have in cache, no guarantee; - if set to non-zero, then the result is at
least as fresh as given rv.
:param int timeout_seconds: Timeout for the list/watch call. This limits
the duration of the call, regardless of any activity or inactivity.
:param bool watch: Watch for changes to the described resources and
return them as a stream of add, update, and remove notifications.
Specify resourceVersion.
:return: V1beta1CronJobList
If the method is called asynchronously,
returns the request thread.
"""
all_params = [
'_continue', 'field_selector', 'label_selector', 'limit', 'pretty',
'resource_version', 'timeout_seconds', 'watch'
]
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError("Got an unexpected keyword argument '%s'"
' to method list_cron_job_for_all_namespaces' % key)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
if '_continue' in params:
query_params.append(('continue', params['_continue']))
if 'field_selector' in params:
query_params.append(('fieldSelector', params['field_selector']))
if 'label_selector' in params:
query_params.append(('labelSelector', params['label_selector']))
if 'limit' in params:
query_params.append(('limit', params['limit']))
if 'pretty' in params:
query_params.append(('pretty', params['pretty']))
if 'resource_version' in params:
query_params.append(('resourceVersion', params['resource_version']))
if 'timeout_seconds' in params:
query_params.append(('timeoutSeconds', params['timeout_seconds']))
if 'watch' in params:
query_params.append(('watch', params['watch']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/json;stream=watch', 'application/vnd.kubernetes.protobuf;stream=watch'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['*/*'])
# Authentication setting
auth_settings = ['BearerToken']
return self.api_client.call_api(
'/apis/batch/v1beta1/cronjobs',
'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1beta1CronJobList',
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def list_namespaced_cron_job(self, namespace, **kwargs):
"""
list or watch objects of kind CronJob
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_namespaced_cron_job(namespace, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str namespace: object name and auth scope, such as for teams and
projects (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str _continue: The continue option should be set when retrieving
more results from the server. Since this value is server defined,
clients may only use the continue value from a previous query result
with identical query parameters (except for the value of continue) and
the server may reject a continue value it does not recognize. If the
specified continue value is no longer valid whether due to expiration
(generally five to fifteen minutes) or a configuration change on the
server, the server will respond with a 410 ResourceExpired error
together with a continue token. If the client needs a consistent list,
it must restart its list without the continue field. Otherwise, the
client may send another list request with the token received with the
410 error, in which case the server will respond with a list starting
from the next key, but from the latest snapshot, which is inconsistent
with the previous list results - objects that are created, modified, or
deleted
after the first list request will be included in the response, as long
as their keys are after the \"next key\". This field is not supported
when watch is true. Clients may start a watch from the last
resourceVersion value returned by the server and not miss any
modifications.
:param str field_selector: A selector to restrict the list of returned
objects by their fields. Defaults to everything.
:param str label_selector: A selector to restrict the list of returned
objects by their labels. Defaults to everything.
:param int limit: limit is a maximum number of responses to return for a
list call. If more items exist, the server will set the `continue` field
on the list metadata to a value that can be used with the same initial
query to retrieve the next set of results. Setting a limit may return
fewer than the requested number of items (up to zero items) in the event
all requested objects are filtered out and clients should only use the
presence of the continue field to determine whether more results are
available. Servers may choose not to support the limit argument and will
return all of the available results. If limit is specified and the
continue field is empty, clients may assume that no more results are
available. This field is not supported if watch is true. The server
guarantees that the objects returned when using continue will be
identical to issuing a single list call without a limit - that is, no
objects created, modified, or deleted after the first request is issued
will be included in any subsequent continued requests. This is sometimes
referred to as a consistent snapshot, and ensures that a client that is
using limit to receive smaller chunks of a very large result can ensure
they see all possible objects. If objects are updated during a chunked
list, the version of the object that was present at the time the first
list result was calculated is returned.
:param str resource_version: When specified with a watch call, shows
changes that occur after that particular version of a resource. Defaults
to changes from the beginning of history. When specified for list: - if
unset, then the result is returned from remote storage based on
quorum-read flag; - if it's 0, then we simply return what we currently
have in cache, no guarantee; - if set to non-zero, then the result is at
least as fresh as given rv.
:param int timeout_seconds: Timeout for the list/watch call. This limits
the duration of the call, regardless of any activity or inactivity.
:param bool watch: Watch for changes to the described resources and
return them as a stream of add, update, and remove notifications.
Specify resourceVersion.
:return: V1beta1CronJobList
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.list_namespaced_cron_job_with_http_info(namespace, **kwargs)
else:
(data) = self.list_namespaced_cron_job_with_http_info(namespace, **kwargs)
return data
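    # Usage sketch (illustrative): paginate through a large namespace with
    # `limit` and the continue token described above; in the generated models
    # the token is exposed as `metadata._continue`.
    #
    #   kwargs = {"limit": 50}
    #   while True:
    #       page = batch.list_namespaced_cron_job("default", **kwargs)
    #       for cj in page.items:
    #           print(cj.metadata.name)
    #       if not page.metadata._continue:
    #           break
    #       kwargs["_continue"] = page.metadata._continue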
def list_namespaced_cron_job_with_http_info(self, namespace, **kwargs):
"""
list or watch objects of kind CronJob
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_namespaced_cron_job_with_http_info(namespace,
async_req=True)
>>> result = thread.get()
:param async_req bool
:param str namespace: object name and auth scope, such as for teams and
projects (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str _continue: The continue option should be set when retrieving
more results from the server. Since this value is server defined,
clients may only use the continue value from a previous query result
with identical query parameters (except for the value of continue) and
the server may reject a continue value it does not recognize. If the
specified continue value is no longer valid whether due to expiration
(generally five to fifteen minutes) or a configuration change on the
server, the server will respond with a 410 ResourceExpired error
together with a continue token. If the client needs a consistent list,
it must restart its list without the continue field. Otherwise, the
client may send another list request with the token received with the
410 error, in which case the server will respond with a list starting
from the next key, but from the latest snapshot, which is inconsistent
with the previous list results - objects that are created, modified, or
deleted
after the first list request will be included in the response, as long
as their keys are after the \"next key\". This field is not supported
when watch is true. Clients may start a watch from the last
resourceVersion value returned by the server and not miss any
modifications.
:param str field_selector: A selector to restrict the list of returned
objects by their fields. Defaults to everything.
:param str label_selector: A selector to restrict the list of returned
objects by their labels. Defaults to everything.
:param int limit: limit is a maximum number of responses to return for a
list call. If more items exist, the server will set the `continue` field
on the list metadata to a value that can be used with the same initial
query to retrieve the next set of results. Setting a limit may return
fewer than the requested number of items (up to zero items) in the event
all requested objects are filtered out and clients should only use the
presence of the continue field to determine whether more results are
available. Servers may choose not to support the limit argument and will
return all of the available results. If limit is specified and the
continue field is empty, clients may assume that no more results are
available. This field is not supported if watch is true. The server
guarantees that the objects returned when using continue will be
identical to issuing a single list call without a limit - that is, no
objects created, modified, or deleted after the first request is issued
will be included in any subsequent continued requests. This is sometimes
referred to as a consistent snapshot, and ensures that a client that is
using limit to receive smaller chunks of a very large result can ensure
they see all possible objects. If objects are updated during a chunked
list, the version of the object that was present at the time the first
list result was calculated is returned.
:param str resource_version: When specified with a watch call, shows
changes that occur after that particular version of a resource. Defaults
to changes from the beginning of history. When specified for list: - if
unset, then the result is returned from remote storage based on
quorum-read flag; - if it's 0, then we simply return what we currently
have in cache, no guarantee; - if set to non-zero, then the result is at
least as fresh as given rv.
:param int timeout_seconds: Timeout for the list/watch call. This limits
the duration of the call, regardless of any activity or inactivity.
:param bool watch: Watch for changes to the described resources and
return them as a stream of add, update, and remove notifications.
Specify resourceVersion.
:return: V1beta1CronJobList
If the method is called asynchronously,
returns the request thread.
"""
all_params = [
'namespace', 'pretty', '_continue', 'field_selector', 'label_selector',
'limit', 'resource_version', 'timeout_seconds', 'watch'
]
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError("Got an unexpected keyword argument '%s'"
' to method list_namespaced_cron_job' % key)
params[key] = val
del params['kwargs']
# verify the required parameter 'namespace' is set
if ('namespace' not in params) or (params['namespace'] is None):
raise ValueError(
'Missing the required parameter `namespace` when calling `list_namespaced_cron_job`'
)
collection_formats = {}
path_params = {}
if 'namespace' in params:
path_params['namespace'] = params['namespace']
query_params = []
if 'pretty' in params:
query_params.append(('pretty', params['pretty']))
if '_continue' in params:
query_params.append(('continue', params['_continue']))
if 'field_selector' in params:
query_params.append(('fieldSelector', params['field_selector']))
if 'label_selector' in params:
query_params.append(('labelSelector', params['label_selector']))
if 'limit' in params:
query_params.append(('limit', params['limit']))
if 'resource_version' in params:
query_params.append(('resourceVersion', params['resource_version']))
if 'timeout_seconds' in params:
query_params.append(('timeoutSeconds', params['timeout_seconds']))
if 'watch' in params:
query_params.append(('watch', params['watch']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/json;stream=watch', 'application/vnd.kubernetes.protobuf;stream=watch'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['*/*'])
# Authentication setting
auth_settings = ['BearerToken']
return self.api_client.call_api(
'/apis/batch/v1beta1/namespaces/{namespace}/cronjobs',
'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1beta1CronJobList',
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
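    # Usage sketch (illustrative): the `watch` parameter documented above is
    # normally consumed through the kubernetes.watch helper rather than by
    # passing watch=True directly.
    #
    #   from kubernetes import watch
    #   w = watch.Watch()
    #   for event in w.stream(batch.list_namespaced_cron_job,
    #                         namespace="default", timeout_seconds=60):
    #       print(event["type"], event["object"].metadata.name)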
def patch_namespaced_cron_job(self, name, namespace, body, **kwargs):
"""
partially update the specified CronJob
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.patch_namespaced_cron_job(name, namespace, body,
async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the CronJob (required)
:param str namespace: object name and auth scope, such as for teams and
projects (required)
:param object body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should
not be persisted. An invalid or unrecognized dryRun directive will
result in an error response and no further processing of the request.
Valid values are: - All: all dry run stages will be processed
:param str field_manager: fieldManager is a name associated with the
actor or entity that is making these changes. The value must be no more
than 128 characters long, and only contain printable characters, as
defined by https://golang.org/pkg/unicode/#IsPrint. This field is
required for apply requests (application/apply-patch) but optional for
non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).
:param bool force: Force is going to \"force\" Apply requests. It means
the user will re-acquire conflicting fields owned by other people. The
force flag must be unset for non-apply patch requests.
:return: V1beta1CronJob
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.patch_namespaced_cron_job_with_http_info(
name, namespace, body, **kwargs)
else:
(data) = self.patch_namespaced_cron_job_with_http_info(
name, namespace, body, **kwargs)
return data
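    # Usage sketch (illustrative; the CronJob name is an assumption): suspend a
    # CronJob with a strategic merge patch expressed as a plain dict body.
    #
    #   batch.patch_namespaced_cron_job(
    #       name="nightly-report", namespace="default",
    #       body={"spec": {"suspend": True}})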
def patch_namespaced_cron_job_with_http_info(self, name, namespace, body,
**kwargs):
"""
partially update the specified CronJob
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.patch_namespaced_cron_job_with_http_info(name,
namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the CronJob (required)
:param str namespace: object name and auth scope, such as for teams and
projects (required)
:param object body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should
not be persisted. An invalid or unrecognized dryRun directive will
result in an error response and no further processing of the request.
Valid values are: - All: all dry run stages will be processed
:param str field_manager: fieldManager is a name associated with the
actor or entity that is making these changes. The value must be no more
than 128 characters long, and only contain printable characters, as
defined by https://golang.org/pkg/unicode/#IsPrint. This field is
required for apply requests (application/apply-patch) but optional for
non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).
:param bool force: Force is going to \"force\" Apply requests. It means
the user will re-acquire conflicting fields owned by other people. The
force flag must be unset for non-apply patch requests.
:return: V1beta1CronJob
If the method is called asynchronously,
returns the request thread.
"""
all_params = [
'name', 'namespace', 'body', 'pretty', 'dry_run', 'field_manager',
'force'
]
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError("Got an unexpected keyword argument '%s'"
' to method patch_namespaced_cron_job' % key)
params[key] = val
del params['kwargs']
# verify the required parameter 'name' is set
if ('name' not in params) or (params['name'] is None):
raise ValueError(
'Missing the required parameter `name` when calling `patch_namespaced_cron_job`'
)
# verify the required parameter 'namespace' is set
if ('namespace' not in params) or (params['namespace'] is None):
raise ValueError(
'Missing the required parameter `namespace` when calling `patch_namespaced_cron_job`'
)
# verify the required parameter 'body' is set
if ('body' not in params) or (params['body'] is None):
raise ValueError(
'Missing the required parameter `body` when calling `patch_namespaced_cron_job`'
)
collection_formats = {}
path_params = {}
if 'name' in params:
path_params['name'] = params['name']
if 'namespace' in params:
path_params['namespace'] = params['namespace']
query_params = []
if 'pretty' in params:
query_params.append(('pretty', params['pretty']))
if 'dry_run' in params:
query_params.append(('dryRun', params['dry_run']))
if 'field_manager' in params:
query_params.append(('fieldManager', params['field_manager']))
if 'force' in params:
query_params.append(('force', params['force']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json-patch+json', 'application/merge-patch+json', 'application/strategic-merge-patch+json'])
# Authentication setting
auth_settings = ['BearerToken']
return self.api_client.call_api(
'/apis/batch/v1beta1/namespaces/{namespace}/cronjobs/{name}',
'PATCH',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1beta1CronJob',
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def patch_namespaced_cron_job_status(self, name, namespace, body, **kwargs):
"""
partially update status of the specified CronJob
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.patch_namespaced_cron_job_status(name, namespace, body,
async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the CronJob (required)
:param str namespace: object name and auth scope, such as for teams and
projects (required)
:param object body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should
not be persisted. An invalid or unrecognized dryRun directive will
result in an error response and no further processing of the request.
Valid values are: - All: all dry run stages will be processed
:param str field_manager: fieldManager is a name associated with the
actor or entity that is making these changes. The value must be no more
than 128 characters long, and only contain printable characters, as
defined by https://golang.org/pkg/unicode/#IsPrint. This field is
required for apply requests (application/apply-patch) but optional for
non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).
:param bool force: Force is going to \"force\" Apply requests. It means
the user will re-acquire conflicting fields owned by other people. The
force flag must be unset for non-apply patch requests.
:return: V1beta1CronJob
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.patch_namespaced_cron_job_status_with_http_info(
name, namespace, body, **kwargs)
else:
(data) = self.patch_namespaced_cron_job_status_with_http_info(
name, namespace, body, **kwargs)
return data
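    # Usage sketch (illustrative): the status subresource can be patched the
    # same way, e.g. clearing the recorded list of active Jobs; whether doing
    # so is appropriate depends on the controller that owns the object.
    #
    #   batch.patch_namespaced_cron_job_status(
    #       name="nightly-report", namespace="default",
    #       body={"status": {"active": []}})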
def patch_namespaced_cron_job_status_with_http_info(self, name, namespace,
body, **kwargs):
"""
partially update status of the specified CronJob
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.patch_namespaced_cron_job_status_with_http_info(name,
namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the CronJob (required)
:param str namespace: object name and auth scope, such as for teams and
projects (required)
:param object body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should
not be persisted. An invalid or unrecognized dryRun directive will
result in an error response and no further processing of the request.
Valid values are: - All: all dry run stages will be processed
:param str field_manager: fieldManager is a name associated with the
actor or entity that is making these changes. The value must be no more
than 128 characters long, and only contain printable characters, as
defined by https://golang.org/pkg/unicode/#IsPrint. This field is
required for apply requests (application/apply-patch) but optional for
non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).
:param bool force: Force is going to \"force\" Apply requests. It means
the user will re-acquire conflicting fields owned by other people. The
force flag must be unset for non-apply patch requests.
:return: V1beta1CronJob
If the method is called asynchronously,
returns the request thread.
"""
all_params = [
'name', 'namespace', 'body', 'pretty', 'dry_run', 'field_manager',
'force'
]
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError("Got an unexpected keyword argument '%s'"
' to method patch_namespaced_cron_job_status' % key)
params[key] = val
del params['kwargs']
# verify the required parameter 'name' is set
if ('name' not in params) or (params['name'] is None):
raise ValueError(
'Missing the required parameter `name` when calling `patch_namespaced_cron_job_status`'
)
# verify the required parameter 'namespace' is set
if ('namespace' not in params) or (params['namespace'] is None):
raise ValueError(
'Missing the required parameter `namespace` when calling `patch_namespaced_cron_job_status`'
)
# verify the required parameter 'body' is set
if ('body' not in params) or (params['body'] is None):
raise ValueError(
'Missing the required parameter `body` when calling `patch_namespaced_cron_job_status`'
)
collection_formats = {}
path_params = {}
if 'name' in params:
path_params['name'] = params['name']
if 'namespace' in params:
path_params['namespace'] = params['namespace']
query_params = []
if 'pretty' in params:
query_params.append(('pretty', params['pretty']))
if 'dry_run' in params:
query_params.append(('dryRun', params['dry_run']))
if 'field_manager' in params:
query_params.append(('fieldManager', params['field_manager']))
if 'force' in params:
query_params.append(('force', params['force']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json-patch+json', 'application/merge-patch+json', 'application/strategic-merge-patch+json'])
# Authentication setting
auth_settings = ['BearerToken']
return self.api_client.call_api(
'/apis/batch/v1beta1/namespaces/{namespace}/cronjobs/{name}/status',
'PATCH',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1beta1CronJob',
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def read_namespaced_cron_job(self, name, namespace, **kwargs):
"""
read the specified CronJob
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.read_namespaced_cron_job(name, namespace,
async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the CronJob (required)
:param str namespace: object name and auth scope, such as for teams and
projects (required)
:param str pretty: If 'true', then the output is pretty printed.
:param bool exact: Should the export be exact. Exact export maintains
cluster-specific fields like 'Namespace'. Deprecated. Planned for
removal in 1.18.
:param bool export: Should this value be exported. Export strips fields
that a user cannot specify. Deprecated. Planned for removal in 1.18.
:return: V1beta1CronJob
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.read_namespaced_cron_job_with_http_info(
name, namespace, **kwargs)
else:
(data) = self.read_namespaced_cron_job_with_http_info(
name, namespace, **kwargs)
return data
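    # Usage sketch (illustrative): read a single CronJob and inspect its spec.
    #
    #   cj = batch.read_namespaced_cron_job("nightly-report", "default")
    #   print(cj.spec.schedule, cj.spec.suspend)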
def read_namespaced_cron_job_with_http_info(self, name, namespace, **kwargs):
"""
read the specified CronJob
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.read_namespaced_cron_job_with_http_info(name,
namespace, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the CronJob (required)
:param str namespace: object name and auth scope, such as for teams and
projects (required)
:param str pretty: If 'true', then the output is pretty printed.
:param bool exact: Should the export be exact. Exact export maintains
cluster-specific fields like 'Namespace'. Deprecated. Planned for
removal in 1.18.
:param bool export: Should this value be exported. Export strips fields
that a user cannot specify. Deprecated. Planned for removal in 1.18.
:return: V1beta1CronJob
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['name', 'namespace', 'pretty', 'exact', 'export']
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError("Got an unexpected keyword argument '%s'"
' to method read_namespaced_cron_job' % key)
params[key] = val
del params['kwargs']
# verify the required parameter 'name' is set
if ('name' not in params) or (params['name'] is None):
raise ValueError(
'Missing the required parameter `name` when calling `read_namespaced_cron_job`'
)
# verify the required parameter 'namespace' is set
if ('namespace' not in params) or (params['namespace'] is None):
raise ValueError(
'Missing the required parameter `namespace` when calling `read_namespaced_cron_job`'
)
collection_formats = {}
path_params = {}
if 'name' in params:
path_params['name'] = params['name']
if 'namespace' in params:
path_params['namespace'] = params['namespace']
query_params = []
if 'pretty' in params:
query_params.append(('pretty', params['pretty']))
if 'exact' in params:
query_params.append(('exact', params['exact']))
if 'export' in params:
query_params.append(('export', params['export']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['*/*'])
# Authentication setting
auth_settings = ['BearerToken']
return self.api_client.call_api(
'/apis/batch/v1beta1/namespaces/{namespace}/cronjobs/{name}',
'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1beta1CronJob',
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def read_namespaced_cron_job_status(self, name, namespace, **kwargs):
"""
read status of the specified CronJob
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.read_namespaced_cron_job_status(name, namespace,
async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the CronJob (required)
:param str namespace: object name and auth scope, such as for teams and
projects (required)
:param str pretty: If 'true', then the output is pretty printed.
:return: V1beta1CronJob
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.read_namespaced_cron_job_status_with_http_info(
name, namespace, **kwargs)
else:
(data) = self.read_namespaced_cron_job_status_with_http_info(
name, namespace, **kwargs)
return data
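    # Usage sketch (illustrative): with async_req=True the call returns a
    # thread-like handle whose .get() yields the same V1beta1CronJob.
    #
    #   thread = batch.read_namespaced_cron_job_status(
    #       "nightly-report", "default", async_req=True)
    #   cj = thread.get()
    #   print(cj.status.last_schedule_time)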
def read_namespaced_cron_job_status_with_http_info(self, name, namespace,
**kwargs):
"""
read status of the specified CronJob
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.read_namespaced_cron_job_status_with_http_info(name,
namespace, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the CronJob (required)
:param str namespace: object name and auth scope, such as for teams and
projects (required)
:param str pretty: If 'true', then the output is pretty printed.
:return: V1beta1CronJob
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['name', 'namespace', 'pretty']
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError("Got an unexpected keyword argument '%s'"
' to method read_namespaced_cron_job_status' % key)
params[key] = val
del params['kwargs']
# verify the required parameter 'name' is set
if ('name' not in params) or (params['name'] is None):
raise ValueError(
'Missing the required parameter `name` when calling `read_namespaced_cron_job_status`'
)
# verify the required parameter 'namespace' is set
if ('namespace' not in params) or (params['namespace'] is None):
raise ValueError(
'Missing the required parameter `namespace` when calling `read_namespaced_cron_job_status`'
)
collection_formats = {}
path_params = {}
if 'name' in params:
path_params['name'] = params['name']
if 'namespace' in params:
path_params['namespace'] = params['namespace']
query_params = []
if 'pretty' in params:
query_params.append(('pretty', params['pretty']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['*/*'])
# Authentication setting
auth_settings = ['BearerToken']
return self.api_client.call_api(
'/apis/batch/v1beta1/namespaces/{namespace}/cronjobs/{name}/status',
'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1beta1CronJob',
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def replace_namespaced_cron_job(self, name, namespace, body, **kwargs):
"""
replace the specified CronJob
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.replace_namespaced_cron_job(name, namespace, body,
async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the CronJob (required)
:param str namespace: object name and auth scope, such as for teams and
projects (required)
:param V1beta1CronJob body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should
not be persisted. An invalid or unrecognized dryRun directive will
result in an error response and no further processing of the request.
Valid values are: - All: all dry run stages will be processed
:param str field_manager: fieldManager is a name associated with the
actor or entity that is making these changes. The value must be no more
than 128 characters long, and only contain printable characters, as
defined by https://golang.org/pkg/unicode/#IsPrint.
:return: V1beta1CronJob
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.replace_namespaced_cron_job_with_http_info(
name, namespace, body, **kwargs)
else:
(data) = self.replace_namespaced_cron_job_with_http_info(
name, namespace, body, **kwargs)
return data
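    # Usage sketch (illustrative): replace is a full read-modify-write; if the
    # object changed in between, the stale resourceVersion is rejected with a
    # 409 Conflict by the API server.
    #
    #   cj = batch.read_namespaced_cron_job("nightly-report", "default")
    #   cj.spec.schedule = "0 3 * * *"
    #   batch.replace_namespaced_cron_job(cj.metadata.name, "default", cj)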
def replace_namespaced_cron_job_with_http_info(self, name, namespace, body,
**kwargs):
"""
replace the specified CronJob
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.replace_namespaced_cron_job_with_http_info(name,
namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the CronJob (required)
:param str namespace: object name and auth scope, such as for teams and
projects (required)
:param V1beta1CronJob body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should
not be persisted. An invalid or unrecognized dryRun directive will
result in an error response and no further processing of the request.
Valid values are: - All: all dry run stages will be processed
:param str field_manager: fieldManager is a name associated with the
actor or entity that is making these changes. The value must be no more
than 128 characters long, and only contain printable characters, as
defined by https://golang.org/pkg/unicode/#IsPrint.
:return: V1beta1CronJob
If the method is called asynchronously,
returns the request thread.
"""
all_params = [
'name', 'namespace', 'body', 'pretty', 'dry_run', 'field_manager'
]
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError("Got an unexpected keyword argument '%s'"
' to method replace_namespaced_cron_job' % key)
params[key] = val
del params['kwargs']
# verify the required parameter 'name' is set
if ('name' not in params) or (params['name'] is None):
raise ValueError(
'Missing the required parameter `name` when calling `replace_namespaced_cron_job`'
)
# verify the required parameter 'namespace' is set
if ('namespace' not in params) or (params['namespace'] is None):
raise ValueError(
'Missing the required parameter `namespace` when calling `replace_namespaced_cron_job`'
)
# verify the required parameter 'body' is set
if ('body' not in params) or (params['body'] is None):
raise ValueError(
'Missing the required parameter `body` when calling `replace_namespaced_cron_job`'
)
collection_formats = {}
path_params = {}
if 'name' in params:
path_params['name'] = params['name']
if 'namespace' in params:
path_params['namespace'] = params['namespace']
query_params = []
if 'pretty' in params:
query_params.append(('pretty', params['pretty']))
if 'dry_run' in params:
query_params.append(('dryRun', params['dry_run']))
if 'field_manager' in params:
query_params.append(('fieldManager', params['field_manager']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['*/*'])
# Authentication setting
auth_settings = ['BearerToken']
return self.api_client.call_api(
'/apis/batch/v1beta1/namespaces/{namespace}/cronjobs/{name}',
'PUT',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1beta1CronJob',
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def replace_namespaced_cron_job_status(self, name, namespace, body, **kwargs):
"""
replace status of the specified CronJob
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.replace_namespaced_cron_job_status(name, namespace,
body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the CronJob (required)
:param str namespace: object name and auth scope, such as for teams and
projects (required)
:param V1beta1CronJob body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should
not be persisted. An invalid or unrecognized dryRun directive will
result in an error response and no further processing of the request.
Valid values are: - All: all dry run stages will be processed
:param str field_manager: fieldManager is a name associated with the
actor or entity that is making these changes. The value must be less
        than or equal to 128 characters long, and only contain printable characters, as
defined by https://golang.org/pkg/unicode/#IsPrint.
:return: V1beta1CronJob
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.replace_namespaced_cron_job_status_with_http_info(
name, namespace, body, **kwargs)
else:
(data) = self.replace_namespaced_cron_job_status_with_http_info(
name, namespace, body, **kwargs)
return data
def replace_namespaced_cron_job_status_with_http_info(self, name, namespace,
body, **kwargs):
"""
replace status of the specified CronJob
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.replace_namespaced_cron_job_status_with_http_info(name,
namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the CronJob (required)
:param str namespace: object name and auth scope, such as for teams and
projects (required)
:param V1beta1CronJob body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should
not be persisted. An invalid or unrecognized dryRun directive will
result in an error response and no further processing of the request.
Valid values are: - All: all dry run stages will be processed
:param str field_manager: fieldManager is a name associated with the
actor or entity that is making these changes. The value must be less
        than or equal to 128 characters long, and only contain printable characters, as
defined by https://golang.org/pkg/unicode/#IsPrint.
:return: V1beta1CronJob
If the method is called asynchronously,
returns the request thread.
"""
all_params = [
'name', 'namespace', 'body', 'pretty', 'dry_run', 'field_manager'
]
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError("Got an unexpected keyword argument '%s'"
' to method replace_namespaced_cron_job_status' % key)
params[key] = val
del params['kwargs']
# verify the required parameter 'name' is set
if ('name' not in params) or (params['name'] is None):
raise ValueError(
'Missing the required parameter `name` when calling `replace_namespaced_cron_job_status`'
)
# verify the required parameter 'namespace' is set
if ('namespace' not in params) or (params['namespace'] is None):
raise ValueError(
'Missing the required parameter `namespace` when calling `replace_namespaced_cron_job_status`'
)
# verify the required parameter 'body' is set
if ('body' not in params) or (params['body'] is None):
raise ValueError(
'Missing the required parameter `body` when calling `replace_namespaced_cron_job_status`'
)
collection_formats = {}
path_params = {}
if 'name' in params:
path_params['name'] = params['name']
if 'namespace' in params:
path_params['namespace'] = params['namespace']
query_params = []
if 'pretty' in params:
query_params.append(('pretty', params['pretty']))
if 'dry_run' in params:
query_params.append(('dryRun', params['dry_run']))
if 'field_manager' in params:
query_params.append(('fieldManager', params['field_manager']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['*/*'])
# Authentication setting
auth_settings = ['BearerToken']
return self.api_client.call_api(
'/apis/batch/v1beta1/namespaces/{namespace}/cronjobs/{name}/status',
'PUT',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1beta1CronJob',
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
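    # Illustrative use of the replace methods above (a hedged sketch, not part of the
    # generated client; it assumes a configured kubeconfig and that this class is
    # exposed as kubernetes.client.BatchV1beta1Api, with illustrative resource names):
    #
    #   from kubernetes import client, config
    #   config.load_kube_config()
    #   api = client.BatchV1beta1Api()
    #   cron_job = api.read_namespaced_cron_job('nightly-report', 'default')
    #   cron_job.spec.suspend = True
    #   api.replace_namespaced_cron_job('nightly-report', 'default', cron_job)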
| [
"[email protected]"
] | |
0a78a39d5c03577d008f38ca0df3535425a19bfd | 8d9cc46c596cdcd7bc30fc89f8b2fe0c7ed40c05 | /restdoctor/rest_framework/custom_types.py | 40d53aef69133f4e9c5116991d8cf400e2ff65eb | [] | no_license | yakovistomin/restdoctor | ac9974f6acd36745f60e67425eeb44ee1527fb06 | 1f29dce6ff179b40dbc91a2a57de0ecdea7b6af7 | refs/heads/master | 2023-01-30T12:36:16.506062 | 2020-12-07T20:07:47 | 2020-12-07T20:07:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,176 | py | from __future__ import annotations
import typing as t
from django.db import models
from rest_framework.parsers import BaseParser
from rest_framework.renderers import BaseRenderer
from rest_framework.response import Response
from rest_framework.routers import DynamicRoute, Route
from rest_framework.viewsets import ViewSet
OpenAPISchema = t.Dict[str, 'OpenAPISchema'] # type: ignore
LocalRefs = t.Dict[t.Tuple[str, ...], t.Any]
CodesTuple = t.Tuple[str, str]
ActionCodesMap = t.Dict[str, CodesTuple]
ActionMap = t.Dict[str, str]
Handler = t.Callable[..., Response]
ResourceExtraAction = t.Tuple[str, str, Handler]
RouteOrDynamicRoute = t.Union[Route, DynamicRoute]
RouteOrDynamicRouteList = t.List[RouteOrDynamicRoute]
Parsers = t.Sequence[BaseParser]
OptionalParser = t.Optional[BaseParser]
Renderers = t.Sequence[BaseRenderer]
OptionalRenderer = t.Optional[BaseRenderer]
ResourceMapElement = t.TypeVar('ResourceMapElement')
ResourceMap = t.Dict[str, ResourceMapElement]
ResourceViewsMap = ResourceMap[t.Type[ViewSet]]
ResourceActionsMap = ResourceMap[t.Set[str]]
ResourceHandlersMap = ResourceMap[Handler]
ResourceModelsMap = ResourceMap[t.Optional[models.Model]]
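# Illustrative reading of the aliases above (example values are assumptions, not
# taken from the project): a ResourceViewsMap such as
# {'common': CommonViewSet, 'full': FullViewSet} maps a resource discriminator
# string to the ViewSet class serving it, and ResourceActionsMap maps the same
# keys to sets of action names.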
| [
"[email protected]"
] | |
b9dd064c283b696b7938ee5f7e9e8ebd7db7bd8e | e2e08d7c97398a42e6554f913ee27340226994d9 | /pyautoTest-master(ICF-7.5.0)/test_case/scg/scg_Route/test_c140981.py | f1078bce49f9f722a1cb00cd9b8cc419b55d8930 | [] | no_license | lizhuoya1111/Automated_testing_practice | 88e7be512e831d279324ad710946232377fb4c01 | b3a532d33ddeb8d01fff315bcd59b451befdef23 | refs/heads/master | 2022-12-04T08:19:29.806445 | 2020-08-14T03:51:20 | 2020-08-14T03:51:20 | 287,426,498 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,331 | py | import pytest
import time
import sys
from page_obj.scg.scg_def import *
from page_obj.scg.scg_def_obj import *
from page_obj.scg.scg_def_log import *
from page_obj.common.rail import *
from page_obj.scg.scg_dev import *
from os.path import dirname, abspath
sys.path.insert(0, dirname(dirname(abspath(__file__))))
test_id = "140981"
# Click "Add single Gateway" on the policy route page
def test_c140981(browser):
try:
login_web(browser, url=dev1)
into_fun(browser, 策略路由)
        # Add a single-gateway route
browser.find_element_by_xpath('//*[@id="button_area"]/div/input[2]').click()
time.sleep(1)
gettext = browser.find_element_by_xpath('//*[@id="for_config_tb_title"]/ul/li').text
# print(gettext)
try:
assert "增加新策略路由" in gettext
rail_pass(test_run_id, test_id)
except:
rail_fail(test_run_id, test_id)
assert "增加新策略路由" in gettext
except Exception as err:
        # If any of the steps above raises an error, reload the device to restore the configuration
print(err)
rail_fail(test_run_id, test_id)
reload(hostip=dev1)
assert False
if __name__ == '__main__':
pytest.main(["-v", "-s", "test_c"+str(test_id)+".py"]) | [
"[email protected]"
] | |
d23ba18c266995f3574214eb2e42a26c7b06b840 | 4bd555bc662b8182a2e7644976bfdb00ed5e1ebe | /PythonistaAppTemplate/PythonistaKit.framework/pylib/xml/sax/saxutils.py | 9352dc86e6e159560f26826dbe871ce36bdc6318 | [] | no_license | fhelmli/homeNOWG2 | a103df1ef97194dec9501dbda87ec1f7c111fb4a | e794fd87b296544542fd9dc7ac94c981c6312419 | refs/heads/master | 2020-04-04T13:40:20.417769 | 2019-01-30T21:41:04 | 2019-01-30T21:41:04 | 155,970,686 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 16,059 | py | #import pythonista
"""\
A library of useful helper classes to the SAX classes, for the
convenience of application and driver writers.
"""
import os, urlparse, urllib, types
import io
import sys
import handler
import xmlreader
try:
_StringTypes = [types.StringType, types.UnicodeType]
except AttributeError:
_StringTypes = [types.StringType]
def __dict_replace(s, d):
"""Replace substrings of a string using a dictionary."""
for key, value in d.items():
s = s.replace(key, value)
return s
def escape(data, entities={}):
"""Escape &, <, and > in a string of data.
You can escape other strings of data by passing a dictionary as
the optional entities parameter. The keys and values must all be
strings; each key will be replaced with its corresponding value.
"""
# must do ampersand first
data = data.replace("&", "&")
data = data.replace(">", ">")
data = data.replace("<", "<")
if entities:
data = __dict_replace(data, entities)
return data
def unescape(data, entities={}):
"""Unescape &, <, and > in a string of data.
You can unescape other strings of data by passing a dictionary as
the optional entities parameter. The keys and values must all be
strings; each key will be replaced with its corresponding value.
"""
data = data.replace("<", "<")
data = data.replace(">", ">")
if entities:
data = __dict_replace(data, entities)
# must do ampersand last
return data.replace("&", "&")
def quoteattr(data, entities={}):
"""Escape and quote an attribute value.
Escape &, <, and > in a string of data, then quote it for use as
an attribute value. The \" character will be escaped as well, if
necessary.
You can escape other strings of data by passing a dictionary as
the optional entities parameter. The keys and values must all be
strings; each key will be replaced with its corresponding value.
"""
entities = entities.copy()
entities.update({'\n': ' ', '\r': ' ', '\t':'	'})
data = escape(data, entities)
if '"' in data:
if "'" in data:
data = '"%s"' % data.replace('"', """)
else:
data = "'%s'" % data
else:
data = '"%s"' % data
return data
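# Illustrative doctest-style examples for the escaping helpers above (added for
# clarity; not part of the original module):
#
#   >>> escape('<a & b>')
#   '<a & b>'
#   >>> quoteattr('1 < 2')
#   '"1 < 2"'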
def _gettextwriter(out, encoding):
if out is None:
import sys
out = sys.stdout
if isinstance(out, io.RawIOBase):
buffer = io.BufferedIOBase(out)
# Keep the original file open when the TextIOWrapper is
# destroyed
buffer.close = lambda: None
else:
# This is to handle passed objects that aren't in the
# IOBase hierarchy, but just have a write method
buffer = io.BufferedIOBase()
buffer.writable = lambda: True
buffer.write = out.write
try:
# TextIOWrapper uses this methods to determine
# if BOM (for UTF-16, etc) should be added
buffer.seekable = out.seekable
buffer.tell = out.tell
except AttributeError:
pass
# wrap a binary writer with TextIOWrapper
class UnbufferedTextIOWrapper(io.TextIOWrapper):
def write(self, s):
super(UnbufferedTextIOWrapper, self).write(s)
self.flush()
return UnbufferedTextIOWrapper(buffer, encoding=encoding,
errors='xmlcharrefreplace',
newline='\n')
class XMLGenerator(handler.ContentHandler):
def __init__(self, out=None, encoding="iso-8859-1"):
handler.ContentHandler.__init__(self)
out = _gettextwriter(out, encoding)
self._write = out.write
self._flush = out.flush
self._ns_contexts = [{}] # contains uri -> prefix dicts
self._current_context = self._ns_contexts[-1]
self._undeclared_ns_maps = []
self._encoding = encoding
def _qname(self, name):
"""Builds a qualified name from a (ns_url, localname) pair"""
if name[0]:
# Per http://www.w3.org/XML/1998/namespace, The 'xml' prefix is
# bound by definition to http://www.w3.org/XML/1998/namespace. It
# does not need to be declared and will not usually be found in
# self._current_context.
if 'http://www.w3.org/XML/1998/namespace' == name[0]:
return 'xml:' + name[1]
# The name is in a non-empty namespace
prefix = self._current_context[name[0]]
if prefix:
# If it is not the default namespace, prepend the prefix
return prefix + ":" + name[1]
# Return the unqualified name
return name[1]
# ContentHandler methods
def startDocument(self):
self._write(u'<?xml version="1.0" encoding="%s"?>\n' %
self._encoding)
def endDocument(self):
self._flush()
def startPrefixMapping(self, prefix, uri):
self._ns_contexts.append(self._current_context.copy())
self._current_context[uri] = prefix
self._undeclared_ns_maps.append((prefix, uri))
def endPrefixMapping(self, prefix):
self._current_context = self._ns_contexts[-1]
del self._ns_contexts[-1]
def startElement(self, name, attrs):
self._write(u'<' + name)
for (name, value) in attrs.items():
self._write(u' %s=%s' % (name, quoteattr(value)))
self._write(u'>')
def endElement(self, name):
self._write(u'</%s>' % name)
def startElementNS(self, name, qname, attrs):
self._write(u'<' + self._qname(name))
for prefix, uri in self._undeclared_ns_maps:
if prefix:
self._write(u' xmlns:%s="%s"' % (prefix, uri))
else:
self._write(u' xmlns="%s"' % uri)
self._undeclared_ns_maps = []
for (name, value) in attrs.items():
self._write(u' %s=%s' % (self._qname(name), quoteattr(value)))
self._write(u'>')
def endElementNS(self, name, qname):
self._write(u'</%s>' % self._qname(name))
def characters(self, content):
self._write(escape(unicode(content)))
def ignorableWhitespace(self, content):
self._write(unicode(content))
def processingInstruction(self, target, data):
self._write(u'<?%s %s?>' % (target, data))
class XMLFilterBase(xmlreader.XMLReader):
"""This class is designed to sit between an XMLReader and the
client application's event handlers. By default, it does nothing
but pass requests up to the reader and events on to the handlers
unmodified, but subclasses can override specific methods to modify
the event stream or the configuration requests as they pass
through."""
def __init__(self, parent = None):
xmlreader.XMLReader.__init__(self)
self._parent = parent
# ErrorHandler methods
def error(self, exception):
self._err_handler.error(exception)
def fatalError(self, exception):
self._err_handler.fatalError(exception)
def warning(self, exception):
self._err_handler.warning(exception)
# ContentHandler methods
def setDocumentLocator(self, locator):
self._cont_handler.setDocumentLocator(locator)
def startDocument(self):
self._cont_handler.startDocument()
def endDocument(self):
self._cont_handler.endDocument()
def startPrefixMapping(self, prefix, uri):
self._cont_handler.startPrefixMapping(prefix, uri)
def endPrefixMapping(self, prefix):
self._cont_handler.endPrefixMapping(prefix)
def startElement(self, name, attrs):
self._cont_handler.startElement(name, attrs)
def endElement(self, name):
self._cont_handler.endElement(name)
def startElementNS(self, name, qname, attrs):
self._cont_handler.startElementNS(name, qname, attrs)
def endElementNS(self, name, qname):
self._cont_handler.endElementNS(name, qname)
def characters(self, content):
self._cont_handler.characters(content)
def ignorableWhitespace(self, chars):
self._cont_handler.ignorableWhitespace(chars)
def processingInstruction(self, target, data):
self._cont_handler.processingInstruction(target, data)
def skippedEntity(self, name):
self._cont_handler.skippedEntity(name)
# DTDHandler methods
def notationDecl(self, name, publicId, systemId):
self._dtd_handler.notationDecl(name, publicId, systemId)
def unparsedEntityDecl(self, name, publicId, systemId, ndata):
self._dtd_handler.unparsedEntityDecl(name, publicId, systemId, ndata)
# EntityResolver methods
def resolveEntity(self, publicId, systemId):
return self._ent_handler.resolveEntity(publicId, systemId)
# XMLReader methods
def parse(self, source):
self._parent.setContentHandler(self)
self._parent.setErrorHandler(self)
self._parent.setEntityResolver(self)
self._parent.setDTDHandler(self)
self._parent.parse(source)
def setLocale(self, locale):
self._parent.setLocale(locale)
def getFeature(self, name):
return self._parent.getFeature(name)
def setFeature(self, name, state):
self._parent.setFeature(name, state)
def getProperty(self, name):
return self._parent.getProperty(name)
def setProperty(self, name, value):
self._parent.setProperty(name, value)
# XMLFilter methods
def getParent(self):
return self._parent
def setParent(self, parent):
self._parent = parent
# --- Utility functions
def prepare_input_source(source, base = ""):
"""This function takes an InputSource and an optional base URL and
returns a fully resolved InputSource object ready for reading."""
if type(source) in _StringTypes:
source = xmlreader.InputSource(source)
elif hasattr(source, "read"):
f = source
source = xmlreader.InputSource()
source.setByteStream(f)
if hasattr(f, "name"):
source.setSystemId(f.name)
if source.getByteStream() is None:
try:
sysid = source.getSystemId()
basehead = os.path.dirname(os.path.normpath(base))
encoding = sys.getfilesystemencoding()
if isinstance(sysid, unicode):
if not isinstance(basehead, unicode):
try:
basehead = basehead.decode(encoding)
except UnicodeDecodeError:
sysid = sysid.encode(encoding)
else:
if isinstance(basehead, unicode):
try:
sysid = sysid.decode(encoding)
except UnicodeDecodeError:
basehead = basehead.encode(encoding)
sysidfilename = os.path.join(basehead, sysid)
isfile = os.path.isfile(sysidfilename)
except UnicodeError:
isfile = False
if isfile:
source.setSystemId(sysidfilename)
f = open(sysidfilename, "rb")
else:
source.setSystemId(urlparse.urljoin(base, source.getSystemId()))
f = urllib.urlopen(source.getSystemId())
source.setByteStream(f)
return source
| [
"[email protected]"
] | |
79f5b7f0154dc1d9f06027f04a34a9568c525ba0 | 0e083f405af00029c9ec31849f0f7f81c56844b5 | /mmdeploy/backend/openvino/wrapper.py | ab91f8331b3763712bf98412003dbc3566133b1b | [
"Apache-2.0"
] | permissive | open-mmlab/mmdeploy | 39b9e7b611caab2c76a6142fcb99f0bf1d92ad24 | 5479c8774f5b88d7ed9d399d4e305cb42cc2e73a | refs/heads/main | 2023-09-01T21:29:25.315371 | 2023-08-31T09:59:29 | 2023-08-31T09:59:29 | 441,467,833 | 2,164 | 605 | Apache-2.0 | 2023-09-14T10:39:04 | 2021-12-24T13:04:44 | Python | UTF-8 | Python | false | false | 5,353 | py | # Copyright (c) OpenMMLab. All rights reserved.
import os.path as osp
from typing import Dict, Optional, Sequence
import numpy as np
import torch
from mmdeploy.utils import Backend
from mmdeploy.utils.timer import TimeCounter
from ..base import BACKEND_WRAPPER, BaseWrapper
@BACKEND_WRAPPER.register_module(Backend.OPENVINO.value)
class OpenVINOWrapper(BaseWrapper):
"""OpenVINO wrapper for inference in CPU.
Args:
ir_model_file (str): Input OpenVINO IR model file.
output_names (Sequence[str] | None): Names of model outputs in order.
Defaults to `None` and the wrapper will load the output names from
model.
Examples:
>>> from mmdeploy.backend.openvino import OpenVINOWrapper
>>> import torch
>>>
>>> ir_model_file = 'model.xml'
>>> model = OpenVINOWrapper(ir_model_file)
>>> inputs = dict(input=torch.randn(1, 3, 224, 224, device='cpu'))
>>> outputs = model(inputs)
>>> print(outputs)
"""
def __init__(self,
ir_model_file: str,
output_names: Optional[Sequence[str]] = None,
**kwargs):
from openvino.inference_engine import IECore
self.ie = IECore()
bin_path = osp.splitext(ir_model_file)[0] + '.bin'
self.net = self.ie.read_network(ir_model_file, bin_path)
for input in self.net.input_info.values():
batch_size = input.input_data.shape[0]
dims = len(input.input_data.shape)
            # if the input is an image it has shape (B, C, H, W);
            # in that case the batch size must be 1
assert not dims == 4 or batch_size == 1, \
'Only batch 1 is supported.'
self.device = 'cpu'
self.sess = self.ie.load_network(
network=self.net, device_name=self.device.upper(), num_requests=1)
# TODO: Check if output_names can be read
if output_names is None:
output_names = [name for name in self.net.outputs]
super().__init__(output_names)
def __update_device(
self, inputs: Dict[str, torch.Tensor]) -> Dict[str, torch.Tensor]:
"""Updates the device type to 'self.device' (cpu) for the input
tensors.
Args:
inputs (Dict[str, torch.Tensor]): The input name and tensor pairs.
Returns:
Dict[str, torch.Tensor]: The output name and tensor pairs
with updated device type.
"""
updated_inputs = {
name: data.to(torch.device(self.device)).contiguous()
for name, data in inputs.items()
}
return updated_inputs
def __reshape(self, inputs: Dict[str, torch.Tensor]):
"""Reshape the model for the shape of the input data.
Args:
inputs (Dict[str, torch.Tensor]): The input name and tensor pairs.
"""
input_shapes = {name: data.shape for name, data in inputs.items()}
reshape_needed = False
for input_name, input_shape in input_shapes.items():
blob_shape = self.net.input_info[input_name].input_data.shape
if not np.array_equal(input_shape, blob_shape):
reshape_needed = True
break
if reshape_needed:
self.net.reshape(input_shapes)
self.sess = self.ie.load_network(
network=self.net,
device_name=self.device.upper(),
num_requests=1)
def __process_outputs(
self, outputs: Dict[str, torch.Tensor]) -> Dict[str, torch.Tensor]:
"""Converts tensors from 'torch' to 'numpy' and fixes the names of the
outputs.
Args:
outputs Dict[str, torch.Tensor]: The output name and tensor pairs.
Returns:
Dict[str, torch.Tensor]: The output name and tensor pairs
after processing.
"""
outputs = {
name: torch.from_numpy(tensor)
for name, tensor in outputs.items()
}
cleaned_outputs = {}
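        # Note: OpenVINO can expose an output under a suffixed name such as
        # 'dets.1' (example name only); the loop below keeps just the part
        # before the first '.' so callers receive the plain output name.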
for name, value in outputs.items():
if '.' in name:
new_output_name = name.split('.')[0]
cleaned_outputs[new_output_name] = value
else:
cleaned_outputs[name] = value
return cleaned_outputs
def forward(self, inputs: Dict[str,
torch.Tensor]) -> Dict[str, torch.Tensor]:
"""Run forward inference.
Args:
inputs (Dict[str, torch.Tensor]): The input name and tensor pairs.
Returns:
Dict[str, torch.Tensor]: The output name and tensor pairs.
"""
inputs = self.__update_device(inputs)
self.__reshape(inputs)
outputs = self.__openvino_execute(inputs)
outputs = self.__process_outputs(outputs)
return outputs
@TimeCounter.count_time(Backend.OPENVINO.value)
def __openvino_execute(
self, inputs: Dict[str, torch.Tensor]) -> Dict[str, torch.Tensor]:
"""Run inference with OpenVINO IE.
Args:
inputs (Dict[str, torch.Tensor]): The input name and tensor pairs.
Returns:
Dict[str, numpy.ndarray]: The output name and tensor pairs.
"""
outputs = self.sess.infer(inputs)
return outputs
| [
"[email protected]"
] | |
de6f542882672b658532eb178c29616dbd103d99 | 658ab464e9c796f819ad85f569ad06ab6e66992e | /src/commonlib/pi_work.py | 50c01111fc13ad02ca1933edff005ecb983ade37 | [] | no_license | huowolf/python-demo | 03e5731ba632caada819dd70d0f9dc07c98308a1 | e3b80dcc0e0bc2437a0b2882e17563c8171460a2 | refs/heads/master | 2020-03-23T22:00:57.515258 | 2018-09-07T15:33:22 | 2018-09-07T15:33:22 | 142,147,622 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 754 | py | # The value of pi can be computed according to the following formula:
# Using Python's itertools module, we compute the sum of the first N terms of this series:
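# (Reconstructed from the code below; the referenced formula is the Leibniz series:
#  pi = 4/1 - 4/3 + 4/5 - 4/7 + 4/9 - ...)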
import itertools
def pi(N):
    ' Compute the value of pi '
    # step 1: create a sequence of odd numbers: 1, 3, 5, 7, 9, ...
odds = itertools.count(1, 2)
    # step 2: take the first N terms of that sequence: 1, 3, 5, 7, 9, ..., 2*N-1.
oddN=itertools.islice(odds,N)
    # step 3: apply alternating signs and divide 4 by each term: 4/1, -4/3, 4/5, -4/7, 4/9, ...
sum=0
for i,e in enumerate(oddN):
sum+=(-1)**i*4/e
    # step 4: sum the terms:
return sum
# Test:
print(pi(10))
print(pi(100))
print(pi(1000))
print(pi(10000))
assert 3.04 < pi(10) < 3.05
assert 3.13 < pi(100) < 3.14
assert 3.140 < pi(1000) < 3.141
assert 3.1414 < pi(10000) < 3.1415
print('ok') | [
"[email protected]"
] | |
98e594fa4430a75bb827ee7bfbc0b330e5f0e8a0 | 75f0580af1734b9edb9e06bfadfe48f45b057872 | /studyscores.py | d702f4db0b792522c145ceac388e750fd9d3fc5f | [] | no_license | penteract/adventofcode | 5bb317f8093f60c1d776d0983016a5288d059603 | 7b7344708ef1d58caa339a32a13f3390556b664c | refs/heads/master | 2023-01-29T16:08:13.541190 | 2023-01-16T20:21:02 | 2023-01-16T20:21:02 | 160,901,373 | 5 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,034 | py | #! /usr/bin/env python3
import json
dat = open("419070.json").read()
j = json.loads(dat)
byname = {j["members"][i]["name"]:j["members"][i] for i in j["members"]}
bypuzz = {}
G="get_star_ts"
HOUR=60*60
DAY = HOUR*24
for n in byname:
for l in byname[n]["completion_day_level"]:
if l not in bypuzz: bypuzz[l]={}
k = byname[n]["completion_day_level"][l]
if all(c in k for c in "12"): bypuzz[l][n] = [int(k["1"][G]),int(k["2"][G])]
elif all(c in k for c in "1"): bypuzz[l][n] = [int(k["1"][G]),int(k["1"][G])+1000000]
LEN = len("joefarebrother") + len(str(DAY)) + 1
print(" ".join((b+"").rjust(LEN) for b in ["part1","part2","delta"]))
for day,dat in sorted(bypuzz.items(),key=lambda x:int(x[0])):
l1=[]
l2=[]
ld=[]
for name,(t1,t2) in dat.items():
l1.append(((t1-5*HOUR)%DAY,name))
l2.append(((t2-5*HOUR)%DAY,name))
ld.append((t2-t1,name))
l1.sort()
l2.sort()
ld.sort()
print(day)
for tri in zip(l1,l2,ld):
print(" ".join((str(b)+":"+str(a)).rjust(LEN) for a,b in tri))
print()
| [
"[email protected]"
] | |
f67ee34a5888807e43485f6883b0f5d664156cb6 | e67d4123c10d464c91e70210d58bd4900164645b | /83/D. Basketball Team/basketball_team.py | e6ea0d4199272efb2c911f4808f5545e4d86e15f | [] | no_license | pkaleta/Codeforces | 422188d4483fbf8dd99d6b0654c8e464fb143560 | fb011f616f8db366c6aba80ff2be01692611ef81 | refs/heads/master | 2021-01-19T06:42:30.162981 | 2011-11-26T01:29:30 | 2011-11-26T01:29:30 | 2,853,430 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 464 | py | import sys
n, m, h = map(int, sys.stdin.readline().split())
s = map(int, sys.stdin.readline().split())
ss = sum(s)
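# The computation below corresponds to: one player from department h already has a
# team spot, so the probability that none of the remaining n-1 picks (out of the
# other ss-1 students) comes from department h is C(ss - s[h-1], n-1) / C(ss-1, n-1).
# calc(n1, n2, k) evaluates C(n1, k) / C(n2, k) as a running product of ratios
# instead of computing factorials; the answer printed is 1 minus that probability.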
def calc(n1, n2, k):
ret = 1.0
i = n1-k+1
j = n2-k+1
while i <= n1 or j <= n2:
if i > n1: ii = 1
else: ii = i
if j > n2: jj = 1
else: jj = j
ret *= float(ii)/float(jj)
i += 1
j += 1
return ret
if (ss < n):
print "-1.0"
else:
print 1.0-calc(ss-s[h-1], ss-1, n-1)
| [
"[email protected]"
] | |
fbe7a63214573776495856cc9e932b74a59a55bb | 86cc998fd200a89e7caf5a4acfe81b81a2d5827c | /lib/cron/genDNS | c1f0374fccff9c10a5320cdb0894b994af182ee0 | [
"Apache-2.0"
] | permissive | arguello/contractor | 6fe28b3356548c097f28ffe54555963962351405 | dd78f5b770ee7b5c41cddfc0a61869908b96e385 | refs/heads/master | 2022-05-26T22:04:53.239954 | 2020-05-03T01:55:25 | 2020-05-03T01:55:25 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,265 | #!/usr/bin/env python3
import os
os.environ.setdefault( 'DJANGO_SETTINGS_MODULE', 'contractor.settings' )
import django
django.setup()
import sys
import json
import hashlib
import subprocess
from datetime import datetime
from contractor.Directory.models import Zone
from contractor.Directory.lib import genZone, genPtrZones, genMasterFile
CACHE_FILE = '/var/lib/contractor/dns.cache'
ZONE_DIR = '/etc/bind/contractor/zones/'
MASTER_FILE = '/etc/bind/contractor/dns.master'
def serial():
return str( int( datetime.now().timestamp() / 60 ) )
# The serial number is an unsigned 32-bit number that must be monotonically increasing.
# We take the current timestamp and divide it by 60, which gives one-minute
# resolution and will last long past y2038; if this is still in use past that point,
# I will be impressed (20 years).
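# For example (illustrative): 2020-05-01 00:00:00 UTC has timestamp 1588291200,
# so serial() returns "26471520" -- comfortably below the 32-bit limit of 4294967295.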
def updateFile( filename, txt, cache ):
hash = hashlib.sha256( txt.encode() ).hexdigest()
if cache.get( filename, '' ) != hash:
print( 'Writing "{0}"...'.format( filename ) )
open( os.path.join( ZONE_DIR, filename ), 'w' ).write( txt.replace( '**ZONE_SERIAL**', serial() ) )
cache[ filename ] = hash
print( 'Reading cache...' )
try:
cache = json.loads( open( CACHE_FILE, 'r' ).read() )
except FileNotFoundError:
cache = {}
except json.JSONDecodeError as e:
raise ValueError( 'Error parsing cache file: {0}'.format( e ) )
ptr_list = []
zone_file_list = []
for zone in Zone.objects.all():
print( 'Doing "{0}"...'.format( zone.fqdn ) )
filename, txt = genZone( zone, ptr_list, zone_file_list )
updateFile( filename, txt, cache )
print( 'Doing PTR zones...' )
for filename, txt in genPtrZones( ptr_list, zone_file_list ):
updateFile( filename, txt, cache )
print( 'Writing master config...' )
open( MASTER_FILE, 'w' ).write( genMasterFile( ZONE_DIR, zone_file_list ) )
print( 'Writing cache...' )
open( CACHE_FILE, 'w' ).write( json.dumps( cache ) )
print( 'Checking...' )
try:
subprocess.check_call( [ '/usr/sbin/named-checkconf', '-z' ] )
except subprocess.CalledProcessError:
print( 'Validity check failed...' )
sys.exit( 1 )
try:
subprocess.check_call( [ '/usr/sbin/rndc', 'reload' ] )
except subprocess.CalledProcessError:
print( 'WARNING: "rndc reload" failed' )
print( 'Done!' )
sys.exit( 0 )
| [
"[email protected]"
] | ||
91f837f9b380a07ff980b9f1a00bbf9755ecaafa | 0d0cf0165ca108e8d94056c2bae5ad07fe9f9377 | /28_Winning_a_Kaggle_Competition_in_Python/4_Modeling/gridSearch.py | 64f6581e3eec2ec544d66c65ea3a97365e39e676 | [] | no_license | MACHEIKH/Datacamp_Machine_Learning_For_Everyone | 550ec4038ebdb69993e16fe22d5136f00101b692 | 9fe8947f490da221430e6dccce6e2165a42470f3 | refs/heads/main | 2023-01-22T06:26:15.996504 | 2020-11-24T11:21:53 | 2020-11-24T11:21:53 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,242 | py | # Grid search
# Recall that we've created a baseline Gradient Boosting model in the previous lesson. Your goal now is to find the best max_depth hyperparameter value for this Gradient Boosting model. This hyperparameter limits the number of nodes in each individual tree. You will be using K-fold cross-validation to measure the local performance of the model for each hyperparameter value.
# You're given a function get_cv_score(), which takes the train dataset and dictionary of the model parameters as arguments and returns the overall validation RMSE score over 3-fold cross-validation.
# Instructions
# Specify the grid for possible max_depth values with 3, 6, 9, 12 and 15.
# Pass each hyperparameter candidate in the grid to the model params dictionary.
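# get_cv_score() is supplied by the exercise environment. A minimal sketch of what
# such a helper might look like (illustrative only -- the estimator, target column
# name and scoring choice are assumptions, not taken from the course code):
#
#   import numpy as np
#   from sklearn.ensemble import GradientBoostingRegressor
#   from sklearn.model_selection import cross_val_score
#
#   def get_cv_score(train, params):
#       model = GradientBoostingRegressor(**params)
#       mse = -cross_val_score(model, train.drop('target', axis=1), train['target'],
#                              cv=3, scoring='neg_mean_squared_error')
#       return np.sqrt(mse.mean())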
# Possible max depth values
max_depth_grid = [3,6,9,12,15]
results = {}
# For each value in the grid
for max_depth_candidate in max_depth_grid:
# Specify parameters for the model
params = {'max_depth': max_depth_candidate}
# Calculate validation score for a particular hyperparameter
validation_score = get_cv_score(train, params)
# Save the results for each max depth value
results[max_depth_candidate] = validation_score
print(results)
| [
"[email protected]"
] | |
eb8919a580c7e7e998422669d4fa651907a4c043 | 7950c4faf15ec1dc217391d839ddc21efd174ede | /explore/2021/april/Palindrome_Linked_List.py | 3a0dbc3dad627a0957f59068800b0605ba19ddd9 | [] | no_license | lixiang2017/leetcode | f462ecd269c7157aa4f5854f8c1da97ca5375e39 | f93380721b8383817fe2b0d728deca1321c9ef45 | refs/heads/master | 2023-08-25T02:56:58.918792 | 2023-08-22T16:43:36 | 2023-08-22T16:43:36 | 153,090,613 | 5 | 0 | null | null | null | null | UTF-8 | Python | false | false | 617 | py | '''
Time: O(N)
Space: O(N)
You are here!
Your runtime beats 21.99 % of python submissions.
You are here!
Your memory usage beats 13.77 % of python submissions.
'''
# Definition for singly-linked list.
# class ListNode(object):
# def __init__(self, val=0, next=None):
# self.val = val
# self.next = next
class Solution(object):
def isPalindrome(self, head):
"""
:type head: ListNode
:rtype: bool
"""
cur = head
values = []
while cur:
values.append(cur.val)
cur = cur.next
return values == values[:: -1]
| [
"[email protected]"
] | |
30d181fcf1726a600a41f7afa9ca31f81a00e976 | a66460a46611483dfbdc94c7996893f427e60d97 | /ansible/my_env/lib/python2.7/site-packages/ansible/modules/cloud/smartos/vmadm.py | 11598ef8ee5eeeceb289007193a21555a5e67330 | [
"MIT"
] | permissive | otus-devops-2019-02/yyashkin_infra | 06b57807dde26f94f501828c07503d6bf1d70816 | 0cd0c003884155ac922e3e301305ac202de7028c | refs/heads/master | 2020-04-29T02:42:22.056724 | 2019-05-15T16:24:35 | 2019-05-15T16:24:35 | 175,780,718 | 0 | 0 | MIT | 2019-05-15T16:24:36 | 2019-03-15T08:37:35 | HCL | UTF-8 | Python | false | false | 24,315 | py | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2017, Jasper Lievisse Adriaanse <[email protected]>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: vmadm
short_description: Manage SmartOS virtual machines and zones.
description:
- Manage SmartOS virtual machines through vmadm(1M).
version_added: "2.3"
author: Jasper Lievisse Adriaanse (@jasperla)
options:
archive_on_delete:
required: false
description:
- When enabled, the zone dataset will be mounted on C(/zones/archive)
upon removal.
autoboot:
required: false
description:
- Whether or not a VM is booted when the system is rebooted.
brand:
required: true
choices: [ joyent, joyent-minimal, kvm, lx ]
default: joyent
description:
- Type of virtual machine.
boot:
required: false
description:
- Set the boot order for KVM VMs.
cpu_cap:
required: false
description:
- Sets a limit on the amount of CPU time that can be used by a VM.
Use C(0) for no cap.
cpu_shares:
required: false
description:
- Sets a limit on the number of fair share scheduler (FSS) CPU shares for
a VM. This limit is relative to all other VMs on the system.
cpu_type:
required: false
choices: [ qemu64, host ]
default: qemu64
description:
- Control the type of virtual CPU exposed to KVM VMs.
customer_metadata:
required: false
description:
    - Metadata to be set and associated with this VM, this contains customer
modifiable keys.
delegate_dataset:
required: false
description:
- Whether to delegate a ZFS dataset to an OS VM.
disk_driver:
required: false
description:
- Default value for a virtual disk model for KVM guests.
disks:
required: false
description:
- A list of disks to add, valid properties are documented in vmadm(1M).
dns_domain:
required: false
description:
- Domain value for C(/etc/hosts).
docker:
required: false
description:
- Docker images need this flag enabled along with the I(brand) set to C(lx).
version_added: "2.5"
filesystems:
required: false
description:
- Mount additional filesystems into an OS VM.
firewall_enabled:
required: false
description:
- Enables the firewall, allowing fwadm(1M) rules to be applied.
force:
required: false
description:
- Force a particular action (i.e. stop or delete a VM).
fs_allowed:
required: false
description:
- Comma separated list of filesystem types this zone is allowed to mount.
hostname:
required: false
description:
- Zone/VM hostname.
image_uuid:
required: false
description:
- Image UUID.
indestructible_delegated:
required: false
description:
- Adds an C(@indestructible) snapshot to delegated datasets.
indestructible_zoneroot:
required: false
description:
- Adds an C(@indestructible) snapshot to zoneroot.
internal_metadata:
required: false
description:
- Metadata to be set and associated with this VM, this contains operator
generated keys.
internal_metadata_namespace:
required: false
description:
- List of namespaces to be set as I(internal_metadata-only); these namespaces
will come from I(internal_metadata) rather than I(customer_metadata).
kernel_version:
required: false
description:
- Kernel version to emulate for LX VMs.
limit_priv:
required: false
description:
- Set (comma separated) list of privileges the zone is allowed to use.
maintain_resolvers:
required: false
description:
- Resolvers in C(/etc/resolv.conf) will be updated when updating
the I(resolvers) property.
max_locked_memory:
required: false
description:
- Total amount of memory (in MiBs) on the host that can be locked by this VM.
max_lwps:
required: false
description:
- Maximum number of lightweight processes this VM is allowed to have running.
max_physical_memory:
required: false
description:
- Maximum amount of memory (in MiBs) on the host that the VM is allowed to use.
max_swap:
required: false
description:
- Maximum amount of virtual memory (in MiBs) the VM is allowed to use.
mdata_exec_timeout:
required: false
description:
- Timeout in seconds (or 0 to disable) for the C(svc:/smartdc/mdata:execute) service
that runs user-scripts in the zone.
name:
required: false
aliases: [ alias ]
description:
- Name of the VM. vmadm(1M) uses this as an optional name.
nic_driver:
required: false
description:
- Default value for a virtual NIC model for KVM guests.
nics:
required: false
description:
- A list of nics to add, valid properties are documented in vmadm(1M).
nowait:
required: false
description:
- Consider the provisioning complete when the VM first starts, rather than
when the VM has rebooted.
qemu_opts:
required: false
description:
- Additional qemu arguments for KVM guests. This overwrites the default arguments
provided by vmadm(1M) and should only be used for debugging.
qemu_extra_opts:
required: false
description:
- Additional qemu cmdline arguments for KVM guests.
quota:
required: false
description:
- Quota on zone filesystems (in MiBs).
ram:
required: false
description:
- Amount of virtual RAM for a KVM guest (in MiBs).
resolvers:
required: false
description:
- List of resolvers to be put into C(/etc/resolv.conf).
routes:
required: false
description:
- Dictionary that maps destinations to gateways, these will be set as static
routes in the VM.
spice_opts:
required: false
description:
    - Additional options for SPICE-enabled KVM VMs.
spice_password:
required: false
description:
- Password required to connect to SPICE. By default no password is set.
Please note this can be read from the Global Zone.
state:
required: true
choices: [ present, absent, stopped, restarted ]
description:
- States for the VM to be in. Please note that C(present), C(stopped) and C(restarted)
operate on a VM that is currently provisioned. C(present) means that the VM will be
created if it was absent, and that it will be in a running state. C(absent) will
shutdown the zone before removing it.
C(stopped) means the zone will be created if it doesn't exist already, before shutting
it down.
tmpfs:
required: false
description:
- Amount of memory (in MiBs) that will be available in the VM for the C(/tmp) filesystem.
uuid:
required: false
description:
- UUID of the VM. Can either be a full UUID or C(*) for all VMs.
vcpus:
required: false
description:
- Number of virtual CPUs for a KVM guest.
vga:
required: false
description:
- Specify VGA emulation used by KVM VMs.
virtio_txburst:
required: false
description:
- Number of packets that can be sent in a single flush of the tx queue of virtio NICs.
virtio_txtimer:
required: false
description:
- Timeout (in nanoseconds) for the TX timer of virtio NICs.
vnc_password:
required: false
description:
- Password required to connect to VNC. By default no password is set.
Please note this can be read from the Global Zone.
vnc_port:
required: false
description:
    - TCP port for the VNC server to listen on. Set C(0) for a random port,
      or C(-1) to disable.
zfs_data_compression:
required: false
description:
- Specifies compression algorithm used for this VMs data dataset. This option
only has effect on delegated datasets.
zfs_data_recsize:
required: false
description:
- Suggested block size (power of 2) for files in the delegated dataset's filesystem.
zfs_filesystem_limit:
required: false
description:
- Maximum number of filesystems the VM can have.
zfs_io_priority:
required: false
description:
- IO throttle priority value relative to other VMs.
zfs_root_compression:
required: false
description:
- Specifies compression algorithm used for this VMs root dataset. This option
only has effect on the zoneroot dataset.
zfs_root_recsize:
required: false
description:
- Suggested block size (power of 2) for files in the zoneroot dataset's filesystem.
zfs_snapshot_limit:
required: false
description:
- Number of snapshots the VM can have.
zpool:
required: false
description:
- ZFS pool the VM's zone dataset will be created in.
requirements:
- python >= 2.6
'''
EXAMPLES = '''
- name: create SmartOS zone
vmadm:
brand: joyent
state: present
alias: fw_zone
image_uuid: 95f265b8-96b2-11e6-9597-972f3af4b6d5
firewall_enabled: yes
indestructible_zoneroot: yes
nics:
- nic_tag: admin
ip: dhcp
primary: true
internal_metadata:
root_pw: 'secret'
quota: 1
- name: Delete a zone
vmadm:
alias: test_zone
state: deleted
- name: Stop all zones
vmadm:
uuid: '*'
state: stopped
'''
RETURN = '''
uuid:
description: UUID of the managed VM.
returned: always
type: string
sample: 'b217ab0b-cf57-efd8-cd85-958d0b80be33'
alias:
description: Alias of the managed VM.
returned: When addressing a VM by alias.
type: string
sample: 'dns-zone'
state:
description: State of the target, after execution.
returned: success
type: string
sample: 'running'
'''
import json
import os
import re
import tempfile
import traceback
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_native
# While vmadm(1M) supports a -E option to return any errors in JSON, the
# generated JSON does not play well with the JSON parsers of Python.
# The returned message contains '\n' as part of the stacktrace,
# which breaks the parsers.
def get_vm_prop(module, uuid, prop):
# Lookup a property for the given VM.
# Returns the property, or None if not found.
cmd = '{0} lookup -j -o {1} uuid={2}'.format(module.vmadm, prop, uuid)
(rc, stdout, stderr) = module.run_command(cmd)
if rc != 0:
module.fail_json(
msg='Could not perform lookup of {0} on {1}'.format(prop, uuid), exception=stderr)
try:
stdout_json = json.loads(stdout)
except Exception as e:
module.fail_json(
msg='Invalid JSON returned by vmadm for uuid lookup of {0}'.format(prop),
details=to_native(e), exception=traceback.format_exc())
if len(stdout_json) > 0 and prop in stdout_json[0]:
return stdout_json[0][prop]
else:
return None
def get_vm_uuid(module, alias):
# Lookup the uuid that goes with the given alias.
# Returns the uuid or '' if not found.
cmd = '{0} lookup -j -o uuid alias={1}'.format(module.vmadm, alias)
(rc, stdout, stderr) = module.run_command(cmd)
if rc != 0:
module.fail_json(
msg='Could not retrieve UUID of {0}'.format(alias), exception=stderr)
# If no VM was found matching the given alias, we get back an empty array.
    # That is not an error condition as we might be explicitly checking for its
# absence.
if stdout.strip() == '[]':
return None
else:
try:
stdout_json = json.loads(stdout)
except Exception as e:
module.fail_json(
msg='Invalid JSON returned by vmadm for uuid lookup of {0}'.format(alias),
details=to_native(e), exception=traceback.format_exc())
if len(stdout_json) > 0 and 'uuid' in stdout_json[0]:
return stdout_json[0]['uuid']
def get_all_vm_uuids(module):
# Retrieve the UUIDs for all VMs.
cmd = '{0} lookup -j -o uuid'.format(module.vmadm)
(rc, stdout, stderr) = module.run_command(cmd)
if rc != 0:
module.fail_json(msg='Failed to get VMs list', exception=stderr)
try:
stdout_json = json.loads(stdout)
return [v['uuid'] for v in stdout_json]
except Exception as e:
module.fail_json(msg='Could not retrieve VM UUIDs', details=to_native(e),
exception=traceback.format_exc())
def new_vm(module, uuid, vm_state):
payload_file = create_payload(module, uuid)
(rc, stdout, stderr) = vmadm_create_vm(module, payload_file)
if rc != 0:
changed = False
module.fail_json(msg='Could not create VM', exception=stderr)
else:
changed = True
# 'vmadm create' returns all output to stderr...
match = re.match('Successfully created VM (.*)', stderr)
if match:
vm_uuid = match.groups()[0]
if not is_valid_uuid(vm_uuid):
module.fail_json(msg='Invalid UUID for VM {0}?'.format(vm_uuid))
else:
module.fail_json(msg='Could not retrieve UUID of newly created(?) VM')
# Now that the VM is created, ensure it is in the desired state (if not 'running')
if vm_state != 'running':
ret = set_vm_state(module, vm_uuid, vm_state)
if not ret:
module.fail_json(msg='Could not set VM {0} to state {1}'.format(vm_uuid, vm_state))
try:
os.unlink(payload_file)
except Exception as e:
# Since the payload may contain sensitive information, fail hard
# if we cannot remove the file so the operator knows about it.
module.fail_json(msg='Could not remove temporary JSON payload file {0}: {1}'.format(payload_file, to_native(e)),
exception=traceback.format_exc())
return changed, vm_uuid
def vmadm_create_vm(module, payload_file):
# Create a new VM using the provided payload.
cmd = '{0} create -f {1}'.format(module.vmadm, payload_file)
return module.run_command(cmd)
def set_vm_state(module, vm_uuid, vm_state):
p = module.params
# Check if the VM is already in the desired state.
state = get_vm_prop(module, vm_uuid, 'state')
if state and (state == vm_state):
return None
# Lookup table for the state to be in, and which command to use for that.
    # vm_state: [vmadm command, forceable?]
cmds = {
'stopped': ['stop', True],
'running': ['start', False],
'deleted': ['delete', True],
'rebooted': ['reboot', False]
}
if p['force'] and cmds[vm_state][1]:
force = '-F'
else:
force = ''
cmd = 'vmadm {0} {1} {2}'.format(cmds[vm_state][0], force, vm_uuid)
(rc, stdout, stderr) = module.run_command(cmd)
match = re.match('^Successfully.*', stderr)
if match:
return True
else:
return False
def create_payload(module, uuid):
# Create the JSON payload (vmdef) and return the filename.
p = module.params
# Filter out the few options that are not valid VM properties.
module_options = ['debug', 'force', 'state']
vmattrs = filter(lambda prop: prop not in module_options, p)
vmdef = {}
for attr in vmattrs:
if p[attr]:
vmdef[attr] = p[attr]
try:
vmdef_json = json.dumps(vmdef)
except Exception as e:
module.fail_json(
msg='Could not create valid JSON payload', exception=traceback.format_exc())
# Create the temporary file that contains our payload, and set tight
    # permissions as it may contain sensitive information.
try:
# XXX: When there's a way to get the current ansible temporary directory
# drop the mkstemp call and rely on ANSIBLE_KEEP_REMOTE_FILES to retain
# the payload (thus removing the `save_payload` option).
fname = tempfile.mkstemp()[1]
os.chmod(fname, 0o400)
with open(fname, 'w') as fh:
fh.write(vmdef_json)
except Exception as e:
module.fail_json(msg='Could not save JSON payload: %s' % to_native(e), exception=traceback.format_exc())
return fname
def vm_state_transition(module, uuid, vm_state):
ret = set_vm_state(module, uuid, vm_state)
# Whether the VM changed state.
if ret is None:
return False
elif ret:
return True
else:
module.fail_json(msg='Failed to set VM {0} to state {1}'.format(uuid, vm_state))
def is_valid_uuid(uuid):
if re.match('^[0-9a-f]{8}-([0-9a-f]{4}-){3}[0-9a-f]{12}$', uuid, re.IGNORECASE):
return True
else:
return False
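# For example, is_valid_uuid('b217ab0b-cf57-efd8-cd85-958d0b80be33') returns True
# (the sample UUID from the RETURN block above), while is_valid_uuid('*') returns False.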
def validate_uuids(module):
# Perform basic UUID validation.
failed = []
for u in [['uuid', module.params['uuid']],
['image_uuid', module.params['image_uuid']]]:
if u[1] and u[1] != '*':
if not is_valid_uuid(u[1]):
failed.append(u[0])
if len(failed) > 0:
module.fail_json(msg='No valid UUID(s) found for: {0}'.format(", ".join(failed)))
def manage_all_vms(module, vm_state):
# Handle operations for all VMs, which can by definition only
# be state transitions.
state = module.params['state']
if state == 'created':
module.fail_json(msg='State "created" is only valid for tasks with a single VM')
# If any of the VMs has a change, the task as a whole has a change.
any_changed = False
# First get all VM uuids and for each check their state, and adjust it if needed.
for uuid in get_all_vm_uuids(module):
current_vm_state = get_vm_prop(module, uuid, 'state')
if not current_vm_state and vm_state == 'deleted':
any_changed = False
else:
if module.check_mode:
if (not current_vm_state) or (get_vm_prop(module, uuid, 'state') != state):
any_changed = True
else:
any_changed = (vm_state_transition(module, uuid, vm_state) | any_changed)
return any_changed
def main():
# In order to reduce the clutter and boilerplate for trivial options,
# abstract the vmadm properties and build the dict of arguments later.
# Dict of all options that are simple to define based on their type.
# They're not required and have a default of None.
properties = {
'str': [
'boot', 'disk_driver', 'dns_domain', 'fs_allowed', 'hostname',
'image_uuid', 'internal_metadata_namespace', 'kernel_version',
'limit_priv', 'nic_driver', 'qemu_opts', 'qemu_extra_opts',
'spice_opts', 'uuid', 'vga', 'zfs_data_compression',
'zfs_root_compression', 'zpool'
],
'bool': [
'archive_on_delete', 'autoboot', 'debug', 'delegate_dataset',
'docker', 'firewall_enabled', 'force', 'indestructible_delegated',
'indestructible_zoneroot', 'maintain_resolvers', 'nowait'
],
'int': [
'cpu_cap', 'cpu_shares', 'max_locked_memory', 'max_lwps',
'max_physical_memory', 'max_swap', 'mdata_exec_timeout',
'quota', 'ram', 'tmpfs', 'vcpus', 'virtio_txburst',
'virtio_txtimer', 'vnc_port', 'zfs_data_recsize',
'zfs_filesystem_limit', 'zfs_io_priority', 'zfs_root_recsize',
'zfs_snapshot_limit'
],
'dict': ['customer_metadata', 'internal_metadata', 'routes'],
'list': ['disks', 'nics', 'resolvers', 'filesystems']
}
# Start with the options that are not as trivial as those above.
options = dict(
state=dict(
default='running',
type='str',
choices=['present', 'running', 'absent', 'deleted', 'stopped', 'created', 'restarted', 'rebooted']
),
name=dict(
default=None, type='str',
aliases=['alias']
),
brand=dict(
default='joyent',
type='str',
choices=['joyent', 'joyent-minimal', 'kvm', 'lx']
),
cpu_type=dict(
default='qemu64',
type='str',
choices=['host', 'qemu64']
),
# Regular strings, however these require additional options.
spice_password=dict(type='str', no_log=True),
vnc_password=dict(type='str', no_log=True),
)
# Add our 'simple' options to options dict.
for type in properties:
for p in properties[type]:
option = dict(default=None, type=type)
options[p] = option
module = AnsibleModule(
argument_spec=options,
supports_check_mode=True,
required_one_of=[['name', 'uuid']]
)
module.vmadm = module.get_bin_path('vmadm', required=True)
p = module.params
uuid = p['uuid']
state = p['state']
# Translate the state parameter into something we can use later on.
if state in ['present', 'running']:
vm_state = 'running'
elif state in ['stopped', 'created']:
vm_state = 'stopped'
elif state in ['absent', 'deleted']:
vm_state = 'deleted'
elif state in ['restarted', 'rebooted']:
vm_state = 'rebooted'
result = {'state': state}
    # While it's possible to refer to a given VM by its `alias`, it's easier
# to operate on VMs by their UUID. So if we're not given a `uuid`, look
# it up.
if not uuid:
uuid = get_vm_uuid(module, p['name'])
# Bit of a chicken and egg problem here for VMs with state == deleted.
# If they're going to be removed in this play, we have to lookup the
# uuid. If they're already deleted there's nothing to lookup.
# So if state == deleted and get_vm_uuid() returned '', the VM is already
# deleted and there's nothing else to do.
if uuid is None and vm_state == 'deleted':
result['name'] = p['name']
module.exit_json(**result)
validate_uuids(module)
if p['name']:
result['name'] = p['name']
result['uuid'] = uuid
if uuid == '*':
result['changed'] = manage_all_vms(module, vm_state)
module.exit_json(**result)
# The general flow is as follows:
    # - First, the current state of the VM is obtained by its UUID.
# - If the state was not found and the desired state is 'deleted', return.
# - If the state was not found, it means the VM has to be created.
# Subsequently the VM will be set to the desired state (i.e. stopped)
    # - Otherwise, it means the VM exists already and we operate on its
# state (i.e. reboot it.)
#
# In the future it should be possible to query the VM for a particular
# property as a valid state (i.e. queried) so the result can be
# registered.
# Also, VMs should be able to get their properties updated.
# Managing VM snapshots should be part of a standalone module.
# First obtain the VM state to determine what needs to be done with it.
current_vm_state = get_vm_prop(module, uuid, 'state')
# First handle the case where the VM should be deleted and is not present.
if not current_vm_state and vm_state == 'deleted':
result['changed'] = False
elif module.check_mode:
        # Shortcut for check mode: if there is no VM yet, it will need to be created.
# Or, if the VM is not in the desired state yet, it needs to transition.
if (not current_vm_state) or (get_vm_prop(module, uuid, 'state') != state):
result['changed'] = True
else:
result['changed'] = False
module.exit_json(**result)
# No VM was found that matched the given ID (alias or uuid), so we create it.
elif not current_vm_state:
result['changed'], result['uuid'] = new_vm(module, uuid, vm_state)
else:
# VM was found, operate on its state directly.
result['changed'] = vm_state_transition(module, uuid, vm_state)
module.exit_json(**result)
if __name__ == '__main__':
main()
| [
"[email protected]"
] | |
4ab98705595c75687b2a1d43a82da9ce0f973aed | 7a15271c7cddd199f43555469a67d26ce0f60836 | /uncertainty_baselines/models/segmenter_gp.py | 76a63b7daf4b436ae49c69c2f1b67b9b791125c4 | [
"Apache-2.0"
] | permissive | google/uncertainty-baselines | b2c339d918bf3949ee066f9eafa6b51232a2ac3d | f5f6f50f82bd441339c9d9efbef3f09e72c5fef6 | refs/heads/main | 2023-09-02T13:59:26.355288 | 2023-08-14T16:35:22 | 2023-08-14T16:36:11 | 280,026,201 | 1,235 | 198 | Apache-2.0 | 2023-09-11T22:21:48 | 2020-07-16T01:54:32 | Python | UTF-8 | Python | false | false | 6,167 | py | # coding=utf-8
# Copyright 2023 The Uncertainty Baselines Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Segmenter GP Vision Transformer (ViT) model.
Based on scenic library implementation.
"""
from typing import Any, Callable, Tuple, Iterable
import edward2.jax as ed
import flax.linen as nn
import jax
import jax.numpy as jnp
import ml_collections
from uncertainty_baselines.models import segmenter
from uncertainty_baselines.models import segmenter_be
Array = Any
PRNGKey = Any
Shape = Tuple[int]
DType = type(jnp.float32)
InitializeFn = Callable[[jnp.ndarray, Iterable[int], DType], jnp.ndarray]
class SegVitGP(nn.Module):
"""Segmentation model with ViT backbone and decoder."""
num_classes: int
patches: ml_collections.ConfigDict
backbone_configs: ml_collections.ConfigDict
decoder_configs: ml_collections.ConfigDict
head_kernel_init: InitializeFn = nn.initializers.variance_scaling( # pytype: disable=annotation-type-mismatch # jax-types
0.02, 'fan_in', 'truncated_normal')
@nn.compact
def __call__(self, x: Array, *, train: bool, debug: bool = False):
"""Applies the module."""
input_shape = x.shape
b, h, w, _ = input_shape
fh, fw = self.patches.size
gh, gw = h // fh, w // fw
if self.backbone_configs.type == 'vit' and self.decoder_configs.type == 'linear':
assert self.backbone_configs.ens_size == 1
if self.backbone_configs.type == 'vit' and self.decoder_configs.type == 'linear_be':
raise NotImplementedError(
'Configuration with encoder {} and decoder {} is not implemented'
.format(
self.backbone_configs.type,
self.decoder_configs.type,
))
if self.backbone_configs.type == 'vit':
x, out = segmenter.ViTBackbone(
mlp_dim=self.backbone_configs.mlp_dim,
num_layers=self.backbone_configs.num_layers,
num_heads=self.backbone_configs.num_heads,
patches=self.patches,
hidden_size=self.backbone_configs.hidden_size,
dropout_rate=self.backbone_configs.dropout_rate,
attention_dropout_rate=self.backbone_configs.attention_dropout_rate,
classifier=self.backbone_configs.classifier,
name='backbone')(
x, train=train)
elif self.backbone_configs.type == 'vit_be':
x, out = segmenter_be.ViTBackboneBE(
mlp_dim=self.backbone_configs.mlp_dim,
num_layers=self.backbone_configs.num_layers,
num_heads=self.backbone_configs.num_heads,
patches=self.patches,
hidden_size=self.backbone_configs.hidden_size,
dropout_rate=self.backbone_configs.dropout_rate,
attention_dropout_rate=self.backbone_configs.attention_dropout_rate,
classifier=self.backbone_configs.classifier,
ens_size=self.backbone_configs.ens_size,
random_sign_init=self.backbone_configs.random_sign_init,
be_layers=self.backbone_configs.be_layers,
name='backbone')(
x, train=train)
else:
raise ValueError(f'Unknown backbone: {self.backbone_configs.type}.')
# remove CLS tokens for decoding
if self.backbone_configs.classifier == 'token':
x = x[..., 1:, :]
ens_size = self.backbone_configs.get('ens_size', 1)
if self.decoder_configs.type == 'linear':
# Linear head only, like Segmenter baseline:
# https://arxiv.org/abs/2105.05633
output_projection = nn.Dense(
self.num_classes,
kernel_init=self.head_kernel_init,
name='output_projection')
x = jnp.reshape(x, [b * ens_size, gh, gw, -1])
x = output_projection(x)
elif self.decoder_configs.type == 'gp':
# Gaussian process layer output: (logits, covmat, and *random features)
# *random features are optional
output_projection = ed.nn.RandomFeatureGaussianProcess(
features=self.num_classes,
name='output_projection',
**self.decoder_configs.gp_layer)
x = jnp.reshape(x, [b*ens_size*gh*gw, -1])
x_gp = output_projection(x)
out['logits_gp'] = x_gp[0]
out['covmat_gp'] = x_gp[1]
if len(x_gp) > 2:
out['random_features_gp'] = x_gp[2]
if not train:
# During inference, compute posterior mean by adjusting the original
# logits with predictive uncertainty.
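        # (mean_field_logits roughly rescales each logit by 1/sqrt(1 + mean_field_factor * variance);
        # see the edward2 documentation for the exact formula.)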
x = ed.nn.utils.mean_field_logits(
logits=x_gp[0],
covmat=x_gp[1],
mean_field_factor=self.decoder_configs.mean_field_factor)
else:
x = x_gp[0]
x = jnp.reshape(x, [b*ens_size, gh, gw, -1])
elif self.decoder_configs.type == 'linear_be':
output_projection = ed.nn.DenseBatchEnsemble(
self.num_classes,
self.backbone_configs.ens_size,
activation=None,
alpha_init=ed.nn.utils.make_sign_initializer(
self.backbone_configs.get('random_sign_init')),
gamma_init=ed.nn.utils.make_sign_initializer(
self.backbone_configs.get('random_sign_init')),
kernel_init=self.head_kernel_init,
name='output_projection_be')
x = output_projection(x)
else:
raise ValueError(
f'Decoder type {self.decoder_configs.type} is not defined.')
# Resize bilinearly:
x = jax.image.resize(x, [b * ens_size, h, w, x.shape[-1]], 'bilinear')
out['logits'] = x
new_input_shape = tuple([
input_shape[0] * ens_size,
] + list(input_shape[1:-1]))
    assert new_input_shape == x.shape[:-1], (
        'BE Input and output shapes do not match: {0} vs. {1}.'.format(
            new_input_shape, x.shape[:-1]))
return x, out
| [
"[email protected]"
] | |
79cc42438964db5b2d6053c997019c47cc2affe2 | 969d094bfb09662b369278dc2cde1dc160a286b6 | /For_Loops/03_odd_even_position.py | a146cc42f1f89bb7903ec3322e674bb3592cfd3e | [] | no_license | IvayloValkov/Python-the-beginning | e96756105b56d7c0ae2687a82ccace1ca97bc895 | 4d074c32f8251af5a96aece1ae447d09db038026 | refs/heads/main | 2023-02-16T13:05:59.726572 | 2021-01-17T08:32:45 | 2021-01-17T08:32:45 | 330,342,003 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,087 | py | import sys
n = int(input())
odd_sum = 0
odd_max = -sys.maxsize
odd_min = sys.maxsize
even_sum = 0
even_max = -sys.maxsize
even_min = sys.maxsize
for i in range(1, n + 1):
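    # i is the 1-based position of the number being read; even i means an even position.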
input_number = float(input())
if i % 2 == 0:
even_sum += input_number
if input_number > even_max:
even_max = input_number
if input_number < even_min:
even_min = input_number
else:
odd_sum += input_number
if input_number > odd_max:
odd_max = input_number
if input_number < odd_min:
odd_min = input_number
print(f'OddSum={odd_sum:.2f},')
if odd_min != sys.maxsize:
print(f'OddMin={odd_min:.2f},')
else:
print(f'OddMin=No,')
if odd_max != -sys.maxsize:
print(f'OddMax={odd_max:.2f},')
else:
print(f'OddMax=No,')
print(f'EvenSum={even_sum:.2f},')
if even_min != sys.maxsize:
print(f'EvenMin={even_min:.2f},')
else:
print(f'EvenMin=No,')
if even_max != -sys.maxsize:
print(f'EvenMax={even_max:.2f}')
else:
print(f'EvenMax=No')
| [
"[email protected]"
] | |
5e0e4793593f70670867c372bf60125379bc503e | 2d0bada349646b801a69c542407279cc7bc25013 | /src/vai_quantizer/vai_q_tensorflow2.x/tensorflow_model_optimization/python/core/clustering/keras/clustering_centroids_test.py | 693276b6d53bd5037cb74fcd2afdafc97370dd90 | [
"Apache-2.0",
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla",
"BSD-3-Clause-Open-MPI",
"LicenseRef-scancode-free-unknown",
"Libtool-exception",
"GCC-exception-3.1",
"LicenseRef-scancode-mit-old-style",
"OFL-1.1",
"JSON",
"LGPL-2.1-only",
"LGPL-2.0-or-later",
"ICU",
"LicenseRef-scancode-other-permissive",
"GPL-2.0-or-later",
"GPL-3.0-only",
"LicenseRef-scancode-issl-2018",
"MIT",
"LGPL-2.1-or-later",
"LicenseRef-scancode-unicode",
"LGPL-3.0-only",
"LicenseRef-scancode-warranty-disclaimer",
"GPL-3.0-or-later",
"Zlib",
"BSD-Source-Code",
"ClArtistic",
"LicenseRef-scancode-unknown-license-reference",
"ISC",
"NCSA",
"LicenseRef-scancode-proprietary-license",
"GPL-2.0-only",
"CC-BY-4.0",
"FSFULLR",
"Minpack",
"Unlicense",
"BSL-1.0",
"NAIST-2003",
"LicenseRef-scancode-protobuf",
"LicenseRef-scancode-public-domain",
"Libpng",
"Spencer-94",
"BSD-2-Clause",
"Intel",
"GPL-1.0-or-later",
"MPL-2.0"
] | permissive | Xilinx/Vitis-AI | 31e664f7adff0958bb7d149883ab9c231efb3541 | f74ddc6ed086ba949b791626638717e21505dba2 | refs/heads/master | 2023-08-31T02:44:51.029166 | 2023-07-27T06:50:28 | 2023-07-27T06:50:28 | 215,649,623 | 1,283 | 683 | Apache-2.0 | 2023-08-17T09:24:55 | 2019-10-16T21:41:54 | Python | UTF-8 | Python | false | false | 6,383 | py | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for keras clustering centroids initialisation API."""
import tensorflow as tf
import tensorflow.keras.backend as K
from absl.testing import parameterized
from tensorflow_model_optimization.python.core.clustering.keras import cluster_config
from tensorflow_model_optimization.python.core.clustering.keras import clustering_centroids
keras = tf.keras
errors_impl = tf.errors
layers = keras.layers
test = tf.test
CentroidInitialization = cluster_config.CentroidInitialization
class ClusteringCentroidsTest(test.TestCase, parameterized.TestCase):
"""Unit tests for the clustering_centroids module."""
def setUp(self):
self.factory = clustering_centroids.CentroidsInitializerFactory
@parameterized.parameters(
(CentroidInitialization.LINEAR),
(CentroidInitialization.RANDOM),
(CentroidInitialization.DENSITY_BASED),
)
def testExistingInitsAreSupported(self, init_type):
"""
Verifies that the given centroid initialization methods are supported.
"""
self.assertTrue(self.factory.init_is_supported(init_type))
def testNonExistingInitIsNotSupported(self):
self.assertFalse(self.factory.init_is_supported("DEADBEEF"))
@parameterized.parameters(
(
CentroidInitialization.LINEAR,
clustering_centroids.LinearCentroidsInitialisation
),
(
CentroidInitialization.RANDOM,
clustering_centroids.RandomCentroidsInitialisation
),
(
CentroidInitialization.DENSITY_BASED,
clustering_centroids.DensityBasedCentroidsInitialisation
),
)
def testReturnsMethodForExistingInit(self, init_type, method):
"""
Verifies that the centroid initializer factory method returns the expected
classes for the given initialization methods.
"""
self.assertEqual(self.factory.get_centroid_initializer(init_type), method)
def testThrowsValueErrorForNonExistingInit(self):
"""
Verifies that the centroid initializer factory method raises an exception
when invoked with an unsupported initialization method.
"""
with self.assertRaises(ValueError):
self.factory.get_centroid_initializer("DEADBEEF")
@parameterized.parameters(
(0, 0, 1, 1, 1, 0),
(0, 0, 5, 5, 1, 0),
(1, 2, 3, 4, 1, 1),
(7, 12, 17, 22, 1, 5),
(-5, 4, 7, 10, 1.0 / 2.0, 13.0 / 2.0),
)
def testLinearSolverConstruction(self, x1, y1, x2, y2, a, b):
"""
Verifies that a TFLinearEquationSolver is constructed correctly.
"""
solver = clustering_centroids.TFLinearEquationSolver(float(x1),
float(y1),
float(x2),
float(y2))
solver_a = solver.a
self.assertAlmostEqual(K.batch_get_value([solver_a])[0], a)
self.assertAlmostEqual(K.batch_get_value([solver.b])[0], b)
@parameterized.parameters(
(0, 0, 1, 1, 5, 5),
(0, 0, 5, 5, 20, 20),
(1, 2, 3, 4, 3, 4),
(7, 12, 17, 22, 3, 8),
)
def testLinearSolverSolveForX(self, x1, y1, x2, y2, x, y):
"""
Verifies that TFLinearEquationSolver solves the given equations correctly
for X.
"""
solver = clustering_centroids.TFLinearEquationSolver(float(x1),
float(y1),
float(x2),
float(y2))
for_x = solver.solve_for_x(y)
self.assertAlmostEqual(K.batch_get_value([for_x])[0], x)
@parameterized.parameters(
(0, 0, 1, 1, 5, 5),
(0, 0, 5, 5, 20, 20),
(1, 2, 3, 4, 3, 4),
(7, 12, 17, 22, 3, 8),
)
def testLinearSolverSolveForY(self, x1, y1, x2, y2, x, y):
"""
Verifies that TFLinearEquationSolver solves the given equations correctly
for Y.
"""
solver = clustering_centroids.TFLinearEquationSolver(float(x1),
float(y1),
float(x2),
float(y2))
for_y = solver.solve_for_y(x)
self.assertAlmostEqual(K.batch_get_value([for_y])[0], y)
@parameterized.parameters(
([1, 2, 6, 7], 4, 0.5),
([1, 2, 6, 7], 1, 1. / 4.),
([1, 2, 3, 4, 5, 6, 7, 8, 9], 3, 1. / 3.),
([1, 2, 3, 4, 5, 6, 7, 8, 9], 99, 1.),
([1, 2, 3, 4, 5, 6, 7, 8, 9], -20, 0.)
)
def testCDFValues(self, weights, point, probability):
"""
Verifies that TFCumulativeDistributionFunction yields the expected output
for the inputs provided.
"""
cdf_calc = clustering_centroids.TFCumulativeDistributionFunction(weights)
self.assertAlmostEqual(
probability,
K.batch_get_value([cdf_calc.get_cdf_value(point)])[0]
)
@parameterized.parameters(
(
[0, 1, 2, 3, 3.1, 3.2, 3.3, 3.4, 3.5],
5,
[0.11137931, 2.0534482, 3.145862, 3.3886206, 3.51]
),
(
[0, 1, 2, 3, 3.1, 3.2, 3.3, 3.4, 3.5],
3,
[0.11137931, 3.145862, 3.51]
),
(
[0., 1., 2., 3., 4., 5., 6., 7., 8., 9.],
3,
[0.3010345, 5.2775865, 9.01]
)
)
def testClusterCentroids(self, weights, number_of_clusters, centroids):
dbci = clustering_centroids.DensityBasedCentroidsInitialisation(
weights,
number_of_clusters
)
calc_centroids = K.batch_get_value([dbci.get_cluster_centroids()])[0]
self.assertSequenceAlmostEqual(centroids, calc_centroids, places=4)
if __name__ == '__main__':
test.main()
| [
"[email protected]"
] | |
0e1c35982cd1e8a0dac5bd43a934045a405885ac | 1058861a696e8b9882175b786fec131f396d69f2 | /task_app/migrations/0001_initial.py | 3816d3d7eb537e306c14c4b3d442babb4d18d4b5 | [] | no_license | wgoode3/djangular-example | f79622442532fa5dc5450f4c5ed8e39ce6f784c3 | 0a8924ea95a7a2faed6865b60f06ceb4a5aed5bb | refs/heads/master | 2020-03-28T04:38:59.091342 | 2018-12-19T21:08:27 | 2018-12-19T21:08:27 | 147,727,910 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 756 | py | # Generated by Django 2.1.1 on 2018-09-06 18:49
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Task',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=255)),
('description', models.CharField(max_length=255)),
('status', models.CharField(max_length=255)),
('created_at', models.DateTimeField(auto_now_add=True)),
('updated_at', models.DateTimeField(auto_now=True)),
],
),
]
| [
"[email protected]"
] | |
044533f19e009cf5a932c77574f9acb421be9c94 | ea1af1a564f96fb36974aa094192877598b0c6bf | /Chapter10/Exercises/ex10_2.py | 5a3786e78e11934cfb22408aad7d0cc3eefeda94 | [] | no_license | GSantos23/Crash_Course | 63eecd13a60141e520b5ca4351341c21c4782801 | 4a5fc0cb9ce987948a728d43c4f266d34ba49a87 | refs/heads/master | 2020-03-20T23:20:43.201255 | 2018-08-21T01:13:06 | 2018-08-21T01:13:06 | 137,841,877 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 800 | py | # Exerrcise 10.2
'''
Learning C: You can use the replace() method to replace any word in a
string with a different word. Here's a quick example showing how to replace
'dog' with 'cat' in a sentence:
-----------------------------------------------------------------------
>>> message = "I really like dogs."
>>> message.replace('dog', 'cat')
'I really like cats.'
-----------------------------------------------------------------------
Read each line from the file you just created, learning_python.txt, and
replace the word Python with the name of another language, such as C. Print
each modified line to the screen.
'''
filename = 'learning_python.txt'
with open(filename) as file_object:
code_text = file_object.readlines()
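# readlines() keeps the trailing newline on each line, so strip() below removes it before printing.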
for lines in code_text:
print(lines.replace('Python', 'C').strip())
| [
"[email protected]"
] | |
799f715fba061b3e4141658da26aa45c489d4fb7 | e396fb9580ff90f7896dba3416be3a7bef81f367 | /rdflib/namespace/RDFS.py | b32a830c87dbb4a7af617304e26edefa504411b5 | [
"CC0-1.0"
] | permissive | Philani7777777/definednamespace | c9e37ccc41762ff07e8b9e800a20b11a187ca355 | f1178ba9c36a94bbd422844f4ddc71de67521d7b | refs/heads/master | 2022-09-24T14:40:43.844447 | 2020-05-27T04:35:20 | 2020-05-27T04:35:20 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,742 | py | from rdflib.term import URIRef
from rdflib.namespace import DefinedNamespace, Namespace
class RDFS(DefinedNamespace):
"""
The RDF Schema vocabulary (RDFS)
Generated from: http://www.w3.org/2000/01/rdf-schema#
Date: 2020-05-26 14:20:05.794866
rdfs:seeAlso <http://www.w3.org/2000/01/rdf-schema-more>
"""
# http://www.w3.org/1999/02/22-rdf-syntax-ns#Property
comment: URIRef # A description of the subject resource.
domain: URIRef # A domain of the subject property.
    isDefinedBy: URIRef  # The definition of the subject resource.
label: URIRef # A human-readable name for the subject.
member: URIRef # A member of the subject resource.
range: URIRef # A range of the subject property.
seeAlso: URIRef # Further information about the subject resource.
subClassOf: URIRef # The subject is a subclass of a class.
subPropertyOf: URIRef # The subject is a subproperty of a property.
# http://www.w3.org/2000/01/rdf-schema#Class
Class: URIRef # The class of classes.
Container: URIRef # The class of RDF containers.
ContainerMembershipProperty: URIRef # The class of container membership properties, rdf:_1, rdf:_2, ..., all of which are sub-properties of 'member'.
Datatype: URIRef # The class of RDF datatypes.
    Literal: URIRef  # The class of literal values, e.g. textual strings and integers.
Resource: URIRef # The class resource, everything.
_NS = Namespace("http://www.w3.org/2000/01/rdf-schema#")
| [
"[email protected]"
] | |
22985d6e6f69c5fcd7acd56d0f9670bd5e45d637 | 6a084a2df2869ce3ad565610cbf92eccf00a233e | /states/boto_iam.py | d31a6a9bcfca75038554f59e60804c39840fe66c | [] | no_license | ltxin/saltstack | 95b5356715cc918afec378e2926d9f9a1c7a85d5 | 30a493ef5e46bd7629c8ba400e559dab023c1431 | refs/heads/master | 2021-01-16T17:52:56.939714 | 2017-08-11T10:13:41 | 2017-08-11T10:13:41 | 100,019,324 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 66,478 | py | # -*- coding: utf-8 -*-
'''
Manage IAM objects
==================
.. versionadded:: 2015.8.0
This module uses ``boto``, which can be installed via package, or pip.
This module accepts explicit IAM credentials but can also utilize
IAM roles assigned to the instance through Instance Profiles. Dynamic
credentials are then automatically obtained from AWS API and no further
configuration is necessary. More information available `here
<http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html>`_.
It's also possible to specify ``key``, ``keyid`` and ``region`` via a profile, either
passed in as a dict, or as a string to pull from pillars or minion config:
.. code-block:: yaml
delete-user:
boto_iam.user_absent:
- name: myuser
- delete_keys: true
.. code-block:: yaml
delete-keys:
boto_iam.keys_absent:
- access_keys:
- 'AKIAJHTMIQ2ASDFLASDF'
- 'PQIAJHTMIQ2ASRTLASFR'
- user_name: myuser
.. code-block:: yaml
create-user:
boto_iam.user_present:
- name: myuser
- policies:
mypolicy: |
{
"Version": "2012-10-17",
"Statement": [{
"Effect": "Allow",
"Action": "*",
"Resource": "*"}]
}
- password: NewPassword$$1
- region: eu-west-1
- keyid: 'AKIAJHTMIQ2ASDFLASDF'
- key: 'fdkjsafkljsASSADFalkfjasdf'
.. code-block:: yaml
create-group:
boto_iam.group_present:
- name: mygroup
- users:
- myuser
- myuser1
- policies:
mypolicy: |
{
"Version": "2012-10-17",
"Statement": [{
"Effect": "Allow",
"Action": "*",
"Resource": "*"}]
}
- region: eu-west-1
- keyid: 'AKIAJHTMIQ2ASDFLASDF'
- key: 'safsdfsal;fdkjsafkljsASSADFalkfj'
.. code-block:: yaml
change-policy:
boto_iam.account_policy:
- change_password: True
- region: eu-west-1
- keyid: 'AKIAJHTMIQ2ASDFLASDF'
- key: 'safsdfsal;fdkjsafkljsASSADFalkfj'
.. code-block:: yaml
create server certificate:
boto_iam.server_cert_present:
- name: mycert
- public_key: salt://base/mycert.crt
- private_key: salt://base/mycert.key
- cert_chain: salt://base/mycert_chain.crt
- region: eu-west-1
- keyid: 'AKIAJHTMIQ2ASDFLASDF'
- key: 'fdkjsafkljsASSADFalkfjasdf'
.. code-block:: yaml
delete server certificate:
boto_iam.server_cert_absent:
- name: mycert
.. code-block:: yaml
create keys for user:
boto_iam.keys_present:
- name: myusername
- number: 2
- save_dir: /root
- region: eu-west-1
- keyid: 'AKIAJHTMIQ2ASDFLASDF'
- key: 'fdkjsafkljsASSADFalkfjasdf'
.. code-block:: yaml
create policy:
boto_iam.policy_present:
- name: myname
- policy_document: '{"MyPolicy": "Statement": [{"Action": ["sqs:*"], "Effect": "Allow", "Resource": ["arn:aws:sqs:*:*:*"], "Sid": "MyPolicySqs1"}]}'
- region: eu-west-1
- keyid: 'AKIAJHTMIQ2ASDFLASDF'
- key: 'fdkjsafkljsASSADFalkfjasdf'
.. code-block:: yaml
add-saml-provider:
boto_iam.saml_provider_present:
- name: my_saml_provider
- saml_metadata_document: salt://base/files/provider.xml
- keyid: 'AKIAJHTMIQ2ASDFLASDF'
- key: 'safsdfsal;fdkjsafkljsASSADFalkfj'
'''
# Import Python Libs
from __future__ import absolute_import
import logging
import json
import os
# Import Salt Libs
from salt.ext.six.moves import range # pylint: disable=import-error,redefined-builtin
import salt.utils
import salt.utils.odict as odict
import salt.utils.dictupdate as dictupdate
from salt.ext import six
# Import 3rd party libs
try:
from salt._compat import ElementTree as ET
HAS_ELEMENT_TREE = True
except ImportError:
HAS_ELEMENT_TREE = False
log = logging.getLogger(__name__)
__virtualname__ = 'boto_iam'
if six.PY2:
def _byteify(thing):
# Note that we intentionally don't treat odicts here - they won't compare equal
# in many circumstances where AWS treats them the same...
if isinstance(thing, dict):
return dict([(_byteify(k), _byteify(v)) for k, v in thing.iteritems()])
elif isinstance(thing, list):
return [_byteify(m) for m in thing]
elif isinstance(thing, unicode): # pylint: disable=W1699
return thing.encode('utf-8')
else:
return thing
else: # six.PY3
def _byteify(text):
return text
def __virtual__():
'''
Only load if elementtree xml library and boto are available.
'''
if not HAS_ELEMENT_TREE:
return (False, 'Cannot load {0} state: ElementTree library unavailable'.format(__virtualname__))
if 'boto_iam.get_user' in __salt__:
return True
else:
return (False, 'Cannot load {0} state: boto_iam module unavailable'.format(__virtualname__))
def user_absent(name, delete_keys=True, delete_mfa_devices=True, delete_profile=True, region=None, key=None, keyid=None, profile=None):
'''
.. versionadded:: 2015.8.0
Ensure the IAM user is absent. User cannot be deleted if it has keys.
name (string)
The name of the new user.
delete_keys (bool)
Delete all keys from user.
delete_mfa_devices (bool)
Delete all mfa devices from user.
.. versionadded:: 2016.3.0
delete_profile (bool)
Delete profile from user.
.. versionadded:: 2016.3.0
region (string)
Region to connect to.
key (string)
Secret key to be used.
keyid (string)
Access key to be used.
profile (dict)
A dict with region, key and keyid, or a pillar key (string)
that contains a dict with region, key and keyid.
'''
ret = {'name': name, 'result': True, 'comment': '', 'changes': {}}
if not __salt__['boto_iam.get_user'](name, region, key, keyid, profile):
ret['result'] = True
ret['comment'] = 'IAM User {0} does not exist.'.format(name)
return ret
# delete the user's access keys
if delete_keys:
keys = __salt__['boto_iam.get_all_access_keys'](user_name=name, region=region, key=key,
keyid=keyid, profile=profile)
log.debug('Keys for user {0} are {1}.'.format(name, keys))
if isinstance(keys, dict):
keys = keys['list_access_keys_response']['list_access_keys_result']['access_key_metadata']
for k in keys:
if __opts__['test']:
ret['comment'] = ' '.join([ret['comment'], 'Key {0} is set to be deleted.'.format(k['access_key_id'])])
ret['result'] = None
else:
if _delete_key(ret, k['access_key_id'], name, region, key, keyid, profile):
ret['comment'] = ' '.join([ret['comment'], 'Key {0} has been deleted.'.format(k['access_key_id'])])
ret['changes'][k['access_key_id']] = 'deleted'
# delete the user's MFA tokens
if delete_mfa_devices:
devices = __salt__['boto_iam.get_all_mfa_devices'](user_name=name, region=region, key=key, keyid=keyid, profile=profile)
if devices:
for d in devices:
serial = d['serial_number']
if __opts__['test']:
ret['comment'] = ' '.join([ret['comment'], 'IAM user {0} MFA device {1} is set to be deleted.'.format(name, serial)])
ret['result'] = None
else:
mfa_deleted = __salt__['boto_iam.deactivate_mfa_device'](user_name=name, serial=serial, region=region, key=key, keyid=keyid, profile=profile)
if mfa_deleted:
ret['comment'] = ' '.join([ret['comment'], 'IAM user {0} MFA device {1} are deleted.'.format(name, serial)])
# delete the user's login profile
if delete_profile:
if __opts__['test']:
ret['comment'] = ' '.join([ret['comment'], 'IAM user {0} login profile is set to be deleted.'.format(name)])
ret['result'] = None
else:
profile_deleted = __salt__['boto_iam.delete_login_profile'](name, region, key, keyid, profile)
if profile_deleted:
ret['comment'] = ' '.join([ret['comment'], 'IAM user {0} login profile is deleted.'.format(name)])
if __opts__['test']:
ret['comment'] = ' '.join([ret['comment'], 'IAM user {0} policies are set to be deleted.'.format(name)])
ret['result'] = None
else:
_ret = _user_policies_detached(name, region, key, keyid, profile)
ret['comment'] = ' '.join([ret['comment'], _ret['comment']])
if not _ret['result']:
ret['result'] = _ret['result']
if ret['result'] is False:
return ret
# finally, actually delete the user
if __opts__['test']:
ret['comment'] = ' '.join([ret['comment'], 'IAM user {0} is set to be deleted.'.format(name)])
ret['result'] = None
return ret
deleted = __salt__['boto_iam.delete_user'](name, region, key, keyid, profile)
if deleted is True:
ret['comment'] = ' '.join([ret['comment'], 'IAM user {0} is deleted.'.format(name)])
ret['result'] = True
ret['changes']['deleted'] = name
return ret
ret['comment'] = 'IAM user {0} could not be deleted.\n {1}'.format(name, deleted)
ret['result'] = False
return ret
def keys_present(name, number, save_dir, region=None, key=None, keyid=None, profile=None,
save_format="{2}\n{0}\n{3}\n{1}\n"):
'''
.. versionadded:: 2015.8.0
Ensure the IAM access keys are present.
name (string)
        The name of the user.
number (int)
Number of keys that user should have.
save_dir (string)
        The directory that the key/keys will be saved to. Keys are saved to a file named according
        to the username provided.
region (string)
Region to connect to.
key (string)
Secret key to be used.
keyid (string)
Access key to be used.
profile (dict)
A dict with region, key and keyid, or a pillar key (string)
that contains a dict with region, key and keyid.
save_format (dict)
Save format is repeated for each key. Default format is "{2}\n{0}\n{3}\n{1}\n",
where {0} and {1} are placeholders for new key_id and key respectively,
whereas {2} and {3} are "key_id-{number}" and 'key-{number}' strings kept for compatibility.
'''
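    # With the default save_format, each newly created key is appended to
    # <save_dir>/<name> as four lines, roughly:
    #   key_id-0
    #   <access key id>
    #   key-0
    #   <secret access key>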
ret = {'name': name, 'result': True, 'comment': '', 'changes': {}}
if not __salt__['boto_iam.get_user'](name, region, key, keyid, profile):
ret['result'] = False
ret['comment'] = 'IAM User {0} does not exist.'.format(name)
return ret
if not isinstance(number, int):
ret['comment'] = 'The number of keys must be an integer.'
ret['result'] = False
return ret
if not os.path.isdir(save_dir):
ret['comment'] = 'The directory {0} does not exist.'.format(save_dir)
ret['result'] = False
return ret
keys = __salt__['boto_iam.get_all_access_keys'](user_name=name, region=region, key=key,
keyid=keyid, profile=profile)
if isinstance(keys, str):
log.debug('keys are : false {0}'.format(keys))
error, message = _get_error(keys)
ret['comment'] = 'Could not get keys.\n{0}\n{1}'.format(error, message)
ret['result'] = False
return ret
keys = keys['list_access_keys_response']['list_access_keys_result']['access_key_metadata']
log.debug('Keys are : {0}.'.format(keys))
if len(keys) >= number:
ret['comment'] = 'The number of keys exist for user {0}'.format(name)
ret['result'] = True
return ret
if __opts__['test']:
ret['comment'] = 'Access key is set to be created for {0}.'.format(name)
ret['result'] = None
return ret
new_keys = {}
for i in range(number-len(keys)):
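        # Only create as many new keys as are needed to reach the requested total.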
created = __salt__['boto_iam.create_access_key'](name, region, key, keyid, profile)
if isinstance(created, str):
error, message = _get_error(created)
ret['comment'] = 'Could not create keys.\n{0}\n{1}'.format(error, message)
ret['result'] = False
return ret
log.debug('Created is : {0}'.format(created))
response = 'create_access_key_response'
result = 'create_access_key_result'
new_keys[str(i)] = {}
new_keys[str(i)]['key_id'] = created[response][result]['access_key']['access_key_id']
new_keys[str(i)]['secret_key'] = created[response][result]['access_key']['secret_access_key']
try:
with salt.utils.fopen('{0}/{1}'.format(save_dir, name), 'a') as _wrf:
for key_num, key in new_keys.items():
key_id = key['key_id']
secret_key = key['secret_key']
_wrf.write(save_format.format(key_id, secret_key, 'key_id-{0}'.format(key_num), 'key-{0}'.format(key_num)))
ret['comment'] = 'Keys have been written to file {0}/{1}.'.format(save_dir, name)
ret['result'] = True
ret['changes'] = new_keys
return ret
except IOError:
ret['comment'] = 'Could not write to file {0}/{1}.'.format(save_dir, name)
ret['result'] = False
return ret
def keys_absent(access_keys, user_name, region=None, key=None, keyid=None, profile=None):
'''
.. versionadded:: 2015.8.0
Ensure the IAM user access_key_id is absent.
    access_keys (list)
A list of access key ids
user_name (string)
The username of the user
region (string)
Region to connect to.
key (string)
Secret key to be used.
keyid (string)
Access key to be used.
profile (dict)
A dict with region, key and keyid, or a pillar key (string)
that contains a dict with region, key and keyid.
'''
ret = {'name': access_keys, 'result': True, 'comment': '', 'changes': {}}
if not __salt__['boto_iam.get_user'](user_name, region, key, keyid, profile):
ret['result'] = False
ret['comment'] = 'IAM User {0} does not exist.'.format(user_name)
return ret
for k in access_keys:
ret = _delete_key(ret, k, user_name, region, key, keyid, profile)
return ret
def _delete_key(ret, access_key_id, user_name, region=None, key=None, keyid=None, profile=None):
keys = __salt__['boto_iam.get_all_access_keys'](user_name=user_name, region=region, key=key,
keyid=keyid, profile=profile)
log.debug('Keys for user {1} are : {0}.'.format(keys, user_name))
if isinstance(keys, str):
log.debug('Keys {0} are a string. Something went wrong.'.format(keys))
ret['comment'] = ' '.join([ret['comment'], 'Key {0} could not be deleted.'.format(access_key_id)])
return ret
keys = keys['list_access_keys_response']['list_access_keys_result']['access_key_metadata']
for k in keys:
log.debug('Key is: {0} and is compared with: {1}'.format(k['access_key_id'], access_key_id))
if str(k['access_key_id']) == str(access_key_id):
if __opts__['test']:
ret['comment'] = 'Access key {0} is set to be deleted.'.format(access_key_id)
ret['result'] = None
return ret
deleted = __salt__['boto_iam.delete_access_key'](access_key_id, user_name, region, key,
keyid, profile)
if deleted:
ret['comment'] = ' '.join([ret['comment'], 'Key {0} has been deleted.'.format(access_key_id)])
ret['changes'][access_key_id] = 'deleted'
return ret
ret['comment'] = ' '.join([ret['comment'], 'Key {0} could not be deleted.'.format(access_key_id)])
return ret
ret['comment'] = ' '.join([ret['comment'], 'Key {0} does not exist.'.format(k)])
return ret
def user_present(name, policies=None, policies_from_pillars=None, managed_policies=None, password=None, path=None,
region=None, key=None, keyid=None, profile=None):
'''
.. versionadded:: 2015.8.0
Ensure the IAM user is present
name (string)
The name of the new user.
policies (dict)
        A dict of IAM user policy documents.
policies_from_pillars (list)
        A list of pillars that contain user policy dicts. Policies in the
pillars will be merged in the order defined in the list and key
conflicts will be handled by later defined keys overriding earlier
defined keys. The policies defined here will be merged with the
policies defined in the policies argument. If keys conflict, the keys
in the policies argument will override the keys defined in
policies_from_pillars.
managed_policies (list)
A list of managed policy names or ARNs that should be attached to this
user.
password (string)
The password for the new user. Must comply with account policy.
path (string)
The path of the user. Default is '/'.
.. versionadded:: 2015.8.2
region (string)
Region to connect to.
key (string)
Secret key to be used.
keyid (string)
Access key to be used.
profile (dict)
A dict with region, key and keyid, or a pillar key (string)
that contains a dict with region, key and keyid.
'''
ret = {'name': name, 'result': True, 'comment': '', 'changes': {}}
if not policies:
policies = {}
if not policies_from_pillars:
policies_from_pillars = []
if not managed_policies:
managed_policies = []
_policies = {}
for policy in policies_from_pillars:
_policy = __salt__['pillar.get'](policy)
_policies.update(_policy)
_policies.update(policies)
exists = __salt__['boto_iam.get_user'](name, region, key, keyid, profile)
if not exists:
if __opts__['test']:
ret['comment'] = 'IAM user {0} is set to be created.'.format(name)
ret['result'] = None
return ret
created = __salt__['boto_iam.create_user'](name, path, region, key, keyid, profile)
if created:
ret['changes']['user'] = created
ret['comment'] = ' '.join([ret['comment'], 'User {0} has been created.'.format(name)])
if password:
ret = _case_password(ret, name, password, region, key, keyid, profile)
_ret = _user_policies_present(name, _policies, region, key, keyid, profile)
ret['changes'] = dictupdate.update(ret['changes'], _ret['changes'])
ret['comment'] = ' '.join([ret['comment'], _ret['comment']])
else:
ret['comment'] = ' '.join([ret['comment'], 'User {0} is present.'.format(name)])
if password:
ret = _case_password(ret, name, password, region, key, keyid, profile)
_ret = _user_policies_present(name, _policies, region, key, keyid, profile)
ret['changes'] = dictupdate.update(ret['changes'], _ret['changes'])
ret['comment'] = ' '.join([ret['comment'], _ret['comment']])
_ret = _user_policies_attached(name, managed_policies, region, key, keyid, profile)
ret['changes'] = dictupdate.update(ret['changes'], _ret['changes'])
ret['comment'] = ' '.join([ret['comment'], _ret['comment']])
if not _ret['result']:
ret['result'] = _ret['result']
return ret
return ret
def _user_policies_present(name, policies=None, region=None, key=None, keyid=None, profile=None):
ret = {'result': True, 'comment': '', 'changes': {}}
policies_to_create = {}
policies_to_delete = []
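    # Compare each desired inline policy with what is currently attached; differing or
    # missing policies are queued for (re)creation, and attached policies that are no
    # longer desired are queued for deletion.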
for policy_name, policy in six.iteritems(policies):
if isinstance(policy, six.string_types):
dict_policy = _byteify(json.loads(policy, object_pairs_hook=odict.OrderedDict))
else:
dict_policy = _byteify(policy)
_policy = _byteify(__salt__['boto_iam.get_user_policy'](name, policy_name, region, key, keyid, profile))
if _policy != dict_policy:
log.debug("Policy mismatch:\n{0}\n{1}".format(_policy, dict_policy))
policies_to_create[policy_name] = policy
_list = __salt__['boto_iam.get_all_user_policies'](
user_name=name, region=region, key=key, keyid=keyid, profile=profile
)
for policy_name in _list:
if policy_name not in policies:
policies_to_delete.append(policy_name)
if policies_to_create or policies_to_delete:
_to_modify = list(policies_to_delete)
_to_modify.extend(policies_to_create)
if __opts__['test']:
msg = '{0} policies to be modified on user {1}.'
ret['comment'] = msg.format(', '.join(_to_modify), name)
ret['result'] = None
return ret
ret['changes']['old'] = {'policies': _list}
for policy_name, policy in six.iteritems(policies_to_create):
policy_set = __salt__['boto_iam.put_user_policy'](
name, policy_name, policy, region, key, keyid, profile
)
if not policy_set:
_list = __salt__['boto_iam.get_all_user_policies'](
user_name=name, region=region, key=key, keyid=keyid, profile=profile
)
ret['changes']['new'] = {'policies': _list}
ret['result'] = False
msg = 'Failed to add policy {0} for user {1}'
ret['comment'] = msg.format(policy_name, name)
return ret
for policy_name in policies_to_delete:
policy_unset = __salt__['boto_iam.delete_user_policy'](
name, policy_name, region, key, keyid, profile
)
if not policy_unset:
_list = __salt__['boto_iam.get_all_user_policies'](
user_name=name, region=region, key=key, keyid=keyid, profile=profile
)
ret['changes']['new'] = {'policies': _list}
ret['result'] = False
msg = 'Failed to add policy {0} to user {1}'
ret['comment'] = msg.format(policy_name, name)
return ret
_list = __salt__['boto_iam.get_all_user_policies'](
user_name=name, region=region, key=key, keyid=keyid, profile=profile
)
ret['changes']['new'] = {'policies': _list}
msg = '{0} policies modified on user {1}.'
ret['comment'] = msg.format(', '.join(_list), name)
return ret
def _user_policies_attached(
name,
managed_policies=None,
region=None,
key=None,
keyid=None,
profile=None):
ret = {'result': True, 'comment': '', 'changes': {}}
policies_to_attach = []
policies_to_detach = []
for policy in managed_policies or []:
entities = __salt__['boto_iam.list_entities_for_policy'](policy,
entity_filter='User',
region=region, key=key, keyid=keyid,
profile=profile)
found = False
for userdict in entities.get('policy_users', []):
if name == userdict.get('user_name'):
found = True
break
if not found:
policies_to_attach.append(policy)
_list = __salt__['boto_iam.list_attached_user_policies'](name, region=region, key=key, keyid=keyid,
profile=profile)
oldpolicies = [x.get('policy_arn') for x in _list]
for policy_data in _list:
if policy_data.get('policy_name') not in managed_policies \
and policy_data.get('policy_arn') not in managed_policies:
policies_to_detach.append(policy_data.get('policy_arn'))
if policies_to_attach or policies_to_detach:
_to_modify = list(policies_to_detach)
_to_modify.extend(policies_to_attach)
if __opts__['test']:
msg = '{0} policies to be modified on user {1}.'
ret['comment'] = msg.format(', '.join(_to_modify), name)
ret['result'] = None
return ret
ret['changes']['old'] = {'managed_policies': oldpolicies}
for policy_name in policies_to_attach:
policy_set = __salt__['boto_iam.attach_user_policy'](policy_name,
name,
region=region, key=key,
keyid=keyid,
profile=profile)
if not policy_set:
_list = __salt__['boto_iam.list_attached_user_policies'](name, region=region,
key=key,
keyid=keyid,
profile=profile)
newpolicies = [x.get('policy_arn') for x in _list]
ret['changes']['new'] = {'managed_policies': newpolicies}
ret['result'] = False
msg = 'Failed to add policy {0} to user {1}'
ret['comment'] = msg.format(policy_name, name)
return ret
for policy_name in policies_to_detach:
policy_unset = __salt__['boto_iam.detach_user_policy'](policy_name,
name,
region=region, key=key,
keyid=keyid,
profile=profile)
if not policy_unset:
_list = __salt__['boto_iam.list_attached_user_policies'](name, region=region,
key=key,
keyid=keyid,
profile=profile)
newpolicies = [x.get('policy_arn') for x in _list]
ret['changes']['new'] = {'managed_policies': newpolicies}
ret['result'] = False
msg = 'Failed to remove policy {0} from user {1}'
ret['comment'] = msg.format(policy_name, name)
return ret
_list = __salt__['boto_iam.list_attached_user_policies'](name, region=region, key=key,
keyid=keyid,
profile=profile)
newpolicies = [x.get('policy_arn') for x in _list]
log.debug(newpolicies)
ret['changes']['new'] = {'managed_policies': newpolicies}
msg = '{0} policies modified on user {1}.'
ret['comment'] = msg.format(', '.join(newpolicies), name)
return ret
def _user_policies_detached(
name,
region=None,
key=None,
keyid=None,
profile=None):
ret = {'result': True, 'comment': '', 'changes': {}}
_list = __salt__['boto_iam.list_attached_user_policies'](user_name=name,
region=region, key=key, keyid=keyid, profile=profile)
oldpolicies = [x.get('policy_arn') for x in _list]
if not _list:
msg = 'No attached policies in user {0}.'.format(name)
ret['comment'] = msg
return ret
if __opts__['test']:
msg = '{0} policies to be detached from user {1}.'
ret['comment'] = msg.format(', '.join(oldpolicies), name)
ret['result'] = None
return ret
ret['changes']['old'] = {'managed_policies': oldpolicies}
for policy_arn in oldpolicies:
policy_unset = __salt__['boto_iam.detach_user_policy'](policy_arn,
name,
region=region, key=key,
keyid=keyid,
profile=profile)
if not policy_unset:
_list = __salt__['boto_iam.list_attached_user_policies'](name, region=region,
key=key, keyid=keyid,
profile=profile)
newpolicies = [x.get('policy_arn') for x in _list]
ret['changes']['new'] = {'managed_policies': newpolicies}
ret['result'] = False
msg = 'Failed to detach {0} from user {1}'
ret['comment'] = msg.format(policy_arn, name)
return ret
_list = __salt__['boto_iam.list_attached_user_policies'](name, region=region, key=key,
keyid=keyid, profile=profile)
newpolicies = [x.get('policy_arn') for x in _list]
ret['changes']['new'] = {'managed_policies': newpolicies}
msg = '{0} policies detached from user {1}.'
ret['comment'] = msg.format(', '.join(newpolicies), name)
return ret
def _case_password(ret, name, password, region=None, key=None, keyid=None, profile=None):
if __opts__['test']:
ret['comment'] = 'Login policy for {0} is set to be changed.'.format(name)
ret['result'] = None
return ret
login = __salt__['boto_iam.create_login_profile'](name, password, region, key, keyid, profile)
log.debug('Login is : {0}.'.format(login))
if login:
if 'Conflict' in login:
ret['comment'] = ' '.join([ret['comment'], 'Login profile for user {0} exists.'.format(name)])
else:
ret['comment'] = ' '.join([ret['comment'], 'Password has been added to User {0}.'.format(name)])
ret['changes']['password'] = 'REDACTED'
else:
ret['result'] = False
ret['comment'] = ' '.join([ret['comment'], 'Password for user {0} could not be set.\nPlease check your password policy.'.format(name)])
return ret
def group_absent(name, region=None, key=None, keyid=None, profile=None):
'''
.. versionadded:: 2015.8.0
Ensure the IAM group is absent.
name (string)
The name of the group.
region (string)
Region to connect to.
key (string)
Secret key to be used.
keyid (string)
Access key to be used.
profile (dict)
A dict with region, key and keyid, or a pillar key (string)
that contains a dict with region, key and keyid.
'''
ret = {'name': name, 'result': True, 'comment': '', 'changes': {}}
if not __salt__['boto_iam.get_group'](name, region, key, keyid, profile):
ret['result'] = True
ret['comment'] = 'IAM Group {0} does not exist.'.format(name)
return ret
if __opts__['test']:
ret['comment'] = ' '.join([ret['comment'], 'IAM group {0} policies are set to be deleted.'.format(name)])
ret['result'] = None
else:
_ret = _group_policies_detached(name, region, key, keyid, profile)
ret['comment'] = ' '.join([ret['comment'], _ret['comment']])
if not _ret['result']:
ret['result'] = _ret['result']
if ret['result'] is False:
return ret
ret['comment'] = ' '.join([ret['comment'], 'IAM group {0} users are set to be removed.'.format(name)])
existing_users = __salt__['boto_iam.get_group_members'](group_name=name, region=region, key=key, keyid=keyid, profile=profile)
_ret = _case_group(ret, [], name, existing_users, region, key, keyid, profile)
ret['changes'] = dictupdate.update(ret['changes'], _ret['changes'])
ret['comment'] = ' '.join([ret['comment'], _ret['comment']])
if not _ret['result']:
ret['result'] = _ret['result']
return ret
# finally, actually delete the group
if __opts__['test']:
ret['comment'] = ' '.join([ret['comment'], 'IAM group {0} is set to be deleted.'.format(name)])
ret['result'] = None
return ret
deleted = __salt__['boto_iam.delete_group'](name, region, key, keyid, profile)
if deleted is True:
ret['comment'] = ' '.join([ret['comment'], 'IAM group {0} is deleted.'.format(name)])
ret['result'] = True
ret['changes']['deleted'] = name
return ret
ret['comment'] = 'IAM group {0} could not be deleted.\n {1}'.format(name, deleted)
ret['result'] = False
return ret
def group_present(name, policies=None, policies_from_pillars=None, managed_policies=None, users=None, path='/', region=None, key=None, keyid=None, profile=None):
'''
.. versionadded:: 2015.8.0
Ensure the IAM group is present
name (string)
The name of the new group.
path (string)
The path for the group, defaults to '/'
policies (dict)
A dict of IAM group policy documents.
policies_from_pillars (list)
        A list of pillars that contain group policy dicts. Policies in the
pillars will be merged in the order defined in the list and key
conflicts will be handled by later defined keys overriding earlier
defined keys. The policies defined here will be merged with the
policies defined in the policies argument. If keys conflict, the keys
in the policies argument will override the keys defined in
policies_from_pillars.
    managed_policies (list)
A list of policy names or ARNs that should be attached to this group.
users (list)
A list of users to be added to the group.
region (string)
Region to connect to.
key (string)
Secret key to be used.
keyid (string)
Access key to be used.
profile (dict)
A dict with region, key and keyid, or a pillar key (string) that
contains a dict with region, key and keyid.
'''
ret = {'name': name, 'result': True, 'comment': '', 'changes': {}}
if not policies:
policies = {}
if not policies_from_pillars:
policies_from_pillars = []
if not managed_policies:
managed_policies = []
_policies = {}
for policy in policies_from_pillars:
_policy = __salt__['pillar.get'](policy)
_policies.update(_policy)
_policies.update(policies)
exists = __salt__['boto_iam.get_group'](group_name=name, region=region, key=key, keyid=keyid, profile=profile)
if not exists:
if __opts__['test']:
ret['comment'] = 'IAM group {0} is set to be created.'.format(name)
ret['result'] = None
return ret
created = __salt__['boto_iam.create_group'](group_name=name, path=path, region=region, key=key, keyid=keyid, profile=profile)
if not created:
ret['comment'] = 'Failed to create IAM group {0}.'.format(name)
ret['result'] = False
return ret
ret['changes']['group'] = created
ret['comment'] = ' '.join([ret['comment'], 'Group {0} has been created.'.format(name)])
else:
ret['comment'] = ' '.join([ret['comment'], 'Group {0} is present.'.format(name)])
# Group exists, ensure group policies and users are set.
_ret = _group_policies_present(
name, _policies, region, key, keyid, profile
)
ret['changes'] = dictupdate.update(ret['changes'], _ret['changes'])
ret['comment'] = ' '.join([ret['comment'], _ret['comment']])
if not _ret['result']:
ret['result'] = _ret['result']
return ret
_ret = _group_policies_attached(name, managed_policies, region, key, keyid, profile)
ret['changes'] = dictupdate.update(ret['changes'], _ret['changes'])
ret['comment'] = ' '.join([ret['comment'], _ret['comment']])
if not _ret['result']:
ret['result'] = _ret['result']
return ret
if users is not None:
log.debug('Users are : {0}.'.format(users))
existing_users = __salt__['boto_iam.get_group_members'](group_name=name, region=region, key=key, keyid=keyid, profile=profile)
ret = _case_group(ret, users, name, existing_users, region, key, keyid, profile)
return ret
def _case_group(ret, users, group_name, existing_users, region, key, keyid, profile):
_users = []
for user in existing_users:
_users.append(user['user_name'])
log.debug('upstream users are {0}'.format(_users))
for user in users:
log.debug('users are {0}'.format(user))
if user in _users:
log.debug('user exists')
ret['comment'] = ' '.join([ret['comment'], 'User {0} is already a member of group {1}.'.format(user, group_name)])
continue
else:
log.debug('user is set to be added {0}'.format(user))
if __opts__['test']:
ret['comment'] = 'User {0} is set to be added to group {1}.'.format(user, group_name)
ret['result'] = None
else:
__salt__['boto_iam.add_user_to_group'](user, group_name, region, key, keyid, profile)
ret['comment'] = ' '.join([ret['comment'], 'User {0} has been added to group {1}.'.format(user, group_name)])
ret['changes'][user] = group_name
for user in _users:
if user not in users:
if __opts__['test']:
ret['comment'] = ' '.join([ret['comment'], 'User {0} is set to be removed from group {1}.'.format(user, group_name)])
ret['result'] = None
else:
__salt__['boto_iam.remove_user_from_group'](group_name=group_name, user_name=user, region=region,
key=key, keyid=keyid, profile=profile)
ret['comment'] = ' '.join([ret['comment'], 'User {0} has been removed from group {1}.'.format(user, group_name)])
ret['changes'][user] = 'Removed from group {0}.'.format(group_name)
return ret
def _group_policies_present(
name,
policies=None,
region=None,
key=None,
keyid=None,
profile=None):
ret = {'result': True, 'comment': '', 'changes': {}}
policies_to_create = {}
policies_to_delete = []
for policy_name, policy in six.iteritems(policies):
if isinstance(policy, six.string_types):
dict_policy = _byteify(json.loads(policy, object_pairs_hook=odict.OrderedDict))
else:
dict_policy = _byteify(policy)
_policy = _byteify(__salt__['boto_iam.get_group_policy'](name, policy_name, region, key, keyid, profile))
if _policy != dict_policy:
log.debug("Policy mismatch:\n{0}\n{1}".format(_policy, dict_policy))
policies_to_create[policy_name] = policy
_list = __salt__['boto_iam.get_all_group_policies'](
name, region, key, keyid, profile
)
for policy_name in _list:
if policy_name not in policies:
policies_to_delete.append(policy_name)
if policies_to_create or policies_to_delete:
_to_modify = list(policies_to_delete)
_to_modify.extend(policies_to_create)
if __opts__['test']:
msg = '{0} policies to be modified on group {1}.'
ret['comment'] = msg.format(', '.join(_to_modify), name)
ret['result'] = None
return ret
ret['changes']['old'] = {'policies': _list}
for policy_name, policy in six.iteritems(policies_to_create):
policy_set = __salt__['boto_iam.put_group_policy'](
name, policy_name, policy, region, key, keyid, profile
)
if not policy_set:
_list = __salt__['boto_iam.get_all_group_policies'](
name, region, key, keyid, profile
)
ret['changes']['new'] = {'policies': _list}
ret['result'] = False
msg = 'Failed to add policy {0} to group {1}'
ret['comment'] = msg.format(policy_name, name)
return ret
for policy_name in policies_to_delete:
policy_unset = __salt__['boto_iam.delete_group_policy'](
name, policy_name, region, key, keyid, profile
)
if not policy_unset:
_list = __salt__['boto_iam.get_all_group_policies'](
name, region, key, keyid, profile
)
ret['changes']['new'] = {'policies': _list}
ret['result'] = False
msg = 'Failed to add policy {0} to group {1}'
ret['comment'] = msg.format(policy_name, name)
return ret
_list = __salt__['boto_iam.get_all_group_policies'](
name, region, key, keyid, profile
)
ret['changes']['new'] = {'policies': _list}
msg = '{0} policies modified on group {1}.'
ret['comment'] = msg.format(', '.join(_list), name)
return ret
def _group_policies_attached(
name,
managed_policies=None,
region=None,
key=None,
keyid=None,
profile=None):
ret = {'result': True, 'comment': '', 'changes': {}}
policies_to_attach = []
policies_to_detach = []
for policy in managed_policies or []:
entities = __salt__['boto_iam.list_entities_for_policy'](policy,
entity_filter='Group',
region=region, key=key, keyid=keyid,
profile=profile)
found = False
for groupdict in entities.get('policy_groups', []):
if name == groupdict.get('group_name'):
found = True
break
if not found:
policies_to_attach.append(policy)
_list = __salt__['boto_iam.list_attached_group_policies'](name, region=region, key=key, keyid=keyid,
profile=profile)
oldpolicies = [x.get('policy_arn') for x in _list]
for policy_data in _list:
if policy_data.get('policy_name') not in managed_policies \
and policy_data.get('policy_arn') not in managed_policies:
policies_to_detach.append(policy_data.get('policy_arn'))
if policies_to_attach or policies_to_detach:
_to_modify = list(policies_to_detach)
_to_modify.extend(policies_to_attach)
if __opts__['test']:
msg = '{0} policies to be modified on group {1}.'
ret['comment'] = msg.format(', '.join(_to_modify), name)
ret['result'] = None
return ret
ret['changes']['old'] = {'managed_policies': oldpolicies}
for policy_name in policies_to_attach:
policy_set = __salt__['boto_iam.attach_group_policy'](policy_name,
name,
region=region, key=key,
keyid=keyid,
profile=profile)
if not policy_set:
_list = __salt__['boto_iam.list_attached_group_policies'](name, region=region,
key=key, keyid=keyid,
profile=profile)
newpolicies = [x.get('policy_arn') for x in _list]
ret['changes']['new'] = {'managed_policies': newpolicies}
ret['result'] = False
msg = 'Failed to add policy {0} to group {1}'
ret['comment'] = msg.format(policy_name, name)
return ret
for policy_name in policies_to_detach:
policy_unset = __salt__['boto_iam.detach_group_policy'](policy_name,
name,
region=region, key=key,
keyid=keyid,
profile=profile)
if not policy_unset:
_list = __salt__['boto_iam.list_attached_group_policies'](name, region=region,
key=key, keyid=keyid,
profile=profile)
newpolicies = [x.get('policy_arn') for x in _list]
ret['changes']['new'] = {'managed_policies': newpolicies}
ret['result'] = False
msg = 'Failed to remove policy {0} from group {1}'
ret['comment'] = msg.format(policy_name, name)
return ret
_list = __salt__['boto_iam.list_attached_group_policies'](name, region=region, key=key,
keyid=keyid, profile=profile)
newpolicies = [x.get('policy_arn') for x in _list]
log.debug(newpolicies)
ret['changes']['new'] = {'managed_policies': newpolicies}
msg = '{0} policies modified on group {1}.'
ret['comment'] = msg.format(', '.join(newpolicies), name)
return ret
def _group_policies_detached(
name,
region=None,
key=None,
keyid=None,
profile=None):
ret = {'result': True, 'comment': '', 'changes': {}}
_list = __salt__['boto_iam.list_attached_group_policies'](group_name=name,
region=region, key=key, keyid=keyid, profile=profile)
oldpolicies = [x.get('policy_arn') for x in _list]
if not _list:
msg = 'No attached policies in group {0}.'.format(name)
ret['comment'] = msg
return ret
if __opts__['test']:
msg = '{0} policies to be detached from group {1}.'
ret['comment'] = msg.format(', '.join(oldpolicies), name)
ret['result'] = None
return ret
ret['changes']['old'] = {'managed_policies': oldpolicies}
for policy_arn in oldpolicies:
policy_unset = __salt__['boto_iam.detach_group_policy'](policy_arn,
name,
region=region, key=key,
keyid=keyid,
profile=profile)
if not policy_unset:
_list = __salt__['boto_iam.list_attached_group_policies'](name, region=region,
key=key, keyid=keyid,
profile=profile)
newpolicies = [x.get('policy_arn') for x in _list]
ret['changes']['new'] = {'managed_policies': newpolicies}
ret['result'] = False
msg = 'Failed to detach {0} from group {1}'
ret['comment'] = msg.format(policy_arn, name)
return ret
_list = __salt__['boto_iam.list_attached_group_policies'](name, region=region, key=key,
keyid=keyid, profile=profile)
newpolicies = [x.get('policy_arn') for x in _list]
ret['changes']['new'] = {'managed_policies': newpolicies}
msg = '{0} policies detached from group {1}.'
ret['comment'] = msg.format(', '.join(newpolicies), name)
return ret
def account_policy(name=None, allow_users_to_change_password=None,
hard_expiry=None, max_password_age=None,
minimum_password_length=None, password_reuse_prevention=None,
require_lowercase_characters=None, require_numbers=None,
require_symbols=None, require_uppercase_characters=None,
region=None, key=None, keyid=None, profile=None):
'''
Change account policy.
.. versionadded:: 2015.8.0
name (string)
The name of the account policy
allow_users_to_change_password (bool)
Allows all IAM users in your account to
use the AWS Management Console to change their own passwords.
hard_expiry (bool)
Prevents IAM users from setting a new password after their
password has expired.
max_password_age (int)
The number of days that an IAM user password is valid.
minimum_password_length (int)
The minimum number of characters allowed in an IAM user password.
password_reuse_prevention (int)
Specifies the number of previous passwords
that IAM users are prevented from reusing.
require_lowercase_characters (bool)
Specifies whether IAM user passwords
must contain at least one lowercase character from the ISO basic Latin alphabet (a to z).
require_numbers (bool)
Specifies whether IAM user passwords must contain at
least one numeric character (0 to 9).
require_symbols (bool)
Specifies whether IAM user passwords must contain at
least one of the following non-alphanumeric characters: ! @ # $ % ^ & * ( ) _ + - = [ ] { } | '
require_uppercase_characters (bool)
Specifies whether IAM user passwords must
contain at least one uppercase character from the ISO basic Latin alphabet (A to Z).
region (string)
Region to connect to.
key (string)
Secret key to be used.
keyid (string)
Access key to be used.
profile (dict)
        A dict with region, key and keyid, or a pillar key (string)
        that contains a dict with region, key and keyid.
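
    Example SLS usage (an illustrative sketch only; the values shown are
    placeholders, not taken from the original source):

    .. code-block:: yaml

        account password policy:
          boto_iam.account_policy:
            - minimum_password_length: 12
            - require_numbers: True
            - require_uppercase_characters: True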
'''
config = locals()
ret = {'name': 'Account Policy', 'result': True, 'comment': '', 'changes': {}}
info = __salt__['boto_iam.get_account_policy'](region, key, keyid, profile)
if not info:
ret['comment'] = 'Account policy is not Enabled.'
ret['result'] = False
return ret
    # Use a dedicated loop variable so the ``key`` (AWS secret key) argument is
    # not overwritten before the update_account_password_policy call below.
    for policy_key, value in config.items():
        if policy_key in ('region', 'key', 'keyid', 'profile', 'name'):
            continue
        if value is not None and str(info[policy_key]) != str(value).lower():
            ret['comment'] = ' '.join([ret['comment'], 'Policy value {0} has been set to {1}.'.format(value, info[policy_key])])
            ret['changes'][policy_key] = str(value).lower()
if not ret['changes']:
ret['comment'] = 'Account policy is not changed.'
return ret
if __opts__['test']:
ret['comment'] = 'Account policy is set to be changed.'
ret['result'] = None
return ret
if __salt__['boto_iam.update_account_password_policy'](allow_users_to_change_password,
hard_expiry,
max_password_age,
minimum_password_length,
password_reuse_prevention,
require_lowercase_characters,
require_numbers,
require_symbols,
require_uppercase_characters,
region, key, keyid, profile):
return ret
ret['comment'] = 'Account policy is not changed.'
ret['changes'] = None
ret['result'] = False
return ret
def server_cert_absent(name, region=None, key=None, keyid=None, profile=None):
'''
Deletes a server certificate.
.. versionadded:: 2015.8.0
name (string)
The name for the server certificate. Do not include the path in this value.
region (string)
The name of the region to connect to.
key (string)
The key to be used in order to connect
keyid (string)
The keyid to be used in order to connect
profile (string)
The profile that contains a dict of region, key, keyid
'''
ret = {'name': name, 'result': True, 'comment': '', 'changes': {}}
exists = __salt__['boto_iam.get_server_certificate'](name, region, key, keyid, profile)
if not exists:
ret['comment'] = 'Certificate {0} does not exist.'.format(name)
return ret
if __opts__['test']:
ret['comment'] = 'Server certificate {0} is set to be deleted.'.format(name)
ret['result'] = None
return ret
deleted = __salt__['boto_iam.delete_server_cert'](name, region, key, keyid, profile)
if not deleted:
ret['result'] = False
ret['comment'] = 'Certificate {0} failed to be deleted.'.format(name)
return ret
ret['comment'] = 'Certificate {0} was deleted.'.format(name)
ret['changes'] = deleted
return ret
def server_cert_present(name, public_key, private_key, cert_chain=None, path=None,
region=None, key=None, keyid=None, profile=None):
'''
    Create a server certificate.
.. versionadded:: 2015.8.0
name (string)
The name for the server certificate. Do not include the path in this value.
public_key (string)
The contents of the public key certificate in PEM-encoded format.
private_key (string)
The contents of the private key in PEM-encoded format.
cert_chain (string)
The contents of the certificate chain. This is typically a
concatenation of the PEM-encoded public key certificates of the chain.
path (string)
The path for the server certificate.
region (string)
The name of the region to connect to.
key (string)
The key to be used in order to connect
keyid (string)
The keyid to be used in order to connect
profile (string)
The profile that contains a dict of region, key, keyid
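
    Example SLS usage (an illustrative sketch only; certificate names and
    salt:// paths are placeholders):

    .. code-block:: yaml

        my server certificate:
          boto_iam.server_cert_present:
            - name: mycert
            - public_key: salt://certs/mycert.crt
            - private_key: salt://certs/mycert.key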
'''
ret = {'name': name, 'result': True, 'comment': '', 'changes': {}}
exists = __salt__['boto_iam.get_server_certificate'](name, region, key, keyid, profile)
log.debug('Variables are : {0}.'.format(locals()))
if exists:
ret['comment'] = 'Certificate {0} exists.'.format(name)
return ret
if 'salt://' in public_key:
try:
public_key = __salt__['cp.get_file_str'](public_key)
except IOError as e:
log.debug(e)
ret['comment'] = 'File {0} not found.'.format(public_key)
ret['result'] = False
return ret
if 'salt://' in private_key:
try:
private_key = __salt__['cp.get_file_str'](private_key)
except IOError as e:
log.debug(e)
ret['comment'] = 'File {0} not found.'.format(private_key)
ret['result'] = False
return ret
if cert_chain is not None and 'salt://' in cert_chain:
try:
cert_chain = __salt__['cp.get_file_str'](cert_chain)
except IOError as e:
log.debug(e)
ret['comment'] = 'File {0} not found.'.format(cert_chain)
ret['result'] = False
return ret
if __opts__['test']:
ret['comment'] = 'Server certificate {0} is set to be created.'.format(name)
ret['result'] = None
return ret
created = __salt__['boto_iam.upload_server_cert'](name, public_key, private_key, cert_chain,
path, region, key, keyid, profile)
if created is not False:
ret['comment'] = 'Certificate {0} was created.'.format(name)
ret['changes'] = created
return ret
ret['result'] = False
ret['comment'] = 'Certificate {0} failed to be created.'.format(name)
return ret
def policy_present(name, policy_document, path=None, description=None,
region=None, key=None, keyid=None, profile=None):
'''
.. versionadded:: 2015.8.0
Ensure the IAM managed policy is present
name (string)
The name of the new policy.
policy_document (dict)
The document of the new policy
path (string)
The path in which the policy will be created. Default is '/'.
description (string)
Description
region (string)
Region to connect to.
key (string)
Secret key to be used.
keyid (string)
Access key to be used.
profile (dict)
A dict with region, key and keyid, or a pillar key (string)
that contains a dict with region, key and keyid.
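
    Example SLS usage (an illustrative sketch only; the policy document below is
    a placeholder):

    .. code-block:: yaml

        deny bucket deletes:
          boto_iam.policy_present:
            - name: deny-bucket-delete
            - policy_document: '{"Version": "2012-10-17", "Statement": [{"Effect": "Deny", "Action": "s3:DeleteBucket", "Resource": "*"}]}'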
'''
ret = {'name': name, 'result': True, 'comment': '', 'changes': {}}
policy = __salt__['boto_iam.get_policy'](name, region, key, keyid, profile)
if not policy:
if __opts__['test']:
ret['comment'] = 'IAM policy {0} is set to be created.'.format(name)
ret['result'] = None
return ret
created = __salt__['boto_iam.create_policy'](name, policy_document, path, description, region, key, keyid, profile)
if created:
ret['changes']['policy'] = created
ret['comment'] = ' '.join([ret['comment'], 'Policy {0} has been created.'.format(name)])
else:
ret['result'] = False
            ret['comment'] = 'Failed to create policy.'
ret['changes'] = {}
return ret
else:
policy = policy.get('policy', {})
ret['comment'] = ' '.join([ret['comment'], 'Policy {0} is present.'.format(name)])
_describe = __salt__['boto_iam.get_policy_version'](name, policy.get('default_version_id'),
region, key, keyid, profile).get('policy_version', {})
if isinstance(_describe['document'], six.string_types):
describeDict = json.loads(_describe['document'])
else:
describeDict = _describe['document']
if isinstance(policy_document, six.string_types):
policy_document = json.loads(policy_document)
r = salt.utils.compare_dicts(describeDict, policy_document)
if bool(r):
if __opts__['test']:
msg = 'Policy {0} set to be modified.'.format(name)
ret['comment'] = msg
ret['result'] = None
return ret
ret['comment'] = ' '.join([ret['comment'], 'Policy to be modified'])
policy_document = json.dumps(policy_document)
r = __salt__['boto_iam.create_policy_version'](policy_name=name,
policy_document=policy_document,
set_as_default=True,
region=region, key=key,
keyid=keyid, profile=profile)
if not r.get('created'):
ret['result'] = False
ret['comment'] = 'Failed to update policy: {0}.'.format(r['error']['message'])
ret['changes'] = {}
return ret
__salt__['boto_iam.delete_policy_version'](policy_name=name,
version_id=policy['default_version_id'],
region=region, key=key,
keyid=keyid, profile=profile)
ret['changes'].setdefault('new', {})['document'] = policy_document
ret['changes'].setdefault('old', {})['document'] = _describe['document']
return ret
def policy_absent(name,
region=None, key=None, keyid=None, profile=None):
'''
.. versionadded:: 2015.8.0
Ensure the IAM managed policy with the specified name is absent
name (string)
The name of the new policy.
region (string)
Region to connect to.
key (string)
Secret key to be used.
keyid (string)
Access key to be used.
profile (dict)
A dict with region, key and keyid, or a pillar key (string)
that contains a dict with region, key and keyid.
'''
ret = {'name': name, 'result': True, 'comment': '', 'changes': {}}
r = __salt__['boto_iam.policy_exists'](name,
region=region, key=key, keyid=keyid, profile=profile)
if not r:
ret['comment'] = 'Policy {0} does not exist.'.format(name)
return ret
if __opts__['test']:
ret['comment'] = 'Policy {0} is set to be removed.'.format(name)
ret['result'] = None
return ret
# delete non-default versions
versions = __salt__['boto_iam.list_policy_versions'](name,
region=region, key=key,
keyid=keyid, profile=profile)
if versions:
for version in versions:
if version.get('is_default_version', False):
continue
r = __salt__['boto_iam.delete_policy_version'](name,
version_id=version.get('version_id'),
region=region, key=key,
keyid=keyid, profile=profile)
if not r:
ret['result'] = False
ret['comment'] = 'Failed to delete policy {0}.'.format(name)
return ret
r = __salt__['boto_iam.delete_policy'](name,
region=region, key=key,
keyid=keyid, profile=profile)
if not r:
ret['result'] = False
ret['comment'] = 'Failed to delete policy {0}.'.format(name)
return ret
ret['changes']['old'] = {'policy': name}
ret['changes']['new'] = {'policy': None}
ret['comment'] = 'Policy {0} deleted.'.format(name)
return ret
def saml_provider_present(name, saml_metadata_document, region=None, key=None, keyid=None, profile=None):
'''
.. versionadded::
Ensure the SAML provider with the specified name is present.
name (string)
The name of the SAML provider.
saml_metadata_document (string)
The xml document of the SAML provider.
region (string)
Region to connect to.
key (string)
Secret key to be used.
keyid (string)
Access key to be used.
profile (dict)
A dict with region, key and keyid, or a pillar key (string)
that contains a dict with region, key and keyid.
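
    Example SLS usage (an illustrative sketch only; the metadata path is a
    placeholder):

    .. code-block:: yaml

        my saml provider:
          boto_iam.saml_provider_present:
            - saml_metadata_document: salt://saml/idp_metadata.xml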
'''
ret = {'name': name, 'result': True, 'comment': '', 'changes': {}}
if 'salt://' in saml_metadata_document:
try:
saml_metadata_document = __salt__['cp.get_file_str'](saml_metadata_document)
ET.fromstring(saml_metadata_document)
except IOError as e:
log.debug(e)
ret['comment'] = 'SAML document file {0} not found or could not be loaded'.format(name)
ret['result'] = False
return ret
for provider in __salt__['boto_iam.list_saml_providers'](region=region,
key=key, keyid=keyid,
profile=profile):
if provider == name:
ret['comment'] = 'SAML provider {0} is present.'.format(name)
return ret
if __opts__['test']:
        ret['comment'] = 'SAML provider {0} is set to be created.'.format(name)
ret['result'] = None
return ret
created = __salt__['boto_iam.create_saml_provider'](name, saml_metadata_document,
region=region, key=key, keyid=keyid,
profile=profile)
if created:
ret['comment'] = 'SAML provider {0} was created.'.format(name)
ret['changes']['new'] = name
return ret
ret['result'] = False
ret['comment'] = 'SAML provider {0} failed to be created.'.format(name)
return ret
def saml_provider_absent(name, region=None, key=None, keyid=None, profile=None):
'''
.. versionadded::
Ensure the SAML provider with the specified name is absent.
name (string)
The name of the SAML provider.
region (string)
Region to connect to.
key (string)
Secret key to be used.
keyid (string)
Access key to be used.
profile (dict)
A dict with region, key and keyid, or a pillar key (string)
that contains a dict with region, key and keyid.
'''
ret = {'name': name, 'result': True, 'comment': '', 'changes': {}}
provider = __salt__['boto_iam.list_saml_providers'](region=region,
key=key, keyid=keyid,
profile=profile)
if len(provider) == 0:
ret['comment'] = 'SAML provider {0} is absent.'.format(name)
return ret
if __opts__['test']:
ret['comment'] = 'SAML provider {0} is set to be removed.'.format(name)
ret['result'] = None
return ret
deleted = __salt__['boto_iam.delete_saml_provider'](name, region=region,
key=key, keyid=keyid,
profile=profile)
if deleted is not False:
ret['comment'] = 'SAML provider {0} was deleted.'.format(name)
ret['changes']['old'] = name
return ret
ret['result'] = False
ret['comment'] = 'SAML provider {0} failed to be deleted.'.format(name)
return ret
def _get_error(error):
# Converts boto exception to string that can be used to output error.
error = '\n'.join(error.split('\n')[1:])
error = ET.fromstring(error)
code = error[0][1].text
message = error[0][2].text
return code, message
| [
"[email protected]"
] | |
bc1c50c9adb00b6d195e495eedb5e73b7c85c345 | a8ba2295b41b26716dc6dbf62392c7ea9ef5ea08 | /apps/calificacion/views.py | ae9ab7a486377042221e173d02a485c0f35e8e22 | [] | no_license | clcneogeek325/iicea | 7131fd335db94a4af8dbddf5d0126672fc3b312e | 328079ee6e642dc2ecda3b9fd4bf119d81260f3d | refs/heads/master | 2021-01-25T05:15:44.448413 | 2014-12-18T03:34:45 | 2014-12-18T03:34:45 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,080 | py | from django.shortcuts import render_to_response
from django.template import RequestContext
from .models import calificacion
from .forms import calificacionForm
from django.http import HttpResponse,HttpResponseRedirect
from django.core.exceptions import ObjectDoesNotExist
from django.contrib.auth.models import User
from django.contrib.auth.forms import UserCreationForm
from apps.semestre.models import semestre
from apps.alumno.models import alumno
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
from django.contrib.auth.decorators import login_required
from iicea.settings import URL_LOGIN
from django.template.loader import render_to_string
import cStringIO as StringIO
import ho.pisa as pisa
import cgi
def generar_pdf(html):
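    # Render the HTML string into an in-memory PDF with pisa and return it as an application/pdf response.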
result = StringIO.StringIO()
pdf = pisa.pisaDocument(StringIO.StringIO(html.encode("UTF-8")), result)
if not pdf.err:
return HttpResponse(result.getvalue(),content_type='application/pdf')
return HttpResponse('Error al generar el PDF: %s' % cgi.escape(html))
def pdf(request):
ctx = {'pagesize':'A4'}
html = render_to_string('calificacion/pdf.html', ctx,
context_instance=RequestContext(request))
return generar_pdf(html)
#===============================================
#===============================================
@login_required(login_url=URL_LOGIN)
def view_lista_calificacions(request):
contact_list = calificacion.objects.order_by('id').reverse()
paginator = Paginator(contact_list, 3)# Show 25 contacts per page
page = request.GET.get('page')
try:
lista = paginator.page(page)
except PageNotAnInteger:
# If page is not an integer, deliver first page.
lista = paginator.page(1)
except EmptyPage:
# If page is out of range (e.g. 9999), deliver last page of results.
lista = paginator.page(paginator.num_pages)
ctx = {'lista':lista}
return render_to_response("calificacion/lista.html",ctx,
context_instance=RequestContext(request))
@login_required(login_url=URL_LOGIN)
def view_eliminar_calificacion(request,id):
c = calificacion.objects.get(pk=id)
c.activo = False
c.save()
return HttpResponseRedirect('/calificacion/')
@login_required(login_url=URL_LOGIN)
def view_calificaciones_alumno(request,id):
lista = semestre.objects.filter(activo=True)
ctx = {'lista':lista,'id_alumno':id}
return render_to_response("calificacion/semestres.html",ctx,
context_instance=RequestContext(request))
def calificaciones_alumno_x_semestre(request,id_semestre,id_user):
print id_semestre,"---",id_user
s = semestre.objects.get(pk=id_semestre)
a = alumno.objects.get(alumno_id=id_user)
lista = calificacion.objects.filter(alumno=a,semestre=s)
msg = "Lista de Calificaciones"
ctx = {'lista':lista,'msg':msg,'id_semestre':id_semestre,'id_user':id_user}
return ctx
@login_required(login_url=URL_LOGIN)
def view_calificaciones_alumno_x_semestre(request,id_semestre,id_user):
ctx = calificaciones_alumno_x_semestre(request,id_semestre,id_user)
return render_to_response("calificacion/calificaciones.html",ctx,
context_instance=RequestContext(request))
def pdf_calificaciones_alumno_x_semestre(request,id_semestre,id_user):
ctx = calificaciones_alumno_x_semestre(request,id_semestre,id_user)
html = render_to_string("calificacion/pdf.html",ctx,
context_instance=RequestContext(request))
return generar_pdf(html)
@login_required(login_url=URL_LOGIN)
def view_editar_calificacion(request,id):
try:
a = calificacion.objects.get(pk=id)
if request.method == "POST":
form = calificacionForm(request.POST,instance=a)
if form.is_valid():
form.save()
return HttpResponseRedirect("/calificacion/")
else:
print "no valido",form.errors
form = calificacionForm(request.POST)
ctx = {'form':form}
return render_to_response('calificacion/edit.html',ctx,
context_instance=RequestContext(request))
else:
form = calificacionForm(instance=a)
ctx = {'form':form}
return render_to_response('calificacion/edit.html',ctx,
context_instance=RequestContext(request))
except ObjectDoesNotExist:
ctx = {'msg':"No se encontro el perfil solicitado"}
return render_to_response('msg.html',ctx,
context_instance=RequestContext(request))
@login_required(login_url=URL_LOGIN)
def view_agregar_calificacion(request):
if calificacion.objects.filter(activo=True).exists():
datos = calificacion.objects.filter(activo=True).order_by('id').reverse()
ultimo_alumno = {'alumno':datos[0].alumno,'semestre':datos[0].semestre}
else:
ultimo_alumno = {}
if request.method == "POST":
form = calificacionForm(request.POST)
if form.is_valid():
form.save()
return HttpResponseRedirect("/calificacion/")
else:
form = calificacionForm(request.POST)
ctx = {'form':form}
return render_to_response('calificacion/add.html',ctx,
context_instance=RequestContext(request))
else:
form = calificacionForm(initial=ultimo_alumno)
ctx = {'form':form}
return render_to_response('calificacion/add.html',ctx,
context_instance=RequestContext(request))
| [
"[email protected]"
] | |
3a376fb7cbc7165ed0919498a1c070330e60c6ff | d587b67e83a8e598e2d84bbf23edbbc395429a1a | /baiscRevision/feb21Class.py | 611499997306a15bcc875480c9735107daf2f532 | [
"MIT"
] | permissive | koromax1/code_for_Kids | e7d87264918ca7dc5d6edf62b2c1fa672a380bcd | ee4407f503b76fe56419b89008a28d5bfabe3592 | refs/heads/main | 2023-04-08T00:24:40.113132 | 2021-03-31T18:01:36 | 2021-03-31T18:01:36 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,616 | py | #revision
"""
1. Basic Data Type
2. addition,
3. if else, nested if else
4. for loop, while loop, (loop + if else)
5. function
"""
var1 = 100 #integer
var2 = 100.501 #float
var3 = True #boolean
var4 = 'Python' #string
#how to check the type of these variables?
# print(type(var1))
# print(type(var3))
#how to print something
# print('Hello World!') #single string print
# + == generic addition
# , == generic addition with type change
# print(var2 + var4) # var2 + var4
# print(var2 , var4) # var2 = str(var2) + 'Python'
#string Manipulation::: addition
# result = "float value: "+ str(var2) +" "+"string value: "+ var4 +" "+"boolean: "+ str(var3)
# print(result)
#if else for boolean types
'''
var5 = False #take an umbrella or not
rainy_weather = False
if rainy_weather == True:
if var5 == True:
print('You just save yourself from rain')
elif var5 == False:
print('you will get drenched in rain')
elif rainy_weather == False:
if var5 == True:
print('smart boy')
else:
print('lucky!!')
else:
print('You are not saved')
'''
# for i in range(5):
# print(i)
# if i == 2:
# print('Black Widow')
# if i % 2 == 0:
# print('BATMAN')
# else:
# print('Spiderman')
# i = 1
# while i < 5:
# print(i)
# i = i + 1
#function
def addition():
a = 5
b = 5
print('The addition function output is ')
print(a+b)
# addition()
#parameter or argument pass
def substraction(value):
a = 10
print('The substraction value is ')
print(a - value)
substraction(3)
| [
"[email protected]"
] | |
fbc2a37d26fc1291c81b5a80f7b93341e7c4f4a8 | 4c9c98b7a5b21848e53dfa8fb6ead1d9ea412d48 | /algorithms/bit_manipulation/python/lonely_integer.py | 3e6d8164fcf2633c7c2160a26b764ad7037fe12f | [] | no_license | thommms/hacker_rank | 1e701c4a932e4f4c196d38fd32c7155a68da079c | fe8b05e0e73425df5d4011b290add418d461eef9 | refs/heads/master | 2020-03-19T12:50:00.808869 | 2018-05-28T17:42:09 | 2018-05-28T17:42:09 | 136,543,275 | 1 | 0 | null | 2018-06-07T23:50:26 | 2018-06-07T23:50:26 | null | UTF-8 | Python | false | false | 258 | py | from functools import reduce
n, A = int(input()), [int(x) for x in input().strip().split(' ')]
# for x in A:
# # print(A.count(x))
# if A.count(x) % 2 != 0:
# print(" ".join(str(x)))
answer = reduce((lambda x, y: x ^ y), A)  # XOR-fold: pairs cancel out (x ^ x == 0), leaving the value with an odd count
print(answer)
| [
"[email protected]"
] | |
96230b8a541d32409872d48c2fc7ee9d476559d3 | 5a8304c26aaa0e0c87ae4daafa3f1c5f56714e5d | /ProTwo/ProTwo/appTwo/migrations/0001_initial.py | 55b93a818b16730b2e068c6604b4d4dedda06aac | [] | no_license | jack456054/Django-test | c625460f3e3b2061eff6d13dd095e32bcf3e3220 | 501837dd80608a8c982214e41f6b746655aabca5 | refs/heads/master | 2023-04-28T01:21:28.688973 | 2019-10-02T06:58:31 | 2019-10-02T06:58:31 | 210,776,683 | 0 | 0 | null | 2023-04-21T20:38:20 | 2019-09-25T06:57:10 | Python | UTF-8 | Python | false | false | 630 | py | # Generated by Django 2.2.5 on 2019-09-27 03:55
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='User',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('first_name', models.CharField(max_length=128)),
('last_name', models.CharField(max_length=128)),
('email', models.EmailField(max_length=264, unique=True)),
],
),
]
| [
"[email protected]"
] | |
fc21204d2e8e095e9a3d71541379fab4054538ac | 894b290b4f4f47b5eb523c23efd7bd6110d91b2f | /44_xhs_note/xhs_note/xhs_note/scripts/xhs_transform.py | 9fbe1879beb2c7540b72e328915d81996f564fd9 | [] | no_license | wliustc/SpiderS | 6650c00616d11239de8c045828bafdc5a299b1ce | 441f309c50d28c1a3917bed19321cd5cbe7c2861 | refs/heads/master | 2020-03-27T06:15:39.495785 | 2018-06-14T07:55:44 | 2018-06-14T07:55:44 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,640 | py | # -*- coding: utf-8 -*-
import sys
import json
import re
reload(sys)
sys.setdefaultencoding('utf-8')
_mapping = {
'sellCount':re.compile(r'\\"sellCount\\":\\"(\d+)\\"'),
}
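# Helper below: look up the pre-compiled pattern for a key in _mapping and return its first capture group (or the default).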
def get_regex_group1(key,_str, default=None):
p = _mapping[key]
m = p.search(_str)
if m:
return m.group(1)
return default
def get_json_hierarchy(_json_obj, arch_ele_list):
for e in arch_ele_list:
if e not in _json_obj:
return None
_json_obj = _json_obj[e]
return _json_obj
def format_list(data):
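    # Normalize every field to text and strip tabs, newlines and control characters so each record is safe to emit as one tab-separated line.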
result = []
if data:
for item in data:
tmp = ''
if item:
if type(item) == unicode:
tmp = item.encode('utf-8')
tmp = tmp.replace('\u0001','')
tmp = tmp.replace('\n',' ')
tmp = tmp.replace('\t',' ')
tmp = tmp.replace('\r',' ')
tmp = re.sub(r'[\x01-\x1f]','', tmp)
tmp = tmp.strip()
elif type(item) == int:
tmp = str(item)
elif type(item) == str:
tmp = item.encode('utf-8').replace("\u0001",'')
tmp = tmp.replace('\n',' ')
tmp = re.sub(r'[\x01-\x1f]','', tmp)
tmp = tmp.replace('\t',' ')
tmp = tmp.replace('\r',' ')
tmp = tmp.decode('utf-8').strip()
else:
tmp = item
result.append(tmp)
return result
for line in sys.stdin:
try:
line = json.loads(line)
line = line['content']
result = []
if line:
note = json.loads(line['note'])
lists = json.loads(line['list'])
#id
result.append(lists['id'])
#task_date
result.append(line['task_date'])
#oid
result.append(line['oid'])
#list
result.append(line['list'])
#note
result.append(line['note'])
#comments
if 'comments' in note:
result.append(note['comments'])
else:
result.append(None)
#category
if 'category' in note:
result.append(note['category'])
else:
result.append(None)
#p_time
times = note.get('time',None)
result.append("{0}:00".format(times))
result.append(line['task_date'])
print "\t".join(format_list(result))
except:
print "$$$$$$$$$$$$$ ex"
pass
| [
"[email protected]"
] | |
f79fe5e3d38708362ecb883e7298586ff89912a3 | 0dae97b2205ef5d8ce884ec2af4bf99ad2baec43 | /drf_admin/apps/monitor/views/error.py | 85e4282f3af0cc742498d883c97e4d8ba6ab05f3 | [
"MIT"
] | permissive | 15051882416/drf_admin | 2520affacd0345d042b499c3e9a56a112cc235d5 | 0b31fa5248afb6fc20e6ef425b2dcc4d39977d81 | refs/heads/master | 2022-12-31T04:57:27.017134 | 2020-10-24T01:09:58 | 2020-10-24T01:09:58 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,013 | py | # -*- coding: utf-8 -*-
"""
@author : Wang Meng
@github : https://github.com/tianpangji
@software : PyCharm
@file : error.py
@create : 2020/10/3 16:18
"""
from rest_framework import status
from rest_framework.filters import SearchFilter
from rest_framework.generics import ListAPIView
from rest_framework.response import Response
from monitor.models import ErrorLogs
from monitor.serializers.error import ErrorLogsSerializer
class ErrorLogAPIView(ListAPIView):
"""
get:
监控--错误日志列表
错误日志列表, status: 200(成功), return: 错误日志列表信息
delete:
监控--错误日志清空
错误日志清空, status: 204(成功), return: None
"""
queryset = ErrorLogs.objects.all()
serializer_class = ErrorLogsSerializer
filter_backends = (SearchFilter,)
search_fields = ('username', 'view', 'desc', 'ip')
def delete(self, request):
self.queryset.delete()
return Response(status=status.HTTP_204_NO_CONTENT)
| [
"[email protected]"
] | |
8b6ef84075551101e3b0b9f5f29542a3f477fbe9 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03455/s301099697.py | d18a961cc423fd1f50780324308fcbf6869269e6 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 139 | py | input = input().strip().split()
a = int(input[0])
b = int(input[1])
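# The product a*b is even exactly when at least one of a and b is even.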
if a % 2 == 0 or b % 2 == 0:
print('Even')
else:
print('Odd')
| [
"[email protected]"
] | |
59b74c155bf78c020afb0694200450f11e982f0e | 0e4d09b2a1b93aaa6d623d16905854d993a934ae | /Python/Django/belt_reviewer/apps/bookReviews/apps.py | f6a890ac833bc47e8802d8b4cb392f83db148f59 | [] | no_license | freefaller69/DojoAssignments | ee7f6308b02041be3244f795422e0e044d4a41b2 | f40426ac448026c1172048665f36024ad22f0d81 | refs/heads/master | 2021-01-17T10:23:39.419514 | 2017-07-25T00:50:41 | 2017-07-25T00:50:41 | 84,012,790 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 170 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.apps import AppConfig
class BookreviewsConfig(AppConfig):
name = 'bookReviews'
| [
"[email protected]"
] | |
858a562d0dc95131c95bea67ee2ba1707e80d416 | 0c90211f4564d4541aade68cf93997bcf64827f1 | /tests/ipfwd/test_nhop_group.py | 05d4ffdd0e6c9ec1b94bbd40e96ad1f919f89340 | [
"LicenseRef-scancode-generic-cla",
"Apache-2.0"
] | permissive | stephenxs/sonic-mgmt | 74a3662212a5ee5b2583a93fc17b43cdd5a2c0a0 | 85dd08e4d86884ff0031c75b99de3c67b5e24698 | refs/heads/master | 2023-08-15T17:48:52.537559 | 2023-05-19T03:10:08 | 2023-05-19T03:10:08 | 188,744,518 | 0 | 0 | NOASSERTION | 2021-09-17T03:06:51 | 2019-05-27T00:15:18 | Python | UTF-8 | Python | false | false | 30,531 | py | import ipaddr
import logging
import os
import pytest
import random
import time
from collections import namedtuple
from collections import defaultdict
from ptf.mask import Mask
import ptf.packet as scapy
import ptf.testutils as testutils
from tests.common.helpers.assertions import pytest_assert
from tests.common.cisco_data import is_cisco_device
from tests.common.mellanox_data import is_mellanox_device, get_chip_type
from tests.common.innovium_data import is_innovium_device
from tests.common.plugins.loganalyzer.loganalyzer import LogAnalyzer
from tests.common.utilities import wait_until
from tests.platform_tests.link_flap.link_flap_utils import toggle_one_link
from tests.common.platform.device_utils import fanout_switch_port_lookup
CISCO_NHOP_GROUP_FILL_PERCENTAGE = 0.92
pytestmark = [
pytest.mark.topology('t1', 't2')
]
logger = logging.getLogger(__name__)
class IPRoutes:
"""
Program IP routes with next hops on to the DUT
"""
def __init__(self, duthost, asic):
self.arp_list = []
self.asic = asic
self.duthost = duthost
fileloc = os.path.join(os.path.sep, "tmp")
self.filename = os.path.join(fileloc, "static_ip.sh")
self.ip_nhops = []
self.IP_NHOP = namedtuple("IP_NHOP", "prefix nhop")
def add_ip_route(self, ip_route, nhop_path_ips):
"""
Add IP route with ECMP paths
"""
# add IP route, nhop to list
self.ip_nhops.append(self.IP_NHOP(ip_route, nhop_path_ips))
def program_routes(self):
"""
Create a file with static ip route add commands, copy file
to DUT and run it from DUT
"""
with open(self.filename, "w") as fn:
for ip_nhop in self.ip_nhops:
ip_route = "sudo {} ip route add {}".format(
self.asic.ns_arg, ip_nhop.prefix
)
ip_nhop_str = ""
for ip in ip_nhop.nhop:
ip_nhop_str += "nexthop via {} ".format(ip)
ip_cmd = "{} {}".format(ip_route, ip_nhop_str)
fn.write(ip_cmd + "\n")
fn.close()
# copy file to DUT and run it on DUT
self.duthost.copy(src=self.filename, dest=self.filename, mode="0755")
result = self.duthost.shell(self.filename)
pytest_assert(
result["rc"] == 0,
"IP add failed on duthost:{}".format(self.filename)
)
def delete_routes(self):
"""
Create a file with static ip route del commands, copy file
to DUT and run it from DUT
"""
with open(self.filename, "w") as fn:
for ip_nhop in self.ip_nhops:
ip_route = "sudo {} ip route del {}".format(self.asic.ns_arg, ip_nhop.prefix)
fn.write(ip_route + "\n")
fn.close()
self.duthost.copy(src=self.filename, dest=self.filename, mode="0755")
try:
self.duthost.shell(self.filename)
self.duthost.shell("rm {}".format(self.filename))
os.remove(self.filename)
except: # noqa: E722
pass
class Arp:
"""
Create IP interface and create a list of ARPs with given IP,
MAC parameters
"""
def __init__(self, duthost, asic, count, iface, ip=ipaddr.IPAddress("172.16.0.0"), mac="C0:FF:EE:00"):
IP_MAC = namedtuple("IP_MAC", "ip mac")
self.iface = iface
self.ip_mac_list = []
self.duthost = duthost
self.asic = asic
self.if_addr = "{}/16".format(ip + 3)
fileloc = os.path.join(os.path.sep, "tmp")
self.filename = os.path.join(fileloc, "static_arp.sh")
# create a list of IP-MAC bindings
for i in range(11, count + 11):
moff1 = "{0:x}".format(i // 255)
moff2 = "{0:x}".format(i % 255)
self.ip_mac_list.append(IP_MAC(
"{}".format(ip + i),
"{}:{}:{}".format(mac, moff1.zfill(2), moff2.zfill(2))
))
def arps_add(self):
"""
Create a file with static arp add commands, copy file
to DUT and run it from DUT
"""
# add IP address to the eth interface
ip_iface = "ip address add {} dev {}".format(self.if_addr, self.iface)
logger.info("IF ADDR ADD {}".format(ip_iface))
result = self.asic.command(ip_iface)
pytest_assert(result["rc"] == 0, ip_iface)
arp_cmd = "sudo {} arp -s {} {}"
with open(self.filename, "w") as fn:
for ip_mac in self.ip_mac_list:
cmd = arp_cmd.format(self.asic.ns_arg, ip_mac.ip, ip_mac.mac)
fn.write(cmd + "\n")
fn.close()
self.duthost.copy(src=self.filename, dest=self.filename, mode="0755")
result = self.duthost.shell(self.filename)
pytest_assert(
result["rc"] == 0,
"arp add failed on duthost:{}".format(self.filename)
)
def arps_del(self):
"""
Create a file with static arp del commands, copy file
to DUT and run it from DUT
"""
arp_cmd = "sudo {} arp -d {}"
with open(self.filename, "w") as fn:
for ip_mac in self.ip_mac_list:
cmd = arp_cmd.format(self.asic.ns_arg, ip_mac.ip)
fn.write(cmd + "\n")
fn.close()
self.duthost.copy(src=self.filename, dest=self.filename, mode="0755")
try:
self.duthost.shell(self.filename)
self.duthost.shell("rm {}".format(self.filename))
os.remove(self.filename)
except: # noqa: E722
pass
def clean_up(self):
# delete static ARPs
self.arps_del()
# del IP address from the eth interface
ip_iface = "ip address del {} dev {}".format(self.if_addr, self.iface)
logger.info("IF ADDR DEL {}".format(ip_iface))
try:
self.asic.command(ip_iface)
except: # noqa: E722
pass
def get_crm_info(duthost, asic):
"""
    Fetch the nexthop-group CRM counters (used/available) and the CRM polling interval from the DUT.
"""
get_group_stats = ("{} COUNTERS_DB HMGET CRM:STATS"
" crm_stats_nexthop_group_used"
" crm_stats_nexthop_group_available").format(asic.sonic_db_cli)
pytest_assert(wait_until(25, 5, 0, lambda: (len(duthost.command(get_group_stats)["stdout_lines"]) >= 2)),
get_group_stats)
result = duthost.command(get_group_stats)
pytest_assert(result["rc"] == 0 or len(result["stdout_lines"]) < 2, get_group_stats)
crm_info = {
"used": int(result["stdout_lines"][0]),
"available": int(result["stdout_lines"][1])
}
get_polling = '{} CONFIG_DB HMGET "CRM|Config" "polling_interval"'.format(
asic.sonic_db_cli
)
result = duthost.command(get_polling)
pytest_assert(result["rc"] == 0, get_polling)
crm_info.update({
"polling": int(result["stdout_lines"][0])
})
return crm_info
# code from doc.python.org to generate combinations
# This is used to create unique nexthop groups
def combinations(iterable, r):
# combinations('ABCD', 2) --> AB AC AD BC BD CD
# combinations(range(4), 3) --> 012 013 023 123
pool = tuple(iterable)
n = len(pool)
if r > n:
return
indices = list(range(r))
yield tuple(pool[i] for i in indices)
while True:
for i in reversed(list(range(r))):
if indices[i] != i + n - r:
break
else:
return
indices[i] += 1
for j in range(i + 1, r):
indices[j] = indices[j - 1] + 1
yield tuple(pool[i] for i in indices)
def loganalyzer_ignore_regex_list():
ignore = [
".*Unaccounted_ROUTE_ENTRY_TABLE_entries.*",
".*ERR swss#orchagent: :- addAclTable: Failed to.*",
".*ERR swss#orchagent: :- create: create status:.*",
".*ERR syncd#syncd: [none] SAI_API_ACL:brcm_sai_dnx_create_acl_table:338 create table.*",
".*ERR syncd#syncd: [none] SAI_API_ACL:_brcm_sai_dnx_create_acl_table:7807 field group.*",
".*ERR syncd#syncd: :- processQuadEvent: attr: SAI_ACL_TABLE_ATTR_ACL_BIND_POINT_TYPE_LIST:.*",
".*ERR syncd#syncd: :- processQuadEvent: attr: SAI_ACL_TABLE_ATTR_ACL_STAGE:.*",
".*ERR syncd#syncd: :- processQuadEvent: attr: SAI_ACL_TABLE_ATTR_FIELD_ACL_IP_TYPE:.*",
".*ERR syncd#syncd: :- processQuadEvent: attr: SAI_ACL_TABLE_ATTR_FIELD_ACL_RANGE_TYPE:.*",
".*ERR syncd#syncd: :- processQuadEvent: attr: SAI_ACL_TABLE_ATTR_FIELD_DSCP:.*",
".*ERR syncd#syncd: :- processQuadEvent: attr: SAI_ACL_TABLE_ATTR_FIELD_DST_IP:.*",
".*ERR syncd#syncd: :- processQuadEvent: attr: SAI_ACL_TABLE_ATTR_FIELD_DST_IPV6:.*",
".*ERR syncd#syncd: :- processQuadEvent: attr: SAI_ACL_TABLE_ATTR_FIELD_ETHER_TYPE:.*",
".*ERR syncd#syncd: :- processQuadEvent: attr: SAI_ACL_TABLE_ATTR_FIELD_ICMP_CODE:.*",
".*ERR syncd#syncd: :- processQuadEvent: attr: SAI_ACL_TABLE_ATTR_FIELD_ICMP_TYPE:.*",
".*ERR syncd#syncd: :- processQuadEvent: attr: SAI_ACL_TABLE_ATTR_FIELD_ICMPV6_CODE:.*",
".*ERR syncd#syncd: :- processQuadEvent: attr: SAI_ACL_TABLE_ATTR_FIELD_ICMPV6_TYPE:.*",
".*ERR syncd#syncd: :- processQuadEvent: attr: SAI_ACL_TABLE_ATTR_FIELD_IN_PORTS:.*",
".*ERR syncd#syncd: :- processQuadEvent: attr: SAI_ACL_TABLE_ATTR_FIELD_IP_PROTOCOL:.*",
".*ERR syncd#syncd: :- processQuadEvent: attr: SAI_ACL_TABLE_ATTR_FIELD_IPV6_NEXT_HEADER:.*",
".*ERR syncd#syncd: :- processQuadEvent: attr: SAI_ACL_TABLE_ATTR_FIELD_L4_DST_PORT:.*",
".*ERR syncd#syncd: :- processQuadEvent: attr: SAI_ACL_TABLE_ATTR_FIELD_L4_SRC_PORT:.*",
".*ERR syncd#syncd: :- processQuadEvent: attr: SAI_ACL_TABLE_ATTR_FIELD_OUTER_VLAN_ID:.*",
".*ERR syncd#syncd: :- processQuadEvent: attr: SAI_ACL_TABLE_ATTR_FIELD_SRC_IP:.*",
".*ERR syncd#syncd: :- processQuadEvent: attr: SAI_ACL_TABLE_ATTR_FIELD_SRC_IPV6:.*",
".*ERR syncd#syncd: :- processQuadEvent: attr: SAI_ACL_TABLE_ATTR_FIELD_TCP_FLAGS:.*",
".*ERR syncd#syncd: :- sendApiResponse: api SAI_COMMON_API_CREATE.*",
".*ERR swss#orchagent: :- getResAvailableCounters: Failed to get availability for object_type.*",
".*brcm_sai_dnx_create_acl_table:.*",
]
return ignore
def build_pkt(dest_mac, ip_addr, ttl, flow_count):
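    # Build a TCP probe whose L4 ports vary with flow_count so each flow hashes independently,
    # plus a mask that ignores header fields rewritten during routing.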
pkt = testutils.simple_tcp_packet(
eth_dst=dest_mac,
eth_src="00:11:22:33:44:55",
pktlen=100,
ip_src="19.0.0.100",
ip_dst=ip_addr,
ip_ttl=ttl,
tcp_dport=200 + flow_count,
tcp_sport=100 + flow_count
)
exp_packet = Mask(pkt)
exp_packet.set_do_not_care_scapy(scapy.Ether, "dst")
exp_packet.set_do_not_care_scapy(scapy.Ether, "src")
exp_packet.set_do_not_care_scapy(scapy.IP, "version")
exp_packet.set_do_not_care_scapy(scapy.IP, "ihl")
exp_packet.set_do_not_care_scapy(scapy.IP, "tos")
exp_packet.set_do_not_care_scapy(scapy.IP, "len")
exp_packet.set_do_not_care_scapy(scapy.IP, "flags")
exp_packet.set_do_not_care_scapy(scapy.IP, "id")
exp_packet.set_do_not_care_scapy(scapy.IP, "frag")
exp_packet.set_do_not_care_scapy(scapy.IP, "ttl")
exp_packet.set_do_not_care_scapy(scapy.IP, "chksum")
exp_packet.set_do_not_care_scapy(scapy.IP, "options")
exp_packet.set_do_not_care_scapy(scapy.TCP, "seq")
exp_packet.set_do_not_care_scapy(scapy.TCP, "ack")
exp_packet.set_do_not_care_scapy(scapy.TCP, "reserved")
exp_packet.set_do_not_care_scapy(scapy.TCP, "dataofs")
exp_packet.set_do_not_care_scapy(scapy.TCP, "window")
exp_packet.set_do_not_care_scapy(scapy.TCP, "chksum")
exp_packet.set_do_not_care_scapy(scapy.TCP, "urgptr")
exp_packet.set_ignore_extra_bytes()
return pkt, exp_packet
def test_nhop_group_member_count(duthost, tbinfo):
"""
Test next hop group resource count. Steps:
- Add test IP address to an active IP interface
- Add static ARPs
- Create unique next hop groups
- Add IP route and nexthop
- check CRM resource
- clean up
- Verify no errors and crash
"""
# Set of parameters for Cisco-8000 devices
if is_cisco_device(duthost):
default_max_nhop_paths = 2
polling_interval = 1
sleep_time = 380
elif is_innovium_device(duthost):
default_max_nhop_paths = 3
polling_interval = 10
sleep_time = 120
elif is_mellanox_device(duthost) and get_chip_type(duthost) == 'spectrum1':
default_max_nhop_paths = 8
polling_interval = 10
sleep_time = 120
else:
default_max_nhop_paths = 32
polling_interval = 10
sleep_time = 120
nhop_group_limit = 1024
# program more than the advertised limit
extra_nhops = 10
asic = duthost.asic_instance()
# find out MAX NHOP group count supported on the platform
result = asic.run_redis_cmd(argv=["redis-cli", "-n", 6, "HGETALL", "SWITCH_CAPABILITY|switch"])
it = iter(result)
switch_capability = dict(list(zip(it, it)))
max_nhop = switch_capability.get("MAX_NEXTHOP_GROUP_COUNT")
max_nhop = nhop_group_limit if max_nhop is None else int(max_nhop)
if is_cisco_device(duthost) or is_innovium_device(duthost):
crm_stat = get_crm_info(duthost, asic)
nhop_group_count = crm_stat["available"]
nhop_group_count = int(nhop_group_count * CISCO_NHOP_GROUP_FILL_PERCENTAGE)
else:
nhop_group_count = min(max_nhop, nhop_group_limit) + extra_nhops
# find out an active IP port
ip_ifaces = list(asic.get_active_ip_interfaces(tbinfo).keys())
pytest_assert(len(ip_ifaces), "No IP interfaces found")
eth_if = ip_ifaces[0]
# Generate ARP entries
if is_cisco_device(duthost):
arp_count = 257
else:
arp_count = 40
arplist = Arp(duthost, asic, arp_count, eth_if)
arplist.arps_add()
# indices
indices = list(range(arp_count))
ip_indices = combinations(indices, default_max_nhop_paths)
# initialize log analyzer
marker = "NHOP TEST PATH COUNT {} {}".format(nhop_group_count, eth_if)
loganalyzer = LogAnalyzer(ansible_host=duthost, marker_prefix=marker)
marker = loganalyzer.init()
loganalyzer.load_common_config()
loganalyzer.expect_regex = []
loganalyzer.ignore_regex.extend(loganalyzer_ignore_regex_list())
ip_prefix = ipaddr.IPAddress("192.168.0.0")
crm_before = get_crm_info(duthost, asic)
# increase CRM polling time
asic.command("crm config polling interval {}".format(polling_interval))
logger.info("Adding {} next hops on {}".format(nhop_group_count, eth_if))
# create nexthop group
nhop = IPRoutes(duthost, asic)
try:
for i, indx_list in zip(list(range(nhop_group_count)), ip_indices):
# get a list of unique group of next hop IPs
ips = [arplist.ip_mac_list[x].ip for x in indx_list]
ip_route = "{}/31".format(ip_prefix + (2*i))
# add IP route with the next hop group created
nhop.add_ip_route(ip_route, ips)
nhop.program_routes()
# wait for routes to be synced and programmed
time.sleep(sleep_time)
crm_after = get_crm_info(duthost, asic)
finally:
nhop.delete_routes()
arplist.clean_up()
asic.command(
"crm config polling interval {}".format(crm_before["polling"])
)
# check for any errors or crash
loganalyzer.analyze(marker)
# verify the test used up all the NHOP group resources
# skip this check on Mellanox as ASIC resources are shared
if is_cisco_device(duthost):
pytest_assert(
crm_after["available"] + nhop_group_count == crm_before["available"],
"Unused NHOP group resource:{}, used:{}, nhop_group_count:{}, Unused NHOP group resource before:{}".format(
crm_after["available"], crm_after["used"], nhop_group_count, crm_before["available"]
)
)
elif is_mellanox_device(duthost):
logger.info("skip this check on Mellanox as ASIC resources are shared")
else:
pytest_assert(
crm_after["available"] == 0,
"Unused NHOP group resource:{}, used:{}".format(
crm_after["available"], crm_after["used"]
)
)
def test_nhop_group_member_order_capability(duthost, tbinfo, ptfadapter, gather_facts,
enum_rand_one_frontend_asic_index, fanouthosts):
"""
Test SONiC and SAI Vendor capability are same for ordered ecmp feature
and SAI vendor is honoring the Ordered nature of nexthop group member
"""
if is_mellanox_device(duthost):
# Note: Need remove this check once Mellanox committed Ordered ECMP
pytest.skip("Ordered ECMP currently not supported on Mellanox DUT")
asic = duthost.asic_instance(enum_rand_one_frontend_asic_index)
result = asic.run_redis_cmd(argv=["redis-cli", "-n", 6, "HGETALL", "SWITCH_CAPABILITY|switch"])
it = iter(result)
switch_capability = dict(list(zip(it, it)))
result = asic.run_redis_cmd(argv=["redis-cli", "-n", 0, "HGETALL", "SWITCH_TABLE:switch"])
it = iter(result)
switch_table = dict(list(zip(it, it)))
order_ecmp_capability = switch_capability.get("ORDERED_ECMP_CAPABLE")
order_ecmp_configured = switch_table.get("ordered_ecmp")
pytest_assert(order_ecmp_capability == order_ecmp_configured,
"Order Ecmp Feature configured and capability not same")
if order_ecmp_configured == "false":
pytest.skip("Order ECMP is not configured so skipping the test-case")
# Check Gather facts IP Interface is active one
ip_ifaces = list(asic.get_active_ip_interfaces(tbinfo).keys())
pytest_assert(len(ip_ifaces), "No IP interfaces found")
pytest_assert(gather_facts['src_router_intf_name'] in ip_ifaces, "Selected IP interfaces is not active")
# Generate ARP entries
arp_count = 8
arplist = Arp(duthost, asic, arp_count, gather_facts['src_router_intf_name'])
neighbor_mac = [neighbor[1].lower() for neighbor in arplist.ip_mac_list]
ip_route = "192.168.100.50"
ip_prefix = ip_route + "/31"
ip_ttl = 121
# create nexthop group
nhop = IPRoutes(duthost, asic)
recvd_pkt_result = defaultdict(set)
rtr_mac = asic.get_router_mac()
def built_and_send_tcp_ip_packet():
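        # Send 50 distinct flows and record, per flow, which neighbor MAC the DUT chose as the ECMP nexthop.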
for flow_count in range(50):
pkt, exp_pkt = build_pkt(rtr_mac, ip_route, ip_ttl, flow_count)
testutils.send(ptfadapter, gather_facts['dst_port_ids'][0], pkt, 10)
(_, recv_pkt) = testutils.verify_packet_any_port(test=ptfadapter, pkt=exp_pkt,
ports=gather_facts['src_port_ids'])
assert recv_pkt
# Make sure routing is done
pytest_assert(scapy.Ether(recv_pkt).ttl == (ip_ttl - 1), "Routed Packet TTL not decremented")
pytest_assert(scapy.Ether(recv_pkt).src == rtr_mac, "Routed Packet Source Mac is not router MAC")
pytest_assert(scapy.Ether(recv_pkt).dst.lower() in neighbor_mac,
"Routed Packet Destination Mac not valid neighbor entry")
recvd_pkt_result[flow_count].add(scapy.Ether(recv_pkt).dst)
    # Test/Iteration Scenario 1: Verify that after ECMP member remove/add the flow-to-nexthop order remains the same.
    # Test/Iteration Scenario 2: Verify that neighbors created in a different order still keep the flow order the same.
for iter_count in range(2):
try:
# create neighbor entry in different order list
random.seed(iter_count)
random.shuffle(arplist.ip_mac_list)
arplist.arps_add()
ips = [arplist.ip_mac_list[x].ip for x in range(arp_count)]
# add IP route
nhop.ip_nhops = []
nhop.add_ip_route(ip_prefix, ips)
nhop.program_routes()
# wait for routes to be synced and programmed
time.sleep(5)
ptfadapter.dataplane.flush()
built_and_send_tcp_ip_packet()
if iter_count == 0:
fanout, fanout_port = fanout_switch_port_lookup(fanouthosts, duthost.hostname,
gather_facts['src_port'][0])
# Simulate ECMP Acceleration with link flap where ECMP memeber are removed
# and added back to the group
# BGP service is stoped so we don't get Route Removal message
# from FRR and it is just member add/remove trigger
asic.stop_service("bgp")
time.sleep(15)
toggle_one_link(duthost, gather_facts['src_port'][0], fanout, fanout_port)
time.sleep(5)
built_and_send_tcp_ip_packet()
for flow_count, nexthop_selected in recvd_pkt_result.items():
pytest_assert(len(nexthop_selected) == 1,
"Error flow {} received on different nexthop in iteration {}"
.format(flow_count, iter_count))
finally:
asic.start_service("bgp")
time.sleep(15)
nhop.delete_routes()
arplist.clean_up()
th_asic_flow_map = {0: 'c0:ff:ee:00:00:10', 1: 'c0:ff:ee:00:00:0b',
2: 'c0:ff:ee:00:00:12',
3: 'c0:ff:ee:00:00:0d', 4: 'c0:ff:ee:00:00:11',
5: 'c0:ff:ee:00:00:0e', 6: 'c0:ff:ee:00:00:0f',
7: 'c0:ff:ee:00:00:0c', 8: 'c0:ff:ee:00:00:0e',
9: 'c0:ff:ee:00:00:11',
10: 'c0:ff:ee:00:00:0c', 11: 'c0:ff:ee:00:00:0f',
12: 'c0:ff:ee:00:00:12', 13: 'c0:ff:ee:00:00:0d',
14: 'c0:ff:ee:00:00:10',
15: 'c0:ff:ee:00:00:0b', 16: 'c0:ff:ee:00:00:11',
17: 'c0:ff:ee:00:00:0e', 18: 'c0:ff:ee:00:00:0f',
19: 'c0:ff:ee:00:00:0c',
20: 'c0:ff:ee:00:00:10', 21: 'c0:ff:ee:00:00:0b',
22: 'c0:ff:ee:00:00:12', 23: 'c0:ff:ee:00:00:0d',
24: 'c0:ff:ee:00:00:11',
25: 'c0:ff:ee:00:00:0e', 26: 'c0:ff:ee:00:00:0f',
27: 'c0:ff:ee:00:00:0c', 28: 'c0:ff:ee:00:00:0b', 29: 'c0:ff:ee:00:00:10',
30: 'c0:ff:ee:00:00:0d', 31: 'c0:ff:ee:00:00:12',
32: 'c0:ff:ee:00:00:0c', 33: 'c0:ff:ee:00:00:0f',
34: 'c0:ff:ee:00:00:0e',
35: 'c0:ff:ee:00:00:11', 36: 'c0:ff:ee:00:00:0d',
37: 'c0:ff:ee:00:00:12', 38: 'c0:ff:ee:00:00:0b', 39: 'c0:ff:ee:00:00:10',
40: 'c0:ff:ee:00:00:12', 41: 'c0:ff:ee:00:00:0d',
42: 'c0:ff:ee:00:00:10', 43: 'c0:ff:ee:00:00:0b', 44: 'c0:ff:ee:00:00:0e',
45: 'c0:ff:ee:00:00:11', 46: 'c0:ff:ee:00:00:0c',
47: 'c0:ff:ee:00:00:0f', 48: 'c0:ff:ee:00:00:0d', 49: 'c0:ff:ee:00:00:12'}
gb_asic_flow_map = {0: 'c0:ff:ee:00:00:0f', 1: 'c0:ff:ee:00:00:10',
2: 'c0:ff:ee:00:00:0e', 3: 'c0:ff:ee:00:00:0f', 4: 'c0:ff:ee:00:00:11',
5: 'c0:ff:ee:00:00:0f', 6: 'c0:ff:ee:00:00:12',
7: 'c0:ff:ee:00:00:0c', 8: 'c0:ff:ee:00:00:0e', 9: 'c0:ff:ee:00:00:10',
10: 'c0:ff:ee:00:00:11', 11: 'c0:ff:ee:00:00:0f',
12: 'c0:ff:ee:00:00:0c', 13: 'c0:ff:ee:00:00:0f',
14: 'c0:ff:ee:00:00:11',
15: 'c0:ff:ee:00:00:0c', 16: 'c0:ff:ee:00:00:0e',
17: 'c0:ff:ee:00:00:11', 18: 'c0:ff:ee:00:00:11', 19: 'c0:ff:ee:00:00:0c',
20: 'c0:ff:ee:00:00:10', 21: 'c0:ff:ee:00:00:0b',
22: 'c0:ff:ee:00:00:0d', 23: 'c0:ff:ee:00:00:10', 24: 'c0:ff:ee:00:00:12',
25: 'c0:ff:ee:00:00:11', 26: 'c0:ff:ee:00:00:11',
27: 'c0:ff:ee:00:00:0c', 28: 'c0:ff:ee:00:00:11', 29: 'c0:ff:ee:00:00:0c',
30: 'c0:ff:ee:00:00:12', 31: 'c0:ff:ee:00:00:10',
32: 'c0:ff:ee:00:00:11', 33: 'c0:ff:ee:00:00:0c', 34: 'c0:ff:ee:00:00:0c',
35: 'c0:ff:ee:00:00:0b', 36: 'c0:ff:ee:00:00:0d',
37: 'c0:ff:ee:00:00:10', 38: 'c0:ff:ee:00:00:0e', 39: 'c0:ff:ee:00:00:0d',
40: 'c0:ff:ee:00:00:0e', 41: 'c0:ff:ee:00:00:11',
42: 'c0:ff:ee:00:00:11', 43: 'c0:ff:ee:00:00:0c', 44: 'c0:ff:ee:00:00:0e',
45: 'c0:ff:ee:00:00:0f', 46: 'c0:ff:ee:00:00:0f',
47: 'c0:ff:ee:00:00:0c', 48: 'c0:ff:ee:00:00:0e', 49: 'c0:ff:ee:00:00:10'}
td2_asic_flow_map = {0: 'c0:ff:ee:00:00:10', 1: 'c0:ff:ee:00:00:0b',
2: 'c0:ff:ee:00:00:12',
3: 'c0:ff:ee:00:00:0d', 4: 'c0:ff:ee:00:00:11',
5: 'c0:ff:ee:00:00:0e', 6: 'c0:ff:ee:00:00:0f',
7: 'c0:ff:ee:00:00:0c', 8: 'c0:ff:ee:00:00:0e',
9: 'c0:ff:ee:00:00:11',
10: 'c0:ff:ee:00:00:0c', 11: 'c0:ff:ee:00:00:0f',
12: 'c0:ff:ee:00:00:12', 13: 'c0:ff:ee:00:00:0d',
14: 'c0:ff:ee:00:00:10',
15: 'c0:ff:ee:00:00:0b', 16: 'c0:ff:ee:00:00:11',
17: 'c0:ff:ee:00:00:0e', 18: 'c0:ff:ee:00:00:0f',
19: 'c0:ff:ee:00:00:0c',
20: 'c0:ff:ee:00:00:10', 21: 'c0:ff:ee:00:00:0b',
22: 'c0:ff:ee:00:00:12', 23: 'c0:ff:ee:00:00:0d',
24: 'c0:ff:ee:00:00:11',
25: 'c0:ff:ee:00:00:0e', 26: 'c0:ff:ee:00:00:0f',
27: 'c0:ff:ee:00:00:0c', 28: 'c0:ff:ee:00:00:0b', 29: 'c0:ff:ee:00:00:10',
30: 'c0:ff:ee:00:00:0d', 31: 'c0:ff:ee:00:00:12',
32: 'c0:ff:ee:00:00:0c', 33: 'c0:ff:ee:00:00:0f',
34: 'c0:ff:ee:00:00:0e',
35: 'c0:ff:ee:00:00:11', 36: 'c0:ff:ee:00:00:0d',
37: 'c0:ff:ee:00:00:12', 38: 'c0:ff:ee:00:00:0b', 39: 'c0:ff:ee:00:00:10',
40: 'c0:ff:ee:00:00:12', 41: 'c0:ff:ee:00:00:0d',
42: 'c0:ff:ee:00:00:10', 43: 'c0:ff:ee:00:00:0b', 44: 'c0:ff:ee:00:00:0e',
45: 'c0:ff:ee:00:00:11', 46: 'c0:ff:ee:00:00:0c',
47: 'c0:ff:ee:00:00:0f', 48: 'c0:ff:ee:00:00:0d', 49: 'c0:ff:ee:00:00:12'}
th2_asic_flow_map = {0: 'c0:ff:ee:00:00:10', 1: 'c0:ff:ee:00:00:0b',
2: 'c0:ff:ee:00:00:12',
3: 'c0:ff:ee:00:00:0d', 4: 'c0:ff:ee:00:00:11',
5: 'c0:ff:ee:00:00:0e', 6: 'c0:ff:ee:00:00:0f',
7: 'c0:ff:ee:00:00:0c', 8: 'c0:ff:ee:00:00:0e',
9: 'c0:ff:ee:00:00:11',
10: 'c0:ff:ee:00:00:0c', 11: 'c0:ff:ee:00:00:0f',
12: 'c0:ff:ee:00:00:12', 13: 'c0:ff:ee:00:00:0d',
14: 'c0:ff:ee:00:00:10',
15: 'c0:ff:ee:00:00:0b', 16: 'c0:ff:ee:00:00:11',
17: 'c0:ff:ee:00:00:0e', 18: 'c0:ff:ee:00:00:0f',
19: 'c0:ff:ee:00:00:0c',
20: 'c0:ff:ee:00:00:10', 21: 'c0:ff:ee:00:00:0b',
22: 'c0:ff:ee:00:00:12', 23: 'c0:ff:ee:00:00:0d',
24: 'c0:ff:ee:00:00:11',
25: 'c0:ff:ee:00:00:0e', 26: 'c0:ff:ee:00:00:0f',
27: 'c0:ff:ee:00:00:0c', 28: 'c0:ff:ee:00:00:0b', 29: 'c0:ff:ee:00:00:10',
30: 'c0:ff:ee:00:00:0d', 31: 'c0:ff:ee:00:00:12',
32: 'c0:ff:ee:00:00:0c', 33: 'c0:ff:ee:00:00:0f',
34: 'c0:ff:ee:00:00:0e',
35: 'c0:ff:ee:00:00:11', 36: 'c0:ff:ee:00:00:0d',
37: 'c0:ff:ee:00:00:12', 38: 'c0:ff:ee:00:00:0b', 39: 'c0:ff:ee:00:00:10',
40: 'c0:ff:ee:00:00:12', 41: 'c0:ff:ee:00:00:0d',
42: 'c0:ff:ee:00:00:10', 43: 'c0:ff:ee:00:00:0b', 44: 'c0:ff:ee:00:00:0e',
45: 'c0:ff:ee:00:00:11', 46: 'c0:ff:ee:00:00:0c',
47: 'c0:ff:ee:00:00:0f', 48: 'c0:ff:ee:00:00:0d', 49: 'c0:ff:ee:00:00:12'}
    # Make sure a given flow always hashes to the same nexthop/neighbor. This is done to try to catch cases
    # where a SAI vendor changes the hash function across SAI releases. Note this will not catch the issue every
    # time, since the same nexthop/neighbor may still be selected even after a hash function change.
    # Fill this map after the first run of the test case, which reports the neighbor selected per flow.
SUPPORTED_ASIC_TO_NEXTHOP_SELECTED_MAP = {"th": th_asic_flow_map, "gb": gb_asic_flow_map, "gblc": gb_asic_flow_map,
"td2": td2_asic_flow_map, "th2": th2_asic_flow_map}
vendor = duthost.facts["asic_type"]
hostvars = duthost.host.options['variable_manager']._hostvars[duthost.hostname]
mgFacts = duthost.get_extended_minigraph_facts(tbinfo)
dutAsic = None
for asic, nexthop_map in list(SUPPORTED_ASIC_TO_NEXTHOP_SELECTED_MAP.items()):
vendorAsic = "{0}_{1}_hwskus".format(vendor, asic)
if vendorAsic in list(hostvars.keys()) and mgFacts["minigraph_hwsku"] in hostvars[vendorAsic]:
dutAsic = asic
break
    # Vendors need to update SUPPORTED_ASIC_TO_NEXTHOP_SELECTED_MAP. To do this, run the test case a first
    # time and note the neighbor picked by each flow (pkt) sent above. Once that is determined, update the
    # map SUPPORTED_ASIC_TO_NEXTHOP_SELECTED_MAP.
pytest_assert(dutAsic, "Please add ASIC in the SUPPORTED_ASIC_TO_NEXTHOP_SELECTED_MAP \
list and update the asic to nexthop mapping")
for flow_count, nexthop_selected in recvd_pkt_result.items():
pytest_assert(nexthop_map[flow_count] in nexthop_selected,
"Flow {} is not picking expected Neighbor".format(flow_count))
| [
"[email protected]"
] | |
4f38cefdcab4a44e41529b84691a9e960842084c | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/otherforms/_falconers.py | 0fce1a8cca8e6471c93e68ec9dd97d82dc818c42 | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 230 | py |
#class header
class _FALCONERS():
def __init__(self,):
self.name = "FALCONERS"
		self.definitions = 'falconer'  # assumed fix: the original bare name falconer is undefined (NameError)
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.basic = ['falconer']
| [
"[email protected]"
] | |
bd9179a9b52e2b845931041c86375a59b7643ac9 | b15d2787a1eeb56dfa700480364337216d2b1eb9 | /accelbyte_py_sdk/api/ugc/operations/anonymization/delete_all_user_channel.py | e79b27369aad54884a209d7c6733f5955d9219bd | [
"MIT"
] | permissive | AccelByte/accelbyte-python-sdk | dedf3b8a592beef5fcf86b4245678ee3277f953d | 539c617c7e6938892fa49f95585b2a45c97a59e0 | refs/heads/main | 2023-08-24T14:38:04.370340 | 2023-08-22T01:08:03 | 2023-08-22T01:08:03 | 410,735,805 | 2 | 1 | MIT | 2022-08-02T03:54:11 | 2021-09-27T04:00:10 | Python | UTF-8 | Python | false | false | 6,830 | py | # Copyright (c) 2021 AccelByte Inc. All Rights Reserved.
# This is licensed software from AccelByte Inc, for limitations
# and restrictions contact your company contract manager.
#
# Code generated. DO NOT EDIT!
# template file: ags_py_codegen
# pylint: disable=duplicate-code
# pylint: disable=line-too-long
# pylint: disable=missing-function-docstring
# pylint: disable=missing-module-docstring
# pylint: disable=too-many-arguments
# pylint: disable=too-many-branches
# pylint: disable=too-many-instance-attributes
# pylint: disable=too-many-lines
# pylint: disable=too-many-locals
# pylint: disable=too-many-public-methods
# pylint: disable=too-many-return-statements
# pylint: disable=too-many-statements
# pylint: disable=unused-import
# AccelByte Gaming Services Ugc Service (2.11.3)
from __future__ import annotations
from typing import Any, Dict, List, Optional, Tuple, Union
from .....core import Operation
from .....core import HeaderStr
from .....core import HttpResponse
from ...models import ResponseError
class DeleteAllUserChannel(Operation):
"""Delete all user channel (DeleteAllUserChannel)
Required permission NAMESPACE:{namespace}:USER:{userId}:CHANNEL [DELETE]
Required Permission(s):
- NAMESPACE:{namespace}:USER:{userId}:CHANNEL [DELETE]
Properties:
url: /ugc/v1/public/namespaces/{namespace}/users/{userId}/channels
method: DELETE
tags: ["Anonymization"]
consumes: ["application/json", "application/octet-stream"]
produces: ["application/json"]
securities: [BEARER_AUTH]
namespace: (namespace) REQUIRED str in path
user_id: (userId) REQUIRED str in path
Responses:
204: No Content - (No Content)
401: Unauthorized - ResponseError (Unauthorized)
404: Not Found - ResponseError (Not Found)
500: Internal Server Error - ResponseError (Internal Server Error)
"""
# region fields
_url: str = "/ugc/v1/public/namespaces/{namespace}/users/{userId}/channels"
_method: str = "DELETE"
_consumes: List[str] = ["application/json", "application/octet-stream"]
_produces: List[str] = ["application/json"]
_securities: List[List[str]] = [["BEARER_AUTH"]]
_location_query: str = None
namespace: str # REQUIRED in [path]
user_id: str # REQUIRED in [path]
# endregion fields
# region properties
@property
def url(self) -> str:
return self._url
@property
def method(self) -> str:
return self._method
@property
def consumes(self) -> List[str]:
return self._consumes
@property
def produces(self) -> List[str]:
return self._produces
@property
def securities(self) -> List[List[str]]:
return self._securities
@property
def location_query(self) -> str:
return self._location_query
# endregion properties
# region get methods
# endregion get methods
# region get_x_params methods
def get_all_params(self) -> dict:
return {
"path": self.get_path_params(),
}
def get_path_params(self) -> dict:
result = {}
if hasattr(self, "namespace"):
result["namespace"] = self.namespace
if hasattr(self, "user_id"):
result["userId"] = self.user_id
return result
# endregion get_x_params methods
# region is/has methods
# endregion is/has methods
# region with_x methods
def with_namespace(self, value: str) -> DeleteAllUserChannel:
self.namespace = value
return self
def with_user_id(self, value: str) -> DeleteAllUserChannel:
self.user_id = value
return self
# endregion with_x methods
# region to methods
def to_dict(self, include_empty: bool = False) -> dict:
result: dict = {}
if hasattr(self, "namespace") and self.namespace:
result["namespace"] = str(self.namespace)
elif include_empty:
result["namespace"] = ""
if hasattr(self, "user_id") and self.user_id:
result["userId"] = str(self.user_id)
elif include_empty:
result["userId"] = ""
return result
# endregion to methods
# region response methods
# noinspection PyMethodMayBeStatic
def parse_response(
self, code: int, content_type: str, content: Any
) -> Tuple[None, Union[None, HttpResponse, ResponseError]]:
"""Parse the given response.
204: No Content - (No Content)
401: Unauthorized - ResponseError (Unauthorized)
404: Not Found - ResponseError (Not Found)
500: Internal Server Error - ResponseError (Internal Server Error)
---: HttpResponse (Undocumented Response)
---: HttpResponse (Unexpected Content-Type Error)
---: HttpResponse (Unhandled Error)
"""
pre_processed_response, error = self.pre_process_response(
code=code, content_type=content_type, content=content
)
if error is not None:
return None, None if error.is_no_content() else error
code, content_type, content = pre_processed_response
if code == 204:
return None, None
if code == 401:
return None, ResponseError.create_from_dict(content)
if code == 404:
return None, ResponseError.create_from_dict(content)
if code == 500:
return None, ResponseError.create_from_dict(content)
return self.handle_undocumented_response(
code=code, content_type=content_type, content=content
)
# endregion response methods
# region static methods
@classmethod
def create(cls, namespace: str, user_id: str, **kwargs) -> DeleteAllUserChannel:
instance = cls()
instance.namespace = namespace
instance.user_id = user_id
return instance
@classmethod
def create_from_dict(
cls, dict_: dict, include_empty: bool = False
) -> DeleteAllUserChannel:
instance = cls()
if "namespace" in dict_ and dict_["namespace"] is not None:
instance.namespace = str(dict_["namespace"])
elif include_empty:
instance.namespace = ""
if "userId" in dict_ and dict_["userId"] is not None:
instance.user_id = str(dict_["userId"])
elif include_empty:
instance.user_id = ""
return instance
@staticmethod
def get_field_info() -> Dict[str, str]:
return {
"namespace": "namespace",
"userId": "user_id",
}
@staticmethod
def get_required_map() -> Dict[str, bool]:
return {
"namespace": True,
"userId": True,
}
# endregion static methods
| [
"[email protected]"
] | |
1f214def1e5f25602cc33de641d1c798d8190ae4 | 3ae36a5791c26bb7b41a6ed7d81d16cb45cfb8c9 | /python_sicp/homework4.py | a0a8a7a7f8df743e9a01f1bf94f594462968a121 | [] | no_license | crossin/Crossin-practices | 0ef23022e3f298862aa831a7cb9684dc4aa04653 | 1b0cbe8db9b947122c40dcfca4ae883cd99b6087 | refs/heads/master | 2021-01-01T16:42:52.298084 | 2017-07-11T01:17:38 | 2017-07-11T01:17:38 | 97,899,778 | 1 | 0 | null | 2017-07-21T02:58:33 | 2017-07-21T02:58:33 | null | UTF-8 | Python | false | false | 505 | py | #question1
def make_counter():
dct = {}
def counter(x):
dct[x] = dct.get(x,0) + 1
return dct[x]
return counter
c = make_counter()
c('a')
c('b')
# print(c('c'))
# question2
def make_fib():
fib_num = 0
next_num = 1
def fib():
nonlocal fib_num
nonlocal next_num
next_num,fib_num = next_num+fib_num,next_num
return fib_num
return fib
f = make_fib()
print(f())
print(f())
print(f())
print(f())
| [
"[email protected]"
] | |
f06b8923cf042a7a8f0b46519c24463e8c09ceab | 0eaf0d3f0e96a839f2ef37b92d4db5eddf4b5e02 | /abc132/b.py | 1ce7d09d63f948f5793250950b03d97d686964c4 | [] | no_license | silphire/atcoder | b7b02798a87048757745d99e8564397d1ca20169 | f214ef92f13bc5d6b290746d5a94e2faad20d8b0 | refs/heads/master | 2023-09-03T17:56:30.885166 | 2023-09-02T14:16:24 | 2023-09-02T14:16:24 | 245,110,029 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 155 | py | n = int(input())
p = list(map(int, input().split()))
x = 0
for i in range(n - 2):
if p[i + 1] == sorted(p[i:i+3])[1]:
x += 1
print(x)
| [
"[email protected]"
] | |
27f4ae0b2cabf4a2f7cb7b767fca5ee8f99b9cb5 | 699b5dbc51b5a8bc22d0e0e5b6ce7287c9948603 | /tests/conftest.py | 8b5746dfa8d6085a2f3e8e27c4af358027be8ae6 | [] | no_license | gvalkov/riemann-python-api | ccf3db14e620a274db0a748472c93b3ddcabb619 | 873222dfdd61670333dbcf6804755a250357ebc4 | refs/heads/master | 2021-01-16T21:16:43.177708 | 2016-07-21T23:07:13 | 2016-07-21T23:07:13 | 62,182,384 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 279 | py | import pytest
@pytest.fixture
def dummy_socket():
return DummySocket()
class DummySocket:
def __init__(self):
self.data = [b'hello', b'world', b'']
def recv(self, bufsize):
return self.data.pop(0)
@pytest.fixture
def transport():
return None
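# A minimal usage sketch (hypothetical, not part of the original suite): the trailing b''
# in DummySocket.data mimics a closed connection, so a read loop that stops on an empty
# chunk terminates cleanly.
def _example_read_until_eof(sock=None):
    sock = sock or DummySocket()
    chunks = []
    while True:
        chunk = sock.recv(4096)
        if not chunk:
            break
        chunks.append(chunk)
    return b''.join(chunks)  # -> b'helloworld'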
| [
"[email protected]"
] | |
5cecbd6920e73728d767dc6630ee6999dac4c5fa | d93159d0784fc489a5066d3ee592e6c9563b228b | /FWCore/Services/test/fpe_test_2_cfg.py | c8967e9decfd318751718a2b93c2a094c3482857 | [] | permissive | simonecid/cmssw | 86396e31d41a003a179690f8c322e82e250e33b2 | 2559fdc9545b2c7e337f5113b231025106dd22ab | refs/heads/CAallInOne_81X | 2021-08-15T23:25:02.901905 | 2016-09-13T08:10:20 | 2016-09-13T08:53:42 | 176,462,898 | 0 | 1 | Apache-2.0 | 2019-03-19T08:30:28 | 2019-03-19T08:30:24 | null | UTF-8 | Python | false | false | 2,141 | py | # Unit test configuration file for EnableFloatingPointExceptions service
import os  # Since we have a general-purpose programming language, we'll use it!
import FWCore.ParameterSet.Config as cms
process = cms.Process("TEST")
import FWCore.Framework.test.cmsExceptionsFatal_cff
process.options = FWCore.Framework.test.cmsExceptionsFatal_cff.options
process.load("FWCore.Services.InitRootHandlers_cfi")
process.EnableFloatingPointExceptions = cms.Service("EnableFloatingPointExceptions",
moduleNames = cms.untracked.vstring('default', 'nofpe', 'module2'),
default = cms.untracked.PSet(
enableOverFlowEx = cms.untracked.bool(eval(os.getenv("OVERFLOW"))),
enableDivByZeroEx = cms.untracked.bool(False),
enableInvalidEx = cms.untracked.bool(eval(os.getenv("INVALID"))),
enableUnderFlowEx = cms.untracked.bool(eval(os.getenv("UNDERFLOW")))
),
module2 = cms.untracked.PSet(
enableOverFlowEx = cms.untracked.bool(False),
enableDivByZeroEx = cms.untracked.bool(eval(os.getenv("DIVIDEBYZERO"))),
enableInvalidEx = cms.untracked.bool(False),
enableUnderFlowEx = cms.untracked.bool(False)
),
nofpe = cms.untracked.PSet(
enableOverFlowEx = cms.untracked.bool(True),
enableDivByZeroEx = cms.untracked.bool(True),
enableInvalidEx = cms.untracked.bool(True),
enableUnderFlowEx = cms.untracked.bool(True)
),
setPrecisionDouble = cms.untracked.bool(True),
reportSettings = cms.untracked.bool(False)
)
process.maxEvents = cms.untracked.PSet(
input = cms.untracked.int32(1)
)
process.source = cms.Source("EmptySource")
process.module1 = cms.EDAnalyzer("FpeTester", testname = cms.string("overflow"))
process.module2 = cms.EDAnalyzer("FpeTester", testname = cms.string("division"))
process.module3 = cms.EDAnalyzer("FpeTester", testname = cms.string("invalid"))
process.module4 = cms.EDAnalyzer("FpeTester", testname = cms.string("underflow"))
process.nofpe = cms.EDAnalyzer("FpeTester", testname = cms.string("nofpe"))
process.p = cms.Path(process.nofpe*process.module1*process.module2*process.module3*process.module4)
| [
"[email protected]"
] | |
31d1db09e594ff0a03df0641f7486c2caaebbadf | 1ada3010856e39c93e2483c960aa8fc25e2b3332 | /TopInterviewQuestions/BinarySearchIterative.py | 2a47e4120cd7ba3dd11f23e4f40a9d9730fbea71 | [] | no_license | Taoge123/LeetCode | 4f9e26be05f39b37bdbb9c1e75db70afdfa1b456 | 4877e35a712f59bc7b8fffa3d8af2ffa56adb08c | refs/heads/master | 2022-02-24T20:09:21.149818 | 2020-07-31T03:18:05 | 2020-07-31T03:18:05 | 142,700,689 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 462 | py | def binarySearch(arr, l, r, x):
while l <= r:
mid = l + (r - l) // 2
if arr[mid] == x:
return mid
elif arr[mid] < x:
l = mid + 1
else:
r = mid - 1
return -1
arr = [2, 3, 4, 10, 40]
x = 10
# Function call
result = binarySearch(arr, 0, len(arr) - 1, x)
if result != -1:
print("Element is present at index %d" % result)
else:
print("Element is not present in array")
| [
"[email protected]"
] | |
03b2d22ccc0320ef2c505d0c9c9187a3a442d8fc | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/nouns/_rosebud.py | 32f351d1d00dcba1d7d029720f46c5640a86bcc3 | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 318 | py |
#calss header
class _ROSEBUD():
def __init__(self,):
self.name = "ROSEBUD"
self.definitions = [u'the beginning stage of a rose flower']
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.specie = 'nouns'
def run(self, obj1 = [], obj2 = []):
return self.jsondata
| [
"[email protected]"
] | |
10f2e9083396c3b2628cf29d367edfe99535a561 | 5d44202250e0f872500a6688c280082d721fb590 | /manage.py | e9765693f7ffae740c06634b870c5c3712fded27 | [] | no_license | crowdbotics-apps/oscar-19844 | 0edec2195f5a5b7ad284e4dae339332075859fbf | 0d1a0c744432dba6fa8d06aa88a6c491aa9e0a7e | refs/heads/master | 2022-12-04T01:55:32.806069 | 2020-08-28T12:56:50 | 2020-08-28T12:56:50 | 291,046,919 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 631 | py | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'oscar_19844.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
| [
"[email protected]"
] | |
3475f2836d81a4eaf92185524a9ef8a17f6e6b76 | 72e5338e393ce7ced7b9737542b84dc4257659b0 | /migen/test/test_sort.py | 163be8c2f38036b24d895c8566ecc0f5bec0619f | [
"BSD-2-Clause",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | mogorman/migen | 2a2c86feb79f065a6365a6f615c93a9ef916b184 | 467272f1a77be616ccbed8a5b2e1a0756ce59b6b | refs/heads/master | 2021-01-17T21:37:46.782144 | 2015-03-10T05:30:28 | 2015-03-10T05:30:28 | 30,615,751 | 1 | 0 | null | 2015-02-10T21:34:19 | 2015-02-10T21:34:19 | null | UTF-8 | Python | false | false | 734 | py | import unittest
from random import randrange
from migen.fhdl.std import *
from migen.genlib.sort import *
from migen.test.support import SimCase, SimBench
class BitonicCase(SimCase, unittest.TestCase):
class TestBench(SimBench):
def __init__(self):
self.submodules.dut = BitonicSort(8, 4, ascending=True)
def test_sizes(self):
self.assertEqual(len(self.tb.dut.i), 8)
self.assertEqual(len(self.tb.dut.o), 8)
for i in range(8):
self.assertEqual(flen(self.tb.dut.i[i]), 4)
self.assertEqual(flen(self.tb.dut.o[i]), 4)
def test_sort(self):
def cb(tb, tbp):
for i in tb.dut.i:
tbp.simulator.wr(i, randrange(1<<flen(i)))
self.assertEqual(sorted(list(tbp.dut.i)), list(tbp.dut.o))
self.run_with(cb, 20)
| [
"[email protected]"
] | |
d5cd3dd04ad13a4879984799c4d4c77396857512 | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_072/ch2_2019_08_13_16_35_18_838289.py | 9c29c9f819b8a93140238031859ca4d27127d58b | [] | no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 106 | py | def calcula_velocidade_media(x,z):
y=x/z
return y
s=4
t=2
b=calcula_velocidade_media(s,t)
print(b) | [
"[email protected]"
] | |
634c722b3755f68c71a2049285d7c29e6e4b3ca9 | 3633bab8066f576c8bf9e7908afe30bb070d0b70 | /Hack-ninth-week/1-Money-In-The-Bank/Client.py | f57203098ff116bb9ce1ebabcf4a1af2776aa555 | [] | no_license | 6desislava6/Hack-Bulgaria | 099c195e45a443cf4a3342eff6612ac2aa66565b | de4bf7baae35e21d6a7b27d4bde68247bb85b67a | refs/heads/master | 2021-01-20T11:57:29.027595 | 2015-06-02T17:36:59 | 2015-06-02T17:36:59 | 32,828,816 | 4 | 4 | null | null | null | null | UTF-8 | Python | false | false | 658 | py | class Client():
def __init__(self, id, username, balance, message, email):
self.__username = username
self.__balance = balance
self.__id = id
self.__message = message
self.__email = email
def get_username(self):
return self.__username
def get_balance(self):
return self.__balance
def get_id(self):
return self.__id
def get_message(self):
return self.__message
def set_message(self, new_message):
self.__message = new_message
def set_email(self, new_email):
self.__email = new_email
def get_email(self):
return self.__email
| [
"[email protected]"
] | |
86b9eb36ba14748eb10a6c8ae0c92d61abc315bf | c6d852e5842cf6f74123445d20ff03876377ae26 | /lemon/python22/lemon_06_190828_for_while_函数/优秀作业_0828/homework_6.py | f4d0cbc78178c71c569b3205de28dd577e11abb0 | [] | no_license | songyongzhuang/PythonCode_office | 0b3d35ca5d58bc305ae90fea8b1e8c7214619979 | cfadd3132c2c7c518c784589e0dab6510a662a6c | refs/heads/master | 2023-02-13T14:06:10.610935 | 2021-01-14T09:11:32 | 2021-01-14T09:11:32 | 327,183,429 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,451 | py | # _*_ coding: UTF-8 _*_
# @Time :2019-08-29 09:18
# @Author :清莲
# @FileName :homework_6.py
# @Software :PyCharm
# Problem 1: find the maximum of three integers
list = []
i = 0
print("输入三个整数,最后返回你最大的数字~")
while i != 3:
try:
num = int(input())
list.append(num)
i += 1
except:
print("请输入整型数字")
print("最大的数字是", max(list))
# Problem 2: print the 9x9 multiplication table
print("\n九九乘法表:")
i = 1
while i != 10:
j = 1
while j <= i:
print("{0} * {1} = {2}".format(j, i, i * j), end='\t')
j += 1
print()
i += 1
# Delete the elements in the list
black_list = ['卖茶叶', '卖面膜', '卖保险', '卖花生', '卖手机']
black_list.clear()
"""第二种方法
black_list = ['卖茶叶', '卖面膜', '卖保险', '卖花生', '卖手机']
del black_list
black_list = []
"""
"""第三种方法:我猜实际希望操作为通过循环一个一个删除
black_list = ['卖茶叶', '卖面膜', '卖保险', '卖花生', '卖手机']
for i in range(black_list.__len__()):
black_list.pop()
"""
# Problem 4: implement sorting with loops
# Classic sorting algorithms: bubble, selection, insertion, merge, shell, quick, heap; I will just write the three most basic ones
def bubbleSort(arr):  # bubble sort
for i in range(1, len(arr)):
for j in range(0, len(arr) - i):
if arr[j] > arr[j + 1]:
arr[j], arr[j + 1] = arr[j + 1], arr[j]
return arr
def selectionSort(arr):  # selection sort
for i in range(len(arr) - 1):
minIndex = i
for j in range(i + 1, len(arr)):
if arr[j] < arr[minIndex]:
minIndex = j
if i != minIndex:
arr[i], arr[minIndex] = arr[minIndex], arr[i]
return arr
def insertionSort(arr):  # insertion sort
for i in range(len(arr)):
preIndex = i - 1
current = arr[i]
while preIndex >= 0 and arr[preIndex] > current:
arr[preIndex + 1] = arr[preIndex]
preIndex -= 1
arr[preIndex + 1] = current
return arr
a = [1, 7, 4, 89, 34, 2]
print("\n排序后的a:", bubbleSort(a))
# Problem 5: define a function to determine whether login succeeds
def setUp(user, password):
if user == 'lemon' and password == 'best':
print("登录系统成功")
else:
print("用户名或密码错误")
user = input("\n用户名:")
password = input("密码:")
setUp(user, password)
| [
"[email protected]"
] | |
48b7314ccc78a5208c9f222e43dc1dfa9beb3baf | 0b69a011c9ffee099841c140be95ed93c704fb07 | /problemsets/Codeforces/Python/A1207.py | b1961a8fecbfbe5bbae1031de06c32ef6f6ef39a | [
"Apache-2.0"
] | permissive | juarezpaulino/coderemite | 4bd03f4f2780eb6013f07c396ba16aa7dbbceea8 | a4649d3f3a89d234457032d14a6646b3af339ac1 | refs/heads/main | 2023-01-31T11:35:19.779668 | 2020-12-18T01:33:46 | 2020-12-18T01:33:46 | 320,931,351 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 255 | py | """
*
* Author: Juarez Paulino(coderemite)
* Email: [email protected]
*
"""
for _ in '0'*int(input()):
b,p,f=map(int,input().split())
h,c=map(int,input().split())
if h<c: h,c,p,f=c,h,f,p
b//=2; t=min(p,b); b-=t
print(h*t+c*min(b,f)) | [
"[email protected]"
] | |
453ad9e3e455b7dd53970c8dae92d54c5ff91fc4 | 72488f37a830b7a2d29be0dc98815ef3fac1250b | /examples/tox21/tox21_DAG.py | acb32eabd5cf75c632c4c636c4d0625965cda61f | [
"MIT"
] | permissive | mhejrati/deepchem | d62ffebf3dfe680534ebcca528302ca31dbdf95b | 8a35de2ec17312a8630690387e730d18b5267a93 | refs/heads/master | 2021-01-18T20:22:43.834707 | 2017-04-01T22:42:42 | 2017-04-01T22:42:42 | 86,959,622 | 1 | 0 | null | 2017-04-02T03:15:45 | 2017-04-02T03:15:45 | null | UTF-8 | Python | false | false | 1,977 | py | """
Script that trains DAG models on Tox21 dataset.
"""
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
import numpy as np
np.random.seed(123)
import tensorflow as tf
tf.set_random_seed(123)
import deepchem as dc
# Load Tox21 dataset
tox21_tasks, tox21_datasets, transformers = dc.molnet.load_tox21(
featurizer='GraphConv')
train_dataset, valid_dataset, test_dataset = tox21_datasets
# Fit models
metric = dc.metrics.Metric(
dc.metrics.roc_auc_score, np.mean, mode="classification")
max_atoms_train = max([mol.get_num_atoms() for mol in train_dataset.X])
max_atoms_valid = max([mol.get_num_atoms() for mol in valid_dataset.X])
max_atoms_test = max([mol.get_num_atoms() for mol in test_dataset.X])
max_atoms = max([max_atoms_train, max_atoms_valid, max_atoms_test])
transformer = dc.trans.DAGTransformer(max_atoms=max_atoms)
train_dataset.reshard(512)
train_dataset = transformer.transform(train_dataset)
valid_dataset.reshard(512)
valid_dataset = transformer.transform(valid_dataset)
test_dataset.reshard(512)
test_dataset = transformer.transform(test_dataset)
# Number of features on conv-mols
n_feat = 75
# Batch size of models
batch_size = 64
graph = dc.nn.SequentialDAGGraph(75, batch_size=batch_size, max_atoms=max_atoms)
graph.add(dc.nn.DAGLayer(30, 75, max_atoms=max_atoms))
graph.add(dc.nn.DAGGather(max_atoms=max_atoms))
model = dc.models.MultitaskGraphClassifier(
graph,
len(tox21_tasks),
n_feat,
batch_size=batch_size,
learning_rate=1e-3,
learning_rate_decay_time=1000,
optimizer_type="adam",
beta1=.9,
beta2=.999)
# Fit trained model
model.fit(train_dataset, nb_epoch=20, log_every_N_batches=5)
print("Evaluating model")
train_scores = model.evaluate(train_dataset, [metric], transformers)
valid_scores = model.evaluate(valid_dataset, [metric], transformers)
print("Train scores")
print(train_scores)
print("Validation scores")
print(valid_scores)
| [
"[email protected]"
] | |
771a079115d604f1bcfedc48fe6db067bc10275b | 048c6b84e679a3e81bf7b4980ad2b4a99781b9b7 | /tests/unit/qm/corfunctions/spectraldensities_test.py | e42af2cdcb2f8907d0c9d94e242a331c08acd2bd | [] | no_license | saayeh/quantarhei | 9b7a7c60e1325ef783bdbc9ac4b6f33a13301802 | b77a41272b7df0ccbcde2710bf04bf412c126a6f | refs/heads/master | 2020-12-07T06:29:27.954470 | 2017-09-01T21:09:45 | 2017-09-01T21:09:45 | 66,932,421 | 0 | 0 | null | 2016-08-30T10:52:11 | 2016-08-30T10:52:11 | null | UTF-8 | Python | false | false | 3,212 | py | # -*- coding: utf-8 -*-
import unittest
import numpy
import matplotlib.pyplot as plt
"""
*******************************************************************************
Tests of the quantarhei.qm.corfunctions.spectraldensities module
*******************************************************************************
"""
from quantarhei import SpectralDensity, CorrelationFunction
from quantarhei import TimeAxis
from quantarhei import energy_units
class TestSpectralDensity(unittest.TestCase):
"""Tests spectral densities module
"""
def test_underdamped_brownian_oscillator(self):
"""Testing Underdamped Brownian oscillator spectral density
"""
par = dict(ftype="UnderdampedBrownian",
reorg = 1.0,
freq = 500.0,
gamma = 1.0/500.0)
parO = dict(ftype="OverdampedBrownian",
reorg = 200.0,
cortime = 100.0,
T = 300.0)
par["T"] = 300.0
params = []
for i in range(5):
p = par.copy()
p["freq"] = par["freq"] + (i+1)*200.0
params.append(p)
time = TimeAxis(0.0, 100000, 1.0)
#
# Adding through correlation functions
#
with energy_units("1/cm"):
sd = SpectralDensity(time, par)
cf = sd.get_CorrelationFunction(temperature=300)
#cf.plot()
tot_cf = cf
tot_cf.axis = time
for p in params:
sd = SpectralDensity(time,p)
cf = sd.get_CorrelationFunction(temperature=300)
cf.axis = time
tot_cf += cf
#tot_cf.plot(show=False)
ct = CorrelationFunction(time, parO)
tot_cf += ct
tot_sd1 = tot_cf.get_SpectralDensity()
#tot_sd1.plot(show=False)
#tt.plot()
#
# Adding through SpectralDensity
#
with energy_units("1/cm"):
sd = SpectralDensity(time, par)
ax = sd.axis
tot_sd2 = sd
for p in params:
sd = SpectralDensity(time, p)
sd.axis = ax
tot_sd2 += sd
ov = SpectralDensity(time, parO)
ov.axis = ax
tot_sd2 += ov
#tot_sd2.plot(color="-r")
numpy.testing.assert_allclose(tot_sd1.data, tot_sd2.data, atol=1.0e-3)
cf1 = tot_sd1.get_CorrelationFunction(temperature=300)
cf2 = tot_sd2.get_CorrelationFunction(temperature=300)
#cf1.plot(show=False)
#cf2.plot(color="-r", axis=[0.0, 2000,
# numpy.min(cf1.data)-numpy.max(cf1.data)*0.1,
# numpy.max(cf1.data)*1.1])
numpy.testing.assert_allclose(cf1.data, cf2.data, atol=1.0e-3)
| [
"[email protected]"
] | |
4866fa215547659f317b66100bf9c6726089084b | 9bb78acf73e7ab74e3f85078499a4520594f060f | /concat_wiki.py | 1d1a6567e67f227989255c85c72795ef42abfcda | [
"Apache-2.0"
] | permissive | VNGResearch/crawl_news | e65f8ae2c4f0d7cbe51c4e072f3e9200c4490ddd | 187dfc9fa228435669a81f20f8d4d8e7b9bdf2fd | refs/heads/master | 2021-01-13T13:32:01.952767 | 2016-12-05T08:55:57 | 2016-12-05T08:55:57 | 72,624,025 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,134 | py | '''The code is suing to format wiki text after used WikiExtractor for dump archieve.'''
import os, glob, pdb
dir_in = './data/wiki/text/'
dir_out = './data/wiki/'
with open(os.path.join(dir_out, 'wiki_concat.txt'), 'w') as fw:
for d in os.listdir(dir_in):
print('===================', d)
for filename in glob.iglob(os.path.join(dir_in, d) + '/wiki_*'):
#print('process {}'.format(filename))
content = ''
title = True
with open(filename) as f:
for line in f:
line = line.strip()
if line== '':
continue
if line.startswith('<doc'):
content = ''
title = True
continue
if title ==True:
title = False
continue
if line.startswith('</doc'):
fw.write(content.strip() + '\n')
#pdb.set_trace()
else:
content += ' ' + line
| [
"[email protected]"
] | |
8753e00f80c068ee6d8255bab175a8bb7d47ecc7 | 8b11fb374dca3b0515dc804aae66921201653a19 | /checkio/logistic-golf.py | e5441b5be0e15166f92f0b463fbd4da08c9d9ac0 | [
"Giftware"
] | permissive | nptit/python-snippets | 670b8d672e2ad3a6de264f21187bb497a7b0779b | b1eab44b50765e1710529747fd07a5ce320dd860 | refs/heads/master | 2021-01-25T11:27:32.989352 | 2016-04-26T17:28:22 | 2016-04-26T17:28:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 379 | py | # too slow!
from itertools import*
def golf(m):
n=len(m);x=range;d=[]
for r in x(2,n+1):
for p in permutations(x(n), r):
if p[0]==0 and p[-1]==n-1 and all([m[p[i-1]][p[i]]>0 for i in x(1,len(p))]):
d.append(sum([m[p[i-1]][p[i]] for i in x(1,len(p))]))
return min(d) if d else 0
print golf(((0, 80, 58, 0), (80, 0, 71, 80), (58, 71, 0, 58), (0, 80, 58, 0))) == 116
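# A faster alternative (sketch only): Dijkstra over the same matrix avoids enumerating
# permutations. It assumes the conventions used by golf() above: node 0 is the start,
# node n-1 is the goal, and m[i][j] == 0 means there is no edge.
import heapq
def golf_dijkstra(m):
    n = len(m)
    dist = [float('inf')] * n
    dist[0] = 0
    heap = [(0, 0)]
    while heap:
        d, u = heapq.heappop(heap)
        if d > dist[u]:
            continue  # stale heap entry
        for v in range(n):
            w = m[u][v]
            if w > 0 and d + w < dist[v]:
                dist[v] = d + w
                heapq.heappush(heap, (dist[v], v))
    return dist[n - 1] if dist[n - 1] != float('inf') else 0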
| [
"[email protected]"
] | |
d0de9b6633cc377588297dcd15b40aac7d775ed4 | f0d713996eb095bcdc701f3fab0a8110b8541cbb | /QgSMSMpfcEebAyCye_8.py | 06bd85d0df27fd7f46dc66b430fe2682ed749eff | [] | no_license | daniel-reich/turbo-robot | feda6c0523bb83ab8954b6d06302bfec5b16ebdf | a7a25c63097674c0a81675eed7e6b763785f1c41 | refs/heads/main | 2023-03-26T01:55:14.210264 | 2021-03-23T16:08:01 | 2021-03-23T16:08:01 | 350,773,815 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 906 | py | """
One cause for speeding is the desire to shorten the time spent traveling. In
long distance trips speeding does save an appreciable amount of time. However,
the same cannot be said about short distance trips.
Create a function that calculates the amount of time saved were you traveling
with an average speed that is _above_ the speed-limit as compared to traveling
with an average speed _exactly at_ the speed-limit.
### Examples
# The parameter's format is as follows:
# (speed limit, avg speed, distance traveled at avg speed)
time_saved(80, 90, 40) ➞ 3.3
time_saved(80, 90, 4000) ➞ 333.3
time_saved(80, 100, 40 ) ➞ 6.0
time_saved(80, 100, 10) ➞ 1.5
### Notes
* Speed = distance/time
* The time returned should be in **minutes** , not hours.
"""
def time_saved(s_lim, s_avg, d):
return round(((d/s_lim)*60) - ((d/s_avg)*60), 1)
| [
"[email protected]"
] | |
6ff8cc12f595864bcc1aa10f23be4625856b1410 | a67571dc6f4e83e44a90e4802d2f54b22fb21fd2 | /tns_glass/expenses/urls.py | 03a7c1c571b56cd9db1aa78f3d491ba4469d59b8 | [] | no_license | TechnoServe/SMSBookkeeping | 1833690e3329967b6ae731aad2ddb6b93655d935 | cbc816368ba4980ca6ce87c2bda95b76295009f1 | refs/heads/master | 2020-08-03T19:12:13.023005 | 2019-11-05T12:08:45 | 2019-11-05T12:08:45 | 211,856,646 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 126 | py | from django.conf.urls import *
from .views import *
# set up our url patterns
urlpatterns = ExpenseCRUDL().as_urlpatterns()
| [
"[email protected]"
] | |
9b7935ea289223dec3fdf5cf5c323d2d1c109180 | 358519772669c73092f625f630722c38e1d33783 | /DatabaseTopology/Force/G96Angle.py | 387d9265f6154a9856b137dc70e643a014848156 | [] | no_license | minghao2016/mmtools | e7e61aca084498408ceae965dd6c9450ad89eafa | 3ade988afb51cd54ee5a4067d8deaad88afbb0fe | refs/heads/master | 2021-09-21T01:02:22.522187 | 2014-09-19T03:40:03 | 2014-09-19T03:40:03 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 720 | py | from Topology.Decorators import *
from Topology.Force.AbstractAngle import *
class G96Angle(AbstractAngle):
@accepts_compatible_units(None, None, None, units.degrees, units.kilojoules_per_mole)
def __init__(self, atom1, atom2, atom3, theta, k):
"""
"""
AbstractAngle.__init__(self, atom1, atom2, atom3)
self.theta = theta
self.k = k
def getForceParameters(self):
return (self.atom1, self.atom2, self.atom3, self.theta, self.k)
    def __repr__(self):
        # __repr__ must return a string; the original printed and returned None
        return '%s %s %s %s %s' % (self.atom1, self.atom2, self.atom3, self.theta, self.k)
    def __str__(self):
        return '%s %s %s %s %s' % (self.atom1, self.atom2, self.atom3, self.theta, self.k)
| [
"[email protected]"
] | |
e5d3734a72990e8998c1c1bc2a826d2fa314d7bc | eefb06b0d8c8c98c1e9cfc4c3852d5c453eb5429 | /data/input/Azure/azure-sdk-for-python/azure-mgmt-authorization/azure/mgmt/authorization/operations/role_assignments_operations.py | 414c73323fea26bfe65a1e3146bf7f80516e3bfa | [] | no_license | bopopescu/pythonanalyzer | db839453bde13bf9157b76e54735f11c2262593a | 8390a0139137574ab237b3ff5fe8ea61e8a0b76b | refs/heads/master | 2022-11-22T02:13:52.949119 | 2019-05-07T18:42:52 | 2019-05-07T18:42:52 | 282,079,884 | 0 | 0 | null | 2020-07-23T23:46:09 | 2020-07-23T23:46:08 | null | UTF-8 | Python | false | false | 30,973 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft and contributors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.pipeline import ClientRawResponse
from msrestazure.azure_exceptions import CloudError
import uuid
from .. import models
class RoleAssignmentsOperations(object):
"""RoleAssignmentsOperations operations.
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
"""
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self.config = config
def list_for_resource(
self, resource_group_name, resource_provider_namespace, parent_resource_path, resource_type, resource_name, filter=None, custom_headers={}, raw=False, **operation_config):
"""
Gets role assignments of the resource.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param resource_provider_namespace: Resource identity.
:type resource_provider_namespace: str
:param parent_resource_path: Resource identity.
:type parent_resource_path: str
:param resource_type: Resource identity.
:type resource_type: str
:param resource_name: Resource identity.
:type resource_name: str
:param filter: The filter to apply on the operation.
:type filter: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: :class:`RoleAssignmentPaged
<azure.mgmt.authorization.models.RoleAssignmentPaged>`
"""
def internal_paging(next_link=None, raw=False):
if not next_link:
# Construct URL
                url = '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{parentResourcePath}/{resourceType}/{resourceName}/providers/Microsoft.Authorization/roleAssignments'
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'resourceProviderNamespace': self._serialize.url("resource_provider_namespace", resource_provider_namespace, 'str'),
'parentResourcePath': self._serialize.url("parent_resource_path", parent_resource_path, 'str', skip_quote=True),
'resourceType': self._serialize.url("resource_type", resource_type, 'str', skip_quote=True),
'resourceName': self._serialize.url("resource_name", resource_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
if filter is not None:
query_parameters['$filter'] = self._serialize.query("filter", filter, 'str')
query_parameters['api-version'] = self._serialize.query("self.config.api_version", self.config.api_version, 'str')
else:
url = next_link
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(
request, header_parameters, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
return response
# Deserialize response
deserialized = models.RoleAssignmentPaged(internal_paging, self._deserialize.dependencies)
if raw:
header_dict = {}
client_raw_response = models.RoleAssignmentPaged(internal_paging, self._deserialize.dependencies, header_dict)
return client_raw_response
return deserialized
def list_for_resource_group(
self, resource_group_name, filter=None, custom_headers={}, raw=False, **operation_config):
"""
Gets role assignments of the resource group.
:param resource_group_name: Resource group name.
:type resource_group_name: str
:param filter: The filter to apply on the operation.
:type filter: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: :class:`RoleAssignmentPaged
<azure.mgmt.authorization.models.RoleAssignmentPaged>`
"""
def internal_paging(next_link=None, raw=False):
if not next_link:
# Construct URL
url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Authorization/roleAssignments'
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
if filter is not None:
query_parameters['$filter'] = self._serialize.query("filter", filter, 'str')
query_parameters['api-version'] = self._serialize.query("self.config.api_version", self.config.api_version, 'str')
else:
url = next_link
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(
request, header_parameters, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
return response
# Deserialize response
deserialized = models.RoleAssignmentPaged(internal_paging, self._deserialize.dependencies)
if raw:
header_dict = {}
client_raw_response = models.RoleAssignmentPaged(internal_paging, self._deserialize.dependencies, header_dict)
return client_raw_response
return deserialized
def delete(
self, scope, role_assignment_name, custom_headers={}, raw=False, **operation_config):
"""
Delete role assignment.
:param scope: Scope.
:type scope: str
:param role_assignment_name: Role assignment name.
:type role_assignment_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: :class:`RoleAssignment
<azure.mgmt.authorization.models.RoleAssignment>`
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
"""
# Construct URL
url = '/{scope}/providers/Microsoft.Authorization/roleAssignments/{roleAssignmentName}'
path_format_arguments = {
'scope': self._serialize.url("scope", scope, 'str', skip_quote=True),
'roleAssignmentName': self._serialize.url("role_assignment_name", role_assignment_name, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.config.api_version", self.config.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.delete(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('RoleAssignment', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def create(
self, scope, role_assignment_name, properties=None, custom_headers={}, raw=False, **operation_config):
"""
Create role assignment.
:param scope: Scope.
:type scope: str
:param role_assignment_name: Role assignment name.
:type role_assignment_name: str
:param properties: Gets or sets role assignment properties.
:type properties: :class:`RoleAssignmentProperties
<azure.mgmt.authorization.models.RoleAssignmentProperties>`
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: :class:`RoleAssignment
<azure.mgmt.authorization.models.RoleAssignment>`
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
"""
parameters = models.RoleAssignmentCreateParameters(properties=properties)
# Construct URL
url = '/{scope}/providers/Microsoft.Authorization/roleAssignments/{roleAssignmentName}'
path_format_arguments = {
'scope': self._serialize.url("scope", scope, 'str', skip_quote=True),
'roleAssignmentName': self._serialize.url("role_assignment_name", role_assignment_name, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.config.api_version", self.config.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct body
body_content = self._serialize.body(parameters, 'RoleAssignmentCreateParameters')
# Construct and send request
request = self._client.put(url, query_parameters)
response = self._client.send(
request, header_parameters, body_content, **operation_config)
if response.status_code not in [201]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 201:
deserialized = self._deserialize('RoleAssignment', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def get(
self, scope, role_assignment_name, custom_headers={}, raw=False, **operation_config):
"""
Get single role assignment.
:param scope: Scope.
:type scope: str
:param role_assignment_name: Role assignment name.
:type role_assignment_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: :class:`RoleAssignment
<azure.mgmt.authorization.models.RoleAssignment>`
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
"""
# Construct URL
url = '/{scope}/providers/Microsoft.Authorization/roleAssignments/{roleAssignmentName}'
path_format_arguments = {
'scope': self._serialize.url("scope", scope, 'str', skip_quote=True),
'roleAssignmentName': self._serialize.url("role_assignment_name", role_assignment_name, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.config.api_version", self.config.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('RoleAssignment', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def delete_by_id(
self, role_assignment_id, custom_headers={}, raw=False, **operation_config):
"""
Delete role assignment.
:param role_assignment_id: Role assignment Id
:type role_assignment_id: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: :class:`RoleAssignment
<azure.mgmt.authorization.models.RoleAssignment>`
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
"""
# Construct URL
url = '/{roleAssignmentId}'
path_format_arguments = {
'roleAssignmentId': self._serialize.url("role_assignment_id", role_assignment_id, 'str', skip_quote=True)
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.config.api_version", self.config.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.delete(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('RoleAssignment', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def create_by_id(
self, role_assignment_id, properties=None, custom_headers={}, raw=False, **operation_config):
"""
Create role assignment by Id.
:param role_assignment_id: Role assignment Id
:type role_assignment_id: str
:param properties: Gets or sets role assignment properties.
:type properties: :class:`RoleAssignmentProperties
<azure.mgmt.authorization.models.RoleAssignmentProperties>`
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: :class:`RoleAssignment
<azure.mgmt.authorization.models.RoleAssignment>`
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
"""
parameters = models.RoleAssignmentCreateParameters(properties=properties)
# Construct URL
url = '/{roleAssignmentId}'
path_format_arguments = {
'roleAssignmentId': self._serialize.url("role_assignment_id", role_assignment_id, 'str', skip_quote=True)
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.config.api_version", self.config.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct body
body_content = self._serialize.body(parameters, 'RoleAssignmentCreateParameters')
# Construct and send request
request = self._client.put(url, query_parameters)
response = self._client.send(
request, header_parameters, body_content, **operation_config)
if response.status_code not in [201]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 201:
deserialized = self._deserialize('RoleAssignment', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def get_by_id(
self, role_assignment_id, custom_headers={}, raw=False, **operation_config):
"""
Get single role assignment.
:param role_assignment_id: Role assignment Id
:type role_assignment_id: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: :class:`RoleAssignment
<azure.mgmt.authorization.models.RoleAssignment>`
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
"""
# Construct URL
url = '/{roleAssignmentId}'
path_format_arguments = {
'roleAssignmentId': self._serialize.url("role_assignment_id", role_assignment_id, 'str', skip_quote=True)
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.config.api_version", self.config.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('RoleAssignment', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def list(
self, filter=None, custom_headers={}, raw=False, **operation_config):
"""
Gets role assignments of the subscription.
:param filter: The filter to apply on the operation.
:type filter: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: :class:`RoleAssignmentPaged
<azure.mgmt.authorization.models.RoleAssignmentPaged>`
"""
def internal_paging(next_link=None, raw=False):
if not next_link:
# Construct URL
url = '/subscriptions/{subscriptionId}/providers/Microsoft.Authorization/roleAssignments'
path_format_arguments = {
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
if filter is not None:
query_parameters['$filter'] = self._serialize.query("filter", filter, 'str')
query_parameters['api-version'] = self._serialize.query("self.config.api_version", self.config.api_version, 'str')
else:
url = next_link
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(
request, header_parameters, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
return response
# Deserialize response
deserialized = models.RoleAssignmentPaged(internal_paging, self._deserialize.dependencies)
if raw:
header_dict = {}
client_raw_response = models.RoleAssignmentPaged(internal_paging, self._deserialize.dependencies, header_dict)
return client_raw_response
return deserialized
def list_for_scope(
self, scope, filter=None, custom_headers={}, raw=False, **operation_config):
"""
Gets role assignments of the scope.
:param scope: Scope.
:type scope: str
:param filter: The filter to apply on the operation.
:type filter: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: :class:`RoleAssignmentPaged
<azure.mgmt.authorization.models.RoleAssignmentPaged>`
"""
def internal_paging(next_link=None, raw=False):
if not next_link:
# Construct URL
url = '/{scope}/providers/Microsoft.Authorization/roleAssignments'
path_format_arguments = {
'scope': self._serialize.url("scope", scope, 'str', skip_quote=True)
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
if filter is not None:
query_parameters['$filter'] = self._serialize.query("filter", filter, 'str')
query_parameters['api-version'] = self._serialize.query("self.config.api_version", self.config.api_version, 'str')
else:
url = next_link
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(
request, header_parameters, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
return response
# Deserialize response
deserialized = models.RoleAssignmentPaged(internal_paging, self._deserialize.dependencies)
if raw:
header_dict = {}
client_raw_response = models.RoleAssignmentPaged(internal_paging, self._deserialize.dependencies, header_dict)
return client_raw_response
return deserialized
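# Usage sketch (not part of the generated code): the attribute name `role_assignments`
# on the enclosing management client is an assumption for illustration only.
#
#     for assignment in client.role_assignments.list(filter="atScope()"):
#         print(assignment.id)
#
#     client.role_assignments.create(scope, role_assignment_name, properties=properties)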
| [
"[email protected]"
] | |
6d23fa78b362bc10224a8f1806723888bb43209e | d725745f5c6b4ad99399aa50f368db39f5046f81 | /angr_platforms/ebpf/arch_ebpf.py | ce69367fd4bf1d4c59ced9909b3599b4fed7282f | [
"BSD-2-Clause"
] | permissive | angr/angr-platforms | 6816d777ea4696af05290613a490e91b8daa79ea | 06db4e6a594af47aaeb0a5071f2cdb9a8c30f7f5 | refs/heads/master | 2023-03-05T10:15:20.783462 | 2023-02-20T18:38:12 | 2023-02-20T18:38:12 | 86,003,468 | 60 | 28 | BSD-2-Clause | 2023-08-31T19:50:46 | 2017-03-23T22:28:04 | Python | UTF-8 | Python | false | false | 1,582 | py | from archinfo import Arch, Register, RegisterOffset, register_arch
class ArchExtendedBPF(Arch):
"""Extended BPF arch."""
name = "eBPF"
bits = 64
vex_arch = None
qemu_name = "eBPF"
ida_processor = "eBPF"
max_inst_bytes = 8
instruction_alignment = 1
register_list = [
# return value from in-kernel function, and exit value for eBPF
Register(name="R0", vex_offset=0, size=8),
# arguments from eBPF program to in-kernel function
Register(name="R1", vex_offset=8, size=8),
Register(name="R2", vex_offset=16, size=8),
Register(name="R3", vex_offset=24, size=8),
Register(name="R4", vex_offset=32, size=8),
Register(name="R5", vex_offset=40, size=8),
# callee-saved registers that in-kernel function will preserve
Register(name="R6", vex_offset=48, size=8),
Register(name="R7", vex_offset=56, size=8),
Register(name="R8", vex_offset=64, size=8),
Register(name="R9", vex_offset=72, size=8),
# read-only frame pointer to access stack
Register(
name="R10",
vex_offset=80,
size=8,
default_value=(Arch.initial_sp, True, (Arch.initial_sp, "stack")),
),
# syscall number extracted from instr
Register(name="syscall", vex_offset=88, size=8),
Register(name="ip", vex_offset=96, size=8),
Register(name="ip_at_syscall", vex_offset=104, size=8),
]
bp_offset = RegisterOffset(80)
register_arch(["eBPF", "em_bpf"], 64, "any", ArchExtendedBPF)
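# Usage sketch (assumes archinfo's arch_from_id resolves the names registered above):
#
#     from archinfo import arch_from_id
#     arch = arch_from_id("eBPF")
#     arch.registers["R10"]   # -> (80, 8): VEX offset and size of the frame pointer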
| [
"[email protected]"
] | |
ff642b72630d63d5b705af2645e7dff9048fd4f1 | 2aba62d66c2c622bdc148cef451da76cae5fd76c | /exercise/learn_python_dm2039/ch16/ch16_29.py | 76ee38bf4b12556373fe7be46cb6fc70a6f66f03 | [] | no_license | NTUT-109AB8011/crawler | 6a76de2ab1848ebc8365e071e76c08ca7348be62 | a703ec741b48d3af615a757fed7607b1f8eb66a6 | refs/heads/master | 2023-03-26T22:39:59.527175 | 2021-03-30T03:29:22 | 2021-03-30T03:29:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 368 | py | # ch16_29.py
import re
# Test 1: search for a string that is digits from beginning to end
msg = '09282028222'
pattern = '^\d+$'
txt = re.findall(pattern,msg)       # return the match result
print(txt)
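# expected output: ['09282028222'] -- the whole string is digits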
# Test 2: search for a string that is digits from beginning to end
msg = '0928tuyr990'
pattern = '^\d+$'
txt = re.findall(pattern,msg)       # return the match result
print(txt)
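# expected output: [] -- the string contains non-digit characters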
| [
"[email protected]"
] | |
0aae5c8f8123a150649e4799b4773a3c13888325 | b37e2bc89e3e3191194a6060e4bf7cef71482695 | /train_vae.py | 508b92a2dc58a2ee93c809857e5c895b577f7518 | [
"MIT"
] | permissive | biandh/DALLE-pytorch | b10bbc590c54b04fa60d2653d6934db86ee2633a | c2ccaa48b43fbb5c29b833c8cae082a797ffc8b5 | refs/heads/main | 2023-03-10T00:11:50.750174 | 2021-02-20T04:01:55 | 2021-02-20T04:01:55 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,489 | py | import math
from math import sqrt
import argparse
import torch
from torch.optim import Adam
from torch.optim.lr_scheduler import ExponentialLR
# vision imports
from torchvision import transforms as T
from torch.utils.data import DataLoader
from torchvision.datasets import ImageFolder
from torchvision.utils import make_grid, save_image
# dalle classes
from dalle_pytorch import DiscreteVAE
# argument parsing
parser = argparse.ArgumentParser()
parser.add_argument('--image_folder', type = str, required = True,
help='path to your folder of images for learning the discrete VAE and its codebook')
parser.add_argument('--image_size', type = int, required = False, default = 128,
help='image size')
args = parser.parse_args()
# constants
IMAGE_SIZE = args.image_size
IMAGE_PATH = args.image_folder
EPOCHS = 20
BATCH_SIZE = 8
LEARNING_RATE = 1e-3
LR_DECAY_RATE = 0.98
NUM_TOKENS = 8192
NUM_LAYERS = 2
NUM_RESNET_BLOCKS = 2
SMOOTH_L1_LOSS = False
EMB_DIM = 512
HID_DIM = 256
KL_LOSS_WEIGHT = 0
STARTING_TEMP = 1.
TEMP_MIN = 0.5
ANNEAL_RATE = 1e-6
NUM_IMAGES_SAVE = 4
# data
ds = ImageFolder(
IMAGE_PATH,
T.Compose([
T.Resize(IMAGE_SIZE),
T.CenterCrop(IMAGE_SIZE),
T.ToTensor(),
T.Normalize((0.5,) * 3, (0.5,) * 3)
])
)
dl = DataLoader(ds, BATCH_SIZE, shuffle = True)
vae_params = dict(
image_size = IMAGE_SIZE,
num_layers = NUM_LAYERS,
num_tokens = NUM_TOKENS,
codebook_dim = EMB_DIM,
hidden_dim = HID_DIM,
num_resnet_blocks = NUM_RESNET_BLOCKS
)
vae = DiscreteVAE(
**vae_params,
smooth_l1_loss = SMOOTH_L1_LOSS,
kl_div_loss_weight = KL_LOSS_WEIGHT
).cuda()
assert len(ds) > 0, 'folder does not contain any images'
print(f'{len(ds)} images found for training')
# optimizer
opt = Adam(vae.parameters(), lr = LEARNING_RATE)
sched = ExponentialLR(optimizer = opt, gamma = LR_DECAY_RATE)
# weights & biases experiment tracking
import wandb
wandb.config.num_tokens = NUM_TOKENS
wandb.config.smooth_l1_loss = SMOOTH_L1_LOSS
wandb.config.num_resnet_blocks = NUM_RESNET_BLOCKS
wandb.config.kl_loss_weight = KL_LOSS_WEIGHT
wandb.init(project='dalle_train_vae')
# starting temperature
global_step = 0
temp = STARTING_TEMP
for epoch in range(EPOCHS):
for i, (images, _) in enumerate(dl):
images = images.cuda()
loss, recons = vae(
images,
return_loss = True,
return_recons = True,
temp = temp
)
opt.zero_grad()
loss.backward()
opt.step()
logs = {}
if i % 100 == 0:
k = NUM_IMAGES_SAVE
with torch.no_grad():
codes = vae.get_codebook_indices(images[:k])
hard_recons = vae.decode(codes)
images, recons = map(lambda t: t[:k], (images, recons))
images, recons, hard_recons, codes = map(lambda t: t.detach().cpu(), (images, recons, hard_recons, codes))
images, recons, hard_recons = map(lambda t: make_grid(t, nrow = int(sqrt(k)), normalize = True, range = (-1, 1)), (images, recons, hard_recons))
logs = {
**logs,
'sample images': wandb.Image(images, caption = 'original images'),
'reconstructions': wandb.Image(recons, caption = 'reconstructions'),
'hard reconstructions': wandb.Image(hard_recons, caption = 'hard reconstructions'),
'codebook_indices': wandb.Histogram(codes),
'temperature': temp
}
save_obj = {
'hparams': vae_params,
'weights': vae.state_dict()
}
torch.save(save_obj, f'vae.pt')
wandb.save('./vae.pt')
# temperature anneal
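            # decay the sampling temperature exponentially with the global step, clamped below at TEMP_MIN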
temp = max(temp * math.exp(-ANNEAL_RATE * global_step), TEMP_MIN)
# lr decay
sched.step()
if i % 10 == 0:
lr = sched.get_last_lr()[0]
print(epoch, i, f'lr - {lr:6f} loss - {loss.item()}')
logs = {
**logs,
'epoch': epoch,
'iter': i,
'loss': loss.item(),
'lr': lr
}
wandb.log(logs)
global_step += 1
# save final vae and cleanup
save_obj = {
'hparams': vae_params,
'weights': vae.state_dict()
}
torch.save(save_obj, 'vae-final.pt')
wandb.save('./vae-final.pt')
wandb.finish()
| [
"[email protected]"
] | |
e211d58c9098c0d358cbab093986f7c079d0f6cf | c24fef69a42ac1da33c892eb85c955acc743354c | /multithreading_multiprocess/sell_ticket_with_deadlock.py | e37cca3733afac501890904587b122309c47a7c3 | [] | no_license | Arithmeticjia/leetcode-python | e3c1d5c5a2733c56637ee2fb51222c7465dc6425 | a24869d88cb41e53fb0abe482ba87dd1e54b2167 | refs/heads/master | 2021-08-15T21:55:11.511687 | 2021-01-03T03:50:54 | 2021-01-03T03:50:54 | 237,393,853 | 1 | 0 | null | null | null | null | GB18030 | Python | false | false | 641 | py | # coding:gbk
import threading
import time
total = 5  # total number of tickets
lock = threading.Lock()  # create a non-reentrant mutex lock
# rlock = threading.RLock()  # create a reentrant mutex lock
def sale():
global total
lock.acquire()
lock.acquire()
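    # acquiring the same non-reentrant Lock twice blocks forever -- this is the deadlock the script demonstrates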
time.sleep(1)
    print('Selling ticket number %s\n' % total)
time.sleep(1)
total -= 1
lock.release()
lock.release()
if __name__ == '__main__':
threads = []
    for i in range(5):  # create 5 threads, representing 5 ticket windows
t = threading.Thread(target=sale, args=())
threads.append(t)
    for t in threads:  # start selling tickets
t.start()
| [
"[email protected]"
] | |
2c831da32af3407d2f3ad1ee95dcb867b48d2bb7 | 2a54e8d6ed124c64abb9e075cc5524bb859ba0fa | /.history/3-OO-Python/2-attributes-methods_20200415002528.py | 405b115b79f2213ab1fda091ba07277c5d9961aa | [] | no_license | CaptainStorm21/Python-Foundation | 01b5fbaf7a913506518cf22e0339dd948e65cea1 | a385adeda74f43dd7fb2d99d326b0be23db25024 | refs/heads/master | 2021-05-23T01:29:18.885239 | 2020-04-23T19:18:06 | 2020-04-23T19:18:06 | 253,171,611 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 580 | py | # OOP
class PlayerCharacter :
# class object attribute - it is static
membership = True
# constructor method / init method
def __init__(self, name, age):
self.name = name #attributes
self.age = age
def run (self):
print('running')
return 'Workout is done'
player1 = PlayerCharacter('Josh', 23)
player2 = PlayerCharacter('Morgan', 22)
# player2.attack=('Player is attacking !')
#blueprint of the object
# help(player1)
# help(list)
#attributes - dynamic data - F.E, name, age
print(player1.membership) | [
"[email protected]"
] | |
1fdaa55271242cbe787943c39bc4a90874b5634e | 8b1cab0497815d6c927a9dd64cdca3a58b890286 | /fabfile-deploy-natively.py | 688fd1f0da0f3f63243f5e6eced3fc614bf2f71b | [] | no_license | dschien/ep_deploy | e29d951c7e03dff5c38f8db7df08e7e6f4a4013c | ad4f67cf50210e30e8eeb29eac77dc0c8174c101 | refs/heads/master | 2021-01-20T19:39:19.026050 | 2016-08-12T15:44:42 | 2016-08-12T15:44:42 | 60,100,623 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,397 | py | from fabric import colors
import os, time, boto
import ConfigParser
import boto.ec2
from fabric.contrib.files import exists
__author__ = 'schien'
from fabric.api import *
GIT_ORIGIN = "[email protected]"
# The git repo is the repo we should clone
GIT_REPO = "dschien/ep_site.git"
# env.hosts = ['52.18.118.168']
DB_ENDPOINT = 'ep.cg45k2qrlqro.eu-west-1.rds.amazonaws.com'
# The hosts we need to configure
# HOSTS = ["ec2-52-17-239-200.eu-west-1.compute.amazonaws.com"]
CONFIG_FILE = "ep.cfg"
config = ConfigParser.RawConfigParser()
config.read(CONFIG_FILE)
env.forward_agent = True
env.hosts = [config.get('ec2', 'host')]
AWS_ACCESS_KEY_ID = config.get('ec2', 'AWS_ACCESS_KEY_ID')
AWS_SECRET_ACCESS_KEY = config.get('ec2', 'AWS_SECRET_ACCESS_KEY')
# from django.utils.crypto import get_random_string
# chars = 'abcdefghijklmnopqrstuvwxyz0123456789!@#$%^&*(-_=+)'
secret_key = config.get('ec2', 'secret_key')
#### Environments
def production():
"Setup production settings"
# check_or_start_instance()
env.repo = ("ep-venv", "origin", "release")
env.virtualenv, env.parent, env.branch = env.repo
env.base = "/opt"
env.user = "ubuntu"
env.git_origin = GIT_ORIGIN
env.git_repo = GIT_REPO
env.dev_mode = False
env.key_filename = '~/.ssh/ep-host.pem'
env.forward_agent = True
def test():
run('sudo apt-get update')
def sub_git_clone():
"""
Clones a repository into the virtualenv at /project
:return:
"""
print colors.cyan('Clone repo...')
run(
"git clone %(git_origin)s:%(git_repo)s" % env)
def install_make_tools():
run('sudo apt-get update')
run('sudo apt-get -y install build-essential')
def install_py35():
run('sudo add-apt-repository ppa:fkrull/deadsnakes')
run('sudo apt-get update')
run('sudo apt-get -y install python3.5')
run('sudo apt-get -y install python3.5-venv')
run('sudo apt-get -y install python3.5-dev')
run('sudo apt-get -y install libfreetype6-dev')
run('sudo apt-get -y install libxft-dev')
run('sudo apt-get -y install libpq-dev')
run('sudo apt-get -y install lib32ncurses5-dev')
run('sudo apt-get -y install git')
run('sudo apt-get -y install supervisor')
run('echo -e "Host github.com\n\tStrictHostKeyChecking no\n" >> ~/.ssh/config')
def install_webstack():
run('sudo apt-get -y install nginx')
run('sudo mkdir /etc/nginx/ssl')
run(
'sudo openssl req -x509 -nodes -days 365 -newkey rsa:2048 -keyout /etc/nginx/ssl/nginx.key -out /etc/nginx/ssl/nginx.crt -subj "/C=UK/ST=Avon/L=Bristol/O=UoB/OU=CS/CN=cs.bris.ac.uk"')
run('sudo service nginx restart')
run('sudo update-rc.d nginx defaults')
def config_webstack():
run('sudo mv /etc/nginx/nginx.conf /etc/nginx/nginx.conf.old')
run('sudo cp ep_site/etc_services_conf/nginx.conf /etc/nginx/nginx.conf')
run('sudo chown root:root /etc/nginx/nginx.conf')
run('sudo cp ep_site/etc_services_conf/nginx-app-proxy.conf /etc/nginx/sites-available/')
if exists('/etc/nginx/sites-enabled/default'):
run('sudo rm /etc/nginx/sites-enabled/default')
if not exists('/etc/nginx/sites-enabled/nginx-app-proxy.conf'):
run(
'sudo ln -s /etc/nginx/sites-available/nginx-app-proxy.conf /etc/nginx/sites-enabled/nginx-app-proxy.conf' % env)
run('sudo chown root:root /etc/nginx/sites-available/nginx-app-proxy.conf')
# gunicorn
if not exists('/var/log/gunicorn/'):
run('sudo mkdir /var/log/gunicorn/')
if not exists('ep_site/log/'):
run('sudo mkdir ep_site/log/')
if not exists('ep_site/log/gunicorn'):
run('sudo mkdir ep_site/log/gunicorn')
run('sudo cp ep_site/etc_services_conf/gunicorn-supervisord.conf /etc/supervisor/conf.d/gunicorn-supervisord.conf')
run('sudo cp ep_site/etc_services_conf/supervisord-init /etc/init.d/supervisord')
run('sudo chmod +x /etc/init.d/supervisord')
run('sudo update-rc.d supervisord defaults')
def django_deploy():
with prefix('source ep-venv/bin/activate'):
# with cd('ep_site'):
run('sudo touch ep_site/log/ep.log')
run('sudo chown -R ubuntu ep_site/log')
run('cp ep_site/ep_site/local_settings.template.py ep_site/ep_site/local_settings.py')
print secret_key
run('sed -i -e "s/INSERT_SECRET_KEY/%(secret_key)s/g" ep_site/ep_site/local_settings.py' % {
'secret_key': secret_key})
run('sed -i -e "s/INSERT_ACCKEY_HERE/%(secret_key)s/g" ep_site/ep_site/local_settings.py' % {
'secret_key': AWS_ACCESS_KEY_ID})
run('sed -i -e "s/INSERT_SECKEY_HERE/%(secret_key)s/g" ep_site/ep_site/local_settings.py' % {
'secret_key': AWS_SECRET_ACCESS_KEY})
def django_update_actions():
with prefix('source ep-venv/bin/activate'):
run('python ep_site/manage.py collectstatic -v 0 --noinput')
run('python ep_site/manage.py migrate')
def install_numpy():
with prefix('source ep-venv/bin/activate'):
run('pip install "ipython[notebook]"')
def update():
with cd('ep_site'):
run('git pull')
def clone_git():
with cd('/opt'):
        # assumed intent: clone the project repo, mirroring sub_git_clone() above
        run("git clone %(git_origin)s:%(git_repo)s" % env)
def deploy():
install_make_tools()
install_py35()
install_rabbit()
clone_git()
install_py_deps()
def install_rabbit():
run('sudo apt-get -y install rabbitmq-server')
def create_virtualenv():
run('pyvenv-3.5 ep-venv')
def install_py_deps():
with prefix('source ep-venv/bin/activate'):
run('pip install -r requirements.txt')
# def copy_projects():
# with cd('coms20805'):
# run('git pull')
# run('cp -R client_projects_2015/ /home/web/HTML/Teaching/Resources/COMS20805')
def sub_get_requirements():
"Gets the requirements for the project"
sudo("cd %(base)s/%(virtualenv)s; source bin/activate; pip install -r project/requirements.txt" % env)
def get_own_ip():
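    """Return this machine's public IP address as reported by checkip.dyndns.com."""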
from urllib import urlopen
import re
data = str(urlopen('http://checkip.dyndns.com/').read())
# data = '<html><head><title>Current IP Check</title></head><body>Current IP Address: 65.96.168.198</body></html>\r\n'
return re.compile(r'Address: (\d+\.\d+\.\d+\.\d+)').search(data).group(1)
def check_or_start_instance():
"""
    Check that the instance security group allows SSH (port 22) connections from my IP
    If it does not, add a new rule
    If the machine is running, set env.host
Else start
:return:
"""
MY_AMI = config.get('ec2', 'AMI')
SECURITY_GROUP = config.get('ec2', 'SECURITY_GROUP')
KEY_PATH = config.get('ec2', 'KEY_PATH')
INSTANCE_TYPE = config.get('ec2', 'INSTANCE_TYPE')
REGION = config.get('ec2', 'REGION')
os.environ["AWS_ACCESS_KEY_ID"] = config.get('ec2', 'AWS_ACCESS_KEY_ID')
os.environ["AWS_SECRET_ACCESS_KEY"] = config.get('ec2', 'AWS_SECRET_ACCESS_KEY')
conn = boto.ec2.connect_to_region(REGION)
security_groups = conn.get_all_security_groups()
if 'own_ip' not in env:
env.own_ip = get_own_ip()
# check ssh access from own IP is allowed
try:
[ep_group] = [x for x in security_groups if x.name == SECURITY_GROUP]
except ValueError:
pass
try:
# iterate over rules (grouped by protocol et al)
[own_cidr_ip_grant] = [rule for rule in ep_group.rules if
# iterate over grants inside rules (IP ranges)
filter(lambda grant: grant.cidr_ip == env.own_ip + '/32', rule.grants)]
except ValueError:
print(
'no rule for TCP/22 with own IP %(own_ip)s found in security group: %(sgroup)s' % {'own_ip': env.own_ip,
'sgroup': SECURITY_GROUP})
# ep_group.authorize('tcp', 22, 22, env.own_ip + '/32')
image = conn.get_image(MY_AMI)
try:
[ep_host_key_pair] = [x for x in conn.get_all_key_pairs() if x.name == 'ep-host']
except ValueError:
# this probably means the key is not defined
# get the first one in the belt for now:
print "GeoNode file not found in the server"
ep_host_key_pair = conn.get_all_key_pairs()[0]
reservations = conn.get_all_instances(filters={"tag:Name": "ep"})
instances = [i for r in reservations for i in r.instances]
instance = instances[0]
instance.start()
print "Firing up instance"
# Give it 10 minutes to appear online
for i in range(120):
time.sleep(5)
instance.update()
print instance.state
if instance.state == "running":
break
if instance.state == "running":
dns = instance.dns_name
print "Instance up and running at %s" % dns
config.set('ec2', 'HOST', dns)
config.set('ec2', 'INSTANCE', instance.id)
env.hosts = [dns, ]
env.user = config.get('ec2', 'USER')
env.key_filename = KEY_PATH
with open(CONFIG_FILE, 'wb') as configfile:
config.write(configfile)
print "ssh -i %s ubuntu@%s" % (KEY_PATH, dns)
print "Terminate the instance via the web interface %s" % instance
def create_new_instance(INSTANCE_TYPE, ep_group, ep_host_key_pair, image):
reservation = image.run(security_groups=[ep_group, ], key_name=ep_host_key_pair.name, instance_type=INSTANCE_TYPE)
instance = reservation.instances[0]
return instance
| [
"[email protected]"
] | |
8cde96fc88144ab64f34b48cdae3f18a63571685 | 3b31c39ab8269aa2d7060051db6ecab97e49aa8d | /mysite2/medicine/views.py | bddab105eba892c31415956a7d26473169709ddd | [] | no_license | seiya0723/medicine_checker_04 | ce2890890955f4c7ab1eef4a71b657963945d6ea | cd6c11e25b61a056097fd02ad37a8f11e7db7e31 | refs/heads/master | 2023-06-11T16:53:08.007479 | 2021-07-03T04:44:24 | 2021-07-03T04:44:24 | 382,526,844 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,515 | py | from django.shortcuts import render,redirect
from django.views import View
# Query builder (lets us build up complex search conditions)
from django.db.models import Q
# Used to return JSON responses for the JavaScript front end
from django.http.response import JsonResponse
# Renders a template and returns the result as a string
from django.template.loader import render_to_string
from .models import Medicine
# Imported because regular expressions are used below
import re
class IndexView(View):
def get(self, request, *args, **kwargs):
        # Flush out medicines that have nothing written about them at all.
        medicines = Medicine.objects.filter(effect="",caution="",dosage="",side_effect="")
        print(len(list(medicines.values())))
        # Medicines whose content is identical and that differ only in the developing company
        # (strip the part enclosed in 「」, then compare; matching entries are displayed or deleted).
        medicines = Medicine.objects.all().exclude(effect="",caution="",dosage="",side_effect="").order_by("name")
        duplicate = 0
        old_name = ""
        for medicine in medicines:
            new_name = medicine.name
            #print("before: " + new_name)
            #print("after: " + re.sub("「.*」","",new_name))
            new_name = re.sub("「.*」","",new_name)
            if old_name == new_name:
                #print("duplicate")
                #TODO: record the ID of the duplicated medicine here.
                duplicate += 1
            old_name = new_name
        print("number of duplicates: " + str(duplicate))
        # Roughly 22,200 rows → excluding duplicates and medicines with no description → roughly 9,400 rows.
        # That also makes it feasible to run on Heroku.
return render(request,"medicine/index.html")
index = IndexView.as_view()
# Returns the response as JSON.
class SearchView(View):
def get(self, request, *args, **kwargs):
json = {"error":True}
if "search" in request.GET:
            #(1) If the keyword is empty or whitespace only, redirect back to the page
            if request.GET["search"] == "" or request.GET["search"].isspace():
                # The request arrives via Ajax rather than a normal redirect, so return JSON instead.
                #return redirect("medicine:index")
                return JsonResponse(json)
            # If none of the checkboxes is checked, do not search (every record would be returned and slow things down)
            if "name" not in request.GET and "effect" not in request.GET and "caution" not in request.GET and "dosage" not in request.GET and "side_effect" not in request.GET:
                # The request arrives via Ajax rather than a normal redirect, so return JSON instead.
                #return redirect("medicine:index")
                return JsonResponse(json)
            #(2) Turn the keywords into a list (to support multiple search terms)
search = request.GET["search"].replace(" "," ")
search_list = search.split(" ")
            #(3) Build the query
query = Q()
for word in search_list:
if word == "":
continue
                #TIPS: use & for an AND search and | for an OR search.
if "name" in request.GET:
query |= Q(name__contains=word)
if "effect" in request.GET:
query |= Q(effect__contains=word)
if "caution" in request.GET:
query |= Q(caution__contains=word)
if "dosage" in request.GET:
query |= Q(dosage__contains=word)
if "side_effect" in request.GET:
query |= Q(side_effect__contains=word)
            #(4) Execute the query that was built
medicines = Medicine.objects.filter(query)
else:
medicines = []
context = { "medicines":medicines }
        # Render the search-results template and get it back as a string.
        content = render_to_string("medicine/search.html",context,request)
        # Set the error flag to False and hand the search-result HTML (a string) to the JS side as a JSON response.
json["error"] = False
json["content"] = content
return JsonResponse(json)
search = SearchView.as_view()
# Returns the data for a single medicine when it is stacked into the table.
class SingleView(View):
def get(self, request, pk, *args, **kwargs):
print("single")
json = { "error":True }
        # Pull one medicine record by pk; it is returned as JSON, so rework it into a dict.
        medicine = Medicine.objects.filter(id=pk).first()
        # Return an error if no medicine record exists.
        if not medicine:
            return JsonResponse(json)
        # Convert to a dict so it can be sent in JSON format
dic = {}
dic["name"] = medicine.name
dic["effect"] = medicine.effect
dic["caution"] = medicine.caution
dic["dosage"] = medicine.dosage
dic["side_effect"] = medicine.side_effect
json["error"] = False
json["medicine"] = dic
return JsonResponse(json)
single = SingleView.as_view()
| [
"seiya@asahina"
] | seiya@asahina |
e676704593b04666cd09b4f9bc98470a844ee2c9 | 401abd0e20319927ef2d6aba5940f1e5ee0b4ee5 | /cwd.py | 5449b38ff831a97bf3788730e8d4a44bba4d7eae | [] | no_license | OctopusHugz/checker_hack_day | a821dc789212fbd5c21de34e50f6817c25bdca8a | fcab122bfc784720d41440357b3a6cd79f4e0986 | refs/heads/master | 2023-06-26T01:41:00.744815 | 2021-07-27T17:39:58 | 2021-07-27T17:39:58 | 300,801,217 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,194 | py | #!/usr/bin/env python3
from os import getcwd, path
from projects import (low_list, high_list, sedo_list,
interview_list, web_front_end_list, web_back_end_list,
web_react_list)
def pid_from_cwd():
"""Returns a project's ID based on the current working directory"""
cwd = path.basename(getcwd())
projects = {
"low": low_list,
"high": high_list,
"sedo": sedo_list,
"interview": interview_list,
"web_front_end": web_front_end_list,
"web_back_end": web_back_end_list,
"web_react": web_react_list
}
all_projects = list(projects.values())
    # all_projects is a list of lists of dicts, where each dict is a project
for track in all_projects:
# track is a list of dicts where each dict is a project in that track
for project in track:
project_dir = list(project.values())[0]
project_id = list(project.keys())[0]
if cwd == project_dir:
return project_id
def parent_from_cwd():
"""Returns the parent directory based on the current working directory"""
parent = getcwd().split('/')[-2]
return parent
| [
"[email protected]"
] | |
6aa5a163b90b39c0ac27f13bb82e2ae042d17542 | 36222fc73431a89d41a342aa176158b8868bc41a | /accounts/migrations/0051_auto_20170412_1628.py | 3be2038a4dd7ec662e47fd8ab1e7a964244568c5 | [] | no_license | dxviidmg/CITNOVA | 9e3f555e192d4e875fc4b990b70c52e3f6fc8bc0 | f18d6e74082d0ddf58eaba439d5e20f2d48af7b9 | refs/heads/master | 2021-01-18T23:34:41.179481 | 2017-05-20T13:59:11 | 2017-05-20T13:59:11 | 87,117,216 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 860 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-04-12 21:28
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('accounts', '0050_auto_20170412_1622'),
]
operations = [
migrations.AlterField(
model_name='expediente',
name='tipo',
field=models.CharField(choices=[('P. Física', 'P. Física'), ('P. Moral', 'P. Moral')], max_length=20),
),
migrations.AlterField(
model_name='perfil',
name='grado_profesional',
field=models.CharField(blank=True, choices=[('Mtro(a).', 'Mtro(a).'), ('Lic.', 'Lic.'), ('Tec.', 'Tec.'), ('Dr.', 'Dr.'), ('Arq.', 'Arq.'), ('Ing.', 'Ing.'), ('T. S. U.', 'T. S. U.')], default='C.', max_length=30),
),
]
| [
"[email protected]"
] | |
c3685030a33aff5508370080b26a5986fe2b03b1 | 31d10cf8f83fd04281f0e108ba0c9ed193f9ed7b | /0x01-python-if_else_loops_functions/6-print_comb3.py~ | 8a959951e4859dd39d222a64ffa40480d74aa967 | [] | no_license | RodrigoSierraV/holbertonschool-higher_level_programming | 822d41587c6336d363dd41609960a7ca23700fc2 | 7c671b5c0c46e2def8ccab760d7ceca1ca07702f | refs/heads/master | 2020-05-18T03:37:20.032499 | 2019-10-23T02:25:34 | 2019-10-23T02:25:34 | 184,111,862 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 234 | #!/usr/bin/python3
for i in range(0, 9 + 1):
for b in range(0, 9 + 1):
if b > i and (b + i) < 17:
print("{:d}{:d}, ".format(i, b), end="")
        elif b > i and (b + i) == 17:
print("{:d}{:d}".format(i, b))
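# with b > i the printed pairs run 01, 02, ..., 78, 79, 89 -- only the final pair 89 gets the newline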
| [
"[email protected]"
] | ||
1a48019324f811a2932ab415786dec956df484f8 | f36b733f9c24d4cabd0d3354e0344094fbf3c026 | /a10_saltstack/helpers/helper_modules/a10_cgnv6_lsn_port_overloading.py | 109481f957d3b7d3b6f10485242a324a36f8fd11 | [
"Apache-2.0"
] | permissive | a10networks/a10-saltstack | 08e13647e0187b09500ed3d9053ae06e7e808746 | 0d86043b1d09e75ea170e72fac5068254fc4037c | refs/heads/master | 2021-03-19T16:11:14.211706 | 2019-07-24T17:18:04 | 2019-07-24T17:18:04 | 123,501,933 | 2 | 3 | null | 2019-07-24T17:18:05 | 2018-03-01T22:55:53 | Python | UTF-8 | Python | false | false | 1,442 | py | # Copyright 2019 A10 Networks
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Hacky way of having access to object properties for evaluation
AVAILABLE_PROPERTIES = ["global","tcp","udp",]
REF_PROPERTIES = {
"global": "/axapi/v3/cgnv6/lsn/port-overloading/global",
"tcp": "/axapi/v3/cgnv6/lsn/port-overloading/tcp",
"udp": "/axapi/v3/cgnv6/lsn/port-overloading/udp",
}
MODULE_NAME = "port-overloading"
PARENT_KEYS = []
CHILD_KEYS = []
def new_url(**kwargs):
"""Return the URL for creating a resource"""
# To create the URL, we need to take the format string and return it with no params
url_base = "/axapi/v3/cgnv6/lsn/port-overloading"
f_dict = {}
return url_base.format(**f_dict)
def existing_url(**kwargs):
"""Return the URL for an existing resource"""
# Build the format dictionary
url_base = "/axapi/v3/cgnv6/lsn/port-overloading"
f_dict = {}
return url_base.format(**f_dict) | [
"[email protected]"
] | |
7febc115f14bfed876325b00ff64fcedfa4ca80e | 56e626db1b367f30e6978f5a5d573618823e9b6c | /train/train_multi_class_classify.py | 90931424639fe42f6842c9bf8a755ce8c6cee098 | [
"MIT"
] | permissive | witnesslq/transwarp-nlp | d9bdf53b8ded3ac07196b4ba82346429caeb5be8 | fc324253e9eff7d9d365ebb85ba81680bbe86f5f | refs/heads/master | 2021-01-21T09:39:31.373777 | 2017-05-11T12:51:43 | 2017-05-11T12:51:43 | 91,663,587 | 6 | 2 | null | 2017-05-18T07:39:42 | 2017-05-18T07:39:42 | null | UTF-8 | Python | false | false | 4,626 | py | # -*- coding: utf-8 -*-
import cPickle
import numpy as np
import tensorflow as tf
import os, time
from transwarpnlp.multi_class_classify.cnn_config import CnnConfig
from transwarpnlp.multi_class_classify import model_cnn
config = CnnConfig()
pkg_path = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
def train_cnn_classfier(train_path):
print("loading data...")
x = cPickle.load(open(os.path.join(train_path, "model/mr.txt"), "rb"))
    # Load the preprocessed data: revs is a list of {"y": label, "text": "word1 word2 ..."}
# word_idx_map["word"]==>index
# vocab["word"]==>frequency
revs, _, _, word_idx_map, idx_word_map, vocab = x[0], x[1], x[2], x[3], x[4], x[5]
print("data loaded!")
    revs = np.random.permutation(revs) # the raw samples come grouped by class (positives then negatives), so shuffle them here
n_batches = len(revs) / config.batch_size #
n_train_batches = int(np.round(n_batches * 0.9))
    # Start defining the model ============================================
    with tf.Graph().as_default(), tf.Session().as_default() as sess:
        # Placeholders for the real inputs and outputs
        x_in = tf.placeholder(tf.int64, shape=[None, config.sentence_length], name="input_x")
        y_in = tf.placeholder(tf.int64, [None], name="input_y")
        keep_prob = tf.placeholder(tf.float32)
        # Build the model
        loss, accuracy, embeddings = model_cnn.build_model(x_in, y_in, keep_prob)
        # Train the model ========================================
num_steps = 10
global_step = tf.Variable(0)
        learning_rate = tf.train.exponential_decay(1e-4, global_step, num_steps, 0.99, staircase=True)  # learning-rate decay
train_step = tf.train.AdagradOptimizer(learning_rate).minimize(loss, global_step=global_step)
# summaries,====================
timestamp = str(int(time.time()))
out_dir = os.path.join(train_path, "summary", timestamp)
print("Writing to {}\n".format(out_dir))
loss_summary = tf.summary.scalar("loss", loss)
acc_summary = tf.summary.scalar("accuracy", accuracy)
train_summary_op = tf.summary.merge([loss_summary, acc_summary])
train_summary_dir = os.path.join(out_dir, "summaries", "train")
train_summary_writer = tf.summary.FileWriter(train_summary_dir, sess.graph)
checkpoint_dir = os.path.join(train_path, "ckpt")
checkpoint_prefix = os.path.join(checkpoint_dir, "classify")
if not os.path.exists(checkpoint_dir):
os.makedirs(checkpoint_dir)
saver = tf.train.Saver(tf.global_variables())
sess.run(tf.global_variables_initializer())
ckpt = tf.train.get_checkpoint_state(checkpoint_dir)
if ckpt and ckpt.model_checkpoint_path:
saver.restore(sess, ckpt.model_checkpoint_path)
current_step = tf.train.global_step(sess, global_step)
print("current_step:", current_step)
if num_steps > int(current_step / 135):
num_steps = num_steps - int(current_step / 135)
                print("continue step:", num_steps)
else:
num_steps = 0
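        # fixed held-out batch used to report accuracy during training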
batch_x_test, batch_y_test = model_cnn.get_test_batch(revs, word_idx_map)
for i in range(num_steps):
            for minibatch_index in np.random.permutation(range(n_train_batches)):  # shuffle so the sample order differs on every pass
batch_x, batch_y = model_cnn.generate_batch(revs, word_idx_map, minibatch_index)
# train_step.run(feed_dict={x_in: batch_x, y_in: batch_y, keep_prob: 0.5})
feed_dict = {x_in: batch_x, y_in: batch_y, keep_prob: 0.5}
_, step, summaries = sess.run([train_step, global_step, train_summary_op], feed_dict)
train_summary_writer.add_summary(summaries, step)
train_accuracy = accuracy.eval(feed_dict={x_in: batch_x_test, y_in: batch_y_test, keep_prob: 1.0})
current_step = tf.train.global_step(sess, global_step)
print("Update step %d, training accuracy %g" % (current_step, train_accuracy))
path = saver.save(sess, checkpoint_prefix, global_step=current_step)
print("Saved model checkpoint to {}\n".format(path))
return embeddings, sess, idx_word_map
if __name__ == "__main__":
train_path = os.path.join(pkg_path, "data/multi_class_classify")
embeddings, sess, idx_word_map = train_cnn_classfier(train_path)
final_embeddings = model_cnn.word2vec(embeddings, train_path, sess)
# cnn_classfier.display_word2vec(final_embeddings, idx_word_map) | [
"[email protected]"
] | |
7a749099ae0b4ffeccdc71b50ebdfe954e3b2755 | 8dcc345a522904fd2ebb4ce2f18ce425dca15868 | /upgrade_ipp_image.py | b3a2773e1d20b656aeb5b76e10e3c4ddd531fdb9 | [] | no_license | drunkwater/MyPerl | 45787b7f6cf241a97a8e1d3092656fa92511182a | 5d9c57b6f4eedb373a4fd55635f008f5e475fa19 | refs/heads/master | 2020-03-17T12:43:43.246462 | 2018-06-12T03:52:01 | 2018-06-12T03:52:01 | 133,600,445 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,257 | py | #!/usr/bin/python3
# -*- coding: utf-8 -*-
########################################################################################
# @filename : upgrade_ipp_image.py
# @author : Copyright (C) Church.Zhong
# @date : Fri Jun 8 14:48:56 HKT 2018
# @function : upgrade IP phone binary image file
# @see : C:\Program Files\Python36\Lib\urllib
# @require : python 3.6.5 works well
# @style : https://google.github.io/styleguide/pyguide.html
########################################################################################
import os
import time
import re
import subprocess
from datetime import datetime
from random import randint
import base64
import ipaddress
import urllib.request
import urllib.parse
import urllib.error
def do_http_basic_auth(ip, username, password, filename):
url = 'http://{0}{1}'.format(ip, '/mainform.cgi/Manu_Firmware_Upgrade.htm')
userAndPass = base64.b64encode('{0}:{1}'.format(username, password).encode()).decode('utf-8')
headers = {
'Connection' : 'keep-alive',
'Authorization' : 'Basic {0}'.format( userAndPass )
}
response = False
req = urllib.request.Request(url, headers=headers)
try:
with urllib.request.urlopen(req ) as f:
pass
print('GET {0},{1},{2}'.format(url, f.status, f.reason))
#print(f.info())
#page = f.read().decode(encoding='utf-8')
response = True
except urllib.error.HTTPError as e:
        print(e.code)
print(e.read().decode(encoding='utf-8'))
time.sleep( 1 )
post = ('UPGRADESHOW=1')
headers = {
'Connection' : 'keep-alive',
'Content-Type' : 'application/x-www-form-urlencoded',
'Authorization' : 'Basic {0}'.format( userAndPass ),
'Content-Length' : len(post)
}
data=post.encode('utf-8')
req = urllib.request.Request(url, headers=headers)
try:
with urllib.request.urlopen(req, data=data) as f:
pass
print('POST {0},{1},{2}'.format(url, f.status, f.reason))
#print(f.info())
#page = f.read().decode(encoding='utf-8')
response = True
except urllib.error.HTTPError as e:
        print(e.code)
print(e.read().decode(encoding='utf-8'))
time.sleep( 1 )
url = 'http://{0}{1}'.format(ip, '/upload.cgi')
moment = datetime.now().strftime("%b%d%Y%H%M%S")
boundary = '----WebKitFormBoundary{0}{1}'.format(moment, randint(0,9))
print ('boundary=' + boundary)
with open(filename, 'rb') as fd:
image = fd.read()
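    # Hand-build the multipart/form-data body: a "localupgrade" form field (value 20) followed by the firmware image as a file part, delimited by the boundary string.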
content = ("--%s\r\n" % boundary).encode('utf-8') + \
("Content-Disposition: form-data; name=\"localupgrade\"\r\n\r\n20").encode('utf-8') + \
("\r\n--%s\r\n" % boundary).encode('utf-8') + \
("Content-Disposition: form-data; name=\"upname\"; filename=\"%s\"\r\n" % filename).encode('utf-8') + \
("Content-Type: application/octet-stream\r\n\r\n").encode('utf-8') + \
image + \
("\r\n--%s--\r\n" % boundary).encode('utf-8')
data = content
headers = {
'Content-Type' : 'multipart/form-data; boundary=%s' % boundary,
'Authorization' : 'Basic {0}'.format( userAndPass ),
'Connection' : 'keep-alive',
'Content-Length' : len(content)
}
req = urllib.request.Request(url, headers=headers)
try:
with urllib.request.urlopen(req, data=data) as f:
pass
print('POST {0},{1},{2}'.format(url, f.status, f.reason))
#print(f.info())
#page = f.read().decode(encoding='utf-8')
response = True
except urllib.error.HTTPError as e:
        print(e.code)
print(e.read().decode(encoding='utf-8'))
return response
def do_http_cookie_pair(ip, username, password, filename):
url = 'http://{0}{1}'.format(ip, '/mainform.cgi?go=mainframe.htm')
headers = {
'Connection' : 'keep-alive',
'Cookie' : 'session=',
}
response = False
req = urllib.request.Request(url, headers=headers)
try:
with urllib.request.urlopen(req ) as f:
pass
print('GET {0},{1},{2}'.format(url, f.status, f.reason))
#print(f.info())
#page = f.read().decode(encoding='utf-8')
response = True
except urllib.error.HTTPError as e:
        print(e.code)
print(e.read().decode(encoding='utf-8'))
time.sleep( 1 )
url = 'http://{0}{1}'.format(ip, '/mainform.cgi/login_redirect.htm')
headers = {
'Connection' : 'keep-alive',
'Cookie' : 'session=',
}
response = False
req = urllib.request.Request(url, headers=headers)
try:
with urllib.request.urlopen(req ) as f:
pass
print('GET {0},{1},{2}'.format(url, f.status, f.reason))
#print(f.info())
#page = f.read().decode(encoding='utf-8')
response = True
except urllib.error.HTTPError as e:
print(e.code())
print(e.read().decode(encoding='utf-8'))
time.sleep( 1 )
url = 'http://{0}{1}'.format(ip, '/login.cgi')
headers = {
'Connection' : 'keep-alive',
'Cookie' : 'session=',
}
response = False
req = urllib.request.Request(url, headers=headers)
try:
with urllib.request.urlopen(req ) as f:
pass
print('GET {0},{1},{2}'.format(url, f.status, f.reason))
#print(f.info())
#page = f.read().decode(encoding='utf-8')
response = True
except urllib.error.HTTPError as e:
        print(e.code)
print(e.read().decode(encoding='utf-8'))
time.sleep( 1 )
url = 'http://{0}{1}'.format(ip, '/login.cgi')
b64password = base64.b64encode('{}'.format( password ).encode()).decode('utf-8')
post = { 'user' : username, 'psw' : b64password }
data = urllib.parse.urlencode(post).encode('utf-8')
headers = {
'Content-Type' : 'application/x-www-form-urlencoded',
'Connection' : 'keep-alive',
'Cookie' : 'session='
}
SetCookie = ''
response = False
req = urllib.request.Request(url, headers=headers)
try:
with urllib.request.urlopen(req, data=data ) as f:
pass
print('POST {0},{1},{2}'.format(url, f.status, f.reason))
cookie = f.info()['Set-Cookie']
print('Set-Cookie: {}'.format( cookie ))
m = re.match(r'^session=(.*)\;\ path\=\/$', cookie)
if not m:
response = False
else:
SetCookie = m.group(1)
print('got shiny SetCookie={}'.format( SetCookie ))
#page = f.read().decode(encoding='utf-8')
response = True
except urllib.error.HTTPError as e:
        print(e.code)
print(e.read().decode(encoding='utf-8'))
time.sleep( 1 )
url = 'http://{0}{1}'.format(ip, '/mainform.cgi/Manu_Firmware_Upgrade.htm')
b64password = base64.b64encode('{}'.format( password ).encode()).decode('utf-8')
headers = {
'Connection' : 'keep-alive',
'Cookie' : 'session={}'.format( SetCookie )
}
response = False
req = urllib.request.Request(url, headers=headers)
try:
with urllib.request.urlopen(req ) as f:
pass
print('GET {0},{1},{2}'.format(url, f.status, f.reason))
#print(f.info())
#page = f.read().decode(encoding='utf-8')
response = True
except urllib.error.HTTPError as e:
        print(e.code)
print(e.read().decode(encoding='utf-8'))
time.sleep( 1 )
post = ('UPGRADESHOW=1')
headers = {
'Connection' : 'keep-alive',
'Content-Type' : 'application/x-www-form-urlencoded',
'Cookie' : 'session={}'.format( SetCookie ),
'Content-Length' : len(post)
}
data=post.encode('utf-8')
req = urllib.request.Request(url, headers=headers)
try:
with urllib.request.urlopen(req, data=data) as f:
pass
print('POST {0},{1},{2}'.format(url, f.status, f.reason))
#print(f.info())
#page = f.read().decode(encoding='utf-8')
response = True
except urllib.error.HTTPError as e:
        print(e.code)
print(e.read().decode(encoding='utf-8'))
time.sleep( 1 )
url = 'http://{0}{1}'.format(ip, '/upload.cgi')
moment = datetime.now().strftime("%b%d%Y%H%M%S")
boundary = '----WebKitFormBoundary{0}{1}'.format(moment, randint(0,9))
print ('boundary=' + boundary)
with open(filename, 'rb') as fd:
image = fd.read()
content = ("--%s\r\n" % boundary).encode('utf-8') + \
("Content-Disposition: form-data; name=\"localupgrade\"\r\n\r\n20").encode('utf-8') + \
("\r\n--%s\r\n" % boundary).encode('utf-8') + \
("Content-Disposition: form-data; name=\"upname\"; filename=\"%s\"\r\n" % filename).encode('utf-8') + \
("Content-Type: application/octet-stream\r\n\r\n").encode('utf-8') + \
image + \
("\r\n--%s--\r\n" % boundary).encode('utf-8')
data = content
headers = {
'Content-Type' : 'multipart/form-data; boundary=%s' % boundary,
'Cookie' : 'session={}'.format( SetCookie ),
'Connection' : 'keep-alive',
'Content-Length' : len(content)
}
req = urllib.request.Request(url, headers=headers)
try:
with urllib.request.urlopen(req, data=data) as f:
pass
print('POST {0},{1},{2}'.format(url, f.status, f.reason))
#print(f.info())
#page = f.read().decode(encoding='utf-8')
response = True
except urllib.error.HTTPError as e:
        print(e.code)
print(e.read().decode(encoding='utf-8'))
return response
# https://pymotw.com/3/argparse/
import argparse
def work():
parser = argparse.ArgumentParser()
parser.add_argument('-i', action='store',
default='172.17.179.100',
dest='ip_address',
help='Set HTTP ip_address for IPP')
parser.add_argument('-u', action='store',
default='admin',
dest='username',
help='Set HTTP username for IPP')
parser.add_argument('-p', action='store',
default='1234',
dest='password',
help='Set HTTP password for IPP')
parser.add_argument('-f', action='store',
default='',
dest='image_file',
help='Set image binary file for IPP')
parser.add_argument('-n', action='store_true',
default=False,
dest='nonLync',
help='Upgrade nonlync or SFB branch')
parser.add_argument('--version', action='version',
version='%(prog)s 1.0')
results = parser.parse_args()
print('ip_address = {!r}'.format(results.ip_address))
print('username = {!r}'.format(results.username))
print('password = {!r}'.format(results.password))
print('image_file = {!r}'.format(results.image_file))
print('nonLync = {!r}'.format(results.nonLync))
ip = ipaddress.ip_address(results.ip_address)
if 4 == ip.version:
print ("Valid IPv4")
elif 6 == ip.version:
print ("Valid IPv6")
results.ip_address = '[{}]'.format(ip)
else:
        print("Invalid ipaddress %s" % ip)
return
if True == results.nonLync:
print ("upgrade nonLync image!\n")
do_http_basic_auth(results.ip_address, results.username, results.password, results.image_file)
else:
print ("upgrade SFB/Lync image!\n")
do_http_cookie_pair(results.ip_address, results.username, results.password, results.image_file)
def main():
start = time.time()
work()
print('running time:%s' % (time.time() - start))
if __name__ == "__main__":
main() | [
"[email protected]"
] | |
4cdf34e45cb5d8eaa9a0dc255e0b2e23dca732a5 | 0bb49acb7bb13a09adafc2e43e339f4c956e17a6 | /OpenNodes/OpenProject/addComment.py | 20008f8c48e3a22a8264afab4a51366f970cea38 | [] | no_license | all-in-one-of/openassembler-7 | 94f6cdc866bceb844246de7920b7cbff9fcc69bf | 69704d1c4aa4b1b99f484c8c7884cf73d412fafe | refs/heads/master | 2021-01-04T18:08:10.264830 | 2010-07-02T10:50:16 | 2010-07-02T10:50:16 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,495 | py | ###OpenAssembler Node python file###
'''
define
{
name addComment
tags opdb
input dbPath Path "" ""
input string Comment "" ""
output int result "" ""
}
'''
import os, sys,time
from Setup import opdb_setup
from getCleanPath import getCleanPath
class addComment(opdb_setup,getCleanPath):
def addComment_main(self, **connections):
try:
Path=connections["Path"]
except:
Path=""
try:
Comment=connections["Comment"]
except:
Comment=""
try:
oas_output=connections["oas_output"]
except:
oas_output="result"
if oas_output=="result":
try:
readed=""
ProjectROOT=self.opdb_projects_settings(self.opdb_setup_read())
cleanpath=self.getCleanPath_main(Path=Path)
if cleanpath==0:
return 0
if str(cleanpath)==str(Path):
return 0
path=ProjectROOT+cleanpath.replace(":","/")
ltime=time.strftime("%Y%m%d%H%M%S",time.gmtime())
cuser=""
if os.name=="nt":
cuser=os.environ.get("USERNAME")
else:
cuser=os.environ.get("USER")
if os.path.isfile(path+"/comments.atr"):
pf=open(path+"/comments.atr","r")
readed=pf.read()
pf.close()
readed=readed.strip().lstrip()
ver=Path.split("@")[1]
comm=str(Comment).strip().lstrip().replace("\n"," | ").replace("\r","")
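            # prepend a new "- user version timestamp || comment" line to the existing contents of comments.atr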
newline="- "+str(cuser)+" "+str(ver)+" "+str(ltime)+" || "+comm
textbody=newline+"\n"+readed+"\n"
pf=open(path+"/comments.atr","w")
pf.write(textbody)
pf.close()
return 1
except:
return 0
else:
return 0
| [
"laszlo.mates@732492aa-5b49-0410-a19c-07a6d82ec771"
] | laszlo.mates@732492aa-5b49-0410-a19c-07a6d82ec771 |
d6e25c415c45703003f5f0ffb9e717bad338c732 | d0a1b71a91c67a9bca2d6c41738effde7bf3d503 | /be_loop_projector.py | a3456e470ae695e53647acd24ca5bff809aebbe6 | [
"MIT"
] | permissive | ssomnath/be_processes | a1a5a0f886b30768bc7552eab9d0a73e04fccf3c | d6e028926f2d79d78030e51ab6b440ff27bd6351 | refs/heads/master | 2020-07-04T20:48:08.129114 | 2019-11-27T16:06:35 | 2019-11-27T16:06:35 | 202,412,038 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 26,446 | py | import numpy as np
import joblib
from pyUSID.io.hdf_utils import copy_region_refs, write_simple_attrs, create_results_group, write_reduced_anc_dsets, \
create_empty_dataset, write_main_dataset, get_attr, get_unit_values, reshape_to_n_dims, get_sort_order
from pyUSID.io.usi_data import USIDataset
from pyUSID.processing.process import Process
from pyUSID.processing.comp_utils import get_MPI
# From this project:
from be_sho_fitter import sho32
from be_loop import projectLoop
'''
Custom dtype for the datasets created during fitting.
'''
loop_metrics32 = np.dtype({'names': ['Area', 'Centroid x', 'Centroid y',
'Rotation Angle [rad]', 'Offset'],
'formats': [np.float32, np.float32, np.float32,
np.float32, np.float32]})
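# One compound record per projected loop: geometric area, centroid (x, y), rotation angle and vertical offset.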
class BELoopProjector(Process):
def __init__(self, h5_main, **kwargs):
super(BELoopProjector, self).__init__(h5_main, **kwargs)
if 'DC_Offset' in self.h5_main.spec_dim_labels:
self._fit_dim_name = 'DC_Offset'
elif 'write_bias' in self.h5_main.spec_dim_labels:
self._fit_dim_name = 'write_bias'
else:
raise ValueError('Neither "DC_Offset", nor "write_bias" were '
'spectroscopic dimension in the provided dataset '
'which has dimensions: {}'
'.'.format(self.h5_main.spec_dim_labels))
if 'FORC' in self.h5_main.spec_dim_labels:
self._forc_dim_name = 'FORC'
else:
self._forc_dim_name = 'FORC_Cycle'
# TODO: Need to catch KeyError s that would be thrown when attempting to access attributes
file_data_type = get_attr(h5_main.file, 'data_type')
meas_grp_name = h5_main.name.split('/')
h5_meas_grp = h5_main.file[meas_grp_name[1]]
meas_data_type = get_attr(h5_meas_grp, 'data_type')
if h5_main.dtype != sho32:
raise TypeError('Provided dataset is not a SHO results dataset.')
# This check is clunky but should account for case differences.
# If Python2 support is dropped, simplify with# single check using case
if not (
meas_data_type.lower != file_data_type.lower or meas_data_type.upper != file_data_type.upper):
message = 'Mismatch between file and Measurement group data types for the chosen dataset.\n'
message += 'File data type is {}. The data type for Measurement group {} is {}'.format(
file_data_type,
h5_meas_grp.name,
meas_data_type)
raise ValueError(message)
if file_data_type == 'BEPSData':
if get_attr(h5_meas_grp, 'VS_mode') not in ['DC modulation mode',
'current mode']:
raise ValueError(
'Provided dataset has a mode: "' + get_attr(h5_meas_grp,
'VS_mode') + '" is not a '
'"DC modulation" or "current mode" BEPS dataset')
elif get_attr(h5_meas_grp, 'VS_cycle_fraction') != 'full':
raise ValueError('Provided dataset does not have full cycles')
elif file_data_type == 'cKPFMData':
if get_attr(h5_meas_grp, 'VS_mode') != 'cKPFM':
raise ValueError(
'Provided dataset has an unsupported VS_mode: "' + get_attr(
h5_meas_grp, 'VS_mode') + '"')
# #####################################################################
self.process_name = "Loop_Projection"
self.parms_dict = {'projection_method': 'pycroscopy BE loop model'}
# Now Extract some basic parameters that are necessary for either the guess or fit
self.dc_offsets_mat = self._get_dc_offsets(self.h5_main.h5_spec_inds,
self.h5_main.h5_spec_vals,
self._fit_dim_name,
self._forc_dim_name,
verbose=self.verbose)
def _create_results_datasets(self):
"""
Setup the Loop_Fit Group and the loop projection datasets
"""
# First grab the spectroscopic indices and values and position indices
# TODO: Avoid unnecessary namespace pollution
# self._sho_spec_inds = self.h5_main.h5_spec_inds
# self._sho_spec_vals = self.h5_main.h5_spec_vals
# self._sho_pos_inds = self.h5_main.h5_pos_inds
# Which row in the spec datasets is DC offset?
self._fit_spec_index = self.h5_main.spec_dim_labels.index(self._fit_dim_name)
# TODO: Unkown usage of variable. Waste either way
# self._fit_offset_index = 1 + self._fit_spec_index
# Calculate the number of loops per position
cycle_start_inds = np.argwhere(self.h5_main.h5_spec_inds[self._fit_spec_index, :] == 0).flatten()
tot_cycles = cycle_start_inds.size
if self.verbose:
print('Found {} cycles starting at indices: {}'.format(tot_cycles, cycle_start_inds))
# Make the results group
self.h5_results_grp = create_results_group(self.h5_main, self.process_name)
write_simple_attrs(self.h5_results_grp, self.parms_dict)
# Write datasets
self.h5_projected_loops = create_empty_dataset(self.h5_main, np.float32, 'Projected_Loops',
h5_group=self.h5_results_grp)
h5_loop_met_spec_inds, h5_loop_met_spec_vals = write_reduced_anc_dsets(self.h5_results_grp, self.h5_main.h5_spec_inds,
self.h5_main.h5_spec_vals, self._fit_dim_name,
basename='Loop_Metrics', verbose=self.verbose)
self.h5_loop_metrics = write_main_dataset(self.h5_results_grp, (self.h5_main.shape[0], tot_cycles), 'Loop_Metrics',
'Metrics', 'compound', None, None, dtype=loop_metrics32,
h5_pos_inds=self.h5_main.h5_pos_inds,
h5_pos_vals=self.h5_main.h5_pos_vals,
h5_spec_inds=h5_loop_met_spec_inds,
h5_spec_vals=h5_loop_met_spec_vals)
# Copy region reference:
# copy_region_refs(self.h5_main, self.h5_projected_loops)
# copy_region_refs(self.h5_main, self.h5_loop_metrics)
self.h5_main.file.flush()
self._met_spec_inds = self.h5_loop_metrics.h5_spec_inds
if self.verbose and self.mpi_rank == 0:
print('Finished creating Guess dataset')
def _read_data_chunk(self):
"""
Returns the next chunk of data for the guess or the fit
"""
# The Process class should take care of all the basic reading
super(BELoopProjector, self)._read_data_chunk()
if self.data is None:
# Nothing we can do at this point
return
if self.verbose and self.mpi_rank == 0:
print('BELoopProjector got raw data of shape {} from super'
'.'.format(self.data.shape))
"""
Now self.data contains data for N pixels.
The challenge is that this may contain M FORC cycles
Each FORC cycle needs its own V DC vector
So, we can't blindly use the inherited unit_compute.
Our variables now are Position, Vdc, FORC, all others
We want M lists of [VDC x all other variables]
The challenge is that VDC and FORC are inner dimensions -
neither the fastest nor the slowest (guaranteed)
"""
# resp_2d, dc_vec lists is what this function returns
self.data = self.get_forc_pairs_from_sho_2d(self.data)
if self.verbose and self.mpi_rank == 0:
print('Reshaped raw data to {} FORC datasets, each of shape {}'
'.'.format(len(self.data[0]), self.data[0][0].shape))
@staticmethod
def _get_dc_offsets(h5_spec_inds, h5_spec_vals, fit_dim_name,
forc_dim_name, verbose=False):
# FORC is the decider whether or not DC_Offset changes.
# FORC_Repeats etc. should not matter
spec_unit_vals = get_unit_values(h5_spec_inds,
h5_spec_vals,
verbose=False)
if forc_dim_name not in spec_unit_vals.keys():
if verbose:
print(
'This is not a FORC dataset. Just taking unit values for DC Offset')
dc_val_mat = np.expand_dims(spec_unit_vals[fit_dim_name], axis=0)
else:
# Reshape the Spec values matrix into an N dimensional array
if verbose:
print(
'This is a FORC dataset. Reshaping Spectroscopic Values to N dimensions')
ret_vals = reshape_to_n_dims(h5_spec_vals,
np.expand_dims(
np.arange(h5_spec_vals.shape[0]),
axis=1),
h5_spec_inds, get_labels=True)
spec_vals_nd, success, spec_nd_labels = ret_vals
if success != True:
raise ValueError(
'Unable to reshape Spectroscopic values to get DC offsets for each FORC')
# We will be using "in" quite a bit. So convert to list
spec_nd_labels = list(spec_nd_labels)
if verbose:
print('Reshaped Spectroscopic Values to: {}'.format(
spec_vals_nd.shape))
print(
'Spectroscopic dimension names: {}'.format(spec_nd_labels))
# Note the indices of all other dimensions
all_other_dims = set(range(len(spec_nd_labels))) - \
set([spec_nd_labels.index(fit_dim_name),
spec_nd_labels.index(forc_dim_name)])
# Set up a new order where FORC is at 0 and DC is at 1 and all
# other dimensions (useless) follow
new_order = [spec_nd_labels.index(forc_dim_name),
spec_nd_labels.index(fit_dim_name)] + list(
all_other_dims)
if verbose:
print('Will transpose this N-dim matrix as: {}'.format(
new_order))
# Apply this new order to the matrix and the labels
spec_vals_nd = spec_vals_nd.transpose(new_order)
spec_nd_labels = np.array(spec_nd_labels)[new_order]
if verbose:
print('After transpose shape and names:\n\t{}\n\t{}'.format(
spec_vals_nd.shape, spec_nd_labels))
# Now remove all other dimensions using a list of slices:
keep_list = [slice(None), slice(None)] + [slice(0, 1) for _ in
range(
len(all_other_dims))]
# Don't forget to remove singular dimensions using squeeze
dc_val_mat = spec_vals_nd[keep_list].squeeze()
# Unnecessary but let's keep track of dimension names anyway
spec_nd_labels = spec_nd_labels[:2]
if verbose:
print(
'After removing all other dimensions. Shape is: {} and dimensions are: {}'.format(
dc_val_mat.shape, spec_nd_labels))
return dc_val_mat
@staticmethod
def reshape_sho_chunk_to_nd(data_2d, raw_dim_labels,
h5_pos_inds, h5_spec_inds,
verbose=False):
ret_vals = reshape_to_n_dims(data_2d, h5_pos_inds[:data_2d.shape[0]],
h5_spec_inds)
data_nd_auto, success = ret_vals
if success != True:
raise ValueError(
'Unable to reshape data chunk of shape {} to N dimensions'.format(
data_2d.shape))
if verbose:
print('Reshaped raw data from: {} to {}'.format(data_2d.shape,
data_nd_auto.shape))
# By default it is fast to slow!
pos_sort = get_sort_order(h5_pos_inds)[::-1]
spec_sort = get_sort_order(h5_spec_inds)[::-1]
swap_order = list(pos_sort) + list(len(pos_sort) + spec_sort)
if verbose:
print(
'Dimensions will be permuted as {} to arrange them from slowest to fastest'.format(
swap_order))
data_nd_s2f = data_nd_auto.transpose(swap_order)
dim_labels_s2f = np.array(raw_dim_labels)[swap_order]
if verbose:
print(
'After rearranging array is of shape: {}, dimensions are ordered as: {}'.format(
data_nd_s2f.shape, dim_labels_s2f))
return data_nd_s2f, dim_labels_s2f
@staticmethod
def break_nd_by_forc(data_nd_s2f, dim_labels_s2f, num_forcs, forc_dim_name,
verbose=False):
if num_forcs > 1:
# Fundamental assumption: FORC will always be the slowest dimension
# YOu can have repeats, cycles etc. but all of those will
# coreespond to the same FORC index - a single defintion for DC_Off
forc_dim_ind = dim_labels_s2f.index(forc_dim_name)
forc_less_labels_s2f = dim_labels_s2f[
:forc_dim_ind] + dim_labels_s2f[
forc_dim_ind + 1:]
if verbose:
print(
'"FORC" was found at index: {} in the dimension labels (slow>>fast): {}'.format(
forc_dim_ind, dim_labels_s2f))
print('Dimensions after removing FORC: {}'.format(
forc_less_labels_s2f))
single_forc_indices = [slice(None) for _ in
range(len(dim_labels_s2f))]
forc_dsets = []
switch = True
for forc_ind in range(num_forcs):
single_forc_indices[forc_dim_ind] = slice(forc_ind,
forc_ind + 1)
temp = data_nd_s2f[single_forc_indices].squeeze()
if verbose and switch:
print(
'Slice list used to slice index: {} of FORC: {}'.format(
forc_ind, single_forc_indices))
print('Shape of matrix after slicing FORC: {}'.format(
temp.shape))
switch = False
forc_dsets.append(temp)
else:
forc_dsets = [data_nd_s2f]
forc_less_labels_s2f = dim_labels_s2f
return forc_dsets, list(forc_less_labels_s2f)
@staticmethod
def get_forc_pairs(forc_dsets, forc_less_labels_s2f, dc_val_mat,
fit_dim_name, verbose=False):
dc_vec = []
resp_2d = []
switch = True
for dc_offsets, forc_less_mat_nd in zip(dc_val_mat, forc_dsets):
if len(forc_less_labels_s2f) != forc_less_mat_nd.ndim:
raise ValueError('Length of labels: {} does not match with '
'number of dimensions of dataset: {}'
'.'.format(len(forc_less_labels_s2f),
forc_less_mat_nd.ndim))
dc_dim_ind = forc_less_labels_s2f.index(fit_dim_name)
if verbose and switch:
print(
'"DC_Offset" found at index: {} in list of dimensions: {}'.format(
dc_dim_ind, forc_less_labels_s2f))
slower_than_dc_dim_inds = list(range(dc_dim_ind))
faster_than_dc_dim_inds = list(
range(dc_dim_ind + 1, len(forc_less_labels_s2f)))
trans_order = slower_than_dc_dim_inds + faster_than_dc_dim_inds + [
dc_dim_ind]
if verbose and switch:
print(
'Transposing the data matrix as: {} to get "DC_Offset" as last dimension'.format(
trans_order))
shifted_matrix = forc_less_mat_nd.transpose(trans_order)
shifted_labels = list(np.array(forc_less_labels_s2f)[trans_order])
if verbose and switch:
print(
'Shape of permuted array: {} with dimension names: {}'.format(
shifted_matrix.shape, shifted_labels))
all_x_vdc = shifted_matrix.reshape(-1, shifted_matrix.shape[-1])
if verbose and switch:
print('Shape of 2D array after flattening: {}'.format(
all_x_vdc.shape))
switch = False
dc_vec.append(dc_offsets)
resp_2d.append(all_x_vdc)
return resp_2d, dc_vec, list(shifted_matrix.shape), shifted_labels
def get_forc_pairs_from_sho_2d(self, data_2d):
data_nd_s2f, dim_labels_s2f = self.reshape_sho_chunk_to_nd(data_2d,
self.h5_main.n_dim_labels,
self.h5_main.h5_pos_inds,
self.h5_main.h5_spec_inds,
verbose=self.verbose)
self._dim_labels_s2f = list(dim_labels_s2f)
forc_dsets, forc_less_labels_s2f = self.break_nd_by_forc(data_nd_s2f,
list(dim_labels_s2f),
self.dc_offsets_mat.shape[0],
self._forc_dim_name,
verbose=self.verbose)
self._num_forcs = len(forc_dsets)
ret_vals = self.get_forc_pairs(forc_dsets, forc_less_labels_s2f,
self.dc_offsets_mat, self._fit_dim_name, verbose=self.verbose)
resp_2d, dc_vec, self.pre_flattening_shape, self.pre_flattening_dim_name_order = ret_vals
return resp_2d, dc_vec
def _unit_computation(self):
if self.verbose and self.mpi_rank == 0:
print("Rank {} at custom _unit_computation".format(self.mpi_rank))
resp_2d_list, dc_vec_list = self.data
req_cores = self._cores
MPI = get_MPI()
if MPI is not None:
rank = MPI.COMM_WORLD.Get_rank()
cores = 1
else:
rank = 0
cores = self._cores
if self.verbose:
print(
'Rank {} starting computing on {} cores (requested {} cores)'.format(
rank, cores, req_cores))
if cores > 1:
values = []
for loops_2d, curr_vdc in zip(resp_2d_list, dc_vec_list):
values += [joblib.delayed(self._map_function)(x, [curr_vdc]) for x
in loops_2d]
results = joblib.Parallel(n_jobs=cores)(values)
# Finished reading the entire data set
print('Rank {} finished parallel computation'.format(rank))
else:
if self.verbose:
print("Rank {} computing serially ...".format(rank))
# List comprehension vs map vs for loop?
# https://stackoverflow.com/questions/1247486/python-list-comprehension-vs-map
results = []
for loops_2d, curr_vdc in zip(resp_2d_list, dc_vec_list):
results += [self._map_function(vector, curr_vdc) for vector in
loops_2d]
self._results = results
def compute(self, override=False):
return super(BELoopProjector, self).compute(override=override)
project_loops = compute
@staticmethod
def _map_function(sho_response, dc_offset):
# projected_loop = np.zeros(shape=sho_response.shape, dtype=np.float32)
ancillary = np.zeros(shape=1, dtype=loop_metrics32)
pix_dict = projectLoop(np.squeeze(dc_offset),
sho_response['Amplitude [V]'],
sho_response['Phase [rad]'])
projected_loop = pix_dict['Projected Loop']
ancillary['Rotation Angle [rad]'] = pix_dict['Rotation Matrix'][0]
ancillary['Offset'] = pix_dict['Rotation Matrix'][1]
ancillary['Area'] = pix_dict['Geometric Area']
ancillary['Centroid x'] = pix_dict['Centroid'][0]
ancillary['Centroid y'] = pix_dict['Centroid'][1]
return projected_loop, ancillary
@staticmethod
def _reformat_results_chunk(num_forcs, proj_loops, first_n_dim_shape,
first_n_dim_names, dim_labels_s2f,
num_pos_dims, forc_dim_name, verbose=False):
# What we need to do is put the forc back as the slowest dimension before the pre_flattening shape:
if num_forcs > 1:
first_n_dim_shape = [num_forcs] + first_n_dim_shape
first_n_dim_names = [forc_dim_name] + first_n_dim_names
if verbose:
print('Dimension sizes & order: {} and names: {} that flattened '
'results will be reshaped to'
'.'.format(first_n_dim_shape, first_n_dim_names))
# Now, reshape the flattened 2D results to its N-dim form before flattening (now FORC included):
first_n_dim_results = proj_loops.reshape(first_n_dim_shape)
# Need to put data back to slowest >> fastest dim
map_to_s2f = [first_n_dim_names.index(dim_name) for dim_name in
dim_labels_s2f]
if verbose:
print('Will permute as: {} to arrange dimensions from slowest to '
'fastest varying'.format(map_to_s2f))
results_nd_s2f = first_n_dim_results.transpose(map_to_s2f)
if verbose:
print('Shape: {} and dimension labels: {} of results arranged from'
' slowest to fastest varying'
'.'.format(results_nd_s2f.shape, dim_labels_s2f))
pos_size = np.prod(results_nd_s2f.shape[:num_pos_dims])
spec_size = np.prod(results_nd_s2f.shape[num_pos_dims:])
if verbose:
            print('Results will be flattened to: {}'
'.'.format((pos_size, spec_size)))
results_2d = results_nd_s2f.reshape(pos_size, spec_size)
return results_2d
def _write_results_chunk(self):
"""
self._results is now a zipped tuple containing:
1. a projected loop (an array of float32) and
        2. a single compound element for the loop metrics
Step 1 will be to unzip the two components into separate arrays
Step 2 will fold back the flattened 1 / 2D array into the N-dim form
Step 3 will reverse all transposes
Step 4 will flatten back to its original 2D form
Step 5 will finally write the data to an HDF5 file
"""
# Step 1: unzip the two components in results into separate arrays
loop_mets = np.zeros(shape=len(self._results), dtype=loop_metrics32)
proj_loops = np.zeros(shape=(len(self._results),
self.dc_offsets_mat.shape[1]),
dtype=np.float32)
if self.verbose:
print('Prepared empty arrays for loop metrics of shape: {} and '
'projected loops of shape: {}.'
''.format(loop_mets.shape, proj_loops.shape))
for ind in range(len(self._results)):
proj_loops[ind] = self._results[ind][0]
loop_mets[ind] = self._results[ind][1]
if self.verbose:
print('Unzipped results into Projected loops and Metrics arrays')
# Step 2: Fold to N-D before reversing transposes:
loops_2d = self._reformat_results_chunk(self._num_forcs, proj_loops,
self.pre_flattening_shape,
self.pre_flattening_dim_name_order,
self._dim_labels_s2f,
len(self.h5_main.pos_dim_labels),
self._forc_dim_name,
verbose=self.verbose)
met_labels_s2f = self._dim_labels_s2f.copy()
met_labels_s2f.remove(self._fit_dim_name)
mets_2d = self._reformat_results_chunk(self._num_forcs, loop_mets,
self.pre_flattening_shape[:-1],
self.pre_flattening_dim_name_order[:-1],
met_labels_s2f,
len(self.h5_main.pos_dim_labels),
self._forc_dim_name,
verbose=self.verbose)
# Which pixels are we working on?
curr_pixels = self._get_pixels_in_current_batch()
if self.verbose:
print('Writing projected loops of shape: {} to {} pixels in dataset of shape: {}'.format(loops_2d.shape, len(curr_pixels), self.h5_projected_loops.shape))
print('Writing loop metrics of shape: {} to {} pixels in dataset of shape: {}'.format(mets_2d.shape, len(curr_pixels), self.h5_loop_metrics.shape))
self.h5_projected_loops[curr_pixels, :] = loops_2d
self.h5_loop_metrics[curr_pixels, :] = mets_2d
"""
if self.verbose and self.mpi_rank == 0:
print('Finished ?')
""" | [
"[email protected]"
] | |
4b5832a605f16bffdeb23750b8ef37198504d3b2 | c0c8aeb5aaf08925d8c9e1d660b02c89cbc7ad71 | /Algorithms/Medium/47. Permutations II/answer.py | 2df5db8def3857a1e5ac90f79f3ef6b455e5fc92 | [
"Apache-2.0"
] | permissive | kenwoov/PlayLeetCode | b2fdc43d799c37683a9efdc31c4df159cf553bf5 | 4012a2f0a099a502df1e5df2e39faa75fe6463e8 | refs/heads/master | 2022-12-17T05:54:22.775972 | 2020-09-26T14:08:43 | 2020-09-26T14:08:43 | 214,839,611 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 484 | py | from typing import List
class Solution:
def permuteUnique(self, nums: List[int]) -> List[List[int]]:
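        # Depth-first search over the remaining numbers; a finished permutation
        # is appended only if it is not already in the results, which filters
        # out duplicates caused by repeated values in nums.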
res = []
def dfs(n, r, path):
if not n and path not in r:
r.append(path)
else:
for i in range(len(n)):
dfs(n[:i]+n[i+1:], r, path+[n[i]])
dfs(nums, res, [])
return res
if __name__ == "__main__":
s = Solution()
result = s.permuteUnique([1,1,2])
print(result) | [
"[email protected]"
] | |
f2c2cb9e12e923d4a06e08ac3ca969bebfed1aa3 | 1b1b5908dce757e9aa638507baa788f0f0e16611 | /rango/migrations/0008_auto_20170101_2008.py | b86f6213843fb8e6e9fb55a28bd53b135c7abe3a | [] | no_license | mish24/Rango | 5223f1c43ce74e2f336d95a3f03621b05d5af049 | fea0b6364bc265cdc09c75f745dd317fd38c0422 | refs/heads/master | 2021-04-28T21:34:44.014904 | 2017-01-04T21:27:26 | 2017-01-04T21:27:26 | 77,769,000 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 447 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.10.3 on 2017-01-01 20:08
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('rango', '0007_auto_20170101_1804'),
]
operations = [
migrations.AlterField(
model_name='category',
name='slug',
field=models.SlugField(unique=True),
),
]
| [
"[email protected]"
] | |
2769fde5a6fcf7fc0bb42f8ee954d96b8448f6c5 | df20743069e3c81128438ecc8a368b1853dc8137 | /overrides/scr/Spell1141 - Lawful Sword.py | bb31be7e81e6a900b07745035c61feb4e1f6a0d0 | [
"MIT"
] | permissive | dolio/ToEE_Mods | 3f020d82e590a63a04047912d8d76fa2212957d7 | 53aa8086b89b25d7afb3104c5d8896c8a38c89b0 | refs/heads/main | 2023-04-09T06:17:47.064224 | 2021-04-29T09:41:58 | 2021-04-29T09:41:58 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 699 | py | from toee import *
def OnBeginSpellCast(spell):
print "Lawful Sword OnBeginSpellCast"
print "spell.target_list=", spell.target_list
print "spell.caster=", spell.caster, " caster.level= ", spell.caster_level
def OnSpellEffect(spell):
print "Lawful Sword OnSpellEffect"
spell.duration = 1 * spell.caster_level
spellTarget = spell.target_list[0]
spellTarget.obj.condition_add_with_args('sp-Lawful Sword', spell.id, spell.duration)
spellTarget.partsys_id = game.particles('sp-Heroism', spellTarget.obj)
spell.spell_end(spell.id)
def OnBeginRound(spell):
print "Lawful Sword OnBeginRound"
def OnEndSpellCast(spell):
print "Lawful Sword OnEndSpellCast"
| [
"[email protected]"
] | |
ed9cdc6d73f30f066bf941daa653cda3d55256f4 | d6a87864028abde8da69b0a1075e3d4c483ed73c | /Reverse Integer.py | d8954fcc8255c6824667ffee755a63529a7904a4 | [] | no_license | Windsooon/LeetCode | 7ef78c7e001c1e6924244869a7ba5491d33eb246 | 409d7db811d41dbcc7ce8cda82b77eff35585657 | refs/heads/master | 2021-01-10T15:26:16.986357 | 2020-01-01T14:57:58 | 2020-01-01T14:57:58 | 54,531,267 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 272 | py | class Solution:
# edge case [-2**31, 2**31-1]
def reverse(self, x: int) -> int:
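        # Reverse the digits via string slicing and clamp to the signed
        # 32-bit range by returning 0 on overflow.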
if str(x)[0] == '-':
return 0 if -int(str(x)[1:][::-1]) < -2**31 else -int(str(x)[1:][::-1])
return 0 if int(str(x)[::-1]) > 2**31-1 else int(str(x)[::-1])
| [
"[email protected]"
] | |
7369171e172ec35558a34b912f87ae620a62b2bd | db0e49a94c2554ec8853133d09afca65d697eb62 | /ucp/benchmarks/backends/__init__.py | 82957ed11af80a3f672847d21d7830a6f8b122b7 | [
"BSD-3-Clause"
] | permissive | rapidsai/ucx-py | 415c631039c9c6ceb8d90b04e872d5a61a12eb0f | 9ba056f9f2b1af169c6312b178e9853b066928bd | refs/heads/branch-0.34 | 2023-09-02T21:33:25.839513 | 2023-08-28T13:40:30 | 2023-08-28T13:40:30 | 149,822,197 | 103 | 45 | BSD-3-Clause | 2023-09-08T18:41:12 | 2018-09-21T21:53:38 | Python | UTF-8 | Python | false | false | 93 | py | # Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
# See file LICENSE for terms.
| [
"[email protected]"
] | |
c05311f36d454f07ffe8a84a2f0c8d3ccbd05600 | 296132d2c5d95440b3ce5f4401078a6d0f736f5a | /homeassistant/components/tomorrowio/sensor.py | 260ce7390ebe7428c265fc69990adb5dab5d553b | [
"Apache-2.0"
] | permissive | mezz64/home-assistant | 5349a242fbfa182159e784deec580d2800173a3b | 997d4fbe5308b01d14ceabcfe089c2bc511473dd | refs/heads/dev | 2023-03-16T22:31:52.499528 | 2022-12-08T02:55:25 | 2022-12-08T02:55:25 | 68,411,158 | 2 | 1 | Apache-2.0 | 2023-03-10T06:56:54 | 2016-09-16T20:04:27 | Python | UTF-8 | Python | false | false | 14,070 | py | """Sensor component that handles additional Tomorrowio data for your location."""
from __future__ import annotations
from abc import abstractmethod
from collections.abc import Callable
from dataclasses import dataclass
from typing import Any
from pytomorrowio.const import (
HealthConcernType,
PollenIndex,
PrecipitationType,
PrimaryPollutantType,
)
from homeassistant.components.sensor import (
SensorDeviceClass,
SensorEntity,
SensorEntityDescription,
)
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import (
CONCENTRATION_MICROGRAMS_PER_CUBIC_METER,
CONCENTRATION_PARTS_PER_MILLION,
CONF_API_KEY,
CONF_NAME,
IRRADIATION_BTUS_PER_HOUR_SQUARE_FOOT,
IRRADIATION_WATTS_PER_SQUARE_METER,
PERCENTAGE,
UnitOfLength,
UnitOfPressure,
UnitOfSpeed,
UnitOfTemperature,
)
from homeassistant.core import HomeAssistant
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from homeassistant.util import slugify
from homeassistant.util.unit_conversion import DistanceConverter, SpeedConverter
from homeassistant.util.unit_system import US_CUSTOMARY_SYSTEM
from . import TomorrowioDataUpdateCoordinator, TomorrowioEntity
from .const import (
DOMAIN,
TMRW_ATTR_CARBON_MONOXIDE,
TMRW_ATTR_CHINA_AQI,
TMRW_ATTR_CHINA_HEALTH_CONCERN,
TMRW_ATTR_CHINA_PRIMARY_POLLUTANT,
TMRW_ATTR_CLOUD_BASE,
TMRW_ATTR_CLOUD_CEILING,
TMRW_ATTR_CLOUD_COVER,
TMRW_ATTR_DEW_POINT,
TMRW_ATTR_EPA_AQI,
TMRW_ATTR_EPA_HEALTH_CONCERN,
TMRW_ATTR_EPA_PRIMARY_POLLUTANT,
TMRW_ATTR_FEELS_LIKE,
TMRW_ATTR_FIRE_INDEX,
TMRW_ATTR_NITROGEN_DIOXIDE,
TMRW_ATTR_OZONE,
TMRW_ATTR_PARTICULATE_MATTER_10,
TMRW_ATTR_PARTICULATE_MATTER_25,
TMRW_ATTR_POLLEN_GRASS,
TMRW_ATTR_POLLEN_TREE,
TMRW_ATTR_POLLEN_WEED,
TMRW_ATTR_PRECIPITATION_TYPE,
TMRW_ATTR_PRESSURE_SURFACE_LEVEL,
TMRW_ATTR_SOLAR_GHI,
TMRW_ATTR_SULPHUR_DIOXIDE,
TMRW_ATTR_WIND_GUST,
)
@dataclass
class TomorrowioSensorEntityDescription(SensorEntityDescription):
"""Describes a Tomorrow.io sensor entity."""
unit_imperial: str | None = None
unit_metric: str | None = None
multiplication_factor: Callable[[float], float] | float | None = None
imperial_conversion: Callable[[float], float] | float | None = None
value_map: Any | None = None
def __post_init__(self) -> None:
"""Handle post init."""
if (self.unit_imperial is None and self.unit_metric is not None) or (
self.unit_imperial is not None and self.unit_metric is None
):
raise ValueError(
"Entity descriptions must include both imperial and metric units or "
"they must both be None"
)
# From https://cfpub.epa.gov/ncer_abstracts/index.cfm/fuseaction/display.files/fileID/14285
# x ug/m^3 = y ppb * molecular weight / 24.45
def convert_ppb_to_ugm3(molecular_weight: int | float) -> Callable[[float], float]:
"""Return function to convert ppb to ug/m^3."""
return lambda x: (x * molecular_weight) / 24.45
SENSOR_TYPES = (
TomorrowioSensorEntityDescription(
key=TMRW_ATTR_FEELS_LIKE,
name="Feels Like",
native_unit_of_measurement=UnitOfTemperature.CELSIUS,
device_class=SensorDeviceClass.TEMPERATURE,
),
TomorrowioSensorEntityDescription(
key=TMRW_ATTR_DEW_POINT,
name="Dew Point",
native_unit_of_measurement=UnitOfTemperature.CELSIUS,
device_class=SensorDeviceClass.TEMPERATURE,
),
# Data comes in as hPa
TomorrowioSensorEntityDescription(
key=TMRW_ATTR_PRESSURE_SURFACE_LEVEL,
name="Pressure (Surface Level)",
native_unit_of_measurement=UnitOfPressure.HPA,
device_class=SensorDeviceClass.PRESSURE,
),
# Data comes in as W/m^2, convert to BTUs/(hr * ft^2) for imperial
# https://www.theunitconverter.com/watt-square-meter-to-btu-hour-square-foot-conversion/
TomorrowioSensorEntityDescription(
key=TMRW_ATTR_SOLAR_GHI,
name="Global Horizontal Irradiance",
unit_imperial=IRRADIATION_BTUS_PER_HOUR_SQUARE_FOOT,
unit_metric=IRRADIATION_WATTS_PER_SQUARE_METER,
imperial_conversion=(1 / 3.15459),
),
# Data comes in as km, convert to miles for imperial
TomorrowioSensorEntityDescription(
key=TMRW_ATTR_CLOUD_BASE,
name="Cloud Base",
unit_imperial=UnitOfLength.MILES,
unit_metric=UnitOfLength.KILOMETERS,
imperial_conversion=lambda val: DistanceConverter.convert(
val,
UnitOfLength.KILOMETERS,
UnitOfLength.MILES,
),
),
# Data comes in as km, convert to miles for imperial
TomorrowioSensorEntityDescription(
key=TMRW_ATTR_CLOUD_CEILING,
name="Cloud Ceiling",
unit_imperial=UnitOfLength.MILES,
unit_metric=UnitOfLength.KILOMETERS,
imperial_conversion=lambda val: DistanceConverter.convert(
val,
UnitOfLength.KILOMETERS,
UnitOfLength.MILES,
),
),
TomorrowioSensorEntityDescription(
key=TMRW_ATTR_CLOUD_COVER,
name="Cloud Cover",
native_unit_of_measurement=PERCENTAGE,
),
# Data comes in as m/s, convert to mi/h for imperial
TomorrowioSensorEntityDescription(
key=TMRW_ATTR_WIND_GUST,
name="Wind Gust",
unit_imperial=UnitOfSpeed.MILES_PER_HOUR,
unit_metric=UnitOfSpeed.METERS_PER_SECOND,
imperial_conversion=lambda val: SpeedConverter.convert(
val, UnitOfSpeed.METERS_PER_SECOND, UnitOfSpeed.MILES_PER_HOUR
),
),
TomorrowioSensorEntityDescription(
key=TMRW_ATTR_PRECIPITATION_TYPE,
name="Precipitation Type",
value_map=PrecipitationType,
device_class=SensorDeviceClass.ENUM,
options=["freezing_rain", "ice_pellets", "none", "rain", "snow"],
translation_key="precipitation_type",
icon="mdi:weather-snowy-rainy",
),
# Data comes in as ppb, convert to µg/m^3
# Molecular weight of Ozone is 48
TomorrowioSensorEntityDescription(
key=TMRW_ATTR_OZONE,
name="Ozone",
native_unit_of_measurement=CONCENTRATION_MICROGRAMS_PER_CUBIC_METER,
multiplication_factor=convert_ppb_to_ugm3(48),
device_class=SensorDeviceClass.OZONE,
),
TomorrowioSensorEntityDescription(
key=TMRW_ATTR_PARTICULATE_MATTER_25,
name="Particulate Matter < 2.5 μm",
native_unit_of_measurement=CONCENTRATION_MICROGRAMS_PER_CUBIC_METER,
device_class=SensorDeviceClass.PM25,
),
TomorrowioSensorEntityDescription(
key=TMRW_ATTR_PARTICULATE_MATTER_10,
name="Particulate Matter < 10 μm",
native_unit_of_measurement=CONCENTRATION_MICROGRAMS_PER_CUBIC_METER,
device_class=SensorDeviceClass.PM10,
),
# Data comes in as ppb, convert to µg/m^3
# Molecular weight of Nitrogen Dioxide is 46.01
TomorrowioSensorEntityDescription(
key=TMRW_ATTR_NITROGEN_DIOXIDE,
name="Nitrogen Dioxide",
native_unit_of_measurement=CONCENTRATION_MICROGRAMS_PER_CUBIC_METER,
multiplication_factor=convert_ppb_to_ugm3(46.01),
device_class=SensorDeviceClass.NITROGEN_DIOXIDE,
),
# Data comes in as ppb, convert to ppm
TomorrowioSensorEntityDescription(
key=TMRW_ATTR_CARBON_MONOXIDE,
name="Carbon Monoxide",
native_unit_of_measurement=CONCENTRATION_PARTS_PER_MILLION,
multiplication_factor=1 / 1000,
device_class=SensorDeviceClass.CO,
),
# Data comes in as ppb, convert to µg/m^3
# Molecular weight of Sulphur Dioxide is 64.07
TomorrowioSensorEntityDescription(
key=TMRW_ATTR_SULPHUR_DIOXIDE,
name="Sulphur Dioxide",
native_unit_of_measurement=CONCENTRATION_MICROGRAMS_PER_CUBIC_METER,
multiplication_factor=convert_ppb_to_ugm3(64.07),
device_class=SensorDeviceClass.SULPHUR_DIOXIDE,
),
TomorrowioSensorEntityDescription(
key=TMRW_ATTR_EPA_AQI,
name="US EPA Air Quality Index",
device_class=SensorDeviceClass.AQI,
),
TomorrowioSensorEntityDescription(
key=TMRW_ATTR_EPA_PRIMARY_POLLUTANT,
name="US EPA Primary Pollutant",
value_map=PrimaryPollutantType,
),
TomorrowioSensorEntityDescription(
key=TMRW_ATTR_EPA_HEALTH_CONCERN,
name="US EPA Health Concern",
value_map=HealthConcernType,
device_class=SensorDeviceClass.ENUM,
options=[
"good",
"hazardous",
"moderate",
"unhealthy_for_sensitive_groups",
"unhealthy",
"very_unhealthy",
],
translation_key="health_concern",
icon="mdi:hospital",
),
TomorrowioSensorEntityDescription(
key=TMRW_ATTR_CHINA_AQI,
name="China MEP Air Quality Index",
device_class=SensorDeviceClass.AQI,
),
TomorrowioSensorEntityDescription(
key=TMRW_ATTR_CHINA_PRIMARY_POLLUTANT,
name="China MEP Primary Pollutant",
value_map=PrimaryPollutantType,
),
TomorrowioSensorEntityDescription(
key=TMRW_ATTR_CHINA_HEALTH_CONCERN,
name="China MEP Health Concern",
value_map=HealthConcernType,
device_class=SensorDeviceClass.ENUM,
options=[
"good",
"hazardous",
"moderate",
"unhealthy_for_sensitive_groups",
"unhealthy",
"very_unhealthy",
],
translation_key="health_concern",
icon="mdi:hospital",
),
TomorrowioSensorEntityDescription(
key=TMRW_ATTR_POLLEN_TREE,
name="Tree Pollen Index",
value_map=PollenIndex,
device_class=SensorDeviceClass.ENUM,
options=["high", "low", "medium", "none", "very_high", "very_low"],
translation_key="pollen_index",
icon="mdi:flower-pollen",
),
TomorrowioSensorEntityDescription(
key=TMRW_ATTR_POLLEN_WEED,
name="Weed Pollen Index",
value_map=PollenIndex,
device_class=SensorDeviceClass.ENUM,
options=["high", "low", "medium", "none", "very_high", "very_low"],
translation_key="pollen_index",
icon="mdi:flower-pollen",
),
TomorrowioSensorEntityDescription(
key=TMRW_ATTR_POLLEN_GRASS,
name="Grass Pollen Index",
value_map=PollenIndex,
device_class=SensorDeviceClass.ENUM,
options=["high", "low", "medium", "none", "very_high", "very_low"],
translation_key="pollen_index",
icon="mdi:flower-pollen",
),
TomorrowioSensorEntityDescription(
TMRW_ATTR_FIRE_INDEX,
name="Fire Index",
icon="mdi:fire",
),
)
async def async_setup_entry(
hass: HomeAssistant,
config_entry: ConfigEntry,
async_add_entities: AddEntitiesCallback,
) -> None:
"""Set up a config entry."""
coordinator = hass.data[DOMAIN][config_entry.data[CONF_API_KEY]]
entities = [
TomorrowioSensorEntity(hass, config_entry, coordinator, 4, description)
for description in SENSOR_TYPES
]
async_add_entities(entities)
def handle_conversion(
value: float | int, conversion: Callable[[float], float] | float
) -> float:
"""Handle conversion of a value based on conversion type."""
if callable(conversion):
return round(conversion(float(value)), 2)
return round(float(value) * conversion, 2)
class BaseTomorrowioSensorEntity(TomorrowioEntity, SensorEntity):
"""Base Tomorrow.io sensor entity."""
entity_description: TomorrowioSensorEntityDescription
_attr_entity_registry_enabled_default = False
def __init__(
self,
hass: HomeAssistant,
config_entry: ConfigEntry,
coordinator: TomorrowioDataUpdateCoordinator,
api_version: int,
description: TomorrowioSensorEntityDescription,
) -> None:
"""Initialize Tomorrow.io Sensor Entity."""
super().__init__(config_entry, coordinator, api_version)
self.entity_description = description
self._attr_name = f"{self._config_entry.data[CONF_NAME]} - {description.name}"
self._attr_unique_id = (
f"{self._config_entry.unique_id}_{slugify(description.name)}"
)
if self.entity_description.native_unit_of_measurement is None:
self._attr_native_unit_of_measurement = description.unit_metric
if hass.config.units is US_CUSTOMARY_SYSTEM:
self._attr_native_unit_of_measurement = description.unit_imperial
@property
@abstractmethod
def _state(self) -> int | float | None:
"""Return the raw state."""
@property
def native_value(self) -> str | int | float | None:
"""Return the state."""
state = self._state
desc = self.entity_description
if state is None:
return state
if desc.value_map is not None:
return desc.value_map(state).name.lower()
if desc.multiplication_factor is not None:
state = handle_conversion(state, desc.multiplication_factor)
# If there is an imperial conversion needed and the instance is using imperial,
# apply the conversion logic.
if (
desc.imperial_conversion
and desc.unit_imperial is not None
and desc.unit_imperial != desc.unit_metric
and self.hass.config.units is US_CUSTOMARY_SYSTEM
):
return handle_conversion(state, desc.imperial_conversion)
return state
class TomorrowioSensorEntity(BaseTomorrowioSensorEntity):
"""Sensor entity that talks to Tomorrow.io v4 API to retrieve non-weather data."""
@property
def _state(self) -> int | float | None:
"""Return the raw state."""
val = self._get_current_property(self.entity_description.key)
assert not isinstance(val, str)
return val
| [
"[email protected]"
] | |
6f9c7fb744dc3a96ad6bb2b4190e57f301d9d99f | 3ee0d5a2cc955c4fb5583f4b88463e783cad8e9e | /examples/ds3/t370401.py | 17d5a22cc327ae9d733236fe6ad1666e4e3c0613 | [] | no_license | vawser/ESDLang | 47b18f7f14b26ae24d8c39d20701ffb0e0017f3c | 9455d423f4fae534abba7b98339c61e7f1350f53 | refs/heads/master | 2021-04-18T13:30:42.990177 | 2019-09-17T04:33:46 | 2019-09-17T04:33:46 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,518 | py | # -*- coding: utf-8 -*-
def t370401_1():
"""State 0,1"""
assert GetCurrentStateElapsedTime() > 1
while True:
"""State 2"""
call = t370401_x10()
assert IsClientPlayer() == 1
"""State 3"""
call = t370401_x11()
assert not IsClientPlayer()
def t370401_x0(z2=6000, flag3=1575, flag4=6000, flag5=6000, flag6=6000, flag7=6000):
"""State 0"""
while True:
"""State 1"""
assert (not GetOneLineHelpStatus() and not IsTalkingToSomeoneElse() and not IsClientPlayer()
and not IsPlayerDead() and not IsCharacterDisabled())
"""State 3"""
assert (GetEventStatus(flag3) == 1 or GetEventStatus(flag4) == 1 or GetEventStatus(flag5) ==
1 or GetEventStatus(flag6) == 1 or GetEventStatus(flag7) == 1)
"""State 2"""
if (not (not GetOneLineHelpStatus() and not IsTalkingToSomeoneElse() and not IsClientPlayer()
and not IsPlayerDead() and not IsCharacterDisabled())):
pass
elif (not GetEventStatus(flag3) and not GetEventStatus(flag4) and not GetEventStatus(flag5) and
not GetEventStatus(flag6) and not GetEventStatus(flag7)):
pass
elif CheckActionButtonArea(z2):
break
"""State 4"""
return 0
def t370401_x1():
"""State 0,1"""
if not CheckSpecificPersonTalkHasEnded(0):
"""State 7"""
ClearTalkProgressData()
StopEventAnimWithoutForcingConversationEnd(0)
"""State 6"""
ReportConversationEndToHavokBehavior()
else:
pass
"""State 2"""
if CheckSpecificPersonGenericDialogIsOpen(0) == 1:
"""State 3"""
ForceCloseGenericDialog()
else:
pass
"""State 4"""
if CheckSpecificPersonMenuIsOpen(-1, 0) == 1 and not CheckSpecificPersonGenericDialogIsOpen(0):
"""State 5"""
ForceCloseMenu()
else:
pass
"""State 8"""
return 0
def t370401_x2():
"""State 0,1"""
ClearTalkProgressData()
StopEventAnimWithoutForcingConversationEnd(0)
ForceCloseGenericDialog()
ForceCloseMenu()
ReportConversationEndToHavokBehavior()
"""State 2"""
return 0
def t370401_x3(text2=_, z1=_, flag2=0, mode2=1):
"""State 0,5"""
assert t370401_x2() and CheckSpecificPersonTalkHasEnded(0) == 1
"""State 2"""
SetEventState(z1, 1)
"""State 1"""
TalkToPlayer(text2, -1, -1, flag2)
assert CheckSpecificPersonTalkHasEnded(0) == 1
"""State 4"""
if not mode2:
pass
else:
"""State 3"""
ReportConversationEndToHavokBehavior()
"""State 6"""
return 0
def t370401_x4(text1=_, flag1=0, mode1=1):
"""State 0,4"""
assert t370401_x2() and CheckSpecificPersonTalkHasEnded(0) == 1
"""State 1"""
TalkToPlayer(text1, -1, -1, flag1)
assert CheckSpecificPersonTalkHasEnded(0) == 1
"""State 3"""
if not mode1:
pass
else:
"""State 2"""
ReportConversationEndToHavokBehavior()
"""State 5"""
return 0
def t370401_x5():
"""State 0,1,2"""
if not GetEventStatus(50006301):
"""State 3,7"""
# talk:40000200:"Welcome, our gracious Lord."
assert t370401_x4(text1=40000200, flag1=0, mode1=1)
"""State 5"""
SetEventState(73700330, 1)
else:
"""State 4,6"""
# talk:40000300:"Your spouse awaits you, you are very near."
assert t370401_x4(text1=40000300, flag1=0, mode1=1)
"""State 8"""
return 0
def t370401_x6():
"""State 0,7"""
assert t370401_x1()
"""State 4"""
assert GetCurrentStateElapsedFrames() > 1
"""State 1"""
assert not GetEventStatus(1576) and not GetEventStatus(1577)
"""State 2"""
if GetDistanceToPlayer() < 10:
"""State 5,9"""
call = t370401_x14()
if call.Done():
pass
elif GetEventStatus(1576) == 1 or GetEventStatus(1577) == 1:
"""State 3"""
Quit()
elif GetDistanceToPlayer() > 12:
"""State 8"""
assert t370401_x1()
else:
"""State 6"""
pass
"""State 10"""
return 0
def t370401_x7():
"""State 0,1"""
if GetEventStatus(1578) == 1:
"""State 2"""
pass
else:
"""State 3"""
if GetDistanceToPlayer() < 10:
"""State 4,7"""
# talk:40000800:"Ahh, our gracious Lord..."
call = t370401_x4(text1=40000800, flag1=0, mode1=1)
if call.Done():
pass
elif GetDistanceToPlayer() > 12:
"""State 6"""
assert t370401_x1()
else:
"""State 5"""
pass
"""State 8"""
return 0
def t370401_x8():
"""State 0,2,1,3"""
return 0
def t370401_x9():
"""State 0,1,2,3"""
assert t370401_x1()
"""State 4"""
return 0
def t370401_x10():
"""State 0"""
while True:
"""State 1"""
call = t370401_x12()
assert not GetEventStatus(1564)
"""State 2"""
call = t370401_x13()
assert GetEventStatus(1564) == 1
def t370401_x11():
"""State 0,1"""
assert t370401_x1()
"""State 2"""
return 0
def t370401_x12():
"""State 0,2"""
call = t370401_x15()
assert CheckSelfDeath() == 1
"""State 1"""
t370401_x7()
def t370401_x13():
"""State 0"""
def t370401_x14():
"""State 0,1"""
if not GetEventStatus(73700321):
"""State 2,6"""
# talk:40000400:" "
assert t370401_x3(text2=40000400, z1=73700321, flag2=0, mode2=1)
elif not GetEventStatus(73700322):
"""State 3,7"""
# talk:40000500:" "
assert t370401_x3(text2=40000500, z1=73700322, flag2=0, mode2=1)
else:
"""State 4,5"""
SetEventState(73700321, 0)
SetEventState(73700322, 0)
"""State 8"""
# talk:40000600:"Whatever for!"
assert t370401_x4(text1=40000600, flag1=0, mode1=1)
"""State 9"""
return 0
def t370401_x15():
"""State 0"""
while True:
"""State 6"""
call = t370401_x0(z2=6000, flag3=1575, flag4=6000, flag5=6000, flag6=6000, flag7=6000)
if call.Done():
"""State 4"""
call = t370401_x5()
if call.Done():
pass
elif IsAttackedBySomeone() == 1:
"""State 2"""
Label('L0')
call = t370401_x6()
def ExitPause():
RemoveMyAggro()
if call.Done():
pass
elif IsPlayerDead() == 1:
break
elif IsPlayerDead() == 1:
break
elif GetDistanceToPlayer() > 5:
"""State 5"""
call = t370401_x9()
if call.Done() and GetDistanceToPlayer() < 4.9:
pass
elif IsAttackedBySomeone() == 1:
Goto('L0')
elif IsAttackedBySomeone() == 1:
Goto('L0')
elif IsPlayerDead() == 1:
break
elif GetEventStatus(73700330) == 1:
"""State 1"""
assert not GetEventStatus(73700330)
"""State 3"""
t370401_x8()
| [
"[email protected]"
] | |
6daf09de199c4720ba3b97533878c2c6117c7379 | 2be43de3e8b6ce2f46da2c9afb021a6ea2abb74a | /neighbour/settings.py | 0c3691a9f2cd1a344b46b36ff6d4cc39343afdea | [
"LicenseRef-scancode-other-permissive"
] | permissive | CollinsMuiruri/IS-PROJECT | 345877fe61d14e94c8ec4a0bf8f6a4d76698fd43 | 2e59bb95a6dc3483e699140bde6792f6e92e1356 | refs/heads/master | 2021-09-09T12:01:01.065687 | 2019-07-16T14:29:46 | 2019-07-16T14:29:46 | 197,208,301 | 0 | 0 | null | 2021-09-08T01:08:59 | 2019-07-16T14:19:51 | Python | UTF-8 | Python | false | false | 3,667 | py | """
Django settings for neighbour project.
Generated by 'django-admin startproject' using Django 1.11.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
import os
import dj_database_url
from decouple import config
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = config('SECRET_KEY')
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = config('DEBUG', default=False, cast=bool)
ALLOWED_HOSTS = ['*']
# Application definition
INSTALLED_APPS = [
'hood',
'chief',
'bootstrap3',
'bootstrap4',
'jet_django',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'neighbour.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'neighbour.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql',
'NAME': 'neighbour',
'USER': 'collins',
'PASSWORD': 'wildgoosechase'
}
}
db_from_env = dj_database_url.config(conn_max_age=500)
DATABASES['default'].update(db_from_env)
# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = [
os.path.join(BASE_DIR, "static"),
]
STATIC_ROOT = os.path.join(BASE_DIR, "staticfiles")
STATICFILES_STORAGE = 'whitenoise.django.GzipManifestStaticFilesStorage'
MEDIA_URL = '/media/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
| [
"[email protected]"
] | |
047e8abdf1b097fc3d9312e4b3df9a03efecc976 | 11ca230c3db96ac41fa90104d502fde51aae306c | /04.기하학적 변환/6.remapping.py | 562d2fb460ecc045ae972bc4af5e18ea09445cfa | [] | no_license | wonsgong/Computer-Vision | e849ead6cea5ab5c274ef78643961a6138a6e975 | 09ada035299032337498f36198d2b8d3c3de1f01 | refs/heads/main | 2023-05-30T15:37:19.548360 | 2021-06-09T10:27:08 | 2021-06-09T10:27:08 | 353,696,068 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 382 | py | import sys
import numpy as np
import cv2
src = cv2.imread('image/tekapo.bmp')
if src is None:
print("Image load failed")
sys.exit()
h,w = src.shape[:2]
mapy, mapx = np.indices((h,w),dtype=np.float32)
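# start from the identity mapping, then shift each column vertically with a
# sine of its x coordinate (amplitude 10 px, period 2*pi*32 ~ 201 px)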
mapy = mapy + 10 * np.sin(mapx / 32)
dst = cv2.remap(src,mapx,mapy,cv2.INTER_LINEAR)
cv2.imshow('src',src)
cv2.imshow('dst',dst)
cv2.waitKey()
cv2.destroyAllWindows()
| [
"[email protected]"
] | |
91f0986327d0af5377be5d57f2f62bb0a350c79c | 2d93403fac1645fdbf1727f0d17fbea6eeef470a | /decorators/class_decorator.py | f027fda59d0aee11df10298deccf8addf27338d7 | [
"MIT"
] | permissive | Minkov/python-oop-2020-02 | d13c8c8feaa9ad41c524fc82887a98745115ac57 | d2acb1504c1a135cded2ae6ff42acccb303d9ab1 | refs/heads/master | 2021-02-04T00:43:14.997404 | 2020-03-26T18:21:03 | 2020-03-26T18:21:03 | 243,588,830 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 539 | py | class Logger:
def __init__(self, function):
self.function = function
    def __call__(self, *args, **kwargs):
        # __call__ runs in place of the decorated function, so invoke the
        # stored callable directly, logging and re-raising any exception.
        try:
            return self.function(*args, **kwargs)
        except Exception as ex:
            # with open(file, 'a') as log_file:
            #     log_file.write(f'{ex} thrown from {self.function.__name__}\n')
            print(f'{ex} thrown from {self.function.__name__}')
            raise ex | [
"[email protected]"
] | |
0366f2bbf07bea5d9926d82c21d9601671a10744 | 2d3cb7101cae992a58a1b91ee22be7285bc3154e | /pyart/core/setup.py | be4d0e3bc25ad59e259bea291dc0e2f41650a9f4 | [
"BSD-3-Clause"
] | permissive | scollis/pyart | 341aca11a1e1b43482028bb688ad901e61f9a494 | 1a74b33e33df024cbc203ab1936eb5e7df4e92e7 | refs/heads/main | 2022-02-19T11:57:09.279907 | 2022-01-20T17:31:02 | 2022-01-20T17:31:02 | 450,486,932 | 1 | 0 | NOASSERTION | 2022-01-21T12:41:51 | 2022-01-21T12:41:51 | null | UTF-8 | Python | false | false | 343 | py |
def configuration(parent_package='', top_path=None):
from numpy.distutils.misc_util import Configuration
config = Configuration('core', parent_package, top_path)
config.add_data_dir('tests')
return config
if __name__ == '__main__':
from numpy.distutils.core import setup
setup(**configuration(top_path='').todict())
| [
"[email protected]"
] | |
b68a8a14342800adc9d59d021eb2f95735b0fe85 | c39f999cae8825afe2cdf1518d93ba31bd4c0e95 | /PYME/DSView/modules/coloc.py | 0cd204438b7107a81e42648c9b67dddfd168f0b5 | [] | no_license | WilliamRo/CLipPYME | 0b69860136a9b2533f2f29fc29408d7471cb934d | 6596167034c727ad7dad0a741dd59e0e48f6852a | refs/heads/master | 2023-05-11T09:50:58.605989 | 2023-05-09T02:17:47 | 2023-05-09T02:17:47 | 60,789,741 | 3 | 1 | null | 2016-06-17T08:52:44 | 2016-06-09T16:30:14 | Python | UTF-8 | Python | false | false | 10,125 | py | #!/usr/bin/python
##################
# coloc.py
#
# Copyright David Baddeley, 2011
# [email protected]
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##################
import numpy
import numpy as np
import wx
import pylab
from PYME.DSView.dsviewer_npy_nb import ViewIm3D, ImageStack
class ColocSettingsDialog(wx.Dialog):
def __init__(self, parent, pxSize=100, names = []):
wx.Dialog.__init__(self, parent, title='Colocalisation Settings')
sizer1 = wx.BoxSizer(wx.VERTICAL)
hsizer = wx.BoxSizer(wx.HORIZONTAL)
hsizer.Add(wx.StaticText(self, -1, 'Minimum Distance:'), 1,wx.ALL|wx.ALIGN_CENTER_VERTICAL,5)
self.tMin = wx.TextCtrl(self, -1, '-600')
hsizer.Add(self.tMin, 0, wx.ALL|wx.ALIGN_CENTER_VERTICAL, 5)
sizer1.Add(hsizer, 0, wx.EXPAND)
hsizer = wx.BoxSizer(wx.HORIZONTAL)
hsizer.Add(wx.StaticText(self, -1, 'Maximum Distance:'), 1,wx.ALL|wx.ALIGN_CENTER_VERTICAL,5)
self.tMax = wx.TextCtrl(self, -1, '2000')
hsizer.Add(self.tMax, 0, wx.ALL|wx.ALIGN_CENTER_VERTICAL, 5)
sizer1.Add(hsizer, 0, wx.EXPAND)
hsizer = wx.BoxSizer(wx.HORIZONTAL)
hsizer.Add(wx.StaticText(self, -1, 'Bin Size:'), 1,wx.ALL|wx.ALIGN_CENTER_VERTICAL,5)
self.tStep = wx.TextCtrl(self, -1, '%d' % (2*pxSize))
hsizer.Add(self.tStep, 0, wx.ALL|wx.ALIGN_CENTER_VERTICAL, 5)
sizer1.Add(hsizer, 0, wx.EXPAND)
if len(names) > 0:
hsizer = wx.BoxSizer(wx.HORIZONTAL)
hsizer.Add(wx.StaticText(self, -1, '1st Channel:'), 1,wx.ALL|wx.ALIGN_CENTER_VERTICAL,5)
self.cChan1 = wx.Choice(self, -1, choices=names)
self.cChan1.SetSelection(0)
hsizer.Add(self.cChan1, 0, wx.ALL|wx.ALIGN_CENTER_VERTICAL, 5)
sizer1.Add(hsizer, 0, wx.EXPAND)
hsizer = wx.BoxSizer(wx.HORIZONTAL)
hsizer.Add(wx.StaticText(self, -1, '2st Channel:'), 1,wx.ALL|wx.ALIGN_CENTER_VERTICAL,5)
self.cChan2 = wx.Choice(self, -1, choices=names)
self.cChan2.SetSelection(1)
hsizer.Add(self.cChan2, 0, wx.ALL|wx.ALIGN_CENTER_VERTICAL, 5)
sizer1.Add(hsizer, 0, wx.EXPAND)
bOK = wx.Button(self, wx.ID_OK, 'OK')
sizer1.Add(bOK, 0, wx.ALL|wx.ALIGN_RIGHT, 5)
self.SetSizerAndFit(sizer1)
def GetBins(self):
return numpy.arange(float(self.tMin.GetValue()), float(self.tMax.GetValue()), float(self.tStep.GetValue()))
def GetChans(self):
return [self.cChan1.GetSelection(), self.cChan2.GetSelection()]
class colocaliser:
def __init__(self, dsviewer):
self.dsviewer = dsviewer
self.do = dsviewer.do
self.image = dsviewer.image
PROC_COLOCALISE = wx.NewId()
PROC_COLOCALISE_EDT = wx.NewId()
dsviewer.mProcessing.AppendSeparator()
dsviewer.mProcessing.Append(PROC_COLOCALISE, "&Colocalisation", "", wx.ITEM_NORMAL)
dsviewer.mProcessing.Append(PROC_COLOCALISE_EDT, "EDT Colocalisation", "", wx.ITEM_NORMAL)
wx.EVT_MENU(dsviewer, PROC_COLOCALISE, self.OnColocBasic)
wx.EVT_MENU(dsviewer, PROC_COLOCALISE_EDT, self.OnColoc)
def OnColoc(self, event):
from PYME.Analysis.Colocalisation import correlationCoeffs, edtColoc
voxelsize = [1e3*self.image.mdh.getEntry('voxelsize.x') ,1e3*self.image.mdh.getEntry('voxelsize.y'), 1e3*self.image.mdh.getEntry('voxelsize.z')]
try:
names = self.image.mdh.getEntry('ChannelNames')
except:
names = ['Channel %d' % n for n in range(self.image.data.shape[3])]
dlg = ColocSettingsDialog(self.dsviewer, voxelsize[0], names)
dlg.ShowModal()
bins = dlg.GetBins()
chans = dlg.GetChans()
dlg.Destroy()
#assume we have exactly 2 channels #FIXME - add a selector
#grab image data
imA = self.image.data[:,:,:,chans[0]].squeeze()
imB = self.image.data[:,:,:,chans[1]].squeeze()
#assume threshold is half the colour bounds - good if using threshold mode
tA = self.do.Offs[chans[0]] + .5/self.do.Gains[chans[0]] #pylab.mean(self.ivps[0].clim)
tB = self.do.Offs[chans[1]] + .5/self.do.Gains[chans[1]] #pylab.mean(self.ivps[0].clim)
nameA = names[chans[0]]
nameB = names[chans[1]]
        voxelsize = voxelsize[:imA.ndim] # truncate to number of dimensions
print('Calculating Pearson and Manders coefficients ...')
pearson = correlationCoeffs.pearson(imA, imB)
MA, MB = correlationCoeffs.thresholdedManders(imA, imB, tA, tB)
print('Performing distance transform ...')
bnA, bmA, binsA = edtColoc.imageDensityAtDistance(imB, imA > tA, voxelsize, bins)
print('Performing distance transform (reversed) ...')
bnB, bmB, binsB = edtColoc.imageDensityAtDistance(imA, imB > tB, voxelsize, bins)
#print binsB, bmB
plots = []
pnames = []
pylab.figure()
pylab.figtext(.1, .95, 'Pearson: %2.2f M1: %2.2f M2: %2.2f' % (pearson, MA, MB))
pylab.subplot(211)
p = bmA/bmA.sum()
#print p
pylab.bar(binsA[:-1], p, binsA[1] - binsA[0])
pylab.xlabel('Distance from edge of %s [nm]' % nameA)
pylab.ylabel('Density of %s' % nameB)
plots.append(p.reshape(-1, 1,1))
pnames.append('Dens. %s from %s' % (nameB, nameA))
pylab.subplot(212)
p = bmB/bmB.sum()
pylab.bar(binsB[:-1], p, binsB[1] - binsB[0])
pylab.xlabel('Distance from edge of %s [nm]' % nameB)
pylab.ylabel('Density of %s' % nameA)
plots.append(p.reshape(-1, 1,1))
pnames.append('Dens. %s from %s' % (nameA, nameB))
pylab.figure()
pylab.figtext(.1, .95, 'Pearson: %2.2f M1: %2.2f M2: %2.2f' % (pearson, MA, MB))
pylab.subplot(211)
fA = bmA*bnA
p = fA/fA.sum()
pylab.bar(binsA[:-1], p, binsA[1] - binsA[0])
pylab.xlabel('Distance from edge of %s [nm]' % nameA)
pylab.ylabel('Fraction of %s' % nameB)
plots.append(p.reshape(-1, 1,1))
pnames.append('Frac. %s from %s' % (nameB, nameA))
pylab.subplot(212)
fB = bmB*bnB
p = fB/fB.sum()
pylab.bar(binsB[:-1], p, binsB[1] - binsB[0])
pylab.xlabel('Distance from edge of %s [nm]' % nameB)
pylab.ylabel('Fraction of %s' % nameA)
plots.append(p.reshape(-1, 1,1))
pnames.append('Frac. %s from %s' % (nameA, nameB))
pylab.show()
im = ImageStack(plots, titleStub='Radial Distribution')
im.xvals = bins[:-1]
im.xlabel = 'Distance [nm]'
im.ylabel = 'Fraction'
im.defaultExt = '.txt'
im.mdh['voxelsize.x'] = (bins[1] - bins[0])*1e-3
im.mdh['ChannelNames'] = pnames
im.mdh['Profile.XValues'] = im.xvals
im.mdh['Profile.XLabel'] = im.xlabel
im.mdh['Profile.YLabel'] = im.ylabel
im.mdh['Colocalisation.Channels'] = names
im.mdh['Colocalisation.Thresholds'] = [tA, tB]
im.mdh['Colocalisation.Pearson'] = pearson
im.mdh['Colocalisation.Manders'] = [MA, MB]
im.mdh['OriginalImage'] = self.image.filename
ViewIm3D(im, mode='graph')
def OnColocBasic(self, event):
from PYME.Analysis.Colocalisation import correlationCoeffs, edtColoc
voxelsize = [1e3*self.image.mdh.getEntry('voxelsize.x') ,1e3*self.image.mdh.getEntry('voxelsize.y'), 1e3*self.image.mdh.getEntry('voxelsize.z')]
try:
names = self.image.mdh.getEntry('ChannelNames')
except:
names = ['Channel %d' % n for n in range(self.image.data.shape[3])]
dlg = ColocSettingsDialog(self.dsviewer, voxelsize[0], names)
dlg.ShowModal()
bins = dlg.GetBins()
chans = dlg.GetChans()
dlg.Destroy()
#assume we have exactly 2 channels #FIXME - add a selector
#grab image data
imA = self.image.data[:,:,:,chans[0]].squeeze()
imB = self.image.data[:,:,:,chans[1]].squeeze()
#assume threshold is half the colour bounds - good if using threshold mode
tA = self.do.Offs[chans[0]] + .5/self.do.Gains[chans[0]] #pylab.mean(self.ivps[0].clim)
tB = self.do.Offs[chans[1]] + .5/self.do.Gains[chans[1]] #pylab.mean(self.ivps[0].clim)
nameA = names[chans[0]]
nameB = names[chans[1]]
        voxelsize = voxelsize[:imA.ndim] # truncate to number of dimensions
print('Calculating Pearson and Manders coefficients ...')
pearson = correlationCoeffs.pearson(imA, imB)
MA, MB = correlationCoeffs.thresholdedManders(imA, imB, tA, tB)
I1 = imA.ravel()
I2 = imB.ravel()
h1 = np.histogram2d(np.clip(I1/I1.mean(), 0, 100), np.clip(I2/I2.mean(), 0, 100), 200)
pylab.figure()
pylab.figtext(.1, .95, 'Pearson: %2.2f M1: %2.2f M2: %2.2f' % (pearson, MA, MB))
pylab.subplot(111)
pylab.imshow(np.log10(h1[0] + .1).T)
pylab.xlabel('%s' % nameA)
pylab.ylabel('%s' % nameB)
pylab.show()
def Plug(dsviewer):
dsviewer.coloc = colocaliser(dsviewer)
| [
"[email protected]"
] | |
ad509fa452ed2f6659bfbdd82033485f1dd7412f | 6fa7f99d3d3d9b177ef01ebf9a9da4982813b7d4 | /yFEMocjdiRjPhoDqv_2.py | 85c35556b13ff3699cdce5d83ac3bd06301dcef3 | [] | no_license | daniel-reich/ubiquitous-fiesta | 26e80f0082f8589e51d359ce7953117a3da7d38c | 9af2700dbe59284f5697e612491499841a6c126f | refs/heads/master | 2023-04-05T06:40:37.328213 | 2021-04-06T20:17:44 | 2021-04-06T20:17:44 | 355,318,759 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 554 | py |
def prime_in_range(n1, n2):
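  # Collect the integers from n1 to n2 and return True as soon as one of them
  # has exactly two divisors (1 and itself, i.e. it is prime); otherwise False.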
Range = []
Start = n1
End = n2
while (Start <= End):
Range.append(Start)
Start += 1
Counter = 0
Length = len(Range)
while (Counter < Length):
Value = Range[Counter]
Factor = 1
Factors = []
while (Factor <= Value):
if (Value % Factor == 0):
Factors.append(Factor)
Factor += 1
else:
Factor += 1
Span = len(Factors)
if (Span == 2):
return True
else:
Counter += 1
return False
| [
"[email protected]"
] | |
15167f5da718def7ad4042fcb6379b086f5a8513 | dcb9c42dde1436a474dbedbde9f30eaabc898ad3 | /scripts/marline-khavele.py | d7a8fa0c4e8f06ad48bbec6932ac708ccc817819 | [] | no_license | farouk-afolabi/HNG-Script | c96aba3264d1f6d2e12119131d4126e680f9bb81 | 5cd2796f7a3c3d72237232237b9c68b666bf5dee | refs/heads/master | 2022-09-27T19:15:30.303847 | 2020-06-03T21:06:46 | 2020-06-03T21:06:46 | 269,051,273 | 1 | 1 | null | 2020-06-03T18:59:39 | 2020-06-03T09:54:36 | JavaScript | UTF-8 | Python | false | false | 175 | py | # stage 2 task
name = "Marline Khavele"
id = "HNG-04957"
language = "python"
print(
f"Hello World, This is {name } with HNGi7 {id} using {language} for stage 2 task"
)
| [
"[email protected]"
] | |
840a8257774b3b458a5efd8b803cc773510357c9 | c88a6e7d909746d8473bc2300c37920c4295bb78 | /src/transformers/models/opt/modeling_opt.py | 9339b98ea8a67e71400b687e5507c47017fab524 | [
"Apache-2.0"
] | permissive | kssteven418/BigLittleDecoder | d54ea8d474eaddec7f4d8b8f42f5ef57612d4ce5 | a31b7e3efe893ac4cba48f679138a9b9ccfb7c63 | refs/heads/main | 2023-05-22T14:25:23.773708 | 2023-02-26T22:36:39 | 2023-02-26T22:36:39 | 599,960,048 | 47 | 3 | Apache-2.0 | 2023-02-26T22:36:40 | 2023-02-10T09:07:37 | Python | UTF-8 | Python | false | false | 56,729 | py | # coding=utf-8
# Copyright 2022 The Fairseq Authors and The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" PyTorch OPT model."""
import random
from typing import List, Optional, Tuple, Union
import torch
import torch.utils.checkpoint
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACT2FN
from ...modeling_outputs import (
BaseModelOutputWithPast,
CausalLMOutputWithPast,
QuestionAnsweringModelOutput,
SequenceClassifierOutputWithPast,
)
from ...modeling_utils import PreTrainedModel
from ...utils import (
add_code_sample_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
logging,
replace_return_docstrings,
)
from .configuration_opt import OPTConfig
logger = logging.get_logger(__name__)
_CHECKPOINT_FOR_DOC = "facebook/opt-350m"
_CONFIG_FOR_DOC = "OPTConfig"
_TOKENIZER_FOR_DOC = "GPT2Tokenizer"
# Base model docstring
_EXPECTED_OUTPUT_SHAPE = [1, 8, 1024]
# SequenceClassification docstring
_CHECKPOINT_FOR_SEQUENCE_CLASSIFICATION = "ArthurZ/opt-350m-dummy-sc"
_SEQ_CLASS_EXPECTED_LOSS = 1.71
_SEQ_CLASS_EXPECTED_OUTPUT = "'LABEL_0'"
OPT_PRETRAINED_MODEL_ARCHIVE_LIST = [
"facebook/opt-125m",
"facebook/opt-350m",
"facebook/opt-1.3b",
"facebook/opt-2.7b",
"facebook/opt-6.7b",
"facebook/opt-13b",
"facebook/opt-30b",
# See all OPT models at https://huggingface.co/models?filter=opt
]
def _make_causal_mask(input_ids_shape: torch.Size, dtype: torch.dtype, past_key_values_length: int = 0):
"""
    Make causal mask used for uni-directional (causal) self-attention.
"""
bsz, tgt_len = input_ids_shape
mask = torch.full((tgt_len, tgt_len), torch.tensor(torch.finfo(dtype).min))
mask_cond = torch.arange(mask.size(-1))
mask.masked_fill_(mask_cond < (mask_cond + 1).view(mask.size(-1), 1), 0)
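    # e.g. for tgt_len = 3 this yields [[0, min, min], [0, 0, min], [0, 0, 0]]:
    # every position can attend to itself and to earlier positions only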
mask = mask.to(dtype)
if past_key_values_length > 0:
mask = torch.cat([torch.zeros(tgt_len, past_key_values_length, dtype=dtype), mask], dim=-1)
return mask[None, None, :, :].expand(bsz, 1, tgt_len, tgt_len + past_key_values_length)
def _expand_mask(mask: torch.Tensor, dtype: torch.dtype, tgt_len: Optional[int] = None):
"""
Expands attention_mask from `[bsz, seq_len]` to `[bsz, 1, tgt_seq_len, src_seq_len]`.
"""
bsz, src_len = mask.size()
tgt_len = tgt_len if tgt_len is not None else src_len
expanded_mask = mask[:, None, None, :].expand(bsz, 1, tgt_len, src_len).to(dtype)
inverted_mask = 1.0 - expanded_mask
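    # positions marked 1 (attend) become 0.0, positions marked 0 (padding)
    # become the most negative value representable in `dtype`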
return inverted_mask.masked_fill(inverted_mask.to(torch.bool), torch.finfo(dtype).min)
class OPTLearnedPositionalEmbedding(nn.Embedding):
"""
This module learns positional embeddings up to a fixed maximum size.
"""
def __init__(self, num_embeddings: int, embedding_dim: int):
# OPT is set up so that if padding_idx is specified then offset the embedding ids by 2
# and adjust num_embeddings appropriately. Other models don't have this hack
self.offset = 2
super().__init__(num_embeddings + self.offset, embedding_dim)
def forward(self, attention_mask: torch.LongTensor, past_key_values_length: int = 0):
"""`input_ids_shape` is expected to be [bsz x seqlen]."""
attention_mask = attention_mask.long()
# create positions depending on attention_mask
positions = (torch.cumsum(attention_mask, dim=1).type_as(attention_mask) * attention_mask).long() - 1
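        # e.g. attention_mask [1, 1, 1, 0] -> positions [0, 1, 2, -1]; the
        # learned offset of 2 is added below before the embedding lookup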
# cut positions if `past_key_values_length` is > 0
positions = positions[:, past_key_values_length:]
return super().forward(positions + self.offset)
class OPTAttention(nn.Module):
"""Multi-headed attention from 'Attention Is All You Need' paper"""
def __init__(
self,
embed_dim: int,
num_heads: int,
dropout: float = 0.0,
is_decoder: bool = False,
bias: bool = True,
):
super().__init__()
self.embed_dim = embed_dim
self.num_heads = num_heads
self.dropout = dropout
self.head_dim = embed_dim // num_heads
if (self.head_dim * num_heads) != self.embed_dim:
raise ValueError(
f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim}"
f" and `num_heads`: {num_heads})."
)
self.scaling = self.head_dim**-0.5
self.is_decoder = is_decoder
self.k_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
self.v_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
self.q_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int):
return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous()
def forward(
self,
hidden_states: torch.Tensor,
key_value_states: Optional[torch.Tensor] = None,
past_key_value: Optional[Tuple[torch.Tensor]] = None,
attention_mask: Optional[torch.Tensor] = None,
layer_head_mask: Optional[torch.Tensor] = None,
output_attentions: bool = False,
) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
"""Input shape: Batch x Time x Channel"""
# if key_value_states are provided this layer is used as a cross-attention layer
# for the decoder
is_cross_attention = key_value_states is not None
bsz, tgt_len, _ = hidden_states.size()
# get query proj
query_states = self.q_proj(hidden_states) * self.scaling
# get key, value proj
if is_cross_attention and past_key_value is not None:
# reuse k,v, cross_attentions
key_states = past_key_value[0]
value_states = past_key_value[1]
elif is_cross_attention:
# cross_attentions
key_states = self._shape(self.k_proj(key_value_states), -1, bsz)
value_states = self._shape(self.v_proj(key_value_states), -1, bsz)
elif past_key_value is not None:
# reuse k, v, self_attention
key_states = self._shape(self.k_proj(hidden_states), -1, bsz)
value_states = self._shape(self.v_proj(hidden_states), -1, bsz)
key_states = torch.cat([past_key_value[0], key_states], dim=2)
value_states = torch.cat([past_key_value[1], value_states], dim=2)
else:
# self_attention
key_states = self._shape(self.k_proj(hidden_states), -1, bsz)
value_states = self._shape(self.v_proj(hidden_states), -1, bsz)
if self.is_decoder:
# if cross_attention save Tuple(torch.Tensor, torch.Tensor) of all cross attention key/value_states.
# Further calls to cross_attention layer can then reuse all cross-attention
# key/value_states (first "if" case)
# if uni-directional self-attention (decoder) save Tuple(torch.Tensor, torch.Tensor) of
# all previous decoder key/value_states. Further calls to uni-directional self-attention
# can concat previous decoder key/value_states to current projected key/value_states (third "elif" case)
# if encoder bi-directional self-attention `past_key_value` is always `None`
past_key_value = (key_states, value_states)
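        # fold the head dimension into the batch dimension so that torch.bmm
        # computes attention scores for every (batch, head) pair in one call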
proj_shape = (bsz * self.num_heads, -1, self.head_dim)
query_states = self._shape(query_states, tgt_len, bsz).view(*proj_shape)
key_states = key_states.view(*proj_shape)
value_states = value_states.view(*proj_shape)
src_len = key_states.size(1)
attn_weights = torch.bmm(query_states, key_states.transpose(1, 2))
if attn_weights.size() != (bsz * self.num_heads, tgt_len, src_len):
raise ValueError(
f"Attention weights should be of size {(bsz * self.num_heads, tgt_len, src_len)}, but is"
f" {attn_weights.size()}"
)
if attention_mask is not None:
if attention_mask.size() != (bsz, 1, tgt_len, src_len):
raise ValueError(
f"Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is {attention_mask.size()}"
)
attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) + attention_mask
attn_weights = torch.max(attn_weights, torch.tensor(torch.finfo(attn_weights.dtype).min))
attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)
# upcast to fp32 if the weights are in fp16. Please see https://github.com/huggingface/transformers/pull/17437
if attn_weights.dtype == torch.float16:
attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(torch.float16)
else:
attn_weights = nn.functional.softmax(attn_weights, dim=-1)
if layer_head_mask is not None:
if layer_head_mask.size() != (self.num_heads,):
raise ValueError(
f"Head mask for a single layer should be of size {(self.num_heads,)}, but is"
f" {layer_head_mask.size()}"
)
attn_weights = layer_head_mask.view(1, -1, 1, 1) * attn_weights.view(bsz, self.num_heads, tgt_len, src_len)
attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)
if output_attentions:
# this operation is a bit awkward, but it's required to
# make sure that attn_weights keeps its gradient.
# In order to do so, attn_weights have to be reshaped
# twice and have to be reused in the following
attn_weights_reshaped = attn_weights.view(bsz, self.num_heads, tgt_len, src_len)
attn_weights = attn_weights_reshaped.view(bsz * self.num_heads, tgt_len, src_len)
else:
attn_weights_reshaped = None
attn_probs = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training)
attn_output = torch.bmm(attn_probs, value_states)
if attn_output.size() != (bsz * self.num_heads, tgt_len, self.head_dim):
raise ValueError(
f"`attn_output` should be of size {(bsz, self.num_heads, tgt_len, self.head_dim)}, but is"
f" {attn_output.size()}"
)
attn_output = attn_output.view(bsz, self.num_heads, tgt_len, self.head_dim)
attn_output = attn_output.transpose(1, 2)
# Use the `embed_dim` from the config (stored in the class) rather than `hidden_state` because `attn_output` can be
        # partitioned across GPUs when using tensor-parallelism.
attn_output = attn_output.reshape(bsz, tgt_len, self.embed_dim)
attn_output = self.out_proj(attn_output)
return attn_output, attn_weights_reshaped, past_key_value
class OPTDecoderLayer(nn.Module):
def __init__(self, config: OPTConfig):
super().__init__()
self.embed_dim = config.hidden_size
self.self_attn = OPTAttention(
embed_dim=self.embed_dim,
num_heads=config.num_attention_heads,
dropout=config.attention_dropout,
is_decoder=True,
)
self.do_layer_norm_before = config.do_layer_norm_before
self.dropout = config.dropout
self.activation_fn = ACT2FN[config.activation_function]
self.self_attn_layer_norm = nn.LayerNorm(self.embed_dim)
self.fc1 = nn.Linear(self.embed_dim, config.ffn_dim)
self.fc2 = nn.Linear(config.ffn_dim, self.embed_dim)
self.final_layer_norm = nn.LayerNorm(self.embed_dim)
def forward(
self,
hidden_states: torch.Tensor,
attention_mask: Optional[torch.Tensor] = None,
layer_head_mask: Optional[torch.Tensor] = None,
output_attentions: Optional[bool] = False,
use_cache: Optional[bool] = False,
past_key_value: Optional[Tuple[torch.Tensor]] = None,
) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]:
"""
Args:
hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
attention_mask (`torch.FloatTensor`, *optional*): attention mask of size
`(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
layer_head_mask (`torch.FloatTensor`, *optional*): mask for attention heads in a given layer of size
`(encoder_attention_heads,)`.
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under
returned tensors for more detail.
use_cache (`bool`, *optional*):
If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding
(see `past_key_values`).
past_key_value (`Tuple(torch.FloatTensor)`, *optional*): cached past key and value projection states
"""
residual = hidden_states
# 125m, 1.7B, ..., 175B applies layer norm BEFORE attention
if self.do_layer_norm_before:
hidden_states = self.self_attn_layer_norm(hidden_states)
# Self Attention
hidden_states, self_attn_weights, present_key_value = self.self_attn(
hidden_states=hidden_states,
past_key_value=past_key_value,
attention_mask=attention_mask,
layer_head_mask=layer_head_mask,
output_attentions=output_attentions,
)
hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
hidden_states = residual + hidden_states
# 350m applies layer norm AFTER attention
if not self.do_layer_norm_before:
hidden_states = self.self_attn_layer_norm(hidden_states)
# Fully Connected
hidden_states_shape = hidden_states.shape
hidden_states = hidden_states.reshape(-1, hidden_states.size(-1))
residual = hidden_states
# 125m, 1.7B, ..., 175B applies layer norm BEFORE attention
if self.do_layer_norm_before:
hidden_states = self.final_layer_norm(hidden_states)
hidden_states = self.fc1(hidden_states)
hidden_states = self.activation_fn(hidden_states)
hidden_states = self.fc2(hidden_states)
hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
hidden_states = (residual + hidden_states).view(hidden_states_shape)
# 350m applies layer norm AFTER attention
if not self.do_layer_norm_before:
hidden_states = self.final_layer_norm(hidden_states)
outputs = (hidden_states,)
if output_attentions:
outputs += (self_attn_weights,)
if use_cache:
outputs += (present_key_value,)
return outputs
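# --- Editor's note: the helper below is an illustrative sketch, not part of the upstream
# transformers file. It shows, with assumed toy config values, how a single OPTDecoderLayer is
# called with the (batch, seq_len, embed_dim) input described in the forward() docstring above.
def _example_decoder_layer_forward():
    config = OPTConfig(hidden_size=32, num_attention_heads=4, ffn_dim=64)
    layer = OPTDecoderLayer(config)
    hidden_states = torch.randn(2, 5, config.hidden_size)  # (batch, seq_len, embed_dim)
    outputs = layer(hidden_states, use_cache=True)
    new_hidden_states = outputs[0]   # same shape as the input
    present_key_value = outputs[-1]  # cached (key, value) pair because use_cache=True
    return new_hidden_states.shape, len(present_key_value)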
OPT_START_DOCSTRING = r"""
This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads
etc.)
This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage
and behavior.
Parameters:
config ([`OPTConfig`]):
Model configuration class with all the parameters of the model. Initializing with a config file does not
load the weights associated with the model, only the configuration. Check out the
[`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""
@add_start_docstrings(
"The bare OPT Model outputting raw hidden-states without any specific head on top.",
OPT_START_DOCSTRING,
)
class OPTPreTrainedModel(PreTrainedModel):
config_class = OPTConfig
base_model_prefix = "model"
supports_gradient_checkpointing = True
_no_split_modules = ["OPTDecoderLayer"]
_keys_to_ignore_on_load_unexpected = [r"decoder\.version"]
def _init_weights(self, module):
std = self.config.init_std
if isinstance(module, nn.Linear):
module.weight.data.normal_(mean=0.0, std=std)
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(module, nn.Embedding):
module.weight.data.normal_(mean=0.0, std=std)
if module.padding_idx is not None:
module.weight.data[module.padding_idx].zero_()
def _set_gradient_checkpointing(self, module, value=False):
if isinstance(module, (OPTDecoder)):
module.gradient_checkpointing = value
OPT_INPUTS_DOCSTRING = r"""
Args:
input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide
it.
Indices can be obtained using [`GPT2Tokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
[What are attention masks?](../glossary#attention-mask)
Indices can be obtained using [`OPTTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
If `past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see
`past_key_values`).
If you want to change padding behavior, you should read [`modeling_opt._prepare_decoder_attention_mask`]
and modify to your needs. See diagram 1 in [the paper](https://arxiv.org/abs/1910.13461) for more
information on the default strategy.
head_mask (`torch.Tensor` of shape `(encoder_layers, encoder_attention_heads)`, *optional*):
Mask to nullify selected heads of the attention modules in the encoder. Mask values selected in `[0, 1]`:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape
`(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional tensors of shape
`(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`.
Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention
blocks) that can be used (see `past_key_values` input) to speed up sequential decoding.
If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that
don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all
`decoder_input_ids` of shape `(batch_size, sequence_length)`.
inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
model's internal embedding lookup matrix.
use_cache (`bool`, *optional*):
If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
`past_key_values`).
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
tensors for more detail.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
class OPTDecoder(OPTPreTrainedModel):
"""
Transformer decoder consisting of *config.num_hidden_layers* layers. Each layer is a [`OPTDecoderLayer`]
Args:
config: OPTConfig
"""
def __init__(self, config: OPTConfig):
super().__init__(config)
self.dropout = config.dropout
self.layerdrop = config.layerdrop
self.padding_idx = config.pad_token_id
self.max_target_positions = config.max_position_embeddings
self.vocab_size = config.vocab_size
self.embed_tokens = nn.Embedding(config.vocab_size, config.word_embed_proj_dim, self.padding_idx)
self.embed_positions = OPTLearnedPositionalEmbedding(config.max_position_embeddings, config.hidden_size)
if config.word_embed_proj_dim != config.hidden_size:
self.project_out = nn.Linear(config.hidden_size, config.word_embed_proj_dim, bias=False)
else:
self.project_out = None
if config.word_embed_proj_dim != config.hidden_size:
self.project_in = nn.Linear(config.word_embed_proj_dim, config.hidden_size, bias=False)
else:
self.project_in = None
# Note that the only purpose of `config._remove_final_layer_norm` is to keep backward compatibility
# with checkpoints that have been fine-tuned before transformers v4.20.1
# see https://github.com/facebookresearch/metaseq/pull/164
if config.do_layer_norm_before and not config._remove_final_layer_norm:
self.final_layer_norm = nn.LayerNorm(config.hidden_size)
else:
self.final_layer_norm = None
self.layers = nn.ModuleList([OPTDecoderLayer(config) for _ in range(config.num_hidden_layers)])
self.gradient_checkpointing = False
# Initialize weights and apply final processing
self.post_init()
def get_input_embeddings(self):
return self.embed_tokens
def set_input_embeddings(self, value):
self.embed_tokens = value
# Copied from transformers.models.bart.modeling_bart.BartDecoder._prepare_decoder_attention_mask
def _prepare_decoder_attention_mask(self, attention_mask, input_shape, inputs_embeds, past_key_values_length):
# create causal mask
# [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
combined_attention_mask = None
if input_shape[-1] > 1:
combined_attention_mask = _make_causal_mask(
input_shape, inputs_embeds.dtype, past_key_values_length=past_key_values_length
).to(inputs_embeds.device)
if attention_mask is not None:
# [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
expanded_attn_mask = _expand_mask(attention_mask, inputs_embeds.dtype, tgt_len=input_shape[-1]).to(
inputs_embeds.device
)
combined_attention_mask = (
expanded_attn_mask if combined_attention_mask is None else expanded_attn_mask + combined_attention_mask
)
return combined_attention_mask
def forward(
self,
input_ids: torch.LongTensor = None,
attention_mask: Optional[torch.Tensor] = None,
head_mask: Optional[torch.Tensor] = None,
past_key_values: Optional[List[torch.FloatTensor]] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
use_cache: Optional[bool] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
) -> Union[Tuple, BaseModelOutputWithPast]:
r"""
Args:
input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you
provide it.
Indices can be obtained using [`OPTTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
[What are attention masks?](../glossary#attention-mask)
head_mask (`torch.Tensor` of shape `(num_hidden_layers, num_attention_heads)`, *optional*):
Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of
shape `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional tensors of
shape `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`.
Contains pre-computed hidden-states (key and values in the self-attention blocks and in the
cross-attention blocks) that can be used (see `past_key_values` input) to speed up sequential decoding.
If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those
that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of
all `decoder_input_ids` of shape `(batch_size, sequence_length)`.
inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation.
This is useful if you want more control over how to convert `input_ids` indices into associated vectors
than the model's internal embedding lookup matrix.
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under
returned tensors for more detail.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
for more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
use_cache = use_cache if use_cache is not None else self.config.use_cache
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
# retrieve input_ids and inputs_embeds
if input_ids is not None and inputs_embeds is not None:
raise ValueError("You cannot specify both decoder_input_ids and decoder_inputs_embeds at the same time")
elif input_ids is not None:
input_shape = input_ids.size()
input_ids = input_ids.view(-1, input_shape[-1])
elif inputs_embeds is not None:
input_shape = inputs_embeds.size()[:-1]
else:
raise ValueError("You have to specify either decoder_input_ids or decoder_inputs_embeds")
past_key_values_length = past_key_values[0][0].shape[2] if past_key_values is not None else 0
if inputs_embeds is None:
inputs_embeds = self.embed_tokens(input_ids)
# embed positions
if attention_mask is None:
attention_mask = torch.ones(inputs_embeds.shape[:2], dtype=torch.bool, device=inputs_embeds.device)
pos_embeds = self.embed_positions(attention_mask, past_key_values_length)
attention_mask = self._prepare_decoder_attention_mask(
attention_mask, input_shape, inputs_embeds, past_key_values_length
)
if self.project_in is not None:
inputs_embeds = self.project_in(inputs_embeds)
hidden_states = inputs_embeds + pos_embeds
# decoder layers
all_hidden_states = () if output_hidden_states else None
all_self_attns = () if output_attentions else None
next_decoder_cache = () if use_cache else None
# check if head_mask has a correct number of layers specified if desired
for attn_mask, mask_name in zip([head_mask], ["head_mask"]):
if attn_mask is not None:
if attn_mask.size()[0] != (len(self.layers)):
raise ValueError(
f"The `{mask_name}` should be specified for {len(self.layers)} layers, but it is for"
f" {head_mask.size()[0]}."
)
for idx, decoder_layer in enumerate(self.layers):
# add LayerDrop (see https://arxiv.org/abs/1909.11556 for description)
if output_hidden_states:
all_hidden_states += (hidden_states,)
dropout_probability = random.uniform(0, 1)
if self.training and (dropout_probability < self.layerdrop):
continue
past_key_value = past_key_values[idx] if past_key_values is not None else None
if self.gradient_checkpointing and self.training:
if use_cache:
logger.warning(
"`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
)
use_cache = False
def create_custom_forward(module):
def custom_forward(*inputs):
# None for past_key_value
return module(*inputs, output_attentions, None)
return custom_forward
layer_outputs = torch.utils.checkpoint.checkpoint(
create_custom_forward(decoder_layer),
hidden_states,
attention_mask,
head_mask[idx] if head_mask is not None else None,
None,
)
else:
layer_outputs = decoder_layer(
hidden_states,
attention_mask=attention_mask,
layer_head_mask=(head_mask[idx] if head_mask is not None else None),
past_key_value=past_key_value,
output_attentions=output_attentions,
use_cache=use_cache,
)
hidden_states = layer_outputs[0]
if use_cache:
next_decoder_cache += (layer_outputs[2 if output_attentions else 1],)
if output_attentions:
all_self_attns += (layer_outputs[1],)
if self.final_layer_norm is not None:
hidden_states = self.final_layer_norm(hidden_states)
if self.project_out is not None:
hidden_states = self.project_out(hidden_states)
# add hidden states from the last decoder layer
if output_hidden_states:
all_hidden_states += (hidden_states,)
next_cache = next_decoder_cache if use_cache else None
if not return_dict:
return tuple(v for v in [hidden_states, next_cache, all_hidden_states, all_self_attns] if v is not None)
return BaseModelOutputWithPast(
last_hidden_state=hidden_states,
past_key_values=next_cache,
hidden_states=all_hidden_states,
attentions=all_self_attns,
)
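# --- Editor's note: illustrative sketch only, not upstream code. It exercises the caching
# behaviour described in the forward() docstring above: after the first call, only the newest
# token is fed, while the attention mask still covers past + current positions. Config values
# are arbitrary toy assumptions.
def _example_incremental_decoding():
    config = OPTConfig(vocab_size=100, hidden_size=32, num_hidden_layers=2,
                       num_attention_heads=4, ffn_dim=64, max_position_embeddings=32)
    decoder = OPTDecoder(config)
    decoder.eval()
    prompt_ids = torch.randint(0, config.vocab_size, (1, 4))
    with torch.no_grad():
        out = decoder(input_ids=prompt_ids, use_cache=True)
        past = out.past_key_values  # one (key, value) pair per layer
        next_token = torch.randint(0, config.vocab_size, (1, 1))
        full_mask = torch.ones(1, prompt_ids.shape[1] + 1, dtype=torch.long)
        out = decoder(input_ids=next_token, attention_mask=full_mask,
                      past_key_values=past, use_cache=True)
    return out.last_hidden_state.shape  # (1, 1, hidden_size): only the new position is computed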
@add_start_docstrings(
"The bare OPT Model outputting raw hidden-states without any specific head on top.",
OPT_START_DOCSTRING,
)
class OPTModel(OPTPreTrainedModel):
def __init__(self, config: OPTConfig):
super().__init__(config)
self.decoder = OPTDecoder(config)
# Initialize weights and apply final processing
self.post_init()
def get_input_embeddings(self):
return self.decoder.embed_tokens
def set_input_embeddings(self, value):
self.decoder.embed_tokens = value
def get_decoder(self):
return self.decoder
@add_start_docstrings_to_model_forward(OPT_INPUTS_DOCSTRING)
@add_code_sample_docstrings(
processor_class=_TOKENIZER_FOR_DOC,
checkpoint=_CHECKPOINT_FOR_DOC,
output_type=BaseModelOutputWithPast,
config_class=_CONFIG_FOR_DOC,
expected_output=_EXPECTED_OUTPUT_SHAPE,
)
def forward(
self,
input_ids: torch.LongTensor = None,
attention_mask: Optional[torch.Tensor] = None,
head_mask: Optional[torch.Tensor] = None,
past_key_values: Optional[List[torch.FloatTensor]] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
use_cache: Optional[bool] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
) -> Union[Tuple, BaseModelOutputWithPast]:
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
use_cache = use_cache if use_cache is not None else self.config.use_cache
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
# decoder outputs consists of (dec_features, past_key_value, dec_hidden, dec_attn)
decoder_outputs = self.decoder(
input_ids=input_ids,
attention_mask=attention_mask,
head_mask=head_mask,
past_key_values=past_key_values,
inputs_embeds=inputs_embeds,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
if not return_dict:
return decoder_outputs
return BaseModelOutputWithPast(
last_hidden_state=decoder_outputs.last_hidden_state,
past_key_values=decoder_outputs.past_key_values,
hidden_states=decoder_outputs.hidden_states,
attentions=decoder_outputs.attentions,
)
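# --- Editor's note: illustrative sketch only, not upstream code. A minimal end-to-end pass
# through the bare OPTModel defined above with a toy, randomly initialized config; a real
# workflow would normally use from_pretrained and a tokenizer instead (an assumption here).
def _example_optmodel_forward():
    config = OPTConfig(vocab_size=100, hidden_size=32, num_hidden_layers=2,
                       num_attention_heads=4, ffn_dim=64, max_position_embeddings=32)
    model = OPTModel(config)
    model.eval()
    input_ids = torch.randint(0, config.vocab_size, (2, 7))
    with torch.no_grad():
        outputs = model(input_ids=input_ids, output_hidden_states=True)
    # last_hidden_state: (batch, seq_len, hidden_size); hidden_states: embedding output + one per layer
    return outputs.last_hidden_state.shape, len(outputs.hidden_states)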
class OPTForCausalLM(OPTPreTrainedModel):
_keys_to_ignore_on_load_missing = [r"lm_head.weight"]
def __init__(self, config):
super().__init__(config)
self.model = OPTModel(config)
# the lm_head weight is automatically tied to the embed tokens weight
self.lm_head = nn.Linear(config.word_embed_proj_dim, config.vocab_size, bias=False)
# Initialize weights and apply final processing
self.post_init()
def get_input_embeddings(self):
return self.model.decoder.embed_tokens
def set_input_embeddings(self, value):
self.model.decoder.embed_tokens = value
def get_output_embeddings(self):
return self.lm_head
def set_output_embeddings(self, new_embeddings):
self.lm_head = new_embeddings
def set_decoder(self, decoder):
self.model.decoder = decoder
def get_decoder(self):
return self.model.decoder
@replace_return_docstrings(output_type=CausalLMOutputWithPast, config_class=_CONFIG_FOR_DOC)
def forward(
self,
input_ids: torch.LongTensor = None,
attention_mask: Optional[torch.Tensor] = None,
head_mask: Optional[torch.Tensor] = None,
past_key_values: Optional[List[torch.FloatTensor]] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
labels: Optional[torch.LongTensor] = None,
use_cache: Optional[bool] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
) -> Union[Tuple, CausalLMOutputWithPast]:
r"""
Args:
input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you
provide it.
Indices can be obtained using [`OPTTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
[What are attention masks?](../glossary#attention-mask)
head_mask (`torch.Tensor` of shape `(num_hidden_layers, num_attention_heads)`, *optional*):
Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of
shape `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional tensors of
shape `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`. The two additional
tensors are only required when the model is used as a decoder in a Sequence to Sequence model.
Contains pre-computed hidden-states (key and values in the self-attention blocks and in the
cross-attention blocks) that can be used (see `past_key_values` input) to speed up sequential decoding.
If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those
that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of
all `decoder_input_ids` of shape `(batch_size, sequence_length)`.
inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation.
This is useful if you want more control over how to convert `input_ids` indices into associated vectors
than the model's internal embedding lookup matrix.
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
(masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
use_cache (`bool`, *optional*):
If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding
(see `past_key_values`).
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under
returned tensors for more detail.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
for more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
Returns:
Example:
```python
>>> from transformers import GPT2Tokenizer, OPTForCausalLM
>>> model = OPTForCausalLM.from_pretrained("facebook/opt-350m")
>>> tokenizer = GPT2Tokenizer.from_pretrained("facebook/opt-350m")
>>> prompt = "Hey, are you consciours? Can you talk to me?"
>>> inputs = tokenizer(prompt, return_tensors="pt")
>>> # Generate
>>> generate_ids = model.generate(inputs.input_ids, max_length=30)
>>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
"Hey, are you consciours? Can you talk to me?\nI'm not consciours, but I can talk to you."
```"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
# decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn)
outputs = self.model.decoder(
input_ids=input_ids,
attention_mask=attention_mask,
head_mask=head_mask,
past_key_values=past_key_values,
inputs_embeds=inputs_embeds,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
logits = self.lm_head(outputs[0]).contiguous()
loss = None
if labels is not None:
# Shift so that tokens < n predict n
shift_logits = logits[..., :-1, :].contiguous()
shift_labels = labels[..., 1:].contiguous()
# Flatten the tokens
loss_fct = CrossEntropyLoss()
loss = loss_fct(shift_logits.view(-1, self.config.vocab_size), shift_labels.view(-1))
if not return_dict:
output = (logits,) + outputs[1:]
return (loss,) + output if loss is not None else output
return CausalLMOutputWithPast(
loss=loss,
logits=logits,
past_key_values=outputs.past_key_values,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
def prepare_inputs_for_generation(self, input_ids, past=None, attention_mask=None, use_cache=None, **kwargs):
# if model is used as a decoder in encoder-decoder model, the decoder attention mask is created on the fly
if attention_mask is None:
attention_mask = input_ids.new_ones(input_ids.shape)
if past:
input_ids = input_ids[:, -1:]
# first step, decoder_cached_states are empty
return {
"input_ids": input_ids, # encoder_outputs is defined. input_ids not needed
"attention_mask": attention_mask,
"past_key_values": past,
"use_cache": use_cache,
}
@staticmethod
def _reorder_cache(past, beam_idx):
reordered_past = ()
for layer_past in past:
reordered_past += (tuple(past_state.index_select(0, beam_idx) for past_state in layer_past),)
return reordered_past
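# --- Editor's note: illustrative sketch only, not upstream code. A hand-rolled greedy loop that
# mirrors what generate() does with prepare_inputs_for_generation and the key/value cache; the
# toy config is an assumption and the "generated" ids are meaningless because the weights are random.
def _example_manual_greedy_steps():
    config = OPTConfig(vocab_size=100, hidden_size=32, num_hidden_layers=2,
                       num_attention_heads=4, ffn_dim=64, max_position_embeddings=32)
    lm = OPTForCausalLM(config)
    lm.eval()
    input_ids = torch.randint(0, config.vocab_size, (1, 4))
    past = None
    with torch.no_grad():
        for _ in range(3):
            model_inputs = lm.prepare_inputs_for_generation(input_ids, past=past, use_cache=True)
            out = lm(**model_inputs)
            next_token = out.logits[:, -1, :].argmax(dim=-1, keepdim=True)
            input_ids = torch.cat([input_ids, next_token], dim=-1)
            past = out.past_key_values
    return input_ids  # original 4 ids plus 3 greedily chosen ids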
@add_start_docstrings(
"""
The OPT Model transformer with a sequence classification head on top (linear layer).
[`OPTForSequenceClassification`] uses the last token in order to do the classification, as other causal models
(e.g. GPT-2) do.
Since it does classification on the last token, it needs to know the position of the last token. If a
`pad_token_id` is defined in the configuration, it finds the last token that is not a padding token in each row. If
no `pad_token_id` is defined, it simply takes the last value in each row of the batch. Since it cannot guess the
padding tokens when `inputs_embeds` are passed instead of `input_ids`, it does the same (take the last value in
each row of the batch).
""",
OPT_START_DOCSTRING,
)
class OPTForSequenceClassification(OPTPreTrainedModel):
_keys_to_ignore_on_load_missing = [r"lm_head.weight"]
def __init__(self, config: OPTConfig):
super().__init__(config)
self.num_labels = config.num_labels
self.model = OPTModel(config)
self.score = nn.Linear(config.word_embed_proj_dim, self.num_labels, bias=False)
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(OPT_INPUTS_DOCSTRING)
@add_code_sample_docstrings(
processor_class=_TOKENIZER_FOR_DOC,
checkpoint=_CHECKPOINT_FOR_SEQUENCE_CLASSIFICATION,
output_type=SequenceClassifierOutputWithPast,
config_class=_CONFIG_FOR_DOC,
expected_output=_SEQ_CLASS_EXPECTED_OUTPUT,
expected_loss=_SEQ_CLASS_EXPECTED_LOSS,
)
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
attention_mask: Optional[torch.FloatTensor] = None,
head_mask: Optional[torch.FloatTensor] = None,
past_key_values: Optional[Tuple[Tuple[torch.Tensor]]] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
labels: Optional[torch.LongTensor] = None,
use_cache: Optional[bool] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
) -> Union[Tuple, SequenceClassifierOutputWithPast]:
r"""
labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
`config.num_labels > 1` a classification loss is computed (Cross-Entropy).
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
transformer_outputs = self.model(
input_ids,
past_key_values=past_key_values,
attention_mask=attention_mask,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
hidden_states = transformer_outputs[0]
logits = self.score(hidden_states)
if input_ids is not None:
batch_size, sequence_length = input_ids.shape[:2]
else:
batch_size, sequence_length = inputs_embeds.shape[:2]
if self.config.pad_token_id is None:
sequence_lengths = -1
else:
if input_ids is not None:
sequence_lengths = torch.ne(input_ids, self.config.pad_token_id).sum(-1) - 1
else:
sequence_lengths = -1
logger.warning(
f"{self.__class__.__name__} will not detect padding tokens in `inputs_embeds`. Results may be "
"unexpected if using padding tokens in conjunction with `inputs_embeds.`"
)
pooled_logits = logits[torch.arange(batch_size, device=logits.device), sequence_lengths]
loss = None
if labels is not None:
if self.config.problem_type is None:
if self.num_labels == 1:
self.config.problem_type = "regression"
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
self.config.problem_type = "single_label_classification"
else:
self.config.problem_type = "multi_label_classification"
if self.config.problem_type == "regression":
loss_fct = MSELoss()
if self.num_labels == 1:
loss = loss_fct(pooled_logits.squeeze(), labels.squeeze())
else:
loss = loss_fct(pooled_logits, labels)
elif self.config.problem_type == "single_label_classification":
loss_fct = CrossEntropyLoss()
loss = loss_fct(pooled_logits.view(-1, self.num_labels), labels.view(-1))
elif self.config.problem_type == "multi_label_classification":
loss_fct = BCEWithLogitsLoss()
loss = loss_fct(pooled_logits, labels)
if not return_dict:
output = (pooled_logits,) + transformer_outputs[1:]
return ((loss,) + output) if loss is not None else output
return SequenceClassifierOutputWithPast(
loss=loss,
logits=pooled_logits,
past_key_values=transformer_outputs.past_key_values,
hidden_states=transformer_outputs.hidden_states,
attentions=transformer_outputs.attentions,
)
def get_input_embeddings(self):
return self.model.decoder.embed_tokens
def set_input_embeddings(self, value):
self.model.decoder.embed_tokens = value
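# --- Editor's note: illustrative sketch only, not upstream code. It demonstrates the last-token
# pooling described in the class docstring above: with pad_token_id set, the logits of the last
# non-padding token of each row are used. The toy config, token ids and labels are assumptions.
def _example_sequence_classification_pooling():
    config = OPTConfig(vocab_size=100, hidden_size=32, num_hidden_layers=2,
                       num_attention_heads=4, ffn_dim=64, max_position_embeddings=32,
                       pad_token_id=1, num_labels=3)
    clf = OPTForSequenceClassification(config)
    clf.eval()
    # row 0 ends with two padding tokens, so pooling uses its 4th token, not its 6th
    input_ids = torch.tensor([[5, 7, 9, 11, 1, 1],
                              [4, 6, 8, 10, 12, 14]])
    attention_mask = (input_ids != config.pad_token_id).long()
    with torch.no_grad():
        out = clf(input_ids=input_ids, attention_mask=attention_mask,
                  labels=torch.tensor([0, 2]))
    return out.logits.shape, out.loss  # logits: (batch_size, num_labels); loss: cross-entropy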
@add_start_docstrings(
"""
The OPT Model transformer with a span classification head on top for extractive question-answering tasks like SQuAD
(a linear layers on top of the hidden-states output to compute `span start logits` and `span end logits`).
""",
OPT_START_DOCSTRING,
)
class OPTForQuestionAnswering(OPTPreTrainedModel):
_keys_to_ignore_on_load_missing = [r"lm_head.weight"]
def __init__(self, config: OPTConfig):
super().__init__(config)
self.model = OPTModel(config)
self.qa_outputs = nn.Linear(config.word_embed_proj_dim, 2)
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(OPT_INPUTS_DOCSTRING)
@replace_return_docstrings(output_type=QuestionAnsweringModelOutput, config_class=_CONFIG_FOR_DOC)
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
attention_mask: Optional[torch.FloatTensor] = None,
head_mask: Optional[torch.FloatTensor] = None,
past_key_values: Optional[Tuple[Tuple[torch.Tensor]]] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
start_positions: Optional[torch.LongTensor] = None,
end_positions: Optional[torch.LongTensor] = None,
use_cache: Optional[bool] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
) -> Union[Tuple, QuestionAnsweringModelOutput]:
r"""
start_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Labels for position (index) of the start of the labelled span for computing the token classification loss.
Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence
are not taken into account for computing the loss.
end_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Labels for position (index) of the end of the labelled span for computing the token classification loss.
Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence
are not taken into account for computing the loss.
Returns:
Example:
```python
>>> from transformers import GPT2Tokenizer, OPTForQuestionAnswering
>>> import torch
>>> torch.manual_seed(4) # doctest: +IGNORE_RESULT
>>> tokenizer = GPT2Tokenizer.from_pretrained("facebook/opt-350m")
>>> # note: we are loading an OPTForQuestionAnswering from the hub here,
>>> # so the head will be randomly initialized, hence the predictions will be random
>>> model = OPTForQuestionAnswering.from_pretrained("facebook/opt-350m")
>>> question, text = "Who was Jim Henson?", "Jim Henson was a nice puppet"
>>> inputs = tokenizer(question, text, return_tensors="pt")
>>> with torch.no_grad():
... outputs = model(**inputs)
>>> answer_start_index = outputs.start_logits.argmax()
>>> answer_end_index = outputs.end_logits.argmax()
>>> predict_answer_tokens = inputs.input_ids[0, answer_start_index : answer_end_index + 1]
>>> predicted = tokenizer.decode(predict_answer_tokens)
>>> predicted
' Henson?'
```"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
transformer_outputs = self.model(
input_ids,
past_key_values=past_key_values,
attention_mask=attention_mask,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
hidden_states = transformer_outputs[0]
logits = self.qa_outputs(hidden_states)
start_logits, end_logits = logits.split(1, dim=-1)
start_logits = start_logits.squeeze(-1).contiguous()
end_logits = end_logits.squeeze(-1).contiguous()
total_loss = None
if start_positions is not None and end_positions is not None:
# If we are on multi-GPU, split add a dimension
if len(start_positions.size()) > 1:
start_positions = start_positions.squeeze(-1)
if len(end_positions.size()) > 1:
end_positions = end_positions.squeeze(-1)
# sometimes the start/end positions are outside our model inputs, we ignore these terms
ignored_index = start_logits.size(1)
start_positions = start_positions.clamp(0, ignored_index)
end_positions = end_positions.clamp(0, ignored_index)
loss_fct = CrossEntropyLoss(ignore_index=ignored_index)
start_loss = loss_fct(start_logits, start_positions)
end_loss = loss_fct(end_logits, end_positions)
total_loss = (start_loss + end_loss) / 2
if not return_dict:
output = (start_logits, end_logits) + transformer_outputs[2:]
return ((total_loss,) + output) if total_loss is not None else output
return QuestionAnsweringModelOutput(
loss=total_loss,
start_logits=start_logits,
end_logits=end_logits,
hidden_states=transformer_outputs.hidden_states,
attentions=transformer_outputs.attentions,
)
def get_input_embeddings(self):
return self.model.decoder.embed_tokens
def set_input_embeddings(self, value):
self.model.decoder.embed_tokens = value
| [
"[email protected]"
] | |
c91d47c4a234399c275744c668f60c5ac3ac7dcc | 4ea6a1eb0c55f4d974ec4a0d2d3bb3228c48b62a | /django/apps/photo/migrations/0025_unique_together_story_image.py | 6bab330e3add6d836278674da1be5b4fa5f81218 | [
"Apache-2.0"
] | permissive | universitas/universitas.no | 16993d2fb65f21eff4a0cfd72540278276b24531 | 911a2541c77eca522ba5a723f175786f4f9eb481 | refs/heads/master | 2023-04-28T14:51:56.849564 | 2021-09-21T18:49:36 | 2021-09-21T18:52:17 | 19,112,283 | 19 | 6 | Apache-2.0 | 2023-04-15T19:12:19 | 2014-04-24T14:50:36 | Python | UTF-8 | Python | false | false | 1,418 | py | from collections import Counter
from django.db import migrations
import sorl.thumbnail.fields
import apps.photo.models
from utils.merge_model_objects import merge_instances
def dedupe_storyimages(apps, schema_editor):
"""merge storyimages with same parent_story/imagefile."""
StoryImage = apps.get_model("stories", "StoryImage")
pairs = StoryImage.objects.values_list('imagefile_id', 'parent_story_id')
dupes = (key for key, val in Counter(pairs).items() if val > 1)
for imagefile, parent_story in dupes:
story_images = StoryImage.objects.filter(
imagefile=imagefile,
parent_story=parent_story,
).order_by('-top', 'index')
merge_instances(*story_images)
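# --- Editor's note: illustrative sketch only, not part of the original migration. It shows on a
# made-up list of (imagefile_id, parent_story_id) pairs how the Counter-based check above picks
# out only the combinations that occur more than once.
def _example_dupe_detection():
    pairs = [(1, 10), (2, 10), (1, 10), (3, 11)]
    dupes = [key for key, count in Counter(pairs).items() if count > 1]
    return dupes  # [(1, 10)] is the only duplicated combination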
class Migration(migrations.Migration):
dependencies = [
('photo', '0024_auto_20180421_1957'),
]
operations = [
migrations.RunPython(
code=dedupe_storyimages,
reverse_code=migrations.RunPython.noop,
),
migrations.AlterField(
model_name='imagefile',
name='original',
field=sorl.thumbnail.fields.ImageField(
height_field='full_height',
max_length=1024,
null=True,
upload_to=apps.photo.models.upload_image_to,
verbose_name='original',
width_field='full_width'
),
),
]
| [
"[email protected]"
] | |
d5a32dd2120e713308cbab8ae1ce4c1061696c20 | 5f14603614bf9357b03c147af3423bb500f15ad8 | /fe2/assettag/send_mail.py | ec40e165f715420971f0ada09c8d5d00bb7cedba | [] | no_license | damnedsteven/emcn | 76aa5449db00a0cb1dd8487c1bf19b4d4ed52014 | 89cdeb9d200f699772a0473fe9fd9b030d78cbc7 | refs/heads/master | 2021-01-23T05:24:03.401715 | 2018-05-03T09:12:52 | 2018-05-03T09:12:52 | 86,296,572 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,618 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
from email import encoders
from email.header import Header
from email.mime.text import MIMEText
from email.utils import parseaddr, formataddr
from email.mime.multipart import MIMEMultipart, MIMEBase
import smtplib
from datetime import datetime, timedelta
import MySQLdb
import os
my_path = os.path.dirname(os.path.abspath(__file__))
import sys
reload(sys)
sys.setdefaultencoding('utf8')
def _format_addr(s):
name, addr = parseaddr(s)
return formataddr((Header(name, 'utf-8').encode(), addr))
def query_mysql(query):
# Get data from 200 DB
conn = MySQLdb.connect("16.187.230.200", "yi", "asdfqwer", "shortage", charset = 'utf8')
cursor = conn.cursor()
cursor.execute(query)
#get header and rows
header = [i[0] for i in cursor.description]
rows = [list(i) for i in cursor.fetchall()]
#append header to rows
rows.insert(0,header)
cursor.close()
conn.close()
return rows
#take list of lists as argument
def nlist_to_html(list2d):
#bold header
htable=u'<table border="1" bordercolor=000000 cellspacing="0" cellpadding="1" style="table-layout:fixed;vertical-align:bottom;font-size:13px;font-family:verdana,sans,sans-serif;border-collapse:collapse;border:1px solid rgb(130,130,130)" >'
list2d[0] = [u'<b>' + i + u'</b>' for i in list2d[0]]
#
for row in list2d:
newrow = u'<tr>'
newrow += u'<td align="left" style="padding:1px 4px">'+unicode(row[0])+u'</td>'
row.remove(row[0])
newrow = newrow + ''.join([u'<td align="right" style="padding:1px 4px">' + unicode(x or "") + u'</td>' for x in row])
newrow += '</tr>'
htable+= newrow
htable += '</table>'
return htable
def sql_html(query):
return nlist_to_html(query_mysql(query))
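# --- Editor's note: illustrative sketch only, not part of the original script. It shows what
# nlist_to_html() above produces for a tiny made-up result set (header row plus one data row),
# without touching the database.
def _example_nlist_to_html():
    rows = [[u'Part No.', u'Qty'], [u'ABC-123', 5]]
    html = nlist_to_html(rows)  # note: the function mutates the list it is given
    return html.startswith(u'<table') and u'<b>Part No.</b>' in html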
now = datetime.now()
earlier = now - timedelta(hours=12)
# from_date = earlier.strftime('%y') + '/' + earlier.strftime('%m') + '/' + earlier.strftime('%d') + '-' + earlier.strftime('%H')
to_date = now.strftime('%y') + '/' + now.strftime('%m') + '/' + now.strftime('%d') + '-' + now.strftime('%H')
from_addr = '[email protected]'
to_addr = ['[email protected]']
cc_addr = ['[email protected]']
bcc_addr = ['[email protected]']
# to_addr = ['[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]']
smtp_server = 'smtp3.hpe.com'
query = """
SELECT
is_copy `Copy#`,
pn `Part No.`,
ctrl_id `Ctrl ID`,
buyer_name `Buyer`,
shortage_qty `TTL-S`,
pline_shortage_qty `S-RAW`,
passthru_shortage_qty `S-OPT`,
earliest_bkpl `Earliest BKPL Time`,
arrival_qty `Supp.Q`,
eta `ETA`,
CASE
WHEN slot = '0' THEN 'morning'
WHEN slot = '1' THEN 'afternoon'
WHEN slot = '2' THEN 'night'
END `Slot`,
remark `Remark`,
carrier.name `Carrier`,
judge_supply `Judge Supply?`,
shortage_reason.name `Shortage Reason (Category)`,
shortage_reason_detail `Shortage Reason (Comments)`,
bill_number `HAWB`,
date_format(lastupdated, "%b %d %Y %h:%i %p") `Updated`
FROM
pn
LEFT JOIN
carrier
ON pn.id_carrier=carrier.id
LEFT JOIN
shortage_reason
ON pn.id_shortage_reason=shortage_reason.id
WHERE (status=1 OR is_copy = -1) AND received IS NULL
ORDER BY pn
"""
text = """\
<html>
<head></head>
<body>
<p>Hi all,<br><br>
Here is the latest material shortage status, pls check and fill in the ETA schedule asap. Pls let <a href="mailto:[email protected]">SJ, Taojun (EMCN Warehouse)</a> know if there is any wrong information. Thanks for your attention!<br>
<br>请登录网页版缺料显示系统: <a href="http://16.187.228.117/shortage/buyer/">网址</a>
</p>
<br>
</body>
</html>
"""
table = sql_html(query)
text2 = """\
<html>
<head></head>
<body>
<p><br>Thanks & Best Regs.<br>
cpmo ESSN warehouse system<br>
Tel: 862120510334
</p>
<br>
</body>
</html>
"""
msg = MIMEMultipart()
# The email body is a MIMEText part:
msg.attach(MIMEText(text+table+text2, 'html', 'utf-8'))
msg['From'] = _format_addr('Shortage Alert <%s>' % from_addr)
# msg['To'] = _format_addr('recipient <%s>' % ",".join(to_addr))
msg['To'] = ", ".join(to_addr)
# msg['CC'] = _format_addr('admin <%s>' % ",".join(cc_addr))
msg['CC'] = ", ".join(cc_addr)
msg['BCC'] = ", ".join(bcc_addr)
msg['Subject'] = Header('for Buyer - ESSN material shortage (%s)' % (to_date), 'utf-8').encode()
to_addrs = to_addr + cc_addr + bcc_addr
server = smtplib.SMTP(smtp_server, 25)
server.set_debuglevel(1)
#server.login(from_addr, password)
server.sendmail(from_addr, to_addrs, msg.as_string())
server.quit()
| [
"[email protected]"
] | |
57678fa594c866acbcce6e886f60a4c7d20dca47 | f4b07a7596b17ec8651223c402cb91cb56087b2d | /evennia/utils/evform.py | 9e4453efdd0cec0978e93c24cbd52c96d9395e9d | [
"BSD-3-Clause"
] | permissive | tajmone/evennia | 092e146802af6e4f5567cddd51ce55496e158262 | 22f220f58dc30571a55d88b8e33583c043ab6827 | refs/heads/master | 2021-01-21T01:02:28.507130 | 2015-03-21T06:53:38 | 2015-03-21T06:53:38 | 30,980,401 | 1 | 0 | null | 2015-03-19T01:48:09 | 2015-02-18T18:43:46 | Python | UTF-8 | Python | false | false | 17,452 | py | # coding=utf-8
"""
EvForm - a way to create advanced ASCII forms
This is intended for creating advanced ASCII game forms, such as a
large pretty character sheet or info document.
The system works on the basis of a read-in template that is given in a
separate Python file imported into the handler. This file contains
some optional settings and a string mapping out the form. The template
has markers in it to denounce fields to fill. The markers map the
absolute size of the field and will be filled with an `evtable.EvCell`
object when displaying the form.
Note, when printing examples with ANSI color, you need to wrap
the output in `unicode()`, such as `print unicode(form)`. This is
due to a bug in the Python parser and the `print` statement.
Example of input file `testform.py`:
```python
FORMCHAR = "x"
TABLECHAR = "c"
FORM = '''
.------------------------------------------------.
| |
| Name: xxxxx1xxxxx Player: xxxxxxx2xxxxxxx |
| xxxxxxxxxxx |
| |
>----------------------------------------------<
| |
| Desc: xxxxxxxxxxx STR: x4x DEX: x5x |
| xxxxx3xxxxx INT: x6x STA: x7x |
| xxxxxxxxxxx LUC: x8x MAG: x9x |
| |
>----------------------------------------------<
| | |
| cccccccc | ccccccccccccccccccccccccccccccccccc |
| cccccccc | ccccccccccccccccccccccccccccccccccc |
| cccAcccc | ccccccccccccccccccccccccccccccccccc |
| cccccccc | ccccccccccccccccccccccccccccccccccc |
| cccccccc | cccccccccccccccccBccccccccccccccccc |
| | |
-------------------------------------------------
'''
```
The first line of the `FORM` string is ignored. The forms and table
markers must mark out complete, unbroken rectangles, each containing
one embedded single-character identifier (so the smallest element
possible is a 3-character wide form). The identifier can be any
character except for the `FORM_CHAR` and `TABLE_CHAR` and some of the
common ASCII-art elements, like space, `_` `|` `*` etc (see
`INVALID_FORMCHARS` in this module). Form rectangles can have any size,
but must be separated from each other by at least one other
character's width.
Use as follows:
```python
import evform
# create a new form from the template
form = evform.EvForm("path/to/testform.py")
(EvForm can also take a dictionary holding
the required keys FORMCHAR, TABLECHAR and FORM)
# add data to each tagged form cell
form.map(cells={1: "Tom the Bouncer",
2: "Griatch",
3: "A sturdy fellow",
4: 12,
5: 10,
6: 5,
7: 18,
8: 10,
9: 3})
# create the EvTables
tableA = evform.EvTable("HP","MV","MP",
table=[["**"], ["*****"], ["***"]],
border="incols")
tableB = evform.EvTable("Skill", "Value", "Exp",
table=[["Shooting", "Herbalism", "Smithing"],
[12,14,9],["550/1200", "990/1400", "205/900"]],
border="incols")
# add the tables to the proper ids in the form
form.map(tables={"A": tableA,
"B": tableB}
# unicode is required since the example contains non-ascii characters
print unicode(form)
```
This produces the following result:
```
.------------------------------------------------.
| |
| Name: Tom the Player: Griatch |
| Bouncer |
| |
>----------------------------------------------<
| |
| Desc: A sturdy STR: 12 DEX: 10 |
| fellow INT: 5 STA: 18 |
| LUC: 10 MAG: 3 |
| |
>----------------------------------------------<
| | |
| HP|MV|MP | Skill |Value |Exp |
| ~~+~~+~~ | ~~~~~~~~~~~+~~~~~~~~~~~+~~~~~~~~~~~ |
| **|**|** | Shooting |12 |550/1200 |
| |**|* | Herbalism |14 |990/1400 |
| |* | | Smithing |9 |205/900 |
| | |
------------------------------------------------
```
The marked forms have been replaced with EvCells of text and with
EvTables. The form can be updated by simply re-applying `form.map()`
with the updated data.
When working with the template ASCII file, you can use `form.reload()`
to re-read the template and re-apply all existing mappings.
Each component is restrained to the width and height specified by the
template, so it will resize to fit (or crop text if the area is too
small for it). If you try to fit a table into an area it cannot fit
into (when including its borders and at least one line of text), the
form will raise an error.
"""
import re
import copy
from evennia.utils.evtable import EvCell, EvTable
from evennia.utils.utils import all_from_module, to_str, to_unicode
from evennia.utils.ansi import ANSIString
# non-valid form-identifying characters (which can thus be
# used as separators between forms without being detected
# as an identifier). These should be listed in regex form.
INVALID_FORMCHARS = r"\s\/\|\\\*\_\-\#\<\>\~\^\:\;\.\,"
def _to_ansi(obj, regexable=False):
"convert to ANSIString"
if isinstance(obj, dict):
return dict((key, _to_ansi(value, regexable=regexable)) for key, value in obj.items())
elif hasattr(obj, "__iter__"):
return [_to_ansi(o) for o in obj]
else:
return ANSIString(to_unicode(obj), regexable=regexable)
class EvForm(object):
"""
This object is instantiated with a text file and parses
it for rectangular form fields. It can then be fed a
mapping so as to populate the fields with fixed-width
EvCell or Tables.
"""
def __init__(self, filename=None, cells=None, tables=None, form=None, **kwargs):
"""
Initiate the form
keywords:
filename - path to template file
form - dictionary of {"CELLCHAR":char,
"TABLECHAR":char,
"FORM":templatestring}
if this is given, filename is not read.
cells - a dictionary mapping of {id:text}
tables - dictionary mapping of {id:EvTable}
other kwargs are fed as options to the EvCells and EvTables
(see `evtable.EvCell` and `evtable.EvTable` for more info).
"""
self.filename = filename
self.input_form_dict = form
self.cells_mapping = dict((to_str(key, force_string=True), value) for key, value in cells.items()) if cells else {}
self.tables_mapping = dict((to_str(key, force_string=True), value) for key, value in tables.items()) if tables else {}
self.cellchar = "x"
self.tablechar = "c"
self.raw_form = []
self.form = []
# clean kwargs (these cannot be overridden)
kwargs.pop("enforce_size", None)
kwargs.pop("width", None)
kwargs.pop("height", None)
# table/cell options
self.options = kwargs
self.reload()
def _parse_rectangles(self, cellchar, tablechar, form, **kwargs):
"""
Parse a form for rectangular formfields identified by
formchar enclosing an identifier.
"""
# update options given at creation with new input - this
# allows e.g. self.map() to add custom settings for individual
# cells/tables
custom_options = copy.copy(self.options)
custom_options.update(kwargs)
nform = len(form)
mapping = {}
cell_coords = {}
table_coords = {}
# Locate the identifier tags and the horizontal end coords for all forms
re_cellchar = re.compile(r"%s+([^%s%s])%s+" % (cellchar, INVALID_FORMCHARS, cellchar, cellchar))
re_tablechar = re.compile(r"%s+([^%s%s|])%s+" % (tablechar, INVALID_FORMCHARS, tablechar, tablechar))
for iy, line in enumerate(_to_ansi(form, regexable=True)):
# find cells
ix0 = 0
while True:
match = re_cellchar.search(line, ix0)
if match:
# get the width of the rectangle directly from the match
cell_coords[match.group(1)] = [iy, match.start(), match.end()]
ix0 = match.end()
else:
break
# find tables
ix0 = 0
while True:
match = re_tablechar.search(line, ix0)
if match:
# get the width of the rectangle directly from the match
table_coords[match.group(1)] = [iy, match.start(), match.end()]
ix0 = match.end()
else:
break
#print "cell_coords:", cell_coords
#print "table_coords:", table_coords
# get rectangles and assign EvCells
for key, (iy, leftix, rightix) in cell_coords.items():
# scan up to find top of rectangle
dy_up = 0
if iy > 0:
for i in range(1,iy):
#print "dy_up:", [form[iy-i][ix] for ix in range(leftix, rightix)]
if all(form[iy-i][ix] == cellchar for ix in range(leftix, rightix)):
dy_up += 1
else:
break
# find bottom edge of rectangle
dy_down = 0
if iy < nform-1:
for i in range(1,nform-iy-1):
#print "dy_down:", [form[iy+i][ix]for ix in range(leftix, rightix)]
if all(form[iy+i][ix] == cellchar for ix in range(leftix, rightix)):
dy_down += 1
else:
break
# we have our rectangle. Calculate size of EvCell.
iyup = iy - dy_up
iydown = iy + dy_down
width = rightix - leftix
height = abs(iyup - iydown) + 1
# we have all the coordinates we need. Create EvCell.
data = self.cells_mapping.get(key, "")
#if key == "1":
# print "creating cell '%s' (%s):" % (key, data)
# print "iy=%s, iyup=%s, iydown=%s, leftix=%s, rightix=%s, width=%s, height=%s" % (iy, iyup, iydown, leftix, rightix, width, height)
options = { "pad_left":0, "pad_right":0, "pad_top":0, "pad_bottom":0, "align":"l", "valign":"t", "enforce_size":True}
options.update(custom_options)
#if key=="4":
#print "options:", options
mapping[key] = (iyup, leftix, width, height, EvCell(data, width=width, height=height,**options))
# get rectangles and assign Tables
for key, (iy, leftix, rightix) in table_coords.items():
# scan up to find top of rectangle
dy_up = 0
if iy > 0:
for i in range(1,iy):
#print "dy_up:", [form[iy-i][ix] for ix in range(leftix, rightix)]
if all(form[iy-i][ix] == tablechar for ix in range(leftix, rightix)):
dy_up += 1
else:
break
# find bottom edge of rectangle
dy_down = 0
if iy < nform-1:
for i in range(1,nform-iy-1):
#print "dy_down:", [form[iy+i][ix]for ix in range(leftix, rightix)]
if all(form[iy+i][ix] == tablechar for ix in range(leftix, rightix)):
dy_down += 1
else:
break
# we have our rectangle. Calculate size of Table.
iyup = iy - dy_up
iydown = iy + dy_down
width = rightix - leftix
height = abs(iyup - iydown) + 1
# we have all the coordinates we need. Create Table.
table = self.tables_mapping.get(key, None)
#print "creating table '%s' (%s):" % (key, data)
#print "iy=%s, iyup=%s, iydown=%s, leftix=%s, rightix=%s, width=%s, height=%s" % (iy, iyup, iydown, leftix, rightix, width, height)
options = { "pad_left":0, "pad_right":0, "pad_top":0, "pad_bottom":0,
"align":"l", "valign":"t", "enforce_size":True}
options.update(custom_options)
#print "options:", options
if table:
table.reformat(width=width, height=height, **options)
else:
table = EvTable(width=width, height=height, **options)
mapping[key] = (iyup, leftix, width, height, table)
return mapping
def _populate_form(self, raw_form, mapping):
"""
Insert cell contents into form at given locations
"""
form = copy.copy(raw_form)
for key, (iy0, ix0, width, height, cell_or_table) in mapping.items():
# rect is a list of <height> lines, each <width> wide
rect = cell_or_table.get()
for il, rectline in enumerate(rect):
formline = form[iy0+il]
# insert new content, replacing old
form[iy0+il] = formline = formline[:ix0] + rectline + formline[ix0+width:]
return form
def map(self, cells=None, tables=None, **kwargs):
"""
Add mapping for form.
cells - a dictionary of {identifier:celltext}
tables - a dictionary of {identifier:table}
kwargs will be forwarded to tables/cells. See
evtable.EvCell and evtable.EvTable for info.
"""
# clean kwargs (these cannot be overridden)
kwargs.pop("enforce_size", None)
kwargs.pop("width", None)
kwargs.pop("height", None)
new_cells = dict((to_str(key, force_string=True), value) for key, value in cells.items()) if cells else {}
new_tables = dict((to_str(key, force_string=True), value) for key, value in tables.items()) if tables else {}
self.cells_mapping.update(new_cells)
self.tables_mapping.update(new_tables)
self.reload()
def reload(self, filename=None, form=None, **kwargs):
"""
Creates the form from a stored file name
"""
# clean kwargs (these cannot be overridden)
kwargs.pop("enforce_size", None)
kwargs.pop("width", None)
kwargs.pop("height", None)
if form or self.input_form_dict:
datadict = form if form else self.input_form_dict
self.input_form_dict = datadict
elif filename or self.filename:
filename = filename if filename else self.filename
datadict = all_from_module(filename)
self.filename = filename
else:
datadict = {}
cellchar = to_str(datadict.get("FORMCHAR", "x"))
self.cellchar = to_str(cellchar[0] if len(cellchar) > 1 else cellchar)
tablechar = datadict.get("TABLECHAR", "c")
self.tablechar = tablechar[0] if len(tablechar) > 1 else tablechar
# split into a list of list of lines. Form can be indexed with form[iy][ix]
self.raw_form = _to_ansi(to_unicode(datadict.get("FORM", "")).split("\n"))
# strip first line
self.raw_form = self.raw_form[1:] if self.raw_form else self.raw_form
self.options.update(kwargs)
# parse and replace
self.mapping = self._parse_rectangles(self.cellchar, self.tablechar, self.raw_form, **kwargs)
self.form = self._populate_form(self.raw_form, self.mapping)
def __str__(self):
"Prints the form"
return ANSIString("\n").join([line for line in self.form])
def __unicode__(self):
"prints the form"
return unicode(ANSIString("\n").join([line for line in self.form]))
def _test():
"test evform"
form = EvForm("evennia.utils.evform_test")
# add data to each tagged form cell
form.map(cells={1: "{gTom the Bouncer{n",
2: "{yGriatch{n",
3: "A sturdy fellow",
4: 12,
5: 10,
6: 5,
7: 18,
8: 10,
9: 3})
# create the EvTables
tableA = EvTable("HP","MV","MP",
table=[["**"], ["*****"], ["***"]],
border="incols")
tableB = EvTable("Skill", "Value", "Exp",
table=[["Shooting", "Herbalism", "Smithing"],
[12,14,9],["550/1200", "990/1400", "205/900"]],
border="incols")
# add the tables to the proper ids in the form
form.map(tables={"A": tableA,
"B": tableB})
# unicode is required since the example contains non-ascii characters
print unicode(form)
return form
| [
"[email protected]"
] | |
a031c313195e9f2b8cf80dab81820b5fad9aebac | 851b8ac597146bf467b96dea48331332eba48833 | /custom_components/lightwave2/sensor.py | e7865f1dec4978ec26fca4528c2fb4c5804b8bf5 | [] | no_license | bigal82/bruces_homeassistant_config | 9d569052ed1efd58a4f7035eba19007ff6be56c5 | 3def555be1b8e72a0f7a4978974d96c54544053a | refs/heads/main | 2023-08-24T09:09:37.406093 | 2021-10-25T07:09:56 | 2021-10-25T07:09:56 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,166 | py | import logging
from .const import LIGHTWAVE_LINK2, LIGHTWAVE_ENTITIES, LIGHTWAVE_WEBHOOK, DOMAIN
from homeassistant.components.sensor import STATE_CLASS_MEASUREMENT, STATE_CLASS_TOTAL_INCREASING, SensorEntity, SensorEntityDescription
from homeassistant.const import POWER_WATT, ENERGY_WATT_HOUR, DEVICE_CLASS_POWER, DEVICE_CLASS_ENERGY
from homeassistant.core import callback
DEPENDENCIES = ['lightwave2']
_LOGGER = logging.getLogger(__name__)
ENERGY_SENSORS = [
SensorEntityDescription(
key="power",
native_unit_of_measurement=POWER_WATT,
device_class=DEVICE_CLASS_POWER,
state_class=STATE_CLASS_MEASUREMENT,
name="Current Consumption",
),
SensorEntityDescription(
key="energy",
native_unit_of_measurement=ENERGY_WATT_HOUR,
device_class=DEVICE_CLASS_ENERGY,
state_class=STATE_CLASS_TOTAL_INCREASING,
name="Total Consumption",
)
]
async def async_setup_entry(hass, config_entry, async_add_entities):
"""Find and return LightWave sensors."""
sensors = []
link = hass.data[DOMAIN][config_entry.entry_id][LIGHTWAVE_LINK2]
url = hass.data[DOMAIN][config_entry.entry_id][LIGHTWAVE_WEBHOOK]
for featureset_id, name in link.get_energy():
for description in ENERGY_SENSORS:
sensors.append(LWRF2Sensor(name, featureset_id, link, url, description))
for featureset_id, name in link.get_switches():
if link.get_featureset_by_id(featureset_id).reports_power():
for description in ENERGY_SENSORS:
sensors.append(LWRF2Sensor(name, featureset_id, link, url, description))
for featureset_id, name in link.get_lights():
if link.get_featureset_by_id(featureset_id).reports_power():
for description in ENERGY_SENSORS:
sensors.append(LWRF2Sensor(name, featureset_id, link, url, description))
hass.data[DOMAIN][config_entry.entry_id][LIGHTWAVE_ENTITIES].extend(sensors)
async_add_entities(sensors)
class LWRF2Sensor(SensorEntity):
"""Representation of a LightWaveRF power usage sensor."""
def __init__(self, name, featureset_id, link, url, description):
self._name = f"{name} {description.name}"
self._device = name
_LOGGER.debug("Adding sensor: %s ", self._name)
self._featureset_id = featureset_id
self._lwlink = link
self._url = url
self.entity_description = description
self._state = self._lwlink.get_featureset_by_id(self._featureset_id).features[self.entity_description.key][1]
self._gen2 = self._lwlink.get_featureset_by_id(
self._featureset_id).is_gen2()
async def async_added_to_hass(self):
"""Subscribe to events."""
await self._lwlink.async_register_callback(self.async_update_callback)
if self._url is not None:
for featurename in self._lwlink.get_featureset_by_id(self._featureset_id).features:
featureid = self._lwlink.get_featureset_by_id(self._featureset_id).features[featurename][0]
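                # the webhook ref reuses the feature id, with '+' replaced by 'P'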
_LOGGER.debug("Registering webhook: %s %s", featurename, featureid.replace("+", "P"))
req = await self._lwlink.async_register_webhook(self._url, featureid, "hass" + featureid.replace("+", "P"), overwrite = True)
@callback
def async_update_callback(self, **kwargs):
"""Update the component's state."""
self.async_schedule_update_ha_state(True)
@property
def should_poll(self):
"""Lightwave2 library will push state, no polling needed"""
return False
@property
def assumed_state(self):
"""Gen 2 devices will report state changes, gen 1 doesn't"""
return not self._gen2
async def async_update(self):
"""Update state"""
self._state = self._lwlink.get_featureset_by_id(self._featureset_id).features[self.entity_description.key][1]
@property
def name(self):
"""Lightwave switch name."""
return self._name
@property
def unique_id(self):
"""Unique identifier. Provided by hub."""
return f"{self._featureset_id}_{self.entity_description.key}"
@property
def native_value(self):
return self._state
@property
def device_state_attributes(self):
"""Return the optional state attributes."""
attribs = {}
for featurename, featuredict in self._lwlink.get_featureset_by_id(self._featureset_id).features.items():
attribs['lwrf_' + featurename] = featuredict[1]
attribs['lrwf_product_code'] = self._lwlink.get_featureset_by_id(self._featureset_id).product_code
return attribs
@property
def device_info(self):
return {
'identifiers': {
# Serial numbers are unique identifiers within a specific domain
(DOMAIN, self._featureset_id)
},
'name': self._device,
'manufacturer': "Lightwave RF",
'model': self._lwlink.get_featureset_by_id(
self._featureset_id).product_code
#TODO 'via_device': (hue.DOMAIN, self.api.bridgeid),
} | [
"[email protected]"
] | |
e864f690bfea3be073b042cf3904b65ff035cfcb | 6c066611b11a8de5e2c22c30cfcc578a4c49edce | /GLSL/Source/Crok_blobs_GL/Crok_blobs_GL.py | 204b07f1e6bfa638561e524a79bbf87b93df6fb1 | [] | no_license | NatronGitHub/natron-plugins | ad2d9227637b4b86b45f92856fa54d327872a0a6 | b0c499fb6391024f54be9f26ed41b5cf7475d574 | refs/heads/master | 2022-12-12T10:02:20.252222 | 2022-11-30T02:29:04 | 2022-11-30T02:29:04 | 130,576,224 | 332 | 67 | null | 2022-11-30T02:29:05 | 2018-04-22T14:39:29 | Python | UTF-8 | Python | false | false | 41,352 | py | # -*- coding: utf-8 -*-
# DO NOT EDIT THIS FILE
# This file was automatically generated by Natron PyPlug exporter version 10.
# Hand-written code should be added in a separate file named Crok_blobs_GLExt.py
# See http://natron.readthedocs.org/en/master/devel/groups.html#adding-hand-written-code-callbacks-etc
# Note that Viewers are never exported
import NatronEngine
import sys
# Try to import the extensions file where callbacks and hand-written code should be located.
try:
from Crok_blobs_GLExt import *
except ImportError:
pass
def getPluginID():
return "natron.community.plugins.Crok_blobs_GL"
def getLabel():
return "Crok_blobs_GL"
def getVersion():
return 1.0
def getIconPath():
return "Crok_blobs_GL.png"
def getGrouping():
return "Community/GLSL/Source"
def getPluginDescription():
return "Creates blob like sturctures.\n( https://vimeo.com/86783700 )"
def createInstance(app,group):
# Create all nodes in the group
# Create the parameters of the group node the same way we did for all internal nodes
lastNode = group
lastNode.setColor(0.9529, 0.4314, 1)
# Create the user parameters
lastNode.Controls = lastNode.createPageParam("Controls", "Controls")
param = lastNode.createStringParam("sep01", "")
param.setType(NatronEngine.StringParam.TypeEnum.eStringTypeLabel)
# Add the param to the page
lastNode.Controls.addParam(param)
# Set param properties
param.setHelp("")
param.setAddNewLine(True)
param.setEvaluateOnChange(False)
param.setAnimationEnabled(False)
lastNode.sep01 = param
del param
param = lastNode.createStringParam("sep02", "")
param.setType(NatronEngine.StringParam.TypeEnum.eStringTypeLabel)
# Add the param to the page
lastNode.Controls.addParam(param)
# Set param properties
param.setHelp("")
param.setAddNewLine(True)
param.setEvaluateOnChange(False)
param.setAnimationEnabled(False)
lastNode.sep02 = param
del param
param = lastNode.createSeparatorParam("ASPECT", "Aspect")
# Add the param to the page
lastNode.Controls.addParam(param)
# Set param properties
param.setHelp("")
param.setAddNewLine(True)
param.setPersistent(False)
param.setEvaluateOnChange(False)
lastNode.ASPECT = param
del param
param = lastNode.createStringParam("sep03", "")
param.setType(NatronEngine.StringParam.TypeEnum.eStringTypeLabel)
# Add the param to the page
lastNode.Controls.addParam(param)
# Set param properties
param.setHelp("")
param.setAddNewLine(True)
param.setEvaluateOnChange(False)
param.setAnimationEnabled(False)
lastNode.sep03 = param
del param
param = lastNode.createStringParam("sep04", "")
param.setType(NatronEngine.StringParam.TypeEnum.eStringTypeLabel)
# Add the param to the page
lastNode.Controls.addParam(param)
# Set param properties
param.setHelp("")
param.setAddNewLine(True)
param.setEvaluateOnChange(False)
param.setAnimationEnabled(False)
lastNode.sep04 = param
del param
param = lastNode.createDoubleParam("Shadertoy1_2paramValueFloat3", "Shape : ")
param.setMinimum(-3, 0)
param.setMaximum(3, 0)
param.setDisplayMinimum(-3, 0)
param.setDisplayMaximum(3, 0)
param.setDefaultValue(0.7, 0)
param.restoreDefaultValue(0)
# Add the param to the page
lastNode.Controls.addParam(param)
# Set param properties
param.setAddNewLine(True)
param.setAnimationEnabled(True)
lastNode.Shadertoy1_2paramValueFloat3 = param
del param
param = lastNode.createStringParam("sep05", "")
param.setType(NatronEngine.StringParam.TypeEnum.eStringTypeLabel)
# Add the param to the page
lastNode.Controls.addParam(param)
# Set param properties
param.setHelp("")
param.setAddNewLine(True)
param.setEvaluateOnChange(False)
param.setAnimationEnabled(False)
lastNode.sep05 = param
del param
param = lastNode.createDoubleParam("Shadertoy1_2paramValueFloat2", "Smoothness : ")
param.setMinimum(-1000, 0)
param.setMaximum(1000, 0)
param.setDisplayMinimum(-1000, 0)
param.setDisplayMaximum(1000, 0)
param.setDefaultValue(0.5, 0)
param.restoreDefaultValue(0)
# Add the param to the page
lastNode.Controls.addParam(param)
# Set param properties
param.setAddNewLine(True)
param.setAnimationEnabled(True)
lastNode.Shadertoy1_2paramValueFloat2 = param
del param
param = lastNode.createStringParam("sep06", "")
param.setType(NatronEngine.StringParam.TypeEnum.eStringTypeLabel)
# Add the param to the page
lastNode.Controls.addParam(param)
# Set param properties
param.setHelp("")
param.setAddNewLine(True)
param.setEvaluateOnChange(False)
param.setAnimationEnabled(False)
lastNode.sep06 = param
del param
param = lastNode.createStringParam("sep07", "")
param.setType(NatronEngine.StringParam.TypeEnum.eStringTypeLabel)
# Add the param to the page
lastNode.Controls.addParam(param)
# Set param properties
param.setHelp("")
param.setAddNewLine(True)
param.setEvaluateOnChange(False)
param.setAnimationEnabled(False)
lastNode.sep07 = param
del param
param = lastNode.createSeparatorParam("CAMERA", "Camera")
# Add the param to the page
lastNode.Controls.addParam(param)
# Set param properties
param.setHelp("")
param.setAddNewLine(True)
param.setPersistent(False)
param.setEvaluateOnChange(False)
lastNode.CAMERA = param
del param
param = lastNode.createStringParam("sep08", "")
param.setType(NatronEngine.StringParam.TypeEnum.eStringTypeLabel)
# Add the param to the page
lastNode.Controls.addParam(param)
# Set param properties
param.setHelp("")
param.setAddNewLine(True)
param.setEvaluateOnChange(False)
param.setAnimationEnabled(False)
lastNode.sep08 = param
del param
param = lastNode.createStringParam("sep09", "")
param.setType(NatronEngine.StringParam.TypeEnum.eStringTypeLabel)
# Add the param to the page
lastNode.Controls.addParam(param)
# Set param properties
param.setHelp("")
param.setAddNewLine(True)
param.setEvaluateOnChange(False)
param.setAnimationEnabled(False)
lastNode.sep09 = param
del param
param = lastNode.createDoubleParam("Shadertoy1_2paramValueFloat4", "Zoom : ")
param.setMinimum(1.000000000000001e-05, 0)
param.setMaximum(10, 0)
param.setDisplayMinimum(1.000000000000001e-05, 0)
param.setDisplayMaximum(10, 0)
param.setDefaultValue(1, 0)
param.restoreDefaultValue(0)
# Add the param to the page
lastNode.Controls.addParam(param)
# Set param properties
param.setAddNewLine(True)
param.setAnimationEnabled(True)
lastNode.Shadertoy1_2paramValueFloat4 = param
del param
param = lastNode.createStringParam("sep10", "")
param.setType(NatronEngine.StringParam.TypeEnum.eStringTypeLabel)
# Add the param to the page
lastNode.Controls.addParam(param)
# Set param properties
param.setHelp("")
param.setAddNewLine(True)
param.setEvaluateOnChange(False)
param.setAnimationEnabled(False)
lastNode.sep10 = param
del param
param = lastNode.createDouble2DParam("Shadertoy1_2paramValueVec28", "Camera : ")
# Add the param to the page
lastNode.Controls.addParam(param)
# Set param properties
param.setAddNewLine(True)
param.setAnimationEnabled(True)
lastNode.Shadertoy1_2paramValueVec28 = param
del param
param = lastNode.createStringParam("sep11", "")
param.setType(NatronEngine.StringParam.TypeEnum.eStringTypeLabel)
# Add the param to the page
lastNode.Controls.addParam(param)
# Set param properties
param.setHelp("")
param.setAddNewLine(True)
param.setEvaluateOnChange(False)
param.setAnimationEnabled(False)
lastNode.sep11 = param
del param
param = lastNode.createStringParam("sep12", "")
param.setType(NatronEngine.StringParam.TypeEnum.eStringTypeLabel)
# Add the param to the page
lastNode.Controls.addParam(param)
# Set param properties
param.setHelp("")
param.setAddNewLine(True)
param.setEvaluateOnChange(False)
param.setAnimationEnabled(False)
lastNode.sep12 = param
del param
param = lastNode.createSeparatorParam("TIMING", "Timing")
# Add the param to the page
lastNode.Controls.addParam(param)
# Set param properties
param.setHelp("")
param.setAddNewLine(True)
param.setPersistent(False)
param.setEvaluateOnChange(False)
lastNode.TIMING = param
del param
param = lastNode.createStringParam("sep13", "")
param.setType(NatronEngine.StringParam.TypeEnum.eStringTypeLabel)
# Add the param to the page
lastNode.Controls.addParam(param)
# Set param properties
param.setHelp("")
param.setAddNewLine(True)
param.setEvaluateOnChange(False)
param.setAnimationEnabled(False)
lastNode.sep13 = param
del param
param = lastNode.createStringParam("sep14", "")
param.setType(NatronEngine.StringParam.TypeEnum.eStringTypeLabel)
# Add the param to the page
lastNode.Controls.addParam(param)
# Set param properties
param.setHelp("")
param.setAddNewLine(True)
param.setEvaluateOnChange(False)
param.setAnimationEnabled(False)
lastNode.sep14 = param
del param
param = lastNode.createDoubleParam("Shadertoy1_2paramValueFloat0", "Speed : ")
param.setDefaultValue(5, 0)
param.restoreDefaultValue(0)
# Add the param to the page
lastNode.Controls.addParam(param)
# Set param properties
param.setAddNewLine(True)
param.setAnimationEnabled(True)
lastNode.Shadertoy1_2paramValueFloat0 = param
del param
param = lastNode.createStringParam("sep15", "")
param.setType(NatronEngine.StringParam.TypeEnum.eStringTypeLabel)
# Add the param to the page
lastNode.Controls.addParam(param)
# Set param properties
param.setHelp("")
param.setAddNewLine(True)
param.setEvaluateOnChange(False)
param.setAnimationEnabled(False)
lastNode.sep15 = param
del param
param = lastNode.createDoubleParam("Shadertoy1_2paramValueFloat1", "Offset : ")
param.setMinimum(-1000, 0)
param.setMaximum(1000, 0)
param.setDisplayMinimum(-1000, 0)
param.setDisplayMaximum(1000, 0)
# Add the param to the page
lastNode.Controls.addParam(param)
# Set param properties
param.setAddNewLine(True)
param.setAnimationEnabled(True)
lastNode.Shadertoy1_2paramValueFloat1 = param
del param
param = lastNode.createStringParam("sep17", "")
param.setType(NatronEngine.StringParam.TypeEnum.eStringTypeLabel)
# Add the param to the page
lastNode.Controls.addParam(param)
# Set param properties
param.setHelp("")
param.setAddNewLine(True)
param.setEvaluateOnChange(False)
param.setAnimationEnabled(False)
lastNode.sep17 = param
del param
param = lastNode.createStringParam("sep16", "")
param.setType(NatronEngine.StringParam.TypeEnum.eStringTypeLabel)
# Add the param to the page
lastNode.Controls.addParam(param)
# Set param properties
param.setHelp("")
param.setAddNewLine(True)
param.setEvaluateOnChange(False)
param.setAnimationEnabled(False)
lastNode.sep16 = param
del param
param = lastNode.createSeparatorParam("COLOURS", "Colours")
# Add the param to the page
lastNode.Controls.addParam(param)
# Set param properties
param.setHelp("")
param.setAddNewLine(True)
param.setPersistent(False)
param.setEvaluateOnChange(False)
lastNode.COLOURS = param
del param
param = lastNode.createStringParam("sep18", "")
param.setType(NatronEngine.StringParam.TypeEnum.eStringTypeLabel)
# Add the param to the page
lastNode.Controls.addParam(param)
# Set param properties
param.setHelp("")
param.setAddNewLine(True)
param.setEvaluateOnChange(False)
param.setAnimationEnabled(False)
lastNode.sep18 = param
del param
param = lastNode.createStringParam("sep19", "")
param.setType(NatronEngine.StringParam.TypeEnum.eStringTypeLabel)
# Add the param to the page
lastNode.Controls.addParam(param)
# Set param properties
param.setHelp("")
param.setAddNewLine(True)
param.setEvaluateOnChange(False)
param.setAnimationEnabled(False)
lastNode.sep19 = param
del param
param = lastNode.createDoubleParam("Shadertoy1_2paramValueFloat5", "Layer : ")
param.setMinimum(1, 0)
param.setMaximum(99.99999999999999, 0)
param.setDisplayMinimum(1, 0)
param.setDisplayMaximum(99.99999999999999, 0)
param.setDefaultValue(8, 0)
param.restoreDefaultValue(0)
# Add the param to the page
lastNode.Controls.addParam(param)
# Set param properties
param.setAddNewLine(True)
param.setAnimationEnabled(True)
lastNode.Shadertoy1_2paramValueFloat5 = param
del param
param = lastNode.createStringParam("sep20", "")
param.setType(NatronEngine.StringParam.TypeEnum.eStringTypeLabel)
# Add the param to the page
lastNode.Controls.addParam(param)
# Set param properties
param.setHelp("")
param.setAddNewLine(True)
param.setEvaluateOnChange(False)
param.setAnimationEnabled(False)
lastNode.sep20 = param
del param
param = lastNode.createColorParam("Shadertoy1_2paramValueVec36", "Blobs colour : ", False)
param.setDefaultValue(0.2, 1)
param.restoreDefaultValue(1)
param.setDefaultValue(0.3, 2)
param.restoreDefaultValue(2)
# Add the param to the page
lastNode.Controls.addParam(param)
# Set param properties
param.setAddNewLine(True)
param.setAnimationEnabled(True)
lastNode.Shadertoy1_2paramValueVec36 = param
del param
param = lastNode.createStringParam("sep21", "")
param.setType(NatronEngine.StringParam.TypeEnum.eStringTypeLabel)
# Add the param to the page
lastNode.Controls.addParam(param)
# Set param properties
param.setHelp("")
param.setAddNewLine(True)
param.setEvaluateOnChange(False)
param.setAnimationEnabled(False)
lastNode.sep21 = param
del param
param = lastNode.createColorParam("Shadertoy1_2paramValueVec37", "Fog colour : ", False)
param.setDefaultValue(1, 0)
param.restoreDefaultValue(0)
param.setDefaultValue(1, 1)
param.restoreDefaultValue(1)
param.setDefaultValue(1, 2)
param.restoreDefaultValue(2)
# Add the param to the page
lastNode.Controls.addParam(param)
# Set param properties
param.setAddNewLine(True)
param.setAnimationEnabled(True)
lastNode.Shadertoy1_2paramValueVec37 = param
del param
param = lastNode.createStringParam("sep22", "")
param.setType(NatronEngine.StringParam.TypeEnum.eStringTypeLabel)
# Add the param to the page
lastNode.Controls.addParam(param)
# Set param properties
param.setHelp("")
param.setAddNewLine(True)
param.setEvaluateOnChange(False)
param.setAnimationEnabled(False)
lastNode.sep22 = param
del param
param = lastNode.createStringParam("sep23", "")
param.setType(NatronEngine.StringParam.TypeEnum.eStringTypeLabel)
# Add the param to the page
lastNode.Controls.addParam(param)
# Set param properties
param.setHelp("")
param.setAddNewLine(True)
param.setEvaluateOnChange(False)
param.setAnimationEnabled(False)
lastNode.sep23 = param
del param
param = lastNode.createSeparatorParam("OUTPUT", "Output")
# Add the param to the page
lastNode.Controls.addParam(param)
# Set param properties
param.setHelp("")
param.setAddNewLine(True)
param.setPersistent(False)
param.setEvaluateOnChange(False)
lastNode.OUTPUT = param
del param
param = lastNode.createStringParam("sep24", "")
param.setType(NatronEngine.StringParam.TypeEnum.eStringTypeLabel)
# Add the param to the page
lastNode.Controls.addParam(param)
# Set param properties
param.setHelp("")
param.setAddNewLine(True)
param.setEvaluateOnChange(False)
param.setAnimationEnabled(False)
lastNode.sep24 = param
del param
param = lastNode.createStringParam("sep25", "")
param.setType(NatronEngine.StringParam.TypeEnum.eStringTypeLabel)
# Add the param to the page
lastNode.Controls.addParam(param)
# Set param properties
param.setHelp("")
param.setAddNewLine(True)
param.setEvaluateOnChange(False)
param.setAnimationEnabled(False)
lastNode.sep25 = param
del param
param = lastNode.createChoiceParam("Shadertoy1_2bbox", "Output BBox : ")
param.setDefaultValue(1)
param.restoreDefaultValue()
# Add the param to the page
lastNode.Controls.addParam(param)
# Set param properties
param.setAddNewLine(True)
param.setAnimationEnabled(True)
lastNode.Shadertoy1_2bbox = param
del param
param = lastNode.createChoiceParam("Shadertoy1_2NatronParamFormatChoice", "Format : ")
param.setDefaultValue(6)
param.restoreDefaultValue()
# Add the param to the page
lastNode.Controls.addParam(param)
# Set param properties
param.setAddNewLine(False)
param.setAnimationEnabled(False)
lastNode.Shadertoy1_2NatronParamFormatChoice = param
del param
param = lastNode.createStringParam("sep26", "")
param.setType(NatronEngine.StringParam.TypeEnum.eStringTypeLabel)
# Add the param to the page
lastNode.Controls.addParam(param)
# Set param properties
param.setHelp("")
param.setAddNewLine(True)
param.setEvaluateOnChange(False)
param.setAnimationEnabled(False)
lastNode.sep26 = param
del param
param = lastNode.createStringParam("sep27", "")
param.setType(NatronEngine.StringParam.TypeEnum.eStringTypeLabel)
# Add the param to the page
lastNode.Controls.addParam(param)
# Set param properties
param.setHelp("")
param.setAddNewLine(True)
param.setEvaluateOnChange(False)
param.setAnimationEnabled(False)
lastNode.sep27 = param
del param
lastNode.Credits = lastNode.createPageParam("Credits", "Credits")
param = lastNode.createStringParam("sep101", "")
param.setType(NatronEngine.StringParam.TypeEnum.eStringTypeLabel)
# Add the param to the page
lastNode.Credits.addParam(param)
# Set param properties
param.setHelp("")
param.setAddNewLine(True)
param.setEvaluateOnChange(False)
param.setAnimationEnabled(False)
lastNode.sep101 = param
del param
param = lastNode.createStringParam("sep102", "")
param.setType(NatronEngine.StringParam.TypeEnum.eStringTypeLabel)
# Add the param to the page
lastNode.Credits.addParam(param)
# Set param properties
param.setHelp("")
param.setAddNewLine(True)
param.setEvaluateOnChange(False)
param.setAnimationEnabled(False)
lastNode.sep102 = param
del param
param = lastNode.createSeparatorParam("NAME", "Crok_blobs_GL v1.0")
# Add the param to the page
lastNode.Credits.addParam(param)
# Set param properties
param.setHelp("")
param.setAddNewLine(True)
param.setPersistent(False)
param.setEvaluateOnChange(False)
lastNode.NAME = param
del param
param = lastNode.createStringParam("sep103", "")
param.setType(NatronEngine.StringParam.TypeEnum.eStringTypeLabel)
# Add the param to the page
lastNode.Credits.addParam(param)
# Set param properties
param.setHelp("")
param.setAddNewLine(True)
param.setEvaluateOnChange(False)
param.setAnimationEnabled(False)
lastNode.sep103 = param
del param
param = lastNode.createStringParam("sep104", "")
param.setType(NatronEngine.StringParam.TypeEnum.eStringTypeLabel)
# Add the param to the page
lastNode.Credits.addParam(param)
# Set param properties
param.setHelp("")
param.setAddNewLine(True)
param.setEvaluateOnChange(False)
param.setAnimationEnabled(False)
lastNode.sep104 = param
del param
param = lastNode.createSeparatorParam("LINE01", "")
# Add the param to the page
lastNode.Credits.addParam(param)
# Set param properties
param.setHelp("")
param.setAddNewLine(True)
param.setPersistent(False)
param.setEvaluateOnChange(False)
lastNode.LINE01 = param
del param
param = lastNode.createStringParam("sep105", "")
param.setType(NatronEngine.StringParam.TypeEnum.eStringTypeLabel)
# Add the param to the page
lastNode.Credits.addParam(param)
# Set param properties
param.setHelp("")
param.setAddNewLine(True)
param.setEvaluateOnChange(False)
param.setAnimationEnabled(False)
lastNode.sep105 = param
del param
param = lastNode.createStringParam("sep106", "")
param.setType(NatronEngine.StringParam.TypeEnum.eStringTypeLabel)
# Add the param to the page
lastNode.Credits.addParam(param)
# Set param properties
param.setHelp("")
param.setAddNewLine(True)
param.setEvaluateOnChange(False)
param.setAnimationEnabled(False)
lastNode.sep106 = param
del param
param = lastNode.createSeparatorParam("FR", "ShaderToy 0.8.8")
# Add the param to the page
lastNode.Credits.addParam(param)
# Set param properties
param.setHelp("")
param.setAddNewLine(True)
param.setPersistent(False)
param.setEvaluateOnChange(False)
lastNode.FR = param
del param
param = lastNode.createStringParam("sep107", "")
param.setType(NatronEngine.StringParam.TypeEnum.eStringTypeLabel)
# Add the param to the page
lastNode.Credits.addParam(param)
# Set param properties
param.setHelp("")
param.setAddNewLine(True)
param.setEvaluateOnChange(False)
param.setAnimationEnabled(False)
lastNode.sep107 = param
del param
param = lastNode.createStringParam("sep108", "")
param.setType(NatronEngine.StringParam.TypeEnum.eStringTypeLabel)
# Add the param to the page
lastNode.Credits.addParam(param)
# Set param properties
param.setHelp("")
param.setAddNewLine(True)
param.setEvaluateOnChange(False)
param.setAnimationEnabled(False)
lastNode.sep108 = param
del param
param = lastNode.createSeparatorParam("CONVERSION", " (Fabrice Fernandez - 2018)")
# Add the param to the page
lastNode.Credits.addParam(param)
# Set param properties
param.setHelp("")
param.setAddNewLine(True)
param.setPersistent(False)
param.setEvaluateOnChange(False)
lastNode.CONVERSION = param
del param
param = lastNode.createStringParam("sep109", "")
param.setType(NatronEngine.StringParam.TypeEnum.eStringTypeLabel)
# Add the param to the page
lastNode.Credits.addParam(param)
# Set param properties
param.setHelp("")
param.setAddNewLine(True)
param.setEvaluateOnChange(False)
param.setAnimationEnabled(False)
lastNode.sep109 = param
del param
param = lastNode.createStringParam("sep110", "")
param.setType(NatronEngine.StringParam.TypeEnum.eStringTypeLabel)
# Add the param to the page
lastNode.Credits.addParam(param)
# Set param properties
param.setHelp("")
param.setAddNewLine(True)
param.setEvaluateOnChange(False)
param.setAnimationEnabled(False)
lastNode.sep110 = param
del param
# Refresh the GUI with the newly created parameters
lastNode.setPagesOrder(['Controls', 'Credits', 'Node', 'Settings'])
lastNode.refreshUserParamsGUI()
del lastNode
# Start of node "Output2"
lastNode = app.createNode("fr.inria.built-in.Output", 1, group)
lastNode.setLabel("Output2")
lastNode.setPosition(4139, 4048)
lastNode.setSize(90, 36)
lastNode.setColor(0.7, 0.7, 0.7)
groupOutput2 = lastNode
del lastNode
# End of node "Output2"
# Start of node "Shadertoy1_2"
lastNode = app.createNode("net.sf.openfx.Shadertoy", 1, group)
lastNode.setScriptName("Shadertoy1_2")
lastNode.setLabel("Shadertoy1_2")
lastNode.setPosition(4139, 3875)
lastNode.setSize(90, 36)
lastNode.setColor(0.3, 0.5, 0.2)
groupShadertoy1_2 = lastNode
param = lastNode.getParam("paramValueFloat0")
if param is not None:
param.setValue(5, 0)
del param
param = lastNode.getParam("paramValueFloat1")
if param is not None:
param.setValue(0, 0)
del param
param = lastNode.getParam("paramValueFloat2")
if param is not None:
param.setValue(0.5, 0)
del param
param = lastNode.getParam("paramValueFloat3")
if param is not None:
param.setValue(0.7, 0)
del param
param = lastNode.getParam("paramValueFloat4")
if param is not None:
param.setValue(1, 0)
del param
param = lastNode.getParam("paramValueFloat5")
if param is not None:
param.setValue(8, 0)
del param
param = lastNode.getParam("paramValueVec36")
if param is not None:
param.setValue(0, 0)
param.setValue(0.2, 1)
param.setValue(0.3, 2)
del param
param = lastNode.getParam("paramValueVec37")
if param is not None:
param.setValue(1, 0)
param.setValue(1, 1)
param.setValue(1, 2)
del param
param = lastNode.getParam("paramValueVec28")
if param is not None:
param.setValue(0, 0)
param.setValue(0, 1)
del param
param = lastNode.getParam("imageShaderSource")
if param is not None:
param.setValue("//\r\n//\r\n// MMMMMMMMMMMMMMMMMMMMMMMMMMMM\r\n// MM. .MM\r\n// MM. .MMMMMMMMMMMMMMMMMMMMMM. .MM\r\n// MM. .MMMMMMMMMMMMMMMMMMMMMMMM. .MM\r\n// MM. .MMMM MMMMMMM MMM. .MM\r\n// MM. .MMM MMMMMM MMM. .MM\r\n// MM. .MmM MMMM MMM. .MM\r\n// MM. .MMM MM MMM. .MM\r\n// MM. .MMM M MMM. .MM\r\n// MM. .MMM MMM. .MM\r\n// MM. .MMM MMM. .MM\r\n// MM. .MMM M MMM. .MM\r\n// MM. .MMM MM MMM. .MM\r\n// MM. .MMM MMM MMM. .MM\r\n// MM. .MMM MMMM MMM. .MM\r\n// MM. .MMMMMMMMMMMMMMMMMMMMMMMM. .MM\r\n// MM. .MMMMMMMMMMMMMMMMMMMMMM. .MM\r\n// MM. .MM\r\n// MMMMMMMMMMMMMMMMMMMMMMMMMMMM\r\n//\r\n//\r\n//\r\n//\r\n// Adaptation pour Natron par F. Fernandez\r\n// Code original : crok_blobs Matchbox pour Autodesk Flame\r\n\r\n// Adapted to Natron by F.Fernandez\r\n// Original code : crok_blobs Matchbox for Autodesk Flame\r\n\r\n\r\n\r\nuniform float Speed = 5.0; // Speed : (speed)\r\nuniform float Offset = 0.0; // Offset : (offset), min=-1000, max=1000\r\nuniform float Smoothness = 0.5; // Smoothness : (smoothness), min=-1000, max=1000\r\nuniform float Shape = 0.7; // Shape : (shape), min=-3, max=3\r\nuniform float Zoom = 1.0; // Zoom : (zoom), min=0.00001, max=10\r\nuniform float Layer = 8.0; // Layer : (layer), min=1.0, max=100.0\r\n\r\nuniform vec3 Blobs = vec3(0.0,0.2,0.3);\r\nuniform vec3 Depth = vec3(1,1,1);\r\n\r\nvec2 Resolution = vec2(iResolution.x * Zoom, iResolution.y * Zoom);\r\nfloat Time = iTime *-.05 * Speed + Offset;\r\n\r\n\r\n\r\nuniform vec2 Camera;\r\n\r\n\r\nconst float pi = 3.14159;\r\n\r\nvec3 rotate(vec3 v,vec2 r) \r\n{\r\n\tmat3 rxmat = mat3(1, 0 , 0 ,\r\n\t\t\t 0,cos(r.y),-sin(r.y),\r\n\t\t\t 0,sin(r.y), cos(r.y));\r\n\tmat3 rymat = mat3(cos(r.x), 0,-sin(r.x),\r\n\t\t\t 0 , 1, 0 ,\r\n\t\t\t sin(r.x), 0,cos(r.x));\r\n\t\r\n\t\r\n\treturn v*rxmat*rymat;\r\n\t\r\n}\r\n\r\nfloat snoise(vec3 v);\r\n\r\n//\r\n// Description : Array and textureless GLSL 2D/3D/4D simplex \r\n// noise functions.\r\n// Author : Ian McEwan, Ashima Arts.\r\n// Maintainer : ijm\r\n// Lastmod : 20110822 (ijm)\r\n// License : Copyright (C) 2011 Ashima Arts. All rights reserved.\r\n// Distributed under the MIT License. 
See LICENSE file.\r\n// https://github.com/ashima/webglse-noi\r\n// \r\n\r\nvec3 mod289(vec3 x) {\r\n return x - floor(x * (1.0 / 289.0)) * 289.0;\r\n}\r\n\r\nvec4 mod289(vec4 x) {\r\n return x - floor(x * (1.0 / 289.0)) * 289.0;\r\n}\r\n\r\nvec4 permute(vec4 x) {\r\n return mod289(((x*34.0)+1.0)*x);\r\n}\r\n\r\nvec4 taylorInvSqrt(vec4 r)\r\n{\r\n return 1.79284291400159 - 0.85373472095314 * r;\r\n}\r\n\r\nfloat snoise(vec3 v)\r\n { \r\n const vec2 C = vec2(1.0/6.0, 1.0/3.0) ;\r\n const vec4 D = vec4(0.0, 0.5, 1.0, 2.0);\r\n\r\n// First corner\r\n vec3 i = floor(v + dot(v, C.yyy) );\r\n vec3 x0 = v - i + dot(i, C.xxx) ;\r\n\r\n// Other corners\r\n vec3 g = step(x0.yzx, x0.xyz);\r\n vec3 l = 1.0 - g;\r\n vec3 i1 = min( g.xyz, l.zxy );\r\n vec3 i2 = max( g.xyz, l.zxy );\r\n vec3 x1 = x0 - i1 + C.xxx;\r\n vec3 x2 = x0 - i2 + C.yyy; // 2.0*C.x = 1/3 = C.y\r\n vec3 x3 = x0 - D.yyy; // -1.0+3.0*C.x = -0.5 = -D.y\r\n\r\n// Permutations\r\n i = mod289(i); \r\n vec4 p = permute( permute( permute( \r\n i.z + vec4(0.0, i1.z, i2.z, 1.0 ))\r\n + i.y + vec4(0.0, i1.y, i2.y, 1.0 )) \r\n + i.x + vec4(0.0, i1.x, i2.x, 1.0 ));\r\n\r\n// Gradients: 7x7 points over a square, mapped onto an octahedron.\r\n// The ring size 17*17 = 289 is close to a multiple of 49 (49*6 = 294)\r\n float n_ = 0.142857142857; // 1.0/7.0\r\n vec3 ns = n_ * D.wyz - D.xzx;\r\n\r\n vec4 j = p - 49.0 * floor(p * ns.z * ns.z); // mod(p,7*7)\r\n\r\n vec4 x_ = floor(j * ns.z);\r\n vec4 y_ = floor(j - 7.0 * x_ ); // mod(j,N)\r\n\r\n vec4 x = x_ *ns.x + ns.yyyy;\r\n vec4 y = y_ *ns.x + ns.yyyy;\r\n vec4 h = 1.0 - abs(x) - abs(y);\r\n\r\n vec4 b0 = vec4( x.xy, y.xy );\r\n vec4 b1 = vec4( x.zw, y.zw );\r\n\r\n //vec4 s0 = vec4(lessThan(b0,0.0))*2.0 - 1.0;\r\n //vec4 s1 = vec4(lessThan(b1,0.0))*2.0 - 1.0;\r\n vec4 s0 = floor(b0)*2.0 + 1.0;\r\n vec4 s1 = floor(b1)*2.0 + 1.0;\r\n vec4 sh = -step(h, vec4(0.0));\r\n\r\n vec4 a0 = b0.xzyw + s0.xzyw*sh.xxyy ;\r\n vec4 a1 = b1.xzyw + s1.xzyw*sh.zzww ;\r\n\r\n vec3 p0 = vec3(a0.xy,h.x);\r\n vec3 p1 = vec3(a0.zw,h.y);\r\n vec3 p2 = vec3(a1.xy,h.z);\r\n vec3 p3 = vec3(a1.zw,h.w);\r\n\r\n//Normalise gradients\r\n vec4 norm = taylorInvSqrt(vec4(dot(p0,p0), dot(p1,p1), dot(p2, p2), dot(p3,p3)));\r\n p0 *= norm.x;\r\n p1 *= norm.y;\r\n p2 *= norm.z;\r\n p3 *= norm.w;\r\n\r\n// Mix final noise value\r\n vec4 m = max(0.6 - vec4(dot(x0,x0), dot(x1,x1), dot(x2,x2), dot(x3,x3)), 0.0);\r\n m = m * m;\r\n return 42.0 * dot( m*m, vec4( dot(p0,x0), dot(p1,x1), \r\n dot(p2,x2), dot(p3,x3) ) );\r\n }\r\n \r\nvoid mainImage( out vec4 fragColor, in vec2 fragCoord )\r\n{\r\n\r\n \tvec2 res = vec2(Resolution.x/Resolution.y,1.0);\r\n \tvec2 p = ( fragCoord.xy / Resolution.y ) -(res/2.0);\r\n\t\r\n \tvec2 m = (Camera-0.5)*pi*vec2(2.,1.);\r\n\t\r\n \tvec3 color = vec3(0.0);\r\n\t\r\n \tvec3 pos = normalize(rotate(vec3(p,0.5),vec2(m)));\r\n\t\r\n \tfloat dist = 0.0;\r\n\t\r\n \tfor(float i = 1.;i <= Layer;i++)\r\n \t{\r\n \t\tfloat shell = abs(snoise(pos*i+vec3(Time,0,0)*0.3));\r\n\t\t\r\n \t\tshell = smoothstep(0.5 * Smoothness,0.6 * Shape,shell);\r\n\t\t\r\n \t\tdist = max(dist,shell*(1.-(i/Layer)));\r\n \t}\r\n\t\r\n \tcolor = mix(vec3(Blobs),vec3(Depth),1.-dist);\r\n\t\t\r\n \tfragColor = vec4( color.xyz , 1.0 );\r\n\r\n }\r\n\r\n\r\n ")
del param
param = lastNode.getParam("inputEnable0")
if param is not None:
param.setValue(False)
del param
param = lastNode.getParam("inputEnable1")
if param is not None:
param.setValue(False)
del param
param = lastNode.getParam("inputEnable2")
if param is not None:
param.setValue(False)
del param
param = lastNode.getParam("inputEnable3")
if param is not None:
param.setValue(False)
del param
param = lastNode.getParam("bbox")
if param is not None:
param.set("format")
del param
param = lastNode.getParam("NatronParamFormatSize")
if param is not None:
param.setValue(1920, 0)
param.setValue(1080, 1)
del param
param = lastNode.getParam("mouseParams")
if param is not None:
param.setValue(False)
del param
param = lastNode.getParam("paramCount")
if param is not None:
param.setValue(9, 0)
del param
param = lastNode.getParam("paramType0")
if param is not None:
param.set("float")
del param
param = lastNode.getParam("paramName0")
if param is not None:
param.setValue("Speed")
del param
param = lastNode.getParam("paramLabel0")
if param is not None:
param.setValue("Speed :")
del param
param = lastNode.getParam("paramHint0")
if param is not None:
param.setValue("speed")
del param
param = lastNode.getParam("paramDefaultFloat0")
if param is not None:
param.setValue(5, 0)
del param
param = lastNode.getParam("paramType1")
if param is not None:
param.set("float")
del param
param = lastNode.getParam("paramName1")
if param is not None:
param.setValue("Offset")
del param
param = lastNode.getParam("paramLabel1")
if param is not None:
param.setValue("Offset :")
del param
param = lastNode.getParam("paramHint1")
if param is not None:
param.setValue("offset")
del param
param = lastNode.getParam("paramMinFloat1")
if param is not None:
param.setValue(-1000, 0)
del param
param = lastNode.getParam("paramMaxFloat1")
if param is not None:
param.setValue(1000, 0)
del param
param = lastNode.getParam("paramType2")
if param is not None:
param.set("float")
del param
param = lastNode.getParam("paramName2")
if param is not None:
param.setValue("Smoothness")
del param
param = lastNode.getParam("paramLabel2")
if param is not None:
param.setValue("Smoothness :")
del param
param = lastNode.getParam("paramHint2")
if param is not None:
param.setValue("smoothness")
del param
param = lastNode.getParam("paramDefaultFloat2")
if param is not None:
param.setValue(0.5, 0)
del param
param = lastNode.getParam("paramMinFloat2")
if param is not None:
param.setValue(-1000, 0)
del param
param = lastNode.getParam("paramMaxFloat2")
if param is not None:
param.setValue(1000, 0)
del param
param = lastNode.getParam("paramType3")
if param is not None:
param.set("float")
del param
param = lastNode.getParam("paramName3")
if param is not None:
param.setValue("Shape")
del param
param = lastNode.getParam("paramLabel3")
if param is not None:
param.setValue("Shape :")
del param
param = lastNode.getParam("paramHint3")
if param is not None:
param.setValue("shape")
del param
param = lastNode.getParam("paramDefaultFloat3")
if param is not None:
param.setValue(0.7, 0)
del param
param = lastNode.getParam("paramMinFloat3")
if param is not None:
param.setValue(-3, 0)
del param
param = lastNode.getParam("paramMaxFloat3")
if param is not None:
param.setValue(3, 0)
del param
param = lastNode.getParam("paramType4")
if param is not None:
param.set("float")
del param
param = lastNode.getParam("paramName4")
if param is not None:
param.setValue("Zoom")
del param
param = lastNode.getParam("paramLabel4")
if param is not None:
param.setValue("Zoom :")
del param
param = lastNode.getParam("paramHint4")
if param is not None:
param.setValue("zoom")
del param
param = lastNode.getParam("paramDefaultFloat4")
if param is not None:
param.setValue(1, 0)
del param
param = lastNode.getParam("paramMinFloat4")
if param is not None:
param.setValue(1.000000000000001e-05, 0)
del param
param = lastNode.getParam("paramMaxFloat4")
if param is not None:
param.setValue(10, 0)
del param
param = lastNode.getParam("paramType5")
if param is not None:
param.set("float")
del param
param = lastNode.getParam("paramName5")
if param is not None:
param.setValue("Layer")
del param
param = lastNode.getParam("paramLabel5")
if param is not None:
param.setValue("Layer :")
del param
param = lastNode.getParam("paramHint5")
if param is not None:
param.setValue("layer")
del param
param = lastNode.getParam("paramDefaultFloat5")
if param is not None:
param.setValue(8, 0)
del param
param = lastNode.getParam("paramMinFloat5")
if param is not None:
param.setValue(1, 0)
del param
param = lastNode.getParam("paramMaxFloat5")
if param is not None:
param.setValue(99.99999999999999, 0)
del param
param = lastNode.getParam("paramType6")
if param is not None:
param.set("vec3")
del param
param = lastNode.getParam("paramName6")
if param is not None:
param.setValue("Blobs")
del param
param = lastNode.getParam("paramLabel6")
if param is not None:
param.setValue("Blobs")
del param
param = lastNode.getParam("paramDefaultVec36")
if param is not None:
param.setValue(0.2, 1)
param.setValue(0.3, 2)
del param
param = lastNode.getParam("paramType7")
if param is not None:
param.set("vec3")
del param
param = lastNode.getParam("paramName7")
if param is not None:
param.setValue("Depth")
del param
param = lastNode.getParam("paramLabel7")
if param is not None:
param.setValue("Depth")
del param
param = lastNode.getParam("paramDefaultVec37")
if param is not None:
param.setValue(1, 0)
param.setValue(1, 1)
param.setValue(1, 2)
del param
param = lastNode.getParam("paramType8")
if param is not None:
param.set("vec2")
del param
param = lastNode.getParam("paramName8")
if param is not None:
param.setValue("Camera")
del param
param = lastNode.getParam("paramLabel8")
if param is not None:
param.setValue("Camera")
del param
del lastNode
# End of node "Shadertoy1_2"
# Now that all nodes are created we can connect them together, restore expressions
groupOutput2.connectInput(0, groupShadertoy1_2)
param = groupShadertoy1_2.getParam("paramValueFloat0")
group.getParam("Shadertoy1_2paramValueFloat0").setAsAlias(param)
del param
param = groupShadertoy1_2.getParam("paramValueFloat1")
group.getParam("Shadertoy1_2paramValueFloat1").setAsAlias(param)
del param
param = groupShadertoy1_2.getParam("paramValueFloat2")
group.getParam("Shadertoy1_2paramValueFloat2").setAsAlias(param)
del param
param = groupShadertoy1_2.getParam("paramValueFloat3")
group.getParam("Shadertoy1_2paramValueFloat3").setAsAlias(param)
del param
param = groupShadertoy1_2.getParam("paramValueFloat4")
group.getParam("Shadertoy1_2paramValueFloat4").setAsAlias(param)
del param
param = groupShadertoy1_2.getParam("paramValueFloat5")
group.getParam("Shadertoy1_2paramValueFloat5").setAsAlias(param)
del param
param = groupShadertoy1_2.getParam("paramValueVec36")
group.getParam("Shadertoy1_2paramValueVec36").setAsAlias(param)
del param
param = groupShadertoy1_2.getParam("paramValueVec37")
group.getParam("Shadertoy1_2paramValueVec37").setAsAlias(param)
del param
param = groupShadertoy1_2.getParam("paramValueVec28")
group.getParam("Shadertoy1_2paramValueVec28").setAsAlias(param)
del param
param = groupShadertoy1_2.getParam("bbox")
group.getParam("Shadertoy1_2bbox").setAsAlias(param)
del param
param = groupShadertoy1_2.getParam("NatronParamFormatChoice")
group.getParam("Shadertoy1_2NatronParamFormatChoice").setAsAlias(param)
del param
try:
extModule = sys.modules["Crok_blobs_GLExt"]
except KeyError:
extModule = None
if extModule is not None and hasattr(extModule ,"createInstanceExt") and hasattr(extModule.createInstanceExt,"__call__"):
extModule.createInstanceExt(app,group)
| [
"[email protected]"
] | |
37152c7ebeacda42c49779f5fbbe920279f08de2 | e3365bc8fa7da2753c248c2b8a5c5e16aef84d9f | /indices/chrysali.py | 0c30f886170a3046e3450a147617a926be67ee54 | [] | no_license | psdh/WhatsintheVector | e8aabacc054a88b4cb25303548980af9a10c12a8 | a24168d068d9c69dc7a0fd13f606c080ae82e2a6 | refs/heads/master | 2021-01-25T10:34:22.651619 | 2015-09-23T11:54:06 | 2015-09-23T11:54:06 | 42,749,205 | 2 | 3 | null | 2015-09-23T11:54:07 | 2015-09-18T22:06:38 | Python | UTF-8 | Python | false | false | 203 | py | ii = [('RogePAV2.py', 5), ('GodwWSL2.py', 1), ('RogePAV.py', 4), ('RennJIT.py', 60), ('LeakWTI3.py', 1), ('ChalTPW2.py', 1), ('GellWPT.py', 3), ('GilmCRS.py', 1), ('WestJIT2.py', 54), ('KirbWPW2.py', 3)] | [
"[email protected]"
] |