hexsha (stringlengths 40..40) | size (int64 3..1.03M) | ext (stringclasses 10 values) | lang (stringclasses 1 value) | max_stars_repo_path (stringlengths 3..972) | max_stars_repo_name (stringlengths 6..130) | max_stars_repo_head_hexsha (stringlengths 40..78) | max_stars_repo_licenses (listlengths 1..10) | max_stars_count (int64 1..191k ⌀) | max_stars_repo_stars_event_min_datetime (stringlengths 24..24 ⌀) | max_stars_repo_stars_event_max_datetime (stringlengths 24..24 ⌀) | max_issues_repo_path (stringlengths 3..972) | max_issues_repo_name (stringlengths 6..130) | max_issues_repo_head_hexsha (stringlengths 40..78) | max_issues_repo_licenses (listlengths 1..10) | max_issues_count (int64 1..116k ⌀) | max_issues_repo_issues_event_min_datetime (stringlengths 24..24 ⌀) | max_issues_repo_issues_event_max_datetime (stringlengths 24..24 ⌀) | max_forks_repo_path (stringlengths 3..972) | max_forks_repo_name (stringlengths 6..130) | max_forks_repo_head_hexsha (stringlengths 40..78) | max_forks_repo_licenses (listlengths 1..10) | max_forks_count (int64 1..105k ⌀) | max_forks_repo_forks_event_min_datetime (stringlengths 24..24 ⌀) | max_forks_repo_forks_event_max_datetime (stringlengths 24..24 ⌀) | content (stringlengths 3..1.03M) | avg_line_length (float64 1.13..941k) | max_line_length (int64 2..941k) | alphanum_fraction (float64 0..1) |
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
b9bd441fe793748c0f27e58f0035b22ac8be6846 | 609 | py | Python | src/jycampus/apps/userbase/migrations/0013_auto_20190809_0914.py | lijoev/KCC | c6fdeda909f0b4243ca4e23ad98a481cbaf7a7c7 | ["Unlicense"] | null | null | null | src/jycampus/apps/userbase/migrations/0013_auto_20190809_0914.py | lijoev/KCC | c6fdeda909f0b4243ca4e23ad98a481cbaf7a7c7 | ["Unlicense"] | 3 | 2020-02-12T02:57:19.000Z | 2021-06-10T21:42:20.000Z | src/jycampus/apps/userbase/migrations/0013_auto_20190809_0914.py | lijoev/KCC | c6fdeda909f0b4243ca4e23ad98a481cbaf7a7c7 | ["Unlicense"] | null | null | null |
# -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2019-08-09 09:14
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('userbase', '0012_auto_20190809_0452'),
]
operations = [
migrations.AlterField(
model_name='participants',
name='is_participant',
field=models.BooleanField(),
),
migrations.AlterField(
model_name='participants',
name='is_volunteer',
field=models.BooleanField(),
),
]
| 23.423077 | 48 | 0.597701 |
6873439748263cf9c2e7e41de1ac99762ec33f59 | 8,201 | py | Python | devices/camera.py | mmonajem/oxcart | 3a5bbf48c7bf54d78d481e30c7f854e1d27b8d8c | ["Apache-2.0"] | null | null | null | devices/camera.py | mmonajem/oxcart | 3a5bbf48c7bf54d78d481e30c7f854e1d27b8d8c | ["Apache-2.0"] | null | null | null | devices/camera.py | mmonajem/oxcart | 3a5bbf48c7bf54d78d481e30c7f854e1d27b8d8c | ["Apache-2.0"] | null | null | null |
import time
import cv2
import numpy as np
from pypylon import pylon
import variables
class Camera:
def __init__(self, devices, tlFactory, cameras, converter):
self.devices = devices
self.tlFactory = tlFactory
self.cameras = cameras
self.converter = converter
self.cameras[0].Open()
self.cameras[0].ExposureAuto.SetValue('Off')
self.cameras[0].ExposureTime.SetValue(1000000)
self.cameras[1].Open()
self.cameras[1].ExposureAuto.SetValue('Off')
self.cameras[1].ExposureTime.SetValue(350000)
def update_cameras(self, lock):
        # Starts grabbing for all cameras starting with index 0. The grabbing
        # is set up for free-running continuous acquisition.
self.cameras.StartGrabbing(pylon.GrabStrategy_LatestImageOnly)
while self.cameras.IsGrabbing():
# if variables.light == False:
# grabResult0 = self.cameras[0].RetrieveResult(1000, pylon.TimeoutHandling_ThrowException)
# grabResult1 = self.cameras[1].RetrieveResult(1000, pylon.TimeoutHandling_ThrowException)
# elif variables.light == True:
grabResult0 = self.cameras[0].RetrieveResult(1000, pylon.TimeoutHandling_ThrowException)
grabResult1 = self.cameras[1].RetrieveResult(1000, pylon.TimeoutHandling_ThrowException)
image0 = self.converter.Convert(grabResult0)
img0 = image0.GetArray()
image1 = self.converter.Convert(grabResult1)
img1 = image1.GetArray()
# Original size is 2048 * 2448
img0_orig = cv2.resize(img0, dsize=(2048, 2048), interpolation=cv2.INTER_CUBIC).astype(np.int32)
img0_zoom = cv2.resize(img0[800:1100, 1800:2300], dsize=(1200, 500), interpolation=cv2.INTER_CUBIC).astype(
np.int32)
img1_orig = cv2.resize(img1, dsize=(2048, 2048), interpolation=cv2.INTER_CUBIC).astype(np.int32)
img1_zoom = cv2.resize(img1[1120:1300, 1000:1520], dsize=(1200, 500), interpolation=cv2.INTER_CUBIC).astype(
np.int32)
if variables.index_save_image % 100 == 0 and variables.start_flag:
cv2.imwrite(variables.path + "\\side_%s.png" % variables.index_save_image, img0_orig)
cv2.imwrite(variables.path + "\\side_zoom_%s.png" % variables.index_save_image, img0_zoom)
cv2.imwrite(variables.path + '\\bottom_%s.png' % variables.index_save_image, img1_orig)
cv2.imwrite(variables.path + '\\bottom_zoom_%s.png' % variables.index_save_image, img1_zoom)
img0_zoom_marker = cv2.drawMarker(img0_zoom, (1050, 310), (0, 0, 255), markerType=cv2.MARKER_TRIANGLE_UP,
markerSize=40, thickness=2, line_type=cv2.LINE_AA)
img1_zoom_marker = cv2.drawMarker(img1_zoom, (1100, 285), (0, 0, 255), markerType=cv2.MARKER_TRIANGLE_UP,
markerSize=40, thickness=2, line_type=cv2.LINE_AA)
with lock:
variables.img0_zoom = np.require(img0_zoom_marker, np.uint8, 'C')
variables.img1_zoom = np.require(img1_zoom_marker, np.uint8, 'C')
variables.img0_orig = np.swapaxes(img0_orig, 0, 1)
variables.img1_orig = np.swapaxes(img1_orig, 0, 1)
variables.index_save_image += 1
if variables.index_save_image % 100 == 0 and variables.start_flag:
cv2.imwrite(variables.path + "\\side_%s.png" % variables.index_save_image, img0_orig)
cv2.imwrite(variables.path + "\\side_zoom_%s.png" % variables.index_save_image, img0_zoom)
cv2.imwrite(variables.path + '\\bottom_%s.png' % variables.index_save_image, img1_orig)
cv2.imwrite(variables.path + '\\bottom_zoom_%s.png' % variables.index_save_image, img1_zoom)
grabResult0.Release()
grabResult1.Release()
if variables.sample_adjust:
self.camera_s_d()
variables.sample_adjust = False
def light_switch(self, ):
if not variables.light:
self.cameras[0].Open()
self.cameras[0].ExposureTime.SetValue(2000)
# self.cameras[0].AcquisitionFrameRate.SetValue(150)
self.cameras[1].Open()
self.cameras[1].ExposureTime.SetValue(2000)
# self.cameras[1].AcquisitionFrameRate.SetValue(150)
variables.light = True
variables.sample_adjust = True
elif variables.light:
self.cameras[0].Open()
self.cameras[0].ExposureTime.SetValue(1000000)
self.cameras[1].Open()
self.cameras[1].ExposureTime.SetValue(350000)
variables.light = False
variables.sample_adjust = False
def camera_s_d(self, ):
# The exit code of the sample application.
img0 = []
img1 = []
windowName = 'Sample Alignment'
while self.cameras.IsGrabbing():
if not self.cameras.IsGrabbing():
break
try:
grabResult = self.cameras.RetrieveResult(200, pylon.TimeoutHandling_ThrowException)
# When the cameras in the array are created the camera context value
# is set to the index of the camera in the array.
# The camera context is a user settable value.
# This value is attached to each grab result and can be used
# to determine the camera that produced the grab result.
cameraContextValue = grabResult.GetCameraContext()
if grabResult.GrabSucceeded():
image = self.converter.Convert(grabResult) # Access the openCV image data
if cameraContextValue == 0: # If camera 0, save array into img0[]
img0 = image.GetArray()
else: # if camera 1, save array into img1[]
img1 = image.GetArray()
# If there is no img1, the first time, make img1=img0
# Need the same length arrays to concatenate
if len(img1) == 0:
img1 = img0
img0_zoom = cv2.resize(img0[800:1100, 1800:2300], dsize=(2448, 1000), interpolation=cv2.INTER_CUBIC)
img1_zoom = cv2.resize(img1[1100:1350, 1000:1550], dsize=(2448, 1000),
interpolation=cv2.INTER_CUBIC)
img0_zoom = cv2.drawMarker(img0_zoom, (2150, 620), (0, 0, 255),
markerType=cv2.MARKER_TRIANGLE_UP,
markerSize=80, thickness=2, line_type=cv2.LINE_AA)
img1_zoom = cv2.drawMarker(img1_zoom, (2100, 530), (0, 0, 255),
markerType=cv2.MARKER_TRIANGLE_UP,
markerSize=80, thickness=2, line_type=cv2.LINE_AA)
img0_f = np.concatenate((img0, img0_zoom), axis=0)
img1_f = np.concatenate((img1, img1_zoom), axis=0)
vis = np.concatenate((img0_f, img1_f), axis=1) # Combine 2 images horizontally
cv2.namedWindow(windowName, cv2.WINDOW_NORMAL)
cv2.resizeWindow(windowName, 2500, 1200)
cv2.imshow(windowName, vis) # displays image in specified window
k = cv2.waitKey(1)
if k == 27: # If press ESC key
print('ESC')
cv2.destroyAllWindows()
break
except:
pass
grabResult.Release()
time.sleep(0.05)
# If window has been closed using the X button, close program
# getWindowProperty() returns -1 as soon as the window is closed
if cv2.getWindowProperty(windowName, 0) < 0 or not variables.light_swich:
grabResult.Release()
cv2.destroyAllWindows()
break
| 49.107784 | 120 | 0.58249 |
7b876291e668704c672d7655714869f2de892cdd | 4,691 | py | Python | test/functional/feature_spentindex.py | bitcoinquark/bitcoinquark | f1bd752ee58859f2b24a52405723706064c9c151 | ["MIT"] | null | null | null | test/functional/feature_spentindex.py | bitcoinquark/bitcoinquark | f1bd752ee58859f2b24a52405723706064c9c151 | ["MIT"] | null | null | null | test/functional/feature_spentindex.py | bitcoinquark/bitcoinquark | f1bd752ee58859f2b24a52405723706064c9c151 | ["MIT"] | null | null | null |
#!/usr/bin/env python3
# Copyright (c) 2014-2016 The Bitcoin Core developers
# Copyright (c) 2017 The BitcoinQuark developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Test spentindex
#
import time
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
from test_framework.script import *
from test_framework.mininode import *
import binascii
class SpentIndexTest(BitcoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 4
self.extra_args = [["-debug"],["-debug", "-addressindex"],["-debug", "-addressindex"],["-debug", "-addressindex"]]
def setup_network(self):
self.setup_nodes()
connect_nodes(self.nodes[0], 1)
connect_nodes(self.nodes[0], 2)
connect_nodes(self.nodes[0], 3)
self.is_network_split = False
self.sync_all()
def run_test(self):
print ("Mining blocks...")
self.nodes[0].generate(105)
self.sync_all()
chain_height = self.nodes[1].getblockcount()
assert_equal(chain_height, 105)
# Check that
print ("Testing spent index...")
privkey = "cSdkPxkAjA4HDr5VHgsebAPDEh9Gyub4HK8UJr2DFGGqKKy4K5sG"
address = "mgY65WSfEmsyYaYPQaXhmXMeBhwp4EcsQW"
addressHash = hex_str_to_bytes("0b2f0a0c31bfe0406b0ccc1381fdbe311946dadc")
scriptPubKey = CScript([OP_DUP, OP_HASH160, addressHash, OP_EQUALVERIFY, OP_CHECKSIG])
unspent = self.nodes[0].listunspent()
tx = CTransaction()
amount = unspent[0]["amount"] * 100000000
tx.vin = [CTxIn(COutPoint(int(unspent[0]["txid"], 16), unspent[0]["vout"]))]
tx.vout = [CTxOut(amount, scriptPubKey)]
tx.rehash()
signed_tx = self.nodes[0].signrawtransaction(binascii.hexlify(tx.serialize()).decode("utf-8"))
txid = self.nodes[0].sendrawtransaction(signed_tx["hex"], True)
self.nodes[0].generate(1)
self.sync_all()
print ("Testing getspentinfo method...")
# Check that the spentinfo works standalone
info = self.nodes[1].getspentinfo({"txid": unspent[0]["txid"], "index": unspent[0]["vout"]})
assert_equal(info["txid"], txid)
assert_equal(info["index"], 0)
assert_equal(info["height"], 106)
print ("Testing getrawtransaction method...")
# Check that verbose raw transaction includes spent info
txVerbose = self.nodes[3].getrawtransaction(unspent[0]["txid"], 1)
assert_equal(txVerbose["vout"][unspent[0]["vout"]]["spentTxId"], txid)
assert_equal(txVerbose["vout"][unspent[0]["vout"]]["spentIndex"], 0)
assert_equal(txVerbose["vout"][unspent[0]["vout"]]["spentHeight"], 106)
# Check that verbose raw transaction includes input values
txVerbose2 = self.nodes[3].getrawtransaction(txid, 1)
assert_equal(txVerbose2["vin"][0]["value"], Decimal(unspent[0]["amount"]))
assert_equal(txVerbose2["vin"][0]["valueSat"], amount)
# Check that verbose raw transaction includes address values and input values
privkey2 = "cSdkPxkAjA4HDr5VHgsebAPDEh9Gyub4HK8UJr2DFGGqKKy4K5sG"
address2 = "mgY65WSfEmsyYaYPQaXhmXMeBhwp4EcsQW"
addressHash2 = hex_str_to_bytes("0b2f0a0c31bfe0406b0ccc1381fdbe311946dadc")
scriptPubKey2 = CScript([OP_DUP, OP_HASH160, addressHash2, OP_EQUALVERIFY, OP_CHECKSIG])
tx2 = CTransaction()
tx2.vin = [CTxIn(COutPoint(int(txid, 16), 0))]
tx2.vout = [CTxOut(amount, scriptPubKey2)]
        tx2.rehash()
self.nodes[0].importprivkey(privkey)
signed_tx2 = self.nodes[0].signrawtransaction(binascii.hexlify(tx2.serialize()).decode("utf-8"))
txid2 = self.nodes[0].sendrawtransaction(signed_tx2["hex"], True)
# Check the mempool index
self.sync_all()
txVerbose3 = self.nodes[1].getrawtransaction(txid2, 1)
assert_equal(txVerbose3["vin"][0]["address"], address2)
assert_equal(txVerbose3["vin"][0]["value"], Decimal(unspent[0]["amount"]))
assert_equal(txVerbose3["vin"][0]["valueSat"], amount)
# Check the database index
self.nodes[0].generate(1)
self.sync_all()
txVerbose4 = self.nodes[3].getrawtransaction(txid2, 1)
assert_equal(txVerbose4["vin"][0]["address"], address2)
assert_equal(txVerbose4["vin"][0]["value"], Decimal(unspent[0]["amount"]))
assert_equal(txVerbose4["vin"][0]["valueSat"], amount)
print ("Passed\n")
if __name__ == '__main__':
SpentIndexTest().main()
| 40.439655 | 122 | 0.661266 |
d9af44b16c36e6a831a6344a553cd6fc2d17e7cf | 1,910 | py | Python | venv/Lib/site-packages/appier/asynchronous.py | victor-freitas/TwilioSMS | d71b18a7e8765aa545dd785d119f3719e5eb1606 | ["Apache-2.0"] | null | null | null | venv/Lib/site-packages/appier/asynchronous.py | victor-freitas/TwilioSMS | d71b18a7e8765aa545dd785d119f3719e5eb1606 | ["Apache-2.0"] | null | null | null | venv/Lib/site-packages/appier/asynchronous.py | victor-freitas/TwilioSMS | d71b18a7e8765aa545dd785d119f3719e5eb1606 | ["Apache-2.0"] | null | null | null |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Hive Appier Framework
# Copyright (c) 2008-2019 Hive Solutions Lda.
#
# This file is part of Hive Appier Framework.
#
# Hive Appier Framework is free software: you can redistribute it and/or modify
# it under the terms of the Apache License as published by the Apache
# Foundation, either version 2.0 of the License, or (at your option) any
# later version.
#
# Hive Appier Framework is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# Apache License for more details.
#
# You should have received a copy of the Apache License along with
# Hive Appier Framework. If not, see <http://www.apache.org/licenses/>.
__author__ = "João Magalhães <[email protected]>"
""" The author(s) of the module """
__version__ = "1.0.0"
""" The version of the module """
__revision__ = "$LastChangedRevision$"
""" The revision number of the module """
__date__ = "$LastChangedDate$"
""" The last change date of the module """
__copyright__ = "Copyright (c) 2008-2019 Hive Solutions Lda."
""" The copyright for the module """
__license__ = "Apache License, Version 2.0"
""" The license for the module """
# imports the base (old) version of the async implementation
# that should be compatible with all the available python
# interpreters, base collection of async library
from .async_old import * #@UnusedWildImport
# verifies if the current python interpreter version supports
# the new version of the async implementation and if that's the
# case runs the additional import of symbols, this should override
# most of the symbols that have just been created
if is_neo(): from .async_neo import * #@UnusedWildImport
def header_a():
return await_wrap(header_a_())
def ensure_a(*args, **kwargs):
return await_wrap(ensure_a_(*args, **kwargs))
| 34.107143 | 79 | 0.740838 |
52227649ea9706e93cf9fa5cc710364ec08306bf | 1,144 | py | Python | setup.py | kurhula/nodeenv | 2b2ff31364a8837a11dffc1e667df349fae8cedb | ["BSD-3-Clause"] | null | null | null | setup.py | kurhula/nodeenv | 2b2ff31364a8837a11dffc1e667df349fae8cedb | ["BSD-3-Clause"] | null | null | null | setup.py | kurhula/nodeenv | 2b2ff31364a8837a11dffc1e667df349fae8cedb | ["BSD-3-Clause"] | 1 | 2020-04-13T17:43:43.000Z | 2020-04-13T17:43:43.000Z |
"""
nodeenv
~~~~~~~
Node.js Virtual Environment builder.
"""
import os
from setuptools import setup
from nodeenv import nodeenv_version
def read_file(file_name):
return open(
os.path.join(
os.path.dirname(os.path.abspath(__file__)),
file_name
)
).read()
ldesc = read_file('README.rst')
ldesc += "\n\n" + read_file('CHANGES')
setup(
name='nodeenv',
version=nodeenv_version,
url='https://github.com/ekalinin/nodeenv',
license='BSD',
author='Eugene Kalinin',
author_email='[email protected]',
install_requires=[],
description="Node.js virtual environment builder",
long_description=ldesc,
py_modules=['nodeenv'],
entry_points={
'console_scripts': ['nodeenv = nodeenv:main']
},
zip_safe=False,
platforms='any',
classifiers=[
'Development Status :: 4 - Beta',
'Environment :: Web Environment',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Programming Language :: Python',
'Topic :: Software Development :: Libraries :: Python Modules'
]
)
| 23.833333 | 70 | 0.624126 |
2cb4afae5d863b8fee0691f17d12c6ddca0d6ae0 | 48 | py | Python | openre/agent/client/action/__init__.py | openre/openre | c5969df92cac83bdafd049e1c0a3bcf56b51223a | ["MIT"] | null | null | null | openre/agent/client/action/__init__.py | openre/openre | c5969df92cac83bdafd049e1c0a3bcf56b51223a | ["MIT"] | null | null | null | openre/agent/client/action/__init__.py | openre/openre | c5969df92cac83bdafd049e1c0a3bcf56b51223a | ["MIT"] | 1 | 2016-02-14T11:20:57.000Z | 2016-02-14T11:20:57.000Z |
# -*- coding: utf-8 -*-
"""
Client actions
"""
| 8 | 23 | 0.479167 |
ab1860fc9c35c552db7ba06a5456ec7effdb9f92 | 4,019 | py | Python | dotdotdot/config.py | nehararora/dotdotdot | a3e76596272745e2bd1d653139558011b39ea040 | ["BSD-2-Clause"] | null | null | null | dotdotdot/config.py | nehararora/dotdotdot | a3e76596272745e2bd1d653139558011b39ea040 | ["BSD-2-Clause"] | 4 | 2019-02-14T18:30:03.000Z | 2019-02-22T16:48:59.000Z | dotdotdot/config.py | nehararora/dotdotdot | a3e76596272745e2bd1d653139558011b39ea040 | ["BSD-2-Clause"] | 2 | 2019-09-03T15:52:54.000Z | 2019-09-10T17:42:10.000Z |
# -*- coding: utf-8 -*-
"""
config: Load application configuration and return an object representation.
Allows accessing configuration using "dot-notation" for supported configuration
file formats.
Supported formats are:
* yml
* TODO: ini
* TODO: json
"""
import yaml
# from enum import Enum
# Formats = Enum('Formats', names=[('yml', 1), ('ini', 2)])
def repr_fx(self):
"""
Object representation. Function gets added as a method
to generated classes.
:return: string object representation
"""
return yaml.dump(self)
def str_fx(self):
"""
String representation. Function gets added as a method
to generated classes.
:return: string object representation
"""
return yaml.dump(self, default_flow_style=False)
def get_fx(self, key, default=None):
"""
Allow for c.get(foo) invocation.
:param self: Config object
:param key: config key to look for
:param default: value if key is missing
:return:
"""
key_exists = hasattr(self, key)
if key_exists:
return get_item_fx(self, key)
elif default:
return default
else:
raise KeyError
def get_item_fx(self, key):
"""
Function to implement __getitem__
:param self:
:param key:
:return:
"""
if hasattr(self, key):
return getattr(self, key)
else:
raise KeyError
def __validate():
"""
Hook to validate config.
:return:
"""
# TODO: implement
def __determine_config_type():
"""
Find out the type of the configuration file.
:return:
"""
class Config(object):
"""
The configuration object that will be populated.
"""
pass
Config.__repr__ = repr_fx
Config.__str__ = str_fx
Config.__getitem__ = get_item_fx
Config.get = get_fx
def __construct(config, yml):
"""
Recursive function to construct an object corresponding to given value.
Adds elements from the input yaml to the configuration object in the first
argument. For complex value types recursively instantiates new objects and
attaches them into the configuration tree.
The intent is to be able to access the yaml config using dot notation -
e.g. config.a.b.c.
:param config: The config object to populate.
:param yml: The yaml corresponding to the conf parameter.
"""
for key in yml:
if type(yml[key]) == dict:
# create an object for the subsection
klass = type(key, (), {})
klass.__repr__ = repr_fx
klass.__str__ = str_fx
klass.__getitem__ = get_item_fx
klass.get = get_fx
obj = klass()
__construct(obj, yml[key])
setattr(config, key, obj)
else:
# just set simple value
setattr(config, key, yml[key])
def load(paths):
"""
Entry point for the config module.
Load yml config files at specified path and convert to a config object.
Merges the yaml files specified by the paths parameter - keys in a file
later in the list override earlier keys.
:param paths: List of complete paths of config files.
:return Config object with member properties
"""
if not paths:
raise ConfigException(message='No configuration file specified',
reason=paths)
yaml_dict = {}
if type(paths) == str:
paths = [paths]
# for every filename in list...
for path in paths:
# read config file...
with open(path) as f:
# get config as dict...
y = yaml.safe_load(f)
# and merge into a single yaml dict.
yaml_dict.update(y)
config = Config()
# get object for each key and set on the config object
__construct(config, yaml_dict)
return config
class ConfigException(Exception):
def __init__(self, message, reason):
self.message = message
self.reason = reason
def __str__(self):
return repr(self.message)
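# A minimal usage sketch of the dot-notation access described in the module
# docstring, assuming a hypothetical YAML config; the keys and values below are
# made up for illustration and written to a temporary file so the sketch is
# self-contained.
if __name__ == '__main__':  # pragma: no cover
    import tempfile
    with tempfile.NamedTemporaryFile('w', suffix='.yml', delete=False) as tmp:
        tmp.write('server:\n  host: localhost\n  port: 8080\n')
    config = load([tmp.name])
    print(config.server.host)           # -> localhost
    print(config['server']['port'])     # -> 8080
    print(config.server.get('port'))    # -> 8080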
| 23.231214 | 79 | 0.628266 |
c4a23084fe377a7b449d5c8ec6c291862f4a50ca | 1,599 | py | Python | src/nlp_pytorch/data/base_dataset.py | dbradf/nlp-pytorch | 957e3c5a1edf1f2ae9a8e281729395bed886bc87 | ["Apache-2.0"] | null | null | null | src/nlp_pytorch/data/base_dataset.py | dbradf/nlp-pytorch | 957e3c5a1edf1f2ae9a8e281729395bed886bc87 | ["Apache-2.0"] | null | null | null | src/nlp_pytorch/data/base_dataset.py | dbradf/nlp-pytorch | 957e3c5a1edf1f2ae9a8e281729395bed886bc87 | ["Apache-2.0"] | null | null | null |
import pandas as pd
from torch.utils.data import Dataset, DataLoader
TRAIN = "train"
TEST = "test"
VAL = "val"
class SplitDataset(Dataset):
def __init__(self, dataframe: pd.DataFrame, vectorizer) -> None:
self.dataframe = dataframe
self.vectorizer = vectorizer
self.train_df = self.dataframe[self.dataframe.split == TRAIN]
self.train_size = len(self.train_df)
self.val_df = self.dataframe[self.dataframe.split == VAL]
self.val_size = len(self.val_df)
self.test_df = self.dataframe[self.dataframe.split == TEST]
self.test_size = len(self.test_df)
self._lookup_dict = {
TRAIN: (self.train_df, self.train_size),
VAL: (self.val_df, self.val_size),
TEST: (self.test_df, self.test_size),
}
self.set_split(TRAIN)
def set_split(self, split: str = TRAIN):
self._target_split = split
self._target_df, self._target_size = self._lookup_dict[split]
def get_num_batches(self, batch_size):
return len(self) // batch_size
def __len__(self):
return self._target_size
def generate_batches(
dataset: Dataset,
batch_size: int,
shuffle: bool = True,
drop_last: bool = True,
device: str = "cpu",
):
dataloader = DataLoader(
dataset=dataset, batch_size=batch_size, shuffle=shuffle, drop_last=drop_last
)
for data_dict in dataloader:
out_data_dict = {}
for name, tensor in data_dict.items():
out_data_dict[name] = data_dict[name].to(device)
yield out_data_dict
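# A minimal sketch of how SplitDataset and generate_batches fit together,
# assuming a toy dataframe and a hypothetical subclass whose __getitem__ returns
# a dict of tensors; None stands in for the project's real vectorizer here.
if __name__ == '__main__':  # pragma: no cover
    import torch
    class ToyDataset(SplitDataset):
        def __getitem__(self, index):
            row = self._target_df.iloc[index]
            return {"x": torch.tensor(row.x, dtype=torch.float32)}
    df = pd.DataFrame({"x": [0.0, 1.0, 2.0, 3.0], "split": [TRAIN, TRAIN, VAL, TEST]})
    dataset = ToyDataset(df, vectorizer=None)
    dataset.set_split(TRAIN)
    for batch in generate_batches(dataset, batch_size=2):
        print(batch["x"].shape)  # torch.Size([2])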
| 27.568966 | 84 | 0.642902 |
bf90ffc9f40e89de18e4d1842aa7af0c6467996a | 1,687 | py | Python | test/test_examples.py | poponealex/suprenam | d57c99a2e43ad659b9ed70830402f46e7d31e02e | ["MIT"] | 8 | 2022-03-05T19:41:37.000Z | 2022-03-06T08:04:43.000Z | test/test_examples.py | poponealex/suprenam | d57c99a2e43ad659b9ed70830402f46e7d31e02e | ["MIT"] | 2 | 2022-01-25T18:57:17.000Z | 2022-03-14T13:24:59.000Z | test/test_examples.py | poponealex/suprenam | d57c99a2e43ad659b9ed70830402f46e7d31e02e | ["MIT"] | null | null | null |
from pathlib import Path
import pytest
__import__("sys").path[0:0] = "."
import src.secure_clauses as sc
from src.file_system import FileSystem
from extract_examples import extract_examples
from reformat_examples import main as reformat_examples
EXCEPTIONS = {
"FileNotFoundError": FileNotFoundError,
"SeveralTargetsError": getattr(sc, "SeveralTargetsError"),
"SeveralSourcesError": getattr(sc, "SeveralSourcesError"),
"DuplicatedClauseError": getattr(sc, "DuplicatedClauseError"),
}
PATHS = [Path(line) for line in Path("test/fhs.txt").read_text().strip().split("\n")]
EXAMPLES_MD_PATH = Path("test/examples.md")
reformat_examples(EXAMPLES_MD_PATH)
@pytest.mark.parametrize(
"i, title, example, expected",
[d.values() for d in extract_examples(EXAMPLES_MD_PATH)[:]],
)
def test(i, title, example, expected):
clauses = [(Path(row[0]), row[1]) for row in example]
fs = FileSystem(PATHS)
original_fs = set(fs)
if isinstance(expected, tuple):
(exception_name, expected_culprit) = expected
with pytest.raises(EXCEPTIONS[exception_name]) as culprit:
sc.secure_clauses(fs, clauses)
assert culprit.value.args[0] == expected_culprit
assert fs == original_fs
else:
safe_clauses = sc.secure_clauses(fs, clauses)
additions = {Path(row[1]) for row in expected}
deletions = {Path(row[0]) for row in expected}
(additions, deletions) = (additions - deletions, deletions - additions)
assert original_fs - fs == deletions
assert fs - original_fs == additions
if __name__ == "__main__": # pragma: no cover
pytest.main(["-qq", __import__("sys").argv[0]])
| 33.74 | 85 | 0.694724 |
d0c4b18ded546de284ad3da1e2bb9f8358242ed0 | 1,475 | py | Python | tests/test_taskid.py | danielk333/pter | 50e2eee5436abe0cf566b1dcebf5279bdca931b2 | ["MIT"] | 35 | 2020-06-30T05:18:42.000Z | 2022-03-09T01:34:41.000Z | tests/test_taskid.py | danielk333/pter | 50e2eee5436abe0cf566b1dcebf5279bdca931b2 | ["MIT"] | 13 | 2021-04-28T06:51:30.000Z | 2022-01-07T15:11:44.000Z | tests/test_taskid.py | danielk333/pter | 50e2eee5436abe0cf566b1dcebf5279bdca931b2 | ["MIT"] | 5 | 2021-04-27T09:25:09.000Z | 2021-12-22T23:16:55.000Z |
import unittest
import pathlib
from pytodotxt import Task
from pter import utils
from pter.source import Source
class FakeSource:
def __init__(self, tasks):
self.tasks = tasks
self.filename = pathlib.Path('/tmp/test.txt')
class TestTaskIDGeneration(unittest.TestCase):
def setUp(self):
self.sources = [
Source(FakeSource([Task('task id:30'), Task('task id:prefixed22')])),
Source(FakeSource([Task('task id:41')]))]
self.sources[0].update_contexts_and_projects()
self.sources[1].update_contexts_and_projects()
def test_no_prefix_no_existing(self):
sources = [Source(FakeSource([]))]
sources[0].update_contexts_and_projects()
nextid = utils.new_task_id(sources)
self.assertEqual(nextid, '1')
def test_no_prefix_existing(self):
nextid = utils.new_task_id(self.sources)
self.assertEqual(nextid, '42')
def test_prefix_no_existing(self):
nextid = utils.new_task_id(self.sources, 'project')
self.assertEqual(nextid, 'project1')
def test_prefix_existing(self):
nextid = utils.new_task_id(self.sources, 'prefixed')
self.assertEqual(nextid, 'prefixed23')
def test_decimals(self):
sources = [Source(FakeSource([Task('task id:9'), Task('task id:12')]))]
sources[0].update_contexts_and_projects()
nextid = utils.new_task_id(sources)
self.assertEqual(nextid, '13')
| 30.729167 | 85 | 0.663051 |
e4b2aa45b304cbbb4ca6e93ef206df95c549ced9 | 5,300 | py | Python | tests/util.py | alexey-v-paramonov/grab | a3bfda3ae16b615eeaa0323c2ba72a9d901cba7b | ["MIT"] | null | null | null | tests/util.py | alexey-v-paramonov/grab | a3bfda3ae16b615eeaa0323c2ba72a9d901cba7b | ["MIT"] | null | null | null | tests/util.py | alexey-v-paramonov/grab | a3bfda3ae16b615eeaa0323c2ba72a9d901cba7b | ["MIT"] | 1 | 2022-01-12T14:51:27.000Z | 2022-01-12T14:51:27.000Z |
import os
from unittest import TestCase
import logging
from contextlib import contextmanager
from tempfile import mkdtemp, mkstemp
from shutil import rmtree
import platform
import itertools
from test_server import TestServer
from grab import Grab
from grab import base
logger = logging.getLogger('tests.util') # pylint: disable=invalid-name
TEST_DIR = os.path.dirname(os.path.realpath(__file__))
TEST_SERVER_PORT = 9876
ADDRESS = 'localhost'
EXTRA_PORT1 = TEST_SERVER_PORT + 1
EXTRA_PORT2 = TEST_SERVER_PORT + 2
NON_ROUTABLE_IP = '10.0.0.0'
GLOBAL = {
'backends': [],
'grab_transport': None,
'network_service': None,
}
@contextmanager
def temp_dir(root_dir=None):
dir_ = mkdtemp(dir=root_dir)
yield dir_
rmtree(dir_)
@contextmanager
def temp_file(root_dir=None):
fdesc, file_ = mkstemp(dir=root_dir)
yield file_
os.close(fdesc)
try:
os.unlink(file_)
except (IOError, OSError):
if 'Windows' in platform.system():
logger.error('Ignoring IOError raised when trying to delete'
' temp file %s created in `temp_file` context'
' manager', file_)
else:
raise
def build_grab(*args, **kwargs):
"""Builds the Grab instance with default options."""
kwargs.setdefault('transport', GLOBAL['grab_transport'])
return Grab(*args, **kwargs)
def build_spider(cls, **kwargs):
"""Builds the Spider instance with default options."""
kwargs.setdefault('grab_transport', GLOBAL['grab_transport'])
kwargs.setdefault('network_service', GLOBAL['network_service'])
return cls(**kwargs)
class BaseGrabTestCase(TestCase):
@classmethod
def setUpClass(cls):
cls.server = start_server()
def setUp(self):
self.server.reset()
@classmethod
def tearDownClass(cls):
cls.server.stop()
def start_server():
logger.debug('Starting test server on %s:%s', ADDRESS, TEST_SERVER_PORT)
server = TestServer(address=ADDRESS, port=TEST_SERVER_PORT,
extra_ports=[EXTRA_PORT1, EXTRA_PORT2])
server.start()
return server
def exclude_grab_transport(*names):
def decorator(func):
def caller(*args, **kwargs):
if GLOBAL['grab_transport'] in names:
func_name = '%s:%s' % (func.__module__, func.__name__)
logger.debug('Running test %s for grab transport %s is'
' restricted', func_name,
GLOBAL['grab_transport'])
return None
else:
return func(*args, **kwargs)
return caller
return decorator
def only_grab_transport(*names):
def decorator(func):
def caller(*args, **kwargs):
if GLOBAL['grab_transport'] in names:
return func(*args, **kwargs)
else:
func_name = '%s:%s' % (func.__module__, func.__name__)
logger.debug('Running test %s for grab transport %s is'
' restricted', func_name,
GLOBAL['grab_transport'])
return None
return caller
return decorator
def exclude_spider_transport(*names):
def decorator(func):
def caller(*args, **kwargs):
if GLOBAL['network_service'] in names:
func_name = '%s:%s' % (func.__module__, func.__name__)
logger.debug('Running test %s for spider transport %s is'
' restricted', func_name,
GLOBAL['network_service'])
return None
else:
return func(*args, **kwargs)
return caller
return decorator
def only_spider_transport(*names):
def decorator(func):
def caller(*args, **kwargs):
if GLOBAL['network_service'] in names:
return func(*args, **kwargs)
else:
func_name = '%s:%s' % (func.__module__, func.__name__)
logger.debug('Running test %s for spider transport %s is'
' restricted', func_name,
GLOBAL['network_service'])
return None
return caller
return decorator
def skip_test_if(condition, why_message):
def decorator(func):
def caller(*args, **kwargs):
if condition():
func_name = '%s:%s' % (func.__module__, func.__name__)
logger.debug('Skipping test %s because %s',
func_name, why_message)
return None
else:
return func(*args, **kwargs)
return caller
return decorator
def run_test_if(condition, why_message):
def decorator(func):
def caller(*args, **kwargs):
if condition():
return func(*args, **kwargs)
else:
func_name = '%s:%s' % (func.__module__, func.__name__)
logger.debug('Running test %s is restricted because'
' it is not %s', func_name, why_message)
return None
return caller
return decorator
def reset_request_counter():
base.REQUEST_COUNTER = itertools.count(1)
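# A minimal sketch of how these helpers are typically combined in a test module,
# assuming a hypothetical test case; the response payload handling shown is the
# usual test_server idiom, not something defined in this file.
#
#   class SimpleGrabTestCase(BaseGrabTestCase):
#       @only_grab_transport('pycurl')
#       def test_get(self):
#           self.server.response['data'] = b'hello'
#           grab = build_grab()
#           grab.go(self.server.get_url())
#           self.assertEqual(grab.doc.body, b'hello')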
| 29.943503 | 76 | 0.583396 |
63ae8ebc8cde7f5c00a8f28d9a347a9a7c2b89d9 | 300 | py | Python | examples/models/gaussian/create_gaussian_conv_rbm.py | recogna-lab/recogners | 711295f53b47c76246d57df24b75d18bc7ec62e8 | ["MIT"] | 1 | 2019-07-04T19:36:42.000Z | 2019-07-04T19:36:42.000Z | examples/models/gaussian/create_gaussian_conv_rbm.py | recogna-lab/recogners | 711295f53b47c76246d57df24b75d18bc7ec62e8 | ["MIT"] | null | null | null | examples/models/gaussian/create_gaussian_conv_rbm.py | recogna-lab/recogners | 711295f53b47c76246d57df24b75d18bc7ec62e8 | ["MIT"] | 1 | 2019-12-09T16:18:51.000Z | 2019-12-09T16:18:51.000Z |
from learnergy.models.gaussian import GaussianConvRBM
# Creates a GaussianConvRBM-based class
model = GaussianConvRBM(
visible_shape=(32, 32),
filter_shape=(9, 9),
n_filters=16,
n_channels=3,
steps=1,
learning_rate=0.00001,
momentum=0.5,
decay=0,
use_gpu=True,
)
| 20 | 53 | 0.686667 |
4453f2881bf60fe4d6318e4b8297e983a64db563 | 5,220 | py | Python | pds_github_util/gh_pages/summary.py | nasa-pds-engineering-node/pds-github-util | d65aa96787e77fe8d4ee8c023d5c6ca32bbd13c9 | ["Apache-2.0"] | null | null | null | pds_github_util/gh_pages/summary.py | nasa-pds-engineering-node/pds-github-util | d65aa96787e77fe8d4ee8c023d5c6ca32bbd13c9 | ["Apache-2.0"] | 1 | 2021-10-13T19:02:04.000Z | 2021-10-13T19:02:04.000Z | pds_github_util/gh_pages/summary.py | nasa-pds-engineering-node/pds-github-util | d65aa96787e77fe8d4ee8c023d5c6ca32bbd13c9 | ["Apache-2.0"] | null | null | null |
import os
import logging
from pds_github_util.tags.tags import Tags
from pds_github_util.utils import RstClothReferenceable
from pds_github_util.corral.herd import Herd
logger = logging.getLogger(__name__)
COLUMNS = ['manual', 'changelog', 'requirements', 'download', 'license', 'feedback']
REPO_TYPES = {
'tool': {
'title': 'Standalone Tools and Libraries',
'description': 'PDS tools for discipline nodes, data providers and users.'
},
'service': {
'title': 'Discipline Node Services',
'description': 'PDS servers that Discipline Node should deploy to publish their archive at PDS level'
},
'library': {
'title': 'Libraries',
'description': 'Libraries supported by PDS'
},
'core': {
'title': 'Engineering Node Services',
'description': 'PDS servers deployed by PDS Engineering Node at central level'
},
'other': {
'title': 'Other Tools and Libraries (dependencies)',
'description': 'Other software assets re-used in previously listed applications'
},
'unknown': {
'title': 'Additional software assets',
'description': ''
}
}
def get_table_columns_md():
def column_header(column):
        return f'![{column}](https://nasa-pds.github.io/pdsen-corral/images/{column}_text.png)'
column_headers = []
for column in COLUMNS:
column_headers.append(column_header(column))
return ["tool", "version", "last updated", "description", *column_headers]
def get_table_columns_rst():
column_headers = []
for column in COLUMNS:
column_headers.append(f'l |{column}|')
return ["tool", "version", "last updated", "description", *column_headers]
def rst_column_header_images(d):
for column in COLUMNS:
d.deffered_directive('image', arg=f'https://nasa-pds.github.io/pdsen-corral/images/{column}_text.png', fields=[('alt', column)], reference=column)
def write_md_file(herd, output_file_name, version):
from mdutils import MdUtils
software_summary_md = MdUtils(file_name=output_file_name, title=f'Software Summary (build {version})')
table = get_table_columns_md()
n_columns = len(table)
for k, ch in herd.get_cattle_heads().items():
table.extend(ch.get_table_row(format='md'))
software_summary_md.new_table(columns=n_columns,
rows=herd.number_of_heads() + 1,
text=table,
text_align='center')
logger.info(f'Create file {output_file_name}.md')
software_summary_md.create_md_file()
def write_rst_introduction(d: RstClothReferenceable, version: str):
d.title(f'Software Catalog (build {version})')
d.content(f'The software provided for the build {version} are listed hereafter and organized by category:')
d.newline()
for t, section in REPO_TYPES.items():
if t != 'unknown':
d.li(f"`{section['title']}`_")
d.newline()
def write_rst_file(herd, output_file_name, version):
d = RstClothReferenceable()
write_rst_introduction(d, version)
# create one section per type of repo
data = {t: [] for t in REPO_TYPES}
for k, ch in herd.get_cattle_heads().items():
ch.set_rst(d)
if ch.type in REPO_TYPES.keys():
data[ch.type].append(ch.get_table_row(format='rst'))
else:
logger.warning("unknown type for repo %s in build version %s", ch.repo_name, version)
data['unknown'].append(ch.get_table_row(format='rst'))
for type, type_data in data.items():
if type_data:
d.h1(REPO_TYPES[type]['title'])
d.content(REPO_TYPES[type]['description'])
d.table(
get_table_columns_rst(),
data=type_data
)
rst_column_header_images(d)
logger.info(f'Create file {output_file_name}.rst')
d.write(f'{output_file_name}.rst')
def write_build_summary(
gitmodules=None,
root_dir='.',
output_file_name=None,
token=None,
dev=False,
version=None,
format='md'):
herd = Herd(gitmodules=gitmodules, dev=dev, token=token)
if version is None:
version = herd.get_shepard_version()
else:
# for unit test
herd.set_shepard_version(version)
logger.info(f'build version is {version}')
is_dev = Tags.JAVA_DEV_SUFFIX in version or Tags.PYTHON_DEV_SUFFIX in version
if dev and not is_dev:
logger.error(f'version of build does not contain {Tags.JAVA_DEV_SUFFIX} or {Tags.PYTHON_DEV_SUFFIX}, dev build summary is not generated')
exit(1)
elif not dev and is_dev:
logger.error(f'version of build contains {Tags.JAVA_DEV_SUFFIX} or {Tags.PYTHON_DEV_SUFFIX}, release build summary is not generated')
exit(1)
if not output_file_name:
output_file_name = os.path.join(root_dir, version, 'index')
os.makedirs(os.path.dirname(output_file_name), exist_ok=True)
if format == 'md':
write_md_file(herd, output_file_name, version)
elif format == 'rst':
write_rst_file(herd, output_file_name, version)
return herd
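# A minimal sketch of a call into write_build_summary, assuming a hypothetical
# .gitmodules manifest, output directory and token; when version is left out it
# is resolved from the herd, as the function body above shows.
#
#   herd = write_build_summary(
#       gitmodules=".gitmodules",
#       root_dir="docs",
#       token=os.environ["GITHUB_TOKEN"],
#       dev=False,
#       format="rst",
#   )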
| 31.257485 | 154 | 0.646935 |
ff620e9de0f3d0b9899897e1f49f68f092e2ad22 | 4,614 | py | Python | test/functional/combine_logs.py | MagnaChain/MagnaChain-dev-master | c83e7a8b9e8f9ae8684a0e3a11f1eeb42dfa1272 | ["MIT"] | 26 | 2018-11-30T09:01:34.000Z | 2020-03-11T00:41:52.000Z | test/functional/combine_logs.py | MagnaChain/MagnaChain-dev-master | c83e7a8b9e8f9ae8684a0e3a11f1eeb42dfa1272 | ["MIT"] | 3 | 2019-01-01T09:06:22.000Z | 2019-04-01T10:06:01.000Z | test/functional/combine_logs.py | MagnaChain/MagnaChain-dev-master | c83e7a8b9e8f9ae8684a0e3a11f1eeb42dfa1272 | ["MIT"] | 24 | 2018-11-30T03:32:53.000Z | 2020-03-20T04:30:34.000Z |
#!/usr/bin/env python3
"""Combine logs from multiple magnachain nodes as well as the test_framework log.
This streams the combined log output to stdout. Use combine_logs.py > outputfile
to write to an outputfile."""
import argparse
from collections import defaultdict, namedtuple
import heapq
import itertools
import os
import re
import sys
# Matches on the date format at the start of the log event
TIMESTAMP_PATTERN = re.compile(r"^\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}\.\d{6}")
LogEvent = namedtuple('LogEvent', ['timestamp', 'source', 'event'])
def main():
"""Main function. Parses args, reads the log files and renders them as text or html."""
parser = argparse.ArgumentParser(usage='%(prog)s [options] <test temporary directory>', description=__doc__)
parser.add_argument('-c', '--color', dest='color', action='store_true', help='outputs the combined log with events colored by source (requires posix terminal colors. Use less -r for viewing)')
parser.add_argument('--html', dest='html', action='store_true', help='outputs the combined log as html. Requires jinja2. pip install jinja2')
args, unknown_args = parser.parse_known_args()
if args.color and os.name != 'posix':
print("Color output requires posix terminal colors.")
sys.exit(1)
if args.html and args.color:
print("Only one out of --color or --html should be specified")
sys.exit(1)
# There should only be one unknown argument - the path of the temporary test directory
if len(unknown_args) != 1:
print("Unexpected arguments" + str(unknown_args))
sys.exit(1)
log_events = read_logs(unknown_args[0])
print_logs(log_events, color=args.color, html=args.html)
def read_logs(tmp_dir):
"""Reads log files.
Delegates to generator function get_log_events() to provide individual log events
for each of the input log files."""
files = [("test", "%s/test_framework.log" % tmp_dir)]
for i in itertools.count():
logfile = "{}/node{}/regtest/debug.log".format(tmp_dir, i)
if not os.path.isfile(logfile):
break
files.append(("node%d" % i, logfile))
return heapq.merge(*[get_log_events(source, f) for source, f in files])
def get_log_events(source, logfile):
"""Generator function that returns individual log events.
Log events may be split over multiple lines. We use the timestamp
regex match as the marker for a new log event."""
try:
with open(logfile, 'r') as infile:
event = ''
timestamp = ''
for line in infile:
# skip blank lines
if line == '\n':
continue
# if this line has a timestamp, it's the start of a new log event.
time_match = TIMESTAMP_PATTERN.match(line)
if time_match:
if event:
yield LogEvent(timestamp=timestamp, source=source, event=event.rstrip())
event = line
timestamp = time_match.group()
# if it doesn't have a timestamp, it's a continuation line of the previous log.
else:
event += "\n" + line
# Flush the final event
yield LogEvent(timestamp=timestamp, source=source, event=event.rstrip())
except FileNotFoundError:
print("File %s could not be opened. Continuing without it." % logfile, file=sys.stderr)
def print_logs(log_events, color=False, html=False):
"""Renders the iterator of log events into text or html."""
if not html:
colors = defaultdict(lambda: '')
if color:
colors["test"] = "\033[0;36m" # CYAN
colors["node0"] = "\033[0;34m" # BLUE
colors["node1"] = "\033[0;32m" # GREEN
colors["node2"] = "\033[0;31m" # RED
colors["node3"] = "\033[0;33m" # YELLOW
colors["reset"] = "\033[0m" # Reset font color
for event in log_events:
print("{0} {1: <5} {2} {3}".format(colors[event.source.rstrip()], event.source, event.event, colors["reset"]))
else:
try:
import jinja2
except ImportError:
print("jinja2 not found. Try `pip install jinja2`")
sys.exit(1)
print(jinja2.Environment(loader=jinja2.FileSystemLoader('./'))
.get_template('combined_log_template.html')
.render(title="Combined Logs from testcase", log_events=[event._asdict() for event in log_events]))
if __name__ == '__main__':
main()
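# A minimal invocation sketch, assuming a hypothetical test temporary directory;
# the flags come from the argparse definitions above and the module docstring.
#
#   ./combine_logs.py -c /tmp/test_xyz | less -r
#   ./combine_logs.py --html /tmp/test_xyz > combined_logs.html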
| 40.121739 | 196 | 0.618336 |
cb2e67c037fdc11c6cc4d1f6262564abd2764301 | 17,446 | py | Python | mmpose/models/backbones/mspn.py | hohmannr/mmpose | f24d56fde8bb206f6bfc2392dafcdc52c66a9236 | ["Apache-2.0"] | 15 | 2020-12-10T07:43:34.000Z | 2022-03-21T08:38:36.000Z | mmpose/models/backbones/mspn.py | hohmannr/mmpose | f24d56fde8bb206f6bfc2392dafcdc52c66a9236 | ["Apache-2.0"] | null | null | null | mmpose/models/backbones/mspn.py | hohmannr/mmpose | f24d56fde8bb206f6bfc2392dafcdc52c66a9236 | ["Apache-2.0"] | 10 | 2020-12-17T07:57:10.000Z | 2022-03-08T07:17:12.000Z |
import copy as cp
import torch.nn as nn
import torch.nn.functional as F
from mmcv.cnn import (ConvModule, MaxPool2d, constant_init, kaiming_init,
normal_init)
from mmcv.runner.checkpoint import load_state_dict
from mmpose.utils import get_root_logger
from ..registry import BACKBONES
from .base_backbone import BaseBackbone
from .resnet import Bottleneck as _Bottleneck
from .utils.utils import get_state_dict
class Bottleneck(_Bottleneck):
expansion = 4
"""Bottleneck block for MSPN.
Args:
in_channels (int): Input channels of this block.
out_channels (int): Output channels of this block.
stride (int): stride of the block. Default: 1
downsample (nn.Module): downsample operation on identity branch.
Default: None
norm_cfg (dict): dictionary to construct and config norm layer.
Default: dict(type='BN')
"""
def __init__(self, in_channels, out_channels, **kwargs):
super().__init__(in_channels, out_channels * 4, **kwargs)
class DownsampleModule(nn.Module):
"""Downsample module for MSPN.
Args:
block (nn.Module): Downsample block.
num_blocks (list): Number of blocks in each downsample unit.
num_units (int): Numbers of downsample units. Default: 4
has_skip (bool): Have skip connections from prior upsample
module or not. Default:False
norm_cfg (dict): dictionary to construct and config norm layer.
Default: dict(type='BN')
in_channels (int): Number of channels of the input feature to
downsample module. Default: 64
"""
def __init__(self,
block,
num_blocks,
num_units=4,
has_skip=False,
norm_cfg=dict(type='BN'),
in_channels=64):
# Protect mutable default arguments
norm_cfg = cp.deepcopy(norm_cfg)
super().__init__()
self.has_skip = has_skip
self.in_channels = in_channels
assert len(num_blocks) == num_units
self.num_blocks = num_blocks
self.num_units = num_units
self.norm_cfg = norm_cfg
self.layer1 = self._make_layer(block, in_channels, num_blocks[0])
for i in range(1, num_units):
module_name = f'layer{i + 1}'
self.add_module(
module_name,
self._make_layer(
block, in_channels * pow(2, i), num_blocks[i], stride=2))
def _make_layer(self, block, out_channels, blocks, stride=1):
downsample = None
if stride != 1 or self.in_channels != out_channels * block.expansion:
downsample = ConvModule(
self.in_channels,
out_channels * block.expansion,
kernel_size=1,
stride=stride,
padding=0,
norm_cfg=self.norm_cfg,
act_cfg=None,
inplace=True)
units = list()
units.append(
block(
self.in_channels,
out_channels,
stride=stride,
downsample=downsample,
norm_cfg=self.norm_cfg))
self.in_channels = out_channels * block.expansion
for _ in range(1, blocks):
units.append(block(self.in_channels, out_channels))
return nn.Sequential(*units)
def forward(self, x, skip1, skip2):
out = list()
for i in range(self.num_units):
module_name = f'layer{i + 1}'
module_i = getattr(self, module_name)
x = module_i(x)
if self.has_skip:
x = x + skip1[i] + skip2[i]
out.append(x)
out.reverse()
return tuple(out)
class UpsampleUnit(nn.Module):
"""Upsample unit for upsample module.
Args:
ind (int): Indicates whether to interpolate (>0) and whether to
generate feature map for the next hourglass-like module.
num_units (int): Number of units that form a upsample module. Along
with ind and gen_cross_conv, nm_units is used to decide whether
to generate feature map for the next hourglass-like module.
in_channels (int): Channel number of the skip-in feature maps from
the corresponding downsample unit.
unit_channels (int): Channel number in this unit. Default:256.
gen_skip: (bool): Whether or not to generate skips for the posterior
downsample module. Default:False
gen_cross_conv (bool): Whether to generate feature map for the next
hourglass-like module. Default:False
norm_cfg (dict): dictionary to construct and config norm layer.
Default: dict(type='BN')
        out_channels (int): Number of channels of feature output by upsample
            module. Must equal in_channels of downsample module. Default: 64
"""
def __init__(self,
ind,
num_units,
in_channels,
unit_channels=256,
gen_skip=False,
gen_cross_conv=False,
norm_cfg=dict(type='BN'),
out_channels=64):
# Protect mutable default arguments
norm_cfg = cp.deepcopy(norm_cfg)
super().__init__()
self.num_units = num_units
self.norm_cfg = norm_cfg
self.in_skip = ConvModule(
in_channels,
unit_channels,
kernel_size=1,
stride=1,
padding=0,
norm_cfg=self.norm_cfg,
act_cfg=None,
inplace=True)
self.relu = nn.ReLU(inplace=True)
self.ind = ind
if self.ind > 0:
self.up_conv = ConvModule(
unit_channels,
unit_channels,
kernel_size=1,
stride=1,
padding=0,
norm_cfg=self.norm_cfg,
act_cfg=None,
inplace=True)
self.gen_skip = gen_skip
if self.gen_skip:
self.out_skip1 = ConvModule(
in_channels,
in_channels,
kernel_size=1,
stride=1,
padding=0,
norm_cfg=self.norm_cfg,
inplace=True)
self.out_skip2 = ConvModule(
unit_channels,
in_channels,
kernel_size=1,
stride=1,
padding=0,
norm_cfg=self.norm_cfg,
inplace=True)
self.gen_cross_conv = gen_cross_conv
if self.ind == num_units - 1 and self.gen_cross_conv:
self.cross_conv = ConvModule(
unit_channels,
out_channels,
kernel_size=1,
stride=1,
padding=0,
norm_cfg=self.norm_cfg,
inplace=True)
def forward(self, x, up_x):
out = self.in_skip(x)
if self.ind > 0:
up_x = F.interpolate(
up_x,
size=(x.size(2), x.size(3)),
mode='bilinear',
align_corners=True)
up_x = self.up_conv(up_x)
out = out + up_x
out = self.relu(out)
skip1 = None
skip2 = None
if self.gen_skip:
skip1 = self.out_skip1(x)
skip2 = self.out_skip2(out)
cross_conv = None
if self.ind == self.num_units - 1 and self.gen_cross_conv:
cross_conv = self.cross_conv(out)
return out, skip1, skip2, cross_conv
class UpsampleModule(nn.Module):
"""Upsample module for MSPN.
Args:
unit_channels (int): Channel number in the upsample units.
Default:256.
num_units (int): Numbers of upsample units. Default: 4
gen_skip (bool): Whether to generate skip for posterior downsample
module or not. Default:False
gen_cross_conv (bool): Whether to generate feature map for the next
hourglass-like module. Default:False
norm_cfg (dict): dictionary to construct and config norm layer.
Default: dict(type='BN')
        out_channels (int): Number of channels of feature output by upsample
            module. Must equal in_channels of downsample module. Default: 64
"""
def __init__(self,
unit_channels=256,
num_units=4,
gen_skip=False,
gen_cross_conv=False,
norm_cfg=dict(type='BN'),
out_channels=64):
# Protect mutable default arguments
norm_cfg = cp.deepcopy(norm_cfg)
super().__init__()
self.in_channels = list()
for i in range(num_units):
self.in_channels.append(Bottleneck.expansion * out_channels *
pow(2, i))
self.in_channels.reverse()
self.num_units = num_units
self.gen_skip = gen_skip
self.gen_cross_conv = gen_cross_conv
self.norm_cfg = norm_cfg
for i in range(num_units):
module_name = f'up{i + 1}'
self.add_module(
module_name,
UpsampleUnit(
i,
self.num_units,
self.in_channels[i],
unit_channels,
self.gen_skip,
self.gen_cross_conv,
norm_cfg=self.norm_cfg,
out_channels=64))
def forward(self, x):
out = list()
skip1 = list()
skip2 = list()
cross_conv = None
for i in range(self.num_units):
module_i = getattr(self, f'up{i + 1}')
if i == 0:
outi, skip1_i, skip2_i, _ = module_i(x[i], None)
elif i == self.num_units - 1:
outi, skip1_i, skip2_i, cross_conv = module_i(x[i], out[i - 1])
else:
outi, skip1_i, skip2_i, _ = module_i(x[i], out[i - 1])
out.append(outi)
skip1.append(skip1_i)
skip2.append(skip2_i)
skip1.reverse()
skip2.reverse()
return out, skip1, skip2, cross_conv
class SingleStageNetwork(nn.Module):
"""Single_stage Network.
Args:
unit_channels (int): Channel number in the upsample units. Default:256.
num_units (int): Numbers of downsample/upsample units. Default: 4
gen_skip (bool): Whether to generate skip for posterior downsample
module or not. Default:False
gen_cross_conv (bool): Whether to generate feature map for the next
hourglass-like module. Default:False
has_skip (bool): Have skip connections from prior upsample
module or not. Default:False
num_blocks (list): Number of blocks in each downsample unit.
Default: [2, 2, 2, 2] Note: Make sure num_units==len(num_blocks)
norm_cfg (dict): dictionary to construct and config norm layer.
Default: dict(type='BN')
in_channels (int): Number of channels of the feature from ResNetTop.
Default: 64.
"""
def __init__(self,
has_skip=False,
gen_skip=False,
gen_cross_conv=False,
unit_channels=256,
num_units=4,
num_blocks=[2, 2, 2, 2],
norm_cfg=dict(type='BN'),
in_channels=64):
# Protect mutable default arguments
norm_cfg = cp.deepcopy(norm_cfg)
num_blocks = cp.deepcopy(num_blocks)
super().__init__()
assert len(num_blocks) == num_units
self.has_skip = has_skip
self.gen_skip = gen_skip
self.gen_cross_conv = gen_cross_conv
self.num_units = num_units
self.unit_channels = unit_channels
self.num_blocks = num_blocks
self.norm_cfg = norm_cfg
self.downsample = DownsampleModule(Bottleneck, num_blocks, num_units,
has_skip, norm_cfg, in_channels)
self.upsample = UpsampleModule(unit_channels, num_units, gen_skip,
gen_cross_conv, norm_cfg, in_channels)
def forward(self, x, skip1, skip2):
mid = self.downsample(x, skip1, skip2)
out, skip1, skip2, cross_conv = self.upsample(mid)
return out, skip1, skip2, cross_conv
class ResNetTop(nn.Module):
"""ResNet top for MSPN.
Args:
norm_cfg (dict): dictionary to construct and config norm layer.
Default: dict(type='BN')
channels (int): Number of channels of the feature output by ResNetTop.
"""
def __init__(self, norm_cfg=dict(type='BN'), channels=64):
# Protect mutable default arguments
norm_cfg = cp.deepcopy(norm_cfg)
super().__init__()
self.top = nn.Sequential(
ConvModule(
3,
channels,
kernel_size=7,
stride=2,
padding=3,
norm_cfg=norm_cfg,
inplace=True), MaxPool2d(kernel_size=3, stride=2, padding=1))
def forward(self, img):
return self.top(img)
@BACKBONES.register_module()
class MSPN(BaseBackbone):
"""MSPN backbone.
Paper ref: Li et al. "Rethinking on Multi-Stage Networks
for Human Pose Estimation" (CVPR 2020).
Args:
unit_channels (int): Number of Channels in an upsample unit.
Default: 256
num_stages (int): Number of stages in a multi-stage MSPN. Default: 4
num_units (int): NUmber of downsample/upsample units in a single-stage
network. Default: 4
Note: Make sure num_units == len(self.num_blocks)
num_blocks (list): Number of bottlenecks in each
downsample unit. Default: [2, 2, 2, 2]
norm_cfg (dict): dictionary to construct and config norm layer.
Default: dict(type='BN')
res_top_channels (int): Number of channels of feature from ResNetTop.
Default: 64.
Example:
>>> from mmpose.models import MSPN
>>> import torch
>>> self = MSPN(num_stages=2,num_units=2,num_blocks=[2,2])
>>> self.eval()
>>> inputs = torch.rand(1, 3, 511, 511)
>>> level_outputs = self.forward(inputs)
>>> for level_output in level_outputs:
... for feature in level_output:
... print(tuple(feature.shape))
...
(1, 256, 64, 64)
(1, 256, 128, 128)
(1, 256, 64, 64)
(1, 256, 128, 128)
"""
def __init__(self,
unit_channels=256,
num_stages=4,
num_units=4,
num_blocks=[2, 2, 2, 2],
norm_cfg=dict(type='BN'),
res_top_channels=64):
# Protect mutable default arguments
norm_cfg = cp.deepcopy(norm_cfg)
num_blocks = cp.deepcopy(num_blocks)
super().__init__()
self.unit_channels = unit_channels
self.num_stages = num_stages
self.num_units = num_units
self.num_blocks = num_blocks
self.norm_cfg = norm_cfg
assert self.num_stages > 0
assert self.num_units > 1
assert self.num_units == len(self.num_blocks)
self.top = ResNetTop(norm_cfg=norm_cfg)
self.multi_stage_mspn = nn.ModuleList([])
for i in range(self.num_stages):
if i == 0:
has_skip = False
else:
has_skip = True
if i != self.num_stages - 1:
gen_skip = True
gen_cross_conv = True
else:
gen_skip = False
gen_cross_conv = False
self.multi_stage_mspn.append(
SingleStageNetwork(has_skip, gen_skip, gen_cross_conv,
unit_channels, num_units, num_blocks,
norm_cfg, res_top_channels))
def forward(self, x):
"""Model forward function."""
out_feats = []
skip1 = None
skip2 = None
x = self.top(x)
for i in range(self.num_stages):
out, skip1, skip2, x = self.multi_stage_mspn[i](x, skip1, skip2)
out_feats.append(out)
return out_feats
def init_weights(self, pretrained=None):
"""Initialize model weights."""
if isinstance(pretrained, str):
logger = get_root_logger()
state_dict = get_state_dict(pretrained)
load_state_dict(
self.top, state_dict['top'], strict=False, logger=logger)
for i in range(self.num_stages):
load_state_dict(
self.multi_stage_mspn[i].downsample,
state_dict['bottlenecks'],
strict=False,
logger=logger)
for m in self.multi_stage_mspn.modules():
if isinstance(m, nn.Conv2d):
kaiming_init(m)
elif isinstance(m, nn.BatchNorm2d):
constant_init(m, 1)
elif isinstance(m, nn.Linear):
normal_init(m, std=0.01)
for m in self.top.modules():
if isinstance(m, nn.Conv2d):
kaiming_init(m)
| 35.387424 | 79 | 0.556632 |
c390294ee3425e1214799c54023ec1566d013dc6 | 1,336 | py | Python | data_retriever/retriever.py | johanahlqvist/poe-neural-pricer | f37617e0c4f744522ee60e968be6d6f628f053aa | ["MIT"] | 4 | 2018-03-07T10:28:44.000Z | 2019-05-09T19:14:40.000Z | data_retriever/retriever.py | johanahlqvist/poe-neural-pricer | f37617e0c4f744522ee60e968be6d6f628f053aa | ["MIT"] | null | null | null | data_retriever/retriever.py | johanahlqvist/poe-neural-pricer | f37617e0c4f744522ee60e968be6d6f628f053aa | ["MIT"] | null | null | null |
# Retrieves data from the public stash API
import requests
import json
from random import randint
import time
BASE_API_URL = 'http://www.pathofexile.com/api/public-stash-tabs'
class Retriever:
def retrieve(self, next_id, request_time):
payload = {}
payload = {'id': next_id}
while time.time() <= request_time:
time.sleep(0.1)
r = requests.get(BASE_API_URL, params=payload)
json_data = json.loads(r.text.lower())
last_id = next_id
next_id = self._decode_next_id(json_data)
if next_id == -1:
# Retry loading
return self.retrieve(last_id, request_time)
        elif next_id is None:
return None
return json_data['stashes']
def _decode_next_id(self, json_data):
# Indicates that there is a next page of data to retrieve
# If not true, the next update has not yet been released
if 'stashes' not in json_data:
print('A retriever was rate limited, pausing for 20 seconds..')
# We requested too fast, chill.
time.sleep(20)
return -1
if len(json_data['stashes']) > 0:
return json_data['next_change_id']
else:
print('Could not yet get next id. Reached end of updates for now.')
return None
| 33.4 | 79 | 0.61003 |
8e16cccd685d6ca29164b2cf838d35b593386105
| 977 |
py
|
Python
|
levenshtein-word-replacement/levenshtein-word-replacement.py
|
catseye/NaNoGenLab
|
3e4a7314e6023557856e1cc910e9d0edc4daf43c
|
[
"Unlicense"
] | 20 |
2015-06-05T14:02:12.000Z
|
2021-11-02T22:19:18.000Z
|
levenshtein-word-replacement/levenshtein-word-replacement.py
|
catseye/NaNoGenLab
|
3e4a7314e6023557856e1cc910e9d0edc4daf43c
|
[
"Unlicense"
] | 1 |
2015-10-15T12:58:35.000Z
|
2015-10-15T12:58:35.000Z
|
levenshtein-word-replacement/levenshtein-word-replacement.py
|
catseye/NaNoGenLab
|
3e4a7314e6023557856e1cc910e9d0edc4daf43c
|
[
"Unlicense"
] | 1 |
2021-04-08T23:50:06.000Z
|
2021-04-08T23:50:06.000Z
|
#!/usr/bin/env python
import editdist
import random
import string
import sys
def main(argv):
words = []
with open(argv[2], 'r') as f:
for line in f:
bits = line.strip().split()
for bit in bits:
words.extend(bit.split('--'))
with open(argv[1], 'r') as f:
for line in f:
bits = line.strip().split()
for bit in bits:
for tidbit in bit.split('--'):
output_word(tidbit, words)
def output_word(word, words):
best_x = None
best_dist = 1000000000
for x, candidate in enumerate(words):
dist = editdist.distance(word, candidate)
if dist < best_dist:
best_dist = dist
best_x = x
if best_dist == 0:
break
chosen = words.pop(best_x)
sys.stdout.write(chosen + ' ')
sys.stdout.flush() # 'cos it's a bit pokey :)
if __name__ == '__main__':
import sys
main(sys.argv)
| 22.72093 | 50 | 0.528147 |
706425b5d9663cca6a7e2cd4c4c74ba888d5beec
| 4,055 |
py
|
Python
|
frontend/pages/admin_portal/message_sender.py
|
zagaran/instant-census
|
62dd5bbc62939f43776a10708ef663722ead98af
|
[
"MIT"
] | 1 |
2021-06-01T17:03:47.000Z
|
2021-06-01T17:03:47.000Z
|
frontend/pages/admin_portal/message_sender.py
|
zagaran/instant-census
|
62dd5bbc62939f43776a10708ef663722ead98af
|
[
"MIT"
] | null | null | null |
frontend/pages/admin_portal/message_sender.py
|
zagaran/instant-census
|
62dd5bbc62939f43776a10708ef663722ead98af
|
[
"MIT"
] | null | null | null |
from flask import request, session, jsonify, flash
from flask.blueprints import Blueprint
from pytz import UnknownTimeZoneError
import pytz
from bson.objectid import ObjectId
from backend.admin_portal.common_helpers import validate_cohort, validate_user, validate_cohort_id_match, \
raise_404_error, raise_400_error
from backend.admin_portal.survey_builder_helpers import check_attributes
from backend.outgoing.exit_points import send_control_message
from conf.settings import SHOW_DELETED_USERS
from constants.cohorts import CohortStatus
from constants.messages import DEFAULT_USER_PAUSE, DEFAULT_USER_RESTART
from constants.users import Status
from database.tracking.admins import Admin
from frontend import templating, auth
from utils.formatters import convert_unicode
from utils.time import convert_from_utc, now
message_sender = Blueprint('message_sender', __name__)
@message_sender.route("/<cohort_id>/send/<user_id>", methods=["GET"])
@auth.admin
@templating.template("/admin_portal/message_sender.html")
def render_message_sender(cohort_id, user_id):
user = validate_user(user_id)
admin = Admin(ObjectId(session["admin_id"]))
# do not show if deleted and option isn't set
if not SHOW_DELETED_USERS and user["status"] == Status.deleted:
raise_404_error("User not found.")
cohort = validate_cohort(cohort_id)
if cohort["status"] == CohortStatus.completed:
cohort_completed = True
else:
cohort_completed = False
validate_cohort_id_match(user["cohort_id"], ObjectId(cohort_id))
# get data
user_messages = user.all_messages()
# convert messages to admin timezone
admin_timezone = admin["timezone"]
for message in user_messages:
try:
message["time"] = convert_from_utc(message["time"], admin_timezone)
except UnknownTimeZoneError:
pass
# convert user create time to admin timezone and set 3 letter timezone
timezone = ""
try:
user["create_time"] = convert_from_utc(user["create_time"], admin_timezone)
timezone = pytz.timezone(admin_timezone).tzname(now(), is_dst=False)
except UnknownTimeZoneError:
pass
return {
"page": "message_sender",
"cohort": cohort,
"cohort_id": cohort_id,
"COHORT_COMPLETED": cohort_completed,
"timezone": timezone,
"user": user,
"user_messages": user_messages,
"DEFAULT_COHORT_PAUSE": DEFAULT_USER_PAUSE,
"DEFAULT_USER_RESTART": DEFAULT_USER_RESTART
}
@message_sender.route("/send_manual_message", methods=["POST"])
@auth.admin
def send_manual_message():
user_id = request.form["user_id"]
user = validate_user(user_id)
# do not send if user is deleted
if user["status"] == Status.deleted:
raise_400_error("User is deleted.")
cohort_id = request.form["cohort_id"]
cohort = validate_cohort(cohort_id)
validate_cohort_id_match(user["cohort_id"], ObjectId(cohort_id))
    if not cohort.is_initiated():
flash("This action is not allowed on a cohort that has not been initiated or has been completed.", "error")
return jsonify({
"reload": "true"
})
message = convert_unicode(request.form["message"])
try:
check_attributes({"text": message}, cohort)
except:
# The flash message happens in check_attributes, but we run a try/except to force frontend to reload
return jsonify({
"reload": "true"
})
send_control_message(user, message, delay=False)
return "success"
@message_sender.route("/mark_as_handled", methods=["POST"])
@auth.admin
def mark_as_handled():
user_id = request.form["userId"]
user = validate_user(user_id)
# mark user messages as handled
user_messages = user.all_messages()
for message in user_messages:
if "needs_review" in message and message["needs_review"] is True:
message.update(needs_review=False)
# mark user as handled
user.update(needs_review=False)
return "success"
| 37.201835 | 115 | 0.713687 |
544078bfaa0ad4f586c44eb56111e56480b4fcac
| 42,165 |
py
|
Python
|
rpython/translator/c/funcgen.py
|
bxtkezhan/rpython
|
10d53fe95495c3be5933320c277f6dd26238408b
|
[
"MIT"
] | 11 |
2020-07-02T13:57:46.000Z
|
2022-01-23T18:04:42.000Z
|
rpython/translator/c/funcgen.py
|
GabriellaUwa/pypy
|
2ede3b557a25cb49db969e942ca5a7f8a9eae0d4
|
[
"Apache-2.0",
"OpenSSL"
] | 1 |
2021-11-05T06:29:33.000Z
|
2021-11-06T12:33:46.000Z
|
rpython/translator/c/funcgen.py
|
GabriellaUwa/pypy
|
2ede3b557a25cb49db969e942ca5a7f8a9eae0d4
|
[
"Apache-2.0",
"OpenSSL"
] | 2 |
2020-12-17T09:35:36.000Z
|
2021-11-24T15:34:08.000Z
|
import sys
from rpython.translator.c.support import cdecl
from rpython.translator.c.support import llvalue_from_constant, gen_assignments
from rpython.translator.c.support import c_string_constant, barebonearray
from rpython.flowspace.model import Variable, Constant, mkentrymap
from rpython.rtyper.lltypesystem.lltype import (Ptr, Void, Bool, Signed, Unsigned,
SignedLongLong, Float, UnsignedLongLong, Char, UniChar, ContainerType,
Array, FixedSizeArray, ForwardReference, FuncType)
from rpython.rtyper.lltypesystem.rffi import INT
from rpython.rtyper.lltypesystem.llmemory import Address
from rpython.translator.backendopt.ssa import SSI_to_SSA
from rpython.translator.backendopt.innerloop import find_inner_loops
from rpython.tool.identity_dict import identity_dict
from rpython.rlib.objectmodel import CDefinedIntSymbolic
LOCALVAR = 'l_%s'
KEEP_INLINED_GRAPHS = False
def make_funcgen(graph, db, exception_policy, functionname):
graph._seen_by_the_backend = True
# apply the exception transformation
if db.exctransformer:
db.exctransformer.create_exception_handling(graph)
# apply the gc transformation
if db.gctransformer:
db.gctransformer.transform_graph(graph)
return FunctionCodeGenerator(graph, db, exception_policy, functionname)
class FunctionCodeGenerator(object):
"""
Collects information about a function which we have to generate
from a flow graph.
"""
def __init__(self, graph, db, exception_policy, functionname):
self.graph = graph
self.db = db
self.gcpolicy = db.gcpolicy
self.exception_policy = exception_policy
self.functionname = functionname
self.collect_var_and_types()
for v in self.vars:
T = v.concretetype
# obscure: skip forward references and hope for the best
# (needed for delayed function pointers)
if isinstance(T, Ptr) and T.TO.__class__ == ForwardReference:
continue
db.gettype(T) # force the type to be considered by the database
self.illtypes = None
def collect_var_and_types(self):
#
# collect all variables and constants used in the body,
# and get their types now
#
# NOTE: cannot use dictionaries with Constants as keys, because
# Constants may hash and compare equal but have different lltypes
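        # (Editor's note, not in the original source: this is also why the
        # code below tracks already-seen values with identity_dict(), keyed on
        # object identity, instead of an ordinary dict or set.)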
self.all_cached_consts = None # will be filled after implementation_end
mix = [self.graph.getreturnvar()]
self.more_ll_values = []
for block in self.graph.iterblocks():
mix.extend(block.inputargs)
for op in block.operations:
mix.extend(op.args)
mix.append(op.result)
for link in block.exits:
mix.extend(link.getextravars())
mix.extend(link.args)
if hasattr(link, 'llexitcase'):
self.more_ll_values.append(link.llexitcase)
elif link.exitcase is not None:
mix.append(Constant(link.exitcase))
uniquemix = []
seen = identity_dict()
for v in mix:
if v not in seen:
uniquemix.append(v)
seen[v] = True
self.vars = uniquemix
def implementation_begin(self):
SSI_to_SSA(self.graph)
self.collect_var_and_types()
self.blocknum = {}
for block in self.graph.iterblocks():
self.blocknum[block] = len(self.blocknum)
db = self.db
lltypes = identity_dict()
for v in self.vars:
T = v.concretetype
typename = db.gettype(T)
lltypes[v] = T, typename
self.illtypes = lltypes
self.innerloops = {} # maps the loop's header block to a Loop()
for loop in find_inner_loops(self.graph, Bool):
self.innerloops[loop.headblock] = loop
def graphs_to_patch(self):
yield self.graph
def implementation_end(self):
self.all_cached_consts = list(self.allconstantvalues())
self.illtypes = None
self.vars = None
self.blocknum = None
self.innerloops = None
def argnames(self):
return [LOCALVAR % v.name for v in self.graph.getargs()]
def allvariables(self):
return [v for v in self.vars if isinstance(v, Variable)]
def allconstants(self):
return [c for c in self.vars if isinstance(c, Constant)]
def allconstantvalues(self):
for c in self.vars:
if isinstance(c, Constant):
yield llvalue_from_constant(c)
for llvalue in self.more_ll_values:
yield llvalue
def lltypemap(self, v):
T, typename = self.illtypes[v]
return T
def lltypename(self, v):
T, typename = self.illtypes[v]
return typename
def expr(self, v, special_case_void=True):
if isinstance(v, Variable):
if self.lltypemap(v) is Void and special_case_void:
return '/* nothing */'
else:
return LOCALVAR % v.name
elif isinstance(v, Constant):
value = llvalue_from_constant(v)
if value is None and not special_case_void:
return 'nothing'
else:
return self.db.get(value)
else:
raise TypeError("expr(%r)" % (v,))
# ____________________________________________________________
def cfunction_declarations(self):
# declare the local variables, excluding the function arguments
seen = set()
for a in self.graph.getargs():
seen.add(a.name)
result_by_name = []
for v in self.allvariables():
name = v.name
if name not in seen:
seen.add(name)
result = cdecl(self.lltypename(v), LOCALVAR % name) + ';'
if self.lltypemap(v) is Void:
continue #result = '/*%s*/' % result
result_by_name.append((v._name, result))
result_by_name.sort()
return [result for name, result in result_by_name]
# ____________________________________________________________
extra_return_text = None
def cfunction_body(self):
if self.db.reverse_debugger:
from rpython.translator.revdb import gencsupp
(extra_enter_text, self.extra_return_text) = (
gencsupp.prepare_function(self))
if extra_enter_text:
yield extra_enter_text
graph = self.graph
# ----- for gc_enter_roots_frame
_seen = set()
for block in graph.iterblocks():
for op in block.operations:
if op.opname == 'gc_enter_roots_frame':
_seen.add(tuple(op.args))
if _seen:
assert len(_seen) == 1, (
"multiple different gc_enter_roots_frame in %r" % (graph,))
for line in self.gcpolicy.enter_roots_frame(self, list(_seen)[0]):
yield line
# ----- done
# Locate blocks with a single predecessor, which can be written
# inline in place of a "goto":
entrymap = mkentrymap(graph)
self.inlinable_blocks = {
block for block in entrymap if len(entrymap[block]) == 1}
yield ''
for line in self.gen_goto(graph.startblock):
yield line
# Only blocks left are those that have more than one predecessor.
for block in graph.iterblocks():
if block in self.inlinable_blocks:
continue
for line in self.gen_block(block):
yield line
def gen_block(self, block):
if 1: # (preserve indentation)
self._current_block = block
myblocknum = self.blocknum[block]
if block in self.inlinable_blocks:
# debug comment
yield '/* block%d: (inlined) */' % myblocknum
else:
yield 'block%d:' % myblocknum
if block in self.innerloops:
for line in self.gen_while_loop_hack(block):
yield line
return
for i, op in enumerate(block.operations):
for line in self.gen_op(op):
yield line
if len(block.exits) == 0:
assert len(block.inputargs) == 1
# regular return block
retval = self.expr(block.inputargs[0])
if self.exception_policy != "exc_helper":
yield 'RPY_DEBUG_RETURN();'
if self.extra_return_text:
yield self.extra_return_text
yield 'return %s;' % retval
return
elif block.exitswitch is None:
# single-exit block
assert len(block.exits) == 1
for op in self.gen_link(block.exits[0]):
yield op
else:
assert not block.canraise
# block ending in a switch on a value
TYPE = self.lltypemap(block.exitswitch)
if TYPE == Bool:
expr = self.expr(block.exitswitch)
for link in block.exits[:0:-1]:
assert link.exitcase in (False, True)
if not link.exitcase:
expr = '!' + expr
yield 'if (%s) {' % expr
for op in self.gen_link(link):
yield '\t' + op
yield '}'
link = block.exits[0]
assert link.exitcase in (False, True)
for op in self.gen_link(link):
yield op
elif TYPE in (Signed, Unsigned, SignedLongLong,
UnsignedLongLong, Char, UniChar):
defaultlink = None
expr = self.expr(block.exitswitch)
yield 'switch (%s) {' % self.expr(block.exitswitch)
for link in block.exits:
if link.exitcase == 'default':
defaultlink = link
continue
yield 'case %s:' % self.db.get(link.llexitcase)
for op in self.gen_link(link):
yield '\t' + op
# 'break;' not needed, as gen_link ends in a 'goto'
# Emit default case
yield 'default:'
if defaultlink is None:
yield '\tassert(!"bad switch!!"); abort();'
else:
for op in self.gen_link(defaultlink):
yield '\t' + op
yield '}'
else:
raise TypeError("exitswitch type not supported"
" Got %r" % (TYPE,))
def gen_link(self, link):
"Generate the code to jump across the given Link."
assignments = []
for a1, a2 in zip(link.args, link.target.inputargs):
a2type, a2typename = self.illtypes[a2]
if a2type is Void:
continue
src = self.expr(a1)
dest = LOCALVAR % a2.name
assignments.append((a2typename, dest, src))
for line in gen_assignments(assignments):
yield line
for line in self.gen_goto(link.target, link):
yield line
def gen_goto(self, target, link=None):
"""Recursively expand block with inlining or goto.
Blocks that have only one predecessor are inlined directly, all others
are reached via goto.
"""
label = 'block%d' % self.blocknum[target]
if target in self.innerloops:
loop = self.innerloops[target]
if link is loop.links[-1]: # link that ends a loop
label += '_back'
if target in self.inlinable_blocks:
for line in self.gen_block(target):
yield line
else:
yield 'goto %s;' % label
def gen_op(self, op):
macro = 'OP_%s' % op.opname.upper()
line = None
if (op.opname.startswith('gc_') and
op.opname not in ('gc_load_indexed', 'gc_store',
'gc_store_indexed')):
meth = getattr(self.gcpolicy, macro, None)
if meth:
line = meth(self, op)
else:
meth = getattr(self, macro, None)
if meth:
line = meth(op)
if line is None:
lst = [self.expr(v) for v in op.args]
lst.append(self.expr(op.result))
line = '%s(%s);' % (macro, ', '.join(lst))
if self.db.reverse_debugger:
from rpython.translator.revdb import gencsupp
if op.opname in gencsupp.set_revdb_protected:
line = gencsupp.emit(line, self.lltypename(op.result),
self.expr(op.result))
if "\n" not in line:
yield line
else:
for line in line.splitlines():
yield line
def gen_while_loop_hack(self, headblock):
# a GCC optimization hack: generate 'while' statement in the
# source to convince the C compiler that it is really dealing
# with loops. For the head of a loop (i.e. the block where the
# decision is) we produce code like this:
#
# headblock:
# while (1) {
# ...headblock operations...
# if (!cond) break;
# goto firstbodyblock;
# headblock_back: ;
# }
#
# The real body of the loop is not syntactically within the
# scope of { }, but apparently this doesn't matter to GCC as
# long as it is within the { } via the chain of goto's starting
# at firstbodyblock: and ending at headblock_back:. We need to
# duplicate the operations of headblock, though, because the
# chain of gotos entering the loop must arrive outside the
# while() at the headblock: label and the chain of goto's that
# close the loop must arrive inside the while() at the
# headblock_back: label.
looplinks = self.innerloops[headblock].links
enterlink = looplinks[0]
assert len(headblock.exits) == 2
assert isinstance(headblock.exits[0].exitcase, bool)
assert isinstance(headblock.exits[1].exitcase, bool)
i = list(headblock.exits).index(enterlink)
exitlink = headblock.exits[1 - i]
yield 'while (1) {'
for i, op in enumerate(headblock.operations):
for line in self.gen_op(op):
yield '\t' + line
expr = self.expr(headblock.exitswitch)
if enterlink.exitcase == True:
expr = '!' + expr
yield '\tif (%s) break;' % expr
for op in self.gen_link(enterlink):
yield '\t' + op
yield ' block%d_back: ;' % self.blocknum[headblock]
yield '}'
for op in self.gen_link(exitlink):
yield op
# ____________________________________________________________
# the C preprocessor cannot handle operations taking a variable number
# of arguments, so here are Python methods that do it
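    # (Editor's note, not in the original source: for example, OP_NEWLIST
    # below turns two element expressions a, b with result r into the C line
    #   OP_NEWLIST((2, a, b), r);
    # i.e. the argument count is prepended so the macro itself can stay
    # fixed-arity.)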
def OP_NEWLIST(self, op):
args = [self.expr(v) for v in op.args]
r = self.expr(op.result)
if len(args) == 0:
return 'OP_NEWLIST0(%s);' % (r, )
else:
args.insert(0, '%d' % len(args))
return 'OP_NEWLIST((%s), %s);' % (', '.join(args), r)
def OP_NEWDICT(self, op):
args = [self.expr(v) for v in op.args]
r = self.expr(op.result)
if len(args) == 0:
return 'OP_NEWDICT0(%s);' % (r, )
else:
assert len(args) % 2 == 0
args.insert(0, '%d' % (len(args)//2))
return 'OP_NEWDICT((%s), %s);' % (', '.join(args), r)
def OP_NEWTUPLE(self, op):
args = [self.expr(v) for v in op.args]
r = self.expr(op.result)
args.insert(0, '%d' % len(args))
return 'OP_NEWTUPLE((%s), %s);' % (', '.join(args), r)
def OP_SIMPLE_CALL(self, op):
args = [self.expr(v) for v in op.args]
r = self.expr(op.result)
args.append('NULL')
return 'OP_SIMPLE_CALL((%s), %s);' % (', '.join(args), r)
def OP_CALL_ARGS(self, op):
args = [self.expr(v) for v in op.args]
r = self.expr(op.result)
return 'OP_CALL_ARGS((%s), %s);' % (', '.join(args), r)
def generic_call(self, FUNC, fnexpr, args_v, v_result, targets=None):
args = []
assert len(args_v) == len(FUNC.TO.ARGS)
for v, ARGTYPE in zip(args_v, FUNC.TO.ARGS):
if ARGTYPE is Void:
continue # skip 'void' argument
args.append(self.expr(v))
# special case for rctypes: by-value container args:
# XXX is this still needed now that rctypes is gone
if isinstance(ARGTYPE, ContainerType):
args[-1] = '*%s' % (args[-1],)
line = '%s(%s);' % (fnexpr, ', '.join(args))
if self.lltypemap(v_result) is not Void:
# skip assignment of 'void' return value
r = self.expr(v_result)
line = '%s = %s' % (r, line)
else:
r = None
if targets is not None:
for graph in targets:
if getattr(graph, 'inhibit_tail_call', False):
line += '\nPYPY_INHIBIT_TAIL_CALL();'
break
elif self.db.reverse_debugger:
from rpython.translator.revdb import gencsupp
line = gencsupp.emit_residual_call(self, line, v_result, r)
return line
def OP_DIRECT_CALL(self, op):
fn = op.args[0]
try:
targets = [fn.value._obj.graph]
except AttributeError:
targets = None
return self.generic_call(fn.concretetype, self.expr(fn),
op.args[1:], op.result, targets)
def OP_INDIRECT_CALL(self, op):
fn = op.args[0]
return self.generic_call(fn.concretetype, self.expr(fn),
op.args[1:-1], op.result, op.args[-1].value)
def OP_ADR_CALL(self, op):
ARGTYPES = [v.concretetype for v in op.args[1:]]
RESTYPE = op.result.concretetype
FUNC = Ptr(FuncType(ARGTYPES, RESTYPE))
typename = self.db.gettype(FUNC)
fnaddr = op.args[0]
fnexpr = '((%s)%s)' % (cdecl(typename, ''), self.expr(fnaddr))
return self.generic_call(FUNC, fnexpr, op.args[1:], op.result)
def OP_JIT_CONDITIONAL_CALL(self, op):
return 'abort(); /* jit_conditional_call */'
def OP_JIT_CONDITIONAL_CALL_VALUE(self, op):
return 'abort(); /* jit_conditional_call_value */'
# low-level operations
def generic_get(self, op, sourceexpr, accessing_mem=True):
T = self.lltypemap(op.result)
newvalue = self.expr(op.result, special_case_void=False)
result = '%s = %s;' % (newvalue, sourceexpr)
if T is Void:
result = '/* %s */' % result
if self.db.reverse_debugger:
S = self.lltypemap(op.args[0]).TO
if (S._gckind != 'gc' and not S._hints.get('is_excdata')
and not S._hints.get('static_immutable')
and not S._hints.get('ignore_revdb')
and accessing_mem):
from rpython.translator.revdb import gencsupp
result = gencsupp.emit(result, self.lltypename(op.result),
newvalue)
return result
def generic_set(self, op, targetexpr):
newvalue = self.expr(op.args[-1], special_case_void=False)
result = '%s = %s;' % (targetexpr, newvalue)
T = self.lltypemap(op.args[-1])
if T is Void:
result = '/* %s */' % result
if self.db.reverse_debugger:
S = self.lltypemap(op.args[0]).TO
if S._gckind != 'gc' and not S._hints.get('is_excdata'):
from rpython.translator.revdb import gencsupp
result = gencsupp.emit_void(result)
return result
def OP_GETFIELD(self, op, ampersand='', accessing_mem=True):
assert isinstance(op.args[1], Constant)
STRUCT = self.lltypemap(op.args[0]).TO
structdef = self.db.gettypedefnode(STRUCT)
baseexpr_is_const = isinstance(op.args[0], Constant)
expr = ampersand + structdef.ptr_access_expr(self.expr(op.args[0]),
op.args[1].value,
baseexpr_is_const)
return self.generic_get(op, expr, accessing_mem=accessing_mem)
def OP_BARE_SETFIELD(self, op):
assert isinstance(op.args[1], Constant)
STRUCT = self.lltypemap(op.args[0]).TO
structdef = self.db.gettypedefnode(STRUCT)
baseexpr_is_const = isinstance(op.args[0], Constant)
expr = structdef.ptr_access_expr(self.expr(op.args[0]),
op.args[1].value,
baseexpr_is_const)
return self.generic_set(op, expr)
def OP_GETSUBSTRUCT(self, op):
RESULT = self.lltypemap(op.result).TO
if (isinstance(RESULT, FixedSizeArray) or
(isinstance(RESULT, Array) and barebonearray(RESULT))):
return self.OP_GETFIELD(op, ampersand='', accessing_mem=False)
else:
return self.OP_GETFIELD(op, ampersand='&', accessing_mem=False)
def OP_GETARRAYSIZE(self, op):
ARRAY = self.lltypemap(op.args[0]).TO
if isinstance(ARRAY, FixedSizeArray):
return '%s = %d;' % (self.expr(op.result),
ARRAY.length)
else:
return self.generic_get(op, '%s->length;' % self.expr(op.args[0]))
def OP_GETARRAYITEM(self, op):
ARRAY = self.lltypemap(op.args[0]).TO
ptr = self.expr(op.args[0])
index = self.expr(op.args[1])
arraydef = self.db.gettypedefnode(ARRAY)
return self.generic_get(op, arraydef.itemindex_access_expr(ptr, index))
def OP_SETARRAYITEM(self, op):
ARRAY = self.lltypemap(op.args[0]).TO
ptr = self.expr(op.args[0])
index = self.expr(op.args[1])
arraydef = self.db.gettypedefnode(ARRAY)
return self.generic_set(op, arraydef.itemindex_access_expr(ptr, index))
OP_BARE_SETARRAYITEM = OP_SETARRAYITEM
def OP_GETARRAYSUBSTRUCT(self, op):
ARRAY = self.lltypemap(op.args[0]).TO
ptr = self.expr(op.args[0])
index = self.expr(op.args[1])
arraydef = self.db.gettypedefnode(ARRAY)
return '%s = &%s;' % (self.expr(op.result),
arraydef.itemindex_access_expr(ptr, index))
def interior_expr(self, args, rettype=False):
TYPE = args[0].concretetype.TO
expr = self.expr(args[0])
for i, arg in enumerate(args[1:]):
defnode = self.db.gettypedefnode(TYPE)
if arg.concretetype is Void:
fieldname = arg.value
if i == 0:
expr = defnode.ptr_access_expr(expr, fieldname)
else:
expr = defnode.access_expr(expr, fieldname)
if isinstance(TYPE, FixedSizeArray):
TYPE = TYPE.OF
else:
TYPE = getattr(TYPE, fieldname)
else:
indexexpr = self.expr(arg)
if i == 0:
expr = defnode.itemindex_access_expr(expr, indexexpr)
else:
expr = defnode.access_expr_varindex(expr, indexexpr)
TYPE = TYPE.OF
if rettype:
return expr, TYPE
else:
return expr
def OP_GETINTERIORFIELD(self, op):
return self.generic_get(op, self.interior_expr(op.args))
def OP_BARE_SETINTERIORFIELD(self, op):
return self.generic_set(op, self.interior_expr(op.args[:-1]))
def OP_GETINTERIORARRAYSIZE(self, op):
expr, ARRAY = self.interior_expr(op.args, True)
if isinstance(ARRAY, FixedSizeArray):
return '%s = %d;'%(self.expr(op.result), ARRAY.length)
else:
assert isinstance(ARRAY, Array)
return self.generic_get(op, '%s.length;' % expr)
def OP_PTR_NONZERO(self, op):
return '%s = (%s != NULL);' % (self.expr(op.result),
self.expr(op.args[0]))
def OP_PTR_ISZERO(self, op):
return '%s = (%s == NULL);' % (self.expr(op.result),
self.expr(op.args[0]))
def OP_PTR_EQ(self, op):
return '%s = (%s == %s);' % (self.expr(op.result),
self.expr(op.args[0]),
self.expr(op.args[1]))
def OP_PTR_NE(self, op):
return '%s = (%s != %s);' % (self.expr(op.result),
self.expr(op.args[0]),
self.expr(op.args[1]))
def _op_boehm_malloc(self, op, is_atomic):
expr_result = self.expr(op.result)
res = 'OP_BOEHM_ZERO_MALLOC(%s, %s, void*, %d, 0);' % (
self.expr(op.args[0]),
expr_result,
is_atomic)
if self.db.reverse_debugger:
from rpython.translator.revdb import gencsupp
res += gencsupp.record_malloc_uid(expr_result)
return res
def OP_BOEHM_MALLOC(self, op):
return self._op_boehm_malloc(op, 0)
def OP_BOEHM_MALLOC_ATOMIC(self, op):
return self._op_boehm_malloc(op, 1)
def OP_BOEHM_REGISTER_FINALIZER(self, op):
if self.db.reverse_debugger:
from rpython.translator.revdb import gencsupp
return gencsupp.boehm_register_finalizer(self, op)
return 'GC_REGISTER_FINALIZER(%s, (GC_finalization_proc)%s, NULL, NULL, NULL);' \
% (self.expr(op.args[0]), self.expr(op.args[1]))
def OP_DIRECT_FIELDPTR(self, op):
return self.OP_GETFIELD(op, ampersand='&', accessing_mem=False)
def OP_DIRECT_ARRAYITEMS(self, op):
ARRAY = self.lltypemap(op.args[0]).TO
items = self.expr(op.args[0])
if not isinstance(ARRAY, FixedSizeArray) and not barebonearray(ARRAY):
items += '->items'
return '%s = %s;' % (self.expr(op.result), items)
def OP_DIRECT_PTRADD(self, op):
ARRAY = self.lltypemap(op.args[0]).TO
if ARRAY._hints.get("render_as_void"):
return '%s = (char *)%s + %s;' % (
self.expr(op.result),
self.expr(op.args[0]),
self.expr(op.args[1]))
else:
return '%s = %s + %s;' % (
self.expr(op.result),
self.expr(op.args[0]),
self.expr(op.args[1]))
def _check_split_gc_address_space(self, op):
if self.db.split_gc_address_space:
TYPE = self.lltypemap(op.result)
TSRC = self.lltypemap(op.args[0])
gcdst = isinstance(TYPE, Ptr) and TYPE.TO._gckind == 'gc'
gcsrc = isinstance(TSRC, Ptr) and TSRC.TO._gckind == 'gc'
if gcsrc != gcdst:
raise Exception(
"cast between pointer types changes the address\n"
"space, but the 'split_gc_address_space' option is enabled:\n"
" func: %s\n"
" op: %s\n"
" from: %s\n"
" to: %s" % (self.graph, op, TSRC, TYPE))
def OP_CAST_POINTER(self, op):
self._check_split_gc_address_space(op)
TYPE = self.lltypemap(op.result)
typename = self.db.gettype(TYPE)
result = []
result.append('%s = (%s)%s;' % (self.expr(op.result),
cdecl(typename, ''),
self.expr(op.args[0])))
return '\t'.join(result)
OP_CAST_PTR_TO_ADR = OP_CAST_POINTER
OP_CAST_ADR_TO_PTR = OP_CAST_POINTER
OP_CAST_OPAQUE_PTR = OP_CAST_POINTER
def OP_CAST_PTR_TO_INT(self, op):
if self.db.reverse_debugger:
TSRC = self.lltypemap(op.args[0])
if isinstance(TSRC, Ptr) and TSRC.TO._gckind == 'gc':
from rpython.translator.revdb import gencsupp
return gencsupp.cast_gcptr_to_int(self, op)
return self.OP_CAST_POINTER(op)
def OP_REVDB_DO_NEXT_CALL(self, op):
self.revdb_do_next_call = True
return "/* revdb_do_next_call */"
def OP_LENGTH_OF_SIMPLE_GCARRAY_FROM_OPAQUE(self, op):
return ('%s = *(long *)(((char *)%s) + RPY_SIZE_OF_GCHEADER);'
' /* length_of_simple_gcarray_from_opaque */'
% (self.expr(op.result), self.expr(op.args[0])))
def OP_CAST_INT_TO_PTR(self, op):
self._check_split_gc_address_space(op)
TYPE = self.lltypemap(op.result)
typename = self.db.gettype(TYPE)
return "%s = (%s)%s;" % (self.expr(op.result), cdecl(typename, ""),
self.expr(op.args[0]))
def OP_SAME_AS(self, op):
result = []
TYPE = self.lltypemap(op.result)
assert self.lltypemap(op.args[0]) == TYPE
if TYPE is not Void:
result.append('%s = %s;' % (self.expr(op.result),
self.expr(op.args[0])))
return '\t'.join(result)
def OP_HINT(self, op):
hints = op.args[1].value
return '%s\t/* hint: %r */' % (self.OP_SAME_AS(op), hints)
    def OP_KEEPALIVE(self, op): # xxx what should be the semantic consequences of this
v = op.args[0]
TYPE = self.lltypemap(v)
if TYPE is Void:
return "/* kept alive: void */"
if isinstance(TYPE, Ptr) and TYPE.TO._gckind == 'gc':
meth = getattr(self.gcpolicy, 'GC_KEEPALIVE', None)
if meth:
return meth(self, v)
return "/* kept alive: %s */" % self.expr(v)
#address operations
def OP_RAW_STORE(self, op):
addr = self.expr(op.args[0])
offset = self.expr(op.args[1])
value = self.expr(op.args[2])
TYPE = op.args[2].concretetype
typename = cdecl(self.db.gettype(TYPE).replace('@', '*@'), '')
return (
'((%(typename)s) (((char *)%(addr)s) + %(offset)s))[0] = %(value)s;'
% locals())
OP_BARE_RAW_STORE = OP_RAW_STORE
OP_GC_STORE = OP_RAW_STORE # the difference is only in 'revdb_protect'
def OP_RAW_LOAD(self, op):
addr = self.expr(op.args[0])
offset = self.expr(op.args[1])
result = self.expr(op.result)
TYPE = op.result.concretetype
typename = cdecl(self.db.gettype(TYPE).replace('@', '*@'), '')
return (
"%(result)s = ((%(typename)s) (((char *)%(addr)s) + %(offset)s))[0];"
% locals())
def OP_GC_LOAD_INDEXED(self, op):
addr = self.expr(op.args[0])
index = self.expr(op.args[1])
scale = self.expr(op.args[2])
base_ofs = self.expr(op.args[3])
result = self.expr(op.result)
TYPE = op.result.concretetype
typename = cdecl(self.db.gettype(TYPE).replace('@', '*@'), '')
return (
"%(result)s = ((%(typename)s) (((char *)%(addr)s) + "
"%(base_ofs)s + %(scale)s * %(index)s))[0];"
% locals())
def OP_GC_STORE_INDEXED(self, op):
addr = self.expr(op.args[0])
index = self.expr(op.args[1])
value = self.expr(op.args[2])
scale = self.expr(op.args[3])
base_ofs = self.expr(op.args[4])
TYPE = op.args[2].concretetype
typename = cdecl(self.db.gettype(TYPE).replace('@', '*@'), '')
return (
"((%(typename)s) (((char *)%(addr)s) + "
"%(base_ofs)s + %(scale)s * %(index)s))[0] = %(value)s;"
% locals())
def OP_CAST_PRIMITIVE(self, op):
self._check_split_gc_address_space(op)
TYPE = self.lltypemap(op.result)
val = self.expr(op.args[0])
result = self.expr(op.result)
if TYPE == Bool:
return "%(result)s = !!%(val)s;" % locals()
ORIG = self.lltypemap(op.args[0])
if ORIG is Char:
val = "(unsigned char)%s" % val
elif ORIG is UniChar:
val = "(unsigned long)%s" % val
typename = cdecl(self.db.gettype(TYPE), '')
return "%(result)s = (%(typename)s)(%(val)s);" % locals()
OP_FORCE_CAST = OP_CAST_PRIMITIVE # xxx the same logic works
def OP_RESUME_POINT(self, op):
return '/* resume point %s */'%(op.args[0],)
def OP_DEBUG_PRINT(self, op):
# XXX
from rpython.rtyper.lltypesystem.rstr import STR
format = []
argv = []
if self.db.reverse_debugger:
format.append('{%d} ')
argv.append('(int)getpid()')
free_line = ""
for arg in op.args:
T = arg.concretetype
if T == Ptr(STR):
if isinstance(arg, Constant):
format.append(''.join(arg.value.chars).replace('%', '%%'))
else:
format.append('%s')
argv.append('RPyString_AsCharP(%s)' % self.expr(arg))
free_line = "RPyString_FreeCache();"
continue
elif T == Signed:
format.append('%ld')
elif T == INT:
format.append('%d')
elif T == Unsigned:
format.append('%lu')
elif T == Float:
format.append('%f')
elif isinstance(T, Ptr) or T == Address:
format.append('%p')
elif T == Char:
if isinstance(arg, Constant):
format.append(arg.value.replace('%', '%%'))
continue
format.append('%c')
elif T == Bool:
format.append('%s')
argv.append('(%s) ? "True" : "False"' % self.expr(arg))
continue
elif T == SignedLongLong:
if sys.platform == 'win32':
format.append('%I64d')
else:
format.append('%lld')
elif T == UnsignedLongLong:
if sys.platform == 'win32':
format.append('%I64u')
else:
format.append('%llu')
else:
raise Exception("don't know how to debug_print %r" % (T,))
argv.append(self.expr(arg))
argv.insert(0, c_string_constant(' '.join(format) + '\n'))
return (
"if (PYPY_HAVE_DEBUG_PRINTS) { fprintf(PYPY_DEBUG_FILE, %s); %s}"
% (', '.join(argv), free_line))
def _op_debug(self, macro, op):
v_cat, v_timestamp = op.args
if isinstance(v_cat, Constant):
string_literal = c_string_constant(''.join(v_cat.value.chars))
return "%s = %s(%s, %s);" % (self.expr(op.result),
macro,
string_literal,
self.expr(v_timestamp))
else:
x = "%s = %s(RPyString_AsCharP(%s), %s);\n" % (self.expr(op.result),
macro,
self.expr(v_cat),
self.expr(v_timestamp))
x += "RPyString_FreeCache();"
return x
def OP_DEBUG_START(self, op):
return self._op_debug('PYPY_DEBUG_START', op)
def OP_DEBUG_STOP(self, op):
return self._op_debug('PYPY_DEBUG_STOP', op)
def OP_HAVE_DEBUG_PRINTS_FOR(self, op):
arg = op.args[0]
assert isinstance(arg, Constant) and isinstance(arg.value, str)
string_literal = c_string_constant(arg.value)
return '%s = pypy_have_debug_prints_for(%s);' % (
self.expr(op.result), string_literal)
def OP_DEBUG_ASSERT(self, op):
return 'RPyAssert(%s, %s);' % (self.expr(op.args[0]),
c_string_constant(op.args[1].value))
def OP_DEBUG_ASSERT_NOT_NONE(self, op):
return 'RPyAssert(%s != NULL, "ll_assert_not_none() failed");' % (
self.expr(op.args[0]),)
def OP_DEBUG_FATALERROR(self, op):
# XXX
from rpython.rtyper.lltypesystem.rstr import STR
msg = op.args[0]
assert msg.concretetype == Ptr(STR)
if isinstance(msg, Constant):
msg = c_string_constant(''.join(msg.value.chars))
else:
msg = 'RPyString_AsCharP(%s)' % self.expr(msg)
return 'fprintf(stderr, "%%s\\n", %s); abort();' % msg
def OP_DEBUG_LLINTERPCALL(self, op):
result = 'abort(); /* debug_llinterpcall should be unreachable */'
TYPE = self.lltypemap(op.result)
if TYPE is not Void:
typename = self.db.gettype(TYPE)
result += '\n%s = (%s)0;' % (self.expr(op.result),
cdecl(typename, ''))
return result
def OP_DEBUG_NONNULL_POINTER(self, op):
expr = self.expr(op.args[0])
return 'if ((-8192 <= (long)%s) && (((long)%s) < 8192)) abort();' % (
expr, expr)
def OP_INSTRUMENT_COUNT(self, op):
counter_label = op.args[1].value
self.db.instrument_ncounter = max(self.db.instrument_ncounter,
counter_label+1)
counter_label = self.expr(op.args[1])
return 'PYPY_INSTRUMENT_COUNT(%s);' % counter_label
def OP_IS_EARLY_CONSTANT(self, op):
return '%s = 0; /* IS_EARLY_CONSTANT */' % (self.expr(op.result),)
def OP_JIT_MARKER(self, op):
return '/* JIT_MARKER %s */' % op
def OP_JIT_FORCE_VIRTUALIZABLE(self, op):
return '/* JIT_FORCE_VIRTUALIZABLE %s */' % op
def OP_JIT_FORCE_VIRTUAL(self, op):
return '%s = %s; /* JIT_FORCE_VIRTUAL */' % (self.expr(op.result),
self.expr(op.args[0]))
def OP_JIT_IS_VIRTUAL(self, op):
return '%s = 0; /* JIT_IS_VIRTUAL */' % (self.expr(op.result),)
def OP_JIT_FORCE_QUASI_IMMUTABLE(self, op):
return '/* JIT_FORCE_QUASI_IMMUTABLE %s */' % op
def OP_JIT_FFI_SAVE_RESULT(self, op):
return '/* JIT_FFI_SAVE_RESULT %s */' % op
def OP_JIT_ENTER_PORTAL_FRAME(self, op):
return '/* JIT_ENTER_PORTAL_FRAME %s */' % op
def OP_JIT_LEAVE_PORTAL_FRAME(self, op):
return '/* JIT_LEAVE_PORTAL_FRAME %s */' % op
def OP_GET_GROUP_MEMBER(self, op):
typename = self.db.gettype(op.result.concretetype)
return '%s = (%s)_OP_GET_GROUP_MEMBER(%s, %s);' % (
self.expr(op.result),
cdecl(typename, ''),
self.expr(op.args[0]),
self.expr(op.args[1]))
def OP_GET_NEXT_GROUP_MEMBER(self, op):
typename = self.db.gettype(op.result.concretetype)
return '%s = (%s)_OP_GET_NEXT_GROUP_MEMBER(%s, %s, %s);' % (
self.expr(op.result),
cdecl(typename, ''),
self.expr(op.args[0]),
self.expr(op.args[1]),
self.expr(op.args[2]))
def getdebugfunctionname(self):
name = self.functionname
if name.startswith('pypy_g_'):
name = name[7:]
return name
def OP_DEBUG_RECORD_TRACEBACK(self, op):
return 'PYPY_DEBUG_RECORD_TRACEBACK("%s");' % (
self.getdebugfunctionname(),)
def OP_DEBUG_CATCH_EXCEPTION(self, op):
gottype = self.expr(op.args[0])
exprs = []
for c_limited_type in op.args[1:]:
exprs.append('%s == %s' % (gottype, self.expr(c_limited_type)))
return 'PYPY_DEBUG_CATCH_EXCEPTION("%s", %s, %s);' % (
self.getdebugfunctionname(), gottype, ' || '.join(exprs))
def OP_INT_BETWEEN(self, op):
if (isinstance(op.args[0], Constant) and
isinstance(op.args[2], Constant) and
op.args[2].value - op.args[0].value == 1):
# (a <= b < a+1) ----> (b == a)
return '%s = (%s == %s); /* was INT_BETWEEN */' % (
self.expr(op.result),
self.expr(op.args[1]),
self.expr(op.args[0]))
else:
return None # use the default
def OP_THREADLOCALREF_GET(self, op):
if isinstance(op.args[0], Constant):
typename = self.db.gettype(op.result.concretetype)
assert isinstance(op.args[0].value, CDefinedIntSymbolic)
fieldname = op.args[0].value.expr
assert fieldname.startswith('RPY_TLOFS_')
fieldname = fieldname[10:]
return '%s = (%s)RPY_THREADLOCALREF_GET(%s);' % (
self.expr(op.result),
cdecl(typename, ''),
fieldname)
else:
# this is used for the fall-back path in the JIT
return self.OP_THREADLOCALREF_LOAD(op)
def OP_THREADLOCALREF_LOAD(self, op):
typename = self.db.gettype(op.result.concretetype)
return 'OP_THREADLOCALREF_LOAD(%s, %s, %s);' % (
cdecl(typename, ''),
self.expr(op.args[0]),
self.expr(op.result))
def OP_THREADLOCALREF_STORE(self, op):
typename = self.db.gettype(op.args[1].concretetype)
return 'OP_THREADLOCALREF_STORE(%s, %s, %s);' % (
cdecl(typename, ''),
self.expr(op.args[0]),
self.expr(op.args[1]))
| 39.480337 | 89 | 0.540543 |
89825d4d73ef627799bcc97495c584a4398d2f08
| 6,989 |
py
|
Python
|
src/qibo/tests/test_core_hamiltonians.py
|
mofeing/qibo
|
3eb675ba893bf35f103d41a8a64c86aae9cbf616
|
[
"Apache-2.0"
] | 51 |
2021-05-21T23:50:49.000Z
|
2022-03-28T12:12:34.000Z
|
src/qibo/tests/test_core_hamiltonians.py
|
mofeing/qibo
|
3eb675ba893bf35f103d41a8a64c86aae9cbf616
|
[
"Apache-2.0"
] | 138 |
2021-05-20T14:29:13.000Z
|
2022-03-30T09:17:59.000Z
|
src/qibo/tests/test_core_hamiltonians.py
|
mofeing/qibo
|
3eb675ba893bf35f103d41a8a64c86aae9cbf616
|
[
"Apache-2.0"
] | 20 |
2021-06-11T18:13:09.000Z
|
2022-03-28T07:32:09.000Z
|
"""Test methods in `qibo/core/hamiltonians.py`."""
import pytest
import numpy as np
from qibo import hamiltonians, K
from qibo.tests.utils import random_complex
def test_hamiltonian_init():
with pytest.raises(TypeError):
H = hamiltonians.Hamiltonian(2, "test")
H1 = hamiltonians.Hamiltonian(2, np.eye(4))
H1 = hamiltonians.Hamiltonian(2, np.eye(4))
H1 = hamiltonians.Hamiltonian(2, K.eye(4))
H1 = hamiltonians.Hamiltonian(2, K.eye(4))
with pytest.raises(ValueError):
H1 = hamiltonians.Hamiltonian(-2, np.eye(4))
with pytest.raises(RuntimeError):
H2 = hamiltonians.Hamiltonian(np.eye(2), np.eye(4))
with pytest.raises(ValueError):
H3 = hamiltonians.Hamiltonian(4, np.eye(10))
@pytest.mark.parametrize("dtype", K.numeric_types)
def test_hamiltonian_algebraic_operations(dtype):
"""Test basic hamiltonian overloading."""
def transformation_a(a, b):
c1 = dtype(0.1)
return a + c1 * b
def transformation_b(a, b):
c1 = dtype(2)
c2 = dtype(3.5)
return c1 * a - b * c2
def transformation_c(a, b, use_eye=False):
c1 = dtype(4.5)
if use_eye:
return a + c1 * np.eye(a.shape[0]) - b
else:
return a + c1 - b
def transformation_d(a, b, use_eye=False):
c1 = dtype(10.5)
c2 = dtype(2)
if use_eye:
return c1 * np.eye(a.shape[0]) - a + c2 * b
else:
return c1 - a + c2 * b
H1 = hamiltonians.XXZ(nqubits=2, delta=0.5)
H2 = hamiltonians.XXZ(nqubits=2, delta=1)
mH1, mH2 = K.to_numpy(H1.matrix), K.to_numpy(H2.matrix)
hH1 = transformation_a(mH1, mH2)
hH2 = transformation_b(mH1, mH2)
hH3 = transformation_c(mH1, mH2, use_eye=True)
hH4 = transformation_d(mH1, mH2, use_eye=True)
HT1 = transformation_a(H1, H2)
HT2 = transformation_b(H1, H2)
HT3 = transformation_c(H1, H2)
HT4 = transformation_d(H1, H2)
K.assert_allclose(hH1, HT1.matrix)
K.assert_allclose(hH2, HT2.matrix)
K.assert_allclose(hH3, HT3.matrix)
K.assert_allclose(hH4, HT4.matrix)
def test_hamiltonian_addition():
H1 = hamiltonians.Y(nqubits=3)
H2 = hamiltonians.TFIM(nqubits=3, h=1.0)
H = H1 + H2
matrix = H1.matrix + H2.matrix
K.assert_allclose(H.matrix, matrix)
H = H1 - 0.5 * H2
matrix = H1.matrix - 0.5 * H2.matrix
K.assert_allclose(H.matrix, matrix)
H1 = hamiltonians.XXZ(nqubits=2, delta=0.5)
H2 = hamiltonians.XXZ(nqubits=3, delta=0.1)
with pytest.raises(RuntimeError):
R = H1 + H2
with pytest.raises(RuntimeError):
R = H1 - H2
def test_hamiltonian_operation_errors():
"""Testing hamiltonian not implemented errors."""
H1 = hamiltonians.XXZ(nqubits=2, delta=0.5)
H2 = hamiltonians.XXZ(nqubits=2, delta=0.1)
with pytest.raises(NotImplementedError):
R = H1 * H2
with pytest.raises(NotImplementedError):
R = H1 + "a"
with pytest.raises(NotImplementedError):
R = H2 - (2,)
with pytest.raises(NotImplementedError):
R = [3] - H1
def test_hamiltonian_matmul():
"""Test matrix multiplication between Hamiltonians and state vectors."""
H1 = hamiltonians.TFIM(nqubits=3, h=1.0)
H2 = hamiltonians.Y(nqubits=3)
m1 = K.to_numpy(H1.matrix)
m2 = K.to_numpy(H2.matrix)
K.assert_allclose((H1 @ H2).matrix, m1 @ m2)
K.assert_allclose((H2 @ H1).matrix, m2 @ m1)
v = random_complex(8, dtype=m1.dtype)
m = random_complex((8, 8), dtype=m1.dtype)
H1v = H1 @ K.cast(v)
H1m = H1 @ K.cast(m)
K.assert_allclose(H1v, m1.dot(v))
K.assert_allclose(H1m, m1 @ m)
from qibo.core.states import VectorState
H1state = H1 @ VectorState.from_tensor(K.cast(v))
K.assert_allclose(H1state, m1.dot(v))
with pytest.raises(ValueError):
H1 @ np.zeros((8, 8, 8), dtype=m1.dtype)
with pytest.raises(NotImplementedError):
H1 @ 2
@pytest.mark.parametrize("dense", [True, False])
def test_hamiltonian_exponentiation(dense):
from scipy.linalg import expm
H = hamiltonians.XXZ(nqubits=2, delta=0.5, dense=dense)
target_matrix = expm(-0.5j * K.to_numpy(H.matrix))
K.assert_allclose(H.exp(0.5), target_matrix)
H = hamiltonians.XXZ(nqubits=2, delta=0.5, dense=dense)
_ = H.eigenvectors()
K.assert_allclose(H.exp(0.5), target_matrix)
@pytest.mark.parametrize("dense", [True, False])
@pytest.mark.parametrize("density_matrix", [True, False])
def test_hamiltonian_expectation(dense, density_matrix):
h = hamiltonians.XXZ(nqubits=3, delta=0.5, dense=dense)
matrix = K.to_numpy(h.matrix)
if density_matrix:
state = random_complex((8, 8))
state = state + state.T.conj()
norm = np.trace(state)
target_ev = np.trace(matrix.dot(state)).real
else:
state = random_complex(8)
norm = np.sum(np.abs(state) ** 2)
target_ev = np.sum(state.conj() * matrix.dot(state)).real
K.assert_allclose(h.expectation(state), target_ev)
K.assert_allclose(h.expectation(state, True), target_ev / norm)
def test_hamiltonian_expectation_errors():
h = hamiltonians.XXZ(nqubits=3, delta=0.5)
state = random_complex((4, 4, 4))
with pytest.raises(ValueError):
h.expectation(state)
with pytest.raises(TypeError):
h.expectation("test")
@pytest.mark.parametrize("dtype", K.numeric_types)
@pytest.mark.parametrize("dense", [True, False])
def test_hamiltonian_eigenvalues(dtype, dense):
"""Testing hamiltonian eigenvalues scaling."""
H1 = hamiltonians.XXZ(nqubits=2, delta=0.5, dense=dense)
H1_eigen = H1.eigenvalues()
hH1_eigen = K.eigvalsh(H1.matrix)
K.assert_allclose(H1_eigen, hH1_eigen)
c1 = dtype(2.5)
H2 = c1 * H1
hH2_eigen = K.eigvalsh(c1 * H1.matrix)
K.assert_allclose(H2._eigenvalues, hH2_eigen)
c2 = dtype(-11.1)
H3 = H1 * c2
hH3_eigen = K.eigvalsh(H1.matrix * c2)
K.assert_allclose(H3._eigenvalues, hH3_eigen)
@pytest.mark.parametrize("dtype", K.numeric_types)
@pytest.mark.parametrize("dense", [True, False])
def test_hamiltonian_eigenvectors(dtype, dense):
"""Testing hamiltonian eigenvectors scaling."""
H1 = hamiltonians.XXZ(nqubits=2, delta=0.5, dense=dense)
V1 = K.to_numpy(H1.eigenvectors())
U1 = K.to_numpy(H1.eigenvalues())
K.assert_allclose(H1.matrix, V1 @ np.diag(U1) @ V1.T)
# Check ground state
K.assert_allclose(H1.ground_state(), V1[:, 0])
c1 = dtype(2.5)
H2 = c1 * H1
V2 = K.to_numpy(H2._eigenvectors)
U2 = K.to_numpy(H2._eigenvalues)
K.assert_allclose(H2.matrix, V2 @ np.diag(U2) @ V2.T)
c2 = dtype(-11.1)
H3 = H1 * c2
V3 = K.to_numpy(H3.eigenvectors())
U3 = K.to_numpy(H3._eigenvalues)
K.assert_allclose(H3.matrix, V3 @ np.diag(U3) @ V3.T)
c3 = dtype(0)
H4 = c3 * H1
V4 = K.to_numpy(H4._eigenvectors)
U4 = K.to_numpy(H4._eigenvalues)
K.assert_allclose(H4.matrix, V4 @ np.diag(U4) @ V4.T)
| 31.913242 | 76 | 0.648877 |
d058dbb6f63bd6e14e75c30afb2b3858681cd80f
| 1,337 |
py
|
Python
|
digest/migrations/0014_auto_20150731_0859.py
|
PURNA-ROCK/pythondigest
|
ba21758a25a47de19800b208c420f16d6688a16b
|
[
"MIT"
] | 124 |
2015-08-17T19:41:16.000Z
|
2022-01-12T00:25:52.000Z
|
digest/migrations/0014_auto_20150731_0859.py
|
PURNA-ROCK/pythondigest
|
ba21758a25a47de19800b208c420f16d6688a16b
|
[
"MIT"
] | 62 |
2015-08-17T02:13:20.000Z
|
2020-04-17T19:07:40.000Z
|
digest/migrations/0014_auto_20150731_0859.py
|
PURNA-ROCK/pythondigest
|
ba21758a25a47de19800b208c420f16d6688a16b
|
[
"MIT"
] | 73 |
2015-08-18T13:50:47.000Z
|
2021-09-27T14:09:47.000Z
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [('digest', '0013_auto_20150730_1613'), ]
operations = [migrations.CreateModel(
name='Tag',
fields=[('id', models.AutoField(verbose_name='ID',
serialize=False,
auto_created=True,
primary_key=True)),
('name', models.CharField(
max_length=255,
verbose_name='\u041d\u0430\u0437\u0432\u0430\u043d\u0438\u0435 \u0442\u044d\u0433\u0430')
), ],
options={
'verbose_name':
'\u0422\u044d\u0433 \u043a \u043d\u043e\u0432\u043e\u0441\u0442\u0438',
'verbose_name_plural':
'\u0422\u044d\u0433\u0438 \u043a \u043d\u043e\u0432\u043e\u0441\u0442\u044f\u043c',
}, ),
migrations.AddField(
model_name='item',
name='tags',
field=models.ManyToManyField(to='digest.Tag',
null=True,
verbose_name='\u0422\u044d\u0433\u0438',
blank=True), ), ]
| 40.515152 | 109 | 0.495138 |
c7c8fd8e358a81126d6721dc7d9201b2c8caa890
| 3,117 |
py
|
Python
|
FD/lenet.py
|
Lucasc-99/Meta-set
|
14bd29aa3facb9e44d7eb7af66b45b626cbd4ed5
|
[
"MIT"
] | 13 |
2021-05-26T09:45:41.000Z
|
2021-12-27T20:56:53.000Z
|
FD/lenet.py
|
Lucasc-99/Meta-set
|
14bd29aa3facb9e44d7eb7af66b45b626cbd4ed5
|
[
"MIT"
] | null | null | null |
FD/lenet.py
|
Lucasc-99/Meta-set
|
14bd29aa3facb9e44d7eb7af66b45b626cbd4ed5
|
[
"MIT"
] | 4 |
2021-08-29T08:56:36.000Z
|
2022-03-05T09:57:39.000Z
|
import torch
import torch.nn as nn
import torch.nn.functional as F
def init_weights(m):
classname = m.__class__.__name__
if classname.find('Conv2d') != -1 or classname.find('ConvTranspose2d') != -1:
nn.init.kaiming_uniform_(m.weight)
nn.init.zeros_(m.bias)
elif classname.find('BatchNorm') != -1:
nn.init.normal_(m.weight, 1.0, 0.02)
nn.init.zeros_(m.bias)
elif classname.find('Linear') != -1:
nn.init.xavier_normal_(m.weight)
nn.init.zeros_(m.bias)
# For SVHN dataset
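# (Editor's note, not in the original source: the fully connected layer below
# assumes 32x32 RGB inputs -- the three stride-2, padding-2, kernel-5
# convolutions reduce 32x32 to 4x4, matching the 256 * 4 * 4 flattened size.)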
class DTN(nn.Module):
def __init__(self):
super(DTN, self).__init__()
self.conv_params = nn.Sequential(
nn.Conv2d(3, 64, kernel_size=5, stride=2, padding=2),
nn.BatchNorm2d(64),
nn.Dropout2d(0.1),
nn.ReLU(),
nn.Conv2d(64, 128, kernel_size=5, stride=2, padding=2),
nn.BatchNorm2d(128),
nn.Dropout2d(0.3),
nn.ReLU(),
nn.Conv2d(128, 256, kernel_size=5, stride=2, padding=2),
nn.BatchNorm2d(256),
nn.Dropout2d(0.5),
nn.ReLU()
)
self.fc_params = nn.Sequential(
nn.Linear(256 * 4 * 4, 512),
nn.BatchNorm1d(512),
nn.ReLU(),
nn.Dropout()
)
self.classifier = nn.Linear(512, 10)
self.__in_features = 512
def forward(self, x):
x = self.conv_params(x)
x = x.view(x.size(0), -1)
x = self.fc_params(x)
y = self.classifier(x)
return x, y
def output_num(self):
return self.__in_features
class LeNet(nn.Module):
def __init__(self):
super(LeNet, self).__init__()
self.conv_params = nn.Sequential(
nn.Conv2d(1, 20, kernel_size=5),
nn.MaxPool2d(2),
nn.ReLU(),
nn.Conv2d(20, 50, kernel_size=5),
nn.Dropout2d(p=0.5),
nn.MaxPool2d(2),
nn.ReLU(),
)
self.fc_params = nn.Sequential(nn.Linear(50 * 4 * 4, 500), nn.ReLU(), nn.Dropout(p=0.5))
self.classifier = nn.Linear(500, 10)
self.__in_features = 500
def forward(self, x):
x = self.conv_params(x)
x = x.view(x.size(0), -1)
x = self.fc_params(x)
y = self.classifier(x)
return x, y
def output_num(self):
return self.__in_features
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.conv1 = nn.Conv2d(3, 32, 3, 1)
self.conv2 = nn.Conv2d(32, 64, 3, 1)
self.dropout1 = nn.Dropout2d(0.25)
self.dropout2 = nn.Dropout2d(0.5)
self.fc1 = nn.Linear(9216, 128)
self.fc2 = nn.Linear(128, 10)
def forward(self, x):
x = self.conv1(x)
x = F.relu(x)
x = self.conv2(x)
x = F.max_pool2d(x, 2)
x = self.dropout1(x)
x = torch.flatten(x, 1)
x = self.fc1(x)
# x = F.relu(x)
# x = self.dropout2(x)
# x = self.fc2(x)
output = x
# output = F.log_softmax(x, dim=1)
return output
| 28.081081 | 96 | 0.53128 |
af3eaa518e5fce8535319284506107764cd20e0a
| 4,747 |
py
|
Python
|
examples/sweeper.py
|
BitCashCF/multimerchant-python
|
9dcf40c1542f38ffa2b39a21d8dba8fb7fb2b027
|
[
"MIT"
] | 29 |
2015-01-18T18:44:58.000Z
|
2022-03-31T02:28:05.000Z
|
examples/sweeper.py
|
BitCashCF/multimerchant-python
|
9dcf40c1542f38ffa2b39a21d8dba8fb7fb2b027
|
[
"MIT"
] | 2 |
2016-08-12T06:18:13.000Z
|
2017-07-18T08:49:01.000Z
|
examples/sweeper.py
|
BitCashCF/multimerchant-python
|
9dcf40c1542f38ffa2b39a21d8dba8fb7fb2b027
|
[
"MIT"
] | 19 |
2015-05-03T19:28:10.000Z
|
2022-02-09T09:46:10.000Z
|
from multimerchant.wallet import Wallet
import time
from block_io import BlockIo
import six
import os
import sys
try:
os.environ["HD_PRIVKEY"]
except KeyError:
print "Please generate an HD wallet first. See README.rst on https://github.com/blockio/multimerchant-python"
print "Or do this:"
print "\t $ python"
print "\t >> from multimerchant.wallet import Wallet"
print "\t >> print \"My HD Private Key:\", Wallet.new_generate_wallet(network=\"DOGETEST\")"
print "\t >> quit()"
print "\t $ HD_PRIVKEY=STRING_FROM_ABOVE python sweeper.py"
print "... where sweeper.py is this file."
sys.exit(1)
# Please use the Dogecoin Testnet here -- you have free coins on sign up at Block.io
# Dogecoin Testnet because of the static demo amount for withdrawals/sweeps below
block_io = BlockIo('Your Dogecoin Testnet API Key', 'Your Secret PIN', 2)
network = block_io.get_balance()['data']['network'] # extract the network of our API Key
# create a wallet using a master secret -- this one is super insecure, but it's an example
# don't have an HD privkey yet? Create one by using:
#
# $ python
# >> from multimerchant.wallet import Wallet
# >> hd_privkey = Wallet.new_random_wallet(network="DOGETEST").serialize()
# >> print "My Super Secret HD Wallet:", hd_privkey
#
# The 'network' value above can be: BTC, BTCTEST, DOGE, DOGETEST, LTC, LTCTEST
# Get the relevant network's API Key at Block.io for use in this example
w = Wallet.deserialize(os.environ['HD_PRIVKEY'], network=network)
# or generate an insecure version like this:
# w = Wallet.from_master_secret("correct horse battery staple", network=network)
# BIP32 wallets are children derived from a single master seed (you generated this with the instructions above)
# You can specify a child by an ID. For instance, for child_id=1:
# let's generate 5 wallets
addresses = []
children = [] # the payment addresses we'll generate from the seed
for child_id in range(1,6):
child = w.get_child(child_id, is_prime=True, as_private=True)
addresses.insert(len(addresses), child.to_address())
children.insert(len(children), child)
six.print_("Child No.", child_id, ". Address="+child.to_address(), "PrivKey="+child.export_to_wif())
# check the balance for these addresses using Block.io
all_addresses = ','.join(str(x) for x in addresses)
response = block_io.get_address_balance(addresses=all_addresses) # the addresses parameter can be a comma-separated list of addresses here
# NOTE: Amounts deposited into addresses through Block.io green addresses will be immediately available
# even with 0 confirmations
six.print_(">> Total Balance in All Addresses:", response['data']['available_balance'], network)
for addrinfo in response['data']['balances']:
six.print_(" >> Balances in", addrinfo['address'])
six.print_(" >>> Available:", addrinfo['available_balance'], network) # either confirmed or from a green address
six.print_(" >>> Pending:", addrinfo['pending_received_balance'], network) # is neither from a green address, nor is it confirmed
# let's transfer some testnet coins into the first child address
amounts = "500.0" # DOGETEST
response = block_io.withdraw(to_addresses=children[0].to_address(), amounts=amounts)
six.print_("* Depositing", amounts, network, "into", children[0].to_address())
six.print_(">> Deposit Proof Transaction ID:", response['data']['txid']) # you can view this on https://chain.so immediately
time.sleep(2) # let the transaction propagate on the network for a bit
# so far so good. Let's sweep the coins out of the first child, and into the second child
# NOTE: While you can specify the number of confirmations required for coins to be swept,
# please beware that deposits from green addresses will show as available in get_address_balance calls.
# This might cause confusion when the sweep_from_address call returns an error when sweeping amounts with
# confirmations > 0
six.print_("* Sweeping all funds (confirmed and unconfirmed) from", children[0].to_address(), "to", children[1].to_address())
response = block_io.sweep_from_address(from_address=children[0].to_address(), private_key=children[0].export_to_wif(), to_address=children[1].to_address())
six.print_(">> Amount swept from", children[0].to_address(), "into", children[1].to_address(), "=", response['data']['amount_sent'], network)
six.print_(">> Transaction ID:", response['data']['txid'])
# Note: the swept amount does not need to be confirmed. In the above case, the amount was not confirmed
# but was swept into the destination address immediately
# You can sweep only confirmed amounts if you wish by adding "confirmations=X" to the sweep_from_address call,
# where X is the number of confirmations
| 47.949495 | 155 | 0.742364 |
53ec161ec76e395cec0aa33e853686b12b13f82c
| 652 |
py
|
Python
|
omnitool/shared.py
|
Berserker66/omnitool
|
6bf88ba86a7c68a968f8c8db569b57e6ba836e8e
|
[
"MIT"
] | 40 |
2015-03-15T14:38:24.000Z
|
2021-12-18T04:30:39.000Z
|
omnitool/shared.py
|
Berserker66/omnitool
|
6bf88ba86a7c68a968f8c8db569b57e6ba836e8e
|
[
"MIT"
] | 31 |
2015-03-14T12:12:14.000Z
|
2022-02-27T17:50:56.000Z
|
omnitool/shared.py
|
Berserker66/omnitool
|
6bf88ba86a7c68a968f8c8db569b57e6ba836e8e
|
[
"MIT"
] | 13 |
2015-07-31T11:40:41.000Z
|
2021-04-09T14:36:07.000Z
|
import sys
import os
from pathlib import Path
from appdirs import user_config_dir
from .version import Version
__all__ = ("__version__", "appdata", "cachepath", "cache", "lang", "theme")
__version__ = Version(180501)
appdata = user_config_dir('omnitool', "", roaming=True)
cachepath = os.path.join(appdata, "cache.dill")
##filled in by omnitool\__init__.py:
cache = None
lang = None
theme = None
exit_prog = None
cores = 1 # amount of cpu cores
##end of autofill
if getattr(sys, 'frozen', False):
datadir = Path(sys.executable).parent
else:
datadir = Path(__file__).parent
if False:
from .Language import english as lang # IDE hook
| 22.482759 | 75 | 0.722393 |
ab62c3bc191b88bcbd1f9fd1ad94b1cb13e0f200
| 21,880 |
py
|
Python
|
libqtile/layout/tree.py
|
ValentijnvdBeek/qtile
|
92bfafe2cf98789e531b2381c92eb689742c667c
|
[
"MIT"
] | 4 |
2015-07-14T00:03:22.000Z
|
2020-10-03T04:45:05.000Z
|
libqtile/layout/tree.py
|
ValentijnvdBeek/qtile
|
92bfafe2cf98789e531b2381c92eb689742c667c
|
[
"MIT"
] | null | null | null |
libqtile/layout/tree.py
|
ValentijnvdBeek/qtile
|
92bfafe2cf98789e531b2381c92eb689742c667c
|
[
"MIT"
] | null | null | null |
# -*- coding:utf-8 -*-
# Copyright (c) 2011 Mounier Florian
# Copyright (c) 2011 Paul Colomiets
# Copyright (c) 2012 roger
# Copyright (c) 2012-2014 Tycho Andersen
# Copyright (c) 2013 Tao Sauvage
# Copyright (c) 2013 Arnas Udovicius
# Copyright (c) 2014 ramnes
# Copyright (c) 2014 Sean Vig
# Copyright (c) 2014 Nathan Hoad
# Copyright (c) 2014 dequis
# Copyright (c) 2014 Thomas Sarboni
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from .base import Layout
from .. import drawer, hook, window
to_superscript = dict(zip(map(ord, u'0123456789'), map(ord, u'⁰¹²³⁴⁵⁶⁷⁸⁹')))
class TreeNode(object):
def __init__(self):
self.children = []
self.parent = None
self.expanded = True
self._children_top = None
self._children_bot = None
def add(self, node, hint=None):
"""Add a node below this node
        The `hint` is a node to place the new node after in this node's
        children.
"""
node.parent = self
if hint is not None:
try:
idx = self.children.index(hint)
except ValueError:
pass
else:
self.children.insert(idx + 1, node)
return
self.children.append(node)
def draw(self, layout, top, level=0):
"""Draw the node and its children to a layout
Draws this node to the given layout (presumably a TreeTab), starting
from a y-offset of `top` and at the given level.
"""
self._children_top = top
if self.expanded:
for i in self.children:
top = i.draw(layout, top, level)
self._children_bot = top
return top
def button_press(self, x, y):
"""Returns self or sibling which got the click"""
# if we store the locations of each child, it would be possible to do
# this without having to traverse the tree...
if not (self._children_top <= y < self._children_bot):
return
for i in self.children:
res = i.button_press(x, y)
if res is not None:
return res
def add_superscript(self, title):
"""Prepend superscript denoting the number of hidden children"""
if not self.expanded and self.children:
return u"{:d}".format(
len(self.children)
).translate(to_superscript).encode('utf-8') + title
return title
def get_first_window(self):
"""Find the first Window under this node
Returns self if this is a `Window`, otherwise finds first `Window` by
depth-first search
"""
if isinstance(self, Window):
return self
for i in self.children:
node = i.get_first_window()
if node:
return node
def get_last_window(self):
"""Find the last Window under this node
Finds last `Window` by depth-first search, otherwise returns self if
this is a `Window`.
"""
for i in reversed(self.children):
node = i.get_last_window()
if node:
return node
if isinstance(self, Window):
return self
def get_next_window(self):
if self.children and self.expanded:
return self.children[0]
node = self
while not isinstance(node, Root):
parent = node.parent
idx = parent.children.index(node)
for i in range(idx + 1, len(parent.children)):
res = parent.children[i].get_first_window()
if res:
return res
node = parent
def get_prev_window(self):
node = self
while not isinstance(node, Root):
parent = node.parent
idx = parent.children.index(node)
if idx == 0 and isinstance(parent, Window):
return parent
for i in range(idx - 1, -1, -1):
res = parent.children[i].get_last_window()
if res:
return res
node = parent
class Root(TreeNode):
def __init__(self, sections, default_section=None):
super(Root, self).__init__()
self.sections = {}
for section in sections:
self.add_section(section)
if default_section is None:
self.def_section = self.children[0]
else:
self.def_section = self.sections[default_section]
def add(self, win, hint=None):
"""Add a new window
        Adds a new `Window` to the tree. The location of the new node is
        determined by checking, in order:
* `hint` kwarg - place the node next to this node
* win.tree_section - place the window in the given section, by name
* default section - fallback to default section (first section, if
not otherwise set)
"""
parent = None
if hint is not None:
parent = hint.parent
if parent is None:
sect = getattr(win, 'tree_section', None)
if sect is not None:
parent = self.sections.get(sect)
if parent is None:
parent = self.def_section
node = Window(win)
parent.add(node, hint=hint)
return node
def add_section(self, name):
"""Add a new Section with the given name"""
if name in self.sections:
raise ValueError("Duplicate section name")
node = Section(name)
node.parent = self
self.sections[name] = node
self.children.append(node)
def del_section(self, name):
"""Remove the Section with the given name"""
if name not in self.sections:
raise ValueError("Section name not found")
if len(self.children) == 1:
raise ValueError("Can't delete last section")
sec = self.sections[name]
        # move the children of the deleted section to the previous section
        # if deleting the first section, add children to the second section
        idx = self.children.index(sec)
        next_sec = self.children[1] if idx == 0 else self.children[idx - 1]
        # delete old section and forget its name, reparent children to next section
        del self.children[idx]
        del self.sections[name]
        next_sec.children.extend(sec.children)
        for i in sec.children:
            i.parent = next_sec
class Section(TreeNode):
def __init__(self, title):
super(Section, self).__init__()
self.title = title
def draw(self, layout, top, level=0):
del layout._layout.width # no centering
# draw a horizontal line above the section
layout._drawer.draw_hbar(
layout.section_fg,
0,
layout.panel_width,
top,
linewidth=1
)
# draw the section title
layout._layout.font_size = layout.section_fontsize
layout._layout.text = self.add_superscript(self.title)
layout._layout.colour = layout.section_fg
layout._layout.draw(
x=layout.section_left,
y=top + layout.section_top
)
top += layout._layout.height + \
layout.section_top + \
layout.section_padding
# run the TreeNode draw to draw children (if expanded)
top = super(Section, self).draw(layout, top, level)
return top + layout.section_bottom
class Window(TreeNode):
def __init__(self, win):
super(Window, self).__init__()
self.window = win
self._title_top = None
def draw(self, layout, top, level=0):
self._title_top = top
# setup parameters for drawing self
left = layout.padding_left + level * layout.level_shift
layout._layout.font_size = layout.fontsize
layout._layout.text = self.add_superscript(self.window.name)
if self.window is layout._focused:
fg = layout.active_fg
bg = layout.active_bg
else:
fg = layout.inactive_fg
bg = layout.inactive_bg
layout._layout.colour = fg
layout._layout.width = layout.panel_width - left
# get a text frame from the above
framed = layout._layout.framed(
layout.border_width,
bg,
layout.padding_x,
layout.padding_y
)
# draw the text frame at the given point
framed.draw_fill(left, top)
top += framed.height + layout.vspace + layout.border_width
# run the TreeNode draw to draw children (if expanded)
return super(Window, self).draw(layout, top, level + 1)
def button_press(self, x, y):
"""Returns self if clicked on title else returns sibling"""
if self._title_top <= y < self._children_top:
return self
return super(Window, self).button_press(x, y)
def remove(self):
"""Removes this Window
If this window has children, the first child takes the place of this
window, and any remaining children are reparented to that node
"""
if self.children:
head = self.children[0]
# add the first child to our parent, next to ourselves
self.parent.add(head, hint=self)
# move remaining children to be under the new head
for i in self.children[1:]:
head.add(i)
self.parent.children.remove(self)
del self.children
class TreeTab(Layout):
"""Tree Tab Layout
    This layout works just like Max but displays a tree of the windows at the
    left border of the screen, which gives you an overview of all open windows.
It's designed to work with ``uzbl-browser`` but works with other windows
too.
"""
defaults = [
("bg_color", "000000", "Background color of tabs"),
("active_bg", "000080", "Background color of active tab"),
("active_fg", "ffffff", "Foreground color of active tab"),
("inactive_bg", "606060", "Background color of inactive tab"),
("inactive_fg", "ffffff", "Foreground color of inactive tab"),
("margin_left", 6, "Left margin of tab panel"),
("margin_y", 6, "Vertical margin of tab panel"),
("padding_left", 6, "Left padding for tabs"),
("padding_x", 6, "Left padding for tab label"),
("padding_y", 2, "Top padding for tab label"),
("border_width", 2, "Width of the border"),
("vspace", 2, "Space between tabs"),
("level_shift", 8, "Shift for children tabs"),
("font", "sans", "Font"),
("fontsize", 14, "Font pixel size."),
("fontshadow", None, "font shadow color, default is None (no shadow)"),
("section_fontsize", 11, "Font pixel size of section label"),
("section_fg", "ffffff", "Color of section label"),
("section_top", 4, "Top margin of section label"),
("section_bottom", 6, "Bottom margin of section"),
("section_padding", 4, "Bottom of margin section label"),
("section_left", 4, "Left margin of section label"),
("panel_width", 150, "Width of the left panel"),
("sections", ['Default'], "Foreground color of inactive tab"),
("name", "treetab", "Name of this layout."),
("previous_on_rm", False, "Focus previous window on close instead of first."),
]
def __init__(self, **config):
Layout.__init__(self, **config)
self.add_defaults(TreeTab.defaults)
self._focused = None
self._panel = None
self._drawer = None
self._layout = None
self._tree = Root(self.sections)
self._nodes = {}
def clone(self, group):
c = Layout.clone(self, group)
c._focused = None
c._panel = None
c._tree = Root(self.sections)
return c
def _get_window(self):
return self._focused
def focus(self, win):
self._focused = win
def focus_first(self):
win = self._tree.get_first_window()
if win:
return win.window
def focus_last(self):
win = self._tree.get_last_window()
if win:
return win.window
def focus_next(self, client):
win = self._nodes[client].get_next_window()
if win:
return win.window
def focus_previous(self, client):
win = self._nodes[client].get_prev_window()
if win:
return win.window
def blur(self):
        # Does not clear the current window; it will change when a new
        # one is focused. This works better when a floating window will
        # be the next focused one.
pass
def add(self, win):
if self._focused:
node = self._tree.add(win, hint=self._nodes[self._focused])
else:
node = self._tree.add(win)
self._nodes[win] = node
def remove(self, win):
if win not in self._nodes:
return
if self.previous_on_rm:
self._focused = self.focus_previous(win)
else:
self._focused = self.focus_first()
if self._focused is win:
self._focused = None
self._nodes[win].remove()
del self._nodes[win]
self.draw_panel()
def _create_panel(self):
self._panel = window.Internal.create(
self.group.qtile,
0,
0,
self.panel_width,
100
)
self._create_drawer()
self._panel.handle_Expose = self._panel_Expose
self._panel.handle_ButtonPress = self._panel_ButtonPress
self.group.qtile.windowMap[self._panel.window.wid] = self._panel
hook.subscribe.client_name_updated(self.draw_panel)
hook.subscribe.focus_change(self.draw_panel)
def _panel_Expose(self, e):
self.draw_panel()
def draw_panel(self, *args):
if not self._panel:
return
self._drawer.clear(self.bg_color)
self._tree.draw(self, 0)
self._drawer.draw(offsetx=0, width=self.panel_width)
def _panel_ButtonPress(self, event):
node = self._tree.button_press(event.event_x, event.event_y)
if node:
self.group.focus(node.window, False)
def configure(self, client, screen):
if self._nodes and client is self._focused:
client.place(
screen.x, screen.y,
screen.width, screen.height,
0,
None
)
client.unhide()
else:
client.hide()
def finalize(self):
Layout.finalize(self)
if self._drawer is not None:
self._drawer.finalize()
def info(self):
d = Layout.info(self)
d["clients"] = [x.name for x in self._nodes]
d["sections"] = [x.title for x in self._tree.children]
return d
def show(self, screen):
if not self._panel:
self._create_panel()
panel, body = screen.hsplit(self.panel_width)
self._resize_panel(panel)
self._panel.unhide()
def hide(self):
if self._panel:
self._panel.hide()
def cmd_down(self):
"""Switch down in the window list"""
win = None
if self._focused:
win = self._nodes[self._focused].get_next_window()
if not win:
win = self._tree.get_first_window()
if win:
self.group.focus(win.window, False)
self._focused = win.window if win else None
cmd_next = cmd_down
def cmd_up(self):
"""Switch up in the window list"""
win = None
if self._focused:
win = self._nodes[self._focused].get_prev_window()
if not win:
win = self._tree.get_last_window()
if win:
self.group.focus(win.window, False)
self._focused = win.window if win else None
cmd_previous = cmd_up
def cmd_move_up(self):
win = self._focused
if not win:
return
node = self._nodes[win]
p = node.parent.children
idx = p.index(node)
if idx > 0:
p[idx] = p[idx - 1]
p[idx - 1] = node
self.draw_panel()
def cmd_move_down(self):
win = self._focused
if not win:
return
node = self._nodes[win]
p = node.parent.children
idx = p.index(node)
if idx < len(p) - 1:
p[idx] = p[idx + 1]
p[idx + 1] = node
self.draw_panel()
def cmd_move_left(self):
win = self._focused
if not win:
return
node = self._nodes[win]
if not isinstance(node.parent, Section):
node.parent.children.remove(node)
node.parent.parent.add(node)
self.draw_panel()
def cmd_add_section(self, name):
"""Add named section to tree"""
self._tree.add_section(name)
self.draw_panel()
def cmd_del_section(self, name):
"""Add named section to tree"""
self._tree.del_section(name)
self.draw_panel()
def cmd_section_up(self):
win = self._focused
if not win:
return
node = self._nodes[win]
snode = node
while not isinstance(snode, Section):
snode = snode.parent
idx = snode.parent.children.index(snode)
if idx > 0:
node.parent.children.remove(node)
snode.parent.children[idx - 1].add(node)
self.draw_panel()
def cmd_section_down(self):
win = self._focused
if not win:
return
node = self._nodes[win]
snode = node
while not isinstance(snode, Section):
snode = snode.parent
idx = snode.parent.children.index(snode)
if idx < len(snode.parent.children) - 1:
node.parent.children.remove(node)
snode.parent.children[idx + 1].add(node)
self.draw_panel()
def cmd_sort_windows(self, sorter, create_sections=True):
"""Sorts window to sections using sorter function
Parameters
==========
sorter : function with single arg returning string
returns name of the section where window should be
create_sections :
if this parameter is True (default), if sorter returns unknown
section name it will be created dynamically
"""
for sec in self._tree.children:
for win in sec.children[:]:
nname = sorter(win.window)
if nname is None or nname == sec.title:
continue
try:
nsec = self._tree.sections[nname]
except KeyError:
if create_sections:
self._tree.add_section(nname)
nsec = self._tree.sections[nname]
else:
continue
sec.children.remove(win)
nsec.children.append(win)
win.parent = nsec
self.draw_panel()
def cmd_move_right(self):
win = self._focused
if not win:
return
node = self._nodes[win]
idx = node.parent.children.index(node)
if idx > 0:
node.parent.children.remove(node)
node.parent.children[idx - 1].add(node)
self.draw_panel()
def cmd_expand_branch(self):
if not self._focused:
return
self._nodes[self._focused].expanded = True
self.draw_panel()
def cmd_collapse_branch(self):
if not self._focused:
return
self._nodes[self._focused].expanded = False
self.draw_panel()
def cmd_increase_ratio(self):
self.panel_width += 10
self.group.layoutAll()
def cmd_decrease_ratio(self):
self.panel_width -= 10
self.group.layoutAll()
def _create_drawer(self):
if self._drawer is None:
self._drawer = drawer.Drawer(
self.group.qtile,
self._panel.window.wid,
self.panel_width,
self.group.screen.dheight
)
self._drawer.clear(self.bg_color)
self._layout = self._drawer.textlayout(
"",
"ffffff",
self.font,
self.fontsize,
self.fontshadow,
wrap=False
)
def layout(self, windows, screen):
panel, body = screen.hsplit(self.panel_width)
self._resize_panel(panel)
Layout.layout(self, windows, body)
def _resize_panel(self, rect):
if self._panel:
self._panel.place(
rect.x, rect.y,
rect.width, rect.height,
0,
None
)
self._create_drawer()
self.draw_panel()
| 32.511144 | 86 | 0.578016 |
c674fc70964ef5289ef6e9bcd06a11918a7fa1e2
| 495 |
py
|
Python
|
ip-messaging/rest/members/retrieve-member/retrieve-member.6.x.py
|
azaddeveloper/api-snippets
|
f88b153cd7186fa70b33733b205886502db0d1f2
|
[
"MIT"
] | 3 |
2020-05-05T10:01:02.000Z
|
2021-02-06T14:23:13.000Z
|
ip-messaging/rest/members/retrieve-member/retrieve-member.6.x.py
|
azaddeveloper/api-snippets
|
f88b153cd7186fa70b33733b205886502db0d1f2
|
[
"MIT"
] | null | null | null |
ip-messaging/rest/members/retrieve-member/retrieve-member.6.x.py
|
azaddeveloper/api-snippets
|
f88b153cd7186fa70b33733b205886502db0d1f2
|
[
"MIT"
] | 1 |
2019-10-02T14:36:36.000Z
|
2019-10-02T14:36:36.000Z
|
# Download the Python helper library from twilio.com/docs/python/install
from twilio.rest import Client
# Initialize the client
account = "ACXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX"
token = "your_auth_token"
client = Client(account, token)
member = client.chat \
.services("ISXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.channels("CHXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.members("MBXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.fetch()
print(member.sid)
| 30.9375 | 72 | 0.707071 |
14b118ca5afc0f8fefcacab8d40e74a35bfc7053
| 7,718 |
py
|
Python
|
nltk/corpus/reader/senseval.py
|
joelgrondman/nltk205sslfix
|
ca141115a2a4d11da62b43f204285bcc74bcbda6
|
[
"Apache-2.0"
] | 137 |
2015-03-14T06:16:37.000Z
|
2022-03-27T08:32:38.000Z
|
nltk/corpus/reader/senseval.py
|
shareablee/nltk-2.0.5
|
f0c0862c4bcc726bc635d7e1a34e714ac35753ca
|
[
"Apache-2.0"
] | 2 |
2020-08-09T17:03:47.000Z
|
2020-08-19T06:45:17.000Z
|
nltk/corpus/reader/senseval.py
|
shareablee/nltk-2.0.5
|
f0c0862c4bcc726bc635d7e1a34e714ac35753ca
|
[
"Apache-2.0"
] | 57 |
2015-01-06T14:18:27.000Z
|
2021-12-26T09:37:30.000Z
|
# Natural Language Toolkit: Senseval 2 Corpus Reader
#
# Copyright (C) 2001-2012 NLTK Project
# Author: Trevor Cohn <[email protected]>
# Steven Bird <[email protected]> (modifications)
# URL: <http://www.nltk.org/>
# For license information, see LICENSE.TXT
"""
Read from the Senseval 2 Corpus.
SENSEVAL [http://www.senseval.org/]
Evaluation exercises for Word Sense Disambiguation.
Organized by ACL-SIGLEX [http://www.siglex.org/]
Prepared by Ted Pedersen <[email protected]>, University of Minnesota,
http://www.d.umn.edu/~tpederse/data.html
Distributed with permission.
The NLTK version of the Senseval 2 files uses well-formed XML.
Each instance of the ambiguous words "hard", "interest", "line", and "serve"
is tagged with a sense identifier, and supplied with context.
"""
import os
import re
import bisect
import xml.sax
from xmldocs import XMLCorpusReader
from nltk.tokenize import *
from xml.etree import ElementTree
from util import *
from api import *
class SensevalInstance(object):
def __init__(self, word, position, context, senses):
self.word = word
self.senses = tuple(senses)
self.position = position
self.context = context
def __repr__(self):
return ('SensevalInstance(word=%r, position=%r, '
'context=%r, senses=%r)' %
(self.word, self.position, self.context, self.senses))
class SensevalCorpusReader(CorpusReader):
def instances(self, fileids=None):
return concat([SensevalCorpusView(fileid, enc)
for (fileid, enc) in self.abspaths(fileids, True)])
def raw(self, fileids=None):
"""
:return: the text contents of the given fileids, as a single string.
"""
if fileids is None: fileids = self._fileids
elif isinstance(fileids, basestring): fileids = [fileids]
return concat([self.open(f).read() for f in fileids])
def _entry(self, tree):
elts = []
for lexelt in tree.findall('lexelt'):
for inst in lexelt.findall('instance'):
sense = inst[0].attrib['senseid']
context = [(w.text, w.attrib['pos'])
for w in inst[1]]
elts.append( (sense, context) )
return elts
class SensevalCorpusView(StreamBackedCorpusView):
def __init__(self, fileid, encoding):
StreamBackedCorpusView.__init__(self, fileid, encoding=encoding)
self._word_tokenizer = WhitespaceTokenizer()
self._lexelt_starts = [0] # list of streampos
self._lexelts = [None] # list of lexelt names
def read_block(self, stream):
# Decide which lexical element we're in.
lexelt_num = bisect.bisect_right(self._lexelt_starts, stream.tell())-1
lexelt = self._lexelts[lexelt_num]
instance_lines = []
in_instance = False
while True:
line = stream.readline()
if line == '':
assert instance_lines == []
return []
# Start of a lexical element?
if line.lstrip().startswith('<lexelt'):
lexelt_num += 1
m = re.search('item=("[^"]+"|\'[^\']+\')', line)
assert m is not None # <lexelt> has no 'item=...'
lexelt = m.group(1)[1:-1]
if lexelt_num < len(self._lexelts):
assert lexelt == self._lexelts[lexelt_num]
else:
self._lexelts.append(lexelt)
self._lexelt_starts.append(stream.tell())
# Start of an instance?
if line.lstrip().startswith('<instance'):
assert instance_lines == []
in_instance = True
# Body of an instance?
if in_instance:
instance_lines.append(line)
# End of an instance?
if line.lstrip().startswith('</instance'):
xml_block = '\n'.join(instance_lines)
xml_block = _fixXML(xml_block)
inst = ElementTree.fromstring(xml_block)
return [self._parse_instance(inst, lexelt)]
def _parse_instance(self, instance, lexelt):
senses = []
context = []
position = None
for child in instance:
if child.tag == 'answer':
senses.append(child.attrib['senseid'])
elif child.tag == 'context':
context += self._word_tokenizer.tokenize(child.text)
for cword in child:
if cword.tag == 'compound':
cword = cword[0] # is this ok to do?
if cword.tag == 'head':
                        # Some sanity checks:
assert position is None, 'head specified twice'
assert cword.text.strip() or len(cword)==1
assert not (cword.text.strip() and len(cword)==1)
# Record the position of the head:
position = len(context)
                        # Add on the head word itself:
if cword.text.strip():
context.append(cword.text.strip())
elif cword[0].tag == 'wf':
context.append((cword[0].text,
cword[0].attrib['pos']))
if cword[0].tail:
context += self._word_tokenizer.tokenize(
cword[0].tail)
else:
assert False, 'expected CDATA or wf in <head>'
elif cword.tag == 'wf':
context.append((cword.text, cword.attrib['pos']))
elif cword.tag == 's':
pass # Sentence boundary marker.
else:
print 'ACK', cword.tag
assert False, 'expected CDATA or <wf> or <head>'
if cword.tail:
context += self._word_tokenizer.tokenize(cword.tail)
else:
assert False, 'unexpected tag %s' % child.tag
return SensevalInstance(lexelt, position, context, senses)
def _fixXML(text):
"""
Fix the various issues with Senseval pseudo-XML.
"""
# <~> or <^> => ~ or ^
text = re.sub(r'<([~\^])>', r'\1', text)
# fix lone &
    text = re.sub(r'(\s+)\&(\s+)', r'\1&amp;\2', text)
# fix """
text = re.sub(r'"""', '\'"\'', text)
# fix <s snum=dd> => <s snum="dd"/>
text = re.sub(r'(<[^<]*snum=)([^">]+)>', r'\1"\2"/>', text)
# fix foreign word tag
text = re.sub(r'<\&frasl>\s*<p[^>]*>', 'FRASL', text)
# remove <&I .>
text = re.sub(r'<\&I[^>]*>', '', text)
# fix <{word}>
text = re.sub(r'<{([^}]+)}>', r'\1', text)
# remove <@>, <p>, </p>
text = re.sub(r'<(@|/?p)>', r'', text)
# remove <&M .> and <&T .> and <&Ms .>
text = re.sub(r'<&\w+ \.>', r'', text)
# remove <!DOCTYPE... > lines
text = re.sub(r'<!DOCTYPE[^>]*>', r'', text)
# remove <[hi]> and <[/p]> etc
text = re.sub(r'<\[\/?[^>]+\]*>', r'', text)
    # take the thing out of the brackets: <&hellip;>
text = re.sub(r'<(\&\w+;)>', r'\1', text)
# and remove the & for those patterns that aren't regular XML
text = re.sub(r'&(?!amp|gt|lt|apos|quot)', r'', text)
# fix 'abc <p="foo"/>' style tags - now <wf pos="foo">abc</wf>
text = re.sub(r'[ \t]*([^<>\s]+?)[ \t]*<p="([^"]*"?)"/>',
r' <wf pos="\2">\1</wf>', text)
text = re.sub(r'\s*"\s*<p=\'"\'/>', " <wf pos='\"'>\"</wf>", text)
return text
| 38.59 | 78 | 0.518269 |
f2e74285a345bf7f2647bc1b09051ff4f579a405
| 2,899 |
py
|
Python
|
brian2/__init__.py
|
CharleeSF/brian2
|
d2be1ed33a8ac51b1891f89a2544123a937c43ff
|
[
"BSD-2-Clause"
] | null | null | null |
brian2/__init__.py
|
CharleeSF/brian2
|
d2be1ed33a8ac51b1891f89a2544123a937c43ff
|
[
"BSD-2-Clause"
] | null | null | null |
brian2/__init__.py
|
CharleeSF/brian2
|
d2be1ed33a8ac51b1891f89a2544123a937c43ff
|
[
"BSD-2-Clause"
] | null | null | null |
'''
Brian 2.0
'''
# Import setuptools to do some monkey patching of distutils, necessary for
# working weave/Cython on Windows with the Python for C++ compiler
import setuptools as _setuptools
# Check basic dependencies
import sys
from distutils.version import LooseVersion
missing = []
try:
import numpy
except ImportError as ex:
sys.stderr.write('Importing numpy failed: %s\n' % ex)
missing.append('numpy')
try:
import sympy
except ImportError as ex:
sys.stderr.write('Importing sympy failed: %s\n' % ex)
missing.append('sympy')
try:
import pyparsing
except ImportError as ex:
sys.stderr.write('Importing pyparsing failed: %s\n' % ex)
missing.append('pyparsing')
try:
import jinja2
except ImportError as ex:
sys.stderr.write('Importing Jinja2 failed: %s\n' % ex)
missing.append('jinja2')
try:
import cpuinfo
except Exception as ex:
sys.stderr.write('Importing cpuinfo failed: %s\n' % ex)
# we don't append it to "missing", Brian runs fine without it
if len(missing):
raise ImportError('Some required dependencies are missing:\n' + ', '.join(missing))
try:
from pylab import *
except ImportError:
# Do the non-matplotlib pylab imports manually
from numpy import *
from numpy.fft import *
from numpy.random import *
from numpy.linalg import *
import numpy.ma as ma
# don't let numpy's datetime hide stdlib
import datetime
# Make sure that Brian's unit-aware functions are used, even when directly
# using names prefixed with numpy or np
import brian2.numpy_ as numpy
import brian2.numpy_ as np
# delete some annoying names from the namespace
if 'x' in globals():
del x
if 'f' in globals():
del f
if 'rate' in globals():
del rate
__docformat__ = "restructuredtext en"
__version__ = '2.1+git'
__release_date__ = '2016-11-18'
from brian2.only import *
# Check for outdated dependency versions
def _check_dependency_version(name, version):
from core.preferences import prefs
from utils.logger import get_logger
logger = get_logger(__name__)
module = sys.modules[name]
if not isinstance(module.__version__, basestring): # mocked module
return
if not LooseVersion(module.__version__) >= LooseVersion(version):
message = '%s is outdated (got version %s, need version %s)' % (name,
module.__version__,
version)
if prefs.core.outdated_dependency_error:
raise ImportError(message)
else:
logger.warn(message, 'outdated_dependency')
for name, version in [('numpy', '1.9'),
('sympy', '0.7.6'),
('jinja2', '2.7')]:
_check_dependency_version(name, version)
# Initialize the logging system
BrianLogger.initialize()
| 29.282828 | 91 | 0.658848 |
db53019c9accbf1bdb73ecdea18a60806cc2921e
| 2,879 |
py
|
Python
|
control/tsndctl/postprocessing.py
|
getty708/atr-tk
|
33c6bb42ebdf16c188707e79078ea9dd3a80d8d0
|
[
"MIT"
] | null | null | null |
control/tsndctl/postprocessing.py
|
getty708/atr-tk
|
33c6bb42ebdf16c188707e79078ea9dd3a80d8d0
|
[
"MIT"
] | 2 |
2021-10-10T14:07:01.000Z
|
2022-03-23T12:59:56.000Z
|
control/tsndctl/postprocessing.py
|
getty708/atr-tk
|
33c6bb42ebdf16c188707e79078ea9dd3a80d8d0
|
[
"MIT"
] | null | null | null |
from pathlib import Path
import numpy as np
import pandas as pd
import json
from logging import getLogger
logger = getLogger("tsndctl.postprocessing")
METADATA_KEYS = [
"MemEntryCount",
"FreeMemSize",
"ReadMemDataCtl",
"EntryInfo",
"EntryDetail",
]
def summarize_log_parse_results(metadata, df_ags, messages):
df_ags = df_ags[df_ags["ts"] > 0].reset_index(drop=True)
warnings = [msg for msg in messages if "WARNING" in msg]
num_records = metadata.get("EntryInfo")[0].get("num_records")
error_rate = 1. - len(df_ags) / (num_records / 2)
if len(df_ags) > 0:
ts = df_ags["ts"].values
duration = (ts[-1] - ts[0]) / 1000.
else:
duration = 0
summary = {
"records": num_records,
"ags_expected": num_records//2,
"ags": len(df_ags),
"messages": len(messages),
"warnings": len(warnings),
"error_rates": error_rate,
"duration": duration,
}
return summary
def parse_json(data):
data = data.strip().replace("\n", "").replace("'", "\"")
return json.loads(data)
def parse_logfile(logfile):
"""
Args:
logfile (str): path to a target logfile.
"""
metadata = dict()
df_ags = []
messages = []
with open(logfile, "r") as f:
lines = f.readlines()
for i, line in enumerate(lines[:]):
line = line.replace("\n", "")
if "::" not in line:
messages.append(line)
continue
line = line.split(" - ")
if len(line) == 2:
_, body = line
if len(body.split("::")) == 2:
body_key, body_dict = body.split("::")
body_dict = parse_json(body_dict)
else:
body_key = body
body_dict = dict()
if body_key in METADATA_KEYS:
if body_key not in metadata.keys():
metadata[body_key] = []
metadata[body_key].append(body_dict)
elif body_key == "AgsDataEvent":
df_ags.append({
"ts": body_dict.get("ts"),
"acc_x": body_dict.get("acc")[0],
"acc_y": body_dict.get("acc")[1],
"acc_z": body_dict.get("acc")[2],
"gyro_x": body_dict.get("gyro")[0],
"gyro_y": body_dict.get("gyro")[1],
"gyro_z": body_dict.get("gyro")[2],
})
else:
logger.warning(f"unknown record: {line}")
df_ags = pd.DataFrame(df_ags)
summary = summarize_log_parse_results(metadata, df_ags, messages)
return (metadata, df_ags, messages), summary
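# A minimal usage sketch (the log file path is a hypothetical example):
#
#   (metadata, df_ags, messages), summary = parse_logfile("tsnd_session.log")
#   print(f"{summary['ags']} accel/gyro samples over {summary['duration']} s "
#         f"(error rate {summary['error_rates']:.2%})")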
| 29.989583 | 76 | 0.49809 |
c917dd92241a141aaa4f3c7bd71dffef09e0ddb4
| 1,696 |
py
|
Python
|
tests/bugs/core_4678_test.py
|
reevespaul/firebird-qa
|
98f16f425aa9ab8ee63b86172f959d63a2d76f21
|
[
"MIT"
] | null | null | null |
tests/bugs/core_4678_test.py
|
reevespaul/firebird-qa
|
98f16f425aa9ab8ee63b86172f959d63a2d76f21
|
[
"MIT"
] | null | null | null |
tests/bugs/core_4678_test.py
|
reevespaul/firebird-qa
|
98f16f425aa9ab8ee63b86172f959d63a2d76f21
|
[
"MIT"
] | null | null | null |
#coding:utf-8
#
# id: bugs.core_4678
# title: Regression: incorrect calculation of byte-length for view columns
# description:
# tracker_id: CORE-4678
# min_versions: ['2.5.4']
# versions: 2.5.4
# qmid: None
import pytest
from firebird.qa import db_factory, isql_act, Action
# version: 2.5.4
# resources: None
substitutions_1 = []
init_script_1 = """
recreate table test(id int);
commit;
set term ^;
execute block as
begin
execute statement 'drop sequence gen_memo_id';
when any do begin end
end ^
set term ;^
commit;
create generator gen_memo_id;
recreate table test (
id int not null,
memo blob sub_type 1 segment size 100 character set ascii
);
create index memo_idx1 on test computed by (upper(trim(cast(substring(memo from 1 for 128) as varchar(128)))));
set term ^ ;
create or alter trigger test_bi for test
active before insert position 0
as
begin
if (new.id is null) then
new.id = gen_id(gen_memo_id,1);
end
^
set term ; ^
commit;
"""
db_1 = db_factory(sql_dialect=3, init=init_script_1)
test_script_1 = """
insert into test(memo) values( 'foo-rio-bar' );
rollback;
-- Confirmed on WI-V2.5.2.26540 (official release):
-- exception on ROLLBACK raises with text:
-- ===
-- Statement failed, SQLSTATE = HY000
-- BLOB not found
-- ===
-- No reconnect is required, all can be done in one ISQL attachment.
"""
act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1)
@pytest.mark.version('>=2.5.4')
def test_1(act_1: Action):
act_1.execute()
| 22.918919 | 115 | 0.630307 |
29d60fef1678dd018281cf886138f7ae624a2b99
| 1,320 |
py
|
Python
|
graphene/types/base.py
|
spacether/graphene
|
37d6eaea465c8dca981efd173b7c74db9a01830e
|
[
"MIT"
] | null | null | null |
graphene/types/base.py
|
spacether/graphene
|
37d6eaea465c8dca981efd173b7c74db9a01830e
|
[
"MIT"
] | null | null | null |
graphene/types/base.py
|
spacether/graphene
|
37d6eaea465c8dca981efd173b7c74db9a01830e
|
[
"MIT"
] | null | null | null |
from typing import Type
from ..utils.subclass_with_meta import SubclassWithMeta
from ..utils.trim_docstring import trim_docstring
class BaseOptions:
name = None # type: str
description = None # type: str
_frozen = False # type: bool
def __init__(self, class_type):
self.class_type = class_type # type: Type
def freeze(self):
self._frozen = True
def __setattr__(self, name, value):
if not self._frozen:
super(BaseOptions, self).__setattr__(name, value)
else:
raise Exception(f"Can't modify frozen Options {self}")
def __repr__(self):
return f"<{self.__class__.__name__} name={repr(self.name)}>"
class BaseType(SubclassWithMeta):
@classmethod
def create_type(cls, class_name, **options):
return type(class_name, (cls,), {"Meta": options})
@classmethod
def __init_subclass_with_meta__(
cls, name=None, description=None, _meta=None, **_kwargs
):
assert "_meta" not in cls.__dict__, "Can't assign directly meta"
if not _meta:
return
_meta.name = name or cls.__name__
_meta.description = description or trim_docstring(cls.__doc__)
_meta.freeze()
cls._meta = _meta
super(BaseType, cls).__init_subclass_with_meta__()
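# A hedged illustration of how these pieces combine (the names below are made up):
# a concrete type defines its own options class, fills it in, freezes it, and passes
# it as ``_meta`` so that further mutation raises.
#
#   class MyOptions(BaseOptions):
#       fields = None
#
#   _meta = MyOptions(class_type=None)
#   _meta.fields = {}
#   _meta.freeze()      # after this, setting any attribute raises an Exception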
| 28.695652 | 72 | 0.651515 |
72cc56ba0db59ae19c6e5c6ad741615255b95018
| 16,450 |
py
|
Python
|
aquarium.py
|
edouardparis/aquarium
|
7877c68360171a2a800a96bd71bec08ac9257775
|
[
"BSD-3-Clause"
] | null | null | null |
aquarium.py
|
edouardparis/aquarium
|
7877c68360171a2a800a96bd71bec08ac9257775
|
[
"BSD-3-Clause"
] | null | null | null |
aquarium.py
|
edouardparis/aquarium
|
7877c68360171a2a800a96bd71bec08ac9257775
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python3
import argparse
import logging
import os
import shutil
import socket
import subprocess
import sys
import test_framework
import time
import traceback
from concurrent import futures
from test_framework.bitcoind import BitcoinD
from test_framework.revault_network import RevaultNetwork
from test_framework.utils import (
POSTGRES_USER,
POSTGRES_PASS,
POSTGRES_HOST,
POSTGRES_IS_SETUP,
EXECUTOR_WORKERS,
LOG_LEVEL,
DEBUG_GUI,
)
BASE_DIR = os.getenv("BASE_DIR", os.path.abspath("demo"))
SRC_DIR = os.getenv("SRC_DIR", os.path.abspath("src"))
COORDINATORD_SRC_DIR = os.path.join(SRC_DIR, "coordinatord")
COSIGNERD_SRC_DIR = os.path.join(SRC_DIR, "cosignerd")
MIRADORD_SRC_DIR = os.path.join(SRC_DIR, "miradord")
REVAULTD_SRC_DIR = os.path.join(SRC_DIR, "revaultd")
REVAULT_GUI_SRC_DIR = os.path.join(SRC_DIR, "revault-gui")
SHELL = os.getenv("SHELL", "bash")
COORDINATORD_VERSION = os.getenv("COORDINATORD_VERSION", "master")
COSIGNERD_VERSION = os.getenv("COSIGNERD_VERSION", "master")
MIRADORD_VERSION = os.getenv("MIRADORD_VERSION", "master")
REVAULTD_VERSION = os.getenv("REVAULTD_VERSION", "master")
REVAULT_GUI_VERSION = os.getenv("REVAULT_GUI_VERSION", "master")
WITH_GUI = os.getenv("WITH_GUI", "1") == "1"
WITH_ALL_HWS = os.getenv("WITH_ALL_HWS", "0") == "1"
# FIXME: use tmp
def bitcoind_dir():
return os.path.join(BASE_DIR, "bitcoind")
def executor():
return futures.ThreadPoolExecutor(
max_workers=EXECUTOR_WORKERS, thread_name_prefix="revault-demo"
)
def is_listening(host, port):
"""Check if a service is listening there."""
s = socket.socket()
try:
s.connect((host, port))
return True
except socket.error:
return False
def build_src(src_dir, version, git_url):
if not os.path.isdir(src_dir):
if not os.path.isdir(SRC_DIR):
os.makedirs(SRC_DIR)
subprocess.check_call(["git", "-C", f"{SRC_DIR}", "clone", git_url])
subprocess.check_call(["git", "-C", f"{src_dir}", "fetch", "origin"])
subprocess.check_call(["git", "-C", f"{src_dir}", "checkout", f"{version}"])
subprocess.check_call(
["cargo", "build", "--manifest-path", f"{src_dir}/Cargo.toml"]
)
def build_all_binaries(build_cosig, build_wt, build_coordinator=True):
if build_coordinator:
logging.info(
f"Building coordinatord at '{COORDINATORD_VERSION}' in '{COORDINATORD_SRC_DIR}'"
)
build_src(
COORDINATORD_SRC_DIR,
COORDINATORD_VERSION,
"https://github.com/revault/coordinatord",
)
else:
logging.info("Skipping the build of the coordinator, using the dummy one.")
if build_cosig:
logging.info(
f"Building cosignerd at '{COSIGNERD_VERSION}' in '{COSIGNERD_SRC_DIR}'"
)
build_src(
COSIGNERD_SRC_DIR, COSIGNERD_VERSION, "https://github.com/revault/cosignerd"
)
if build_wt:
logging.info(
f"Building miradord at '{MIRADORD_VERSION}' in '{MIRADORD_SRC_DIR}'"
)
build_src(
MIRADORD_SRC_DIR, MIRADORD_VERSION, "https://github.com/revault/miradord"
)
logging.info(f"Building revaultd at '{REVAULTD_VERSION}' in '{REVAULTD_SRC_DIR}'")
build_src(REVAULTD_SRC_DIR, REVAULTD_VERSION, "https://github.com/revault/revaultd")
if WITH_GUI:
logging.info(
f"Building revault-gui at '{REVAULT_GUI_VERSION}' in '{REVAULT_GUI_SRC_DIR}',"
" this may take some time"
)
build_src(
REVAULT_GUI_SRC_DIR,
REVAULT_GUI_VERSION,
"https://github.com/edouardparis/revault-gui",
)
logging.info("Building revault-gui's dummysigner")
subprocess.check_call(
[
"cargo",
"build",
"--manifest-path",
f"{REVAULT_GUI_SRC_DIR}/contrib/tools/dummysigner/Cargo.toml",
]
)
def bitcoind():
bitcoind = BitcoinD(bitcoin_dir=bitcoind_dir())
bitcoind.startup()
bitcoind.rpc.createwallet(bitcoind.rpc.wallet_name, False, False, "", False, True)
while bitcoind.rpc.getbalance() < 50:
bitcoind.rpc.generatetoaddress(1, bitcoind.rpc.getnewaddress())
while bitcoind.rpc.getblockcount() <= 1:
time.sleep(0.1)
return bitcoind
def deploy(
n_stks, n_mans, n_stkmans, csv, mans_thresh=None, with_cosigs=False, policies=[]
):
with_wts = len(policies) > 0
if not POSTGRES_IS_SETUP:
logging.info("No Postgre backend given, will use a dummy coordinator")
if POSTGRES_IS_SETUP and not is_listening(POSTGRES_HOST, 5432):
logging.error(f"No Postgre server listening on {POSTGRES_HOST}:5432.")
print(
f"A simple way to get started with one given your POSTGRES_PASS and POSTGRES_USER:"
)
print(
f" docker run --rm -d -p 5432:5432 --name postgres-coordinatord -e POSTGRES_PASSWORD={POSTGRES_PASS} -e POSTGRES_USER={POSTGRES_USER} -e POSTGRES_DB=coordinator_db postgres:alpine"
)
sys.exit(1)
if n_stks + n_stkmans < 1:
logging.error("Need at least 1 stakeholder")
sys.exit(1)
if n_mans + n_stkmans < 1:
logging.error("Need at least 1 manager")
sys.exit(1)
if mans_thresh is not None and (
mans_thresh > n_mans + n_stkmans or mans_thresh < 1
):
logging.error("Invalid managers threshold")
sys.exit(1)
for p in policies:
if not os.path.isfile(p):
logging.error(f"No plugin at '{p}'")
sys.exit(1)
if os.path.isdir(BASE_DIR):
logging.warning("Base directory exists already")
resp = input(f"Remove non-empty '{BASE_DIR}' and start fresh? (y/n) ")
if resp.lower() == "y":
shutil.rmtree(BASE_DIR)
else:
logging.info("Exiting")
sys.exit(1)
logging.info("Checking the source directories..")
build_all_binaries(build_cosig=with_cosigs, build_wt=with_wts, build_coordinator=POSTGRES_IS_SETUP)
logging.info("Setting up bitcoind")
bd = bitcoind()
# In any case cleanup bitcoind before exiting
try:
logging.info(
f"Deploying a Revault network with {n_stks} only-stakeholders,"
f" {n_mans} only-managers, {n_stkmans} both stakeholders and managers,"
f" a CSV of {csv} and a managers threshold of {mans_thresh or n_mans + n_stkmans}"
)
# Monkey patch the servers binaries paths
test_framework.revaultd.REVAULTD_PATH = os.path.join(
REVAULTD_SRC_DIR, "target", "debug", "revaultd"
)
test_framework.coordinatord.COORDINATORD_PATH = os.path.join(
COORDINATORD_SRC_DIR, "target", "debug", "coordinatord"
)
test_framework.cosignerd.COSIGNERD_PATH = os.path.join(
COSIGNERD_SRC_DIR, "target", "debug", "cosignerd"
)
test_framework.miradord.MIRADORD_PATH = os.path.join(
MIRADORD_SRC_DIR, "target", "debug", "miradord"
)
rn = RevaultNetwork(
BASE_DIR,
bd,
executor(),
POSTGRES_USER,
POSTGRES_PASS,
POSTGRES_HOST,
)
rn.deploy(
n_stks,
n_mans,
n_stkmans,
csv,
mans_thresh,
with_watchtowers=with_wts,
with_cosigs=with_cosigs,
)
if with_wts:
# NOTE: no config. We use hardcoded values for the demo.
policies = [{"path": p} for p in policies]
for stk in rn.stk_wallets + rn.stkman_wallets:
stk.watchtower.add_plugins(policies)
dummysigner_conf_file = os.path.join(BASE_DIR, "dummysigner.toml")
# We use a hack to avoid having to modify the test_framework to include the GUI.
if WITH_GUI:
emergency_address = rn.emergency_address
deposit_desc = rn.deposit_desc
unvault_desc = rn.unvault_desc
cpfp_desc = rn.cpfp_desc
with open(dummysigner_conf_file, "w") as f:
f.write(f'emergency_address = "{emergency_address}"\n')
for i, stk in enumerate(rn.stk_wallets):
f.write("[[keys]]\n")
f.write(f'name = "stakeholder_{i}_key"\n')
f.write(f'xpriv = "{stk.stk_keychain.hd.get_xpriv()}"\n')
for i, man in enumerate(rn.man_wallets):
f.write("[[keys]]\n")
f.write(f'name = "manager_{i}_key"\n')
f.write(f'xpriv = "{man.man_keychain.hd.get_xpriv()}"\n')
for i, stkman in enumerate(rn.stkman_wallets):
f.write("[[keys]]\n")
f.write(f'name = "stkman_{i}_stakeholder_key"\n')
f.write(f'xpriv = "{stkman.stk_keychain.hd.get_xpriv()}"\n')
f.write("[[keys]]\n")
f.write(f'name = "stkman_{i}_manager_key"\n')
f.write(f'xpriv = "{stkman.man_keychain.hd.get_xpriv()}"\n')
f.write("[descriptors]\n")
f.write(f'deposit_descriptor = "{deposit_desc}"\n')
f.write(f'unvault_descriptor = "{unvault_desc}"\n')
f.write(f'cpfp_descriptor = "{cpfp_desc}"\n')
for p in rn.participants():
p.gui_conf_file = os.path.join(
p.datadir_with_network, "gui_config.toml"
)
with open(p.gui_conf_file, "w") as f:
f.write(f"revaultd_config_path = '{p.conf_file}'\n")
f.write(f"revaultd_path = '{test_framework.revaultd.REVAULTD_PATH}'\n")
f.write(f"log_level = '{LOG_LEVEL}'\n")
f.write(f"debug = {'true' if DEBUG_GUI else 'false'}")
revault_gui = os.path.join(
REVAULT_GUI_SRC_DIR, "target", "debug", "revault-gui"
)
dummysigner = os.path.join(
REVAULT_GUI_SRC_DIR,
"contrib",
"tools",
"dummysigner",
"target",
"debug",
"dummysigner",
)
revault_cli = os.path.join(REVAULTD_SRC_DIR, "target", "debug", "revault-cli")
aliases_file = os.path.join(BASE_DIR, "aliases.sh")
with open(aliases_file, "w") as f:
        f.write('PS1="(Revault demo) $PS1"\n')  # It's a hack, it shouldn't be there
f.write(f"alias bd=\"bitcoind -datadir='{bd.bitcoin_dir}'\"\n")
f.write(
f"alias bcli=\"bitcoin-cli -datadir='{bd.bitcoin_dir}' -rpcwallet='{bd.rpc.wallet_name}'\"\n"
)
for i, stk in enumerate(rn.stk_wallets):
f.write(f'alias stk{i}cli="{revault_cli} --conf {stk.conf_file}"\n')
f.write(f'alias stk{i}d="{test_framework.revaultd.REVAULTD_PATH} --conf {stk.conf_file}"\n')
if WITH_GUI:
f.write(
f"alias stk{i}gui='{revault_gui} --conf {stk.gui_conf_file} > /dev/null'\n"
)
if WITH_ALL_HWS:
f.write(
f"alias stk{i}hw='{dummysigner} {stk.stk_keychain.hd.get_xpriv()} > /dev/null'\n"
)
for i, man in enumerate(rn.man_wallets):
f.write(f'alias man{i}cli="{revault_cli} --conf {man.conf_file}"\n')
f.write(f'alias man{i}d="{test_framework.revaultd.REVAULTD_PATH} --conf {man.conf_file}"\n')
if WITH_GUI:
f.write(
f"alias man{i}gui='{revault_gui} --conf {man.gui_conf_file} > /dev/null'\n"
)
if WITH_ALL_HWS:
f.write(
f"alias man{i}hw='{dummysigner} {man.man_keychain.hd.get_xpriv()} > /dev/null'\n"
)
for i, stkman in enumerate(rn.stkman_wallets):
f.write(
f'alias stkman{i}cli="{revault_cli} --conf {stkman.conf_file}"\n'
)
f.write(
f'alias stkman{i}d="{test_framework.revaultd.REVAULTD_PATH} --conf {stkman.conf_file}"\n'
)
if WITH_GUI:
f.write(
f"alias stkman{i}gui='{revault_gui} --conf {stkman.gui_conf_file} > /dev/null'\n"
)
if WITH_ALL_HWS:
f.write(
f"alias stkman{i}hwstk='{dummysigner} {stkman.stk_keychain.hd.get_xpriv()} > /dev/null'\n"
)
f.write(
f"alias stkman{i}hwman='{dummysigner} {stkman.man_keychain.hd.get_xpriv()} > /dev/null'\n"
)
# hw for all the keys.
if WITH_GUI:
f.write(f"alias hw='{dummysigner} --conf {dummysigner_conf_file} > /dev/null'\n")
with open(aliases_file, "r") as f:
available_aliases = "".join(f.readlines()[1:])
print("Dropping you into a shell. Exit to end the session.", end="\n\n")
print(f"Available aliases: \n{available_aliases}\n")
# In any case clean up all daemons before exiting
try:
subprocess.call([SHELL, "--init-file", f"{aliases_file}", "-i"])
except Exception as e:
logging.error(f"Got error: '{str(e)}'")
logging.error(traceback.format_exc())
finally:
logging.info("Cleaning up Revault deployment")
rn.cleanup()
except Exception as e:
logging.error(f"Got error: '{str(e)}'")
logging.error(traceback.format_exc())
finally:
logging.info("Cleaning up bitcoind")
bd.cleanup()
def setup_logging():
log_level = logging.INFO
if LOG_LEVEL.lower() in ["debug", "info", "warning"]:
log_level = LOG_LEVEL.upper()
logging.basicConfig(level=log_level)
# Much hacky, much fancy
logging.addLevelName(
logging.INFO, f"\033[1;34m{logging.getLevelName(logging.INFO)}\033[1;0m"
)
logging.addLevelName(
logging.WARNING, f"\033[1;33m{logging.getLevelName(logging.WARNING)}\033[1;0m"
)
logging.addLevelName(
logging.ERROR, f"\033[1;31m{logging.getLevelName(logging.ERROR)}\033[1;0m"
)
def parse_args():
parser = argparse.ArgumentParser()
deploy_config = parser.add_argument_group("Deployment configuration")
deploy_config.add_argument(
"-stks",
"--stakeholders",
type=int,
help="The number of only-stakeholder",
required=True,
)
deploy_config.add_argument(
"-mans",
"--managers",
type=int,
help="The number of only-manager",
required=True,
)
deploy_config.add_argument(
"-stkmans",
"--stakeholder-managers",
type=int,
help="The number of both stakeholder-manager",
required=True,
)
deploy_config.add_argument(
"-csv",
"--timelock",
type=int,
help="The number of blocks during which an Unvault attempt can be canceled",
required=True,
)
deploy_config.add_argument(
"-mansthresh",
"--managers-threshold",
type=int,
)
deploy_config.add_argument(
"-cosigs",
"--with-cosigning-servers",
action="store_true",
help="Enable cosigning servers to allow Spend policies at the cost of weaker assumptions",
)
deploy_config.add_argument(
"-policy",
"--spending-policy",
action="append",
default=[],
dest="policies",
help="Enforce a spending policy on all watchtowers by specifying a path to a "
"watchtower plugin. Specify this option multiple times to enable multiple "
"policies.",
)
return parser.parse_args()
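# Example invocation (illustrative values; see the argument definitions above):
#
#   ./aquarium.py -stks 2 -mans 2 -stkmans 1 -csv 12 --with-cosigning-servers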
if __name__ == "__main__":
setup_logging()
args = parse_args()
deploy(
args.stakeholders,
args.managers,
args.stakeholder_managers,
args.timelock,
args.managers_threshold,
args.with_cosigning_servers,
args.policies,
)
| 36.474501 | 195 | 0.577933 |
ab83a8b46b8fea2ff15247ef66a3151c035e4c07
| 23,740 |
py
|
Python
|
analog_ec/layout/passives/resistor/ladder.py
|
xyabc/bag_analog_ec
|
e92b4ce8d6422d9a5731381bb3feeba54dfe33a9
|
[
"BSD-3-Clause"
] | 1 |
2021-08-03T12:32:46.000Z
|
2021-08-03T12:32:46.000Z
|
analog_ec/layout/passives/resistor/ladder.py
|
xyabc/bag_analog_ec
|
e92b4ce8d6422d9a5731381bb3feeba54dfe33a9
|
[
"BSD-3-Clause"
] | null | null | null |
analog_ec/layout/passives/resistor/ladder.py
|
xyabc/bag_analog_ec
|
e92b4ce8d6422d9a5731381bb3feeba54dfe33a9
|
[
"BSD-3-Clause"
] | 1 |
2020-01-07T04:54:47.000Z
|
2020-01-07T04:54:47.000Z
|
# -*- coding: utf-8 -*-
"""This module defines resistor ladder layout generators.
"""
from typing import TYPE_CHECKING, Dict, Set, Any, Tuple
from itertools import chain
from bag.layout.template import TemplateBase
from abs_templates_ec.resistor.core import ResArrayBase
from analog_ec.layout.passives.substrate import SubstrateWrapper
if TYPE_CHECKING:
from bag.layout.template import TemplateDB
class ResLadderCore(ResArrayBase):
"""An template for creating a resistor ladder from VDD to VSS.
Parameters
----------
temp_db : :class:`bag.layout.template.TemplateDB`
the template database.
lib_name : str
the layout library name.
params : Dict[str, Any]
the parameter values.
used_names : Set[str]
a set of already used cell names.
**kwargs :
dictionary of optional parameters. See documentation of
:class:`bag.layout.template.TemplateBase` for details.
"""
def __init__(self, temp_db, lib_name, params, used_names, **kwargs):
# type: (TemplateDB, str, Dict[str, Any], Set[str], **kwargs) -> None
ResArrayBase.__init__(self, temp_db, lib_name, params, used_names, **kwargs)
self._sch_params = None
@property
def sch_params(self):
return self._sch_params
@classmethod
def get_params_info(cls):
# type: () -> Dict[str, str]
return dict(
l='unit resistor length, in meters.',
w='unit resistor width, in meters.',
sub_type='the substrate type.',
threshold='the substrate threshold flavor.',
nx='number of resistors in a row. Must be even.',
ny='number of resistors in a column.',
ndum='number of dummy resistors.',
res_options='Configuration dictionary for ResArrayBase.',
half_blk_x='True to allow half horizontal blocks.',
half_blk_y='True to allow half vertical blocks.',
show_pins='True to show pins.',
)
@classmethod
def get_default_param_values(cls):
# type: () -> Dict[str, Any]
return dict(
res_options=None,
half_blk_x=True,
half_blk_y=True,
show_pins=True,
)
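    # A hedged example of the parameter dictionary this generator expects; the numeric
    # values and the 'ptap' substrate flavor below are illustrative assumptions only.
    #
    #   params = dict(
    #       l=2e-6, w=1e-6,
    #       sub_type='ptap', threshold='standard',
    #       nx=4, ny=4, ndum=1,
    #   )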
def draw_layout(self):
# type: () -> None
l = self.params['l']
w = self.params['w']
sub_type = self.params['sub_type']
threshold = self.params['threshold']
nx = self.params['nx']
ny = self.params['ny']
ndum = self.params['ndum']
res_options = self.params['res_options']
half_blk_x = self.params['half_blk_x']
half_blk_y = self.params['half_blk_y']
# error checking
if nx % 2 != 0 or nx <= 0:
raise ValueError('number of resistors in a row must be even and positive.')
if ny % 2 != 0 or ny <= 0:
raise ValueError('number of resistors in a column must be even and positive.')
# compute draw_array parameters
if res_options is None:
res_options = {}
elif 'min_tracks' in res_options:
res_options = res_options.copy()
res_options.pop('min_tracks')
# compute min tracks
hcon_space = 0
vcon_space = 0
min_tracks = (4 + 2 * hcon_space, 7 + vcon_space, nx, 1)
top_layer = self.bot_layer_id + 3
self.draw_array(l, w, sub_type, threshold, nx=nx + 2 * ndum, ny=ny + 2 * ndum,
min_tracks=min_tracks, top_layer=top_layer, connect_up=True,
half_blk_x=half_blk_x, half_blk_y=half_blk_y, **res_options)
# export supplies and recompute array_box/size
tmp = self._draw_metal_tracks(nx, ny, ndum, hcon_space)
hcon_idx_list, vcon_idx_list, xm_bot_idx, num_xm_sup = tmp
self._connect_ladder(nx, ny, ndum, hcon_idx_list, vcon_idx_list, xm_bot_idx, num_xm_sup)
# set schematic parameters
res_type = res_options.get('res_type', 'standard')
self._sch_params = dict(
l=l,
w=w,
intent=res_type,
nout=nx * ny,
ndum=(nx + ny) * 2 * ndum + 4 * ndum ** 2,
sub_name='',
)
def _connect_ladder(self, nx, ny, ndum, hcon_idx_list, vcon_idx_list, xm_bot_idx, num_xm_sup):
tp_idx = self.top_port_idx
bp_idx = self.bot_port_idx
# connect main ladder
for row_idx in range(ndum, ny + ndum):
rmod = row_idx - ndum
for col_idx in range(ndum, nx + ndum):
if ((col_idx == ndum and rmod % 2 == 1) or
(col_idx == nx - 1 + ndum and rmod % 2 == 0)):
mode = 1 if row_idx == ny + ndum - 1 else 0
self._connect_tb(row_idx, col_idx, ndum, tp_idx, hcon_idx_list,
vcon_idx_list, xm_bot_idx, mode=mode)
if col_idx != nx - 1 + ndum:
self._connect_lr(row_idx, col_idx, nx, ndum, tp_idx, bp_idx, hcon_idx_list,
vcon_idx_list, xm_bot_idx)
# connect to ground
self._connect_tb(ndum - 1, ndum, ndum, tp_idx, hcon_idx_list,
vcon_idx_list, xm_bot_idx, mode=-1)
# connect to supplies
self._connect_ground(nx, ndum, hcon_idx_list, vcon_idx_list, xm_bot_idx, num_xm_sup)
self._connect_power(ny, ndum, hcon_idx_list, vcon_idx_list, xm_bot_idx, num_xm_sup)
# connect horizontal dummies
for row_idx in range(ny + 2 * ndum):
if row_idx < ndum or row_idx >= ny + ndum:
col_iter = range(nx + 2 * ndum)
else:
col_iter = chain(range(ndum), range(nx + ndum, nx + 2 * ndum))
for col_idx in col_iter:
conn_tb = col_idx < ndum or col_idx >= nx + ndum
self._connect_dummy(row_idx, col_idx, conn_tb, tp_idx, bp_idx,
hcon_idx_list, vcon_idx_list)
def _connect_power(self, ny, ndum, hcon_idx_list, vcon_idx_list, xm_bot_idx, num_xm_sup):
hm_off, vm_off, xm_off = self.get_track_offsets(ny + ndum, ndum)[:3]
vm_prev = self.get_track_offsets(ndum, ndum - 1)[1]
hm_layer = self.bot_layer_id
vm_layer = hm_layer + 1
hconn = hcon_idx_list[0]
vm_idx_list = [vm_off + vcon_idx_list[2], vm_prev + vcon_idx_list[-3],
vm_prev + vcon_idx_list[-2]]
xm_idx_list = [xm_off + xm_bot_idx + idx for idx in range(num_xm_sup)]
for vm_idx in vm_idx_list:
# connect supply to vm layer
self.add_via_on_grid(hm_layer, hm_off + hconn, vm_idx)
# connect supply to xm layer
for xm_idx in xm_idx_list:
self.add_via_on_grid(vm_layer, vm_idx, xm_idx)
def _connect_ground(self, nx, ndum, hcon_idx_list, vcon_idx_list, xm_bot_idx, num_xm_sup):
xm_prev = self.get_track_offsets(ndum - 1, ndum)[2]
hm_off, vm_off, xm_off = self.get_track_offsets(ndum, ndum)[:3]
vm_prev = self.get_track_offsets(ndum, ndum - 1)[1]
hm_layer = self.bot_layer_id
vm_layer = hm_layer + 1
hconn = hcon_idx_list[0]
# connect all dummies to ground
self.add_via_on_grid(hm_layer, hm_off + hconn, vm_prev + vcon_idx_list[-4])
vm_idx_list = [vm_off + vcon_idx_list[1], vm_off + vcon_idx_list[2],
vm_prev + vcon_idx_list[-3], vm_prev + vcon_idx_list[-2]]
xm_idx_list = [xm_prev + xm_bot_idx + idx for idx in range(nx - num_xm_sup, nx)]
xm_idx_list.append(xm_off + xm_bot_idx)
for vm_idx in vm_idx_list:
# connect supply to vm layer
self.add_via_on_grid(hm_layer, hm_off + hconn, vm_idx)
# connect supply to xm layer
for xm_idx in xm_idx_list:
self.add_via_on_grid(vm_layer, vm_idx, xm_idx)
def _connect_dummy(self, row_idx, col_idx, conn_tb, tp_idx, bp_idx,
hcon_idx_list, vcon_idx_list):
hm_off, vm_off = self.get_track_offsets(row_idx, col_idx)[:2]
hm_layer = self.bot_layer_id
self.add_via_on_grid(hm_layer, hm_off + tp_idx, vm_off + vcon_idx_list[3])
self.add_via_on_grid(hm_layer, hm_off + tp_idx, vm_off + vcon_idx_list[-4])
self.add_via_on_grid(hm_layer, hm_off + hcon_idx_list[1], vm_off + vcon_idx_list[3])
self.add_via_on_grid(hm_layer, hm_off + hcon_idx_list[1], vm_off + vcon_idx_list[-4])
self.add_via_on_grid(hm_layer, hm_off + bp_idx, vm_off + vcon_idx_list[3])
self.add_via_on_grid(hm_layer, hm_off + bp_idx, vm_off + vcon_idx_list[-4])
if conn_tb:
self.add_via_on_grid(hm_layer, hm_off + tp_idx, vm_off + vcon_idx_list[1])
self.add_via_on_grid(hm_layer, hm_off + bp_idx, vm_off + vcon_idx_list[1])
def _connect_lr(self, row_idx, col_idx, nx, ndum, tp_idx, bp_idx, hcon_idx_list,
vcon_idx_list, xm_bot_idx):
hm_off, vm_off, xm_off = self.get_track_offsets(row_idx, col_idx)[:3]
vm_next = self.get_track_offsets(row_idx, col_idx + 1)[1]
hm_layer = self.bot_layer_id
col_real = col_idx - ndum
row_real = row_idx - ndum
if col_real % 2 == 0:
port = bp_idx
conn = hcon_idx_list[1]
else:
port = tp_idx
conn = hcon_idx_list[0]
self.add_via_on_grid(hm_layer, hm_off + port, vm_off + vcon_idx_list[-4])
self.add_via_on_grid(hm_layer, hm_off + conn, vm_off + vcon_idx_list[-4])
self.add_via_on_grid(hm_layer, hm_off + conn, vm_off + vcon_idx_list[-1])
self.add_via_on_grid(hm_layer, hm_off + conn, vm_next + vcon_idx_list[3])
self.add_via_on_grid(hm_layer, hm_off + port, vm_next + vcon_idx_list[3])
# connect to output port
vm_layer = hm_layer + 1
if row_real % 2 == 0:
xm_idx = xm_bot_idx + col_real + 1
else:
xm_idx = xm_bot_idx + (nx - 1 - col_real)
self.add_via_on_grid(vm_layer, vm_off + vcon_idx_list[-1], xm_off + xm_idx)
def _connect_tb(self, row_idx, col_idx, ndum, tp_idx, hcon_idx_list,
vcon_idx_list, xm_bot_idx, mode=0):
# mode = 0 is normal connection, mode = 1 is vdd connection, mode = -1 is vss connection
hm_off, vm_off = self.get_track_offsets(row_idx, col_idx)[:2]
hm_next, _, xm_next = self.get_track_offsets(row_idx + 1, col_idx)[:3]
hm_layer = self.bot_layer_id
if col_idx == ndum:
conn1 = vcon_idx_list[1]
tap = vcon_idx_list[2]
conn2 = vcon_idx_list[3]
else:
conn1 = vcon_idx_list[-2]
tap = vcon_idx_list[-3]
conn2 = vcon_idx_list[-4]
if mode >= 0:
self.add_via_on_grid(hm_layer, hm_off + tp_idx, vm_off + conn1)
self.add_via_on_grid(hm_layer, hm_next + hcon_idx_list[0], vm_off + conn1)
if mode == 0:
self.add_via_on_grid(hm_layer, hm_next + hcon_idx_list[0], vm_off + tap)
# connect to output port
vm_layer = hm_layer + 1
self.add_via_on_grid(vm_layer, vm_off + tap, xm_next + xm_bot_idx)
if mode <= 0:
self.add_via_on_grid(hm_layer, hm_next + hcon_idx_list[0], vm_off + conn2)
self.add_via_on_grid(hm_layer, hm_next + tp_idx, vm_off + conn2)
def _draw_metal_tracks(self, nx, ny, ndum, hcon_space):
show_pins = self.params['show_pins']
num_h_tracks, num_v_tracks, num_x_tracks = self.num_tracks[:3]
xm_bot_idx = (num_x_tracks - nx) / 2
tp_idx = self.top_port_idx
bp_idx = self.bot_port_idx
hm_dtr = hcon_space + 1
if tp_idx + hm_dtr >= num_h_tracks or bp_idx - hm_dtr < 0:
# use inner hm tracks instead.
hm_dtr *= -1
bcon_idx = bp_idx - hm_dtr
tcon_idx = tp_idx + hm_dtr
# get via extensions
grid = self.grid
hm_layer = self.bot_layer_id
vm_layer = hm_layer + 1
xm_layer = vm_layer + 1
hm_ext, vm_ext = grid.get_via_extensions(hm_layer, 1, 1, unit_mode=True)
vmx_ext, _ = grid.get_via_extensions(vm_layer, 1, 1, unit_mode=True)
vm_tidx = [-0.5, 0.5, 1.5, 2.5, num_v_tracks - 3.5, num_v_tracks - 2.5,
num_v_tracks - 1.5, num_v_tracks - 0.5]
# get unit block size
blk_w, blk_h = self.res_unit_size
# find top X layer track index that can be connected to supply.
hm_off, vm_off, xm_off = self.get_track_offsets(0, 0)[:3]
vm_y1 = max(grid.get_wire_bounds(hm_layer, hm_off + max(bp_idx, bcon_idx),
unit_mode=True)[1] + vm_ext,
grid.get_wire_bounds(xm_layer, xm_off + xm_bot_idx,
unit_mode=True)[1] + vmx_ext)
xm_vdd_top_idx = grid.find_next_track(xm_layer, vm_y1 - vmx_ext, half_track=True,
mode=-1, unit_mode=True)
num_xm_sup = int(xm_vdd_top_idx - xm_bot_idx - xm_off + 1)
# get lower/upper bounds of output ports.
xm_lower = grid.get_wire_bounds(vm_layer, vm_off + vm_tidx[0], unit_mode=True)[0]
vm_off = self.get_track_offsets(0, nx + 2 * ndum - 1)[1]
xm_upper = grid.get_wire_bounds(vm_layer, vm_off + vm_tidx[-1], unit_mode=True)[1]
# expand range by +/- 1 to draw metal pattern on dummies too
for row_idx in range(ny + 2 * ndum):
for col_idx in range(nx + 2 * ndum):
hm_off, vm_off, xm_off = self.get_track_offsets(row_idx, col_idx)[:3]
# extend port tracks on hm layer
hm_lower, _ = grid.get_wire_bounds(vm_layer, vm_off + vm_tidx[1], unit_mode=True)
_, hm_upper = grid.get_wire_bounds(vm_layer, vm_off + vm_tidx[-2], unit_mode=True)
self.add_wires(hm_layer, hm_off + bp_idx, hm_lower - hm_ext, hm_upper + hm_ext,
num=2, pitch=tp_idx - bp_idx, unit_mode=True)
# draw hm layer bridge
hm_lower, _ = grid.get_wire_bounds(vm_layer, vm_off + vm_tidx[0], unit_mode=True)
_, hm_upper = grid.get_wire_bounds(vm_layer, vm_off + vm_tidx[3], unit_mode=True)
pitch = tcon_idx - bcon_idx
self.add_wires(hm_layer, hm_off + bcon_idx, hm_lower - hm_ext, hm_upper + hm_ext,
num=2, pitch=pitch, unit_mode=True)
hm_lower, _ = grid.get_wire_bounds(vm_layer, vm_off + vm_tidx[-4], unit_mode=True)
_, hm_upper = grid.get_wire_bounds(vm_layer, vm_off + vm_tidx[-1], unit_mode=True)
self.add_wires(hm_layer, hm_off + bcon_idx, hm_lower - hm_ext, hm_upper + hm_ext,
num=2, pitch=pitch, unit_mode=True)
# draw vm layer bridges
vm_lower = min(grid.get_wire_bounds(hm_layer, hm_off + min(bp_idx, bcon_idx),
unit_mode=True)[0] - vm_ext,
grid.get_wire_bounds(xm_layer, xm_off + xm_bot_idx,
unit_mode=True)[0] - vmx_ext)
vm_upper = max(grid.get_wire_bounds(hm_layer, hm_off + max(tp_idx, tcon_idx),
unit_mode=True)[1] + vm_ext,
grid.get_wire_bounds(xm_layer, xm_off + xm_bot_idx + nx - 1,
unit_mode=True)[1] + vmx_ext)
self.add_wires(vm_layer, vm_off + vm_tidx[0], vm_lower, vm_upper,
num=2, pitch=3, unit_mode=True)
self.add_wires(vm_layer, vm_off + vm_tidx[-4], vm_lower, vm_upper,
num=2, pitch=3, unit_mode=True)
vm_y1 = max(grid.get_wire_bounds(hm_layer, hm_off + max(bp_idx, bcon_idx),
unit_mode=True)[1] + vm_ext,
grid.get_wire_bounds(xm_layer, xm_off + xm_bot_idx,
unit_mode=True)[1] + vmx_ext)
vm_y2 = min(grid.get_wire_bounds(hm_layer, hm_off + min(tp_idx, tcon_idx),
unit_mode=True)[0] - vm_ext,
grid.get_wire_bounds(xm_layer, xm_off + xm_bot_idx + nx - 1,
unit_mode=True)[0] - vmx_ext)
self.add_wires(vm_layer, vm_off + vm_tidx[1], vm_y2 - blk_h, vm_y1,
num=2, pitch=1, unit_mode=True)
self.add_wires(vm_layer, vm_off + vm_tidx[1], vm_y2, vm_y1 + blk_h,
num=2, pitch=1, unit_mode=True)
self.add_wires(vm_layer, vm_off + vm_tidx[-3], vm_y2 - blk_h, vm_y1,
num=2, pitch=1, unit_mode=True)
self.add_wires(vm_layer, vm_off + vm_tidx[-3], vm_y2, vm_y1 + blk_h,
num=2, pitch=1, unit_mode=True)
# draw and export output ports
for row in range(ny + 2 * ndum):
tr_off = self.get_track_offsets(row, 0)[2]
for tidx in range(nx):
warr = self.add_wires(xm_layer, tr_off + xm_bot_idx + tidx, lower=xm_lower,
upper=xm_upper, unit_mode=True)
if row < ndum or (row == ndum and tidx == 0):
net_name = 'VSS'
elif row >= ny + ndum:
net_name = 'VDD'
else:
net_name = 'out<%d>' % (tidx + (row - ndum) * nx)
self.add_pin(net_name, warr, show=show_pins)
return [bcon_idx, tcon_idx], vm_tidx, xm_bot_idx, num_xm_sup
class ResLadder(SubstrateWrapper):
"""Adds substrate contacts to resistor ladder.
Parameters
----------
temp_db : :class:`bag.layout.template.TemplateDB`
the template database.
lib_name : str
the layout library name.
params : Dict[str, Any]
the parameter values.
used_names : Set[str]
a set of already used cell names.
**kwargs :
dictionary of optional parameters. See documentation of
:class:`bag.layout.template.TemplateBase` for details.
"""
def __init__(self, temp_db, lib_name, params, used_names, **kwargs):
# type: (TemplateDB, str, Dict[str, Any], Set[str], **kwargs) -> None
SubstrateWrapper.__init__(self, temp_db, lib_name, params, used_names, **kwargs)
self._num_tracks = None # type: Tuple[int, ...]
self._core_offset = None # type: Tuple[int, int]
@property
def num_tracks(self):
# type: () -> Tuple[int, ...]
"""Returns the number of tracks per resistor block on each routing layer."""
return self._num_tracks
@property
def core_offset(self):
# type: () -> Tuple[int, int]
return self._core_offset
@classmethod
def get_params_info(cls):
# type: () -> Dict[str, str]
ans = ResLadderCore.get_params_info()
ans['sub_w'] = 'substrate contact width. Set to 0 to disable drawing substrate contact.'
ans['sub_lch'] = 'substrate contact channel length.'
ans['sub_tr_w'] = 'substrate track width in number of tracks. None for default.'
return ans
@classmethod
def get_default_param_values(cls):
# type: () -> Dict[str, Any]
ans = ResLadderCore.get_default_param_values()
ans['sub_tr_w'] = None
return ans
def draw_layout(self):
# type: () -> None
res_params = self.params.copy()
sub_lch = res_params.pop('sub_lch')
sub_w = res_params.pop('sub_w')
sub_type = self.params['sub_type']
threshold = self.params['threshold']
res_options = self.params['res_options']
sub_tr_w = self.params['sub_tr_w']
show_pins = self.params['show_pins']
if res_options is None:
res_type = 'standard'
else:
res_type = res_options.get('res_type', 'standard')
inst, _ = self.draw_layout_helper(ResLadderCore, res_params, sub_lch, sub_w, sub_tr_w,
sub_type, threshold, show_pins, is_passive=True,
res_type=res_type)
self._num_tracks = inst.master.num_tracks
self._core_offset = inst.master.core_offset
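        # Editor's note (illustrative, not from the original source): the two properties
        # stored above are what ResLadderTop below consumes -- num_tracks[-1] gives the
        # number of top-layer tracks available per resistor block when sizing the power
        # fill, and core_offset[0] is used as the track offset so the fill pattern lines
        # up with the resistor core.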
class ResLadderTop(TemplateBase):
"""Adds supply fill to resistor ladder.
Parameters
----------
temp_db : :class:`bag.layout.template.TemplateDB`
the template database.
lib_name : str
the layout library name.
params : Dict[str, Any]
the parameter values.
used_names : Set[str]
a set of already used cell names.
**kwargs :
dictionary of optional parameters. See documentation of
:class:`bag.layout.template.TemplateBase` for details.
"""
def __init__(self, temp_db, lib_name, params, used_names, **kwargs):
# type: (TemplateDB, str, Dict[str, Any], Set[str], **kwargs) -> None
TemplateBase.__init__(self, temp_db, lib_name, params, used_names, **kwargs)
self._sch_params = None
@property
def sch_params(self):
return self._sch_params
@classmethod
def get_params_info(cls):
# type: () -> Dict[str, str]
return ResLadder.get_params_info()
@classmethod
def get_default_param_values(cls):
# type: () -> Dict[str, Any]
return ResLadder.get_default_param_values()
def draw_layout(self):
# type: () -> None
show_pins = self.params['show_pins']
params = self.params.copy()
params['show_pins'] = False
master = self.new_template(params=params, temp_cls=ResLadder)
inst = self.add_instance(master, 'XLADDER')
sup_table = {'VDD': [], 'VSS': []}
for name in inst.port_names_iter():
if name in sup_table:
sup_table[name].extend(inst.port_pins_iter(name))
else:
self.reexport(inst.get_port(name), show=show_pins)
sup_layer = sup_table['VSS'][0].layer_id
top_layer = sup_layer + 1
self.set_size_from_bound_box(top_layer, master.bound_box)
self.array_box = master.array_box
self._sch_params = master.sch_params
# get power fill width and spacing
sup_width = 1
edge_margin = 200
sup_spacing = self.grid.get_num_space_tracks(sup_layer, sup_width)
num_sup_tracks = master.num_tracks[-1]
# make sure every resistor sees the same power fill
if sup_width + sup_spacing > num_sup_tracks:
raise ValueError('Cannot draw power fill with width = %d' % sup_width)
while num_sup_tracks % (sup_width + sup_spacing) != 0:
sup_spacing += 1
x0 = master.core_offset[0]
# draw power fill and export supplies
vdd_list, vss_list = sup_table['VDD'], sup_table['VSS']
vdd_list, vss_list = self.do_power_fill(top_layer, 200, 200, vdd_warrs=vdd_list,
vss_warrs=vss_list, fill_width=sup_width,
fill_space=sup_spacing, x_margin=edge_margin,
y_margin=edge_margin, tr_offset=x0, unit_mode=True)
self.add_pin('VDD', vdd_list, show=show_pins)
self.add_pin('VSS', vss_list, show=show_pins)
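        # Illustrative arithmetic (not in the original): the spacing loop above keeps the
        # fill pattern commensurate with the resistor pitch. For example, with
        # num_sup_tracks = 12 and sup_width = 1, an initial sup_spacing = 4 fails
        # (12 % 5 == 2), so the loop bumps it to 5 (12 % 6 == 0) and every resistor
        # column then sees an identical power-fill pattern.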
| 44.708098 | 99 | 0.584751 |
4ca03ce5cccc025b85374645f3d02d451e94d3e4
| 2,332 |
py
|
Python
|
test_add_group.py
|
Katie777/Python_project
|
135f5c09dd77ac83dda658608171205efb3d4e68
|
[
"Apache-2.0"
] | null | null | null |
test_add_group.py
|
Katie777/Python_project
|
135f5c09dd77ac83dda658608171205efb3d4e68
|
[
"Apache-2.0"
] | null | null | null |
test_add_group.py
|
Katie777/Python_project
|
135f5c09dd77ac83dda658608171205efb3d4e68
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
from selenium.webdriver.firefox.webdriver import WebDriver
from selenium.webdriver.common.action_chains import ActionChains
import time, unittest
def is_alert_present(wd):
try:
wd.switch_to_alert().text
return True
except:
return False
class test_add_group(unittest.TestCase):
def setUp(self):
self.wd = WebDriver()
self.wd.implicitly_wait(60)
def test_test_add_group(self):
success = True
wd = self.wd
wd.get("http://localhost/addressbook/index.php")
wd.find_element_by_name("pass").click()
wd.find_element_by_name("pass").send_keys("\\undefined")
wd.find_element_by_name("user").click()
wd.find_element_by_name("user").clear()
wd.find_element_by_name("user").send_keys("admin")
wd.find_element_by_id("LoginForm").click()
wd.find_element_by_name("pass").click()
wd.find_element_by_name("pass").clear()
wd.find_element_by_name("pass").send_keys("secret")
wd.find_element_by_xpath("//form[@id='LoginForm']/input[3]").click()
wd.find_element_by_link_text("groups").click()
wd.find_element_by_name("new").click()
wd.find_element_by_name("group_name").click()
wd.find_element_by_name("group_name").clear()
wd.find_element_by_name("group_name").send_keys("ff")
wd.find_element_by_name("group_header").click()
wd.find_element_by_name("group_header").clear()
wd.find_element_by_name("group_header").send_keys("hh")
wd.find_element_by_name("group_footer").click()
wd.find_element_by_name("group_footer").clear()
wd.find_element_by_name("group_footer").send_keys("hh")
wd.find_element_by_xpath("//div[@id='content']/form").click()
wd.find_element_by_name("submit").click()
wd.find_element_by_link_text("group page").click()
wd.find_element_by_link_text("Logout").click()
wd.find_element_by_name("pass").click()
wd.find_element_by_name("pass").send_keys("\\undefined")
wd.find_element_by_name("user").click()
wd.find_element_by_name("user").send_keys("\\undefined")
self.assertTrue(success)
def tearDown(self):
self.wd.quit()
if __name__ == '__main__':
unittest.main()
| 40.206897 | 76 | 0.665952 |
3508265122b7f0292e81459384358d6f43bccb6e
| 901 |
py
|
Python
|
level4_linkedlist.py
|
hgleagle/python-challenge
|
801c6a5ca3d785f91e2b6d03856ef0b9983ba4f7
|
[
"MIT"
] | null | null | null |
level4_linkedlist.py
|
hgleagle/python-challenge
|
801c6a5ca3d785f91e2b6d03856ef0b9983ba4f7
|
[
"MIT"
] | null | null | null |
level4_linkedlist.py
|
hgleagle/python-challenge
|
801c6a5ca3d785f91e2b6d03856ef0b9983ba4f7
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
from urllib import request
from urllib.parse import urlparse, urljoin
import re
if __name__ == "__main__":
url = "http://www.pythonchallenge.com/pc/def/linkedlist.php"
with request.urlopen(url) as f:
html = f.read().decode()
data = re.findall('a href="(.*?)">', html, 0)[0]
next_url = urljoin(url, data)
olddata = data.split('=')[-1]
uri = next_url.replace(olddata, "%s")
    pattern = re.compile(r"and the next nothing is (\d+)")
while True:
print(next_url)
with request.urlopen(next_url) as f:
html = f.read().decode()
m = pattern.search(html)
if m is None:
break
num = m.group(1)
print("going to %s", num)
# if m.group(1) == str(16044):
# num = int(m.group(1)) // 2
next_url = uri % num
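            # Illustrative example (hypothetical values): if the first page is
            # ".../linkedlist.php?nothing=12345", then uri is ".../linkedlist.php?nothing=%s"
            # and each "and the next nothing is N" match rebuilds the next URL as uri % N.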
| 29.064516 | 64 | 0.532741 |
f06c518b7b6776bb134f48309c2f1c8dabc1e0a9
| 4,015 |
py
|
Python
|
ingest.py
|
automationlogic/hackday-data-miners
|
882e22c81f5b1a0d5a4a38b225cf78dce8b2b796
|
[
"MIT"
] | null | null | null |
ingest.py
|
automationlogic/hackday-data-miners
|
882e22c81f5b1a0d5a4a38b225cf78dce8b2b796
|
[
"MIT"
] | null | null | null |
ingest.py
|
automationlogic/hackday-data-miners
|
882e22c81f5b1a0d5a4a38b225cf78dce8b2b796
|
[
"MIT"
] | 1 |
2020-12-14T18:40:13.000Z
|
2020-12-14T18:40:13.000Z
|
import requests as rq
import logging
import os
import time
import datetime
import csv
import json
from random import randint
from google.cloud import bigquery
from google.cloud.exceptions import NotFound, Conflict
PRECIPITATION_TYPE = "pr"
TEMPERATURE_TYPE = "tas"
MONTHLY_MOVING_AVERAGE = "mavg"
print("Preparing..")
project_id = "data-miners-279116"
dataset_id = "wb_climate_data"
precipitation_table_id = "wb_climate_precipitations"
temperature_table_id = "wb_climate_temperatures"
bq_client = bigquery.Client()
precipitation_table_ref = bq_client.dataset(dataset_id).table(precipitation_table_id)
temperature_table_ref = bq_client.dataset(dataset_id).table(temperature_table_id)
def ingest(event, context):
create_table(precipitation_table_ref)
create_table(temperature_table_ref)
for start_year in [1980, 2020]:
end_year = start_year + 19
extract(precipitation_table_ref, MONTHLY_MOVING_AVERAGE, PRECIPITATION_TYPE, start_year, end_year, "GBR")
extract(temperature_table_ref, MONTHLY_MOVING_AVERAGE, TEMPERATURE_TYPE, start_year, end_year, "GBR")
def extract(table_ref, api_type, var, start_year, end_year, country_code):
url = f"http://climatedataapi.worldbank.org/climateweb/rest/v1/country/{api_type}/{var}/{start_year}/{end_year}/{country_code}.json"
print(f"URL: {url}")
response = rq.get(url)
response.raise_for_status() # raise Exception if not a 200 OK
raw_json_rows = response.json()
print("Raw rows in API response:")
print(raw_json_rows)
json_rows = []
for raw_row in raw_json_rows:
json_rows.append({
"GCM": raw_row["gcm"],
"var": raw_row["variable"],
"from_year": raw_row["fromYear"],
"to_year": raw_row["toYear"],
"Jan": raw_row["monthVals"][0],
"Feb": raw_row["monthVals"][1],
"Mar": raw_row["monthVals"][2],
"Apr": raw_row["monthVals"][3],
"May": raw_row["monthVals"][4],
"Jun": raw_row["monthVals"][5],
"Jul": raw_row["monthVals"][6],
"Aug": raw_row["monthVals"][7],
"Sep": raw_row["monthVals"][8],
"Oct": raw_row["monthVals"][9],
"Nov": raw_row["monthVals"][10],
"Dec": raw_row["monthVals"][11],
})
print("JSON rows (cleaned):")
print(json_rows)
print("About to insert rows in BigQuery...")
insert_response = bq_client.insert_rows_json(table_ref, json_rows)
print("Insert response:")
print(insert_response)
def create_table(table_ref):
schema = [
bigquery.SchemaField("GCM", "STRING", "NULLABLE"),
bigquery.SchemaField("var", "STRING", "NULLABLE"),
bigquery.SchemaField("from_year", "INTEGER", "NULLABLE"),
bigquery.SchemaField("to_year", "INTEGER", "NULLABLE"),
bigquery.SchemaField("Jan", "FLOAT", "NULLABLE"),
bigquery.SchemaField("Feb", "FLOAT", "NULLABLE"),
bigquery.SchemaField("Mar", "FLOAT", "NULLABLE"),
bigquery.SchemaField("Apr", "FLOAT", "NULLABLE"),
bigquery.SchemaField("May", "FLOAT", "NULLABLE"),
bigquery.SchemaField("Jun", "FLOAT", "NULLABLE"),
bigquery.SchemaField("Jul", "FLOAT", "NULLABLE"),
bigquery.SchemaField("Aug", "FLOAT", "NULLABLE"),
bigquery.SchemaField("Sep", "FLOAT", "NULLABLE"),
bigquery.SchemaField("Oct", "FLOAT", "NULLABLE"),
bigquery.SchemaField("Nov", "FLOAT", "NULLABLE"),
bigquery.SchemaField("Dec", "FLOAT", "NULLABLE")
]
table = bigquery.Table(table_ref, schema=schema)
try:
bq_client.get_table(table)
except NotFound:
try:
table = bq_client.create_table(table)
print("Created table {}.{}.{}".format(table.project, table.dataset_id, table.table_id))
# print("Going to sleep for 90 seconds to ensure data availability in newly created table")
# time.sleep(90)
except Conflict:
pass
return
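# Minimal local-run sketch (assumption, not part of the deployed Cloud Function): with
# application-default credentials for the project above, the entry point can be
# exercised directly with dummy event/context arguments.
#
# if __name__ == "__main__":
#     ingest(event=None, context=None)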
| 34.913043 | 136 | 0.649315 |
0280863f83ef18e1aefd1d9192a26b6b17c06257
| 4,962 |
py
|
Python
|
homeassistant/components/websocket_api/connection.py
|
liangleslie/core
|
cc807b4d597daaaadc92df4a93c6e30da4f570c6
|
[
"Apache-2.0"
] | 30,023 |
2016-04-13T10:17:53.000Z
|
2020-03-02T12:56:31.000Z
|
homeassistant/components/websocket_api/connection.py
|
liangleslie/core
|
cc807b4d597daaaadc92df4a93c6e30da4f570c6
|
[
"Apache-2.0"
] | 24,710 |
2016-04-13T08:27:26.000Z
|
2020-03-02T12:59:13.000Z
|
homeassistant/components/websocket_api/connection.py
|
liangleslie/core
|
cc807b4d597daaaadc92df4a93c6e30da4f570c6
|
[
"Apache-2.0"
] | 11,956 |
2016-04-13T18:42:31.000Z
|
2020-03-02T09:32:12.000Z
|
"""Connection session."""
from __future__ import annotations
import asyncio
from collections.abc import Callable, Hashable
from contextvars import ContextVar
from typing import TYPE_CHECKING, Any
import voluptuous as vol
from homeassistant.auth.models import RefreshToken, User
from homeassistant.core import Context, HomeAssistant, callback
from homeassistant.exceptions import HomeAssistantError, Unauthorized
from . import const, messages
if TYPE_CHECKING:
from .http import WebSocketAdapter
current_connection = ContextVar["ActiveConnection | None"](
"current_connection", default=None
)
class ActiveConnection:
"""Handle an active websocket client connection."""
def __init__(
self,
logger: WebSocketAdapter,
hass: HomeAssistant,
send_message: Callable[[str | dict[str, Any] | Callable[[], str]], None],
user: User,
refresh_token: RefreshToken,
) -> None:
"""Initialize an active connection."""
self.logger = logger
self.hass = hass
self.send_message = send_message
self.user = user
self.refresh_token_id = refresh_token.id
self.subscriptions: dict[Hashable, Callable[[], Any]] = {}
self.last_id = 0
current_connection.set(self)
def context(self, msg: dict[str, Any]) -> Context:
"""Return a context."""
return Context(user_id=self.user.id)
@callback
def send_result(self, msg_id: int, result: Any | None = None) -> None:
"""Send a result message."""
self.send_message(messages.result_message(msg_id, result))
async def send_big_result(self, msg_id: int, result: Any) -> None:
"""Send a result message that would be expensive to JSON serialize."""
content = await self.hass.async_add_executor_job(
const.JSON_DUMP, messages.result_message(msg_id, result)
)
self.send_message(content)
@callback
def send_error(self, msg_id: int, code: str, message: str) -> None:
"""Send a error message."""
self.send_message(messages.error_message(msg_id, code, message))
@callback
def async_handle(self, msg: dict[str, Any]) -> None:
"""Handle a single incoming message."""
handlers = self.hass.data[const.DOMAIN]
try:
msg = messages.MINIMAL_MESSAGE_SCHEMA(msg)
cur_id = msg["id"]
except vol.Invalid:
self.logger.error("Received invalid command", msg)
self.send_message(
messages.error_message(
msg.get("id"),
const.ERR_INVALID_FORMAT,
"Message incorrectly formatted.",
)
)
return
if cur_id <= self.last_id:
self.send_message(
messages.error_message(
cur_id, const.ERR_ID_REUSE, "Identifier values have to increase."
)
)
return
if msg["type"] not in handlers:
self.logger.info("Received unknown command: {}".format(msg["type"]))
self.send_message(
messages.error_message(
cur_id, const.ERR_UNKNOWN_COMMAND, "Unknown command."
)
)
return
handler, schema = handlers[msg["type"]]
try:
handler(self.hass, self, schema(msg))
except Exception as err: # pylint: disable=broad-except
self.async_handle_exception(msg, err)
self.last_id = cur_id
@callback
def async_handle_close(self) -> None:
"""Handle closing down connection."""
for unsub in self.subscriptions.values():
unsub()
@callback
def async_handle_exception(self, msg: dict[str, Any], err: Exception) -> None:
"""Handle an exception while processing a handler."""
log_handler = self.logger.error
code = const.ERR_UNKNOWN_ERROR
err_message = None
if isinstance(err, Unauthorized):
code = const.ERR_UNAUTHORIZED
err_message = "Unauthorized"
elif isinstance(err, vol.Invalid):
code = const.ERR_INVALID_FORMAT
err_message = vol.humanize.humanize_error(msg, err)
elif isinstance(err, asyncio.TimeoutError):
code = const.ERR_TIMEOUT
err_message = "Timeout"
elif isinstance(err, HomeAssistantError):
err_message = str(err)
# This if-check matches all other errors but also matches errors which
# result in an empty message. In that case we will also log the stack
# trace so it can be fixed.
if not err_message:
err_message = "Unknown error"
log_handler = self.logger.exception
log_handler("Error handling message: %s (%s)", err_message, code)
self.send_message(messages.error_message(msg["id"], code, err_message))
| 33.302013 | 85 | 0.616082 |
9c32cd2951989e4535d0af5bc9a761c15560f13a
| 44,482 |
py
|
Python
|
icoshift3/icoshift.py
|
Sour-Smelno/icoshift_py3
|
1a4b6947bfa61fb66682e04372d92865bd517637
|
[
"BSD-2-Clause-FreeBSD"
] | null | null | null |
icoshift3/icoshift.py
|
Sour-Smelno/icoshift_py3
|
1a4b6947bfa61fb66682e04372d92865bd517637
|
[
"BSD-2-Clause-FreeBSD"
] | null | null | null |
icoshift3/icoshift.py
|
Sour-Smelno/icoshift_py3
|
1a4b6947bfa61fb66682e04372d92865bd517637
|
[
"BSD-2-Clause-FreeBSD"
] | null | null | null |
from __future__ import division, print_function
import numpy
import scipy as sp
from scipy.sparse import dia_matrix
from numpy import nanmean, nanmedian
from copy import copy
import sys
import logging
import re
try:
basestring
except NameError:
basestring = str
def is_number(s):
try:
if isinstance(s, basestring):
if s.isdigit():
return True
else:
float(s)
return True
elif isinstance(s, int):
return True
elif isinstance(s, list) and len(s) != 0:
if(all(isinstance(x, (int, float)) for x in s)):
# l = [n for n in s if n.isdigit()]
# if len(l) == len(s):
return True
else:
float(s)
return True
except ValueError:
return False
def cat(dim, *args):
# type: (object, object) -> object
try:
return numpy.concatenate([r for r in args if r.shape[0] > 0], axis=dim)
except:
return numpy.stack([r for r in args if r.shape[0]>0], axis=-1)
def sortrows(a, i):
i = numpy.argsort(a[:, i])
b = a[i, :]
return b
def nans(r, c):
a = numpy.empty((r, c))
a[:] = numpy.nan
return a
def min_with_indices(d):
d = d.flatten()
ml = numpy.min(d)
mi = numpy.array(list(d).index(ml))
return ml, mi
def max_with_indices(d):
d = d.flatten()
ml = numpy.max(d)
mi = numpy.array(list(d).index(ml))
return ml, mi
def icoshift(xt, xp, inter='whole', n='f', scale=None, coshift_preprocessing=False,
coshift_preprocessing_max_shift=None, fill_with_previous=True, average2_multiplier=3):
'''
interval Correlation Optimized shifting
[xcs, ints, ind, target] = icoshift(xt, xp, inter[, n[, options[, scale]]])
Splits a spectral database into "inter" intervals and coshift each vector
left-right to get the maximum correlation toward a reference or toward an
average spectrum in that interval. Missing parts on the edges after
shifting are filled with "closest" value or with "NaNs".
INPUT
xt (1 * mt) : target vector.
Use 'average' if you want to use the average spectrum as a reference
Use 'median' if you want to use the median spectrum as a reference
Use 'max' if you want to use for each segment the corresponding actual spectrum having max features as a reference
Use 'average2' for using the average of the average multiplied for a requested number (default=3) as a reference
xp (np * mp) : Matrix of sample vectors to be aligned as a sample-set
towards common target xt
inter : definition of alignment mode
'whole' : it works on the whole spectra (no intervals).
nint : (numeric) number of many intervals.
'ndata' : (string) length of regular intervals
(remainders attached to the last).
[I1s I1e, I2s...]: interval definition. ('i(n)s' interval
n start, 'i(n)e' interval n end).
(refs:refe) : shift the whole spectra according to a
reference signal(s) in the region
refs:refe (in sampling points)
'refs-refe' : `shift the whole spectra according to a
reference signal(s) in the region
refs-refe (in scale units)
n (1 * 1) : (optional)
n = integer n.: maximum shift correction in data
points/scale units (cf. options[4])
in x/rows. It must be >0
n = 'b' (best): the algorithm search for the best n
for each interval (it can be time consuming!)
n = 'f' (fast): fast search for the best n for each interval (default)
a logging.warn is displayed for each interval if "n" appears too small
options (1 * 5): (optional)
(0) triggers plots & warnings:
0 : no on-screen output
1 : only warnings (default)
2 : warnings and plots
(1) selects filling mode
0 : using not a number
1 : using previous point (default)
(2) turns on Co-shift preprocessing
0 : no Co-shift preprocessing (default)
1 :
(3)
it has to be given in scale units if option(5)=1
(4) 0 : intervals are given in No. of datapoints (deafult)
1 : intervals are given in ppm --> use scale for inter and n
scale : vector of scalars used as axis for plot (optional)
coshift_preprocessing (bool) (optional; default=False): Execute a Co-shift step before carrying out iCOshift
coshift_preprocessing_max_shift (int) (optional): Max allowed shift for the Co-shift preprocessing
(default = equal to n if not specified)
fill_with_previous (bool) (optional; default=True): Fill using previous point (default); set to False to np.nan
average2_multiplier (int) (optional; default=3): Multiplier used for the average2 algorithm
OUTPUT
xcs (np * mt): shift corrected vector or matrix
ints (ni * 4) : defined intervals (Int. No., starting point, ending point, size)
ind (np * ni): matrix of indexes reporting how many points each spectrum
has been shifted for each interval (+ left, - right)
target (1 x mp): actual target used for the final alignment
Authors:
Francesco Savorani - Department of Food Science
Quality & Technology - Spectroscopy and Chemometrics group
Faculty of Sciences
University of Copenhagen - Denmark
email: [email protected] - www.models.life.ku.dk
Giorgio Tomasi - Department of Basic Science and Environment
Soil and Environmental Chemistry group
Faculty of Life Sciences
University of Copenhagen - Denmark
email: [email protected] - www.igm.life.ku.dk
Python implementation by:
Martin Fitzpatrick - Rheumatology Research Group
Centre for Translational Inflammation Research
School of Immunity and Infection
University of Birmingham - United Kingdom
email: [email protected]
170508 (FrSa) first working code
211008 (FrSa) improvements and bugs correction
111108 (Frsa) Splitting into regular intervals (number of intervals or wideness in datapoints) implemented
141108 (GT) FFT alignment function implemented
171108 (FrSa) options implemented
241108 (FrSa) Automatic search for the best or the fastest n for each interval implemented
261108 (FrSa) Plots improved
021208 (FrSa) 'whole' case added & robustness improved
050309 (GT) Implentation of interpolation modes (nan); Cosmetics; Graphics
240309 (GT) Fixed bug in handling missing values
060709 (FrSa) 'max' target and output 'target' added. Some speed, plot and robustness improvements
241109 (GT) Interval and band definition in units (added options[4])
021209 (GT) Minor debugging for the handling of options[4]
151209 (FrSa) Cosmetics and minor debugging for the handling of options[4]
151110 (FrSa) Option 'Max' works now also for alignment towards a reference signal
310311 (FrSa) Bugfix for the 'whole' case when mp < 101
030712 (FrSa) Introducing the 'average2' xt (target) for a better automatic target definition. Plots updated to include also this case
281023 (MF) Initial implementation of Python version of icoshift algorithm. PLOTS NOT INCLUDED
'''
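    # Illustrative usage sketch (shapes and values assumed, not from the original docs):
    #
    #     import numpy
    #     xp = numpy.random.randn(10, 500)   # 10 spectra of 500 points each
    #     xcs, ints, ind, target = icoshift('average', xp, inter=5, n='f')
    #
    # aligns the 10 spectra towards their average spectrum over 5 regular intervals and
    # returns the corrected matrix, the interval table, the per-interval shifts and the
    # target actually used.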
# RETURNS [xcs, ints, ind, target]
# Take a copy of the xp vector since we mangle it somewhat
xp = copy(xp)
if scale is None:
using_custom_scale = False
scale = numpy.array(range(0, xp.shape[1]))
else:
using_custom_scale = True
dec_scale = numpy.diff(scale)
inc_scale = scale[0] - scale[1]
flag_scale_dir = inc_scale < 0
flag_di_scale = numpy.any(abs(dec_scale) > 2 * numpy.min(abs(dec_scale)))
if len(scale) != max(scale.shape):
            raise Exception('scale must be a vector')
        if max(scale.shape) != xp.shape[1]:
            raise Exception('x and scale are not of compatible length %d vs. %d' % (max(scale.shape), xp.shape[1]))
        if inc_scale == 0 or not numpy.all(numpy.sign(dec_scale) == - numpy.sign(inc_scale)):
            raise Exception('scale must be strictly monotonic')
if coshift_preprocessing_max_shift is None:
coshift_preprocessing_max_shift = n
# ERRORS CHECK
# Constant
# To avoid out of memory errors when 'whole', the job is divided in
# blocks of 32MB
block_size = 2 ** 25
max_flag = False
avg2_flag = False
xt_basis = xt
if isinstance(xt, str):
if xt == 'average':
xt = nanmean(xp, axis=0).reshape(1, -1)
elif xt == 'median':
xt = nanmedian(xp, axis=0).reshape(1, -1)
elif xt == 'average2':
xt = nanmean(xp, axis=0).reshape(1,-1)
avg2_flag = True
elif xt == 'max':
xt = numpy.zeros((1, xp.shape[1]))
max_flag = True
nt = xt.shape
mt = 1
np, mp = xp.shape
if mt != mp:
raise(Exception, 'Target "xt" and sample "xp" must have the same number of columns')
if is_number(inter):
try:
if isinstance(inter, basestring):
if inter.isdigit():
inter = int(inter)
else:
inter = float(inter)
if inter > mp:
                raise Exception('Number of intervals "inter" must be smaller than number of variables in xp')
        except:
            if (all(a >= mp for a in inter)):
                raise Exception('Number of intervals "inter" must be smaller than number of variables in xp')
# Set defaults if the settings are not set
# options = [options[oi] if oi < len(options) else d for oi, d in enumerate([1, 1, 0, 0, 0]) ]
if using_custom_scale:
prec = abs(numpy.min(numpy.unique(dec_scale)))
if flag_di_scale:
logging.warn('Scale vector is not continuous, the defined intervals might not reflect actual ranges')
flag_coshift = (not inter == 'whole') and coshift_preprocessing
if flag_coshift:
if using_custom_scale:
coshift_preprocessing_max_shift = dscal2dpts(coshift_preprocessing_max_shift, scale, prec)
if max_flag:
xt = nanmean(xp, axis=0).reshape(1,-1)
co_shift_scale = scale if using_custom_scale else None
xp, nil, wint, _ = icoshift(xt, xp, 'whole',
coshift_preprocessing=False,
coshift_preprocessing_max_shift=coshift_preprocessing_max_shift,
scale=co_shift_scale,
fill_with_previous=True,
average2_multiplier=average2_multiplier )
if xt_basis == 'average' or xt_basis == 'average2':
xt = nanmean(xp).reshape(1, -1)
elif xt_basis == 'median':
xt = nanmedian(xp).reshape(1, -1)
else: # max?
xt = xt.reshape(1,-1)
whole = False
flag2 = False
if isinstance(inter, basestring):
if inter == 'whole':
inter = numpy.array([0, mp - 1]).reshape(1, -1)
whole = True
        elif '-' in inter:
            # reference region given in scale units, e.g. '1.2-3.4'
            tokens = re.findall('(-{0,1}\\d*\\.{0,1}\\d*)-(-{0,1}\\d*\\.{0,1}\\d*)', inter)
            interv = numpy.sort(scal2pts([float(v) for v in tokens[0]], scale, prec))
            if interv.size != 2:
                raise Exception('Invalid range for reference signal')
            inter = numpy.arange(int(interv[0]), int(interv[1]) + 1)
            flag2 = True
else:
interv = float(inter)
if using_custom_scale:
interv = dscal2dpts(interv, scale, prec)
else:
interv = int(round(interv))
inter = defints(xp, interv)
elif isinstance(inter, int):
# Build interval list
# e.g. 5 intervals on 32768 to match MATLAB algorithm should be
#0, 6554, 6554, 13108, 13108, 19662, 19662, 26215, 26215, 32767
# Distributes vars_left_over in the first "vars_left_over" intervals
# startint = [(1:(N+1):(remain - 1)*(N+1)+1)'; ((remain - 1) * (N + 1) + 1 + 1 + N:N:mP)'];
remain = mp % inter
step = int( float(mp) / inter )
segments = []
o = 0
while o < mp:
segments.extend([o, o])
if remain > 0:
o += 1
remain -=1
o += step
# Chop of duplicate zero
segments = segments[1:]
segments.append(mp) # Add on final step
inter = numpy.array(segments, dtype=int).reshape(1, -1)
logging.info("Calculated intervals: %s" % inter)
elif isinstance(inter, (list, tuple)): # if is a list of tuples ; add else
inter = numpy.asarray(inter)
flag2 = numpy.array_equal(numpy.fix(inter), inter) and max(inter.shape) > 1 and numpy.array_equal(
numpy.array([1, numpy.max(inter) - numpy.min(inter) + 1]).reshape(1, -1), inter.shape) and numpy.array_equal(numpy.unique(numpy.diff(inter, 1, 2)), 1)
if not flag2 and using_custom_scale:
inter = scal2pts(inter, scale, prec)
if numpy.any(inter[0:2:] > inter[1:2:]) and not flag_scale_dir:
inter = numpy.flipud(numpy.reshape(inter, 2, max(inter.shape) / 2))
inter = inter[:].T
else:
        raise Exception('The number of intervals must be "whole", an integer, or a list of tuples of integers')
if(len(inter.shape) > 1):
nint, mint = inter.shape
else:
nint = 1
mint = inter.shape[0]
inter = inter.reshape(1,-1)
scfl = numpy.array_equal(numpy.fix(scale), scale) and not using_custom_scale
if isinstance(inter, basestring) and n not in ['b', 'f']:
raise(Exception, '"n" must be a scalar b or f')
elif isinstance(n, int) or isinstance(n, float):
if n <= 0:
raise(Exception, 'Shift(s) "n" must be larger than zero')
if scfl and not isinstance(n, int):
logging.warn('"n" must be an integer if scale is ignored; first element (i.e. %d) used' % n)
n = numpy.round(n)
else:
if using_custom_scale:
n = dscal2dpts(n, scale, prec)
if not flag2 and numpy.any(numpy.diff(numpy.reshape(inter, (2, mint // 2)), 1, 0) < n):
raise(Exception, 'Shift "n" must be not larger than the size of the smallest interval')
flag = numpy.isnan(cat(0, xt.reshape(1, -1), xp))
frag = False
ref = lambda e: numpy.reshape(e, (2, max(e.shape) // 2))
vec = lambda a: a.flatten()
mi, pmi = min_with_indices(inter)
ma, pma = max_with_indices(inter)
# There are missing values in the dataset; so remove them before starting
# if they line up between datasets
if vec(flag).any():
if numpy.array_equal(flag[numpy.ones((np, 1), dtype=int), :], flag[1:,:]):
select = numpy.any
else:
select = numpy.all
if flag2:
intern_ = remove_nan(
numpy.array([0, pma - pmi]).reshape(1, -1), cat(0, xt[:, inter], xp[:, inter]), select)
if intern_.shape[0] != 1:
                raise Exception('Reference region contains a pattern of missing values that cannot be handled consistently')
elif not numpy.array_equal(intern_, numpy.array([1, inter[-2] - inter[0] + 1]).reshape(1, -1)):
logging.warn('The missing values at the boundaries of the reference region will be ignored')
intern_ = range(inter[0] + intern_[0], (inter[0] + intern_[1] + 1))
else:
intern_, flag_nan = remove_nan(
ref(inter), cat(0, xt, xp), select, flags=True)
intern_ = intern_.flatten()
if 0 in intern_.shape:
                raise Exception('Cannot handle this pattern of missing values.')
if max(intern_.shape) != max(inter.shape) and not flag2:
if whole:
if max(intern_.shape) > 2:
xseg, in_or = extract_segments(cat(0, xt, xp), ref(intern_))
InOrf = in_or.flatten()
inter = numpy.array([InOrf[0], InOrf[-1]]).reshape(1, -1) #check this
in_or = cat(1, ref(intern_), in_or)
xp = xseg[1:, :]
xt = xseg[0, :].reshape(1, -1)
frag = True
else:
logging.warn('To handle the pattern of missing values, %d segments are created/removed' % (abs(max(intern_.shape) - max(inter.shape)) / 2) )
inter = intern_
nint, mint = inter.shape
xcs = xp
mi, pmi = min_with_indices(inter)
ma, pma = max_with_indices(inter)
flag = max(inter.shape) > 1 and numpy.array_equal(numpy.array([1, pma - pmi + 1]).reshape(1, -1), numpy.array(inter.shape).reshape(1, -1)) \
and numpy.array_equal(numpy.unique(numpy.diff(inter.reshape(1,-1), 1, 1)),numpy.array([1]))
if flag:
if n == 'b':
            logging.info('Automatic searching for the best "n" for the reference window "ref_w" enabled. This can take a long time.')
elif n == 'f':
logging.info('Fast automatic searching for the best "n" for the reference window "ref_w" enabled.')
if max_flag:
amax, bmax = max_with_indices( numpy.sum(xp) )
xt[mi:ma] = xp[bmax, mi:ma]
ind = nans(np, 1)
missind = numpy.logical_not(numpy.all(numpy.isnan(xp), axis=1))
xcs[missind, :], ind[missind], _ = coshifta(xt, xp[missind,:], inter, n, fill_with_previous=fill_with_previous,
block_size=block_size)
ints = numpy.array([1, mi, ma]).reshape(1, -1)
else:
if mint > 1:
if mint % 2:
                raise Exception('Wrong definition of intervals ("inter")')
            if ma > mp:
                raise Exception('Intervals ("inter") exceed samples matrix dimension')
# allint=[(1:round(mint/2))' inter(1:2:mint)' inter(2:2:mint)'];
# allint =
# 1 1 6555
# 2 6555 13109
# 3 13109 19663
# 4 19663 26216
# 5 26216 32768
# ans =
# 5 3
inter_list = list(inter.flatten())
allint = numpy.array([
range(mint//2),
inter_list[0::2],
inter_list[1::2],
])
allint = allint.T
sinter = numpy.sort(allint, axis=0)
intdif = numpy.diff(sinter)
if numpy.any(intdif[1:2:max(intdif.shape)] < 0):
logging.warn('The user-defined intervals are overlapping: is that intentional?')
ints = allint
ints = numpy.append(ints, ints[:, 2] - ints[:, 1])
ind = numpy.zeros((np, allint.shape[0]))
if n == 'b':
logging.info('Automatic searching for the best "n" for each interval enabled. This can take a long time...')
elif n == 'f':
logging.info('Fast automatic searching for the best "n" for each interval enabled')
for i in range(0, allint.shape[0]):
if whole:
logging.info('Co-shifting the whole %s samples...' % np)
else:
logging.info('Co-shifting interval no. %s of %s...' % (i, allint.shape[0]) )
# FIXME? 0:2, or 1:2?
intervalnow = xp[:, allint[i, 1]:allint[i, 2]]
if max_flag:
amax, bmax = max_with_indices(numpy.sum(intervalnow, axis=1))
target = intervalnow[bmax, :]
xt[0, allint[i, 1]:allint[i, 2]] = target
else:
target = xt[:, allint[i, 1]:allint[i, 2]]
missind = ~numpy.all(numpy.isnan(intervalnow), axis=1)
if not numpy.all(numpy.isnan(target)) and numpy.any(missind):
cosh_interval, loc_ind, _ = coshifta(target, intervalnow[missind, :], numpy.array([0]), n,
fill_with_previous=fill_with_previous, block_size=block_size)
xcs[missind, allint[i, 1]:allint[i, 2]] = cosh_interval
ind[missind, i] = loc_ind.flatten()
else:
xcs[:, allint[i, 1]:allint[i, 1]] = intervalnow
if avg2_flag:
for i in range(0, allint.shape[0]):
if whole:
logging.info('Co-shifting again the whole %d samples... ' % np)
else:
logging.info('Co-shifting again interval no. %d of %d... ' % (i, allint.shape[0]))
intervalnow = xp[:, allint[i, 1]:allint[i, 2]]
target1 = numpy.mean(xcs[:, allint[i, 1]:allint[i, 2]], axis=0)
min_interv = numpy.min(target1)
target = (target1 - min_interv) * average2_multiplier
missind = ~numpy.all(numpy.isnan(intervalnow), 1)
if (not numpy.all(numpy.isnan(target))) and (numpy.sum(missind) != 0):
                    cosh_interval, loc_ind, _ = coshifta(target, intervalnow[missind, :], numpy.array([0]), n,
                                                         fill_with_previous=fill_with_previous, block_size=block_size)
xcs[missind, allint[i, 1]:allint[i, 2]] = cosh_interval
xt[0, allint[i, 1]:allint[i, 2]] = target
ind[missind, i] = loc_ind.T
else:
xcs[:, allint[i, 1]:allint[i, 2]] = intervalnow
if frag:
xn = nans(np, mp)
for i_sam in range(0, np):
for i_seg in range(0, in_or.shape[0]):
xn[i_sam, in_or[i_seg, 0]:in_or[i_seg, 1]
+ 1] = xcs[i_sam, in_or[i_seg, 2]:in_or[i_seg, 3] + 1]
if loc_ind[i_sam] < 0:
if flag_nan[i_seg, 0, i_sam]:
xn[i_sam, in_or[i_seg, 0]:in_or[i_seg, 0]
- loc_ind[i_sam, 0] + 1] = numpy.nan
else:
if loc_ind[i_sam] > 0:
if flag_nan[i_seg, 1, i_sam]:
xn[i_sam, (int(in_or[i_seg, 1] - loc_ind[i_sam, 0] + 1)):in_or[i_seg, 1]+1] = numpy.nan
xcs = xn
target = xt
if flag_coshift:
ind = ind + wint * numpy.ones( (1, ind.shape[1]) )
return xcs, ints, ind, target
def coshifta(xt, xp, ref_w=0, n=numpy.array([1, 2, 3]), fill_with_previous=True, block_size=(2 ** 25)):
if len(ref_w) == 0 or ref_w.shape[0] == 0:
ref_w = numpy.array([0])
if numpy.all(ref_w >= 0):
rw = max(ref_w.shape)
else:
rw = 1
if fill_with_previous:
filling = -numpy.inf
else:
filling = numpy.nan
if isinstance(xt, str) and xt == 'average':
xt = nanmean(xp, axis=0)
# Make two dimensional
xt = xt.reshape(1, -1)
nt, mt = xt.shape
np, mp = xp.shape
if len(ref_w.shape) > 1:
nr, mr = ref_w.shape
else:
nr, mr = ref_w.shape[0], 0
logging.info('mt=%d, mp=%d' % (mt, mp))
if mt != mp:
raise(Exception, 'Target "xt" and sample "xp" must be of compatible size (%d, %d)' % (mt, mp) )
if not isinstance(n, str) and numpy.any(n <= 0):
raise(Exception, 'shift(s) "n" must be larger than zero')
if nr != 1:
raise(Exception, 'Reference windows "ref_w" must be either a single vector or 0')
if rw > 1 and (numpy.min(ref_w) < 1) or (numpy.max(ref_w) > mt):
raise(Exception, 'Reference window "ref_w" must be a subset of xp')
if nt != 1:
raise(Exception, 'Target "xt" must be a single row spectrum/chromatogram')
auto = 0
if n == 'b':
auto = 1
if rw != 1:
n = int(0.05 * mr)
n = 10 if n < 10 else n
src_step = int(mr * 0.05)
else:
n = int(0.05 * mp)
n = 10 if n < 10 else n
src_step = int(mp * 0.05)
try_last = 0
elif n == 'f':
auto = 1
if rw != 1:
n = mr - 1
src_step = numpy.round(mr / 2) - 1
else:
n = mp - 1
src_step = numpy.round(mp / 2) - 1
try_last = 1
if nt != 1:
        raise Exception('Target "xt" must be a single row spectrum/chromatogram')
xw = nans(np, mp)
ind = numpy.zeros((1, np))
n_blocks = int(numpy.ceil(sys.getsizeof(xp) / block_size))
sam_xblock = numpy.array([int(np / n_blocks)])
sam_xblock = sam_xblock.T
    ind_blocks = numpy.tile(sam_xblock, n_blocks)
    ind_blocks[0:int(np % sam_xblock)] = sam_xblock + 1
    ind_blocks = numpy.concatenate((numpy.array([0], dtype=ind_blocks.dtype), numpy.cumsum(ind_blocks, 0)))
if auto == 1:
while auto == 1:
if filling == -numpy.inf:
xtemp = cat(1, numpy.tile(xp[:, :1], (1, n)),
xp, numpy.tile(xp[:, -1:, ], (1, n)))
elif numpy.isnan(filling):
# FIXME
xtemp = cat(1,
nans(np, n), xp, nans(np, n))
if rw == 1:
ref_w = numpy.arange(0, mp).reshape(1,-1)
ind = nans(np, 1)
r = False
for i_block in range(0, n_blocks):
block_indices = numpy.array(range(ind_blocks[i_block], ind_blocks[i_block + 1]))
xpTemp = xp[:, ref_w[0,:]]
xpTemp = xpTemp[block_indices,:]
# xpTemp = xpTemp.take(ref_w, axis=1)
# xpTemp = xpTemp.reshape(max(block_indices.shape),max(ref_w.shape))
_, ind[block_indices], ri = cc_fft_shift(xt[0, ref_w].reshape(1,-1), xpTemp,
numpy.array([-n, n, 2, 1, -99999], dtype=int) )
if not r:
r = numpy.empty((0, ri.shape[1]))
r = cat(0, r, ri).T
temp_index = range(-n, n+1)
for i_sam in range(0, np):
index = numpy.flatnonzero(temp_index == ind[i_sam])[0]
t_index = range(index, index+mp)
xw[i_sam, :] = [xtemp[i_sam, j] for j in t_index]
if (numpy.max(abs(ind)) == n) and try_last != 1:
if n + src_step >= ref_w.shape[0]:
try_last = 1
continue
n += src_step
continue
else:
if (numpy.max(abs(ind)) < n) and n + src_step < len(ref_w) and try_last != 1:
n += src_step
try_last = 1
continue
else:
auto = 0
logging.info('Best shift allowed for this interval = %d' % n)
else:
if filling == -numpy.inf:
xtemp = cat(1, numpy.tile(xp[:, :1], (1., n)),
xp, numpy.tile(xp[:, -1:, ], (1., n)))
elif numpy.isnan(filling):
xtemp = cat(1,
nans(np, n), xp, nans(np, n))
if rw == 1:
ref_w = numpy.arange(0, mp) #.reshape(1,-1)
ind = nans(np, 1)
r = numpy.array([])
for i_block in range(n_blocks):
block_indices = range(ind_blocks[i_block], ind_blocks[i_block + 1])
dummy, ind[block_indices], ri = cc_fft_shift(xt[0, ref_w].reshape(1,-1), xp[block_indices, :][:, ref_w],
numpy.array([-n, n, 2, 1, filling]))
r = cat(0, r, ri)
temp_index = numpy.arange(-n, n+1)
for i_sam in range(0, np):
index = numpy.flatnonzero(temp_index == ind[i_sam])
xw[i_sam, :] = xtemp[i_sam, index:index + mp]
if numpy.max(abs(ind)) == n:
logging.warn('Scrolling window size "n" may not be enough wide because extreme limit has been reached')
return xw, ind, r
def defints(xp, interv):
np, mp = xp.shape
sizechk = mp / interv - round(mp / interv)
plus = (mp / interv - round(mp / interv)) * interv
logging.warn('The last interval will not fulfill the selected intervals size "inter" = %f' % interv)
if plus >= 0:
logging.warn('Size of the last interval = %d ' % plus)
else:
logging.warn('Size of the last interval = %d' % (interv + plus))
if sizechk != 0:
logging.info('The last interval will not fulfill the selected intervals size "inter"=%f.' % interv)
logging.info('Size of the last interval = %f ' % plus)
    t = list(range(0, (mp + 1), interv))
    t.extend([mp])
    # t = cat(1, numpy.array(range(0, (mp + 1), interv)), numpy.array(mp))
    if t[-1] == t[-2]:
        del t[-1]
t = cat(1, numpy.array(t[0: - 1]), numpy.array(t[1:])-1)
inter = t.reshape(-1,1)[:,0]
return inter
def cc_fft_shift(t, x=False, options=numpy.array([])):
dim_x = numpy.array(x.shape)
dim_t = numpy.array(t.shape)
options_default = numpy.array([-numpy.fix(dim_t[-1] * 0.5), numpy.fix(dim_t[-1] * 0.5), len(t.shape) -1, 1, numpy.nan])
options = numpy.array([options[oi] if oi < len(options) else d for oi, d in enumerate(options_default)], dtype=int)
options[numpy.isnan(options)] = options_default[numpy.isnan(options)]
if options[0] > options[1]:
        raise Exception('Lower bound for shift is larger than upper bound')
    time_dim = int(options[2] - 1)  # options[2] is a 1-based (MATLAB-style) dimension; convert to 0-based axis
    if dim_x[time_dim] != dim_t[time_dim]:
        raise Exception('Target and signals do not have compatible dimensions')
ord_ = numpy.array(
[time_dim] +
list(range(1, time_dim)) +
list(range(time_dim, len(x.shape) - 1)) +
[0]
).T
x_fft = numpy.transpose(x, ord_) # permute
x_fft = numpy.reshape(x_fft, (dim_x[time_dim], numpy.prod(dim_x[ord_[1:]])))
# FIXME? Sparse/dense switchg
p = numpy.arange(0, numpy.prod(dim_x[ ord_[1:] ] ) )
s = numpy.max(p) + 1
b = dia_matrix( (1.0/numpy.sqrt(numpy.nansum(x_fft ** 2, axis=0)), [0]), shape=(s,s) ).todense()
x_fft = numpy.dot(x_fft, b)
t = numpy.transpose(t, ord_)
t = numpy.reshape(t, (dim_t[time_dim], numpy.prod(dim_t[ord_[1:]])))
t = normalise(t)
np, mp = x_fft.shape
nt = t.shape[0]
flag_miss = numpy.any(numpy.isnan(x_fft)) or numpy.any(numpy.isnan(t))
if flag_miss:
if len(x.shape) > 2:
            raise Exception('Multidimensional handling of missing values not implemented yet')
miss_off = nans(1, mp)
for i_signal in range(0, mp):
limits = remove_nan(
numpy.array([0, np - 1]).reshape(1, -1), x_fft[:, i_signal].reshape(1, -1), numpy.all)
if limits.shape != (2, 1):
#raise(Exception, 'Missing values can be handled only if leading or trailing')
pass
if numpy.any(cat(1, limits[0], mp - limits[1]) > numpy.max(abs(options[0:2]))):
                raise Exception('Missing values band larger than largest admitted shift')
miss_off[i_signal] = limits[0]
if numpy.any(miss_off[i_signal-1] > 1):
x_fft[0:limits[1] - limits[0] + 1, #watch out right here -cw 1/25/21
i_signal] = x_fft[limits[0]:limits[1], i_signal]
if limits[1] < np:
x_fft[(limits[1] - limits[0] + 1):np, i_signal] = 0
limits = remove_nan(numpy.array([0, nt - 1]), t.T, numpy.all)
t[0:limits[1] - limits[0] + 1, :] = t[limits[0]:limits[1],:]
t[limits[1] - limits[0] + 1:np, :] = 0
miss_off = miss_off[0:mp] - limits[0]
x_fft = cat(0, x_fft, numpy.zeros(
(numpy.max(numpy.abs(options[0:2])), numpy.prod(dim_x[int(ord_[1:])], axis=0))
))
t = cat(0, t, numpy.zeros(
(numpy.max(numpy.abs(options[0:2])), numpy.prod(dim_t[ord_[1:]], axis=0))
))
len_fft = max(x_fft.shape[0], t.shape[0])
shift = numpy.arange(options[0], options[1] + 1)
if (options[0] < 0) and (options[1] > 0):
ind = list(range(int(len_fft + options[0]), int(len_fft))) + \
list(range(0, int(options[1] + 1)))
elif (options[0] < 0) and (options[1] < 0):
ind = list(range(len_fft + options[0], (len_fft + options[1] + 1)))
elif (options[0] < 0) and (options[1] == 0):
ind = list(range(int(len_fft + options[0]),
int(len_fft + options[1] + 1))) + [1]
else:
# ind = Options(1) + 1:Options(2) + 1
ind = range(int(options[0]), int(options[1] + 1))
# Pad to next ^2 for performance on the FFT
fft_pad = int( 2**numpy.ceil( numpy.log2(len_fft) ) )
x_fft = numpy.fft.fft(x_fft, fft_pad, axis=0)
t_fft = numpy.fft.fft(t, fft_pad, axis=0)
t_fft = numpy.conj(t_fft)
t_fft = numpy.tile(t_fft, (1, dim_x[0]))
dt = x_fft * t_fft
cc = numpy.fft.ifft(dt, fft_pad, axis=0)
if len(ord_[1:-1]) == 0:
k = 1
else:
k = numpy.prod(dim_x[ord_[1:-1]])
cc = numpy.reshape(cc[ind, :], ( options[1]-options[0]+1, k, dim_x[0]) )
if options[3] == 0:
cc = numpy.squeeze(numpy.mean(cc, axis=1))
else:
if options[3] == 1:
cc = numpy.squeeze(numpy.prod(cc, axis=1))
else:
            raise Exception('Invalid options for correlation of multivariate signals')
pos = cc.argmax(axis=0)
values = cat(1, numpy.reshape(shift, (len(shift), 1)), cc)
shift = shift[pos]
if flag_miss:
shift = shift + miss_off
x_warp = nans(*[dim_x[0]] + list(dim_t[1:]))
ind = numpy.tile(numpy.nan, (len(x.shape), 18))
indw = ind
time_dim = numpy.array([time_dim])
for i_X in range(0, dim_x[0]):
ind_c = i_X
if shift[i_X] >= 0:
ind = numpy.arange(shift[i_X], dim_x[time_dim]).reshape(1, -1)
indw = numpy.arange(0, dim_x[time_dim] - shift[i_X]).reshape(1, -1)
if options[4] == - numpy.inf:
o = numpy.zeros(abs(shift[i_X])).astype(int)
if len(o) > 0:
ind = cat(1,
ind,
numpy.array(dim_x[time_dim[o]] - 1).reshape(1, -1)
)
indw = cat(1,
indw,
numpy.arange(dim_x[time_dim] - shift[i_X],
dim_x[time_dim]).reshape(1, -1)
)
elif shift[i_X] < 0:
ind = numpy.arange(0, dim_x[time_dim] + shift[i_X]).reshape(1, -1)
indw = numpy.arange(-shift[i_X], dim_x[time_dim]).reshape(1, -1)
if options[4] == - numpy.inf:
ind = cat(1, numpy.zeros((1, -shift[i_X])), ind)
indw = cat( 1, numpy.arange(0, -shift[i_X]).reshape(1, -1), indw)
x_warp[ind_c, indw.astype(int)] = x[ind_c, ind.astype(int)]
shift = numpy.reshape(shift, (len(shift), 1))
return x_warp, shift, values
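# Editor's note (illustrative summary): cc_fft_shift cross-correlates every row of x with
# the target t via zero-padded FFTs, restricted to lags options[0]..options[1]. It returns
# the shift-corrected rows (x_warp), the lag chosen for each row (shift) and the table of
# candidate lags with their correlation values (values) from which that lag was picked.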
def remove_nan(b, signal, select=numpy.any, flags=False):
'''
Rearrange segments so that they do not include nan's
[Bn] = remove_nan(b, signal, select)
[an, flag]
INPUT
b : (p * 2) Boundary matrix (i.e. [Seg_start(1) Seg_end(1); Seg_start(2) Seg_end(2);...]
signal: (n * 2) Matrix of signals (with signals on rows)
select: (1 * 1) function handle to selecting operator
e.g. numpy.any (default) eliminate a column from signal matrix
if one or more elements are missing
numpy.all eliminate a column from signal matrix
if all elements are missing
OUTPUT
Bn : (q * 2) new Boundary matrix in which nan's are removed
flag: (q * 2 * n) flag matrix if there are nan before (column 1) or after (column 2)
the corresponding segment in the n signals.
Author: Giorgio Tomasi
[email protected]
Created : 25 February, 2009
Last modified: 23 March, 2009; 18:02
Python implementation: Martin Fitzpatrick
[email protected]
Last modified: 28th October, 2013
HISTORY
1.00.00 09 Mar 09 -> First working version
2.00.00 23 Mar 09 -> Added output for adjacent nan's in signals
2.01.00 23 Mar 09 -> Added select input parameter
'''
c = nans(b.shape[0], b.shape[1] if len(b.shape) > 1 else 1)
b = b.reshape(1, -1)
count = 0
signal = numpy.isnan(signal)
for i_el in range(0, b.shape[0]):
ind = numpy.arange(b[i_el, 0], b[i_el, 1] + 1)
in_ = select(signal[:, ind], axis=0)
if numpy.any(in_):
p = numpy.diff(numpy.array([0] + in_).reshape(1, -1), 1, axis=1)
a = numpy.flatnonzero(p < 0) + 1
b = numpy.flatnonzero(p > 0)
if numpy.any(~in_[0]):
a = cat(1, numpy.array([0]), a)
else:
b = b[1:]
if numpy.any(~in_[-1]):
b = cat(1, b, numpy.array([max(ind.shape) - 1]))
a = numpy.unique(a)
b = numpy.unique(b)
# d = ind[cat(1, a, b)]
d = numpy.stack((a, b), axis=-1)
c.resize(d.shape, refcheck=False)
c[count:count + max(a.shape) + 1] = d
count = count + max(a.shape)
else:
c[count, :] = b[i_el,:]
count += 1
c = c.astype(int)
an = c
if flags:
flag = numpy.empty((c.shape[0], 2, signal.shape[0]), dtype=bool)
flag[:] = False
c_inds = c[:, 0] > 1
c_inds = c_inds.astype(bool)
c_inde = c[:, 1] < (signal.shape[1] -1)
c_inde = c_inde.astype(bool)
flag[c_inds, 0, :] = signal[:, c[c_inds, 0] - 1].T
flag[c_inde, 1, :] = signal[:, c[c_inde, 1] + 1].T
return an, flag
else:
return an
def normalise(x, flag=False):
'''
Column-wise normalise matrix
nan's are ignored
[xn] = normalise(x, flag)
INPUT
x : Marix
flag: true if any NaNs are present (optional - it saves time for large matrices)
OUTPUT
xn: Column-wise normalised matrix
Author: Giorgio Tomasi
[email protected]
Created : 09 March, 2009; 13:18
Last modified: 09 March, 2009; 13:50
Python implementation: Martin Fitzpatrick
[email protected]
Last modified: 28th October, 2013
'''
if not flag:
p_att = ~numpy.isnan(x)
flag = numpy.any(~p_att[:])
else:
p_att = ~numpy.isnan(x)
m, n = x.shape
xn = nans(m, n)
if flag:
for i_n in range(0, n):
n = numpy.linalg.norm(x[p_att[:, i_n], i_n])
if not n:
n = 1
xn[p_att[:, i_n], i_n] = x[p_att[:, i_n], i_n] / n
else:
for i_n in range(0, n):
n = numpy.linalg.norm(x[:, i_n])
if not n:
n = 1
xn[:, i_n] = x[:, i_n] / n
return xn
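# Small illustrative check (not part of the original module): each column of the result
# has unit Euclidean norm, NaNs permitting, e.g.
#
#     a = numpy.array([[3.0, 0.0], [4.0, 2.0]])
#     normalise(a)   # columns become [0.6, 0.8] and [0.0, 1.0]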
def extract_segments(x, segments):
'''
Extract segments from signals
[xseg] = extract_segments(x, segments)
? [xseg, segnew] = extract_segments(x, segments)
INPUT
x : (n * p) data matrix
segments: (s * 2) segment boundary matrix
OUTPUT
xseg: (n * q) data matrix in which segments have been removed
segnew: New segment layout
Author: Giorgio Tomasi
[email protected]
Python implementation: Martin Fitzpatrick
[email protected]
Last modified: 28th October, 2013
Created : 23 March, 2009; 07:51
Last modified: 23 March, 2009; 15:07
HISTORY
0.00.01 23 Mar 09 -> Generated function with blank help
1.00.00 23 Mar 09 -> First working version
'''
n, p = x.shape
# segments = segments.T
Sd = numpy.diff(segments, axis=1)
q = numpy.sum(Sd + 1)
s, t = segments.shape
flag_si = t != 2
flag_in = numpy.any(segments[:] != numpy.fix(segments[:]))
flag_ob = numpy.any(segments[:, 0] < 0) or numpy.any(segments[:, 1] > p-1)
flag_ni = numpy.any(numpy.diff(segments[:, 0]) < 0) or numpy.any(
numpy.diff(segments[:, 1]) < 0)
flag_ab = numpy.any(Sd < 2)
if flag_si:
        raise Exception('Segment boundary matrix must have two columns')
    if flag_in:
        raise Exception('Segment boundaries must be integers')
    if flag_ob:
        raise Exception('Segment boundaries outside of segment')
    if flag_ni:
        raise Exception('Segment boundaries must be monotonically increasing')
    if flag_ab:
        raise Exception('Segments must be at least two points long')
xseg = nans(n, q)
origin = 0
segnew = []
for seg in segments:
data = x[:, seg[0]:seg[1] + 1]
segment_size = data.shape[1]
xseg[:, origin:origin + segment_size] = data
segnew.append([origin, origin + segment_size - 1])
origin = origin + segment_size
segnew = numpy.array(segnew)
return xseg, segnew
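# Illustrative example (assumed values): extracting two segments from a 10-point signal
# packs them next to each other and reports the new boundaries, e.g.
#
#     x = numpy.arange(10.0).reshape(1, -1)
#     xseg, segnew = extract_segments(x, numpy.array([[0, 2], [6, 9]]))
#     # xseg -> [[0, 1, 2, 6, 7, 8, 9]], segnew -> [[0, 2], [3, 6]]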
def find_nearest(array, value):
idx = (numpy.abs(array-value)).argmin()
return array[idx], idx
def scal2pts(ppmi, ppm=[], prec=None):
"""
Transforms scalars into data points
pts = scal2pts(values, scal)
INPUT
values: scalars whose position is sought
scal : vector scalars
prec : precision (optional) to handle endpoints
OUTPUT
pts : position of the requested scalars (nan if it is outside of 'scal')
Author: Giorgio Tomasi
[email protected]
Created : 12 February, 2009; 17:43
Last modified: 11 March, 2009; 15:14
Python implementation: Martin Fitzpatrick
[email protected]
Last modified: 28th October, 2013
HISTORY
1.00.00 12 Feb 09 -> First working version
1.01.00 11 Mar 09 -> Added input parameter check
"""
rev = ppm[0] > ppm[1]
if prec is None:
prec = min(abs(numpy.unique(numpy.diff(ppm))))
pts = []
for i in ppmi:
nearest_v, idx = find_nearest(ppm, i)
if abs(nearest_v-i) > prec:
pts.append(numpy.nan)
else:
pts.append( idx )
return numpy.array(pts)
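# Illustrative example (assumed scale): on an evenly spaced axis the function maps scale
# values back to sample indices, e.g.
#
#     ppm = numpy.linspace(10.0, 0.0, 11)   # 10.0, 9.0, ..., 0.0
#     scal2pts([9.0, 3.0], ppm)             # -> array([1, 7])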
def dscal2dpts(d, ppm, prec=None):
"""
Translates an interval width from scal to the best approximation in sampling points.
i = dppm2dpts(delta, scal, prec)
INPUT
delta: interval width in scale units
scal : scale
prec : precision on the scal axes
OUTPUT
i: interval widths in sampling points
Author: Giorgio Tomasi
[email protected]
Last modified: 21st February, 2009
Python implementation: Martin Fitzpatrick
[email protected]
Last modified: 28th October, 2013
"""
if d == 0:
return 0
if d <= 0:
        raise Exception('delta must be positive')
if ppm[0] < ppm[1]: # Scale in order
i = scal2pts(numpy.array([ppm[0] + d]), ppm, prec) -1
else:
i = max(ppm.shape) - scal2pts(numpy.array([ppm[-1] + d]), ppm, prec) +1
return i[0]
| 34.508922 | 162 | 0.540848 |
baea76ac4e82735bca6348ba631bd45929cd648d
| 6,160 |
py
|
Python
|
emergency_tool.py
|
Denevola/openNAMU
|
08d8d82e101ed9f47d34b0c58c8359db54887e2c
|
[
"BSD-3-Clause"
] | null | null | null |
emergency_tool.py
|
Denevola/openNAMU
|
08d8d82e101ed9f47d34b0c58c8359db54887e2c
|
[
"BSD-3-Clause"
] | null | null | null |
emergency_tool.py
|
Denevola/openNAMU
|
08d8d82e101ed9f47d34b0c58c8359db54887e2c
|
[
"BSD-3-Clause"
] | null | null | null |
# Load
import time
from route.tool.func import *
while True:
data_db_load = input('Load DB (Y) [Y, N] : ')
if data_db_load in ('Y', 'N'):
break
if data_db_load == 'Y':
data_db_set = class_check_json()
db_data_get(data_db_set['type'])
do_db_set(data_db_set)
load_db = get_db_connect_old(data_db_set)
conn = load_db.db_load()
curs = conn.cursor()
else:
print('----')
    print('Without a DB connection you can only use options 9 and 11.')
# Main
print('----')
print('1. Backlink reset')
print('2. reCAPTCHA delete')
print('3. Ban delete')
print('4. Change host')
print('5. Change port')
print('6. Change skin')
print('7. Change password')
print('8. Change version')
print('9. Delete set.json')
print('10. Change name')
print('11. Delete mysql.json')
print('12. All title count reset')
print('13. Cache data reset')
print('14. Delete Main <HEAD>')
print('15. Give owner')
print('16. Delete 2FA password')
print('17. Change markup')
print('----')
what_i_do = input('Select : ')
if what_i_do == '1':
print('----')
go_num = input('All delete (Y) [Y, N] : ')
if not go_num == 'N':
curs.execute(db_change("delete from back"))
conn.commit()
print('----')
try:
go_num = int(input('Count (100) : '))
except ValueError:
go_num = 100
num = 0
print('----')
print('Load...')
curs.execute(
db_change(
"select title from data d "
"where not exists ("
"select title from back where link = d.title limit 1"
")"
""
)
)
title = curs.fetchall()
print('----')
print('Rest : ' + str(len(title)))
print('Start : ' + title[0][0])
time.sleep(1)
print('----')
for name in title:
num += 1
if num % go_num == 0:
print(str(num) + ' : ' + name[0])
if num % 100 == 0:
conn.commit()
curs.execute(db_change("select data from data where title = ?"), [name[0]])
data = curs.fetchall()
get_class_render = class_do_render(conn)
get_class_render.do_render(name[0], data[0][0], 'backlink', '')
elif what_i_do == '2':
curs.execute(db_change("delete from other where name = 'recaptcha'"))
curs.execute(db_change("delete from other where name = 'sec_re'"))
elif what_i_do == '3':
print('----')
user_data = input('IP or Name : ')
curs.execute(
db_change(
"insert into rb (block, end, today, blocker, why, band) "
"values (?, ?, ?, ?, ?, ?)"
),
[
user_data,
'release',
get_time(),
'tool:emergency',
'',
'',
]
)
curs.execute(db_change("update rb set ongoing = '' where block = ?"), [user_data])
elif what_i_do == '4':
print('----')
host = input('Host : ')
curs.execute(db_change("update other set data = ? where name = 'host'"), [host])
elif what_i_do == '5':
print('----')
port = int(input('Port : '))
curs.execute(db_change("update other set data = ? where name = 'port'"), [port])
elif what_i_do == '6':
print('----')
skin = input('Skin name : ')
curs.execute(db_change("update other set data = ? where name = 'skin'"), [skin])
elif what_i_do == '7':
print('----')
print('1. sha256')
print('2. sha3')
print('----')
what_i_do = input('Select : ')
print('----')
user_name = input('User name : ')
print('----')
user_pw = input('User password : ')
if what_i_do == '1':
hashed = hashlib.sha256(bytes(user_pw, 'utf-8')).hexdigest()
elif what_i_do == '2':
if sys.version_info < (3, 6):
hashed = sha3.sha3_256(bytes(user_pw, 'utf-8')).hexdigest()
else:
hashed = hashlib.sha3_256(bytes(user_pw, 'utf-8')).hexdigest()
else:
raise ValueError(what_i_do)
curs.execute(db_change("update user_set set data = ? where id = ? and name = 'pw'"), [
hashed,
user_name
])
elif what_i_do == '8':
print('----')
new_ver = input('Insert version (0000000) : ')
if new_ver == '':
new_ver = '0000000'
curs.execute(db_change("update other set data = ? where name = 'ver'"), [new_ver])
elif what_i_do == '9':
if os.path.exists(os.path.join('data', 'set.json')):
os.remove(os.path.join('data', 'set.json'))
elif what_i_do == '10':
print('----')
user_name = input('User name : ')
print('----')
new_name = input('New name : ')
curs.execute(
db_change("update user_set set id = ? where id = ?"),
[new_name, user_name]
)
elif what_i_do == '11':
if os.path.exists(os.path.join('data', 'mysql.json')):
os.remove(os.path.join('data', 'mysql.json'))
elif what_i_do == '12':
curs.execute(db_change("select count(*) from data"))
count_data = curs.fetchall()
if count_data:
count_data = count_data[0][0]
else:
count_data = 0
curs.execute(db_change('delete from other where name = "count_all_title"'))
curs.execute(
db_change(
'insert into other (name, data) values ("count_all_title", ?)'
),
[str(count_data)]
)
elif what_i_do == '13':
curs.execute(db_change('delete from cache_data'))
elif what_i_do == '14':
curs.execute(db_change('delete from other where name = "head"'))
elif what_i_do == '15':
print('----')
user_name = input('User name : ')
curs.execute(db_change("update user_set set data = 'owner' where id = ? and name = 'acl'"), [user_name])
elif what_i_do == '16':
print('----')
user_name = input('User name : ')
curs.execute(db_change('select data from user_set where name = "2fa" and id = ?'), [user_name])
if curs.fetchall():
curs.execute(db_change("update user_set set data = '' where name = '2fa' and id = ?"), [user_name])
elif what_i_do == '17':
print('----')
markup = input('Markup name : ')
curs.execute(db_change("update other set data = ? where name = 'markup'"), [markup])
else:
raise ValueError(what_i_do)
if data_db_load == 'Y':
conn.commit()
print('----')
print('OK')
| 26.899563 | 108 | 0.559578 |
a9d262e997257fc72439879316a09b04795b607a
| 2,565 |
py
|
Python
|
openstack_dashboard/dashboards/admin/routers/tables.py
|
rackerlabs/horizon
|
8914ed95fc8fa44771f5f3ec827e325a5855b60a
|
[
"Apache-2.0"
] | 2 |
2018-10-21T22:30:29.000Z
|
2020-11-21T08:58:31.000Z
|
openstack_dashboard/dashboards/admin/routers/tables.py
|
e/horizon
|
abbce256b68178ebf42816eb87303292212c1dfe
|
[
"Apache-2.0"
] | null | null | null |
openstack_dashboard/dashboards/admin/routers/tables.py
|
e/horizon
|
abbce256b68178ebf42816eb87303292212c1dfe
|
[
"Apache-2.0"
] | null | null | null |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012, Nachi Ueno, NTT MCL, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
from django.template.defaultfilters import title
from django.utils.translation import ugettext_lazy as _
from horizon import tables
from openstack_dashboard import api
from openstack_dashboard.dashboards.project.routers import tables as r_tables
LOG = logging.getLogger(__name__)
class DeleteRouter(r_tables.DeleteRouter):
redirect_url = "horizon:admin:routers:index"
def delete(self, request, obj_id):
search_opts = {'device_owner': 'network:router_interface',
'device_id': obj_id}
ports = api.neutron.port_list(request, **search_opts)
for port in ports:
api.neutron.router_remove_interface(request, obj_id,
port_id=port.id)
super(DeleteRouter, self).delete(request, obj_id)
def allowed(self, request, router=None):
return True
class UpdateRow(tables.Row):
ajax = True
def get_data(self, request, router_id):
router = api.neutron.router_get(request, router_id)
return router
class RoutersTable(tables.DataTable):
tenant = tables.Column("tenant_name", verbose_name=_("Project"))
name = tables.Column("name",
verbose_name=_("Name"),
link="horizon:admin:routers:detail")
status = tables.Column("status",
filters=(title,),
verbose_name=_("Status"),
status=True)
ext_net = tables.Column(r_tables.get_external_network,
verbose_name=_("External Network"))
def get_object_display(self, obj):
return obj.name
class Meta:
name = "Routers"
verbose_name = _("Routers")
status_columns = ["status"]
row_class = UpdateRow
table_actions = (DeleteRouter,)
row_actions = (DeleteRouter,)
| 33.75 | 78 | 0.647953 |
2db31191eda14880291d71cc397c0fcf6bbe0c55
| 10,137 |
py
|
Python
|
test_stopwatch.py
|
bsc-s2/stopwatch
|
805752aa7c341dbb162eb430c4032ce829366ec3
|
[
"Apache-2.0"
] | null | null | null |
test_stopwatch.py
|
bsc-s2/stopwatch
|
805752aa7c341dbb162eb430c4032ce829366ec3
|
[
"Apache-2.0"
] | null | null | null |
test_stopwatch.py
|
bsc-s2/stopwatch
|
805752aa7c341dbb162eb430c4032ce829366ec3
|
[
"Apache-2.0"
] | null | null | null |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import enum
import pytest
from mock import Mock
from stopwatch import (
format_report,
TraceAnnotation,
StopWatch,
)
class MyBuckets(enum.Enum):
BUCKET_A = 1
BUCKET_B = 2
def add_timers(sw):
with sw.timer('root', start_time=20, end_time=920):
sw.add_annotation("Cooltag", event_time=50)
sw.add_slow_annotation("Slowtag", 100)
sw.add_slow_annotation("MegaSlowtag", 1000)
# First child span.
with sw.timer('child1', start_time=40, end_time=140, bucket=MyBuckets.BUCKET_A):
with sw.timer('grand_children1', start_time=60, end_time=80):
pass
with sw.timer('grand_children2', start_time=100, end_time=120):
pass
# Second child span with same name.
with sw.timer('child1', start_time=160, end_time=300, bucket=MyBuckets.BUCKET_A):
with sw.timer('grand_children3', start_time=180, end_time=190):
pass
with sw.timer('grand_children2', start_time=220, end_time=280):
pass
# Third child span with different name.
with sw.timer('child2', start_time=320, end_time=880, bucket=MyBuckets.BUCKET_B):
with sw.timer('grand_children3', start_time=380, end_time=390):
pass
with sw.timer('grand_children1', start_time=520, end_time=780):
pass
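# Span tree produced by add_timers (synthetic clock values used above):
#   root [20, 920]
#   ├── child1 (BUCKET_A) [40, 140]:  grand_children1 [60, 80],   grand_children2 [100, 120]
#   ├── child1 (BUCKET_A) [160, 300]: grand_children3 [180, 190], grand_children2 [220, 280]
#   └── child2 (BUCKET_B) [320, 880]: grand_children3 [380, 390], grand_children1 [520, 780]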
class TestStopWatch(object):
def test_default_exports(self):
sw = StopWatch()
add_timers(sw)
def test_stopwatch_cancel(self):
"""Test that spans can be correctly cancelled and not reported."""
sw = StopWatch()
sw.start('root')
sw.start('child')
sw.cancel('child')
sw.end('root')
agg_values = sw.get_last_aggregated_report().aggregated_values
assert len(agg_values) == 1
assert 'root' in agg_values
def test_stopwatch_cancel_context_manager(self):
"""Test that spans can be cancelled while inside a span context."""
sw = StopWatch()
with sw.timer('root'):
with sw.timer('child'):
sw.cancel('child')
with sw.timer('grand'):
pass
agg_values = sw.get_last_aggregated_report().aggregated_values
assert len(agg_values) == 2
assert all([span in agg_values for span in ('root', 'root#grand')])
def test_sampling_timer(self):
for i in range(100):
sw = StopWatch()
with sw.timer('root', start_time=20, end_time=120):
with sw.sampling_timer('child', p=0.5, start_time=40, end_time=100):
pass
agg_report = sw.get_last_aggregated_report()
assert len(agg_report.aggregated_values) in (1, 2)
if len(agg_report.aggregated_values) == 2:
assert agg_report.aggregated_values['root#child'] == [60000.0, 1, None]
def test_scope_in_loop(self):
sw = StopWatch()
with sw.timer('root', start_time=20, end_time=120):
for t in range(30, 100, 10):
with sw.timer('child', start_time=t, end_time=t + 5):
pass
agg_report = sw.get_last_aggregated_report()
assert agg_report.aggregated_values == {
'root': [100000.0, 1, None],
'root#child': [35000.0, 7, None],
}
assert agg_report.root_timer_data.start_time == 20.0
assert agg_report.root_timer_data.end_time == 120.0
assert agg_report.root_timer_data.name == 'root'
def test_override_exports(self):
export_tracing = Mock()
export_timers = Mock()
sw = StopWatch(
export_tracing_func=export_tracing,
export_aggregated_timers_func=export_timers,
)
add_timers(sw)
agg_report = sw.get_last_aggregated_report()
traces = sw.get_last_trace_report()
export_timers.assert_called_once_with(aggregated_report=agg_report)
export_tracing.assert_called_once_with(reported_traces=traces)
assert agg_report.aggregated_values == {
'root': [900000.0, 1, None],
'root#child1': [240000.0, 2, MyBuckets.BUCKET_A],
'root#child1#grand_children1': [20000.0, 1, None],
'root#child1#grand_children2': [80000.0, 2, None],
'root#child1#grand_children3': [10000.0, 1, None],
'root#child2': [560000.0, 1, MyBuckets.BUCKET_B],
'root#child2#grand_children1': [260000.0, 1, None],
'root#child2#grand_children3': [10000.0, 1, None],
}
assert agg_report.root_timer_data.start_time == 20.0
assert agg_report.root_timer_data.end_time == 920.0
assert agg_report.root_timer_data.name == 'root'
assert agg_report.root_timer_data.trace_annotations == [
TraceAnnotation('Cooltag', '1', 50),
TraceAnnotation('Slowtag', '1', 920),
]
# Traces are listed in the same order that scopes close
assert [(trace.name, trace.log_name, trace.start_time,
trace.end_time, trace.parent_span_id) for trace in traces] == [
('grand_children1', 'root#child1#grand_children1', 60, 80, traces[2].span_id),
('grand_children2', 'root#child1#grand_children2', 100, 120, traces[2].span_id),
('child1', 'root#child1', 40, 140, traces[9].span_id),
('grand_children3', 'root#child1#grand_children3', 180, 190, traces[5].span_id),
('grand_children2', 'root#child1#grand_children2', 220, 280, traces[5].span_id),
('child1', 'root#child1', 160, 300, traces[9].span_id),
('grand_children3', 'root#child2#grand_children3', 380, 390, traces[8].span_id),
('grand_children1', 'root#child2#grand_children1', 520, 780, traces[8].span_id),
('child2', 'root#child2', 320, 880, traces[9].span_id),
('root', 'root', 20, 920, None),
]
assert all(trace.trace_annotations == [] for trace in traces[:9])
assert traces[9].trace_annotations == [
TraceAnnotation('Cooltag', '1', 50),
TraceAnnotation('Slowtag', '1', 920),
]
def test_trace_annotations(self):
sw = StopWatch()
sw.add_annotation('key0', 'value0', event_time=0)
with sw.timer('root', start_time=10, end_time=1000):
with sw.timer('child', start_time=20, end_time=900):
sw.add_span_annotation('key1', 'value1', event_time=101)
sw.add_span_annotation('key2', 'value2', event_time=104)
sw.add_annotation('key3', 'value3', event_time=107)
trace_report = sw.get_last_trace_report()
assert len(trace_report) == 2
assert trace_report[0].name == 'child'
assert trace_report[0].trace_annotations == [
TraceAnnotation('key1', 'value1', 101),
TraceAnnotation('key2', 'value2', 104),
]
assert trace_report[1].name == 'root'
assert trace_report[1].trace_annotations == [
TraceAnnotation('key0', 'value0', 0),
TraceAnnotation('key3', 'value3', 107),
]
def test_exception_annotation(self):
class SpecialError(Exception):
pass
sw = StopWatch()
with pytest.raises(SpecialError):
with sw.timer('root', start_time=10, end_time=1000):
raise SpecialError("Ahhh")
trace_report = sw.get_last_trace_report()
assert trace_report[0].trace_annotations == [
TraceAnnotation('Exception', 'SpecialError', 1000),
]
def test_format_report(self):
sw = StopWatch()
add_timers(sw)
agg_report = sw.get_last_aggregated_report()
formatted_report = format_report(agg_report)
assert formatted_report == \
"************************\n" \
"*** StopWatch Report ***\n" \
"************************\n" \
"root 900000.000ms (100%)\n" \
" BUCKET_A child1 2 240000.000ms (27%)\n" \
" grand_children1 1 20000.000ms (2%)\n" \
" grand_children2 2 80000.000ms (9%)\n" \
" grand_children3 1 10000.000ms (1%)\n" \
" BUCKET_B child2 1 560000.000ms (62%)\n" \
" grand_children1 1 260000.000ms (29%)\n" \
" grand_children3 1 10000.000ms (1%)\n" \
"Annotations: Cooltag, Slowtag"
formatted_report2 = sw.format_last_report()
assert formatted_report == formatted_report2
def test_time_func(self):
"""Test override of the time_func"""
time_mock = Mock(side_effect=[50, 70])
sw = StopWatch(time_func=time_mock)
# Should call our timer func once on entry and once on exit
with sw.timer('root'):
pass
agg_report = sw.get_last_aggregated_report()
assert agg_report.aggregated_values == {
'root': [20000.0, 1, None],
}
assert agg_report.root_timer_data.start_time == 50.0
assert agg_report.root_timer_data.end_time == 70.0
assert agg_report.root_timer_data.name == 'root'
def test_time_func_default(self):
"""Make sure that the default time_func=None"""
sw = StopWatch(time_func=None)
with sw.timer('root'):
pass
agg_report = sw.get_last_aggregated_report()
tr_data = agg_report.root_timer_data
assert tr_data.name == 'root'
assert tr_data.end_time >= tr_data.start_time
def test_export_default(self):
"""Make sure that passing None in explicitly works"""
sw = StopWatch(export_aggregated_timers_func=None, export_tracing_func=None)
with sw.timer('root'):
pass
| 42.062241 | 92 | 0.58834 |
9cb06d620205a93a94f516b116f25e9090ae7432
| 12,120 |
py
|
Python
|
vmotion.py
|
h-mineta/vmware-pyvmomi-tools
|
5d48d6fb2169a519a6f24bf2d27ee3f94ae95fea
|
[
"MIT"
] | null | null | null |
vmotion.py
|
h-mineta/vmware-pyvmomi-tools
|
5d48d6fb2169a519a6f24bf2d27ee3f94ae95fea
|
[
"MIT"
] | null | null | null |
vmotion.py
|
h-mineta/vmware-pyvmomi-tools
|
5d48d6fb2169a519a6f24bf2d27ee3f94ae95fea
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python3.5 -tt
# -*- coding: utf-8 -*-
"""
Copyright (c) 2017 h-mineta <[email protected]>
This software is released under the MIT License.
pip3 install pyvmomi pytz
"""
import atexit
import sys
import re
from datetime import datetime
from logging import getLogger, Formatter, StreamHandler, CRITICAL, WARNING, INFO, DEBUG
logger = getLogger(__name__)
from pyVim import connect
from pyVmomi import vmodl
from pyVmomi import vim
import pytz
from tools import cli, get
def setup_args():
parser = cli.build_arg_parser()
parser.add_argument('-V', '--vmhosts',
required=True,
action='append',
help='VMhost names')
parser.add_argument('-P', '--destination-pool',
required=False,
default=None,
help='Destination resource pool name')
parser.add_argument('-H', '--destination-esxi',
required=False,
default=None,
help='Destination ESXi hostname')
parser.add_argument('-D', '--destination-datastore',
required=False,
default=None,
help='Destination datastore name')
parser.add_argument('--verbose',
action='store_true',
default=False,
help='Verbose mode(default: False)',)
parser.add_argument('--timezone',
required=False,
default='Asia/Tokyo',
help='Default time zone (Asia/Tokyo)')
return cli.prompt_for_password(parser.parse_args())
def print_task(task, timezone_name='Asia/Tokyo'):
error_type = None
message = ''
if task.error != None:
error = task.error
if isinstance(error, vmodl.fault.InvalidArgument):
error_type = 'InvalidArgument'
elif isinstance(error, vmodl.RuntimeFault):
error_type = 'RuntimeFault'
if isinstance(error, vim.fault.DisallowedOperationOnFailoverHost):
error_type = 'DisallowedOperationOnFailoverHost'
elif isinstance(error, vim.fault.FileFault):
error_type = 'FileFault'
elif isinstance(error, vim.fault.InsufficientResourcesFault):
error_type = 'InsufficientResourcesFault'
elif isinstance(error, vmodl.fault.InvalidArgument):
error_type = 'InvalidArgument'
elif isinstance(error, vim.fault.InvalidState):
if isinstance(error, vim.fault.InvalidPowerState):
error_type = 'InvalidPowerState'
elif isinstance(error, vim.fault.InvalidDatastore):
error_type = 'InvalidDatastore'
elif isinstance(error, vim.fault.InvalidHostState):
error_type = 'InvalidHostState'
elif isinstance(error, vim.fault.InvalidVmState):
error_type = 'InvalidVmState'
elif isinstance(error, vim.fault.VmPowerOnDisabled):
error_type = 'VmPowerOnDisabled'
else:
error_type = 'InvalidState'
elif isinstance(error, vim.fault.MigrationFault):
error_type = 'MigrationFault'
elif isinstance(error, vim.fault.Timedout):
error_type = 'Timedout'
elif isinstance(error, vim.fault.VmConfigFault):
error_type = 'VmConfigFault'
else:
error_type = str(type(error))
# error message
if hasattr(error, 'msg'):
message = error.msg
tz = pytz.timezone(timezone_name)
time_to_queue = tz.normalize(task.queueTime.astimezone(tz))
time_to_start = tz.normalize(task.startTime.astimezone(tz))
time_to_complite = "unset"
time_to_difference = "unset"
if task.completeTime:
time_to_complite = tz.normalize(task.completeTime.astimezone(tz))
time_to_difference = task.completeTime - task.startTime
output = "View TaskInfo" \
+ "\n Task : " + str(task.task).strip('\'') \
+ "\n Queue time : " + time_to_queue.strftime('%Y-%m-%d %H:%M:%S %Z') \
+ "\n Start time : " + time_to_start.strftime('%Y-%m-%d %H:%M:%S %Z') \
+ "\n Complete time : " + time_to_complite.strftime('%Y-%m-%d %H:%M:%S %Z') \
+ "\n Diff time : " + str(time_to_difference) + ' (complete - start)' \
+ "\n Name : " + task.entityName \
+ "\n Entyty : " + str(task.entity).strip('\'') \
+ "\n State : " + task.state \
+ "\n Cancelled : " + str(task.cancelled) \
+ "\n Cancelable : " + str(task.cancelable)
if error_type:
output = output \
+ "\n Error type : " + error_type \
+ "\n Error message : " + message
logger.error(output + "\n")
else:
logger.info(output + "\n")
def wait_for_tasks(service_instance, tasks):
finish_tasks = {}
if not len(tasks):
return finish_tasks
property_collector = service_instance.content.propertyCollector
task_list = [str(task) for task in tasks]
# Create filter
obj_specs = [vmodl.query.PropertyCollector.ObjectSpec(obj=task)
for task in tasks]
property_spec = vmodl.query.PropertyCollector.PropertySpec(type=vim.Task,
pathSet=[],
all=True)
filter_spec = vmodl.query.PropertyCollector.FilterSpec()
filter_spec.objectSet = obj_specs
filter_spec.propSet = [property_spec]
pcfilter = property_collector.CreateFilter(filter_spec, True)
atexit.register(pcfilter.Destroy)
try:
version = None
# Loop looking for updates till the state moves to a completed state.
while len(task_list):
update = property_collector.WaitForUpdates(version)
for filter_set in update.filterSet:
for obj_set in filter_set.objectSet:
task = obj_set.obj
task_name = str(task)
if not task_name in task_list:
continue
for change in obj_set.changeSet:
# Append finish task values
if isinstance(change.val, vim.TaskInfo):
# set
finish_tasks[task_name] = change.val
logger.info("Name: %s, State: %s" % (finish_tasks[task_name].entityName, finish_tasks[task_name].state))
elif task_name in finish_tasks:
matchese = re.match(r'^info\.(.+)$', change.name)
if matchese:
# modify(Update)
key = matchese.group(1)
setattr(finish_tasks[task_name], key, change.val)
if key == 'progress' and isinstance(change.val, int) == True:
logger.debug("Name: %s, Progress: %d" % (finish_tasks[task_name].entityName, change.val))
elif key == 'state':
if change.val == 'error':
logger.error("Name: %s, State: %s" % (finish_tasks[task_name].entityName, change.val))
else:
logger.info("Name: %s, State: %s" % (finish_tasks[task_name].entityName, change.val))
if task_name in finish_tasks \
and (finish_tasks[task_name].state == 'success' or finish_tasks[task_name].state == 'error') \
and finish_tasks[task_name].completeTime != None:
# Remove task from taskList
task_list.remove(task_name)
# Move to next version
version = update.version
except vmodl.RuntimeFault as ex:
logger.error('Caught RuntimeFault fault : ' + ex.msg)
except vmodl.MethodFault as ex:
logger.error('Caught MethodFault fault : ' + ex.msg)
except Exception as ex:
raise
return finish_tasks
def main():
args = setup_args()
exit_status = 0
# logger setting
formatter = Formatter('[%(asctime)s]%(levelname)s - %(message)s')
#formatter = Formatter('[%(asctime)s][%(funcName)s:%(lineno)d]%(levelname)s - %(message)s')
    logger.setLevel(DEBUG) # fixed at DEBUG
console = StreamHandler()
if hasattr(args, 'verbose') and args.verbose == True:
console.setLevel(DEBUG)
else:
console.setLevel(INFO)
console.setFormatter(formatter)
logger.addHandler(console)
if not args.destination_esxi and not args.destination_datastore:
logger.critical("Could not destination esxi or datastore")
sys.exit(1)
try:
if args.disable_ssl_verification:
service_instance = connect.SmartConnectNoSSL(host=args.host,
user=args.user,
pwd=args.password,
port=int(args.port))
else:
service_instance = connect.SmartConnect(host=args.host,
user=args.user,
pwd=args.password,
port=int(args.port))
if not service_instance:
logger.critical("Could not connect to the specified host ' \
'using specified username and password")
sys.exit(1)
atexit.register(connect.Disconnect, service_instance)
content = service_instance.RetrieveContent()
        # Build the VM list
vm_list = get.get_vms_by_names(content, args.vmhosts)
if len(vm_list) == 0:
logger.warning('Virtual Machine is not found')
sys.exit(1)
        # Build the spec data for relocation (vMotion)
relocate_spec = vim.VirtualMachineRelocateSpec()
if args.destination_esxi:
relocate_spec.host = get.get_host_by_name(content, args.destination_esxi)
if relocate_spec.host is None:
logger.warning('ESXi host is not found')
sys.exit(1)
if args.destination_datastore:
relocate_spec.datastore = get.get_datastore_by_name(content, args.destination_datastore)
if relocate_spec.datastore is None:
logger.warning('Datastore is not found')
sys.exit(1)
if args.destination_pool:
relocate_spec.pool = get.get_pool(content, args.destination_pool)
if relocate_spec.pool is None:
logger.warning('Pool is not found')
sys.exit(1)
        # e.g. ['vim.Task:task-1137', 'vim.Task:task-1138', 'vim.Task:task-1139']
task_list = [vm.RelocateVM_Task(spec=relocate_spec, priority='defaultPriority') for vm in vm_list]
if len(task_list) == 0:
logger.error('Relocate task is not create')
sys.exit(2)
finish_tasks = {}
finish_tasks = wait_for_tasks(service_instance, task_list)
if len(finish_tasks) == 0:
logger.error('Finish task is not found')
sys.exit(2)
for key in finish_tasks.keys():
print_task(finish_tasks[key], args.timezone)
if finish_tasks[key].state == 'error':
exit_status = 2
except vmodl.MethodFault as ex:
logger.critical('Caught vmodl fault : ' + ex.msg)
import traceback
traceback.print_exc()
sys.exit(253)
except Exception as ex:
logger.critical('Caught exception : ' + str(ex))
import traceback
traceback.print_exc()
sys.exit(254)
sys.exit(exit_status)
# Start program
if __name__ == "__main__":
main()
| 37.757009 | 132 | 0.555776 |
eecc2ca3c67e160a036e3106dba5b1bd669c6af2
| 4,092 |
py
|
Python
|
src/riaps/run/ansPort.py
|
mbellabah/riaps-pycom
|
6995d1b5d58c5f8b03b6eb75a5c5c34f9de00f5e
|
[
"Apache-2.0"
] | null | null | null |
src/riaps/run/ansPort.py
|
mbellabah/riaps-pycom
|
6995d1b5d58c5f8b03b6eb75a5c5c34f9de00f5e
|
[
"Apache-2.0"
] | null | null | null |
src/riaps/run/ansPort.py
|
mbellabah/riaps-pycom
|
6995d1b5d58c5f8b03b6eb75a5c5c34f9de00f5e
|
[
"Apache-2.0"
] | null | null | null |
'''
Created on Oct 10, 2016
@author: riaps
'''
import time
import zmq
import struct
from .port import Port
from riaps.run.exc import OperationError,PortError
from riaps.utils.config import Config
from zmq.error import ZMQError
try:
import cPickle
pickle = cPickle
except:
cPickle = None
import pickle
class AnsPort(Port):
'''
classdocs
'''
def __init__(self, parentComponent, portName, portSpec):
'''
Constructor
'''
super(AnsPort,self).__init__(parentComponent,portName)
self.req_type = portSpec["req_type"]
self.rep_type = portSpec["rep_type"]
self.isTimed = portSpec["timed"]
        self.deadline = portSpec["deadline"] * 0.001 # deadline is specified in msec; stored in seconds
parentActor = parentComponent.parent
self.isLocalPort = parentActor.isLocalMessage(self.req_type) and parentActor.isLocalMessage(self.rep_type)
self.identity = None
self.info = None
def setup(self):
pass
def setupSocket(self,owner):
self.setOwner(owner)
self.socket = self.context.socket(zmq.ROUTER)
self.socket.setsockopt(zmq.SNDTIMEO,self.sendTimeout)
self.setupCurve(True)
self.host = ''
if not self.isLocalPort:
globalHost = self.getGlobalIface()
self.portNum = self.socket.bind_to_random_port("tcp://" + globalHost)
self.host = globalHost
else:
localHost = self.getLocalIface()
self.portNum = self.socket.bind_to_random_port("tcp://" + localHost)
self.host = localHost
self.info = ('ans',self.isLocalPort,self.name,str(self.req_type) + '#' + str(self.rep_type), self.host,self.portNum)
return self.info
def update(self, host, port):
raise OperationError("Unsupported update() on AnsPort")
def reset(self):
pass
def getSocket(self):
return self.socket
def inSocket(self):
return True
def get_identity(self):
return self.identity
def set_identity(self,identity):
self.identity = identity
def ans_port_recv(self,is_pyobj):
try:
msgFrames = self.socket.recv_multipart() # Receive multipart (IDENTITY + payload) message
except zmq.error.ZMQError as e:
raise PortError("recv error (%d)" % e.errno, e.errno) from e
if self.isTimed:
self.recvTime = time.time()
self.identity = msgFrames[0] # Separate identity, it is a Frame
if is_pyobj:
result = pickle.loads(msgFrames[1]) # Separate payload (pyobj)
else:
result = msgFrames[1] # Separate payload (bytes)
if len(msgFrames) == 3: # If we have a send time stamp
rawMsg = msgFrames[2]
rawTuple = struct.unpack("d", rawMsg)
self.sendTime = rawTuple[0]
return result
def ans_port_send(self,msg,is_pyobj):
try:
sendMsg = [self.identity] # Identity is already a frame
if is_pyobj:
payload = zmq.Frame(pickle.dumps(msg)) # Pickle python payload
else:
payload = zmq.Frame(msg) # Take bytearray
sendMsg += [payload]
if self.isTimed:
now = time.time()
now = struct.pack("d", now)
nowFrame = zmq.Frame(now)
sendMsg += [nowFrame]
self.socket.send_multipart(sendMsg)
except zmq.error.ZMQError as e:
raise PortError("send error (%d)" % e.errno, e.errno) from e
return True
def recv_pyobj(self):
return self.ans_port_recv(True)
def send_pyobj(self,msg):
return self.ans_port_send(msg,True)
def recv(self):
return self.ans_port_recv(False)
def send(self, _msg):
        return self.ans_port_send(_msg, False)
def getInfo(self):
return self.info
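# Typical request/answer round-trip on this port (illustrative; the framework glue that
# owns the socket and drives the poll loop is omitted, and `handle` is a hypothetical
# application callback):
#   msg = ans_port.recv_pyobj()    # stores the requester's ZMQ identity on the port
#   reply = handle(msg)
#   ans_port.send_pyobj(reply)     # routed back to the stored identity via the ROUTER socket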
| 32.220472 | 124 | 0.577957 |
6f637310ac06c515ecc8437d0af948bf6de4d3a0
| 658 |
py
|
Python
|
clients/python-fastapi/generated/src/openapi_server/models/free_style_projectactions.py
|
cliffano/jenkins-api-clients-generator
|
522d02b3a130a29471df5ec1d3d22c822b3d0813
|
[
"MIT"
] | null | null | null |
clients/python-fastapi/generated/src/openapi_server/models/free_style_projectactions.py
|
cliffano/jenkins-api-clients-generator
|
522d02b3a130a29471df5ec1d3d22c822b3d0813
|
[
"MIT"
] | null | null | null |
clients/python-fastapi/generated/src/openapi_server/models/free_style_projectactions.py
|
cliffano/jenkins-api-clients-generator
|
522d02b3a130a29471df5ec1d3d22c822b3d0813
|
[
"MIT"
] | null | null | null |
# coding: utf-8
from __future__ import annotations
from datetime import date, datetime # noqa: F401
import re # noqa: F401
from typing import Any, Dict, List, Optional # noqa: F401
from pydantic import AnyUrl, BaseModel, EmailStr, validator # noqa: F401
class FreeStyleProjectactions(BaseModel):
"""NOTE: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech).
Do not edit the class manually.
FreeStyleProjectactions - a model defined in OpenAPI
_class: The _class of this FreeStyleProjectactions [Optional].
"""
_class: Optional[str] = None
FreeStyleProjectactions.update_forward_refs()
| 26.32 | 96 | 0.743161 |
d49e9592c8658910d6180947346f6788ba5fdb29
| 498 |
py
|
Python
|
tests/assignments/test_assign7.py
|
acc-cosc-1336/cosc-1336-spring-2018-vcruz350
|
0cee9fde3d4129c51626c4e0c870972aebec9b95
|
[
"MIT"
] | null | null | null |
tests/assignments/test_assign7.py
|
acc-cosc-1336/cosc-1336-spring-2018-vcruz350
|
0cee9fde3d4129c51626c4e0c870972aebec9b95
|
[
"MIT"
] | 1 |
2018-03-08T19:46:08.000Z
|
2018-03-08T20:00:47.000Z
|
tests/assignments/test_assign7.py
|
acc-cosc-1336/cosc-1336-spring-2018-vcruz350
|
0cee9fde3d4129c51626c4e0c870972aebec9b95
|
[
"MIT"
] | null | null | null |
import unittest
#write the import for function for assignment7 sum_list_values
from src.assignments.assignment7 import sum_list_values
class Test_Assign7(unittest.TestCase):
def sample_test(self):
self.assertEqual(1,1)
#create a test for the sum_list_values function with list elements:
# bill 23 16 19 22
def test_sum_w_23_16_19_22(self):
test_list = ['bill', 23, 16, 19, 22]
self.assertEqual(80, sum_list_values(test_list))
#unittest.main(verbosity=2)
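# For reference, a minimal implementation that would satisfy the test above (illustrative
# only; the graded solution lives in src/assignments/assignment7.py):
#   def sum_list_values(values):
#       return sum(v for v in values if isinstance(v, (int, float)))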
| 29.294118 | 71 | 0.736948 |
9c1270a5bbbe1cd0bca120127700d0922f79d41b
| 2,933 |
py
|
Python
|
scripts/m2.py
|
byu-iot-security/cheeziot_webserver
|
bf3b173ff0ef0165329212f2ee65485191f75abc
|
[
"MIT"
] | 2 |
2017-02-13T11:55:56.000Z
|
2017-02-15T08:27:49.000Z
|
scripts/m2.py
|
byu-iot-security/cheeziot_webserver
|
bf3b173ff0ef0165329212f2ee65485191f75abc
|
[
"MIT"
] | null | null | null |
scripts/m2.py
|
byu-iot-security/cheeziot_webserver
|
bf3b173ff0ef0165329212f2ee65485191f75abc
|
[
"MIT"
] | null | null | null |
import pymongo
import os
import datetime
from base64 import decodestring
from bson.objectid import ObjectId
from pymongo import MongoClient
config_file = open("config", 'r')
collection = ''
database = ''
#Parse the configuration (config) file
for line in config_file:
field,val = line.split("=")
if(field == "COLLECTION"):
collection = val.rstrip()
elif(field == "DATABASE"):
database = val.rstrip()
print collection
print database
client = MongoClient('localhost', 27017)
# TODO: Only retrieve records with image data
# Assume that all records have an image, for now
# Get a hardcoded entry
# entry = client[database][collection].find_one({"_id": ObjectId("58a61687870a765994850d5a")})
# Sort from newest to oldest based on the kaa timestamp, and return the newest record
# For sorting nested fields, see http://stackoverflow.com/questions/12031507/mongodb-sorting-by-nested-object-value
entry = client[database][collection].find().sort("header.timestamp", pymongo.DESCENDING).limit(1)[0]
# Get the most recent image record according to _id
# The _id field will contain an implicit timestamp in it
# See http://stackoverflow.com/questions/4421207/mongodb-how-to-get-the-last-n-records
# entry = client[database][collection].find().sort("_id", pymongo.DESCENDING).limit(1)[0]
# NOTE: find_one() and find().limit(1) aren't perfectly interchangeable
# See http://dba.stackexchange.com/questions/7573/difference-between-mongodbs-find-and-findone-calls
# # Other tests
# cursor = client[database][collection].find().sort("_id", pymongo.DESCENDING)
# cursor = client[database][collection].find()
# print cursor[0].get("_id")
print "-----------------------"
print entry.get("_id")
person_name = entry.get("event").get("person_name")
if person_name:
name = person_name.rstrip()
else:
name = "?"
if os.path.isfile("public/faces.html"):
os.remove("public/faces.html")
#construct the faces.html page to be served to a client.
last_seen = open("public/faces.html", "w")
last_seen.write("<!doctype html>\n")
last_seen.write(" <head>\n")
last_seen.write(" <title>Faces of the Clyde</title>\n")
last_seen.write(" </head>\n")
last_seen.write(" <body>\n")
last_seen.write(" <img src=\"images/faces.png\">\n")
last_seen.write(" <div>\n")
last_seen.write(" <img src=\"images/test_out.bmp\" width=\"200\" height=\"200\">\n")
name_string = " <font size = \"6\" face=\"Courier New\">" + name + "</b>\n"
last_seen.write(" </div>\n")
last_seen.write(" <div>\n")
last_seen.write(name_string)
last_seen.write(" </div>\n")
last_seen.write(" </body>\n")
last_seen.write("</html>\n")
last_seen.close()
raw_image_data = entry.get("event").get("image_data")
#if test_out.bmp already exists, delete it
if os.path.isfile("public/images/test_out.bmp"):
os.remove("public/images/test_out.bmp")
f = file("public/images/test_out.bmp", "wb")
for i in raw_image_data:
f.write(decodestring(i))
| 30.237113 | 115 | 0.707467 |
3a92596535361ffe65d246ee1e13cd2e0b154c17
| 4,896 |
py
|
Python
|
inria/visualization.py
|
gerberka/catalyst-inria
|
06dbd6b34b030255ea3df6383434b42e9da85f56
|
[
"MIT"
] | 44 |
2019-04-25T19:45:22.000Z
|
2022-02-07T12:56:46.000Z
|
inria/visualization.py
|
gerberka/catalyst-inria
|
06dbd6b34b030255ea3df6383434b42e9da85f56
|
[
"MIT"
] | 10 |
2020-04-26T19:53:34.000Z
|
2022-01-11T11:58:24.000Z
|
inria/visualization.py
|
gerberka/catalyst-inria
|
06dbd6b34b030255ea3df6383434b42e9da85f56
|
[
"MIT"
] | 6 |
2019-09-10T09:37:15.000Z
|
2022-01-14T14:10:10.000Z
|
from typing import Callable, Optional, List, Union
import cv2
import numpy as np
from pytorch_toolbelt.utils.torch_utils import rgb_image_from_tensor, to_numpy
from inria.dataset import (
OUTPUT_OFFSET_KEY,
OUTPUT_MASK_4_KEY,
OUTPUT_MASK_32_KEY,
OUTPUT_MASK_16_KEY,
OUTPUT_MASK_8_KEY,
OUTPUT_MASK_2_KEY,
)
def draw_inria_predictions(
input: dict,
output: dict,
    inputs_to_labels: Callable,
outputs_to_labels: Callable,
image_key="features",
image_id_key: Optional[str] = "image_id",
targets_key="targets",
outputs_key="logits",
mean=(0.485, 0.456, 0.406),
std=(0.229, 0.224, 0.225),
max_images=None,
image_format: Union[str, Callable] = "bgr",
) -> List[np.ndarray]:
"""
Render visualization of model's prediction for binary segmentation problem.
This function draws a color-coded overlay on top of the image, with color codes meaning:
- green: True positives
- red: False-negatives
- yellow: False-positives
:param input: Input batch (model's input batch)
:param output: Output batch (model predictions)
:param image_key: Key for getting image
:param image_id_key: Key for getting image id/fname
:param targets_key: Key for getting ground-truth mask
:param outputs_key: Key for getting model logits for predicted mask
    :param mean: Mean vector used during normalization
    :param std: Std vector used during normalization
    :param max_images: Maximum number of images to visualize from batch
        (If you have huge batch, saving hundreds of images may make TensorBoard slow)
    :param inputs_to_labels: Callable that converts the ground-truth targets tensor into binary masks
    :param outputs_to_labels: Callable that converts the model outputs (raw logits) into binary masks
    :param image_format: Source format of the image tensor to convert to RGB representation.
        Can be string ("gray", "rgb", "bgr") or function `convert(np.ndarray)->nd.ndarray`.
:return: List of images
"""
images = []
num_samples = len(input[image_key])
if max_images is not None:
num_samples = min(num_samples, max_images)
true_masks = to_numpy(inputs_to_labels(input[targets_key])).astype(bool)
pred_masks = to_numpy(outputs_to_labels(output[outputs_key])).astype(bool)
for i in range(num_samples):
image = rgb_image_from_tensor(input[image_key][i], mean, std)
if image_format == "bgr":
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
elif image_format == "gray":
image = cv2.cvtColor(image, cv2.COLOR_GRAY2RGB)
elif hasattr(image_format, "__call__"):
image = image_format(image)
overlay = image.copy()
true_mask = true_masks[i]
pred_mask = pred_masks[i]
overlay[true_mask & pred_mask] = np.array(
[0, 250, 0], dtype=overlay.dtype
) # Correct predictions (Hits) painted with green
overlay[true_mask & ~pred_mask] = np.array([250, 0, 0], dtype=overlay.dtype) # Misses painted with red
overlay[~true_mask & pred_mask] = np.array(
[250, 250, 0], dtype=overlay.dtype
) # False alarm painted with yellow
overlay = cv2.addWeighted(image, 0.5, overlay, 0.5, 0, dtype=cv2.CV_8U)
if OUTPUT_OFFSET_KEY in output:
offset = to_numpy(output[OUTPUT_OFFSET_KEY][i]) * 32
offset = np.expand_dims(offset, -1)
x = offset[0, ...].clip(min=0, max=1) * np.array([255, 0, 0]) + (-offset[0, ...]).clip(
min=0, max=1
) * np.array([0, 0, 255])
y = offset[1, ...].clip(min=0, max=1) * np.array([255, 0, 255]) + (-offset[1, ...]).clip(
min=0, max=1
) * np.array([0, 255, 0])
offset = (x + y).clip(0, 255).astype(np.uint8)
offset = cv2.resize(offset, (image.shape[1], image.shape[0]))
overlay = np.row_stack([overlay, offset])
dsv_inputs = [OUTPUT_MASK_2_KEY, OUTPUT_MASK_4_KEY, OUTPUT_MASK_8_KEY, OUTPUT_MASK_16_KEY, OUTPUT_MASK_32_KEY]
for dsv_input_key in dsv_inputs:
if dsv_input_key in output:
dsv_p = to_numpy(output[dsv_input_key][i].detach().float().sigmoid().squeeze(0))
dsv_p = cv2.resize((dsv_p * 255).astype(np.uint8), (image.shape[1], image.shape[0]))
dsv_p = cv2.cvtColor(dsv_p, cv2.COLOR_GRAY2RGB)
overlay = np.row_stack([overlay, dsv_p])
if image_id_key is not None and image_id_key in input:
image_id = input[image_id_key][i]
cv2.putText(overlay, str(image_id), (10, 15), cv2.FONT_HERSHEY_PLAIN, 1, (250, 250, 250))
images.append(overlay)
return images
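# --- Illustrative smoke test (not part of the original training code). The tensor shapes
# and the 0.5 thresholds are assumptions; real callers pass normalized images and their
# own label-conversion callables.
if __name__ == "__main__":
    import torch
    batch = {
        "features": torch.rand(2, 3, 64, 64),
        "targets": torch.rand(2, 1, 64, 64),
        "image_id": ["sample_0", "sample_1"],
    }
    predictions = {"logits": torch.randn(2, 1, 64, 64)}
    vis = draw_inria_predictions(
        batch,
        predictions,
        inputs_to_labels=lambda t: t.squeeze(1) > 0.5,
        outputs_to_labels=lambda l: l.squeeze(1).sigmoid() > 0.5,
    )
    print(len(vis), vis[0].shape, vis[0].dtype)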
| 42.206897 | 118 | 0.649918 |
57d21cbc4569406ee7ec424e134992748e849937
| 4,416 |
py
|
Python
|
python/pb/envoy/annotations/deprecation_pb2.py
|
pomerium/enterprise-client
|
899e568e59955533179167ff08066962991476c5
|
[
"Apache-2.0"
] | 1 |
2021-09-14T04:34:29.000Z
|
2021-09-14T04:34:29.000Z
|
src/envoy/annotations/deprecation_pb2.py
|
pomerium/enterprise-client-python
|
366d72cc9cd6dc05fae704582deb13b1ccd20a32
|
[
"Apache-2.0"
] | 3 |
2021-09-15T15:10:41.000Z
|
2022-01-04T21:03:03.000Z
|
python/pb/envoy/annotations/deprecation_pb2.py
|
pomerium/enterprise-client
|
899e568e59955533179167ff08066962991476c5
|
[
"Apache-2.0"
] | 1 |
2021-09-14T17:32:12.000Z
|
2021-09-14T17:32:12.000Z
|
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: envoy/annotations/deprecation.proto
"""Generated protocol buffer code."""
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from google.protobuf import descriptor_pb2 as google_dot_protobuf_dot_descriptor__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='envoy/annotations/deprecation.proto',
package='envoy.annotations',
syntax='proto3',
serialized_options=b'Z8github.com/envoyproxy/go-control-plane/envoy/annotations',
create_key=_descriptor._internal_create_key,
serialized_pb=b'\n#envoy/annotations/deprecation.proto\x12\x11\x65nvoy.annotations\x1a google/protobuf/descriptor.proto:?\n\x15\x64isallowed_by_default\x12\x1d.google.protobuf.FieldOptions\x18\xe7\xad\xaeZ \x01(\x08:E\n\x1b\x64\x65precated_at_minor_version\x12\x1d.google.protobuf.FieldOptions\x18\xf2\xe8\x80K \x01(\t:H\n\x1a\x64isallowed_by_default_enum\x12!.google.protobuf.EnumValueOptions\x18\xf5\xce\xb6! \x01(\x08:N\n deprecated_at_minor_version_enum\x12!.google.protobuf.EnumValueOptions\x18\xc1\xbe\xb3V \x01(\tB:Z8github.com/envoyproxy/go-control-plane/envoy/annotationsb\x06proto3'
,
dependencies=[google_dot_protobuf_dot_descriptor__pb2.DESCRIPTOR,])
DISALLOWED_BY_DEFAULT_FIELD_NUMBER = 189503207
disallowed_by_default = _descriptor.FieldDescriptor(
name='disallowed_by_default', full_name='envoy.annotations.disallowed_by_default', index=0,
number=189503207, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=True, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key)
DEPRECATED_AT_MINOR_VERSION_FIELD_NUMBER = 157299826
deprecated_at_minor_version = _descriptor.FieldDescriptor(
name='deprecated_at_minor_version', full_name='envoy.annotations.deprecated_at_minor_version', index=1,
number=157299826, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=True, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key)
DISALLOWED_BY_DEFAULT_ENUM_FIELD_NUMBER = 70100853
disallowed_by_default_enum = _descriptor.FieldDescriptor(
name='disallowed_by_default_enum', full_name='envoy.annotations.disallowed_by_default_enum', index=2,
number=70100853, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=True, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key)
DEPRECATED_AT_MINOR_VERSION_ENUM_FIELD_NUMBER = 181198657
deprecated_at_minor_version_enum = _descriptor.FieldDescriptor(
name='deprecated_at_minor_version_enum', full_name='envoy.annotations.deprecated_at_minor_version_enum', index=3,
number=181198657, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=True, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key)
DESCRIPTOR.extensions_by_name['disallowed_by_default'] = disallowed_by_default
DESCRIPTOR.extensions_by_name['deprecated_at_minor_version'] = deprecated_at_minor_version
DESCRIPTOR.extensions_by_name['disallowed_by_default_enum'] = disallowed_by_default_enum
DESCRIPTOR.extensions_by_name['deprecated_at_minor_version_enum'] = deprecated_at_minor_version_enum
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
google_dot_protobuf_dot_descriptor__pb2.FieldOptions.RegisterExtension(disallowed_by_default)
google_dot_protobuf_dot_descriptor__pb2.FieldOptions.RegisterExtension(deprecated_at_minor_version)
google_dot_protobuf_dot_descriptor__pb2.EnumValueOptions.RegisterExtension(disallowed_by_default_enum)
google_dot_protobuf_dot_descriptor__pb2.EnumValueOptions.RegisterExtension(deprecated_at_minor_version_enum)
DESCRIPTOR._options = None
# @@protoc_insertion_point(module_scope)
| 59.675676 | 594 | 0.845788 |
ef88abd0dc622197c9dbf3ac4806e24681c5cedc
| 47 |
py
|
Python
|
graphpy/__init__.py
|
tscizzle/pygraph
|
b6fe9e21bd30ae4249d02a1818b49351575fa98e
|
[
"MIT"
] | 1 |
2017-04-09T21:58:28.000Z
|
2017-04-09T21:58:28.000Z
|
graphpy/__init__.py
|
tscizzle/graphpy
|
b6fe9e21bd30ae4249d02a1818b49351575fa98e
|
[
"MIT"
] | null | null | null |
graphpy/__init__.py
|
tscizzle/graphpy
|
b6fe9e21bd30ae4249d02a1818b49351575fa98e
|
[
"MIT"
] | null | null | null |
"""
Module defining a graph data structure
"""
| 11.75 | 38 | 0.702128 |
0d9e8f5a3d35ec8414e4a7dcd3e00b6d58fc9ee6
| 2,270 |
py
|
Python
|
scripts/merge_func_table.py
|
hbc/mammoth_code
|
2e6909514e8ff232981ea2cb03f078257bc5c847
|
[
"MIT"
] | 1 |
2017-05-22T01:18:13.000Z
|
2017-05-22T01:18:13.000Z
|
scripts/merge_func_table.py
|
hbc/mammoth_code
|
2e6909514e8ff232981ea2cb03f078257bc5c847
|
[
"MIT"
] | null | null | null |
scripts/merge_func_table.py
|
hbc/mammoth_code
|
2e6909514e8ff232981ea2cb03f078257bc5c847
|
[
"MIT"
] | null | null | null |
import os
import sys
import pybedtools
import shutil
from collections import defaultdict
hg19="ann/hg19_pos.bed"
variants="all_23genomes.xls"
ann="ann/hg19_ann.txt"
hg19_map = {}
with open(hg19) as inh:
for line in inh:
cols = line.strip().split()
hg19_map[cols[3]] = "%s:%s" % (cols[0], int(cols[1]) + 1)
ann_map = defaultdict(dict)
with open(ann) as inh:
for line in inh:
cols = line.strip().split()
idx = "chr%s:%s" % (cols[7], cols[8])
cols[4] = cols[4] if cols[4] != "X" else "STOP"
cols[5] = cols[5] if cols[5] != "X" else "STOP"
ann_map[idx].update({"%s %s" % (cols[4],cols[5]): cols[34]})
# print ann_map[idx]
def _get_change(cols):
if cols[7] != "NA":
ref = cols[7]
elif cols[27]:
ref = aa_name[cols[27][2:5]]
if cols[11] != "NA":
alt = cols[11]
elif cols[18] != "NA":
alt = cols[18]
else:
if cols[27].endswith("*"):
alt = "STOP"
elif cols[27][-3:] not in aa_name:
alt = "NA"
else:
alt = aa_name[cols[27][-3:]]
return [ref, alt]
aa_name = {'Ala': 'A', 'Arg': 'R', 'Asn': 'N', 'Asp': 'D', 'Cys': 'C',
'Gln': 'Q', 'Glu': 'E', 'Gly': 'G', 'His': 'H', 'Ile': 'I',
'Leu': 'L', 'Lys': 'K', 'Met': 'M', 'Phe': 'F', 'Pro': 'P',
'Ser': 'S', 'Thr': 'T', 'Trp': 'W', 'Tyr': 'Y', 'Val': 'V',
'Ter': 'STOP', '': 'NA'}
with open(variants) as inh:
for line in inh:
if line.find("gene") > -1:
print "\t".join(line.strip().split("\t") + ["polyphen2"])
continue
cols=[c.replace("\"", "") for c in line.strip().split("\t")]
chrom, pos = cols[0], int(cols[1])
idx = "%s:%s" % (chrom.replace("\"", ""), pos)
score = "NA"
if idx in hg19_map:
if hg19_map[idx] in ann_map:
change = _get_change(cols)
score = []
for item in ann_map[hg19_map[idx]]:
score.append(ann_map[hg19_map[idx]][item])
if " ".join(change) == item:
score = score[-1]
break
score = ",".join(score)
print "\t".join(cols + [score])
| 31.971831 | 70 | 0.463877 |
d4a87db7a3a5292d9840172f7e248163d44bcf4f
| 2,356 |
py
|
Python
|
readthedocs/core/mixins.py
|
yarons/readthedocs.org
|
05c99a0adc222a1d48654d305b492ec142c3026b
|
[
"MIT"
] | 4,054 |
2015-01-01T00:58:07.000Z
|
2019-06-28T05:50:49.000Z
|
readthedocs/core/mixins.py
|
yarons/readthedocs.org
|
05c99a0adc222a1d48654d305b492ec142c3026b
|
[
"MIT"
] | 4,282 |
2015-01-01T21:38:49.000Z
|
2019-06-28T15:41:00.000Z
|
readthedocs/core/mixins.py
|
yarons/readthedocs.org
|
05c99a0adc222a1d48654d305b492ec142c3026b
|
[
"MIT"
] | 3,224 |
2015-01-01T07:38:45.000Z
|
2019-06-28T09:19:10.000Z
|
"""Common mixin classes for views."""
from functools import lru_cache
from django.conf import settings
from django.contrib.auth.mixins import LoginRequiredMixin
from vanilla import ListView
from readthedocs.projects.models import Feature
from readthedocs.subscriptions.models import PlanFeature
class ListViewWithForm(ListView):
"""List view that also exposes a create form."""
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context['form'] = self.get_form(data=None, files=None)
return context
class PrivateViewMixin(LoginRequiredMixin):
pass
class ProxiedAPIMixin:
# DRF has BasicAuthentication and SessionAuthentication as default classes.
# We don't support neither in the community site.
authentication_classes = []
class CDNCacheControlMixin:
"""
Allow to cache views at the CDN level when privacy levels are enabled.
The cache control header is only used when privacy levels
are enabled (otherwise everything is public by default).
Views that can be cached should always return the same response for all
users (anonymous and authenticated users), like when the version attached
to the request is public.
To cache a view you can either set the `cache_request` attribute to `True`,
or override the `can_be_cached` method.
We use ``CDN-Cache-Control``, to control caching at the CDN level only.
This doesn't affect caching at the browser level (``Cache-Control``).
See https://developers.cloudflare.com/cache/about/cdn-cache-control.
"""
cache_request = False
def dispatch(self, request, *args, **kwargs):
response = super().dispatch(request, *args, **kwargs)
if settings.ALLOW_PRIVATE_REPOS and self.can_be_cached(request):
response.headers['CDN-Cache-Control'] = 'public'
return response
def can_be_cached(self, request):
return self.cache_request
@lru_cache(maxsize=1)
def _is_cache_enabled(self, project):
"""Helper function to check if CDN is enabled for a project."""
plan_has_cdn = PlanFeature.objects.get_feature(
obj=project, type=PlanFeature.TYPE_CDN
)
return settings.ALLOW_PRIVATE_REPOS and (
plan_has_cdn or project.has_feature(Feature.CDN_ENABLED)
)
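# Illustrative sketch (not part of the upstream module): the simplest opt-in is to set
# ``cache_request = True`` on a view whose response is identical for anonymous and
# authenticated users; override ``can_be_cached`` instead for per-request decisions.
class CachedExampleView(CDNCacheControlMixin, ListView):
    cache_request = True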
| 31.413333 | 79 | 0.714346 |
1a498ea267bd0926a18f1ed85e138b27e35dbbbe
| 3,029 |
py
|
Python
|
script/analysis/movie_xz.py
|
soumide1102/nubhlight
|
85046add8b7e2c1419538864eb54205d33078772
|
[
"BSD-3-Clause"
] | 16 |
2020-02-05T22:59:21.000Z
|
2022-03-18T11:05:37.000Z
|
script/analysis/movie_xz.py
|
soumide1102/nubhlight
|
85046add8b7e2c1419538864eb54205d33078772
|
[
"BSD-3-Clause"
] | 13 |
2020-03-06T02:10:48.000Z
|
2021-06-15T20:00:30.000Z
|
script/analysis/movie_xz.py
|
soumide1102/nubhlight
|
85046add8b7e2c1419538864eb54205d33078772
|
[
"BSD-3-Clause"
] | 4 |
2020-02-21T04:59:44.000Z
|
2020-12-10T21:42:12.000Z
|
################################################################################
# #
# GENERATE MOVIES FROM SIMULATION OUTPUT #
# #
################################################################################
import sys; sys.dont_write_bytecode = True
sys.path.insert(0, '../')
import numpy as np
import hdf5_to_dict as io
import matplotlib
matplotlib.use('agg')
import matplotlib.pyplot as plt
import util
import glob
import os
import plot as bplt
FIGX = 13
FIGY = 10
SIZE = 40
if len(sys.argv) != 2:
util.warn('PATH TO DUMP FOLDER NEEDED AS ARGUMENT')
sys.exit()
path = sys.argv[1]
#files = np.sort(glob.glob(os.path.join(path, "dump*.h5")))
print 'Getting list of dumps...'
#files = io.get_dumps_full(os.path.join(path, 'dumps/'))
files = io.get_dumps_reduced(os.path.join(path, 'dumps/'))
#FRAMEDIR = 'FRAMES'
FRAMEDIR = os.path.join(path, 'frames_xz/')
print FRAMEDIR
util.make_dir(FRAMEDIR)
hdr = io.load_hdr(files[0])
geom = io.load_geom(hdr)
print len(files)
def plot(args):
n = args
imname = 'frame_%08d.png' % n
imname = os.path.join(FRAMEDIR, imname)
print '%08d / ' % (n+1) + '%08d' % len(files)
print imname
# Ignore if frame already exists
if os.path.isfile(imname):
return
dump = io.load_dump(files[n], geom)
fig = plt.figure(figsize=(FIGX, FIGY))
fig.suptitle('t = %05.2g' % dump['t'])
ax = plt.subplot(2,2,1)
bplt.plot_xz(ax, geom, np.log10(dump['RHO']), dump,
vmin=-4, vmax = 0, label='RHO')
bplt.overlay_field(ax, geom, dump, NLEV=10)
ax.set_xlim([-SIZE, SIZE]); ax.set_ylim([-SIZE, SIZE])
ax = plt.subplot(2,2,2)
bplt.plot_xz(ax, geom, np.log10(dump['beta']), dump,
vmin=-2, vmax=2, label='beta', cmap='RdBu_r')
bplt.overlay_field(ax, geom, dump, NLEV=10)
ax.set_xlim([-SIZE, SIZE]); ax.set_ylim([-SIZE, SIZE])
ax = plt.subplot(2,2,3)
bplt.plot_xy(ax, geom, np.log10(dump['RHO']), dump,
vmin=-4, vmax=0, label='RHO')
ax.set_xlim([-SIZE, SIZE]); ax.set_ylim([-SIZE, SIZE])
ax = plt.subplot(2,2,4)
bplt.plot_xy(ax, geom, np.log10(dump['beta']), dump,
vmin=-2, vmax=2, label='beta', cmap='RdBu_r')
ax.set_xlim([-SIZE, SIZE]); ax.set_ylim([-SIZE, SIZE])
#ax.pcolormesh(dump['X1'][:,:,0], dump['X2'][:,:,0], dump['RHO'][:,:,0])
plt.savefig(imname, bbox_inches='tight', dpi=100)
plt.close(fig)
import multiprocessing
import signal
import psutil
nthreads = psutil.cpu_count(logical=False)
nthreads = 4
print 'Number of CPUs: %i' % psutil.cpu_count(logical=False)
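# Ignore SIGINT while the pool is created so that forked workers inherit SIG_IGN and never
# see Ctrl-C directly; the parent's handler is restored immediately after, which lets the
# KeyboardInterrupt handler below terminate the pool cleanly.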
original_sigint_handler = signal.signal(signal.SIGINT, signal.SIG_IGN)
pool = multiprocessing.Pool(nthreads)
signal.signal(signal.SIGINT, original_sigint_handler)
try:
res = pool.map_async(plot, range(len(files)))
res.get(720000)
except KeyboardInterrupt:
print 'Caught interrupt!'
pool.terminate()
else:
pool.close()
pool.join()
| 28.308411 | 81 | 0.593595 |
8da94eaa42175351b60a2d7752aec8d51ed6958f
| 17,098 |
py
|
Python
|
main.py
|
mustartt/pixiv-bot
|
48a0ea29841ab6d1eddaebd2b60fc9d2a246ee85
|
[
"MIT"
] | null | null | null |
main.py
|
mustartt/pixiv-bot
|
48a0ea29841ab6d1eddaebd2b60fc9d2a246ee85
|
[
"MIT"
] | null | null | null |
main.py
|
mustartt/pixiv-bot
|
48a0ea29841ab6d1eddaebd2b60fc9d2a246ee85
|
[
"MIT"
] | null | null | null |
import discord
from discord.ext import commands
import credentials
from pixiv_module import PixivModule
from pixivapi.enums import SearchTarget, Size, ContentType, Sort
from pixivapi.models import Illustration
from typing import List, Tuple, Dict
from functools import reduce
import io
import asyncio
import random
import re
from Levenshtein import ratio
from PIL import Image
cred = credentials.Credentials('settings.cfg')
TOKEN = cred.get_item('DEFAULT', 'discord_token')
cmd_pref = cred.get_item('DEFAULT', 'command_prefix')
pixiv_username = cred.get_item('DEFAULT', 'pixiv_username')
pixiv_password = cred.get_item('DEFAULT', 'pixiv_password')
pixiv_refresh = cred.get_refresh_token()
LEFT_ARROW = '\u2B05'
RIGHT_ARROW = '\u27A1'
HEART = '\u2764'
DOWNLOAD = '\u2B07'
async def add_reactions(msg):
await msg.add_reaction(LEFT_ARROW)
await msg.add_reaction(RIGHT_ARROW)
await msg.add_reaction(HEART)
await msg.add_reaction(DOWNLOAD)
# create PixivModule
pixiv = PixivModule(pixiv_username, pixiv_password,
cred.write_refresh_token,
refresh_token=pixiv_refresh).get_client()
client = commands.Bot(command_prefix=cmd_pref)
client.remove_command('help')
@client.event
async def on_ready():
print(f"{client.user.name} has connected to discord.")
activity = discord.Activity(type=discord.ActivityType.watching, name=f'prefix {cmd_pref}')
await client.change_presence(activity=activity)
# create check authentication loop task
client.loop.create_task(check_auth())
@client.command(name='test')
async def test(ctx, *, query):
await ctx.send('test')
INTERVAL = 15 * 60
async def check_auth():
while True:
await asyncio.sleep(INTERVAL)
try:
pixiv.search_popular('rem')
except Exception as err:
print('Exception Raised in check_auth()')
print(err)
# attempt to reauthenticate
pixiv.authenticate(pixiv_refresh)
@client.command(name='help')
async def help(ctx):
embed=discord.Embed(title="pixiv-bot Help Page", color=0xff6b6b)
embed.add_field(name="Commands",
value="""`?search tag1, tag2, ...` Searches pixiv.net
for the top 30 most popular illustrations associated
with the tags. Enter tags seperated by commas.""",
inline=False)
embed.add_field(name="Reaction System",
value=f"""
- React to {LEFT_ARROW} to go back to the previous panel/image.
- React to {RIGHT_ARROW} to go to the next panel/image.
- React to {HEART} to find 3 related images.
- React to {DOWNLOAD} to get the full quality images.
""",
inline=False)
await ctx.send(embed=embed)
# max download size
FILE_SIZE_MAX = 7900000
IMAGE_QUALITY = 85
def process_image(image_buffer: io.BytesIO) -> io.BytesIO:
if image_buffer.getbuffer().nbytes < FILE_SIZE_MAX:
return image_buffer
image_buffer.seek(0)
image = Image.open(image_buffer)
new_buffer = io.BytesIO()
image.save(new_buffer, format='JPEG', quality=IMAGE_QUALITY)
new_buffer.seek(0)
return new_buffer
@client.command(name='download')
async def download(ctx, illust_id: int):
# trigger typing
await ctx.trigger_typing()
try:
illust = pixiv.fetch_illustration(illust_id)
file_streams = pixiv.get_illust_byte_streams(illust, size=Size.ORIGINAL)
# check for oversized files
num_of_large = len([buffer
for buffer in file_streams
if buffer.getbuffer().nbytes > FILE_SIZE_MAX])
if num_of_large:
await ctx.send(f'There are {num_of_large} file(s) that are over 8MBs. Performing compressions.')
# DEBUG:
image_binaries = [process_image(x) for x in file_streams]
# send images as attachments
await ctx.send(files=[discord.File(fp=stream,
filename=f'{illust.id}_{index}.jpg')
for index, stream in enumerate(image_binaries)])
except Exception as err:
await ctx.send('Failed to download.')
print(err)
THRESHOLD = 0.5
@client.command(name='search')
async def search(ctx, *, query: str):
#trigger typing
await ctx.trigger_typing()
def find_best_tag(query: str, query_item: Dict[str, str]) -> Tuple[str, float]:
def calc_max_ratio(query: str, query_item: Dict[str, str]) -> float:
eng_tag = query_item['translated_name']
jap_tag = query_item['name']
eng_ratio = ratio(query.lower(), str(eng_tag).lower())
jap_ratio = ratio(query.lower(), str(jap_tag).lower())
return max(eng_ratio, jap_ratio)
best_item = max(tag_suggestions, key=lambda x: calc_max_ratio(query, x))
return (best_item['name'], calc_max_ratio(query, best_item))
tag_list = query.split(',')
tag_result = []
#DEBUG:
#await ctx.send(f'```{tag_list}```')
for tag in tag_list:
tag_suggestions = pixiv.search_autocomplete(tag.strip())
# create query for only valid tags
if tag_suggestions:
best, confidence = find_best_tag(tag, tag_suggestions)
if confidence < THRESHOLD:
tag_result.append(tag)
else:
tag_result.append(best)
else:
tag_result.append(tag.strip())
# generate api query tags
compiled_query = ' '.join(tag_result)
query_display = ' '.join(map(lambda x: f'`#{x}`', tag_result))
#DEBUG:
#await ctx.send(f'```{compiled_query}```')
# get illustrations
# TODO: Change to pixiv.search_popular()
#res = pixiv.search_popular_preview(compiled_query, search_target=SearchTarget.TAGS_PARTIAL)
res = pixiv.search_popular_preview(compiled_query) # use exact tag matching
illusts = res['illustrations'] # array of Illustrations
curr_page = 0
pages_total = len(illusts)
# create gallery embed
preview = pixiv.get_illust_byte_streams(illusts[curr_page])[0]
embed, file = create_embed_file('Search Results',
f'tags: {query_display}',
illusts[curr_page].id,
preview)
embed.set_footer(text=f'Page {curr_page+1}/{pages_total} id: {illusts[curr_page].id}')
message = await ctx.send(embed=embed, file=file)
# add reactions
await add_reactions(message)
# implements the reaction to controls
def check(reaction, user):
return not user.bot and reaction.message == message
while True:
try:
reaction, user = await client.wait_for('reaction_add', timeout=TIMEOUT,
check=check)
if reaction.emoji == LEFT_ARROW and pages_total > 1:
#trigger typing
await ctx.trigger_typing()
# calc new page
curr_page = curr_page - 1
if curr_page < 0:
curr_page = pages_total - 1
# edit gallery embed
preview = pixiv.get_illust_byte_streams(illusts[curr_page])[0]
embed, file = create_embed_file('Search Results',
f'tags: {query_display}',
illusts[curr_page].id,
preview)
embed.set_footer(text=f'Page {curr_page+1}/{pages_total} id: {illusts[curr_page].id}')
await message.delete()
message = await ctx.send(embed=embed, file=file)
# add reactions
await add_reactions(message)
if reaction.emoji == RIGHT_ARROW and pages_total > 1:
#trigger typing
await ctx.trigger_typing()
# calc new page
curr_page = (curr_page + 1) % pages_total
# edit gallery embed
preview = pixiv.get_illust_byte_streams(illusts[curr_page])[0]
embed, file = create_embed_file('Search Results',
f'tags: {query_display}',
illusts[curr_page].id,
preview)
embed.set_footer(text=f'Page {curr_page+1}/{pages_total} id: {illusts[curr_page].id}')
await message.delete()
message = await ctx.send(embed=embed, file=file)
# add reactions
await add_reactions(message)
if reaction.emoji == HEART:
# trigger typing
await ctx.trigger_typing()
await ctx.invoke(client.get_command('search_related'),
illust_id=illusts[curr_page].id)
if reaction.emoji == DOWNLOAD:
# invoke download command
await ctx.invoke(client.get_command('download'),
illust_id=illusts[curr_page].id)
except asyncio.TimeoutError:
break
except Exception as err:
print("Something else went wrong")
print(err)
break
@client.command(name='get_tag_popular_result')
async def get_tag_popular_result(ctx, *, query: str):
res = pixiv.search_popular_preview(query)
content = ""
for illust in res['illustrations']:
content += f'{illust.id} -> {illust.title} {illust.total_bookmarks}' + '\n'
await ctx.send(f'```{content}```')
#FIRST_CAPTURE = 10
@client.command(name='search_related')
async def search_related(ctx, illust_id:int, number=3):
#trigger typing
await ctx.trigger_typing()
# query related images
res = pixiv.fetch_illustration_related(illust_id)
related = res['illustrations']
"""
sorted(res['illustrations'][:FIRST_CAPTURE],
key=lambda work: work.total_bookmarks,
reverse=True)
"""
# check if query is empty
if not related:
await ctx.send("No result found.")
return
# send number of images as gallery
await asyncio.wait([
ctx.invoke(client.get_command('create_gallery'),
illust_id=illust.id)
for illust in related[:number]
])
@client.command(name='search_tag')
async def search_tag(ctx, *, tag:str):
# Create Message Embed Object
embed=discord.Embed(title="Search Result Tags",
description="Please select the the appropriate tags",
color=0xff9214)
async with ctx.typing():
tag_result = pixiv.search_autocomplete(tag)
if tag_result:
# Process the tag results
for index, tag_dict in enumerate(tag_result):
eng_tag = tag_dict['translated_name']
jap_tag = tag_dict['name']
embed.add_field(name=f"{index+1}. {jap_tag}",
value=eng_tag,
inline=False)
else:
embed.description = ""
embed.add_field(name="No result found",
value="Please check the tag again.",
inline=False)
# Send Embeded Message
await ctx.send(embed=embed)
"""
@search_tag.error
async def search_tag_error(ctx, error):
if isinstance(error, commands.MissingRequiredArgument):
await ctx.send("Missing Arguements.")
"""
TIMEOUT = 30.0
def create_embed_file(title: str,
description: str,
image_name:str,
file_stream: io.BytesIO) -> Tuple[discord.Embed, discord.File]:
"""
Creates a discord.Embed with title, description, image_name, and the image's
BytesIO Stream. To produce a tuple (discord.Embed, discord.File)
"""
caption = re.sub('<[^<]+?>', '', description)
caption = re.sub(r'http\S+', '', caption)
embed = discord.Embed(title=title, description=caption, color=0x00cec9)
embed.set_image(url=f"attachment://{image_name}.jpg")
# reset image byte stream back to 0
file_stream.seek(0)
file = discord.File(fp=file_stream, filename=f"{image_name}.jpg")
return (embed, file)
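# A minimal usage sketch for create_embed_file (it mirrors the calls made in the
# search and create_gallery commands; the variables are assumed to exist):
#   embed, file = create_embed_file(illust.title, illust.caption,
#                                   f"{illust.id}_p0", image_binaries[0])
#   embed.set_footer(text=f'Page Index 1/{len(image_binaries)} id: {illust.id}')
#   message = await ctx.send(embed=embed, file=file)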
@client.command(name='create_gallery')
async def create_gallery(ctx, illust_id:int):
"""
TODO: Attempts to open file in image_cache
- if does not exists, download the images
- if images has multiple panels download as id_p{panel_number}.{ext}
After react period expires after 30 seconds, images are optionally purged
- preview images uses Size.LARGE (for now)
"""
#trigger typing
await ctx.trigger_typing()
illust = pixiv.fetch_illustration(illust_id)
image_binaries = pixiv.get_illust_byte_streams(illust)
pages_total = len(image_binaries)
curr_page = 0 # index starts at 0 -> display + 1
# multi page illustration
embed, file = create_embed_file(illust.title,
illust.caption,
f"{illust.id}_p{curr_page}",
image_binaries[curr_page])
embed.set_footer(text=f'Page Index {curr_page+1}/{pages_total} id: {illust.id}')
message = await ctx.send(file=file, embed=embed)
# add reaction emojis
await add_reactions(message)
# implements the reaction to controls
def check(reaction, user):
return not user.bot and reaction.message == message
while True:
try:
reaction, user = await client.wait_for('reaction_add', timeout=TIMEOUT,
check=check)
if reaction.emoji == LEFT_ARROW and pages_total > 1:
# calc new page
curr_page = curr_page - 1
if curr_page < 0:
curr_page = pages_total - 1
# edit current embed
embed, file = create_embed_file(illust.title,
illust.caption,
f"{illust.id}_p{curr_page}",
image_binaries[curr_page])
embed.set_footer(text=f'Page Index {curr_page+1}/{pages_total} id: {illust.id}')
# resend message
await message.delete()
message = await ctx.send(file=file, embed=embed)
# add reaction emojis
await add_reactions(message)
if reaction.emoji == RIGHT_ARROW and pages_total > 1:
# calc new page
curr_page = (curr_page + 1) % pages_total
# edit current embed
embed, file = create_embed_file(illust.title,
illust.caption,
f"{illust.id}_p{curr_page}",
image_binaries[curr_page])
embed.set_footer(text=f'Page Index {curr_page+1}/{pages_total} id: {illust.id}')
# resend message
await message.delete()
message = await ctx.send(file=file, embed=embed)
# add reaction emojis
await add_reactions(message)
if reaction.emoji == HEART:
await ctx.invoke(client.get_command('search_related'),
illust_id=illust.id)
if reaction.emoji == DOWNLOAD:
# invoke download command
await ctx.invoke(client.get_command('download'),
illust_id=illust.id)
except asyncio.TimeoutError:
break
except Exception as err:
print("Something else went wrong")
print(err)
break
# Starting Discord Bot
client.run(TOKEN)
| 30.208481 | 109 | 0.549596 |
ded0ca98c35800f3d6b43f09b40257d126e474bc
| 1,623 |
py
|
Python
|
neural_mmo/forge/blade/io/stimulus/hook.py
|
fangqyi/sandbox-society
|
39a539994f47f4937ec3ff698f8ed08a5161e14a
|
[
"MIT"
] | 4 |
2020-11-08T22:33:15.000Z
|
2020-11-21T15:45:43.000Z
|
neural_mmo/forge/blade/io/stimulus/hook.py
|
davidADSP/neural-mmo
|
c9510890576fad1037474340194d0b5342a9b45f
|
[
"MIT"
] | 1 |
2021-09-30T07:57:46.000Z
|
2021-10-02T00:39:11.000Z
|
neural_mmo/forge/blade/io/stimulus/hook.py
|
davidADSP/neural-mmo
|
c9510890576fad1037474340194d0b5342a9b45f
|
[
"MIT"
] | 2 |
2021-09-16T16:43:03.000Z
|
2021-09-28T18:12:41.000Z
|
from pdb import set_trace as T
class StimHook:
def __init__(self, meta, config):
self.meta = meta
self.config = config
self.inputs(meta, config)
def inputs(self, cls, config):
for _, c in cls:
self.__dict__[c.name] = c(config)
def outputs(self, config):
data = {}
for name, cls in self.meta:
attr = self.__dict__[cls.name]
data[cls.name] = attr.packet()
return data
def packet(self):
return self.outputs(self.config)
| 67.625 | 82 | 0.187924 |
50ab5b93937d89850af48a9c68fd87bb010f82fd
| 3,217 |
py
|
Python
|
python/tvm/meta_schedule/testing/run_subgraph_meta_schedule.py
|
hypercubestart/relax
|
0972498b14b2d75dbb179ea332378137ecebcfdc
|
[
"Apache-2.0"
] | null | null | null |
python/tvm/meta_schedule/testing/run_subgraph_meta_schedule.py
|
hypercubestart/relax
|
0972498b14b2d75dbb179ea332378137ecebcfdc
|
[
"Apache-2.0"
] | null | null | null |
python/tvm/meta_schedule/testing/run_subgraph_meta_schedule.py
|
hypercubestart/relax
|
0972498b14b2d75dbb179ea332378137ecebcfdc
|
[
"Apache-2.0"
] | null | null | null |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=missing-docstring
import argparse
import logging
from os import cpu_count
from typing import Optional
import tvm
from tvm import meta_schedule as ms
from tvm import tir
from tvm.meta_schedule.testing.te_workload import create_te_workload
def _parse_args():
args = argparse.ArgumentParser()
args.add_argument(
"--workload",
type=str,
required=True,
)
args.add_argument(
"--target",
type=str,
required=True,
)
args.add_argument(
"--num-trials",
type=int,
required=True,
)
args.add_argument(
"--work-dir",
type=str,
required=True,
)
args.add_argument(
"--rpc-host",
type=str,
required=True,
)
args.add_argument(
"--rpc-port",
type=int,
required=True,
)
args.add_argument(
"--rpc-key",
type=str,
required=True,
)
args.add_argument(
"--rpc-workers",
type=int,
required=True,
)
parsed = args.parse_args()
parsed.target = tvm.target.Target(parsed.target)
parsed.rpc_config = ms.runner.RPCConfig(
tracker_host=parsed.rpc_host,
tracker_port=parsed.rpc_port,
tracker_key=parsed.rpc_key,
session_timeout_sec=60,
)
return parsed
logging.basicConfig()
logging.getLogger("tvm.meta_schedule").setLevel(logging.DEBUG)
ARGS = _parse_args()
def main():
alloc_repeat = 1
runner = ms.runner.RPCRunner(
rpc_config=ARGS.rpc_config,
evaluator_config=ms.runner.EvaluatorConfig(
number=3,
repeat=1,
min_repeat_ms=100,
enable_cpu_cache_flush=False,
),
alloc_repeat=alloc_repeat,
max_workers=ARGS.rpc_workers,
)
sch: Optional[tir.Schedule] = ms.tune_tir(
mod=create_te_workload(ARGS.workload, 0),
target=ARGS.target,
config=ms.EvolutionarySearchConfig(
num_trials_per_iter=64,
num_trials_total=ARGS.num_trials,
init_min_unmeasured=50,
),
runner=runner, # type: ignore
task_name=ARGS.workload,
work_dir=ARGS.work_dir,
num_threads=cpu_count(),
)
if sch is None:
print("No valid schedule found!")
else:
print(sch.mod.script())
print(sch.trace)
if __name__ == "__main__":
main()
| 26.368852 | 68 | 0.641902 |
6f8f06b4678227be03bf2e08b7699619129f4195
| 7,530 |
py
|
Python
|
src/azure-cli/azure/cli/command_modules/appconfig/_utils.py
|
Zay2k/azure-cli
|
1ef59bace4dfad9cba137f49c8974aca4b0fff90
|
[
"MIT"
] | 1 |
2020-08-10T23:50:16.000Z
|
2020-08-10T23:50:16.000Z
|
src/azure-cli/azure/cli/command_modules/appconfig/_utils.py
|
Zay2k/azure-cli
|
1ef59bace4dfad9cba137f49c8974aca4b0fff90
|
[
"MIT"
] | 2 |
2020-09-12T04:31:23.000Z
|
2020-09-14T06:31:04.000Z
|
src/azure-cli/azure/cli/command_modules/appconfig/_utils.py
|
mjain2/azure-cli
|
1ef59bace4dfad9cba137f49c8974aca4b0fff90
|
[
"MIT"
] | null | null | null |
from __future__ import print_function
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# pylint: disable=line-too-long
from knack.log import get_logger
from knack.prompting import NoTTYException, prompt_y_n
from knack.util import CLIError
from azure.appconfiguration import AzureAppConfigurationClient
from azure.mgmt.appconfiguration.models import ErrorException
from ._client_factory import cf_configstore
from ._constants import HttpHeaders
logger = get_logger(__name__)
def construct_connection_string(cmd, config_store_name):
connection_string_template = 'Endpoint={};Id={};Secret={}'
# If the logged in user/Service Principal does not have 'Reader' or 'Contributor' role
# assigned for the requested AppConfig, resolve_store_metadata will raise CLI error
resource_group_name, endpoint = resolve_store_metadata(cmd, config_store_name)
try:
config_store_client = cf_configstore(cmd.cli_ctx)
access_keys = config_store_client.list_keys(resource_group_name, config_store_name)
for entry in access_keys:
if not entry.read_only:
return connection_string_template.format(endpoint, entry.id, entry.value)
except ErrorException as ex:
raise CLIError('Failed to get access keys for the App Configuration "{}". Make sure that the account that logged in has sufficient permissions to access the App Configuration store.\n{}'.format(config_store_name, str(ex)))
raise CLIError('Cannot find a read write access key for the App Configuration {}'.format(config_store_name))
def resolve_store_metadata(cmd, config_store_name):
try:
config_store_client = cf_configstore(cmd.cli_ctx)
all_stores = config_store_client.list()
for store in all_stores:
if store.name.lower() == config_store_name.lower():
# Id has a fixed structure /subscriptions/subscriptionName/resourceGroups/groupName/providers/providerName/configurationStores/storeName"
return store.id.split('/')[4], store.endpoint
except ErrorException as ex:
raise CLIError("Failed to get the list of App Configuration stores for the current user. Make sure that the account that logged in has sufficient permissions to access the App Configuration store.\n{}".format(str(ex)))
raise CLIError("Failed to find the App Configuration store '{}'.".format(config_store_name))
def user_confirmation(message, yes=False):
if yes:
return
try:
if not prompt_y_n(message):
raise CLIError('Operation cancelled.')
except NoTTYException:
raise CLIError(
'Unable to prompt for confirmation as no tty available. Use --yes.')
def resolve_connection_string(cmd, config_store_name=None, connection_string=None):
string = ''
error_message = '''You may have specified both store name and connection string, which is a conflict.
Please specify exactly ONE (suggest connection string) in one of the following options:\n
1 pass in App Configuration store name as a parameter\n
2 pass in connection string as a parameter\n
3 preset App Configuration store name using 'az configure --defaults app_configuration_store=xxxx'\n
4 preset connection string using 'az configure --defaults appconfig_connection_string=xxxx'\n
5 preset connection in environment variable like set AZURE_APPCONFIG_CONNECTION_STRING=xxxx'''
if config_store_name:
string = construct_connection_string(cmd, config_store_name)
if connection_string:
if string and ';'.join(sorted(connection_string.split(';'))) != string:
raise CLIError(error_message)
string = connection_string
connection_string_env = cmd.cli_ctx.config.get(
'appconfig', 'connection_string', None)
if connection_string_env:
if not is_valid_connection_string(connection_string_env):
raise CLIError(
"The environment variable connection string is invalid. Correct format should be Endpoint=https://example.appconfig.io;Id=xxxxx;Secret=xxxx")
if string and ';'.join(sorted(connection_string_env.split(';'))) != string:
raise CLIError(error_message)
string = connection_string_env
if not string:
raise CLIError(
'Please specify config store name or connection string (suggested).')
return string
def is_valid_connection_string(connection_string):
if connection_string is not None:
segments = connection_string.split(';')
if len(segments) != 3:
return False
segments.sort()
if segments[0].startswith('Endpoint=') and segments[1].startswith('Id=') and segments[2].startswith('Secret='):
return True
return False
def get_store_name_from_connection_string(connection_string):
if is_valid_connection_string(connection_string):
segments = dict(seg.split("=", 1) for seg in connection_string.split(";"))
endpoint = segments.get("Endpoint")
if endpoint:
return endpoint.split("//")[1].split('.')[0]
return None
def prep_null_label_for_url_encoding(label=None):
if label is not None:
import ast
# ast library requires quotes around string
label = '"{0}"'.format(label)
label = ast.literal_eval(label)
return label
def get_appconfig_data_client(cmd, name, connection_string, auth_mode, endpoint):
azconfig_client = None
if auth_mode == "key":
connection_string = resolve_connection_string(cmd, name, connection_string)
try:
azconfig_client = AzureAppConfigurationClient.from_connection_string(connection_string=connection_string,
user_agent=HttpHeaders.USER_AGENT)
except ValueError as ex:
raise CLIError("Failed to initialize AzureAppConfigurationClient due to an exception: {}".format(str(ex)))
if auth_mode == "login":
if not endpoint:
try:
if name:
_, endpoint = resolve_store_metadata(cmd, name)
else:
raise CLIError("App Configuration endpoint or name should be provided if auth mode is 'login'.")
except Exception as ex:
raise CLIError(str(ex) + "\nYou may be able to resolve this issue by providing App Configuration endpoint instead of name.")
from azure.cli.core._profile import Profile
profile = Profile(cli_ctx=cmd.cli_ctx)
# Due to this bug in get_login_credentials: https://github.com/Azure/azure-cli/issues/15179,
# we need to manage the AAD scope by passing appconfig endpoint as resource
cred, _, _ = profile.get_login_credentials(resource=endpoint)
try:
azconfig_client = AzureAppConfigurationClient(credential=cred,
base_url=endpoint,
user_agent=HttpHeaders.USER_AGENT)
except (ValueError, TypeError) as ex:
raise CLIError("Failed to initialize AzureAppConfigurationClient due to an exception: {}".format(str(ex)))
return azconfig_client
| 46.196319 | 230 | 0.678088 |
06d1577fc668e54031b4ed7b5e2ca808c40f9dd3
| 3,910 |
py
|
Python
|
scripts/release/release.py
|
Cosmo-Tech/onboarding-brewery-webapp
|
7ca365bbe25fbdf000434c207fa118754cc4f8cd
|
[
"MIT"
] | null | null | null |
scripts/release/release.py
|
Cosmo-Tech/onboarding-brewery-webapp
|
7ca365bbe25fbdf000434c207fa118754cc4f8cd
|
[
"MIT"
] | 102 |
2021-09-14T09:53:56.000Z
|
2022-03-29T10:13:59.000Z
|
scripts/release/release.py
|
Cosmo-Tech/onboarding-brewery-webapp
|
7ca365bbe25fbdf000434c207fa118754cc4f8cd
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding:utf-8 -*-
# Copyright (c) Cosmo Tech.
# Licensed under the MIT license.
import os
import sys
import argparse
from tools_common.git import (
check_head,
check_branch_is_main,
check_tag_exists,
commit_all_changes,
create_tag,
create_branch,
get_top_level_folder,
pull,
switch,
)
from tools_common.os import rm_dir, mv_file
from tools_common.jq import check_jq, get_npm_package_version, set_npm_package_version
def parse_arguments():
parser = argparse.ArgumentParser(
description='''Create release tags for Azure Sample Webapp. These tags will not be pushed by the script; this
operation must be done by the user.
'''
)
parser.add_argument("version", help="Version to release (e.g. 2.0.0)")
args = parser.parse_args()
return args.version
def check_all(version_brewery, version_vanilla):
return (
check_jq()
and check_branch_is_main()
and pull()
and check_head()
and not check_tag_exists(version_brewery)
and not check_tag_exists(version_vanilla)
)
def update_package_version(new_version):
root_folder = get_top_level_folder()
current_version = get_npm_package_version(root_folder)
if current_version != new_version:
print(f'Updating package version: {current_version} -> {new_version}')
return set_npm_package_version(root_folder, new_version)
return False
def update_package_version_pre_tag(new_version):
if update_package_version(new_version):
commit_all_changes(f"chore: bump webapp version to {new_version}")
def update_package_version_post_tag(new_version):
try:
version_details = new_version.split(".")
if version_details[2] != '0':
print("Not tagging a new minor version. Dev version will NOT be set in package.json.")
return
version_details[1] = str(int(version_details[1]) + 1)
new_dev_version = '.'.join(version_details) + '-dev'
if update_package_version(new_dev_version):
commit_all_changes(f"chore: bump webapp version to {new_dev_version}")
except Exception:
print(f"Could not determine next version number from version '{new_version}'."
"Dev version will NOT be set in package.json.")
def remove_specific_files():
root_folder = get_top_level_folder()
clean_cypress(root_folder)
clean_config(root_folder)
def clean_cypress(root_folder):
files_to_remove = [
'commons/actions/brewery',
'commons/constants/brewery',
'integration/brewery',
]
for file_to_remove in files_to_remove:
path = os.path.join(root_folder, 'cypress', file_to_remove)
rm_dir(path)
def clean_config(root_folder):
config_file_path = os.path.join(root_folder, 'src/config/ScenarioParameters.js')
vanilla_config_file_path = os.path.join(root_folder, 'src/config/ScenarioParameters.vanilla.js')
mv_file(vanilla_config_file_path, config_file_path)
def main():
version = parse_arguments()
version_brewery = f"v{version}-brewery"
version_vanilla = f"v{version}-vanilla"
branch_vanilla = f"release/{version}-vanilla"
if not check_all(version_brewery, version_vanilla):
sys.exit(1)
update_package_version_pre_tag(version)
create_tag(version_brewery)
create_branch(branch_vanilla)
remove_specific_files()
commit_all_changes(f'chore: prepare release {version_vanilla}')
create_tag(version_vanilla)
switch('-') # Switch back to the previous branch or commit
update_package_version_post_tag(version)
print('''
The release script ran successfully. Please check created tags are correct, and push them with:
git push --tags
You can get the release changelog with:
git-conventional-commits changelog
''')
return
if __name__ == "__main__":
main()
| 30.310078 | 117 | 0.71023 |
ce9a35962e82e88648a19d0e8772ba0217ef229d
| 12,218 |
py
|
Python
|
support/verify-reviews.py
|
andschwa/mesos
|
77f052996c9ae846d5793ef8c7c89fbb70c8372a
|
[
"Apache-2.0"
] | null | null | null |
support/verify-reviews.py
|
andschwa/mesos
|
77f052996c9ae846d5793ef8c7c89fbb70c8372a
|
[
"Apache-2.0"
] | null | null | null |
support/verify-reviews.py
|
andschwa/mesos
|
77f052996c9ae846d5793ef8c7c89fbb70c8372a
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python3
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This script is used to build and test (verify) reviews that are posted
to ReviewBoard. The script is intended for use by automated "ReviewBots"
that are run on ASF infrastructure (or by anyone that wishes to donate
some compute power). For example, see 'support/jenkins/reviewbot.sh'.
The script performs the following sequence:
* A query grabs review IDs from Reviewboard.
* In reverse order (most recent first), the script determines if the
review needs verification (if the review has been updated or changed
since the last run through this script).
* For each review that needs verification:
* The review is applied (via 'support/apply-reviews.py').
* Mesos is built and unit tests are run.
* The result is posted to ReviewBoard.
"""
import argparse
import time
import datetime
import json
import os
import platform
import subprocess
import sys
import urllib.error
import urllib.parse
import urllib.request
from common import ReviewBoardHandler, ReviewError, REVIEWBOARD_URL
REVIEW_SIZE = 1000000 # 1 MB in bytes.
# This is the mesos repo ID obtained from querying the reviews.apache.org API
MESOS_REPOSITORY_ID = 122
def parse_parameters():
"""Method to parse the arguments for argparse."""
parser = argparse.ArgumentParser(
description="Reviews that need verification from the Review Board")
parser.add_argument("-u", "--user", type=str, required=True,
help="Review Board user name")
parser.add_argument("-p", "--password", type=str, required=True,
help="Review Board user password")
parser.add_argument("-r", "--reviews", type=int, required=False,
default=-1, help="The number of reviews to fetch,"
" that will need verification")
parser.add_argument("--skip-verify", action='store_true', required=False,
help="Skip the verification and just write the review"
" ids that need verification")
default_hours_behind = 8
datetime_before = (datetime.datetime.now() -
datetime.timedelta(hours=default_hours_behind))
datetime_before_string = datetime_before.isoformat()
default_query = {"status": "pending", "repository": MESOS_REPOSITORY_ID,
"last-updated-from": datetime_before_string.split(".")[0]}
parser.add_argument("-q", "--query", type=str, required=False,
help="Query parameters, passed as string in JSON"
" format. Example: '%s'" % json.dumps(
default_query),
default=json.dumps(default_query))
subparsers = parser.add_subparsers(title="The script plug-in type")
file_parser = subparsers.add_parser(
"file", description="File plug-in just writes to a file all"
" the review ids that need verification")
file_parser.add_argument("-o", "--out-file", type=str, required=True,
help="The out file with the reviews IDs that"
" need verification")
return parser.parse_args()
def shell(command):
"""Run a shell command."""
print(command)
return subprocess.check_output(
command, stderr=subprocess.STDOUT, shell=True)
def apply_review(review_id):
"""Apply a review using the script apply-reviews.py."""
print("Applying review %s" % review_id)
shell("python support/apply-reviews.py -n -r %s" % review_id)
def apply_reviews(review_request, reviews, handler):
"""Apply multiple reviews at once."""
# If there are no reviewers specified throw an error.
if not review_request["target_people"]:
raise ReviewError("No reviewers specified. Please find a reviewer by"
" asking on JIRA or the mailing list.")
# If there is a circular dependency, throw an error.
if review_request["id"] in reviews:
raise ReviewError("Circular dependency detected for review %s."
"Please fix the 'depends_on' field."
% review_request["id"])
else:
reviews.append(review_request["id"])
# First recursively apply the dependent reviews.
for review in review_request["depends_on"]:
review_url = review["href"]
print("Dependent review: %s" % review_url)
apply_reviews(handler.api(review_url)["review_request"],
reviews, handler)
# Now apply this review if not yet submitted.
if review_request["status"] != "submitted":
apply_review(review_request["id"])
def post_review(review_request, message, handler):
"""Post a review on the review board."""
print("Posting review: %s" % message)
review_url = review_request["links"]["reviews"]["href"]
data = urllib.parse.urlencode({'body_top': message, 'public': 'true'})
handler.api(review_url, data)
# @atexit.register
def cleanup():
"""Clean the git repository."""
try:
shell("git clean -fd")
HEAD = shell("git rev-parse HEAD")
print(HEAD)
shell("git checkout HEAD -- %s" % HEAD)
except subprocess.CalledProcessError as err:
print("Failed command: %s\n\nError: %s" % (err.cmd, err.output))
def verify_review(review_request, handler):
"""Verify a review."""
print("Verifying review %s" % review_request["id"])
build_output = "build_" + str(review_request["id"])
try:
# Recursively apply the review and its dependents.
reviews = []
apply_reviews(review_request, reviews, handler)
reviews.reverse() # Reviews are applied in the reverse order.
if platform.system() == 'Windows':
command = "support\\windows-build.bat"
# There is no equivalent to `tee` on Windows.
subprocess.check_call(
['cmd', '/c', '%s 2>&1 > %s' % (command, build_output)])
else:
# Launch docker build script.
# TODO(jojy): Launch 'docker_build.sh' in subprocess so that
# verifications can be run in parallel for various configurations.
configuration = ("export "
"OS='ubuntu:14.04' "
"BUILDTOOL='autotools' "
"COMPILER='gcc' "
"CONFIGURATION='--verbose "
"--disable-libtool-wrappers' "
"ENVIRONMENT='GLOG_v=1 MESOS_VERBOSE=1'")
command = "%s; ./support/docker-build.sh" % configuration
# `tee` the output so that the console can log the whole build
# output. `pipefail` ensures that the exit status of the build
# command is preserved even after tee'ing.
subprocess.check_call(['bash', '-c',
'set -o pipefail; %s 2>&1 | tee %s'
% (command, build_output)])
# Success!
post_review(
review_request,
"Patch looks great!\n\n"
"Reviews applied: %s\n\n"
"Passed command: %s" % (reviews, command), handler)
except subprocess.CalledProcessError as err:
# If we are here because the docker build command failed, read the
# output from `build_output` file. For all other command failures read
# the output from `e.output`.
if os.path.exists(build_output):
output = open(build_output).read()
else:
output = err.output
if platform.system() == 'Windows':
# We didn't output anything during the build (because `tee`
# doesn't exist), so we print the output to stdout upon error.
# Pylint raises a no-member error on that line due to a bug
# fixed in pylint 1.7.
# TODO(ArmandGrillet): Remove this once pylint updated to >= 1.7.
# pylint: disable=no-member
sys.stdout.buffer.write(output)
# Truncate the output when posting the review as it can be very large.
if len(output) > REVIEW_SIZE:
output = "...<truncated>...\n" + output[-REVIEW_SIZE:]
output += "\nFull log: "
output += urllib.parse.urljoin(os.environ['BUILD_URL'], 'console')
post_review(
review_request,
"Bad patch!\n\n" \
"Reviews applied: %s\n\n" \
"Failed command: %s\n\n" \
"Error:\n%s" % (reviews, err.cmd, output), handler)
except ReviewError as err:
post_review(
review_request,
"Bad review!\n\n" \
"Reviews applied: %s\n\n" \
"Error:\n%s" % (reviews, err.args[0]), handler)
# Clean up.
# cleanup()
def verification_needed_write(review_ids, parameters):
"""Write the IDs of the review requests that need verification."""
num_reviews = len(review_ids)
print("%s review requests need verification" % num_reviews)
# out_file parameter is mandatory to be passed
try:
# Using file plug-in
with open(parameters.out_file, 'w') as f:
f.write('\n'.join(review_ids))
except Exception:
print("Failed opening file '%s' for writing" % parameters.out_file)
raise
def main():
"""Main function to verify the submitted reviews."""
parameters = parse_parameters()
print("\n%s - Running %s" % (time.strftime('%m-%d-%y_%T'),
os.path.abspath(__file__)))
# The colon from timestamp gets encoded and we don't want it to be encoded.
# Replacing %3A with colon.
query_string = urllib.parse.urlencode(
json.loads(parameters.query)).replace("%3A", ":")
review_requests_url = "%s/api/review-requests/?%s" % (REVIEWBOARD_URL,
query_string)
handler = ReviewBoardHandler(parameters.user, parameters.password)
num_reviews = 0
review_ids = []
review_requests = handler.api(review_requests_url)
for review_request in reversed(review_requests["review_requests"]):
if parameters.reviews == -1 or num_reviews < parameters.reviews:
try:
needs_verification = handler.needs_verification(review_request)
if not needs_verification:
continue
# An exception is raised if cyclic dependencies are found
handler.get_dependent_review_ids(review_request)
except ReviewError as err:
message = ("Bad review!\n\n"
"Error:\n%s" % (err.args[0]))
handler.post_review(review_request, message, handler)
continue
except Exception as err:
print("Error occured: %s" % err)
needs_verification = False
print("WARNING: Cannot find if review %s needs"
" verification" % (review_request["id"]))
if not needs_verification:
continue
review_ids.append(str(review_request["id"]))
num_reviews += 1
if not parameters.skip_verify:
verify_review(review_request, handler)
verification_needed_write(review_ids, parameters)
if __name__ == '__main__':
main()
| 40.456954 | 79 | 0.612784 |
fab0aa529d477d112e0bdba96c7544646318360d
| 8,533 |
py
|
Python
|
tests/integration/commands/apps.py
|
wilzbach/cli
|
bac7edb42618f3aeecd81ec80d5bec144fa893c2
|
[
"Apache-2.0"
] | null | null | null |
tests/integration/commands/apps.py
|
wilzbach/cli
|
bac7edb42618f3aeecd81ec80d5bec144fa893c2
|
[
"Apache-2.0"
] | null | null | null |
tests/integration/commands/apps.py
|
wilzbach/cli
|
bac7edb42618f3aeecd81ec80d5bec144fa893c2
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
import os
from unittest import mock
import click
from pytest import mark
from story import api, awesome, cli
from story.helpers import datetime
def test_list(runner, patch, init_sample_app_in_cwd):
patch.object(datetime, 'parse_psql_date_str', side_effect=[
'my_app_1_parsed_date',
'my_app_2_parsed_date'
])
patch.object(datetime, 'reltime', side_effect=[
'my_app_1_reltime',
'my_app_2_reltime',
])
ts_app_1 = '2019-06-26T10:31:22.499142+00:00'
ts_app_2 = '2018-12-17T10:19:56.659736+00:00'
with runner.runner.isolated_filesystem():
init_sample_app_in_cwd()
from story import api
from story.commands.apps import apps
patch.object(api.Apps, 'list',
return_value=[
{
'name': 'my_app_1',
'timestamp': ts_app_1,
'maintenance': False
},
{
'name': 'my_app_2',
'timestamp': ts_app_2,
'maintenance': True
},
]
)
args = ['list']
result = runner.run(apps, args=args)
expected_output = """
NAME STATE CREATED
============================================
my_app_1 running my_app_1_reltime
my_app_2 in maintenance my_app_2_reltime
""" # noqa (because there's a trailing whitespace in the header)
assert result.stdout.strip() == expected_output.strip()
assert datetime.reltime.mock_calls == [
mock.call('my_app_1_parsed_date'),
mock.call('my_app_2_parsed_date')
]
assert datetime.parse_psql_date_str.mock_calls == [
mock.call(ts_app_1),
mock.call(ts_app_2)
]
def test_list_no_apps(runner, patch, init_sample_app_in_cwd):
with runner.runner.isolated_filesystem():
init_sample_app_in_cwd()
from story import api
from story.commands.apps import apps
patch.object(api.Apps, 'list', return_value=[])
args = ['list']
result = runner.run(apps, args=args)
assert 'No application found' in result.stdout
assert 'story apps create' in result.stdout
def test_do_open(runner, init_sample_app_in_cwd, patch):
patch.object(click, 'launch')
with runner.runner.isolated_filesystem():
init_sample_app_in_cwd()
from story.commands.apps import do_open
result = runner.run(do_open)
app_url = 'https://my_app.storyscriptapp.com/'
click.launch.assert_called_with(app_url)
assert app_url in result.stdout
@mark.parametrize('isatty', [True, False])
def test_url(patch, runner, init_sample_app_in_cwd, isatty):
patch.object(click, 'launch')
with runner.runner.isolated_filesystem():
init_sample_app_in_cwd()
from story.commands import apps
patch.object(apps, '_isatty', return_value=isatty)
result = runner.run(apps.url)
app_url = 'https://my_app.storyscriptapp.com/'
if isatty:
app_url += '\n'
assert app_url == result.stdout
@mark.parametrize('all_apps', [True, False])
@mark.parametrize('yes_to_all', [True, False])
@mark.parametrize('app_name', [None, 'my_secret_app'])
def test_destroy(patch, runner, init_sample_app_in_cwd, all_apps,
yes_to_all, app_name):
if all_apps and app_name: # Invalid combination.
return
patch.object(api.Apps, 'destroy')
if all_apps:
patch.object(api.Apps, 'list', return_value=[
{'name': 'my_app_1'},
{'name': 'my_app_2'},
{'name': 'my_app_3'},
])
patch.object(cli, 'track')
args = []
stdin = 'y\n'
if all_apps:
stdin = 'y\ny\ny\n'
args.append('--all')
if yes_to_all:
args.append('--yes')
stdin = None
if app_name:
args.append('-a')
args.append(app_name)
with runner.runner.isolated_filesystem():
init_sample_app_in_cwd()
from story.commands import apps
result = runner.run(apps.destroy, stdin=stdin, args=args)
if not app_name:
app_name = 'my_app' # The default app name, from the current directory
if all_apps:
for i in range(1, 4):
assert f'Destroying application \'my_app_{i}\'' in result.stdout
assert api.Apps.destroy.mock_calls == [
mock.call(app='my_app_1'),
mock.call(app='my_app_2'),
mock.call(app='my_app_3'),
]
assert cli.track.mock_calls == [
mock.call('App Destroyed', {'App name': 'my_app_1'}),
mock.call('App Destroyed', {'App name': 'my_app_2'}),
mock.call('App Destroyed', {'App name': 'my_app_3'}),
]
else:
assert f'Destroying application \'{app_name}\'' in result.stdout
api.Apps.destroy.assert_called_with(app=app_name)
cli.track.assert_called_with('App Destroyed', {'App name': app_name})
def test_create_inside_an_existing_project(runner, init_sample_app_in_cwd):
with runner.runner.isolated_filesystem():
init_sample_app_in_cwd()
from story.commands.apps import create
result = runner.run(create, exit_code=1)
assert 'There appears to be a Storyscript Cloud project in' \
f' {os.getcwd()}/story.yml already' in result.stdout
def test_create_with_short_name(runner):
with runner.runner.isolated_filesystem():
from story.commands.apps import create
result = runner.run(create, args=['a'], exit_code=1)
assert 'The name you specified is too short.' in result.stdout
assert 'use at least 4 characters' in result.stdout
@mark.parametrize('custom_app_name', [None, 'my_custom_app_name_is_too_cool'])
@mark.parametrize('team', [None, 'my_team_name'])
def test_create(runner, patch, custom_app_name, team):
args = []
if custom_app_name:
app_name = custom_app_name
args.append(app_name)
else:
app_name = 'my_app_name_different_than_default'
patch.object(awesome, 'new', return_value=app_name)
if team:
args.append('--team')
args.append(team)
from story.commands import apps
patch.object(apps, 'create_story_yaml', side_effect=apps.create_story_yaml)
patch.object(api.Apps, 'create')
patch.object(cli, 'track')
with runner.runner.isolated_filesystem():
result = runner.run(apps.create, args=args, exit_code=0)
with open('story.yml') as f:
actual_contents_of_story_yml = f.read()
assert 'Creating story.yml…' in result.stdout
assert f'App Name: {app_name}' in result.stdout
assert f'App URL: https://{app_name}.storyscriptapp.com/' in result.stdout
assert 'We hope you enjoy your deployment experience' in result.stdout
assert actual_contents_of_story_yml == f'app_name: {app_name}\n'
apps.create_story_yaml.assert_called_with(app_name)
api.Apps.create.assert_called_with(name=app_name, team=team)
cli.track.assert_called_with('App Created', {'App name': app_name})
def test_init_inside_an_existing_project(runner, init_sample_app_in_cwd):
with runner.runner.isolated_filesystem():
init_sample_app_in_cwd()
from story.commands import apps
result = runner.run(apps.init, args=['myapp'], exit_code=1)
assert 'There appears to be a Storyscript Cloud project in' \
f' {os.getcwd()}/story.yml already' in result.stdout
def test_init(runner, patch):
app_name = 'myapp'
patch.object(api.Apps, 'get_uuid_from_hostname')
from story.commands import apps
patch.object(apps, 'create_story_yaml', side_effect=apps.create_story_yaml)
with runner.runner.isolated_filesystem():
result = runner.run(apps.init, args=[app_name], exit_code=0)
with open('story.yml') as f:
actual_contents_of_story_yml = f.read()
apps.create_story_yaml.assert_called_with(app_name)
api.Apps.get_uuid_from_hostname.assert_called_with(app_name)
assert 'Creating story.yml…' in result.stdout
assert f'App Name: {app_name}' in result.stdout
assert f'App URL: https://{app_name}.storyscriptapp.com/' in result.stdout
assert 'We hope you enjoy your deployment experience' in result.stdout
assert actual_contents_of_story_yml == f'app_name: {app_name}\n'
| 31.603704 | 79 | 0.635533 |
a320d45a0b90579a5fe1aa94dd244c6818458cf8
| 6,397 |
py
|
Python
|
resources/enoki_lldb.py
|
njroussel/enoki
|
23a9f986077db7dfd102579379bd4f0682285dab
|
[
"BSD-3-Clause"
] | 115 |
2020-11-11T13:40:05.000Z
|
2022-01-28T23:26:00.000Z
|
resources/enoki_lldb.py
|
njroussel/enoki
|
23a9f986077db7dfd102579379bd4f0682285dab
|
[
"BSD-3-Clause"
] | 16 |
2021-05-14T15:27:32.000Z
|
2022-01-18T10:51:48.000Z
|
resources/enoki_lldb.py
|
njroussel/enoki
|
23a9f986077db7dfd102579379bd4f0682285dab
|
[
"BSD-3-Clause"
] | 7 |
2021-01-14T11:24:18.000Z
|
2021-12-28T21:51:41.000Z
|
###############################################################################
# LLDB Script to improve introspection of array types when debugging software
# using Enoki. Copy this file to "~/.lldb" (creating the directory, if not
# present) and then apppend the following line to the file "~/.lldbinit"
# (again, creating it if, not already present):
###############################################################################
# command script import ~/.lldb/enoki_lldb.py
###############################################################################
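# Once LLDB is restarted you can check that the formatters were registered by
# running the LLDB command "type category list", which should report an enabled
# "enoki" category (the category is created in __lldb_init_module below).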
import sys
import lldb
simple_types = {
'bool',
'char', 'unsigned char',
'short', 'unsigned short',
'int', 'unsigned int',
'long', 'unsigned long',
'long long', 'unsigned long long',
'float', 'double'
}
class StaticArraySynthProvider:
def __init__(self, instance, internal_dict):
self.instance = instance
def update(self):
itype = self.instance.GetType().GetCanonicalType().GetUnqualifiedType()
itype_name = itype.name
# Extract derived type
if itype_name.startswith('enoki::StaticArrayImpl'):
itype = itype.GetTemplateArgumentType(3)
itype_name = itype.name
# Determine the size
self.size = int(itype_name[itype_name.rfind(',')+1:itype_name.rfind('>')])
self.is_mask = 'Mask' in itype_name
data = self.instance.GetChildMemberWithName('m_data')
if data:
self.data_type = data.GetType().GetTemplateArgumentType(0)
else:
self.data_type = itype.GetTemplateArgumentType(0)
self.type_size = self.data_type.GetByteSize()
self.is_simple = self.data_type.name in simple_types
self.kmask = self.instance.GetChildMemberWithName('k')
def has_children(self):
return not self.is_simple and self.size > 0
def num_children(self):
return 0 if self.is_simple else self.size
def get_child_index(self, name):
try:
return int(name)
except Exception:
return -1
def get_child_at_index(self, index):
if index < 0 or index >= self.size:
return None
return self.instance.CreateChildAtOffset(
str(index), index * self.type_size, self.data_type)
def get_summary(self):
if self.is_simple:
if not self.is_mask:
result = [str(self.get_child_at_index(i).value) for i in range(self.size)]
else:
if self.kmask:
# AVX512 mask register
result = list(reversed(format(int(self.kmask.unsigned), '0%ib' % self.size)))
else:
result = [None] * self.size
for i in range(self.size):
value = self.get_child_at_index(i).value
result[i] = '0' if (value == '0' or value == 'false') else '1'
return '[' + ', '.join(result) + ']'
else:
return ''
class DynamicArraySummaryProvider:
def __init__(self, instance, internal_dict):
self.instance = instance
def update(self):
self.size = self.instance.GetChildMemberWithName('m_size').unsigned
self.packet_count = self.instance.GetChildMemberWithName('m_packets_allocated').unsigned
self.packet_type = self.instance.GetType().GetCanonicalType().\
GetUnqualifiedType().GetTemplateArgumentType(0)
self.packet_size = self.packet_type.GetByteSize()
self.ptr = self.instance.GetChildMemberWithName('m_packets').GetData()
error = lldb.SBError()
self.ptr = self.ptr.GetUnsignedInt64(offset=0, error=error) if self.ptr.GetByteSize() == 8 \
else self.ptr.GetUnsignedInt32(offset=0, error=error)
self.limit = 20
def has_children(self):
return False
def num_children(self):
return 0
def get_child_index(self, name):
return None
def get_child_at_index(self, index):
return None
def get_summary(self):
values = []
for i in range(self.packet_count):
value = str(self.instance.CreateValueFromAddress(str(i),
self.ptr + i*self.packet_size, self.packet_type))
assert value[-1] == ']'
values += value[value.rfind('[')+1:-1].split(', ')
if len(values) > self.size:
values = values[0:self.size]
break
if len(values) > self.limit:
break
if len(values) > self.limit:
values = values[0:self.limit]
values.append(".. %i skipped .." % (self.size - self.limit))
return '[' + ', '.join(values) + ']'
def attach(enoki_category, synth_class, type_name, summary=True, synth=True):
if summary:
def summary_func(instance, internal_dict):
synth = synth_class(instance.GetNonSyntheticValue(), internal_dict)
synth.update()
return synth.get_summary()
summary_func.__name__ = synth_class.__name__ + 'SummaryWrapper'
setattr(sys.modules[__name__], summary_func.__name__, summary_func)
summary = lldb.SBTypeSummary.CreateWithFunctionName(__name__ + '.' + summary_func.__name__)
summary.SetOptions(lldb.eTypeOptionCascade)
enoki_category.AddTypeSummary(lldb.SBTypeNameSpecifier(type_name, True), summary)
if synth:
synth = lldb.SBTypeSynthetic.CreateWithClassName(__name__ + '.' + synth_class.__name__)
synth.SetOptions(lldb.eTypeOptionCascade)
enoki_category.AddTypeSynthetic(lldb.SBTypeNameSpecifier(type_name, True), synth)
def __lldb_init_module(debugger, internal_dict):
enoki_category = debugger.CreateCategory('enoki')
enoki_category.SetEnabled(True)
# Static Enoki arrays
regexp_1 = r'enoki::(Array|Packet|Complex|Matrix|' \
'Quaternion|StaticArrayImpl)(Mask)?<.+>'
# Mitsuba 2 is one of the main users of Enoki. For convenience, also
# declare its custom array types here
regexp_2 = r'mitsuba::(Vector|Point|Normal|Spectrum|Color)<.+>'
regexp_combined = r'^(%s)|(%s)$' % (regexp_1, regexp_2)
attach(enoki_category, StaticArraySynthProvider, regexp_combined)
# Dynamic Enoki arrays
attach(enoki_category, DynamicArraySummaryProvider,
r"^enoki::DynamicArray(Impl)?<.+>$")
| 37.19186 | 100 | 0.606065 |
60145b361b8b56eed1d930ce1cdbbc6548d81245
| 26 |
py
|
Python
|
server_param.py
|
liuruoze/HierNet-SC2
|
7abfde0088e90416f11922d67c0f09659c7ecf81
|
[
"Apache-2.0"
] | 2 |
2022-02-28T08:39:43.000Z
|
2022-03-03T02:28:23.000Z
|
server_param.py
|
liuruoze/HierNet-SC2
|
7abfde0088e90416f11922d67c0f09659c7ecf81
|
[
"Apache-2.0"
] | null | null | null |
server_param.py
|
liuruoze/HierNet-SC2
|
7abfde0088e90416f11922d67c0f09659c7ecf81
|
[
"Apache-2.0"
] | null | null | null |
used_devices = "4,5,6,7"
| 8.666667 | 24 | 0.615385 |
6da9b7edac8a3a6abee995acece1f2e0cef98f09
| 683 |
py
|
Python
|
common/src/stack/command/stack/commands/list/node/xml/plugin_salt.py
|
khanfluence/stacki-cumulus-switch
|
df54afb20f6ea6a3a136b3c09b30df54ea79ffcc
|
[
"BSD-3-Clause"
] | null | null | null |
common/src/stack/command/stack/commands/list/node/xml/plugin_salt.py
|
khanfluence/stacki-cumulus-switch
|
df54afb20f6ea6a3a136b3c09b30df54ea79ffcc
|
[
"BSD-3-Clause"
] | null | null | null |
common/src/stack/command/stack/commands/list/node/xml/plugin_salt.py
|
khanfluence/stacki-cumulus-switch
|
df54afb20f6ea6a3a136b3c09b30df54ea79ffcc
|
[
"BSD-3-Clause"
] | null | null | null |
# @copyright@
# Copyright (c) 2006 - 2018 Teradata
# All rights reserved. Stacki(r) v5.x stacki.com
# https://github.com/Teradata/stacki/blob/master/LICENSE.txt
# @copyright@
import os
import stack.commands
class Plugin(stack.commands.Plugin):
"Include compiled salt templates into profile"
def provides(self):
return 'salt'
def run(self, attrs):
try:
fin = open(os.path.join(os.sep, 'export',
'stack', 'salt',
'compiled',
attrs['hostname'],
'kickstart.xml'), 'r')
except:
fin = None
if fin:
self.owner.addText('<stack:post>\n')
for line in fin.readlines():
self.owner.addText(line)
self.owner.addText('</stack:post>\n')
| 20.088235 | 60 | 0.650073 |
bc3acc8d18713ba16638b28d66decd71bf6b06bc
| 764 |
py
|
Python
|
ingestors/media/svg.py
|
EruditePanda/ingestors
|
7084092e610597fa75eb0d87c6d67e80687296f2
|
[
"MIT"
] | null | null | null |
ingestors/media/svg.py
|
EruditePanda/ingestors
|
7084092e610597fa75eb0d87c6d67e80687296f2
|
[
"MIT"
] | null | null | null |
ingestors/media/svg.py
|
EruditePanda/ingestors
|
7084092e610597fa75eb0d87c6d67e80687296f2
|
[
"MIT"
] | null | null | null |
import logging
from ingestors.base import Ingestor
from ingestors.support.pdf import PDFSupport
from ingestors.util import join_path
log = logging.getLogger(__name__)
class SVGIngestor(Ingestor, PDFSupport):
MIME_TYPES = [
'image/svg+xml'
]
EXTENSIONS = ['svg']
SCORE = 20
def ingest(self, file_path):
pdf_path = join_path(self.work_path, 'image.pdf')
self.exec_command('convert',
file_path,
'-density', '300',
'-define',
'pdf:fit-page=A4',
pdf_path)
self.assert_outfile(pdf_path)
self.result.flag(self.result.FLAG_IMAGE)
self.pdf_alternative_extract(pdf_path)
| 27.285714 | 57 | 0.575916 |
ac7fb74551ede6adf9d3f1b08060d08f29d95131
| 25,614 |
py
|
Python
|
dialogflow_v2beta1/proto/document_pb2.py
|
czahedi/dialogflow-python-client-v2
|
d9150d1def0a7262dc496b2f1313e02e7ae1a0b6
|
[
"Apache-2.0"
] | null | null | null |
dialogflow_v2beta1/proto/document_pb2.py
|
czahedi/dialogflow-python-client-v2
|
d9150d1def0a7262dc496b2f1313e02e7ae1a0b6
|
[
"Apache-2.0"
] | 3 |
2020-03-23T18:01:51.000Z
|
2021-03-19T23:15:15.000Z
|
pyenv/lib/python3.6/site-packages/dialogflow_v2beta1/proto/document_pb2.py
|
ronald-rgr/ai-chatbot-smartguide
|
c9c830feb6b66c2e362f8fb5d147ef0c4f4a08cf
|
[
"Apache-2.0"
] | null | null | null |
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: google/cloud/dialogflow_v2beta1/proto/document.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2
from google.longrunning import operations_pb2 as google_dot_longrunning_dot_operations__pb2
from google.protobuf import empty_pb2 as google_dot_protobuf_dot_empty__pb2
from google.protobuf import field_mask_pb2 as google_dot_protobuf_dot_field__mask__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='google/cloud/dialogflow_v2beta1/proto/document.proto',
package='google.cloud.dialogflow.v2beta1',
syntax='proto3',
serialized_pb=_b('\n4google/cloud/dialogflow_v2beta1/proto/document.proto\x12\x1fgoogle.cloud.dialogflow.v2beta1\x1a\x1cgoogle/api/annotations.proto\x1a#google/longrunning/operations.proto\x1a\x1bgoogle/protobuf/empty.proto\x1a google/protobuf/field_mask.proto\"\x94\x02\n\x08\x44ocument\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x14\n\x0c\x64isplay_name\x18\x02 \x01(\t\x12\x11\n\tmime_type\x18\x03 \x01(\t\x12P\n\x0fknowledge_types\x18\x04 \x03(\x0e\x32\x37.google.cloud.dialogflow.v2beta1.Document.KnowledgeType\x12\x15\n\x0b\x63ontent_uri\x18\x05 \x01(\tH\x00\x12\x11\n\x07\x63ontent\x18\x06 \x01(\tH\x00\"K\n\rKnowledgeType\x12\x1e\n\x1aKNOWLEDGE_TYPE_UNSPECIFIED\x10\x00\x12\x07\n\x03\x46\x41Q\x10\x01\x12\x11\n\rEXTRACTIVE_QA\x10\x02\x42\x08\n\x06source\"M\n\x14ListDocumentsRequest\x12\x0e\n\x06parent\x18\x01 \x01(\t\x12\x11\n\tpage_size\x18\x02 \x01(\x05\x12\x12\n\npage_token\x18\x03 \x01(\t\"n\n\x15ListDocumentsResponse\x12<\n\tdocuments\x18\x01 \x03(\x0b\x32).google.cloud.dialogflow.v2beta1.Document\x12\x17\n\x0fnext_page_token\x18\x02 \x01(\t\"\"\n\x12GetDocumentRequest\x12\x0c\n\x04name\x18\x01 \x01(\t\"d\n\x15\x43reateDocumentRequest\x12\x0e\n\x06parent\x18\x01 \x01(\t\x12;\n\x08\x64ocument\x18\x02 \x01(\x0b\x32).google.cloud.dialogflow.v2beta1.Document\"%\n\x15\x44\x65leteDocumentRequest\x12\x0c\n\x04name\x18\x01 \x01(\t\"\xb2\x01\n\x1aKnowledgeOperationMetadata\x12P\n\x05state\x18\x01 \x01(\x0e\x32\x41.google.cloud.dialogflow.v2beta1.KnowledgeOperationMetadata.State\"B\n\x05State\x12\x15\n\x11STATE_UNSPECIFIED\x10\x00\x12\x0b\n\x07PENDING\x10\x01\x12\x0b\n\x07RUNNING\x10\x02\x12\x08\n\x04\x44ONE\x10\x03\x32\xf1\x07\n\tDocuments\x12\x81\x02\n\rListDocuments\x12\x35.google.cloud.dialogflow.v2beta1.ListDocumentsRequest\x1a\x36.google.cloud.dialogflow.v2beta1.ListDocumentsResponse\"\x80\x01\x82\xd3\xe4\x93\x02z\x12\x37/v2beta1/{parent=projects/*/knowledgeBases/*}/documentsZ?\x12=/v2beta1/{parent=projects/*/agent/knowledgeBases/*}/documents\x12\xf0\x01\n\x0bGetDocument\x12\x33.google.cloud.dialogflow.v2beta1.GetDocumentRequest\x1a).google.cloud.dialogflow.v2beta1.Document\"\x80\x01\x82\xd3\xe4\x93\x02z\x12\x37/v2beta1/{name=projects/*/knowledgeBases/*/documents/*}Z?\x12=/v2beta1/{name=projects/*/agent/knowledgeBases/*/documents/*}\x12\xff\x01\n\x0e\x43reateDocument\x12\x36.google.cloud.dialogflow.v2beta1.CreateDocumentRequest\x1a\x1d.google.longrunning.Operation\"\x95\x01\x82\xd3\xe4\x93\x02\x8e\x01\"7/v2beta1/{parent=projects/*/knowledgeBases/*}/documents:\x08\x64ocumentZI\"=/v2beta1/{parent=projects/*/agent/knowledgeBases/*}/documents:\x08\x64ocument\x12\xea\x01\n\x0e\x44\x65leteDocument\x12\x36.google.cloud.dialogflow.v2beta1.DeleteDocumentRequest\x1a\x1d.google.longrunning.Operation\"\x80\x01\x82\xd3\xe4\x93\x02z*7/v2beta1/{name=projects/*/knowledgeBases/*/documents/*}Z?*=/v2beta1/{name=projects/*/agent/knowledgeBases/*/documents/*}B\xab\x01\n#com.google.cloud.dialogflow.v2beta1B\rDocumentProtoP\x01ZIgoogle.golang.org/genproto/googleapis/cloud/dialogflow/v2beta1;dialogflow\xf8\x01\x01\xa2\x02\x02\x44\x46\xaa\x02\x1fGoogle.Cloud.Dialogflow.V2beta1b\x06proto3')
,
dependencies=[google_dot_api_dot_annotations__pb2.DESCRIPTOR,google_dot_longrunning_dot_operations__pb2.DESCRIPTOR,google_dot_protobuf_dot_empty__pb2.DESCRIPTOR,google_dot_protobuf_dot_field__mask__pb2.DESCRIPTOR,])
_DOCUMENT_KNOWLEDGETYPE = _descriptor.EnumDescriptor(
name='KnowledgeType',
full_name='google.cloud.dialogflow.v2beta1.Document.KnowledgeType',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='KNOWLEDGE_TYPE_UNSPECIFIED', index=0, number=0,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='FAQ', index=1, number=1,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='EXTRACTIVE_QA', index=2, number=2,
options=None,
type=None),
],
containing_type=None,
options=None,
serialized_start=411,
serialized_end=486,
)
_sym_db.RegisterEnumDescriptor(_DOCUMENT_KNOWLEDGETYPE)
_KNOWLEDGEOPERATIONMETADATA_STATE = _descriptor.EnumDescriptor(
name='State',
full_name='google.cloud.dialogflow.v2beta1.KnowledgeOperationMetadata.State',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='STATE_UNSPECIFIED', index=0, number=0,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='PENDING', index=1, number=1,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='RUNNING', index=2, number=2,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='DONE', index=3, number=3,
options=None,
type=None),
],
containing_type=None,
options=None,
serialized_start=979,
serialized_end=1045,
)
_sym_db.RegisterEnumDescriptor(_KNOWLEDGEOPERATIONMETADATA_STATE)
_DOCUMENT = _descriptor.Descriptor(
name='Document',
full_name='google.cloud.dialogflow.v2beta1.Document',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='name', full_name='google.cloud.dialogflow.v2beta1.Document.name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='display_name', full_name='google.cloud.dialogflow.v2beta1.Document.display_name', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='mime_type', full_name='google.cloud.dialogflow.v2beta1.Document.mime_type', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='knowledge_types', full_name='google.cloud.dialogflow.v2beta1.Document.knowledge_types', index=3,
number=4, type=14, cpp_type=8, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='content_uri', full_name='google.cloud.dialogflow.v2beta1.Document.content_uri', index=4,
number=5, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='content', full_name='google.cloud.dialogflow.v2beta1.Document.content', index=5,
number=6, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
_DOCUMENT_KNOWLEDGETYPE,
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
_descriptor.OneofDescriptor(
name='source', full_name='google.cloud.dialogflow.v2beta1.Document.source',
index=0, containing_type=None, fields=[]),
],
serialized_start=220,
serialized_end=496,
)
_LISTDOCUMENTSREQUEST = _descriptor.Descriptor(
name='ListDocumentsRequest',
full_name='google.cloud.dialogflow.v2beta1.ListDocumentsRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='parent', full_name='google.cloud.dialogflow.v2beta1.ListDocumentsRequest.parent', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='page_size', full_name='google.cloud.dialogflow.v2beta1.ListDocumentsRequest.page_size', index=1,
number=2, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='page_token', full_name='google.cloud.dialogflow.v2beta1.ListDocumentsRequest.page_token', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=498,
serialized_end=575,
)
_LISTDOCUMENTSRESPONSE = _descriptor.Descriptor(
name='ListDocumentsResponse',
full_name='google.cloud.dialogflow.v2beta1.ListDocumentsResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='documents', full_name='google.cloud.dialogflow.v2beta1.ListDocumentsResponse.documents', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='next_page_token', full_name='google.cloud.dialogflow.v2beta1.ListDocumentsResponse.next_page_token', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=577,
serialized_end=687,
)
_GETDOCUMENTREQUEST = _descriptor.Descriptor(
name='GetDocumentRequest',
full_name='google.cloud.dialogflow.v2beta1.GetDocumentRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='name', full_name='google.cloud.dialogflow.v2beta1.GetDocumentRequest.name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=689,
serialized_end=723,
)
_CREATEDOCUMENTREQUEST = _descriptor.Descriptor(
name='CreateDocumentRequest',
full_name='google.cloud.dialogflow.v2beta1.CreateDocumentRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='parent', full_name='google.cloud.dialogflow.v2beta1.CreateDocumentRequest.parent', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='document', full_name='google.cloud.dialogflow.v2beta1.CreateDocumentRequest.document', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=725,
serialized_end=825,
)
_DELETEDOCUMENTREQUEST = _descriptor.Descriptor(
name='DeleteDocumentRequest',
full_name='google.cloud.dialogflow.v2beta1.DeleteDocumentRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='name', full_name='google.cloud.dialogflow.v2beta1.DeleteDocumentRequest.name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=827,
serialized_end=864,
)
_KNOWLEDGEOPERATIONMETADATA = _descriptor.Descriptor(
name='KnowledgeOperationMetadata',
full_name='google.cloud.dialogflow.v2beta1.KnowledgeOperationMetadata',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='state', full_name='google.cloud.dialogflow.v2beta1.KnowledgeOperationMetadata.state', index=0,
number=1, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
_KNOWLEDGEOPERATIONMETADATA_STATE,
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=867,
serialized_end=1045,
)
_DOCUMENT.fields_by_name['knowledge_types'].enum_type = _DOCUMENT_KNOWLEDGETYPE
_DOCUMENT_KNOWLEDGETYPE.containing_type = _DOCUMENT
_DOCUMENT.oneofs_by_name['source'].fields.append(
_DOCUMENT.fields_by_name['content_uri'])
_DOCUMENT.fields_by_name['content_uri'].containing_oneof = _DOCUMENT.oneofs_by_name['source']
_DOCUMENT.oneofs_by_name['source'].fields.append(
_DOCUMENT.fields_by_name['content'])
_DOCUMENT.fields_by_name['content'].containing_oneof = _DOCUMENT.oneofs_by_name['source']
_LISTDOCUMENTSRESPONSE.fields_by_name['documents'].message_type = _DOCUMENT
_CREATEDOCUMENTREQUEST.fields_by_name['document'].message_type = _DOCUMENT
_KNOWLEDGEOPERATIONMETADATA.fields_by_name['state'].enum_type = _KNOWLEDGEOPERATIONMETADATA_STATE
_KNOWLEDGEOPERATIONMETADATA_STATE.containing_type = _KNOWLEDGEOPERATIONMETADATA
DESCRIPTOR.message_types_by_name['Document'] = _DOCUMENT
DESCRIPTOR.message_types_by_name['ListDocumentsRequest'] = _LISTDOCUMENTSREQUEST
DESCRIPTOR.message_types_by_name['ListDocumentsResponse'] = _LISTDOCUMENTSRESPONSE
DESCRIPTOR.message_types_by_name['GetDocumentRequest'] = _GETDOCUMENTREQUEST
DESCRIPTOR.message_types_by_name['CreateDocumentRequest'] = _CREATEDOCUMENTREQUEST
DESCRIPTOR.message_types_by_name['DeleteDocumentRequest'] = _DELETEDOCUMENTREQUEST
DESCRIPTOR.message_types_by_name['KnowledgeOperationMetadata'] = _KNOWLEDGEOPERATIONMETADATA
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
Document = _reflection.GeneratedProtocolMessageType('Document', (_message.Message,), dict(
DESCRIPTOR = _DOCUMENT,
__module__ = 'google.cloud.dialogflow_v2beta1.proto.document_pb2'
,
__doc__ = """A document resource.
Attributes:
name:
The document resource name. The name must be empty when
creating a document. Format: ``projects/<Project
ID>/knowledgeBases/<Knowledge Base ID>/documents/<Document
ID>``.
display_name:
Required. The display name of the document. The name must be
1024 bytes or less; otherwise, the creation request fails.
mime_type:
Required. The MIME type of this document.
knowledge_types:
Required. The knowledge type of document content.
source:
Required. The source of this document.
content_uri:
The URI where the file content is located. For documents
stored in Google Cloud Storage, these URIs must have the form
``gs://<bucket-name>/<object-name>``. NOTE: External URLs
must correspond to public webpages, i.e., they must be indexed
by Google Search. In particular, URLs for showing documents in
Google Cloud Storage (i.e. the URL in your browser) are not
supported. Instead use the ``gs://`` format URI described
above.
content:
The raw content of the document. This field is only permitted
for EXTRACTIVE\_QA and FAQ knowledge types.
""",
# @@protoc_insertion_point(class_scope:google.cloud.dialogflow.v2beta1.Document)
))
_sym_db.RegisterMessage(Document)
ListDocumentsRequest = _reflection.GeneratedProtocolMessageType('ListDocumentsRequest', (_message.Message,), dict(
DESCRIPTOR = _LISTDOCUMENTSREQUEST,
__module__ = 'google.cloud.dialogflow_v2beta1.proto.document_pb2'
,
__doc__ = """Request message for
[Documents.ListDocuments][google.cloud.dialogflow.v2beta1.Documents.ListDocuments].
Attributes:
parent:
Required. The knowledge base to list all documents for.
Format: ``projects/<Project ID>/knowledgeBases/<Knowledge Base
ID>``.
page_size:
Optional. The maximum number of items to return in a single
page. By default 10 and at most 100.
page_token:
Optional. The next\_page\_token value returned from a previous
list request.
""",
# @@protoc_insertion_point(class_scope:google.cloud.dialogflow.v2beta1.ListDocumentsRequest)
))
_sym_db.RegisterMessage(ListDocumentsRequest)
ListDocumentsResponse = _reflection.GeneratedProtocolMessageType('ListDocumentsResponse', (_message.Message,), dict(
DESCRIPTOR = _LISTDOCUMENTSRESPONSE,
__module__ = 'google.cloud.dialogflow_v2beta1.proto.document_pb2'
,
__doc__ = """Response message for
[Documents.ListDocuments][google.cloud.dialogflow.v2beta1.Documents.ListDocuments].
Attributes:
documents:
The list of documents.
next_page_token:
Token to retrieve the next page of results, or empty if there
are no more results in the list.
""",
# @@protoc_insertion_point(class_scope:google.cloud.dialogflow.v2beta1.ListDocumentsResponse)
))
_sym_db.RegisterMessage(ListDocumentsResponse)
GetDocumentRequest = _reflection.GeneratedProtocolMessageType('GetDocumentRequest', (_message.Message,), dict(
DESCRIPTOR = _GETDOCUMENTREQUEST,
__module__ = 'google.cloud.dialogflow_v2beta1.proto.document_pb2'
,
__doc__ = """Request message for
[Documents.GetDocument][google.cloud.dialogflow.v2beta1.Documents.GetDocument].
Attributes:
name:
Required. The name of the document to retrieve. Format
``projects/<Project ID>/knowledgeBases/<Knowledge Base
ID>/documents/<Document ID>``.
""",
# @@protoc_insertion_point(class_scope:google.cloud.dialogflow.v2beta1.GetDocumentRequest)
))
_sym_db.RegisterMessage(GetDocumentRequest)
CreateDocumentRequest = _reflection.GeneratedProtocolMessageType('CreateDocumentRequest', (_message.Message,), dict(
DESCRIPTOR = _CREATEDOCUMENTREQUEST,
__module__ = 'google.cloud.dialogflow_v2beta1.proto.document_pb2'
,
__doc__ = """Request message for
[Documents.CreateDocument][google.cloud.dialogflow.v2beta1.Documents.CreateDocument].
Attributes:
parent:
      Required. The knowledge base to create a document for. Format:
``projects/<Project ID>/knowledgeBases/<Knowledge Base ID>``.
document:
Required. The document to create.
""",
# @@protoc_insertion_point(class_scope:google.cloud.dialogflow.v2beta1.CreateDocumentRequest)
))
_sym_db.RegisterMessage(CreateDocumentRequest)
DeleteDocumentRequest = _reflection.GeneratedProtocolMessageType('DeleteDocumentRequest', (_message.Message,), dict(
DESCRIPTOR = _DELETEDOCUMENTREQUEST,
__module__ = 'google.cloud.dialogflow_v2beta1.proto.document_pb2'
,
__doc__ = """Request message for
[Documents.DeleteDocument][google.cloud.dialogflow.v2beta1.Documents.DeleteDocument].
Attributes:
name:
The name of the document to delete. Format:
``projects/<Project ID>/knowledgeBases/<Knowledge Base
ID>/documents/<Document ID>``.
""",
# @@protoc_insertion_point(class_scope:google.cloud.dialogflow.v2beta1.DeleteDocumentRequest)
))
_sym_db.RegisterMessage(DeleteDocumentRequest)
KnowledgeOperationMetadata = _reflection.GeneratedProtocolMessageType('KnowledgeOperationMetadata', (_message.Message,), dict(
DESCRIPTOR = _KNOWLEDGEOPERATIONMETADATA,
__module__ = 'google.cloud.dialogflow_v2beta1.proto.document_pb2'
,
__doc__ = """Metadata in google::longrunning::Operation for Knowledge operations.
Attributes:
state:
Required. The current state of this operation.
""",
# @@protoc_insertion_point(class_scope:google.cloud.dialogflow.v2beta1.KnowledgeOperationMetadata)
))
_sym_db.RegisterMessage(KnowledgeOperationMetadata)
DESCRIPTOR.has_options = True
DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('\n#com.google.cloud.dialogflow.v2beta1B\rDocumentProtoP\001ZIgoogle.golang.org/genproto/googleapis/cloud/dialogflow/v2beta1;dialogflow\370\001\001\242\002\002DF\252\002\037Google.Cloud.Dialogflow.V2beta1'))
_DOCUMENTS = _descriptor.ServiceDescriptor(
name='Documents',
full_name='google.cloud.dialogflow.v2beta1.Documents',
file=DESCRIPTOR,
index=0,
options=None,
serialized_start=1048,
serialized_end=2057,
methods=[
_descriptor.MethodDescriptor(
name='ListDocuments',
full_name='google.cloud.dialogflow.v2beta1.Documents.ListDocuments',
index=0,
containing_service=None,
input_type=_LISTDOCUMENTSREQUEST,
output_type=_LISTDOCUMENTSRESPONSE,
options=_descriptor._ParseOptions(descriptor_pb2.MethodOptions(), _b('\202\323\344\223\002z\0227/v2beta1/{parent=projects/*/knowledgeBases/*}/documentsZ?\022=/v2beta1/{parent=projects/*/agent/knowledgeBases/*}/documents')),
),
_descriptor.MethodDescriptor(
name='GetDocument',
full_name='google.cloud.dialogflow.v2beta1.Documents.GetDocument',
index=1,
containing_service=None,
input_type=_GETDOCUMENTREQUEST,
output_type=_DOCUMENT,
options=_descriptor._ParseOptions(descriptor_pb2.MethodOptions(), _b('\202\323\344\223\002z\0227/v2beta1/{name=projects/*/knowledgeBases/*/documents/*}Z?\022=/v2beta1/{name=projects/*/agent/knowledgeBases/*/documents/*}')),
),
_descriptor.MethodDescriptor(
name='CreateDocument',
full_name='google.cloud.dialogflow.v2beta1.Documents.CreateDocument',
index=2,
containing_service=None,
input_type=_CREATEDOCUMENTREQUEST,
output_type=google_dot_longrunning_dot_operations__pb2._OPERATION,
options=_descriptor._ParseOptions(descriptor_pb2.MethodOptions(), _b('\202\323\344\223\002\216\001\"7/v2beta1/{parent=projects/*/knowledgeBases/*}/documents:\010documentZI\"=/v2beta1/{parent=projects/*/agent/knowledgeBases/*}/documents:\010document')),
),
_descriptor.MethodDescriptor(
name='DeleteDocument',
full_name='google.cloud.dialogflow.v2beta1.Documents.DeleteDocument',
index=3,
containing_service=None,
input_type=_DELETEDOCUMENTREQUEST,
output_type=google_dot_longrunning_dot_operations__pb2._OPERATION,
options=_descriptor._ParseOptions(descriptor_pb2.MethodOptions(), _b('\202\323\344\223\002z*7/v2beta1/{name=projects/*/knowledgeBases/*/documents/*}Z?*=/v2beta1/{name=projects/*/agent/knowledgeBases/*/documents/*}')),
),
])
_sym_db.RegisterServiceDescriptor(_DOCUMENTS)
DESCRIPTOR.services_by_name['Documents'] = _DOCUMENTS
# @@protoc_insertion_point(module_scope)
| 42.618968 | 3,117 | 0.752713 |
bce91a1942646d72b93179aba19ddb33e2fb85da
| 1,402 |
py
|
Python
|
openprocurement/relocation/tenders/views/bid.py
|
openprocurement/openprocurement.relocation.tenders
|
c8b545ad08a233beda93fa56324605208eb08bd9
|
[
"Apache-2.0"
] | null | null | null |
openprocurement/relocation/tenders/views/bid.py
|
openprocurement/openprocurement.relocation.tenders
|
c8b545ad08a233beda93fa56324605208eb08bd9
|
[
"Apache-2.0"
] | null | null | null |
openprocurement/relocation/tenders/views/bid.py
|
openprocurement/openprocurement.relocation.tenders
|
c8b545ad08a233beda93fa56324605208eb08bd9
|
[
"Apache-2.0"
] | 4 |
2017-03-20T15:07:41.000Z
|
2018-02-20T09:24:37.000Z
|
# -*- coding: utf-8 -*-
from openprocurement.api.utils import (
json_view,
opresource,
APIResource,
save_tender,
ROUTE_PREFIX,
context_unpack
)
from openprocurement.relocation.core.utils import change_ownership
from openprocurement.relocation.core.validation import validate_ownership_data
from openprocurement.relocation.tenders.validation import validate_tender_bid_accreditation_level
@opresource(name='Bid ownership',
path='/tenders/{tender_id}/bids/{bid_id}/ownership',
description="Bid Ownership")
class BidResource(APIResource):
@json_view(permission='create_bid',
validators=(validate_tender_bid_accreditation_level,
validate_ownership_data,))
def post(self):
bid = self.request.context
tender = self.request.validated['tender']
location = self.request.route_path('Tender Bids', tender_id=tender.id, bid_id=bid.id)
location = location[len(ROUTE_PREFIX):] # strips /api/<version>
if change_ownership(self.request, location) and save_tender(self.request):
self.LOGGER.info('Updated bid {} ownership of tender {}'.format(bid.id, tender.id),
extra=context_unpack(self.request, {'MESSAGE_ID': 'bid_ownership_update'}, {'bid_id': bid.id, 'tender_id': tender.id}))
return {'data': bid.serialize('view')}
| 41.235294 | 148 | 0.686876 |
257640f69d1843ebc1604bebde9b44115e6e6a2b
| 615 |
py
|
Python
|
setup.py
|
nbraud/ppb-vector
|
9998a8f2c468b1b1335a799b77f1a5450e05f2a8
|
[
"Artistic-2.0"
] | 2 |
2018-12-07T22:29:56.000Z
|
2019-05-21T20:11:28.000Z
|
setup.py
|
nbraud/ppb-vector
|
9998a8f2c468b1b1335a799b77f1a5450e05f2a8
|
[
"Artistic-2.0"
] | 170 |
2017-10-12T22:50:42.000Z
|
2022-02-04T16:45:39.000Z
|
setup.py
|
nbraud/ppb-vector
|
9998a8f2c468b1b1335a799b77f1a5450e05f2a8
|
[
"Artistic-2.0"
] | 25 |
2017-10-12T23:07:29.000Z
|
2020-05-29T11:53:24.000Z
|
#!/usr/bin/env python3
from setuptools import setup
def requirements(section=None):
"""Helper for loading dependencies from requirements files."""
if section is None:
filename = "requirements.txt"
else:
filename = f"requirements-{section}.txt"
with open(filename) as file:
return [line.strip() for line in file]
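# For example, requirements() reads requirements.txt, while
# requirements("tests") reads requirements-tests.txt.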
# See setup.cfg for the actual configuration.
setup(
# setup needs to be able to import the library, for attr: to work
setup_requires=requirements() + ['pytest-runner'],
install_requires=requirements(),
tests_require=requirements('tests'),
)
| 26.73913 | 69 | 0.691057 |
5a2dd6195b4b9105aac8de9c956e3fec7394342c
| 4,334 |
py
|
Python
|
psdaq/psdaq/pyxpm/surf/devices/ti/_UCD92xx.py
|
ZhenghengLi/lcls2
|
94e75c6536954a58c8937595dcac295163aa1cdf
|
[
"BSD-3-Clause-LBNL"
] | 134 |
2017-02-22T18:07:00.000Z
|
2022-03-21T16:12:23.000Z
|
python/surf/devices/ti/_UCD92xx.py
|
a-panella/surf
|
b7c116c9f84760bda2c1ea9fa89fddef58dd831d
|
[
"BSD-3-Clause-LBNL"
] | 251 |
2017-04-26T23:42:42.000Z
|
2022-03-03T18:48:43.000Z
|
python/surf/devices/ti/_UCD92xx.py
|
a-panella/surf
|
b7c116c9f84760bda2c1ea9fa89fddef58dd831d
|
[
"BSD-3-Clause-LBNL"
] | 38 |
2017-02-21T21:15:03.000Z
|
2022-02-06T00:22:37.000Z
|
#-----------------------------------------------------------------------------
# This file is part of the 'SLAC Firmware Standard Library'. It is subject to
# the license terms in the LICENSE.txt file found in the top-level directory
# of this distribution and at:
# https://confluence.slac.stanford.edu/display/ppareg/LICENSE.html.
# No part of the 'SLAC Firmware Standard Library', including this file, may be
# copied, modified, propagated, or distributed except according to the terms
# contained in the LICENSE.txt file.
#-----------------------------------------------------------------------------
import pyrogue as pr
import surf.protocols.i2c
class UCD92xx(surf.protocols.i2c.PMBus):
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.add(pr.LinkVariable(
name = 'VIN',
mode = 'RO',
units = 'V',
disp = '{:1.3f}',
linkedGet = self.getPMbusLinearDataFormat11Bit,
dependencies = [self.READ_VIN],
))
self.add(pr.LinkVariable(
name = 'IIN',
mode = 'RO',
units = 'A',
disp = '{:1.3f}',
linkedGet = self.getPMbusLinearDataFormat11Bit,
dependencies = [self.READ_IIN],
))
self.add(pr.LinkVariable(
name = 'VOUT',
mode = 'RO',
units = 'V',
disp = '{:1.3f}',
linkedGet = surf.protocols.i2c.getPMbusLinearDataFormat,
            dependencies = [self.READ_VOUT],
))
self.add(pr.LinkVariable(
name = 'IOUT',
mode = 'RO',
units = 'A',
disp = '{:1.3f}',
linkedGet = self.getPMbusLinearDataFormat11Bit,
dependencies = [self.READ_IOUT],
))
self.add(pr.LinkVariable(
name = 'TEMPERATURE[1]',
mode = 'RO',
units = 'degC',
disp = '{:1.3f}',
linkedGet = self.getPMbusLinearDataFormat11Bit,
dependencies = [self.READ_TEMPERATURE_1],
))
self.add(pr.LinkVariable(
name = 'TEMPERATURE[2]',
mode = 'RO',
units = 'degC',
disp = '{:1.3f}',
linkedGet = self.getPMbusLinearDataFormat11Bit,
dependencies = [self.READ_TEMPERATURE_2],
))
self.add(pr.LinkVariable(
name = 'FAN_SPEED[1]',
mode = 'RO',
units = 'RPM',
disp = '{:1.3f}',
linkedGet = self.getPMbusLinearDataFormat11Bit,
dependencies = [self.READ_FAN_SPEED_1],
))
self.add(pr.LinkVariable(
name = 'DUTY_CYCLE',
mode = 'RO',
units = '%',
disp = '{:1.3f}',
linkedGet = self.getPMbusLinearDataFormat11Bit,
dependencies = [self.READ_DUTY_CYCLE],
))
self.add(pr.LinkVariable(
name = 'POUT',
mode = 'RO',
units = 'W',
disp = '{:1.3f}',
linkedGet = self.getPMbusLinearDataFormat11Bit,
dependencies = [self.READ_POUT],
))
self.add(pr.LinkVariable(
name = 'PIN',
mode = 'RO',
units = 'W',
disp = '{:1.3f}',
linkedGet = self.getPMbusLinearDataFormat11Bit,
dependencies = [self.READ_PIN],
))
@staticmethod
def getPMbusLinearDataFormat11Bit(var):
# Get the 16-bt RAW value
raw = var.dependencies[0].value()
# V is a 16-bit unsigned binary integer mantissa,
V = 1.0*raw
# The exponent is reported in the bottom 5 bits of the VOUT_MODE parameter.
# In the UCD92xx, this exponent is a read-only parameter
# whose value is fixed at –12. This allows setting voltage-related variables
# over a range from 0 to 15.9997V, with a resolution of 0.244mV.
X = -12.0
return V*(2**X)
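        # Worked example, following the formula above: a raw reading of 0x1000
        # (4096) converts to 4096 * 2**-12 = 1.000, e.g. 1.000 V for a voltage
        # telemetry value; the fixed exponent gives the 0.244 mV (2**-12 V)
        # resolution quoted above.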
| 34.951613 | 84 | 0.469082 |
daeeb0a27b11f20a3661ef7a04f80ca12e3d3e5f
| 859 |
py
|
Python
|
batch_cover.py
|
geocompass/robosat.pink
|
44d95183322ba68c0728de44a66e50f510bfd919
|
[
"MIT"
] | 31 |
2019-10-19T04:07:51.000Z
|
2022-02-25T11:14:04.000Z
|
batch_cover.py
|
geocompass/robosat.pink
|
44d95183322ba68c0728de44a66e50f510bfd919
|
[
"MIT"
] | 2 |
2020-03-13T01:26:15.000Z
|
2020-10-30T01:59:29.000Z
|
batch_cover.py
|
geocompass/robosat.pink
|
44d95183322ba68c0728de44a66e50f510bfd919
|
[
"MIT"
] | 19 |
2019-10-31T04:52:08.000Z
|
2022-03-04T06:24:56.000Z
|
from datetime import datetime
import time
import json
from robosat_pink.geoc import RSPcover, utils
from app.libs import utils_geom
def cover(dsPath, geojson, out):
    return RSPcover.main(dsPath, geojson, out)
if __name__ == "__main__":
# # if cover by dir
# dsPath = "/data/dataset/train/train_2/tdttianjin/training/labels"
# geojson = None
# out = None
# if cover by geojson
dsPath = None
dir = '/data/dataset/train/train_3_0527'
# jsonFile = open(dir + "/centroid_buffer_union.json", 'r')
geojson = dir + "/centroid_buffer_union.json"
out = [dir+'/cover']
# training dataset directory
startTime = datetime.now()
ts = time.time()
    result = cover(dsPath, geojson, out)
endTime = datetime.now()
timeSpend = (endTime-startTime).seconds
print("Cover DONE!All spends:", timeSpend, "seconds!")
| 26.84375 | 71 | 0.681024 |
6a0de30bc8091dbe2d17e8c2130ca0f93768d32e
| 512 |
py
|
Python
|
shakenfist_client/commandline/label.py
|
mandoonandy/client-python
|
ee7bba251ec8c132b772cd715a4dd677c8075057
|
[
"Apache-2.0"
] | null | null | null |
shakenfist_client/commandline/label.py
|
mandoonandy/client-python
|
ee7bba251ec8c132b772cd715a4dd677c8075057
|
[
"Apache-2.0"
] | null | null | null |
shakenfist_client/commandline/label.py
|
mandoonandy/client-python
|
ee7bba251ec8c132b772cd715a4dd677c8075057
|
[
"Apache-2.0"
] | null | null | null |
import click
@click.group(help='Label commands')
def label():
pass
@label.command(name='update',
help=('Update a label to use a new blob.\n\n'
'LABEL: The name of the label to update\n\n'
'BLOB_UUID: The UUID of the blob to use.'))
@click.argument('label', type=click.STRING)
@click.argument('blob_uuid', type=click.STRING)
@click.pass_context
def label_update(ctx, label, blob_uuid=None):
ctx.obj['CLIENT'].update_label(label, blob_uuid)
| 28.444444 | 69 | 0.636719 |
022f75d5fc7e8402d1f384e4397dfce1acf49095
| 14,899 |
py
|
Python
|
cms/tests/reversion_tests.py
|
donce/django-cms
|
eff8c00345891098f4f317bcd821722e567faa4d
|
[
"BSD-3-Clause"
] | null | null | null |
cms/tests/reversion_tests.py
|
donce/django-cms
|
eff8c00345891098f4f317bcd821722e567faa4d
|
[
"BSD-3-Clause"
] | null | null | null |
cms/tests/reversion_tests.py
|
donce/django-cms
|
eff8c00345891098f4f317bcd821722e567faa4d
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
from __future__ import with_statement
import shutil
from os.path import join
from cms.utils.conf import get_cms_setting
from cms.utils.urlutils import admin_reverse
from djangocms_text_ckeditor.models import Text
from django.conf import settings
from django.contrib.contenttypes.models import ContentType
from django.core.files.uploadedfile import SimpleUploadedFile
import reversion
from reversion.models import Revision, Version
from cms.models import Page, Title, Placeholder
from cms.models.pluginmodel import CMSPlugin
from cms.test_utils.project.fileapp.models import FileModel
from cms.test_utils.testcases import CMSTestCase, TransactionCMSTestCase, URL_CMS_PAGE, URL_CMS_PAGE_CHANGE, URL_CMS_PAGE_ADD, \
URL_CMS_PLUGIN_ADD, URL_CMS_PLUGIN_EDIT
if hasattr(reversion.models, 'VERSION_CHANGE'):
from reversion.models import VERSION_CHANGE
class BasicReversionTestCase(CMSTestCase):
def setUp(self):
self.user = self._create_user("test", True, True)
def test_number_revisions(self):
with self.login_user_context(self.user):
self.assertEqual(Revision.objects.all().count(), 0)
self.page_data = self.get_new_page_data()
response = self.client.post(URL_CMS_PAGE_ADD, self.page_data)
self.assertRedirects(response, URL_CMS_PAGE)
self.assertEqual(Page.objects.all().count(), 2)
self.assertEqual(Revision.objects.all().count(), 1)
class ReversionTestCase(TransactionCMSTestCase):
def setUp(self):
u = self._create_user("test", True, True)
with self.login_user_context(u):
# add a new text plugin
self.page_data = self.get_new_page_data()
response = self.client.post(URL_CMS_PAGE_ADD, self.page_data)
self.assertRedirects(response, URL_CMS_PAGE)
page = Page.objects.all()[0]
placeholderpk = page.placeholders.get(slot="body").pk
plugin_data = {
'plugin_type': "TextPlugin",
'plugin_language': settings.LANGUAGES[0][0],
'placeholder_id': placeholderpk,
'plugin_parent': '',
}
response = self.client.post(URL_CMS_PLUGIN_ADD, plugin_data)
self.assertEqual(response.status_code, 200)
# now edit the plugin
edit_url = URL_CMS_PLUGIN_EDIT + response.content.decode('utf8').split("edit-plugin/")[1].split("/")[
0] + "/"
response = self.client.get(edit_url)
self.assertEqual(response.status_code, 200)
response = self.client.post(edit_url, {"body": "Hello World"})
self.assertEqual(response.status_code, 200)
txt = Text.objects.all()[0]
self.assertEqual("Hello World", txt.body)
self.txt = txt
# change the content
response = self.client.post(edit_url, {"body": "Bye Bye World"})
self.assertEqual(response.status_code, 200)
txt = Text.objects.all()[0]
self.assertEqual("Bye Bye World", txt.body)
p_data = self.page_data.copy()
response = self.client.post(URL_CMS_PAGE_CHANGE % page.pk, p_data)
self.assertRedirects(response, URL_CMS_PAGE)
page.publish('en')
self.user = u
def test_revert(self):
"""
Test that you can revert a plugin
"""
with self.login_user_context(self.user):
self.assertEqual(Page.objects.all().count(), 2)
self.assertEqual(Title.objects.all().count(), 2)
self.assertEqual(CMSPlugin.objects.all().count(), 2)
self.assertEqual(Revision.objects.all().count(), 5)
ctype = ContentType.objects.get_for_model(Page)
revision = Revision.objects.all()[2]
version = Version.objects.get(content_type=ctype, revision=revision)
page = Page.objects.all()[0]
history_url = URL_CMS_PAGE_CHANGE % (page.pk) + "history/"
response = self.client.get(history_url)
self.assertEqual(response.status_code, 200)
revert_url = history_url + "%s/" % version.pk
response = self.client.get(revert_url)
self.assertEqual(response.status_code, 200)
response = self.client.post("%s?language=en&" % revert_url, self.page_data)
self.assertRedirects(response, URL_CMS_PAGE_CHANGE % page.pk)
# test for publisher_is_draft, published is set for both draft and
# published page
self.assertEqual(Page.objects.all()[0].publisher_is_draft, True)
self.assertEqual(CMSPlugin.objects.all().count(), 2)
# test that CMSPlugin subclasses are reverted
self.assertEqual(Text.objects.all().count(), 2)
self.assertEqual(Text.objects.get(pk=self.txt.pk).body, "Hello World")
self.assertEqual(Revision.objects.all().count(), 6)
def test_undo_redo(self):
"""
Test that you can revert a plugin
"""
with self.login_user_context(self.user):
self.assertEqual(Page.objects.all().count(), 2)
self.assertEqual(Title.objects.all().count(), 2)
self.assertEqual(CMSPlugin.objects.all().count(), 2)
self.assertEqual(Revision.objects.all().count(), 5)
self.assertEqual(Placeholder.objects.count(), 5)
ctype = ContentType.objects.get_for_model(Page)
revision = Revision.objects.all()[2]
Version.objects.get(content_type=ctype, revision=revision)
page = Page.objects.all()[0]
undo_url = admin_reverse("cms_page_undo", args=[page.pk])
response = self.client.post(undo_url)
self.assertEqual(response.status_code, 200)
page = Page.objects.all()[0]
self.assertTrue(page.revision_id != 0)
rev = page.revision_id
redo_url = admin_reverse("cms_page_redo", args=[page.pk])
response = self.client.post(redo_url)
self.assertEqual(response.status_code, 200)
page = Page.objects.all()[0]
self.assertTrue(page.revision_id != rev)
txt = Text.objects.all()[0]
edit_url = URL_CMS_PLUGIN_EDIT + str(txt.pk) + "/"
response = self.client.post(edit_url, {"body": "Hello World2"})
self.assertEqual(response.status_code, 200)
page = Page.objects.all()[0]
self.assertEqual(page.revision_id, 0)
self.assertEqual(2, CMSPlugin.objects.all().count())
placeholderpk = page.placeholders.filter(slot="body")[0].pk
plugin_data = {
'plugin_type': "TextPlugin",
'plugin_language': settings.LANGUAGES[0][0],
'placeholder_id': placeholderpk,
'plugin_parent': '',
}
response = self.client.post(URL_CMS_PLUGIN_ADD, plugin_data)
self.assertEqual(response.status_code, 200)
# now edit the plugin
edit_url = URL_CMS_PLUGIN_EDIT + response.content.decode('utf8').split("edit-plugin/")[1].split("/")[
0] + "/"
response = self.client.get(edit_url)
self.assertEqual(response.status_code, 200)
response = self.client.post(edit_url, {"body": "Hello World"})
self.assertEqual(response.status_code, 200)
self.assertEqual(3, CMSPlugin.objects.all().count())
self.client.post(undo_url)
self.client.post(undo_url)
self.assertEqual(2, CMSPlugin.objects.all().count())
self.assertEqual(Placeholder.objects.count(), 5)
def test_undo_slug_collision(self):
data1 = self.get_new_page_data()
data2 = self.get_new_page_data()
data1['slug'] = 'page1'
data2['slug'] = 'page2'
with self.login_user_context(self.get_superuser()):
response = self.client.post(URL_CMS_PAGE_ADD, data1)
self.assertEqual(response.status_code, 302)
response = self.client.post(URL_CMS_PAGE_ADD, data2)
self.assertEqual(response.status_code, 302)
page1 = Page.objects.get(title_set__slug='page1')
page2 = Page.objects.get(title_set__slug='page2')
data1['slug'] = 'page3'
response = self.client.post(URL_CMS_PAGE_CHANGE % page1.pk, data1)
self.assertEqual(response.status_code, 302)
data2['slug'] = 'page1'
response = self.client.post(URL_CMS_PAGE_CHANGE % page2.pk, data2)
self.assertEqual(response.status_code, 302)
undo_url = admin_reverse("cms_page_undo", args=[page1.pk])
response = self.client.post(undo_url)
self.assertEqual(response.status_code, 200)
self.assertEqual(Title.objects.get(page=page1).slug, 'page3')
response = self.client.get(admin_reverse("cms_page_changelist"))
self.assertEqual(response.status_code, 200)
response = self.client.get('/en/?%s' % get_cms_setting('CMS_TOOLBAR_URL__EDIT_ON'))
self.assertEqual(response.status_code, 200)
response = self.client.get('/en/page1/?%s' % get_cms_setting('CMS_TOOLBAR_URL__EDIT_ON'))
self.assertEqual(response.status_code, 200)
def test_recover(self):
"""
Test that you can recover a page
"""
with self.login_user_context(self.user):
self.assertEqual(Revision.objects.all().count(), 5)
ctype = ContentType.objects.get_for_model(Page)
revision = Revision.objects.all()[4]
version = Version.objects.filter(content_type=ctype, revision=revision)[0]
self.assertEqual(Page.objects.all().count(), 2)
self.assertEqual(CMSPlugin.objects.all().count(), 2)
self.assertEqual(Text.objects.all().count(), 2)
page = Page.objects.all()[0]
page_pk = page.pk
page.delete()
self.assertEqual(Page.objects.all().count(), 0)
self.assertEqual(CMSPlugin.objects.all().count(), 0)
self.assertEqual(Text.objects.all().count(), 0)
recover_url = URL_CMS_PAGE + "recover/"
response = self.client.get(recover_url)
self.assertEqual(response.status_code, 200)
recover_url += "%s/" % version.pk
response = self.client.get(recover_url)
self.assertEqual(response.status_code, 200)
response = self.client.post(recover_url, self.page_data)
self.assertRedirects(response, URL_CMS_PAGE_CHANGE % page_pk)
self.assertEqual(Page.objects.all().count(), 1)
self.assertEqual(CMSPlugin.objects.all().count(), 1)
# test that CMSPlugin subclasses are recovered
self.assertEqual(Text.objects.all().count(), 1)
def test_recover_path_collision(self):
with self.login_user_context(self.user):
self.assertEqual(Page.objects.count(), 2)
page_data2 = self.get_new_page_data()
response = self.client.post(URL_CMS_PAGE_ADD, page_data2)
self.assertRedirects(response, URL_CMS_PAGE)
page_data3 = self.get_new_page_data()
response = self.client.post(URL_CMS_PAGE_ADD, page_data3)
self.assertRedirects(response, URL_CMS_PAGE)
page2 = Page.objects.all()[2]
page3 = Page.objects.all()[3]
self.assertEqual(page3.path, '0004')
ctype = ContentType.objects.get_for_model(Page)
revision = Revision.objects.order_by('-pk')[1]
version = Version.objects.filter(content_type=ctype, revision=revision)[0]
page2_pk = page2.pk
page2.delete()
self.assertEqual(Page.objects.count(), 3)
page_data4 = self.get_new_page_data()
response = self.client.post(URL_CMS_PAGE_ADD, page_data4)
self.assertRedirects(response, URL_CMS_PAGE)
page4 = Page.objects.all()[3]
self.assertEqual(Page.objects.count(), 4)
self.assertEqual(page4.path, '0005')
recover_url = URL_CMS_PAGE + "recover/"
response = self.client.get(recover_url)
self.assertEqual(response.status_code, 200)
recover_url += "%s/" % version.pk
response = self.client.get(recover_url)
self.assertEqual(response.status_code, 200)
response = self.client.post(recover_url, page_data2)
self.assertRedirects(response, URL_CMS_PAGE_CHANGE % page2_pk)
self.assertEqual(Page.objects.all().count(), 5)
def test_publish_limits(self):
with self.login_user_context(self.user):
with self.settings(CMS_MAX_PAGE_PUBLISH_REVERSIONS=2, CMS_MAX_PAGE_HISTORY_REVERSIONS=2):
page = Page.objects.all()[0]
page_pk = page.pk
self.assertEqual(Revision.objects.all().count(), 5)
for x in range(10):
publish_url = URL_CMS_PAGE + "%s/en/publish/" % page_pk
response = self.client.get(publish_url)
self.assertEqual(response.status_code, 302)
self.assertEqual(Revision.objects.all().count(), 4)
class ReversionFileFieldTests(CMSTestCase):
def tearDown(self):
shutil.rmtree(join(settings.MEDIA_ROOT, 'fileapp'))
def test_file_persistence(self):
content = b'content1'
with reversion.create_revision():
# add a file instance
file1 = FileModel()
file1.test_file.save('file1.txt', SimpleUploadedFile('file1.txt', content), False)
file1.save()
# manually add a revision because we use the explicit way
# django-cms uses too.
adapter = reversion.get_adapter(FileModel)
if hasattr(reversion.models, 'VERSION_CHANGE'):
reversion.revision_context_manager.add_to_context(
reversion.default_revision_manager, file1,
adapter.get_version_data(file1, VERSION_CHANGE))
else:
reversion.revision_context_manager.add_to_context(
reversion.default_revision_manager, file1,
adapter.get_version_data(file1))
# reload the instance from db
file2 = FileModel.objects.all()[0]
# delete the instance.
file2.delete()
# revert the old version
file_version = reversion.get_for_object(file1)[0]
file_version.revert()
# reload the reverted instance and check for its content
file1 = FileModel.objects.all()[0]
self.assertEqual(file1.test_file.file.read(), content)
| 45.148485 | 128 | 0.620981 |
84146a5d732e19df6ed2cd4b7b4224614777ce24
| 10,559 |
py
|
Python
|
.venv/lib/python3.8/site-packages/pandas/io/json/_table_schema.py
|
acrucetta/Chicago_COVI_WebApp
|
a37c9f492a20dcd625f8647067394617988de913
|
[
"MIT",
"Unlicense"
] | 115 |
2020-06-18T15:00:58.000Z
|
2022-03-02T10:13:19.000Z
|
.venv/lib/python3.8/site-packages/pandas/io/json/_table_schema.py
|
acrucetta/Chicago_COVI_WebApp
|
a37c9f492a20dcd625f8647067394617988de913
|
[
"MIT",
"Unlicense"
] | 37 |
2020-10-20T08:30:53.000Z
|
2020-12-22T13:15:45.000Z
|
.venv/lib/python3.8/site-packages/pandas/io/json/_table_schema.py
|
acrucetta/Chicago_COVI_WebApp
|
a37c9f492a20dcd625f8647067394617988de913
|
[
"MIT",
"Unlicense"
] | 60 |
2020-07-22T14:53:10.000Z
|
2022-03-23T10:17:59.000Z
|
"""
Table Schema builders
https://specs.frictionlessdata.io/json-table-schema/
"""
from typing import TYPE_CHECKING, Any, Dict, Optional, cast
import warnings
import pandas._libs.json as json
from pandas._typing import DtypeObj, FrameOrSeries, JSONSerializable
from pandas.core.dtypes.common import (
is_bool_dtype,
is_categorical_dtype,
is_datetime64_dtype,
is_datetime64tz_dtype,
is_integer_dtype,
is_numeric_dtype,
is_period_dtype,
is_string_dtype,
is_timedelta64_dtype,
)
from pandas.core.dtypes.dtypes import CategoricalDtype
from pandas import DataFrame
import pandas.core.common as com
if TYPE_CHECKING:
from pandas.core.indexes.multi import MultiIndex # noqa: F401
loads = json.loads
def as_json_table_type(x: DtypeObj) -> str:
"""
Convert a NumPy / pandas type to its corresponding json_table.
Parameters
----------
x : np.dtype or ExtensionDtype
Returns
-------
str
the Table Schema data types
Notes
-----
This table shows the relationship between NumPy / pandas dtypes,
and Table Schema dtypes.
============== =================
Pandas type Table Schema type
============== =================
int64 integer
float64 number
bool boolean
datetime64[ns] datetime
timedelta64[ns] duration
object str
categorical any
=============== =================
"""
if is_integer_dtype(x):
return "integer"
elif is_bool_dtype(x):
return "boolean"
elif is_numeric_dtype(x):
return "number"
elif is_datetime64_dtype(x) or is_datetime64tz_dtype(x) or is_period_dtype(x):
return "datetime"
elif is_timedelta64_dtype(x):
return "duration"
elif is_categorical_dtype(x):
return "any"
elif is_string_dtype(x):
return "string"
else:
return "any"
def set_default_names(data):
"""Sets index names to 'index' for regular, or 'level_x' for Multi"""
if com.all_not_none(*data.index.names):
nms = data.index.names
if len(nms) == 1 and data.index.name == "index":
warnings.warn("Index name of 'index' is not round-trippable")
elif len(nms) > 1 and any(x.startswith("level_") for x in nms):
warnings.warn("Index names beginning with 'level_' are not round-trippable")
return data
data = data.copy()
if data.index.nlevels > 1:
names = [
name if name is not None else f"level_{i}"
for i, name in enumerate(data.index.names)
]
data.index.names = names
else:
data.index.name = data.index.name or "index"
return data
def convert_pandas_type_to_json_field(arr):
dtype = arr.dtype
if arr.name is None:
name = "values"
else:
name = arr.name
field: Dict[str, JSONSerializable] = {
"name": name,
"type": as_json_table_type(dtype),
}
if is_categorical_dtype(dtype):
cats = dtype.categories
ordered = dtype.ordered
field["constraints"] = {"enum": list(cats)}
field["ordered"] = ordered
elif is_period_dtype(dtype):
field["freq"] = dtype.freq.freqstr
elif is_datetime64tz_dtype(dtype):
field["tz"] = dtype.tz.zone
return field
def convert_json_field_to_pandas_type(field):
"""
Converts a JSON field descriptor into its corresponding NumPy / pandas type
Parameters
----------
field
A JSON field descriptor
Returns
-------
dtype
Raises
------
ValueError
If the type of the provided field is unknown or currently unsupported
Examples
--------
>>> convert_json_field_to_pandas_type({'name': 'an_int',
'type': 'integer'})
'int64'
>>> convert_json_field_to_pandas_type({'name': 'a_categorical',
'type': 'any',
'constraints': {'enum': [
'a', 'b', 'c']},
'ordered': True})
'CategoricalDtype(categories=['a', 'b', 'c'], ordered=True)'
>>> convert_json_field_to_pandas_type({'name': 'a_datetime',
'type': 'datetime'})
'datetime64[ns]'
>>> convert_json_field_to_pandas_type({'name': 'a_datetime_with_tz',
'type': 'datetime',
'tz': 'US/Central'})
'datetime64[ns, US/Central]'
"""
typ = field["type"]
if typ == "string":
return "object"
elif typ == "integer":
return "int64"
elif typ == "number":
return "float64"
elif typ == "boolean":
return "bool"
elif typ == "duration":
return "timedelta64"
elif typ == "datetime":
if field.get("tz"):
return f"datetime64[ns, {field['tz']}]"
else:
return "datetime64[ns]"
elif typ == "any":
if "constraints" in field and "ordered" in field:
return CategoricalDtype(
categories=field["constraints"]["enum"], ordered=field["ordered"]
)
else:
return "object"
raise ValueError(f"Unsupported or invalid field type: {typ}")
def build_table_schema(
data: FrameOrSeries,
index: bool = True,
primary_key: Optional[bool] = None,
version: bool = True,
) -> Dict[str, JSONSerializable]:
"""
Create a Table schema from ``data``.
Parameters
----------
data : Series, DataFrame
index : bool, default True
Whether to include ``data.index`` in the schema.
primary_key : bool or None, default True
Column names to designate as the primary key.
The default `None` will set `'primaryKey'` to the index
level or levels if the index is unique.
version : bool, default True
Whether to include a field `pandas_version` with the version
of pandas that generated the schema.
Returns
-------
schema : dict
Notes
-----
See `Table Schema
<https://pandas.pydata.org/docs/user_guide/io.html#table-schema>`__ for
conversion types.
Timedeltas as converted to ISO8601 duration format with
9 decimal places after the seconds field for nanosecond precision.
Categoricals are converted to the `any` dtype, and use the `enum` field
constraint to list the allowed values. The `ordered` attribute is included
in an `ordered` field.
Examples
--------
>>> df = pd.DataFrame(
... {'A': [1, 2, 3],
... 'B': ['a', 'b', 'c'],
... 'C': pd.date_range('2016-01-01', freq='d', periods=3),
... }, index=pd.Index(range(3), name='idx'))
>>> build_table_schema(df)
{'fields': [{'name': 'idx', 'type': 'integer'},
{'name': 'A', 'type': 'integer'},
{'name': 'B', 'type': 'string'},
{'name': 'C', 'type': 'datetime'}],
'pandas_version': '0.20.0',
'primaryKey': ['idx']}
"""
if index is True:
data = set_default_names(data)
schema: Dict[str, Any] = {}
fields = []
if index:
if data.index.nlevels > 1:
data.index = cast("MultiIndex", data.index)
for level, name in zip(data.index.levels, data.index.names):
new_field = convert_pandas_type_to_json_field(level)
new_field["name"] = name
fields.append(new_field)
else:
fields.append(convert_pandas_type_to_json_field(data.index))
if data.ndim > 1:
for column, s in data.items():
fields.append(convert_pandas_type_to_json_field(s))
else:
fields.append(convert_pandas_type_to_json_field(data))
schema["fields"] = fields
if index and data.index.is_unique and primary_key is None:
if data.index.nlevels == 1:
schema["primaryKey"] = [data.index.name]
else:
schema["primaryKey"] = data.index.names
elif primary_key is not None:
schema["primaryKey"] = primary_key
if version:
schema["pandas_version"] = "0.20.0"
return schema
def parse_table_schema(json, precise_float):
"""
Builds a DataFrame from a given schema
Parameters
----------
json :
A JSON table schema
precise_float : boolean
Flag controlling precision when decoding string to double values, as
dictated by ``read_json``
Returns
-------
df : DataFrame
Raises
------
NotImplementedError
If the JSON table schema contains either timezone or timedelta data
Notes
-----
Because :func:`DataFrame.to_json` uses the string 'index' to denote a
name-less :class:`Index`, this function sets the name of the returned
:class:`DataFrame` to ``None`` when said string is encountered with a
normal :class:`Index`. For a :class:`MultiIndex`, the same limitation
applies to any strings beginning with 'level_'. Therefore, an
:class:`Index` name of 'index' and :class:`MultiIndex` names starting
with 'level_' are not supported.
See Also
--------
build_table_schema : Inverse function.
pandas.read_json
"""
table = loads(json, precise_float=precise_float)
col_order = [field["name"] for field in table["schema"]["fields"]]
df = DataFrame(table["data"], columns=col_order)[col_order]
dtypes = {
field["name"]: convert_json_field_to_pandas_type(field)
for field in table["schema"]["fields"]
}
# Cannot directly use as_type with timezone data on object; raise for now
if any(str(x).startswith("datetime64[ns, ") for x in dtypes.values()):
raise NotImplementedError('table="orient" can not yet read timezone data')
# No ISO constructor for Timedelta as of yet, so need to raise
if "timedelta64" in dtypes.values():
raise NotImplementedError(
'table="orient" can not yet read ISO-formatted Timedelta data'
)
df = df.astype(dtypes)
if "primaryKey" in table["schema"]:
df = df.set_index(table["schema"]["primaryKey"])
if len(df.index.names) == 1:
if df.index.name == "index":
df.index.name = None
else:
df.index.names = [
None if x.startswith("level_") else x for x in df.index.names
]
return df
| 30.255014 | 88 | 0.585946 |
ab15c8eb7eec7c069f2ef75c715d6cca27f91d4b
| 683 |
py
|
Python
|
indexes.py
|
asysc2020/contentbox
|
5c155976e0ce7ea308d62293ab89624d97b21d09
|
[
"Apache-2.0"
] | 39 |
2015-06-10T23:18:07.000Z
|
2021-10-21T04:29:06.000Z
|
indexes.py
|
asysc2020/contentbox
|
5c155976e0ce7ea308d62293ab89624d97b21d09
|
[
"Apache-2.0"
] | 2 |
2016-08-22T12:38:10.000Z
|
2017-01-26T18:37:33.000Z
|
indexes.py
|
asysc2020/contentbox
|
5c155976e0ce7ea308d62293ab89624d97b21d09
|
[
"Apache-2.0"
] | 26 |
2015-06-10T22:09:15.000Z
|
2021-06-27T15:45:15.000Z
|
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from dbindexer import autodiscover
autodiscover()
import search
search.autodiscover()
| 35.947368 | 74 | 0.77306 |
ad9e15df2a7c9172677e753b3c0f486fc206b22d
| 1,298 |
py
|
Python
|
cached_data_used/Rosenbrock_restricted.py
|
fjwillemsen/BayesianOptimization-autotuning
|
9af48014079a98e05324cb9d67cb8660aaf26c28
|
[
"Apache-2.0"
] | 1 |
2022-02-25T22:11:48.000Z
|
2022-02-25T22:11:48.000Z
|
cached_data_used/Rosenbrock_restricted.py
|
fjwillemsen/BayesianOptimization-autotuning
|
9af48014079a98e05324cb9d67cb8660aaf26c28
|
[
"Apache-2.0"
] | null | null | null |
cached_data_used/Rosenbrock_restricted.py
|
fjwillemsen/BayesianOptimization-autotuning
|
9af48014079a98e05324cb9d67cb8660aaf26c28
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
import sys
import kernel_tuner
import numpy as np
from collections import OrderedDict
prog_name = "rosenbrock_constrained"
def tune(device_name, strategy="bayes_opt", strategy_options=None, verbose=False, quiet=False, simulation_mode=True):
#input dimensions and data
x = 100
y = 100
problem_size = None
args = []
metrics = OrderedDict()
    # setup tunable parameters
tune_params = OrderedDict()
tune_params["x"] = np.linspace(-1.5, 1.5, num=100).tolist()
tune_params["y"] = np.linspace(-1.5, 1.5, num=100).tolist()
restrict = ["x**2 + y**2<=2"]
    # start tuning
results, env = kernel_tuner.tune_kernel(prog_name + "_kernel", prog_name + ".cu", problem_size, args, tune_params, metrics=metrics, verbose=verbose,
quiet=quiet, restrictions=restrict, cache="cache_files/" + prog_name + "_" + device_name, strategy=strategy,
strategy_options=strategy_options, simulation_mode=simulation_mode)
# print(len(results))
return results, env
if __name__ == "__main__":
if len(sys.argv) != 2:
print("Usage: python {}.py [device name]".format(prog_name))
exit(1)
device_name = sys.argv[1]
tune(device_name)
| 29.5 | 152 | 0.640986 |
c62ad4b81958db9a3e241dbb1674f1bf959c93e8
| 88,909 |
py
|
Python
|
Bio/Seq.py
|
bugra-emanet/biopython-bugra-emanet
|
d9f5cfd011232532b1e23802ea20f1b0523fc933
|
[
"BSD-3-Clause"
] | null | null | null |
Bio/Seq.py
|
bugra-emanet/biopython-bugra-emanet
|
d9f5cfd011232532b1e23802ea20f1b0523fc933
|
[
"BSD-3-Clause"
] | 1 |
2019-02-24T18:24:56.000Z
|
2019-02-27T02:31:56.000Z
|
Bio/Seq.py
|
zruan/biopython
|
cc13861df74f498ed22e41817df5d44d2051becb
|
[
"BSD-3-Clause"
] | null | null | null |
# Copyright 2000 Andrew Dalke.
# Copyright 2000-2002 Brad Chapman.
# Copyright 2004-2005, 2010 by M de Hoon.
# Copyright 2007-2020 by Peter Cock.
# All rights reserved.
#
# This file is part of the Biopython distribution and governed by your
# choice of the "Biopython License Agreement" or the "BSD 3-Clause License".
# Please see the LICENSE file that should have been included as part of this
# package.
"""Provide objects to represent biological sequences.
See also the Seq_ wiki and the chapter in our tutorial:
- `HTML Tutorial`_
- `PDF Tutorial`_
.. _Seq: http://biopython.org/wiki/Seq
.. _`HTML Tutorial`: http://biopython.org/DIST/docs/tutorial/Tutorial.html
.. _`PDF Tutorial`: http://biopython.org/DIST/docs/tutorial/Tutorial.pdf
"""
import array
import sys
import warnings
from Bio import BiopythonWarning
from Bio.Data.IUPACData import ambiguous_dna_complement, ambiguous_rna_complement
from Bio.Data.IUPACData import ambiguous_dna_letters as _ambiguous_dna_letters
from Bio.Data.IUPACData import ambiguous_rna_letters as _ambiguous_rna_letters
from Bio.Data import CodonTable
def _maketrans(complement_mapping):
"""Make a python string translation table (PRIVATE).
Arguments:
- complement_mapping - a dictionary such as ambiguous_dna_complement
and ambiguous_rna_complement from Data.IUPACData.
Returns a translation table (a string of length 256) for use with the
python string's translate method to use in a (reverse) complement.
Compatible with lower case and upper case sequences.
For internal use only.
"""
before = "".join(complement_mapping.keys())
after = "".join(complement_mapping.values())
before += before.lower()
after += after.lower()
return str.maketrans(before, after)
_dna_complement_table = _maketrans(ambiguous_dna_complement)
_rna_complement_table = _maketrans(ambiguous_rna_complement)
_rna_complement_table[ord("T")] = _rna_complement_table[ord("U")]
_rna_complement_table[ord("t")] = _rna_complement_table[ord("u")]
class Seq:
"""Read-only sequence object (essentially a string with biological methods).
Like normal python strings, our basic sequence object is immutable.
This prevents you from doing my_seq[5] = "A" for example, but does allow
Seq objects to be used as dictionary keys.
The Seq object provides a number of string like methods (such as count,
find, split and strip).
The Seq object also provides some biological methods, such as complement,
reverse_complement, transcribe, back_transcribe and translate (which are
not applicable to protein sequences).
"""
def __init__(self, data):
"""Create a Seq object.
Arguments:
- data - Sequence, required (string)
You will typically use Bio.SeqIO to read in sequences from files as
SeqRecord objects, whose sequence will be exposed as a Seq object via
the seq property.
        However, you will often want to create your own Seq objects directly:
>>> from Bio.Seq import Seq
>>> my_seq = Seq("MKQHKAMIVALIVICITAVVAALVTRKDLCEVHIRTGQTEVAVF")
>>> my_seq
Seq('MKQHKAMIVALIVICITAVVAALVTRKDLCEVHIRTGQTEVAVF')
>>> print(my_seq)
MKQHKAMIVALIVICITAVVAALVTRKDLCEVHIRTGQTEVAVF
"""
# Enforce string storage
if not isinstance(data, str):
raise TypeError(
"The sequence data given to a Seq object should "
"be a string (not another Seq object etc)"
)
self._data = data
def __repr__(self):
"""Return (truncated) representation of the sequence for debugging."""
if len(self) > 60:
# Shows the last three letters as it is often useful to see if
# there is a stop codon at the end of a sequence.
# Note total length is 54+3+3=60
return f"{self.__class__.__name__}('{str(self[:54])}...{str(self[-3:])}')"
else:
return f"{self.__class__.__name__}({self._data!r})"
def __str__(self):
"""Return the full sequence as a python string, use str(my_seq).
Note that Biopython 1.44 and earlier would give a truncated
version of repr(my_seq) for str(my_seq). If you are writing code
        which needs to be backwards compatible with really old Biopython,
you should continue to use my_seq.tostring() as follows::
try:
# The old way, removed in Biopython 1.73
as_string = seq_obj.tostring()
except AttributeError:
# The new way, needs Biopython 1.45 or later.
            # Don't use this on Biopython 1.44 or older as it truncates
as_string = str(seq_obj)
"""
return self._data
def __hash__(self):
"""Hash of the sequence as a string for comparison.
See Seq object comparison documentation (method ``__eq__`` in
particular) as this has changed in Biopython 1.65. Older versions
would hash on object identity.
"""
return hash(str(self))
def __eq__(self, other):
"""Compare the sequence to another sequence or a string (README).
Historically comparing Seq objects has done Python object comparison.
After considerable discussion (keeping in mind constraints of the
Python language, hashes and dictionary support), Biopython now uses
simple string comparison (with a warning about the change).
If you still need to support releases prior to Biopython 1.65, please
just do explicit comparisons:
>>> from Bio.Seq import Seq
>>> seq1 = Seq("ACGT")
>>> seq2 = Seq("ACGT")
>>> id(seq1) == id(seq2)
False
>>> str(seq1) == str(seq2)
True
The new behaviour is to use string-like equality:
>>> from Bio.Seq import Seq
>>> seq1 == seq2
True
>>> seq1 == "ACGT"
True
"""
return str(self) == str(other)
def __lt__(self, other):
"""Implement the less-than operand."""
if isinstance(other, (str, Seq, MutableSeq)):
return str(self) < str(other)
raise TypeError(
f"'<' not supported between instances of '{type(self).__name__}'"
f" and '{type(other).__name__}'"
)
def __le__(self, other):
"""Implement the less-than or equal operand."""
if isinstance(other, (str, Seq, MutableSeq)):
return str(self) <= str(other)
raise TypeError(
f"'<=' not supported between instances of '{type(self).__name__}'"
f" and '{type(other).__name__}'"
)
def __gt__(self, other):
"""Implement the greater-than operand."""
if isinstance(other, (str, Seq, MutableSeq)):
return str(self) > str(other)
raise TypeError(
f"'>' not supported between instances of '{type(self).__name__}'"
f" and '{type(other).__name__}'"
)
def __ge__(self, other):
"""Implement the greater-than or equal operand."""
if isinstance(other, (str, Seq, MutableSeq)):
return str(self) >= str(other)
raise TypeError(
f"'>=' not supported between instances of '{type(self).__name__}'"
f" and '{type(other).__name__}'"
)
def __len__(self):
"""Return the length of the sequence, use len(my_seq)."""
return len(self._data) # Seq API requirement
def __getitem__(self, index): # Seq API requirement
"""Return a subsequence of single letter, use my_seq[index].
>>> my_seq = Seq('ACTCGACGTCG')
>>> my_seq[5]
'A'
"""
if isinstance(index, int):
# Return a single letter as a string
return self._data[index]
else:
# Return the (sub)sequence as another Seq object
return Seq(self._data[index])
def __add__(self, other):
"""Add another sequence or string to this sequence.
>>> from Bio.Seq import Seq
>>> Seq("MELKI") + "LV"
Seq('MELKILV')
"""
if isinstance(other, (str, Seq, MutableSeq)):
return self.__class__(str(self) + str(other))
from Bio.SeqRecord import SeqRecord # Lazy to avoid circular imports
if isinstance(other, SeqRecord):
# Get the SeqRecord's __radd__ to handle this
return NotImplemented
else:
raise TypeError
def __radd__(self, other):
"""Add a sequence on the left.
>>> from Bio.Seq import Seq
>>> "LV" + Seq("MELKI")
Seq('LVMELKI')
Adding two Seq (like) objects is handled via the __add__ method.
"""
if isinstance(other, (str, Seq, MutableSeq)):
return self.__class__(str(other) + str(self))
else:
raise TypeError
def __mul__(self, other):
"""Multiply Seq by integer.
>>> from Bio.Seq import Seq
>>> Seq('ATG') * 2
Seq('ATGATG')
"""
if not isinstance(other, int):
raise TypeError(f"can't multiply {self.__class__.__name__} by non-int type")
return self.__class__(str(self) * other)
def __rmul__(self, other):
"""Multiply integer by Seq.
>>> from Bio.Seq import Seq
>>> 2 * Seq('ATG')
Seq('ATGATG')
"""
if not isinstance(other, int):
raise TypeError(f"can't multiply {self.__class__.__name__} by non-int type")
return self.__class__(str(self) * other)
def __imul__(self, other):
"""Multiply Seq in-place.
>>> from Bio.Seq import Seq
>>> seq = Seq('ATG')
>>> seq *= 2
>>> seq
Seq('ATGATG')
"""
if not isinstance(other, int):
raise TypeError(f"can't multiply {self.__class__.__name__} by non-int type")
return self.__class__(str(self) * other)
def tomutable(self): # Needed? Or use a function?
"""Return the full sequence as a MutableSeq object.
>>> from Bio.Seq import Seq
>>> my_seq = Seq("MKQHKAMIVALIVICITAVVAAL")
>>> my_seq
Seq('MKQHKAMIVALIVICITAVVAAL')
>>> my_seq.tomutable()
MutableSeq('MKQHKAMIVALIVICITAVVAAL')
"""
return MutableSeq(str(self))
def count(self, sub, start=0, end=sys.maxsize):
"""Return a non-overlapping count, like that of a python string.
This behaves like the python string method of the same name,
which does a non-overlapping count!
For an overlapping search use the newer count_overlap() method.
Returns an integer, the number of occurrences of substring
argument sub in the (sub)sequence given by [start:end].
Optional arguments start and end are interpreted as in slice
notation.
Arguments:
- sub - a string or another Seq object to look for
- start - optional integer, slice start
- end - optional integer, slice end
e.g.
>>> from Bio.Seq import Seq
>>> my_seq = Seq("AAAATGA")
>>> print(my_seq.count("A"))
5
>>> print(my_seq.count("ATG"))
1
>>> print(my_seq.count(Seq("AT")))
1
>>> print(my_seq.count("AT", 2, -1))
1
        HOWEVER, please note that because python strings and Seq objects (and
MutableSeq objects) do a non-overlapping search, this may not give
the answer you expect:
>>> "AAAA".count("AA")
2
>>> print(Seq("AAAA").count("AA"))
2
An overlapping search, as implemented in .count_overlap(),
would give the answer as three!
"""
return str(self).count(str(sub), start, end)
def count_overlap(self, sub, start=0, end=sys.maxsize):
"""Return an overlapping count.
For a non-overlapping search use the count() method.
Returns an integer, the number of occurrences of substring
argument sub in the (sub)sequence given by [start:end].
Optional arguments start and end are interpreted as in slice
notation.
Arguments:
- sub - a string or another Seq object to look for
- start - optional integer, slice start
- end - optional integer, slice end
e.g.
>>> from Bio.Seq import Seq
>>> print(Seq("AAAA").count_overlap("AA"))
3
>>> print(Seq("ATATATATA").count_overlap("ATA"))
4
>>> print(Seq("ATATATATA").count_overlap("ATA", 3, -1))
1
        Where substrings do not overlap, this should behave the same as
the count() method:
>>> from Bio.Seq import Seq
>>> my_seq = Seq("AAAATGA")
>>> print(my_seq.count_overlap("A"))
5
>>> my_seq.count_overlap("A") == my_seq.count("A")
True
>>> print(my_seq.count_overlap("ATG"))
1
>>> my_seq.count_overlap("ATG") == my_seq.count("ATG")
True
>>> print(my_seq.count_overlap(Seq("AT")))
1
>>> my_seq.count_overlap(Seq("AT")) == my_seq.count(Seq("AT"))
True
>>> print(my_seq.count_overlap("AT", 2, -1))
1
>>> my_seq.count_overlap("AT", 2, -1) == my_seq.count("AT", 2, -1)
True
HOWEVER, do not use this method for such cases because the
        count() method is much more efficient.
"""
sub_str = str(sub)
self_str = str(self)
overlap_count = 0
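        # Each hit advances the search start by just one position (not by the
        # length of sub), so overlapping matches are counted; e.g. "ATA" in
        # "ATATATATA" is found at offsets 0, 2, 4 and 6.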
while True:
start = self_str.find(sub_str, start, end) + 1
if start != 0:
overlap_count += 1
else:
return overlap_count
def __contains__(self, char):
"""Implement the 'in' keyword, like a python string.
e.g.
>>> from Bio.Seq import Seq
>>> my_dna = Seq("ATATGAAATTTGAAAA")
>>> "AAA" in my_dna
True
>>> Seq("AAA") in my_dna
True
"""
return str(char) in str(self)
def find(self, sub, start=0, end=sys.maxsize):
"""Find method, like that of a python string.
This behaves like the python string method of the same name.
Returns an integer, the index of the first occurrence of substring
argument sub in the (sub)sequence given by [start:end].
Arguments:
- sub - a string or another Seq object to look for
- start - optional integer, slice start
- end - optional integer, slice end
Returns -1 if the subsequence is NOT found.
e.g. Locating the first typical start codon, AUG, in an RNA sequence:
>>> from Bio.Seq import Seq
>>> my_rna = Seq("GUCAUGGCCAUUGUAAUGGGCCGCUGAAAGGGUGCCCGAUAGUUG")
>>> my_rna.find("AUG")
3
"""
return str(self).find(str(sub), start, end)
def rfind(self, sub, start=0, end=sys.maxsize):
"""Find from right method, like that of a python string.
This behaves like the python string method of the same name.
Returns an integer, the index of the last (right most) occurrence of
substring argument sub in the (sub)sequence given by [start:end].
Arguments:
- sub - a string or another Seq object to look for
- start - optional integer, slice start
- end - optional integer, slice end
Returns -1 if the subsequence is NOT found.
e.g. Locating the last typical start codon, AUG, in an RNA sequence:
>>> from Bio.Seq import Seq
>>> my_rna = Seq("GUCAUGGCCAUUGUAAUGGGCCGCUGAAAGGGUGCCCGAUAGUUG")
>>> my_rna.rfind("AUG")
15
"""
return str(self).rfind(str(sub), start, end)
def index(self, sub, start=0, end=sys.maxsize):
"""Like find() but raise ValueError when the substring is not found.
>>> from Bio.Seq import Seq
>>> my_rna = Seq("GUCAUGGCCAUUGUAAUGGGCCGCUGAAAGGGUGCCCGAUAGUUG")
>>> my_rna.find("T")
-1
>>> my_rna.index("T")
Traceback (most recent call last):
...
ValueError: substring not found...
"""
return str(self).index(str(sub), start, end)
def rindex(self, sub, start=0, end=sys.maxsize):
"""Like rfind() but raise ValueError when the substring is not found."""
return str(self).rindex(str(sub), start, end)
def startswith(self, prefix, start=0, end=sys.maxsize):
"""Return True if the Seq starts with the given prefix, False otherwise.
This behaves like the python string method of the same name.
Return True if the sequence starts with the specified prefix
(a string or another Seq object), False otherwise.
With optional start, test sequence beginning at that position.
With optional end, stop comparing sequence at that position.
prefix can also be a tuple of strings to try. e.g.
>>> from Bio.Seq import Seq
>>> my_rna = Seq("GUCAUGGCCAUUGUAAUGGGCCGCUGAAAGGGUGCCCGAUAGUUG")
>>> my_rna.startswith("GUC")
True
>>> my_rna.startswith("AUG")
False
>>> my_rna.startswith("AUG", 3)
True
>>> my_rna.startswith(("UCC", "UCA", "UCG"), 1)
True
"""
if isinstance(prefix, tuple):
prefix_strs = tuple(str(p) for p in prefix)
return str(self).startswith(prefix_strs, start, end)
else:
return str(self).startswith(str(prefix), start, end)
def endswith(self, suffix, start=0, end=sys.maxsize):
"""Return True if the Seq ends with the given suffix, False otherwise.
This behaves like the python string method of the same name.
Return True if the sequence ends with the specified suffix
(a string or another Seq object), False otherwise.
With optional start, test sequence beginning at that position.
With optional end, stop comparing sequence at that position.
suffix can also be a tuple of strings to try. e.g.
>>> from Bio.Seq import Seq
>>> my_rna = Seq("GUCAUGGCCAUUGUAAUGGGCCGCUGAAAGGGUGCCCGAUAGUUG")
>>> my_rna.endswith("UUG")
True
>>> my_rna.endswith("AUG")
False
>>> my_rna.endswith("AUG", 0, 18)
True
>>> my_rna.endswith(("UCC", "UCA", "UUG"))
True
"""
if isinstance(suffix, tuple):
suffix_strs = tuple(str(p) for p in suffix)
return str(self).endswith(suffix_strs, start, end)
else:
return str(self).endswith(str(suffix), start, end)
def split(self, sep=None, maxsplit=-1):
"""Split method, like that of a python string.
This behaves like the python string method of the same name.
Return a list of the 'words' in the string (as Seq objects),
using sep as the delimiter string. If maxsplit is given, at
most maxsplit splits are done. If maxsplit is omitted, all
splits are made.
Following the python string method, sep will by default be any
white space (tabs, spaces, newlines) but this is unlikely to
apply to biological sequences.
e.g.
>>> from Bio.Seq import Seq
>>> my_rna = Seq("GUCAUGGCCAUUGUAAUGGGCCGCUGAAAGGGUGCCCGAUAGUUG")
>>> my_aa = my_rna.translate()
>>> my_aa
Seq('VMAIVMGR*KGAR*L')
>>> for pep in my_aa.split("*"):
... pep
Seq('VMAIVMGR')
Seq('KGAR')
Seq('L')
>>> for pep in my_aa.split("*", 1):
... pep
Seq('VMAIVMGR')
Seq('KGAR*L')
See also the rsplit method:
>>> for pep in my_aa.rsplit("*", 1):
... pep
Seq('VMAIVMGR*KGAR')
Seq('L')
"""
        # A None separator means "split on whitespace", not on the string "None"
        return [Seq(part) for part in str(self).split(None if sep is None else str(sep), maxsplit)]
def rsplit(self, sep=None, maxsplit=-1):
"""Do a right split method, like that of a python string.
This behaves like the python string method of the same name.
Return a list of the 'words' in the string (as Seq objects),
using sep as the delimiter string. If maxsplit is given, at
most maxsplit splits are done COUNTING FROM THE RIGHT.
If maxsplit is omitted, all splits are made.
Following the python string method, sep will by default be any
white space (tabs, spaces, newlines) but this is unlikely to
apply to biological sequences.
e.g. print(my_seq.rsplit("*",1))
See also the split method.
"""
        # A None separator means "split on whitespace", not on the string "None"
        return [Seq(part) for part in str(self).rsplit(None if sep is None else str(sep), maxsplit)]
def strip(self, chars=None):
"""Return a new Seq object with leading and trailing ends stripped.
This behaves like the python string method of the same name.
Optional argument chars defines which characters to remove. If
omitted or None (default) then as for the python string method,
this defaults to removing any white space.
e.g. print(my_seq.strip("-"))
See also the lstrip and rstrip methods.
"""
        # chars=None means strip whitespace, not the literal characters "None"
        return Seq(str(self).strip(None if chars is None else str(chars)))
def lstrip(self, chars=None):
"""Return a new Seq object with leading (left) end stripped.
This behaves like the python string method of the same name.
Optional argument chars defines which characters to remove. If
omitted or None (default) then as for the python string method,
this defaults to removing any white space.
e.g. print(my_seq.lstrip("-"))
See also the strip and rstrip methods.
"""
        # chars=None means strip whitespace, not the literal characters "None"
        return Seq(str(self).lstrip(None if chars is None else str(chars)))
def rstrip(self, chars=None):
"""Return a new Seq object with trailing (right) end stripped.
This behaves like the python string method of the same name.
Optional argument chars defines which characters to remove. If
omitted or None (default) then as for the python string method,
this defaults to removing any white space.
e.g. Removing a nucleotide sequence's polyadenylation (poly-A tail):
>>> from Bio.Seq import Seq
>>> my_seq = Seq("CGGTACGCTTATGTCACGTAGAAAAAA")
>>> my_seq
Seq('CGGTACGCTTATGTCACGTAGAAAAAA')
>>> my_seq.rstrip("A")
Seq('CGGTACGCTTATGTCACGTAG')
See also the strip and lstrip methods.
"""
        # chars=None means strip whitespace, not the literal characters "None"
        return Seq(str(self).rstrip(None if chars is None else str(chars)))
def upper(self):
"""Return an upper case copy of the sequence.
>>> from Bio.Seq import Seq
>>> my_seq = Seq("VHLTPeeK*")
>>> my_seq
Seq('VHLTPeeK*')
>>> my_seq.lower()
Seq('vhltpeek*')
>>> my_seq.upper()
Seq('VHLTPEEK*')
"""
return Seq(str(self).upper())
def lower(self):
"""Return a lower case copy of the sequence.
>>> from Bio.Seq import Seq
>>> my_seq = Seq("CGGTACGCTTATGTCACGTAGAAAAAA")
>>> my_seq
Seq('CGGTACGCTTATGTCACGTAGAAAAAA')
>>> my_seq.lower()
Seq('cggtacgcttatgtcacgtagaaaaaa')
See also the upper method.
"""
return Seq(str(self).lower())
def encode(self, encoding="utf-8", errors="strict"):
"""Return an encoded version of the sequence as a bytes object.
The Seq object aims to match the interface of a Python string.
This is essentially to save you doing str(my_seq).encode() when
you need a bytes string, for example for computing a hash:
>>> from Bio.Seq import Seq
>>> Seq("ACGT").encode("ascii")
b'ACGT'
"""
return str(self).encode(encoding, errors)
def complement(self):
"""Return the complement sequence by creating a new Seq object.
This method is intended for use with DNA sequences:
>>> from Bio.Seq import Seq
>>> my_dna = Seq("CCCCCGATAG")
>>> my_dna
Seq('CCCCCGATAG')
>>> my_dna.complement()
Seq('GGGGGCTATC')
        You can of course use mixed case sequences,
>>> from Bio.Seq import Seq
>>> my_dna = Seq("CCCCCgatA-GD")
>>> my_dna
Seq('CCCCCgatA-GD')
>>> my_dna.complement()
Seq('GGGGGctaT-CH')
Note in the above example, ambiguous character D denotes
G, A or T so its complement is H (for C, T or A).
Note that if the sequence contains neither T nor U, we
assume it is DNA and map any A character to T:
>>> Seq("CGA").complement()
Seq('GCT')
>>> Seq("CGAT").complement()
Seq('GCTA')
If you actually have RNA, this currently works but we
may deprecate this later. We recommend using the new
complement_rna method instead:
>>> Seq("CGAU").complement()
Seq('GCUA')
>>> Seq("CGAU").complement_rna()
Seq('GCUA')
If the sequence contains both T and U, an exception is
raised:
>>> Seq("CGAUT").complement()
Traceback (most recent call last):
...
ValueError: Mixed RNA/DNA found
Trying to complement a protein sequence gives a meaningless
sequence:
>>> my_protein = Seq("MAIVMGR")
>>> my_protein.complement()
Seq('KTIBKCY')
Here "M" was interpreted as the IUPAC ambiguity code for
"A" or "C", with complement "K" for "T" or "G". Likewise
"A" has complement "T". The letter "I" has no defined
meaning under the IUPAC convention, and is unchanged.
"""
if ("U" in self._data or "u" in self._data) and (
"T" in self._data or "t" in self._data
):
# TODO - Handle this cleanly?
raise ValueError("Mixed RNA/DNA found")
elif "U" in self._data or "u" in self._data:
ttable = _rna_complement_table
else:
ttable = _dna_complement_table
# Much faster on really long sequences than the previous loop based
# one. Thanks to Michael Palmer, University of Waterloo.
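        # e.g. with the DNA table, str(self).translate(ttable) maps "ACGT" to
        # "TGCA" in a single pass over the string.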
return Seq(str(self).translate(ttable))
def reverse_complement(self):
"""Return the reverse complement sequence by creating a new Seq object.
This method is intended for use with DNA sequences:
>>> from Bio.Seq import Seq
>>> my_dna = Seq("CCCCCGATAGNR")
>>> my_dna
Seq('CCCCCGATAGNR')
>>> my_dna.reverse_complement()
Seq('YNCTATCGGGGG')
Note in the above example, since R = G or A, its complement
is Y (which denotes C or T).
        You can of course use mixed case sequences,
>>> from Bio.Seq import Seq
>>> my_dna = Seq("CCCCCgatA-G")
>>> my_dna
Seq('CCCCCgatA-G')
>>> my_dna.reverse_complement()
Seq('C-TatcGGGGG')
As discussed for the complement method, if the sequence
        contains neither T nor U, it is assumed to be DNA and
        any letter A will be mapped to T.
If you are dealing with RNA you should use the new
reverse_complement_rna method instead
>>> Seq("CGA").reverse_complement() # defaults to DNA
Seq('TCG')
>>> Seq("CGA").reverse_complement_rna()
Seq('UCG')
If the sequence contains both T and U, an exception is raised:
>>> Seq("CGAUT").reverse_complement()
Traceback (most recent call last):
...
ValueError: Mixed RNA/DNA found
Trying to reverse complement a protein sequence will give
a meaningless sequence:
>>> from Bio.Seq import Seq
>>> my_protein = Seq("MAIVMGR")
>>> my_protein.reverse_complement()
Seq('YCKBITK')
Here "M" was interpretted as the IUPAC ambiguity code for
"A" or "C", with complement "K" for "T" or "G" - and so on.
"""
# Use -1 stride/step to reverse the complement
return self.complement()[::-1]
def complement_rna(self):
"""Complement of an RNA sequence.
>>> Seq("CGA").complement() # defaults to DNA
Seq('GCT')
>>> Seq("CGA").complement_rna()
Seq('GCU')
Any T in the sequence is treated as a U:
>>> Seq("CGAUT").complement_rna()
Seq('GCUAA')
"""
return Seq(str(self).translate(_rna_complement_table))
def reverse_complement_rna(self):
"""Reverse complement of an RNA sequence.
>>> from Bio.Seq import Seq
>>> Seq("ACG").reverse_complement_rna()
Seq('CGU')
"""
# Use -1 stride/step to reverse the complement
return self.complement_rna()[::-1]
def transcribe(self):
"""Return the RNA sequence from a DNA sequence by creating a new Seq object.
>>> from Bio.Seq import Seq
>>> coding_dna = Seq("ATGGCCATTGTAATGGGCCGCTGAAAGGGTGCCCGATAG")
>>> coding_dna
Seq('ATGGCCATTGTAATGGGCCGCTGAAAGGGTGCCCGATAG')
>>> coding_dna.transcribe()
Seq('AUGGCCAUUGUAAUGGGCCGCUGAAAGGGUGCCCGAUAG')
Trying to transcribe an RNA sequence should have no effect.
If you have a nucleotide sequence which might be DNA or RNA
(or even a mixture), calling the transcribe method will ensure
any T becomes U.
Trying to transcribe a protein sequence will replace any
T for Threonine with U for Selenocysteine, which has no
        biologically plausible rationale. Older versions of Biopython
would throw an exception.
>>> from Bio.Seq import Seq
>>> my_protein = Seq("MAIVMGRT")
>>> my_protein.transcribe()
Seq('MAIVMGRU')
"""
return Seq(str(self).replace("T", "U").replace("t", "u"))
def back_transcribe(self):
"""Return the DNA sequence from an RNA sequence by creating a new Seq object.
>>> from Bio.Seq import Seq
>>> messenger_rna = Seq("AUGGCCAUUGUAAUGGGCCGCUGAAAGGGUGCCCGAUAG")
>>> messenger_rna
Seq('AUGGCCAUUGUAAUGGGCCGCUGAAAGGGUGCCCGAUAG')
>>> messenger_rna.back_transcribe()
Seq('ATGGCCATTGTAATGGGCCGCTGAAAGGGTGCCCGATAG')
        Trying to back-transcribe DNA has no effect. If you have a nucleotide
        sequence which might be DNA or RNA (or even a mixture), calling the
        back-transcribe method will ensure any U becomes T.
Trying to back-transcribe a protein sequence will replace any U for
Selenocysteine with T for Threonine, which is biologically meaningless.
Older versions of Biopython would raise an exception here:
>>> from Bio.Seq import Seq
>>> my_protein = Seq("MAIVMGRU")
>>> my_protein.back_transcribe()
Seq('MAIVMGRT')
"""
return Seq(str(self).replace("U", "T").replace("u", "t"))
def translate(
self, table="Standard", stop_symbol="*", to_stop=False, cds=False, gap="-"
):
"""Turn a nucleotide sequence into a protein sequence by creating a new Seq object.
This method will translate DNA or RNA sequences. It should not
be used on protein sequences as any result will be biologically
meaningless.
Arguments:
- table - Which codon table to use? This can be either a name
(string), an NCBI identifier (integer), or a CodonTable
object (useful for non-standard genetic codes). This
defaults to the "Standard" table.
- stop_symbol - Single character string, what to use for
terminators. This defaults to the asterisk, "*".
- to_stop - Boolean, defaults to False meaning do a full
translation continuing on past any stop codons (translated as the
specified stop_symbol). If True, translation is terminated at
the first in frame stop codon (and the stop_symbol is not
appended to the returned protein sequence).
- cds - Boolean, indicates this is a complete CDS. If True,
this checks the sequence starts with a valid alternative start
codon (which will be translated as methionine, M), that the
sequence length is a multiple of three, and that there is a
single in frame stop codon at the end (this will be excluded
from the protein sequence, regardless of the to_stop option).
If these tests fail, an exception is raised.
- gap - Single character string to denote symbol used for gaps.
Defaults to the minus sign.
e.g. Using the standard table:
>>> coding_dna = Seq("GTGGCCATTGTAATGGGCCGCTGAAAGGGTGCCCGATAG")
>>> coding_dna.translate()
Seq('VAIVMGR*KGAR*')
>>> coding_dna.translate(stop_symbol="@")
Seq('VAIVMGR@KGAR@')
>>> coding_dna.translate(to_stop=True)
Seq('VAIVMGR')
Now using NCBI table 2, where TGA is not a stop codon:
>>> coding_dna.translate(table=2)
Seq('VAIVMGRWKGAR*')
>>> coding_dna.translate(table=2, to_stop=True)
Seq('VAIVMGRWKGAR')
In fact, GTG is an alternative start codon under NCBI table 2, meaning
this sequence could be a complete CDS:
>>> coding_dna.translate(table=2, cds=True)
Seq('MAIVMGRWKGAR')
It isn't a valid CDS under NCBI table 1, due to both the start codon
and also the in frame stop codons:
>>> coding_dna.translate(table=1, cds=True)
Traceback (most recent call last):
...
Bio.Data.CodonTable.TranslationError: First codon 'GTG' is not a start codon
If the sequence has no in-frame stop codon, then the to_stop argument
has no effect:
>>> coding_dna2 = Seq("TTGGCCATTGTAATGGGCCGC")
>>> coding_dna2.translate()
Seq('LAIVMGR')
>>> coding_dna2.translate(to_stop=True)
Seq('LAIVMGR')
NOTE - Ambiguous codons like "TAN" or "NNN" could be an amino acid
or a stop codon. These are translated as "X". Any invalid codon
(e.g. "TA?" or "T-A") will throw a TranslationError.
NOTE - This does NOT behave like the python string's translate
method. For that use str(my_seq).translate(...) instead
"""
if isinstance(table, str) and len(table) == 256:
raise ValueError(
"The Seq object translate method DOES NOT take "
"a 256 character string mapping table like "
"the python string object's translate method. "
"Use str(my_seq).translate(...) instead."
)
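        # The table argument may be an NCBI table id (e.g. table=2), a table
        # name (e.g. table="Vertebrate Mitochondrial") or a CodonTable object;
        # the try/except chain below selects the matching lookup.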
try:
table_id = int(table)
except ValueError:
            # Assume it's a table name
# The same table can be used for RNA or DNA
codon_table = CodonTable.ambiguous_generic_by_name[table]
except (AttributeError, TypeError):
            # Assume it's a CodonTable object
if isinstance(table, CodonTable.CodonTable):
codon_table = table
else:
raise ValueError("Bad table argument") from None
else:
            # Assume it's a table ID
# The same table can be used for RNA or DNA
codon_table = CodonTable.ambiguous_generic_by_id[table_id]
return Seq(
_translate_str(str(self), codon_table, stop_symbol, to_stop, cds, gap=gap)
)
def ungap(self, gap="-"):
"""Return a copy of the sequence without the gap character(s).
The gap character now defaults to the minus sign, and can only
be specified via the method argument. This is no longer possible
via the sequence's alphabet (as was possible up to Biopython 1.77):
>>> from Bio.Seq import Seq
>>> my_dna = Seq("-ATA--TGAAAT-TTGAAAA")
>>> my_dna
Seq('-ATA--TGAAAT-TTGAAAA')
>>> my_dna.ungap("-")
Seq('ATATGAAATTTGAAAA')
"""
if not gap:
raise ValueError("Gap character required.")
elif len(gap) != 1 or not isinstance(gap, str):
raise ValueError(f"Unexpected gap character, {gap!r}")
return Seq(str(self).replace(gap, ""))
def join(self, other):
"""Return a merge of the sequences in other, spaced by the sequence from self.
Accepts either a Seq or string (and iterates over the letters), or an
iterable containing Seq or string objects. These arguments will be
concatenated with the calling sequence as the spacer:
>>> concatenated = Seq('NNNNN').join([Seq("AAA"), Seq("TTT"), Seq("PPP")])
>>> concatenated
Seq('AAANNNNNTTTNNNNNPPP')
Joining the letters of a single sequence:
>>> Seq('NNNNN').join(Seq("ACGT"))
Seq('ANNNNNCNNNNNGNNNNNT')
>>> Seq('NNNNN').join("ACGT")
Seq('ANNNNNCNNNNNGNNNNNT')
"""
if isinstance(other, (str, Seq, MutableSeq)):
return self.__class__(str(self).join(str(other)))
from Bio.SeqRecord import SeqRecord # Lazy to avoid circular imports
if isinstance(other, SeqRecord):
raise TypeError("Iterable cannot be a SeqRecord")
for c in other:
if isinstance(c, SeqRecord):
raise TypeError("Iterable cannot contain SeqRecords")
elif not isinstance(c, (str, Seq, MutableSeq)):
raise TypeError("Input must be an iterable of Seqs or Strings")
return self.__class__(str(self).join([str(_) for _ in other]))
class UnknownSeq(Seq):
"""Read-only sequence object of known length but unknown contents.
If you have an unknown sequence, you can represent this with a normal
Seq object, for example:
>>> my_seq = Seq("N"*5)
>>> my_seq
Seq('NNNNN')
>>> len(my_seq)
5
>>> print(my_seq)
NNNNN
However, this is rather wasteful of memory (especially for large
sequences), which is where this class is most useful:
>>> unk_five = UnknownSeq(5)
>>> unk_five
UnknownSeq(5, character='?')
>>> len(unk_five)
5
>>> print(unk_five)
?????
You can add unknown sequence together. Provided the characters are the
same, you get another memory saving UnknownSeq:
>>> unk_four = UnknownSeq(4)
>>> unk_four
UnknownSeq(4, character='?')
>>> unk_four + unk_five
UnknownSeq(9, character='?')
If the characters are different, addition gives an ordinary Seq object:
>>> unk_nnnn = UnknownSeq(4, character="N")
>>> unk_nnnn
UnknownSeq(4, character='N')
>>> unk_nnnn + unk_four
Seq('NNNN????')
Combining with a real Seq gives a new Seq object:
>>> known_seq = Seq("ACGT")
>>> unk_four + known_seq
Seq('????ACGT')
>>> known_seq + unk_four
Seq('ACGT????')
"""
def __init__(self, length, alphabet=None, character="?"):
"""Create a new UnknownSeq object.
Arguments:
- length - Integer, required.
- alphabet - no longer used, must be None.
- character - single letter string, default "?". Typically "N"
for nucleotides, "X" for proteins, and "?" otherwise.
"""
if alphabet is not None:
raise ValueError("The alphabet argument is no longer supported")
self._length = int(length)
if self._length < 0:
# TODO - Block zero length UnknownSeq? You can just use a Seq!
raise ValueError("Length must not be negative.")
if not character or len(character) != 1:
raise ValueError("character argument should be a single letter string.")
self._character = character
def __len__(self):
"""Return the stated length of the unknown sequence."""
return self._length
def __str__(self):
"""Return the unknown sequence as full string of the given length."""
return self._character * self._length
def __repr__(self):
"""Return (truncated) representation of the sequence for debugging."""
return f"UnknownSeq({self._length}, character={self._character!r})"
def __add__(self, other):
"""Add another sequence or string to this sequence.
Adding two UnknownSeq objects returns another UnknownSeq object
provided the character is the same.
>>> from Bio.Seq import UnknownSeq
>>> UnknownSeq(10, character='X') + UnknownSeq(5, character='X')
UnknownSeq(15, character='X')
If the characters differ, an UnknownSeq object cannot be used, so a
Seq object is returned:
>>> from Bio.Seq import UnknownSeq
>>> UnknownSeq(10, character='X') + UnknownSeq(5, character="x")
Seq('XXXXXXXXXXxxxxx')
If adding a string to an UnknownSeq, a new Seq is returned:
>>> from Bio.Seq import UnknownSeq
>>> UnknownSeq(5, character='X') + "LV"
Seq('XXXXXLV')
"""
if isinstance(other, UnknownSeq) and other._character == self._character:
return UnknownSeq(len(self) + len(other), character=self._character)
# Offload to the base class...
return Seq(str(self)) + other
def __radd__(self, other):
"""Add a sequence on the left."""
# If other is an UnknownSeq, then __add__ would be called.
# Offload to the base class...
return other + Seq(str(self))
def __mul__(self, other):
"""Multiply UnknownSeq by integer.
>>> from Bio.Seq import UnknownSeq
>>> UnknownSeq(3) * 2
UnknownSeq(6, character='?')
>>> UnknownSeq(3, character="N") * 2
UnknownSeq(6, character='N')
"""
if not isinstance(other, int):
raise TypeError(f"can't multiply {self.__class__.__name__} by non-int type")
return self.__class__(len(self) * other, character=self._character)
def __rmul__(self, other):
"""Multiply integer by UnknownSeq.
>>> from Bio.Seq import UnknownSeq
>>> 2 * UnknownSeq(3)
UnknownSeq(6, character='?')
>>> 2 * UnknownSeq(3, character="N")
UnknownSeq(6, character='N')
"""
if not isinstance(other, int):
raise TypeError(f"can't multiply {self.__class__.__name__} by non-int type")
return self.__class__(len(self) * other, character=self._character)
def __imul__(self, other):
"""Multiply UnknownSeq in-place.
>>> from Bio.Seq import UnknownSeq
>>> seq = UnknownSeq(3, character="N")
>>> seq *= 2
>>> seq
UnknownSeq(6, character='N')
"""
if not isinstance(other, int):
raise TypeError(f"can't multiply {self.__class__.__name__} by non-int type")
return self.__class__(len(self) * other, character=self._character)
def __getitem__(self, index):
"""Get a subsequence from the UnknownSeq object.
>>> unk = UnknownSeq(8, character="N")
>>> print(unk[:])
NNNNNNNN
>>> print(unk[5:3])
<BLANKLINE>
>>> print(unk[1:-1])
NNNNNN
>>> print(unk[1:-1:2])
NNN
"""
if isinstance(index, int):
# TODO - Check the bounds without wasting memory
return str(self)[index]
old_length = self._length
step = index.step
if step is None or step == 1:
# This calculates the length you'd get from ("N"*old_length)[index]
start = index.start
end = index.stop
if start is None:
start = 0
elif start < 0:
start = max(0, old_length + start)
elif start > old_length:
start = old_length
if end is None:
end = old_length
elif end < 0:
end = max(0, old_length + end)
elif end > old_length:
end = old_length
new_length = max(0, end - start)
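            # e.g. UnknownSeq(8)[1:-1] clamps to start == 1, end == 7, giving
            # new_length == 6, matching the doctest above.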
elif step == 0:
raise ValueError("slice step cannot be zero")
else:
# TODO - handle step efficiently
new_length = len(("X" * old_length)[index])
# assert new_length == len(("X"*old_length)[index]), \
# (index, start, end, step, old_length,
# new_length, len(("X"*old_length)[index]))
return UnknownSeq(new_length, character=self._character)
def count(self, sub, start=0, end=sys.maxsize):
"""Return a non-overlapping count, like that of a python string.
This behaves like the python string (and Seq object) method of the
same name, which does a non-overlapping count!
For an overlapping search use the newer count_overlap() method.
Returns an integer, the number of occurrences of substring
argument sub in the (sub)sequence given by [start:end].
Optional arguments start and end are interpreted as in slice
notation.
Arguments:
- sub - a string or another Seq object to look for
- start - optional integer, slice start
- end - optional integer, slice end
>>> "NNNN".count("N")
4
>>> Seq("NNNN").count("N")
4
>>> UnknownSeq(4, character="N").count("N")
4
>>> UnknownSeq(4, character="N").count("A")
0
>>> UnknownSeq(4, character="N").count("AA")
0
        HOWEVER, please note that because python strings and Seq objects (and
MutableSeq objects) do a non-overlapping search, this may not give
the answer you expect:
>>> UnknownSeq(4, character="N").count("NN")
2
>>> UnknownSeq(4, character="N").count("NNN")
1
"""
sub_str = str(sub)
len_self, len_sub_str = self._length, len(sub_str)
# Handling case where substring not in self
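        # Every position holds the same single character, so sub can only
        # occur at all if it consists of nothing but that character.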
if set(sub_str) != set(self._character):
return 0
# Setting None to the default arguments
if start is None:
start = 0
if end is None:
end = sys.maxsize
# Truncating start and end to max of self._length and min of -self._length
start = max(min(start, len_self), -len_self)
end = max(min(end, len_self), -len_self)
# Convert start and ends to positive indexes
if start < 0:
start += len_self
if end < 0:
end += len_self
# Handle case where end <= start (no negative step argument here)
# and case where len_sub_str is larger than the search space
if end <= start or (end - start) < len_sub_str:
return 0
# 'Normal' calculation
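        # e.g. "NN" in UnknownSeq(4, character="N") over the full slice gives
        # (4 - 0) // 2 == 2 non-overlapping hits, matching the doctest above.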
return (end - start) // len_sub_str
def count_overlap(self, sub, start=0, end=sys.maxsize):
"""Return an overlapping count.
For a non-overlapping search use the count() method.
Returns an integer, the number of occurrences of substring
argument sub in the (sub)sequence given by [start:end].
Optional arguments start and end are interpreted as in slice
notation.
Arguments:
- sub - a string or another Seq object to look for
- start - optional integer, slice start
- end - optional integer, slice end
e.g.
>>> from Bio.Seq import UnknownSeq
>>> UnknownSeq(4, character="N").count_overlap("NN")
3
>>> UnknownSeq(4, character="N").count_overlap("NNN")
2
        Where substrings do not overlap, this should behave the same as
the count() method:
>>> UnknownSeq(4, character="N").count_overlap("N")
4
>>> UnknownSeq(4, character="N").count_overlap("N") == UnknownSeq(4, character="N").count("N")
True
>>> UnknownSeq(4, character="N").count_overlap("A")
0
>>> UnknownSeq(4, character="N").count_overlap("A") == UnknownSeq(4, character="N").count("A")
True
>>> UnknownSeq(4, character="N").count_overlap("AA")
0
>>> UnknownSeq(4, character="N").count_overlap("AA") == UnknownSeq(4, character="N").count("AA")
True
"""
sub_str = str(sub)
len_self, len_sub_str = self._length, len(sub_str)
# Handling case where substring not in self
if set(sub_str) != set(self._character):
return 0
# Setting None to the default arguments
if start is None:
start = 0
if end is None:
end = sys.maxsize
# Truncating start and end to max of self._length and min of -self._length
start = max(min(start, len_self), -len_self)
end = max(min(end, len_self), -len_self)
# Convert start and ends to positive indexes
if start < 0:
start += len_self
if end < 0:
end += len_self
# Handle case where end <= start (no negative step argument here)
# and case where len_sub_str is larger than the search space
if end <= start or (end - start) < len_sub_str:
return 0
# 'Normal' calculation
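        # e.g. "NN" in UnknownSeq(4, character="N") over the full slice gives
        # 4 - 0 - 2 + 1 == 3 overlapping hits, matching the doctest above.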
return end - start - len_sub_str + 1
def complement(self):
"""Return the complement of an unknown nucleotide equals itself.
>>> my_nuc = UnknownSeq(8)
>>> my_nuc
UnknownSeq(8, character='?')
>>> print(my_nuc)
????????
>>> my_nuc.complement()
UnknownSeq(8, character='?')
>>> print(my_nuc.complement())
????????
"""
return self
def complement_rna(self):
"""Return the complement assuming it is RNA."""
return self.complement()
def reverse_complement(self):
"""Return the reverse complement of an unknown sequence.
The reverse complement of an unknown nucleotide equals itself:
>>> from Bio.Seq import UnknownSeq
>>> example = UnknownSeq(6, character="N")
>>> print(example)
NNNNNN
>>> print(example.reverse_complement())
NNNNNN
"""
return self
def reverse_complement_rna(self):
"""Return the reverse complement assuming it is RNA."""
return self.reverse_complement()
def transcribe(self):
"""Return an unknown RNA sequence from an unknown DNA sequence.
>>> my_dna = UnknownSeq(10, character="N")
>>> my_dna
UnknownSeq(10, character='N')
>>> print(my_dna)
NNNNNNNNNN
>>> my_rna = my_dna.transcribe()
>>> my_rna
UnknownSeq(10, character='N')
>>> print(my_rna)
NNNNNNNNNN
"""
s = Seq(self._character).transcribe()
return UnknownSeq(self._length, character=str(s))
def back_transcribe(self):
"""Return an unknown DNA sequence from an unknown RNA sequence.
>>> my_rna = UnknownSeq(20, character="N")
>>> my_rna
UnknownSeq(20, character='N')
>>> print(my_rna)
NNNNNNNNNNNNNNNNNNNN
>>> my_dna = my_rna.back_transcribe()
>>> my_dna
UnknownSeq(20, character='N')
>>> print(my_dna)
NNNNNNNNNNNNNNNNNNNN
"""
s = Seq(self._character).back_transcribe()
return UnknownSeq(self._length, character=str(s))
def upper(self):
"""Return an upper case copy of the sequence.
>>> from Bio.Seq import UnknownSeq
>>> my_seq = UnknownSeq(20, character="n")
>>> my_seq
UnknownSeq(20, character='n')
>>> print(my_seq)
nnnnnnnnnnnnnnnnnnnn
>>> my_seq.upper()
UnknownSeq(20, character='N')
>>> print(my_seq.upper())
NNNNNNNNNNNNNNNNNNNN
See also the lower method.
"""
return UnknownSeq(self._length, character=self._character.upper())
def lower(self):
"""Return a lower case copy of the sequence.
>>> from Bio.Seq import UnknownSeq
>>> my_seq = UnknownSeq(20, character="X")
>>> my_seq
UnknownSeq(20, character='X')
>>> print(my_seq)
XXXXXXXXXXXXXXXXXXXX
>>> my_seq.lower()
UnknownSeq(20, character='x')
>>> print(my_seq.lower())
xxxxxxxxxxxxxxxxxxxx
See also the upper method.
"""
return UnknownSeq(self._length, character=self._character.lower())
def translate(
self, table="Standard", stop_symbol="*", to_stop=False, cds=False, gap="-"
):
"""Translate an unknown nucleotide sequence into an unknown protein.
e.g.
>>> my_seq = UnknownSeq(9, character="N")
>>> print(my_seq)
NNNNNNNNN
>>> my_protein = my_seq.translate()
>>> my_protein
UnknownSeq(3, character='X')
>>> print(my_protein)
XXX
In comparison, using a normal Seq object:
>>> my_seq = Seq("NNNNNNNNN")
>>> print(my_seq)
NNNNNNNNN
>>> my_protein = my_seq.translate()
>>> my_protein
Seq('XXX')
>>> print(my_protein)
XXX
"""
return UnknownSeq(self._length // 3, character="X")
def ungap(self, gap="-"):
"""Return a copy of the sequence without the gap character(s).
The gap character now defaults to the minus sign, and can only
be specified via the method argument. This is no longer possible
via the sequence's alphabet (as was possible up to Biopython 1.77):
>>> from Bio.Seq import UnknownSeq
>>> my_dna = UnknownSeq(20, character='N')
>>> my_dna
UnknownSeq(20, character='N')
>>> my_dna.ungap() # using default
UnknownSeq(20, character='N')
>>> my_dna.ungap("-")
UnknownSeq(20, character='N')
If the UnknownSeq is using the gap character, then an empty Seq is
returned:
>>> my_gap = UnknownSeq(20, character="-")
>>> my_gap
UnknownSeq(20, character='-')
>>> my_gap.ungap() # using default
Seq('')
>>> my_gap.ungap("-")
Seq('')
"""
# Offload the argument validation
s = Seq(self._character).ungap(gap)
if s:
return UnknownSeq(self._length, character=self._character)
else:
return Seq("")
def join(self, other):
"""Return a merge of the sequences in other, spaced by the sequence from self.
Accepts either a Seq or string (and iterates over the letters), or an
iterable containing Seq or string objects. These arguments will be
concatenated with the calling sequence as the spacer:
>>> concatenated = UnknownSeq(5).join([Seq("AAA"), Seq("TTT"), Seq("PPP")])
>>> concatenated
Seq('AAA?????TTT?????PPP')
If all the inputs are also UnknownSeq using the same character, then it
returns a new UnknownSeq:
>>> UnknownSeq(5).join([UnknownSeq(3), UnknownSeq(3), UnknownSeq(3)])
UnknownSeq(19, character='?')
Examples taking a single sequence and joining the letters:
>>> UnknownSeq(3).join("ACGT")
Seq('A???C???G???T')
>>> UnknownSeq(3).join(UnknownSeq(4))
UnknownSeq(13, character='?')
Will only return an UnknownSeq object if all of the objects to be joined are
also UnknownSeqs with the same character as the spacer, similar to how the
addition of an UnknownSeq and another UnknownSeq would work.
"""
from Bio.SeqRecord import SeqRecord # Lazy to avoid circular imports
if isinstance(other, (str, Seq, MutableSeq)):
if isinstance(other, UnknownSeq) and self._character == other._character:
# Special case, can return an UnknownSeq
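                # The spacer is inserted len(other) - 1 times, so the result's
                # length is len(other) + len(self) * (len(other) - 1).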
return self.__class__(
len(other) + len(self) * (len(other) - 1), character=self._character
)
return Seq(str(self).join(str(other)))
if isinstance(other, SeqRecord):
raise TypeError("Iterable cannot be a SeqRecord")
for c in other:
if isinstance(c, SeqRecord):
raise TypeError("Iterable cannot contain SeqRecords")
elif not isinstance(c, (str, Seq, MutableSeq)):
raise TypeError("Input must be an iterable of Seqs or Strings")
temp_data = str(self).join([str(_) for _ in other])
if temp_data.count(self._character) == len(temp_data):
# Can return an UnknownSeq
return self.__class__(len(temp_data), character=self._character)
return Seq(temp_data)
class MutableSeq:
"""An editable sequence object.
Unlike normal python strings and our basic sequence object (the Seq class)
which are immutable, the MutableSeq lets you edit the sequence in place.
However, this means you cannot use a MutableSeq object as a dictionary key.
>>> from Bio.Seq import MutableSeq
>>> my_seq = MutableSeq("ACTCGTCGTCG")
>>> my_seq
MutableSeq('ACTCGTCGTCG')
>>> my_seq[5]
'T'
>>> my_seq[5] = "A"
>>> my_seq
MutableSeq('ACTCGACGTCG')
>>> my_seq[5]
'A'
>>> my_seq[5:8] = "NNN"
>>> my_seq
MutableSeq('ACTCGNNNTCG')
>>> len(my_seq)
11
Note that the MutableSeq object does not support as many string-like
or biological methods as the Seq object.
"""
def __init__(self, data):
"""Initialize the class."""
if isinstance(data, str): # TODO - What about unicode?
self.data = array.array("u", data)
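            # A 'u' array stores individual unicode characters in a mutable
            # buffer, which is what makes in-place edits such as
            # my_seq[5] = "A" possible.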
elif isinstance(data, (Seq, int, float)):
raise TypeError(
"The sequence data given to a MutableSeq object "
"should be a string or an array (not a Seq object etc)"
)
else:
self.data = data # assumes the input is an array
def __repr__(self):
"""Return (truncated) representation of the sequence for debugging."""
if len(self) > 60:
# Shows the last three letters as it is often useful to see if
# there is a stop codon at the end of a sequence.
# Note total length is 54+3+3=60
return f"{self.__class__.__name__}('{str(self[:54])}...{str(self[-3:])}')"
else:
return f"{self.__class__.__name__}('{str(self)}')"
def __str__(self):
"""Return the full sequence as a python string.
Note that Biopython 1.44 and earlier would give a truncated
version of repr(my_seq) for str(my_seq). If you are writing code
which needs to be backwards compatible with old Biopython, you
should continue to use my_seq.tostring() rather than str(my_seq).
"""
# See test_GAQueens.py for an historic usage of a non-string alphabet!
return "".join(self.data)
def __eq__(self, other):
"""Compare the sequence to another sequence or a string.
Historically comparing DNA to RNA, or Nucleotide to Protein would
raise an exception. This was later downgraded to a warning, but since
Biopython 1.78 the alphabet is ignored for comparisons.
If you need to support older Biopython versions, please just do
explicit comparisons:
>>> seq1 = MutableSeq("ACGT")
>>> seq2 = MutableSeq("ACGT")
>>> id(seq1) == id(seq2)
False
>>> str(seq1) == str(seq2)
True
Biopython now does:
>>> seq1 == seq2
True
>>> seq1 == Seq("ACGT")
True
>>> seq1 == "ACGT"
True
"""
if isinstance(other, MutableSeq):
return self.data == other.data
return str(self) == str(other)
def __lt__(self, other):
"""Implement the less-than operand."""
if isinstance(other, MutableSeq):
return self.data < other.data
if isinstance(other, (str, Seq, UnknownSeq)):
return str(self) < str(other)
raise TypeError(
f"'<' not supported between instances of '{type(self).__name__}'"
f" and '{type(other).__name__}'"
)
def __le__(self, other):
"""Implement the less-than or equal operand."""
if isinstance(other, MutableSeq):
return self.data <= other.data
if isinstance(other, (str, Seq, UnknownSeq)):
return str(self) <= str(other)
raise TypeError(
f"'<=' not supported between instances of '{type(self).__name__}'"
f" and '{type(other).__name__}'"
)
def __gt__(self, other):
"""Implement the greater-than operand."""
if isinstance(other, MutableSeq):
return self.data > other.data
if isinstance(other, (str, Seq, UnknownSeq)):
return str(self) > str(other)
raise TypeError(
f"'>' not supported between instances of '{type(self).__name__}'"
f" and '{type(other).__name__}'"
)
def __ge__(self, other):
"""Implement the greater-than or equal operand."""
if isinstance(other, MutableSeq):
return self.data >= other.data
if isinstance(other, (str, Seq, UnknownSeq)):
return str(self) >= str(other)
raise TypeError(
f"'>=' not supported between instances of '{type(self).__name__}'"
f" and '{type(other).__name__}'"
)
def __len__(self):
"""Return the length of the sequence, use len(my_seq)."""
return len(self.data)
def __getitem__(self, index):
"""Return a subsequence of single letter, use my_seq[index].
>>> my_seq = MutableSeq('ACTCGACGTCG')
>>> my_seq[5]
'A'
"""
if isinstance(index, int):
# Return a single letter as a string
return self.data[index]
else:
# Return the (sub)sequence as another Seq object
return MutableSeq(self.data[index])
def __setitem__(self, index, value):
"""Set a subsequence of single letter via value parameter.
>>> my_seq = MutableSeq('ACTCGACGTCG')
>>> my_seq[0] = 'T'
>>> my_seq
MutableSeq('TCTCGACGTCG')
"""
if isinstance(index, int):
# Replacing a single letter with a new string
self.data[index] = value
else:
# Replacing a sub-sequence
if isinstance(value, MutableSeq):
self.data[index] = value.data
elif isinstance(value, type(self.data)):
self.data[index] = value
else:
self.data[index] = array.array("u", str(value))
def __delitem__(self, index):
"""Delete a subsequence of single letter.
>>> my_seq = MutableSeq('ACTCGACGTCG')
>>> del my_seq[0]
>>> my_seq
MutableSeq('CTCGACGTCG')
"""
# Could be deleting a single letter, or a slice
del self.data[index]
def __add__(self, other):
"""Add another sequence or string to this sequence.
Returns a new MutableSeq object.
"""
if isinstance(other, MutableSeq):
# See test_GAQueens.py for an historic usage of a non-string
# alphabet! Adding the arrays should support this.
return self.__class__(self.data + other.data)
elif isinstance(other, (str, Seq)):
return self.__class__(str(self) + str(other))
else:
raise TypeError
def __radd__(self, other):
"""Add a sequence on the left.
>>> from Bio.Seq import MutableSeq
>>> "LV" + MutableSeq("MELKI")
MutableSeq('LVMELKI')
"""
if isinstance(other, MutableSeq):
# See test_GAQueens.py for an historic usage of a non-string
# alphabet! Adding the arrays should support this.
return self.__class__(other.data + self.data)
elif isinstance(other, (str, Seq)):
return self.__class__(str(other) + str(self))
else:
raise TypeError
def __mul__(self, other):
"""Multiply MutableSeq by integer.
Note this is not in-place and returns a new object,
matching native Python list multiplication.
>>> from Bio.Seq import MutableSeq
>>> MutableSeq('ATG') * 2
MutableSeq('ATGATG')
"""
if not isinstance(other, int):
raise TypeError(f"can't multiply {self.__class__.__name__} by non-int type")
return self.__class__(self.data * other)
def __rmul__(self, other):
"""Multiply integer by MutableSeq.
Note this is not in-place and returns a new object,
matching native Python list multiplication.
>>> from Bio.Seq import MutableSeq
>>> 2 * MutableSeq('ATG')
MutableSeq('ATGATG')
"""
if not isinstance(other, int):
raise TypeError(f"can't multiply {self.__class__.__name__} by non-int type")
return self.__class__(self.data * other)
def __imul__(self, other):
"""Multiply MutableSeq in-place.
>>> from Bio.Seq import MutableSeq
>>> seq = MutableSeq('ATG')
>>> seq *= 2
>>> seq
MutableSeq('ATGATG')
"""
if not isinstance(other, int):
raise TypeError(f"can't multiply {self.__class__.__name__} by non-int type")
return self.__class__(self.data * other)
def append(self, c):
"""Add a subsequence to the mutable sequence object.
>>> my_seq = MutableSeq('ACTCGACGTCG')
>>> my_seq.append('A')
>>> my_seq
MutableSeq('ACTCGACGTCGA')
No return value.
"""
self.data.append(c)
def insert(self, i, c):
"""Add a subsequence to the mutable sequence object at a given index.
>>> my_seq = MutableSeq('ACTCGACGTCG')
>>> my_seq.insert(0,'A')
>>> my_seq
MutableSeq('AACTCGACGTCG')
>>> my_seq.insert(8,'G')
>>> my_seq
MutableSeq('AACTCGACGGTCG')
No return value.
"""
self.data.insert(i, c)
def pop(self, i=(-1)):
"""Remove a subsequence of a single letter at given index.
>>> my_seq = MutableSeq('ACTCGACGTCG')
>>> my_seq.pop()
'G'
>>> my_seq
MutableSeq('ACTCGACGTC')
>>> my_seq.pop()
'C'
>>> my_seq
MutableSeq('ACTCGACGT')
        Returns the removed character (by default the last one in the sequence).
"""
c = self.data[i]
del self.data[i]
return c
def remove(self, item):
"""Remove a subsequence of a single letter from mutable sequence.
>>> my_seq = MutableSeq('ACTCGACGTCG')
>>> my_seq.remove('C')
>>> my_seq
MutableSeq('ATCGACGTCG')
>>> my_seq.remove('A')
>>> my_seq
MutableSeq('TCGACGTCG')
No return value.
"""
for i in range(len(self.data)):
if self.data[i] == item:
del self.data[i]
return
raise ValueError("MutableSeq.remove(x): x not in list")
def count(self, sub, start=0, end=sys.maxsize):
"""Return a non-overlapping count, like that of a python string.
This behaves like the python string method of the same name,
which does a non-overlapping count!
For an overlapping search use the newer count_overlap() method.
Returns an integer, the number of occurrences of substring
argument sub in the (sub)sequence given by [start:end].
Optional arguments start and end are interpreted as in slice
notation.
Arguments:
- sub - a string or another Seq object to look for
- start - optional integer, slice start
- end - optional integer, slice end
e.g.
>>> from Bio.Seq import MutableSeq
>>> my_mseq = MutableSeq("AAAATGA")
>>> print(my_mseq.count("A"))
5
>>> print(my_mseq.count("ATG"))
1
>>> print(my_mseq.count(Seq("AT")))
1
>>> print(my_mseq.count("AT", 2, -1))
1
        HOWEVER, please note that because python strings, Seq objects and
MutableSeq objects do a non-overlapping search, this may not give
the answer you expect:
>>> "AAAA".count("AA")
2
>>> print(MutableSeq("AAAA").count("AA"))
2
An overlapping search would give the answer as three!
"""
try:
search = str(sub)
except AttributeError:
search = sub
if not isinstance(search, str):
raise TypeError("expected a string, Seq or MutableSeq")
if len(search) == 1:
# Try and be efficient and work directly from the array.
count = 0
for c in self.data[start:end]:
if c == search:
count += 1
return count
else:
# TODO - Can we do this more efficiently?
return str(self).count(search, start, end)
def count_overlap(self, sub, start=0, end=sys.maxsize):
"""Return an overlapping count.
For a non-overlapping search use the count() method.
Returns an integer, the number of occurrences of substring
argument sub in the (sub)sequence given by [start:end].
Optional arguments start and end are interpreted as in slice
notation.
Arguments:
- sub - a string or another Seq object to look for
- start - optional integer, slice start
- end - optional integer, slice end
e.g.
>>> from Bio.Seq import MutableSeq
>>> print(MutableSeq("AAAA").count_overlap("AA"))
3
>>> print(MutableSeq("ATATATATA").count_overlap("ATA"))
4
>>> print(MutableSeq("ATATATATA").count_overlap("ATA", 3, -1))
1
        Where substrings do not overlap, this should behave the same as
the count() method:
>>> from Bio.Seq import MutableSeq
>>> my_mseq = MutableSeq("AAAATGA")
>>> print(my_mseq.count_overlap("A"))
5
>>> my_mseq.count_overlap("A") == my_mseq.count("A")
True
>>> print(my_mseq.count_overlap("ATG"))
1
>>> my_mseq.count_overlap("ATG") == my_mseq.count("ATG")
True
>>> print(my_mseq.count_overlap(Seq("AT")))
1
>>> my_mseq.count_overlap(Seq("AT")) == my_mseq.count(Seq("AT"))
True
>>> print(my_mseq.count_overlap("AT", 2, -1))
1
>>> my_mseq.count_overlap("AT", 2, -1) == my_mseq.count("AT", 2, -1)
True
HOWEVER, do not use this method for such cases because the
        count() method is much more efficient.
"""
# The implementation is currently identical to that of
# Seq.count_overlap() apart from the definition of sub_str
sub_str = str(sub)
self_str = str(self)
overlap_count = 0
while True:
start = self_str.find(sub_str, start, end) + 1
if start != 0:
overlap_count += 1
else:
return overlap_count
def index(self, item):
"""Return first occurrence position of a single entry (i.e. letter).
>>> my_seq = MutableSeq("ACTCGACGTCG")
>>> my_seq.index("A")
0
>>> my_seq.index("T")
2
>>> my_seq.index(Seq("T"))
2
Note unlike a Biopython Seq object, or Python string, multi-letter
subsequences are not supported. Instead this acts like an array or
a list of the entries. There is therefore no ``.rindex()`` method.
"""
# TODO?: return self.data.index(i)
for i in range(len(self.data)):
if self.data[i] == item:
return i
raise ValueError("MutableSeq.index(x): x not in list")
def reverse(self):
"""Modify the mutable sequence to reverse itself.
No return value.
"""
self.data.reverse()
def complement(self):
"""Modify the mutable sequence to take on its complement.
No return value.
If the sequence contains neither T nor U, DNA is assumed
and any A will be mapped to T.
If the sequence contains both T and U, an exception is raised.
"""
if "U" in self.data and "T" in self.data:
raise ValueError("Mixed RNA/DNA found")
elif "U" in self.data:
d = ambiguous_rna_complement
else:
d = ambiguous_dna_complement
mixed = d.copy() # We're going to edit this to be mixed case!
mixed.update((x.lower(), y.lower()) for x, y in d.items())
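        # 'mixed' now holds both cases, e.g. "A" -> "T" and "a" -> "t" with the
        # DNA table, so the original case of each letter is preserved.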
self.data = [mixed[_] for _ in self.data]
self.data = array.array("u", self.data)
def reverse_complement(self):
"""Modify the mutable sequence to take on its reverse complement.
No return value.
"""
self.complement()
self.data.reverse()
def extend(self, other):
"""Add a sequence to the original mutable sequence object.
>>> my_seq = MutableSeq('ACTCGACGTCG')
>>> my_seq.extend('A')
>>> my_seq
MutableSeq('ACTCGACGTCGA')
>>> my_seq.extend('TTT')
>>> my_seq
MutableSeq('ACTCGACGTCGATTT')
No return value.
"""
if isinstance(other, MutableSeq):
for c in other.data:
self.data.append(c)
else:
for c in other:
self.data.append(c)
def toseq(self):
"""Return the full sequence as a new immutable Seq object.
>>> from Bio.Seq import MutableSeq
>>> my_mseq = MutableSeq("MKQHKAMIVALIVICITAVVAAL")
>>> my_mseq
MutableSeq('MKQHKAMIVALIVICITAVVAAL')
>>> my_mseq.toseq()
Seq('MKQHKAMIVALIVICITAVVAAL')
"""
return Seq("".join(self.data))
def join(self, other):
"""Return a merge of the sequences in other, spaced by the sequence from self.
Accepts all Seq objects and Strings as objects to be concatenated with the spacer
>>> concatenated = MutableSeq('NNNNN').join([Seq("AAA"), Seq("TTT"), Seq("PPP")])
>>> concatenated
Seq('AAANNNNNTTTNNNNNPPP')
        Raises a TypeError if other is not an iterable, or if the objects
        inside the iterable are not Seq or string objects.
"""
# returns Seq object instead of MutableSeq
return self.toseq().join(other)
# The transcribe, back_transcribe, and translate functions are
# user-friendly versions of the corresponding functions in Bio.Transcribe
# and Bio.Translate. The functions work both on Seq objects, and on strings.
def transcribe(dna):
"""Transcribe a DNA sequence into RNA.
If given a string, returns a new string object.
Given a Seq or MutableSeq, returns a new Seq object.
e.g.
>>> transcribe("ACTGN")
'ACUGN'
"""
if isinstance(dna, Seq):
return dna.transcribe()
elif isinstance(dna, MutableSeq):
return dna.toseq().transcribe()
else:
return dna.replace("T", "U").replace("t", "u")
def back_transcribe(rna):
"""Return the RNA sequence back-transcribed into DNA.
If given a string, returns a new string object.
Given a Seq or MutableSeq, returns a new Seq object.
e.g.
>>> back_transcribe("ACUGN")
'ACTGN'
"""
if isinstance(rna, Seq):
return rna.back_transcribe()
elif isinstance(rna, MutableSeq):
return rna.toseq().back_transcribe()
else:
return rna.replace("U", "T").replace("u", "t")
def _translate_str(
sequence, table, stop_symbol="*", to_stop=False, cds=False, pos_stop="X", gap=None
):
"""Translate nucleotide string into a protein string (PRIVATE).
Arguments:
- sequence - a string
- table - a CodonTable object (NOT a table name or id number)
- stop_symbol - a single character string, what to use for terminators.
- to_stop - boolean, should translation terminate at the first
in frame stop codon? If there is no in-frame stop codon
then translation continues to the end.
- pos_stop - a single character string for a possible stop codon
(e.g. TAN or NNN)
- cds - Boolean, indicates this is a complete CDS. If True, this
checks the sequence starts with a valid alternative start
codon (which will be translated as methionine, M), that the
sequence length is a multiple of three, and that there is a
single in frame stop codon at the end (this will be excluded
from the protein sequence, regardless of the to_stop option).
If these tests fail, an exception is raised.
- gap - Single character string to denote symbol used for gaps.
Defaults to None.
Returns a string.
e.g.
>>> from Bio.Data import CodonTable
>>> table = CodonTable.ambiguous_dna_by_id[1]
>>> _translate_str("AAA", table)
'K'
>>> _translate_str("TAR", table)
'*'
>>> _translate_str("TAN", table)
'X'
>>> _translate_str("TAN", table, pos_stop="@")
'@'
>>> _translate_str("TA?", table)
Traceback (most recent call last):
...
Bio.Data.CodonTable.TranslationError: Codon 'TA?' is invalid
In a change from older versions of Biopython, partial codons are now
always regarded as an error (previously only checked if cds=True)
and will trigger a warning (likely to become an exception in a
future release).
If **cds=True**, the start and stop codons are checked, and the start
codon will be translated as methionine. The sequence must be a
whole number of codons.
>>> _translate_str("ATGCCCTAG", table, cds=True)
'MP'
>>> _translate_str("AAACCCTAG", table, cds=True)
Traceback (most recent call last):
...
Bio.Data.CodonTable.TranslationError: First codon 'AAA' is not a start codon
>>> _translate_str("ATGCCCTAGCCCTAG", table, cds=True)
Traceback (most recent call last):
...
Bio.Data.CodonTable.TranslationError: Extra in frame stop codon found.
"""
sequence = sequence.upper()
amino_acids = []
forward_table = table.forward_table
stop_codons = table.stop_codons
if table.nucleotide_alphabet is not None:
valid_letters = set(table.nucleotide_alphabet.upper())
else:
# Assume the worst case, ambiguous DNA or RNA:
valid_letters = set(
_ambiguous_dna_letters.upper() + _ambiguous_rna_letters.upper()
)
n = len(sequence)
# Check for tables with 'ambiguous' (dual-coding) stop codons:
dual_coding = [c for c in stop_codons if c in forward_table]
if dual_coding:
c = dual_coding[0]
if to_stop:
raise ValueError(
"You cannot use 'to_stop=True' with this table as it contains"
f" {len(dual_coding)} codon(s) which can be both STOP and an"
f" amino acid (e.g. '{c}' -> '{forward_table[c]}' or STOP)."
)
warnings.warn(
f"This table contains {len(dual_coding)} codon(s) which code(s) for"
f" both STOP and an amino acid (e.g. '{c}' -> '{forward_table[c]}'"
" or STOP). Such codons will be translated as amino acid.",
BiopythonWarning,
)
if cds:
if str(sequence[:3]).upper() not in table.start_codons:
raise CodonTable.TranslationError(
f"First codon '{sequence[:3]}' is not a start codon"
)
if n % 3 != 0:
raise CodonTable.TranslationError(
f"Sequence length {n} is not a multiple of three"
)
if str(sequence[-3:]).upper() not in stop_codons:
raise CodonTable.TranslationError(
f"Final codon '{sequence[-3:]}' is not a stop codon"
)
# Don't translate the stop symbol, and manually translate the M
sequence = sequence[3:-3]
n -= 6
amino_acids = ["M"]
elif n % 3 != 0:
warnings.warn(
"Partial codon, len(sequence) not a multiple of three. "
"Explicitly trim the sequence or add trailing N before "
"translation. This may become an error in future.",
BiopythonWarning,
)
if gap is not None:
if not isinstance(gap, str):
raise TypeError("Gap character should be a single character string.")
elif len(gap) > 1:
raise ValueError("Gap character should be a single character string.")
for i in range(0, n - n % 3, 3):
codon = sequence[i : i + 3]
try:
amino_acids.append(forward_table[codon])
except (KeyError, CodonTable.TranslationError):
if codon in table.stop_codons:
if cds:
raise CodonTable.TranslationError(
"Extra in frame stop codon found."
) from None
if to_stop:
break
amino_acids.append(stop_symbol)
elif valid_letters.issuperset(set(codon)):
# Possible stop codon (e.g. NNN or TAN)
amino_acids.append(pos_stop)
elif gap is not None and codon == gap * 3:
# Gapped translation
amino_acids.append(gap)
else:
raise CodonTable.TranslationError(
f"Codon '{codon}' is invalid"
) from None
return "".join(amino_acids)
def translate(
sequence, table="Standard", stop_symbol="*", to_stop=False, cds=False, gap=None
):
"""Translate a nucleotide sequence into amino acids.
If given a string, returns a new string object. Given a Seq or
MutableSeq, returns a Seq object.
Arguments:
- table - Which codon table to use? This can be either a name
(string), an NCBI identifier (integer), or a CodonTable object
(useful for non-standard genetic codes). Defaults to the "Standard"
table.
- stop_symbol - Single character string, what to use for any
terminators, defaults to the asterisk, "*".
- to_stop - Boolean, defaults to False meaning do a full
translation continuing on past any stop codons
(translated as the specified stop_symbol). If
True, translation is terminated at the first in
frame stop codon (and the stop_symbol is not
appended to the returned protein sequence).
- cds - Boolean, indicates this is a complete CDS. If True, this
checks the sequence starts with a valid alternative start
codon (which will be translated as methionine, M), that the
sequence length is a multiple of three, and that there is a
single in frame stop codon at the end (this will be excluded
from the protein sequence, regardless of the to_stop option).
If these tests fail, an exception is raised.
- gap - Single character string to denote symbol used for gaps.
Defaults to None.
A simple string example using the default (standard) genetic code:
>>> coding_dna = "GTGGCCATTGTAATGGGCCGCTGAAAGGGTGCCCGATAG"
>>> translate(coding_dna)
'VAIVMGR*KGAR*'
>>> translate(coding_dna, stop_symbol="@")
'VAIVMGR@KGAR@'
>>> translate(coding_dna, to_stop=True)
'VAIVMGR'
Now using NCBI table 2, where TGA is not a stop codon:
>>> translate(coding_dna, table=2)
'VAIVMGRWKGAR*'
>>> translate(coding_dna, table=2, to_stop=True)
'VAIVMGRWKGAR'
In fact this example uses an alternative start codon valid under NCBI
table 2, GTG, which means this example is a complete valid CDS which
when translated should really start with methionine (not valine):
>>> translate(coding_dna, table=2, cds=True)
'MAIVMGRWKGAR'
Note that if the sequence has no in-frame stop codon, then the to_stop
argument has no effect:
>>> coding_dna2 = "GTGGCCATTGTAATGGGCCGC"
>>> translate(coding_dna2)
'VAIVMGR'
>>> translate(coding_dna2, to_stop=True)
'VAIVMGR'
NOTE - Ambiguous codons like "TAN" or "NNN" could be an amino acid
or a stop codon. These are translated as "X". Any invalid codon
(e.g. "TA?" or "T-A") will throw a TranslationError.
It will however translate either DNA or RNA.
NOTE - Since version 1.71 Biopython contains codon tables with 'ambiguous
stop codons'. These are stop codons with unambiguous sequence but which
have a context dependent coding as STOP or as amino acid. With these tables
'to_stop' must be False (otherwise a ValueError is raised). The dual
coding codons will always be translated as amino acid, except for
'cds=True', where the last codon will be translated as STOP.
>>> coding_dna3 = "ATGGCACGGAAGTGA"
>>> translate(coding_dna3)
'MARK*'
>>> translate(coding_dna3, table=27) # Table 27: TGA -> STOP or W
'MARKW'
It will however raise a BiopythonWarning (not shown).
>>> translate(coding_dna3, table=27, cds=True)
'MARK'
>>> translate(coding_dna3, table=27, to_stop=True)
Traceback (most recent call last):
...
ValueError: You cannot use 'to_stop=True' with this table ...
"""
if isinstance(sequence, Seq):
return sequence.translate(table, stop_symbol, to_stop, cds)
elif isinstance(sequence, MutableSeq):
# Return a Seq object
return sequence.toseq().translate(table, stop_symbol, to_stop, cds)
else:
# Assume its a string, return a string
try:
codon_table = CodonTable.ambiguous_generic_by_id[int(table)]
except ValueError:
codon_table = CodonTable.ambiguous_generic_by_name[table]
except (AttributeError, TypeError):
if isinstance(table, CodonTable.CodonTable):
codon_table = table
else:
raise ValueError("Bad table argument") from None
return _translate_str(sequence, codon_table, stop_symbol, to_stop, cds, gap=gap)
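# --- Editorial usage sketch (added for illustration; not part of the Bio.Seq API) ---
# A minimal round trip through the string-based helpers defined above, assuming the
# default "Standard" codon table:
def _example_string_translation():
    coding_dna = "ATGGCCATTGTAATGGGCCGCTGAAAGGGTGCCCGATAG"
    messenger_rna = transcribe(coding_dna)  # 'AUGGCCAUUGUAAUGGGCCGCUGAAAGGGUGCCCGAUAG'
    protein = translate(coding_dna, to_stop=True)  # 'MAIVMGR'
    assert back_transcribe(messenger_rna) == coding_dna
    return messenger_rna, protein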
def reverse_complement(sequence):
"""Return the reverse complement sequence of a nucleotide string.
If given a string, returns a new string object.
Given a Seq or a MutableSeq, returns a new Seq object.
Supports unambiguous and ambiguous nucleotide sequences.
e.g.
>>> reverse_complement("ACTG-NH")
'DN-CAGT'
If neither T nor U is present, DNA is assumed and A is mapped to T:
>>> reverse_complement("A")
'T'
"""
return complement(sequence)[::-1]
def complement(sequence):
"""Return the complement sequence of a DNA string.
If given a string, returns a new string object.
Given a Seq or a MutableSeq, returns a new Seq object.
Supports unambiguous and ambiguous nucleotide sequences.
e.g.
>>> complement("ACTG-NH")
'TGAC-ND'
If neither T nor U is present, DNA is assumed and A is mapped to T:
>>> complement("A")
'T'
However, this may not be supported in future. Please use the
complement_rna function if you have RNA.
"""
if isinstance(sequence, Seq):
# Return a Seq
return sequence.complement()
elif isinstance(sequence, MutableSeq):
# Return a Seq
# Don't use the MutableSeq reverse_complement method as it is
# 'in place'.
return sequence.toseq().complement()
# Assume its a string.
# In order to avoid some code duplication, the old code would turn the
# string into a Seq, use the reverse_complement method, and convert back
# to a string.
# This worked, but is over five times slower on short sequences!
if ("U" in sequence or "u" in sequence) and ("T" in sequence or "t" in sequence):
raise ValueError("Mixed RNA/DNA found")
elif "U" in sequence or "u" in sequence:
# TODO - warning or exception in future?
ttable = _rna_complement_table
else:
ttable = _dna_complement_table
return sequence.translate(ttable)
def complement_rna(sequence):
"""Return the complement sequence of an RNA string.
>>> complement("ACG") # assumed DNA
'TGC'
>>> complement_rna("ACG")
'UGC'
Any T in the sequence is treated as a U.
"""
if isinstance(sequence, Seq):
# Return a Seq
return sequence.complement_rna()
elif isinstance(sequence, MutableSeq):
# Return a Seq
return sequence.toseq().complement_rna()
return sequence.translate(_rna_complement_table)
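# --- Editorial usage sketch (added for illustration; not part of the Bio.Seq API) ---
# The complement helpers compose exactly as the docstrings above describe:
def _example_complement_helpers():
    assert complement("ACTG-NH") == "TGAC-ND"
    assert reverse_complement("ACTG-NH") == complement("ACTG-NH")[::-1] == "DN-CAGT"
    assert complement_rna("ACG") == "UGC"
    return reverse_complement("ACTG-NH")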
def _test():
"""Run the Bio.Seq module's doctests (PRIVATE)."""
print("Running doctests...")
import doctest
doctest.testmod(optionflags=doctest.IGNORE_EXCEPTION_DETAIL)
print("Done")
if __name__ == "__main__":
_test()
| 34.594942 | 104 | 0.597645 |
hexsha: 1c07cf5fef5087ed8c62f177cca98cd001959a88 | size: 745 | ext: py | lang: Python
max_stars: data/plugins/objects/rotten tomato crate.py | FavyTeam/Elderscape_server | 38bf75396e4e13222be67d5f15eb0b9862dca6bb | ["MIT"] | 3 | 2019-05-09T16:59:13.000Z | 2019-05-09T18:29:57.000Z
max_issues: data/plugins/objects/rotten tomato crate.py | FavyTeam/Elderscape_server | 38bf75396e4e13222be67d5f15eb0b9862dca6bb | ["MIT"] | null | null | null
max_forks: data/plugins/objects/rotten tomato crate.py | FavyTeam/Elderscape_server | 38bf75396e4e13222be67d5f15eb0b9862dca6bb | ["MIT"] | 7 | 2019-07-11T23:04:40.000Z | 2021-08-02T14:27:13.000Z
from java.lang import System  # java.lang.System supplies currentTimeMillis() used below
from game.item import ItemAssistant
def first_click_object_3195(player):
if player.playerRights == 1 or player.playerRights == 2:
if (ItemAssistant.getFreeInventorySlots(player) == 0):
player.getDH().sendStatement("You need at least 1 free inventory space to do this.")
return
if (System.currentTimeMillis() - player.buryDelay > 1200):
player.buryDelay = System.currentTimeMillis();
ItemAssistant.addItem(player, 2518, ItemAssistant.getFreeInventorySlots(player))
player.startAnimation(832)
player.getPA().sendMessage("You take some rotten tomatoes from the crate.")
else:
player.getPA().sendMessage("Nice try, but only staff may do this.")
| 53.214286 | 96 | 0.680537 |
hexsha: 94e430d0eb4bc907c273ab94dd7faba44f637e2c | size: 4,750 | ext: py | lang: Python
max_stars: miniapp/miniapp/orbital/lcao.py | savcardamone/tyche- | ea89edea89a607291e4fe0ba738d75522f54dc1a | ["MIT"] | null | null | null
max_issues: miniapp/miniapp/orbital/lcao.py | savcardamone/tyche- | ea89edea89a607291e4fe0ba738d75522f54dc1a | ["MIT"] | 1 | 2018-12-28T13:30:16.000Z | 2018-12-29T10:30:33.000Z
max_forks: miniapp/miniapp/orbital/lcao.py | savcardamone/tyche | ea89edea89a607291e4fe0ba738d75522f54dc1a | ["MIT"] | null | null | null
#!/usr/bin/env python
"""lcao.py: Linear combination of atomic orbitals."""
__author__ = "Salvatore Cardamone"
__email__ = "[email protected]"
import sys
import itertools
import numpy as np
import untangle
from miniapp.orbital import atomic_orbital as ao
class LCAO():
"""Wrapper around list of atomic orbitals which constitute the LCAO.
"""
def __init__(self, system, input_file):
"""Class constructor.
Build up the LCAO by iterating over atoms in the system and building up the
appropriate AOs on each centre based on the input atomic basis set.
"""
# Pull out the level of the xml we want to be looping through
xml = untangle.parse(input_file)
atomic_basis_sets = xml.Input.Wavefunction.Basis
self.aos = []
for atom in system.atoms:
atomic_aos = []
# Loop over atomic basis sets specified in the file until we find the appropriate one
for atomic_basis_set in atomic_basis_sets:
if atomic_basis_set['atom'] == atom.atom_type:
# Loop over AOs in this contraction and append each to the LCAO
# We explicitly construct the multiple atomic orbitals arising from l > 0
for contraction in atomic_basis_set.Contraction:
coeffs = np.fromstring(contraction.Coeff.cdata, dtype=float, sep=',')
zetas = np.fromstring(contraction.Zeta.cdata, dtype=float, sep=',')
if contraction['ang_mom'] == "s":
ang_moms = [np.array([0, 0, 0], dtype=int)]
elif contraction['ang_mom'] == "p":
ang_moms = [
np.array([1, 0, 0], dtype=int), np.array([0, 1, 0], dtype=int),
np.array([0, 0, 1], dtype=int)
]
elif contraction['ang_mom'] == "d":
ang_moms = [
np.array([2, 0, 0], dtype=int), np.array([0, 2, 0], dtype=int),
np.array([0, 0, 2], dtype=int), np.array([1, 1, 0], dtype=int),
np.array([1, 0, 1], dtype=int), np.array([0, 1, 1], dtype=int)
]
else:
sys.exit("Unrecognised contraction ang mom: {0}".format(
contraction['ang_mom']))
# Loop over all (-l <= m <= l) and construct the corresponding
# atomic orbital
for ang_mom in ang_moms:
atomic_aos.append(
ao.AtomicOrbital(atom.pos, coeffs, zetas, ang_mom)
)
if not atomic_aos:
sys.exit("Could not find atomic basis for atom type {0}".format(atom.atom_type))
else:
self.aos.append(atomic_aos)
self.aos = list(itertools.chain.from_iterable(self.aos))
self.num_aos = len(self.aos)
def __str__(self):
"""Object string representation.
"""
lcao_str = "Linear Combination of Atomic Orbitals\n"
lcao_str += "Number of AOs: {0}\n".format(self.num_aos)
for iao in self.aos:
lcao_str += iao.__str__()
return lcao_str
def overlap_matrix(self):
"""Compute the overlap matrix for all AOs in the LCAO.
"""
matrix = np.zeros((self.num_aos, self.num_aos), dtype=float)
for iao in range(self.num_aos):
for jao in range(self.num_aos):
matrix[iao, jao] = ao.AtomicOrbital.overlap(self.aos[iao], self.aos[jao])
return matrix
def evaluate(self, pos):
"""Evaluate all atomic orbitals in the LCAO at a given position.
"""
ao_vals = np.zeros((self.num_aos,), dtype=float)
for iao in range(self.num_aos):
ao_vals[iao] = self.aos[iao].evaluate(pos)
return ao_vals
def gradient(self, pos):
"""Evaluate the gradient of all atomic orbitals in the LCAO at a given position.
"""
ao_grads = np.zeros((self.num_aos, 3), dtype=float)
for iao in range(self.num_aos):
ao_grads[iao, :] = self.aos[iao].gradient(pos)
return ao_grads
def laplacian(self, pos):
"""Evaluate the laplacian of all atomic orbitals in the LCAO at a given position.
"""
ao_lapls = np.zeros((self.num_aos,), dtype=float)
for iao in range(self.num_aos):
ao_lapls[iao] = self.aos[iao].laplacian(pos)
return ao_lapls
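# --- Editorial usage sketch (hypothetical driver; the `system` object and file name are
# illustrative only, not part of the original module) ---
#
#     lcao = LCAO(system, "input.xml")     # `system` must expose .atoms with .atom_type/.pos
#     print(lcao)                          # summary of all contracted AOs
#     overlap = lcao.overlap_matrix()      # (num_aos x num_aos) overlap integrals
#     values = lcao.evaluate(np.zeros(3))  # AO amplitudes at the origin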
| 39.915966 | 97 | 0.537684 |
hexsha: 0a63b711f529240cf70fa5d610d48df49b34f533 | size: 6,278 | ext: py | lang: Python
max_stars: wxnodegraph/nodeGraph.py | Derfies/wxnodegraph | 43777d15fb995485558d0c22aed513a1babeb107 | ["MIT"] | 2 | 2021-02-18T11:19:24.000Z | 2021-07-30T02:43:54.000Z
max_issues: wxnodegraph/nodeGraph.py | Derfies/wxnodegraph | 43777d15fb995485558d0c22aed513a1babeb107 | ["MIT"] | null | null | null
max_forks: wxnodegraph/nodeGraph.py | Derfies/wxnodegraph | 43777d15fb995485558d0c22aed513a1babeb107 | ["MIT"] | null | null | null
import math
import wx
import json
from constants import *
from wire import Wire
from node import Node
class NodeGraph( wx.ScrolledWindow ):
def __init__(self, parent, id, log, size = wx.DefaultSize):
wx.ScrolledWindow.__init__(self, parent, id, (0, 0), size=size, style=wx.SUNKEN_BORDER)
self.nodes = {}
self.srcNode = None
self.srcPlug = None
self.tmpWire = None
self.maxWidth = W
self.maxHeight = H
self.SetBackgroundColour( 'Grey' )
self.SetVirtualSize( (self.maxWidth, self.maxHeight) )
self.SetScrollRate( 20, 20 )
# create a PseudoDC to record our drawing
self.pdc = wx.PseudoDC()
self.Bind( wx.EVT_PAINT, self.OnPaint )
self.Bind( wx.EVT_ERASE_BACKGROUND, lambda x: None )
self.Bind( wx.EVT_LEFT_DOWN, self.OnLeftDown )
self.Bind( wx.EVT_MOTION, self.OnMotion )
self.Bind( wx.EVT_LEFT_UP, self.OnLeftUp )
def ConvertCoords( self, pnt ):
xView, yView = self.GetViewStart()
xDelta, yDelta = self.GetScrollPixelsPerUnit()
return wx.Point(pnt[0] + (xView * xDelta), pnt[1] + (yView * yDelta))
def OffsetRect(self, r):
xView, yView = self.GetViewStart()
xDelta, yDelta = self.GetScrollPixelsPerUnit()
r.OffsetXY(-(xView*xDelta),-(yView*yDelta))
def AppendNode( self, label, pos, ins, outs, colour=None ):
node = Node( self, label, colour, rect=wx.Rect( pos.x, pos.y, 150, 100 ), ins=ins, outs=outs )
nId = node.GetId()
self.pdc.SetId( nId )
node.Draw( self.pdc )
self.pdc.SetIdBounds( nId, node.GetRect() )
self.nodes[nId] = node
return node
def OnLeftDown( self, evt ):
pnt = evt.GetPosition()
winPnt = self.ConvertCoords( pnt )
self.srcNode = self.HitTest( winPnt )
if self.srcNode is not None:
self.srcPlug = self.srcNode.HitTest( winPnt.x, winPnt.y )
if self.srcPlug is not None:
self.srcPlug.Disconnect()
self.tmpWire = Wire( self.srcNode.GetRect().GetPosition() + self.srcPlug.GetPosition(), pnt, self.srcPlug.GetType() )
self.lastPnt = pnt
def OnMotion( self, evt ):
if not evt.LeftIsDown() or self.srcNode is None:
return
pnt = evt.GetPosition()
winPnt = self.ConvertCoords( pnt )
if self.srcPlug is None:
dPnt = pnt - self.lastPnt
r = self.pdc.GetIdBounds( self.srcNode.GetId() )
self.pdc.TranslateId( self.srcNode.GetId(), dPnt[0], dPnt[1] )
r2 = self.pdc.GetIdBounds( self.srcNode.GetId() )
r = r.Union( r2 )
self.OffsetRect( r )
self.RefreshRect( r, False )
self.lastPnt = pnt
self.srcNode.SetRect( r2 )
# Redraw wires
for plug in self.srcNode.GetPlugs():
for wire in plug.GetWires():
pnt1 = wire.srcNode.GetRect().GetPosition() + wire.srcPlug.GetPosition()
pnt2 = wire.dstNode.GetRect().GetPosition() + wire.dstPlug.GetPosition()
self.DrawWire( wire, pnt1, pnt2 )
elif self.tmpWire is not None:
self.DrawWire( self.tmpWire, pnt2=winPnt )
def OnLeftUp( self, evt ):
# Attempt to make a connection.
if self.srcNode is not None:
pnt = evt.GetPosition()
winPnt = self.ConvertCoords( pnt )
dstNode = self.HitTest( winPnt )
if dstNode is not None:
dstPlug = dstNode.HitTest( winPnt.x, winPnt.y )
if dstPlug is not None and self.srcPlug.GetType() != dstPlug.GetType() and self.srcNode.GetId() != dstNode.GetId():
self.srcPlug.Connect( dstPlug )
# Erase the temp wire.
if self.tmpWire is not None:
rect = self.pdc.GetIdBounds( self.tmpWire.GetId() )
self.pdc.RemoveId( self.tmpWire.GetId() )
self.OffsetRect( rect )
self.RefreshRect( rect, False )
self.srcNode = None
self.srcPlug = None
self.tmpWire = None
def HitTest( self, pt ):
idxs = self.pdc.FindObjects( pt[0], pt[1], HIT_RADIUS )
hits = [
idx
for idx in idxs
if idx in self.nodes
]
return self.nodes[hits[0]] if hits else None
def OnPaint(self, event):
# Create a buffered paint DC. It will create the real wx.PaintDC and
# then blit the bitmap to it when dc is deleted.
dc = wx.BufferedPaintDC( self )
dc = wx.GCDC( dc )
# Use PrepateDC to set position correctly.
self.PrepareDC( dc )
# We need to clear the dc BEFORE calling PrepareDC.
bg = wx.Brush( self.GetBackgroundColour() )
dc.SetBackground( bg )
dc.Clear()
# Create a clipping rect from our position and size and the Update
# Region.
xv, yv = self.GetViewStart()
dx, dy = self.GetScrollPixelsPerUnit()
x, y = (xv * dx, yv * dy)
rgn = self.GetUpdateRegion()
rgn.Offset( x, y )
r = rgn.GetBox()
# Draw to the dc using the calculated clipping rect.
self.pdc.DrawToDCClipped( dc, r )
def DrawWire( self, wire, pnt1=None, pnt2=None ):
rect1 = wire.GetRect()
if pnt1 is not None:
wire.pnt1 = pnt1
if pnt2 is not None:
wire.pnt2 = pnt2
rect2 = wire.GetRect()
rect = rect1.Union( rect2 )
self.OffsetRect( rect )
self.pdc.ClearId( wire.GetId() )
wire.Draw( self.pdc )
self.RefreshRect( rect, False )
def Load( self, filePath ):
with open( filePath, 'r' ) as f:
data = json.load( f )
for nodeData in data:
props = nodeData['properties']
node = self.AppendNode(
props['name'],
wx.Point( props['x'], props['y'] ),
nodeData['ins'].keys(),
nodeData['outs'].keys(),
props['color']
)
def GetNodePlug( self, node, plug ):
return node.GetPlug( plug )
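# --- Editorial usage sketch (hypothetical host application; not part of the original file) ---
# Embedding the canvas in a frame and creating two nodes might look roughly like:
#
#     app = wx.App(False)
#     frame = wx.Frame(None, title='Node graph demo', size=(800, 600))
#     graph = NodeGraph(frame, wx.ID_ANY, log=None, size=frame.GetClientSize())
#     graph.AppendNode('Source', wx.Point(40, 40), ins=[], outs=['out'])
#     graph.AppendNode('Sink', wx.Point(300, 120), ins=['in'], outs=[])
#     frame.Show()
#     app.MainLoop()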
| 35.072626 | 133 | 0.560529 |
hexsha: 8f2533ca67bc2b43921795ea1187ba28df02a34c | size: 3,608 | ext: py | lang: Python
max_stars: city_scrapers/mixins/wayne_commission.py | danielahuang/city-scrapers | 711d1995f59100793e771068a6f5d9149e773412 | ["MIT"] | null | null | null
max_issues: city_scrapers/mixins/wayne_commission.py | danielahuang/city-scrapers | 711d1995f59100793e771068a6f5d9149e773412 | ["MIT"] | null | null | null
max_forks: city_scrapers/mixins/wayne_commission.py | danielahuang/city-scrapers | 711d1995f59100793e771068a6f5d9149e773412 | ["MIT"] | null | null | null
# THIS IS JUST A MIXIN. IT MAY USE THINGS THAT ARE NOT ACTUALLY USABLE YET,
# BUT IT WILL BE INTEGRATED INTO A REGULAR AGENCY SPIDER.
# -*- coding: utf-8 -*-
from datetime import datetime
from urllib.parse import urljoin
from dateutil.parser import parse as dateparse
from city_scrapers.constants import CANCELED, COMMITTEE
class WayneCommissionMixin:
timezone = 'America/Detroit'
allowed_domains = ['www.waynecounty.com']
classification = COMMITTEE
location = {
'name': '7th floor meeting room, Guardian Building',
'address': '500 Griswold St, Detroit, MI 48226',
'neighborhood': '',
}
description = ''
def parse(self, response):
"""
`parse` should always `yield` a dict that follows the Event Schema
<https://city-bureau.github.io/city-scrapers/06_event_schema.html>.
Change the `_parse_id`, `_parse_name`, etc methods to fit your scraping
needs.
"""
for item in self._parse_entries(response):
data = {
'_type': 'event',
'name': self.meeting_name,
'event_description': self.description,
'classification': self.classification,
'start': self._parse_start(item),
'end': {
'date': None,
'time': None,
'note': ''
},
'all_day': False,
'location': self.location,
'documents': self._parse_documents(item, response.url),
'sources': [{
'url': response.url,
'note': ''
}]
}
data['id'] = self._generate_id(data)
data['status'] = self._parse_status(item, data)
yield data
def _parse_entries(self, response):
return response.xpath('//tbody/tr[child::td/text()]')
@staticmethod
def _parse_documents(item, base_url):
url = item.xpath('td/a/@href').extract_first()
url = urljoin(base_url, url) if url is not None else ''
if url != '':
note = item.xpath('td/a/text()').extract_first()
note = note.lower() if note is not None else ''
return [{'url': url, 'note': note}]
return []
def _parse_start(self, item):
"""
Parse start date and time.
"""
# Calendar shows only meetings in current year.
yearStr = datetime.now().year
# Dateparse can't always handle the inconsistent dates, so
# let's normalize them using scrapy's regular expressions.
month_str = item.xpath('.//td[2]/text()').re(r'[a-zA-Z]{3}')[0]
day_str = item.xpath('.//td[2]/text()').re(r'\d+')[0]
time_str = item.xpath('.//td[3]/text()').extract_first()
date_str = dateparse('{0} {1} {2} {3}'.format(month_str, day_str, yearStr, time_str))
return {'date': date_str.date(), 'time': date_str.time(), 'note': ''}
def _parse_status(self, item, data):
"""
Parse or generate status of meeting.
Postponed meetings will be considered cancelled.
"""
status_str = item.xpath('.//td[4]//text()').extract_first()
# If the agenda column text contains "postpone" or "cancel" we consider it cancelled.
if ('cancel' in status_str.lower()) or ('postpone' in status_str.lower()):
return CANCELED
# If it's not one of the above statuses, use the status logic from spider.py
else:
return self._generate_status(data, '')
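# --- Editorial usage sketch (hypothetical subclass; names and URL are placeholders) ---
# The mixin is designed to be combined with the project's base spider class, which
# supplies _generate_id/_generate_status, while the subclass supplies agency details:
#
#     class WayneExampleCommitteeSpider(WayneCommissionMixin, Spider):
#         name = 'wayne_example_committee'
#         agency_name = 'Wayne County Government'
#         meeting_name = 'Example Committee'
#         start_urls = ['https://www.waynecounty.com/...']  # committee calendar page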
| 36.444444 | 93 | 0.564302 |
hexsha: fe2fe7f8f06bc0b538fa453d5357d64677fdb3d3 | size: 6,396 | ext: py | lang: Python
max_stars: tools/eval.py | mystlee/YOLOX_AUDIO | 849a881e303e21932911d4771fb79ed9a08a1ce5 | ["Apache-2.0"] | 2 | 2021-11-22T04:29:24.000Z | 2021-11-22T04:29:26.000Z
max_issues: tools/eval.py | kata1110/YOLOX_AUDIO | 546b20c1f09e2407145cd9569d9b2d5d5bbed85f | ["Apache-2.0"] | null | null | null
max_forks: tools/eval.py | kata1110/YOLOX_AUDIO | 546b20c1f09e2407145cd9569d9b2d5d5bbed85f | ["Apache-2.0"] | null | null | null
#!/usr/bin/env python3
# -*- coding:utf-8 -*-
# Copyright (c) Megvii, Inc. and its affiliates.
import argparse
import os
import random
import warnings
from loguru import logger
import torch
import torch.backends.cudnn as cudnn
from torch.nn.parallel import DistributedDataParallel as DDP
from yolox.core import launch
from yolox.exp import get_exp
from yolox.utils import configure_nccl, fuse_model, get_local_rank, get_model_info, setup_logger
def make_parser():
parser = argparse.ArgumentParser("YOLOX Eval")
parser.add_argument("-expn", "--experiment-name", type=str, default="yolox__svhn")
parser.add_argument("-n", "--name", type=str, default="yolox_s", help="model name")
# distributed
parser.add_argument(
"--dist-backend", default="nccl", type=str, help="distributed backend"
)
parser.add_argument(
"--dist-url",
default=None,
type=str,
help="url used to set up distributed training",
)
parser.add_argument("-b", "--batch-size", type=int, default=32, help="batch size")
parser.add_argument(
"-d", "--devices", default=4, type=int, help="device for training"
)
parser.add_argument(
"--num_machines", default=1, type=int, help="num of node for training"
)
parser.add_argument(
"--machine_rank", default=0, type=int, help="node rank for multi-node training"
)
parser.add_argument(
"-f",
"--exp_file",
default="exps/yolox__svhn/yolox_s.py",
type=str,
help="pls input your expriment description file",
)
parser.add_argument("-c", "--ckpt", default="YOLOX_outputs/yolox__svhn/latest_ckpt.pth", type=str, help="ckpt for eval")
parser.add_argument("--conf", default=0.01, type=float, help="test conf")
parser.add_argument("--nms", default=0.65, type=float, help="test nms threshold")
parser.add_argument("--tsize", default=320, type=int, help="test img size")
parser.add_argument("--seed", default=None, type=int, help="eval seed")
parser.add_argument(
"--fp16",
dest="fp16",
default=True,
action="store_true",
help="Adopting mix precision evaluating.",
)
parser.add_argument(
"--fuse",
dest="fuse",
default=False,
action="store_true",
help="Fuse conv and bn for testing.",
)
parser.add_argument(
"--trt",
dest="trt",
default=False,
action="store_true",
help="Using TensorRT model for testing.",
)
parser.add_argument(
"--legacy",
dest="legacy",
default=False,
action="store_true",
help="To be compatible with older versions",
)
parser.add_argument(
"--test",
dest="test",
default=False,
action="store_true",
help="Evaluating on test-dev set.",
)
parser.add_argument(
"--speed",
dest="speed",
default=False,
action="store_true",
help="speed test only.",
)
parser.add_argument(
"opts",
help="Modify config options using the command-line",
default=None,
nargs=argparse.REMAINDER,
)
return parser
@logger.catch
def main(exp, args, num_gpu):
if args.seed is not None:
random.seed(args.seed)
torch.manual_seed(args.seed)
cudnn.deterministic = True
warnings.warn(
"You have chosen to seed testing. This will turn on the CUDNN deterministic setting, "
)
is_distributed = num_gpu > 1
# set environment variables for distributed training
configure_nccl()
cudnn.benchmark = True
rank = get_local_rank()
file_name = os.path.join(exp.output_dir, args.experiment_name)
if rank == 0:
os.makedirs(file_name, exist_ok=True)
setup_logger(file_name, distributed_rank=rank, filename="val_log.txt", mode="a")
logger.info("Args: {}".format(args))
if args.conf is not None:
exp.test_conf = args.conf
if args.nms is not None:
exp.nmsthre = args.nms
if args.tsize is not None:
exp.test_size = (args.tsize, args.tsize)
model = exp.get_model()
logger.info("Model Summary: {}".format(get_model_info(model, exp.test_size)))
logger.info("Model Structure:\n{}".format(str(model)))
evaluator = exp.get_evaluator(args.batch_size, is_distributed, args.test)
torch.cuda.set_device(rank)
model.cuda(rank)
model.eval()
if not args.speed and not args.trt:
if args.ckpt is None:
ckpt_file = os.path.join(file_name, "best_ckpt.pth")
else:
ckpt_file = args.ckpt
logger.info("loading checkpoint from {}".format(ckpt_file))
loc = "cuda:{}".format(rank)
ckpt = torch.load(ckpt_file, map_location=loc)
model.load_state_dict(ckpt["model"])
logger.info("loaded checkpoint done.")
if is_distributed:
model = DDP(model, device_ids=[rank])
if args.fuse:
logger.info("\tFusing model...")
model = fuse_model(model)
if args.trt:
assert (
not args.fuse and not is_distributed and args.batch_size == 1
), "TensorRT model is not support model fusing and distributed inferencing!"
trt_file = os.path.join(file_name, "model_trt.pth")
assert os.path.exists(
trt_file
), "TensorRT model is not found!\n Run tools/trt.py first!"
model.head.decode_in_inference = False
decoder = model.head.decode_outputs
else:
trt_file = None
decoder = None
# start evaluate
*_, summary = evaluator.evaluate(
model, is_distributed, args.fp16, trt_file, decoder, exp.test_size
)
logger.info("\n" + summary)
if __name__ == "__main__":
args = make_parser().parse_args()
exp = get_exp(args.exp_file, args.name)
exp.merge(args.opts)
if not args.experiment_name:
args.experiment_name = exp.exp_name
num_gpu = torch.cuda.device_count() if args.devices is None else args.devices
assert num_gpu <= torch.cuda.device_count()
dist_url = "auto" if args.dist_url is None else args.dist_url
launch(
main,
num_gpu,
args.num_machines,
args.machine_rank,
backend=args.dist_backend,
dist_url=dist_url,
args=(exp, args, num_gpu),
)
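# --- Editorial usage note (added for illustration; paths are examples only) ---
# A typical single-GPU evaluation using the flags defined in make_parser():
#
#     python tools/eval.py -f exps/yolox__svhn/yolox_s.py \
#         -c YOLOX_outputs/yolox__svhn/latest_ckpt.pth -d 1 -b 32 --conf 0.01 --fuse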
| 30.312796 | 124 | 0.628049 |
hexsha: fe3a890cbffd1fe6788abd6a716a21ec55a81b1e | size: 1,615 | ext: py | lang: Python
max_stars: ANT/preprocess.py | KevinBastianYang/ANT | 6e638c1a6f3bfc726329e67ad72ee67639f3dfe4 | ["MIT"] | null | null | null
max_issues: ANT/preprocess.py | KevinBastianYang/ANT | 6e638c1a6f3bfc726329e67ad72ee67639f3dfe4 | ["MIT"] | null | null | null
max_forks: ANT/preprocess.py | KevinBastianYang/ANT | 6e638c1a6f3bfc726329e67ad72ee67639f3dfe4 | ["MIT"] | null | null | null
#!/usr/bin/env python
###Author:JCY
###Usage: bam_to_sam() is a function that transforms the bam file to sam file
### trim_head() is a function that extracts the useful information of sam file
### read_to_umi() is a function that links the reads and umis that come from the same cell
import os
import re
import commands
import gzip
from linkage import get_linkage
def bam_to_sam(parameterANT,cell_name):
star_out_path = parameterANT["STAR_OUT_PATH"]
trans_cmd = "samtools view -h "+star_out_path+"STAR_out_"+cell_name+"/Aligned.toTranscriptome.out.bam > "+star_out_path+"STAR_out_"+cell_name+"/TranscriptCoord.sam"
os.system(trans_cmd)
print "bam to sam for "+cell_name+" finished\n"
def trim_head(parameterANT,cell_name):
star_out_path = parameterANT["STAR_OUT_PATH"]
command = "grep '^[^@]' "+star_out_path+"STAR_out_"+cell_name+"/TranscriptCoord.sam"
trim_header = commands.getoutput(command).split('\n')
return trim_header
def read_to_umi(parameterANT,cell_name):
cell_path = parameterANT["CELL_DIR"]
cell_number,tran_dict = get_linkage(parameterANT)
read_file = cell_path+"cell_"+cell_number[cell_name]+"_"+cell_name+".fastq.gz"
umi_file = cell_path+"cell_"+cell_number[cell_name]+"_"+cell_name+".umi"
fastq_list = []
with gzip.open(read_file,'r') as rf:
for line in rf:
if line[0] == '@':
# '@' dumped
fastq_list.append(line[1:].strip())
umi_list = []
with open(umi_file,'r') as uf:
for line in uf:
umi_list.append(line.strip())
read_umi_map = dict()
for i in range(0,len(fastq_list)):
read_umi_map[fastq_list[i]] = umi_list[i]
return read_umi_map
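# --- Editorial usage sketch (hypothetical parameter values; not part of the original script) ---
# The helpers are normally chained per cell, driven by the pipeline's parameter dictionary:
#
#     parameterANT = {'STAR_OUT_PATH': '/data/star/', 'CELL_DIR': '/data/cells/'}
#     bam_to_sam(parameterANT, 'cellA')               # BAM -> SAM via samtools
#     alignments = trim_head(parameterANT, 'cellA')   # alignment lines without the header
#     read_umi = read_to_umi(parameterANT, 'cellA')   # read name -> UMI mapping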
| 32.959184 | 165 | 0.733746 |
hexsha: f25bc2afdafcc0b7febe60790874d3b5736137e1 | size: 14,269 | ext: py | lang: Python
max_stars: plugins/modules/oci_monitoring_metric_data_actions.py | slmjy/oci-ansible-collection | 349c91e2868bf4706a6e3d6fb3b47fc622bfe11b | ["Apache-2.0"] | 108 | 2020-05-19T20:46:10.000Z | 2022-03-25T14:10:01.000Z
max_issues: plugins/modules/oci_monitoring_metric_data_actions.py | slmjy/oci-ansible-collection | 349c91e2868bf4706a6e3d6fb3b47fc622bfe11b | ["Apache-2.0"] | 90 | 2020-06-14T22:07:11.000Z | 2022-03-07T05:40:29.000Z
max_forks: plugins/modules/oci_monitoring_metric_data_actions.py | slmjy/oci-ansible-collection | 349c91e2868bf4706a6e3d6fb3b47fc622bfe11b | ["Apache-2.0"] | 42 | 2020-08-30T23:09:12.000Z | 2022-03-25T16:58:01.000Z
#!/usr/bin/python
# Copyright (c) 2020, 2021 Oracle and/or its affiliates.
# This software is made available to you under the terms of the GPL 3.0 license or the Apache 2.0 license.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# Apache License v2.0
# See LICENSE.TXT for details.
# GENERATED FILE - DO NOT EDIT - MANUAL CHANGES WILL BE OVERWRITTEN
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {
"metadata_version": "1.1",
"status": ["preview"],
"supported_by": "community",
}
DOCUMENTATION = """
---
module: oci_monitoring_metric_data_actions
short_description: Perform actions on a MetricData resource in Oracle Cloud Infrastructure
description:
- Perform actions on a MetricData resource in Oracle Cloud Infrastructure
- "For I(action=summarize_metrics_data), returns aggregated data that match the criteria specified in the request. Compartment OCID required.
For information on metric queries, see L(Building Metric Queries,https://docs.cloud.oracle.com/iaas/Content/Monitoring/Tasks/buildingqueries.htm).
For important limits information, see L(Limits on
Monitoring,https://docs.cloud.oracle.com/iaas/Content/Monitoring/Concepts/monitoringoverview.htm#Limits).
Transactions Per Second (TPS) per-tenancy limit for this operation: 10."
version_added: "2.9.0"
author: Oracle (@oracle)
options:
compartment_id:
description:
- The L(OCID,https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm) of the compartment containing the
resources monitored by the metric that you are searching for. Use tenancyId to search in
the root compartment.
- "Example: `ocid1.compartment.oc1..exampleuniqueID`"
type: str
required: true
namespace:
description:
- The source service or application to use when searching for metric data points to aggregate.
- "Example: `oci_computeagent`"
type: str
required: true
resource_group:
description:
- Resource group that you want to match. A null value returns only metric data that has no resource groups. The specified resource group must exist
in the definition of the posted metric. Only one resource group can be applied per metric.
A valid resourceGroup value starts with an alphabetical character and includes only alphanumeric characters, periods (.), underscores (_), hyphens
(-), and dollar signs ($).
- "Example: `frontend-fleet`"
type: str
query:
description:
- "The Monitoring Query Language (MQL) expression to use when searching for metric data points to
aggregate. The query must specify a metric, statistic, and interval.
Supported values for interval depend on the specified time range. More interval values are supported for smaller time ranges.
You can optionally specify dimensions and grouping functions.
Supported grouping functions: `grouping()`, `groupBy()`."
- Construct your query to avoid exceeding limits on returned data. See L(MetricData Reference,https://docs.cloud.oracle.com/en-
us/iaas/api/#/en/monitoring/20180401/MetricData).
- For details about Monitoring Query Language (MQL), see
L(Monitoring Query Language (MQL) Reference,https://docs.cloud.oracle.com/iaas/Content/Monitoring/Reference/mql.htm).
For available dimensions, review the metric definition for the supported service.
See L(Supported Services,https://docs.cloud.oracle.com/iaas/Content/Monitoring/Concepts/monitoringoverview.htm#SupportedServices).
- "Example: `CpuUtilization[1m].sum()`"
type: str
required: true
start_time:
description:
- "The beginning of the time range to use when searching for metric data points.
Format is defined by RFC3339. The response includes metric data points for the startTime.
Default value: the timestamp 3 hours before the call was sent."
- "Example: `2019-02-01T01:02:29.600Z`"
type: str
end_time:
description:
- "The end of the time range to use when searching for metric data points.
Format is defined by RFC3339. The response excludes metric data points for the endTime.
Default value: the timestamp representing when the call was sent."
- "Example: `2019-02-01T02:02:29.600Z`"
type: str
resolution:
description:
- "The time between calculated aggregation windows. Use with the query interval to vary the
frequency at which aggregated data points are returned. For example, use a query interval of
5 minutes with a resolution of 1 minute to retrieve five-minute aggregations at a one-minute
frequency. The resolution must be equal or less than the interval in the query. The default
resolution is 1m (one minute). Supported values: `1m`-`60m`, `1h`-`24h`, `1d`."
- "Example: `5m`"
type: str
compartment_id_in_subtree:
description:
- When true, returns resources from all compartments and subcompartments. The parameter can
only be set to true when compartmentId is the tenancy OCID (the tenancy is the root compartment).
A true value requires the user to have tenancy-level permissions. If this requirement is not met,
then the call is rejected. When false, returns resources from only the compartment specified in
compartmentId. Default is false.
type: bool
action:
description:
- The action to perform on the MetricData.
type: str
required: true
choices:
- "summarize_metrics_data"
extends_documentation_fragment: [ oracle.oci.oracle ]
"""
EXAMPLES = """
- name: Perform action summarize_metrics_data on metric_data
oci_monitoring_metric_data_actions:
# required
compartment_id: "ocid1.compartment.oc1..xxxxxxEXAMPLExxxxxx"
namespace: namespace_example
query: query_example
action: summarize_metrics_data
# optional
resource_group: resource_group_example
start_time: start_time_example
end_time: end_time_example
resolution: resolution_example
compartment_id_in_subtree: true
"""
RETURN = """
metric_data:
description:
- Details of the MetricData resource acted upon by the current operation
returned: on success
type: complex
contains:
namespace:
description:
- The reference provided in a metric definition to indicate the source service or
application that emitted the metric.
- "Example: `oci_computeagent`"
returned: on success
type: str
sample: namespace_example
resource_group:
description:
- Resource group provided with the posted metric. A resource group is a custom string that you can match when retrieving custom metrics. Only
one resource group can be applied per metric.
A valid resourceGroup value starts with an alphabetical character and includes only alphanumeric characters, periods (.), underscores (_),
hyphens (-), and dollar signs ($).
- "Example: `frontend-fleet`"
returned: on success
type: str
sample: resource_group_example
compartment_id:
description:
- The L(OCID,https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm) of the compartment containing the
resources from which the aggregated data was returned.
returned: on success
type: str
sample: "ocid1.compartment.oc1..xxxxxxEXAMPLExxxxxx"
name:
description:
- The name of the metric.
- "Example: `CpuUtilization`"
returned: on success
type: str
sample: name_example
dimensions:
description:
- Qualifiers provided in the definition of the returned metric.
Available dimensions vary by metric namespace. Each dimension takes the form of a key-value pair.
- "Example: `\\"resourceId\\": \\"ocid1.instance.region1.phx.exampleuniqueID\\"`"
returned: on success
type: dict
sample: {}
metadata:
description:
- The references provided in a metric definition to indicate extra information about the metric.
- "Example: `\\"unit\\": \\"bytes\\"`"
returned: on success
type: dict
sample: {}
resolution:
description:
- "The time between calculated aggregation windows. Use with the query interval to vary the
frequency at which aggregated data points are returned. For example, use a query interval of
5 minutes with a resolution of 1 minute to retrieve five-minute aggregations at a one-minute
frequency. The resolution must be equal or less than the interval in the query. The default
resolution is 1m (one minute). Supported values: `1m`-`60m`, `1h`-`24h`, `1d`."
- "Example: `5m`"
returned: on success
type: str
sample: resolution_example
aggregated_datapoints:
description:
- The list of timestamp-value pairs returned for the specified request. Metric values are rolled up to the start time specified in the request.
For important limits information related to data points, see MetricData Reference at the top of this page.
returned: on success
type: complex
contains:
timestamp:
description:
- The date and time associated with the value of this data point. Format defined by RFC3339.
- "Example: `2019-02-01T01:02:29.600Z`"
returned: on success
type: str
sample: "2013-10-20T19:20:30+01:00"
value:
description:
- Numeric value of the metric.
- "Example: `10.4`"
returned: on success
type: float
sample: 1.2
sample: {
"namespace": "namespace_example",
"resource_group": "resource_group_example",
"compartment_id": "ocid1.compartment.oc1..xxxxxxEXAMPLExxxxxx",
"name": "name_example",
"dimensions": {},
"metadata": {},
"resolution": "resolution_example",
"aggregated_datapoints": [{
"timestamp": "2013-10-20T19:20:30+01:00",
"value": 1.2
}]
}
"""
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.oracle.oci.plugins.module_utils import (
oci_common_utils,
oci_wait_utils,
)
from ansible_collections.oracle.oci.plugins.module_utils.oci_resource_utils import (
OCIActionsHelperBase,
get_custom_class,
)
try:
from oci.monitoring import MonitoringClient
from oci.monitoring.models import SummarizeMetricsDataDetails
HAS_OCI_PY_SDK = True
except ImportError:
HAS_OCI_PY_SDK = False
class MetricDataActionsHelperGen(OCIActionsHelperBase):
"""
Supported actions:
summarize_metrics_data
"""
def summarize_metrics_data(self):
action_details = oci_common_utils.convert_input_data_to_model_class(
self.module.params, SummarizeMetricsDataDetails
)
return oci_wait_utils.call_and_wait(
call_fn=self.client.summarize_metrics_data,
call_fn_args=(),
call_fn_kwargs=dict(
compartment_id=self.module.params.get("compartment_id"),
summarize_metrics_data_details=action_details,
compartment_id_in_subtree=self.module.params.get(
"compartment_id_in_subtree"
),
),
waiter_type=oci_wait_utils.NONE_WAITER_KEY,
operation="{0}_{1}".format(
self.module.params.get("action").upper(),
oci_common_utils.ACTION_OPERATION_KEY,
),
waiter_client=self.get_waiter_client(),
resource_helper=self,
wait_for_states=self.get_action_desired_states(
self.module.params.get("action")
),
)
MetricDataActionsHelperCustom = get_custom_class("MetricDataActionsHelperCustom")
class ResourceHelper(MetricDataActionsHelperCustom, MetricDataActionsHelperGen):
pass
def main():
module_args = oci_common_utils.get_common_arg_spec(
supports_create=False, supports_wait=False
)
module_args.update(
dict(
compartment_id=dict(type="str", required=True),
namespace=dict(type="str", required=True),
resource_group=dict(type="str"),
query=dict(type="str", required=True),
start_time=dict(type="str"),
end_time=dict(type="str"),
resolution=dict(type="str"),
compartment_id_in_subtree=dict(type="bool"),
action=dict(type="str", required=True, choices=["summarize_metrics_data"]),
)
)
module = AnsibleModule(argument_spec=module_args, supports_check_mode=True)
if not HAS_OCI_PY_SDK:
module.fail_json(msg="oci python sdk required for this module.")
resource_helper = ResourceHelper(
module=module,
resource_type="metric_data",
service_client_class=MonitoringClient,
namespace="monitoring",
)
result = resource_helper.perform_action(module.params.get("action"))
module.exit_json(**result)
if __name__ == "__main__":
main()
| 43.503049 | 160 | 0.64139 |
hexsha: 173b9b8e7abd764d94a45ee0f5cf88f92ca940c1 | size: 864 | ext: py | lang: Python
max_stars: tests/basics/PrintFuture.py | hclivess/Nuitka | 9c7ec9696e69a3901b25d5bce720c921d45c931b | ["Apache-2.0"] | null | null | null
max_issues: tests/basics/PrintFuture.py | hclivess/Nuitka | 9c7ec9696e69a3901b25d5bce720c921d45c931b | ["Apache-2.0"] | 1 | 2019-03-01T11:33:40.000Z | 2019-03-01T11:33:40.000Z
max_forks: tests/basics/PrintFuture.py | hclivess/Nuitka | 9c7ec9696e69a3901b25d5bce720c921d45c931b | ["Apache-2.0"] | 1 | 2019-03-26T16:56:21.000Z | 2019-03-26T16:56:21.000Z
# Copyright 2019, Kay Hayen, mailto:[email protected]
#
# Python tests originally created or extracted from other peoples work. The
# parts were too small to be protected.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import print_function
print("hallo welt", end = ',')
print("this is the end")
| 39.272727 | 79 | 0.716435 |
hexsha: c6000061c0b27ff7d20013414ea22c2840c73e58 | size: 293 | ext: py | lang: Python
max_stars: python-intro/methods_list.py | elzasimoes/selenium-python | 50e4ad8a46864b06193eda09aa2a2a047f98974c | ["CC0-1.0"] | 2 | 2020-06-03T04:45:35.000Z | 2020-07-10T03:21:17.000Z
max_issues: python-intro/methods_list.py | elzasimoes/selenium-python | 50e4ad8a46864b06193eda09aa2a2a047f98974c | ["CC0-1.0"] | null | null | null
max_forks: python-intro/methods_list.py | elzasimoes/selenium-python | 50e4ad8a46864b06193eda09aa2a2a047f98974c | ["CC0-1.0"] | 2 | 2020-06-03T11:52:33.000Z | 2020-06-07T00:01:57.000Z
x = [1, 2, 3]
x.append(4)  # append: add to the end (queue-like usage)
print(f'Value after append: {x}')
x.insert(4, 0)  # insert: place 0 at index 4 (list usage)
print(f'Value after insert: {x}')
print(f'Count of 2: {x.count(2)}')  # count returns a value; it does not modify the list
x.remove(2)
print(f'Value after remove: {x}')
x.pop()  # pop: remove from the end (stack-like usage)
print(f'Value after pop: {x}')
x.reverse()
print(f'Value after reverse: {x}')
| 14.65 | 31 | 0.610922 |
hexsha: 31ecef9de8261886586eb1f9d059ac24519b18aa | size: 725 | ext: py | lang: Python
max_stars: tapis_cli/commands/taccapis/v2/actors/models/message.py | bpachev/tapis-cli | c3128fb5b63ef74e06b737bbd95ef28fb24f0d32 | ["BSD-3-Clause"] | 8 | 2020-10-18T22:48:23.000Z | 2022-01-10T09:16:14.000Z
max_issues: tapis_cli/commands/taccapis/v2/actors/models/message.py | bpachev/tapis-cli | c3128fb5b63ef74e06b737bbd95ef28fb24f0d32 | ["BSD-3-Clause"] | 238 | 2019-09-04T14:37:54.000Z | 2020-04-15T16:24:24.000Z
max_forks: tapis_cli/commands/taccapis/v2/actors/models/message.py | bpachev/tapis-cli | c3128fb5b63ef74e06b737bbd95ef28fb24f0d32 | ["BSD-3-Clause"] | 5 | 2019-09-20T04:23:49.000Z | 2020-01-16T17:45:14.000Z
"""Data model and functions for Tapis Message
"""
from tapis_cli.commands.taccapis.v2 import SERVICE_VERSION
from tapis_cli.commands.taccapis import TapisModel
from tapis_cli.display import Verbosity
from tapis_cli.search import argtype, argmod
__all__ = ['Message']
class Message(TapisModel):
service_id_type = 'Message'
SEARCH_ARGS = [
("executionId", argtype.STRING, Verbosity.BRIEF,
argmod.STRING_DEFAULTS, argmod.DEFAULT, None, None, False),
("msg", argtype.STRING, Verbosity.BRIEF, argmod.STRING_DEFAULTS,
argmod.DEFAULT, None, None, False),
("_links", argtype.ARRAY, Verbosity.VERBOSE, argmod.STRING_DEFAULTS,
argmod.DEFAULT, None, 'links', False)
]
| 31.521739 | 76 | 0.715862 |
hexsha: f81bcfe3cc50aaca22ed98918aa0d809d3348755 | size: 5,197 | ext: py | lang: Python
max_stars: dhelp/files/tests/test_csv.py | thePortus/dhelp | cac762f9a6e6e75f731219007153c0a1b4a685f8 | ["MIT"] | 3 | 2018-03-07T18:37:39.000Z | 2022-02-23T01:12:56.000Z
max_issues: dhelp/files/tests/test_csv.py | thePortus/dhelp | cac762f9a6e6e75f731219007153c0a1b4a685f8 | ["MIT"] | 18 | 2018-03-03T22:01:38.000Z | 2021-06-01T21:32:30.000Z
max_forks: dhelp/files/tests/test_csv.py | thePortus/dhelp | cac762f9a6e6e75f731219007153c0a1b4a685f8 | ["MIT"] | null | null | null
#!/usr/bin/python
import unittest
import os
import csv
import shutil
from ..csv import CSVFile
fixtures_src = os.path.join(
os.path.dirname(__file__),
'fixtures',
'csv'
)
fixtures_dest = os.path.join(
os.path.dirname(__file__),
'fixtures',
'.testing'
)
options = {
'silent': False
}
class CSVFileLayer:
@classmethod
def testSetUp(cls):
# remove any extant temp fixture files
if os.path.exists(fixtures_dest):
shutil.rmtree(fixtures_dest)
# ensure requisite parent dirs created, make them if not
if not os.path.exists(os.path.dirname(fixtures_dest)):
os.makedirs(os.path.dirname(fixtures_dest))
# copy fixture files to temp dir
shutil.copytree(fixtures_src, fixtures_dest)
@classmethod
def testTearDown(cls):
# destroy any temporary fixture files remaining
if os.path.exists(fixtures_dest):
shutil.rmtree(fixtures_dest)
class TestCSVFile(unittest.TestCase):
layer = CSVFileLayer
def test_load(self):
# first record should match
exempla = CSVFile(
os.path.join(fixtures_dest, 'fake_data.csv'),
options=options
)
exempla = exempla.load()[0]['text']
comparanda = 'This is the first record'
return self.assertEqual(exempla, comparanda)
def test_save(self):
# should correctly modified the first record
csv_records = []
# manually open csv file
with open(
os.path.join(
fixtures_dest,
'fake_data.csv'
),
'r+'
) as csv_file:
csv_reader = csv.DictReader(csv_file)
for csv_record in csv_reader:
csv_records.append(csv_record)
# alter first record, then save to file
csv_records[0]['text'] = 'Altered test record'
exempla = CSVFile(
os.path.join(fixtures_dest, 'fake_data.csv'),
options={'overwrite': True, 'silent': False}
)
exempla.save(
csv_records,
fieldnames=['id', 'text', 'notes'],
)
# manually reopen csv file to check for results
csv_records = []
with open(
os.path.join(
fixtures_dest,
'fake_data.csv'
),
'r+'
) as csv_file:
csv_reader = csv.DictReader(csv_file)
for csv_record in csv_reader:
csv_records.append(csv_record)
return self.assertEqual(csv_records[0]['text'], 'Altered test record')
def test_modify(self):
# should have modified first record
def modify_function(csv_record):
csv_record['text'] = 'Altered test record'
return csv_record
exempla = CSVFile(
os.path.join(fixtures_dest, 'fake_data.csv'),
options={'silent': False, 'overwrite': True}
)
exempla.modify(
os.path.join(fixtures_dest, 'fake_data_modified.csv'),
modify_function
)
# manually reopen csv file to check for results
csv_records = []
with open(
os.path.join(
fixtures_dest,
'fake_data_modified.csv'
),
'r+'
) as csv_file:
csv_reader = csv.DictReader(csv_file)
for csv_record in csv_reader:
csv_records.append(csv_record)
return self.assertEqual(csv_records[4]['text'], 'Altered test record')
def test_column_to_txts(self):
# should produce a folder of .txt files
exempla = ''
comparanda = 'This is the first record'
destination = os.path.join(
fixtures_dest,
'csv',
'txt'
)
CSVFile(
os.path.join(fixtures_dest, 'fake_data.csv'),
options=options
).column_to_txts(
destination=destination,
text_col='text',
filename_col='id',
options={'overwrite': True}
)
# open file manually to check for match
with open(
os.path.join(fixtures_dest, 'csv', 'txt', '1.txt'),
mode='r+'
) as readfile:
exempla = readfile.read()
return self.assertEqual(exempla, comparanda)
def test_context_manager(self):
exempla = CSVFile(
os.path.join(fixtures_dest, 'fake_data.csv'),
options=options
)
comparanda = 'Testing file'
with exempla as data_rows:
edited_rows = data_rows
for edited_row in edited_rows:
edited_row['text'] = 'Testing file'
exempla.save_data = edited_rows
# load manually to check
with open(
os.path.join(
fixtures_dest,
'fake_data.csv'
),
mode='r+'
) as csv_file:
csv_reader = csv.DictReader(csv_file)
# get value from text column of first row
exempla = next(csv_reader)['text']
return self.assertEqual(exempla, comparanda)
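# --- Editorial usage note (added for illustration) ---
# These tests are normally collected with the standard unittest runner, e.g.:
#
#     python -m unittest dhelp.files.tests.test_csv -v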
| 29.528409 | 78 | 0.558014 |
hexsha: ec9133b2daa330c465f7fe58c629fdf958d9e307 | size: 150 | ext: py | lang: Python
max_stars: sajou/elements/__init__.py | cristobaltapia/sajou | 3693149218bfec9e902e55a5341e22be7b522e29 | ["MIT"] | 1 | 2018-02-06T12:17:02.000Z | 2018-02-06T12:17:02.000Z
max_issues: sajou/elements/__init__.py | cristobaltapia/sajou | 3693149218bfec9e902e55a5341e22be7b522e29 | ["MIT"] | null | null | null
max_forks: sajou/elements/__init__.py | cristobaltapia/sajou | 3693149218bfec9e902e55a5341e22be7b522e29 | ["MIT"] | null | null | null
"""
This module contains the different elements available.
"""
from sajou.elements.beam2d import Beam2D
from sajou.elements.spring2d import Spring2D
| 21.428571 | 54 | 0.806667 |
hexsha: 50557403a7742745609067ddc067c75b139583eb | size: 975 | ext: py | lang: Python
max_stars: 8-patch-remove-unnecessary-files.py | muraoka7/tool4ipp | 28295ab1724410f8e1439b317f0fdb169a6e9a78 | ["Apache-2.0"] | 1 | 2020-07-16T17:41:36.000Z | 2020-07-16T17:41:36.000Z
max_issues: 8-patch-remove-unnecessary-files.py | muraoka7/tool4ipp | 28295ab1724410f8e1439b317f0fdb169a6e9a78 | ["Apache-2.0"] | 2 | 2021-09-08T01:44:37.000Z | 2022-03-12T00:18:04.000Z
max_forks: 8-patch-remove-unnecessary-files.py | muraoka7/tool4ipp | 28295ab1724410f8e1439b317f0fdb169a6e9a78 | ["Apache-2.0"] | null | null | null
#!/usr/bin/env python
# -*- coding:utf-8 -*-
#
# Copyright 2020 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import multiprocessing as mp
import os
from pathlib import Path
import sys
p = Path(sys.argv[1])
files = list(p.glob("*/*.org.*"))
files += list(p.glob("*/category.txt"))
files += list(p.glob("*/interlinks.json"))
files += list(p.glob("*/raw_text.txt"))
files += list(p.glob("*/meta.json"))
with mp.Pool(8) as p:
res = p.map(os.remove, files)
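# --- Editorial usage note (added for illustration; the directory name is an example) ---
# The script expects the corpus root directory as its only argument:
#
#     python 8-patch-remove-unnecessary-files.py /path/to/extracted-pages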
| 28.676471 | 74 | 0.712821 |
hexsha: abe863dcd15222d0176b70d3ddaa19581be1fa58 | size: 10,199 | ext: py | lang: Python
max_stars: boto/sdb/db/model.py | clouddocx/boto | 44978cb92a6020ea49a89662e85e1280e29c1350 | ["MIT"] | null | null | null
max_issues: boto/sdb/db/model.py | clouddocx/boto | 44978cb92a6020ea49a89662e85e1280e29c1350 | ["MIT"] | null | null | null
max_forks: boto/sdb/db/model.py | clouddocx/boto | 44978cb92a6020ea49a89662e85e1280e29c1350 | ["MIT"] | null | null | null
# Copyright (c) 2006,2007,2008 Mitch Garnaat http://garnaat.org/
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
from boto.sdb.db.property import Property
from boto.sdb.db.key import Key
from boto.sdb.db.query import Query
import boto
from boto.compat import filter
class ModelMeta(type):
"Metaclass for all Models"
def __init__(cls, name, bases, dict):
super(ModelMeta, cls).__init__(name, bases, dict)
# Make sure this is a subclass of Model - mainly copied from django ModelBase (thanks!)
cls.__sub_classes__ = []
# Do a delayed import to prevent possible circular import errors.
from boto.sdb.db.manager import get_manager
try:
if filter(lambda b: issubclass(b, Model), bases):
                for base in bases:
                    # Register this subclass on each Model base class.
                    base.__sub_classes__.append(cls)
cls._manager = get_manager(cls)
# look for all of the Properties and set their names
for key in dict.keys():
if isinstance(dict[key], Property):
property = dict[key]
property.__property_config__(cls, key)
prop_names = []
props = cls.properties()
for prop in props:
if not prop.__class__.__name__.startswith('_'):
prop_names.append(prop.name)
setattr(cls, '_prop_names', prop_names)
except NameError:
# 'Model' isn't defined yet, meaning we're looking at our own
# Model class, defined below.
pass
class Model(object):
__metaclass__ = ModelMeta
__consistent__ = False # Consistent is set off by default
id = None
@classmethod
def get_lineage(cls):
l = [c.__name__ for c in cls.mro()]
l.reverse()
return '.'.join(l)
@classmethod
def kind(cls):
return cls.__name__
@classmethod
def _get_by_id(cls, id, manager=None):
if not manager:
manager = cls._manager
return manager.get_object(cls, id)
@classmethod
def get_by_id(cls, ids=None, parent=None):
if isinstance(ids, list):
objs = [cls._get_by_id(id) for id in ids]
return objs
else:
return cls._get_by_id(ids)
get_by_ids = get_by_id
@classmethod
def get_by_key_name(cls, key_names, parent=None):
raise NotImplementedError("Key Names are not currently supported")
@classmethod
def find(cls, limit=None, next_token=None, **params):
q = Query(cls, limit=limit, next_token=next_token)
for key, value in params.items():
q.filter('%s =' % key, value)
return q
@classmethod
def all(cls, limit=None, next_token=None):
return cls.find(limit=limit, next_token=next_token)
@classmethod
    def get_or_insert(cls, key_name, **kw):
raise NotImplementedError("get_or_insert not currently supported")
@classmethod
def properties(cls, hidden=True):
properties = []
while cls:
for key in cls.__dict__.keys():
prop = cls.__dict__[key]
if isinstance(prop, Property):
if hidden or not prop.__class__.__name__.startswith('_'):
properties.append(prop)
if len(cls.__bases__) > 0:
cls = cls.__bases__[0]
else:
cls = None
return properties
@classmethod
def find_property(cls, prop_name):
property = None
while cls:
for key in cls.__dict__.keys():
prop = cls.__dict__[key]
if isinstance(prop, Property):
if not prop.__class__.__name__.startswith('_') and prop_name == prop.name:
property = prop
if len(cls.__bases__) > 0:
cls = cls.__bases__[0]
else:
cls = None
return property
@classmethod
def get_xmlmanager(cls):
if not hasattr(cls, '_xmlmanager'):
from boto.sdb.db.manager.xmlmanager import XMLManager
cls._xmlmanager = XMLManager(cls, None, None, None,
None, None, None, None, False)
return cls._xmlmanager
@classmethod
def from_xml(cls, fp):
xmlmanager = cls.get_xmlmanager()
return xmlmanager.unmarshal_object(fp)
def __init__(self, id=None, **kw):
self._loaded = False
# first try to initialize all properties to their default values
for prop in self.properties(hidden=False):
try:
setattr(self, prop.name, prop.default_value())
except ValueError:
pass
if 'manager' in kw:
self._manager = kw['manager']
self.id = id
for key in kw:
if key != 'manager':
                # We don't want any errors propagating up when loading an object,
                # so if it fails we just revert to its default value
try:
setattr(self, key, kw[key])
except Exception as e:
boto.log.exception(e)
def __repr__(self):
return '%s<%s>' % (self.__class__.__name__, self.id)
def __str__(self):
return str(self.id)
def __eq__(self, other):
return other and isinstance(other, Model) and self.id == other.id
def _get_raw_item(self):
return self._manager.get_raw_item(self)
def load(self):
if self.id and not self._loaded:
self._manager.load_object(self)
def reload(self):
if self.id:
self._loaded = False
self._manager.load_object(self)
def put(self, expected_value=None):
"""
Save this object as it is, with an optional expected value
:param expected_value: Optional tuple of Attribute, and Value that
must be the same in order to save this object. If this
condition is not met, an SDBResponseError will be raised with a
            Conflict status code.
:type expected_value: tuple or list
:return: This object
:rtype: :class:`boto.sdb.db.model.Model`
"""
self._manager.save_object(self, expected_value)
return self
save = put
def put_attributes(self, attrs):
"""
Save just these few attributes, not the whole object
:param attrs: Attributes to save, key->value dict
:type attrs: dict
:return: self
:rtype: :class:`boto.sdb.db.model.Model`
"""
assert(isinstance(attrs, dict)), "Argument must be a dict of key->values to save"
for prop_name in attrs:
value = attrs[prop_name]
prop = self.find_property(prop_name)
assert(prop), "Property not found: %s" % prop_name
self._manager.set_property(prop, self, prop_name, value)
self.reload()
return self
def delete_attributes(self, attrs):
"""
Delete just these attributes, not the whole object.
:param attrs: Attributes to save, as a list of string names
:type attrs: list
:return: self
:rtype: :class:`boto.sdb.db.model.Model`
"""
assert(isinstance(attrs, list)), "Argument must be a list of names of keys to delete."
self._manager.domain.delete_attributes(self.id, attrs)
self.reload()
return self
save_attributes = put_attributes
def delete(self):
self._manager.delete_object(self)
def key(self):
return Key(obj=self)
def set_manager(self, manager):
self._manager = manager
def to_dict(self):
props = {}
for prop in self.properties(hidden=False):
props[prop.name] = getattr(self, prop.name)
obj = {'properties': props,
'id': self.id}
return {self.__class__.__name__: obj}
def to_xml(self, doc=None):
xmlmanager = self.get_xmlmanager()
doc = xmlmanager.marshal_object(self, doc)
return doc
@classmethod
def find_subclass(cls, name):
"""Find a subclass with a given name"""
if name == cls.__name__:
return cls
for sc in cls.__sub_classes__:
r = sc.find_subclass(name)
if r is not None:
return r
class Expando(Model):
def __setattr__(self, name, value):
if name in self._prop_names:
object.__setattr__(self, name, value)
elif name.startswith('_'):
object.__setattr__(self, name, value)
elif name == 'id':
object.__setattr__(self, name, value)
else:
self._manager.set_key_value(self, name, value)
object.__setattr__(self, name, value)
def __getattr__(self, name):
if not name.startswith('_'):
value = self._manager.get_key_value(self, name)
if value:
object.__setattr__(self, name, value)
return value
raise AttributeError
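# Usage sketch (illustrative, not part of boto): how a Model subclass is typically
# declared and queried. The class and property names are hypothetical, and this
# assumes ModelMeta can obtain an SDB manager for the class as done above.
#
#     from boto.sdb.db.property import StringProperty
#
#     class Note(Model):
#         title = StringProperty()
#
#     note = Note(title="hello")
#     note.put()                           # persist through the class manager
#     for n in Note.find(title="hello"):   # equality-filter query
#         print(n.id, n.title)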
| 34.224832 | 95 | 0.596333 |
1457114d7427a190d9754d8e584e2ccbcfa65b78
| 4,417 |
py
|
Python
|
audio_encoder/audio.py
|
Domhnall-Liopa/Lip2Wav
|
236ae24cd7945da8a75ddea1cfdc3da271c3c59f
|
[
"MIT"
] | null | null | null |
audio_encoder/audio.py
|
Domhnall-Liopa/Lip2Wav
|
236ae24cd7945da8a75ddea1cfdc3da271c3c59f
|
[
"MIT"
] | null | null | null |
audio_encoder/audio.py
|
Domhnall-Liopa/Lip2Wav
|
236ae24cd7945da8a75ddea1cfdc3da271c3c59f
|
[
"MIT"
] | null | null | null |
from scipy.ndimage.morphology import binary_dilation
from audio_encoder.params_data import *
from pathlib import Path
from typing import Optional, Union
import numpy as np
import webrtcvad
import librosa
import struct
int16_max = (2 ** 15) - 1
def preprocess_wav(fpath_or_wav: Union[str, Path, np.ndarray],
source_sr: Optional[int] = None):
"""
Applies the preprocessing operations used in training the Speaker Encoder to a waveform
either on disk or in memory. The waveform will be resampled to match the data hyperparameters.
    :param fpath_or_wav: either a filepath to an audio file (many extensions are supported, not
    just .wav), or the waveform as a numpy array of floats.
:param source_sr: if passing an audio waveform, the sampling rate of the waveform before
preprocessing. After preprocessing, the waveform's sampling rate will match the data
hyperparameters. If passing a filepath, the sampling rate will be automatically detected and
this argument will be ignored.
"""
# Load the wav from disk if needed
if isinstance(fpath_or_wav, str) or isinstance(fpath_or_wav, Path):
wav, source_sr = librosa.load(fpath_or_wav, sr=None)
else:
wav = fpath_or_wav
# Resample the wav if needed
if source_sr is not None and source_sr != sampling_rate:
wav = librosa.resample(wav, source_sr, sampling_rate)
# Apply the preprocessing: normalize volume and shorten long silences
wav = normalize_volume(wav, audio_norm_target_dBFS, increase_only=True)
wav = trim_long_silences(wav)
return wav
def wav_to_mel_spectrogram(wav):
"""
Derives a mel spectrogram ready to be used by the encoder from a preprocessed audio waveform.
    Note: this is not a log-mel spectrogram.
"""
frames = librosa.feature.melspectrogram(
wav,
sampling_rate,
n_fft=int(sampling_rate * mel_window_length / 1000),
hop_length=int(sampling_rate * mel_window_step / 1000),
n_mels=mel_n_channels
)
return frames.astype(np.float32).T
def trim_long_silences(wav):
"""
Ensures that segments without voice in the waveform remain no longer than a
threshold determined by the VAD parameters in params.py.
:param wav: the raw waveform as a numpy array of floats
:return: the same waveform with silences trimmed away (length <= original wav length)
"""
# Compute the voice detection window size
samples_per_window = (vad_window_length * sampling_rate) // 1000
# Trim the end of the audio to have a multiple of the window size
wav = wav[:len(wav) - (len(wav) % samples_per_window)]
# Convert the float waveform to 16-bit mono PCM
pcm_wave = struct.pack("%dh" % len(wav), *(np.round(wav * int16_max)).astype(np.int16))
# Perform voice activation detection
voice_flags = []
vad = webrtcvad.Vad(mode=3)
for window_start in range(0, len(wav), samples_per_window):
window_end = window_start + samples_per_window
voice_flags.append(vad.is_speech(pcm_wave[window_start * 2:window_end * 2],
sample_rate=sampling_rate))
voice_flags = np.array(voice_flags)
# Smooth the voice detection with a moving average
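    # The helper below pads the flags and takes a cumulative sum, so each output
    # value is the mean of `width` consecutive flags without a Python-level loop.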
def moving_average(array, width):
array_padded = np.concatenate((np.zeros((width - 1) // 2), array, np.zeros(width // 2)))
ret = np.cumsum(array_padded, dtype=float)
ret[width:] = ret[width:] - ret[:-width]
return ret[width - 1:] / width
audio_mask = moving_average(voice_flags, vad_moving_average_width)
    audio_mask = np.round(audio_mask).astype(bool)  # plain bool; the np.bool alias was removed in recent NumPy
# Dilate the voiced regions
audio_mask = binary_dilation(audio_mask, np.ones(vad_max_silence_length + 1))
audio_mask = np.repeat(audio_mask, samples_per_window)
return wav[audio_mask == True]
def normalize_volume(wav, target_dBFS, increase_only=False, decrease_only=False):
if increase_only and decrease_only:
raise ValueError("Both increase only and decrease only are set")
rms = np.sqrt(np.mean((wav * int16_max) ** 2))
wave_dBFS = 20 * np.log10(rms / int16_max)
dBFS_change = target_dBFS - wave_dBFS
if dBFS_change < 0 and increase_only or dBFS_change > 0 and decrease_only:
return wav
return wav * (10 ** (dBFS_change / 20))
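# Usage sketch (illustrative): the typical preprocessing pipeline, assuming the
# constants imported from params_data are configured; the file name is hypothetical.
#
#     wav = preprocess_wav("speaker_utterance.flac")
#     mel = wav_to_mel_spectrogram(wav)   # shape: (n_frames, mel_n_channels)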
| 40.154545 | 98 | 0.696174 |
83bc961f91449acbb613b555a848043c1dc2c991
| 1,362 |
py
|
Python
|
test_autoarray/dataset/files/interferometer/fits_1d_maker.py
|
caoxiaoyue/PyAutoArray
|
e10d3d6a5b8dd031f2ad277486bd539bd5858b2a
|
[
"MIT"
] | 5 |
2019-09-26T02:18:25.000Z
|
2021-12-11T16:29:20.000Z
|
test_autoarray/dataset/files/interferometer/fits_1d_maker.py
|
caoxiaoyue/PyAutoArray
|
e10d3d6a5b8dd031f2ad277486bd539bd5858b2a
|
[
"MIT"
] | 3 |
2020-03-30T14:25:57.000Z
|
2021-12-21T17:10:55.000Z
|
test_autoarray/dataset/files/interferometer/fits_1d_maker.py
|
caoxiaoyue/PyAutoArray
|
e10d3d6a5b8dd031f2ad277486bd539bd5858b2a
|
[
"MIT"
] | 4 |
2020-03-03T11:35:41.000Z
|
2022-01-21T17:37:35.000Z
|
from astropy.io import fits
import numpy as np
from os import path
file_path = path.join(
"{}".format(path.dirname(path.realpath(__file__))), "files", "array"
)
array1 = np.ones((3))
array2 = 2.0 * np.ones((3))
array3 = 3.0 * np.ones((3))
array4 = 4.0 * np.ones((3))
array5 = 5.0 * np.ones((3))
array6 = 6.0 * np.ones((3))
array7 = 7.0 * np.ones((3))
array8 = 8.0 * np.ones((3))
# fits.writeto(simulator=array1, filename=path + "3_ones.fits", overwrite=True)
# fits.writeto(simulator=array2, filename=path + "3_twos.fits")
# fits.writeto(simulator=array3, filename=path + "3_threes.fits")
# fits.writeto(simulator=array4, filename=path + "3_fours.fits")
# fits.writeto(simulator=array5, filename=path + "3_fives.fits")
# fits.writeto(simulator=array6, filename=path + "3_sixes.fits")
# fits.writeto(simulator=array7, filename=path + "3_sevens.fits")
# fits.writeto(simulator=array8, filename=path + "3_eights.fits")
hdu_list = fits.HDUList()
hdu_list.append(fits.ImageHDU(array1))
hdu_list.append(fits.ImageHDU(array2))
hdu_list.append(fits.ImageHDU(array3))
hdu_list.append(fits.ImageHDU(array4))
hdu_list.append(fits.ImageHDU(array5))
hdu_list.append(fits.ImageHDU(array6))
hdu_list.append(fits.ImageHDU(array7))
hdu_list.append(fits.ImageHDU(array8))
hdu_list.writeto(path.join(file_path, "3_multiple_hdu.fits"))
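# Quick read-back check (illustrative sketch), assuming the write above succeeded:
#
#     with fits.open(path.join(file_path, "3_multiple_hdu.fits")) as hdus:
#         hdus.info()   # lists the HDUs and their dimensions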
| 35.842105 | 80 | 0.709985 |
82007ba9b50e432e08d654f7199b22bc209be5bd
| 1,345 |
py
|
Python
|
examples/boxplot.py
|
andrewhw/VizThoughts
|
738a6e05a22a6cb377b61b236c7b894e7d2194c6
|
[
"CC-BY-4.0"
] | null | null | null |
examples/boxplot.py
|
andrewhw/VizThoughts
|
738a6e05a22a6cb377b61b236c7b894e7d2194c6
|
[
"CC-BY-4.0"
] | null | null | null |
examples/boxplot.py
|
andrewhw/VizThoughts
|
738a6e05a22a6cb377b61b236c7b894e7d2194c6
|
[
"CC-BY-4.0"
] | null | null | null |
#!/usr/bin/env python
# Simple boxplot examples
import os
import pandas as pd
import seaborn as sns
from matplotlib import pyplot as plt
import math
import random
import datapoint_generator as gen # our own point generator
# check if we have some data, and if not, use our generator
# module to create some
if not os.path.exists("boxplot-data.csv"):
gen.generate_points("boxplot-data.csv")
# Data now created, so read into a pandas dataframe
df = pd.read_csv("boxplot-data.csv")
# set overall theme for seaborn plotting
sns.set_theme(style="ticks")
# get the figure when the canvas is blank -- once drawn, we can save it
fig = plt.figure()
# Draw on the canvas using a seaborn plotting tool.
# This creates a scatter plot for a single data set
ax = sns.boxplot(x="Source", y="Measure", data=df)
# Uncomment to add the points on top of the plot
## Add the "swarmed" datapoints on top of the current plot
#ax = sns.swarmplot(x="Source", y="Measure", data=df, size=3, color="k")
# reassign y axis label to a better value. Note the use of
# $$ "quoting" to enter LaTeX math mode to have access to the
# Greek letter "mu"
plt.ylabel(r'Measure [$\mu$S]')
# Remove the top and right box lines from the plot frame
sns.despine()
# save the canvas to a PDF file
fig.savefig('boxplot.pdf', bbox_inches="tight")
| 26.372549 | 72 | 0.733086 |
74669c503759416465dd26094d4790be54c5e6cc
| 10,483 |
py
|
Python
|
flatmc.py
|
iamholger/flatmc
|
aa2b5268988e15c36cfe826e5d1a62c86ce1ff63
|
[
"MIT"
] | 3 |
2019-03-27T18:36:59.000Z
|
2019-05-04T11:05:45.000Z
|
flatmc.py
|
SebastianMacaluso/flatmc
|
aa2b5268988e15c36cfe826e5d1a62c86ce1ff63
|
[
"MIT"
] | null | null | null |
flatmc.py
|
SebastianMacaluso/flatmc
|
aa2b5268988e15c36cfe826e5d1a62c86ce1ff63
|
[
"MIT"
] | 3 |
2019-04-29T15:54:47.000Z
|
2019-11-07T14:42:56.000Z
|
import numpy as np
from numba import jit, njit
from vector import Vec4
@njit
def u_2(r):
"""
Integration variable u_2 --- solution for r = 2u^1 - 1u^2
"""
return 1. - np.sqrt(1.-r)
@njit(fastmath=True)
def u_3(r):
"""
Integration variable u_3 --- solution for r = 3u^2 - 2u^3
"""
x = pow(1.-2.*r+2.*np.sqrt(r*(r-1.)+0.j),1./3.)
y = (2.-(1.-1.j*np.sqrt(3.))/x-(1.+1.j*np.sqrt(3.))*x)/4.
return y.real
@njit(fastmath=True)
def u_4(r):
"""
Integration variable u_4 --- solution for r = 4u^3 - 3u^4
"""
y = pow(r+np.sqrt(r*r*(1-r)+0.j),1./3.)
x = 3./2.*(r/y+y)
y = np.sqrt(1.+x)
z = (1.+y-np.sqrt(2.-x+2./y))/3.
return z.real
@njit(fastmath=True)
def f(x, a, r):
"""
The equation ax^(a-1) - (a-1)x^a - r = 0
To be used as argument in solver
"""
return a*x**(a-1) - (a-1)*x**a - r
@njit(fastmath=True)
def fp(x, a):
"""
First derivative of f
"""
return a*(a-1)*(x**(a-2) - x**(a-1))
@njit(fastmath=True)
def fpp(x, a):
"""
    Second derivative of f
"""
return a*(a-1)*((a-2)*x**(a-3) - (a-1)*x**(a-2))
def get_u(a, r):
"""
Solve f for u
a = n + 1 -i in Simon's notation
The lowest order case is n=3 and i = 2, i.e. a = 2
"""
if a < 2 : raise Exception("a = {} not implemented".format(a))
from scipy import optimize
if a == 2: return u_2(r)
elif a == 3: return u_3(r)
elif a == 4: return u_4(r)
else:
return optimize.newton(lambda x : f(x, a, r), r, fprime=lambda x: fp(x,a), fprime2=lambda x: fpp(x,a))
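# Sanity-check sketch (illustrative): for any a >= 2 the value returned by get_u
# should satisfy the defining equation, i.e. f(get_u(a, r), a, r) is close to zero.
#
#     r = 0.37                      # arbitrary value in (0, 1)
#     for a in (2, 3, 4, 7):        # closed-form cases plus one Newton case
#         assert abs(f(get_u(a, r), a, r)) < 1e-6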
def rho(Min, Mout, mext=0.0):
"""
Helper function for mass term eq (5)
"""
M2 = Min*Min
return 0.125 * np.sqrt( (M2 - (Mout+mext)*(Mout+mext)) * (M2 - (Mout-mext)*(Mout-mext))) / M2
def rho_massless(Min, Mout):
"""
Helper function for mass term eq (5)
"""
M2 = Min*Min
M22 = Mout*Mout
return 0.125 * np.sqrt( M2*M2 - 2*M2*M22 + M22*M22) / M2
def generate_point(pa,pb,rans):
# The final momenta
# MOM = [ -rans[-1]*pa, -rans[-2]*pb ]
MOM = [ -pa, -pb ] # NOTE this fixes the incoming momenta
_Q = -MOM[0]-MOM[1]
# Storage of intermediate Masses, Qs
M = [_Q.M()]
ALLQ =[_Q]
U, R = [], [] # Store the u and random numbers r
for i in range(2, NP+1):
# print("now i = {}".format(i))
if i < NP:
# print("Solving for u_{}, M_{}".format(i, i))
r = rans[3*(i-2)+2]
u = get_u(NP+1-i, r)
U.append(u)
R.append(r)
# Simon's paper must be wrong here, check
_M = np.sqrt(u*_Q.M2()) # M_i^2
else:
_M = 0
# print("Got M_{}={}".format(i, _M))
M.append(_M)
q = 4*M[-2] * rho_massless(M[-2], M[-1])
# Random numbers for costheta and phi
costheta = 2*rans[3*(i-2)] - 1
phi = 2.*np.pi*rans[3*(i-2)+1]
# Generated 4 Vectors
# p_(i-1)
sintheta = np.sqrt(1. - costheta*costheta)
p = q*Vec4(1, np.cos(phi)*sintheta, np.sin(phi)*sintheta, costheta)
# print("p_{} = {} {}".format(i+1, p, np.sqrt(abs(p.M2()))))
# now Q_i
_Q = Vec4(np.sqrt(q*q + M[-1]*M[-1]), -p.px, -p.py, -p.pz)
# print("Q_{} = {} {}".format(i, _Q, np.sqrt(abs(_Q.M2()))))
p = ALLQ[i-2].BoostBack(p)
_Q = ALLQ[i-2].BoostBack(_Q)
# print ALLQ[i-2]-_Q-p
# print "p boosted ",p,p.M2()
# print "Q boosted ",_Q,np.sqrt(abs(_Q.M2()))
# print "sum p+Q ",(p+_Q),(p+_Q).M()
MOM.append(p)
ALLQ.append(_Q)
MOM.append(_Q)
return MOM
def generate_weight(pa,pb,mom):
Q = -mom[0]-mom[1]
rans = []
for i in range(2, NP+1):
# print("now i = {}".format(i))
p = Q.Boost(mom[i])
# print 'p = ',p
costh = p[3]/p.P()
phi = p.Phi()
if phi < 0: phi += 2.*np.pi
# print "phi = ",phi
rans.append((1+costh)/2.)
rans.append(phi/(2.*np.pi))
if i < NP:
m = (Q-mom[i]).M2() / Q.M2()
u = f(m, NP+1-i, 0)
# print Q.M2(),(Q-mom[i]).M2(),(mom[3]+mom[4]).M2(),m,u
# print Q
Q -= mom[i]
# print Q
rans.append(u)
else:
_M = 0
rans.append(-(mom[1]*pa)/(pa*pb))
rans.append(-(mom[0]*pb)/(pa*pb))
return rans
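# Note: generate_weight is the inverse of generate_point; given the final-state
# momenta it reconstructs the random numbers, and the __main__ block below uses
# this as a round-trip consistency check on the phase-space mapping.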
def ME_ESW(P):
"""
Calculate the matrix element for g(p1) g(p2) --> g(p3) g(p4) g(p5)
Using eq (7.51) in QCD for collider physics.
P ... list of 4 momentum vectors
"""
from itertools import permutations
permutations=list(permutations([0,1,2,3,4])) # All 120 permutations
# M = const * A * B / C
# A = sum_permutations {1 2} ^ 4
A = 0
B = 0
for i in permutations:
A+= (P[i[0]] * P[i[1]])**4
B+= (P[i[0]] * P[i[1]]) * (P[i[1]] * P[i[2]]) * (P[i[2]] * P[i[3]]) * (P[i[3]] * P[i[4]]) * (P[i[4]] * P[i[0]])
C = 1
for i in range(5):
for j in range(5):
if i <j:
# print("i={}, j={}: {} * {} = {}".format(i, j, P[i], P[j], P[i]*P[j]))
C *= P[i]*P[j]
return A*B/C
def ME_PLB(P):
"""
Calculate the matrix element for g(p1) g(p2) --> g(p3) g(p4) g(p5)
Using eq (18) in Berends et al, Phys Let B 103 (1981) p 124 ff.
P ... list of 4 momentum vectors
"""
from itertools import permutations, combinations
permutations= [
(0,1,2,3,4),
(0,1,2,4,3),
(0,1,3,2,4),
(0,1,3,4,2),
(0,1,4,2,3),
(0,1,4,3,2),
(0,2,1,3,4),
(0,2,1,4,3),
(0,2,3,1,4),
(0,2,4,1,3),
(0,3,1,2,4),
(0,3,2,1,4),
]
kpermutations = list(combinations([0,1,2,3,4], 2))
# M = const * A * B / C
# A = sum_permutations {1 2} ^ 4
A = 0
for i in kpermutations:
A+= (P[i[0]] * P[i[1]])**4
B = 0
for i in permutations:
# print("(k{} * k{})^4".format(i[0]+1, i[1]+1))
B+= (P[i[0]] * P[i[1]]) * (P[i[1]] * P[i[2]]) * (P[i[2]] * P[i[3]]) * (P[i[3]] * P[i[4]]) * (P[i[4]] * P[i[0]])
C = 1
for i in range(5):
for j in range(5):
if i <j:
# print("i={}, j={}: {} * {} = {}".format(i, j, P[i], P[j], P[i]*P[j]))
C *= P[i]*P[j]
return A*B/C
if __name__ == "__main__":
import sys
np.random.seed(1)
pa = Vec4(7000,0,0,7000)
pb = Vec4(7000,0,0,-7000)
if len(sys.argv) <2:
print("Please specify the number of external particles, exiting")
sys.exit(1)
NP = int(sys.argv[1]) # Number of external particles
if NP<3:
print("NP should be >=3 for the whole thing to make sense, exiting")
sys.exit(1)
rans = [ np.random.rand() for i in range(0,3*NP-4+2) ]
moms = generate_point(pa,pb,rans)
msum = Vec4()
for num, mom in enumerate(moms):
msum += mom
print("p_{} = {} {}".format(num+1, mom, mom.M2()))
print("Mom sum {}".format(msum))
ranc = generate_weight(pa,pb,moms)
for r in range(0,len(rans)):
print("r_{} = {} -> dev. {}".format(r, ranc[r], ranc[r]/rans[r]-1))
print("120*Berends: {:.20f}".format(120*ME_PLB(moms)))
print("Ellis: {:.20f}".format(ME_ESW(moms)))
import time
t1=time.time()
Y = []
NSAMPLES=int(sys.argv[2])
X=[]
for _ in range(NSAMPLES):
rans = [ np.random.rand() for i in range(0,3*NP-4+2) ]
X.append(rans[0:5])
moms = generate_point(pa,pb,rans)
Y.append(ME_PLB(moms))
t2=time.time()
print("Generation of {} configuration took {} seconds".format(NSAMPLES, t2-t1))
import apprentice
t1=time.time()
apprentice.RationalApproximation(X,Y, order=(5,5), strategy=3)
t2=time.time()
print("Approximation took {} seconds".format(t2-t1))
# from IPython import embed
# embed()
# import matplotlib.pyplot as plt
# plt.style.use("ggplot")
# plt.xlabel("$\log_{10}(ME)$")
# plt.hist(np.log10(Y), bins=51,histtype='step', label="Exact")
# plt.yscale("log")
# import apprentice
# S=apprentice.Scaler(X)
# XX = S.scale(X)
# m=int(sys.argv[3])
# n=int(sys.argv[4])
# # R = apprentice.RationalApproximation(XX, H, order=(m,n))
# # R.save("approx_{}_{}.json".format(m,n))
# R=apprentice.RationalApproximation(fname="approx_1_12.json")
# from IPython import embed
# embed()
# HH = []
# t1=time.time()
# for x in XX:
# HH.append(R(x))
# t2=time.time()
# print("Evaluation of {} configuration took {} seconds".format(NSAMPLES, t2-t1))
# plt.hist(np.log10(HH), bins=51,histtype='step', label="Approx")
# plt.legend()
# plt.savefig("test_{}_{}.pdf".format(m,n))
# res = []
# for num, x in enumerate(XX):
# res.append((R(x) - H[num])/H[num])
# plt.clf()
# plt.hist(res, bins=5001)
# plt.xlim((-10,10))
# plt.yscale("log")
# plt.savefig("residual_{}_{}.pdf".format(m,n))
# sys.exit(1)
# for m in range(1, 2):
# for n in range(5, 15):
# print("Now ({},{})".format(m,n))
# R = apprentice.RationalApproximation(XX, H, order=(m,n))
# R.save("approx_{}_{}.json".format(m,n))
# res = []
# for num, x in enumerate(XX):
# res.append((R(x) - H[num])/H[num])
# plt.clf()
# plt.hist(res, bins=5000)
# plt.xlim((-10,10))
# plt.savefig("residual_{}_{}.pdf".format(m,n))
# m=int(sys.argv[3])
# n=int(sys.argv[4])
# R = apprentice.RationalApproximation(XX, H, order=(m,n))
# R.save("approx_{}_{}.json".format(m,n))
# # from IPython import embed
# # embed()
# res = []
# for num, x in enumerate(XX):
# res.append((R(x) - H[num])/H[num])
# plt.clf()
# plt.hist(res, bins=5000)
# plt.xlim((-10,10))
# plt.savefig("residual_{}_{}.pdf".format(m,n))
# from IPython import embed
# embed()
| 27.880319 | 120 | 0.470858 |
b1c51abece36b9763c5915fc2f6f985a3a1fbcff
| 45,238 |
py
|
Python
|
moto/sagemaker/models.py
|
thomassross/moto
|
407d5c853dbee9b9e132d97b41414b7dca475765
|
[
"Apache-2.0"
] | 1 |
2019-10-18T16:10:01.000Z
|
2019-10-18T16:10:01.000Z
|
moto/sagemaker/models.py
|
thomassross/moto
|
407d5c853dbee9b9e132d97b41414b7dca475765
|
[
"Apache-2.0"
] | 4 |
2017-09-30T07:52:52.000Z
|
2021-12-13T06:56:55.000Z
|
moto/sagemaker/models.py
|
thomassross/moto
|
407d5c853dbee9b9e132d97b41414b7dca475765
|
[
"Apache-2.0"
] | 2 |
2021-11-24T08:05:43.000Z
|
2021-11-25T16:18:48.000Z
|
from __future__ import unicode_literals
import os
from boto3 import Session
from datetime import datetime
from moto.core import ACCOUNT_ID, BaseBackend, BaseModel, CloudFormationModel
from moto.core.exceptions import RESTError
from moto.sagemaker import validators
from .exceptions import MissingModel, ValidationError
class BaseObject(BaseModel):
def camelCase(self, key):
words = []
for i, word in enumerate(key.split("_")):
words.append(word.title())
return "".join(words)
def gen_response_object(self):
response_object = dict()
for key, value in self.__dict__.items():
if "_" in key:
response_object[self.camelCase(key)] = value
else:
response_object[key[0].upper() + key[1:]] = value
return response_object
@property
def response_object(self):
return self.gen_response_object()
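# Note: gen_response_object converts snake_case attribute names into the CamelCase
# keys used by the SageMaker API responses, e.g. an attribute "training_job_name"
# is emitted as "TrainingJobName" in the resulting dict.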
class FakeTrainingJob(BaseObject):
def __init__(
self,
region_name,
training_job_name,
hyper_parameters,
algorithm_specification,
role_arn,
input_data_config,
output_data_config,
resource_config,
vpc_config,
stopping_condition,
tags,
enable_network_isolation,
enable_inter_container_traffic_encryption,
enable_managed_spot_training,
checkpoint_config,
debug_hook_config,
debug_rule_configurations,
tensor_board_output_config,
experiment_config,
):
self.training_job_name = training_job_name
self.hyper_parameters = hyper_parameters
self.algorithm_specification = algorithm_specification
self.role_arn = role_arn
self.input_data_config = input_data_config
self.output_data_config = output_data_config
self.resource_config = resource_config
self.vpc_config = vpc_config
self.stopping_condition = stopping_condition
self.tags = tags
self.enable_network_isolation = enable_network_isolation
self.enable_inter_container_traffic_encryption = (
enable_inter_container_traffic_encryption
)
self.enable_managed_spot_training = enable_managed_spot_training
self.checkpoint_config = checkpoint_config
self.debug_hook_config = debug_hook_config
self.debug_rule_configurations = debug_rule_configurations
self.tensor_board_output_config = tensor_board_output_config
self.experiment_config = experiment_config
self.training_job_arn = FakeTrainingJob.arn_formatter(
training_job_name, region_name
)
self.creation_time = self.last_modified_time = datetime.now().strftime(
"%Y-%m-%d %H:%M:%S"
)
self.model_artifacts = {
"S3ModelArtifacts": os.path.join(
self.output_data_config["S3OutputPath"],
self.training_job_name,
"output",
"model.tar.gz",
)
}
self.training_job_status = "Completed"
self.secondary_status = "Completed"
self.algorithm_specification["MetricDefinitions"] = [
{
"Name": "test:dcg",
"Regex": "#quality_metric: host=\\S+, test dcg <score>=(\\S+)",
}
]
now_string = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
self.creation_time = now_string
self.last_modified_time = now_string
self.training_start_time = now_string
self.training_end_time = now_string
self.secondary_status_transitions = [
{
"Status": "Starting",
"StartTime": self.creation_time,
"EndTime": self.creation_time,
"StatusMessage": "Preparing the instances for training",
}
]
self.final_metric_data_list = [
{
"MetricName": "train:progress",
"Value": 100.0,
"Timestamp": self.creation_time,
}
]
@property
def response_object(self):
response_object = self.gen_response_object()
return {
k: v for k, v in response_object.items() if v is not None and v != [None]
}
@property
def response_create(self):
return {"TrainingJobArn": self.training_job_arn}
@staticmethod
def arn_formatter(endpoint_name, region_name):
return (
"arn:aws:sagemaker:"
+ region_name
+ ":"
+ str(ACCOUNT_ID)
+ ":training-job/"
+ endpoint_name
)
class FakeEndpoint(BaseObject, CloudFormationModel):
def __init__(
self,
region_name,
endpoint_name,
endpoint_config_name,
production_variants,
data_capture_config,
tags,
):
self.endpoint_name = endpoint_name
self.endpoint_arn = FakeEndpoint.arn_formatter(endpoint_name, region_name)
self.endpoint_config_name = endpoint_config_name
self.production_variants = production_variants
self.data_capture_config = data_capture_config
self.tags = tags or []
self.endpoint_status = "InService"
self.failure_reason = None
self.creation_time = self.last_modified_time = datetime.now().strftime(
"%Y-%m-%d %H:%M:%S"
)
@property
def response_object(self):
response_object = self.gen_response_object()
return {
k: v for k, v in response_object.items() if v is not None and v != [None]
}
@property
def response_create(self):
return {"EndpointArn": self.endpoint_arn}
@staticmethod
def arn_formatter(endpoint_name, region_name):
return (
"arn:aws:sagemaker:"
+ region_name
+ ":"
+ str(ACCOUNT_ID)
+ ":endpoint/"
+ endpoint_name
)
@property
def physical_resource_id(self):
return self.endpoint_arn
def get_cfn_attribute(self, attribute_name):
# https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-sagemaker-endpoint.html#aws-resource-sagemaker-endpoint-return-values
from moto.cloudformation.exceptions import UnformattedGetAttTemplateException
if attribute_name == "EndpointName":
return self.endpoint_name
raise UnformattedGetAttTemplateException()
@staticmethod
def cloudformation_name_type():
return None
@staticmethod
def cloudformation_type():
# https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-sagemaker-endpoint.html
return "AWS::SageMaker::Endpoint"
@classmethod
def create_from_cloudformation_json(
cls, resource_name, cloudformation_json, region_name
):
sagemaker_backend = sagemaker_backends[region_name]
# Get required properties from provided CloudFormation template
properties = cloudformation_json["Properties"]
endpoint_config_name = properties["EndpointConfigName"]
endpoint = sagemaker_backend.create_endpoint(
endpoint_name=resource_name,
endpoint_config_name=endpoint_config_name,
tags=properties.get("Tags", []),
)
return endpoint
@classmethod
def update_from_cloudformation_json(
cls, original_resource, new_resource_name, cloudformation_json, region_name,
):
# Changes to the Endpoint will not change resource name
cls.delete_from_cloudformation_json(
original_resource.endpoint_arn, cloudformation_json, region_name
)
new_resource = cls.create_from_cloudformation_json(
original_resource.endpoint_name, cloudformation_json, region_name
)
return new_resource
@classmethod
def delete_from_cloudformation_json(
cls, resource_name, cloudformation_json, region_name
):
# Get actual name because resource_name actually provides the ARN
# since the Physical Resource ID is the ARN despite SageMaker
# using the name for most of its operations.
endpoint_name = resource_name.split("/")[-1]
sagemaker_backends[region_name].delete_endpoint(endpoint_name)
class FakeEndpointConfig(BaseObject, CloudFormationModel):
def __init__(
self,
region_name,
endpoint_config_name,
production_variants,
data_capture_config,
tags,
kms_key_id,
):
self.validate_production_variants(production_variants)
self.endpoint_config_name = endpoint_config_name
self.endpoint_config_arn = FakeEndpointConfig.arn_formatter(
endpoint_config_name, region_name
)
self.production_variants = production_variants or []
self.data_capture_config = data_capture_config or {}
self.tags = tags or []
self.kms_key_id = kms_key_id
self.creation_time = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
def validate_production_variants(self, production_variants):
for production_variant in production_variants:
self.validate_instance_type(production_variant["InstanceType"])
def validate_instance_type(self, instance_type):
VALID_INSTANCE_TYPES = [
"ml.r5d.12xlarge",
"ml.r5.12xlarge",
"ml.p2.xlarge",
"ml.m5.4xlarge",
"ml.m4.16xlarge",
"ml.r5d.24xlarge",
"ml.r5.24xlarge",
"ml.p3.16xlarge",
"ml.m5d.xlarge",
"ml.m5.large",
"ml.t2.xlarge",
"ml.p2.16xlarge",
"ml.m5d.12xlarge",
"ml.inf1.2xlarge",
"ml.m5d.24xlarge",
"ml.c4.2xlarge",
"ml.c5.2xlarge",
"ml.c4.4xlarge",
"ml.inf1.6xlarge",
"ml.c5d.2xlarge",
"ml.c5.4xlarge",
"ml.g4dn.xlarge",
"ml.g4dn.12xlarge",
"ml.c5d.4xlarge",
"ml.g4dn.2xlarge",
"ml.c4.8xlarge",
"ml.c4.large",
"ml.c5d.xlarge",
"ml.c5.large",
"ml.g4dn.4xlarge",
"ml.c5.9xlarge",
"ml.g4dn.16xlarge",
"ml.c5d.large",
"ml.c5.xlarge",
"ml.c5d.9xlarge",
"ml.c4.xlarge",
"ml.inf1.xlarge",
"ml.g4dn.8xlarge",
"ml.inf1.24xlarge",
"ml.m5d.2xlarge",
"ml.t2.2xlarge",
"ml.c5d.18xlarge",
"ml.m5d.4xlarge",
"ml.t2.medium",
"ml.c5.18xlarge",
"ml.r5d.2xlarge",
"ml.r5.2xlarge",
"ml.p3.2xlarge",
"ml.m5d.large",
"ml.m5.xlarge",
"ml.m4.10xlarge",
"ml.t2.large",
"ml.r5d.4xlarge",
"ml.r5.4xlarge",
"ml.m5.12xlarge",
"ml.m4.xlarge",
"ml.m5.24xlarge",
"ml.m4.2xlarge",
"ml.p2.8xlarge",
"ml.m5.2xlarge",
"ml.r5d.xlarge",
"ml.r5d.large",
"ml.r5.xlarge",
"ml.r5.large",
"ml.p3.8xlarge",
"ml.m4.4xlarge",
]
if not validators.is_one_of(instance_type, VALID_INSTANCE_TYPES):
message = "Value '{}' at 'instanceType' failed to satisfy constraint: Member must satisfy enum value set: {}".format(
instance_type, VALID_INSTANCE_TYPES
)
raise ValidationError(message=message)
@property
def response_object(self):
response_object = self.gen_response_object()
return {
k: v for k, v in response_object.items() if v is not None and v != [None]
}
@property
def response_create(self):
return {"EndpointConfigArn": self.endpoint_config_arn}
@staticmethod
def arn_formatter(model_name, region_name):
return (
"arn:aws:sagemaker:"
+ region_name
+ ":"
+ str(ACCOUNT_ID)
+ ":endpoint-config/"
+ model_name
)
@property
def physical_resource_id(self):
return self.endpoint_config_arn
def get_cfn_attribute(self, attribute_name):
# https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-sagemaker-endpointconfig.html#aws-resource-sagemaker-endpointconfig-return-values
from moto.cloudformation.exceptions import UnformattedGetAttTemplateException
if attribute_name == "EndpointConfigName":
return self.endpoint_config_name
raise UnformattedGetAttTemplateException()
@staticmethod
def cloudformation_name_type():
return None
@staticmethod
def cloudformation_type():
# https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-sagemaker-endpointconfig.html
return "AWS::SageMaker::EndpointConfig"
@classmethod
def create_from_cloudformation_json(
cls, resource_name, cloudformation_json, region_name
):
sagemaker_backend = sagemaker_backends[region_name]
# Get required properties from provided CloudFormation template
properties = cloudformation_json["Properties"]
production_variants = properties["ProductionVariants"]
endpoint_config = sagemaker_backend.create_endpoint_config(
endpoint_config_name=resource_name,
production_variants=production_variants,
data_capture_config=properties.get("DataCaptureConfig", {}),
kms_key_id=properties.get("KmsKeyId"),
tags=properties.get("Tags", []),
)
return endpoint_config
@classmethod
def update_from_cloudformation_json(
cls, original_resource, new_resource_name, cloudformation_json, region_name,
):
# Most changes to the endpoint config will change resource name for EndpointConfigs
cls.delete_from_cloudformation_json(
original_resource.endpoint_config_arn, cloudformation_json, region_name
)
new_resource = cls.create_from_cloudformation_json(
new_resource_name, cloudformation_json, region_name
)
return new_resource
@classmethod
def delete_from_cloudformation_json(
cls, resource_name, cloudformation_json, region_name
):
# Get actual name because resource_name actually provides the ARN
# since the Physical Resource ID is the ARN despite SageMaker
# using the name for most of its operations.
endpoint_config_name = resource_name.split("/")[-1]
sagemaker_backends[region_name].delete_endpoint_config(endpoint_config_name)
class Model(BaseObject, CloudFormationModel):
def __init__(
self,
region_name,
model_name,
execution_role_arn,
primary_container,
vpc_config,
containers=[],
tags=[],
):
self.model_name = model_name
self.creation_time = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
self.containers = containers
self.tags = tags
self.enable_network_isolation = False
self.vpc_config = vpc_config
self.primary_container = primary_container
self.execution_role_arn = execution_role_arn or "arn:test"
self.model_arn = self.arn_for_model_name(self.model_name, region_name)
@property
def response_object(self):
response_object = self.gen_response_object()
return {
k: v for k, v in response_object.items() if v is not None and v != [None]
}
@property
def response_create(self):
return {"ModelArn": self.model_arn}
@staticmethod
def arn_for_model_name(model_name, region_name):
return (
"arn:aws:sagemaker:"
+ region_name
+ ":"
+ str(ACCOUNT_ID)
+ ":model/"
+ model_name
)
@property
def physical_resource_id(self):
return self.model_arn
def get_cfn_attribute(self, attribute_name):
# https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-sagemaker-model.html#aws-resource-sagemaker-model-return-values
from moto.cloudformation.exceptions import UnformattedGetAttTemplateException
if attribute_name == "ModelName":
return self.model_name
raise UnformattedGetAttTemplateException()
@staticmethod
def cloudformation_name_type():
return None
@staticmethod
def cloudformation_type():
# https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-sagemaker-model.html
return "AWS::SageMaker::Model"
@classmethod
def create_from_cloudformation_json(
cls, resource_name, cloudformation_json, region_name
):
sagemaker_backend = sagemaker_backends[region_name]
# Get required properties from provided CloudFormation template
properties = cloudformation_json["Properties"]
execution_role_arn = properties["ExecutionRoleArn"]
primary_container = properties["PrimaryContainer"]
model = sagemaker_backend.create_model(
ModelName=resource_name,
ExecutionRoleArn=execution_role_arn,
PrimaryContainer=primary_container,
VpcConfig=properties.get("VpcConfig", {}),
Containers=properties.get("Containers", []),
Tags=properties.get("Tags", []),
)
return model
@classmethod
def update_from_cloudformation_json(
cls, original_resource, new_resource_name, cloudformation_json, region_name,
):
# Most changes to the model will change resource name for Models
cls.delete_from_cloudformation_json(
original_resource.model_arn, cloudformation_json, region_name
)
new_resource = cls.create_from_cloudformation_json(
new_resource_name, cloudformation_json, region_name
)
return new_resource
@classmethod
def delete_from_cloudformation_json(
cls, resource_name, cloudformation_json, region_name
):
# Get actual name because resource_name actually provides the ARN
# since the Physical Resource ID is the ARN despite SageMaker
# using the name for most of its operations.
model_name = resource_name.split("/")[-1]
sagemaker_backends[region_name].delete_model(model_name)
class VpcConfig(BaseObject):
def __init__(self, security_group_ids, subnets):
self.security_group_ids = security_group_ids
self.subnets = subnets
@property
def response_object(self):
response_object = self.gen_response_object()
return {
k: v for k, v in response_object.items() if v is not None and v != [None]
}
class Container(BaseObject):
def __init__(self, **kwargs):
self.container_hostname = kwargs.get("container_hostname", "localhost")
self.model_data_url = kwargs.get("data_url", "")
self.model_package_name = kwargs.get("package_name", "pkg")
self.image = kwargs.get("image", "")
self.environment = kwargs.get("environment", {})
@property
def response_object(self):
response_object = self.gen_response_object()
return {
k: v for k, v in response_object.items() if v is not None and v != [None]
}
class FakeSagemakerNotebookInstance(CloudFormationModel):
def __init__(
self,
region_name,
notebook_instance_name,
instance_type,
role_arn,
subnet_id,
security_group_ids,
kms_key_id,
tags,
lifecycle_config_name,
direct_internet_access,
volume_size_in_gb,
accelerator_types,
default_code_repository,
additional_code_repositories,
root_access,
):
self.validate_volume_size_in_gb(volume_size_in_gb)
self.validate_instance_type(instance_type)
self.region_name = region_name
self.notebook_instance_name = notebook_instance_name
self.instance_type = instance_type
self.role_arn = role_arn
self.subnet_id = subnet_id
self.security_group_ids = security_group_ids
self.kms_key_id = kms_key_id
self.tags = tags or []
self.lifecycle_config_name = lifecycle_config_name
self.direct_internet_access = direct_internet_access
self.volume_size_in_gb = volume_size_in_gb
self.accelerator_types = accelerator_types
self.default_code_repository = default_code_repository
self.additional_code_repositories = additional_code_repositories
self.root_access = root_access
self.status = None
self.creation_time = self.last_modified_time = datetime.now()
self.start()
def validate_volume_size_in_gb(self, volume_size_in_gb):
if not validators.is_integer_between(volume_size_in_gb, mn=5, optional=True):
message = "Invalid range for parameter VolumeSizeInGB, value: {}, valid range: 5-inf"
raise ValidationError(message=message)
def validate_instance_type(self, instance_type):
VALID_INSTANCE_TYPES = [
"ml.p2.xlarge",
"ml.m5.4xlarge",
"ml.m4.16xlarge",
"ml.t3.xlarge",
"ml.p3.16xlarge",
"ml.t2.xlarge",
"ml.p2.16xlarge",
"ml.c4.2xlarge",
"ml.c5.2xlarge",
"ml.c4.4xlarge",
"ml.c5d.2xlarge",
"ml.c5.4xlarge",
"ml.c5d.4xlarge",
"ml.c4.8xlarge",
"ml.c5d.xlarge",
"ml.c5.9xlarge",
"ml.c5.xlarge",
"ml.c5d.9xlarge",
"ml.c4.xlarge",
"ml.t2.2xlarge",
"ml.c5d.18xlarge",
"ml.t3.2xlarge",
"ml.t3.medium",
"ml.t2.medium",
"ml.c5.18xlarge",
"ml.p3.2xlarge",
"ml.m5.xlarge",
"ml.m4.10xlarge",
"ml.t2.large",
"ml.m5.12xlarge",
"ml.m4.xlarge",
"ml.t3.large",
"ml.m5.24xlarge",
"ml.m4.2xlarge",
"ml.p2.8xlarge",
"ml.m5.2xlarge",
"ml.p3.8xlarge",
"ml.m4.4xlarge",
]
if not validators.is_one_of(instance_type, VALID_INSTANCE_TYPES):
message = "Value '{}' at 'instanceType' failed to satisfy constraint: Member must satisfy enum value set: {}".format(
instance_type, VALID_INSTANCE_TYPES
)
raise ValidationError(message=message)
@property
def arn(self):
return (
"arn:aws:sagemaker:"
+ self.region_name
+ ":"
+ str(ACCOUNT_ID)
+ ":notebook-instance/"
+ self.notebook_instance_name
)
@property
def url(self):
return "{}.notebook.{}.sagemaker.aws".format(
self.notebook_instance_name, self.region_name
)
def start(self):
self.status = "InService"
@property
def is_deletable(self):
return self.status in ["Stopped", "Failed"]
def stop(self):
self.status = "Stopped"
@property
def physical_resource_id(self):
return self.arn
def get_cfn_attribute(self, attribute_name):
# https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-sagemaker-notebookinstance.html#aws-resource-sagemaker-notebookinstance-return-values
from moto.cloudformation.exceptions import UnformattedGetAttTemplateException
if attribute_name == "NotebookInstanceName":
return self.notebook_instance_name
raise UnformattedGetAttTemplateException()
@staticmethod
def cloudformation_name_type():
return None
@staticmethod
def cloudformation_type():
# https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-sagemaker-notebookinstance.html
return "AWS::SageMaker::NotebookInstance"
@classmethod
def create_from_cloudformation_json(
cls, resource_name, cloudformation_json, region_name
):
# Get required properties from provided CloudFormation template
properties = cloudformation_json["Properties"]
instance_type = properties["InstanceType"]
role_arn = properties["RoleArn"]
notebook = sagemaker_backends[region_name].create_notebook_instance(
notebook_instance_name=resource_name,
instance_type=instance_type,
role_arn=role_arn,
)
return notebook
@classmethod
def update_from_cloudformation_json(
cls, original_resource, new_resource_name, cloudformation_json, region_name,
):
# Operations keep same resource name so delete old and create new to mimic update
cls.delete_from_cloudformation_json(
original_resource.arn, cloudformation_json, region_name
)
new_resource = cls.create_from_cloudformation_json(
original_resource.notebook_instance_name, cloudformation_json, region_name
)
return new_resource
@classmethod
def delete_from_cloudformation_json(
cls, resource_name, cloudformation_json, region_name
):
# Get actual name because resource_name actually provides the ARN
# since the Physical Resource ID is the ARN despite SageMaker
# using the name for most of its operations.
notebook_instance_name = resource_name.split("/")[-1]
backend = sagemaker_backends[region_name]
backend.stop_notebook_instance(notebook_instance_name)
backend.delete_notebook_instance(notebook_instance_name)
class FakeSageMakerNotebookInstanceLifecycleConfig(BaseObject, CloudFormationModel):
def __init__(
self, region_name, notebook_instance_lifecycle_config_name, on_create, on_start
):
self.region_name = region_name
self.notebook_instance_lifecycle_config_name = (
notebook_instance_lifecycle_config_name
)
self.on_create = on_create
self.on_start = on_start
self.creation_time = self.last_modified_time = datetime.now().strftime(
"%Y-%m-%d %H:%M:%S"
)
self.notebook_instance_lifecycle_config_arn = FakeSageMakerNotebookInstanceLifecycleConfig.arn_formatter(
self.notebook_instance_lifecycle_config_name, self.region_name
)
@staticmethod
def arn_formatter(notebook_instance_lifecycle_config_name, region_name):
return (
"arn:aws:sagemaker:"
+ region_name
+ ":"
+ str(ACCOUNT_ID)
+ ":notebook-instance-lifecycle-configuration/"
+ notebook_instance_lifecycle_config_name
)
@property
def response_object(self):
response_object = self.gen_response_object()
return {
k: v for k, v in response_object.items() if v is not None and v != [None]
}
@property
def response_create(self):
return {"TrainingJobArn": self.training_job_arn}
@property
def physical_resource_id(self):
return self.notebook_instance_lifecycle_config_arn
def get_cfn_attribute(self, attribute_name):
# https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-sagemaker-notebookinstancelifecycleconfig.html#aws-resource-sagemaker-notebookinstancelifecycleconfig-return-values
from moto.cloudformation.exceptions import UnformattedGetAttTemplateException
if attribute_name == "NotebookInstanceLifecycleConfigName":
return self.notebook_instance_lifecycle_config_name
raise UnformattedGetAttTemplateException()
@staticmethod
def cloudformation_name_type():
return None
@staticmethod
def cloudformation_type():
# https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-sagemaker-notebookinstancelifecycleconfig.html
return "AWS::SageMaker::NotebookInstanceLifecycleConfig"
@classmethod
def create_from_cloudformation_json(
cls, resource_name, cloudformation_json, region_name
):
properties = cloudformation_json["Properties"]
config = sagemaker_backends[
region_name
].create_notebook_instance_lifecycle_config(
notebook_instance_lifecycle_config_name=resource_name,
on_create=properties.get("OnCreate"),
on_start=properties.get("OnStart"),
)
return config
@classmethod
def update_from_cloudformation_json(
cls, original_resource, new_resource_name, cloudformation_json, region_name,
):
# Operations keep same resource name so delete old and create new to mimic update
cls.delete_from_cloudformation_json(
original_resource.notebook_instance_lifecycle_config_arn,
cloudformation_json,
region_name,
)
new_resource = cls.create_from_cloudformation_json(
original_resource.notebook_instance_lifecycle_config_name,
cloudformation_json,
region_name,
)
return new_resource
@classmethod
def delete_from_cloudformation_json(
cls, resource_name, cloudformation_json, region_name
):
# Get actual name because resource_name actually provides the ARN
# since the Physical Resource ID is the ARN despite SageMaker
# using the name for most of its operations.
config_name = resource_name.split("/")[-1]
backend = sagemaker_backends[region_name]
backend.delete_notebook_instance_lifecycle_config(config_name)
class SageMakerModelBackend(BaseBackend):
def __init__(self, region_name=None):
self._models = {}
self.notebook_instances = {}
self.endpoint_configs = {}
self.endpoints = {}
self.training_jobs = {}
self.notebook_instance_lifecycle_configurations = {}
self.region_name = region_name
def reset(self):
region_name = self.region_name
self.__dict__ = {}
self.__init__(region_name)
def create_model(self, **kwargs):
model_obj = Model(
region_name=self.region_name,
model_name=kwargs.get("ModelName"),
execution_role_arn=kwargs.get("ExecutionRoleArn"),
primary_container=kwargs.get("PrimaryContainer", {}),
vpc_config=kwargs.get("VpcConfig", {}),
containers=kwargs.get("Containers", []),
tags=kwargs.get("Tags", []),
)
self._models[kwargs.get("ModelName")] = model_obj
return model_obj
def describe_model(self, model_name=None):
model = self._models.get(model_name)
if model:
return model
message = "Could not find model '{}'.".format(
Model.arn_for_model_name(model_name, self.region_name)
)
raise ValidationError(message=message)
def list_models(self):
return self._models.values()
def delete_model(self, model_name=None):
for model in self._models.values():
if model.model_name == model_name:
self._models.pop(model.model_name)
break
else:
raise MissingModel(model=model_name)
def create_notebook_instance(
self,
notebook_instance_name,
instance_type,
role_arn,
subnet_id=None,
security_group_ids=None,
kms_key_id=None,
tags=None,
lifecycle_config_name=None,
direct_internet_access="Enabled",
volume_size_in_gb=5,
accelerator_types=None,
default_code_repository=None,
additional_code_repositories=None,
root_access=None,
):
self._validate_unique_notebook_instance_name(notebook_instance_name)
notebook_instance = FakeSagemakerNotebookInstance(
region_name=self.region_name,
notebook_instance_name=notebook_instance_name,
instance_type=instance_type,
role_arn=role_arn,
subnet_id=subnet_id,
security_group_ids=security_group_ids,
kms_key_id=kms_key_id,
tags=tags,
lifecycle_config_name=lifecycle_config_name,
direct_internet_access=direct_internet_access
if direct_internet_access is not None
else "Enabled",
volume_size_in_gb=volume_size_in_gb if volume_size_in_gb is not None else 5,
accelerator_types=accelerator_types,
default_code_repository=default_code_repository,
additional_code_repositories=additional_code_repositories,
root_access=root_access,
)
self.notebook_instances[notebook_instance_name] = notebook_instance
return notebook_instance
def _validate_unique_notebook_instance_name(self, notebook_instance_name):
if notebook_instance_name in self.notebook_instances:
duplicate_arn = self.notebook_instances[notebook_instance_name].arn
message = "Cannot create a duplicate Notebook Instance ({})".format(
duplicate_arn
)
raise ValidationError(message=message)
def get_notebook_instance(self, notebook_instance_name):
try:
return self.notebook_instances[notebook_instance_name]
except KeyError:
raise ValidationError(message="RecordNotFound")
def get_notebook_instance_by_arn(self, arn):
instances = [
notebook_instance
for notebook_instance in self.notebook_instances.values()
if notebook_instance.arn == arn
]
if len(instances) == 0:
raise ValidationError(message="RecordNotFound")
return instances[0]
def start_notebook_instance(self, notebook_instance_name):
notebook_instance = self.get_notebook_instance(notebook_instance_name)
notebook_instance.start()
def stop_notebook_instance(self, notebook_instance_name):
notebook_instance = self.get_notebook_instance(notebook_instance_name)
notebook_instance.stop()
def delete_notebook_instance(self, notebook_instance_name):
notebook_instance = self.get_notebook_instance(notebook_instance_name)
if not notebook_instance.is_deletable:
message = "Status ({}) not in ([Stopped, Failed]). Unable to transition to (Deleting) for Notebook Instance ({})".format(
notebook_instance.status, notebook_instance.arn
)
raise ValidationError(message=message)
del self.notebook_instances[notebook_instance_name]
def get_notebook_instance_tags(self, arn):
try:
notebook_instance = self.get_notebook_instance_by_arn(arn)
return notebook_instance.tags or []
except RESTError:
return []
def create_notebook_instance_lifecycle_config(
self, notebook_instance_lifecycle_config_name, on_create, on_start
):
if (
notebook_instance_lifecycle_config_name
in self.notebook_instance_lifecycle_configurations
):
message = "Unable to create Notebook Instance Lifecycle Config {}. (Details: Notebook Instance Lifecycle Config already exists.)".format(
FakeSageMakerNotebookInstanceLifecycleConfig.arn_formatter(
notebook_instance_lifecycle_config_name, self.region_name
)
)
raise ValidationError(message=message)
lifecycle_config = FakeSageMakerNotebookInstanceLifecycleConfig(
region_name=self.region_name,
notebook_instance_lifecycle_config_name=notebook_instance_lifecycle_config_name,
on_create=on_create,
on_start=on_start,
)
self.notebook_instance_lifecycle_configurations[
notebook_instance_lifecycle_config_name
] = lifecycle_config
return lifecycle_config
def describe_notebook_instance_lifecycle_config(
self, notebook_instance_lifecycle_config_name
):
try:
return self.notebook_instance_lifecycle_configurations[
notebook_instance_lifecycle_config_name
].response_object
except KeyError:
message = "Unable to describe Notebook Instance Lifecycle Config '{}'. (Details: Notebook Instance Lifecycle Config does not exist.)".format(
FakeSageMakerNotebookInstanceLifecycleConfig.arn_formatter(
notebook_instance_lifecycle_config_name, self.region_name
)
)
raise ValidationError(message=message)
def delete_notebook_instance_lifecycle_config(
self, notebook_instance_lifecycle_config_name
):
try:
del self.notebook_instance_lifecycle_configurations[
notebook_instance_lifecycle_config_name
]
except KeyError:
message = "Unable to delete Notebook Instance Lifecycle Config '{}'. (Details: Notebook Instance Lifecycle Config does not exist.)".format(
FakeSageMakerNotebookInstanceLifecycleConfig.arn_formatter(
notebook_instance_lifecycle_config_name, self.region_name
)
)
raise ValidationError(message=message)
def create_endpoint_config(
self,
endpoint_config_name,
production_variants,
data_capture_config,
tags,
kms_key_id,
):
endpoint_config = FakeEndpointConfig(
region_name=self.region_name,
endpoint_config_name=endpoint_config_name,
production_variants=production_variants,
data_capture_config=data_capture_config,
tags=tags,
kms_key_id=kms_key_id,
)
self.validate_production_variants(production_variants)
self.endpoint_configs[endpoint_config_name] = endpoint_config
return endpoint_config
def validate_production_variants(self, production_variants):
for production_variant in production_variants:
if production_variant["ModelName"] not in self._models:
message = "Could not find model '{}'.".format(
Model.arn_for_model_name(
production_variant["ModelName"], self.region_name
)
)
raise ValidationError(message=message)
def describe_endpoint_config(self, endpoint_config_name):
try:
return self.endpoint_configs[endpoint_config_name].response_object
except KeyError:
message = "Could not find endpoint configuration '{}'.".format(
FakeEndpointConfig.arn_formatter(endpoint_config_name, self.region_name)
)
raise ValidationError(message=message)
def delete_endpoint_config(self, endpoint_config_name):
try:
del self.endpoint_configs[endpoint_config_name]
except KeyError:
message = "Could not find endpoint configuration '{}'.".format(
FakeEndpointConfig.arn_formatter(endpoint_config_name, self.region_name)
)
raise ValidationError(message=message)
def create_endpoint(
self, endpoint_name, endpoint_config_name, tags,
):
try:
endpoint_config = self.describe_endpoint_config(endpoint_config_name)
except KeyError:
message = "Could not find endpoint_config '{}'.".format(
FakeEndpointConfig.arn_formatter(endpoint_config_name, self.region_name)
)
raise ValidationError(message=message)
endpoint = FakeEndpoint(
region_name=self.region_name,
endpoint_name=endpoint_name,
endpoint_config_name=endpoint_config_name,
production_variants=endpoint_config["ProductionVariants"],
data_capture_config=endpoint_config["DataCaptureConfig"],
tags=tags,
)
self.endpoints[endpoint_name] = endpoint
return endpoint
def describe_endpoint(self, endpoint_name):
try:
return self.endpoints[endpoint_name].response_object
except KeyError:
message = "Could not find endpoint configuration '{}'.".format(
FakeEndpoint.arn_formatter(endpoint_name, self.region_name)
)
raise ValidationError(message=message)
def delete_endpoint(self, endpoint_name):
try:
del self.endpoints[endpoint_name]
except KeyError:
message = "Could not find endpoint configuration '{}'.".format(
FakeEndpoint.arn_formatter(endpoint_name, self.region_name)
)
raise ValidationError(message=message)
def get_endpoint_by_arn(self, arn):
endpoints = [
endpoint
for endpoint in self.endpoints.values()
if endpoint.endpoint_arn == arn
]
if len(endpoints) == 0:
message = "RecordNotFound"
raise ValidationError(message=message)
return endpoints[0]
def get_endpoint_tags(self, arn):
try:
endpoint = self.get_endpoint_by_arn(arn)
return endpoint.tags or []
except RESTError:
return []
def create_training_job(
self,
training_job_name,
hyper_parameters,
algorithm_specification,
role_arn,
input_data_config,
output_data_config,
resource_config,
vpc_config,
stopping_condition,
tags,
enable_network_isolation,
enable_inter_container_traffic_encryption,
enable_managed_spot_training,
checkpoint_config,
debug_hook_config,
debug_rule_configurations,
tensor_board_output_config,
experiment_config,
):
training_job = FakeTrainingJob(
region_name=self.region_name,
training_job_name=training_job_name,
hyper_parameters=hyper_parameters,
algorithm_specification=algorithm_specification,
role_arn=role_arn,
input_data_config=input_data_config,
output_data_config=output_data_config,
resource_config=resource_config,
vpc_config=vpc_config,
stopping_condition=stopping_condition,
tags=tags,
enable_network_isolation=enable_network_isolation,
enable_inter_container_traffic_encryption=enable_inter_container_traffic_encryption,
enable_managed_spot_training=enable_managed_spot_training,
checkpoint_config=checkpoint_config,
debug_hook_config=debug_hook_config,
debug_rule_configurations=debug_rule_configurations,
tensor_board_output_config=tensor_board_output_config,
experiment_config=experiment_config,
)
self.training_jobs[training_job_name] = training_job
return training_job
def describe_training_job(self, training_job_name):
try:
return self.training_jobs[training_job_name].response_object
except KeyError:
message = "Could not find training job '{}'.".format(
FakeTrainingJob.arn_formatter(training_job_name, self.region_name)
)
raise ValidationError(message=message)
def delete_training_job(self, training_job_name):
try:
del self.training_jobs[training_job_name]
except KeyError:
message = "Could not find endpoint configuration '{}'.".format(
FakeTrainingJob.arn_formatter(training_job_name, self.region_name)
)
raise ValidationError(message=message)
def get_training_job_by_arn(self, arn):
training_jobs = [
training_job
for training_job in self.training_jobs.values()
if training_job.training_job_arn == arn
]
if len(training_jobs) == 0:
raise ValidationError(message="RecordNotFound")
return training_jobs[0]
def get_training_job_tags(self, arn):
try:
training_job = self.get_training_job_by_arn(arn)
return training_job.tags or []
except RESTError:
return []
sagemaker_backends = {}
for region in Session().get_available_regions("sagemaker"):
sagemaker_backends[region] = SageMakerModelBackend(region)
for region in Session().get_available_regions("sagemaker", partition_name="aws-us-gov"):
sagemaker_backends[region] = SageMakerModelBackend(region)
for region in Session().get_available_regions("sagemaker", partition_name="aws-cn"):
sagemaker_backends[region] = SageMakerModelBackend(region)
| 36.07496 | 201 | 0.64479 |
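A short, hedged usage sketch of the per-region backends assembled above. The region key and ARN are illustrative, and the import path assumes this file is moto's sagemaker models module; lookups by ARN raise ValidationError for unknown resources, while the tag helpers swallow that error and fall back to an empty list.
from moto.sagemaker.models import sagemaker_backends  # assumed import path

backend = sagemaker_backends["us-east-1"]
missing_arn = (
    "arn:aws:sagemaker:us-east-1:123456789012:training-job/does-not-exist"
)
# get_training_job_tags() catches the RESTError raised for unknown ARNs
# and falls back to an empty tag list.
assert backend.get_training_job_tags(missing_arn) == []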
63042962c5ce7ccb79fb6a8aa2af3d668cf0e0f4
| 1,000 |
py
|
Python
|
encargo-backend/app/encargoapi/database.py
|
coonprogrammer/encargoexpress
|
ca8d9eeec47f59486113780e0b1c2a54dcc89043
|
[
"MIT"
] | null | null | null |
encargo-backend/app/encargoapi/database.py
|
coonprogrammer/encargoexpress
|
ca8d9eeec47f59486113780e0b1c2a54dcc89043
|
[
"MIT"
] | null | null | null |
encargo-backend/app/encargoapi/database.py
|
coonprogrammer/encargoexpress
|
ca8d9eeec47f59486113780e0b1c2a54dcc89043
|
[
"MIT"
] | null | null | null |
from sqlalchemy import event
from sqlalchemy.ext.declarative import as_declarative, declared_attr
from sqlalchemy.orm import sessionmaker
from encargoapi import db
from encargoapi.journal.model import Journal
class ModelBase(object):
created_on = db.Column(db.DateTime, default=db.func.now())
updated_on = db.Column(db.DateTime, default=db.func.now(), onupdate=db.func.now())
@declared_attr
def __tablename__(self):
return self.__name__.lower()
def get_dict(self):
return {
item.key:self.__dict__[item.key] for item in self.__table__.columns.values()
}
def on_model_change(self, form, model, is_created=False):
journal = Journal(
table = self.__name__.lower(),
field_name = '',
new_value = '',
)
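        # NOTE: this journal entry is only constructed; nothing adds it to a
        # session or commits it, so on_model_change currently has no effect.
        # A complete implementation would presumably persist it, e.g. via
        # db.session.add(journal).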
# def my_before_commit(session):
# import ipdb;ipdb.set_trace()
# print "before commit!"
# Session = sessionmaker()
# event.listen(Session, "before_commit", my_before_commit)
| 27.777778 | 88 | 0.678 |
e6bdcffbf72025a38d3619e1633923e63819c6db
| 1,306 |
py
|
Python
|
tensorflow/python/estimator/model_fn.py
|
uve/tensorflow
|
e08079463bf43e5963acc41da1f57e95603f8080
|
[
"Apache-2.0"
] | null | null | null |
tensorflow/python/estimator/model_fn.py
|
uve/tensorflow
|
e08079463bf43e5963acc41da1f57e95603f8080
|
[
"Apache-2.0"
] | null | null | null |
tensorflow/python/estimator/model_fn.py
|
uve/tensorflow
|
e08079463bf43e5963acc41da1f57e95603f8080
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""model_fn python module.
Importing from tensorflow.python.estimator is unsupported
and will soon break!
"""
# pylint: disable=unused-import,g-bad-import-order,g-import-not-at-top,wildcard-import
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow_estimator.python.estimator import model_fn
# Include attrs that start with single underscore.
_HAS_DYNAMIC_ATTRIBUTES = True
model_fn.__all__ = [s for s in dir(model_fn) if not s.startswith('__')]
from tensorflow_estimator.python.estimator.model_fn import *
| 39.575758 | 87 | 0.722818 |
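Since the module above only re-exports tensorflow_estimator's model_fn, downstream code should build EstimatorSpec objects through the public API rather than tensorflow.python.estimator. A minimal, hedged illustration follows (assumes a TensorFlow install that still ships the estimator package; the toy logits tensor is not from the original code).
import tensorflow as tf

# Estimator model_fns run in graph mode, so build the spec inside a graph.
graph = tf.Graph()
with graph.as_default():
    # Predict-only spec for illustration; TRAIN/EVAL modes would also
    # require loss (and train_op for TRAIN).
    spec = tf.estimator.EstimatorSpec(
        mode=tf.estimator.ModeKeys.PREDICT,
        predictions={"logits": tf.constant([[0.0]])},
    )
print(spec.mode)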
7af49d7fc0040f05f46f5d6fa3bc8cb556432749
| 570 |
py
|
Python
|
src/network/bo/peer.py
|
TimHabeck/blockchain-lab
|
3cd050ee43f26cf0a1f70869100f0b40a6abae07
|
[
"RSA-MD"
] | null | null | null |
src/network/bo/peer.py
|
TimHabeck/blockchain-lab
|
3cd050ee43f26cf0a1f70869100f0b40a6abae07
|
[
"RSA-MD"
] | null | null | null |
src/network/bo/peer.py
|
TimHabeck/blockchain-lab
|
3cd050ee43f26cf0a1f70869100f0b40a6abae07
|
[
"RSA-MD"
] | null | null | null |
class Peer():
def __init__(self, host, port) -> None:
super().__init__()
self._host = host
self._port = port
def get_host(self):
return self._host
def set_host(self, host):
self._host = host
def get_port(self):
return self._port
def set_port(self, port):
self._port = port
def to_dict(self):
return {
"host": self.get_host(),
"port": self.get_port(),
}
@staticmethod
    def from_dict(data):
        return Peer(data["host"], data["port"])
| 20.357143 | 47 | 0.535088 |
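A tiny usage sketch (host and port values are made up) showing the dict round trip these helpers provide when peers are serialized for the network.
from network.bo.peer import Peer  # path per the record above; adjust to your layout

original = Peer("127.0.0.1", 5000)
restored = Peer.from_dict(original.to_dict())
assert restored.get_host() == "127.0.0.1" and restored.get_port() == 5000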
53942995ff7bad26efd11b7504d6f761e9259886
| 3,521 |
py
|
Python
|
app/tools/monitor.py
|
cuizhenduo/Monitors
|
28de2942f389cd7461d01688e570f9fd1e9d57cf
|
[
"Apache-2.0"
] | null | null | null |
app/tools/monitor.py
|
cuizhenduo/Monitors
|
28de2942f389cd7461d01688e570f9fd1e9d57cf
|
[
"Apache-2.0"
] | null | null | null |
app/tools/monitor.py
|
cuizhenduo/Monitors
|
28de2942f389cd7461d01688e570f9fd1e9d57cf
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
import psutil,time
from pprint import pprint
import datetime
# Class that collects system information
class Monitor(object):
    # Unit conversion: bytes to gigabytes ("percent" values pass through unchanged)
def bytes_to_gb(self,data,key=""):
if key == "percent":
return data
else:
return round(data / (1024 ** 3), 2)
    # Get CPU information
def cpu(self):
        # percpu=True returns per-CPU usage, percpu=False returns the average usage
        # 1. average usage  2. per-CPU usage  3. physical core count  4. logical core count
data = dict(
percent_avg = psutil.cpu_percent(percpu=False,interval=0),
percent_per = psutil.cpu_percent(percpu=True, interval=0),
num_p = psutil.cpu_count(logical=False),
num_l = psutil.cpu_count(logical=True)
)
return data
    # Get memory information
def mem(self):
info = psutil.virtual_memory()
data = dict(
total = self.bytes_to_gb(info.total),
used=self.bytes_to_gb(info.used),
free=self.bytes_to_gb(info.free),
percent=info.percent
)
return data
    # Get swap partition information
def swap(self):
info = psutil.swap_memory()
data = dict(
total=self.bytes_to_gb(info.total),
used=self.bytes_to_gb(info.used),
free=self.bytes_to_gb(info.free),
percent=info.percent
)
return data
    # Get disk information
def disk(self):
        # Get the disk partition list
info = psutil.disk_partitions()
        # List comprehension over partitions
data = [
dict(
device=v.device,
mountpoint=v.mountpoint,
fstype=v.fstype,
opts=v.opts,
used={
k: self.bytes_to_gb(v, k)
for k, v in psutil.disk_usage(v.mountpoint)._asdict().items()
}
)
for v in info
]
return data
    # Get network information
    def net(self):
        # Get per-interface address information
        addrs = psutil.net_if_addrs()
addrs_info = {
k:[
dict(
family = val.family.name,
address = val.address,
netmask = val.netmask,
broadcast = val.broadcast
)
for val in v if val.family.name == "AF_INET"
][0]
for k,v in addrs.items()
}
        # Get per-interface I/O counters
io = psutil.net_io_counters(pernic=True)
data = [
dict(
name = k,
bytes_sent = v.bytes_sent,
bytes_recv=v.bytes_recv,
packets_sent = v.packets_sent,
packets_recv=v.packets_recv,
**addrs_info[k]
)
for k,v in io.items()
]
return data
    # Convert a Unix timestamp to a formatted string
def td(self,tm):
dt = datetime.datetime.fromtimestamp(tm)
return dt.strftime("%Y-%m-%d %H:%M:%S")
    # Get the current date and time
def dt(self):
return datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
    # Get the most recent boot time
def lastest_start_time(self):
return self.td(psutil.boot_time())
    # Get logged-in users
def logined_user(self):
data = [
dict(
name = v.name,
terminal = v.terminal,
host = v.host,
started = self.td(v.started),
pid = v.pid
)
for v in psutil.users()
]
return data
if __name__ == "__main__":
m = Monitor()
#print(m.mem())
pprint(m.logined_user())
"""
for v in range(1,11):
print(m.cpu())
time.sleep(1)
"""
| 27.507813 | 81 | 0.483385 |
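A hedged illustration of the disk-usage conversion used in Monitor.disk() above. Output values are machine dependent, and the import path is taken from the record's app/tools/monitor.py location.
import psutil
from app.tools.monitor import Monitor  # adjust to your package layout

m = Monitor()
# psutil.disk_usage returns a named tuple; every field except "percent"
# is converted from bytes to GB by bytes_to_gb().
usage = {k: m.bytes_to_gb(v, k) for k, v in psutil.disk_usage("/")._asdict().items()}
print(usage)  # e.g. {'total': 234.5, 'used': 120.1, 'free': 114.4, 'percent': 51.2}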
dfd9e961fcb42cb933faf5fe10f9623bf74be1cd
| 13,897 |
py
|
Python
|
mysql2pgsql/lib/postgres_writer.py
|
gengo/py-mysql2pgsql
|
3cf490f10ef3ce305e488d1ab5153fc208dedaf1
|
[
"MIT"
] | null | null | null |
mysql2pgsql/lib/postgres_writer.py
|
gengo/py-mysql2pgsql
|
3cf490f10ef3ce305e488d1ab5153fc208dedaf1
|
[
"MIT"
] | null | null | null |
mysql2pgsql/lib/postgres_writer.py
|
gengo/py-mysql2pgsql
|
3cf490f10ef3ce305e488d1ab5153fc208dedaf1
|
[
"MIT"
] | null | null | null |
from __future__ import absolute_import
import re
from cStringIO import StringIO
from datetime import datetime, date, timedelta
from psycopg2.extensions import QuotedString, Binary, AsIs
from pytz import timezone
class PostgresWriter(object):
"""Base class for :py:class:`mysql2pgsql.lib.postgres_file_writer.PostgresFileWriter`
and :py:class:`mysql2pgsql.lib.postgres_db_writer.PostgresDbWriter`.
"""
def __init__(self, tz=False):
self.column_types = {}
if tz:
self.tz = timezone('UTC')
self.tz_offset = '+00:00'
else:
self.tz = None
self.tz_offset = ''
def column_description(self, column):
return '"%s" %s' % (column['name'], self.column_type_info(column))
def column_type(self, column):
hash_key = hash(frozenset(column.items()))
self.column_types[hash_key] = self.column_type_info(column).split(" ")[0]
return self.column_types[hash_key]
def column_type_info(self, column):
"""
"""
if column.get('auto_increment', None):
return 'integer DEFAULT nextval(\'%s_%s_seq\'::regclass) NOT NULL' % (
column['table_name'], column['name'])
null = "" if column['null'] else " NOT NULL"
def get_type(column):
"""This in conjunction with :py:class:`mysql2pgsql.lib.mysql_reader.MysqlReader._convert_type`
determines the PostgreSQL data type. In my opinion this is way too fugly, will need
to refactor one day.
"""
            t = lambda v: v is not None
default = (' DEFAULT %s' % QuotedString(column['default']).getquoted()) if t(column['default']) else None
if column['type'] == 'char':
default = ('%s::char' % default) if t(default) else None
return default, 'character(%s)' % column['length']
elif column['type'] == 'varchar':
default = ('%s::character varying' % default) if t(default) else None
return default, 'character varying(%s)' % column['length']
elif column['type'] == 'integer':
default = (" DEFAULT %s" % (column['default'] if t(column['default']) else 'NULL')) if t(default) else None
return default, 'integer'
elif column['type'] == 'bigint':
default = (" DEFAULT %s" % (column['default'] if t(column['default']) else 'NULL')) if t(default) else None
return default, 'bigint'
elif column['type'] == 'tinyint':
default = (" DEFAULT %s" % (column['default'] if t(column['default']) else 'NULL')) if t(default) else None
return default, 'smallint'
elif column['type'] == 'boolean':
default = (" DEFAULT %s" % ('true' if int(column['default']) == 1 else 'false')) if t(default) else None
return default, 'boolean'
elif column['type'] == 'float':
default = (" DEFAULT %s" % (column['default'] if t(column['default']) else 'NULL')) if t(default) else None
return default, 'real'
elif column['type'] == 'float unsigned':
default = (" DEFAULT %s" % (column['default'] if t(column['default']) else 'NULL')) if t(default) else None
return default, 'real'
elif column['type'] in ('numeric', 'decimal'):
default = (" DEFAULT %s" % (column['default'] if t(column['default']) else 'NULL')) if t(default) else None
return default, 'numeric(%s, %s)' % (column['length'] or 20, column['decimals'] or 0)
elif column['type'] == 'double precision':
default = (" DEFAULT %s" % (column['default'] if t(column['default']) else 'NULL')) if t(default) else None
return default, 'double precision'
elif column['type'] == 'datetime':
default = None
if self.tz:
return default, 'timestamp with time zone'
else:
return default, 'timestamp without time zone'
elif column['type'] == 'date':
default = None
return default, 'date'
elif column['type'] == 'timestamp':
if column['default'] == None:
default = None
elif "CURRENT_TIMESTAMP" in column['default']:
default = ' DEFAULT CURRENT_TIMESTAMP'
elif "0000-00-00 00:00" in column['default']:
if self.tz:
default = " DEFAULT '1970-01-01T00:00:00.000000%s'" % self.tz_offset
elif "0000-00-00 00:00:00" in column['default']:
default = " DEFAULT '1970-01-01 00:00:00'"
else:
default = " DEFAULT '1970-01-01 00:00'"
if self.tz:
return default, 'timestamp with time zone'
else:
return default, 'timestamp without time zone'
elif column['type'] == 'time':
default = " DEFAULT '00:00:00'::interval"
return default, 'interval'
elif 'blob' in column['type'] or 'binary' in column['type']:
return default, 'bytea'
elif column['type'] in ('tinytext', 'mediumtext', 'longtext', 'text'):
return default, 'text'
elif re.search(r'^enum', column['type']):
default = (' %s::character varying' % default) if t(default) else None
enum = re.sub(r'^enum|\(|\)', '', column['type'])
# TODO: will work for "'.',',',''''" but will fail for "'.'',','.'"
max_enum_size = max([len(e.replace("''", "'")) for e in enum.split("','")])
return default, ' character varying(%s) check(%s in (%s))' % (max_enum_size, column['name'], enum)
elif 'bit(' in column['type']:
return ' DEFAULT %s' % column['default'].upper() if column['default'] else column['default'], 'varbit(%s)' % re.search(r'\((\d+)\)', column['type']).group(1)
elif 'set(' in column['type']:
if default:
default = ' DEFAULT ARRAY[%s]::text[]' % ','.join(QuotedString(v).getquoted() for v in re.search(r"'(.*)'", default).group(1).split(','))
return default, 'text[]'
else:
raise Exception('unknown %s' % column['type'])
default, column_type = get_type(column)
return '%s%s%s' % (column_type, (default if not default == None else ''), null)
def process_row(self, table, row):
"""Examines row data from MySQL and alters
the values when necessary to be compatible with
sending to PostgreSQL via the copy command
"""
for index, column in enumerate(table.columns):
hash_key = hash(frozenset(column.items()))
column_type = self.column_types[hash_key] if hash_key in self.column_types else self.column_type(column)
if row[index] == None and ('timestamp' not in column_type or not column['default']):
row[index] = '\N'
elif row[index] == None and column['default']:
if self.tz:
row[index] = '1970-01-01T00:00:00.000000' + self.tz_offset
else:
row[index] = '1970-01-01 00:00:00'
elif 'bit' in column_type:
row[index] = bin(ord(row[index]))[2:]
elif isinstance(row[index], (str, unicode, basestring)):
if column_type == 'bytea':
row[index] = Binary(row[index]).getquoted()[1:-8] if row[index] else row[index]
elif 'text[' in column_type:
row[index] = '{%s}' % ','.join('"%s"' % v.replace('"', r'\"') for v in row[index].split(','))
else:
row[index] = row[index].replace('\\', r'\\').replace('\n', r'\n').replace('\t', r'\t').replace('\r', r'\r').replace('\0', '')
elif column_type == 'boolean':
# We got here because you used a tinyint(1), if you didn't want a bool, don't use that type
row[index] = 't' if row[index] not in (None, 0) else 'f' if row[index] == 0 else row[index]
elif isinstance(row[index], (date, datetime)):
if isinstance(row[index], datetime) and self.tz:
try:
if row[index].tzinfo:
row[index] = row[index].astimezone(self.tz).isoformat()
else:
row[index] = datetime(*row[index].timetuple()[:6], tzinfo=self.tz).isoformat()
except Exception as e:
print e.message
else:
row[index] = row[index].isoformat()
elif isinstance(row[index], timedelta):
hours = row[index].days*24
sec_hours, remainder = divmod( row[index].seconds, 3600 )
hours += int(sec_hours)
minutes, seconds = divmod(remainder, 60)
#row[index] = datetime.fromtimestamp(row[index].total_seconds()).time().isoformat()
row[index] = "%s:%s:%s" % ( str(hours).zfill(2), str(int(minutes)).zfill(2), str(int(seconds)).zfill(2) )
else:
row[index] = AsIs(row[index]).getquoted()
def table_attributes(self, table):
primary_keys = []
serial_key = None
maxval = None
columns = StringIO()
for column in table.columns:
if column['auto_increment']:
serial_key = column['name']
maxval = 1 if column['maxval'] < 1 else column['maxval'] + 1
if column['primary_key']:
primary_keys.append(column['name'])
columns.write(' %s,\n' % self.column_description(column))
return primary_keys, serial_key, maxval, columns.getvalue()[:-2]
def truncate(self, table):
serial_key = None
maxval = None
for column in table.columns:
if column['auto_increment']:
serial_key = column['name']
maxval = 1 if column['maxval'] < 1 else column['maxval'] + 1
truncate_sql = 'TRUNCATE "%s" CASCADE;' % table.name
serial_key_sql = None
if serial_key:
serial_key_sql = "SELECT pg_catalog.setval(pg_get_serial_sequence(%(table_name)s, %(serial_key)s), %(maxval)s, true);" % {
'table_name': QuotedString(table.name).getquoted(),
'serial_key': QuotedString(serial_key).getquoted(),
'maxval': maxval}
return (truncate_sql, serial_key_sql)
def write_table(self, table):
primary_keys, serial_key, maxval, columns = self.table_attributes(table)
serial_key_sql = []
table_sql = []
if serial_key:
serial_key_seq = '%s_%s_seq' % (table.name, serial_key)
serial_key_sql.append('DROP SEQUENCE IF EXISTS %s CASCADE;' % serial_key_seq)
serial_key_sql.append("""CREATE SEQUENCE %s INCREMENT BY 1
NO MAXVALUE NO MINVALUE CACHE 1;""" % serial_key_seq)
serial_key_sql.append('SELECT pg_catalog.setval(%s, %s, true);' % (QuotedString(serial_key_seq).getquoted(), maxval))
table_sql.append('DROP TABLE IF EXISTS "%s" CASCADE;' % table.name)
table_sql.append('CREATE TABLE "%s" (\n%s\n)\nWITHOUT OIDS;' % (table.name.encode('utf8'), columns))
return (table_sql, serial_key_sql)
def write_indexes(self, table):
index_sql = []
primary_index = [idx for idx in table.indexes if idx.get('primary', None)]
if primary_index:
index_sql.append('ALTER TABLE "%(table_name)s" ADD CONSTRAINT "%(index_name)s_pkey" PRIMARY KEY(%(column_names)s);' % {
'table_name': table.name,
'index_name': '%s_%s' % (table.name, '_'.join(primary_index[0]['columns'])),
'column_names': ', '.join('"%s"' % col for col in primary_index[0]['columns']),
})
for index in table.indexes:
if 'primary' in index:
continue
unique = 'UNIQUE ' if index.get('unique', None) else ''
index_name = '%s_%s' % (table.name, '_'.join(index['columns']))
index_sql.append('DROP INDEX IF EXISTS "%s" CASCADE;' % index_name)
index_sql.append('CREATE %(unique)sINDEX "%(index_name)s" ON "%(table_name)s" (%(column_names)s);' % {
'unique': unique,
'index_name': index_name,
'table_name': table.name,
'column_names': ', '.join('"%s"' % col for col in index['columns']),
})
return index_sql
def write_constraints(self, table):
constraint_sql = []
for key in table.foreign_keys:
constraint_sql.append("""ALTER TABLE "%(table_name)s" ADD FOREIGN KEY ("%(column_name)s")
REFERENCES "%(ref_table_name)s"(%(ref_column_name)s) %(on_delete)s %(on_update)s;""" % {
'table_name': table.name,
'column_name': key['column'],
'ref_table_name': key['ref_table'],
'ref_column_name': key['ref_column'],
'on_delete': key['on_delete'] if key['on_delete'] == '' else 'ON DELETE %s' % (key['on_delete']),
'on_update': key['on_update'] if key['on_update'] == '' else 'ON UPDATE %s' % (key['on_update'])})
return constraint_sql
def close(self):
raise NotImplementedError
def write_contents(self, table, reader):
raise NotImplementedError
| 51.47037 | 173 | 0.537886 |
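PostgresWriter leaves close() and write_contents() abstract, so concrete writers supply the output channel. Below is a minimal, hedged sketch of a subclass that only prints the generated DDL; it is not one of the project's real writers and skips row streaming entirely.
from mysql2pgsql.lib.postgres_writer import PostgresWriter

class StdoutWriter(PostgresWriter):
    """Emit schema statements to stdout; real writers also stream rows via COPY."""
    def write_contents(self, table, reader):
        # write_table returns (table_sql, serial_key_sql); indexes come separately.
        table_sql, serial_key_sql = self.write_table(table)
        for statement in serial_key_sql + table_sql + self.write_indexes(table):
            print(statement)
    def close(self):
        pass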
9910653e7a3413301f4ea40e4e77a482d678af3d
| 3,546 |
py
|
Python
|
gapid_tests/initialization_tests/vkCreateInstance/vkCreateInstance_tests.py
|
AWoloszyn/vulkan_test_applications
|
5e9f86cdbd4e2344f41db9e0a578fe9fba41106f
|
[
"Apache-2.0"
] | null | null | null |
gapid_tests/initialization_tests/vkCreateInstance/vkCreateInstance_tests.py
|
AWoloszyn/vulkan_test_applications
|
5e9f86cdbd4e2344f41db9e0a578fe9fba41106f
|
[
"Apache-2.0"
] | null | null | null |
gapid_tests/initialization_tests/vkCreateInstance/vkCreateInstance_tests.py
|
AWoloszyn/vulkan_test_applications
|
5e9f86cdbd4e2344f41db9e0a578fe9fba41106f
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2017 Google Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from gapit_test_framework import gapit_test, require, require_equal
from gapit_test_framework import require_not_equal, little_endian_bytes_to_int
from gapit_test_framework import GapitTest
from vulkan_constants import *
@gapit_test("vkCreateInstance_test")
class NullApplicationInfoTest(GapitTest):
def expect(self):
"""Expect that the applicationInfoPointer is null for the first
vkCreateInstance"""
architecture = self.architecture
create_instance = require(self.next_call_of("vkCreateInstance"))
require_not_equal(create_instance.hex_pCreateInfo, 0)
create_info_memory = require(
create_instance.get_read_data(create_instance.hex_pCreateInfo,
architecture.int_integerSize))
require_equal(
little_endian_bytes_to_int(create_info_memory),
VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO)
applicationInfoPointer = require(
create_instance.get_read_data(create_instance.hex_pCreateInfo +
architecture.int_pointerSize * 3,
architecture.int_pointerSize))
require_equal(little_endian_bytes_to_int(applicationInfoPointer), 0)
@gapit_test("vkCreateInstance_test")
class NonNullApplicationInfoTest(GapitTest):
def expect(self):
"""Expect that the applicationInfoPointer is not null for the second
vkCreateInstance, and that it contains some of the expected data."""
architecture = self.architecture
create_instance = require(self.nth_call_of("vkCreateInstance", 2))
require_not_equal(create_instance.hex_pCreateInfo, 0)
create_info_memory = require(
create_instance.get_read_data(create_instance.hex_pCreateInfo,
architecture.int_integerSize))
require_equal(
little_endian_bytes_to_int(create_info_memory),
VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO)
applicationInfoPointer = require(
create_instance.get_read_data(create_instance.hex_pCreateInfo +
architecture.int_pointerSize * 3,
architecture.int_pointerSize))
require_not_equal(little_endian_bytes_to_int(applicationInfoPointer), 0)
# The 2nd pointer(3rd element) in VkApplicationInfo should be the string
# Application
application_info_application_name_ptr = require(
create_instance.get_read_data(
little_endian_bytes_to_int(applicationInfoPointer) + 2 *
architecture.int_pointerSize, architecture.int_pointerSize))
application_name = require(
create_instance.get_read_string(
little_endian_bytes_to_int(
application_info_application_name_ptr)))
require_equal(application_name, "Application")
| 46.051948 | 80 | 0.694303 |
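The checks above rely on the framework's little_endian_bytes_to_int helper to turn raw memory reads into integers. A hedged, framework-independent illustration of that conversion follows; this helper is not part of gapit_test_framework, just an equivalent shown for clarity.
def little_endian_to_int(data):
    # Interpret a byte sequence as an unsigned little-endian integer.
    value = 0
    for i, byte in enumerate(bytearray(data)):
        value |= byte << (8 * i)
    return value

# VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO is 1 in vulkan_constants.
assert little_endian_to_int(b"\x01\x00\x00\x00") == 1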
c24d7e459b8c0d6beb5f2bdc7177f60969179a6a
| 428 |
py
|
Python
|
modules/update_windows_choco_apps/apply.py
|
jameswdelancey/cosmos_inventory_example
|
2143be1bd75c570c485da3c79ce1ad7983860f3b
|
[
"MIT"
] | null | null | null |
modules/update_windows_choco_apps/apply.py
|
jameswdelancey/cosmos_inventory_example
|
2143be1bd75c570c485da3c79ce1ad7983860f3b
|
[
"MIT"
] | null | null | null |
modules/update_windows_choco_apps/apply.py
|
jameswdelancey/cosmos_inventory_example
|
2143be1bd75c570c485da3c79ce1ad7983860f3b
|
[
"MIT"
] | null | null | null |
import datetime
# import os
import logging
import subprocess
out = subprocess.check_output(
["choco", "feature", "enable", "-n=allowGlobalConfirmation"]
)
logging.debug("choco feature enable allowGlobalConfirmation output: %s", out.decode())
if datetime.datetime.now().hour == 12:
    out = subprocess.check_output(["choco", "upgrade", "all"])
logging.debug("choco upgrade all output: %s", out.decode())
| 28.533333 | 87 | 0.691589 |
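check_output raises CalledProcessError on a non-zero exit code, so a deployment that should survive a failed upgrade may want to log and continue. A hedged variant of the upgrade step, not part of the module above:
import logging
import subprocess

try:
    out = subprocess.check_output(["choco", "upgrade", "all"], stderr=subprocess.STDOUT)
    logging.debug("choco upgrade all output: %s", out.decode())
except subprocess.CalledProcessError as exc:
    logging.error("choco upgrade failed (exit %s): %s", exc.returncode, exc.output.decode())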