ext | sha | content
---|---|---|
py | 1a3b84f4b59606c48dcdcad238518144200416a2 | # Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Common neural network layer initializers, consistent with definitions
used in Keras and Sonnet.
"""
from jax._src.nn.initializers import (
constant as constant,
delta_orthogonal as delta_orthogonal,
glorot_normal as glorot_normal,
glorot_uniform as glorot_uniform,
he_normal as he_normal,
he_uniform as he_uniform,
kaiming_normal as kaiming_normal,
kaiming_uniform as kaiming_uniform,
lecun_normal as lecun_normal,
lecun_uniform as lecun_uniform,
normal as normal,
ones as ones,
orthogonal as orthogonal,
uniform as uniform,
variance_scaling as variance_scaling,
xavier_normal as xavier_normal,
xavier_uniform as xavier_uniform,
zeros as zeros,
)
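# A minimal usage sketch, added for illustration (not part of the original module);
# it assumes this file is importable as jax.nn.initializers. Most of the re-exported
# names are initializer factories whose result is called with (key, shape, dtype):
#
#     import jax
#     import jax.numpy as jnp
#     from jax.nn import initializers
#
#     init = initializers.glorot_normal()                       # build the initializer
#     w = init(jax.random.PRNGKey(0), (128, 64), jnp.float32)   # sample a weight matrix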
|
py | 1a3b855dcc1b4d1ce4883c49616804133779039e | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2019/3/10 20:15
# @Author : wxiong
# @Site :
# @File : DataBaseClass.py
# @Desc : Definition of the DataBase class, created by wxiong
import pandas as pd
import datetime
import logging
import psycopg2
from DataBase.config import ConfigDB
logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO) # format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
class DataBase():
def __init__(self, **kwargs):
self.db_name = kwargs.pop('db_name', 'postgres')
self.db_user = kwargs.pop('db_user', ConfigDB.USER.value)
self.db_host = kwargs.pop('db_host', 'localhost')
self.db_password = kwargs.pop('db_password', '')
self.new_db = kwargs.pop('new_db', False)
self.reconfig(**kwargs)
try:
conn = psycopg2.connect(host = self.db_host,
database = self.db_name,
user = self.db_user,
password = self.db_password)
conn.set_isolation_level(psycopg2.extensions.ISOLATION_LEVEL_AUTOCOMMIT)
self.connection = conn
self.cursor = conn.cursor()
except Exception as e:
logger.error('DB {db_name} does not exist: {err_msg}'.format(db_name = self.db_name, err_msg = e))
if self.new_db:
logger.info('Creating new database {db_name}'.format(db_name = self.db_name))
conn = psycopg2.connect(host = self.db_host,
database ='postgres',
user = self.db_user,
password = self.db_password)
conn.set_isolation_level(psycopg2.extensions.ISOLATION_LEVEL_AUTOCOMMIT)
cur = conn.cursor()
cur.execute('CREATE DATABASE {}'.format(self.db_name))
cur.close()
conn.close()
conn = psycopg2.connect(host=self.db_host,
database=self.db_name,
user=self.db_user,
password=self.db_password)
self.connection = conn
self.cursor = conn.cursor()
def reconfig(self, **kwargs):
for k, v in kwargs.items():
setattr(self, k, v)
def __del__(self):
logger.info('[Delete] Closing connection and cursor...')
self.connection.close()
self.cursor.close()
def getConn(self):
return self.connection
def execute(self, query):
cur = self.cursor
cur.execute(query)
def createTable(self, tableDict):
logger.info('[create table] Creating table {}'.format(tableDict['tableName']))
query = '''
CREATE TABLE {name} (
{fields}
)
'''.format(name = tableDict['tableName'],
fields = ','.join([' '.join(field)
for field in tableDict['fields']]))
try:
self.execute(query)
except Exception as e:
logger.info('Table {} already exists: {}'.format(tableDict['tableName'], e))
def createTables(self, tableDictList):
for tableDict in tableDictList:
self.createTable(tableDict)
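# A minimal usage sketch, added for illustration (not part of the original module).
# The database name and table specification below are hypothetical; createTable expects
# a dict with 'tableName' and a 'fields' list of (name, type, ...) tuples:
#
#     db = DataBase(db_name='analytics', new_db=True)
#     db.createTable({'tableName': 'prices',
#                     'fields': [('id', 'serial', 'PRIMARY KEY'),
#                                ('ticker', 'text'),
#                                ('close', 'numeric')]})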
|
py | 1a3b879e742bd9d36dbeb8e65cc4a1ff2780f448 | import logging
import astropy.units as u
from astropy.wcs import (WCS, WCSSUB_CELESTIAL, WCSSUB_CUBEFACE,
WCSSUB_LATITUDE, WCSSUB_LONGITUDE, WCSSUB_SPECTRAL,
WCSSUB_STOKES, InvalidSubimageSpecificationError)
# Use this once in specutils
from ...utils.wcs_utils import (convert_spectral_axis,
determine_ctype_from_vconv)
from ..wcs_adapter import WCSAdapter, WCSAxes
__all__ = ['FITSWCSAdapter']
class FITSWCSAdapter(WCSAdapter):
"""
Adapter class that adds support for FITSWCS objects.
In the wild, FITS WCS headers often do not comply with the standard, but
can be interpreted with little ambiguity (e.g. the CTYPE of the
wavelength axis is called "Wavelength" instead of the standard fits
"WAVE"). In some common cases, this class will thus read files that
are not fully compliant. In these cases, it prints a warning message.
"""
wrapped_class = WCS
axes = None
substitute_spec_axis_names = ['linear', 'wavelength']
def __init__(self, wcs):
super(FITSWCSAdapter, self).__init__(wcs)
self._spec_axis = None
# Store a reference to all axes information within the wcs object
self.axes = WCSAxes(
longitude=self.wcs.sub([WCSSUB_LONGITUDE]),
latitude=self.wcs.sub([WCSSUB_LATITUDE]),
cubeface=self.wcs.sub([WCSSUB_CUBEFACE]),
spectral=self.wcs.sub([WCSSUB_SPECTRAL]),
stokes=self.wcs.sub([WCSSUB_STOKES]),
celestial=self.wcs.sub([WCSSUB_CELESTIAL])
)
# TODO: make this more efficient. Check to see whether the spectral
# axis was actually parsed
if self.axes.spectral.naxis == 0:
self.axes = self.axes._replace(spectral=self.wcs.sub([self.spec_axis + 1]))
def __getitem__(self, item):
"""Pass slicing information to the internal `FITSWCS` object."""
return self.wcs[item]
def __deepcopy__(self, *args, **kwargs):
"""
Ensure deepcopy is passed through to the underlying fits wcs object.
Doing so allows for proper memoization handling in the astropy fits
machinery.
"""
return self.__class__(self.wcs.__deepcopy__(*args, **kwargs))
def world_to_pixel(self, world_array):
"""
Method for performing the world to pixel transformations.
"""
with u.set_enabled_equivalencies(u.spectral()):
world_array = u.Quantity(world_array, unit=self.spectral_axis_unit)
return self.axes.spectral.all_world2pix(world_array.value, 0)[0]
def pixel_to_world(self, pixel_array):
"""
Method for performing the pixel to world transformations.
"""
return u.Quantity(self.axes.spectral.all_pix2world(pixel_array, 0)[0],
self.spectral_axis_unit)
@property
def spec_axis(self):
"""
Try and parse the spectral axis of the fits wcs object.
"""
self._spec_axis = self.wcs.wcs.spec
if (self._spec_axis < 0) and (self._wcs.wcs.spec) < 0:
ctypelist = [c.lower() for c in self.wcs.wcs.ctype]
for n in self.substitute_spec_axis_names:
if n in ctypelist:
self._spec_axis = ctypelist.index(n)
logging.warning("WCS has a non-standard spectral axis, 'ctype's might be incorrect. Assuming the axis {} labeled '{}' is spectral and proceeding.".format(self._spec_axis, n))
break
else:
raise InvalidSubimageSpecificationError(
"Cannot find a spectral axis in the provided WCS."
"Are your 'ctype's correct?")
return self._spec_axis
@property
def spectral_axis_unit(self):
"""
Returns the unit of the spectral axis.
"""
return self._wcs.wcs.cunit[self.spec_axis]
@property
def rest_frequency(self):
"""
Returns the rest frequency defined in the WCS.
"""
return self.wcs.wcs.restfrq
@property
def rest_wavelength(self):
"""
Returns the rest wavelength defined in the WCS.
"""
return self.wcs.wcs.restwav
def bin_edges(self):
# the WCS doesn't know about its own pixel array
edge_indices = list(self.axes.spectral.pixel_indices - 0.5) + \
[self.axes.spectral.pixel_indices[-1] + 0.5]
return self.pixel_to_world(edge_indices)
def with_spectral_unit(self, unit, rest_value=None, velocity_convention=None):
# Shorter versions to keep lines under 80
ctype_from_vconv = determine_ctype_from_vconv
out_ctype = ctype_from_vconv(self._wcs.wcs.ctype[self.spec_axis],
unit,
velocity_convention=velocity_convention)
new_wcs = convert_spectral_axis(self._wcs, unit, out_ctype,
rest_value=rest_value)
new_wcs.wcs.set()
return new_wcs
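# A minimal usage sketch, added for illustration (not part of the original module);
# the file name is hypothetical and the header is assumed to contain a spectral axis:
#
#     from astropy.io import fits
#     header = fits.getheader("spectrum.fits")
#     adapter = FITSWCSAdapter(WCS(header))
#     waves = adapter.pixel_to_world([0, 1, 2])   # Quantity in adapter.spectral_axis_unit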
|
py | 1a3b8830b96fb70800bbf2183f8db759d68d716f | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# */AIPND-revision/intropyproject-classify-pet-images/check_images.py
#
# TODO 0: Add your information below for Programmer & Date Created.
# PROGRAMMER: Luis Candanedo
# DATE CREATED: 5/24/2020
# REVISED DATE:
# PURPOSE: Classifies pet images using a pretrained CNN model, compares these
# classifications to the true identity of the pets in the images, and
# summarizes how well the CNN performed on the image classification task.
# Note that the true identity of the pet (or object) in the image is
# indicated by the filename of the image. Therefore, your program must
# first extract the pet image label from the filename before
# classifying the images using the pretrained CNN model. With this
# program we will be comparing the performance of 3 different CNN model
# architectures to determine which provides the 'best' classification.
#
# Use argparse Expected Call with <> indicating expected user input:
# python check_images.py --dir <directory with images> --arch <model>
# --dogfile <file that contains dognames>
# Example call:
# python check_images.py --dir pet_images/ --arch vgg --dogfile dognames.txt
##
# Imports python modules
from time import time, sleep
# Imports print functions that check the lab
from print_functions_for_lab_checks import *
# Imports functions created for this program
from get_input_args import get_input_args
from get_pet_labels import get_pet_labels
from classify_images import classify_images
from adjust_results4_isadog import adjust_results4_isadog
from calculates_results_stats import calculates_results_stats
from print_results import print_results
# Main program function defined below
def main():
# TODO 0: Measures total program runtime by collecting start time
start_time = time()
#sleep(5)
# TODO 1: Define get_input_args function within the file get_input_args.py
# This function retrieves 3 Command Line Arguments as input from
# the user running the program from a terminal window. This function returns
# the collection of these command line arguments from the function call as
# the variable in_arg
in_arg = get_input_args()
# Function that checks command line arguments using in_arg
check_command_line_arguments(in_arg)
# TODO 2: Define get_pet_labels function within the file get_pet_labels.py
# Once the get_pet_labels function has been defined replace 'None'
# in the function call with in_arg.dir Once you have done the replacements
# your function call should look like this:
# get_pet_labels(in_arg.dir)
# This function creates the results dictionary that contains the results,
# this dictionary is returned from the function call as the variable results
results = get_pet_labels(in_arg.dir)
#print(results)
# Function that checks Pet Images in the results Dictionary using results
check_creating_pet_image_labels(results)
# TODO 3: Define classify_images function within the file classify_images.py
# Once the classify_images function has been defined replace first 'None'
# in the function call with in_arg.dir and replace the last 'None' in the
# function call with in_arg.arch Once you have done the replacements your
# function call should look like this:
# classify_images(in_arg.dir, results, in_arg.arch)
# Creates Classifier Labels with classifier function, Compares Labels,
# and adds these results to the results dictionary - results
classify_images(in_arg.dir, results, in_arg.arch)
# Function that checks Results Dictionary using results
check_classifying_images(results)
# TODO 4: Define adjust_results4_isadog function within the file adjust_results4_isadog.py
# Once the adjust_results4_isadog function has been defined replace 'None'
# in the function call with in_arg.dogfile Once you have done the
# replacements your function call should look like this:
# adjust_results4_isadog(results, in_arg.dogfile)
# Adjusts the results dictionary to determine if classifier correctly
# classified images as 'a dog' or 'not a dog'. This demonstrates if
# model can correctly classify dog images as dogs (regardless of breed)
adjust_results4_isadog(results, in_arg.dogfile)
# Function that checks Results Dictionary for is-a-dog adjustment using results
check_classifying_labels_as_dogs(results)
# TODO 5: Define calculates_results_stats function within the file calculates_results_stats.py
# This function creates the results statistics dictionary that contains a
# summary of the results statistics (this includes counts & percentages). This
# dictionary is returned from the function call as the variable results_stats
# Calculates results of run and puts statistics in the Results Statistics
# Dictionary - called results_stats
results_stats = calculates_results_stats(results)
# Function that checks Results Statistics Dictionary using results_stats
check_calculating_results(results, results_stats)
# TODO 6: Define print_results function within the file print_results.py
# Once the print_results function has been defined replace 'None'
# in the function call with in_arg.arch Once you have done the
# replacements your function call should look like this:
# print_results(results, results_stats, in_arg.arch, True, True)
# Prints summary results, incorrect classifications of dogs (if requested)
# and incorrectly classified breeds (if requested)
print_results(results, results_stats, in_arg.arch, True, True)
# TODO 0: Measure total program runtime by collecting end time
end_time = time()
# TODO 0: Computes overall runtime in seconds & prints it in hh:mm:ss format
tot_time = end_time - start_time  # calculate difference between end time and start time
print("\n** Total Elapsed Runtime:",
str(int((tot_time/3600)))+":"+str(int((tot_time%3600)/60))+":"
+str(int((tot_time%3600)%60)) )
# Call to main function to run the program
if __name__ == "__main__":
main()
|
py | 1a3b88910fe2a8b72dedabf6ff61f30df1baef18 | import asyncio
import warnings
import pytest
from distributed import Worker, WorkerPlugin
from distributed.utils_test import async_wait_for, gen_cluster, inc
class MyPlugin(WorkerPlugin):
name = "MyPlugin"
def __init__(self, data, expected_notifications=None):
self.data = data
self.expected_notifications = expected_notifications
def setup(self, worker):
assert isinstance(worker, Worker)
self.worker = worker
self.worker._my_plugin_status = "setup"
self.worker._my_plugin_data = self.data
self.observed_notifications = []
def teardown(self, worker):
self.worker._my_plugin_status = "teardown"
if self.expected_notifications is not None:
assert len(self.observed_notifications) == len(self.expected_notifications)
for expected, real in zip(
self.expected_notifications, self.observed_notifications
):
assert expected == real
def transition(self, key, start, finish, **kwargs):
self.observed_notifications.append(
{"key": key, "start": start, "finish": finish}
)
@gen_cluster(client=True, nthreads=[])
async def test_create_with_client(c, s):
await c.register_worker_plugin(MyPlugin(123))
worker = await Worker(s.address, loop=s.loop)
assert worker._my_plugin_status == "setup"
assert worker._my_plugin_data == 123
await worker.close()
assert worker._my_plugin_status == "teardown"
@gen_cluster(client=True, nthreads=[])
async def test_remove_with_client(c, s):
await c.register_worker_plugin(MyPlugin(123), name="foo")
await c.register_worker_plugin(MyPlugin(546), name="bar")
worker = await Worker(s.address, loop=s.loop)
# remove the 'foo' plugin
await c.unregister_worker_plugin("foo")
assert worker._my_plugin_status == "teardown"
# check that on the scheduler registered worker plugins we only have 'bar'
assert len(s.worker_plugins) == 1
assert "bar" in s.worker_plugins
# check on the worker plugins that we only have 'bar'
assert len(worker.plugins) == 1
assert "bar" in worker.plugins
# let's remove 'bar' and we should have no worker plugins left
await c.unregister_worker_plugin("bar")
assert worker._my_plugin_status == "teardown"
assert not s.worker_plugins
assert not worker.plugins
@gen_cluster(client=True, nthreads=[])
async def test_remove_with_client_raises(c, s):
await c.register_worker_plugin(MyPlugin(123), name="foo")
worker = await Worker(s.address, loop=s.loop)
with pytest.raises(ValueError, match="bar"):
await c.unregister_worker_plugin("bar")
@gen_cluster(client=True, worker_kwargs={"plugins": [MyPlugin(5)]})
async def test_create_on_construction(c, s, a, b):
assert len(a.plugins) == len(b.plugins) == 1
assert a._my_plugin_status == "setup"
assert a._my_plugin_data == 5
@gen_cluster(nthreads=[("127.0.0.1", 1)], client=True)
async def test_normal_task_transitions_called(c, s, w):
expected_notifications = [
{"key": "task", "start": "released", "finish": "waiting"},
{"key": "task", "start": "waiting", "finish": "ready"},
{"key": "task", "start": "ready", "finish": "executing"},
{"key": "task", "start": "executing", "finish": "memory"},
{"key": "task", "start": "memory", "finish": "released"},
{"key": "task", "start": "released", "finish": "forgotten"},
]
plugin = MyPlugin(1, expected_notifications=expected_notifications)
await c.register_worker_plugin(plugin)
await c.submit(lambda x: x, 1, key="task")
await async_wait_for(lambda: not w.tasks, timeout=10)
@gen_cluster(nthreads=[("127.0.0.1", 1)], client=True)
async def test_failing_task_transitions_called(c, s, w):
def failing(x):
raise Exception()
expected_notifications = [
{"key": "task", "start": "released", "finish": "waiting"},
{"key": "task", "start": "waiting", "finish": "ready"},
{"key": "task", "start": "ready", "finish": "executing"},
{"key": "task", "start": "executing", "finish": "error"},
{"key": "task", "start": "error", "finish": "released"},
{"key": "task", "start": "released", "finish": "forgotten"},
]
plugin = MyPlugin(1, expected_notifications=expected_notifications)
await c.register_worker_plugin(plugin)
with pytest.raises(Exception):
await c.submit(failing, 1, key="task")
@gen_cluster(
nthreads=[("127.0.0.1", 1)], client=True, worker_kwargs={"resources": {"X": 1}}
)
async def test_superseding_task_transitions_called(c, s, w):
expected_notifications = [
{"key": "task", "start": "released", "finish": "waiting"},
{"key": "task", "start": "waiting", "finish": "constrained"},
{"key": "task", "start": "constrained", "finish": "executing"},
{"key": "task", "start": "executing", "finish": "memory"},
{"key": "task", "start": "memory", "finish": "released"},
{"key": "task", "start": "released", "finish": "forgotten"},
]
plugin = MyPlugin(1, expected_notifications=expected_notifications)
await c.register_worker_plugin(plugin)
await c.submit(lambda x: x, 1, key="task", resources={"X": 1})
await async_wait_for(lambda: not w.tasks, timeout=10)
@gen_cluster(nthreads=[("127.0.0.1", 1)], client=True)
async def test_dependent_tasks(c, s, w):
dsk = {"dep": 1, "task": (inc, "dep")}
expected_notifications = [
{"key": "dep", "start": "released", "finish": "waiting"},
{"key": "dep", "start": "waiting", "finish": "ready"},
{"key": "dep", "start": "ready", "finish": "executing"},
{"key": "dep", "start": "executing", "finish": "memory"},
{"key": "task", "start": "released", "finish": "waiting"},
{"key": "task", "start": "waiting", "finish": "ready"},
{"key": "task", "start": "ready", "finish": "executing"},
{"key": "task", "start": "executing", "finish": "memory"},
{"key": "dep", "start": "memory", "finish": "released"},
{"key": "task", "start": "memory", "finish": "released"},
{"key": "task", "start": "released", "finish": "forgotten"},
{"key": "dep", "start": "released", "finish": "forgotten"},
]
plugin = MyPlugin(1, expected_notifications=expected_notifications)
await c.register_worker_plugin(plugin)
await c.get(dsk, "task", sync=False)
await async_wait_for(lambda: not w.tasks, timeout=10)
@gen_cluster(nthreads=[("127.0.0.1", 1)], client=True)
async def test_empty_plugin(c, s, w):
class EmptyPlugin:
pass
await c.register_worker_plugin(EmptyPlugin())
@gen_cluster(nthreads=[("127.0.0.1", 1)], client=True)
async def test_default_name(c, s, w):
class MyCustomPlugin(WorkerPlugin):
pass
await c.register_worker_plugin(MyCustomPlugin())
assert len(w.plugins) == 1
assert next(iter(w.plugins)).startswith("MyCustomPlugin-")
@gen_cluster(client=True, nthreads=[("", 1)])
async def test_assert_no_warning_no_overload(c, s, a):
"""Assert we do not receive a deprecation warning if we do not overload any
methods
"""
class Dummy(WorkerPlugin):
pass
with warnings.catch_warnings(record=True) as record:
await c.register_worker_plugin(Dummy())
assert await c.submit(inc, 1, key="x") == 2
while "x" in a.tasks:
await asyncio.sleep(0.01)
assert not record
@gen_cluster(nthreads=[("127.0.0.1", 1)], client=True)
async def test_WorkerPlugin_overwrite(c, s, w):
class MyCustomPlugin(WorkerPlugin):
name = "custom"
def setup(self, worker):
self.worker = worker
self.worker.foo = 0
def transition(self, *args, **kwargs):
self.worker.foo = 123
def teardown(self, worker):
del self.worker.foo
await c.register_worker_plugin(MyCustomPlugin())
assert w.foo == 0
await c.submit(inc, 0)
assert w.foo == 123
while s.tasks or w.tasks:
await asyncio.sleep(0.01)
class MyCustomPlugin(WorkerPlugin):
name = "custom"
def setup(self, worker):
self.worker = worker
self.worker.bar = 0
def transition(self, *args, **kwargs):
self.worker.bar = 456
def teardown(self, worker):
del self.worker.bar
await c.register_worker_plugin(MyCustomPlugin())
assert not hasattr(w, "foo")
assert w.bar == 0
await c.submit(inc, 0)
assert w.bar == 456
|
py | 1a3b88cec47ebb84e9728b3f524a35759362f266 | # -*- coding: utf-8 -*-
#
# Unless explicitly stated otherwise all files in this repository are licensed
# under the Apache 2 License.
#
# This product includes software developed at Datadog
# (https://www.datadoghq.com/).
#
# Copyright 2018 Datadog, Inc.
#
"""Change number of characters for issues/prs.
Revision ID: e415dc8c4f46
Revises: d0382d9c12f2
Create Date: 2018-03-29 14:10:57.829813
"""
from alembic import op
import sqlalchemy as sa
revision = 'e415dc8c4f46'
down_revision = 'd0382d9c12f2'
def upgrade():
op.alter_column(
'issues', 'name',
existing_type=sa.String(length=64),
type_=sa.Text(),
existing_nullable=True
)
op.alter_column(
'issues', 'url',
existing_type=sa.String(length=64),
type_=sa.Text(),
existing_nullable=True
)
op.alter_column(
'pull_requests', 'name',
existing_type=sa.String(length=64),
type_=sa.Text(),
existing_nullable=True
)
op.alter_column(
'pull_requests', 'url',
existing_type=sa.String(length=64),
type_=sa.Text(),
existing_nullable=True
)
def downgrade():
op.alter_column(
'pull_requests', 'url',
existing_type=sa.Text(),
type_=sa.String(length=64),
existing_nullable=True
)
op.alter_column(
'pull_requests', 'name',
existing_type=sa.Text(),
type_=sa.String(length=64),
existing_nullable=True
)
op.alter_column(
'issues', 'url',
existing_type=sa.Text(),
type_=sa.String(length=64),
existing_nullable=True
)
op.alter_column(
'issues', 'name',
existing_type=sa.Text(),
type_=sa.String(length=64),
existing_nullable=True
)
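# Illustrative note (not part of the original migration): a migration like this is
# typically applied with the Alembic CLI, e.g.
#
#     alembic upgrade e415dc8c4f46    # or `alembic upgrade head`
#     alembic downgrade d0382d9c12f2  # revert to the previous revision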
|
py | 1a3b88e18e38b88d75ad17a0bb6a2965d1e60406 | import unittest
import paddle.v2.fluid.core as core
from paddle.v2.fluid.executor import Executor
import paddle.v2.fluid.layers as layers
from paddle.v2.fluid.backward import append_backward_ops
from paddle.v2.fluid.framework import g_main_program
import numpy
class TestShrinkRNNMemory(unittest.TestCase):
def test_shrink_rnn_memory(self):
x = layers.data('x', shape=[100], data_type='float32')
x.stop_gradient = False
table = layers.lod_rank_table(x=x)
i = layers.zeros(dtype='int64', shape=[1])
mem1 = layers.shrink_memory(x=x, i=i, table=table)
i = layers.increment(x=i)
i.stop_gradient = True
mem2 = layers.shrink_memory(x=mem1, i=i, table=table)
i = layers.increment(x=i)
i.stop_gradient = True
mem3 = layers.shrink_memory(x=mem2, i=i, table=table)
cpu = core.CPUPlace()
tensor = core.LoDTensor()
tensor.set_lod([[0, 2, 5, 6]])
tensor_np = numpy.random.random(size=(3, 100)).astype('float32')
tensor.set(tensor_np, cpu)
exe = Executor(cpu)
outs = map(numpy.array,
exe.run(feed={'x': tensor}, fetch_list=[mem1, mem2, mem3]))
self.assertTrue(numpy.allclose(tensor_np[0:3], outs[0]))
self.assertTrue(numpy.allclose(tensor_np[0:2], outs[1]))
self.assertTrue(numpy.allclose(tensor_np[0:1], outs[2]))
mem3_mean = layers.mean(x=mem3)
append_backward_ops(loss=mem3_mean)
x_grad = map(numpy.array,
exe.run(feed={'x': tensor},
fetch_list=[
g_main_program.global_block().var('x@GRAD')
]))[0]
self.assertAlmostEqual(1.0, x_grad.sum(), delta=0.1)
if __name__ == '__main__':
unittest.main()
|
py | 1a3b89f6666e447fdede7a3413fe51a6b591e768 | #!/usr/bin/env python
# coding=utf-8
"""
Ant Group
Copyright (c) 2004-2021 All Rights Reserved.
------------------------------------------------------
File Name : lr_train_and_predict.py
Author : Qizhi Zhang
Email: [email protected]
Create Time : 2021/5/21 10:13 AM
Description : describes what the main function of this file does
"""
from stensorflow.engine.start_server import start_local_server, start_client
import tensorflow as tf
from stensorflow.global_var import StfConfig
from stensorflow.basic.basic_class.private import PrivateTensor
from stensorflow.ml.logistic_regression import LogisticRegression
import numpy as np
import random
import time
random.seed(0)
"""
An example of training an LR model on a dataset with 291 features and predicting using
this model.
The features are in the party L, the label is in the party R.
"""
#start_local_server(config_file="../conf/config_ym.json")
start_local_server(config_file="../conf/config_epsilon.json")
#start_client(config_file="../conf/config_ym.json", job_name="workerR")
matchColNum = 0
featureNumX = 3000
featureNumY = 0
record_num = 10
epoch = 100
batch_size = 2
learning_rate = 0.01
clip_value = 5.0
train_batch_num = epoch * record_num // batch_size + 1
pred_record_num = 10
pred_batch_num = pred_record_num // batch_size + 1
# -------------define a private tensor x_train of party L and a private tensor y_train on the party R
x_train = PrivateTensor(owner='L')
y_train = PrivateTensor(owner='R')
format_x = [["a"]] * matchColNum + [[0.2]] * featureNumX
format_y = [["a"]] * matchColNum + [[0.3]] * featureNumY + [[1.0]]
# ----------------- load data from files -------------------
x_train.load_from_file(path=StfConfig.train_file_onL,
record_defaults=format_x, batch_size=batch_size, repeat=epoch + 2, skip_col_num=matchColNum,
clip_value=clip_value, skip_row_num=0)
y_train.load_from_file(path=StfConfig.train_file_onR,
record_defaults=format_y, batch_size=batch_size, repeat=epoch + 2, skip_col_num=matchColNum,
clip_value=clip_value, skip_row_num=0)
print("StfConfig.parties=", StfConfig.parties)
# ----------- build a LR model ---------------
model = LogisticRegression(num_features=featureNumX + featureNumY, learning_rate=learning_rate)
# -------------start a tensorflow session, and initialize all variables -----------------
sess = tf.compat.v1.Session(StfConfig.target)
init_op = tf.compat.v1.global_variables_initializer()
sess.run(init_op)
# -------------train the model ------------------------
start_time = time.time()
model.fit(sess=sess, x=x_train, y=y_train, num_batches=train_batch_num)
print("train time=", time.time()-start_time)
save_op = model.save(model_file_path="./")
sess.run(save_op)
# ------------define the private tensors for test dataset ----------------
x_test = PrivateTensor(owner='L')
y_test = PrivateTensor(owner='R')
x_test.load_from_file(path=StfConfig.pred_file_onL,
record_defaults=format_x, batch_size=batch_size, repeat=2, skip_col_num=matchColNum,
clip_value=clip_value, skip_row_num=0)
id = y_test.load_from_file_withid(path=StfConfig.pred_file_onR,
record_defaults=format_y, batch_size=batch_size, repeat=2,
id_col_num=matchColNum, clip_value=clip_value, skip_row_num=0)
# --------------predict --------------
model.predict(id, x_test, pred_batch_num, sess)
sess.close()
|
py | 1a3b8a3b6566f8022e1576c01d5728cd5e699f10 | # https://math.stackexchange.com/questions/4231713/has-anyone-ever-attempted-to-find-all-splits-of-a-rectangle-into-smaller-rectang
# from random import randint
from numpy import random
# from pymclevel.box import BoundingBox
def make2dList(nRows, nCols):
newList = []
for row in xrange(nRows):
# give each new row an empty list
newList.append([])
for col in xrange(nCols):
# initialize with 0s
newList[row].append(0)
return newList
class RectangleSplitter:
def __init__(self, width, length):
self._groundMatrix = make2dList(width, length)
self.newRectMinWidth = 0 # min(width, 3)
self.newRectMinLength = 0 # min(length, 3)
# docs: https://numpy.org/doc/stable/reference/random/legacy.html#numpy.random.RandomState
# distribution graphs: https://statdist.com/
self.randomState = random.RandomState()
# def __init__(self, selectionBox) -> None:
# selectionBox = BoundingBox(selectionBox) # DEBUG: to get the class shown correctly in IDE
# self._groundMatrix = make2dList(selectionBox.width, selectionBox.length)
def Partition(self, partitionCount):
"""
example groundMatrix:
y0, y1, y2
x0 [0 1 2]
x1 [3 4 5]
x2 [6 7 8]
x3 [ 9 10 11]
Algorithm:
for n = partitionCount
- random left or top edge
- count number of distinct rectangles on that edge + at which index they start
- random amount of rect to use (full length)
- push border random % amount in => parameter
- fill with new index
"""
for n in xrange(1, partitionCount):
self.CalculatePartition(n)
return self._groundMatrix
def CalculatePartition(self, n):
print("partition: " + str(n))
# left = self.randomState.randint(0, 2)
left = n % 2
leftRectStartList = self.GetListOfLeftBorderRectangleStarts()
leftRectBorderCount = len(leftRectStartList)
topRectStartList = self.GetListOfTopBorderRectangleStarts()
topRectBorderCount = len(topRectStartList)
# add the end of base rectangle as last elements:
topRectStartList.append(len(self._groundMatrix[0]))
leftRectStartList.append(len(self._groundMatrix))
# print("left List: " + str(leftRectBorderCount))
# print(leftRectStartList)
# print("top List: " + str(topRectBorderCount))
# print(topRectStartList)
if left == 1:
# push from left
rectIndex = self.GetRandomPushy(0, topRectBorderCount)
min_width = 0 # max(self.newRectMinWidth, leftRectStartList[0]-1)
newRectMaxX = self.GetRandomNormal(min_width, leftRectStartList[0] - 1)
newRectMaxY = topRectStartList[rectIndex]-1 # next rect start is the max
# print("push from left to x/y: " + str(newRectMaxX) + "/" + str(newRectMaxY))
elif left == 0:
# push from top
rectIndex = self.GetRandomPushy(0, leftRectBorderCount)
newRectMaxX = leftRectStartList[rectIndex]-1 # next rect start is the max
min_width = 0 # max(self.newRectMinWidth, leftRectStartList[0] - 1)
newRectMaxY = self.GetRandomNormal(min_width, topRectStartList[0] - 1)
# print("push from top to x/y: " + str(newRectMaxX) + "/" + str(newRectMaxY))
self.FillNextPartition(n, newRectMaxX, newRectMaxY)
# print(self._groundMatrix)
def FillNextPartition(self, partitionId, maxX, maxY):
for x in xrange(0, maxX + 1):
for z in xrange(0, maxY + 1):
self._groundMatrix[x][z] = partitionId
def GetListOfTopBorderRectangleStarts(self):
count = []
lastRectangleId = -1
for yi, y in enumerate(self._groundMatrix[0]):
if y != lastRectangleId:
count.append(yi)
lastRectangleId = y
count.pop(0) # remove first change
return count
def GetListOfLeftBorderRectangleStarts(self):
count = []
lastRectangleId = -1
for yi, y in enumerate(self._groundMatrix):
if y[0] != lastRectangleId:
count.append(yi)
lastRectangleId = y[0]
count.pop(0) # remove first change
return count
def GetRandomPushy(self, start, end):
if end <= start:
return start
value = self.randomState.beta(4, 2)
# value = self.randomState.normal(0.5, 0.1)
# value = self.randomState.beta(1, 1) # uniform
#print ("rand value between " + str(start) + " end " + str(end) + " is: " + str(value))
value = int(round(start + (value / float(1 / float(end - start)))))
#print ("rand value between " + str(start) + " end " + str(end) + " is: " + str(value))
return value
def GetRandomNormal(self, start, end):
if end <= start:
return start
# value = self.randomState.beta(2, 4)
value = self.randomState.normal(0.5, 0.1)
# value = self.randomState.beta(1, 1) # uniform
#print ("rand value between " + str(start) + " end " + str(end) + " is: " + str(value))
value = int(round(start + (value / float(1 / float(end - start)))))
#print ("rand value between " + str(start) + " end " + str(end) + " is: " + str(value))
return value
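# A minimal usage sketch, added for illustration (not part of the original module);
# the code above uses xrange, so this assumes Python 2:
#
#     splitter = RectangleSplitter(8, 12)
#     ground = splitter.Partition(4)   # 8x12 matrix filled with partition ids 0..3
#     for row in ground:
#         print(row)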
|
py | 1a3b8adee387aa022ecc9ddd490e42edaf820522 | # Generated by Django 3.1 on 2021-03-05 08:03
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('members', '0001_initial'),
]
operations = [
migrations.RemoveField(
model_name='member',
name='notification',
),
]
|
py | 1a3b8b05b321e2faa4e2a26ff08017c2056bba0a | import pathlib
from simtk.openmm.app import *
from simtk.openmm import *
from simtk.unit import *
from sys import stdout
def run_sim(top_path, coord_path, output_path, sim_time, sim_num):
print("Loading amber files...")
prmtop = AmberPrmtopFile(str(top_path))
inpcrd = AmberInpcrdFile(str(coord_path))
print("Loading amber files... Done.")
print("Creating system...")
system = prmtop.createSystem(
nonbondedMethod=PME, nonbondedCutoff=1 * nanometer, constraints=HBonds
)
system.addForce(MonteCarloBarostat(1 * bar, 300 * kelvin))
integrator = LangevinIntegrator(300 * kelvin, 1 / picosecond, 2 * femtosecond)
platform = Platform.getPlatformByName("CUDA")
simulation = Simulation(prmtop.topology, system, integrator, platform)
simulation.context.setPositions(inpcrd.positions)
if inpcrd.boxVectors is not None:
simulation.context.setPeriodicBoxVectors(*inpcrd.boxVectors)
print("Creating system... Done.")
# Minimise energy
print("Minimising energy...")
simulation.minimizeEnergy()
print("Minimising energy... Done.")
# Setup logging for NPT
log_frequency = 100_000
simulation.reporters.append(PDBReporter(
str(output_path / f"npt_production_{sim_num:02d}.pdb"),log_frequency))
simulation.reporters.append(
StateDataReporter(
str(output_path / f"npt_production_{sim_num:02d}.csv"),
log_frequency,
step=True,
potentialEnergy=True,
kineticEnergy=True,
temperature=True,
volume=True,
speed=True,
time=True,
)
)
# NPT production run (with a barostat for constant pressure rather than volume)
print("Running NPT production...")
for ns_passed in range(1, sim_time + 1):
simulation.step(500_000) # run simulation for 500,000 steps, 1ns
if not (ns_passed % 5): # true every 5 ns, because ns_passed % 5 == 0
simulation.saveState(str(output_path / f"npt_production_{ns_passed}ns.xml"))
simulation.saveCheckpoint(str(output_path / f"npt_production_{ns_passed}ns.chk"))
print(f"Completed {ns_passed}ns...")
print("Running NPT production... Done.")
return
if __name__ == '__main__':
top_path = pathlib.Path("enhgfp_onby_t3p.parm7")
coord_path = pathlib.Path("enhgfp_onby_t3p.rst7")
sim_time = 20
for i in range(1, 21):
sim_num = i
output_path = pathlib.Path(f"simulation_{i:02d}/") # gives the number "i" with a 0 in front if single digit
output_path.mkdir()
print(f"Starting simulation {i}...")
run_sim(top_path, coord_path, output_path, sim_time, sim_num)
print(f"Completed simulation {i}.")
|
py | 1a3b8cb6add7df3b0e1b631da890784f4a53caab | #!/usr/bin/env python3
# Software License Agreement (BSD License)
#
# Copyright (c) 2021, UFACTORY, Inc.
# All rights reserved.
#
# Author: Vinman <[email protected]> <[email protected]>
from launch import LaunchDescription
from launch.actions import IncludeLaunchDescription
from launch.launch_description_sources import PythonLaunchDescriptionSource
from launch.substitutions import LaunchConfiguration, PathJoinSubstitution
from launch_ros.substitutions import FindPackageShare
def generate_launch_description():
prefix = LaunchConfiguration('prefix', default='')
hw_ns = LaunchConfiguration('hw_ns', default='xarm')
limited = LaunchConfiguration('limited', default=False)
effort_control = LaunchConfiguration('effort_control', default=False)
velocity_control = LaunchConfiguration('velocity_control', default=False)
add_gripper = LaunchConfiguration('add_gripper', default=False)
add_vacuum_gripper = LaunchConfiguration('add_vacuum_gripper', default=False)
add_other_geometry = LaunchConfiguration('add_other_geometry', default=False)
geometry_type = LaunchConfiguration('geometry_type', default='box')
geometry_mass = LaunchConfiguration('geometry_mass', default=0.1)
geometry_height = LaunchConfiguration('geometry_height', default=0.1)
geometry_radius = LaunchConfiguration('geometry_radius', default=0.1)
geometry_length = LaunchConfiguration('geometry_length', default=0.1)
geometry_width = LaunchConfiguration('geometry_width', default=0.1)
geometry_mesh_filename = LaunchConfiguration('geometry_mesh_filename', default='')
geometry_mesh_origin_xyz = LaunchConfiguration('geometry_mesh_origin_xyz', default='"0 0 0"')
geometry_mesh_origin_rpy = LaunchConfiguration('geometry_mesh_origin_rpy', default='"0 0 0"')
geometry_mesh_tcp_xyz = LaunchConfiguration('geometry_mesh_tcp_xyz', default='"0 0 0"')
geometry_mesh_tcp_rpy = LaunchConfiguration('geometry_mesh_tcp_rpy', default='"0 0 0"')
# xarm moveit gazebo launch
# xarm_moveit_config/launch/_xarm_moveit_gazobo.launch.py
xarm_moveit_gazebo_launch = IncludeLaunchDescription(
PythonLaunchDescriptionSource(PathJoinSubstitution([FindPackageShare('xarm_moveit_config'), 'launch', '_xarm_moveit_gazebo.launch.py'])),
launch_arguments={
'prefix': prefix,
'hw_ns': hw_ns,
'limited': limited,
'effort_control': effort_control,
'velocity_control': velocity_control,
'add_gripper': add_gripper,
'add_vacuum_gripper': add_vacuum_gripper,
'dof': '7',
'no_gui_ctrl': 'false',
'add_other_geometry': add_other_geometry,
'geometry_type': geometry_type,
'geometry_mass': geometry_mass,
'geometry_height': geometry_height,
'geometry_radius': geometry_radius,
'geometry_length': geometry_length,
'geometry_width': geometry_width,
'geometry_mesh_filename': geometry_mesh_filename,
'geometry_mesh_origin_xyz': geometry_mesh_origin_xyz,
'geometry_mesh_origin_rpy': geometry_mesh_origin_rpy,
'geometry_mesh_tcp_xyz': geometry_mesh_tcp_xyz,
'geometry_mesh_tcp_rpy': geometry_mesh_tcp_rpy,
}.items(),
)
return LaunchDescription([
xarm_moveit_gazebo_launch
])
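# Hypothetical invocation sketch (the launch file and package names are assumptions,
# not taken from the source); a launch description like the one above is normally
# started with the ros2 CLI, e.g.
#
#     ros2 launch xarm_moveit_config xarm7_moveit_gazebo.launch.py add_gripper:=true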
|
py | 1a3b8cebaa4a1f3388150c0c7fd90019c910af0a | # Copyright (c) 2021 AccelByte Inc. All Rights Reserved.
# This is licensed software from AccelByte Inc, for limitations
# and restrictions contact your company contract manager.
#
# Code generated. DO NOT EDIT!
# template_file: python-cli-command.j2
# justice-cloudsave-service (3.0.1)
# pylint: disable=duplicate-code
# pylint: disable=line-too-long
# pylint: disable=missing-function-docstring
# pylint: disable=missing-module-docstring
# pylint: disable=too-many-arguments
# pylint: disable=too-many-branches
# pylint: disable=too-many-instance-attributes
# pylint: disable=too-many-lines
# pylint: disable=too-many-locals
# pylint: disable=too-many-public-methods
# pylint: disable=too-many-return-statements
# pylint: disable=too-many-statements
# pylint: disable=unused-import
import json
import yaml
from typing import Optional
import click
from .._utils import login_as as login_as_internal
from .._utils import to_dict
from accelbyte_py_sdk.api.cloudsave import admin_put_player_record_handler_v1 as admin_put_player_record_handler_v1_internal
from accelbyte_py_sdk.api.cloudsave.models import ModelsPlayerRecordRequest
from accelbyte_py_sdk.api.cloudsave.models import ModelsResponseError
@click.command()
@click.argument("body", type=str)
@click.argument("key", type=str)
@click.argument("user_id", type=str)
@click.option("--namespace", type=str)
@click.option("--login_as", type=click.Choice(["client", "user"], case_sensitive=False))
@click.option("--login_with_auth", type=str)
@click.option("--doc", type=bool)
def admin_put_player_record_handler_v1(
body: str,
key: str,
user_id: str,
namespace: Optional[str] = None,
login_as: Optional[str] = None,
login_with_auth: Optional[str] = None,
doc: Optional[bool] = None,
):
if doc:
click.echo(admin_put_player_record_handler_v1_internal.__doc__)
return
x_additional_headers = None
if login_with_auth:
x_additional_headers = {
"Authorization": login_with_auth
}
else:
login_as_internal(login_as)
if body is not None:
try:
body_json = json.loads(body)
body = ModelsPlayerRecordRequest.create_from_dict(body_json)
except ValueError as e:
raise Exception(f"Invalid JSON for 'body'. {str(e)}") from e
result, error = admin_put_player_record_handler_v1_internal(
body=body,
key=key,
user_id=user_id,
namespace=namespace,
x_additional_headers=x_additional_headers,
)
if error:
raise Exception(f"adminPutPlayerRecordHandlerV1 failed: {str(error)}")
click.echo(yaml.safe_dump(to_dict(result), sort_keys=False))
admin_put_player_record_handler_v1.operation_id = "adminPutPlayerRecordHandlerV1"
admin_put_player_record_handler_v1.is_deprecated = False
|
py | 1a3b8e28b2616b8c27e3aa2743fa56f1f6327aaa | """
Holds classes and utility methods related to build graph
"""
import copy
import logging
import os
import threading
from pathlib import Path
from typing import Sequence, Tuple, List, Any, Optional, Dict, cast, NamedTuple
from copy import deepcopy
from uuid import uuid4
import tomlkit
from samcli.lib.build.exceptions import InvalidBuildGraphException
from samcli.lib.providers.provider import Function, LayerVersion
from samcli.lib.samlib.resource_metadata_normalizer import (
SAM_RESOURCE_ID_KEY,
SAM_IS_NORMALIZED,
)
from samcli.lib.utils.packagetype import ZIP
from samcli.lib.utils.architecture import X86_64
LOG = logging.getLogger(__name__)
DEFAULT_BUILD_GRAPH_FILE_NAME = "build.toml"
DEFAULT_DEPENDENCIES_DIR = os.path.join(".aws-sam", "deps")
# field names for the toml table
PACKAGETYPE_FIELD = "packagetype"
CODE_URI_FIELD = "codeuri"
RUNTIME_FIELD = "runtime"
METADATA_FIELD = "metadata"
FUNCTIONS_FIELD = "functions"
SOURCE_HASH_FIELD = "source_hash"
MANIFEST_HASH_FIELD = "manifest_hash"
ENV_VARS_FIELD = "env_vars"
LAYER_NAME_FIELD = "layer_name"
BUILD_METHOD_FIELD = "build_method"
COMPATIBLE_RUNTIMES_FIELD = "compatible_runtimes"
LAYER_FIELD = "layer"
ARCHITECTURE_FIELD = "architecture"
HANDLER_FIELD = "handler"
def _function_build_definition_to_toml_table(
function_build_definition: "FunctionBuildDefinition",
) -> tomlkit.api.Table:
"""
Converts given function_build_definition into toml table representation
Parameters
----------
function_build_definition: FunctionBuildDefinition
FunctionBuildDefinition which will be converted into toml table
Returns
-------
tomlkit.api.Table
toml table of FunctionBuildDefinition
"""
toml_table = tomlkit.table()
if function_build_definition.packagetype == ZIP:
toml_table[CODE_URI_FIELD] = function_build_definition.codeuri
toml_table[RUNTIME_FIELD] = function_build_definition.runtime
toml_table[ARCHITECTURE_FIELD] = function_build_definition.architecture
toml_table[HANDLER_FIELD] = function_build_definition.handler
if function_build_definition.source_hash:
toml_table[SOURCE_HASH_FIELD] = function_build_definition.source_hash
toml_table[MANIFEST_HASH_FIELD] = function_build_definition.manifest_hash
toml_table[PACKAGETYPE_FIELD] = function_build_definition.packagetype
toml_table[FUNCTIONS_FIELD] = [f.full_path for f in function_build_definition.functions]
if function_build_definition.metadata:
toml_table[METADATA_FIELD] = function_build_definition.metadata
if function_build_definition.env_vars:
toml_table[ENV_VARS_FIELD] = function_build_definition.env_vars
return toml_table
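# An illustrative sketch (values are hypothetical, not taken from the source): for a
# ZIP-packaged function, the table produced above would serialize into build.toml
# roughly as
#
#   [function_build_definitions.3d2b9c80-69a6-4cfe-9b78-cb442bc4cf80]
#   codeuri = "HelloWorldFunction"
#   runtime = "python3.9"
#   architecture = "x86_64"
#   handler = "app.lambda_handler"
#   manifest_hash = "..."
#   packagetype = "Zip"
#   functions = ["HelloWorldFunction"]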
def _toml_table_to_function_build_definition(uuid: str, toml_table: tomlkit.api.Table) -> "FunctionBuildDefinition":
"""
Converts given toml table into FunctionBuildDefinition instance
Parameters
----------
uuid: str
key of the function toml_table instance
toml_table: tomlkit.api.Table
function build definition as toml table
Returns
-------
FunctionBuildDefinition
FunctionBuildDefinition of given toml table
"""
function_build_definition = FunctionBuildDefinition(
toml_table.get(RUNTIME_FIELD),
toml_table.get(CODE_URI_FIELD),
toml_table.get(PACKAGETYPE_FIELD, ZIP),
toml_table.get(ARCHITECTURE_FIELD, X86_64),
dict(toml_table.get(METADATA_FIELD, {})),
toml_table.get(HANDLER_FIELD, ""),
toml_table.get(SOURCE_HASH_FIELD, ""),
toml_table.get(MANIFEST_HASH_FIELD, ""),
dict(toml_table.get(ENV_VARS_FIELD, {})),
)
function_build_definition.uuid = uuid
return function_build_definition
def _layer_build_definition_to_toml_table(layer_build_definition: "LayerBuildDefinition") -> tomlkit.api.Table:
"""
Converts given layer_build_definition into toml table representation
Parameters
----------
layer_build_definition: LayerBuildDefinition
LayerBuildDefinition which will be converted into toml table
Returns
-------
tomlkit.api.Table
toml table of LayerBuildDefinition
"""
toml_table = tomlkit.table()
toml_table[LAYER_NAME_FIELD] = layer_build_definition.full_path
toml_table[CODE_URI_FIELD] = layer_build_definition.codeuri
toml_table[BUILD_METHOD_FIELD] = layer_build_definition.build_method
toml_table[COMPATIBLE_RUNTIMES_FIELD] = layer_build_definition.compatible_runtimes
toml_table[ARCHITECTURE_FIELD] = layer_build_definition.architecture
if layer_build_definition.source_hash:
toml_table[SOURCE_HASH_FIELD] = layer_build_definition.source_hash
toml_table[MANIFEST_HASH_FIELD] = layer_build_definition.manifest_hash
if layer_build_definition.env_vars:
toml_table[ENV_VARS_FIELD] = layer_build_definition.env_vars
toml_table[LAYER_FIELD] = layer_build_definition.layer.full_path
return toml_table
def _toml_table_to_layer_build_definition(uuid: str, toml_table: tomlkit.api.Table) -> "LayerBuildDefinition":
"""
Converts given toml table into LayerBuildDefinition instance
Parameters
----------
uuid: str
key of the toml_table instance
toml_table: tomlkit.api.Table
layer build definition as toml table
Returns
-------
LayerBuildDefinition
LayerBuildDefinition of given toml table
"""
layer_build_definition = LayerBuildDefinition(
toml_table.get(LAYER_NAME_FIELD),
toml_table.get(CODE_URI_FIELD),
toml_table.get(BUILD_METHOD_FIELD),
toml_table.get(COMPATIBLE_RUNTIMES_FIELD),
toml_table.get(ARCHITECTURE_FIELD, X86_64),
toml_table.get(SOURCE_HASH_FIELD, ""),
toml_table.get(MANIFEST_HASH_FIELD, ""),
dict(toml_table.get(ENV_VARS_FIELD, {})),
)
layer_build_definition.uuid = uuid
return layer_build_definition
class BuildHashingInformation(NamedTuple):
"""
Holds hashing information for the source folder and the manifest file
"""
source_hash: str
manifest_hash: str
class BuildGraph:
"""
Contains list of build definitions, with ability to read and write them into build.toml file
"""
# private lock for build.toml reads and writes
__toml_lock = threading.Lock()
# global table build definitions key
FUNCTION_BUILD_DEFINITIONS = "function_build_definitions"
LAYER_BUILD_DEFINITIONS = "layer_build_definitions"
def __init__(self, build_dir: str) -> None:
# put build.toml file inside .aws-sam folder
self._filepath = Path(build_dir).parent.joinpath(DEFAULT_BUILD_GRAPH_FILE_NAME)
self._function_build_definitions: List["FunctionBuildDefinition"] = []
self._layer_build_definitions: List["LayerBuildDefinition"] = []
self._atomic_read()
def get_function_build_definitions(self) -> Tuple["FunctionBuildDefinition", ...]:
return tuple(self._function_build_definitions)
def get_layer_build_definitions(self) -> Tuple["LayerBuildDefinition", ...]:
return tuple(self._layer_build_definitions)
def get_function_build_definition_with_full_path(
self, function_full_path: str
) -> Optional["FunctionBuildDefinition"]:
"""
Returns FunctionBuildDefinition instance of given function logical id.
Parameters
----------
function_full_path : str
Function full path that will be searched in the function build definitions
Returns
-------
Optional[FunctionBuildDefinition]
If a function build definition found returns it, otherwise returns None
"""
for function_build_definition in self._function_build_definitions:
for build_definition_function in function_build_definition.functions:
if build_definition_function.full_path == function_full_path:
return function_build_definition
return None
def put_function_build_definition(
self, function_build_definition: "FunctionBuildDefinition", function: Function
) -> None:
"""
Puts the newly read function build definition into existing build graph.
If graph already contains a function build definition which is same as the newly passed one, then it will add
the function to the existing one, discarding the new one
If graph doesn't contain such unique function build definition, it will be added to the current build graph
Parameters
----------
function_build_definition: FunctionBuildDefinition
function build definition which is newly read from template.yaml file
function: Function
function details for this function build definition
"""
if function_build_definition in self._function_build_definitions:
previous_build_definition = self._function_build_definitions[
self._function_build_definitions.index(function_build_definition)
]
LOG.debug(
"Same function build definition found, adding function (Previous: %s, Current: %s, Function: %s)",
previous_build_definition,
function_build_definition,
function,
)
previous_build_definition.add_function(function)
else:
LOG.debug(
"Unique function build definition found, adding as new (Function Build Definition: %s, Function: %s)",
function_build_definition,
function,
)
function_build_definition.add_function(function)
self._function_build_definitions.append(function_build_definition)
def put_layer_build_definition(self, layer_build_definition: "LayerBuildDefinition", layer: LayerVersion) -> None:
"""
Puts the newly read layer build definition into existing build graph.
If the graph already contains a layer build definition which is the same as the newly passed one, then it will
add the layer to the existing one, discarding the new one.
If the graph doesn't contain such a unique layer build definition, it will be added to the current build graph
Parameters
----------
layer_build_definition: LayerBuildDefinition
layer build definition which is newly read from template.yaml file
layer: Layer
layer details for this layer build definition
"""
if layer_build_definition in self._layer_build_definitions:
previous_build_definition = self._layer_build_definitions[
self._layer_build_definitions.index(layer_build_definition)
]
LOG.debug(
"Same Layer build definition found, adding layer (Previous: %s, Current: %s, Layer: %s)",
previous_build_definition,
layer_build_definition,
layer,
)
previous_build_definition.layer = layer
else:
LOG.debug(
"Unique Layer build definition found, adding as new (Layer Build Definition: %s, Layer: %s)",
layer_build_definition,
layer,
)
layer_build_definition.layer = layer
self._layer_build_definitions.append(layer_build_definition)
def clean_redundant_definitions_and_update(self, persist: bool) -> None:
"""
Removes build definitions which don't have any function in them, which means these build definitions
are no longer used and can be deleted
If persist parameter is given True, build graph is written to .aws-sam/build.toml file
"""
self._function_build_definitions[:] = [
fbd for fbd in self._function_build_definitions if len(fbd.functions) > 0
]
self._layer_build_definitions[:] = [bd for bd in self._layer_build_definitions if bd.layer]
if persist:
self._atomic_write()
def update_definition_hash(self) -> None:
"""
Updates the build.toml file with the newest source_hash values of the partial build's definitions
This operation is atomic, that no other thread accesses build.toml
during the process of reading and modifying the hash value
"""
with BuildGraph.__toml_lock:
stored_function_definitions = copy.deepcopy(self._function_build_definitions)
stored_layer_definitions = copy.deepcopy(self._layer_build_definitions)
self._read()
function_content = BuildGraph._compare_hash_changes(
stored_function_definitions, self._function_build_definitions
)
layer_content = BuildGraph._compare_hash_changes(stored_layer_definitions, self._layer_build_definitions)
if function_content or layer_content:
self._write_source_hash(function_content, layer_content)
self._function_build_definitions = stored_function_definitions
self._layer_build_definitions = stored_layer_definitions
@staticmethod
def _compare_hash_changes(
input_list: Sequence["AbstractBuildDefinition"], compared_list: Sequence["AbstractBuildDefinition"]
) -> Dict[str, BuildHashingInformation]:
"""
Helper to compare the function and layer definition changes in hash value
Returns a dictionary that has uuid as key, updated hash value as value
"""
content = {}
for compared_def in compared_list:
for stored_def in input_list:
if stored_def == compared_def:
old_hash = compared_def.source_hash
updated_hash = stored_def.source_hash
old_manifest_hash = compared_def.manifest_hash
updated_manifest_hash = stored_def.manifest_hash
uuid = stored_def.uuid
if old_hash != updated_hash or old_manifest_hash != updated_manifest_hash:
content[uuid] = BuildHashingInformation(updated_hash, updated_manifest_hash)
compared_def.download_dependencies = old_manifest_hash != updated_manifest_hash
return content
def _write_source_hash(
self, function_content: Dict[str, BuildHashingInformation], layer_content: Dict[str, BuildHashingInformation]
) -> None:
"""
Helper to write source_hash values to build.toml file
"""
document = {}
if not self._filepath.exists():
open(self._filepath, "a+").close() # pylint: disable=consider-using-with
txt = self._filepath.read_text()
# .loads() returns a TOMLDocument,
# and it behaves like a standard dictionary according to https://github.com/sdispater/tomlkit.
# in tomlkit 0.7.2, the types are broken (tomlkit#128, #130, #134) so here we convert it to Dict.
document = cast(Dict, tomlkit.loads(txt))
for function_uuid, hashing_info in function_content.items():
if function_uuid in document.get(BuildGraph.FUNCTION_BUILD_DEFINITIONS, {}):
function_build_definition = document[BuildGraph.FUNCTION_BUILD_DEFINITIONS][function_uuid]
function_build_definition[SOURCE_HASH_FIELD] = hashing_info.source_hash
function_build_definition[MANIFEST_HASH_FIELD] = hashing_info.manifest_hash
LOG.info(
"Updated source_hash and manifest_hash field in build.toml for function with UUID %s", function_uuid
)
for layer_uuid, hashing_info in layer_content.items():
if layer_uuid in document.get(BuildGraph.LAYER_BUILD_DEFINITIONS, {}):
layer_build_definition = document[BuildGraph.LAYER_BUILD_DEFINITIONS][layer_uuid]
layer_build_definition[SOURCE_HASH_FIELD] = hashing_info.source_hash
layer_build_definition[MANIFEST_HASH_FIELD] = hashing_info.manifest_hash
LOG.info("Updated source_hash and manifest_hash field in build.toml for layer with UUID %s", layer_uuid)
self._filepath.write_text(tomlkit.dumps(document)) # type: ignore
def _read(self) -> None:
"""
Reads build.toml file into array of build definition
Each build definition will have an empty function list, which will be populated from the current template.yaml file
"""
LOG.debug("Instantiating build definitions")
self._function_build_definitions = []
self._layer_build_definitions = []
document = {}
try:
txt = self._filepath.read_text()
# .loads() returns a TOMLDocument,
# and it behaves like a standard dictionary according to https://github.com/sdispater/tomlkit.
# in tomlkit 0.7.2, the types are broken (tomlkit#128, #130, #134) so here we convert it to Dict.
document = cast(Dict, tomlkit.loads(txt))
except OSError:
LOG.debug("No previous build graph found, generating new one")
function_build_definitions_table = document.get(BuildGraph.FUNCTION_BUILD_DEFINITIONS, {})
for function_build_definition_key in function_build_definitions_table:
function_build_definition = _toml_table_to_function_build_definition(
function_build_definition_key, function_build_definitions_table[function_build_definition_key]
)
self._function_build_definitions.append(function_build_definition)
layer_build_definitions_table = document.get(BuildGraph.LAYER_BUILD_DEFINITIONS, {})
for layer_build_definition_key in layer_build_definitions_table:
layer_build_definition = _toml_table_to_layer_build_definition(
layer_build_definition_key, layer_build_definitions_table[layer_build_definition_key]
)
self._layer_build_definitions.append(layer_build_definition)
def _atomic_read(self) -> None:
"""
Performs the _read() method with a global lock acquired
It makes sure no other thread accesses build.toml when a read is happening
"""
with BuildGraph.__toml_lock:
self._read()
def _write(self) -> None:
"""
Writes build definition details into build.toml file, which would be used by the next build.
build.toml file will contain the same information as build graph,
function details will only be preserved as function names
layer details will only be preserved as layer names
"""
# convert build definition list into toml table
function_build_definitions_table = tomlkit.table()
for function_build_definition in self._function_build_definitions:
build_definition_as_table = _function_build_definition_to_toml_table(function_build_definition)
function_build_definitions_table.add(function_build_definition.uuid, build_definition_as_table)
layer_build_definitions_table = tomlkit.table()
for layer_build_definition in self._layer_build_definitions:
build_definition_as_table = _layer_build_definition_to_toml_table(layer_build_definition)
layer_build_definitions_table.add(layer_build_definition.uuid, build_definition_as_table)
# create toml document and add build definitions
document = tomlkit.document()
document.add(tomlkit.comment("This file is auto generated by SAM CLI build command"))
# we need to cast `Table` to `Item` because of tomlkit#135.
document.add(BuildGraph.FUNCTION_BUILD_DEFINITIONS, cast(tomlkit.items.Item, function_build_definitions_table))
document.add(BuildGraph.LAYER_BUILD_DEFINITIONS, cast(tomlkit.items.Item, layer_build_definitions_table))
if not self._filepath.exists():
open(self._filepath, "a+").close() # pylint: disable=consider-using-with
self._filepath.write_text(tomlkit.dumps(document))
def _atomic_write(self) -> None:
"""
Performs the _write() method with a global lock acquired
It makes sure no other thread accesses build.toml when a write is happening
"""
with BuildGraph.__toml_lock:
self._write()
class AbstractBuildDefinition:
"""
Abstract class for build definition
Build definition holds information about each unique build
"""
def __init__(
self, source_hash: str, manifest_hash: str, env_vars: Optional[Dict] = None, architecture: str = X86_64
) -> None:
self.uuid = str(uuid4())
self.source_hash = source_hash
self.manifest_hash = manifest_hash
self._env_vars = env_vars if env_vars else {}
self.architecture = architecture
# following properties are used during build time and they don't serialize into build.toml file
self.download_dependencies: bool = True
@property
def dependencies_dir(self) -> str:
return str(os.path.join(DEFAULT_DEPENDENCIES_DIR, self.uuid))
@property
def env_vars(self) -> Dict:
return deepcopy(self._env_vars)
class LayerBuildDefinition(AbstractBuildDefinition):
"""
LayerBuildDefinition holds information about each unique layer build
"""
def __init__(
self,
full_path: str,
codeuri: Optional[str],
build_method: Optional[str],
compatible_runtimes: Optional[List[str]],
architecture: str,
source_hash: str = "",
manifest_hash: str = "",
env_vars: Optional[Dict] = None,
):
super().__init__(source_hash, manifest_hash, env_vars, architecture)
self.full_path = full_path
self.codeuri = codeuri
self.build_method = build_method
self.compatible_runtimes = compatible_runtimes
# Note(xinhol): In our code, we assume "layer" is never None. We should refactor
# this and move "layer" out of LayerBuildDefinition to take advantage of type check.
self.layer: LayerVersion = None # type: ignore
def __str__(self) -> str:
return (
f"LayerBuildDefinition({self.full_path}, {self.codeuri}, {self.source_hash}, {self.uuid}, "
f"{self.build_method}, {self.compatible_runtimes}, {self.architecture}, {self.env_vars})"
)
def __eq__(self, other: Any) -> bool:
"""
Checks equality of the layer build definition
Parameters
----------
other: Any
other layer build definition to compare
Returns
-------
bool
True if both layer build definitions have the same following properties, False otherwise
"""
if not isinstance(other, LayerBuildDefinition):
return False
return (
self.full_path == other.full_path
and self.codeuri == other.codeuri
and self.build_method == other.build_method
and self.compatible_runtimes == other.compatible_runtimes
and self.env_vars == other.env_vars
and self.architecture == other.architecture
)
class FunctionBuildDefinition(AbstractBuildDefinition):
"""
FunctionBuildDefinition holds information about each unique function build
"""
def __init__(
self,
runtime: Optional[str],
codeuri: Optional[str],
packagetype: str,
architecture: str,
metadata: Optional[Dict],
handler: Optional[str],
source_hash: str = "",
manifest_hash: str = "",
env_vars: Optional[Dict] = None,
) -> None:
super().__init__(source_hash, manifest_hash, env_vars, architecture)
self.runtime = runtime
self.codeuri = codeuri
self.packagetype = packagetype
self.handler = handler
# Skip SAM Added metadata properties
metadata_copied = deepcopy(metadata) if metadata else {}
metadata_copied.pop(SAM_RESOURCE_ID_KEY, "")
metadata_copied.pop(SAM_IS_NORMALIZED, "")
self.metadata = metadata_copied
self.functions: List[Function] = []
def add_function(self, function: Function) -> None:
self.functions.append(function)
def get_function_name(self) -> str:
self._validate_functions()
return self.functions[0].name
def get_handler_name(self) -> Optional[str]:
self._validate_functions()
return self.functions[0].handler
def get_full_path(self) -> str:
"""
Return the build identifier of the first function
"""
self._validate_functions()
return self.functions[0].full_path
def get_build_dir(self, artifact_root_dir: str) -> str:
"""
Return the directory path relative to root build directory
"""
self._validate_functions()
return self.functions[0].get_build_dir(artifact_root_dir)
def _validate_functions(self) -> None:
if not self.functions:
raise InvalidBuildGraphException("Build definition doesn't have any function definition to build")
def __str__(self) -> str:
return (
"BuildDefinition("
f"{self.runtime}, {self.codeuri}, {self.packagetype}, {self.source_hash}, "
f"{self.uuid}, {self.metadata}, {self.env_vars}, {self.architecture}, "
f"{[f.functionname for f in self.functions]})"
)
def __eq__(self, other: Any) -> bool:
"""
Checks equality of the function build definition
Parameters
----------
other: Any
other function build definition to compare
Returns
-------
bool
True if both function build definitions have the same following properties, False otherwise
"""
if not isinstance(other, FunctionBuildDefinition):
return False
# each build with custom Makefile definition should be handled separately
if self.metadata and self.metadata.get("BuildMethod", None) == "makefile":
return False
if self.metadata and self.metadata.get("BuildMethod", None) == "esbuild":
# For esbuild, we need to check if handlers within the same CodeUri are the same
# if they are different, it should create a separate build definition
if self.handler != other.handler:
return False
return (
self.runtime == other.runtime
and self.codeuri == other.codeuri
and self.packagetype == other.packagetype
and self.metadata == other.metadata
and self.env_vars == other.env_vars
and self.architecture == other.architecture
)
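# Illustrative note (added, not part of the original module): two FunctionBuildDefinition
# instances that compare equal are treated as one shared build. The argument values below
# are hypothetical placeholders.
#   a = FunctionBuildDefinition("python3.9", "src/", "Zip", X86_64, None, "app.handler")
#   b = FunctionBuildDefinition("python3.9", "src/", "Zip", X86_64, None, "app.handler")
#   assert a == b  # same runtime/codeuri/packagetype/metadata/env_vars/architecture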
|
py | 1a3b8e3fab897a8030ffafff3e32fd6c13fbcc43 | '''
userManager for Docklet
provide a class for managing users and usergroups in Docklet
Warning: in some early versions, "token" stand for the instance of class model.User
now it stands for a string that can be parsed to get that instance.
in all functions start with "@administration_required" or "@administration_or_self_required", "token" is the instance
Original author: Liu Peidong
'''
from model import db, User, UserGroup, Notification, UserUsage
from functools import wraps
import os, subprocess, math
import hashlib
import pam
from base64 import b64encode
import env
from settings import settings
import smtplib
from email.mime.text import MIMEText
from email.mime.multipart import MIMEMultipart
from email.header import Header
from datetime import datetime
import json
from log import logger
from lvmtool import *
PAM = pam.pam()
fspath = env.getenv('FS_PREFIX')
data_quota = env.getenv('DATA_QUOTA')
data_quota_cmd = env.getenv('DATA_QUOTA_CMD')
if (env.getenv('EXTERNAL_LOGIN').lower() == 'true'):
from plugin import external_receive
def administration_required(func):
@wraps(func)
def wrapper(*args, **kwargs):
if ( ('cur_user' in kwargs) == False):
return {"success":'false', "reason":"Cannot get cur_user"}
cur_user = kwargs['cur_user']
if ((cur_user.user_group == 'admin') or (cur_user.user_group == 'root')):
return func(*args, **kwargs)
else:
return {"success": 'false', "reason": 'Unauthorized Action'}
return wrapper
def administration_or_self_required(func):
@wraps(func)
def wrapper(*args, **kwargs):
if ( (not ('cur_user' in kwargs)) or (not ('user' in kwargs))):
return {"success":'false', "reason":"Cannot get cur_user or user"}
cur_user = kwargs['cur_user']
user = kwargs['user']
if ((cur_user.user_group == 'admin') or (cur_user.user_group == 'root') or (cur_user.username == user.username)):
return func(*args, **kwargs)
else:
return {"success": 'false', "reason": 'Unauthorized Action'}
return wrapper
def token_required(func):
@wraps(func)
def wrapper(*args, **kwargs):
if ( ('cur_user' in kwargs) == False):
return {"success":'false', "reason":"Cannot get cur_user"}
return func(*args, **kwargs)
return wrapper
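# Usage note (added for clarity, not in the original source): methods guarded by the
# decorators above must receive the authenticated user via the 'cur_user' keyword
# argument; otherwise they return {"success": 'false', ...} instead of raising.
# A hypothetical call looks like:
#   userManager().userList(cur_user = some_admin_user)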
def send_activated_email(to_address, username):
email_from_address = settings.get('EMAIL_FROM_ADDRESS')
if (email_from_address in ['\'\'', '\"\"', '']):
return
#text = 'Dear '+ username + ':\n' + ' Your account in docklet has been activated'
text = '<html><h4>Dear '+ username + ':</h4>'
text += '''<p> Your account in <a href='%s'>%s</a> has been activated</p>
<p> Enjoy your personal workspace in the cloud !</p>
<br>
<p> Note: DO NOT reply to this email!</p>
<br><br>
<p> <a href='http://docklet.unias.org'>Docklet Team</a>, SEI, PKU</p>
''' % (env.getenv("PORTAL_URL"), env.getenv("PORTAL_URL"))
text += '<p>'+ str(datetime.now()) + '</p>'
text += '</html>'
subject = 'Docklet account activated'
msg = MIMEMultipart()
textmsg = MIMEText(text,'html','utf-8')
msg['Subject'] = Header(subject, 'utf-8')
msg['From'] = email_from_address
msg['To'] = to_address
msg.attach(textmsg)
s = smtplib.SMTP()
s.connect()
s.sendmail(email_from_address, to_address, msg.as_string())
s.close()
def send_remind_activating_email(username):
#admin_email_address = env.getenv('ADMIN_EMAIL_ADDRESS')
nulladdr = ['\'\'', '\"\"', '']
email_from_address = settings.get('EMAIL_FROM_ADDRESS')
admin_email_address = settings.get('ADMIN_EMAIL_ADDRESS')
if (email_from_address in nulladdr or admin_email_address in nulladdr):
return
#text = 'Dear '+ username + ':\n' + ' Your account in docklet has been activated'
text = '<html><h4>Dear '+ 'admin' + ':</h4>'
text += '''<p> An activating request for %s in <a href='%s'>%s</a> has been sent</p>
<p> Please check it !</p>
<br/><br/>
<p> Docklet Team, SEI, PKU</p>
''' % (username, env.getenv("PORTAL_URL"), env.getenv("PORTAL_URL"))
text += '<p>'+ str(datetime.utcnow()) + '</p>'
text += '</html>'
subject = 'An activating request in Docklet has been sent'
if admin_email_address[0] == '"':
admins_addr = admin_email_address[1:-1].split(" ")
else:
admins_addr = admin_email_address.split(" ")
alladdr=""
for addr in admins_addr:
alladdr = alladdr+addr+", "
alladdr=alladdr[:-2]
msg = MIMEMultipart()
textmsg = MIMEText(text,'html','utf-8')
msg['Subject'] = Header(subject, 'utf-8')
msg['From'] = email_from_address
msg['To'] = alladdr
msg.attach(textmsg)
s = smtplib.SMTP()
s.connect()
try:
s.sendmail(email_from_address, admins_addr, msg.as_string())
except Exception as e:
logger.error(e)
s.close()
class userManager:
def __init__(self, username = 'root', password = None):
'''
Try to create the database when there is none
initialize 'root' user and 'root' & 'primary' group
'''
try:
User.query.all()
except:
db.create_all()
if password == None:
#set a random password
password = os.urandom(16)
password = b64encode(password).decode('utf-8')
fsdir = env.getenv('FS_PREFIX')
f = open(fsdir + '/local/generated_password.txt', 'w')
f.write("User=%s\nPass=%s\n"%(username, password))
f.close()
sys_admin = User(username, hashlib.sha512(password.encode('utf-8')).hexdigest())
sys_admin.status = 'normal'
sys_admin.nickname = 'root'
sys_admin.description = 'Root_User'
sys_admin.user_group = 'root'
sys_admin.auth_method = 'local'
db.session.add(sys_admin)
path = env.getenv('DOCKLET_LIB')
subprocess.call([path+"/userinit.sh", username])
db.session.commit()
if not os.path.exists(fspath+"/global/sys/quota"):
groupfile = open(fspath+"/global/sys/quota",'w')
groups = []
groups.append({'name':'root', 'quotas':{ 'cpu':'4', 'disk':'2000', 'data':'100', 'memory':'2000', 'image':'10', 'idletime':'24', 'vnode':'8', 'portmapping': '8', 'input_rate_limit':'10000', 'output_rate_limit':'10000'}})
groups.append({'name':'admin', 'quotas':{'cpu':'4', 'disk':'2000', 'data':'100', 'memory':'2000', 'image':'10', 'idletime':'24', 'vnode':'8', 'portmapping': '8', 'input_rate_limit':'10000', 'output_rate_limit':'10000'}})
groups.append({'name':'primary', 'quotas':{'cpu':'4', 'disk':'2000', 'data':'100', 'memory':'2000', 'image':'10', 'idletime':'24', 'vnode':'8', 'portmapping': '8', 'input_rate_limit':'10000', 'output_rate_limit':'10000'}})
groups.append({'name':'foundation', 'quotas':{'cpu':'4', 'disk':'2000', 'data':'100', 'memory':'2000', 'image':'10', 'idletime':'24', 'vnode':'8', 'portmapping': '8', 'input_rate_limit':'10000', 'output_rate_limit':'10000'}})
groupfile.write(json.dumps(groups))
groupfile.close()
if not os.path.exists(fspath+"/global/sys/quotainfo"):
quotafile = open(fspath+"/global/sys/quotainfo",'w')
quotas = {}
quotas['default'] = 'foundation'
quotas['quotainfo'] = []
quotas['quotainfo'].append({'name':'cpu', 'hint':'the cpu quota, number of cores, e.g. 4'})
quotas['quotainfo'].append({'name':'memory', 'hint':'the memory quota, number of MB , e.g. 4000'})
quotas['quotainfo'].append({'name':'disk', 'hint':'the disk quota, number of MB, e.g. 4000'})
quotas['quotainfo'].append({'name':'data', 'hint':'the quota of data space, number of GB, e.g. 100'})
quotas['quotainfo'].append({'name':'image', 'hint':'how many images the user can save, e.g. 10'})
quotas['quotainfo'].append({'name':'idletime', 'hint':'will stop cluster after idletime, number of hours, e.g. 24'})
quotas['quotainfo'].append({'name':'vnode', 'hint':'how many containers the user can have, e.g. 8'})
quotas['quotainfo'].append({'name':'portmapping', 'hint':'how many ports the user can map, e.g. 8'})
quotas['quotainfo'].append({'name':'input_rate_limit', 'hint':'the ingress speed of the network, number of kbps. 0 means the rate are unlimited.'})
quotas['quotainfo'].append({'name':'output_rate_limit', 'hint':'the egress speed of the network, number of kbps. 0 means the rate are unlimited.'})
quotafile.write(json.dumps(quotas))
quotafile.close()
if not os.path.exists(fspath+"/global/sys/lxc.default"):
settingfile = open(fspath+"/global/sys/lxc.default", 'w')
settings = {}
settings['cpu'] = "2"
settings["memory"] = "2000"
settings["disk"] = "2000"
settingfile.write(json.dumps(settings))
settingfile.close()
try:
UserUsage.query.all()
except:
db.create_all()
def auth_local(self, username, password):
password = hashlib.sha512(password.encode('utf-8')).hexdigest()
user = User.query.filter_by(username = username).first()
if (user == None):
return {"success":'false', "reason": "User did not exist"}
if (user.password != password):
return {"success":'false', "reason": "Wrong password"}
result = {
"success": 'true',
"data":{
"username" : user.username,
"avatar" : user.avatar,
"nickname" : user.nickname,
"description" : user.description,
"status" : user.status,
"group" : user.user_group,
"token" : user.generate_auth_token(),
}
}
return result
def auth_pam(self, username, password):
user = User.query.filter_by(username = username).first()
pamresult = PAM.authenticate(username, password)
if (pamresult == False or (user != None and user.auth_method != 'pam')):
return {"success":'false', "reason": "Wrong password or wrong login method"}
if (user == None):
newuser = self.newuser();
newuser.username = username
newuser.password = "no_password"
newuser.nickname = username
newuser.status = "init"
newuser.user_group = "primary"
newuser.auth_method = "pam"
self.register(user = newuser)
user = User.query.filter_by(username = username).first()
result = {
"success": 'true',
"data":{
"username" : user.username,
"avatar" : user.avatar,
"nickname" : user.nickname,
"description" : user.description,
"status" : user.status,
"group" : user.user_group,
"token" : user.generate_auth_token(),
}
}
return result
def auth_external(self, form):
if (env.getenv('EXTERNAL_LOGIN') != 'True'):
failed_result = {'success': 'false', 'reason' : 'external auth disabled'}
return failed_result
result = external_receive.external_auth_receive_request(form)
if (result['success'] != 'True'):
failed_result = {'success':'false', 'result': result}
return failed_result
username = result['username']
user = User.query.filter_by(username = username).first()
if (user != None and user.auth_method == result['auth_method']):
result = {
"success": 'true',
"data":{
"username" : user.username,
"avatar" : user.avatar,
"nickname" : user.nickname,
"description" : user.description,
"status" : user.status,
"group" : user.user_group,
"token" : user.generate_auth_token(),
}
}
return result
if (user != None and user.auth_method != result['auth_method']):
result = {'success': 'false', 'reason': 'other kinds of account already exists'}
return result
#user == None , register an account for external user
newuser = self.newuser();
newuser.username = result['username']
newuser.password = result['password']
newuser.avatar = result['avatar']
newuser.nickname = result['nickname']
newuser.description = result['description']
newuser.e_mail = result['e_mail']
newuser.truename = result['truename']
newuser.student_number = result['student_number']
newuser.status = result['status']
newuser.user_group = result['user_group']
newuser.auth_method = result['auth_method']
newuser.department = result['department']
newuser.tel = result['tel']
self.register(user = newuser)
user = User.query.filter_by(username = username).first()
result = {
"success": 'true',
"data":{
"username" : user.username,
"avatar" : user.avatar,
"nickname" : user.nickname,
"description" : user.description,
"status" : user.status,
"group" : user.user_group,
"token" : user.generate_auth_token(),
}
}
return result
def auth(self, username, password):
'''
authenticate a user by username & password
return a token as well as some user information
'''
user = User.query.filter_by(username = username).first()
if (user == None or user.auth_method =='pam'):
return self.auth_pam(username, password)
elif (user.auth_method == 'local'):
return self.auth_local(username, password)
else:
result = {'success':'false', 'reason':'auth_method error'}
return result
def auth_token(self, token):
'''
authenticate a user by a token
on success, return the corresponding User database record
otherwise return None
'''
user = User.verify_auth_token(token)
return user
def set_nfs_quota_bygroup(self,groupname, quota):
if not data_quota == "True":
return
users = User.query.filter_by(user_group = groupname).all()
for user in users:
self.set_nfs_quota(user.username, quota)
def set_nfs_quota(self, username, quota):
if not data_quota == "True":
return
nfspath = "/users/%s/data" % username
try:
cmd = data_quota_cmd % (nfspath,quota+"GB")
sys_run(cmd.strip('"'))
except Exception as e:
logger.error(e)
@administration_required
def query(*args, **kwargs):
'''
Usage: query(username = 'xxx', cur_user = token_from_auth)
|| query(ID = a_integer, cur_user = token_from_auth)
Provide information about one user, for administrators' use
'''
if ( 'ID' in kwargs):
user = User.query.filter_by(id = kwargs['ID']).first()
if (user == None):
return {"success":False, "reason":"User does not exist"}
result = {
"success":'true',
"data":{
"username" : user.username,
"password" : user.password,
"avatar" : user.avatar,
"nickname" : user.nickname,
"description" : user.description,
"status" : user.status,
"e_mail" : user.e_mail,
"student_number": user.student_number,
"department" : user.department,
"truename" : user.truename,
"tel" : user.tel,
"register_date" : "%s"%(user.register_date),
"group" : user.user_group,
"description" : user.description,
"beans" : user.beans,
},
"token": user
}
return result
if ( 'username' not in kwargs):
return {"success":'false', "reason":"Cannot get 'username'"}
username = kwargs['username']
user = User.query.filter_by(username = username).first()
if (user == None):
return {"success":'false', "reason":"User does not exist"}
result = {
"success": 'true',
"data":{
"username" : user.username,
"password" : user.password,
"avatar" : user.avatar,
"nickname" : user.nickname,
"description" : user.description,
"status" : user.status,
"e_mail" : user.e_mail,
"student_number": user.student_number,
"department" : user.department,
"truename" : user.truename,
"tel" : user.tel,
"register_date" : "%s"%(user.register_date),
"group" : user.user_group,
"beans" : user.beans,
},
"token": user
}
return result
@token_required
def selfQuery(*args, **kwargs):
'''
Usage: selfQuery(cur_user = token_from_auth)
List information for oneself
'''
user = kwargs['cur_user']
groupfile = open(fspath+"/global/sys/quota",'r')
groups = json.loads(groupfile.read())
groupfile.close()
group = None
for one_group in groups:
if one_group['name'] == user.user_group:
group = one_group['quotas']
break
else:
for one_group in groups:
if one_group['name'] == "primary":
group = one_group['quotas']
break
result = {
"success": 'true',
"data":{
"username" : user.username,
"id": user.id,
"password" : user.password,
"avatar" : user.avatar,
"nickname" : user.nickname,
"description" : user.description,
"status" : user.status,
"e_mail" : user.e_mail,
"student_number": user.student_number,
"department" : user.department,
"truename" : user.truename,
"tel" : user.tel,
"register_date" : "%s"%(user.register_date),
"group" : user.user_group,
"groupinfo": group,
"beans" : user.beans,
"auth_method": user.auth_method,
},
}
return result
@token_required
def selfModify(*args, **kwargs):
'''
Usage: selfModify(cur_user = token_from_auth, newValue = form)
Modify information for oneself
'''
form = kwargs['newValue']
name = form.get('name', None)
value = form.get('value', None)
if (name == None or value == None):
result = {'success': 'false'}
return result
user = User.query.filter_by(username = kwargs['cur_user'].username).first()
if (name == 'nickname'):
user.nickname = value
elif (name == 'description'):
user.description = value
elif (name == 'department'):
user.department = value
elif (name == 'e_mail'):
user.e_mail = value
elif (name == 'tel'):
user.tel = value
elif (name == 'password'):
old_password = hashlib.sha512(form.get('old_value', '').encode('utf-8')).hexdigest()
if (user.password != old_password):
result = {'success': 'false'}
return result
user.password = hashlib.sha512(value.encode('utf-8')).hexdigest()
else:
result = {'success': 'false'}
return result
db.session.commit()
result = {'success': 'true'}
return result
@token_required
def usageQuery(self, *args, **kwargs):
'''
Usage: usageQuery(cur_user = token_from_auth)
Query the quota and usage of user
'''
cur_user = kwargs['cur_user']
groupname = cur_user.user_group
groupinfo = self.groupQuery(name = groupname)['data']
usage = UserUsage.query.filter_by(username = cur_user.username).first()
if usage == None:
new_usage = UserUsage(cur_user.username)
db.session.add(new_usage)
db.session.commit()
usageinfo = {
'username': cur_user.username,
'cpu': '0',
'memory': '0',
'disk': '0'
}
else:
usageinfo = {
'username': usage.username,
'cpu': usage.cpu,
'memory': usage.memory,
'disk': usage.disk
}
settingfile = open(fspath+"/global/sys/lxc.default" , 'r')
defaultsetting = json.loads(settingfile.read())
settingfile.close()
return {'success': 'true', 'quota' : groupinfo, 'usage' : usageinfo, 'default': defaultsetting }
@token_required
def usageInc(self, *args, **kwargs):
'''
Usage: usageInc(cur_user = token_from_auth, modification = data_from_form)
Increase the usage info of the user
'''
cur_user = kwargs['cur_user']
modification = kwargs['modification']
logger.info("record usage for user:%s" % cur_user.username)
groupname = cur_user.user_group
groupinfo = self.groupQuery(name = groupname)['data']
usage = UserUsage.query.filter_by(username = cur_user.username).first()
if usage == None:
new_usage = UserUsage(cur_user.username)
db.session.add(new_usage)
db.session.commit()
usage = UserUsage.query.filter_by(username = cur_user.username).first()
if int(modification['cpu']) <= 0 or int(modification['memory']) <= 0 or int(modification['disk']) <= 0:
return {'success':False, 'result':"cpu, memory and disk settings must be greater than zero"}
cpu = int(usage.cpu) + int(modification['cpu'])
memory = int(usage.memory) + int(modification['memory'])
disk = int(usage.disk) + int(modification['disk'])
if cpu > int(groupinfo['cpu']):
logger.error("cpu quota exceed, user:%s" % cur_user.username)
return {'success':False, 'result':"cpu quota exceed"}
if memory > int(groupinfo['memory']):
logger.error("memory quota exceed, user:%s" % cur_user.username)
return {'success':False, 'result':"memory quota exceed"}
if disk > int(groupinfo['disk']):
logger.error("disk quota exceed, user:%s" % cur_user.username)
return {'success':False, 'result':"disk quota exceed"}
usage.cpu = str(cpu)
usage.memory = str(memory)
usage.disk = str(disk)
db.session.commit()
return {'success':True, 'result':"distribute the resource"}
@token_required
def usageRecover(self, *args, **kwargs):
'''
Usage: usageRecover(cur_user = token_from_auth, modification = data_from_form)
Recover the usage info when creating a container failed
'''
cur_user = kwargs['cur_user']
modification = kwargs['modification']
logger.info("recover usage for user:%s" % cur_user.username)
usage = UserUsage.query.filter_by(username = cur_user.username).first()
if usage == None:
new_usage = UserUsage(cur_user.username)
db.session.add(new_usage)
db.session.commit()
usage = UserUsage.query.filter_by(username = cur_user.username).first()
return True
cpu = int(usage.cpu) - int(modification['cpu'])
memory = int(usage.memory) - int(modification['memory'])
disk = int(usage.disk) - int(modification['disk'])
if cpu < 0:
cpu = 0
if memory < 0:
memory = 0
if disk < 0:
disk = 0
usage.cpu = str(cpu)
usage.memory = str(memory)
usage.disk = str(disk)
db.session.commit()
return {'success':True}
@token_required
def usageRelease(self, *args, **kwargs):
cur_user = kwargs['cur_user']
cpu = kwargs['cpu']
memory = kwargs['memory']
disk = kwargs['disk']
usage = UserUsage.query.filter_by(username = cur_user.username).first()
if usage == None:
new_usage = UserUsage(cur_user.username)
db.session.add(new_usage)
db.session.commit()
return {'success':True}
nowcpu = int(usage.cpu) - int(cpu)
nowmemory = int(usage.memory) - int(memory)
nowdisk = int(usage.disk) - int(disk)
if nowcpu < 0:
nowcpu = 0
if nowmemory < 0:
nowmemory = 0
if nowdisk < 0:
nowdisk = 0
usage.cpu = str(nowcpu)
usage.memory = str(nowmemory)
usage.disk = str(nowdisk)
db.session.commit()
return {'success':True}
def initUsage(*args, **kwargs):
"""
Initialize the usage info when starting docklet in init mode
"""
usages = UserUsage.query.all()
for usage in usages:
usage.cpu = "0"
usage.memory = "0"
usage.disk = "0"
db.session.commit()
return True
@administration_required
def userList(*args, **kwargs):
'''
Usage: userList(cur_user = token_from_auth)
List all users for an administrator
'''
alluser = User.query.all()
result = {
"success": 'true',
"data":[]
}
for user in alluser:
userinfo = [
user.id,
user.username,
user.truename,
user.e_mail,
user.tel,
"%s"%(user.register_date),
user.status,
user.user_group,
user.beans,
'',
]
result["data"].append(userinfo)
return result
@administration_required
def groupList(*args, **kwargs):
'''
Usage: groupList(cur_user = token_from_auth)
List all groups for an administrator
'''
groupfile = open(fspath+"/global/sys/quota",'r')
groups = json.loads(groupfile.read())
groupfile.close()
quotafile = open(fspath+"/global/sys/quotainfo",'r')
quotas = json.loads(quotafile.read())
quotafile.close()
result = {
"success": 'true',
"groups": groups,
"quotas": quotas['quotainfo'],
"default": quotas['default'],
}
return result
@administration_required
def change_default_group(*args, **kwargs):
form = kwargs['form']
default_group = form.get('defaultgroup')
quotafile = open(fspath+"/global/sys/quotainfo",'r')
quotas = json.loads(quotafile.read())
quotafile.close()
quotas['default'] = default_group
quotafile = open(fspath+"/global/sys/quotainfo",'w')
quotafile.write(json.dumps(quotas))
quotafile.close()
return { 'success':'true', 'action':'change default group' }
def groupQuery(self, *args, **kwargs):
'''
Usage: groupQuery(name = XXX, cur_user = token_from_auth)
List a group for an administrator
'''
groupfile = open(fspath+"/global/sys/quota",'r')
groups = json.loads(groupfile.read())
groupfile.close()
for group in groups:
if group['name'] == kwargs['name']:
result = {
"success":'true',
"data": group['quotas'],
}
return result
else:
return {"success":False, "reason":"Group does not exist"}
@administration_required
def groupListName(*args, **kwargs):
'''
Usage: groupListName(cur_user = token_from_auth)
List all group names for an administrator
'''
groupfile = open(fspath+"/global/sys/quota",'r')
groups = json.loads(groupfile.read())
groupfile.close()
result = {
"groups": [],
}
for group in groups:
result["groups"].append(group['name'])
return result
@administration_required
def groupModify(self, *args, **kwargs):
'''
Usage: groupModify(newValue = dict_from_form, cur_user = token_from_auth)
'''
groupfile = open(fspath+"/global/sys/quota",'r')
groups = json.loads(groupfile.read())
groupfile.close()
for group in groups:
if group['name'] == kwargs['newValue'].get('groupname',None):
form = kwargs['newValue']
for key in form.keys():
if key == "data":
if not group['quotas'][key] == form.get(key):
self.set_nfs_quota_bygroup(group['name'],form.get(key))
else:
pass
if key == "groupname" or key == "token":
pass
else:
if key == "vnode":
vnode = int(form['vnode'])
val = str(2**(round(math.log(vnode+3, 2))) - 3 )
group["quotas"][key] = val
else:
group['quotas'][key] = form.get(key)
groupfile = open(fspath+"/global/sys/quota",'w')
groupfile.write(json.dumps(groups))
groupfile.close()
return {"success":'true'}
else:
return {"success":'false', "reason":"UserGroup does not exist"}
@administration_required
def modify(self, *args, **kwargs):
'''
modify a user's information in database
will send an e-mail when status is changed from 'applying' to 'normal'
Usage: modify(newValue = dict_from_form, cur_user = token_from_auth)
'''
if ( kwargs['newValue'].get('Instruction', '') == 'Activate'):
user_modify = User.query.filter_by(id = kwargs['newValue'].get('ID', None)).first()
user_modify.status = 'normal'
send_activated_email(user_modify.e_mail, user_modify.username)
db.session.commit()
return {"success": "true"}
if ( kwargs['newValue'].get('password', '') != ''):
user_modify = User.query.filter_by(username = kwargs['newValue'].get('username', None)).first()
new_password = kwargs['newValue'].get('password','')
new_password = hashlib.sha512(new_password.encode('utf-8')).hexdigest()
user_modify.password = new_password
db.session.commit()
return {"success": "true"}
user_modify = User.query.filter_by(username = kwargs['newValue'].get('username', None)).first()
if (user_modify == None):
return {"success":'false', "reason":"User does not exist"}
#try:
form = kwargs['newValue']
user_modify.truename = form.get('truename', '')
user_modify.e_mail = form.get('e_mail', '')
user_modify.department = form.get('department', '')
user_modify.student_number = form.get('student_number', '')
user_modify.tel = form.get('tel', '')
user_modify.user_group = form.get('group', '')
user_modify.auth_method = form.get('auth_method', '')
if (user_modify.status == 'applying' and form.get('status', '') == 'normal'):
send_activated_email(user_modify.e_mail, user_modify.username)
user_modify.status = form.get('status', '')
#if (form.get('password', '') != ''):
#new_password = form.get('password','')
#new_password = hashlib.sha512(new_password.encode('utf-8')).hexdigest()
#user_modify.password = new_password
#self.chpassword(cur_user = user_modify, password = form.get('password','no_password'))
#modify password in another function now
db.session.commit()
res = self.groupQuery(name=user_modify.user_group)
if res['success']:
self.set_nfs_quota(user_modify.username,res['data']['data'])
return {"success":'true'}
#except:
#return {"success":'false', "reason":"Something happened"}
@token_required
def chpassword(*args, **kwargs):
'''
Usage: chpassword(cur_user = token_from_auth, password = 'your_password')
'''
cur_user = kwargs['cur_user']
cur_user.password = hashlib.sha512(kwargs['password'].encode('utf-8')).hexdigest()
def newuser(*args, **kwargs):
'''
Usage : newuser()
The only method to create a new user
call this method first, modify the return value (which is a database row instance), then call self.register()
'''
user_new = User('newuser', 'asdf1234')
quotafile = open(fspath+"/global/sys/quotainfo",'r')
quotas = json.loads(quotafile.read())
quotafile.close()
user_new.user_group = quotas['default']
user_new.avatar = 'default.png'
return user_new
def register(self, *args, **kwargs):
'''
Usage: register(user = modified_from_newuser())
'''
if (kwargs['user'].username == None or kwargs['user'].username == ''):
return {"success":'false', "reason": "Empty username"}
user_check = User.query.filter_by(username = kwargs['user'].username).first()
if (user_check != None and user_check.status != "init"):
#for the activating form
return {"success":'false', "reason": "Unauthorized action"}
newuser = kwargs['user']
if (user_check != None and (user_check.status == "init")):
db.session.delete(user_check)
db.session.commit()
else:
newuser.password = hashlib.sha512(newuser.password.encode('utf-8')).hexdigest()
db.session.add(newuser)
db.session.commit()
# if newuser status is normal, init some data for this user
# now initialize for all kind of users
#if newuser.status == 'normal':
path = env.getenv('DOCKLET_LIB')
subprocess.call([path+"/userinit.sh", newuser.username])
res = self.groupQuery(name=newuser.user_group)
if res['success']:
self.set_nfs_quota(newuser.username,res['data']['data'])
return {"success":'true'}
@administration_required
def quotaadd(*args, **kwargs):
form = kwargs.get('form')
quotaname = form.get("quotaname")
default_value = form.get("default_value")
hint = form.get("hint")
if (quotaname == None):
return { "success":'false', "reason": "Empty quota name"}
if (default_value == None):
default_value = "--"
groupfile = open(fspath+"/global/sys/quota",'r')
groups = json.loads(groupfile.read())
groupfile.close()
for group in groups:
group['quotas'][quotaname] = default_value
groupfile = open(fspath+"/global/sys/quota",'w')
groupfile.write(json.dumps(groups))
groupfile.close()
quotafile = open(fspath+"/global/sys/quotainfo",'r')
quotas = json.loads(quotafile.read())
quotafile.close()
quotas['quotainfo'].append({'name':quotaname, 'hint':hint})
quotafile = open(fspath+"/global/sys/quotainfo",'w')
quotafile.write(json.dumps(quotas))
quotafile.close()
return {"success":'true'}
@administration_required
def groupadd(*args, **kwargs):
form = kwargs.get('form')
groupname = form.get("groupname")
if (groupname == None):
return {"success":'false', "reason": "Empty group name"}
groupfile = open(fspath+"/global/sys/quota",'r')
groups = json.loads(groupfile.read())
groupfile.close()
group = {
'name': groupname,
'quotas': {}
}
for key in form.keys():
if key == "groupname" or key == "token":
pass
else:
if key == "vnode":
vnode = int(form['vnode'])
val = str(2**(round(math.log(vnode+3, 2))) - 3 )
group['quotas'][key] = val
else:
group['quotas'][key] = form.get(key)
groups.append(group)
groupfile = open(fspath+"/global/sys/quota",'w')
groupfile.write(json.dumps(groups))
groupfile.close()
return {"success":'true'}
@administration_required
def groupdel(*args, **kwargs):
name = kwargs.get('name', None)
if (name == None):
return {"success":'false', "reason": "Empty group name"}
groupfile = open(fspath+"/global/sys/quota",'r')
groups = json.loads(groupfile.read())
groupfile.close()
for group in groups:
if group['name'] == name:
groups.remove(group)
break
groupfile = open(fspath+"/global/sys/quota",'w')
groupfile.write(json.dumps(groups))
groupfile.close()
return {"success":'true'}
@administration_required
def lxcsettingList(*args, **kwargs):
lxcsettingfile = open(fspath+"/global/sys/lxc.default", 'r')
lxcsetting = json.loads(lxcsettingfile.read())
lxcsettingfile.close()
return {"success": 'true', 'data':lxcsetting}
@administration_required
def chlxcsetting(*args, **kwargs):
form = kwargs['form']
lxcsetting = {}
lxcsetting['cpu'] = form['lxcCpu']
lxcsetting['memory'] = form['lxcMemory']
lxcsetting['disk'] = form['lxcDisk']
lxcsettingfile = open(fspath+"/global/sys/lxc.default", 'w')
lxcsettingfile.write(json.dumps(lxcsetting))
lxcsettingfile.close()
return {"success": 'true'}
@administration_required
def cloud_account_query(*args, **kwargs):
accountfile = open(fspath+"/global/sys/cloudaccount", 'r')
account = json.loads(accountfile.read())
accountfile.close()
return {"success": 'true', 'accounts':account}
@administration_required
def cloud_account_add(*args, **kwargs):
form = kwargs.get('form')
accountfile = open(fspath+"/global/sys/cloudaccount", 'r')
account = json.loads(accountfile.read())
accountfile.close()
account.append(
{ 'cloudname' : form['cloudname'],
'username' : form['username'],
'password' : form['password'],
})
accountfile = open(fspath+"/global/sys/cloudaccount", 'w')
accountfile.write(json.dumps(account))
accountfile.close()
return {"success": 'true'}
@administration_required
def cloud_account_del(*args, **kwargs):
form = kwargs.get('form')
cloudname = form['cloudname']
accountfile = open(fspath+"/global/sys/cloudaccount", 'r')
account = json.loads(accountfile.read())
accountfile.close()
for acc in account:
if acc['cloudname'] == cloudname:
account.remove(acc)
break
accountfile = open(fspath+"/global/sys/cloudaccount", 'w')
accountfile.write(json.dumps(account))
accountfile.close()
return {"success": 'true'}
@administration_required
def cloud_account_modify(*args, **kwargs):
form = kwargs.get('form')
cloudname = form['cloudname']
accountfile = open(fspath+"/global/sys/cloudaccount", 'r')
account = json.loads(accountfile.read())
accountfile.close()
for acc in account:
if acc['cloudname'] == cloudname:
acc['username'] = form['username']
acc['password'] = form['password']
break
accountfile = open(fspath+"/global/sys/cloudaccount", 'w')
accountfile.write(json.dumps(account))
accountfile.close()
return {"success": "true"}
def queryForDisplay(*args, **kwargs):
'''
Usage: queryForDisplay(user = token_from_auth)
Provide information about one user, for administrators' use
'''
if ( 'user' not in kwargs):
return {"success":'false', "reason":"Cannot get 'user'"}
user = kwargs['user']
if (user == None):
return {"success":'false', "reason":"User does not exist"}
result = {
"success": 'true',
"data":{
"username" : user.username,
"password" : user.password,
"avatar" : user.avatar,
"nickname" : user.nickname,
"description" : user.description,
"status" : user.status,
"e_mail" : user.e_mail,
"student_number": user.student_number,
"department" : user.department,
"truename" : user.truename,
"tel" : user.tel,
"register_date" : "%s"%(user.register_date),
"group" : user.user_group,
"auth_method": user.auth_method,
}
}
return result
# def usermodify(rowID, columnID, newValue, cur_user):
# '''not used now'''
# user = um.query(ID = request.form["rowID"], cur_user = root).get('token', None)
# result = um.modify(user = user, columnID = request.form["columnID"], newValue = request.form["newValue"], cur_user = root)
# return json.dumps(result)
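# --- Illustrative usage sketch (added; not part of the original Docklet code) ---
# Assumes a fully configured Docklet environment (FS_PREFIX, database, etc.) and an
# existing, activated user; the username and password literals are placeholders.
if __name__ == '__main__':
    um = userManager('root')
    login = um.auth('alice', 'alice_password')
    if login['success'] == 'true':
        # resolve the token string back to a User instance before calling protected methods
        cur_user = um.auth_token(login['data']['token'])
        print(um.selfQuery(cur_user = cur_user))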
|
py | 1a3b8fe4acd4a5eeb0ca7c631815b25f1037384a | #!python3
"""
Python 3 wrapper for identifying objects in images
Requires DLL compilation
Both the GPU and no-GPU version should be compiled; the no-GPU version should be renamed "yolo_cpp_dll_nogpu.dll".
On a GPU system, you can force CPU evaluation by any of:
- Set global variable DARKNET_FORCE_CPU to True
- Set environment variable CUDA_VISIBLE_DEVICES to -1
- Set environment variable "FORCE_CPU" to "true"
- Set environment variable "DARKNET_PATH" to path darknet lib .so (for Linux)
Directly viewing or returning bounding-boxed images requires scikit-image to be installed (`pip install scikit-image`)
Original *nix 2.7: https://github.com/pjreddie/darknet/blob/0f110834f4e18b30d5f101bf8f1724c34b7b83db/python/darknet.py
Windows Python 2.7 version: https://github.com/AlexeyAB/darknet/blob/fc496d52bf22a0bb257300d3c79be9cd80e722cb/build/darknet/x64/darknet.py
@author: Philip Kahn
@date: 20180503
"""
from ctypes import *
import math
import random
import os
class BOX(Structure):
_fields_ = [("x", c_float),
("y", c_float),
("w", c_float),
("h", c_float)]
class DETECTION(Structure):
_fields_ = [("bbox", BOX),
("classes", c_int),
("prob", POINTER(c_float)),
("mask", POINTER(c_float)),
("objectness", c_float),
("sort_class", c_int),
("uc", POINTER(c_float)),
("points", c_int),
("embeddings", POINTER(c_float)),
("embedding_size", c_int),
("sim", c_float),
("track_id", c_int)]
class DETNUMPAIR(Structure):
_fields_ = [("num", c_int),
("dets", POINTER(DETECTION))]
class IMAGE(Structure):
_fields_ = [("w", c_int),
("h", c_int),
("c", c_int),
("data", POINTER(c_float))]
class METADATA(Structure):
_fields_ = [("classes", c_int),
("names", POINTER(c_char_p))]
def network_width(net):
return lib.network_width(net)
def network_height(net):
return lib.network_height(net)
def bbox2points(bbox):
"""
Convert a bounding box from YOLO format (x, y, w, h)
to the corner points used by cv2.rectangle
"""
x, y, w, h = bbox
xmin = int(round(x - (w / 2)))
xmax = int(round(x + (w / 2)))
ymin = int(round(y - (h / 2)))
ymax = int(round(y + (h / 2)))
return xmin, ymin, xmax, ymax
def class_colors(names):
"""
Create a dict with one random BGR color for each
class name
"""
return {name: (
random.randint(0, 255),
random.randint(0, 255),
random.randint(0, 255)) for name in names}
def load_network(config_file, data_file, weights, batch_size=1):
"""
load model description and weights from config files
args:
config_file (str): path to .cfg model file
data_file (str): path to .data model file
weights (str): path to weights
returns:
network: trained model
class_names
class_colors
"""
network = load_net_custom(
config_file.encode("ascii"),
weights.encode("ascii"), 0, batch_size)
metadata = load_meta(data_file.encode("ascii"))
class_names = [metadata.names[i].decode("ascii") for i in range(metadata.classes)]
colors = class_colors(class_names)
return network, class_names, colors
def print_detections(detections):
print("\nObjects:")
for label, confidence, bbox in detections:
if label == "person":
x, y, w, h = bbox
print("{}: {}%".format(label, confidence))
def draw_boxes(detections, image, colors):
import cv2
for label, confidence, bbox in detections:
if label == "person":
left, top, right, bottom = bbox2points(bbox)
cv2.rectangle(image, (left, top), (right, bottom), colors[label], 1)
cv2.putText(image, "{} [{:.2f}]".format(label, float(confidence)),
(left, top - 5), cv2.FONT_HERSHEY_SIMPLEX, 0.5,
colors[label], 2)
return image
def decode_detection(detections):
decoded = []
for label, confidence, bbox in detections:
confidence = str(round(confidence * 100, 2))
decoded.append((str(label), confidence, bbox))
return decoded
def remove_negatives(detections, class_names, num):
"""
Remove all classes with 0% confidence within the detection
"""
predictions = []
for j in range(num):
for idx, name in enumerate(class_names):
if detections[j].prob[idx] > 0:
bbox = detections[j].bbox
bbox = (bbox.x, bbox.y, bbox.w, bbox.h)
predictions.append((name, detections[j].prob[idx], (bbox)))
return predictions
def detect_image(network, class_names, image, thresh=.5, hier_thresh=.5, nms=.45):
"""
Returns a list of (label, confidence, bbox) detections sorted by confidence
"""
pnum = pointer(c_int(0))
predict_image(network, image)
detections = get_network_boxes(network, image.w, image.h,
thresh, hier_thresh, None, 0, pnum, 0)
num = pnum[0]
if nms:
do_nms_sort(detections, num, len(class_names), nms)
predictions = remove_negatives(detections, class_names, num)
predictions = decode_detection(predictions)
free_detections(detections, num)
return sorted(predictions, key=lambda x: x[1])
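# Illustrative end-to-end sketch (added for clarity; the config, data and weights
# file names are placeholders, not shipped with this wrapper):
#   network, class_names, colors = load_network("yolov4.cfg", "coco.data", "yolov4.weights")
#   image = load_image(b"dog.jpg", 0, 0)       # load_image is bound from the shared library below
#   detections = detect_image(network, class_names, image)
#   print_detections(detections)
#   free_image(image)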
# lib = CDLL("/home/pjreddie/documents/darknet/libdarknet.so", RTLD_GLOBAL)
# lib = CDLL("libdarknet.so", RTLD_GLOBAL)
hasGPU = True
if os.name == "nt":
cwd = os.path.dirname(__file__)
os.environ['PATH'] = cwd + ';' + os.environ['PATH']
winGPUdll = os.path.join(cwd, "yolo_cpp_dll.dll")
winNoGPUdll = os.path.join(cwd, "yolo_cpp_dll_nogpu.dll")
envKeys = list()
for k, v in os.environ.items():
envKeys.append(k)
try:
try:
tmp = os.environ["FORCE_CPU"].lower()
if tmp in ["1", "true", "yes", "on"]:
raise ValueError("ForceCPU")
else:
print("Flag value {} not forcing CPU mode".format(tmp))
except KeyError:
# We never set the flag
if 'CUDA_VISIBLE_DEVICES' in envKeys:
if int(os.environ['CUDA_VISIBLE_DEVICES']) < 0:
raise ValueError("ForceCPU")
try:
global DARKNET_FORCE_CPU
if DARKNET_FORCE_CPU:
raise ValueError("ForceCPU")
except NameError as cpu_error:
print(cpu_error)
if not os.path.exists(winGPUdll):
raise ValueError("NoDLL")
lib = CDLL(winGPUdll, RTLD_GLOBAL)
except (KeyError, ValueError):
hasGPU = False
if os.path.exists(winNoGPUdll):
lib = CDLL(winNoGPUdll, RTLD_GLOBAL)
print("Notice: CPU-only mode")
else:
# Try the other way, in case no_gpu was compiled but not renamed
lib = CDLL(winGPUdll, RTLD_GLOBAL)
print("Environment variables indicated a CPU run, but we didn't find {}. Trying a GPU run anyway.".format(winNoGPUdll))
else:
lib = CDLL(os.path.join(
os.environ.get('DARKNET_PATH', './'),
"libdarknet.so"), RTLD_GLOBAL)
lib.network_width.argtypes = [c_void_p]
lib.network_width.restype = c_int
lib.network_height.argtypes = [c_void_p]
lib.network_height.restype = c_int
copy_image_from_bytes = lib.copy_image_from_bytes
copy_image_from_bytes.argtypes = [IMAGE,c_char_p]
predict = lib.network_predict_ptr
predict.argtypes = [c_void_p, POINTER(c_float)]
predict.restype = POINTER(c_float)
if hasGPU:
set_gpu = lib.cuda_set_device
set_gpu.argtypes = [c_int]
init_cpu = lib.init_cpu
make_image = lib.make_image
make_image.argtypes = [c_int, c_int, c_int]
make_image.restype = IMAGE
get_network_boxes = lib.get_network_boxes
get_network_boxes.argtypes = [c_void_p, c_int, c_int, c_float, c_float, POINTER(c_int), c_int, POINTER(c_int), c_int]
get_network_boxes.restype = POINTER(DETECTION)
make_network_boxes = lib.make_network_boxes
make_network_boxes.argtypes = [c_void_p]
make_network_boxes.restype = POINTER(DETECTION)
free_detections = lib.free_detections
free_detections.argtypes = [POINTER(DETECTION), c_int]
free_batch_detections = lib.free_batch_detections
free_batch_detections.argtypes = [POINTER(DETNUMPAIR), c_int]
free_ptrs = lib.free_ptrs
free_ptrs.argtypes = [POINTER(c_void_p), c_int]
network_predict = lib.network_predict_ptr
network_predict.argtypes = [c_void_p, POINTER(c_float)]
reset_rnn = lib.reset_rnn
reset_rnn.argtypes = [c_void_p]
load_net = lib.load_network
load_net.argtypes = [c_char_p, c_char_p, c_int]
load_net.restype = c_void_p
load_net_custom = lib.load_network_custom
load_net_custom.argtypes = [c_char_p, c_char_p, c_int, c_int]
load_net_custom.restype = c_void_p
free_network_ptr = lib.free_network_ptr
free_network_ptr.argtypes = [c_void_p]
free_network_ptr.restype = c_void_p
do_nms_obj = lib.do_nms_obj
do_nms_obj.argtypes = [POINTER(DETECTION), c_int, c_int, c_float]
do_nms_sort = lib.do_nms_sort
do_nms_sort.argtypes = [POINTER(DETECTION), c_int, c_int, c_float]
free_image = lib.free_image
free_image.argtypes = [IMAGE]
letterbox_image = lib.letterbox_image
letterbox_image.argtypes = [IMAGE, c_int, c_int]
letterbox_image.restype = IMAGE
load_meta = lib.get_metadata
lib.get_metadata.argtypes = [c_char_p]
lib.get_metadata.restype = METADATA
load_image = lib.load_image_color
load_image.argtypes = [c_char_p, c_int, c_int]
load_image.restype = IMAGE
rgbgr_image = lib.rgbgr_image
rgbgr_image.argtypes = [IMAGE]
predict_image = lib.network_predict_image
predict_image.argtypes = [c_void_p, IMAGE]
predict_image.restype = POINTER(c_float)
predict_image_letterbox = lib.network_predict_image_letterbox
predict_image_letterbox.argtypes = [c_void_p, IMAGE]
predict_image_letterbox.restype = POINTER(c_float)
network_predict_batch = lib.network_predict_batch
network_predict_batch.argtypes = [c_void_p, IMAGE, c_int, c_int, c_int,
c_float, c_float, POINTER(c_int), c_int, c_int]
network_predict_batch.restype = POINTER(DETNUMPAIR)
|
py | 1a3b8fe8c58c7278fa1c0480ecc290ab10e6de00 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2014-2021 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Authors: Qiming Sun <[email protected]>
# Susi Lehtola <[email protected]>
'''
XC functional, the interface to libxc
(http://www.tddft.org/programs/octopus/wiki/index.php/Libxc)
'''
import sys
import warnings
import copy
import ctypes
import math
import numpy
from pyscf import lib
from pyscf.dft.xc.utils import remove_dup, format_xc_code
from pyscf import __config__
_itrf = lib.load_library('libxc_itrf')
_itrf.LIBXC_is_lda.restype = ctypes.c_int
_itrf.LIBXC_is_gga.restype = ctypes.c_int
_itrf.LIBXC_is_meta_gga.restype = ctypes.c_int
_itrf.LIBXC_needs_laplacian.restype = ctypes.c_int
_itrf.LIBXC_needs_laplacian.argtypes = [ctypes.c_int]
_itrf.LIBXC_is_hybrid.restype = ctypes.c_int
_itrf.LIBXC_is_cam_rsh.restype = ctypes.c_int
_itrf.LIBXC_max_deriv_order.restype = ctypes.c_int
_itrf.LIBXC_number_of_functionals.restype = ctypes.c_int
_itrf.LIBXC_functional_numbers.argtypes = (numpy.ctypeslib.ndpointer(dtype=numpy.intc, ndim=1, flags=("W", "C", "A")), )
_itrf.LIBXC_functional_name.argtypes = [ctypes.c_int]
_itrf.LIBXC_functional_name.restype = ctypes.c_char_p
_itrf.LIBXC_hybrid_coeff.argtypes = [ctypes.c_int]
_itrf.LIBXC_hybrid_coeff.restype = ctypes.c_double
_itrf.LIBXC_nlc_coeff.argtypes = [ctypes.c_int,ctypes.POINTER(ctypes.c_double)]
_itrf.LIBXC_rsh_coeff.argtypes = [ctypes.c_int,ctypes.POINTER(ctypes.c_double)]
_itrf.LIBXC_version.restype = ctypes.c_char_p
_itrf.LIBXC_reference.restype = ctypes.c_char_p
_itrf.LIBXC_reference_doi.restype = ctypes.c_char_p
_itrf.LIBXC_xc_reference.argtypes = [ctypes.c_int, (ctypes.c_char_p * 8)]
def libxc_version():
'''Returns the version of libxc'''
return _itrf.LIBXC_version().decode("UTF-8")
def libxc_reference():
'''Returns the reference to libxc'''
return _itrf.LIBXC_reference().decode("UTF-8")
def libxc_reference_doi():
'''Returns the DOI of the reference to libxc'''
return _itrf.LIBXC_reference_doi().decode("UTF-8")
__version__ = libxc_version()
__reference__ = libxc_reference()
__reference_doi__ = libxc_reference_doi()
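# Quick illustrative check (added; not part of the original module): the wrapped
# libxc build can be inspected at runtime through the helpers defined above
# (the import path below assumes the usual pyscf layout), e.g.
#   >>> from pyscf.dft import libxc
#   >>> libxc.libxc_version(), libxc.libxc_reference_doi()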
# Runtime detection of available functionals
dynamic_func = getattr(__config__, 'dft_libxc_dynamic', False)
if dynamic_func:
def available_libxc_functionals():
# Number of available functionals
nfunc = _itrf.LIBXC_number_of_functionals()
# Get functional numbers
numbers = numpy.zeros(nfunc, dtype=numpy.intc)
_itrf.LIBXC_functional_numbers(numbers)
# Return a {functional name: functional number} mapping
return {_itrf.LIBXC_functional_name(x).decode("UTF-8").upper() : x for x in numbers}
XC = XC_CODES = available_libxc_functionals()
PROBLEMATIC_XC = dict([])
else:
# XC dict is generated by
#import pylibxc
#for xcname in pylibxc.util.xc_available_functional_names():
# f = pylibxc.LibXCFunctional(xcname, 1)
# f_id = f.get_number()
# ref = f.get_references()
# key = f"'{xcname.upper()}'"
# print(f"{key:<31s}: {f_id:<3d}, # {ref[0]}")
XC = XC_CODES = {
'LDA_C_1D_CSC' : 18 , # M. Casula, S. Sorella, and G. Senatore, Phys. Rev. B 74, 245427 (2006)
'LDA_C_1D_LOOS' : 26 , # P.-F. Loos, J. Chem. Phys. 138, 064108 (2013)
'LDA_C_2D_AMGB' : 15 , # C. Attaccalite, S. Moroni, P. Gori-Giorgi, and G. B. Bachelet, Phys. Rev. Lett. 88, 256601 (2002)
'LDA_C_2D_PRM' : 16 , # S. Pittalis, E. Rasanen, and M. A. L. Marques, Phys. Rev. B 78, 195322 (2008)
'LDA_C_BR78' : 552, # G. B. Jr. and S. M. Rothstein, J. Chem. Phys. 69, 1177 (1978)
'LDA_C_CHACHIYO' : 287, # T. Chachiyo, J. Chem. Phys. 145, 021101 (2016)
'LDA_C_CHACHIYO_MOD' : 307, # T. Chachiyo and H. Chachiyo, Comput. Theor. Chem. 1172, 112669 (2020)
'LDA_C_GK72' : 578, # R. G. Gordon and Y. S. Kim, J. Chem. Phys. 56, 3122 (1972)
'LDA_C_GL' : 5 , # O. Gunnarsson and B. I. Lundqvist, Phys. Rev. B 13, 4274 (1976)
'LDA_C_GOMBAS' : 24 , # P. Gombas, Fortschr. Phys. 13, 137 (1965)
'LDA_C_HL' : 4 , # L. Hedin and B. I. Lundqvist, J. Phys. C: Solid State Phys. 4, 2064 (1971)
'LDA_C_KARASIEV' : 579, # V. V. Karasiev, J. Chem. Phys. 145, 157101 (2016)
'LDA_C_KARASIEV_MOD' : 308, # T. Chachiyo and H. Chachiyo, Comput. Theor. Chem. 1172, 112669 (2020)
'LDA_C_LP96' : 289, # S. Liu and R. G. Parr, Phys. Rev. A 53, 2211 (1996)
'LDA_C_MCWEENY' : 551, # R. McWeeny, in The New World of Quantum Chemistry, edited by B. Pullman and R. Parr (Reidel, Boston, 1976) pp. 3--31
'LDA_C_ML1' : 22 , # E. I. Proynov and D. R. Salahub, Phys. Rev. B 49, 7874 (1994)
'LDA_C_ML2' : 23 , # E. I. Proynov and D. R. Salahub, Phys. Rev. B 49, 7874 (1994)
'LDA_C_OB_PW' : 14 , # G. Ortiz and P. Ballone, Phys. Rev. B 50, 1391 (1994)
'LDA_C_OB_PZ' : 11 , # G. Ortiz and P. Ballone, Phys. Rev. B 50, 1391 (1994)
'LDA_C_OW' : 574, # P. A. Stewart and P. M. W. Gill, J. Chem. Soc., Faraday Trans. 91, 4337 (1995)
'LDA_C_OW_LYP' : 573, # P. A. Stewart and P. M. W. Gill, J. Chem. Soc., Faraday Trans. 91, 4337 (1995)
'LDA_C_PK09' : 554, # E. Proynov and J. Kong, Phys. Rev. A 79, 014103 (2009)
'LDA_C_PMGB06' : 590, # S. Paziani, S. Moroni, P. Gori-Giorgi, and G. B. Bachelet, Phys. Rev. B 73, 155111 (2006)
'LDA_C_PW' : 12 , # J. P. Perdew and Y. Wang, Phys. Rev. B 45, 13244 (1992)
'LDA_C_PW_MOD' : 13 , # J. P. Perdew and Y. Wang, Phys. Rev. B 45, 13244 (1992), added extra digits to some constants as in the PBE routine (http://dft.rutgers.edu/pubs/PBE.asc)
'LDA_C_PW_RPA' : 25 , # J. P. Perdew and Y. Wang, Phys. Rev. B 45, 13244 (1992)
'LDA_C_PZ' : 9 , # J. P. Perdew and A. Zunger, Phys. Rev. B 23, 5048 (1981)
'LDA_C_PZ_MOD' : 10 , # J. P. Perdew and A. Zunger, Phys. Rev. B 23, 5048 (1981), modified to improve the matching between the low- and high-rs parts
'LDA_C_RC04' : 27 , # S. Ragot and P. Cortona, J. Chem. Phys. 121, 7671 (2004)
'LDA_C_RPA' : 3 , # M. Gell-Mann and K. A. Brueckner, Phys. Rev. 106, 364 (1957)
'LDA_C_RPW92' : 684, # M. Ruggeri, P. L. Rios, and A. Alavi, Phys. Rev. B 98, 161105 (2018)
'LDA_C_UPW92' : 683, # M. Ruggeri, P. L. Rios, and A. Alavi, Phys. Rev. B 98, 161105 (2018)
'LDA_C_VBH' : 17 , # U. von Barth and L. Hedin, J. Phys. C: Solid State Phys. 5, 1629 (1972)
'LDA_C_VWN' : 7 , # S. H. Vosko, L. Wilk, and M. Nusair, Can. J. Phys. 58, 1200 (1980)
'LDA_C_VWN_1' : 28 , # S. H. Vosko, L. Wilk, and M. Nusair, Can. J. Phys. 58, 1200 (1980)
'LDA_C_VWN_2' : 29 , # S. H. Vosko, L. Wilk, and M. Nusair, Can. J. Phys. 58, 1200 (1980)
'LDA_C_VWN_3' : 30 , # S. H. Vosko, L. Wilk, and M. Nusair, Can. J. Phys. 58, 1200 (1980)
'LDA_C_VWN_4' : 31 , # S. H. Vosko, L. Wilk, and M. Nusair, Can. J. Phys. 58, 1200 (1980)
'LDA_C_VWN_RPA' : 8 , # S. H. Vosko, L. Wilk, and M. Nusair, Can. J. Phys. 58, 1200 (1980)
'LDA_C_WIGNER' : 2 , # E. Wigner, Trans. Faraday Soc. 34, 678 (1938)
'LDA_C_XALPHA' : 6 , # J. C. Slater, Phys. Rev. 81, 385 (1951)
'LDA_K_GDS08_WORKER' : 100001, # L. M. Ghiringhelli and L. Delle Site, Phys. Rev. B 77, 073104 (2008)
'LDA_K_LP' : 51 , # C. Lee and R. G. Parr, Phys. Rev. A 35, 2377 (1987)
'LDA_K_LP96' : 580, # S. Liu and R. G. Parr, Phys. Rev. A 53, 2211 (1996)
'LDA_K_TF' : 50 , # L. H. Thomas, Math. Proc. Cambridge Philos. Soc. 23, 542 (1927)
'LDA_K_ZLP' : 550, # P. Fuentealba and O. Reyes, Chem. Phys. Lett. 232, 31 (1995)
'LDA_X' : 1 , # P. A. M. Dirac, Math. Proc. Cambridge Philos. Soc. 26, 376 (1930)
'LDA_X_1D_EXPONENTIAL' : 600, # N. Helbig, J. I. Fuks, M. Casula, M. J. Verstraete, M. A. L. Marques, I. V. Tokatly, and A. Rubio, Phys. Rev. A 83, 032503 (2011)
'LDA_X_1D_SOFT' : 21 , # N. Helbig, J. I. Fuks, M. Casula, M. J. Verstraete, M. A. L. Marques, I. V. Tokatly, and A. Rubio, Phys. Rev. A 83, 032503 (2011)
'LDA_X_2D' : 19 , # P. A. M. Dirac, Math. Proc. Cambridge Philos. Soc. 26, 376 (1930)
'LDA_X_ERF' : 546, # P. M. W. Gill, R. D. Adamson, and J. A. Pople, Mol. Phys. 88, 1005 (1996)
'LDA_X_RAE' : 549, # A. Rae, Chem. Phys. Lett. 18, 574 (1973)
'LDA_X_REL' : 532, # A. K. Rajagopal, J. Phys. C: Solid State Phys. 11, L943 (1978)
'LDA_X_SLOC' : 692, # K. Finzel and A. I. Baranov, Int. J. Quantum Chem. 117, 40 (2017)
'LDA_XC_1D_EHWLRG_1' : 536, # M. T. Entwistle, M. J. P. Hodgson, J. Wetherell, B. Longstaff, J. D. Ramsden, and R. W. Godby, Phys. Rev. B 94, 205134 (2016)
'LDA_XC_1D_EHWLRG_2' : 537, # M. T. Entwistle, M. J. P. Hodgson, J. Wetherell, B. Longstaff, J. D. Ramsden, and R. W. Godby, Phys. Rev. B 94, 205134 (2016)
'LDA_XC_1D_EHWLRG_3' : 538, # M. T. Entwistle, M. J. P. Hodgson, J. Wetherell, B. Longstaff, J. D. Ramsden, and R. W. Godby, Phys. Rev. B 94, 205134 (2016)
'LDA_XC_BN05' : 588, # R. Baer and D. Neuhauser, Phys. Rev. Lett. 94, 043002 (2005)
'LDA_XC_GDSMFB' : 577, # S. Groth, T. Dornheim, T. Sjostrom, F. D. Malone, W. M. C. Foulkes, and M. Bonitz, Phys. Rev. Lett. 119, 135001 (2017)
'LDA_XC_KSDT' : 259, # V. V. Karasiev, T. Sjostrom, J. Dufty, and S. B. Trickey, Phys. Rev. Lett. 112, 076403 (2014)
'LDA_XC_LP_A' : 547, # C. Lee and R. G. Parr, Phys. Rev. A 42, 193 (1990)
'LDA_XC_LP_B' : 548, # C. Lee and R. G. Parr, Phys. Rev. A 42, 193 (1990)
'LDA_XC_TETER93' : 20 , # S. Goedecker, M. Teter, and J. Hutter, Phys. Rev. B 54, 1703 (1996)
'LDA_XC_TIH' : 599, # D. J. Tozer, V. E. Ingamells, and N. C. Handy, J. Chem. Phys. 105, 9200 (1996)
'LDA_XC_ZLP' : 43 , # Q. Zhao, M. Levy, and R. G. Parr, Phys. Rev. A 47, 918 (1993)
'HYB_LDA_XC_CAM_LDA0' : 178, # M. A. Mosquera, C. H. Borca, M. A. Ratner, and G. C. Schatz, J. Phys. Chem. A 120, 1605 (2016)
'HYB_LDA_XC_LDA0' : 177, # P. Rinke, A. Schleife, E. Kioupakis, A. Janotti, C. Rodl, F. Bechstedt, M. Scheffler, and C. G. Van de Walle, Phys. Rev. Lett. 108, 126404 (2012)
'GGA_C_ACGGA' : 39 , # A. Cancio, G. P. Chen, B. T. Krull, and K. Burke, J. Chem. Phys. 149, 084116 (2018)
'GGA_C_ACGGAP' : 176, # A. Cancio, G. P. Chen, B. T. Krull, and K. Burke, J. Chem. Phys. 149, 084116 (2018)
'GGA_C_AM05' : 135, # R. Armiento and A. E. Mattsson, Phys. Rev. B 72, 085108 (2005)
'GGA_C_APBE' : 186, # L. A. Constantin, E. Fabiano, S. Laricchia, and F. Della Sala, Phys. Rev. Lett. 106, 186406 (2011)
'GGA_C_BMK' : 280, # A. D. Boese and J. M. L. Martin, J. Chem. Phys. 121, 3405 (2004)
'GGA_C_CCDF' : 313, # J. T. Margraf, C. Kunkel, and K. Reuter, J. Chem. Phys. 150, 244116 (2019)
'GGA_C_CHACHIYO' : 309, # T. Chachiyo and H. Chachiyo, Comput. Theor. Chem. 1172, 112669 (2020)
'GGA_C_CS1' : 565, # N. C. Handy and A. J. Cohen, J. Chem. Phys. 116, 5411 (2002)
'GGA_C_FT97' : 88 , # M. Filatov and W. Thiel, Int. J. Quantum Chem. 62, 603 (1997)
'GGA_C_GAM' : 33 , # H. S. Yu, W. Zhang, P. Verma, X. He, and D. G. Truhlar, Phys. Chem. Chem. Phys. 17, 12146 (2015)
'GGA_C_GAPC' : 555, # E. Fabiano, P. E. Trevisanutto, A. Terentjevs, and L. A. Constantin, J. Chem. Theory Comput. 10, 2016 (2014)
'GGA_C_GAPLOC' : 556, # E. Fabiano, P. E. Trevisanutto, A. Terentjevs, and L. A. Constantin, J. Chem. Theory Comput. 10, 2016 (2014)
'GGA_C_HCTH_A' : 97 , # F. A. Hamprecht, A. J. Cohen, D. J. Tozer, and N. C. Handy, J. Chem. Phys. 109, 6264 (1998)
'GGA_C_HYB_TAU_HCTH' : 283, # A. D. Boese and N. C. Handy, J. Chem. Phys. 116, 9559 (2002)
'GGA_C_LM' : 137, # D. C. Langreth and M. J. Mehl, Phys. Rev. Lett. 47, 446 (1981)
'GGA_C_LYP' : 131, # C. Lee, W. Yang, and R. G. Parr, Phys. Rev. B 37, 785 (1988)
'GGA_C_MGGAC' : 712, # B. Patra, S. Jana, L. A. Constantin, and P. Samal, Phys. Rev. B 100, 155140 (2019)
'GGA_C_N12' : 80 , # R. Peverati and D. G. Truhlar, J. Chem. Theory Comput. 8, 2310 (2012)
'GGA_C_N12_SX' : 79 , # R. Peverati and D. G. Truhlar, Phys. Chem. Chem. Phys. 14, 16187 (2012)
'GGA_C_OP_B88' : 87 , # T. Tsuneda, T. Suzumura, and K. Hirao, J. Chem. Phys. 110, 10664 (1999)
'GGA_C_OP_G96' : 85 , # T. Tsuneda, T. Suzumura, and K. Hirao, J. Chem. Phys. 110, 10664 (1999)
'GGA_C_OP_PBE' : 86 , # T. Tsuneda, T. Suzumura, and K. Hirao, J. Chem. Phys. 110, 10664 (1999)
'GGA_C_OP_PW91' : 262, # T. Tsuneda, T. Suzumura, and K. Hirao, J. Chem. Phys. 110, 10664 (1999)
'GGA_C_OP_XALPHA' : 84 , # T. Tsuneda, T. Suzumura, and K. Hirao, J. Chem. Phys. 110, 10664 (1999)
'GGA_C_OPTC' : 200, # A. J. Cohen and N. C. Handy, Mol. Phys. 99, 607 (2001)
'GGA_C_P86' : 132, # J. P. Perdew, Phys. Rev. B 33, 8822 (1986)
'GGA_C_P86_FT' : 217, # J. P. Perdew, Phys. Rev. B 33, 8822 (1986)
'GGA_C_P86VWN' : 252, # J. P. Perdew, Phys. Rev. B 33, 8822 (1986)
'GGA_C_P86VWN_FT' : 253, # J. P. Perdew, Phys. Rev. B 33, 8822 (1986)
'GGA_C_PBE' : 130, # J. P. Perdew, K. Burke, and M. Ernzerhof, Phys. Rev. Lett. 77, 3865 (1996)
'GGA_C_PBE_JRGX' : 138, # L. S. Pedroza, A. J. R. da Silva, and K. Capelle, Phys. Rev. B 79, 201106 (2009)
'GGA_C_PBE_MOL' : 272, # J. M. del Campo, J. L. Gazquez, S. B. Trickey, and A. Vela, J. Chem. Phys. 136, 104108 (2012)
'GGA_C_PBE_SOL' : 133, # J. P. Perdew, A. Ruzsinszky, G. I. Csonka, O. A. Vydrov, G. E. Scuseria, L. A. Constantin, X. Zhou, and K. Burke, Phys. Rev. Lett. 100, 136406 (2008)
'GGA_C_PBE_VWN' : 216, # E. Kraisler, G. Makov, and I. Kelson, Phys. Rev. A 82, 042516 (2010)
'GGA_C_PBEFE' : 258, # R. Sarmiento-Perez, S. Botti, and M. A. L. Marques, J. Chem. Theory Comput. 11, 3844 (2015)
'GGA_C_PBEINT' : 62 , # E. Fabiano, L. A. Constantin, and F. Della Sala, Phys. Rev. B 82, 113104 (2010)
'GGA_C_PBELOC' : 246, # L. A. Constantin, E. Fabiano, and F. Della Sala, Phys. Rev. B 86, 035130 (2012)
'GGA_C_PW91' : 134, # J. P. Perdew, in Proceedings of the 75. WE-Heraeus-Seminar and 21st Annual International Symposium on Electronic Structure of Solids, edited by P. Ziesche and H. Eschrig (Akademie Verlag, Berlin, 1991) p. 11
'GGA_C_Q2D' : 47 , # L. Chiodo, L. A. Constantin, E. Fabiano, and F. Della Sala, Phys. Rev. Lett. 108, 126402 (2012)
'GGA_C_REGTPSS' : 83 , # J. P. Perdew, A. Ruzsinszky, G. I. Csonka, L. A. Constantin, and J. Sun, Phys. Rev. Lett. 103, 026403 (2009)
'GGA_C_REVTCA' : 99 , # V. Tognetti, P. Cortona, and C. Adamo, Chem. Phys. Lett. 460, 536 (2008)
'GGA_C_RGE2' : 143, # A. Ruzsinszky, G. I. Csonka, and G. E. Scuseria, J. Chem. Theory Comput. 5, 763 (2009)
'GGA_C_SCAN_E0' : 553, # J. Sun, A. Ruzsinszky, and J. P. Perdew, Phys. Rev. Lett. 115, 036402 (2015)
'GGA_C_SG4' : 534, # L. A. Constantin, A. Terentjevs, F. Della Sala, P. Cortona, and E. Fabiano, Phys. Rev. B 93, 045126 (2016)
'GGA_C_SOGGA11' : 152, # R. Peverati, Y. Zhao, and D. G. Truhlar, J. Phys. Chem. Lett. 2, 1991 (2011)
'GGA_C_SOGGA11_X' : 159, # R. Peverati and D. G. Truhlar, J. Chem. Phys. 135, 191102 (2011)
'GGA_C_SPBE' : 89 , # M. Swart, M. Sola, and F. M. Bickelhaupt, J. Chem. Phys. 131, 094103 (2009)
'GGA_C_TAU_HCTH' : 281, # A. D. Boese and N. C. Handy, J. Chem. Phys. 116, 9559 (2002)
'GGA_C_TCA' : 100, # V. Tognetti, P. Cortona, and C. Adamo, J. Chem. Phys. 128, 034101 (2008)
'GGA_C_TM_LYP' : 559, # A. J. Thakkar and S. P. McCarthy, J. Chem. Phys. 131, 134109 (2009)
'GGA_C_TM_PBE' : 560, # A. J. Thakkar and S. P. McCarthy, J. Chem. Phys. 131, 134109 (2009)
'GGA_C_W94' : 561, # L. C. Wilson, Chem. Phys. 181, 337 (1994)
'GGA_C_WI' : 148, # L. C. Wilson and S. Ivanov, Int. J. Quantum Chem. 69, 523 (1998)
'GGA_C_WI0' : 153, # L. C. Wilson and S. Ivanov, Int. J. Quantum Chem. 69, 523 (1998)
'GGA_C_WL' : 147, # L. C. Wilson and M. Levy, Phys. Rev. B 41, 12930 (1990)
'GGA_C_XPBE' : 136, # X. Xu and W. A. Goddard, J. Chem. Phys. 121, 4068 (2004)
'GGA_C_ZPBEINT' : 61 , # L. A. Constantin, E. Fabiano, and F. Della Sala, Phys. Rev. B 84, 233103 (2011)
'GGA_C_ZPBESOL' : 63 , # L. A. Constantin, E. Fabiano, and F. Della Sala, Phys. Rev. B 84, 233103 (2011)
'GGA_C_ZVPBEINT' : 557, # L. A. Constantin, E. Fabiano, and F. D. Sala, J. Chem. Phys. 137, 194105 (2012)
'GGA_C_ZVPBELOC' : 606, # E. Fabiano, L. A. Constantin, P. Cortona, and F. Della Sala, J. Chem. Theory Comput. 11, 122 (2015)
'GGA_C_ZVPBESOL' : 558, # L. A. Constantin, E. Fabiano, and F. D. Sala, J. Chem. Phys. 137, 194105 (2012)
'GGA_K_ABSP1' : 506, # P. K. Acharya, L. J. Bartolotti, S. B. Sears, and R. G. Parr, Proc. Natl. Acad. Sci. U. S. A. 77, 6978 (1980)
'GGA_K_ABSP2' : 507, # P. K. Acharya, L. J. Bartolotti, S. B. Sears, and R. G. Parr, Proc. Natl. Acad. Sci. U. S. A. 77, 6978 (1980)
'GGA_K_ABSP3' : 277, # P. K. Acharya, L. J. Bartolotti, S. B. Sears, and R. G. Parr, Proc. Natl. Acad. Sci. U. S. A. 77, 6978 (1980)
'GGA_K_ABSP4' : 278, # P. K. Acharya, L. J. Bartolotti, S. B. Sears, and R. G. Parr, Proc. Natl. Acad. Sci. U. S. A. 77, 6978 (1980)
'GGA_K_APBE' : 185, # L. A. Constantin, E. Fabiano, S. Laricchia, and F. Della Sala, Phys. Rev. Lett. 106, 186406 (2011)
'GGA_K_APBEINT' : 54 , # S. Laricchia, E. Fabiano, L. A. Constantin, and F. Della Sala, J. Chem. Theory Comput. 7, 2439 (2011)
'GGA_K_BALTIN' : 504, # R. Baltin, Z. Naturforsch. A 27, 1176 (1972)
'GGA_K_DK' : 516, # A. E. DePristo and J. D. Kress, Phys. Rev. A 35, 438 (1987)
'GGA_K_ERNZERHOF' : 520, # M. Ernzerhof, J. Mol. Struct.: THEOCHEM 501--502, 59 (2000)
'GGA_K_EXP4' : 597, # V. V. Karasiev, S. B. Trickey, and F. E. Harris, J. Comput.-Aided Mater. Des. 13, 111 (2006)
'GGA_K_FR_B88' : 514, # P. Fuentealba and O. Reyes, Chem. Phys. Lett. 232, 31 (1995)
'GGA_K_FR_PW86' : 515, # P. Fuentealba and O. Reyes, Chem. Phys. Lett. 232, 31 (1995)
'GGA_K_GDS08' : 591, # L. M. Ghiringhelli and L. Delle Site, Phys. Rev. B 77, 073104 (2008)
'GGA_K_GE2' : 501, # A. S. Kompaneets and E. S. Pavlovskii, Zh. Eksp. Teor. Fiz. 31, 427 (1956), [J. Exp. Theor. Phys. 4, 328 (1957)]
'GGA_K_GHDS10' : 592, # L. M. Ghiringhelli, I. P. Hamilton, and L. D. Site, J. Chem. Phys. 132, 014106 (2010)
'GGA_K_GHDS10R' : 593, # S. B. Trickey, V. V. Karasiev, and A. Vela, Phys. Rev. B 84, 075146 (2011)
'GGA_K_GOLDEN' : 502, # S. Golden, Phys. Rev. 105, 604 (1957)
'GGA_K_GP85' : 510, # S. K. Ghosh and R. G. Parr, J. Chem. Phys. 82, 3307 (1985)
'GGA_K_GR' : 508, # J. L. Gazquez and J. Robles, J. Chem. Phys. 76, 1467 (1982)
'GGA_K_LC94' : 521, # A. Lembarki and H. Chermette, Phys. Rev. A 50, 5328 (1994)
'GGA_K_LGAP' : 620, # L. A. Constantin, E. Fabiano, S. Smiga, and F. Della Sala, Phys. Rev. B 95, 115153 (2017)
'GGA_K_LGAP_GE' : 633, # L. A. Constantin, E. Fabiano, S. Smiga, and F. Della Sala, Phys. Rev. B 95, 115153 (2017)
'GGA_K_LIEB' : 505, # E. H. Lieb, Rev. Mod. Phys. 53, 603 (1981)
'GGA_K_LKT' : 613, # K. Luo, V. V. Karasiev, and S. B. Trickey, Phys. Rev. B 98, 041111 (2018)
'GGA_K_LLP' : 522, # H. Lee, C. Lee, and R. G. Parr, Phys. Rev. A 44, 768 (1991)
'GGA_K_LUDENA' : 509, # E. V. Ludena, in Cond. Matt. Theor., Vol. 1, edited by F. B. Malik (Plenum, New York, 1986) p. 183
'GGA_K_MEYER' : 57 , # A. Meyer, G. C. Wang, and W. H. Young, Z. Naturforsch. A 31, 898 (1976)
'GGA_K_OL1' : 512, # H. Ou-Yang and M. Levy, Int. J. Quantum Chem. 40, 379 (1991)
'GGA_K_OL2' : 513, # H. Ou-Yang and M. Levy, Int. J. Quantum Chem. 40, 379 (1991)
'GGA_K_PBE2' : 616, # V. V. Karasiev, S. B. Trickey, and F. E. Harris, J. Comput.-Aided Mater. Des. 13, 111 (2006)
'GGA_K_PBE3' : 595, # V. V. Karasiev, S. B. Trickey, and F. E. Harris, J. Comput.-Aided Mater. Des. 13, 111 (2006)
'GGA_K_PBE4' : 596, # V. V. Karasiev, S. B. Trickey, and F. E. Harris, J. Comput.-Aided Mater. Des. 13, 111 (2006)
'GGA_K_PEARSON' : 511, # D. J. Lacks and R. G. Gordon, J. Chem. Phys. 100, 4446 (1994)
'GGA_K_PERDEW' : 517, # J. P. Perdew, Phys. Lett. A 165, 79 (1992)
'GGA_K_PG1' : 219, # L. A. Constantin, E. Fabiano, and F. Della Sala, J. Phys. Chem. Lett. 9, 4385 (2018), pMID: 30019904
'GGA_K_RATIONAL_P' : 218, # J. Lehtomaki and O. Lopez-Acevedo, Phys. Rev. B 100, 165111 (2019)
'GGA_K_REVAPBE' : 55 , # L. A. Constantin, E. Fabiano, S. Laricchia, and F. Della Sala, Phys. Rev. Lett. 106, 186406 (2011)
'GGA_K_REVAPBEINT' : 53 , # S. Laricchia, E. Fabiano, L. A. Constantin, and F. Della Sala, J. Chem. Theory Comput. 7, 2439 (2011)
'GGA_K_TFVW' : 52 , # C. F. von Weizsacker, Z. Phys. 96, 431 (1935)
'GGA_K_TFVW_OPT' : 635, # L. A. Espinosa Leal, A. Karpenko, M. A. Caro, and O. Lopez-Acevedo, Phys. Chem. Chem. Phys. 17, 31463 (2015)
'GGA_K_THAKKAR' : 523, # A. J. Thakkar, Phys. Rev. A 46, 6920 (1992)
'GGA_K_TKVLN' : 594, # S. B. Trickey, V. V. Karasiev, and A. Vela, Phys. Rev. B 84, 075146 (2011)
'GGA_K_TW1' : 187, # F. Tran and T. A. Wesolowski, Int. J. Quantum Chem. 89, 441 (2002)
'GGA_K_TW2' : 188, # F. Tran and T. A. Wesolowski, Int. J. Quantum Chem. 89, 441 (2002)
'GGA_K_TW3' : 189, # F. Tran and T. A. Wesolowski, Int. J. Quantum Chem. 89, 441 (2002)
'GGA_K_TW4' : 190, # F. Tran and T. A. Wesolowski, Int. J. Quantum Chem. 89, 441 (2002)
'GGA_K_VJKS' : 519, # L. Vitos, B. Johansson, J. Kollar, and H. L. Skriver, Phys. Rev. A 61, 052511 (2000)
'GGA_K_VSK' : 518, # L. Vitos, H. L. Skriver, and J. Kollar, Phys. Rev. B 57, 12611 (1998)
'GGA_K_VT84F' : 619, # V. V. Karasiev, D. Chakraborty, O. A. Shukruto, and S. B. Trickey, Phys. Rev. B 88, 161108 (2013)
'GGA_K_VW' : 500, # C. F. von Weizsacker, Z. Phys. 96, 431 (1935)
'GGA_K_YT65' : 503, # K. Yonei and Y. Tomishima, J. Phys. Soc. Jpn. 20, 1051 (1965)
'GGA_X_2D_B86' : 128, # J. G. Vilhena, E. Rasanen, M. A. L. Marques, and S. Pittalis, J. Chem. Theory Comput. 10, 1837 (2014)
'GGA_X_2D_B86_MGC' : 124, # S. Pittalis, E. Rasanen, J. G. Vilhena, and M. A. L. Marques, Phys. Rev. A 79, 012503 (2009)
'GGA_X_2D_B88' : 127, # J. G. Vilhena, E. Rasanen, M. A. L. Marques, and S. Pittalis, J. Chem. Theory Comput. 10, 1837 (2014)
'GGA_X_2D_PBE' : 129, # J. G. Vilhena, E. Rasanen, M. A. L. Marques, and S. Pittalis, J. Chem. Theory Comput. 10, 1837 (2014)
'GGA_X_AIRY' : 192, # L. A. Constantin, A. Ruzsinszky, and J. P. Perdew, Phys. Rev. B 80, 035125 (2009)
'GGA_X_AK13' : 56 , # R. Armiento and S. Kummel, Phys. Rev. Lett. 111, 036402 (2013)
'GGA_X_AM05' : 120, # R. Armiento and A. E. Mattsson, Phys. Rev. B 72, 085108 (2005)
'GGA_X_APBE' : 184, # L. A. Constantin, E. Fabiano, S. Laricchia, and F. Della Sala, Phys. Rev. Lett. 106, 186406 (2011)
'GGA_X_B86' : 103, # A. D. Becke, J. Chem. Phys. 84, 4524 (1986)
'GGA_X_B86_MGC' : 105, # A. D. Becke, J. Chem. Phys. 84, 4524 (1986)
'GGA_X_B86_R' : 41 , # I. Hamada, Phys. Rev. B 89, 121103 (2014)
'GGA_X_B88' : 106, # A. D. Becke, Phys. Rev. A 38, 3098 (1988)
'GGA_X_B88_6311G' : 179, # J. M. Ugalde, C. Sarasola, and M. Aguado, J. Phys. B: At., Mol. Opt. Phys. 27, 423 (1994)
'GGA_X_B88M' : 570, # E. Proynov, H. Chermette, and D. R. Salahub, J. Chem. Phys. 113, 10013 (2000)
'GGA_X_BAYESIAN' : 125, # J. J. Mortensen, K. Kaasbjerg, S. L. Frederiksen, J. K. Norskov, J. P. Sethna, and K. W. Jacobsen, Phys. Rev. Lett. 95, 216401 (2005)
'GGA_X_BCGP' : 38 , # K. Burke, A. Cancio, T. Gould, and S. Pittalis, ArXiv e-prints (2014), arXiv:1409.4834 [cond-mat.mtrl-sci]
'GGA_X_BEEFVDW' : 285, # J. Wellendorff, K. T. Lundgaard, A. Mogelhoj, V. Petzold, D. D. Landis, J. K. Norskov, T. Bligaard, and K. W. Jacobsen, Phys. Rev. B 85, 235149 (2012)
'GGA_X_BPCCAC' : 98 , # E. Bremond, D. Pilard, I. Ciofini, H. Chermette, C. Adamo, and P. Cortona, Theor. Chem. Acc. 131, 1184 (2012)
'GGA_X_C09X' : 158, # V. R. Cooper, Phys. Rev. B 81, 161104 (2010)
'GGA_X_CAP' : 270, # J. Carmona-Espindola, J. L. Gazquez, A. Vela, and S. B. Trickey, J. Chem. Phys. 142, 054105 (2015)
'GGA_X_CHACHIYO' : 298, # T. Chachiyo and H. Chachiyo, Molecules 25, 3485 (2020)
'GGA_X_DK87_R1' : 111, # A. E. DePristo and J. D. Kress, J. Chem. Phys. 86, 1425 (1987)
'GGA_X_DK87_R2' : 112, # A. E. DePristo and J. D. Kress, J. Chem. Phys. 86, 1425 (1987)
'GGA_X_EB88' : 271, # P. Elliott and K. Burke, Can. J. Chem. 87, 1485 (2009)
'GGA_X_ECMV92' : 215, # E. Engel, J. A. Chevary, L. D. Macdonald, and S. H. Vosko, Z. Phys. D: At., Mol. Clusters 23, 7 (1992)
'GGA_X_EV93' : 35 , # E. Engel and S. H. Vosko, Phys. Rev. B 47, 13164 (1993)
'GGA_X_FD_LB94' : 604, # A. P. Gaiduk and V. N. Staroverov, Phys. Rev. A 83, 012509 (2011)
'GGA_X_FD_REVLB94' : 605, # A. P. Gaiduk and V. N. Staroverov, Phys. Rev. A 83, 012509 (2011)
'GGA_X_FT97_A' : 114, # M. Filatov and W. Thiel, Mol. Phys. 91, 847 (1997)
'GGA_X_FT97_B' : 115, # M. Filatov and W. Thiel, Mol. Phys. 91, 847 (1997)
'GGA_X_G96' : 107, # P. M. W. Gill, Mol. Phys. 89, 433 (1996)
'GGA_X_GAM' : 32 , # H. S. Yu, W. Zhang, P. Verma, X. He, and D. G. Truhlar, Phys. Chem. Chem. Phys. 17, 12146 (2015)
'GGA_X_GG99' : 535, # A. T. Gilbert and P. M. Gill, Chem. Phys. Lett. 312, 511 (1999)
'GGA_X_HCTH_A' : 34 , # F. A. Hamprecht, A. J. Cohen, D. J. Tozer, and N. C. Handy, J. Chem. Phys. 109, 6264 (1998)
'GGA_X_HERMAN' : 104, # F. Herman, J. P. V. Dyke, and I. B. Ortenburger, Phys. Rev. Lett. 22, 807 (1969)
'GGA_X_HJS_B88' : 527, # T. M. Henderson, B. G. Janesko, and G. E. Scuseria, J. Chem. Phys. 128, 194105 (2008)
'GGA_X_HJS_B88_V2' : 46 , # E. Weintraub, T. M. Henderson, and G. E. Scuseria, J. Chem. Theory Comput. 5, 754 (2009)
'GGA_X_HJS_B97X' : 528, # T. M. Henderson, B. G. Janesko, and G. E. Scuseria, J. Chem. Phys. 128, 194105 (2008)
'GGA_X_HJS_PBE' : 525, # T. M. Henderson, B. G. Janesko, and G. E. Scuseria, J. Chem. Phys. 128, 194105 (2008)
'GGA_X_HJS_PBE_SOL' : 526, # T. M. Henderson, B. G. Janesko, and G. E. Scuseria, J. Chem. Phys. 128, 194105 (2008)
'GGA_X_HTBS' : 191, # P. Haas, F. Tran, P. Blaha, and K. Schwarz, Phys. Rev. B 83, 205117 (2011)
'GGA_X_ITYH' : 529, # H. Iikura, T. Tsuneda, T. Yanai, and K. Hirao, J. Chem. Phys. 115, 3540 (2001)
'GGA_X_ITYH_OPTX' : 622, # N. C. Handy and A. J. Cohen, Mol. Phys. 99, 403 (2001)
'GGA_X_ITYH_PBE' : 623, # J. P. Perdew, K. Burke, and M. Ernzerhof, Phys. Rev. Lett. 77, 3865 (1996)
'GGA_X_KGG99' : 544, # A. T. Gilbert and P. M. Gill, Chem. Phys. Lett. 312, 511 (1999)
'GGA_X_KT1' : 145, # T. W. Keal and D. J. Tozer, J. Chem. Phys. 119, 3015 (2003)
'GGA_X_LAG' : 193, # L. Vitos, B. Johansson, J. Kollar, and H. L. Skriver, Phys. Rev. B 62, 10046 (2000)
'GGA_X_LAMBDA_CH_N' : 44 , # M. M. Odashima, K. Capelle, and S. B. Trickey, J. Chem. Theory Comput. 5, 798 (2009)
'GGA_X_LAMBDA_LO_N' : 45 , # M. M. Odashima, K. Capelle, and S. B. Trickey, J. Chem. Theory Comput. 5, 798 (2009)
'GGA_X_LAMBDA_OC2_N' : 40 , # M. M. Odashima, K. Capelle, and S. B. Trickey, J. Chem. Theory Comput. 5, 798 (2009)
'GGA_X_LB' : 160, # R. van Leeuwen and E. J. Baerends, Phys. Rev. A 49, 2421 (1994)
'GGA_X_LBM' : 182, # P. R. T. Schipper, O. V. Gritsenko, S. J. A. van Gisbergen, and E. J. Baerends, J. Chem. Phys. 112, 1344 (2000)
'GGA_X_LG93' : 113, # D. J. Lacks and R. G. Gordon, Phys. Rev. A 47, 4681 (1993)
'GGA_X_LSPBE' : 168, # J. C. Pacheco-Kato, J. M. del Campo, J. L. Gazquez, S. Trickey, and A. Vela, Chem. Phys. Lett. 651, 268 (2016)
'GGA_X_LSRPBE' : 169, # J. C. Pacheco-Kato, J. M. del Campo, J. L. Gazquez, S. Trickey, and A. Vela, Chem. Phys. Lett. 651, 268 (2016)
'GGA_X_LV_RPW86' : 58 , # K. Berland and P. Hyldgaard, Phys. Rev. B 89, 035412 (2014)
'GGA_X_MB88' : 149, # V. Tognetti and C. Adamo, J. Phys. Chem. A 113, 14415 (2009)
'GGA_X_MPBE' : 122, # C. Adamo and V. Barone, J. Chem. Phys. 116, 5933 (2002)
'GGA_X_MPW91' : 119, # C. Adamo and V. Barone, J. Chem. Phys. 108, 664 (1998)
'GGA_X_N12' : 82 , # R. Peverati and D. G. Truhlar, J. Chem. Theory Comput. 8, 2310 (2012)
'GGA_X_NCAP' : 180, # J. Carmona-Espindola, J. L. Gazquez, A. Vela, and S. B. Trickey, J. Chem. Theory Comput. 15, 303 (2019)
'GGA_X_OL2' : 183, # P. Fuentealba and O. Reyes, Chem. Phys. Lett. 232, 31 (1995)
'GGA_X_OPTB86B_VDW' : 171, # J. Klimes, D. R. Bowler, and A. Michaelides, Phys. Rev. B 83, 195131 (2011)
'GGA_X_OPTB88_VDW' : 139, # J. Klimes, D. R. Bowler, and A. Michaelides, J. Phys.: Condens. Matter 22, 022201 (2010)
'GGA_X_OPTPBE_VDW' : 141, # J. Klimes, D. R. Bowler, and A. Michaelides, J. Phys.: Condens. Matter 22, 022201 (2010)
'GGA_X_OPTX' : 110, # N. C. Handy and A. J. Cohen, Mol. Phys. 99, 403 (2001)
'GGA_X_PBE' : 101, # J. P. Perdew, K. Burke, and M. Ernzerhof, Phys. Rev. Lett. 77, 3865 (1996)
'GGA_X_PBE_JSJR' : 126, # L. S. Pedroza, A. J. R. da Silva, and K. Capelle, Phys. Rev. B 79, 201106 (2009)
'GGA_X_PBE_MOL' : 49 , # J. M. del Campo, J. L. Gazquez, S. B. Trickey, and A. Vela, J. Chem. Phys. 136, 104108 (2012)
'GGA_X_PBE_R' : 102, # Y. Zhang and W. Yang, Phys. Rev. Lett. 80, 890 (1998)
'GGA_X_PBE_SOL' : 116, # J. P. Perdew, A. Ruzsinszky, G. I. Csonka, O. A. Vydrov, G. E. Scuseria, L. A. Constantin, X. Zhou, and K. Burke, Phys. Rev. Lett. 100, 136406 (2008)
'GGA_X_PBE_TCA' : 59 , # V. Tognetti, P. Cortona, and C. Adamo, Chem. Phys. Lett. 460, 536 (2008)
'GGA_X_PBEA' : 121, # G. K. H. Madsen, Phys. Rev. B 75, 195108 (2007)
'GGA_X_PBEFE' : 265, # R. Sarmiento-Perez, S. Botti, and M. A. L. Marques, J. Chem. Theory Comput. 11, 3844 (2015)
'GGA_X_PBEINT' : 60 , # E. Fabiano, L. A. Constantin, and F. Della Sala, Phys. Rev. B 82, 113104 (2010)
'GGA_X_PBEK1_VDW' : 140, # J. Klimes, D. R. Bowler, and A. Michaelides, J. Phys.: Condens. Matter 22, 022201 (2010)
'GGA_X_PBEPOW' : 539, # Eric Bremond, J. Chem. Phys. 145, 244102 (2016)
'GGA_X_PBETRANS' : 291, # Eric Bremond, I. Ciofini, and C. Adamo, Mol. Phys. 114, 1059 (2016)
'GGA_X_PW86' : 108, # J. P. Perdew and W. Yue, Phys. Rev. B 33, 8800 (1986)
'GGA_X_PW91' : 109, # J. P. Perdew, in Proceedings of the 75. WE-Heraeus-Seminar and 21st Annual International Symposium on Electronic Structure of Solids, edited by P. Ziesche and H. Eschrig (Akademie Verlag, Berlin, 1991) p. 11
'GGA_X_PW91_MOD' : 316, # J. P. Perdew, in Proceedings of the 75. WE-Heraeus-Seminar and 21st Annual International Symposium on Electronic Structure of Solids, edited by P. Ziesche and H. Eschrig (Akademie Verlag, Berlin, 1991) p. 11
'GGA_X_Q2D' : 48 , # L. Chiodo, L. A. Constantin, E. Fabiano, and F. Della Sala, Phys. Rev. Lett. 108, 126402 (2012)
'GGA_X_REVSSB_D' : 312, # M. Swart, M. Sola, and F. M. Bickelhaupt, J. Comput. Chem. 32, 1117 (2011)
'GGA_X_RGE2' : 142, # A. Ruzsinszky, G. I. Csonka, and G. E. Scuseria, J. Chem. Theory Comput. 5, 763 (2009)
'GGA_X_RPBE' : 117, # B. Hammer, L. B. Hansen, and J. K. Norskov, Phys. Rev. B 59, 7413 (1999)
'GGA_X_RPW86' : 144, # E. D. Murray, K. Lee, and D. C. Langreth, J. Chem. Theory Comput. 5, 2754 (2009)
'GGA_X_S12G' : 495, # M. Swart, Chem. Phys. Lett. 580, 166 (2013)
'GGA_X_SFAT' : 530, # A. Savin and H.-J. Flad, Int. J. Quantum Chem. 56, 327 (1995)
'GGA_X_SFAT_PBE' : 601, # A. Savin and H.-J. Flad, Int. J. Quantum Chem. 56, 327 (1995)
'GGA_X_SG4' : 533, # L. A. Constantin, A. Terentjevs, F. Della Sala, P. Cortona, and E. Fabiano, Phys. Rev. B 93, 045126 (2016)
'GGA_X_SOGGA' : 150, # Y. Zhao and D. G. Truhlar, J. Chem. Phys. 128, 184109 (2008)
'GGA_X_SOGGA11' : 151, # R. Peverati, Y. Zhao, and D. G. Truhlar, J. Phys. Chem. Lett. 2, 1991 (2011)
'GGA_X_SSB' : 91 , # M. Swart, M. Sola, and F. M. Bickelhaupt, J. Chem. Phys. 131, 094103 (2009)
'GGA_X_SSB_D' : 92 , # M. Swart, M. Sola, and F. M. Bickelhaupt, J. Chem. Phys. 131, 094103 (2009)
'GGA_X_SSB_SW' : 90 , # M. Swart, M. Sola, and F. M. Bickelhaupt, J. Comput. Methods Sci. Eng. 9, 69 (2009)
'GGA_X_VMT84_GE' : 68 , # A. Vela, J. C. Pacheco-Kato, J. L. Gazquez, J. M. del Campo, and S. B. Trickey, J. Chem. Phys. 136, 144115 (2012)
'GGA_X_VMT84_PBE' : 69 , # A. Vela, J. C. Pacheco-Kato, J. L. Gazquez, J. M. del Campo, and S. B. Trickey, J. Chem. Phys. 136, 144115 (2012)
'GGA_X_VMT_GE' : 70 , # A. Vela, V. Medel, and S. B. Trickey, J. Chem. Phys. 130, 244103 (2009)
'GGA_X_VMT_PBE' : 71 , # A. Vela, V. Medel, and S. B. Trickey, J. Chem. Phys. 130, 244103 (2009)
'GGA_X_WC' : 118, # Z. Wu and R. E. Cohen, Phys. Rev. B 73, 235116 (2006)
'GGA_X_WPBEH' : 524, # J. Heyd, G. E. Scuseria, and M. Ernzerhof, J. Chem. Phys. 118, 8207 (2003)
'GGA_X_XPBE' : 123, # X. Xu and W. A. Goddard, J. Chem. Phys. 121, 4068 (2004)
'GGA_XC_B97_D' : 170, # S. Grimme, J. Comput. Chem. 27, 1787 (2006)
'GGA_XC_B97_GGA1' : 96 , # A. J. Cohen and N. C. Handy, Chem. Phys. Lett. 316, 160 (2000)
'GGA_XC_BEEFVDW' : 286, # J. Wellendorff, K. T. Lundgaard, A. Mogelhoj, V. Petzold, D. D. Landis, J. K. Norskov, T. Bligaard, and K. W. Jacobsen, Phys. Rev. B 85, 235149 (2012)
'GGA_XC_EDF1' : 165, # R. D. Adamson, P. M. W. Gill, and J. A. Pople, Chem. Phys. Lett. 284, 6 (1998)
'GGA_XC_HCTH_120' : 162, # A. D. Boese, N. L. Doltsinis, N. C. Handy, and M. Sprik, J. Chem. Phys. 112, 1670 (2000)
'GGA_XC_HCTH_147' : 163, # A. D. Boese, N. L. Doltsinis, N. C. Handy, and M. Sprik, J. Chem. Phys. 112, 1670 (2000)
'GGA_XC_HCTH_407' : 164, # A. D. Boese and N. C. Handy, J. Chem. Phys. 114, 5497 (2001)
'GGA_XC_HCTH_407P' : 93 , # A. D. Boese, A. Chandra, J. M. L. Martin, and D. Marx, J. Chem. Phys. 119, 5965 (2003)
'GGA_XC_HCTH_93' : 161, # F. A. Hamprecht, A. J. Cohen, D. J. Tozer, and N. C. Handy, J. Chem. Phys. 109, 6264 (1998)
'GGA_XC_HCTH_P14' : 95 , # G. Menconi, P. J. Wilson, and D. J. Tozer, J. Chem. Phys. 114, 3958 (2001)
'GGA_XC_HCTH_P76' : 94 , # G. Menconi, P. J. Wilson, and D. J. Tozer, J. Chem. Phys. 114, 3958 (2001)
'GGA_XC_HLE16' : 545, # P. Verma and D. G. Truhlar, J. Phys. Chem. Lett. 8, 380 (2017)
'GGA_XC_KT1' : 167, # T. W. Keal and D. J. Tozer, J. Chem. Phys. 119, 3015 (2003)
'GGA_XC_KT2' : 146, # T. W. Keal and D. J. Tozer, J. Chem. Phys. 119, 3015 (2003)
'GGA_XC_KT3' : 587, # T. W. Keal and D. J. Tozer, J. Chem. Phys. 121, 5654 (2004)
'GGA_XC_LB07' : 589, # E. Livshits and R. Baer, Phys. Chem. Chem. Phys. 9, 2932 (2007)
'GGA_XC_MOHLYP' : 194, # N. E. Schultz, Y. Zhao, and D. G. Truhlar, J. Phys. Chem. A 109, 11127 (2005)
'GGA_XC_MOHLYP2' : 195, # J. Zheng, Y. Zhao, and D. G. Truhlar, J. Chem. Theory Comput. 5, 808 (2009)
'GGA_XC_MPWLYP1W' : 174, # E. E. Dahlke and D. G. Truhlar, J. Phys. Chem. B 109, 15677 (2005)
'GGA_XC_NCAP' : 181, # J. Carmona-Espindola, J. L. Gazquez, A. Vela, and S. B. Trickey, J. Chem. Theory Comput. 15, 303 (2019)
'GGA_XC_OBLYP_D' : 67 , # L. Goerigk and S. Grimme, J. Chem. Theory Comput. 6, 107 (2010)
'GGA_XC_OPBE_D' : 65 , # L. Goerigk and S. Grimme, J. Chem. Theory Comput. 6, 107 (2010)
'GGA_XC_OPWLYP_D' : 66 , # L. Goerigk and S. Grimme, J. Chem. Theory Comput. 6, 107 (2010)
'GGA_XC_PBE1W' : 173, # E. E. Dahlke and D. G. Truhlar, J. Phys. Chem. B 109, 15677 (2005)
'GGA_XC_PBELYP1W' : 175, # E. E. Dahlke and D. G. Truhlar, J. Phys. Chem. B 109, 15677 (2005)
'GGA_XC_TH1' : 154, # D. J. Tozer and N. C. Handy, J. Chem. Phys. 108, 2545 (1998)
'GGA_XC_TH2' : 155, # D. J. Tozer and N. C. Handy, J. Phys. Chem. A 102, 3162 (1998)
'GGA_XC_TH3' : 156, # N. C. Handy and D. J. Tozer, Mol. Phys. 94, 707 (1998)
'GGA_XC_TH4' : 157, # N. C. Handy and D. J. Tozer, Mol. Phys. 94, 707 (1998)
'GGA_XC_TH_FC' : 197, # D. J. Tozer, N. C. Handy, and W. H. Green, Chem. Phys. Lett. 273, 183 (1997)
'GGA_XC_TH_FCFO' : 198, # D. J. Tozer, N. C. Handy, and W. H. Green, Chem. Phys. Lett. 273, 183 (1997)
'GGA_XC_TH_FCO' : 199, # D. J. Tozer, N. C. Handy, and W. H. Green, Chem. Phys. Lett. 273, 183 (1997)
'GGA_XC_TH_FL' : 196, # D. J. Tozer, N. C. Handy, and W. H. Green, Chem. Phys. Lett. 273, 183 (1997)
'GGA_XC_VV10' : 255, # O. A. Vydrov and T. Van Voorhis, J. Chem. Phys. 133, 244103 (2010)
'GGA_XC_XLYP' : 166, # X. Xu and W. A. Goddard, Proc. Natl. Acad. Sci. U. S. A. 101, 2673 (2004)
#'HYB_GGA_X_LC2GAU' : 710, # J.-W. Song, M. A. Watson, and K. Hirao, J. Chem. Phys. 131, 144108 (2009)
#'HYB_GGA_X_LCGAU' : 708, # J.-W. Song, S. Tokura, T. Sato, M. A. Watson, and K. Hirao, J. Chem. Phys. 127, 154109 (2007)
#'HYB_GGA_X_LCGAU_CORE' : 709, # J.-W. Song, M. A. Watson, A. Nakata, and K. Hirao, J. Chem. Phys. 129, 184113 (2008)
'HYB_GGA_X_N12_SX' : 81 , # R. Peverati and D. G. Truhlar, Phys. Chem. Chem. Phys. 14, 16187 (2012)
'HYB_GGA_X_S12H' : 496, # M. Swart, Chem. Phys. Lett. 580, 166 (2013)
'HYB_GGA_X_SOGGA11_X' : 426, # R. Peverati and D. G. Truhlar, J. Chem. Phys. 135, 191102 (2011)
'HYB_GGA_XC_APBE0' : 607, # E. Fabiano, L. A. Constantin, P. Cortona, and F. Della Sala, J. Chem. Theory Comput. 11, 122 (2015)
'HYB_GGA_XC_APF' : 409, # A. Austin, G. A. Petersson, M. J. Frisch, F. J. Dobek, G. Scalmani, and K. Throssell, J. Chem. Theory Comput. 8, 4989 (2012)
'HYB_GGA_XC_B1LYP' : 416, # C. Adamo and V. Barone, Chem. Phys. Lett. 274, 242 (1997)
'HYB_GGA_XC_B1PW91' : 417, # C. Adamo and V. Barone, Chem. Phys. Lett. 274, 242 (1997)
'HYB_GGA_XC_B1WC' : 412, # D. I. Bilc, R. Orlando, R. Shaltaf, G.-M. Rignanese, J. Iniguez, and P. Ghosez, Phys. Rev. B 77, 165107 (2008)
#'HYB_GGA_XC_B2PLYP' : 713, # S. Grimme, J. Chem. Phys. 124, 034108 (2006)
'HYB_GGA_XC_B3LYP' : 402, # P. J. Stephens, F. J. Devlin, C. F. Chabalowski, and M. J. Frisch, J. Phys. Chem. 98, 11623 (1994)
'HYB_GGA_XC_B3LYP5' : 475, # P. J. Stephens, F. J. Devlin, C. F. Chabalowski, and M. J. Frisch, J. Phys. Chem. 98, 11623 (1994)
'HYB_GGA_XC_B3LYP_MCM1' : 461, # M. T. Caldeira and R. Custodio, J. Mol. Model. 25, 62 (2019)
'HYB_GGA_XC_B3LYP_MCM2' : 462, # M. T. Caldeira and R. Custodio, J. Mol. Model. 25, 62 (2019)
'HYB_GGA_XC_B3LYPS' : 459, # M. Reiher, O. Salomon, and B. A. Hess, Theor. Chem. Acc. 107, 48 (2001)
'HYB_GGA_XC_B3P86' : 403, # Defined through Gaussian implementation
#'HYB_GGA_XC_B3P86_NWCHEM' : 315, # Defined through NWChem implementation
'HYB_GGA_XC_B3PW91' : 401, # A. D. Becke, J. Chem. Phys. 98, 5648 (1993)
'HYB_GGA_XC_B5050LYP' : 572, # Y. Shao, M. Head-Gordon, and A. I. Krylov, J. Chem. Phys. 118, 4807 (2003)
'HYB_GGA_XC_B97' : 407, # A. D. Becke, J. Chem. Phys. 107, 8554 (1997)
'HYB_GGA_XC_B97_1' : 408, # F. A. Hamprecht, A. J. Cohen, D. J. Tozer, and N. C. Handy, J. Chem. Phys. 109, 6264 (1998)
'HYB_GGA_XC_B97_1P' : 266, # A. J. Cohen and N. C. Handy, Chem. Phys. Lett. 316, 160 (2000)
'HYB_GGA_XC_B97_2' : 410, # P. J. Wilson, T. J. Bradley, and D. J. Tozer, J. Chem. Phys. 115, 9233 (2001)
'HYB_GGA_XC_B97_3' : 414, # T. W. Keal and D. J. Tozer, J. Chem. Phys. 123, 121103 (2005)
'HYB_GGA_XC_B97_K' : 413, # A. D. Boese and J. M. L. Martin, J. Chem. Phys. 121, 3405 (2004)
'HYB_GGA_XC_BHANDH' : 435, # A. D. Becke, J. Chem. Phys. 98, 1372 (1993)
'HYB_GGA_XC_BHANDHLYP' : 436, # A. D. Becke, J. Chem. Phys. 98, 1372 (1993)
'HYB_GGA_XC_BLYP35' : 499, # M. Renz, K. Theilacker, C. Lambert, and M. Kaupp, J. Am. Chem. Soc. 131, 16292 (2009)
'HYB_GGA_XC_CAM_B3LYP' : 433, # T. Yanai, D. P. Tew, and N. C. Handy, Chem. Phys. Lett. 393, 51 (2004)
'HYB_GGA_XC_CAM_O3LYP' : 395, # M. P. Bircher and U. Rothlisberger, J. Chem. Theory Comput. 14, 3184 (2018)
'HYB_GGA_XC_CAM_PBEH' : 681, # W. Chen, G. Miceli, G.-M. Rignanese, and A. Pasquarello, Phys. Rev. Mater. 2, 073803 (2018)
'HYB_GGA_XC_CAM_QTP_00' : 490, # P. Verma and R. J. Bartlett, J. Chem. Phys. 140, 18A534 (2014)
'HYB_GGA_XC_CAM_QTP_01' : 482, # Y. Jin and R. J. Bartlett, J. Chem. Phys. 145, 034107 (2016)
'HYB_GGA_XC_CAM_QTP_02' : 491, # R. L. A. Haiduke and R. J. Bartlett, J. Chem. Phys. 148, 184106 (2018)
'HYB_GGA_XC_CAMH_B3LYP' : 614, # Y. Shao, Y. Mei, D. Sundholm, and V. R. I. Kaila, J. Chem. Theory Comput. 16, 587 (2020), https://doi.org/10.1021/acs.jctc.9b00823
'HYB_GGA_XC_CAMY_B3LYP' : 470, # M. Seth and T. Ziegler, J. Chem. Theory Comput. 8, 901 (2012)
'HYB_GGA_XC_CAMY_BLYP' : 455, # Y. Akinaga and S. Ten-no, Chem. Phys. Lett. 462, 348 (2008)
'HYB_GGA_XC_CAMY_PBEH' : 682, # W. Chen, G. Miceli, G.-M. Rignanese, and A. Pasquarello, Phys. Rev. Mater. 2, 073803 (2018)
'HYB_GGA_XC_CAP0' : 477, # J. Carmona-Espindola, J. L. Gazquez, A. Vela, and S. B. Trickey, Theor. Chem. Acc. 135, 120 (2016)
'HYB_GGA_XC_EDF2' : 476, # C. Y. Lin, M. W. George, and P. M. W. Gill, Aust. J. Chem. 57, 365 (2004)
'HYB_GGA_XC_HAPBE' : 608, # E. Fabiano, L. A. Constantin, P. Cortona, and F. Della Sala, J. Chem. Theory Comput. 11, 122 (2015)
'HYB_GGA_XC_HFLYP' : 314, # C. Lee, W. Yang, and R. G. Parr, Phys. Rev. B 37, 785 (1988)
#'HYB_GGA_XC_HISS' : 717, # T. M. Henderson, A. F. Izmaylov, G. E. Scuseria, and A. Savin, J. Chem. Phys. 127, 221103 (2007)
'HYB_GGA_XC_HJS_B88' : 431, # T. M. Henderson, B. G. Janesko, and G. E. Scuseria, J. Chem. Phys. 128, 194105 (2008)
'HYB_GGA_XC_HJS_B97X' : 432, # T. M. Henderson, B. G. Janesko, and G. E. Scuseria, J. Chem. Phys. 128, 194105 (2008)
'HYB_GGA_XC_HJS_PBE' : 429, # T. M. Henderson, B. G. Janesko, and G. E. Scuseria, J. Chem. Phys. 128, 194105 (2008)
'HYB_GGA_XC_HJS_PBE_SOL' : 430, # T. M. Henderson, B. G. Janesko, and G. E. Scuseria, J. Chem. Phys. 128, 194105 (2008)
'HYB_GGA_XC_HPBEINT' : 472, # E. Fabiano, L. A. Constantin, and F. Della Sala, Int. J. Quantum Chem. 113, 673 (2013)
'HYB_GGA_XC_HSE03' : 427, # J. Heyd, G. E. Scuseria, and M. Ernzerhof, J. Chem. Phys. 118, 8207 (2003)
'HYB_GGA_XC_HSE06' : 428, # J. Heyd, G. E. Scuseria, and M. Ernzerhof, J. Chem. Phys. 118, 8207 (2003)
'HYB_GGA_XC_HSE12' : 479, # J. E. Moussa, P. A. Schultz, and J. R. Chelikowsky, J. Chem. Phys. 136, 204117 (2012)
'HYB_GGA_XC_HSE12S' : 480, # J. E. Moussa, P. A. Schultz, and J. R. Chelikowsky, J. Chem. Phys. 136, 204117 (2012)
'HYB_GGA_XC_HSE_SOL' : 481, # L. Schimka, J. Harl, and G. Kresse, J. Chem. Phys. 134, 024116 (2011)
'HYB_GGA_XC_KMLYP' : 485, # J. K. Kang and C. B. Musgrave, J. Chem. Phys. 115, 11040 (2001)
'HYB_GGA_XC_LC_BLYP' : 400, # L. N. Anderson, M. B. Oviedo, and B. M. Wong, J. Chem. Theory Comput. 13, 1656 (2017)
'HYB_GGA_XC_LC_BOP' : 636, # J.-W. Song, T. Hirosawa, T. Tsuneda, and K. Hirao, J. Chem. Phys. 126, 154105 (2007)
'HYB_GGA_XC_LC_PBEOP' : 637, # Y. Tawada, T. Tsuneda, S. Yanagisawa, T. Yanai, and K. Hirao, J. Chem. Phys. 120, 8425 (2004)
'HYB_GGA_XC_LC_QTP' : 492, # R. L. A. Haiduke and R. J. Bartlett, J. Chem. Phys. 148, 184106 (2018)
'HYB_GGA_XC_LC_VV10' : 469, # O. A. Vydrov and T. Van Voorhis, J. Chem. Phys. 133, 244103 (2010)
'HYB_GGA_XC_LC_WPBE' : 478, # O. A. Vydrov and G. E. Scuseria, J. Chem. Phys. 125, 234109 (2006)
'HYB_GGA_XC_LC_WPBE08_WHS' : 488, # E. Weintraub, T. M. Henderson, and G. E. Scuseria, J. Chem. Theory Comput. 5, 754 (2009)
'HYB_GGA_XC_LC_WPBE_WHS' : 486, # E. Weintraub, T. M. Henderson, and G. E. Scuseria, J. Chem. Theory Comput. 5, 754 (2009)
'HYB_GGA_XC_LC_WPBEH_WHS' : 487, # E. Weintraub, T. M. Henderson, and G. E. Scuseria, J. Chem. Theory Comput. 5, 754 (2009)
'HYB_GGA_XC_LC_WPBESOL_WHS' : 489, # E. Weintraub, T. M. Henderson, and G. E. Scuseria, J. Chem. Theory Comput. 5, 754 (2009)
'HYB_GGA_XC_LCY_BLYP' : 468, # Y. Akinaga and S. Ten-no, Chem. Phys. Lett. 462, 348 (2008)
'HYB_GGA_XC_LCY_PBE' : 467, # M. Seth and T. Ziegler, J. Chem. Theory Comput. 8, 901 (2012)
'HYB_GGA_XC_LRC_WPBE' : 473, # M. A. Rohrdanz, K. M. Martins, and J. M. Herbert, J. Chem. Phys. 130, 054112 (2009)
'HYB_GGA_XC_LRC_WPBEH' : 465, # M. A. Rohrdanz, K. M. Martins, and J. M. Herbert, J. Chem. Phys. 130, 054112 (2009)
'HYB_GGA_XC_MB3LYP_RC04' : 437, # V. Tognetti, P. Cortona, and C. Adamo, Chem. Phys. Lett. 439, 381 (2007)
'HYB_GGA_XC_MPW1K' : 405, # B. J. Lynch, P. L. Fast, M. Harris, and D. G. Truhlar, J. Phys. Chem. A 104, 4811 (2000)
'HYB_GGA_XC_MPW1LYP' : 483, # C. Adamo and V. Barone, J. Chem. Phys. 108, 664 (1998)
'HYB_GGA_XC_MPW1PBE' : 484, # C. Adamo and V. Barone, J. Chem. Phys. 108, 664 (1998)
'HYB_GGA_XC_MPW1PW' : 418, # C. Adamo and V. Barone, J. Chem. Phys. 108, 664 (1998)
'HYB_GGA_XC_MPW3LYP' : 419, # Y. Zhao and D. G. Truhlar, J. Phys. Chem. A 108, 6908 (2004)
'HYB_GGA_XC_MPW3PW' : 415, # C. Adamo and V. Barone, J. Chem. Phys. 108, 664 (1998)
'HYB_GGA_XC_MPWLYP1M' : 453, # N. E. Schultz, Y. Zhao, and D. G. Truhlar, J. Phys. Chem. A 109, 11127 (2005)
'HYB_GGA_XC_O3LYP' : 404, # A. J. Cohen and N. C. Handy, Mol. Phys. 99, 607 (2001)
'HYB_GGA_XC_PBE0_13' : 456, # P. Cortona, J. Chem. Phys. 136, 086101 (2012)
'HYB_GGA_XC_PBE50' : 290, # Y. A. Bernard, Y. Shao, and A. I. Krylov, J. Chem. Phys. 136, 204103 (2012)
'HYB_GGA_XC_PBE_MOL0' : 273, # J. M. del Campo, J. L. Gazquez, S. B. Trickey, and A. Vela, J. Chem. Phys. 136, 104108 (2012)
'HYB_GGA_XC_PBE_MOLB0' : 276, # J. M. del Campo, J. L. Gazquez, S. B. Trickey, and A. Vela, J. Chem. Phys. 136, 104108 (2012)
'HYB_GGA_XC_PBE_SOL0' : 274, # J. M. del Campo, J. L. Gazquez, S. B. Trickey, and A. Vela, J. Chem. Phys. 136, 104108 (2012)
'HYB_GGA_XC_PBEB0' : 275, # J. M. del Campo, J. L. Gazquez, S. B. Trickey, and A. Vela, J. Chem. Phys. 136, 104108 (2012)
'HYB_GGA_XC_PBEH' : 406, # C. Adamo and V. Barone, J. Chem. Phys. 110, 6158 (1999)
'HYB_GGA_XC_QTP17' : 460, # Y. Jin and R. J. Bartlett, J. Chem. Phys. 149, 064111 (2018)
'HYB_GGA_XC_RCAM_B3LYP' : 610, # A. J. Cohen, P. Mori-Sanchez, and W. Yang, J. Chem. Phys. 126, 191109 (2007)
'HYB_GGA_XC_REVB3LYP' : 454, # L. Lu, H. Hu, H. Hou, and B. Wang, Comput. Theor. Chem. 1015, 64 (2013)
'HYB_GGA_XC_SB98_1A' : 420, # H. L. Schmider and A. D. Becke, J. Chem. Phys. 108, 9624 (1998)
'HYB_GGA_XC_SB98_1B' : 421, # H. L. Schmider and A. D. Becke, J. Chem. Phys. 108, 9624 (1998)
'HYB_GGA_XC_SB98_1C' : 422, # H. L. Schmider and A. D. Becke, J. Chem. Phys. 108, 9624 (1998)
'HYB_GGA_XC_SB98_2A' : 423, # H. L. Schmider and A. D. Becke, J. Chem. Phys. 108, 9624 (1998)
'HYB_GGA_XC_SB98_2B' : 424, # H. L. Schmider and A. D. Becke, J. Chem. Phys. 108, 9624 (1998)
'HYB_GGA_XC_SB98_2C' : 425, # H. L. Schmider and A. D. Becke, J. Chem. Phys. 108, 9624 (1998)
#'HYB_GGA_XC_SRC1_BLYP' : 714, # N. A. Besley, M. J. G. Peach, and D. J. Tozer, Phys. Chem. Chem. Phys. 11, 10350 (2009)
#'HYB_GGA_XC_SRC2_BLYP' : 715, # N. A. Besley, M. J. G. Peach, and D. J. Tozer, Phys. Chem. Chem. Phys. 11, 10350 (2009)
'HYB_GGA_XC_TUNED_CAM_B3LYP' : 434, # K. Okuno, Y. Shigeta, R. Kishi, H. Miyasaka, and M. Nakano, J. Photochem. Photobiol., A 235, 29 (2012)
'HYB_GGA_XC_WB97' : 463, # J.-D. Chai and M. Head-Gordon, J. Chem. Phys. 128, 084106 (2008)
'HYB_GGA_XC_WB97X' : 464, # J.-D. Chai and M. Head-Gordon, J. Chem. Phys. 128, 084106 (2008)
'HYB_GGA_XC_WB97X_D' : 471, # J.-D. Chai and M. Head-Gordon, Phys. Chem. Chem. Phys. 10, 6615 (2008)
'HYB_GGA_XC_WB97X_D3' : 399, # Y.-S. Lin, G.-D. Li, S.-P. Mao, and J.-D. Chai, J. Chem. Theory Comput. 9, 263 (2013)
'HYB_GGA_XC_WB97X_V' : 466, # N. Mardirossian and M. Head-Gordon, Phys. Chem. Chem. Phys. 16, 9904 (2014)
'HYB_GGA_XC_WC04' : 611, # K. W. Wiitala, T. R. Hoye, and C. J. Cramer, J. Chem. Theory Comput. 2, 1085 (2006)
'HYB_GGA_XC_WHPBE0' : 615, # Y. Shao, Y. Mei, D. Sundholm, and V. R. I. Kaila, J. Chem. Theory Comput. 16, 587 (2020), https://doi.org/10.1021/acs.jctc.9b00823
'HYB_GGA_XC_WP04' : 612, # K. W. Wiitala, T. R. Hoye, and C. J. Cramer, J. Chem. Theory Comput. 2, 1085 (2006)
'HYB_GGA_XC_X3LYP' : 411, # X. Xu and W. A. Goddard, Proc. Natl. Acad. Sci. U. S. A. 101, 2673 (2004)
'MGGA_C_B88' : 571, # A. D. Becke, J. Chem. Phys. 88, 1053 (1988)
'MGGA_C_B94' : 397, # A. D. Becke, Int. J. Quantum Chem. 52, 625 (1994)
'MGGA_C_BC95' : 240, # A. D. Becke, J. Chem. Phys. 104, 1040 (1996)
'MGGA_C_CS' : 72 , # R. Colle and O. Salvetti, Theor. Chim. Acta 37, 329 (1975)
'MGGA_C_DLDF' : 37 , # K. Pernal, R. Podeszwa, K. Patkowski, and K. Szalewicz, Phys. Rev. Lett. 103, 263201 (2009)
'MGGA_C_HLTAPW' : 699, # S. Lehtola and M. A. L. Marques, Meta-local density functionals: a new rung on jacob's ladder, (2020), arXiv:2006.16835 [physics.chem-ph]
'MGGA_C_KCIS' : 562, # J. Rey and A. Savin, Int. J. Quantum Chem. 69, 581 (1998)
'MGGA_C_KCISK' : 638, # J. Rey and A. Savin, Int. J. Quantum Chem. 69, 581 (1998)
'MGGA_C_M05' : 237, # Y. Zhao, N. E. Schultz, and D. G. Truhlar, J. Chem. Phys. 123, 161103 (2005)
'MGGA_C_M05_2X' : 238, # Y. Zhao, N. E. Schultz, and D. G. Truhlar, J. Chem. Theory Comput. 2, 364 (2006)
'MGGA_C_M06' : 235, # Y. Zhao and D. G. Truhlar, Theor. Chem. Acc. 120, 215 (2008)
'MGGA_C_M06_2X' : 236, # Y. Zhao and D. G. Truhlar, Theor. Chem. Acc. 120, 215 (2008)
'MGGA_C_M06_HF' : 234, # Y. Zhao and D. G. Truhlar, J. Phys. Chem. A 110, 13126 (2006)
'MGGA_C_M06_L' : 233, # Y. Zhao and D. G. Truhlar, J. Chem. Phys. 125, 194101 (2006)
'MGGA_C_M06_SX' : 311, # Y. Wang, P. Verma, L. Zhang, Y. Li, Z. Liu, D. G. Truhlar, and X. He, Proc. Natl. Acad. Sci. U. S. A. 117, 2294 (2020), https://www.pnas.org/content/117/5/2294.full.pdf
'MGGA_C_M08_HX' : 78 , # Y. Zhao and D. G. Truhlar, J. Chem. Theory Comput. 4, 1849 (2008)
'MGGA_C_M08_SO' : 77 , # Y. Zhao and D. G. Truhlar, J. Chem. Theory Comput. 4, 1849 (2008)
'MGGA_C_M11' : 76 , # R. Peverati and D. G. Truhlar, J. Phys. Chem. Lett. 2, 2810 (2011)
'MGGA_C_M11_L' : 75 , # R. Peverati and D. G. Truhlar, J. Phys. Chem. Lett. 3, 117 (2012)
'MGGA_C_MN12_L' : 74 , # R. Peverati and D. G. Truhlar, Phys. Chem. Chem. Phys. 14, 13171 (2012)
'MGGA_C_MN12_SX' : 73 , # R. Peverati and D. G. Truhlar, Phys. Chem. Chem. Phys. 14, 16187 (2012)
'MGGA_C_MN15' : 269, # H. S. Yu, X. He, S. L. Li, and D. G. Truhlar, Chem. Sci. 7, 5032 (2016)
'MGGA_C_MN15_L' : 261, # H. S. Yu, X. He, and D. G. Truhlar, J. Chem. Theory Comput. 12, 1280 (2016)
'MGGA_C_PKZB' : 239, # J. P. Perdew, S. Kurth, A. Zupan, and P. Blaha, Phys. Rev. Lett. 82, 2544 (1999)
'MGGA_C_R2SCAN' : 498, # J. W. Furness, A. D. Kaplan, J. Ning, J. P. Perdew, and J. Sun, J. Phys. Chem. Lett. 11, 8208 (2020)
'MGGA_C_R2SCANL' : 719, # D. Mejia-Rodriguez and S. B. Trickey, Phys. Rev. B 102, 121109 (2020)
'MGGA_C_REVM06' : 306, # Y. Wang, P. Verma, X. Jin, D. G. Truhlar, and X. He, Proc. Natl. Acad. Sci. U. S. A. 115, 10257 (2018)
'MGGA_C_REVM06_L' : 294, # Y. Wang, X. Jin, H. S. Yu, D. G. Truhlar, and X. He, Proc. Natl. Acad. Sci. U. S. A. 114, 8487 (2017)
'MGGA_C_REVM11' : 172, # P. Verma, Y. Wang, S. Ghosh, X. He, and D. G. Truhlar, J. Phys. Chem. A 123, 2966 (2019)
'MGGA_C_REVSCAN' : 582, # P. D. Mezei, G. I. Csonka, and M. Kallay, J. Chem. Theory Comput. 14, 2469 (2018)
'MGGA_C_REVSCAN_VV10' : 585, # P. D. Mezei, G. I. Csonka, and M. Kallay, J. Chem. Theory Comput. 14, 2469 (2018)
'MGGA_C_REVTM' : 694, # S. Jana, K. Sharma, and P. Samal, J. Phys. Chem. A 123, 6356 (2019)
'MGGA_C_REVTPSS' : 241, # J. P. Perdew, A. Ruzsinszky, G. I. Csonka, L. A. Constantin, and J. Sun, Phys. Rev. Lett. 103, 026403 (2009)
'MGGA_C_RSCAN' : 494, # A. P. Bartok and J. R. Yates, J. Chem. Phys. 150, 161101 (2019)
'MGGA_C_SCAN' : 267, # J. Sun, A. Ruzsinszky, and J. P. Perdew, Phys. Rev. Lett. 115, 036402 (2015)
'MGGA_C_SCAN_RVV10' : 292, # H. Peng, Z.-H. Yang, J. P. Perdew, and J. Sun, Phys. Rev. X 6, 041005 (2016)
'MGGA_C_SCAN_VV10' : 584, # J. G. Brandenburg, J. E. Bates, J. Sun, and J. P. Perdew, Phys. Rev. B 94, 115144 (2016)
'MGGA_C_SCANL' : 702, # D. Mejia-Rodriguez and S. B. Trickey, Phys. Rev. A 96, 052512 (2017)
'MGGA_C_SCANL_RVV10' : 703, # D. Mejia-Rodriguez and S. B. Trickey, Phys. Rev. A 96, 052512 (2017)
'MGGA_C_SCANL_VV10' : 704, # D. Mejia-Rodriguez and S. B. Trickey, Phys. Rev. A 96, 052512 (2017)
'MGGA_C_TM' : 251, # J. Tao and Y. Mo, Phys. Rev. Lett. 117, 073001 (2016)
'MGGA_C_TPSS' : 231, # J. Tao, J. P. Perdew, V. N. Staroverov, and G. E. Scuseria, Phys. Rev. Lett. 91, 146401 (2003)
'MGGA_C_TPSSLOC' : 247, # L. A. Constantin, E. Fabiano, and F. Della Sala, Phys. Rev. B 86, 035130 (2012)
'MGGA_C_VSXC' : 232, # T. V. Voorhis and G. E. Scuseria, J. Chem. Phys. 109, 400 (1998)
'MGGA_K_CSK1' : 629, # A. C. Cancio, D. Stewart, and A. Kuna, J. Chem. Phys. 144, 084107 (2016)
'MGGA_K_CSK4' : 630, # A. C. Cancio, D. Stewart, and A. Kuna, J. Chem. Phys. 144, 084107 (2016)
'MGGA_K_CSK_LOC1' : 631, # A. C. Cancio, D. Stewart, and A. Kuna, J. Chem. Phys. 144, 084107 (2016)
'MGGA_K_CSK_LOC4' : 632, # A. C. Cancio, D. Stewart, and A. Kuna, J. Chem. Phys. 144, 084107 (2016)
'MGGA_K_GEA2' : 627, # A. S. Kompaneets and E. S. Pavlovskii, Zh. Eksp. Teor. Fiz. 31, 427 (1956), [J. Exp. Theor. Phys. 4, 328 (1957)]
'MGGA_K_GEA4' : 628, # C. H. Hodges, Can. J. Phys. 51, 1428 (1973)
'MGGA_K_L04' : 617, # S. Laricchia, L. A. Constantin, E. Fabiano, and F. Della Sala, J. Chem. Theory Comput. 10, 164 (2014)
'MGGA_K_L06' : 618, # S. Laricchia, L. A. Constantin, E. Fabiano, and F. Della Sala, J. Chem. Theory Comput. 10, 164 (2014)
'MGGA_K_PC07' : 543, # J. P. Perdew and L. A. Constantin, Phys. Rev. B 75, 155109 (2007)
'MGGA_K_PC07_OPT' : 634, # D. Mejia-Rodriguez and S. B. Trickey, Phys. Rev. A 96, 052512 (2017)
'MGGA_K_PGSL025' : 220, # L. A. Constantin, E. Fabiano, and F. Della Sala, J. Phys. Chem. Lett. 9, 4385 (2018), pMID: 30019904
'MGGA_K_RDA' : 621, # V. V. Karasiev, R. S. Jones, S. B. Trickey, and F. E. Harris, Phys. Rev. B 80, 245120 (2009)
'MGGA_X_2D_JS17' : 609, # S. Jana and P. Samal, J. Phys. Chem. A 121, 4804 (2017)
'MGGA_X_2D_PRHG07' : 210, # S. Pittalis, E. Rasanen, N. Helbig, and E. K. U. Gross, Phys. Rev. B 76, 235314 (2007)
'MGGA_X_2D_PRHG07_PRP10' : 211, # S. Pittalis, E. Rasanen, N. Helbig, and E. K. U. Gross, Phys. Rev. B 76, 235314 (2007)
'MGGA_X_B00' : 284, # A. D. Becke, J. Chem. Phys. 112, 4020 (2000)
'MGGA_X_BJ06' : 207, # A. D. Becke and E. R. Johnson, J. Chem. Phys. 124, 221101 (2006)
'MGGA_X_BLOC' : 244, # L. A. Constantin, E. Fabiano, and F. Della Sala, J. Chem. Theory Comput. 9, 2256 (2013)
'MGGA_X_BR89' : 206, # A. D. Becke and M. R. Roussel, Phys. Rev. A 39, 3761 (1989)
'MGGA_X_BR89_1' : 214, # A. D. Becke and M. R. Roussel, Phys. Rev. A 39, 3761 (1989)
'MGGA_X_BR89_EXPLICIT' : 586, # A. D. Becke and M. R. Roussel, Phys. Rev. A 39, 3761 (1989)
'MGGA_X_BR89_EXPLICIT_1' : 602, # A. D. Becke and M. R. Roussel, Phys. Rev. A 39, 3761 (1989)
'MGGA_X_EDMGGA' : 686, # J. Tao, J. Chem. Phys. 115, 3519 (2001)
'MGGA_X_GDME_0' : 689, # R. M. Koehl, G. K. Odom, and G. E. Scuseria, Mol. Phys. 87, 835 (1996)
'MGGA_X_GDME_KOS' : 690, # R. M. Koehl, G. K. Odom, and G. E. Scuseria, Mol. Phys. 87, 835 (1996)
'MGGA_X_GDME_NV' : 687, # J. W. Negele and D. Vautherin, Phys. Rev. C 5, 1472 (1972)
'MGGA_X_GDME_VT' : 691, # R. M. Koehl, G. K. Odom, and G. E. Scuseria, Mol. Phys. 87, 835 (1996)
'MGGA_X_GVT4' : 204, # T. V. Voorhis and G. E. Scuseria, J. Chem. Phys. 109, 400 (1998)
'MGGA_X_GX' : 575, # P.-F. Loos, J. Chem. Phys. 146, 114108 (2017)
'MGGA_X_HLTA' : 698, # S. Lehtola and M. A. L. Marques, Meta-local density functionals: a new rung on jacob's ladder, (2020), arXiv:2006.16835 [physics.chem-ph]
'MGGA_X_JK' : 256, # P. Jemmer and P. J. Knowles, Phys. Rev. A 51, 3571 (1995)
'MGGA_X_LTA' : 201, # M. Ernzerhof and G. E. Scuseria, J. Chem. Phys. 111, 911 (1999)
'MGGA_X_M06_L' : 203, # Y. Zhao and D. G. Truhlar, J. Chem. Phys. 125, 194101 (2006)
'MGGA_X_M11_L' : 226, # R. Peverati and D. G. Truhlar, J. Phys. Chem. Lett. 3, 117 (2012)
'MGGA_X_MBEEF' : 249, # J. Wellendorff, K. T. Lundgaard, K. W. Jacobsen, and T. Bligaard, J. Chem. Phys. 140, 144107 (2014)
'MGGA_X_MBEEFVDW' : 250, # K. T. Lundgaard, J. Wellendorff, J. Voss, K. W. Jacobsen, and T. Bligaard, Phys. Rev. B 93, 235162 (2016)
'MGGA_X_MBR' : 716, # A. Patra, S. Jana, H. Myneni, and P. Samal, Phys. Chem. Chem. Phys. 21, 19639 (2019)
'MGGA_X_MBRXC_BG' : 696, # B. Patra, S. Jana, L. A. Constantin, and P. Samal, Phys. Rev. B 100, 045147 (2019)
'MGGA_X_MBRXH_BG' : 697, # B. Patra, S. Jana, L. A. Constantin, and P. Samal, Phys. Rev. B 100, 045147 (2019)
'MGGA_X_MGGAC' : 711, # B. Patra, S. Jana, L. A. Constantin, and P. Samal, Phys. Rev. B 100, 155140 (2019)
'MGGA_X_MK00' : 230, # F. R. Manby and P. J. Knowles, J. Chem. Phys. 112, 7002 (2000)
'MGGA_X_MK00B' : 243, # F. R. Manby and P. J. Knowles, J. Chem. Phys. 112, 7002 (2000)
'MGGA_X_MN12_L' : 227, # R. Peverati and D. G. Truhlar, Phys. Chem. Chem. Phys. 14, 13171 (2012)
'MGGA_X_MN15_L' : 260, # H. S. Yu, X. He, and D. G. Truhlar, J. Chem. Theory Comput. 12, 1280 (2016)
'MGGA_X_MODTPSS' : 245, # J. P. Perdew, A. Ruzsinszky, J. Tao, G. I. Csonka, and G. E. Scuseria, Phys. Rev. A 76, 042506 (2007)
'MGGA_X_MS0' : 221, # J. Sun, B. Xiao, and A. Ruzsinszky, J. Chem. Phys. 137, 051101 (2012)
'MGGA_X_MS1' : 222, # J. Sun, R. Haunschild, B. Xiao, I. W. Bulik, G. E. Scuseria, and J. P. Perdew, J. Chem. Phys. 138, 044113 (2013)
'MGGA_X_MS2' : 223, # J. Sun, R. Haunschild, B. Xiao, I. W. Bulik, G. E. Scuseria, and J. P. Perdew, J. Chem. Phys. 138, 044113 (2013)
'MGGA_X_MS2_REV' : 228, # J. Sun, R. Haunschild, B. Xiao, I. W. Bulik, G. E. Scuseria, and J. P. Perdew, J. Chem. Phys. 138, 044113 (2013)
'MGGA_X_MS2B' : 300, # J. W. Furness and J. Sun, Phys. Rev. B 99, 041119 (2019)
'MGGA_X_MS2BS' : 301, # J. W. Furness and J. Sun, ArXiv e-prints (2018), arXiv:1805.11707v1 [physics.chem-ph]
'MGGA_X_MVS' : 257, # J. Sun, J. P. Perdew, and A. Ruzsinszky, Proc. Natl. Acad. Sci. U. S. A. 112, 685 (2015)
'MGGA_X_MVSB' : 302, # J. W. Furness and J. Sun, ArXiv e-prints (2018), arXiv:1805.11707v1 [physics.chem-ph]
'MGGA_X_MVSBS' : 303, # J. W. Furness and J. Sun, ArXiv e-prints (2018), arXiv:1805.11707v1 [physics.chem-ph]
'MGGA_X_PBE_GX' : 576, # P.-F. Loos, J. Chem. Phys. 146, 114108 (2017)
'MGGA_X_PKZB' : 213, # J. P. Perdew, S. Kurth, A. Zupan, and P. Blaha, Phys. Rev. Lett. 82, 2544 (1999)
'MGGA_X_R2SCAN' : 497, # J. W. Furness, A. D. Kaplan, J. Ning, J. P. Perdew, and J. Sun, J. Phys. Chem. Lett. 11, 8208 (2020)
'MGGA_X_R2SCANL' : 718, # D. Mejia-Rodriguez and S. B. Trickey, Phys. Rev. B 102, 121109 (2020)
'MGGA_X_REGTPSS' : 603, # A. Ruzsinszky, J. Sun, B. Xiao, and G. I. Csonka, J. Chem. Theory Comput. 8, 2078 (2012)
'MGGA_X_REVM06_L' : 293, # Y. Wang, X. Jin, H. S. Yu, D. G. Truhlar, and X. He, Proc. Natl. Acad. Sci. U. S. A. 114, 8487 (2017)
'MGGA_X_REVSCAN' : 581, # P. D. Mezei, G. I. Csonka, and M. Kallay, J. Chem. Theory Comput. 14, 2469 (2018)
'MGGA_X_REVSCANL' : 701, # D. Mejia-Rodriguez and S. B. Trickey, Phys. Rev. A 96, 052512 (2017)
'MGGA_X_REVTM' : 693, # S. Jana, K. Sharma, and P. Samal, J. Phys. Chem. A 123, 6356 (2019)
'MGGA_X_REVTPSS' : 212, # J. P. Perdew, A. Ruzsinszky, G. I. Csonka, L. A. Constantin, and J. Sun, Phys. Rev. Lett. 103, 026403 (2009)
'MGGA_X_RLDA' : 688, # X. Campi and A. Bouyssy, Phys. Lett. B 73, 263 (1978)
'MGGA_X_RPP09' : 209, # E. Rasanen, S. Pittalis, and C. R. Proetto, J. Chem. Phys. 132, 044112 (2010)
'MGGA_X_RSCAN' : 493, # A. P. Bartok and J. R. Yates, J. Chem. Phys. 150, 161101 (2019)
'MGGA_X_RTPSS' : 299, # A. J. Garza, A. T. Bell, and M. Head-Gordon, J. Chem. Theory Comput. 14, 3083 (2018)
'MGGA_X_SA_TPSS' : 542, # L. A. Constantin, E. Fabiano, J. M. Pitarke, and F. Della Sala, Phys. Rev. B 93, 115127 (2016)
'MGGA_X_SCAN' : 263, # J. Sun, A. Ruzsinszky, and J. P. Perdew, Phys. Rev. Lett. 115, 036402 (2015)
'MGGA_X_SCANL' : 700, # D. Mejia-Rodriguez and S. B. Trickey, Phys. Rev. A 96, 052512 (2017)
'MGGA_X_TASK' : 707, # T. Aschebrock and S. Kummel, Phys. Rev. Res. 1, 033082 (2019)
'MGGA_X_TAU_HCTH' : 205, # A. D. Boese and N. C. Handy, J. Chem. Phys. 116, 9559 (2002)
'MGGA_X_TB09' : 208, # F. Tran and P. Blaha, Phys. Rev. Lett. 102, 226401 (2009)
'MGGA_X_TH' : 225, # T. Tsuneda and K. Hirao, Phys. Rev. B 62, 15527 (2000)
'MGGA_X_TLDA' : 685, # F. G. Eich and M. Hellgren, J. Chem. Phys. 141, 224107 (2014)
'MGGA_X_TM' : 540, # J. Tao and Y. Mo, Phys. Rev. Lett. 117, 073001 (2016)
'MGGA_X_TPSS' : 202, # J. Tao, J. P. Perdew, V. N. Staroverov, and G. E. Scuseria, Phys. Rev. Lett. 91, 146401 (2003)
'MGGA_X_VT84' : 541, # J. M. del Campo, J. L. Gazquez, S. Trickey, and A. Vela, Chem. Phys. Lett. 543, 179 (2012)
'MGGA_XC_B97M_V' : 254, # N. Mardirossian and M. Head-Gordon, J. Chem. Phys. 142, 074111 (2015)
'MGGA_XC_CC06' : 229, # A. C. Cancio and M. Y. Chou, Phys. Rev. B 74, 081202 (2006)
'MGGA_XC_HLE17' : 288, # P. Verma and D. G. Truhlar, J. Phys. Chem. C 121, 7144 (2017)
'MGGA_XC_LP90' : 564, # C. Lee and R. G. Parr, Phys. Rev. A 42, 193 (1990)
'MGGA_XC_OTPSS_D' : 64 , # L. Goerigk and S. Grimme, J. Chem. Theory Comput. 6, 107 (2010)
'MGGA_XC_TPSSLYP1W' : 242, # E. E. Dahlke and D. G. Truhlar, J. Phys. Chem. B 109, 15677 (2005)
'MGGA_XC_ZLP' : 42 , # Q. Zhao, M. Levy, and R. G. Parr, Phys. Rev. A 47, 918 (1993)
'HYB_MGGA_X_BMK' : 279, # A. D. Boese and J. M. L. Martin, J. Chem. Phys. 121, 3405 (2004)
'HYB_MGGA_X_DLDF' : 36 , # K. Pernal, R. Podeszwa, K. Patkowski, and K. Szalewicz, Phys. Rev. Lett. 103, 263201 (2009)
'HYB_MGGA_X_JS18' : 705, # S. Jana and P. Samal, Phys. Chem. Chem. Phys. 20, 8999 (2018)
'HYB_MGGA_X_M05' : 438, # Y. Zhao, N. E. Schultz, and D. G. Truhlar, J. Chem. Phys. 123, 161103 (2005)
'HYB_MGGA_X_M05_2X' : 439, # Y. Zhao, N. E. Schultz, and D. G. Truhlar, J. Chem. Theory Comput. 2, 364 (2006)
'HYB_MGGA_X_M06' : 449, # Y. Zhao and D. G. Truhlar, Theor. Chem. Acc. 120, 215 (2008)
'HYB_MGGA_X_M06_2X' : 450, # Y. Zhao and D. G. Truhlar, Theor. Chem. Acc. 120, 215 (2008)
'HYB_MGGA_X_M06_HF' : 444, # Y. Zhao and D. G. Truhlar, J. Phys. Chem. A 110, 13126 (2006)
'HYB_MGGA_X_M06_SX' : 310, # Y. Wang, P. Verma, L. Zhang, Y. Li, Z. Liu, D. G. Truhlar, and X. He, Proc. Natl. Acad. Sci. U. S. A. 117, 2294 (2020), https://www.pnas.org/content/117/5/2294.full.pdf
'HYB_MGGA_X_M08_HX' : 295, # Y. Zhao and D. G. Truhlar, J. Chem. Theory Comput. 4, 1849 (2008)
'HYB_MGGA_X_M08_SO' : 296, # Y. Zhao and D. G. Truhlar, J. Chem. Theory Comput. 4, 1849 (2008)
'HYB_MGGA_X_M11' : 297, # R. Peverati and D. G. Truhlar, J. Phys. Chem. Lett. 2, 2810 (2011)
'HYB_MGGA_X_MN12_SX' : 248, # R. Peverati and D. G. Truhlar, Phys. Chem. Chem. Phys. 14, 16187 (2012)
'HYB_MGGA_X_MN15' : 268, # H. S. Yu, X. He, S. L. Li, and D. G. Truhlar, Chem. Sci. 7, 5032 (2016)
'HYB_MGGA_X_MS2H' : 224, # J. Sun, R. Haunschild, B. Xiao, I. W. Bulik, G. E. Scuseria, and J. P. Perdew, J. Chem. Phys. 138, 044113 (2013)
'HYB_MGGA_X_MVSH' : 474, # J. Sun, J. P. Perdew, and A. Ruzsinszky, Proc. Natl. Acad. Sci. U. S. A. 112, 685 (2015)
'HYB_MGGA_X_PJS18' : 706, # B. Patra, S. Jana, and P. Samal, Phys. Chem. Chem. Phys. 20, 8991 (2018)
'HYB_MGGA_X_REVM06' : 305, # Y. Wang, P. Verma, X. Jin, D. G. Truhlar, and X. He, Proc. Natl. Acad. Sci. U. S. A. 115, 10257 (2018)
'HYB_MGGA_X_REVM11' : 304, # P. Verma, Y. Wang, S. Ghosh, X. He, and D. G. Truhlar, J. Phys. Chem. A 123, 2966 (2019)
'HYB_MGGA_X_REVSCAN0' : 583, # P. D. Mezei, G. I. Csonka, and M. Kallay, J. Chem. Theory Comput. 14, 2469 (2018)
'HYB_MGGA_X_SCAN0' : 264, # K. Hui and J.-D. Chai, J. Chem. Phys. 144, 044114 (2016)
'HYB_MGGA_X_TAU_HCTH' : 282, # A. D. Boese and N. C. Handy, J. Chem. Phys. 116, 9559 (2002)
'HYB_MGGA_XC_B0KCIS' : 563, # J. Toulouse, A. Savin, and C. Adamo, J. Chem. Phys. 117, 10465 (2002)
'HYB_MGGA_XC_B86B95' : 441, # A. D. Becke, J. Chem. Phys. 104, 1040 (1996)
'HYB_MGGA_XC_B88B95' : 440, # A. D. Becke, J. Chem. Phys. 104, 1040 (1996)
'HYB_MGGA_XC_B94_HYB' : 398, # A. D. Becke, Int. J. Quantum Chem. 52, 625 (1994)
'HYB_MGGA_XC_B98' : 598, # A. D. Becke, J. Chem. Phys. 109, 2092 (1998)
'HYB_MGGA_XC_BB1K' : 443, # Y. Zhao, B. J. Lynch, and D. G. Truhlar, J. Phys. Chem. A 108, 2715 (2004)
'HYB_MGGA_XC_EDMGGAH' : 695, # J. Tao, J. Chem. Phys. 116, 2335 (2002)
'HYB_MGGA_XC_MPW1B95' : 445, # Y. Zhao and D. G. Truhlar, J. Phys. Chem. A 108, 6908 (2004)
'HYB_MGGA_XC_MPW1KCIS' : 566, # Y. Zhao, N. Gonzalez-Garcia, and D. G. Truhlar, J. Phys. Chem. A 109, 2012 (2005)
'HYB_MGGA_XC_MPWB1K' : 446, # Y. Zhao and D. G. Truhlar, J. Phys. Chem. A 108, 6908 (2004)
'HYB_MGGA_XC_MPWKCIS1K' : 567, # Y. Zhao, N. Gonzalez-Garcia, and D. G. Truhlar, J. Phys. Chem. A 109, 2012 (2005)
'HYB_MGGA_XC_PBE1KCIS' : 568, # Y. Zhao and D. G. Truhlar, J. Chem. Theory Comput. 1, 415 (2005)
'HYB_MGGA_XC_PW6B95' : 451, # Y. Zhao and D. G. Truhlar, J. Phys. Chem. A 109, 5656 (2005)
'HYB_MGGA_XC_PW86B95' : 442, # A. D. Becke, J. Chem. Phys. 104, 1040 (1996)
'HYB_MGGA_XC_PWB6K' : 452, # Y. Zhao and D. G. Truhlar, J. Phys. Chem. A 109, 5656 (2005)
'HYB_MGGA_XC_REVTPSSH' : 458, # G. I. Csonka, J. P. Perdew, and A. Ruzsinszky, J. Chem. Theory Comput. 6, 3688 (2010)
'HYB_MGGA_XC_TPSS0' : 396, # S. Grimme, J. Phys. Chem. A 109, 3067 (2005)
'HYB_MGGA_XC_TPSS1KCIS' : 569, # Y. Zhao, B. J. Lynch, and D. G. Truhlar, Phys. Chem. Chem. Phys. 7, 43 (2005)
'HYB_MGGA_XC_TPSSH' : 457, # V. N. Staroverov, G. E. Scuseria, J. Tao, and J. P. Perdew, J. Chem. Phys. 119, 12129 (2003)
'HYB_MGGA_XC_WB97M_V' : 531, # N. Mardirossian and M. Head-Gordon, J. Chem. Phys. 144, 214110 (2016)
'HYB_MGGA_XC_X1B95' : 447, # Y. Zhao and D. G. Truhlar, J. Phys. Chem. A 108, 6908 (2004)
'HYB_MGGA_XC_XB1K' : 448, # Y. Zhao and D. G. Truhlar, J. Phys. Chem. A 108, 6908 (2004)
}
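# Illustrative lookups into the table above (values taken from the entries
# listed here): XC_CODES['GGA_X_PBE'] == 101, XC_CODES['GGA_C_PBE'] == 130,
# XC_CODES['HYB_GGA_XC_B3LYP'] == 402.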
#PROBLEMATIC_XC = dict([(XC_CODES[x], x) for x in
# ('GGA_C_SPBE', 'MGGA_X_REVTPSS')])
PROBLEMATIC_XC = {}
def _xc_key_without_underscore(xc_keys):
    '''Generate aliases of the libxc-style keys in which underscores and
    hyphens are stripped from the part following _XC_, _X_, _C_ or _K_.'''
    new_xc = []
    for key, xc_id in xc_keys.items():
        for delimiter in ('_XC_', '_X_', '_C_', '_K_'):
            if delimiter in key:
                key0, key1 = key.split(delimiter)
                new_key1 = key1.replace('_', '').replace('-', '')
                if key1 != new_key1:
                    new_xc.append((key0+delimiter+new_key1, xc_id))
                break
    return new_xc
XC_CODES.update(_xc_key_without_underscore(XC_CODES))
del _xc_key_without_underscore
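# The update above registers underscore-free aliases for the functional-name
# part of each key, e.g. 'GGA_X_PBE_SOL' also becomes available as
# 'GGA_X_PBESOL' and 'HYB_GGA_XC_CAM_B3LYP' as 'HYB_GGA_XC_CAMB3LYP'.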
#
# alias
#
XC_CODES.update({
'GGA_C_BCGP' : 'GGA_C_ACGGA',
'LDA' : 1 ,
'SLATER' : 1 ,
'VWN3' : 8,
'VWNRPA' : 8,
'VWN5' : 7,
'B88' : 106,
'PBE0' : 406,
'PBE1PBE' : 406,
'OPTXCORR' : '0.7344536875999693*SLATER - 0.6984752285760186*OPTX,',
'B3LYP' : 'B3LYP5', # VWN5 version
'B3LYP5' : '.2*HF + .08*SLATER + .72*B88, .81*LYP + .19*VWN',
'B3LYPG' : 402, # VWN3, used by Gaussian
'B3P86' : 'B3P865', # VWN5 version
'B3P865' : '.2*HF + .08*SLATER + .72*B88, .81*P86 + .19*VWN',
# FIXME: Check if Gaussian takes a different form for B3P86
#'B3P86G' : 403, # VWN3, used by Gaussian
'B3P86G' : '.2*HF + .08*SLATER + .72*B88, .81*P86 + .19*VWN3',
'B3PW91' : 'B3PW915',
'B3PW915' : '.2*HF + .08*SLATER + .72*B88, .81*PW91 + .19*VWN',
#'B3PW91G' : '.2*HF + .08*SLATER + .72*B88, .81*PW91 + .19*VWN3',
'B3PW91G' : 401,
#'O3LYP5' : '.1161*HF + .9262*SLATER + .8133*OPTXCORR, .81*LYP + .19*VWN5',
#'O3LYPG' : '.1161*HF + .9262*SLATER + .8133*OPTXCORR, .81*LYP + .19*VWN3',
    'O3LYP'         : 404, # in libxc == '.1161*HF + 0.071006917*SLATER + .8133*OPTX, .81*LYP + .19*VWN5', may be erroneous
'MPW3PW' : 'MPW3PW5', # VWN5 version
'MPW3PW5' : '.2*HF + .08*SLATER + .72*MPW91, .81*PW91 + .19*VWN',
'MPW3PWG' : 415, # VWN3, used by Gaussian
'MPW3LYP' : 'MPW3LYP5', # VWN5 version
'MPW3LYP5' : '.218*HF + .073*SLATER + .709*MPW91, .871*LYP + .129*VWN',
'MPW3LYPG' : 419, # VWN3, used by Gaussian
'REVB3LYP' : 'REVB3LYP5', # VWN5 version
'REVB3LYP5' : '.2*HF + .13*SLATER + .67*B88, .84*LYP + .16*VWN',
'REVB3LYPG' : 454, # VWN3, used by Gaussian
'X3LYP' : 'X3LYP5', # VWN5 version
'X3LYP5' : '.218*HF + .073*SLATER + .542385*B88 + .166615*PW91, .871*LYP + .129*VWN',
'X3LYPG' : 411, # VWN3, used by Gaussian
'CAMB3LYP' : 'HYB_GGA_XC_CAM_B3LYP',
'CAMYBLYP' : 'HYB_GGA_XC_CAMY_BLYP',
'CAMYB3LYP' : 'HYB_GGA_XC_CAMY_B3LYP',
'B5050LYP' : '.5*HF + .08*SLATER + .42*B88, .81*LYP + .19*VWN',
'MPW1LYP' : '.25*HF + .75*MPW91, LYP',
'MPW1PBE' : '.25*HF + .75*MPW91, PBE',
'PBE50' : '.5*HF + .5*PBE, PBE',
'REVPBE0' : '.25*HF + .75*PBE_R, PBE',
'B1B95' : 440,
'TPSS0' : '.25*HF + .75*TPSS, TPSS',
}) # noqa: E501
XC_KEYS = set(XC_CODES.keys())
# Some XC functionals have conventional name, like M06-L means M06-L for X
# functional and M06-L for C functional, PBE mean PBE-X plus PBE-C. If the
# conventional name was placed in the XC_CODES, it may lead to recursive
# reference when parsing the xc description. These names (as exceptions of
# XC_CODES) are listed in XC_ALIAS below and they should be treated as a
# shortcut for XC functional.
XC_ALIAS = {
# Conventional name : name in XC_CODES
'BLYP' : 'B88,LYP',
'BP86' : 'B88,P86',
'PW91' : 'PW91,PW91',
'PBE' : 'PBE,PBE',
'REVPBE' : 'PBE_R,PBE',
'PBESOL' : 'PBE_SOL,PBE_SOL',
'PKZB' : 'PKZB,PKZB',
'TPSS' : 'TPSS,TPSS',
'REVTPSS' : 'REVTPSS,REVTPSS',
'SCAN' : 'SCAN,SCAN',
'RSCAN' : 'RSCAN,RSCAN',
'R2SCAN' : 'R2SCAN,R2SCAN',
'SCANL' : 'SCANL,SCANL',
'R2SCANL' : 'R2SCANL,R2SCANL',
'SOGGA' : 'SOGGA,PBE',
'BLOC' : 'BLOC,TPSSLOC',
'OLYP' : 'OPTX,LYP',
'OPBE' : 'OPTX,PBE',
'RPBE' : 'RPBE,PBE',
'BPBE' : 'B88,PBE',
'MPW91' : 'MPW91,PW91',
'HFLYP' : 'HF,LYP',
'HFPW92' : 'HF,PW_MOD',
'SPW92' : 'SLATER,PW_MOD',
'SVWN' : 'SLATER,VWN',
'MS0' : 'MS0,REGTPSS',
'MS1' : 'MS1,REGTPSS',
'MS2' : 'MS2,REGTPSS',
'MS2H' : 'MS2H,REGTPSS',
'MVS' : 'MVS,REGTPSS',
'MVSH' : 'MVSH,REGTPSS',
'SOGGA11' : 'SOGGA11,SOGGA11',
'SOGGA11_X' : 'SOGGA11_X,SOGGA11_X',
'KT1' : 'KT1,VWN',
'KT2' : 'GGA_XC_KT2',
'KT3' : 'GGA_XC_KT3',
'DLDF' : 'DLDF,DLDF',
'GAM' : 'GAM,GAM',
'M06_L' : 'M06_L,M06_L',
'M06_SX' : 'M06_SX,M06_SX',
'M11_L' : 'M11_L,M11_L',
'MN12_L' : 'MN12_L,MN12_L',
'MN15_L' : 'MN15_L,MN15_L',
'N12' : 'N12,N12',
'N12_SX' : 'N12_SX,N12_SX',
'MN12_SX' : 'MN12_SX,MN12_SX',
'MN15' : 'MN15,MN15',
'MBEEF' : 'MBEEF,PBE_SOL',
'SCAN0' : 'SCAN0,SCAN',
'PBEOP' : 'PBE,OP_PBE',
'BOP' : 'B88,OP_B88',
# new in libxc-4.2.3
'REVSCAN' : 'MGGA_X_REVSCAN,MGGA_C_REVSCAN',
'REVSCAN_VV10' : 'MGGA_X_REVSCAN,MGGA_C_REVSCAN_VV10',
'SCAN_VV10' : 'MGGA_X_SCAN,MGGA_C_SCAN_VV10',
'SCAN_RVV10' : 'MGGA_X_SCAN,MGGA_C_SCAN_RVV10',
'M05' : 'HYB_MGGA_X_M05,MGGA_C_M05',
'M06' : 'HYB_MGGA_X_M06,MGGA_C_M06',
'M05_2X' : 'HYB_MGGA_X_M05_2X,MGGA_C_M05_2X',
'M06_2X' : 'HYB_MGGA_X_M06_2X,MGGA_C_M06_2X',
# extra aliases
'SOGGA11X' : 'SOGGA11_X',
'M06L' : 'M06_L',
'M11L' : 'M11_L',
'MN12L' : 'MN12_L',
'MN15L' : 'MN15_L',
'N12SX' : 'N12_SX',
'MN12SX' : 'MN12_SX',
'M052X' : 'M05_2X',
'M062X' : 'M06_2X',
} # noqa: E122
XC_ALIAS.update([(key.replace('-',''), XC_ALIAS[key])
for key in XC_ALIAS if '-' in key])
VV10_XC = set(('B97M_V', 'WB97M_V', 'WB97X_V', 'VV10', 'LC_VV10',
'REVSCAN_VV10',
'SCAN_VV10', 'SCAN_RVV10', 'SCANL_VV10', 'SCANL_RVV10'))
VV10_XC = VV10_XC.union(set([x.replace('_', '') for x in VV10_XC]))
def xc_reference(xc_code):
'''Returns the reference to the individual XC functional'''
hyb, fn_facs = parse_xc(xc_code)
refs = []
c_refs = (ctypes.c_char_p * 8)()
for xid, fac in fn_facs:
_itrf.LIBXC_xc_reference(xid, c_refs)
for ref in c_refs:
if ref:
refs.append(ref.decode("UTF-8"))
return refs
def xc_type(xc_code):
if xc_code is None:
return None
elif isinstance(xc_code, str):
if is_nlc(xc_code):
return 'NLC'
hyb, fn_facs = parse_xc(xc_code)
else:
fn_facs = [(xc_code, 1)] # mimic fn_facs
if not fn_facs:
return 'HF'
elif all(_itrf.LIBXC_is_lda(ctypes.c_int(xid)) for xid, fac in fn_facs):
return 'LDA'
elif any(_itrf.LIBXC_is_meta_gga(ctypes.c_int(xid)) for xid, fac in fn_facs):
return 'MGGA'
else:
# any(_itrf.LIBXC_is_gga(ctypes.c_int(xid)) for xid, fac in fn_facs)
# include hybrid_xc
return 'GGA'
def is_lda(xc_code):
return xc_type(xc_code) == 'LDA'
def is_hybrid_xc(xc_code):
if xc_code is None:
return False
elif isinstance(xc_code, str):
if xc_code.isdigit():
return _itrf.LIBXC_is_hybrid(ctypes.c_int(int(xc_code)))
else:
if 'HF' in xc_code:
return True
if hybrid_coeff(xc_code) != 0:
return True
if rsh_coeff(xc_code) != [0, 0, 0]:
return True
return False
elif isinstance(xc_code, int):
return _itrf.LIBXC_is_hybrid(ctypes.c_int(xc_code))
else:
return any((is_hybrid_xc(x) for x in xc_code))
def is_meta_gga(xc_code):
return xc_type(xc_code) == 'MGGA'
def is_gga(xc_code):
return xc_type(xc_code) == 'GGA'
def needs_laplacian(xc_code):
return _itrf.LIBXC_needs_laplacian(xc_code) != 0
def is_nlc(xc_code):
return '__VV10' in xc_code.upper()
def max_deriv_order(xc_code):
hyb, fn_facs = parse_xc(xc_code)
if fn_facs:
return min(_itrf.LIBXC_max_deriv_order(ctypes.c_int(xid)) for xid, fac in fn_facs)
else:
return 3
def test_deriv_order(xc_code, deriv, raise_error=False):
support = deriv <= max_deriv_order(xc_code)
if not support and raise_error:
from pyscf.dft import xcfun
msg = ('libxc library does not support derivative order %d for %s' %
(deriv, xc_code))
try:
if xcfun.test_deriv_order(xc_code, deriv, raise_error=False):
msg += ('''
This functional derivative is supported in the xcfun library.
The following code can be used to change the libxc library to xcfun library:
from pyscf.dft import xcfun
mf._numint.libxc = xcfun
''')
raise NotImplementedError(msg)
except KeyError as e:
sys.stderr.write('\n'+msg+'\n')
sys.stderr.write('%s not found in xcfun library\n\n' % xc_code)
raise e
return support
def hybrid_coeff(xc_code, spin=0):
'''Support recursively defining hybrid functional
'''
hyb, fn_facs = parse_xc(xc_code)
for xid, fac in fn_facs:
hyb[0] += fac * _itrf.LIBXC_hybrid_coeff(ctypes.c_int(xid))
return hyb[0]
def nlc_coeff(xc_code):
'''Get NLC coefficients
'''
nlc_code = None
if isinstance(xc_code, str) and '__VV10' in xc_code.upper():
xc_code, nlc_code = xc_code.upper().split('__', 1)
hyb, fn_facs = parse_xc(xc_code)
nlc_pars = [0, 0]
nlc_tmp = (ctypes.c_double*2)()
for xid, fac in fn_facs:
_itrf.LIBXC_nlc_coeff(xid, nlc_tmp)
nlc_pars[0] += nlc_tmp[0]
nlc_pars[1] += nlc_tmp[1]
if nlc_pars[0] == 0 and nlc_pars[1] == 0:
if nlc_code is not None:
# Use VV10 NLC parameters by default for the general case
_itrf.LIBXC_nlc_coeff(XC_CODES['GGA_XC_' + nlc_code], nlc_tmp)
nlc_pars[0] += nlc_tmp[0]
nlc_pars[1] += nlc_tmp[1]
else:
raise NotImplementedError(
'%s does not have NLC part. Available functionals are %s' %
                (xc_code, ', '.join(VV10_XC)))
return nlc_pars
def rsh_coeff(xc_code):
'''Range-separated parameter and HF exchange components: omega, alpha, beta
Exc_RSH = c_LR * LR_HFX + c_SR * SR_HFX + (1-c_SR) * Ex_SR + (1-c_LR) * Ex_LR + Ec
= alpha * HFX + beta * SR_HFX + (1-c_SR) * Ex_SR + (1-c_LR) * Ex_LR + Ec
= alpha * LR_HFX + hyb * SR_HFX + (1-c_SR) * Ex_SR + (1-c_LR) * Ex_LR + Ec
SR_HFX = < pi | e^{-omega r_{12}}/r_{12} | iq >
LR_HFX = < pi | (1-e^{-omega r_{12}})/r_{12} | iq >
alpha = c_LR
beta = c_SR - c_LR = hyb - alpha
'''
if xc_code is None:
return 0, 0, 0
check_omega = True
if isinstance(xc_code, str) and ',' in xc_code:
# Parse only X part for the RSH coefficients. This is to handle
# exceptions for C functionals such as M11.
xc_code = format_xc_code(xc_code)
xc_code = xc_code.split(',')[0] + ','
if 'SR_HF' in xc_code or 'LR_HF' in xc_code or 'RSH(' in xc_code:
check_omega = False
hyb, fn_facs = parse_xc(xc_code)
hyb, alpha, omega = hyb
beta = hyb - alpha
rsh_pars = [omega, alpha, beta]
rsh_tmp = (ctypes.c_double*3)()
_itrf.LIBXC_rsh_coeff(433, rsh_tmp)
for xid, fac in fn_facs:
_itrf.LIBXC_rsh_coeff(xid, rsh_tmp)
if rsh_pars[0] == 0:
rsh_pars[0] = rsh_tmp[0]
elif check_omega:
# Check functional is actually a CAM functional
if rsh_tmp[0] != 0 and not _itrf.LIBXC_is_cam_rsh(ctypes.c_int(xid)):
raise KeyError('Libxc functional %i employs a range separation '
'kernel that is not supported in PySCF' % xid)
# Check omega
if (rsh_tmp[0] != 0 and rsh_pars[0] != rsh_tmp[0]):
raise ValueError('Different values of omega found for RSH functionals')
rsh_pars[1] += rsh_tmp[1] * fac
rsh_pars[2] += rsh_tmp[2] * fac
return rsh_pars
def parse_xc_name(xc_name='LDA,VWN'):
'''Convert the XC functional name to libxc library internal ID.
'''
fn_facs = parse_xc(xc_name)[1]
return fn_facs[0][0], fn_facs[1][0]
def parse_xc(description):
r'''Rules to input functional description:
* The given functional description must be a one-line string.
* The functional description is case-insensitive.
* The functional description string has two parts, separated by ",". The
first part describes the exchange functional, the second is the correlation
functional.
- If "," was not in string, the entire string is considered as a
compound XC functional (including both X and C functionals, such as b3lyp).
- To input only X functional (without C functional), leave the second
part blank. E.g. description='slater,' means pure LDA functional.
- To neglect X functional (just apply C functional), leave the first
part blank. E.g. description=',vwn' means pure VWN functional.
      - If compound XC functional is specified, no matter whether it is in the
X part (the string in front of comma) or the C part (the string behind
comma), both X and C functionals of the compound XC functional will be
used.
    * The functional name can be placed in arbitrary order. Two names need to
      be separated by operators "+" or "-". Blank spaces are ignored.
      NOTE the parser only reads operators "+" "-" "*". / is not supported.
* A functional name can have at most one factor. If the factor is not
given, it is set to 1. Compound functional can be scaled as a unit. For
example '0.5*b3lyp' is equivalent to
'HF*0.1 + .04*LDA + .36*B88, .405*LYP + .095*VWN'
    * String "HF" stands for exact exchange (HF K matrix). Putting "HF" in the
      correlation functional part is the same as putting "HF" in the exchange
      part.
* String "RSH" means range-separated operator. Its format is
RSH(omega, alpha, beta). Another way to input RSH is to use keywords
SR_HF and LR_HF: "SR_HF(0.1) * alpha_plus_beta" and "LR_HF(0.1) *
alpha" where the number in parenthesis is the value of omega.
* Be careful with the libxc convention on GGA functional, in which the LDA
contribution has been included.
Args:
xc_code : str
A string to describe the linear combination of different XC functionals.
The X and C functional are separated by comma like '.8*LDA+.2*B86,VWN'.
            If "HF" appears in the string, it stands for the exact exchange.
rho : ndarray
Shape of ((*,N)) for electron density (and derivatives) if spin = 0;
Shape of ((*,N),(*,N)) for alpha/beta electron density (and derivatives) if spin > 0;
where N is number of grids.
rho (*,N) are ordered as (den,grad_x,grad_y,grad_z,laplacian,tau)
where grad_x = d/dx den, laplacian = \nabla^2 den, tau = 1/2(\nabla f)^2
In spin unrestricted case,
rho is ((den_u,grad_xu,grad_yu,grad_zu,laplacian_u,tau_u)
(den_d,grad_xd,grad_yd,grad_zd,laplacian_d,tau_d))
Kwargs:
spin : int
spin polarized if spin > 0
relativity : int
No effects.
verbose : int or object of :class:`Logger`
No effects.
Returns:
ex, vxc, fxc, kxc
where
* vxc = (vrho, vsigma, vlapl, vtau) for restricted case
* vxc for unrestricted case
| vrho[:,2] = (u, d)
| vsigma[:,3] = (uu, ud, dd)
| vlapl[:,2] = (u, d)
| vtau[:,2] = (u, d)
* fxc for restricted case:
(v2rho2, v2rhosigma, v2sigma2, v2lapl2, vtau2, v2rholapl, v2rhotau, v2lapltau, v2sigmalapl, v2sigmatau)
* fxc for unrestricted case:
| v2rho2[:,3] = (u_u, u_d, d_d)
| v2rhosigma[:,6] = (u_uu, u_ud, u_dd, d_uu, d_ud, d_dd)
| v2sigma2[:,6] = (uu_uu, uu_ud, uu_dd, ud_ud, ud_dd, dd_dd)
| v2lapl2[:,3]
| vtau2[:,3]
| v2rholapl[:,4]
| v2rhotau[:,4]
| v2lapltau[:,4]
| v2sigmalapl[:,6]
| v2sigmatau[:,6]
* kxc for restricted case:
v3rho3, v3rho2sigma, v3rhosigma2, v3sigma3,
v3rho2tau, v3rhosigmatau, v3rhotau2, v3sigma2tau, v3sigmatau2, v3tau3
* kxc for unrestricted case:
| v3rho3[:,4] = (u_u_u, u_u_d, u_d_d, d_d_d)
| v3rho2sigma[:,9] = (u_u_uu, u_u_ud, u_u_dd, u_d_uu, u_d_ud, u_d_dd, d_d_uu, d_d_ud, d_d_dd)
| v3rhosigma2[:,12] = (u_uu_uu, u_uu_ud, u_uu_dd, u_ud_ud, u_ud_dd, u_dd_dd, d_uu_uu, d_uu_ud, d_uu_dd, d_ud_ud, d_ud_dd, d_dd_dd)
| v3sigma3[:,10] = (uu_uu_uu, uu_uu_ud, uu_uu_dd, uu_ud_ud, uu_ud_dd, uu_dd_dd, ud_ud_ud, ud_ud_dd, ud_dd_dd, dd_dd_dd)
| v3rho2tau
| v3rhosigmatau
| v3rhotau2
| v3sigma2tau
| v3sigmatau2
| v3tau3
see also libxc_itrf.c
''' # noqa: E501
hyb = [0, 0, 0] # hybrid, alpha, omega (== SR_HF, LR_HF, omega)
if description is None:
return hyb, []
elif isinstance(description, int):
return hyb, [(description, 1.)]
elif not isinstance(description, str): #isinstance(description, (tuple,list)):
return parse_xc('%s,%s' % tuple(description))
def assign_omega(omega, hyb_or_sr, lr=0):
if hyb[2] == omega or omega == 0:
hyb[0] += hyb_or_sr
hyb[1] += lr
elif hyb[2] == 0:
hyb[0] += hyb_or_sr
hyb[1] += lr
hyb[2] = omega
else:
raise ValueError('Different values of omega found for RSH functionals')
fn_facs = []
def parse_token(token, ftype, search_xc_alias=False):
if token:
if token[0] == '-':
sign = -1
token = token[1:]
else:
sign = 1
if '*' in token:
fac, key = token.split('*')
if fac[0].isalpha():
fac, key = key, fac
fac = sign * float(fac)
else:
fac, key = sign, token
if key[:3] == 'RSH':
# RSH(alpha; beta; omega): Range-separated-hybrid functional
# See also utils.format_xc_code
alpha, beta, omega = [float(x) for x in key[4:-1].split(';')]
assign_omega(omega, fac*(alpha+beta), fac*alpha)
elif key == 'HF':
hyb[0] += fac
hyb[1] += fac # also add to LR_HF
elif 'SR_HF' in key:
if '(' in key:
omega = float(key.split('(')[1].split(')')[0])
assign_omega(omega, fac, 0)
else: # Assuming this omega the same to the existing omega
hyb[0] += fac
elif 'LR_HF' in key:
if '(' in key:
omega = float(key.split('(')[1].split(')')[0])
assign_omega(omega, 0, fac)
else:
hyb[1] += fac # == alpha
elif key.isdigit():
fn_facs.append((int(key), fac))
else:
if search_xc_alias and key in XC_ALIAS:
x_id = XC_ALIAS[key]
elif key in XC_CODES:
x_id = XC_CODES[key]
else:
possible_xc_for = fpossible_dic[ftype]
possible_xc = XC_KEYS.intersection(possible_xc_for(key))
if possible_xc:
if len(possible_xc) > 1:
sys.stderr.write('Possible xc_code %s matches %s. '
% (list(possible_xc), key))
for x_id in possible_xc: # Prefer X functional
if '_X_' in x_id:
break
else:
x_id = possible_xc.pop()
sys.stderr.write('XC parser takes %s\n' % x_id)
sys.stderr.write('You can add prefix to %s for a '
'specific functional (e.g. X_%s, '
'HYB_MGGA_X_%s)\n'
% (key, key, key))
else:
x_id = possible_xc.pop()
x_id = XC_CODES[x_id]
else:
raise KeyError('Unknown %s functional %s' % (ftype, key))
if isinstance(x_id, str):
hyb1, fn_facs1 = parse_xc(x_id)
# Recursively scale the composed functional, to support e.g. '0.5*b3lyp'
if hyb1[0] != 0 or hyb1[1] != 0:
assign_omega(hyb1[2], hyb1[0]*fac, hyb1[1]*fac)
fn_facs.extend([(xid, c*fac) for xid, c in fn_facs1])
elif x_id is None:
raise NotImplementedError('%s functional %s' % (ftype, key))
else:
fn_facs.append((x_id, fac))
def possible_x_for(key):
return set((key,
'LDA_X_'+key, 'GGA_X_'+key, 'MGGA_X_'+key,
'HYB_GGA_X_'+key, 'HYB_MGGA_X_'+key))
def possible_xc_for(key):
return set((key, 'LDA_XC_'+key, 'GGA_XC_'+key, 'MGGA_XC_'+key,
'HYB_GGA_XC_'+key, 'HYB_MGGA_XC_'+key))
def possible_k_for(key):
return set((key,
'LDA_K_'+key, 'GGA_K_'+key,))
def possible_x_k_for(key):
return possible_x_for(key).union(possible_k_for(key))
def possible_c_for(key):
return set((key,
'LDA_C_'+key, 'GGA_C_'+key, 'MGGA_C_'+key))
fpossible_dic = {'X': possible_x_for,
'C': possible_c_for,
'compound XC': possible_xc_for,
'K': possible_k_for,
'X or K': possible_x_k_for}
description = format_xc_code(description)
if '-' in description: # To handle e.g. M06-L
for key in _NAME_WITH_DASH:
if key in description:
description = description.replace(key, _NAME_WITH_DASH[key])
if ',' in description:
x_code, c_code = description.split(',')
for token in x_code.replace('-', '+-').replace(';+', ';').split('+'):
parse_token(token, 'X or K')
for token in c_code.replace('-', '+-').replace(';+', ';').split('+'):
parse_token(token, 'C')
else:
for token in description.replace('-', '+-').replace(';+', ';').split('+'):
parse_token(token, 'compound XC', search_xc_alias=True)
if hyb[2] == 0: # No omega is assigned. LR_HF is 0 for normal Coulomb operator
hyb[1] = 0
return hyb, remove_dup(fn_facs)
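# Illustrative results (a sketch; the integer libxc IDs inside fn_facs depend on
# the linked libxc version):
#   parse_xc('HF,')       -> hyb = [1, 0, 0] and an empty fn_facs list
#   parse_xc('0.5*B3LYP') -> hyb = [0.1, 0, 0] plus the scaled SLATER/B88/LYP/VWN factors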
_NAME_WITH_DASH = {'SR-HF' : 'SR_HF',
'LR-HF' : 'LR_HF',
'OTPSS-D' : 'OTPSS_D',
'B97-1' : 'B97_1',
'B97-2' : 'B97_2',
'B97-3' : 'B97_3',
'B97-K' : 'B97_K',
'B97-D' : 'B97_D',
'HCTH-93' : 'HCTH_93',
'HCTH-120' : 'HCTH_120',
'HCTH-147' : 'HCTH_147',
'HCTH-407' : 'HCTH_407',
'WB97X-D' : 'WB97X_D',
'WB97X-V' : 'WB97X_V',
'WB97M-V' : 'WB97M_V',
'B97M-V' : 'B97M_V',
'M05-2X' : 'M05_2X',
'M06-L' : 'M06_L',
'M06-HF' : 'M06_HF',
'M06-2X' : 'M06_2X',
'M08-HX' : 'M08_HX',
'M08-SO' : 'M08_SO',
'M11-L' : 'M11_L',
'MN12-L' : 'MN12_L',
'MN15-L' : 'MN15_L',
'MN12-SX' : 'MN12_SX',
'N12-SX' : 'N12_SX',
'LRC-WPBE' : 'LRC_WPBE',
'LRC-WPBEH': 'LRC_WPBEH',
'LC-VV10' : 'LC_VV10',
'CAM-B3LYP': 'CAM_B3LYP'}
def eval_xc(xc_code, rho, spin=0, relativity=0, deriv=1, omega=None, verbose=None):
r'''Interface to call libxc library to evaluate XC functional, potential
and functional derivatives.
* The given functional xc_code must be a one-line string.
* The functional xc_code is case-insensitive.
* The functional xc_code string has two parts, separated by ",". The
first part describes the exchange functional, the second part sets the
correlation functional.
      - If "," does not appear in the string, the entire string is treated as the
name of a compound functional (containing both the exchange and
the correlation functional) which was declared in the functional
aliases list. The full list of functional aliases can be obtained by
        calling pyscf.dft.libxc.XC_ALIAS.keys().
If the string was not found in the aliased functional list, it is
treated as X functional.
- To input only X functional (without C functional), leave the second
part blank. E.g. description='slater,' means a functional with LDA
contribution only.
- To neglect the contribution of X functional (just apply C functional),
leave blank in the first part, e.g. description=',vwn' means a
functional with VWN only.
- If compound XC functional is specified, no matter whether it is in the
X part (the string in front of comma) or the C part (the string behind
comma), both X and C functionals of the compound XC functional will be
used.
* The functional name can be placed in arbitrary order. Two names need to
be separated by operators "+" or "-". Blank spaces are ignored.
NOTE the parser only reads operators "+" "-" "*". / is not supported.
* A functional name can have at most one factor. If the factor is not
given, it is set to 1. Compound functional can be scaled as a unit. For
example '0.5*b3lyp' is equivalent to
'HF*0.1 + .04*LDA + .36*B88, .405*LYP + .095*VWN'
* String "HF" stands for exact exchange (HF K matrix). "HF" can be put in
the correlation functional part (after comma). Putting "HF" in the
      correlation part is the same as putting "HF" in the exchange part.
* String "RSH" means range-separated operator. Its format is
RSH(omega, alpha, beta). Another way to input RSH is to use keywords
SR_HF and LR_HF: "SR_HF(0.1) * alpha_plus_beta" and "LR_HF(0.1) *
alpha" where the number in parenthesis is the value of omega.
* Be careful with the libxc convention of GGA functional, in which the LDA
contribution is included.
Args:
xc_code : str
A string to describe the linear combination of different XC functionals.
The X and C functional are separated by comma like '.8*LDA+.2*B86,VWN'.
            If "HF" (exact exchange) appears in the string, the HF part will
            be skipped. If an empty string "" is given, the returned exc, vxc,...
will be vectors of zeros.
rho : ndarray
Shape of ((*,N)) for electron density (and derivatives) if spin = 0;
Shape of ((*,N),(*,N)) for alpha/beta electron density (and derivatives) if spin > 0;
where N is number of grids.
rho (*,N) are ordered as (den,grad_x,grad_y,grad_z,laplacian,tau)
where grad_x = d/dx den, laplacian = \nabla^2 den, tau = 1/2(\nabla f)^2
In spin unrestricted case,
rho is ((den_u,grad_xu,grad_yu,grad_zu,laplacian_u,tau_u)
(den_d,grad_xd,grad_yd,grad_zd,laplacian_d,tau_d))
Kwargs:
spin : int
spin polarized if spin > 0
relativity : int
No effects.
verbose : int or object of :class:`Logger`
No effects.
Returns:
ex, vxc, fxc, kxc
where
* vxc = (vrho, vsigma, vlapl, vtau) for restricted case
* vxc for unrestricted case
| vrho[:,2] = (u, d)
| vsigma[:,3] = (uu, ud, dd)
| vlapl[:,2] = (u, d)
| vtau[:,2] = (u, d)
* fxc for restricted case:
(v2rho2, v2rhosigma, v2sigma2, v2lapl2, vtau2, v2rholapl, v2rhotau, v2lapltau, v2sigmalapl, v2sigmatau)
* fxc for unrestricted case:
| v2rho2[:,3] = (u_u, u_d, d_d)
| v2rhosigma[:,6] = (u_uu, u_ud, u_dd, d_uu, d_ud, d_dd)
| v2sigma2[:,6] = (uu_uu, uu_ud, uu_dd, ud_ud, ud_dd, dd_dd)
| v2lapl2[:,3]
| vtau2[:,3]
| v2rholapl[:,4]
| v2rhotau[:,4]
| v2lapltau[:,4]
| v2sigmalapl[:,6]
| v2sigmatau[:,6]
* kxc for restricted case:
(v3rho3, v3rho2sigma, v3rhosigma2, v3sigma3)
* kxc for unrestricted case:
| v3rho3[:,4] = (u_u_u, u_u_d, u_d_d, d_d_d)
| v3rho2sigma[:,9] = (u_u_uu, u_u_ud, u_u_dd, u_d_uu, u_d_ud, u_d_dd, d_d_uu, d_d_ud, d_d_dd)
| v3rhosigma2[:,12] = (u_uu_uu, u_uu_ud, u_uu_dd, u_ud_ud, u_ud_dd, u_dd_dd, d_uu_uu, d_uu_ud, d_uu_dd, d_ud_ud, d_ud_dd, d_dd_dd)
| v3sigma3[:,10] = (uu_uu_uu, uu_uu_ud, uu_uu_dd, uu_ud_ud, uu_ud_dd, uu_dd_dd, ud_ud_ud, ud_ud_dd, ud_dd_dd, dd_dd_dd)
see also libxc_itrf.c
''' # noqa: E501
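    # Minimal usage sketch (assumes the linked libxc provides the PBE functionals;
    # the random rho below is only a placeholder for a real density grid):
    #   rho = numpy.random.rand(4, 100)   # (den, grad_x, grad_y, grad_z) on 100 grids
    #   exc, vxc, fxc, kxc = eval_xc('PBE,PBE', rho, spin=0, deriv=1)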
hyb, fn_facs = parse_xc(xc_code)
if omega is not None:
hyb[2] = float(omega)
return _eval_xc(hyb, fn_facs, rho, spin, relativity, deriv, verbose)
def _eval_xc(hyb, fn_facs, rho, spin=0, relativity=0, deriv=1, verbose=None):
assert(deriv <= 3)
if spin == 0:
nspin = 1
rho_u = rho_d = numpy.asarray(rho, order='C')
else:
nspin = 2
rho_u = numpy.asarray(rho[0], order='C')
rho_d = numpy.asarray(rho[1], order='C')
assert(rho_u.dtype == numpy.double)
assert(rho_d.dtype == numpy.double)
if rho_u.ndim == 1:
rho_u = rho_u.reshape(1,-1)
rho_d = rho_d.reshape(1,-1)
ngrids = rho_u.shape[1]
fn_ids = [x[0] for x in fn_facs]
facs = [x[1] for x in fn_facs]
if hyb[2] != 0:
# Current implementation does not support different omegas for
# different RSH functionals if there are multiple RSHs
omega = [hyb[2]] * len(facs)
else:
omega = [0] * len(facs)
fn_ids_set = set(fn_ids)
if fn_ids_set.intersection(PROBLEMATIC_XC):
problem_xc = [PROBLEMATIC_XC[k]
for k in fn_ids_set.intersection(PROBLEMATIC_XC)]
warnings.warn('Libxc functionals %s may have discrepancy to xcfun '
'library.\n' % problem_xc)
if any([needs_laplacian(fid) for fid in fn_ids]):
raise NotImplementedError('laplacian in meta-GGA method')
n = len(fn_ids)
if (n == 0 or # xc_code = '' or xc_code = 'HF', an empty functional
all((is_lda(x) for x in fn_ids))):
if spin == 0:
nvar = 1
else:
nvar = 2
elif any((is_meta_gga(x) for x in fn_ids)):
if spin == 0:
nvar = 4
else:
nvar = 9
else: # GGA
if spin == 0:
nvar = 2
else:
nvar = 5
outlen = (math.factorial(nvar+deriv) //
(math.factorial(nvar) * math.factorial(deriv)))
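    # outlen = C(nvar+deriv, deriv): number of distinct output components,
    # i.e. exc plus every unique functional derivative up to order `deriv`.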
outbuf = numpy.zeros((outlen,ngrids))
_itrf.LIBXC_eval_xc(ctypes.c_int(n),
(ctypes.c_int*n)(*fn_ids),
(ctypes.c_double*n)(*facs),
(ctypes.c_double*n)(*omega),
ctypes.c_int(nspin),
ctypes.c_int(deriv), ctypes.c_int(rho_u.shape[1]),
rho_u.ctypes.data_as(ctypes.c_void_p),
rho_d.ctypes.data_as(ctypes.c_void_p),
outbuf.ctypes.data_as(ctypes.c_void_p))
exc = outbuf[0]
vxc = fxc = kxc = None
if nvar == 1: # LDA
if deriv > 0:
vxc = (outbuf[1], None, None, None)
if deriv > 1:
fxc = (outbuf[2],) + (None,)*9
if deriv > 2:
kxc = (outbuf[3], None, None, None)
elif nvar == 2:
if spin == 0: # GGA
if deriv > 0:
vxc = (outbuf[1], outbuf[2], None, None)
if deriv > 1:
fxc = (outbuf[3], outbuf[4], outbuf[5],) + (None,)*7
if deriv > 2:
kxc = outbuf[6:10]
else: # LDA
if deriv > 0:
vxc = (outbuf[1:3].T, None, None, None)
if deriv > 1:
fxc = (outbuf[3:6].T,) + (None,)*9
if deriv > 2:
kxc = (outbuf[6:10].T, None, None, None)
elif nvar == 5: # GGA
if deriv > 0:
vxc = (outbuf[1:3].T, outbuf[3:6].T, None, None)
if deriv > 1:
fxc = (outbuf[6:9].T, outbuf[9:15].T, outbuf[15:21].T) + (None,)*7
if deriv > 2:
kxc = (outbuf[21:25].T, outbuf[25:34].T, outbuf[34:46].T, outbuf[46:56].T)
elif nvar == 4: # MGGA
if deriv > 0:
vxc = outbuf[1:5]
if deriv > 1:
fxc = outbuf[5:15]
if deriv > 2:
kxc = outbuf[15:19]
elif nvar == 9: # MGGA
if deriv > 0:
vxc = (outbuf[1:3].T, outbuf[3:6].T, outbuf[6:8].T, outbuf[8:10].T)
if deriv > 1:
fxc = (outbuf[10:13].T, outbuf[13:19].T, outbuf[19:25].T,
outbuf[25:28].T, outbuf[28:31].T, outbuf[31:35].T,
outbuf[35:39].T, outbuf[39:43].T, outbuf[43:49].T,
outbuf[49:55].T)
return exc, vxc, fxc, kxc
def define_xc_(ni, description, xctype='LDA', hyb=0, rsh=(0,0,0)):
'''Define XC functional. See also :func:`eval_xc` for the rules of input description.
Args:
ni : an instance of :class:`NumInt`
description : str
A string to describe the linear combination of different XC functionals.
The X and C functional are separated by comma like '.8*LDA+.2*B86,VWN'.
            If "HF" appears in the string, it stands for the exact exchange.
Kwargs:
xctype : str
'LDA' or 'GGA' or 'MGGA'
hyb : float
hybrid functional coefficient
rsh : a list of three floats
coefficients (omega, alpha, beta) for range-separated hybrid functional.
omega is the exponent factor in attenuated Coulomb operator e^{-omega r_{12}}/r_{12}
alpha is the coefficient for long-range part, hybrid coefficient
can be obtained by alpha + beta
Examples:
>>> mol = gto.M(atom='O 0 0 0; H 0 0 1; H 0 1 0', basis='ccpvdz')
>>> mf = dft.RKS(mol)
>>> define_xc_(mf._numint, '.2*HF + .08*LDA + .72*B88, .81*LYP + .19*VWN')
>>> mf.kernel()
-76.3783361189611
>>> define_xc_(mf._numint, 'LDA*.08 + .72*B88 + .2*HF, .81*LYP + .19*VWN')
>>> mf.kernel()
-76.3783361189611
>>> def eval_xc(xc_code, rho, *args, **kwargs):
... exc = 0.01 * rho**2
... vrho = 0.01 * 2 * rho
... vxc = (vrho, None, None, None)
... fxc = None # 2nd order functional derivative
... kxc = None # 3rd order functional derivative
... return exc, vxc, fxc, kxc
>>> define_xc_(mf._numint, eval_xc, xctype='LDA')
>>> mf.kernel()
48.8525211046668
'''
if isinstance(description, str):
ni.eval_xc = lambda xc_code, rho, *args, **kwargs: \
eval_xc(description, rho, *args, **kwargs)
ni.hybrid_coeff = lambda *args, **kwargs: hybrid_coeff(description)
ni.rsh_coeff = lambda *args: rsh_coeff(description)
ni._xc_type = lambda *args: xc_type(description)
elif callable(description):
ni.eval_xc = description
ni.hybrid_coeff = lambda *args, **kwargs: hyb
ni.rsh_coeff = lambda *args, **kwargs: rsh
ni._xc_type = lambda *args: xctype
else:
raise ValueError('Unknown description %s' % description)
return ni
def define_xc(ni, description, xctype='LDA', hyb=0, rsh=(0,0,0)):
return define_xc_(copy.copy(ni), description, xctype, hyb, rsh)
define_xc.__doc__ = define_xc_.__doc__
|
py | 1a3b9047103ae17a88b853a1131e9cabb913bc09 | #!/usr/bin/env python
# -*- encoding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from setuptools import setup
setup(
use_scm_version=True,
setup_requires=["setuptools_scm"],
)
|
py | 1a3b909c9ca89f37483db32a8086f399f0c0e102 | from django.db import models
from .validators import validate_extension
from account.models import Account
# Create your models here.
import os
#%%
def user_directory_path(instance, filename):
# file will be uploaded to MEDIA_ROOT/user_<id>/<filename>
return 'user_{0}/files/{1}'.format(instance.user.id, filename)
class ExcelDocument(models.Model):
uploaded_at = models.DateTimeField(auto_now_add=True)
user = models.ForeignKey(Account, editable=False, null=True, blank=True, on_delete=models.CASCADE)
upload = models.FileField(upload_to=user_directory_path, validators=[validate_extension])
def filename(self):
return os.path.basename(self.upload.name)
def retrieve_data(request):
        # This query yields the files that are relevant to the specific user.
data = ExcelDocument.objects.filter(user=request.user.id)
return data
def __str__(self):
return str(self.upload)
|
py | 1a3b90bd46b6c88fadde65575364cd496c78b7e7 | """
ID: fufa0001
LANG: PYTHON3
TASK: milk2
"""
fin = open('milk2.in','r')
fout = open('milk2.out','w')
count, *times = fin.readlines()
for i in range(0,int(count)):
times[i]=list(map(int, times[i].split()))
times = sorted(times, key=lambda tup: tup[0])
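# Merge overlapping (or touching) milking intervals into disjoint intervals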
merged = []
for higher in times:
if not merged:
merged.append(higher)
else:
lower = merged[-1]
if higher[0] <= lower[1]:
upper_bound = max(lower[1], higher[1])
merged[-1] = (lower[0], upper_bound) # replace by merged interval
else:
merged.append(higher)
longest_milk = 0
longest_no_milk = 0
for i in range(0,len(merged)):
diff = merged[i][1] - merged[i][0]
if diff > longest_milk:
longest_milk = diff
if i != len(merged) - 1:
diff = merged[i+1][0] - merged[i][1]
if diff > longest_no_milk:
longest_no_milk = diff
fout.write(str(longest_milk) + " " + str(longest_no_milk) + "\n")
fout.close()
|
py | 1a3b90d7b8f2af24a827c5ea58fc32a12f9b6330 | # Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
import os
import socket
from os.path import join as pjoin
def get_spec_path(spec, package_name, path_replacements={}, use_bin=False):
"""Extracts the prefix path for the given spack package
path_replacements is a dictionary with string replacements for the path.
"""
if not use_bin:
path = spec[package_name].prefix
else:
path = spec[package_name].prefix.bin
path = os.path.realpath(path)
for key in path_replacements:
path = path.replace(key, path_replacements[key])
return path
class Axom(CachedCMakePackage, CudaPackage):
"""Axom provides a robust, flexible software infrastructure for the development
of multi-physics applications and computational tools."""
maintainers = ['white238']
homepage = "https://github.com/LLNL/axom"
git = "https://github.com/LLNL/axom.git"
version('main', branch='main', submodules=True)
version('develop', branch='develop', submodules=True)
version('0.5.0', tag='v0.5.0', submodules=True)
version('0.4.0', tag='v0.4.0', submodules=True)
version('0.3.3', tag='v0.3.3', submodules=True)
version('0.3.2', tag='v0.3.2', submodules=True)
version('0.3.1', tag='v0.3.1', submodules=True)
version('0.3.0', tag='v0.3.0', submodules=True)
version('0.2.9', tag='v0.2.9', submodules=True)
root_cmakelists_dir = 'src'
# -----------------------------------------------------------------------
# Variants
# -----------------------------------------------------------------------
variant('shared', default=True,
description='Enable build of shared libraries')
variant('debug', default=False,
description='Build debug instead of optimized version')
variant('examples', default=True, description='Build examples')
variant('tools', default=True, description='Build tools')
variant('cpp14', default=True, description="Build with C++14 support")
variant('fortran', default=True, description="Build with Fortran support")
variant("python", default=False, description="Build python support")
variant("mpi", default=True, description="Build MPI support")
variant('openmp', default=True, description='Turn on OpenMP support.')
variant("mfem", default=False, description="Build with mfem")
variant("hdf5", default=True, description="Build with hdf5")
variant("lua", default=True, description="Build with Lua")
variant("scr", default=False, description="Build with SCR")
variant("umpire", default=True, description="Build with umpire")
variant("raja", default=True, description="Build with raja")
varmsg = "Build development tools (such as Sphinx, Doxygen, etc...)"
variant("devtools", default=False, description=varmsg)
# -----------------------------------------------------------------------
# Dependencies
# -----------------------------------------------------------------------
# Basics
depends_on("[email protected]:", type='build')
depends_on("mpi", when="+mpi")
# Libraries
depends_on("conduit+python", when="+python")
depends_on("conduit~python", when="~python")
depends_on("conduit+hdf5", when="+hdf5")
depends_on("conduit~hdf5", when="~hdf5")
# HDF5 needs to be the same as Conduit's
depends_on("[email protected]:1.8.999~cxx~fortran", when="+hdf5")
depends_on("lua", when="+lua")
depends_on("scr", when="+scr")
depends_on("kvtree@master", when="+scr")
depends_on("dtcmp", when="+scr")
depends_on("raja~openmp", when="+raja~openmp")
depends_on("raja+openmp", when="+raja+openmp")
depends_on("raja+cuda", when="+raja+cuda")
depends_on("umpire~openmp", when="+umpire~openmp")
depends_on("umpire+openmp", when="+umpire+openmp")
depends_on("umpire+cuda", when="+umpire+cuda")
for sm_ in CudaPackage.cuda_arch_values:
depends_on('raja cuda_arch={0}'.format(sm_),
when='+raja cuda_arch={0}'.format(sm_))
depends_on('umpire cuda_arch={0}'.format(sm_),
when='+umpire cuda_arch={0}'.format(sm_))
depends_on("mfem", when="+mfem")
depends_on("mfem~mpi", when="+mfem~mpi")
depends_on("python", when="+python")
# Devtools
depends_on("cppcheck", when="+devtools")
depends_on("doxygen", when="+devtools")
depends_on("graphviz", when="+devtools")
depends_on("python", when="+devtools")
depends_on("py-sphinx", when="+devtools")
depends_on("py-shroud", when="+devtools")
depends_on("[email protected]", when="+devtools", type='build')
# Conduit's cmake config files moved and < 0.4.0 can't find it
conflicts("^[email protected]:", when="@:0.4.0")
# Sidre requires conduit_blueprint_mpi.hpp
conflicts("^conduit@:0.6.0", when="@0.5.0:")
def flag_handler(self, name, flags):
if self.spec.satisfies('%cce') and name == 'fflags':
flags.append('-ef')
if name in ('cflags', 'cxxflags', 'cppflags', 'fflags'):
return (None, None, None) # handled in the cmake cache
return (flags, None, None)
def _get_sys_type(self, spec):
sys_type = spec.architecture
# if on llnl systems, we can use the SYS_TYPE
if "SYS_TYPE" in env:
sys_type = env["SYS_TYPE"]
return sys_type
@property
def cache_name(self):
hostname = socket.gethostname()
if "SYS_TYPE" in env:
# Are we on a LLNL system then strip node number
hostname = hostname.rstrip('1234567890')
return "{0}-{1}-{2}@{3}.cmake".format(
hostname,
self._get_sys_type(self.spec),
self.spec.compiler.name,
self.spec.compiler.version
)
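    # Example (hypothetical host): on a node "quartz23" with SYS_TYPE
    # "toss_3_x86_64_ib" and clang 10.0.1, this yields
    # "quartz-toss_3_x86_64_ib-clang@10.0.1.cmake".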
def initconfig_compiler_entries(self):
spec = self.spec
entries = super(Axom, self).initconfig_compiler_entries()
if "+fortran" in spec or self.compiler.fc is not None:
entries.append(cmake_cache_option("ENABLE_FORTRAN", True))
else:
entries.append(cmake_cache_option("ENABLE_FORTRAN", False))
if ((self.compiler.fc is not None)
and ("gfortran" in self.compiler.fc)
and ("clang" in self.compiler.cxx)):
libdir = pjoin(os.path.dirname(
os.path.dirname(self.compiler.cxx)), "lib")
flags = ""
for _libpath in [libdir, libdir + "64"]:
if os.path.exists(_libpath):
flags += " -Wl,-rpath,{0}".format(_libpath)
description = ("Adds a missing libstdc++ rpath")
if flags:
entries.append(cmake_cache_string("BLT_EXE_LINKER_FLAGS", flags,
description))
if "+cpp14" in spec:
entries.append(cmake_cache_string("BLT_CXX_STD", "c++14", ""))
return entries
def initconfig_hardware_entries(self):
spec = self.spec
entries = super(Axom, self).initconfig_hardware_entries()
if "+cuda" in spec:
entries.append(cmake_cache_option("ENABLE_CUDA", True))
entries.append(cmake_cache_option("CUDA_SEPARABLE_COMPILATION",
True))
entries.append(
cmake_cache_option("AXOM_ENABLE_ANNOTATIONS", True))
# CUDA_FLAGS
cudaflags = "-restrict --expt-extended-lambda "
if not spec.satisfies('cuda_arch=none'):
cuda_arch = spec.variants['cuda_arch'].value[0]
entries.append(cmake_cache_string(
"CMAKE_CUDA_ARCHITECTURES",
cuda_arch))
cudaflags += '-arch sm_${CMAKE_CUDA_ARCHITECTURES} '
else:
entries.append(
"# cuda_arch could not be determined\n\n")
if "+cpp14" in spec:
cudaflags += " -std=c++14"
else:
cudaflags += " -std=c++11"
entries.append(
cmake_cache_string("CMAKE_CUDA_FLAGS", cudaflags))
entries.append(
"# nvcc does not like gtest's 'pthreads' flag\n")
entries.append(
cmake_cache_option("gtest_disable_pthreads", True))
entries.append("#------------------{0}".format("-" * 30))
entries.append("# Hardware Specifics")
entries.append("#------------------{0}\n".format("-" * 30))
# OpenMP
entries.append(cmake_cache_option("ENABLE_OPENMP",
spec.satisfies('+openmp')))
# Enable death tests
entries.append(cmake_cache_option(
"ENABLE_GTEST_DEATH_TESTS",
not spec.satisfies('+cuda target=ppc64le:')
))
if (self.compiler.fc is not None) and ("xlf" in self.compiler.fc):
# Grab lib directory for the current fortran compiler
libdir = pjoin(os.path.dirname(
os.path.dirname(self.compiler.fc)),
"lib")
description = ("Adds a missing rpath for libraries "
"associated with the fortran compiler")
linker_flags = "${BLT_EXE_LINKER_FLAGS} -Wl,-rpath," + libdir
entries.append(cmake_cache_string("BLT_EXE_LINKER_FLAGS",
linker_flags, description))
if "+shared" in spec:
linker_flags = "${CMAKE_SHARED_LINKER_FLAGS} -Wl,-rpath," \
+ libdir
entries.append(cmake_cache_string(
"CMAKE_SHARED_LINKER_FLAGS",
linker_flags, description))
description = ("Converts C-style comments to Fortran style "
"in preprocessed files")
entries.append(cmake_cache_string(
"BLT_FORTRAN_FLAGS",
"-WF,-C! -qxlf2003=polymorphic",
description))
if spec.satisfies('target=ppc64le:'):
# Fix for working around CMake adding implicit link directories
# returned by the BlueOS compilers to link executables with
# non-system default stdlib
_gcc_prefix = "/usr/tce/packages/gcc/gcc-4.9.3/lib64"
if os.path.exists(_gcc_prefix):
_gcc_prefix2 = pjoin(
_gcc_prefix,
"gcc/powerpc64le-unknown-linux-gnu/4.9.3")
_link_dirs = "{0};{1}".format(_gcc_prefix, _gcc_prefix2)
entries.append(cmake_cache_string(
"BLT_CMAKE_IMPLICIT_LINK_DIRECTORIES_EXCLUDE",
_link_dirs))
return entries
def initconfig_mpi_entries(self):
spec = self.spec
entries = super(Axom, self).initconfig_mpi_entries()
if "+mpi" in spec:
entries.append(cmake_cache_option("ENABLE_MPI", True))
if spec['mpi'].name == 'spectrum-mpi':
entries.append(cmake_cache_string("BLT_MPI_COMMAND_APPEND",
"mpibind"))
else:
entries.append(cmake_cache_option("ENABLE_MPI", False))
return entries
def initconfig_package_entries(self):
spec = self.spec
entries = []
# TPL locations
entries.append("#------------------{0}".format("-" * 60))
entries.append("# TPLs")
entries.append("#------------------{0}\n".format("-" * 60))
# Try to find the common prefix of the TPL directory, including the
# compiler. If found, we will use this in the TPL paths
compiler_str = str(spec.compiler).replace('@', '-')
prefix_paths = prefix.split(compiler_str)
path_replacements = {}
if len(prefix_paths) == 2:
tpl_root = os.path.realpath(pjoin(prefix_paths[0], compiler_str))
path_replacements[tpl_root] = "${TPL_ROOT}"
entries.append("# Root directory for generated TPLs\n")
entries.append(cmake_cache_path("TPL_ROOT", tpl_root))
conduit_dir = get_spec_path(spec, "conduit", path_replacements)
entries.append(cmake_cache_path("CONDUIT_DIR", conduit_dir))
# optional tpls
for dep in ('mfem', 'hdf5', 'lua', 'raja', 'umpire'):
if '+%s' % dep in spec:
dep_dir = get_spec_path(spec, dep, path_replacements)
entries.append(cmake_cache_path('%s_DIR' % dep.upper(),
dep_dir))
else:
entries.append('# %s not built\n' % dep.upper())
if '+scr' in spec:
dep_dir = get_spec_path(spec, 'scr', path_replacements)
entries.append(cmake_cache_path('SCR_DIR', dep_dir))
# scr's dependencies
for dep in ('kvtree', 'dtcmp'):
if spec.satisfies('^{0}'.format(dep)):
dep_dir = get_spec_path(spec, dep, path_replacements)
entries.append(cmake_cache_path('%s_DIR' % dep.upper(), dep_dir))
else:
entries.append('# scr not built\n')
##################################
# Devtools
##################################
entries.append("#------------------{0}".format("-" * 60))
entries.append("# Devtools")
entries.append("#------------------{0}\n".format("-" * 60))
# Add common prefix to path replacement list
if "+devtools" in spec:
# Grab common devtools root and strip the trailing slash
path1 = os.path.realpath(spec["cppcheck"].prefix)
path2 = os.path.realpath(spec["doxygen"].prefix)
devtools_root = os.path.commonprefix([path1, path2])[:-1]
path_replacements[devtools_root] = "${DEVTOOLS_ROOT}"
entries.append(
"# Root directory for generated developer tools\n")
entries.append(cmake_cache_path("DEVTOOLS_ROOT", devtools_root))
# Only turn on clangformat support if devtools is on
clang_fmt_path = spec['llvm'].prefix.bin.join('clang-format')
entries.append(cmake_cache_path(
"CLANGFORMAT_EXECUTABLE", clang_fmt_path))
else:
entries.append("# ClangFormat disabled due to disabled devtools\n")
entries.append(cmake_cache_option("ENABLE_CLANGFORMAT", False))
if spec.satisfies('^python') or "+devtools" in spec:
python_path = os.path.realpath(spec['python'].command.path)
for key in path_replacements:
python_path = python_path.replace(key, path_replacements[key])
entries.append(cmake_cache_path("PYTHON_EXECUTABLE", python_path))
enable_docs = spec.satisfies('^doxygen') or spec.satisfies('^py-sphinx')
entries.append(cmake_cache_option("ENABLE_DOCS", enable_docs))
if spec.satisfies('^py-sphinx'):
python_bin_dir = get_spec_path(spec, "python",
path_replacements,
use_bin=True)
entries.append(cmake_cache_path("SPHINX_EXECUTABLE",
pjoin(python_bin_dir,
"sphinx-build")))
if spec.satisfies('^py-shroud'):
shroud_bin_dir = get_spec_path(spec, "py-shroud",
path_replacements, use_bin=True)
entries.append(cmake_cache_path("SHROUD_EXECUTABLE",
pjoin(shroud_bin_dir, "shroud")))
for dep in ('cppcheck', 'doxygen'):
if spec.satisfies('^%s' % dep):
dep_bin_dir = get_spec_path(spec, dep, path_replacements,
use_bin=True)
entries.append(cmake_cache_path('%s_EXECUTABLE' % dep.upper(),
pjoin(dep_bin_dir, dep)))
return entries
def cmake_args(self):
options = []
if self.run_tests is False:
options.append('-DENABLE_TESTS=OFF')
else:
options.append('-DENABLE_TESTS=ON')
options.append(self.define_from_variant(
'BUILD_SHARED_LIBS', 'shared'))
options.append(self.define_from_variant(
'AXOM_ENABLE_EXAMPLES', 'examples'))
options.append(self.define_from_variant(
'AXOM_ENABLE_TOOLS', 'tools'))
return options
def patch(self):
if self.spec.satisfies('%cce'):
filter_file('PROPERTIES LINKER_LANGUAGE CXX',
'PROPERTIES LINKER_LANGUAGE CXX \n LINK_FLAGS "-fopenmp"',
'src/axom/quest/examples/CMakeLists.txt')
|
py | 1a3b91e6ec4e91c8e85503791cd5b87adc1ce993 | # ------------------------------------
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
# ------------------------------------
from typing import TYPE_CHECKING
from uuid import uuid4
from azure.core.tracing.decorator import distributed_trace
from azure.core.tracing.decorator_async import distributed_trace_async
from .._models import KeyVaultRoleAssignment, KeyVaultRoleDefinition
from .._internal import AsyncKeyVaultClientBase
if TYPE_CHECKING:
# pylint:disable=ungrouped-imports
from typing import Any, Optional, Union
from uuid import UUID
from azure.core.async_paging import AsyncItemPaged
from .._enums import KeyVaultRoleScope
class KeyVaultAccessControlClient(AsyncKeyVaultClientBase):
"""Manages role-based access to Azure Key Vault.
:param str vault_url: URL of the vault the client will manage. This is also called the vault's "DNS Name".
:param credential: an object which can provide an access token for the vault, such as a credential from
:mod:`azure.identity`
"""
# pylint:disable=protected-access
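    # Minimal usage sketch (URL and credential are placeholders):
    #   from azure.identity.aio import DefaultAzureCredential
    #   client = KeyVaultAccessControlClient("https://my-hsm.managedhsm.azure.net/",
    #                                        DefaultAzureCredential())
    #   assignments = client.list_role_assignments(KeyVaultRoleScope.GLOBAL)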
@distributed_trace_async
async def create_role_assignment(
self, role_scope: "Union[str, KeyVaultRoleScope]", role_definition_id: str, principal_id: str, **kwargs: "Any"
) -> KeyVaultRoleAssignment:
"""Create a role assignment.
:param role_scope: scope the role assignment will apply over. :class:`KeyVaultRoleScope` defines common broad
scopes. Specify a narrower scope as a string.
:type role_scope: str or KeyVaultRoleScope
:param str role_definition_id: ID of the role's definition
:param str principal_id: Azure Active Directory object ID of the principal which will be assigned the role. The
principal can be a user, service principal, or security group.
:keyword role_assignment_name: a name for the role assignment. Must be a UUID.
:paramtype role_assignment_name: str or uuid.UUID
:rtype: ~azure.keyvault.administration.KeyVaultRoleAssignment
"""
role_assignment_name = kwargs.pop("role_assignment_name", None) or uuid4()
create_parameters = self._client.role_assignments.models.RoleAssignmentCreateParameters(
properties=self._client.role_assignments.models.RoleAssignmentProperties(
principal_id=principal_id, role_definition_id=str(role_definition_id)
)
)
assignment = await self._client.role_assignments.create(
vault_base_url=self._vault_url,
scope=role_scope,
role_assignment_name=str(role_assignment_name),
parameters=create_parameters,
**kwargs
)
return KeyVaultRoleAssignment._from_generated(assignment)
@distributed_trace_async
async def delete_role_assignment(
self, role_scope: "Union[str, KeyVaultRoleScope]", role_assignment_name: "Union[str, UUID]", **kwargs: "Any"
) -> KeyVaultRoleAssignment:
"""Delete a role assignment.
:param role_scope: the assignment's scope, for example "/", "/keys", or "/keys/<specific key identifier>".
:class:`KeyVaultRoleScope` defines common broad scopes. Specify a narrower scope as a string.
:type role_scope: str or KeyVaultRoleScope
:param role_assignment_name: the assignment's name.
:type role_assignment_name: str or uuid.UUID
:returns: the deleted assignment
:rtype: ~azure.keyvault.administration.KeyVaultRoleAssignment
"""
assignment = await self._client.role_assignments.delete(
vault_base_url=self._vault_url, scope=role_scope, role_assignment_name=str(role_assignment_name), **kwargs
)
return KeyVaultRoleAssignment._from_generated(assignment)
@distributed_trace_async
async def get_role_assignment(
self, role_scope: "Union[str, KeyVaultRoleScope]", role_assignment_name: "Union[str, UUID]", **kwargs: "Any"
) -> KeyVaultRoleAssignment:
"""Get a role assignment.
:param role_scope: the assignment's scope, for example "/", "/keys", or "/keys/<specific key identifier>".
:class:`KeyVaultRoleScope` defines common broad scopes. Specify a narrower scope as a string.
:type role_scope: str or KeyVaultRoleScope
:param role_assignment_name: the assignment's name.
:type role_assignment_name: str or uuid.UUID
:rtype: ~azure.keyvault.administration.KeyVaultRoleAssignment
"""
assignment = await self._client.role_assignments.get(
vault_base_url=self._vault_url, scope=role_scope, role_assignment_name=str(role_assignment_name), **kwargs
)
return KeyVaultRoleAssignment._from_generated(assignment)
@distributed_trace
def list_role_assignments(
self, role_scope: "Union[str, KeyVaultRoleScope]", **kwargs: "Any"
) -> "AsyncItemPaged[KeyVaultRoleAssignment]":
"""List all role assignments for a scope.
:param role_scope: scope of the role assignments. :class:`KeyVaultRoleScope` defines common broad
scopes. Specify a narrower scope as a string.
:type role_scope: str or KeyVaultRoleScope
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.keyvault.administration.KeyVaultRoleAssignment]
"""
return self._client.role_assignments.list_for_scope(
self._vault_url,
role_scope,
cls=lambda result: [KeyVaultRoleAssignment._from_generated(a) for a in result],
**kwargs
)
@distributed_trace_async
async def set_role_definition(
self,
role_scope: "Union[str, KeyVaultRoleScope]",
role_definition_name: "Optional[Union[str, UUID]]" = None,
**kwargs: "Any"
) -> "KeyVaultRoleDefinition":
"""Creates or updates a custom role definition.
:param role_scope: scope of the role definition. :class:`KeyVaultRoleScope` defines common broad scopes.
Specify a narrower scope as a string. Managed HSM only supports '/', or KeyVaultRoleScope.GLOBAL.
:type role_scope: str or KeyVaultRoleScope
:param role_definition_name: the unique role definition name. Unless a UUID is provided, a new role definition
will be created with a generated unique name. Providing the unique name of an existing role definition will
update that role definition.
:type role_definition_name: str or uuid.UUID
:keyword str role_name: the role's display name. If unspecified when creating or updating a role definition, the
role name will be set to an empty string.
:keyword str description: a description of the role definition. If unspecified when creating or updating a role
definition, the description will be set to an empty string.
:keyword permissions: the role definition's permissions. If unspecified when creating or updating a role
definition, the role definition will have no action permissions.
:paramtype permissions: Iterable[KeyVaultPermission]
:keyword assignable_scopes: the scopes for which the role definition can be assigned.
:paramtype assignable_scopes: Iterable[str] or Iterable[KeyVaultRoleScope]
:returns: The created or updated role definition
:rtype: ~azure.keyvault.administration.KeyVaultRoleDefinition
"""
permissions = [
self._client.role_definitions.models.Permission(
actions=p.actions,
not_actions=p.not_actions,
data_actions=p.data_actions,
not_data_actions=p.not_data_actions,
)
for p in kwargs.pop("permissions", None) or []
]
properties = self._client.role_definitions.models.RoleDefinitionProperties(
role_name=kwargs.pop("role_name", None),
description=kwargs.pop("description", None),
permissions=permissions,
assignable_scopes=kwargs.pop("assignable_scopes", None),
)
parameters = self._client.role_definitions.models.RoleDefinitionCreateParameters(properties=properties)
definition = await self._client.role_definitions.create_or_update(
vault_base_url=self._vault_url,
scope=role_scope,
role_definition_name=str(role_definition_name or uuid4()),
parameters=parameters,
**kwargs
)
return KeyVaultRoleDefinition._from_generated(definition)
@distributed_trace_async
async def get_role_definition(
self, role_scope: "Union[str, KeyVaultRoleScope]", role_definition_name: "Union[str, UUID]", **kwargs: "Any"
) -> "KeyVaultRoleDefinition":
"""Get the specified role definition.
:param role_scope: scope of the role definition. :class:`KeyVaultRoleScope` defines common broad scopes.
Specify a narrower scope as a string. Managed HSM only supports '/', or KeyVaultRoleScope.GLOBAL.
:type role_scope: str or KeyVaultRoleScope
:param role_definition_name: the role definition's name.
:type role_definition_name: str or uuid.UUID
:rtype: ~azure.keyvault.administration.KeyVaultRoleDefinition
"""
definition = await self._client.role_definitions.get(
vault_base_url=self._vault_url, scope=role_scope, role_definition_name=str(role_definition_name), **kwargs
)
return KeyVaultRoleDefinition._from_generated(definition)
@distributed_trace_async
async def delete_role_definition(
self, role_scope: "Union[str, KeyVaultRoleScope]", role_definition_name: "Union[str, UUID]", **kwargs: "Any"
) -> "KeyVaultRoleDefinition":
"""Deletes a custom role definition.
:param role_scope: scope of the role definition. :class:`KeyVaultRoleScope` defines common broad scopes.
Specify a narrower scope as a string. Managed HSM only supports '/', or KeyVaultRoleScope.GLOBAL.
:type role_scope: str or KeyVaultRoleScope
:param role_definition_name: the role definition's name.
:type role_definition_name: str or uuid.UUID
:returns: the deleted role definition
:rtype: ~azure.keyvault.administration.KeyVaultRoleDefinition
"""
definition = await self._client.role_definitions.delete(
vault_base_url=self._vault_url, scope=role_scope, role_definition_name=str(role_definition_name), **kwargs
)
return KeyVaultRoleDefinition._from_generated(definition)
@distributed_trace
def list_role_definitions(
self, role_scope: "Union[str, KeyVaultRoleScope]", **kwargs: "Any"
) -> "AsyncItemPaged[KeyVaultRoleDefinition]":
"""List all role definitions applicable at and above a scope.
:param role_scope: scope of the role definitions. :class:`KeyVaultRoleScope` defines common broad
scopes. Specify a narrower scope as a string.
:type role_scope: str or KeyVaultRoleScope
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.keyvault.administration.KeyVaultRoleDefinition]
"""
return self._client.role_definitions.list(
self._vault_url,
role_scope,
cls=lambda result: [KeyVaultRoleDefinition._from_generated(d) for d in result],
**kwargs
)
|
py | 1a3b92b052be63c6d02ef90148071b10689b58d3 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
transcrypt.py
Encrypt files using pyscrypt (https://github.com/ricmoo/pyscrypt)
Copyright (c) 2021 Rainer Schwarzbach
License: MIT, see LICENSE file
"""
import argparse
import base64
import getpass
import io
import logging
import pathlib
import sys
import pyscrypt
#
# Constants
#
MESSAGE_FORMAT = '%(levelname)-8s\u2551 %(message)s'
RETURNCODE_OK = 0
RETURNCODE_ERROR = 1
#
# Functions
#
def encrypt(arguments):
"""Encrypt the input file,
and write the result either to the output file,
    or Ascii85-encoded to stdout
"""
if arguments.input_file:
source_data = arguments.input_file.read_bytes()
else:
source_data = sys.stdin.buffer.read()
#
encryption_password = getpass.getpass(
'Enter encryption password: ').encode('utf-8')
# Encrypt using the password
temp_file = io.BytesIO()
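    # ScryptFile(fp, password, N, r, p): N=1024, r=1, p=1 are the scrypt cost
    # parameters used here (kept low; a larger N gives a stronger KDF).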
scrypt_file = pyscrypt.ScryptFile(
temp_file,
encryption_password,
1024, 1, 1)
scrypt_file.write(source_data)
scrypt_file.finalize()
if arguments.output_file:
arguments.output_file.write_bytes(temp_file.getvalue())
else:
sys.stdout.buffer.write(
base64.a85encode(temp_file.getvalue(), wrapcol=76))
sys.stdout.write('\n')
#
return True
def decrypt(arguments):
"""Decrypt the input file"""
if arguments.input_file:
source_data = arguments.input_file.read_bytes()
else:
source_data = sys.stdin.buffer.read()
#
try:
source_data = base64.a85decode(source_data)
except ValueError:
pass
#
decryption_password = getpass.getpass(
'Enter decryption password: ').encode('utf-8')
scrypt_file = pyscrypt.ScryptFile(
io.BytesIO(source_data),
password=decryption_password)
try:
decrypted_data = scrypt_file.read()
except pyscrypt.file.InvalidScryptFileFormat as error:
logging.error('Error while decrypting input: %s', error)
return False
#
if arguments.output_file:
arguments.output_file.write_bytes(decrypted_data)
else:
sys.stdout.buffer.write(decrypted_data)
#
return True
def __get_arguments():
"""Parse command line arguments"""
argument_parser = argparse.ArgumentParser(
description='Encrypt the input file to a scrypt file.'
' If the scrypt file is written to stdout, it is encoded'
' using Ascii85.')
argument_parser.set_defaults(loglevel=logging.INFO)
argument_parser.add_argument(
'-v', '--verbose',
action='store_const',
const=logging.DEBUG,
dest='loglevel',
help='Output all messages including debug level')
argument_parser.add_argument(
'-q', '--quiet',
action='store_const',
const=logging.WARNING,
dest='loglevel',
help='Limit message output to warnings and errors')
argument_parser.add_argument(
'-d', '--decrypt',
action='store_true',
help='Decrypt instead of encrypting. Accepts Ascii85 encoded input.')
argument_parser.add_argument(
'-i', '--input-file',
type=pathlib.Path,
help='The input file (default: standard input).')
argument_parser.add_argument(
'-o', '--output-file',
type=pathlib.Path,
help='The output file (default: standard output).')
return argument_parser.parse_args()
def main(arguments):
"""Main routine, calling functions from above as required.
Returns a returncode which is used as the script's exit code.
"""
logging.basicConfig(format=MESSAGE_FORMAT,
level=arguments.loglevel)
if arguments.decrypt:
success = decrypt(arguments)
else:
success = encrypt(arguments)
#
if success:
return RETURNCODE_OK
#
return RETURNCODE_ERROR
if __name__ == '__main__':
# Call main() with the provided command line arguments
# and exit with its returncode
sys.exit(main(__get_arguments()))
# vim: fileencoding=utf-8 sw=4 ts=4 sts=4 expandtab autoindent syntax=python:
|
py | 1a3b92c48854db1edf4fdc9acfb769784077781a |
# -*- coding: utf-8 -*-
## scrape all reposts of each tweet
from time import sleep
import csv
import json
from urllib2 import urlopen,Request,ProxyHandler,build_opener,install_opener
import urllib2
import sys
reload(sys)
sys.setdefaultencoding('utf-8')
headers = {
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/56.0.2924.87 Safari/537.36'
}
proxy = '106.120.78.129:80'
proxy_handler = ProxyHandler({'http': proxy})
opener = build_opener(proxy_handler)
install_opener(opener)
print proxy,'ok'
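# Fetch the repost timeline of a tweet page by page via the m.weibo.cn JSON API;
# returns the list of page URLs, or None when the first page carries no reposts.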
def scrape_repost_page_url(tweetid):
pagenum = 1
repost_page_url = []
try:
baseurl = 'https://m.weibo.cn/api/statuses/repostTimeline?id='+tweetid+'&page='
url = baseurl+str(pagenum)
print url
req = Request(url, headers=headers)
response = urlopen(req)
jsonBytes = response.read()
jsonString = jsonBytes.decode('utf-8')
jsonObject = json.loads(jsonString)
if len(jsonObject)==2:
return None
else:
for i in xrange(1,jsonObject['max']+1):
repost_page_url.append(baseurl+str(i))
return repost_page_url
except urllib2.HTTPError as e:
print e
sleep(1)
scrape_repost_page_url(tweetid)
def scrape_repost(url):
try:
print url
req = Request(url, headers=headers)
response = urlopen(req)
jsonBytes = response.read()
jsonString = jsonBytes.decode('utf-8')
jsonObject = json.loads(jsonString)
for repost in jsonObject['data']:
time = repost['created_at']
repost_id = repost['id']
text = repost['raw_text']
like_counts = repost['like_counts']
user_id = repost['user']['id']
profile_url = repost['user']['profile_url']
screen_name = repost['user']['screen_name']
verified = repost['user']['verified']
verified_type = repost['user']['verified_type']
feature = [tweetid, repost_id, time, text, like_counts, user_id, \
profile_url, screen_name, verified, verified_type]
writer.writerow(feature)
except urllib2.HTTPError as e:
print e
sleep(1)
scrape_repost(url)
except:
pass
if __name__ == "__main__":
tweet_ids = []
file = open('D:\my_documents\competition\government\Report\event1\\tweetids_event1.txt')
for line in file:
tweet_ids.append(line.strip())
tweet_ids = tweet_ids[:1000]
global writer
file = open('D:\my_documents\competition\government\Report\event1\\repost.csv','wb')
writer = csv.writer(file)
count = 0
for tweetid in tweet_ids:
count += 1
print count
repost_url_list = scrape_repost_page_url(tweetid)
if repost_url_list!= None:
for url in repost_url_list:
scrape_repost(url)
else:
print count,'none'
|
py | 1a3b92f6688d8ff4e68cc24a7bf9993c89ba49ef | """The IPython kernel implementation"""
import asyncio
from contextlib import contextmanager
from functools import partial
import getpass
import signal
import sys
from IPython.core import release
from ipython_genutils.py3compat import builtin_mod, PY3, unicode_type, safe_unicode
from IPython.utils.tokenutil import token_at_cursor, line_at_cursor
from tornado import gen
from traitlets import Instance, Type, Any, List, Bool
from .comm import CommManager
from .kernelbase import Kernel as KernelBase
from .zmqshell import ZMQInteractiveShell
try:
from IPython.core.interactiveshell import _asyncio_runner
except ImportError:
_asyncio_runner = None
try:
from IPython.core.completer import rectify_completions as _rectify_completions, provisionalcompleter as _provisionalcompleter
_use_experimental_60_completion = True
except ImportError:
_use_experimental_60_completion = False
_EXPERIMENTAL_KEY_NAME = '_jupyter_types_experimental'
class IPythonKernel(KernelBase):
shell = Instance('IPython.core.interactiveshell.InteractiveShellABC',
allow_none=True)
shell_class = Type(ZMQInteractiveShell)
use_experimental_completions = Bool(True,
help="Set this flag to False to deactivate the use of experimental IPython completion APIs.",
).tag(config=True)
user_module = Any()
def _user_module_changed(self, name, old, new):
if self.shell is not None:
self.shell.user_module = new
user_ns = Instance(dict, args=None, allow_none=True)
def _user_ns_changed(self, name, old, new):
if self.shell is not None:
self.shell.user_ns = new
self.shell.init_user_ns()
# A reference to the Python builtin 'raw_input' function.
# (i.e., __builtin__.raw_input for Python 2.7, builtins.input for Python 3)
_sys_raw_input = Any()
_sys_eval_input = Any()
def __init__(self, **kwargs):
super(IPythonKernel, self).__init__(**kwargs)
# Initialize the InteractiveShell subclass
self.shell = self.shell_class.instance(parent=self,
profile_dir = self.profile_dir,
user_module = self.user_module,
user_ns = self.user_ns,
kernel = self,
)
self.shell.displayhook.session = self.session
self.shell.displayhook.pub_socket = self.iopub_socket
self.shell.displayhook.topic = self._topic('execute_result')
self.shell.display_pub.session = self.session
self.shell.display_pub.pub_socket = self.iopub_socket
self.comm_manager = CommManager(parent=self, kernel=self)
self.shell.configurables.append(self.comm_manager)
comm_msg_types = [ 'comm_open', 'comm_msg', 'comm_close' ]
for msg_type in comm_msg_types:
self.shell_handlers[msg_type] = getattr(self.comm_manager, msg_type)
help_links = List([
{
'text': "Python Reference",
'url': "https://docs.python.org/%i.%i" % sys.version_info[:2],
},
{
'text': "IPython Reference",
'url': "https://ipython.org/documentation.html",
},
{
'text': "NumPy Reference",
'url': "https://docs.scipy.org/doc/numpy/reference/",
},
{
'text': "SciPy Reference",
'url': "https://docs.scipy.org/doc/scipy/reference/",
},
{
'text': "Matplotlib Reference",
'url': "https://matplotlib.org/contents.html",
},
{
'text': "SymPy Reference",
'url': "http://docs.sympy.org/latest/index.html",
},
{
'text': "pandas Reference",
'url': "https://pandas.pydata.org/pandas-docs/stable/",
},
]).tag(config=True)
# Kernel info fields
implementation = 'ipython'
implementation_version = release.version
language_info = {
'name': 'python',
'version': sys.version.split()[0],
'mimetype': 'text/x-python',
'codemirror_mode': {
'name': 'ipython',
'version': sys.version_info[0]
},
'pygments_lexer': 'ipython%d' % (3 if PY3 else 2),
'nbconvert_exporter': 'python',
'file_extension': '.py'
}
@property
def banner(self):
return self.shell.banner
def start(self):
self.shell.exit_now = False
super(IPythonKernel, self).start()
def set_parent(self, ident, parent):
"""Overridden from parent to tell the display hook and output streams
about the parent message.
"""
super(IPythonKernel, self).set_parent(ident, parent)
self.shell.set_parent(parent)
def init_metadata(self, parent):
"""Initialize metadata.
Run at the beginning of each execution request.
"""
md = super(IPythonKernel, self).init_metadata(parent)
# FIXME: remove deprecated ipyparallel-specific code
# This is required for ipyparallel < 5.0
md.update({
'dependencies_met' : True,
'engine' : self.ident,
})
return md
def finish_metadata(self, parent, metadata, reply_content):
"""Finish populating metadata.
Run after completing an execution request.
"""
# FIXME: remove deprecated ipyparallel-specific code
# This is required by ipyparallel < 5.0
metadata['status'] = reply_content['status']
if reply_content['status'] == 'error' and reply_content['ename'] == 'UnmetDependency':
metadata['dependencies_met'] = False
return metadata
def _forward_input(self, allow_stdin=False):
"""Forward raw_input and getpass to the current frontend.
via input_request
"""
self._allow_stdin = allow_stdin
if PY3:
self._sys_raw_input = builtin_mod.input
builtin_mod.input = self.raw_input
else:
self._sys_raw_input = builtin_mod.raw_input
self._sys_eval_input = builtin_mod.input
builtin_mod.raw_input = self.raw_input
builtin_mod.input = lambda prompt='': eval(self.raw_input(prompt))
self._save_getpass = getpass.getpass
getpass.getpass = self.getpass
def _restore_input(self):
"""Restore raw_input, getpass"""
if PY3:
builtin_mod.input = self._sys_raw_input
else:
builtin_mod.raw_input = self._sys_raw_input
builtin_mod.input = self._sys_eval_input
getpass.getpass = self._save_getpass
@property
def execution_count(self):
return self.shell.execution_count
@execution_count.setter
def execution_count(self, value):
# Ignore the incrementing done by KernelBase, in favour of our shell's
# execution counter.
pass
@contextmanager
def _cancel_on_sigint(self, future):
"""ContextManager for capturing SIGINT and cancelling a future
SIGINT raises in the event loop when running async code,
but we want it to halt a coroutine.
Ideally, it would raise KeyboardInterrupt,
but this turns it into a CancelledError.
At least it gets a decent traceback to the user.
"""
sigint_future = asyncio.Future()
# whichever future finishes first,
# cancel the other one
def cancel_unless_done(f, _ignored):
if f.cancelled() or f.done():
return
f.cancel()
# when sigint finishes,
# abort the coroutine with CancelledError
sigint_future.add_done_callback(
partial(cancel_unless_done, future)
)
# when the main future finishes,
# stop watching for SIGINT events
future.add_done_callback(
partial(cancel_unless_done, sigint_future)
)
def handle_sigint(*args):
def set_sigint_result():
if sigint_future.cancelled() or sigint_future.done():
return
sigint_future.set_result(1)
# use add_callback for thread safety
self.io_loop.add_callback(set_sigint_result)
# set the custom sigint hander during this context
save_sigint = signal.signal(signal.SIGINT, handle_sigint)
try:
yield
finally:
# restore the previous sigint handler
signal.signal(signal.SIGINT, save_sigint)
@gen.coroutine
def do_execute(self, code, silent, store_history=True,
user_expressions=None, allow_stdin=False):
shell = self.shell # we'll need this a lot here
self._forward_input(allow_stdin)
reply_content = {}
if hasattr(shell, 'run_cell_async') and hasattr(shell, 'should_run_async'):
run_cell = shell.run_cell_async
should_run_async = shell.should_run_async
else:
should_run_async = lambda cell: False
# older IPython,
# use blocking run_cell and wrap it in coroutine
@gen.coroutine
def run_cell(*args, **kwargs):
return shell.run_cell(*args, **kwargs)
try:
# default case: runner is asyncio and asyncio is already running
# TODO: this should check every case for "are we inside the runner",
# not just asyncio
if (
_asyncio_runner
and should_run_async(code)
and shell.loop_runner is _asyncio_runner
and asyncio.get_event_loop().is_running()
):
coro = run_cell(code, store_history=store_history, silent=silent)
coro_future = asyncio.ensure_future(coro)
with self._cancel_on_sigint(coro_future):
res = yield coro_future
else:
# runner isn't already running,
# make synchronous call,
# letting shell dispatch to loop runners
res = shell.run_cell(code, store_history=store_history, silent=silent)
finally:
self._restore_input()
if res.error_before_exec is not None:
err = res.error_before_exec
else:
err = res.error_in_exec
if res.success:
reply_content[u'status'] = u'ok'
else:
reply_content[u'status'] = u'error'
reply_content.update({
u'traceback': shell._last_traceback or [],
u'ename': unicode_type(type(err).__name__),
u'evalue': safe_unicode(err),
})
# FIXME: deprecated piece for ipyparallel (remove in 5.0):
e_info = dict(engine_uuid=self.ident, engine_id=self.int_id,
method='execute')
reply_content['engine_info'] = e_info
# Return the execution counter so clients can display prompts
reply_content['execution_count'] = shell.execution_count - 1
if 'traceback' in reply_content:
self.log.info("Exception in execute request:\n%s", '\n'.join(reply_content['traceback']))
# At this point, we can tell whether the main code execution succeeded
# or not. If it did, we proceed to evaluate user_expressions
if reply_content['status'] == 'ok':
reply_content[u'user_expressions'] = \
shell.user_expressions(user_expressions or {})
else:
# If there was an error, don't even try to compute expressions
reply_content[u'user_expressions'] = {}
# Payloads should be retrieved regardless of outcome, so we can both
# recover partial output (that could have been generated early in a
# block, before an error) and always clear the payload system.
reply_content[u'payload'] = shell.payload_manager.read_payload()
# Be aggressive about clearing the payload because we don't want
# it to sit in memory until the next execute_request comes in.
shell.payload_manager.clear_payload()
return reply_content
def do_complete(self, code, cursor_pos):
if _use_experimental_60_completion and self.use_experimental_completions:
return self._experimental_do_complete(code, cursor_pos)
# FIXME: IPython completers currently assume single line,
# but completion messages give multi-line context
# For now, extract line from cell, based on cursor_pos:
if cursor_pos is None:
cursor_pos = len(code)
line, offset = line_at_cursor(code, cursor_pos)
line_cursor = cursor_pos - offset
txt, matches = self.shell.complete('', line, line_cursor)
return {'matches' : matches,
'cursor_end' : cursor_pos,
'cursor_start' : cursor_pos - len(txt),
'metadata' : {},
'status' : 'ok'}
def _experimental_do_complete(self, code, cursor_pos):
"""
Experimental completions from IPython, using Jedi.
"""
if cursor_pos is None:
cursor_pos = len(code)
with _provisionalcompleter():
raw_completions = self.shell.Completer.completions(code, cursor_pos)
completions = list(_rectify_completions(code, raw_completions))
comps = []
for comp in completions:
comps.append(dict(
start=comp.start,
end=comp.end,
text=comp.text,
type=comp.type,
))
if completions:
s = completions[0].start
e = completions[0].end
matches = [c.text for c in completions]
else:
s = cursor_pos
e = cursor_pos
matches = []
return {'matches': matches,
'cursor_end': e,
'cursor_start': s,
'metadata': {_EXPERIMENTAL_KEY_NAME: comps},
'status': 'ok'}
def do_inspect(self, code, cursor_pos, detail_level=0):
name = token_at_cursor(code, cursor_pos)
reply_content = {'status' : 'ok'}
reply_content['data'] = {}
reply_content['metadata'] = {}
try:
reply_content['data'].update(
self.shell.object_inspect_mime(
name,
detail_level=detail_level
)
)
if not self.shell.enable_html_pager:
reply_content['data'].pop('text/html')
reply_content['found'] = True
except KeyError:
reply_content['found'] = False
return reply_content
def do_history(self, hist_access_type, output, raw, session=0, start=0,
stop=None, n=None, pattern=None, unique=False):
if hist_access_type == 'tail':
hist = self.shell.history_manager.get_tail(n, raw=raw, output=output,
include_latest=True)
elif hist_access_type == 'range':
hist = self.shell.history_manager.get_range(session, start, stop,
raw=raw, output=output)
elif hist_access_type == 'search':
hist = self.shell.history_manager.search(
pattern, raw=raw, output=output, n=n, unique=unique)
else:
hist = []
return {
'status': 'ok',
'history' : list(hist),
}
def do_shutdown(self, restart):
self.shell.exit_now = True
return dict(status='ok', restart=restart)
def do_is_complete(self, code):
status, indent_spaces = self.shell.input_splitter.check_complete(code)
r = {'status': status}
if status == 'incomplete':
r['indent'] = ' ' * indent_spaces
return r
def do_apply(self, content, bufs, msg_id, reply_metadata):
from .serialize import serialize_object, unpack_apply_message
shell = self.shell
try:
working = shell.user_ns
prefix = "_"+str(msg_id).replace("-","")+"_"
f,args,kwargs = unpack_apply_message(bufs, working, copy=False)
fname = getattr(f, '__name__', 'f')
fname = prefix+"f"
argname = prefix+"args"
kwargname = prefix+"kwargs"
resultname = prefix+"result"
ns = { fname : f, argname : args, kwargname : kwargs , resultname : None }
# print ns
working.update(ns)
code = "%s = %s(*%s,**%s)" % (resultname, fname, argname, kwargname)
try:
exec(code, shell.user_global_ns, shell.user_ns)
result = working.get(resultname)
finally:
for key in ns:
working.pop(key)
result_buf = serialize_object(result,
buffer_threshold=self.session.buffer_threshold,
item_threshold=self.session.item_threshold,
)
except BaseException as e:
# invoke IPython traceback formatting
shell.showtraceback()
reply_content = {
u'traceback': shell._last_traceback or [],
u'ename': unicode_type(type(e).__name__),
u'evalue': safe_unicode(e),
}
# FIXME: deprecated piece for ipyparallel (remove in 5.0):
e_info = dict(engine_uuid=self.ident, engine_id=self.int_id, method='apply')
reply_content['engine_info'] = e_info
self.send_response(self.iopub_socket, u'error', reply_content,
ident=self._topic('error'))
self.log.info("Exception in apply request:\n%s", '\n'.join(reply_content['traceback']))
result_buf = []
reply_content['status'] = 'error'
else:
reply_content = {'status' : 'ok'}
return reply_content, result_buf
def do_clear(self):
self.shell.reset(False)
return dict(status='ok')
# This exists only for backwards compatibility - use IPythonKernel instead
class Kernel(IPythonKernel):
def __init__(self, *args, **kwargs):
import warnings
warnings.warn('Kernel is a deprecated alias of ipykernel.ipkernel.IPythonKernel',
DeprecationWarning)
super(Kernel, self).__init__(*args, **kwargs)
|
py | 1a3b93809093a3efba1712bb4ddb9e86fefc4b59 | from django.shortcuts import render
# Create your views here.
from django.contrib.auth.forms import UserCreationForm
from django.urls import reverse_lazy
from django.views.generic import CreateView
class SignUpView(CreateView):
form_class = UserCreationForm
success_url = reverse_lazy('login')
template_name = 'registration/signup.html' |
py | 1a3b93b3f877bf77bd188f2a09cc09ed7c285774 | _base_ = './deeplabv3plus_r50-d8_769x769_40k_cityscapes.py'
model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101))
|
py | 1a3b957f66c541b502dd035da0434e60e09e35c6 | """
Advent of Code 2020
Day 16
"""
def get_data(fname: str) -> tuple:
"""
Read the data file.
"""
with open(fname) as f:
texts = f.read().split('\n\n')
# Get the fields and all their valid values. Not space efficient,
# but there aren't that many of them.
fields = {}
for field in texts[0].split('\n'):
name, data = field.split(': ')
for pair in data.split(' or '):
mi, ma = pair.split('-')
ranges = fields.get(name, [])
ranges.extend(i for i in range(int(mi), int(ma)+1))
fields[name] = ranges
# Get my ticket.
_, data = texts[1].split('\n')
my_ticket = [int(d) for d in data.split(',')]
# Get the other tickets.
tickets = []
for ticket in texts[2].split('\n')[1:]:
tickets.append([int(t) for t in ticket.split(',')])
return fields, tickets, my_ticket
def sort_tickets(fields, tickets) -> tuple:
"""
Get the valid and invalid tickets.
"""
valid_numbers = set()
for f in fields.values():
valid_numbers.update(f)
valids, invalids = [], []
for ticket in tickets:
invalid = []
for n in ticket:
if n in valid_numbers: continue
invalid.append(n)
if invalid:
invalids.extend(invalid)
else:
valids.append(ticket)
return valids, invalids
def part1(fname: str) -> int:
"""Part 1.
Tests
>>> part1("./data/day16_test.txt")
71
"""
_, invalids = sort_tickets(*get_data(fname)[:2])
return sum(invalids)
def part2(fname: str) -> int:
"""Part 2.
This sucks. No test for now.
"""
fields, tickets, my_ticket = get_data(fname)
valids, invalids = sort_tickets(fields, tickets)
# If a field is valid, add it to a set of hypotheses
# *iff* it hasn't bene discarded before.
# If invalid, remove it from the hypotheses *forever*
# by adding it to the set of discards.
hypotheses = {k: set() for k in fields}
discards = {k: set() for k in fields}
for valid in valids:
for i, value in enumerate(valid):
for field, values in fields.items():
if value in values:
if i not in discards[field]:
hypotheses[field].add(i)
else:
hypotheses[field].discard(i)
discards[field].add(i)
# Sort the hypotheses into order, based on how many
# possibilities are in each field. Hopefully mono-
# tonically increasing.
hypotheses = {k:v for k, v in sorted(hypotheses.items(), key=lambda x: len(x[1]))}
# Now assign the certain fields in order. Each time
# we make an assignment, add the field to a list
# so we know what to ignore for future fields.
certain = {}
assigned = []
for field, hypos in hypotheses.items():
for assign in assigned:
hypos.discard(assign)
assert len(hypos) == 1
position, = hypos # Singleton set.
certain[field] = position
assigned.append(position)
# Now make the product for our ticket.
product = 1
for field, position in certain.items():
if field.startswith('departure'):
product *= my_ticket[position]
return product
if __name__ == "__main__":
import doctest
import sys
doctest.testmod(verbose=True)
fname = "./data/day16.txt"
print(f"Part 1 count: {part1(fname)}")
print(f"Part 2 product: {part2(fname)}")
|
py | 1a3b95a81eba8e20743c590c4698254e1924ff6d | from test_all_fixers import lib3to2FixerTestCase
class Test_metaclass(lib3to2FixerTestCase):
fixer = u'metaclass'
def test_unchanged(self):
self.unchanged(u"class X(): pass")
self.unchanged(u"class X(object): pass")
self.unchanged(u"class X(object1, object2): pass")
self.unchanged(u"class X(object1, object2, object3): pass")
s = u"""
class X():
def __metaclass__(self): pass
"""
self.unchanged(s)
s = u"""
class X():
a[23] = 74
"""
self.unchanged(s)
def test_comments(self):
a = u"""
class X():
# hi
__metaclass__ = AppleMeta
pass
"""
b = u"""
class X(metaclass=AppleMeta):
# hi
pass
"""
self.check(b, a)
a = u"""
class X():
__metaclass__ = Meta
pass
# Bedtime!
"""
b = u"""
class X(metaclass=Meta):
pass
# Bedtime!
"""
self.check(b, a)
def test_meta_noparent_odd_body(self):
# no-parent class, odd body
a = u"""
class X():
__metaclass__ = Q
pass
"""
b = u"""
class X(metaclass=Q):
pass
"""
self.check(b, a)
def test_meta_oneparent_no_body(self):
# one parent class, no body
a = u"""
class X(object):
__metaclass__ = Q
pass"""
b = u"""
class X(object, metaclass=Q): pass"""
self.check(b, a)
def test_meta_oneparent_simple_body_1(self):
# one parent, simple body
a = u"""
class X(object):
__metaclass__ = Meta
bar = 7
"""
b = u"""
class X(object, metaclass=Meta):
bar = 7
"""
self.check(b, a)
def test_meta_oneparent_simple_body_2(self):
a = u"""
class X():
__metaclass__ = Meta
x = 4; g = 23
"""
b = u"""
class X(metaclass=Meta):
x = 4; g = 23
"""
self.check(b, a)
def test_meta_oneparent_simple_body_3(self):
a = u"""
class X(object):
__metaclass__ = Meta
bar = 7
"""
b = u"""
class X(object, metaclass=Meta):
bar = 7
"""
self.check(b, a)
def test_meta_multiparent_simple_body_1(self):
# multiple inheritance, simple body
a = u"""
class X(clsA, clsB):
__metaclass__ = Meta
bar = 7
"""
b = u"""
class X(clsA, clsB, metaclass=Meta):
bar = 7
"""
self.check(b, a)
def test_meta_multiparent_simple_body_2(self):
# keywords in the class statement
a = u"""
class m(a, arg=23):
__metaclass__ = Meta
pass"""
b = u"""
class m(a, arg=23, metaclass=Meta):
pass"""
self.check(b, a)
def test_meta_expression_simple_body_1(self):
a = u"""
class X(expression(2 + 4)):
__metaclass__ = Meta
pass
"""
b = u"""
class X(expression(2 + 4), metaclass=Meta):
pass
"""
self.check(b, a)
def test_meta_expression_simple_body_2(self):
a = u"""
class X(expression(2 + 4), x**4):
__metaclass__ = Meta
pass
"""
b = u"""
class X(expression(2 + 4), x**4, metaclass=Meta):
pass
"""
self.check(b, a)
def test_meta_noparent_simple_body(self):
a = u"""
class X():
__metaclass__ = Meta
save.py = 23
out = 5
"""
b = u"""
class X(metaclass=Meta):
save.py = 23
out = 5
"""
self.check(b, a)
|
py | 1a3b961ad2ff02bccd6468622155cee1f0f71796 | from django.db import models
from api.models import UuidAuditedModel
class Product(UuidAuditedModel):
    name = models.CharField(max_length=63)
    resource_type = models.CharField(max_length=63)
describe = models.TextField(blank=True, null=True)
|
py | 1a3b972c027de78ce8a7d8ae5865de1b15a6f73a | #!/usr/bin/env python
#
# // SPDX-License-Identifier: BSD-3-CLAUSE
#
# (C) Copyright 2018, Xilinx, Inc.
#
import argparse
import os.path
import math
import sys
import timeit
import json
import xdnn, xdnn_io
import numpy as np
# example for multiple executors
def main(argv):
args = xdnn_io.processCommandLine(argv)
ret, handles = xdnn.createHandle(args['xclbin'], "kernelSxdnn_0")
# ret = xdnn.createHandle(g_xclbin, "kernelSxdnn_0", g_xdnnLib)
if ret != 0:
sys.exit(1)
labels = xdnn_io.get_labels(args['labels'])
# TODO dict of tuples instead?
fpgaRT = {}
fpgaOutputs = {}
fcWeights = {}
fcBiases = {}
netFiles = {}
confNames = []
args = args['jsoncfg'] # we do not use other args' keys
for netconf_args in args:
confName = str(netconf_args['name'])
confNames += [confName]
# netconf_args['netcfg'] = './data/{}_{}.json'.format(netconf_args['net'], netconf_args['dsp'])
fpgaRT[confName] = xdnn.XDNNFPGAOp(handles, netconf_args)
netconf_args['in_shape'] = tuple((netconf_args['batch_sz'],) + tuple(fpgaRT[confName].getInputDescriptors().itervalues().next()[1:] ))
(fcWeights[confName],
fcBiases[confName]) = xdnn_io.loadFCWeightsBias(netconf_args)
fpgaOutputs[confName] = np.empty ((netconf_args['batch_sz'], int(netconf_args['fpgaoutsz']),), dtype=np.float32, order='C')
netFiles[confName] = str(netconf_args['netcfg'])
batchArrays = []
for streamId, netconf_args in enumerate(args):
batchArrays.append(np.empty(netconf_args['in_shape'], dtype=np.float32, order='C'))
pl = []
img_paths = xdnn_io.getFilePaths(netconf_args['images'])
for j, p in enumerate(img_paths[:netconf_args['batch_sz']]):
batchArrays[-1][j, ...], _ = xdnn_io.loadImageBlobFromFile(p, netconf_args['img_raw_scale'],
netconf_args['img_mean'],
netconf_args['img_input_scale'],
netconf_args['in_shape'][2],
netconf_args['in_shape'][3])
pl.append(p)
confName = str(netconf_args['name'])
firstInputName = fpgaRT[confName].getInputs().iterkeys().next()
firstOutputName = fpgaRT[confName].getOutputs().iterkeys().next()
fpgaRT[confName].exec_async({ firstInputName : batchArrays[-1] }, { firstOutputName : fpgaOutputs[confName] }, streamId)
for streamId, confName in enumerate(confNames):
fpgaRT[confName].get_result (streamId)
for netconf_args in args:
confName = str(netconf_args['name'])
fcOut = np.empty( (netconf_args['batch_sz'], netconf_args['outsz']), dtype=np.float32, order = 'C')
xdnn.computeFC (fcWeights[confName], fcBiases[confName], fpgaOutputs[confName], fcOut)
softmaxOut = xdnn.computeSoftmax(fcOut)
xdnn_io.printClassification(softmaxOut, netconf_args['images'], labels);
xdnn.closeHandle()
if __name__ == '__main__':
argv = None
'''
import os
import re
XCLBIN_PATH = os.environ['XCLBIN_PATH']
DSP_WIDTH = 56
BITWIDTH = 8
argv = "--xclbin {0}/xdnn_v2_32x{1}_{2}pe_{3}b_{4}mb_bank21.xclbin \
--labels synset_words.txt \
--jsoncfg data/multinet.json".format(XCLBIN_PATH, DSP_WIDTH, 112/DSP_WIDTH, BITWIDTH, 2+DSP_WIDTH/14)
argv = re.split(r'(?<!,)\s+', argv)
'''
main(argv)
|
py | 1a3b977d7208be50728b6ec8218f2bf3ee63d7d5 | # This file is part of the Reproducible Open Benchmarks for Data Analysis
# Platform (ROB).
#
# Copyright (C) [2019-2020] NYU.
#
# ROB is free software; you can redistribute it and/or modify it under the
# terms of the MIT License; see LICENSE file for more details.
"""Information about the current version of the ROB platform."""
__version__ = '0.2.0'
|
py | 1a3b98b685defe9fb9e520905d6966e23ac37c3f | import asyncio
from decimal import Decimal
from os.path import join
from typing import Any, List, TYPE_CHECKING
import pandas as pd
import hummingbot.client.config.global_config_map as global_config
from hummingbot.client.config.config_helpers import missing_required_configs, save_to_yml
from hummingbot.client.config.config_validators import validate_bool, validate_decimal
from hummingbot.client.config.config_var import ConfigVar
from hummingbot.client.config.security import Security
from hummingbot.client.settings import CONF_FILE_PATH, GLOBAL_CONFIG_PATH
from hummingbot.client.ui.interface_utils import format_df_for_printout
from hummingbot.client.ui.style import load_style
from hummingbot.core.utils import map_df_to_str
from hummingbot.core.utils.async_utils import safe_ensure_future
from hummingbot.model.inventory_cost import InventoryCost
from hummingbot.strategy.perpetual_market_making import PerpetualMarketMakingStrategy
from hummingbot.strategy.pure_market_making import PureMarketMakingStrategy
from hummingbot.user.user_balances import UserBalances
if TYPE_CHECKING:
from hummingbot.client.hummingbot_application import HummingbotApplication
no_restart_pmm_keys_in_percentage = ["bid_spread", "ask_spread", "order_level_spread", "inventory_target_base_pct"]
no_restart_pmm_keys = ["order_amount",
"order_levels",
"filled_order_delay",
"inventory_skew_enabled",
"inventory_range_multiplier",
"price_ceiling",
"price_floor",
"moving_price_band_enabled",
"price_ceiling_pct",
"price_floor_pct",
"price_band_refresh_time"
]
global_configs_to_display = ["autofill_import",
"kill_switch_enabled",
"kill_switch_rate",
"telegram_enabled",
"telegram_token",
"telegram_chat_id",
"send_error_logs",
global_config.PMM_SCRIPT_ENABLED_KEY,
global_config.PMM_SCRIPT_FILE_PATH_KEY,
"ethereum_chain_name",
"gateway_enabled",
"gateway_cert_passphrase",
"gateway_api_host",
"gateway_api_port",
"rate_oracle_source",
"global_token",
"global_token_symbol",
"rate_limits_share_pct",
"create_command_timeout",
"other_commands_timeout",
"tables_format"]
color_settings_to_display = ["top-pane",
"bottom-pane",
"output-pane",
"input-pane",
"logs-pane",
"terminal-primary"]
class ConfigCommand:
def config(self, # type: HummingbotApplication
key: str = None,
value: str = None):
self.app.clear_input()
if key is None:
self.list_configs()
return
else:
if key not in self.config_able_keys():
self.notify("Invalid key, please choose from the list.")
return
safe_ensure_future(self._config_single_key(key, value), loop=self.ev_loop)
def list_configs(self, # type: HummingbotApplication
):
columns = ["Key", " Value"]
data = [[cv.key, cv.value] for cv in global_config.global_config_map.values()
if cv.key in global_configs_to_display and not cv.is_secure]
df = map_df_to_str(pd.DataFrame(data=data, columns=columns))
self.notify("\nGlobal Configurations:")
lines = [" " + line for line in format_df_for_printout(df, max_col_width=50).split("\n")]
self.notify("\n".join(lines))
data = [[cv.key, cv.value] for cv in global_config.global_config_map.values()
if cv.key in color_settings_to_display and not cv.is_secure]
df = map_df_to_str(pd.DataFrame(data=data, columns=columns))
self.notify("\nColor Settings:")
lines = [" " + line for line in format_df_for_printout(df, max_col_width=50).split("\n")]
self.notify("\n".join(lines))
if self.strategy_name is not None:
data = [[cv.printable_key or cv.key, cv.value] for cv in self.strategy_config_map.values() if not cv.is_secure]
df = map_df_to_str(pd.DataFrame(data=data, columns=columns))
self.notify("\nStrategy Configurations:")
lines = [" " + line for line in format_df_for_printout(df, max_col_width=50).split("\n")]
self.notify("\n".join(lines))
def config_able_keys(self # type: HummingbotApplication
) -> List[str]:
"""
Returns a list of configurable keys - using config command, excluding exchanges api keys
as they are set from connect command.
"""
keys = [c.key for c in global_config.global_config_map.values() if c.prompt is not None and not c.is_connect_key]
if self.strategy_config_map is not None:
keys += [c.key for c in self.strategy_config_map.values() if c.prompt is not None]
return keys
async def check_password(self, # type: HummingbotApplication
):
password = await self.app.prompt(prompt="Enter your password >>> ", is_password=True)
if password != Security.password:
self.notify("Invalid password, please try again.")
return False
else:
return True
# Make this function static so unit testing can be performed.
@staticmethod
def update_running_mm(mm_strategy, key: str, new_value: Any):
if key in no_restart_pmm_keys_in_percentage:
setattr(mm_strategy, key, new_value / Decimal("100"))
return True
elif key in no_restart_pmm_keys:
setattr(mm_strategy, key, new_value)
return True
return False
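    # Illustrative call (the strategy object and the new value are assumptions):
    # for a running pure market making strategy,
    #   ConfigCommand.update_running_mm(strategy, "bid_spread", Decimal("1.0"))
    # stores Decimal("0.01") on the strategy, because keys listed in
    # no_restart_pmm_keys_in_percentage are divided by 100 before being applied.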
async def _config_single_key(self, # type: HummingbotApplication
key: str,
input_value):
"""
Configure a single variable only.
Prompt the user to finish all configurations if there are remaining empty configs at the end.
"""
self.placeholder_mode = True
self.app.hide_input = True
try:
config_var, config_map, file_path = None, None, None
if key in global_config.global_config_map:
config_map = global_config.global_config_map
file_path = GLOBAL_CONFIG_PATH
elif self.strategy_config_map is not None and key in self.strategy_config_map:
config_map = self.strategy_config_map
file_path = join(CONF_FILE_PATH, self.strategy_file_name)
config_var = config_map[key]
if input_value is None:
self.notify("Please follow the prompt to complete configurations: ")
if config_var.key == "inventory_target_base_pct":
await self.asset_ratio_maintenance_prompt(config_map, input_value)
elif config_var.key == "inventory_price":
await self.inventory_price_prompt(config_map, input_value)
else:
await self.prompt_a_config(config_var, input_value=input_value, assign_default=False)
if self.app.to_stop_config:
self.app.to_stop_config = False
return
await self.update_all_secure_configs()
missings = missing_required_configs(config_map)
if missings:
self.notify("\nThere are other configuration required, please follow the prompt to complete them.")
missings = await self._prompt_missing_configs(config_map)
save_to_yml(file_path, config_map)
self.notify("\nNew configuration saved:")
self.notify(f"{key}: {str(config_var.value)}")
self.app.app.style = load_style()
for config in missings:
self.notify(f"{config.key}: {str(config.value)}")
if isinstance(self.strategy, PureMarketMakingStrategy) or \
isinstance(self.strategy, PerpetualMarketMakingStrategy):
updated = ConfigCommand.update_running_mm(self.strategy, key, config_var.value)
if updated:
self.notify(f"\nThe current {self.strategy_name} strategy has been updated "
f"to reflect the new configuration.")
except asyncio.TimeoutError:
self.logger().error("Prompt timeout")
except Exception as err:
self.logger().error(str(err), exc_info=True)
finally:
self.app.hide_input = False
self.placeholder_mode = False
self.app.change_prompt(prompt=">>> ")
async def _prompt_missing_configs(self, # type: HummingbotApplication
config_map):
missings = missing_required_configs(config_map)
for config in missings:
await self.prompt_a_config(config)
if self.app.to_stop_config:
self.app.to_stop_config = False
return
if missing_required_configs(config_map):
return missings + (await self._prompt_missing_configs(config_map))
return missings
async def asset_ratio_maintenance_prompt(self, # type: HummingbotApplication
config_map,
input_value = None):
if input_value:
config_map['inventory_target_base_pct'].value = Decimal(input_value)
else:
exchange = config_map['exchange'].value
market = config_map["market"].value
base, quote = market.split("-")
balances = await UserBalances.instance().balances(exchange, base, quote)
if balances is None:
return
base_ratio = await UserBalances.base_amount_ratio(exchange, market, balances)
if base_ratio is None:
return
base_ratio = round(base_ratio, 3)
quote_ratio = 1 - base_ratio
base, quote = config_map["market"].value.split("-")
cvar = ConfigVar(key="temp_config",
prompt=f"On {exchange}, you have {balances.get(base, 0):.4f} {base} and "
f"{balances.get(quote, 0):.4f} {quote}. By market value, "
f"your current inventory split is {base_ratio:.1%} {base} "
f"and {quote_ratio:.1%} {quote}."
f" Would you like to keep this ratio? (Yes/No) >>> ",
required_if=lambda: True,
type_str="bool",
validator=validate_bool)
await self.prompt_a_config(cvar)
if cvar.value:
config_map['inventory_target_base_pct'].value = round(base_ratio * Decimal('100'), 1)
else:
if self.app.to_stop_config:
self.app.to_stop_config = False
return
await self.prompt_a_config(config_map["inventory_target_base_pct"])
async def inventory_price_prompt(
self, # type: HummingbotApplication
config_map,
input_value=None,
):
key = "inventory_price"
if input_value:
config_map[key].value = Decimal(input_value)
else:
exchange = config_map["exchange"].value
market = config_map["market"].value
base_asset, quote_asset = market.split("-")
if exchange.endswith("paper_trade"):
balances = global_config.global_config_map["paper_trade_account_balance"].value
else:
balances = await UserBalances.instance().balances(
exchange, base_asset, quote_asset
)
if balances.get(base_asset) is None:
return
cvar = ConfigVar(
key="temp_config",
prompt=f"On {exchange}, you have {balances[base_asset]:.4f} {base_asset}. "
f"What was the price for this amount in {quote_asset}? >>> ",
required_if=lambda: True,
type_str="decimal",
validator=lambda v: validate_decimal(
v, min_value=Decimal("0"), inclusive=True
),
)
await self.prompt_a_config(cvar)
config_map[key].value = cvar.value
try:
quote_volume = balances[base_asset] * cvar.value
except TypeError:
# TypeError: unsupported operand type(s) for *: 'decimal.Decimal' and 'NoneType' - bad input / no input
self.notify("Inventory price not updated due to bad input")
return
with self.trade_fill_db.get_new_session() as session:
with session.begin():
InventoryCost.add_volume(
session,
base_asset=base_asset,
quote_asset=quote_asset,
base_volume=balances[base_asset],
quote_volume=quote_volume,
overwrite=True,
)
|
py | 1a3b99312f5210c2c7a3e06f6b164f7a4bb2c91e | """File to hook into gunicorn for production deployments."""
from aflux_assurance_server import app
if __name__ == '__main__':
app.run() |
py | 1a3b9990ce2c803e3efe16fe8b2563246e4f2cbc | import tensorflow as tf
def main():
converter = tf.lite.TFLiteConverter.from_frozen_graph('../pb/frozen_shape_28.pb',
['new_input_node'], ['final_dense/MatMul'])
converter.optimizations = [tf.lite.Optimize.OPTIMIZE_FOR_LATENCY]
tflite_model = converter.convert()
with open("../tflite/model.lite", "wb") as f:
f.write(tflite_model)
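# A rough sketch of loading the converted model back for inference; the path is
# the one written above, the rest is standard TF Lite interpreter usage:
#   interpreter = tf.lite.Interpreter(model_path="../tflite/model.lite")
#   interpreter.allocate_tensors()
#   input_details = interpreter.get_input_details()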
if __name__ == '__main__':
main()
|
py | 1a3b9bc96bb0c5d25fcfceb4bcda6fc886cf39ba | import logging
import pickle
import random
from voxpopuli import Voice, PhonemeList
from typing import Union, Dict, List
from random import randint
from distance import levenshtein
from katalixia.tools import weighted_choice
class TreeNode:
def __init__(self):
self.children = dict() # type:Dict[str,Union[TreeNode, Leaf]]
self.leaves = list() # type:List[Leaf]
self.child_leaves_count = 0
def __getitem__(self, item):
return self.children[item]
@property
def total_leaves_count(self):
return len(self.leaves) + self.child_leaves_count
def insert(self, leaf: 'Leaf', current_pho_index):
try:
leaf_current_pho = leaf.phonemes[-current_pho_index]
except IndexError: # if this leaf has "no more" phonems to unstack, it's stored on this node's leaves
self.leaves.append(leaf)
return
if leaf_current_pho not in self.children:
self.children[leaf_current_pho] = leaf
else:
current_child = self.children[leaf_current_pho]
if isinstance(current_child, Leaf): # creating the new node
new_node = TreeNode()
new_node.insert(current_child, current_pho_index + 1)
new_node.insert(leaf, current_pho_index + 1)
self.children[leaf_current_pho] = new_node
elif isinstance(current_child, TreeNode):
current_child.insert(leaf, current_pho_index + 1)
self.child_leaves_count += 1
def find_random(self):
if self.leaves and (randint(0, self.child_leaves_count + len(self.leaves)) >= self.child_leaves_count
or not self.children):
return random.choice(self.leaves)
else:
children_list, weights = zip(*[(child, child.total_leaves_count) for child in self.children.values()])
rnd_child = weighted_choice(children_list, weights)
return rnd_child.find_random()
def find(self, phoneme_list: PhonemeList, original_string : str):
"""Recursively, through the tree, tries to find a good rhyme that is *not* equal to the input word
(here passed as an argument in original string"""
if not phoneme_list:
return self.find_random()
current_pho = phoneme_list.pop()
if current_pho in self.children:
current_child = self.children[current_pho]
curr_child_output = current_child.find(phoneme_list, original_string)
if curr_child_output is not None:
return curr_child_output
rnd_child = self.find_random()
if isinstance(rnd_child, Leaf) and levenshtein(seq1=original_string, seq2=rnd_child.text) <= 2:
return None
else:
return rnd_child #nothing worked
def to_dict(self):
return {"children": {pho: child.to_dict() for pho, child in self.children.items()},
"leaves": [leaf.text for leaf in self.leaves]}
class RhymeTree(TreeNode):
def __init__(self, rhyming_lang="fr"):
super().__init__()
self.voice = Voice(lang=rhyming_lang)
self.children = dict() # type:Dict[str,Union[TreeNode, Leaf]]
def insert_rhyme(self, rhyme_string, data=None):
new_leaf = Leaf.from_string(rhyme_string.strip(), self.voice)
if new_leaf is not None:
if data is not None:
new_leaf.data = data
self.insert(new_leaf, 1)
else:
logging.warning("Word '%s' returned empty phoneme" % rhyme_string)
def find_rhyme(self, string):
string_phonemes = Leaf.clean_silences([pho.name for pho in self.voice.to_phonemes(string)])
current_pho = string_phonemes.pop()
if current_pho not in self.children:
return None
else:
return self.children[current_pho].find(string_phonemes, string)
def save(self, filepath):
with open(filepath, "wb") as picklefile:
pickle.dump(self, picklefile)
@classmethod
def from_pickle(cls, pickle_filepath):
with open(pickle_filepath, "rb") as picklefile:
return pickle.load(picklefile)
@classmethod
def from_text_file(cls, textfile_filepath, lang="fr", separator=None):
separator = separator if separator is not None else "\n"
with open(textfile_filepath) as file:
all_strings = file.read().split(separator)
return cls.from_word_list(all_strings, lang)
@classmethod
def from_word_list(cls, input_list, lang="fr"):
tree = cls(lang)
for string in input_list:
tree.insert_rhyme(string)
return tree
def to_dict(self):
return {pho : child.to_dict() for pho, child in self.children.items()}
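# Minimal usage sketch (assumes voxpopuli can synthesize the chosen language;
# the words below are only illustrative):
#   tree = RhymeTree.from_word_list(["bateau", "chapeau", "marteau"])
#   print(tree.find_rhyme("gâteau"))  # prints a stored rhyme, or None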
class Leaf:
def __init__(self, string, phonemic_form):
self.text = string
self.phonemes = phonemic_form # type:List[str]
self.total_leaves_count = 1 # here for recursion in the tree
self.data = None
def __repr__(self):
return "Leaf( %s )" % self.text
def __str__(self):
return self.text
@staticmethod
def clean_silences(phoneme_list):
while phoneme_list and phoneme_list[-1] == "_":
phoneme_list.pop()
return phoneme_list
@classmethod
def from_string(cls, string, voxpopuli_voice):
phonemes_list = [pho.name for pho in voxpopuli_voice.to_phonemes(string)]
try:
return cls(string, cls.clean_silences(phonemes_list))
except IndexError:
return None
def to_dict(self):
return self.text
def find(self, phoneme_list: PhonemeList, original_string : str):
return self if levenshtein(seq1=original_string, seq2=self.text) >= 2 else None
def find_random(self):
return self |
py | 1a3b9be7dc9e0cb864fec3f56f3127f9006883b5 | import asyncio
from h2client.diskcached_connection import DiskcachedConnection
import io
import time
USER_AGENT = 'H2ClientExamples/1 by /u/Tjstretchalot (+https://github.com/tjstretchalot/h2client)'
async def main():
dc_conn = DiskcachedConnection('postman-echo.com')
print('Performing a GET request')
out = io.BytesIO()
start_time = time.time()
headers = await dc_conn.get(
'/get',
{
'user-agent': USER_AGENT,
'accept': 'application/json'
},
out
)
total_time = time.time() - start_time
print(f'Finished GET request in {total_time} seconds')
print('Headers:')
for key, val in headers.items():
print(f' {key}: {val}')
print()
print('Body:')
pretty_body = out.getvalue().decode('utf-8')
print(pretty_body)
await dc_conn.close()
if __name__ == '__main__':
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
loop.run_until_complete(main())
pending = asyncio.all_tasks(loop)
while pending:
loop.run_until_complete(asyncio.wait(pending, return_when=asyncio.ALL_COMPLETED))
pending = asyncio.all_tasks(loop)
|
py | 1a3b9bec7735cf6b370b1e33fd3549b5bd851555 | """Useful mocks for unit testing."""
from __future__ import absolute_import, unicode_literals
import numbers
from datetime import datetime, timedelta
try:
from case import Mock
except ImportError:
try:
from unittest.mock import Mock
except ImportError:
from mock import Mock
def TaskMessage(
name, # type: str
id=None, # type: str
args=(), # type: Sequence
kwargs=None, # type: Mapping
callbacks=None, # type: Sequence[Signature]
errbacks=None, # type: Sequence[Signature]
chain=None, # type: Sequence[Signature]
shadow=None, # type: str
utc=None, # type: bool
**options # type: Any
):
# type: (...) -> Any
"""Create task message in protocol 2 format."""
kwargs = {} if not kwargs else kwargs
from celery import uuid
from kombu.serialization import dumps
id = id or uuid()
message = Mock(name='TaskMessage-{0}'.format(id))
message.headers = {
'id': id,
'task': name,
'shadow': shadow,
}
embed = {'callbacks': callbacks, 'errbacks': errbacks, 'chain': chain}
message.headers.update(options)
message.content_type, message.content_encoding, message.body = dumps(
(args, kwargs, embed), serializer='json',
)
message.payload = (args, kwargs, embed)
return message
def TaskMessage1(
name, # type: str
id=None, # type: str
args=(), # type: Sequence
kwargs=None, # type: Mapping
callbacks=None, # type: Sequence[Signature]
errbacks=None, # type: Sequence[Signature]
    chain=None, # type: Sequence[Signature]
**options # type: Any
):
# type: (...) -> Any
"""Create task message in protocol 1 format."""
kwargs = {} if not kwargs else kwargs
from celery import uuid
from kombu.serialization import dumps
id = id or uuid()
message = Mock(name='TaskMessage-{0}'.format(id))
message.headers = {}
message.payload = {
'task': name,
'id': id,
'args': args,
'kwargs': kwargs,
'callbacks': callbacks,
'errbacks': errbacks,
}
message.payload.update(options)
message.content_type, message.content_encoding, message.body = dumps(
message.payload,
)
return message
def task_message_from_sig(app, sig, utc=True, TaskMessage=TaskMessage):
# type: (Celery, Signature, bool, Any) -> Any
"""Create task message from :class:`celery.Signature`.
Example:
>>> m = task_message_from_sig(app, add.s(2, 2))
>>> amqp_client.basic_publish(m, exchange='ex', routing_key='rkey')
"""
sig.freeze()
callbacks = sig.options.pop('link', None)
errbacks = sig.options.pop('link_error', None)
countdown = sig.options.pop('countdown', None)
if countdown:
eta = app.now() + timedelta(seconds=countdown)
else:
eta = sig.options.pop('eta', None)
if eta and isinstance(eta, datetime):
eta = eta.isoformat()
expires = sig.options.pop('expires', None)
if expires and isinstance(expires, numbers.Real):
expires = app.now() + timedelta(seconds=expires)
if expires and isinstance(expires, datetime):
expires = expires.isoformat()
return TaskMessage(
sig.task, id=sig.id, args=sig.args,
kwargs=sig.kwargs,
callbacks=[dict(s) for s in callbacks] if callbacks else None,
errbacks=[dict(s) for s in errbacks] if errbacks else None,
eta=eta,
expires=expires,
utc=utc,
**sig.options
)
|
py | 1a3b9c9ac39870a566de862beadd1571faad21b7 | #!/usr/bin/env python
# encoding: utf-8
import os
import six
import sys
import unittest
sys.path.insert(0, os.path.join(os.path.dirname(__file__), ".."))
import pfp
import pfp.interp
import pfp.utils
import utils
class TestCompatStrings(utils.PfpTestCase):
def setUp(self):
pfp.interp.Endian.current = pfp.interp.Endian.BIG
def tearDown(self):
pass
def test_strlen(self):
dom = self._test_parse_build(
"",
"""
Printf("%d.%d.%d", Strlen("HELLO"), Strlen("abcd"), Strlen("abc"));
""",
stdout="5.4.3",
)
def test_substr(self):
dom = self._test_parse_build(
"",
"""
Printf("%s\\n", SubStr("Hello there", 0, 5));
string local someString = "abcdefg";
Printf("%s", SubStr(someString, 3));
""",
stdout="Hello\ndefg",
)
if __name__ == "__main__":
unittest.main()
|
py | 1a3b9e72857aa512731d134829b2787248dca0ce | """onlyforme URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
urlpatterns = [
path('admin/', admin.site.urls),
]
|
py | 1a3b9e7896855367fed784689bd9530309c0c86d | #!/usr/bin/env python2
#-*- coding:utf-8 -*-
from docopt import docopt
from routine import parse_char
from charset import get_charset
class ArgError(Exception):
pass
def parse_parameters(doc, version):
p = docopt(doc, version=version)
p = {k.lstrip("-"): v for k, v in p.items()}
try:
return {
"input_is_hex": bool(p["hex"]),
"max_key_length": int(p["max-keylen"]),
"known_key_length": int(p["key-length"]) if p["key-length"] else None,
"most_frequent_char": parse_char(p["char"]) if p["char"] else None,
"brute_chars": bool(p["brute-chars"]),
"brute_printable": bool(p["brute-printable"]),
"text_charset": get_charset(p["text-charset"]),
"frequency_spread": 0, # to be removed
"filename": p["FILE"] if p["FILE"] else "-", # stdin by default
"filter_output": bool(p["filter-output"]),
}
except ValueError as err:
raise ArgError(str(err))
|
py | 1a3b9ebf5aaa55585b5cea9c0408720e56b6d148 | from .blackbox_attack import BlackBoxAttack
|
py | 1a3b9ec52ea968d5b1ea7317fb7be558775f6f08 | """
The Proxy is a structural design pattern whose intent is
to provide a surrogate object that acts as if it were the
real object the client code would like to use.
The proxy receives the requests and has control over how
and when to forward those requests to the real object.
Based on how proxies are used, we classify them as:
- Virtual Proxy: controls access to resources that may be
expensive to create or use.
- Remote Proxy: controls access to resources that live on
remote servers.
- Protection Proxy: controls access to resources that may
require authentication or permission.
- Smart Proxy: besides controlling access to the real
object, it also performs additional tasks to decide when
and how to execute certain actions.
Proxies can do many different things: create logs,
authenticate users, distribute services, cache data,
create and destroy objects, defer executions, and much
more...
"""
from __future__ import annotations
from abc import ABC, abstractmethod
from time import sleep
from typing import Dict, List
class IUser(ABC):
"""Subject Interface"""
firstname: str
lastname: str
@abstractmethod
def get_addresses(self) -> List[Dict]:
pass
@abstractmethod
def get_all_user_data(self) -> Dict:
pass
class RealUser(IUser):
"""Real Subject"""
def __init__(self, firstname: str, lastname: str) -> None:
        sleep(2) # Simulating a request
self.firstname = firstname
self.lastname = lastname
def get_addresses(self) -> List[Dict]:
        sleep(2) # Simulating a request
return [{"rua": "Av. Brasil", "numero": 500}]
def get_all_user_data(self) -> Dict:
        sleep(2) # Simulating a request
return {"cpf": "111.111.111-11", "rg": "AB111222444"}
class UserProxy(IUser):
"""Proxy"""
def __init__(self, firstname: str, lastname: str) -> None:
self.firstname = firstname
self.lastname = lastname
        # These objects do not exist yet
        # at this point in the code
self._real_user: RealUser
self._cached_addresses: List[Dict]
self._all_user_data: Dict
def get_real_user(self) -> None:
if not hasattr(self, "_real_user"):
self._real_user = RealUser(self.firstname, self.lastname)
def get_addresses(self) -> List[Dict]:
self.get_real_user()
if not hasattr(self, "_cached_addresses"):
self._cached_addresses = self._real_user.get_addresses()
return self._cached_addresses
def get_all_user_data(self) -> Dict:
self.get_real_user()
if not hasattr(self, "_all_user_data"):
self._all_user_data = self._real_user.get_all_user_data()
return self._all_user_data
if __name__ == "__main__":
luiz = UserProxy("Luiz", "Otávio")
    # Responds instantly
print(luiz.firstname)
print(luiz.lastname)
    # Responds in 6 seconds because it comes from the real subject
print(luiz.get_all_user_data())
print(luiz.get_addresses())
    # Responds instantly (because the result is cached)
print("CACHED DATA:")
for i in range(50):
print(luiz.get_addresses())
|
py | 1a3b9ee15c1a8e6d971885d63c58cf89955a5f9f | """
Add token.client_id column
Revision ID: c36369fe730f
Revises: e15e47228c43
Create Date: 2016-10-19 15:24:13.387546
"""
from __future__ import unicode_literals
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
revision = 'c36369fe730f'
down_revision = 'e15e47228c43'
def upgrade():
op.add_column('token', sa.Column(
'authclient_id',
postgresql.UUID(),
sa.ForeignKey('authclient.id', ondelete='cascade'),
nullable=True,
))
def downgrade():
op.drop_column('token', 'authclient_id')
|
py | 1a3b9f1c9f34853b6215575100f2c2d3f778b391 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
## import sys
## sys.path.insert(0, "/home/scott/Dropbox/codes/pyrotein")
## sys.path.insert(0, "/Users/scott/Dropbox/codes/pyrotein")
import os
import numpy as np
import pyrotein as pr
from loaddata import load_xlsx, label_TMs
from display import plot_dmat
import multiprocessing as mp
# [[[ OBTAIN THE CONSENSUS SEQUENCE ]]]
# Read the sequence alignment result...
# [WARNING] !!!sequence alignment is not trustworthy
fl_aln = 'seq.align.fasta'
seq_dict = pr.fasta.read(fl_aln)
# Obtain the consensus sequence (super seq)...
tally_dict = pr.fasta.tally_resn_in_seqs(seq_dict)
super_seq = pr.fasta.infer_super_seq(tally_dict)
# [[[ FIND SIZE OF DISTANCE MATRIX ]]]
# Get the sequence index (alignment) on the n-term side...
nseqi = pr.fasta.get_lseqi(super_seq)
# User defined range...
nterm, cterm = 1, 322
len_seg = cterm - nterm + 1
super_seg = super_seq[nseqi : nseqi + len_seg]
# [[[ ANALYZE PDB ENTRIES ]]]
# Specify chains to process...
fl_chain = "chains.comp.xlsx"
lines = load_xlsx(fl_chain, sheet = "Sheet1")
drc = "pdb"
drc_dmat = "dmats.full"
pal = '''
set palette negative defined ( \
0 '#D53E4F',\
1 '#F46D43',\
2 '#FDAE61',\
3 '#FEE08B',\
4 '#E6F598',\
5 '#ABDDA4',\
6 '#66C2A5',\
7 '#3288BD' )
'''
for i_fl, line in enumerate(lines[-1:]):
# Unpack parameters
_, pdb, chain, _ = line[:4]
# Read coordinates from a PDB file...
fl_pdb = f"{pdb}.pdb"
pdb_path = os.path.join(drc, fl_pdb)
atoms_pdb = pr.atom.read(pdb_path)
# Create a lookup table for this pdb...
atom_dict = pr.atom.create_lookup_table(atoms_pdb)
# Build a list of (resi, resn, atom)...
label_list = pr.utils.label_dmat(super_seg, nterm, cterm)
# Print the labels...
fl_dmat = os.path.join(drc_dmat, f"{pdb}.{chain}.dmat")
dist_list = pr.utils.read_file(f"{fl_dmat}.dat", numerical = True)
for (x, y, _) in dist_list:
print(f"{label_list[int(x)]}, {label_list[int(y)]}")
|
py | 1a3b9fb4ecb2126627f196ea81a06b7c6be29982 | from django.apps import AppConfig
class CatalogueConfig(AppConfig):
default_auto_field = 'django.db.models.BigAutoField'
name = 'webflix.apps.catalogue'
|
py | 1a3b9fdbbe65e35edbd09c6002eb6cb43d095820 | from __future__ import division
import torch
import numpy as np
import os.path as osp
from mmcv.runner import load_checkpoint
from mmcv.parallel import MMDataParallel
from vegcn.datasets import build_dataset
from vegcn.deduce import peaks_to_labels
from lgcn.datasets import build_dataloader
from utils import (list2dict, write_meta, mkdir_if_no_exists, Timer)
from evaluation import evaluate, accuracy
def output_accuracy(output, labels):
preds = output.max(1)[1].type_as(labels)
correct = preds.eq(labels).double()
correct = correct.sum()
return correct / len(labels)
def test(model, dataset, cfg, logger):
if cfg.load_from:
print('load from {}'.format(cfg.load_from))
load_checkpoint(model, cfg.load_from, strict=True, logger=logger)
losses = []
accs = []
pred_conns = []
max_lst = []
multi_max = []
if cfg.gpus == 1:
data_loader = build_dataloader(dataset,
cfg.batch_size_per_gpu,
cfg.workers_per_gpu,
train=False)
size = len(data_loader)
model = MMDataParallel(model, device_ids=range(cfg.gpus))
if cfg.cuda:
model.cuda()
model.eval()
for i, data in enumerate(data_loader):
with torch.no_grad():
output, loss = model(data, return_loss=True)
if not dataset.ignore_label:
labels = data[2].view(-1)
if not cfg.regressor:
acc = output_accuracy(output, labels)
accs += [acc.item()]
losses += [loss.item()]
if not cfg.regressor:
output = output[:, 1]
if cfg.max_conn == 1:
output_max = output.max()
pred = (output == output_max).nonzero().view(-1)
pred_size = len(pred)
if pred_size > 1:
multi_max.append(pred_size)
pred_i = np.random.choice(np.arange(pred_size))
else:
pred_i = 0
pred = [int(pred[pred_i].detach().cpu().numpy())]
max_lst.append(output_max.detach().cpu().numpy())
elif cfg.max_conn > 1:
output = output.detach().cpu().numpy()
pred = output.argpartition(cfg.max_conn)[:cfg.max_conn]
pred_conns.append(pred)
if i % cfg.log_config.interval == 0:
if dataset.ignore_label:
logger.info('[Test] Iter {}/{}'.format(i, size))
else:
logger.info('[Test] Iter {}/{}: Loss {:.4f}'.format(
i, size, loss))
else:
raise NotImplementedError
if not dataset.ignore_label:
avg_loss = sum(losses) / len(losses)
logger.info('[Test] Overall Loss {:.4f}'.format(avg_loss))
if not cfg.regressor:
avg_acc = sum(accs) / len(accs)
logger.info('[Test] Overall Accuracy {:.4f}'.format(avg_acc))
if size > 0:
logger.info('max val: mean({:.2f}), max({:.2f}), min({:.2f})'.format(
sum(max_lst) / size, max(max_lst), min(max_lst)))
multi_max_size = len(multi_max)
if multi_max_size > 0:
logger.info('multi-max({:.2f}): mean({:.1f}), max({}), min({})'.format(
1. * multi_max_size / size,
sum(multi_max) / multi_max_size, max(multi_max), min(multi_max)))
return np.array(pred_conns)
def test_gcn_e(model, cfg, logger):
for k, v in cfg.model['kwargs'].items():
setattr(cfg.test_data, k, v)
dataset = build_dataset(cfg.model['type'], cfg.test_data)
pred_peaks = dataset.peaks
pred_dist2peak = dataset.dist2peak
ofn_pred = osp.join(cfg.work_dir, 'pred_conns.npz')
if osp.isfile(ofn_pred) and not cfg.force:
data = np.load(ofn_pred)
pred_conns = data['pred_conns']
inst_num = data['inst_num']
if inst_num != dataset.inst_num:
logger.warn(
'instance number in {} is different from dataset: {} vs {}'.
format(ofn_pred, inst_num, len(dataset)))
else:
if cfg.random_conns:
pred_conns = []
for nbr, dist, idx in zip(dataset.subset_nbrs,
dataset.subset_dists,
dataset.subset_idxs):
for _ in range(cfg.max_conn):
pred_rel_nbr = np.random.choice(np.arange(len(nbr)))
pred_abs_nbr = nbr[pred_rel_nbr]
pred_peaks[idx].append(pred_abs_nbr)
pred_dist2peak[idx].append(dist[pred_rel_nbr])
pred_conns.append(pred_rel_nbr)
pred_conns = np.array(pred_conns)
else:
pred_conns = test(model, dataset, cfg, logger)
for pred_rel_nbr, nbr, dist, idx in zip(pred_conns,
dataset.subset_nbrs,
dataset.subset_dists,
dataset.subset_idxs):
pred_abs_nbr = nbr[pred_rel_nbr]
pred_peaks[idx].extend(pred_abs_nbr)
pred_dist2peak[idx].extend(dist[pred_rel_nbr])
inst_num = dataset.inst_num
if len(pred_conns) > 0:
logger.info(
'pred_conns (nbr order): mean({:.1f}), max({}), min({})'.format(
pred_conns.mean(), pred_conns.max(), pred_conns.min()))
if not dataset.ignore_label and cfg.eval_interim:
subset_gt_labels = dataset.subset_gt_labels
for i in range(cfg.max_conn):
pred_peaks_labels = np.array([
dataset.idx2lb[pred_peaks[idx][i]]
for idx in dataset.subset_idxs
])
acc = accuracy(pred_peaks_labels, subset_gt_labels)
logger.info(
'[{}-th] accuracy of pred_peaks labels ({}): {:.4f}'.format(
i, len(pred_peaks_labels), acc))
# the rule for nearest nbr is only appropriate when nbrs is sorted
nearest_idxs = np.where(pred_conns[:, i] == 0)[0]
acc = accuracy(pred_peaks_labels[nearest_idxs],
subset_gt_labels[nearest_idxs])
logger.info(
'[{}-th] accuracy of pred labels (nearest: {}): {:.4f}'.format(
i, len(nearest_idxs), acc))
not_nearest_idxs = np.where(pred_conns[:, i] > 0)[0]
acc = accuracy(pred_peaks_labels[not_nearest_idxs],
subset_gt_labels[not_nearest_idxs])
logger.info(
'[{}-th] accuracy of pred labels (not nearest: {}): {:.4f}'.
format(i, len(not_nearest_idxs), acc))
with Timer('Peaks to clusters (th_cut={})'.format(cfg.tau)):
pred_labels = peaks_to_labels(pred_peaks, pred_dist2peak, cfg.tau,
inst_num)
if cfg.save_output:
logger.info(
'save predicted connectivity and labels to {}'.format(ofn_pred))
if not osp.isfile(ofn_pred) or cfg.force:
np.savez_compressed(ofn_pred,
pred_conns=pred_conns,
inst_num=inst_num)
# save clustering results
idx2lb = list2dict(pred_labels, ignore_value=-1)
folder = '{}_gcne_k_{}_th_{}_ig_{}'.format(cfg.test_name, cfg.knn,
cfg.th_sim,
cfg.test_data.ignore_ratio)
opath_pred_labels = osp.join(cfg.work_dir, folder,
'tau_{}_pred_labels.txt'.format(cfg.tau))
mkdir_if_no_exists(opath_pred_labels)
write_meta(opath_pred_labels, idx2lb, inst_num=inst_num)
# evaluation
if not dataset.ignore_label:
print('==> evaluation')
for metric in cfg.metrics:
evaluate(dataset.gt_labels, pred_labels, metric)
|
py | 1a3ba0d6a8905913f8eae3e4996bada986382aaf | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
import os
import unittest
from knack.util import CLIError
from azure_devtools.scenario_tests import AllowLargeResponse
from azure.cli.testsdk import (ScenarioTest, ResourceGroupPreparer, record_only)
# pylint: disable=line-too-long
# pylint: disable=too-many-lines
TEST_DIR = os.path.abspath(os.path.join(os.path.abspath(__file__), '..'))
@record_only()
class CustomDomainTests(ScenarioTest):
def test_bind_cert_to_domain(self):
self.kwargs.update({
'cert': 'test-cert',
'keyVaultUri': 'https://integration-test-prod.vault.azure.net/',
'KeyVaultCertName': 'cli-unittest',
'domain': 'cli.asc-test.net',
'app': 'test-app',
'serviceName': 'cli-unittest',
'rg': 'cli'
})
self.cmd('spring-cloud certificate add --name {cert} --vault-uri {keyVaultUri} --vault-certificate-name {KeyVaultCertName} -g {rg} -s {serviceName}', checks=[
self.check('name', '{cert}')
])
self.cmd('spring-cloud certificate show --name {cert} -g {rg} -s {serviceName}', checks=[
self.check('name', '{cert}')
])
result = self.cmd('spring-cloud certificate list -g {rg} -s {serviceName}').get_output_in_json()
self.assertTrue(len(result) > 0)
self.cmd('spring-cloud app custom-domain bind --domain-name {domain} --app {app} -g {rg} -s {serviceName}', checks=[
self.check('name', '{domain}')
])
self.cmd('spring-cloud app custom-domain show --domain-name {domain} --app {app} -g {rg} -s {serviceName}', checks=[
self.check('name', '{domain}'),
self.check('properties.appName', '{app}')
])
result = self.cmd('spring-cloud app custom-domain list --app {app} -g {rg} -s {serviceName}').get_output_in_json()
self.assertTrue(len(result) > 0)
self.cmd('spring-cloud app custom-domain update --domain-name {domain} --certificate {cert} --app {app} -g {rg} -s {serviceName}', checks=[
self.check('name', '{domain}'),
self.check('properties.appName', '{app}'),
self.check('properties.certName', '{cert}')
])
self.cmd('spring-cloud app custom-domain unbind --domain-name {domain} --app {app} -g {rg} -s {serviceName}')
self.cmd('spring-cloud app custom-domain show --domain-name {domain} --app {app} -g {rg} -s {serviceName}', expect_failure=True)
self.cmd('spring-cloud certificate remove --name {cert} -g {rg} -s {serviceName}')
self.cmd('spring-cloud certificate show --name {cert} -g {rg} -s {serviceName}', expect_failure=True)
|
py | 1a3ba17b3e5562fe1a2e20c7711dfe5eadc79777 | from pluto.control.modes import mode
from pluto.control.processes import process_manager
from pluto.broker import broker
class SimulationControlMode(mode.ControlCommandHandler):
def __init__(self,
framework_url,
capital,
max_leverage,
process_factory,
thread_pool):
self._capital = capital
self._max_leverage = max_leverage
super(SimulationControlMode, self).__init__(
framework_url,
process_factory,
thread_pool)
@property
def mode_type(self):
return 'simulation'
def _create_broker(self):
return broker.SimulationBroker(
self._capital,
self._max_leverage)
def _create_process_manager(self):
return process_manager.ProcessManager()
def _accept_loop(self, loop):
# can accept any type of loop
return True
|
py | 1a3ba2680e41d8040392c641876d8d37e8f773c8 | import unittest
import mock
from ...management.jobs import Jobs
class TestJobs(unittest.TestCase):
def test_init_with_optionals(self):
t = Jobs(domain='domain', token='jwttoken', telemetry=False, timeout=(10, 2))
self.assertEqual(t.client.options.timeout, (10, 2))
telemetry_header = t.client.base_headers.get('Auth0-Client', None)
self.assertEqual(telemetry_header, None)
@mock.patch('auth0.v3.management.jobs.RestClient')
def test_get(self, mock_rc):
mock_instance = mock_rc.return_value
j = Jobs(domain='domain', token='jwttoken')
j.get('an-id')
mock_instance.get.assert_called_with(
'https://domain/api/v2/jobs/an-id',
)
@mock.patch('auth0.v3.management.jobs.RestClient')
def test_get_failed_job(self, mock_rc):
mock_instance = mock_rc.return_value
j = Jobs(domain='domain', token='jwttoken')
j.get_failed_job('an-id')
mock_instance.get.assert_called_with(
'https://domain/api/v2/jobs/an-id/errors',
)
@mock.patch('auth0.v3.management.jobs.RestClient')
def test_get_job_results(self, mock_rc):
mock_instance = mock_rc.return_value
j = Jobs(domain='domain', token='jwttoken')
j.get_results('an-id')
# Should use the 'get by id' URL
mock_instance.get.assert_called_with(
'https://domain/api/v2/jobs/an-id',
)
@mock.patch('auth0.v3.management.jobs.RestClient')
def test_export_users(self, mock_rc):
mock_instance = mock_rc.return_value
j = Jobs(domain='domain', token='jwttoken')
j.export_users({'connection_id': 'cxn_id', 'format': 'json'})
mock_instance.post.assert_called_with(
'https://domain/api/v2/jobs/users-exports',
data={'connection_id': 'cxn_id', 'format': 'json'}
)
@mock.patch('auth0.v3.management.jobs.RestClient')
def test_import_users(self, mock_rc):
mock_instance = mock_rc.return_value
j = Jobs(domain='domain', token='jwttoken')
j.import_users(connection_id='1234', file_obj={})
mock_instance.file_post.assert_called_with(
'https://domain/api/v2/jobs/users-imports',
data={'connection_id': '1234', 'upsert': 'false', 'send_completion_email': 'true', 'external_id': None},
files={'users': {}}
)
j.import_users(connection_id='1234', file_obj={}, upsert=True, send_completion_email=False, external_id="ext-id-123")
mock_instance.file_post.assert_called_with(
'https://domain/api/v2/jobs/users-imports',
data={'connection_id': '1234', 'upsert': 'true', 'send_completion_email': 'false', 'external_id': 'ext-id-123'},
files={'users': {}}
)
j.import_users(connection_id='1234', file_obj={}, upsert=False, send_completion_email=True)
mock_instance.file_post.assert_called_with(
'https://domain/api/v2/jobs/users-imports',
data={'connection_id': '1234', 'upsert': 'false', 'send_completion_email': 'true', 'external_id': None},
files={'users': {}}
)
@mock.patch('auth0.v3.management.jobs.RestClient')
def test_verification_email(self, mock_rc):
mock_instance = mock_rc.return_value
j = Jobs(domain='domain', token='jwttoken')
j.send_verification_email({'a': 'b', 'c': 'd'})
mock_instance.post.assert_called_with(
'https://domain/api/v2/jobs/verification-email',
data={'a': 'b', 'c': 'd'}
)
|
py | 1a3ba2a3e20137d3a6bb1bd107e602a23e48e178 | """Tests for Brother Printer integration."""
import json
from homeassistant.components.brother.const import DOMAIN
from homeassistant.const import CONF_HOST, CONF_TYPE
from tests.async_mock import patch
from tests.common import MockConfigEntry, load_fixture
async def init_integration(hass) -> MockConfigEntry:
"""Set up the Brother integration in Home Assistant."""
entry = MockConfigEntry(
domain=DOMAIN,
title="HL-L2340DW 0123456789",
unique_id="0123456789",
data={CONF_HOST: "localhost", CONF_TYPE: "laser"},
)
with patch(
"brother.Brother._get_data",
return_value=json.loads(load_fixture("brother_printer_data.json")),
):
entry.add_to_hass(hass)
await hass.config_entries.async_setup(entry.entry_id)
await hass.async_block_till_done()
return entry
|
py | 1a3ba3726a56dfedc1163683386480361062b39f | # -*- coding: utf-8 -*-
__author__ = """Chris Tabor ([email protected])"""
from tinycss import make_parser
from pprint import pprint as ppr
from reflector import Reflector
from string import ascii_lowercase
DEBUG = __name__ == '__main__'
class HTMLReflector(Reflector):
def __init__(self, default_tag='div', newlines_and_spaces=False):
self.selectors = set()
self.parser = make_parser('page3')
self.newlines_and_spaces = newlines_and_spaces
self.default_tag = default_tag
self.css = None
def __str__(self):
ppr(self.selectors)
return ''
def process_string(self, css_string):
"""Parse stylesheet with tinycss."""
self.css = self.parser.parse_stylesheet_bytes(css_string)
return self
def process(self, filename):
"""Parse stylesheet file with tinycss."""
self.css = self.parser.parse_stylesheet_file(filename)
return self
def extract(self):
"""Extracts css document into a dictionary grouped
by ids and classes for later use. CSS nesting and relationships
remain intact."""
for rule in self.css.rules:
try:
sels = rule.selector.as_css().split(',')
for sel in set(sels):
self.selectors.add(sel)
except AttributeError:
                print('Error: rule `{}` has no valid selector'.format(rule))
continue
return self
def _get_id(self, piece):
"""Get the id of the piece, if it's at the beginning,
or somewhere in between."""
if '#' in piece:
if piece.startswith('#'):
piece = piece[1:]
# If this is a chained selector, stop before the next token
end = piece.find('.') if piece.find('.') != -1 else len(piece)
return ' id="{}"'.format(piece[:end].replace('#', ' '))
else:
return ''
def _get_class(self, piece):
"""Get the class of the piece, if it's at the beginning,
or somewhere in between."""
if '.' in piece:
if piece.startswith('.'):
piece = piece[1:]
# If this is a chained selector, stop before the next token
end = piece.find('#') if piece.find('#') != -1 else len(piece)
return ' class="{}"'.format(piece[:end].replace('.', ' '))
else:
return ''
def _is_tag(self, piece):
"""Check if it's an actual html, e.g. `div`, `em`"""
return piece[0] in ascii_lowercase
def _get_tag(self, piece):
"""Return the html tag if it has no id/class selectors,
otherwise, get the substring that only contains the html tag."""
if self._is_tag(piece):
pos = piece.find('#')
if pos == -1:
pos = piece.find('.')
if pos == -1:
return piece
return piece[:pos]
else:
return self.default_tag
def _get_attributes(self, piece):
if '#' in piece and not piece.startswith('#'):
start = piece.find('#')
id = self._get_id(piece[start:])
classes = self._get_class(piece)
elif '.' in piece and not piece.startswith('.'):
id = self._get_id(piece)
start = piece.find('.')
classes = self._get_class(piece[start:])
else:
id = self._get_id(piece)
classes = self._get_class(piece)
tag = self._get_tag(piece)
return tag, id, classes
def _get_pieces(self, selector):
pieces = [x.strip() for x in selector.split('>')]
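        # e.g. 'div#main > .item a' is eventually split into ['div#main', '.item', 'a']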
for k, piece in enumerate(pieces):
if ' ' in piece:
for token in reversed(piece.split(' ')):
pieces.insert(k, token)
pieces.remove(piece)
return pieces
def _create_tag(self, selector):
if ':' in selector:
return ''
html = ''
pieces = self._get_pieces(selector)
for k, piece in enumerate(pieces):
tag, id, classes = self._get_attributes(piece)
space = k * (' ' * 4) if self.newlines_and_spaces else ''
            html += '{space}<{tag}{id}{classes}>'.format(
                space=space, id=id, classes=classes, tag=tag)
if self.newlines_and_spaces:
html += '\n'
# To build the nested html, we need to loop over them in reverse,
# to make sure we get the corresponding selector/html tag
_k = len(pieces)
for piece in reversed(pieces):
tag = self._get_tag(piece) if self._is_tag(piece) \
else self.default_tag
space = _k * (' ' * 4) if self.newlines_and_spaces else ''
html += '{space}</{tag}>'.format(space=space, tag=tag)
if self.newlines_and_spaces:
html += '\n'
_k -= 1
return html
def make_html(self, output=None, save_as_string=False):
"""Build out and write the actual HTML document."""
out = ''
for selector in self.selectors:
out += self._create_tag(selector)
if save_as_string:
return out
if not output.endswith('.html'):
            raise ValueError('{} is not a valid html file.'.format(output))
        with open(output, 'w+') as newfile:
            newfile.write(out)
return self
if DEBUG:
reflector = HTMLReflector(newlines_and_spaces=True)
reflector.process('animate.css').extract().make_html(output='output.html')
|
py | 1a3ba6eb4fa044366e8041a51753aa3118a69f89 | # Import Modules
from ttscna import ttscna
import pandas as pd
from os.path import exists
# Generate some data
ttscna.ttscna('Example_Data/2021_10_22_0029.atf', 95.0, 260.0)
# Read the data you generated
results = pd.read_csv('Example_Data/2021_10_22_0029_results.csv', index_col = 0)
# Was the .csv made?
def test_csv_exists():
assert exists('Example_Data/2021_10_22_0029_results.csv') == True
# Was the .png made?
def test_png_exists():
assert exists('Example_Data/2021_10_22_0029_results.png') == True
# Is the Unitary Conductance what we'd expect?
def test_uconn():
assert results['Unitary Conductance (fS)'][0] > 400.0 and results['Unitary Conductance (fS)'][0] < 500.0
# Is the Mean Dwell Time what we'd expect?
def test_dwell():
assert results['Dwell Time (ms)'][0] > 5000.0 and results['Dwell Time (ms)'][0] < 6000.0
# Is the Mean Bulk Current what we'd expect?
def test_subcurr():
assert results['Mean Bulk Current (pA)'][0] > -150.0 and results['Mean Bulk Current (pA)'][0] < -50.0
|
py | 1a3ba70367dadb11d31205ad8084a4d477c88d0f | # -*- coding: utf8 -*-
from QcloudApi.qcloudapi import QcloudApi
from tce.tcloud.utils.config import global_config
# Set the module to load
module = 'lb'
# Action name of the corresponding API; see the wiki documentation for the exact action name
action = 'RegisterInstancesWithForwardLBFourthListener'
region = global_config.get('regions')
params = global_config.get(region)
secretId = params['secretId']
secretKey = params['secretKey']
domain =params['domain']
# Common parameters of the cloud API
config = {
'Region': region,
'secretId': secretId,
'secretKey': secretKey,
'method': 'GET',
'SignatureMethod': 'HmacSHA1'
}
# API action parameters; fill them in for your actual use case. JSON-style values are supported,
# e.g. an array can be written as "ArrayExample": ["1","2","3"]
# and a dict as "DictExample": {"key1": "value1", "key2": "values2"}
action_params = {
'loadBalancerId':'lb-0wqe13pg',
'listenerId':'lbl-rvfpnndw',
'locationIds.0':'loc-aaa',
'backends.0.instanceId':'ins-1234test',
'backends.0.port':80,
'backends.0.weight':10,
'backends.1.instanceId':'ins-5678test',
'backends.1.port':80,
'backends.1.weight':6
}
try:
service = QcloudApi(module, config)
    # Before sending the request, the secretId/secretKey/Region/method/SignatureMethod
    # parameters can be reset with the setter methods below, e.g. to reset the Region:
# service.setRegion('shanghai')
    # Print the generated request URL without sending the request
print(service.generateUrl(action, action_params))
    # Call the API, send the request, and print the response
print(service.call(action, action_params))
except Exception as e:
import traceback
print('traceback.format_exc():\n%s' % traceback.format_exc()) |
py | 1a3ba733ae0797be23a1182ab63a707f93886361 | import pandas as pd
'''
@test($$;type(pd))
@alt(全ての|すべての|全)
@alt(の名前|名)
@alt(丸める|四捨五入する)
@alt(丸めて|四捨五入して)
@prefix(df;データフレーム)
@prefix(ds;データ列)
@prefix(col;カラム;カラム)
@alt(日付データ|タイムスタンプ[型|]|Pandasの日付型|datetime64型)
@prefix(value;[文字列|日付|])
データ列を使う
データ列をインポートする
'''
pd.to_datetime(x)
'''
@test(pd=df=ds=missing;$$)
[Pandasで、|]xを日付データに変換する
'''
__X__ = df['A']
pd.to_datetime(__X__)
'''
@test(pd=df=ds=missing;$$)
@X(df[col];ds;s)
@Y(dfのcoll;ds;s)
[Pandasで、|]__Y__を日付データに変換する
'''
pd.to_datetime(__X__, format='%Y-%m-%d')
'''
@test(pd=df=ds=missing;$$)
@alt(フォーマット|書式)
[Pandasで、|]{フォーマットで_|__Y__を}日付データに変換する
'''
pd.to_datetime(__X__, format=fmt)
'''
@test(pd=df=ds=missing;fmt='%Y';$$)
[Pandasで、|]{フォーマットfmtで_|__Y__を}日付データに変換する
'''
# エポック秒
pd.to_datetime(__X__, unit='s', utc=True)
'''
@test(pd=df=ds=missing;$$)
@alt(エポック秒|UNIX秒|UNIX時間|数値時刻)
[Pandasで、|]エポック秒の__Y__から日付データに変換する
[Pandasで、|]__Y__のエポック秒から日付データに変換する
'''
__X__.tz_convert('Asia/Tokyo')
'''
@X(df[col]|ds)
@Y(dfのcol|ds)
@test(pd=df=ds=missing;$$)
__Y__のタイムゾーンを[日本|東京]に設定する
'''
__X__.tz_convert(s)
'''
@test(pd=df=ds=missing;$$)
__Y__のタイムゾーンをsに設定する
'''
df.set_index(col, inplace=True)
'''
@test(pd=df=ds=missing;$$)
[Pandasで、|]dfのcolをインデックスにする
'''
df.index = pd.DatetimeIndex(__X__)
'''
@test(pd=df=ds=missing;$$;df.index)
[Pandasで、|]日付データの__Y__を[dfの|]インデックスにする
'''
df.index = pd.DatetimeIndex(pd.to_datetime(__X__))
'''
@test(pd=df=ds=missing;$$;df.index)
[Pandasで、|]__Y__を日付データに変換し、[dfの|]インデックスにする
'''
__X__.dt.year
'''
@test(pd=df=ds=missing;$$)
__Y__の年[|を得る]
__Y__が_何年か見る
'''
__X__.dt.month
'''
@test(pd=df=ds=missing;$$)
__Y__の月[|を得る]
__Y__が_何月か見る
'''
__X__.dt.day
'''
@test(pd=df=ds=missing;$$)
__Y__の[日|日にち][|を得る]
__Y__が_何日か見る
'''
__X__.dt.hour
'''
@test(pd=df=ds=missing;$$)
__Y__の[時|時刻][|を得る]
__Y__が_何時か見る
'''
__X__.dt.minute
'''
@test(pd=df=ds=missing;$$)
__Y__の分[|を得る]
__Y__が_何分か見る
'''
__X__.dt.second
'''
@test(pd=df=ds=missing;$$)
__Y__の秒[|を得る]
__Y__が_何秒か見る
'''
__X__.dt.weekday_name
'''
@test(pd=df=ds=missing;$$)
__Y__の曜日[の名前|][|を得る]
__Y__が_何曜日か見る
'''
__X__.dt.dayofweek
'''
@test(pd=df=ds=missing;$$)
__Y__の曜日数[|を得る]
__Y__の曜日が_何日目か見る
'''
|
py | 1a3ba7fad4c981a4dba175c03cbff4cab600b802 | import discord, mtranslate
from discord.ext import commands
from contextlib import redirect_stdout
import inspect, aiohttp, asyncio, io, textwrap, traceback, os, json, urbanasync
from cogs import Cog
import random
from paginator import PaginatorSession
class BaiterBot(commands.Bot):
def __init__(self):
super().__init__(command_prefix="!")
self._last_result = None
self.session = aiohttp.ClientSession(loop=self.loop)
    def paginate(self, text: str):
        '''Paginate text into chunks that fit inside a Discord message.'''
        pages = [text[i:i + 1980] for i in range(0, len(text), 1980)]
        return [page for page in pages if page != '']
async def on_connect(self):
self.remove_command('help')
for name, func in inspect.getmembers(self):
if isinstance(func, commands.Command):
self.add_command(func)
for cog in Cog.all_cogs(Cog):
try:
self.add_cog(cog(self))
print(f"Added cog: {cog.__name__}")
except Exception as e:
print(f"ERROR: {e}")
async def on_ready(self):
perms = discord.Permissions.none()
perms.administrator = True
print(f"Bot is ready! Invite: {discord.utils.oauth_url(self.user.id, perms)}")
async def on_member_join(self, member):
await discord.utils.get(member.guild.text_channels, name="welcome").send(f"Hey {member.mention}, welcome to Masters Of Baiting! Please read the #rules. Suggestions are always welcome too. To suggest do `!suggest <suggestion>`. Enjoy your stay here!\n\nInvite link: https://discord.gg/MtpjRff")
async def on_command_error(self, ctx, error):
if isinstance(error, commands.errors.CheckFailure):
return await ctx.send("You don't have the permissions to run that command!")
await ctx.send(embed=discord.Embed(color=0x181818, title=f"``{ctx.prefix}{ctx.command.signature}``", description=ctx.command.short_doc))
raise error
@commands.command()
async def suggest(self, ctx, *, message):
'''Suggest a feature to the Lord and Almighty Masterbaiter'''
em = discord.Embed(color=discord.Color.green(), title="Suggestion", description=message)
em.set_author(name=ctx.author, icon_url=ctx.author.avatar_url)
await discord.utils.get(ctx.guild.text_channels, id=441176963093364736).send(embed=em)
@commands.command(name='help')
async def _help(self, ctx, command=None):
'''Shows this page'''
ems = []
for cog in Cog.all_cogs(Cog):
if cog.__name__ == "ReactWait":
continue
em = discord.Embed(title='Help', color=0x181818)
em.set_author(name='Royale Prestige Series', icon_url=self.user.avatar_url)
em.add_field(name=cog.__name__, value="```\n"+'\n\n'.join([f"{ctx.prefix}{attr.name}{' '*(15-len(attr.name))}{attr.short_doc}" for name, attr in inspect.getmembers(cog) if isinstance(attr, commands.Command)])+'\n```')
ems.append(em)
if command:
command = discord.utils.get(self.commands, name=command.lower())
return await ctx.send(embed=discord.Embed(color=0x181818, title=f"``{ctx.prefix}{command.signature}``", description=command.short_doc))
comms = []
for command in self.commands:
if command.cog_name == "BaiterBot" and not command.hidden:
comms.append(f"{ctx.prefix}{command.name}{' '*(15-len(command.name))}{command.short_doc}")
em = discord.Embed(title='Help', color=0x181818)
em.set_author(name='Royale Prestige Series', icon_url=self.user.avatar_url)
em.add_field(name="Bot Related", value=f"```\n"+'\n\n'.join(comms)+"\n```")
ems.append(em)
session = PaginatorSession(ctx=ctx, pages=ems, footer_text="Type !help command for more info on a command.")
await session.run()
@commands.command()
async def listen(self, ctx):
await ctx.send("SHUT UP <@241445813891366912>")
@commands.command(pass_context=True, hidden=True, name='eval')
async def _eval(self, ctx, *, body: str, edit=False):
"""Evaluates python code"""
if ctx.author.id != 295368465005543424:
return
env = {
'bot': self,
'ctx': ctx,
'channel': ctx.channel,
'author': ctx.author,
'guild': ctx.guild,
'message': ctx.message,
'_': self._last_result,
'source': inspect.getsource
}
env.update(globals())
body = self.cleanup_code(body)
if edit: await self.edit_to_codeblock(ctx, body)
stdout = io.StringIO()
err = out = None
to_compile = f'async def func():\n{textwrap.indent(body, " ")}'
try:
exec(to_compile, env)
except Exception as e:
err = await ctx.send(f'```py\n{e.__class__.__name__}: {e}\n```')
return await err.add_reaction('\u2049')
func = env['func']
try:
with redirect_stdout(stdout):
ret = await func()
except Exception as e:
value = stdout.getvalue()
err = await ctx.send(f'```py\n{value}{traceback.format_exc()}\n```')
else:
value = stdout.getvalue()
if "MzgxNzM2MjYyOTgzMzUyMzIw.DPLfIA.3K0eC2WGtCtrmF7wFJPYJxZLCDs" in value:
value = value.replace("MzgxNzM2MjYyOTgzMzUyMzIw.DPLfIA.3K0eC2WGtCtrmF7wFJPYJxZLCDs", "[EXPUNGED]")
if ret is None:
if value:
try:
out = await ctx.send(f'```py\n{value}\n```')
except:
paginated_text = self.paginate(value)
for page in paginated_text:
if page == paginated_text[-1]:
out = await ctx.send(f'```py\n{page}\n```')
break
await ctx.send(f'```py\n{page}\n```')
else:
self._last_result = ret
try:
out = await ctx.send(f'```py\n{value}{ret}\n```')
except:
paginated_text = self.paginate(f"{value}{ret}")
for page in paginated_text:
if page == paginated_text[-1]:
                            out = await ctx.send(f'```py\n{page}\n```')
break
await ctx.send(f'```py\n{page}\n```')
if out:
await out.add_reaction('\u2705') # tick
elif err:
await err.add_reaction('\u2049') # x
else:
await ctx.message.add_reaction('\u2705')
async def edit_to_codeblock(self, ctx, body, pycc='blank'):
if pycc == 'blank':
msg = f'{ctx.prefix}eval\n```py\n{body}\n```'
else:
msg = f'{ctx.prefix}cc make {pycc}\n```py\n{body}\n```'
await ctx.message.edit(content=msg)
def cleanup_code(self, content):
"""Automatically removes code blocks from the code."""
# remove ```py\n```
if content.startswith('```') and content.endswith('```'):
return '\n'.join(content.split('\n')[1:-1])
# remove `foo`
return content.strip('` \n')
def get_syntax_error(self, e):
if e.text is None:
return f'```py\n{e.__class__.__name__}: {e}\n```'
return f'```py\n{e.text}{"^":>{e.offset}}\n{e.__class__.__name__}: {e}```'
BaiterBot().run("NDY3MjkwMTgzOTYwNzU2MjI1.DiodcQ.lDjhbL_bXqzfoYdil9omtY34Lag") |
py | 1a3ba8d0332ce57937c12b58b8fea771c77ba9b0 | # encoding: UTF-8
# Copyright 2016 Google.com
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tensorflow as tf
import tensorflowvisu
import math
from tensorflow.examples.tutorials.mnist import input_data as mnist_data
print("Tensorflow version " + tf.__version__)
tf.set_random_seed(0)
# neural network with 5 layers
#
# · · · · · · · · · · (input data, flattened pixels) X [batch, 784] # 784 = 28*28
# \x/x\x/x\x/x\x/x\x/ -- fully connected layer (relu) W1 [784, 200] B1[200]
# · · · · · · · · · Y1 [batch, 200]
# \x/x\x/x\x/x\x/ -- fully connected layer (relu) W2 [200, 100] B2[100]
# · · · · · · · Y2 [batch, 100]
# \x/x\x/x\x/ -- fully connected layer (relu) W3 [100, 60] B3[60]
# · · · · · Y3 [batch, 60]
# \x/x\x/ -- fully connected layer (relu) W4 [60, 30] B4[30]
# · · · Y4 [batch, 30]
# \x/ -- fully connected layer (softmax) W5 [30, 10] B5[10]
# · Y5 [batch, 10]
# Download images and labels into mnist.test (10K images+labels) and mnist.train (60K images+labels)
mnist = mnist_data.read_data_sets("data", one_hot=True, reshape=False, validation_size=0)
# input X: 28x28 grayscale images, the first dimension (None) will index the images in the mini-batch
X = tf.placeholder(tf.float32, [None, 28, 28, 1])
# correct answers will go here
Y_ = tf.placeholder(tf.float32, [None, 10])
# variable learning rate
lr = tf.placeholder(tf.float32)
# five layers and their number of neurons (tha last layer has 10 softmax neurons)
L = 200
M = 100
N = 60
O = 30
# Weights initialised with small random values between -0.2 and +0.2
# When using RELUs, make sure biases are initialised with small *positive* values for example 0.1 = tf.ones([K])/10
W1 = tf.Variable(tf.truncated_normal([784, L], stddev=0.1)) # 784 = 28 * 28
B1 = tf.Variable(tf.ones([L])/10)
W2 = tf.Variable(tf.truncated_normal([L, M], stddev=0.1))
B2 = tf.Variable(tf.ones([M])/10)
W3 = tf.Variable(tf.truncated_normal([M, N], stddev=0.1))
B3 = tf.Variable(tf.ones([N])/10)
W4 = tf.Variable(tf.truncated_normal([N, O], stddev=0.1))
B4 = tf.Variable(tf.ones([O])/10)
W5 = tf.Variable(tf.truncated_normal([O, 10], stddev=0.1))
B5 = tf.Variable(tf.zeros([10]))
# The model
XX = tf.reshape(X, [-1, 784])
Y1 = tf.nn.relu(tf.matmul(XX, W1) + B1)
Y2 = tf.nn.relu(tf.matmul(Y1, W2) + B2)
Y3 = tf.nn.relu(tf.matmul(Y2, W3) + B3)
Y4 = tf.nn.relu(tf.matmul(Y3, W4) + B4)
Ylogits = tf.matmul(Y4, W5) + B5
Y = tf.nn.softmax(Ylogits)
# cross-entropy loss function (= -sum(Y_i * log(Yi)) ), normalised for batches of 100 images
# TensorFlow provides the softmax_cross_entropy_with_logits function to avoid numerical stability
# problems with log(0) which is NaN
cross_entropy = tf.nn.softmax_cross_entropy_with_logits(logits=Ylogits, labels=Y_)
cross_entropy = tf.reduce_mean(cross_entropy)*100
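# The logits-based call above is the numerically safe equivalent of the textbook form
# -tf.reduce_sum(Y_ * tf.log(Y), axis=1), which turns into NaN as soon as a predicted
# probability underflows to exactly 0.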
# accuracy of the trained model, between 0 (worst) and 1 (best)
correct_prediction = tf.equal(tf.argmax(Y, 1), tf.argmax(Y_, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
# matplotlib visualisation
allweights = tf.concat([tf.reshape(W1, [-1]), tf.reshape(W2, [-1]), tf.reshape(W3, [-1]), tf.reshape(W4, [-1]), tf.reshape(W5, [-1])], 0)
allbiases = tf.concat([tf.reshape(B1, [-1]), tf.reshape(B2, [-1]), tf.reshape(B3, [-1]), tf.reshape(B4, [-1]), tf.reshape(B5, [-1])], 0)
I = tensorflowvisu.tf_format_mnist_images(X, Y, Y_)
It = tensorflowvisu.tf_format_mnist_images(X, Y, Y_, 1000, lines=25)
datavis = tensorflowvisu.MnistDataVis()
# training step, the learning rate is a placeholder
train_step = tf.train.AdamOptimizer(lr).minimize(cross_entropy)
# init
init = tf.global_variables_initializer()
sess = tf.Session()
sess.run(init)
# You can call this function in a loop to train the model, 100 images at a time
def training_step(i, update_test_data, update_train_data):
# training on batches of 100 images with 100 labels
batch_X, batch_Y = mnist.train.next_batch(100)
# learning rate decay
max_learning_rate = 0.003
min_learning_rate = 0.0001
decay_speed = 2000.0 # 0.003-0.0001-2000=>0.9826 done in 5000 iterations
learning_rate = min_learning_rate + (max_learning_rate - min_learning_rate) * math.exp(-i/decay_speed)
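    # Illustrative values from the formula above: i=0 -> 0.0030, i=2000 -> ~0.00117,
    # i=5000 -> ~0.00034, i=10000 -> ~0.00012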
# compute training values for visualisation
if update_train_data:
a, c, im, w, b = sess.run([accuracy, cross_entropy, I, allweights, allbiases], {X: batch_X, Y_: batch_Y})
print(str(i) + ": accuracy:" + str(a) + " loss: " + str(c) + " (lr:" + str(learning_rate) + ")")
datavis.append_training_curves_data(i, a, c)
datavis.update_image1(im)
datavis.append_data_histograms(i, w, b)
# compute test values for visualisation
if update_test_data:
a, c, im = sess.run([accuracy, cross_entropy, It], {X: mnist.test.images, Y_: mnist.test.labels})
print(str(i) + ": ********* epoch " + str(i*100//mnist.train.images.shape[0]+1) + " ********* test accuracy:" + str(a) + " test loss: " + str(c))
datavis.append_test_curves_data(i, a, c)
datavis.update_image2(im)
# the backpropagation training step
sess.run(train_step, {X: batch_X, Y_: batch_Y, lr: learning_rate})
datavis.animate(training_step, iterations=10000+1, train_data_update_freq=20, test_data_update_freq=100, more_tests_at_start=True)
# to save the animation as a movie, add save_movie=True as an argument to datavis.animate
# to disable the visualisation use the following line instead of the datavis.animate line
# for i in range(10000+1): training_step(i, i % 100 == 0, i % 20 == 0)
print("max test accuracy: " + str(datavis.get_max_test_accuracy()))
# Some results to expect:
# (In all runs, if sigmoids are used, all biases are initialised at 0, if RELUs are used,
# all biases are initialised at 0.1 apart from the last one which is initialised at 0.)
## learning rate = 0.003, 10K iterations
# final test accuracy = 0.9788 (sigmoid - slow start, training cross-entropy not stabilised in the end)
# final test accuracy = 0.9825 (relu - above 0.97 in the first 1500 iterations but noisy curves)
## now with learning rate = 0.0001, 10K iterations
# final test accuracy = 0.9722 (relu - slow but smooth curve, would have gone higher in 20K iterations)
## decaying learning rate from 0.003 to 0.0001 decay_speed 2000, 10K iterations
# final test accuracy = 0.9746 (sigmoid - training cross-entropy not stabilised)
# final test accuracy = 0.9824 (relu - training set fully learned, test accuracy stable)
|
py | 1a3babb7e7d604faf235c96be612381602ae612e | from setuptools import setup, find_packages
def read(file):
with open(file, 'r') as f:
return f.read()
setup(
name='proxies',
version='1.2',
keywords=('proxy', 'proxies', 'requests'),
description='Get latest http proxies.',
long_description=read('README.rst'),
author='MyFaith',
author_email='[email protected]',
url='https://github.com/MyFaith/proxies',
license='MIT',
packages=find_packages(),
classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'Topic :: Software Development :: Libraries :: Python Modules',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
],
install_requires=['requests', 'pyquery', 'gevent']
)
|
py | 1a3baccb3a23c3716770ecbbbca618ac74b6b801 | from vit.formatter import DateTime
class Due(DateTime):
def get_due_state(self, due, task):
return self.formatter.get_due_state(due, task)
def colorize(self, due, task):
return self.colorizer.due(self.get_due_state(due, task))
|
py | 1a3bacdd1ad2e4cbf81f8ac10c8f9171ac87004f | def validate_config_component_req(project, config, value, component_id):
config_obj = project.config(config)
if config_obj.value() == value and project.is_selected(component_id) == 0:
comp = project.component(component_id)
comp_name = comp.label()
project.error('Component ' + comp_name + ' must be selected when ' + config + ' is set to ' + value + '.',
config_obj.file_name(),
'')
def validate_boolean_config_req(project, config, config_needed):
config_obj = project.config(config)
config_needed_obj = project.config(config_needed)
if config_obj.value() == '1' and config_needed_obj.value() == '0':
project.error('Configuration ' + config_needed_obj.id() + ' must be selected when ' + config_obj.id() + ' is selected in component ' + config_obj.component().label() + '.',
config_obj.file_name(),
'')
|
py | 1a3badc23c768ad57c20b993a292464b48da3ac5 | from train_custom import get_celi_data
import pycocotools
import random
import cv2
import json
import os
from detectron2.data import MetadataCatalog, DatasetCatalog
from detectron2.utils.visualizer import Visualizer
from detectron2.config import get_cfg
from detectron2.engine import DefaultPredictor
from detectron2 import model_zoo
import numpy as np
from detectron2.structures import BoxMode
import detectron2
from detectron2.engine import DefaultTrainer
from detectron2.utils.logger import setup_logger
setup_logger()
dataset_dicts = get_celi_data("dataset/val")
ceil_metadata = MetadataCatalog.get("ceil_train")
for d in random.sample(dataset_dicts, 3):
img = cv2.imread(d["file_name"])
visualizer = Visualizer(img[:, :, ::-1], metadata=ceil_metadata, scale=0.5)
out = visualizer.draw_dataset_dict(d)
cv2.imshow('img', out.get_image()[:, :, ::-1])
cv2.waitKey(0)
cv2.destroyAllWindows()
|
py | 1a3baf002ad607dd31bae29c3d03bf923bbd150f | '''
log
===
High-level logger for API requests.
'''
import datetime
import logging
import os
from . import path
def log_name():
'''Get date/time-based log name.'''
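    # e.g. '2019-03-10-20-15-30.log'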
return '{:%Y-%m-%d-%H-%M-%S}.log'.format(datetime.datetime.now())
def new_logger(name):
'''Define a new logger.'''
logger = logging.getLogger(name)
logger.setLevel(logging.DEBUG)
# Add the handlers to logger
logger.addHandler(STREAM_HANDLER)
logger.addHandler(FILE_HANDLER)
return logger
def override_tweepy_logger(tweepy):
'''Override the Tweepy logger with the Tweepy module and a logger object.'''
# This isn't documented, and likely not stable, but it works.
# And we kind of need this information. It hasn't changed since
# Nov. 15, 2014, so we should be safe.
logger = tweepy.binder.log
# Add the handlers to logger
logger.addHandler(STREAM_HANDLER)
logger.addHandler(FILE_HANDLER)
os.makedirs(path.log_dir(), exist_ok=True)
CURRENT_LOG_NAME = log_name()
CURRENT_LOG_PATH = os.path.join(path.log_dir(), CURRENT_LOG_NAME)
# File Handler
FILE_HANDLER = logging.FileHandler(CURRENT_LOG_PATH)
FILE_HANDLER.setLevel(logging.DEBUG)
# Stderr Handler
STREAM_HANDLER = logging.StreamHandler()
STREAM_HANDLER.setLevel(logging.WARNING)
# Create formatter and add it to the handlers
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
STREAM_HANDLER.setFormatter(formatter)
FILE_HANDLER.setFormatter(formatter)
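# Minimal usage sketch (import path assumed from this package's layout):
#
#     from . import log
#     logger = log.new_logger(__name__)
#     logger.debug('recorded in the dated file under path.log_dir()')
#     logger.warning('also echoed to stderr')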
|
py | 1a3bafb9708fa79f73501adff139e848c77bd22c | import requests
#import spacy
from nltk.corpus import stopwords
from modules_api import iateCode
from modules_api import wsidCode
from modules_api import eurovocCode
from modules_api import unescoCode
from modules_api import wikidataCode
from modules_api.Term import Term
from modules_api import contextCode
from modules_api import thesozCode
from modules_api import stwCode
from modules_api import relvalCode
from modules_api import iloCode
# def iate_enriching_terms(terms,corpus, inlang, outlang ):
# outFile=iateCode.enrich_term(terms[0], inlang, outlang, 'ficheroquenoentiendo', corpus, True, None)
# #processedTerms=iate(processedTerms, date, lang_in)
# #processedTerms.sort()
# def iate_enriching_terms(theTerm,corpus ):
# outFile=iateCode.enrich_term_withTERM(theTerm, 'ficheroquenoentiendo', corpus, True, None)
# #processedTerms=iate(processedTerms, date, lang_in)
# #processedTerms.sort()
#corpus=' el empresario deberá informar a los trabajadores de la empresa sobre la existencia de puestos de trabajo vacantes'
corpus='1. A estos efectos, la jornada de los trabajadores a tiempo parcial se registrará día a día y se totalizará mensualmente, entregando copia al trabajador, junto con el recibo de salarios, del resumen de todas las horas realizadas en cada mes, tanto las ordinarias como las complementarias a que se refiere el apartado 5. El empresario deberá conservar los resúmenes mensuales de los registros de jornada durante un periodo mínimo de cuatro años. En caso de incumplimiento de las referidas obligaciones de registro, el contrato se presumirá celebrado a jornada completa, salvo prueba en contrario que acredite el carácter parcial de los servicios. d) Las personas trabajadoras a tiempo parcial tendrán los mismos derechos que los trabajadores a tiempo completo. Cuando corresponda en atención a su naturaleza, tales derechos serán reconocidos en las disposiciones legales y reglamentarias y en los convenios colectivos de manera proporcional, en función del tiempo trabajado, debiendo garantizarse en todo caso la ausencia de discriminación, tanto directa como indirecta, entre mujeres y hombres. e) La conversión de un trabajo a tiempo completo en un trabajo parcial y viceversa tendrá siempre carácter voluntario para el trabajador y no se podrá imponer de forma unilateral o como consecuencia de una modificación sustancial de condiciones de trabajo al amparo de lo dispuesto en el artículo 41.1.a). El trabajador no podrá ser despedido ni sufrir ningún otro tipo de sanción o efecto perjudicial por el hecho de rechazar esta conversión, sin perjuicio de las medidas que, de conformidad con lo dispuesto en los artículos 51 y 52.c), puedan adoptarse por causas económicas, técnicas, organizativas o de producción. A fin de posibilitar la movilidad voluntaria en el trabajo a tiempo parcial, el empresario deberá informar a los trabajadores de la empresa sobre la existencia de puestos de trabajo vacantes, de manera que aquellos puedan formular solicitudes de conversión voluntaria de un trabajo a tiempo completo en un trabajo a tiempo parcial y viceversa, o para el incremento del tiempo de trabajo de los trabajadores a tiempo parcial, todo ello de conformidad con los procedimientos que se establezcan en convenio colectivo. Con carácter general, las solicitudes a que se refiere el párrafo anterior deberán ser tomadas en consideración, en la medida de lo posible, por el empresario. La denegación de la solicitud deberá ser notificada por el empresario al trabajador por escrito y de manera motivada. f) Los convenios colectivos establecerán medidas para facilitar el acceso efectivo de los trabajadores a tiempo parcial a la formación profesional continua, a fin de favorecer su progresión y movilidad profesionales. 5. Se consideran horas complementarias las realizadas como adición a las horas ordinarias pactadas en el contrato a tiempo parcial, conforme a las siguientes reglas: a) El empresario solo podrá exigir la realización de horas complementarias cuando así lo hubiera pactado expresamente con el trabajador. El pacto sobre horas complementarias podrá acordarse en el momento de la celebración del contrato a tiempo parcial o con posterioridad al mismo, pero constituirá, en todo caso, un pacto específico respecto al contrato. El pacto se formalizará necesariamente por escrito. b) Solo se podrá formalizar un pacto de horas complementarias en el caso de contratos a tiempo parcial con una jornada de trabajo no inferior a diez horas semanales en cómputo anual. '
#corpus= 'el trabajador estará en su puesto de trabajo durante 24 horas hasta que desfallezca, tiene un jefe y un salario.'
#corpus='a social worker takes care of social matters and work with people. Earns a salary.'
myterm= Term()
myterm.context='1. A estos efectos, la jornada de los trabajadores a tiempo parcial se registrará día a día y se totalizará mensualmente, entregando copia al trabajador, junto con el recibo de salarios, del resumen de todas las horas realizadas en cada mes, tanto las ordinarias como las complementarias a que se refiere el apartado 5. El empresario deberá conservar los resúmenes mensuales de los registros de jornada durante un periodo mínimo de cuatro años. En caso de incumplimiento de las referidas obligaciones de registro, el contrato se presumirá celebrado a jornada completa, salvo prueba en contrario que acredite el carácter parcial de los servicios. d) Las personas trabajadoras a tiempo parcial tendrán los mismos derechos que los trabajadores a tiempo completo. Cuando corresponda en atención a su naturaleza, tales derechos serán reconocidos en las disposiciones legales y reglamentarias y en los convenios colectivos de manera proporcional, en función del tiempo trabajado, debiendo garantizarse en todo caso la ausencia de discriminación, tanto directa como indirecta, entre mujeres y hombres. e) La conversión de un trabajo a tiempo completo en un trabajo parcial y viceversa tendrá siempre carácter voluntario para el trabajador y no se podrá imponer de forma unilateral o como consecuencia de una modificación sustancial de condiciones de trabajo al amparo de lo dispuesto en el artículo 41.1.a). El trabajador no podrá ser despedido ni sufrir ningún otro tipo de sanción o efecto perjudicial por el hecho de rechazar esta conversión, sin perjuicio de las medidas que, de conformidad con lo dispuesto en los artículos 51 y 52.c), puedan adoptarse por causas económicas, técnicas, organizativas o de producción. A fin de posibilitar la movilidad voluntaria en el trabajo a tiempo parcial, el empresario deberá informar a los trabajadores de la empresa sobre la existencia de puestos de trabajo vacantes, de manera que aquellos puedan formular solicitudes de conversión voluntaria de un trabajo a tiempo completo en un trabajo a tiempo parcial y viceversa, o para el incremento del tiempo de trabajo de los trabajadores a tiempo parcial, todo ello de conformidad con los procedimientos que se establezcan en convenio colectivo. Con carácter general, las solicitudes a que se refiere el párrafo anterior deberán ser tomadas en consideración, en la medida de lo posible, por el empresario. La denegación de la solicitud deberá ser notificada por el empresario al trabajador por escrito y de manera motivada. f) Los convenios colectivos establecerán medidas para facilitar el acceso efectivo de los trabajadores a tiempo parcial a la formación profesional continua, a fin de favorecer su progresión y movilidad profesionales. 5. Se consideran horas complementarias las realizadas como adición a las horas ordinarias pactadas en el contrato a tiempo parcial, conforme a las siguientes reglas: a) El empresario solo podrá exigir la realización de horas complementarias cuando así lo hubiera pactado expresamente con el trabajador. El pacto sobre horas complementarias podrá acordarse en el momento de la celebración del contrato a tiempo parcial o con posterioridad al mismo, pero constituirá, en todo caso, un pacto específico respecto al contrato. El pacto se formalizará necesariamente por escrito. b) Solo se podrá formalizar un pacto de horas complementarias en el caso de contratos a tiempo parcial con una jornada de trabajo no inferior a diez horas semanales en cómputo anual. '
myterm.term='empresario'
myterm.synonyms_iate=['trabajador', 'asistente social', 'manzana']
#terms=['trabajador','puesto de trabajo','horas']
myterm.langIn='es'
lang="de, en, nl"
myterm.langOut=lang.split(', ')
test=wikidataCode.enrich_term_wikidata(myterm)
print(test)
'''
iloCode.get_uri(myterm)
iloCode.get_synonyms(myterm)
iloCode.get_translations(myterm)
iloCode.get_relations(myterm)
print(myterm.ilo_id)
print(myterm.synonyms_ilo)
print(myterm.translations_ilo)
print(myterm.ilo_relations)
term_in = myterm.term
lang_in = myterm.langIn
synonyms = "trabajador, asistente social, manzana"
relvaltest=relvalCode.main(term_in, lang_in, synonyms)
print(relvaltest)
'''
# test=relvalCode.get_conceptNet_synonyms(myterm)
# print(test)
# iate_enriching_terms_withTERM(myterm,corpus)
#result = iateCode.request_term_to_iate(myterm, langIn, langOut)
# iateCode.request_term_to_iate_withTERM(myterm)
#vectors=['trabajo empresa puesto trabajador', 'otro vector cualquiera']
# test = wsidCode.get_vector_weights(myterm, corpus)
# maxw= iateCode.get_best_vector(myterm, corpus)
# iateCode.retrieve_data_from_best_vector(myterm)
# print(myterm.term)
# print(myterm.synonyms_iate)
# print(myterm.translations_iate)
# print(myterm.definitions_iate)
# eurovocCode.get_uri(myterm)
# eurovocCode.get_relations(myterm)
# eurovocCode.get_synonyms(myterm)
# eurovocCode.get_translations(myterm)
# print(myterm.translations_eurovoc)
# print(myterm.definitions_eurovoc)
# print(myterm.eurovoc_relations)
# unescoCode.get_uri(myterm)
# unescoCode.get_synonyms(myterm)
# unescoCode.get_translations(myterm)
# unescoCode.get_relations(myterm)
# print(myterm.unesco_relations)
# print(myterm.unesco_id)
# print(myterm.translations_unesco)
#wikidataCode.create_wikidata_vectors(myterm)
# wikidataCode.get_vector_weights(myterm, corpus)
# wikidataCode.get_best_vector_id(myterm, corpus)
# wikidataCode.get_langIn_data_from_best_vector(myterm, corpus)
# print(myterm.synonyms_wikidata)
# print(myterm.definitions_wikidata)
# wikidataCode.get_langOut_data_from_best_vector(myterm, corpus)
# wikidataCode.get_relations_from_best_vector(myterm, corpus)
# print(myterm.wikidata_relations)
# thesozCode.get_uri(myterm)
# thesozCode.get_definition(myterm)
# thesozCode.get_relations(myterm)
# thesozCode.get_synonyms(myterm)
# thesozCode.get_translations(myterm)
# stwCode.get_uri(myterm)
# stwCode.get_definition(myterm)
# stwCode.get_relations(myterm)
# stwCode.get_synonyms(myterm)
# stwCode.get_translations(myterm)
'''
# corpus= 'el trabajador estará en su puesto de trabajo durante 24 horas hasta que desfallezca'
# myterm=Term()
# myterm.term='trabajador'
# #terms=['trabajador','puesto de trabajo','horas']
# myterm.langIn='es'
# print(myterm.langIn)
# myterm.langOut=['en']
# iate_enriching_terms(myterm.term,corpus, myterm.langIn, myterm.langOut )
# result = iateCode.request_term_to_iate(myterm.term, myterm.langIn, myterm.langOut)
# vectors=result[1]
# items=result[0]
# response2=result[2]
# '''
# print(doc)
# f=open('doc.json', 'w+')
# f.write(doc)
# f.close()
# #vectors=['trabajo empresa puesto trabajador', 'otro vector cualquiera']
# '''
# test = wsidCode.get_vector_weights(myterm.term, corpus, vectors)
# maxw= iateCode.get_best_vector(vectors, myterm.term, corpus)
# index_max = maxw[1]
# result_item= iateCode.retrieve_data_from_best_vector(response2, index_max, myterm.langOut, myterm.langIn)
# print(result_item)
|
py | 1a3bafecacbe9120c909184982be7df27417c996 |
from .Cleanable import Cleanable
class Cleaner:
INSTANCE = None
def __init__(self):
self.reset()
def register(self, cleanable_object):
if isinstance(cleanable_object, Cleanable):
self._cleanables.append(cleanable_object)
else:
print("Attempted to register a non-Cleanable object: " + str(cleanable_object))
def clean(self):
while len(self._cleanables) > 0:
self._cleanables.pop().clean()
def reset(self):
self._cleanables = []
def getCleaner():
if Cleaner.INSTANCE is None:
Cleaner.INSTANCE = Cleaner()
return Cleaner.INSTANCE
|
py | 1a3baff13b024e2d21dca1658550ed102bfa9bdd | import torch
from torch import nn
from torch.nn import functional as F
from networks.cnn_networks import VGG19
from util.tps_grid_gen import TPSGridGen
class GANLoss(nn.Module):
def __init__(self, gan_mode, target_real_label=1.0, target_fake_label=0.0,
tensor=torch.cuda.FloatTensor):
super(GANLoss, self).__init__()
self.real_label = target_real_label
self.fake_label = target_fake_label
self.real_label_tensor = None
self.fake_label_tensor = None
self.zero_tensor = None
self.Tensor = tensor
self.gan_mode = gan_mode
if self.gan_mode == 'ls':
pass
elif self.gan_mode == 'original':
pass
elif self.gan_mode == 'w':
pass
elif self.gan_mode == 'hinge':
pass
else:
raise ValueError('gan_mode {} not implemented'.format(self.gan_mode))
def get_target_tensor(self, input, target_is_real):
if target_is_real:
if self.real_label_tensor is None:
self.real_label_tensor = self.Tensor(1).fill_(self.real_label)
self.real_label_tensor.requires_grad_(False)
return self.real_label_tensor.expand_as(input)
else:
if self.fake_label_tensor is None:
self.fake_label_tensor = self.Tensor(1).fill_(self.fake_label)
self.fake_label_tensor.requires_grad_(False)
return self.fake_label_tensor.expand_as(input)
def get_zero_tensor(self, input):
if self.zero_tensor is None:
self.zero_tensor = self.Tensor(1).fill_(0)
self.zero_tensor.requires_grad_(False)
return self.zero_tensor.expand_as(input)
def loss(self, input, target_is_real, for_discriminator=True):
if self.gan_mode == 'original': # cross entropy loss
target_tensor = self.get_target_tensor(input, target_is_real)
loss = F.binary_cross_entropy_with_logits(input, target_tensor)
return loss
elif self.gan_mode == 'ls': # mean squared loss
target_tensor = self.get_target_tensor(input, target_is_real)
return F.mse_loss(input, target_tensor)
elif self.gan_mode == 'hinge':
if for_discriminator:
if target_is_real:
minval = torch.min(input - 1, self.get_zero_tensor(input))
loss = -torch.mean(minval)
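                    # equivalent to mean(relu(1 - D(real)))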
else:
minval = torch.min(-input - 1, self.get_zero_tensor(input))
loss = -torch.mean(minval)
else:
assert target_is_real, "The generator's hinge loss must be aiming for real"
loss = -torch.mean(input)
return loss
else:
# wgan
if target_is_real:
return -input.mean()
else:
return input.mean()
def __call__(self, input, target_is_real, for_discriminator=True):
if isinstance(input[0], list):
loss = 0
for input_i in input:
if isinstance(input_i, list):
pred = input_i[-1]
else:
pred = input_i
loss_tensor = self.loss(pred, target_is_real, for_discriminator)
bs = 1 if len(loss_tensor.size()) == 0 else loss_tensor.size(0)
new_loss = torch.mean(loss_tensor.view(bs, -1), dim=1)
loss += new_loss
return loss / len(input)
else:
return self.loss(input, target_is_real, for_discriminator)
class VGGLoss(nn.Module):
def __init__(self):
super(VGGLoss, self).__init__()
self.vgg = VGG19().cuda()
self.criterion = nn.L1Loss()
self.weights = [1.0/32, 1.0/16, 1.0/8, 1.0/4, 1.0]
def forward(self, x, y):
x_vgg, y_vgg = self.vgg(x), self.vgg(y)
loss = 0
for i in range(len(x_vgg)):
loss += self.weights[i] * self.criterion(x_vgg[i], y_vgg[i].detach())
return loss
class ConstraintLoss(nn.Module):
def __init__(self, opt):
super(ConstraintLoss, self).__init__()
self.opt = opt
def get_row(self, coord, num):
sec_dic=[]
for j in range(num):
sum = 0
buffer = 0
flag = False
max = -1
for i in range(num - 1):
differ=(coord[:, j * num + i + 1, :] - coord[:, j * num + i, :]) ** 2
if not flag:
second_dif = 0
flag = True
else:
second_dif = torch.abs(differ - buffer)
sec_dic.append(second_dif)
buffer=differ
sum+=second_dif
return torch.stack(sec_dic,dim=1)
def get_col(self,coor,num):
sec_dic=[]
for i in range(num):
sum = 0
buffer = 0
flag = False
max = -1
for j in range(num - 1):
differ = (coor[:, (j+1) * num + i , :] - coor[:, j * num + i, :]) ** 2
if not flag:
second_dif = 0
flag = True
else:
second_dif = torch.abs(differ-buffer)
sec_dic.append(second_dif)
buffer = differ
sum += second_dif
return torch.stack(sec_dic,dim=1)
def grad_row(self, coor, num):
sec_term = []
for j in range(num):
for i in range(1, num - 1):
x0, y0 = coor[:, j * num + i - 1, :][0]
x1, y1 = coor[:, j * num + i + 0, :][0]
x2, y2 = coor[:, j * num + i + 1, :][0]
grad = torch.abs((y1 - y0) * (x1 - x2) - (y1 - y2) * (x1 - x0))
sec_term.append(grad)
return sec_term
def grad_col(self, coor, num):
sec_term = []
for i in range(num):
for j in range(1, num - 1):
x0, y0 = coor[:, (j - 1) * num + i, :][0]
x1, y1 = coor[:, j * num + i, :][0]
x2, y2 = coor[:, (j + 1) * num + i, :][0]
grad = torch.abs((y1 - y0) * (x1 - x2) - (y1 - y2) * (x1 - x0))
sec_term.append(grad)
return sec_term
def forward(self, theta):
row = self.get_row(theta, self.opt['grid_size'])
col = self.get_col(theta, self.opt['grid_size'])
rg_loss = sum(self.grad_row(theta, self.opt['grid_size']))
cg_loss = sum(self.grad_col(theta, self.opt['grid_size']))
rg_loss = torch.max(rg_loss, torch.tensor(0.02).cuda())
cg_loss = torch.max(cg_loss, torch.tensor(0.02).cuda())
rx, ry, cx, cy = torch.tensor(0.08).cuda(), torch.tensor(0.08).cuda() \
, torch.tensor(0.08).cuda(), torch.tensor(0.08).cuda()
row_x, row_y = row[:, :, 0], row[:, :, 1]
col_x, col_y = col[:, :, 0], col[:, :, 1]
rx_loss = torch.max(rx, row_x).mean()
ry_loss = torch.max(ry, row_y).mean()
cx_loss = torch.max(cx, col_x).mean()
cy_loss = torch.max(cy, col_y).mean()
return rx_loss + ry_loss + cx_loss + cy_loss + rg_loss + cg_loss
class AlignmentLoss(nn.Module):
def __init__(self, opt):
super(AlignmentLoss, self).__init__()
self.opt = opt
self.tps = TPSGridGen(self.opt)
def forward(self, theta, pose_kp, img_kp, c_kp):
        loss = self.tps.apply_transformation(theta, c_kp)
return loss |
py | 1a3bb119592a6b2666d365a796ee56fccb34813e | from django.urls import path
from . import views
urlpatterns = [
path('',views.getRoutes, name="routes"),
path('products/',views.getProducts, name="products"),
path('products/<str:pk>/',views.getProduct, name="product")
] |
py | 1a3bb11963a75bfe61cb2e3117f91d329ef4caaf | import typing
from ...core import Function, I, Integer, Rational, cacheit, nan, oo, pi, zoo
from ...core.function import ArgumentIndexError, _coeff_isneg
from ...core.sympify import sympify
from ..combinatorial.factorials import RisingFactorial, factorial
from .exponential import exp, log
from .miscellaneous import sqrt
def _rewrite_hyperbolics_as_exp(expr):
expr = sympify(expr)
return expr.xreplace({h: h.rewrite(exp)
for h in expr.atoms(HyperbolicFunction)})
###############################################################################
# ######################### HYPERBOLIC FUNCTIONS ############################ #
###############################################################################
class HyperbolicFunction(Function):
"""
Base class for hyperbolic functions.
See Also
========
diofant.functions.elementary.hyperbolic.sinh
diofant.functions.elementary.hyperbolic.cosh
diofant.functions.elementary.hyperbolic.tanh
diofant.functions.elementary.hyperbolic.coth
"""
unbranched = True
class sinh(HyperbolicFunction):
r"""
The hyperbolic sine function, `\frac{e^x - e^{-x}}{2}`.
* sinh(x) -> Returns the hyperbolic sine of x
See Also
========
diofant.functions.elementary.hyperbolic.cosh
diofant.functions.elementary.hyperbolic.tanh
diofant.functions.elementary.hyperbolic.asinh
"""
def fdiff(self, argindex=1):
"""Returns the first derivative of this function."""
if argindex == 1:
return cosh(self.args[0])
else:
raise ArgumentIndexError(self, argindex)
def inverse(self, argindex=1):
"""Returns the inverse of this function."""
return asinh
@classmethod
def eval(cls, arg):
from .trigonometric import sin
arg = sympify(arg)
if arg.is_Number:
if arg in (oo, -oo, 0):
return arg
elif arg.is_negative:
return -cls(-arg)
else:
if arg is zoo:
return nan
i_coeff = arg.as_coefficient(I)
if i_coeff is not None:
return I * sin(i_coeff)
else:
if _coeff_isneg(arg):
return -cls(-arg)
if arg.func == asinh:
return arg.args[0]
if arg.func == acosh:
x = arg.args[0]
return sqrt(x - 1) * sqrt(x + 1)
if arg.func == atanh:
x = arg.args[0]
return x/sqrt(1 - x**2)
if arg.func == acoth:
x = arg.args[0]
return 1/(sqrt(x - 1) * sqrt(x + 1))
@staticmethod
@cacheit
def taylor_term(n, x, *previous_terms):
"""Returns the next term in the Taylor series expansion."""
if n < 0 or n % 2 == 0:
return Integer(0)
else:
x = sympify(x)
if len(previous_terms) >= 2:
p = previous_terms[-2]
return p * x**2 / (n*(n - 1))
else:
return x**n / factorial(n)
def _eval_conjugate(self):
return self.func(self.args[0].conjugate())
def as_real_imag(self, deep=True, **hints):
"""Returns this function as a complex coordinate."""
from .trigonometric import cos, sin
if self.args[0].is_extended_real:
if deep:
hints['complex'] = False
return self.expand(deep, **hints), Integer(0)
else:
return self, Integer(0)
if deep:
re, im = self.args[0].expand(deep, **hints).as_real_imag()
else:
re, im = self.args[0].as_real_imag()
return sinh(re)*cos(im), cosh(re)*sin(im)
def _eval_expand_complex(self, deep=True, **hints):
re_part, im_part = self.as_real_imag(deep=deep, **hints)
return re_part + im_part*I
def _eval_expand_trig(self, **hints):
arg = self.args[0]
x = None
if arg.is_Add: # TODO, implement more if deep stuff here
x, y = arg.as_two_terms()
else:
coeff, terms = arg.as_coeff_Mul(rational=True)
if coeff != 1 and coeff.is_Integer and terms != 1:
x = terms
y = (coeff - 1)*x
if x is not None:
return (sinh(x)*cosh(y) + sinh(y)*cosh(x)).expand(trig=True)
return sinh(arg)
def _eval_rewrite_as_tractable(self, arg):
return (exp(arg) - exp(-arg)) / 2
def _eval_rewrite_as_exp(self, arg):
return (exp(arg) - exp(-arg)) / 2
def _eval_rewrite_as_cosh(self, arg):
return -I*cosh(arg + pi*I/2)
def _eval_rewrite_as_tanh(self, arg):
tanh_half = tanh(arg/2)
return 2*tanh_half/(1 - tanh_half**2)
def _eval_rewrite_as_coth(self, arg):
coth_half = coth(arg/2)
return 2*coth_half/(coth_half**2 - 1)
def _eval_as_leading_term(self, x):
from ...series import Order
arg = self.args[0].as_leading_term(x)
if x in arg.free_symbols and Order(1, x).contains(arg):
return arg
else:
return self.func(arg)
def _eval_is_extended_real(self):
if self.args[0].is_extended_real:
return True
def _eval_is_finite(self):
if self.args[0].is_imaginary:
return True
class cosh(HyperbolicFunction):
r"""
The hyperbolic cosine function, `\frac{e^x + e^{-x}}{2}`.
* cosh(x) -> Returns the hyperbolic cosine of x
See Also
========
diofant.functions.elementary.hyperbolic.sinh
diofant.functions.elementary.hyperbolic.tanh
diofant.functions.elementary.hyperbolic.acosh
"""
def fdiff(self, argindex=1):
if argindex == 1:
return sinh(self.args[0])
else:
raise ArgumentIndexError(self, argindex)
@classmethod
def eval(cls, arg):
from .trigonometric import cos
arg = sympify(arg)
if arg.is_Number:
if arg in (oo, -oo):
return oo
elif arg == 0:
return Integer(1)
elif arg.is_negative:
return cls(-arg)
else:
if arg is zoo:
return nan
i_coeff = arg.as_coefficient(I)
if i_coeff is not None:
return cos(i_coeff)
else:
if _coeff_isneg(arg):
return cls(-arg)
if arg.func == asinh:
return sqrt(1 + arg.args[0]**2)
if arg.func == acosh:
return arg.args[0]
if arg.func == atanh:
return 1/sqrt(1 - arg.args[0]**2)
if arg.func == acoth:
x = arg.args[0]
return x/(sqrt(x - 1) * sqrt(x + 1))
@staticmethod
@cacheit
def taylor_term(n, x, *previous_terms):
if n < 0 or n % 2 == 1:
return Integer(0)
else:
x = sympify(x)
if len(previous_terms) >= 2:
p = previous_terms[-2]
return p * x**2 / (n*(n - 1))
else:
return x**n/factorial(n)
def _eval_conjugate(self):
return self.func(self.args[0].conjugate())
def as_real_imag(self, deep=True, **hints):
from .trigonometric import cos, sin
if self.args[0].is_extended_real:
if deep:
hints['complex'] = False
return self.expand(deep, **hints), Integer(0)
else:
return self, Integer(0)
if deep:
re, im = self.args[0].expand(deep, **hints).as_real_imag()
else:
re, im = self.args[0].as_real_imag()
return cosh(re)*cos(im), sinh(re)*sin(im)
def _eval_expand_complex(self, deep=True, **hints):
re_part, im_part = self.as_real_imag(deep=deep, **hints)
return re_part + im_part*I
def _eval_expand_trig(self, deep=True, **hints):
arg = self.args[0]
x = None
if arg.is_Add: # TODO, implement more if deep stuff here
x, y = arg.as_two_terms()
else:
coeff, terms = arg.as_coeff_Mul(rational=True)
if coeff != 1 and coeff.is_Integer and terms != 1:
x = terms
y = (coeff - 1)*x
if x is not None:
return (cosh(x)*cosh(y) + sinh(x)*sinh(y)).expand(trig=True)
return cosh(arg)
def _eval_rewrite_as_tractable(self, arg):
return (exp(arg) + exp(-arg)) / 2
def _eval_rewrite_as_exp(self, arg):
return (exp(arg) + exp(-arg)) / 2
def _eval_rewrite_as_sinh(self, arg):
return -I*sinh(arg + pi*I/2)
def _eval_rewrite_as_tanh(self, arg):
tanh_half = tanh(arg/2)**2
return (1 + tanh_half)/(1 - tanh_half)
def _eval_rewrite_as_coth(self, arg):
coth_half = coth(arg/2)**2
return (coth_half + 1)/(coth_half - 1)
def _eval_as_leading_term(self, x):
from ...series import Order
arg = self.args[0].as_leading_term(x)
if x in arg.free_symbols and Order(1, x).contains(arg):
return Integer(1)
else:
return self.func(arg)
def _eval_is_extended_real(self):
if self.args[0].is_extended_real:
return True
def _eval_is_finite(self):
if self.args[0].is_imaginary:
return True
class tanh(HyperbolicFunction):
r"""
The hyperbolic tangent function, `\frac{\sinh(x)}{\cosh(x)}`.
* tanh(x) -> Returns the hyperbolic tangent of x
See Also
========
diofant.functions.elementary.hyperbolic.sinh
diofant.functions.elementary.hyperbolic.cosh
diofant.functions.elementary.hyperbolic.atanh
"""
def fdiff(self, argindex=1):
if argindex == 1:
return 1 - tanh(self.args[0])**2
else:
raise ArgumentIndexError(self, argindex)
def inverse(self, argindex=1):
"""Returns the inverse of this function."""
return atanh
@classmethod
def eval(cls, arg):
from .trigonometric import tan
arg = sympify(arg)
if arg.is_Number:
if arg is oo:
return Integer(1)
elif arg == -oo:
return Integer(-1)
elif arg == 0:
return Integer(0)
elif arg.is_negative:
return -cls(-arg)
else:
if arg is zoo:
return nan
i_coeff = arg.as_coefficient(I)
if i_coeff is not None:
if _coeff_isneg(i_coeff):
return -I * tan(-i_coeff)
return I * tan(i_coeff)
else:
if _coeff_isneg(arg):
return -cls(-arg)
if arg.func == asinh:
x = arg.args[0]
return x/sqrt(1 + x**2)
if arg.func == acosh:
x = arg.args[0]
return sqrt(x - 1) * sqrt(x + 1) / x
if arg.func == atanh:
return arg.args[0]
if arg.func == acoth:
return 1/arg.args[0]
@staticmethod
@cacheit
def taylor_term(n, x, *previous_terms):
from .. import bernoulli
if n < 0 or n % 2 == 0:
return Integer(0)
else:
x = sympify(x)
a = 2**(n + 1)
B = bernoulli(n + 1)
F = factorial(n + 1)
return a*(a - 1) * B/F * x**n
def _eval_conjugate(self):
return self.func(self.args[0].conjugate())
def as_real_imag(self, deep=True, **hints):
from .trigonometric import cos, sin
if self.args[0].is_extended_real:
if deep:
hints['complex'] = False
return self.expand(deep, **hints), Integer(0)
else:
return self, Integer(0)
if deep:
re, im = self.args[0].expand(deep, **hints).as_real_imag()
else:
re, im = self.args[0].as_real_imag()
denom = sinh(re)**2 + cos(im)**2
return sinh(re)*cosh(re)/denom, sin(im)*cos(im)/denom
def _eval_rewrite_as_tractable(self, arg):
neg_exp, pos_exp = exp(-arg), exp(arg)
return (pos_exp - neg_exp)/(pos_exp + neg_exp)
def _eval_rewrite_as_exp(self, arg):
neg_exp, pos_exp = exp(-arg), exp(arg)
return (pos_exp - neg_exp)/(pos_exp + neg_exp)
def _eval_rewrite_as_sinh(self, arg):
return I*sinh(arg)/sinh(pi*I/2 - arg)
def _eval_rewrite_as_cosh(self, arg):
return I*cosh(pi*I/2 - arg)/cosh(arg)
def _eval_rewrite_as_coth(self, arg):
return 1/coth(arg)
def _eval_as_leading_term(self, x):
from ...series import Order
arg = self.args[0].as_leading_term(x)
if x in arg.free_symbols and Order(1, x).contains(arg):
return arg
else:
return self.func(arg)
def _eval_is_extended_real(self):
if self.args[0].is_extended_real:
return True
def _eval_is_finite(self):
if self.args[0].is_extended_real:
return True
class coth(HyperbolicFunction):
r"""
The hyperbolic cotangent function, `\frac{\cosh(x)}{\sinh(x)}`.
* coth(x) -> Returns the hyperbolic cotangent of x
"""
def fdiff(self, argindex=1):
if argindex == 1:
return -1/sinh(self.args[0])**2
else:
raise ArgumentIndexError(self, argindex)
def inverse(self, argindex=1):
"""Returns the inverse of this function."""
return acoth
@classmethod
def eval(cls, arg):
from .trigonometric import cot
arg = sympify(arg)
if arg.is_Number:
if arg is oo:
return Integer(1)
elif arg == -oo:
return Integer(-1)
elif arg == 0:
return zoo
elif arg.is_negative:
return -cls(-arg)
else:
if arg is zoo:
return nan
i_coeff = arg.as_coefficient(I)
if i_coeff is not None:
if _coeff_isneg(i_coeff):
return I * cot(-i_coeff)
return -I * cot(i_coeff)
else:
if _coeff_isneg(arg):
return -cls(-arg)
if arg.func == asinh:
x = arg.args[0]
return sqrt(1 + x**2)/x
if arg.func == acosh:
x = arg.args[0]
return x/(sqrt(x - 1) * sqrt(x + 1))
if arg.func == atanh:
return 1/arg.args[0]
if arg.func == acoth:
return arg.args[0]
@staticmethod
@cacheit
def taylor_term(n, x, *previous_terms):
from .. import bernoulli
if n == 0:
return 1 / sympify(x)
elif n < 0 or n % 2 == 0:
return Integer(0)
else:
x = sympify(x)
B = bernoulli(n + 1)
F = factorial(n + 1)
return 2**(n + 1) * B/F * x**n
def _eval_conjugate(self):
return self.func(self.args[0].conjugate())
def as_real_imag(self, deep=True, **hints):
from .trigonometric import cos, sin
if self.args[0].is_extended_real:
if deep:
hints['complex'] = False
return self.expand(deep, **hints), Integer(0)
else:
return self, Integer(0)
if deep:
re, im = self.args[0].expand(deep, **hints).as_real_imag()
else:
re, im = self.args[0].as_real_imag()
denom = sinh(re)**2 + sin(im)**2
return sinh(re)*cosh(re)/denom, -sin(im)*cos(im)/denom
def _eval_rewrite_as_tractable(self, arg):
neg_exp, pos_exp = exp(-arg), exp(arg)
return (pos_exp + neg_exp)/(pos_exp - neg_exp)
def _eval_rewrite_as_exp(self, arg):
neg_exp, pos_exp = exp(-arg), exp(arg)
return (pos_exp + neg_exp)/(pos_exp - neg_exp)
def _eval_rewrite_as_sinh(self, arg):
return -I*sinh(pi*I/2 - arg)/sinh(arg)
def _eval_rewrite_as_cosh(self, arg):
return -I*cosh(arg)/cosh(pi*I/2 - arg)
def _eval_rewrite_as_tanh(self, arg):
return 1/tanh(arg)
def _eval_as_leading_term(self, x):
from ...series import Order
arg = self.args[0].as_leading_term(x)
if x in arg.free_symbols and Order(1, x).contains(arg):
return 1/arg
else:
return self.func(arg)
class ReciprocalHyperbolicFunction(HyperbolicFunction):
"""Base class for reciprocal functions of hyperbolic functions."""
# To be defined in class
_reciprocal_of = None
_is_even: typing.Optional[bool] = None
_is_odd: typing.Optional[bool] = None
@classmethod
def eval(cls, arg):
if arg.could_extract_minus_sign():
if cls._is_even:
return cls(-arg)
elif cls._is_odd:
return -cls(-arg)
t = cls._reciprocal_of.eval(arg)
return 1/t if t is not None else t
def _call_reciprocal(self, method_name, *args, **kwargs):
# Calls method_name on _reciprocal_of
o = self._reciprocal_of(self.args[0])
return getattr(o, method_name)(*args, **kwargs)
def _rewrite_reciprocal(self, method_name, arg):
# Special handling for rewrite functions. If reciprocal rewrite returns
# unmodified expression, then return None
t = self._call_reciprocal(method_name, arg)
assert t is not None and t != self._reciprocal_of(arg)
return 1/t
def _eval_rewrite_as_exp(self, arg):
return self._rewrite_reciprocal('_eval_rewrite_as_exp', arg)
def _eval_rewrite_as_tractable(self, arg):
return self._rewrite_reciprocal('_eval_rewrite_as_tractable', arg)
def _eval_rewrite_as_tanh(self, arg):
return self._rewrite_reciprocal('_eval_rewrite_as_tanh', arg)
def _eval_rewrite_as_coth(self, arg):
return self._rewrite_reciprocal('_eval_rewrite_as_coth', arg)
def as_real_imag(self, deep=True, **hints):
return (1 / self._reciprocal_of(self.args[0])).as_real_imag(deep, **hints)
def _eval_conjugate(self):
return self.func(self.args[0].conjugate())
def _eval_expand_complex(self, deep=True, **hints):
re_part, im_part = self.as_real_imag(deep=True, **hints)
return re_part + I*im_part
def _eval_as_leading_term(self, x):
return (1/self._reciprocal_of(self.args[0]))._eval_as_leading_term(x)
def _eval_is_extended_real(self):
return self._reciprocal_of(self.args[0]).is_extended_real
def _eval_is_finite(self):
return (1/self._reciprocal_of(self.args[0])).is_finite
class csch(ReciprocalHyperbolicFunction):
r"""
The hyperbolic cosecant function, `\frac{2}{e^x - e^{-x}}`
* csch(x) -> Returns the hyperbolic cosecant of x
See Also
========
diofant.functions.elementary.hyperbolic.sinh
diofant.functions.elementary.hyperbolic.cosh
diofant.functions.elementary.hyperbolic.tanh
diofant.functions.elementary.hyperbolic.sech
diofant.functions.elementary.hyperbolic.asinh
diofant.functions.elementary.hyperbolic.acosh
"""
_reciprocal_of = sinh
_is_odd = True
def fdiff(self, argindex=1):
"""Returns the first derivative of this function."""
if argindex == 1:
return -coth(self.args[0]) * csch(self.args[0])
else:
raise ArgumentIndexError(self, argindex)
@staticmethod
@cacheit
def taylor_term(n, x, *previous_terms):
"""Returns the next term in the Taylor series expansion."""
from .. import bernoulli
if n == 0:
return 1/sympify(x)
elif n < 0 or n % 2 == 0:
return Integer(0)
else:
x = sympify(x)
B = bernoulli(n + 1)
F = factorial(n + 1)
return 2 * (1 - 2**n) * B/F * x**n
def _eval_rewrite_as_cosh(self, arg):
return I / cosh(arg + I * pi / 2)
class sech(ReciprocalHyperbolicFunction):
r"""
The hyperbolic secant function, `\frac{2}{e^x + e^{-x}}`
* sech(x) -> Returns the hyperbolic secant of x
See Also
========
diofant.functions.elementary.hyperbolic.sinh
diofant.functions.elementary.hyperbolic.cosh
diofant.functions.elementary.hyperbolic.tanh
diofant.functions.elementary.hyperbolic.coth
diofant.functions.elementary.hyperbolic.csch
diofant.functions.elementary.hyperbolic.asinh
diofant.functions.elementary.hyperbolic.acosh
"""
_reciprocal_of = cosh
_is_even = True
def fdiff(self, argindex=1):
if argindex == 1:
return - tanh(self.args[0])*sech(self.args[0])
else:
raise ArgumentIndexError(self, argindex)
@staticmethod
@cacheit
def taylor_term(n, x, *previous_terms):
from ..combinatorial.numbers import euler
if n < 0 or n % 2 == 1:
return Integer(0)
else:
x = sympify(x)
return euler(n) / factorial(n) * x**n
def _eval_rewrite_as_sinh(self, arg):
return I / sinh(arg + I * pi / 2)
###############################################################################
# ########################### HYPERBOLIC INVERSES ########################### #
###############################################################################
class asinh(Function):
"""
The inverse hyperbolic sine function.
* asinh(x) -> Returns the inverse hyperbolic sine of x
See Also
========
diofant.functions.elementary.hyperbolic.cosh
diofant.functions.elementary.hyperbolic.tanh
diofant.functions.elementary.hyperbolic.sinh
"""
def fdiff(self, argindex=1):
if argindex == 1:
return 1/sqrt(self.args[0]**2 + 1)
else:
raise ArgumentIndexError(self, argindex)
@classmethod
def eval(cls, arg):
from .trigonometric import asin
arg = sympify(arg)
if arg.is_Number:
if arg in (oo, -oo, 0):
return arg
elif arg == 1:
return log(sqrt(2) + 1)
elif arg == -1:
return log(sqrt(2) - 1)
elif arg.is_negative:
return -cls(-arg)
else:
if arg is zoo:
return zoo
i_coeff = arg.as_coefficient(I)
if i_coeff is not None:
return I * asin(i_coeff)
else:
if _coeff_isneg(arg):
return -cls(-arg)
@staticmethod
@cacheit
def taylor_term(n, x, *previous_terms):
if n < 0 or n % 2 == 0:
return Integer(0)
else:
x = sympify(x)
if len(previous_terms) >= 2 and n > 2:
p = previous_terms[-2]
return -p * (n - 2)**2/(n*(n - 1)) * x**2
else:
k = (n - 1) // 2
R = RisingFactorial(Rational(1, 2), k)
F = factorial(k)
return (-1)**k * R / F * x**n / n
def _eval_as_leading_term(self, x):
from ...series import Order
arg = self.args[0].as_leading_term(x)
if x in arg.free_symbols and Order(1, x).contains(arg):
return arg
else:
return self.func(arg)
def _eval_rewrite_as_log(self, x):
return log(x + sqrt(x**2 + 1))
def inverse(self, argindex=1):
"""Returns the inverse of this function."""
return sinh
class acosh(Function):
"""
The inverse hyperbolic cosine function.
* acosh(x) -> Returns the inverse hyperbolic cosine of x
See Also
========
diofant.functions.elementary.hyperbolic.asinh
diofant.functions.elementary.hyperbolic.atanh
diofant.functions.elementary.hyperbolic.cosh
"""
def fdiff(self, argindex=1):
if argindex == 1:
return 1/sqrt(self.args[0]**2 - 1)
else:
raise ArgumentIndexError(self, argindex)
@classmethod
def eval(cls, arg):
arg = sympify(arg)
if arg.is_Number:
if arg in (oo, -oo):
return oo
elif arg == 0:
return pi*I / 2
elif arg == 1:
return Integer(0)
elif arg == -1:
return pi*I
if arg.is_number:
cst_table = {
I: log(I*(1 + sqrt(2))),
-I: log(-I*(1 + sqrt(2))),
Rational(+1, 2): pi/3,
Rational(-1, 2): 2*pi/3,
sqrt(2)/2: pi/4,
-sqrt(2)/2: 3*pi/4,
1/sqrt(2): pi/4,
-1/sqrt(2): 3*pi/4,
sqrt(3)/2: pi/6,
-sqrt(3)/2: 5*pi/6,
(sqrt(3) - 1)/sqrt(2**3): 5*pi/12,
-(sqrt(3) - 1)/sqrt(2**3): 7*pi/12,
sqrt(2 + sqrt(2))/2: pi/8,
-sqrt(2 + sqrt(2))/2: 7*pi/8,
sqrt(2 - sqrt(2))/2: 3*pi/8,
-sqrt(2 - sqrt(2))/2: 5*pi/8,
(1 + sqrt(3))/(2*sqrt(2)): pi/12,
-(1 + sqrt(3))/(2*sqrt(2)): 11*pi/12,
(sqrt(5) + 1)/4: pi/5,
-(sqrt(5) + 1)/4: 4*pi/5
}
if arg in cst_table:
if arg.is_extended_real:
return cst_table[arg]*I
return cst_table[arg]
if arg.is_infinite:
return oo
@staticmethod
@cacheit
def taylor_term(n, x, *previous_terms):
if n == 0:
return pi*I / 2
elif n < 0 or n % 2 == 0:
return Integer(0)
else:
x = sympify(x)
if len(previous_terms) >= 2 and n > 2:
p = previous_terms[-2]
return p * (n - 2)**2/(n*(n - 1)) * x**2
else:
k = (n - 1) // 2
R = RisingFactorial(Rational(1, 2), k)
F = factorial(k)
return -R / F * I * x**n / n
def _eval_as_leading_term(self, x):
from ...series import Order
arg = self.args[0].as_leading_term(x)
if x in arg.free_symbols and Order(1, x).contains(arg):
return I*pi/2
else:
return self.func(arg)
def inverse(self, argindex=1):
"""Returns the inverse of this function."""
return cosh
def _eval_rewrite_as_log(self, x):
return log(x + sqrt(x - 1)*sqrt(x + 1))
def _eval_nseries(self, x, n, logx):
x0 = self.args[0].limit(x, 0)
if x0 == 1:
return self._eval_rewrite_as_log(self.args[0])._eval_nseries(x, n, logx)
else:
return super()._eval_nseries(x, n, logx)
class atanh(Function):
"""
The inverse hyperbolic tangent function.
* atanh(x) -> Returns the inverse hyperbolic tangent of x
See Also
========
diofant.functions.elementary.hyperbolic.asinh
diofant.functions.elementary.hyperbolic.acosh
diofant.functions.elementary.hyperbolic.tanh
"""
def fdiff(self, argindex=1):
if argindex == 1:
return 1/(1 - self.args[0]**2)
else:
raise ArgumentIndexError(self, argindex)
@classmethod
def eval(cls, arg):
from .trigonometric import atan
arg = sympify(arg)
if arg.is_Number:
if arg == 0:
return Integer(0)
elif arg == 1:
return oo
elif arg == -1:
return -oo
elif arg is oo:
return -I * atan(arg)
elif arg == -oo:
return I * atan(-arg)
elif arg.is_negative:
return -cls(-arg)
else:
if arg is zoo:
return nan
i_coeff = arg.as_coefficient(I)
if i_coeff is not None:
return I * atan(i_coeff)
else:
if _coeff_isneg(arg):
return -cls(-arg)
@staticmethod
@cacheit
def taylor_term(n, x, *previous_terms):
if n < 0 or n % 2 == 0:
return Integer(0)
else:
x = sympify(x)
return x**n / n
def _eval_as_leading_term(self, x):
from ...series import Order
arg = self.args[0].as_leading_term(x)
if x in arg.free_symbols and Order(1, x).contains(arg):
return arg
else:
return self.func(arg)
def inverse(self, argindex=1):
"""Returns the inverse of this function."""
return tanh
class acoth(Function):
"""
The inverse hyperbolic cotangent function.
* acoth(x) -> Returns the inverse hyperbolic cotangent of x
"""
def fdiff(self, argindex=1):
if argindex == 1:
return 1/(1 - self.args[0]**2)
else:
raise ArgumentIndexError(self, argindex)
@classmethod
def eval(cls, arg):
from .trigonometric import acot
arg = sympify(arg)
if arg.is_Number:
if arg in (oo, -oo):
return Integer(0)
elif arg == 0:
return pi*I / 2
elif arg == 1:
return oo
elif arg == -1:
return -oo
elif arg.is_negative:
return -cls(-arg)
else:
if arg is zoo:
return 0
i_coeff = arg.as_coefficient(I)
if i_coeff is not None:
return -I * acot(i_coeff)
else:
if _coeff_isneg(arg):
return -cls(-arg)
@staticmethod
@cacheit
def taylor_term(n, x, *previous_terms):
if n == 0:
return pi*I / 2
elif n < 0 or n % 2 == 0:
return Integer(0)
else:
x = sympify(x)
return x**n / n
def _eval_as_leading_term(self, x):
from ...series import Order
arg = self.args[0].as_leading_term(x)
if x in arg.free_symbols and Order(1, x).contains(arg):
return I*pi/2
else:
return self.func(arg)
def inverse(self, argindex=1):
"""Returns the inverse of this function."""
return coth
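# Illustrative usage sketch (not part of the original module; kept as comments
# to avoid importing diofant inside its own source). The values follow the
# eval()/rewrite rules defined above.
#
#     >>> from diofant import Symbol, I, exp, sinh, cosh, tanh
#     >>> x = Symbol('x')
#     >>> sinh(0), cosh(0), tanh(0)
#     (0, 1, 0)
#     >>> sinh(x).rewrite(exp)   # (exp(x) - exp(-x))/2, per sinh._eval_rewrite_as_exp
#     >>> sinh(I*x)              # imaginary arguments map to I*sin(x), per sinh.eval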
|
py | 1a3bb19465591e84e7e60b2da029b7729ab16665 | #!/usr/bin/env python2
## zip archive frontend for git-fast-import
##
## For example:
##
## mkdir project; cd project; git init
## python import-zips.py *.zip
## git log --stat import-zips
from os import popen, path
from sys import argv, exit, hexversion, stderr
from time import mktime
from zipfile import ZipFile
if hexversion < 0x01060000:
# The limiter is the zipfile module
stderr.write("import-zips.py: requires Python 1.6.0 or later.\n")
exit(1)
if len(argv) < 2:
print 'usage:', argv[0], '<zipfile>...'
exit(1)
branch_ref = 'refs/heads/import-zips'
committer_name = 'Z Ip Creator'
committer_email = '[email protected]'
fast_import = popen('git fast-import --quiet', 'w')
def printlines(list):
for str in list:
fast_import.write(str + "\n")
for zipfile in argv[1:]:
commit_time = 0
next_mark = 1
common_prefix = None
mark = dict()
zip = ZipFile(zipfile, 'r')
for name in zip.namelist():
if name.endswith('/'):
continue
info = zip.getinfo(name)
if commit_time < info.date_time:
commit_time = info.date_time
if common_prefix == None:
common_prefix = name[:name.rfind('/') + 1]
else:
while not name.startswith(common_prefix):
last_slash = common_prefix[:-1].rfind('/') + 1
common_prefix = common_prefix[:last_slash]
mark[name] = ':' + str(next_mark)
next_mark += 1
printlines(('blob', 'mark ' + mark[name], \
'data ' + str(info.file_size)))
fast_import.write(zip.read(name) + "\n")
committer = committer_name + ' <' + committer_email + '> %d +0000' % \
mktime(commit_time + (0, 0, 0))
printlines(('commit ' + branch_ref, 'committer ' + committer, \
'data <<EOM', 'Imported from ' + zipfile + '.', 'EOM', \
'', 'deleteall'))
for name in mark.keys():
fast_import.write('M 100644 ' + mark[name] + ' ' +
name[len(common_prefix):] + "\n")
printlines(('', 'tag ' + path.basename(zipfile), \
'from ' + branch_ref, 'tagger ' + committer, \
'data <<EOM', 'Package ' + zipfile, 'EOM', ''))
if fast_import.close():
exit(1)
|
py | 1a3bb19553e6a279f28045cbd81677a5fbc25c61 | import unittest
def join_left(left_set, right_set):
return _nested_join(left_set, right_set, _compose_left)
def _compose_left(key, primary, secondary):
return key, primary, secondary
def join_right(left_set, right_set):
return _nested_join(right_set, left_set, _compose_right)
def _compose_right(key, primary, secondary):
return key, secondary, primary
def _nested_join(primary_set, secondary_set, compose):
return {compose(key, val, _retrieve_val_from_set_by_key(key, secondary_set)) for (key, val) in primary_set}
def _retrieve_val_from_set_by_key(key, secondary_set):
return next(_extract_value(key, secondary_set), None)
def _extract_value(key, secondary_set):
return map(lambda pair: pair[1], _find_pair_in_set(secondary_set, key))
def _find_pair_in_set(secondary_set, key):
return filter(lambda pair: pair[0] == key, secondary_set)
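# Quick illustration (mirrors the unit tests below): rows are (key, value)
# pairs and the side without a matching key is padded with None.
#
#     join_left({(1, 'l1')}, {(1, 'r1'), (2, 'r2')})
#     # -> {(1, 'l1', 'r1')}
#     join_right({(1, 'l1')}, {(1, 'r1'), (2, 'r2')})
#     # -> {(1, 'l1', 'r1'), (2, None, 'r2')}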
class NestedJoinsTest(unittest.TestCase):
def testResultSetIsEmpty_whenJoinLeftTwoEmptySets(self):
self.assertEqual(set(), join_left(set(), set()))
def testResultSetHasNonesOnRight_whenJoinLeftNonemptyAndEmptySets(self):
self.assertEqual(
{(1, 'l1', None), (2, 'l2', None), (3, 'l3', None)},
join_left(
{(1, 'l1'), (2, 'l2'), (3, 'l3')},
set()
)
)
def testResultSetHasValues_forCorrespondingKeys_whenJoinLeftTwoNonemptySets(self):
self.assertEqual(
{(1, 'l1', 'r1'), (2, 'l2', None), (3, 'l3', 'r3')},
join_left(
{(1, 'l1'), (2, 'l2'), (3, 'l3')},
{(1, 'r1'), (3, 'r3')}
)
)
def testResultSetIsEmpty_whenJoinRightTwoEmptySets(self):
self.assertEqual(set(), join_right(set(), set()))
def testResultSetHasNonesOnLeft_whenJoinRightNonemptyAndEmptySets(self):
self.assertEqual(
{(1, None, 'r1'), (2, None, 'r2'), (3, None, 'r3')},
join_right(
set(),
{(1, 'r1'), (2, 'r2'), (3, 'r3')}
)
)
def testResultSetHasValuesOnLeft_forCorrespondingKeys_whenJoinRightTwoNonemptySets(self):
self.assertEqual(
{(1, 'l1', 'r1'), (2, None, 'r2'), (3, 'l3', 'r3')},
join_right(
{(1, 'l1'), (3, 'l3')},
{(1, 'r1'), (2, 'r2'), (3, 'r3')}
)
)
|
py | 1a3bb1ae9dc0f1d7b7af0a557d269fb4a4deb533 | # !/usr/bin/env python3
# /-*- coding: UTF-8 -*-
from math import prod
if __name__ == "__main__":
lst = list(map(int, input().split()))
lst = prod([int(a) for a in lst if a > 0])
print(lst)
|
py | 1a3bb219c363d7fc8764650dcb76429b236aeee7 | from typing import Optional
from parse import parse
def parse_github_org_name(org_url: str) -> Optional[str]:
"""
Get org name from a github url
"https://github.com/os3224" -> "os3224"
"""
r = parse("https://github.com/{}", org_url)
if r is None:
return ""
    # the single {} capture is field 0 of the parse result
    return r[0].strip().rstrip("/")
def parse_github_repo_name(repo_url: str) -> Optional[str]:
"""
Get github repo name from https url.
parse_github_repo_name("https://github.com/GusSand/Anubis")
-> "Anubis"
:param repo_url:
:return:
"""
r = parse("https://github.com/{}/{}", repo_url)
if r is None:
return ""
return r[1]
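# Minimal usage sketch; the URLs below are the illustrative examples from the
# docstrings above and assume the plain https://github.com/<org>[/<repo>] form.
if __name__ == "__main__":
    print(parse_github_org_name("https://github.com/os3224"))           # os3224
    print(parse_github_repo_name("https://github.com/GusSand/Anubis"))  # Anubis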
|
py | 1a3bb2257dadaeb82fb70d831e7b4ba2a72a7ff0 | #!/usr/bin/env python
import sys
import math
import time
import asyncio
import logging
import unittest
from os.path import join, realpath
from typing import Dict, Optional, List
from hummingbot.core.event.event_logger import EventLogger
from hummingbot.core.event.events import OrderBookEvent, OrderBookTradeEvent, TradeType
from hummingbot.connector.exchange.peatio.peatio_order_book_tracker import PeatioOrderBookTracker
from hummingbot.connector.exchange.peatio.peatio_api_order_book_data_source import PeatioAPIOrderBookDataSource
from hummingbot.core.data_type.order_book import OrderBook
from hummingbot.logger.struct_logger import METRICS_LOG_LEVEL
sys.path.insert(0, realpath(join(__file__, "../../../../../")))
logging.basicConfig(level=METRICS_LOG_LEVEL)
class PeatioOrderBookTrackerUnitTest(unittest.TestCase):
order_book_tracker: Optional[PeatioOrderBookTracker] = None
events: List[OrderBookEvent] = [
OrderBookEvent.TradeEvent
]
trading_pairs: List[str] = [
"BTC-USDT",
"ROGER-BTC",
]
@classmethod
def setUpClass(cls):
cls.ev_loop: asyncio.BaseEventLoop = asyncio.get_event_loop()
cls.order_book_tracker: PeatioOrderBookTracker = PeatioOrderBookTracker(cls.trading_pairs)
cls.order_book_tracker.start()
cls.ev_loop.run_until_complete(cls.wait_til_tracker_ready())
@classmethod
async def wait_til_tracker_ready(cls):
while True:
if len(cls.order_book_tracker.order_books) > 0:
print("Initialized real-time order books.")
return
await asyncio.sleep(1)
async def run_parallel_async(self, *tasks, timeout=None):
future: asyncio.Future = asyncio.ensure_future(asyncio.gather(*tasks))
timer = 0
while not future.done():
if timeout and timer > timeout:
raise Exception("Timeout running parallel async tasks in tests")
timer += 1
now = time.time()
_next_iteration = now // 1.0 + 1 # noqa: F841
await asyncio.sleep(1.0)
return future.result()
def run_parallel(self, *tasks):
return self.ev_loop.run_until_complete(self.run_parallel_async(*tasks))
def setUp(self):
self.event_logger = EventLogger()
for event_tag in self.events:
for trading_pair, order_book in self.order_book_tracker.order_books.items():
order_book.add_listener(event_tag, self.event_logger)
def test_order_book_trade_event_emission(self):
"""
Tests if the order book tracker is able to retrieve order book trade message from exchange and emit order book
trade events after correctly parsing the trade messages
"""
self.run_parallel(self.event_logger.wait_for(OrderBookTradeEvent))
print("\nRetrieved trade events.")
for ob_trade_event in self.event_logger.event_log:
self.assertTrue(type(ob_trade_event) == OrderBookTradeEvent)
self.assertTrue(ob_trade_event.trading_pair in self.trading_pairs)
self.assertTrue(type(ob_trade_event.timestamp) in [float, int])
self.assertTrue(type(ob_trade_event.amount) == float)
self.assertTrue(type(ob_trade_event.price) == float)
self.assertTrue(type(ob_trade_event.type) == TradeType)
# datetime is in seconds
self.assertTrue(math.ceil(math.log10(ob_trade_event.timestamp)) == 10)
self.assertTrue(ob_trade_event.amount > 0)
self.assertTrue(ob_trade_event.price > 0)
def test_tracker_integrity(self):
# Wait 5 seconds to process some diffs.
self.ev_loop.run_until_complete(asyncio.sleep(5.0))
order_books: Dict[str, OrderBook] = self.order_book_tracker.order_books
roger_btc: OrderBook = order_books["ROGER-BTC"]
self.assertIsNot(roger_btc.last_diff_uid, 0)
self.assertGreaterEqual(roger_btc.get_price_for_volume(True, 3000).result_price,
roger_btc.get_price(True))
self.assertLessEqual(roger_btc.get_price_for_volume(False, 3000).result_price,
roger_btc.get_price(False))
def test_api_get_last_traded_prices(self):
prices = self.ev_loop.run_until_complete(
PeatioAPIOrderBookDataSource.get_last_traded_prices(["BTC-USDT", "ROGER-BTC"]))
print("\n")
for key, value in prices.items():
print(f"{key} last_trade_price: {value}")
self.assertGreater(prices["BTC-USDT"], 1000)
self.assertLess(prices["ROGER-BTC"], 1)
|
py | 1a3bb29637288df9921b99bf6854ea0a84e288c5 | """
A Websocket example.
"""
import logging
import pkg_resources
import uvicorn
import bareutils.header as header
from bareasgi import (
Application,
HttpResponse,
text_writer
)
logging.basicConfig(level=logging.DEBUG)
async def index(_request):
"""Redirect to the test page"""
return HttpResponse(303, [(b'Location', b'/websocket_page')])
async def websocket_page(request):
"""Send the page with the example web socket"""
scheme = 'wss' if request.scope['scheme'] == 'https' else 'ws'
if request.scope['http_version'] in ('2', '2.0'):
authority = header.find_exact(
b':authority', request.scope['headers']).decode('ascii')
else:
host, port = request.scope['server']
authority = f'{host}:{port}'
web_socket_url = f"{scheme}://{authority}/websocket_handler"
print(web_socket_url)
page = request.info['html'].replace('WEB_SOCKET_URL', web_socket_url)
return HttpResponse(200, [(b'content-type', b'text/html')], text_writer(page))
async def websocket_handler(request):
"""The websocket callback handler"""
await request.web_socket.accept()
try:
while True:
text = await request.web_socket.receive()
if text is None:
break
await request.web_socket.send('You said: ' + text)
except Exception as error: # pylint: disable=broad-except
print(error)
await request.web_socket.close()
if __name__ == "__main__":
html_filename = pkg_resources.resource_filename(
__name__, "web_socket.html")
with open(html_filename, 'rt', encoding='utf-8') as file_ptr:
html = file_ptr.read()
app = Application(info=dict(html=html))
app.http_router.add({'GET'}, '/', index)
app.http_router.add({'GET'}, '/websocket_page', websocket_page)
app.ws_router.add('/websocket_handler', websocket_handler)
uvicorn.run(app, port=9009)
|
py | 1a3bb2e937eead04243667dba8aa5a0a27ea795a | from rubygems_utils import RubyGemsTestUtils
class RubyGemsTestrubygems_faraday_em_http(RubyGemsTestUtils):
def test_gem_list_rubygems_faraday_em_http(self):
self.gem_is_installed("faraday-em_http")
|
py | 1a3bb355930a9f8d889a28828efec8262140b29e | from celery import shared_task
from apps.employee.models import Employee
@shared_task
def add(x, y):
return x + y
@shared_task
def mul(x, y):
return x * y
@shared_task
def xsum(numbers):
return sum(numbers)
@shared_task
def count_widgets():
return Widget.objects.count()
@shared_task
def rename_widget(widget_id, name):
w = Widget.objects.get(id=widget_id)
w.name = name
w.save()
@shared_task
def Send_report():
total = Employee.objects.all().count()
send_mail(
'Relatório',
f'Relatório geral {total}',
'To',
['From'],
fail_silently=False,
)
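# Usage sketch (illustrative only): with a configured Celery app and a running
# worker, these tasks are queued asynchronously via .delay()/.apply_async().
#
#     add.delay(2, 3)            # returns an AsyncResult for add(2, 3)
#     xsum.delay([1, 2, 3])      # sum computed by the worker
#     Send_report.delay()        # e-mails the Employee count report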
|
py | 1a3bb35f4b37fd2ec316d5857611b13f33bbcb09 | __author__ = 'clarkmatthew'
from simplecli.basemenu import BaseMenu
class Cloud_Services_Menu(BaseMenu):
name = 'cloud_services_menu'
_summary = 'Cloud Services Menu'
_submenus = []
|
py | 1a3bb4c958efc8cc5ca4694bc37e92aa46c2d7dd | import cv2 as cv
import numpy as np
capture = cv.VideoCapture(0)
# check if connected
if capture.isOpened() is False:
print("Error opening camera 0")
exit()
# load model
model = cv.dnn.readNetFromCaffe('deploy.prototxt',
'res10_300x300_ssd_iter_140000_fp16.caffemodel')
# preprocessing
# resize the image to 300x300 and subtract the mean values [104., 117., 123.]
# Define the codec and create VideoWriter object
fourcc = cv.VideoWriter_fourcc(*'XVID')
video_out = cv.VideoWriter('output.avi', fourcc, 20.0, (640, 480))
while capture.isOpened():
# capture frames, if read correctly ret is True
ret, img = capture.read()
if not ret:
print("Didn't receive frame. Stop ")
break
    # write the current frame to the output video
video_out.write(img)
# display frame
h, w = img.shape[:2]
blob = cv.dnn.blobFromImage(img, 1.0, (300, 300), [
104., 117., 123.], False, False)
    # set blob as input and detect faces
model.setInput(blob)
detections = model.forward()
faceCounter = 0
# draw detections above limit confidence > 0.7
for i in range(0, detections.shape[2]):
# confidence
confidence = detections[0, 0, i, 2]
#
if confidence > 0.7:
# face counter
faceCounter += 1
# get coordinates of the current detection
box = detections[0, 0, i, 3:7] * np.array([w, h, w, h])
(x1, y1, x2, y2) = box.astype("int")
# Draw the detection and the confidence:
cv.rectangle(img, (x1, y1), (x2, y2), (255, 0, 0), 3)
text = "{:.3f}%".format(confidence * 100)
y = y1 - 10 if y1 - 10 > 10 else y1 + 10
x = x1 - 10 if x1 - 10 > 10 else x1 + 10
cv.putText(img, text, (x1, y), cv.FONT_HERSHEY_SIMPLEX, 2, (255, 0, 0), 3)
cv.putText(img, "Cute Person", (x1, y2), cv.FONT_HERSHEY_SIMPLEX, 0.8, (0, 255, 255), 1)
cv.imshow("Camera frame", img)
k = cv.waitKey(1)
# check if key is q then exit
if k == ord("q"):
break
capture.release()
video_out.release()
cv.destroyAllWindows()
|
py | 1a3bb4eb44b41ef1da9eb2d70fd0f66dd0c07ffc | #!/usr/bin/env python2
# Copyright (c) 2014-2015 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Test RPC calls related to blockchain state. Tests correspond to code in
# rpcblockchain.cpp.
#
from decimal import Decimal
from test_framework.test_framework import PopbitTestFramework
from test_framework.authproxy import JSONRPCException
from test_framework.util import (
initialize_chain,
assert_equal,
assert_raises,
assert_is_hex_string,
assert_is_hash_string,
start_nodes,
connect_nodes_bi,
)
class BlockchainTest(PopbitTestFramework):
"""
Test blockchain-related RPC calls:
- gettxoutsetinfo
"""
def setup_chain(self):
print("Initializing test directory " + self.options.tmpdir)
initialize_chain(self.options.tmpdir)
def setup_network(self, split=False):
self.nodes = start_nodes(2, self.options.tmpdir)
connect_nodes_bi(self.nodes, 0, 1)
self.is_network_split = False
self.sync_all()
def run_test(self):
self._test_gettxoutsetinfo()
self._test_getblockheader()
def _test_gettxoutsetinfo(self):
node = self.nodes[0]
res = node.gettxoutsetinfo()
assert_equal(res[u'total_amount'], Decimal('8725.00000000'))
assert_equal(res[u'transactions'], 200)
assert_equal(res[u'height'], 200)
assert_equal(res[u'txouts'], 200)
assert_equal(res[u'bytes_serialized'], 13924),
assert_equal(len(res[u'bestblock']), 64)
assert_equal(len(res[u'hash_serialized']), 64)
def _test_getblockheader(self):
node = self.nodes[0]
assert_raises(
JSONRPCException, lambda: node.getblockheader('nonsense'))
besthash = node.getbestblockhash()
secondbesthash = node.getblockhash(199)
header = node.getblockheader(besthash)
assert_equal(header['hash'], besthash)
assert_equal(header['height'], 200)
assert_equal(header['confirmations'], 1)
assert_equal(header['previousblockhash'], secondbesthash)
assert_is_hex_string(header['chainwork'])
assert_is_hash_string(header['hash'])
assert_is_hash_string(header['previousblockhash'])
assert_is_hash_string(header['merkleroot'])
assert_is_hash_string(header['bits'], length=None)
assert isinstance(header['time'], int)
assert isinstance(header['mediantime'], int)
assert isinstance(header['nonce'], int)
assert isinstance(header['version'], int)
assert isinstance(header['difficulty'], Decimal)
if __name__ == '__main__':
BlockchainTest().main()
|
py | 1a3bb5c028bca47381570900c0b6b893e6cca3b0 | #Copyright 2010, Meka Robotics
#All rights reserved.
#http://mekabot.com
#Redistribution and use in source and binary forms, with or without
#modification, are permitted.
#THIS SOFTWARE IS PROVIDED BY THE Copyright HOLDERS AND CONTRIBUTORS
#"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
#LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
#FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
#Copyright OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
#INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES INCLUDING,
#BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
#LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
#CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
#LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
#ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
#POSSIBILITY OF SUCH DAMAGE.
import time
import m3.rt_proxy as m3p
import m3.toolbox as m3t
import m3.component_factory as m3f
import Numeric as nu
import m3.humanoid
# ######################################################
proxy = m3p.M3RtProxy()
proxy.start()
bot_name=m3t.get_robot_name()
if bot_name == "":
print 'Error: no robot components found:', bot_name
bot=m3f.create_component(bot_name)
proxy.publish_param(bot) #allow to set payload
proxy.subscribe_status(bot)
proxy.publish_command(bot)
proxy.make_operational_all()
bot.set_motor_power_on()
chains=bot.get_available_chains()
print 'Select chain'
chains=m3t.user_select_components_interactive(chains,single=True)
for c in chains:
ndof=bot.get_num_dof(c)
bot.set_mode_theta_gc(c)
bot.set_theta_deg(c,[0.0]*ndof)
bot.set_stiffness(c,[0.0]*ndof)
try:
while True:
proxy.step()
for c in chains:
print '---------------------------------------------'
print 'Chain: ',c
print 'Tool Position: (m)',bot.get_tool_position(c)
print 'Theta (Deg): ',bot.get_theta_deg(c)
print 'Tool Velocity (m/S)',bot.get_tool_velocity(c)
time.sleep(0.1)
except (KeyboardInterrupt,EOFError):
proxy.stop()
|
py | 1a3bb5d14890fbf69f6248b13013351ca4d59662 | """add_cca_tail.py - Adds CCA tails to fasta file sequences
================================================================
Purpose
-------
This script adds CCA tails to the RNA chromosomes and remove pseudogenes. It takes fasta files as input and outputs fasta files.
Usage
-----
Options
-------
**
Type::
for command line help.
Command line options
--------------------
"""
import sys
import re
import cgat.FastaIterator as FastaIterator
import cgatcore.iotools as IOTools
import cgatcore.experiment as E
import collections
def main(argv=None):
"""script main.
parses command line options in sys.argv, unless *argv* is given.
"""
if not argv:
argv = sys.argv
# setup command line parser
parser = E.OptionParser(
version="%prog version: $Id$", usage=globals()["__doc__"])
(options, args) = E.start(parser, argv=argv)
if len(args) == 0:
args.append("-")
E.info(options.stdin)
infile = IOTools.open_file(options.stdin.name)
iterator = FastaIterator.FastaIterator(infile)
# outfile_info = IOTools.open_file(options.info_file, "w")
d = collections.OrderedDict()
cluster_dict = dict()
# first iterate over the fasta file and generate a dict
# with the name (title) as the key and the sequence as the value
# Remove any pseudo sequences
for cur_record in iterator:
# This is a temp fix because bedtools getfasta --name seems to have
# changed the way it names the fasta titles. This may be temp but This
# will fix this issue for the time being.
        m = re.match(r"(chr\d+.tRNA\d+-\S+-(pseudo)?)::\S+([+|-])", cur_record.title.replace("(","").replace(")",""))
if m == None:
continue
if m.group(2) == "pseudo":
pass
else:
key = str(m.group(1) + m.group(3))
d[key] = cur_record.sequence
    # next iterate over the dict and give each cluster a number
# this will be used to then map back for the info name
for key, value in d.items():
# Add CCA tail
options.stdout.write((">%s\n%scca\n")%(key, value))
E.stop()
if __name__ == "__main__":
sys.exit(main(sys.argv))
|
py | 1a3bb5dfe3a0e715b69393f4e4049826cb4fb681 | import logging
from comm.ConnectionMonitor import ConnectionMonitor
from events import Events
logger = logging.getLogger(__name__)
class DirectConnectionMonitor(ConnectionMonitor):
"""Null implementation of a ConnectionMonitor that provides access to one device only.
When the scan is initiated, this device is "detected" and never removed thereafter.
Arguments:
connection : Connection
"""
def __init__(self, connection):
super().__init__(connection.name, self._scan_loop)
self._connection = connection
def _scan_loop(self):
self.notify_change(self._connection)
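# Illustrative sketch (assumptions: `SomeConnection` is a hypothetical stand-in
# for any concrete Connection with a `name` attribute, and starting the scan is
# done through the inherited ConnectionMonitor API).
#
#     monitor = DirectConnectionMonitor(SomeConnection(name="dev0"))
#     # once the scan loop runs, the single wrapped connection is reported
#     # via notify_change() and never removed afterwards.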
|
py | 1a3bb762a101de990b51399da1a92d4b274e7520 | import torch
import torchvision
import torch.optim as optim
from torch.autograd import Variable
import os
from .dcgan_model import Generator
from .dcgan_model import Discriminator
from .data_loader import get_dataloader
from .utils import save_model
def train(train_data_folder, val_data_folder, params):
train_data_loader = get_dataloader(train_data_folder, params["batch_size"])
val_data_loader = get_dataloader(val_data_folder, params["batch_size"])
# generator takes in a single channel image and outputs a 3-channel image
generator = Generator(1, 3)
# discriminator takes in a 3-channel image a single value
discriminator = Discriminator(3, 1)
generator.cuda()
discriminator.cuda()
g_optim = optim.Adam(generator.parameters(), lr=params["learning_rate"], betas=(params["beta1"], .999))
d_optim = optim.Adam(discriminator.parameters(), lr=params["learning_rate"], betas=(params["beta1"], .999))
d_criterion = torch.nn.BCEWithLogitsLoss()
g_adv_criterion = torch.nn.BCEWithLogitsLoss()
g_dist_criterion = torch.nn.L1Loss()
save_path = params["save_path"]
if not save_path[-1] == "/":
save_path += "/"
if not os.path.exists(save_path):
os.makedirs(save_path)
# for each epoch
for epoch in range(params["epochs"]):
# for each batch
total_training_d_loss, total_training_g_loss = 0, 0
num_training_batches = 0
for _, images in enumerate(train_data_loader):
d_loss, g_loss = single_iteration(images, generator, discriminator, g_optim, d_optim, g_adv_criterion, g_dist_criterion, d_criterion)
total_training_d_loss += d_loss
total_training_g_loss += g_loss
num_training_batches += 1
# validation accuracy
total_valid_d_loss, total_valid_g_loss = 0, 0
num_valid_batches = 0
for _, images in enumerate(val_data_loader):
validation_d_loss, validation_g_loss = validate(images, generator, discriminator, g_adv_criterion, g_dist_criterion, d_criterion)
total_valid_d_loss += validation_d_loss
total_valid_g_loss += validation_g_loss
num_valid_batches += 1
total_training_d_loss /= num_training_batches
total_training_g_loss /= num_training_batches
total_valid_d_loss /= num_valid_batches
total_valid_g_loss /= num_valid_batches
if epoch % params["print_interval"] == 0:
print("EPOCH {0}:\tTrain-D-Loss: {1:.4f}\tTrain-G-Loss: {2:.4f}\n\tValid-D-Loss: {3:.4f}\tValid-G-Loss: {4:.4f}".format(epoch, total_training_d_loss, total_training_g_loss, total_valid_d_loss, total_valid_g_loss))
if "save_interval" in params and epoch % params["save_interval"] == 0:
filename = save_path + "model_epoch_{}.pth".format(epoch)
save_model(filename, epoch, generator, discriminator, g_optim, d_optim)
save_model(save_path + "model_final.pth", epoch, generator, discriminator, g_optim, d_optim)
def single_iteration(images, generator, discriminator, g_optim, d_optim, g_adv_criterion, g_dist_criterion, d_criterion):
# get the corresponding grayscale images
grayscale_images = images[:, 0:1, :, :]
grayscale_images, images = Variable(grayscale_images.cuda()), Variable(images.cuda())
# train the discriminator on real color images
discriminator.zero_grad()
real_predictions = discriminator(images)
real_labels = torch.FloatTensor(images.size(0)).fill_(1)
real_labels = Variable(real_labels.cuda())
d_real_loss = d_criterion(torch.squeeze(real_predictions), real_labels)
d_real_loss.backward()
# train the discriminator on fake color images that are generated from the grayscale images
fake_images = generator(grayscale_images)
fake_predictions = discriminator(fake_images.detach())
fake_labels = torch.FloatTensor(fake_images.size(0)).fill_(0)
fake_labels = Variable(fake_labels.cuda())
d_fake_loss = d_criterion(torch.squeeze(fake_predictions), fake_labels)
d_fake_loss.backward()
total_d_loss = d_real_loss + d_fake_loss
d_optim.step()
# train the generator using the discriminator's predictions
generator.zero_grad()
fake_predictions = discriminator(fake_images)
g_adversarial_loss = g_adv_criterion(torch.squeeze(fake_predictions), real_labels)
g_dist_loss = g_dist_criterion(fake_images.view(fake_images.size(0), -1), images.view(images.size(0), -1))
total_g_loss = g_adversarial_loss + 100*g_dist_loss
total_g_loss.backward()
g_optim.step()
return total_d_loss.item(), total_g_loss.item()
def validate(images, generator, discriminator, g_adv_criterion, g_dist_criterion, d_criterion):
grayscale_images = images[:, 0:1, :, :]
grayscale_images, images = Variable(grayscale_images.cuda()), Variable(images.cuda())
real_predictions = discriminator(images)
real_labels = torch.FloatTensor(images.size(0)).fill_(1)
real_labels = Variable(real_labels.cuda())
d_real_loss = d_criterion(torch.squeeze(real_predictions), real_labels)
fake_images = generator(grayscale_images)
fake_predictions = discriminator(fake_images.detach())
fake_labels = torch.FloatTensor(fake_images.size(0)).fill_(1)
fake_labels = Variable(fake_labels.cuda())
d_fake_loss = d_criterion(torch.squeeze(fake_predictions), fake_labels)
fake_predictions = discriminator(fake_images)
total_d_loss = d_real_loss + d_fake_loss
g_adversarial_loss = g_adv_criterion(torch.squeeze(fake_predictions), real_labels)
g_dist_loss = g_dist_criterion(fake_images.view(fake_images.size(0), -1), images.view(images.size(0), -1))
total_g_loss = g_adversarial_loss + 100*g_dist_loss
return total_d_loss.item(), total_g_loss.item()
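# Illustrative entry point (not part of the original module). The keys below
# are exactly the ones train() reads; the data folders are hypothetical and a
# CUDA-capable GPU is assumed because the models are moved to .cuda().
if __name__ == "__main__":
    example_params = {
        "batch_size": 16,
        "learning_rate": 2e-4,
        "beta1": 0.5,
        "epochs": 10,
        "print_interval": 1,
        "save_interval": 5,
        "save_path": "./checkpoints/",
    }
    # train("./data/train", "./data/val", example_params)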
|
py | 1a3bb7e302aee94cf40875b96ca9f857d3c69e75 | #!/usr/bin/env python3
import os
import subprocess
import pypact as pp
import matplotlib.pyplot as plt
do_collapse = True
show_plot = True
group = 709
inventory = [('Fe', 1.0)]
# files file
def createfiles():
nuclear_data_base = os.getenv('NUCLEAR_DATA', os.path.join(os.sep, 'opt', 'fispact', 'nuclear_data'))
ff = pp.FilesFile(base_dir=nuclear_data_base)
ff.setXS('TENDL2015')
ff.setFissionYield('GEFY52')
ff.setProbTab('TENDL2015')
ff.setDecay('DECAY')
ff.setRegulatory('DECAY')
ff.setGammaAbsorb('DECAY')
for invalid in ff.invalidpaths():
print("FilesFile:: missing file: {}".format(invalid))
return ff
# input file
def createinput():
id = pp.InputData()
id.overwriteExisting()
id.enableJSON()
id.approxGammaSpectrum()
if do_collapse:
id.readXSData(group)
id.readDecayData()
id.enableSystemMonitor(False)
id.enableHalflifeInOutput()
id.enableHazardsInOutput()
id.setProjectile(pp.PROJECTILE_NEUTRON)
id.enableInitialInventoryInOutput()
id.setLogLevel(pp.LOG_SEVERITY_ERROR)
id.setAtomsThreshold(1.0e-3)
id.setDensity(7.875)
id.setMass(1.0e-3)
for e, r in inventory:
id.addElement(e, percentage=r*100.0)
id.addIrradiation(300.0, 1.1e15)
id.addCooling(10.0)
id.addCooling(100.0)
id.addCooling(1000.0)
id.addCooling(10000.0)
id.addCooling(100000.0)
id.validate()
return id
# fluxes file
def createflux():
# set monoenergetic flux at 14 MeV for group 709
flux = pp.FluxesFile(name="14 MeV (almost) monoenergetic", norm=1.0)
flux.setGroup(group)
flux.setValue(12.0e6, 0.1)
flux.setValue(13.0e6, 0.4)
flux.setValue(14.0e6, 1.0)
flux.validate()
return flux
# perform analysis on the output
def analyse(output):
# plot the final inventory ignoring the initial elements
elements = {}
ignore_elements = list(map(list, zip(*inventory)))[0]
if len(output) == 0:
print("No valid inventory output, exiting")
        exit()
for n in output[-1].nuclides:
if n.element not in ignore_elements:
if n.element in elements:
elements[n.element] += n.grams
else:
elements[n.element] = n.grams
total_grams = sum([g for e, g in elements.items()])
for e, g in elements.items():
print("{} {:.2f}%".format(e, g*100.0/total_grams))
# we must rescale the values
elements[e] = g/total_grams
labels, values = list(zip(*(list(elements.items()))))
if show_plot:
plt.pie(list(values), labels=list(labels), autopct='%2.2f%%', shadow=False)
plt.show()
# main script
input = createinput()
files = createfiles()
fluxes = createflux()
output = pp.compute(input, files, fluxes)
analyse(output)
|
py | 1a3bb8084e3a939d152ef820ddd21d1e90d39ad2 | #
# * The source code in this file is developed independently by NEC Corporation.
#
# # NLCPy License #
#
# Copyright (c) 2020-2021 NEC Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither NEC Corporation nor the names of its contributors may be
# used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
from __future__ import division, absolute_import, print_function
from numpy.testing import assert_array_almost_equal
import numpy as np
import nlcpy as ny
def test_me_case_1():
np_a = np.array([[1, 2, 1, 9, 10, 3, 2, 6, 7],
[2, 1, 8, 3, 7, 5, 10, 7, 2]])
ny_a = ny.array([[1, 2, 1, 9, 10, 3, 2, 6, 7],
[2, 1, 8, 3, 7, 5, 10, 7, 2]])
ans_np = np.cov(np_a)
ans_ny = ny.cov(ny_a)
print("numpy={} nlcpy={}".format(ans_np, ans_ny))
assert_array_almost_equal(ans_np, ans_ny.get())
def test_me_case_2():
np_a = np.array([-2.1, -1, 4.3])
ny_a = ny.array([-2.1, -1, 4.3])
ans_np = np.cov(np_a)
ans_ny = ny.cov(ny_a)
print("numpy={} nlcpy={}".format(ans_np, ans_ny))
assert_array_almost_equal(ans_np, ans_ny.get())
def test_me_case_3():
np_a = np.array([[1, 2, 1, 9, 10, 3, 2, 6, 7],
[2, 1, 8, 3, 7, 5, 10, 7, 2]])
ny_a = ny.array([[1, 2, 1, 9, 10, 3, 2, 6, 7],
[2, 1, 8, 3, 7, 5, 10, 7, 2]])
np_y = np.array([2, 1, 1, 8, 9, 4, 3, 5, 7])
ny_y = ny.array([2, 1, 1, 8, 9, 4, 3, 5, 7])
ans_np = np.cov(np_a, np_y)
ans_ny = ny.cov(ny_a, ny_y)
print("numpy={} nlcpy={}".format(ans_np, ans_ny))
assert_array_almost_equal(ans_np, ans_ny.get())
def test_me_case_4():
np_a = np.array([[1, 2, 1, 9, 10, 3, 2, 6, 7],
[2, 1, 8, 3, 7, 5, 10, 7, 2]])
ny_a = ny.array([[1, 2, 1, 9, 10, 3, 2, 6, 7],
[2, 1, 8, 3, 7, 5, 10, 7, 2]])
ans_np = np.cov(np_a.T, rowvar=False)
ans_ny = ny.cov(ny_a.T, rowvar=False)
print("numpy={} nlcpy={}".format(ans_np, ans_ny))
assert_array_almost_equal(ans_np, ans_ny.get())
def test_me_case_5():
np_a = np.array([[1, 2, 1, 9, 10, 3, 2, 6, 7],
[2, 1, 8, 3, 7, 5, 10, 7, 2]])
ny_a = ny.array([[1, 2, 1, 9, 10, 3, 2, 6, 7],
[2, 1, 8, 3, 7, 5, 10, 7, 2]])
ans_np = np.cov(np_a.T, rowvar=True)
ans_ny = ny.cov(ny_a.T, rowvar=True)
print("numpy={} nlcpy={}".format(ans_np, ans_ny))
assert_array_almost_equal(ans_np, ans_ny.get())
def test_me_case_6():
np_a = np.array([[1, 2, 1, 9, 10, 3, 2, 6, 7],
[2, 1, 8, 3, 7, 5, 10, 7, 2]])
ny_a = ny.array([[1, 2, 1, 9, 10, 3, 2, 6, 7],
[2, 1, 8, 3, 7, 5, 10, 7, 2]])
ans_np = np.cov(np_a, bias=False)
ans_ny = ny.cov(ny_a, bias=False)
print("numpy={} nlcpy={}".format(ans_np, ans_ny))
assert_array_almost_equal(ans_np, ans_ny.get())
def test_me_case_7():
np_a = np.array([[1, 2, 1, 9, 10, 3, 2, 6, 7],
[2, 1, 8, 3, 7, 5, 10, 7, 2]])
ny_a = ny.array([[1, 2, 1, 9, 10, 3, 2, 6, 7],
[2, 1, 8, 3, 7, 5, 10, 7, 2]])
ans_np = np.cov(np_a, bias=True)
ans_ny = ny.cov(ny_a, bias=True)
print("numpy={} nlcpy={}".format(ans_np, ans_ny))
assert_array_almost_equal(ans_np, ans_ny.get())
def test_me_case_8():
np_a = np.array([[1, 2, 1, 9, 10, 3, 2, 6, 7],
[2, 1, 8, 3, 7, 5, 10, 7, 2]])
ny_a = ny.array([[1, 2, 1, 9, 10, 3, 2, 6, 7],
[2, 1, 8, 3, 7, 5, 10, 7, 2]])
ans_np = np.cov(np_a, ddof=None)
ans_ny = ny.cov(ny_a, ddof=None)
print("numpy={} nlcpy={}".format(ans_np, ans_ny))
assert_array_almost_equal(ans_np, ans_ny.get())
def test_me_case_9():
np_a = np.array([[1, 2, 1, 9, 10, 3, 2, 6, 7],
[2, 1, 8, 3, 7, 5, 10, 7, 2]])
ny_a = ny.array([[1, 2, 1, 9, 10, 3, 2, 6, 7],
[2, 1, 8, 3, 7, 5, 10, 7, 2]])
ans_np = np.cov(np_a, ddof=0)
ans_ny = ny.cov(ny_a, ddof=0)
print("numpy={} nlcpy={}".format(ans_np, ans_ny))
assert_array_almost_equal(ans_np, ans_ny.get())
def test_me_case_10():
np_a = np.array([[1, 2, 1, 9, 10, 3, 2, 6, 7],
[2, 1, 8, 3, 7, 5, 10, 7, 2]])
ny_a = ny.array([[1, 2, 1, 9, 10, 3, 2, 6, 7],
[2, 1, 8, 3, 7, 5, 10, 7, 2]])
ans_np = np.cov(np_a, ddof=1)
ans_ny = ny.cov(ny_a, ddof=1)
print("numpy={} nlcpy={}".format(ans_np, ans_ny))
assert_array_almost_equal(ans_np, ans_ny.get())
def test_me_case_11():
np_a = np.array([[1, 2, 1, 9, 10, 3, 2, 6, 7],
[2, 1, 8, 3, 7, 5, 10, 7, 2]])
ny_a = ny.array([[1, 2, 1, 9, 10, 3, 2, 6, 7],
[2, 1, 8, 3, 7, 5, 10, 7, 2]])
ans_np = np.cov(np_a, ddof=2)
ans_ny = ny.cov(ny_a, ddof=2)
print("numpy={} nlcpy={}".format(ans_np, ans_ny))
assert_array_almost_equal(ans_np, ans_ny.get())
def test_me_case_12():
np_a = np.array([[10, 5, 2, 4, 9, 3, 2], [10, 2, 8, 3, 7, 4, 1]])
np_y = np.array([1, 2, 2, 1, 1, 1, 1])
ny_a = ny.array([[10, 5, 2, 4, 9, 3, 2], [10, 2, 8, 3, 7, 4, 1]])
ny_y = ny.array([1, 2, 2, 1, 1, 1, 1])
ans_np = np.cov(np_a, fweights=np_y)
ans_ny = ny.cov(ny_a, fweights=ny_y)
print("numpy={} nlcpy={}".format(ans_np, ans_ny))
assert_array_almost_equal(ans_np, ans_ny.get())
def test_me_case_13():
np_a = np.array([[10, 5, 2, 4, 9, 3, 2], [10, 2, 8, 3, 7, 4, 1]])
ny_a = ny.array([[10, 5, 2, 4, 9, 3, 2], [10, 2, 8, 3, 7, 4, 1]])
ans_np = np.cov(np_a, aweights=None)
ans_ny = ny.cov(ny_a, aweights=None)
print("numpy={} nlcpy={}".format(ans_np, ans_ny))
assert_array_almost_equal(ans_np, ans_ny.get())
def test_me_case_14():
np_a = np.array([[10, 5, 2, 4, 9, 3, 2], [10, 2, 8, 3, 7, 4, 1]])
ny_a = ny.array([[10, 5, 2, 4, 9, 3, 2], [10, 2, 8, 3, 7, 4, 1]])
np_w = np.array([0.1, 0.2, 0.2, 0.2, 0.1, 0.1, 0.1])
ny_w = ny.array([0.1, 0.2, 0.2, 0.2, 0.1, 0.1, 0.1])
ans_np = np.cov(np_a, aweights=np_w)
ans_ny = ny.cov(ny_a, aweights=ny_w)
print("numpy={} nlcpy={}".format(ans_np, ans_ny))
assert_array_almost_equal(ans_np, ans_ny.get())
|
py | 1a3bb9941b512a92f110cbfc72bc6928be1ecc2a | # -*- coding: utf-8 -*-
# This file was generated
__version__ = '1.1.3.dev0'
from niswitch.enums import * # noqa: F403,F401,H303
from niswitch.errors import DriverWarning # noqa: F401
from niswitch.errors import Error # noqa: F401
from niswitch.session import Session # noqa: F401
def get_diagnostic_information():
'''Get diagnostic information about the system state that is suitable for printing or logging
returns: dict
note: Python bitness may be incorrect when running in a virtual environment
'''
import os
import pkg_resources
import platform
import struct
import sys
def is_python_64bit():
return (struct.calcsize("P") == 8)
def is_os_64bit():
return platform.machine().endswith('64')
def is_venv():
return 'VIRTUAL_ENV' in os.environ
info = {}
info['os'] = {}
info['python'] = {}
info['driver'] = {}
info['module'] = {}
if platform.system() == 'Windows':
try:
import winreg as winreg
except ImportError:
import _winreg as winreg
os_name = 'Windows'
try:
driver_version_key = winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE, r"SOFTWARE\National Instruments\NI-SWITCH\CurrentVersion")
driver_version = winreg.QueryValueEx(driver_version_key, "Version")[0]
except WindowsError:
driver_version = 'Unknown'
elif platform.system() == 'Linux':
os_name = 'Linux'
driver_version = 'Unknown'
else:
raise SystemError('Unsupported platform: {}'.format(platform.system()))
installed_packages = pkg_resources.working_set
installed_packages_list = [{'name': i.key, 'version': i.version, } for i in installed_packages]
info['os']['name'] = os_name
info['os']['version'] = platform.version()
info['os']['bits'] = '64' if is_os_64bit() else '32'
info['driver']['name'] = "NI-SWITCH"
info['driver']['version'] = driver_version
info['module']['name'] = 'niswitch'
info['module']['version'] = "1.1.3.dev0"
info['python']['version'] = sys.version
info['python']['bits'] = '64' if is_python_64bit() else '32'
info['python']['is_venv'] = is_venv()
info['python']['packages'] = installed_packages_list
return info
def print_diagnostic_information():
'''Print diagnostic information in a format suitable for issue report
note: Python bitness may be incorrect when running in a virtual environment
'''
info = get_diagnostic_information()
row_format = ' {:<10} {}'
for type in ['OS', 'Driver', 'Module', 'Python']:
typename = type.lower()
print(type + ':')
for item in info[typename]:
if item != 'packages':
print(row_format.format(item.title() + ':', info[typename][item]))
print(' Installed Packages:')
for p in info['python']['packages']:
print((' ' * 8) + p['name'] + '==' + p['version'])
return info
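# A short usage sketch (field values below are only illustrative):
#     info = get_diagnostic_information()
#     info['driver']['version']       # version string read from the registry, or 'Unknown'
#     info['python']['bits']          # '64' on a 64-bit interpreter, otherwise '32'
#     print_diagnostic_information()  # same data, formatted for pasting into a bug report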
|
py | 1a3bba156a57335e8df16c30c5ff46df6c7ba026 | # -*- coding: utf-8 -*-
#
# michael a.g. aïvázis
# orthologue
# (c) 1998-2022 all rights reserved
#
# externals
import pyre
# declaration
class Language(pyre.protocol, family="pyre.weaver.languages"):
"""
The protocol specification for output languages
"""
# constants
# the language normalization table
languages = {
"c++": "cxx",
"fortran": "f77",
"fortran77": "f77",
}
# framework hooks
@classmethod
def pyre_convert(cls, value, **kwds):
# if {value} is a string
if isinstance(value, str):
# convert to lower case
language = value.lower()
# and translate
return cls.languages.get(language, language)
# otherwise, I have nothing to say
return value
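    # A quick illustration of the normalization above: Language.pyre_convert("C++") or
    # Language.pyre_convert("Fortran77") would be normalized to "cxx" and "f77"
    # respectively, while an unrecognized name such as "f90" is simply lower-cased and
    # passed through by the table lookup.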
# interface
@pyre.provides
def render(self):
"""
Render the document
"""
@pyre.provides
def header(self):
"""
Render the header of the document
"""
@pyre.provides
def body(self):
"""
Render the body of the document
"""
@pyre.provides
def footer(self):
"""
Render the footer of the document
"""
# end of file
|
py | 1a3bbb4f3789446adc8bd3752647d2e46401e523 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
__authors__ = ["Katharina Eggensperger", "Matthias Feurer"]
__contact__ = "automl.org"
from collections import OrderedDict
from itertools import product
from io import StringIO
import sys
import pyparsing
from ConfigSpace.configuration_space import ConfigurationSpace
from ConfigSpace.hyperparameters import (
CategoricalHyperparameter,
UniformIntegerHyperparameter,
UniformFloatHyperparameter,
NumericalHyperparameter,
Constant,
IntegerHyperparameter,
NormalIntegerHyperparameter,
NormalFloatHyperparameter,
)
from ConfigSpace.conditions import (
EqualsCondition,
NotEqualsCondition,
InCondition,
AndConjunction,
OrConjunction,
ConditionComponent,
)
from ConfigSpace.forbidden import (
ForbiddenEqualsClause,
ForbiddenAndConjunction,
ForbiddenInClause,
AbstractForbiddenComponent,
MultipleValueForbiddenClause,
)
# Build pyparsing expressions for params
pp_param_name = pyparsing.Word(
pyparsing.alphanums + "_" + "-" + "@" + "." + ":" + ";" + "\\" + "/" + "?" + "!"
+ "$" + "%" + "&" + "*" + "+" + "<" + ">")
pp_digits = "0123456789"
pp_plusorminus = pyparsing.Literal('+') | pyparsing.Literal('-')
pp_int = pyparsing.Combine(pyparsing.Optional(pp_plusorminus) + pyparsing.Word(pp_digits))
pp_float = pyparsing.Combine(
pyparsing.Optional(pp_plusorminus) + pyparsing.Optional(pp_int) + "." + pp_int
)
pp_eorE = pyparsing.Literal('e') | pyparsing.Literal('E')
pp_floatorint = pp_float | pp_int
pp_e_notation = pyparsing.Combine(pp_floatorint + pp_eorE + pp_int)
pp_number = pp_e_notation | pp_float | pp_int
pp_numberorname = pp_number | pp_param_name
pp_il = pyparsing.Word("il")
pp_choices = pp_param_name + pyparsing.Optional(pyparsing.OneOrMore("," + pp_param_name))
pp_cont_param = pp_param_name + "[" + pp_number + "," + pp_number + "]" + \
"[" + pp_number + "]" + pyparsing.Optional(pp_il)
pp_cat_param = pp_param_name + "{" + pp_choices + "}" + "[" + pp_param_name + "]"
pp_condition = pp_param_name + "|" + pp_param_name + "in" + "{" + pp_choices + "}"
pp_forbidden_clause = "{" + pp_param_name + "=" + pp_numberorname + \
pyparsing.Optional(pyparsing.OneOrMore("," + pp_param_name + "=" + pp_numberorname)) + "}"
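# A few illustrative lines in the pcs format that the expressions above are meant to
# accept (the hyperparameter names here are made up for the example):
#     learning_rate [0.0001, 0.1] [0.01]l        <- float range, log scale
#     max_depth [1, 10] [3]i                     <- integer range
#     kernel {linear, rbf, poly} [rbf]           <- categorical with default
#     degree | kernel in {poly}                  <- condition
#     {kernel=linear, degree=2}                  <- forbidden clause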
def build_categorical(param):
if param.probabilities is not None:
raise ValueError('The pcs format does not support categorical hyperparameters with '
                         'assigned weights/probabilities (for hyperparameter %s)' % param.name)
cat_template = "%s {%s} [%s]"
return cat_template % (param.name,
", ".join([str(value) for value in param.choices]),
str(param.default_value))
def build_constant(param):
constant_template = "%s {%s} [%s]"
return constant_template % (param.name, param.value, param.value)
def build_continuous(param):
if type(param) in (NormalIntegerHyperparameter,
NormalFloatHyperparameter):
param = param.to_uniform()
float_template = "%s%s [%s, %s] [%s]"
int_template = "%s%s [%d, %d] [%d]i"
if param.log:
float_template += "l"
int_template += "l"
if param.q is not None:
q_prefix = "Q%d_" % (int(param.q),)
else:
q_prefix = ""
default_value = param.default_value
if isinstance(param, IntegerHyperparameter):
default_value = int(default_value)
return int_template % (q_prefix, param.name, param.lower,
param.upper, default_value)
else:
return float_template % (q_prefix, param.name, str(param.lower),
str(param.upper), str(default_value))
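# Given the templates in build_continuous above, a (hypothetical) log-scaled float
# hyperparameter named "lr" with bounds [0.0001, 0.1] and default 0.01 would serialize
# roughly as:
#     lr [0.0001, 0.1] [0.01]l
# and a quantized hyperparameter with q=2 would get a "Q2_" prefix before its name.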
def build_condition(condition):
if not isinstance(condition, ConditionComponent):
raise TypeError("build_condition must be called with an instance of "
"'%s', got '%s'" %
(ConditionComponent, type(condition)))
# Check if SMAC can handle the condition
if isinstance(condition, OrConjunction):
raise NotImplementedError("SMAC cannot handle OR conditions: %s" %
(condition))
if isinstance(condition, NotEqualsCondition):
raise NotImplementedError("SMAC cannot handle != conditions: %s" %
(condition))
# Now handle the conditions SMAC can handle
condition_template = "%s | %s in {%s}"
if isinstance(condition, AndConjunction):
return '\n'.join([
build_condition(cond) for cond in condition.components
])
elif isinstance(condition, InCondition):
return condition_template % (condition.child.name,
condition.parent.name,
", ".join(condition.values))
elif isinstance(condition, EqualsCondition):
return condition_template % (condition.child.name,
condition.parent.name,
condition.value)
else:
raise NotImplementedError(condition)
def build_forbidden(clause):
if not isinstance(clause, AbstractForbiddenComponent):
raise TypeError("build_forbidden must be called with an instance of "
"'%s', got '%s'" %
(AbstractForbiddenComponent, type(clause)))
if not isinstance(clause, (ForbiddenEqualsClause, ForbiddenAndConjunction)):
raise NotImplementedError("SMAC cannot handle '%s' of type %s" %
                                  (str(clause), type(clause)))
retval = StringIO()
retval.write("{")
# Really simple because everything is an AND-conjunction of equals
# conditions
dlcs = clause.get_descendant_literal_clauses()
for dlc in dlcs:
if retval.tell() > 1:
retval.write(", ")
retval.write("%s=%s" % (dlc.hyperparameter.name, dlc.value))
retval.write("}")
retval.seek(0)
return retval.getvalue()
def read(pcs_string, debug=False):
"""
Read in a :py:class:`~ConfigSpace.configuration_space.ConfigurationSpace`
definition from a pcs file.
Example
-------
.. testsetup:: pcs_test
from ConfigSpace import ConfigurationSpace
import ConfigSpace.hyperparameters as CSH
from ConfigSpace.read_and_write import pcs
cs = ConfigurationSpace()
cs.add_hyperparameter(CSH.CategoricalHyperparameter('a', choices=[1, 2, 3]))
with open('configspace.pcs', 'w') as f:
f.write(pcs.write(cs))
.. doctest:: pcs_test
>>> from ConfigSpace.read_and_write import pcs
>>> with open('configspace.pcs', 'r') as fh:
... deserialized_conf = pcs.read(fh)
Parameters
----------
pcs_string : str
ConfigSpace definition in pcs format
debug : bool
Provides debug information. Defaults to False.
Returns
-------
:py:class:`~ConfigSpace.configuration_space.ConfigurationSpace`
The deserialized ConfigurationSpace object
"""
configuration_space = ConfigurationSpace()
conditions = []
forbidden = []
# some statistics
ct = 0
cont_ct = 0
cat_ct = 0
line_ct = 0
for line in pcs_string:
line_ct += 1
if "#" in line:
# It contains a comment
pos = line.find("#")
line = line[:pos]
# Remove quotes and whitespaces at beginning and end
line = line.replace('"', "").replace("'", "")
line = line.strip()
if "|" in line:
# It's a condition
try:
c = pp_condition.parseString(line)
conditions.append(c)
except pyparsing.ParseException:
raise NotImplementedError("Could not parse condition: %s" % line)
continue
if "}" not in line and "]" not in line:
continue
if line.startswith("{") and line.endswith("}"):
forbidden.append(line)
continue
if len(line.strip()) == 0:
continue
ct += 1
param = None
create = {"int": UniformIntegerHyperparameter,
"float": UniformFloatHyperparameter,
"categorical": CategoricalHyperparameter}
try:
param_list = pp_cont_param.parseString(line)
il = param_list[9:]
if len(il) > 0:
il = il[0]
param_list = param_list[:9]
name = param_list[0]
lower = float(param_list[2])
upper = float(param_list[4])
paramtype = "int" if "i" in il else "float"
log = True if "l" in il else False
default_value = float(param_list[7])
param = create[paramtype](name=name, lower=lower, upper=upper,
q=None, log=log, default_value=default_value)
cont_ct += 1
except pyparsing.ParseException:
pass
try:
param_list = pp_cat_param.parseString(line)
name = param_list[0]
choices = [c for c in param_list[2:-4:2]]
default_value = param_list[-2]
param = create["categorical"](name=name, choices=choices,
default_value=default_value)
cat_ct += 1
except pyparsing.ParseException:
pass
if param is None:
raise NotImplementedError("Could not parse: %s" % line)
configuration_space.add_hyperparameter(param)
for clause in forbidden:
# TODO test this properly!
# TODO Add a try/catch here!
# noinspection PyUnusedLocal
param_list = pp_forbidden_clause.parseString(clause)
tmp_list = []
clause_list = []
for value in param_list[1:]:
if len(tmp_list) < 3:
tmp_list.append(value)
else:
# So far, only equals is supported by SMAC
if tmp_list[1] == '=':
# TODO maybe add a check if the hyperparameter is
# actually in the configuration space
clause_list.append(ForbiddenEqualsClause(
configuration_space.get_hyperparameter(tmp_list[0]),
tmp_list[2]))
else:
raise NotImplementedError()
tmp_list = []
configuration_space.add_forbidden_clause(ForbiddenAndConjunction(
*clause_list))
# Now handle conditions
# If there are two conditions for one child, these two conditions are an
# AND-conjunction of conditions, thus we have to connect them
conditions_per_child = OrderedDict()
for condition in conditions:
child_name = condition[0]
if child_name not in conditions_per_child:
conditions_per_child[child_name] = list()
conditions_per_child[child_name].append(condition)
for child_name in conditions_per_child:
condition_objects = []
for condition in conditions_per_child[child_name]:
child = configuration_space.get_hyperparameter(child_name)
parent_name = condition[2]
parent = configuration_space.get_hyperparameter(parent_name)
restrictions = condition[5:-1:2]
# TODO: cast the type of the restriction!
if len(restrictions) == 1:
condition = EqualsCondition(child, parent, restrictions[0])
else:
condition = InCondition(child, parent, values=restrictions)
condition_objects.append(condition)
# Now we have all condition objects for this child, so we can build a
# giant AND-conjunction of them (if number of conditions >= 2)!
if len(condition_objects) > 1:
and_conjunction = AndConjunction(*condition_objects)
configuration_space.add_condition(and_conjunction)
else:
configuration_space.add_condition(condition_objects[0])
return configuration_space
def write(configuration_space):
"""
Create a string representation of a
:class:`~ConfigSpace.configuration_space.ConfigurationSpace` in pcs format.
This string can be written to file.
Example
-------
.. doctest::
>>> import ConfigSpace as CS
>>> import ConfigSpace.hyperparameters as CSH
>>> from ConfigSpace.read_and_write import pcs
>>> cs = CS.ConfigurationSpace()
>>> cs.add_hyperparameter(CSH.CategoricalHyperparameter('a', choices=[1, 2, 3]))
a, Type: Categorical, Choices: {1, 2, 3}, Default: 1
<BLANKLINE>
>>> with open('configspace.pcs', 'w') as fh:
... fh.write(pcs.write(cs))
15
Parameters
----------
configuration_space : :py:class:`~ConfigSpace.configuration_space.ConfigurationSpace`
a configuration space
Returns
-------
str
The string representation of the configuration space
"""
if not isinstance(configuration_space, ConfigurationSpace):
raise TypeError("pcs_parser.write expects an instance of %s, "
"you provided '%s'" % (ConfigurationSpace, type(configuration_space)))
param_lines = StringIO()
condition_lines = StringIO()
forbidden_lines = []
for hyperparameter in configuration_space.get_hyperparameters():
# Check if the hyperparameter names are valid SMAC names!
try:
pp_param_name.parseString(hyperparameter.name)
except pyparsing.ParseException:
raise ValueError(
"Illegal hyperparameter name for SMAC: %s" % hyperparameter.name)
# First build params
if param_lines.tell() > 0:
param_lines.write("\n")
if isinstance(hyperparameter, NumericalHyperparameter):
param_lines.write(build_continuous(hyperparameter))
elif isinstance(hyperparameter, CategoricalHyperparameter):
param_lines.write(build_categorical(hyperparameter))
elif isinstance(hyperparameter, Constant):
param_lines.write(build_constant(hyperparameter))
else:
raise TypeError("Unknown type: %s (%s)" % (
type(hyperparameter), hyperparameter))
for condition in configuration_space.get_conditions():
if condition_lines.tell() > 0:
condition_lines.write("\n")
condition_lines.write(build_condition(condition))
for forbidden_clause in configuration_space.get_forbiddens():
# Convert in-statement into two or more equals statements
dlcs = forbidden_clause.get_descendant_literal_clauses()
# First, get all in statements and convert them to equal statements
in_statements = []
other_statements = []
for dlc in dlcs:
if isinstance(dlc, MultipleValueForbiddenClause):
if not isinstance(dlc, ForbiddenInClause):
raise ValueError("SMAC cannot handle this forbidden "
"clause: %s" % dlc)
in_statements.append(
[ForbiddenEqualsClause(dlc.hyperparameter, value)
for value in dlc.values])
else:
other_statements.append(dlc)
# Second, create the product of all elements in the IN statements,
# create a ForbiddenAnd and add all ForbiddenEquals
if len(in_statements) > 0:
for i, p in enumerate(product(*in_statements)):
all_forbidden_clauses = list(p) + other_statements
f = ForbiddenAndConjunction(*all_forbidden_clauses)
forbidden_lines.append(build_forbidden(f))
else:
forbidden_lines.append(build_forbidden(forbidden_clause))
if condition_lines.tell() > 0:
condition_lines.seek(0)
param_lines.write("\n\n")
for line in condition_lines:
param_lines.write(line)
if len(forbidden_lines) > 0:
forbidden_lines.sort()
param_lines.write("\n\n")
for line in forbidden_lines:
param_lines.write(line)
param_lines.write("\n")
# Check if the default configuration is a valid configuration!
param_lines.seek(0)
return param_lines.getvalue()
if __name__ == "__main__":
fh = open(sys.argv[1])
orig_pcs = fh.readlines()
sp = read(orig_pcs, debug=True)
created_pcs = write(sp).split("\n")
print("============== Writing Results")
print("#Lines: ", len(created_pcs))
print("#LostLines: ", len(orig_pcs) - len(created_pcs))
diff = ["%s\n" % i for i in created_pcs if i not in " ".join(orig_pcs)]
print("Identical Lines: ", len(created_pcs) - len(diff))
print()
print("Up to 10 random different lines (of %d):" % len(diff))
print("".join(diff[:10]))
|
py | 1a3bbb9a5ea267849a955b40da4b8f9f0c02ed2e | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import Tkinter
import pickle
import ttk
import glob
from Tkinter import *
import PIL
from PIL import ImageTk, Image
import httplib, urllib, base64
from scipy import *
import networkx as nx
import numpy as np
from lxml import etree
import xml.etree.ElementTree as ET
global api_key
api_key='6b700f7ea9db408e9745c207da7ca827'
global thedata
thedata = np.genfromtxt(
'tab.csv', # file name
skip_header=0, # lines to skip at the top
skip_footer=0, # lines to skip at the bottom
delimiter=',', # column delimiter
dtype='float32', # data type
filling_values=0)
window = Tk()
l= PanedWindow(window, orient=VERTICAL)
c= PanedWindow(window, orient=VERTICAL)
r=PanedWindow(window, orient=VERTICAL)
l.pack(side=LEFT, fill=BOTH, pady=2, padx=2)
r.pack(side=RIGHT,expand=N, fill=BOTH, pady=2, padx=2)
c.pack(side=RIGHT,expand=Y, fill=BOTH, pady=2, padx=2)
global liste_stations,liste_code_stations
liste_code_stations=[]
liste_stations=[]
headers = {'api_key': api_key}
try:
conn = httplib.HTTPSConnection('api.wmata.com')
conn.request("GET", "/Rail.svc/Stations?", "{body}", headers)
response = conn.getresponse()
data = response.read()
root=ET.fromstring(data)
#print data
premier=root[0]
for i in range(0,len(premier)):
tmp=premier[i]
liste_code_stations.append(tmp[1].text)
liste_stations.append(tmp[8].text)
conn.close()
except Exception as e:
print("[Errno {0}] {1}".format(e.errno, e.strerror))
def afficher_carte():
image = Image.open("map2.png").resize((1000,900))
photo = ImageTk.PhotoImage(image)
canvas = Canvas(r, width = image.size[0], height = image.size[1])
canvas.create_image(0,0, anchor = NW, image=photo)
canvas.grid()
window.mainloop()
def get_code_from_name(name):
for i in range(0,len(liste_stations)):
if (liste_stations[i]==name):
return liste_code_stations[i]
def temps_entre_deux_stations(station1,station2):
headers = {'api_key': api_key,}
params = urllib.urlencode({'FromStationCode': station1,'ToStationCode': station2,})
try:
conn = httplib.HTTPSConnection('api.wmata.com')
conn.request("GET", "/Rail.svc/SrcStationToDstStationInfo?%s" % params, "{body}", headers)
response = conn.getresponse()
data = response.read()
#print data
root=ET.fromstring(data)
#child=root.find('.//RailTime')
caca=root[0]
deux=caca[0]
quatre=deux[3].text
return quatre
conn.close()
except Exception as e:
print("[Errno {0}] {1}".format(e.errno, e.strerror))
def get_indice(liste,arret):
for i in range(0,len(liste)):
if (liste[i]==arret):
return i
def affecter_matrice(station1,station2,tab,liste):
temps=temps_entre_deux_stations(station1,station2)
indice_station1=get_indice(liste,station1)
indice_station2=get_indice(liste,station2)
tab[indice_station1][indice_station2]=temps
print "1"
def definir_graphe(station1,station2,liste):
headers = {'api_key': api_key,}
params = urllib.urlencode({'FromStationCode': station1,'ToStationCode': station2,})
try:
conn = httplib.HTTPSConnection('api.wmata.com')
conn.request("GET", "/Rail.svc/Path?%s" % params, "{body}", headers)
response = conn.getresponse()
data = response.read()
root=ET.fromstring(data)
premier=root[0]
for i in range(0,len(premier)):
deux=premier[i]
quatre=deux[4].text
liste.append(quatre)
conn.close()
except Exception as e:
print("[Errno {0}] {1}".format(e.errno, e.strerror))
def symetrique(tab):
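    # Make the travel-time matrix symmetric: whenever only one direction was filled in,
    # copy it to the other; when both directions differ, keep the smaller time.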
for i in range(0,len(tab)):
for j in range(0,len(tab)):
if (tab[j][i]!=0 and tab[i][j]==0):
tab[i][j]=tab[j][i]
if (tab[i][j]!=0 and tab[j][i]==0):
tab[j][i]=tab[i][j]
if (tab[i][j]!=0 and tab[j][i]!=0):
if (tab[i][j]>tab[j][i]):
tab[i][j]=tab[j][i]
else:
tab[j][i]=tab[i][j]
def envoyer(liste1,liste2,liste3,liste4,liste5,liste6):
definir_graphe('N06','G05',liste1)
definir_graphe('B11','A15',liste2)
definir_graphe('K08','D13',liste3)
definir_graphe('G05','J03',liste4)
definir_graphe('C15','E06',liste5)
definir_graphe('E10','F11',liste6)
global tab
def define():
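    # Build the weighted adjacency matrix: fetch the ordered station lists of the six
    # metro lines, then, for each pair of consecutive stations, query the API travel
    # time and store it in tab, before making the matrix symmetric and saving it to CSV.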
dimension=len(liste_stations)
tab=zeros((dimension, dimension))
liste1=[]#SV
liste2=[]#RD
liste3=[]#OR
liste4=[]#BL
liste5=[]#YL
liste6=[]#GR
envoyer(liste1,liste2,liste3,liste4,liste5,liste6)
for i in range(0,len(liste1)-1):
tmp1=get_code_from_name(liste1[i])
tmp2=get_code_from_name(liste1[i+1])
affecter_matrice(tmp1,tmp2,tab,liste_code_stations)
for i in range(0,len(liste2)-1):
tmp1=get_code_from_name(liste2[i])
tmp2=get_code_from_name(liste2[i+1])
affecter_matrice(tmp1,tmp2,tab,liste_code_stations)
for i in range(0,len(liste3)-1):
tmp1=get_code_from_name(liste3[i])
tmp2=get_code_from_name(liste3[i+1])
affecter_matrice(tmp1,tmp2,tab,liste_code_stations)
for i in range(0,len(liste4)-1):
tmp1=get_code_from_name(liste4[i])
tmp2=get_code_from_name(liste4[i+1])
affecter_matrice(tmp1,tmp2,tab,liste_code_stations)
for i in range(0,len(liste5)-1):
tmp1=get_code_from_name(liste5[i])
tmp2=get_code_from_name(liste5[i+1])
affecter_matrice(tmp1,tmp2,tab,liste_code_stations)
for i in range(0,len(liste6)-1):
tmp1=get_code_from_name(liste6[i])
tmp2=get_code_from_name(liste6[i+1])
affecter_matrice(tmp1,tmp2,tab,liste_code_stations)
symetrique(tab)
np.savetxt(
'tab.csv', # file name
tab, # array to save
fmt='%.2f', # formatting, 2 digits in this case
delimiter=',', # column delimiter
newline='\n', # new line character
footer='end of file', # file footer
comments='# ', # character to use for comments
header='Data generated by numpy')
def affichage_trajet():
liste_stations_tmp=[]
liste_stations_tmp=liste_stations
var1= saisir1.get()
var2= saisir2.get()
var3= saisir3.get()
try :
bb=get_indice(liste_stations,var3)
del liste_stations_tmp[bb]
M=np.delete(thedata, bb, 0)
N=np.delete(M, bb, 1)
G = nx.from_numpy_matrix(N, create_using=nx.DiGraph())
cc=get_indice(liste_stations,var2)
dd=get_indice(liste_stations,var1)
resultat=nx.dijkstra_path(G, dd, cc)
Label(c,text="Numéros").grid(row=0,column=0)
Label(c,text="Stations").grid(row=0,column=1)
compteur2=0
for i in resultat:
compteur2+=1
Label(c,text=compteur2).grid(row=compteur2+1,column=0)
Label(c,text=liste_stations[i]).grid(row=compteur2+1,column=1)
a=nx.dijkstra_path_length(G,dd,cc)
Label(c,text="Temps mis :",font=("Helvetica", 16),fg="red").grid(row=compteur2+2,column=0)
Label(c,text=a,font=("Helvetica", 16),fg="red").grid(row=compteur2+2,column=1)
Label(c,text="min",font=("Helvetica", 16),fg="red").grid(row=compteur2+2,column=2)
except:
Label(c,text="Mauvaise saisie",fg="green").grid()
def trajet_bis():
global saisir1,saisir2,saisir3
    saisir1=StringVar() # set up the variable to receive the entered text
    saisir2=StringVar() # set up the variable to receive the entered text
    saisir3=StringVar() # set up the variable to receive the entered text
saisir1.set("Entrez Départ")
saisir2.set("Entrez arrivé")
saisir3.set("Saisir l'arret à éviter")
saisie1=Entry(l,textvariable=saisir1, width=50,justify=CENTER).pack()
saisie2=Entry(l,textvariable=saisir2, width=50,justify=CENTER).pack()
saisie3=Entry(l,textvariable=saisir3, width=50,justify=CENTER).pack()
valider=Button(l,text='OK',command=affichage_trajet).pack()
compteur=0
def determiner_trajet(evt):
global var1,var2
global compteur
compteur+=1
try:
        i=l1.curselection() ## Get the index of the selected item
        var1= l1.get(i) ## Return the selected item (a string)
    except:
        i=l2.curselection() ## Get the index of the selected item
var2=l2.get(i)
G = nx.from_numpy_matrix(thedata, create_using=nx.DiGraph())
var1_int=get_indice(liste_stations,var1)
var2_int=get_indice(liste_stations,var2)
resultat=nx.dijkstra_path(G, var1_int, var2_int)
Label(c,text="Numéros").grid(row=0,column=0)
Label(c,text="Stations").grid(row=0,column=1)
compteur2=0
for i in resultat:
compteur2+=1
Label(c,text=compteur2).grid(row=i+1,column=0)
Label(c,text=liste_stations[i]).grid(row=i+1,column=1)
a=nx.dijkstra_path_length(G,var1_int,var2_int)
Label(c,text="Temps mis :",font=("Helvetica", 16),fg="red").grid(row=i+2,column=0)
Label(c,text=a,font=("Helvetica", 16),fg="red").grid(row=i+2,column=1)
Label(c,text="min",font=("Helvetica", 16),fg="red").grid(row=i+2,column=2)
window.mainloop()
def trajet():
global l1,l2
liste_stations_tmp=[]
liste_stations_tmp=liste_stations
liste_stations_tmp.sort()
compteur=0
f1 = Frame(l)
s1 = Scrollbar(f1)
l1 = Listbox(f1)
l1.bind('<ButtonRelease-1>',determiner_trajet)
s2 = Scrollbar(f1)
l2= Listbox(f1)
l2.bind('<ButtonRelease-1>',determiner_trajet)
for user in liste_stations:
compteur+=1
l1.insert(compteur, user)
l2.insert(compteur, user)
s1.config(command = l1.yview)
l1.config(yscrollcommand = s1.set)
l1.pack(side = LEFT, fill = Y)
s1.pack(side = RIGHT, fill = Y)
s2.config(command = l2.yview)
l2.config(yscrollcommand = s2.set)
l2.pack(side = LEFT, fill = Y)
s2.pack(side = RIGHT, fill = Y)
f1.pack()
def boutons():
bouton3=Button(l, text="Construire le graphe",command=define,bd=5)
bouton4=Button(l, text="Afficher la carte",command=afficher_carte,bd=5)
bouton2=Button(l, text="Trouver itinéraire",command=trajet,bd=5)
bouton5=Button(l, text="Trouver itinéraire bis",command=trajet_bis,bd=5)
bouton2.pack()
bouton5.pack()
bouton3.pack()
bouton4.pack()
window.mainloop()
def changer():
    global api_key
    api_key = e.get()
def changer_api():
global e
seconde=Tk()
window.title("API")
window.configure(background='grey')
Label(seconde, text="API-key").grid(row=0)
e = Entry(seconde).grid(row=0,column=1)
b = Button(seconde, text="Valider", width=10, command=changer).grid(row=0,column=2)
seconde.mainloop()
def about():
about=Tk()
about.title("Help")
texte="Version Alpha\r Distributeurs : Mendes Ryan - Ezvan Jean-Loup \rMails:[email protected] - [email protected]"
label = Label(about, text=texte)
label.pack()
menubar = Menu(window)
menu1=Menu(menubar)
menu1.add_command(label="API_Key",command=changer_api)
menu1.add_command(label="Exit",command=window.quit)
menu2=Menu(menubar)
menu2.add_command(label="About",command=about)
menubar.add_cascade(label="File",menu=menu1)
menubar.add_cascade(label="Help",menu=menu2)
window.config(menu = menubar)
window.title("Metro")
window.geometry("1920x1920")
window.configure(background='grey')
boutons()
|
py | 1a3bbc8210dc401e9c3a5e46a47d8585c9768f16 | # -*- coding: utf-8 -*-
#
# Copyright (C) 2013-2014 Germain Z. <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#
# Add vi/vim-like modes to WeeChat.
#
import csv
import os
import re
import subprocess
from StringIO import StringIO
import time
import weechat
# Script info.
# ============
SCRIPT_NAME = "vimode"
SCRIPT_AUTHOR = "GermainZ <[email protected]>"
SCRIPT_VERSION = "0.5"
SCRIPT_LICENSE = "GPL3"
SCRIPT_DESC = ("Add vi/vim-like modes and keybindings to WeeChat.")
# Global variables.
# =================
# General.
# --------
# Halp! Halp! Halp!
GITHUB_BASE = "https://github.com/GermainZ/weechat-vimode/blob/master/"
README_URL = GITHUB_BASE + "README.md"
FAQ_KEYBINDINGS = GITHUB_BASE + "FAQ.md#problematic-key-bindings"
FAQ_ESC = GITHUB_BASE + "FAQ.md#esc-key-not-being-detected-instantly"
# Holds the text of the command-line mode (currently only Ex commands ":").
cmd_text = ""
# Mode we're in. One of INSERT, NORMAL or REPLACE.
mode = "INSERT"
# Holds normal commands (e.g. "dd").
vi_buffer = ""
# See `cb_key_combo_default()`.
esc_pressed = 0
# See `cb_key_pressed()`.
last_signal_time = 0
# See `start_catching_keys()` for more info.
catching_keys_data = {'amount': 0}
# Used for ; and , to store the last f/F/t/T motion.
last_search_motion = {'motion': None, 'data': None}
# Script options.
vimode_settings = {'no_warn': ("off", "don't warn about problematic "
"keybindings and tmux/screen")}
# Regex patterns.
# ---------------
WHITESPACE = re.compile(r"\s")
IS_KEYWORD = re.compile(r"[a-zA-Z0-9_@À-ÿ]")
REGEX_MOTION_LOWERCASE_W = re.compile(r"\b\S|(?<=\s)\S")
REGEX_MOTION_UPPERCASE_W = re.compile(r"(?<=\s)\S")
REGEX_MOTION_UPPERCASE_E = re.compile(r"\S(?!\S)")
REGEX_MOTION_UPPERCASE_B = REGEX_MOTION_UPPERCASE_E
REGEX_MOTION_G_UPPERCASE_E = REGEX_MOTION_UPPERCASE_W
REGEX_MOTION_CARRET = re.compile(r"\S")
REGEX_INT = r"[0-9]"
# Regex used to detect problematic keybindings.
# For example: meta-wmeta-s is bound by default to ``/window swap``.
# If the user pressed Esc-w, WeeChat will detect it as meta-w and will not
# send any signal to `cb_key_combo_default()` just yet, since it's the
# beginning of a known key combo.
# Instead, `cb_key_combo_default()` will receive the Esc-ws signal, which
# becomes "ws" after removing the Esc part, and won't know how to handle it.
REGEX_PROBLEMATIC_KEYBINDINGS = re.compile(r"meta-\w(meta|ctrl)")
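# As an illustration, "meta-wmeta-s" (the default binding for /window swap) matches this
# pattern and would be listed/unbound by ``/vimode bind_keys``, while a plain "meta-w"
# binding would not.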
# Vi commands.
# ------------
# See Also: `cb_exec_cmd()`.
VI_COMMANDS = {'h': "/help",
'qall': "/exit",
'q': "/close",
'w': "/save",
'set': "/set",
'bp': "/buffer -1",
'bn': "/buffer +1",
'bd': "/close",
'b#': "/input jump_last_buffer_displayed",
'b': "/buffer",
'sp': "/window splith",
'vsp': "/window splitv"}
# Vi operators.
# -------------
# Each operator must have a corresponding function, called "operator_X" where
# X is the operator. For example: `operator_c()`.
VI_OPERATORS = ["c", "d", "y"]
# Vi motions.
# -----------
# Vi motions. Each motion must have a corresponding function, called
# "motion_X" where X is the motion (e.g. `motion_w()`).
# See Also: `SPECIAL_CHARS`.
VI_MOTIONS = ["w", "e", "b", "^", "$", "h", "l", "W", "E", "B", "f", "F", "t",
"T", "ge", "gE", "0"]
# Special characters for motions. The corresponding function's name is
# converted before calling. For example, "^" will call `motion_carret` instead
# of `motion_^` (which isn't allowed because of illegal characters).
SPECIAL_CHARS = {'^': "carret",
'$': "dollar"}
# Methods for vi operators, motions and key bindings.
# ===================================================
# Documented base examples:
# -------------------------
def operator_base(buf, input_line, pos1, pos2, overwrite):
"""Operator method example.
Args:
buf (str): pointer to the current WeeChat buffer.
input_line (str): the content of the input line.
pos1 (int): the starting position of the motion.
pos2 (int): the ending position of the motion.
overwrite (bool, optional): whether the character at the cursor's new
position should be overwritten or not (for inclusive motions).
Defaults to False.
Notes:
Should be called "operator_X", where X is the operator, and defined in
`VI_OPERATORS`.
Must perform actions (e.g. modifying the input line) on its own,
using the WeeChat API.
See Also:
For additional examples, see `operator_d()` and
`operator_y()`.
"""
# Get start and end positions.
start = min(pos1, pos2)
end = max(pos1, pos2)
# Print the text the operator should go over.
weechat.prnt("", "Selection: %s" % input_line[start:end])
def motion_base(input_line, cur, count):
"""Motion method example.
Args:
input_line (str): the content of the input line.
cur (int): the position of the cursor.
count (int): the amount of times to multiply or iterate the action.
Returns:
A tuple containing three values:
int: the new position of the cursor.
bool: True if the motion is inclusive, False otherwise.
bool: True if the motion is catching, False otherwise.
See `start_catching_keys()` for more info on catching motions.
Notes:
Should be called "motion_X", where X is the motion, and defined in
`VI_MOTIONS`.
Must not modify the input line directly.
See Also:
For additional examples, see `motion_w()` (normal motion) and
`motion_f()` (catching motion).
"""
# Find (relative to cur) position of next number.
pos = get_pos(input_line, REGEX_INT, cur, True, count)
# Return the new (absolute) cursor position.
# This motion is exclusive, so overwrite is False.
return cur + pos, False
def key_base(buf, input_line, cur, count):
"""Key method example.
Args:
buf (str): pointer to the current WeeChat buffer.
input_line (str): the content of the input line.
cur (int): the position of the cursor.
count (int): the amount of times to multiply or iterate the action.
Notes:
Should be called `key_X`, where X represents the key(s), and defined
in `VI_KEYS`.
Must perform actions on its own (using the WeeChat API).
See Also:
For additional examples, see `key_a()` (normal key) and
`key_r()` (catching key).
"""
# Key was pressed. Go to Insert mode (similar to "i").
set_mode("INSERT")
# Operators:
# ----------
def operator_d(buf, input_line, pos1, pos2, overwrite=False):
"""Delete text from `pos1` to `pos2` from the input line.
If `overwrite` is set to True, the character at the cursor's new position
is removed as well (the motion is inclusive).
See Also:
`operator_base()`.
"""
start = min(pos1, pos2)
end = max(pos1, pos2)
if overwrite:
end += 1
input_line = list(input_line)
del input_line[start:end]
input_line = "".join(input_line)
weechat.buffer_set(buf, "input", input_line)
set_cur(buf, input_line, pos1)
def operator_c(buf, input_line, pos1, pos2, overwrite=False):
"""Delete text from `pos1` to `pos2` from the input and enter Insert mode.
If `overwrite` is set to True, the character at the cursor's new position
is removed as well (the motion is inclusive.)
See Also:
`operator_base()`.
"""
operator_d(buf, input_line, pos1, pos2, overwrite)
set_mode("INSERT")
def operator_y(buf, input_line, pos1, pos2, _):
"""Yank text from `pos1` to `pos2` from the input line.
See Also:
`operator_base()`.
"""
start = min(pos1, pos2)
end = max(pos1, pos2)
proc = subprocess.Popen(["xclip", "-selection", "c"],
stdin=subprocess.PIPE)
proc.communicate(input=input_line[start:end])
# Motions:
# --------
def motion_0(input_line, cur, count):
"""Go to the first character of the line.
See Also;
`motion_base()`.
"""
return 0, False, False
def motion_w(input_line, cur, count):
"""Go `count` words forward and return position.
See Also:
`motion_base()`.
"""
pos = get_pos(input_line, REGEX_MOTION_LOWERCASE_W, cur, True, count)
if pos == -1:
return len(input_line), False, False
return cur + pos, False, False
def motion_W(input_line, cur, count):
"""Go `count` WORDS forward and return position.
See Also:
`motion_base()`.
"""
pos = get_pos(input_line, REGEX_MOTION_UPPERCASE_W, cur, True, count)
if pos == -1:
return len(input_line), False, False
return cur + pos, False, False
def motion_e(input_line, cur, count):
"""Go to the end of `count` words and return position.
See Also:
`motion_base()`.
"""
for _ in range(max(1, count)):
found = False
pos = cur
for pos in range(cur + 1, len(input_line) - 1):
# Whitespace, keep going.
if WHITESPACE.match(input_line[pos]):
pass
# End of sequence made from 'iskeyword' characters only,
# or end of sequence made from non 'iskeyword' characters only.
elif ((IS_KEYWORD.match(input_line[pos]) and
(not IS_KEYWORD.match(input_line[pos + 1]) or
WHITESPACE.match(input_line[pos + 1]))) or
(not IS_KEYWORD.match(input_line[pos]) and
(IS_KEYWORD.match(input_line[pos + 1]) or
WHITESPACE.match(input_line[pos + 1])))):
found = True
cur = pos
break
# We're at the character before the last and we still found nothing.
# Go to the last character.
if not found:
cur = pos + 1
return cur, True, False
def motion_E(input_line, cur, count):
"""Go to the end of `count` WORDS and return cusor position.
See Also:
`motion_base()`.
"""
pos = get_pos(input_line, REGEX_MOTION_UPPERCASE_E, cur, True, count)
if pos == -1:
return len(input_line), False, False
return cur + pos, True, False
def motion_b(input_line, cur, count):
"""Go `count` words backwards and return position.
See Also:
`motion_base()`.
"""
# "b" is just "e" on inverted data (e.g. "olleH" instead of "Hello").
pos_inv = motion_e(input_line[::-1], len(input_line) - cur - 1, count)[0]
pos = len(input_line) - pos_inv - 1
return pos, True, False
def motion_B(input_line, cur, count):
"""Go `count` WORDS backwards and return position.
See Also:
`motion_base()`.
"""
new_cur = len(input_line) - cur
pos = get_pos(input_line[::-1], REGEX_MOTION_UPPERCASE_B, new_cur,
count=count)
if pos == -1:
return 0, False, False
pos = len(input_line) - (pos + new_cur + 1)
return pos, True, False
def motion_ge(input_line, cur, count):
"""Go to end of `count` words backwards and return position.
See Also:
`motion_base()`.
"""
# "ge is just "w" on inverted data (e.g. "olleH" instead of "Hello").
pos_inv = motion_w(input_line[::-1], len(input_line) - cur - 1, count)[0]
pos = len(input_line) - pos_inv - 1
return pos, True, False
def motion_gE(input_line, cur, count):
"""Go to end of `count` WORDS backwards and return position.
See Also:
`motion_base()`.
"""
new_cur = len(input_line) - cur - 1
pos = get_pos(input_line[::-1], REGEX_MOTION_G_UPPERCASE_E, new_cur,
True, count)
if pos == -1:
return 0, False, False
pos = len(input_line) - (pos + new_cur + 1)
return pos, True, False
def motion_h(input_line, cur, count):
"""Go `count` characters to the left and return position.
See Also:
`motion_base()`.
"""
return max(0, cur - max(count, 1)), False, False
def motion_l(input_line, cur, count):
"""Go `count` characters to the right and return position.
See Also:
`motion_base()`.
"""
return cur + max(count, 1), False, False
def motion_carret(input_line, cur, count):
"""Go to first non-blank character of line and return position.
See Also:
`motion_base()`.
"""
pos = get_pos(input_line, REGEX_MOTION_CARRET, 0)
return pos, False, False
def motion_dollar(input_line, cur, count):
"""Go to end of line and return position.
See Also:
`motion_base()`.
"""
pos = len(input_line)
return pos, False, False
def motion_f(input_line, cur, count):
"""Go to `count`'th occurence of character and return position.
See Also:
`motion_base()`.
"""
return start_catching_keys(1, "cb_motion_f", input_line, cur, count)
def cb_motion_f(update_last=True):
"""Callback for `motion_f()`.
Args:
update_last (bool, optional): should `last_search_motion` be updated?
Set to False when calling from `key_semicolon()` or `key_comma()`
so that the last search motion isn't overwritten.
Defaults to True.
See Also:
`start_catching_keys()`.
"""
global last_search_motion
pattern = catching_keys_data['keys']
pos = get_pos(catching_keys_data['input_line'], re.escape(pattern),
catching_keys_data['cur'], True,
catching_keys_data['count'])
catching_keys_data['new_cur'] = max(0, pos) + catching_keys_data['cur']
if update_last:
last_search_motion = {'motion': "f", 'data': pattern}
cb_key_combo_default(None, None, "")
def motion_F(input_line, cur, count):
"""Go to `count`'th occurence of char to the right and return position.
See Also:
`motion_base()`.
"""
return start_catching_keys(1, "cb_motion_F", input_line, cur, count)
def cb_motion_F(update_last=True):
"""Callback for `motion_F()`.
Args:
update_last (bool, optional): should `last_search_motion` be updated?
Set to False when calling from `key_semicolon()` or `key_comma()`
so that the last search motion isn't overwritten.
Defaults to True.
See Also:
`start_catching_keys()`.
"""
global last_search_motion
pattern = catching_keys_data['keys']
cur = len(catching_keys_data['input_line']) - catching_keys_data['cur']
pos = get_pos(catching_keys_data['input_line'][::-1],
re.escape(pattern),
cur,
False,
catching_keys_data['count'])
catching_keys_data['new_cur'] = catching_keys_data['cur'] - max(0, pos + 1)
if update_last:
last_search_motion = {'motion': "F", 'data': pattern}
cb_key_combo_default(None, None, "")
def motion_t(input_line, cur, count):
"""Go to `count`'th occurence of char and return position.
The position returned is the position of the character to the left of char.
See Also:
`motion_base()`.
"""
return start_catching_keys(1, "cb_motion_t", input_line, cur, count)
def cb_motion_t(update_last=True):
"""Callback for `motion_t()`.
Args:
update_last (bool, optional): should `last_search_motion` be updated?
Set to False when calling from `key_semicolon()` or `key_comma()`
so that the last search motion isn't overwritten.
Defaults to True.
See Also:
`start_catching_keys()`.
"""
global last_search_motion
pattern = catching_keys_data['keys']
pos = get_pos(catching_keys_data['input_line'], re.escape(pattern),
catching_keys_data['cur'] + 1,
True, catching_keys_data['count'])
pos += 1
if pos > 0:
catching_keys_data['new_cur'] = pos + catching_keys_data['cur'] - 1
else:
catching_keys_data['new_cur'] = catching_keys_data['cur']
if update_last:
last_search_motion = {'motion': "t", 'data': pattern}
cb_key_combo_default(None, None, "")
def motion_T(input_line, cur, count):
"""Go to `count`'th occurence of char to the left and return position.
The position returned is the position of the character to the right of
char.
See Also:
`motion_base()`.
"""
return start_catching_keys(1, "cb_motion_T", input_line, cur, count)
def cb_motion_T(update_last=True):
"""Callback for `motion_T()`.
Args:
update_last (bool, optional): should `last_search_motion` be updated?
Set to False when calling from `key_semicolon()` or `key_comma()`
so that the last search motion isn't overwritten.
Defaults to True.
See Also:
`start_catching_keys()`.
"""
global last_search_motion
pattern = catching_keys_data['keys']
pos = get_pos(catching_keys_data['input_line'][::-1], re.escape(pattern),
(len(catching_keys_data['input_line']) -
(catching_keys_data['cur'] + 1)) + 1,
True, catching_keys_data['count'])
pos += 1
if pos > 0:
catching_keys_data['new_cur'] = catching_keys_data['cur'] - pos + 1
else:
catching_keys_data['new_cur'] = catching_keys_data['cur']
if update_last:
last_search_motion = {'motion': "T", 'data': pattern}
cb_key_combo_default(None, None, "")
# Keys:
# -----
def key_cc(buf, input_line, cur, count):
"""Delete line and start Insert mode.
See Also:
`key_base()`.
"""
weechat.command("", "/input delete_line")
set_mode("INSERT")
def key_C(buf, input_line, cur, count):
"""Delete from cursor to end of line and start Insert mode.
See Also:
`key_base()`.
"""
weechat.command("", "/input delete_end_of_line")
set_mode("INSERT")
def key_yy(buf, input_line, cur, count):
"""Yank line.
See Also:
`key_base()`.
"""
proc = subprocess.Popen(["xclip", "-selection", "c"],
stdin=subprocess.PIPE)
proc.communicate(input=input_line)
def key_i(buf, input_line, cur, count):
"""Start Insert mode.
See Also:
`key_base()`.
"""
set_mode("INSERT")
def key_a(buf, input_line, cur, count):
"""Move cursor one character to the right and start Insert mode.
See Also:
`key_base()`.
"""
set_cur(buf, input_line, cur + 1, False)
set_mode("INSERT")
def key_A(buf, input_line, cur, count):
"""Move cursor to end of line and start Insert mode.
See Also:
`key_base()`.
"""
set_cur(buf, input_line, len(input_line), False)
set_mode("INSERT")
def key_I(buf, input_line, cur, count):
"""Move cursor to first non-blank character and start Insert mode.
See Also:
`key_base()`.
"""
pos, _, _ = motion_carret(input_line, cur, 0)
set_cur(buf, input_line, pos)
set_mode("INSERT")
def key_G(buf, input_line, cur, count):
"""Scroll to specified line or bottom of buffer.
See Also:
`key_base()`.
"""
if count > 0:
# This is necessary to prevent weird scroll jumps.
weechat.command("", "/window scroll_top")
weechat.command("", "/window scroll %s" % (count - 1))
else:
weechat.command("", "/window scroll_bottom")
def key_r(buf, input_line, cur, count):
"""Replace `count` characters under the cursor.
See Also:
`key_base()`.
"""
start_catching_keys(1, "cb_key_r", input_line, cur, count, buf)
def cb_key_r():
"""Callback for `key_r()`.
See Also:
`start_catching_keys()`.
"""
global catching_keys_data
input_line = list(catching_keys_data['input_line'])
count = max(catching_keys_data['count'], 1)
cur = catching_keys_data['cur']
if cur + count <= len(input_line):
for _ in range(count):
input_line[cur] = catching_keys_data['keys']
cur += 1
input_line = "".join(input_line)
weechat.buffer_set(catching_keys_data['buf'], "input", input_line)
set_cur(catching_keys_data['buf'], input_line, cur - 1)
catching_keys_data = {'amount': 0}
def key_R(buf, input_line, cur, count):
"""Start Replace mode.
See Also:
`key_base()`.
"""
set_mode("REPLACE")
def key_tilda(buf, input_line, cur, count):
"""Switch the case of `count` characters under the cursor.
See Also:
`key_base()`.
"""
input_line = list(input_line)
count = max(1, count)
while count and cur < len(input_line):
input_line[cur] = input_line[cur].swapcase()
count -= 1
cur += 1
input_line = "".join(input_line)
weechat.buffer_set(buf, "input", input_line)
set_cur(buf, input_line, cur)
def key_alt_j(buf, input_line, cur, count):
"""Go to WeeChat buffer.
Called to preserve WeeChat's alt-j buffer switching.
This is only called when alt-j<num> is pressed after pressing Esc, because
\x01\x01j is received in key_combo_default which becomes \x01j after
removing the detected Esc key.
If Esc isn't the last pressed key, \x01j<num> is directly received in
key_combo_default.
"""
start_catching_keys(2, "cb_key_alt_j", input_line, cur, count)
def cb_key_alt_j():
"""Callback for `key_alt_j()`.
See Also:
`start_catching_keys()`.
"""
global catching_keys_data
weechat.command("", "/buffer " + catching_keys_data['keys'])
catching_keys_data = {'amount': 0}
def key_semicolon(buf, input_line, cur, count, swap=False):
"""Repeat last f, t, F, T `count` times.
Args:
swap (bool, optional): if True, the last motion will be repeated in the
opposite direction (e.g. "f" instead of "F"). Defaults to False.
See Also:
`key_base()`.
"""
global catching_keys_data, vi_buffer
catching_keys_data = ({'amount': 0,
'input_line': input_line,
'cur': cur,
'keys': last_search_motion['data'],
'count': count,
'new_cur': 0,
'buf': buf})
# Swap the motion's case if called from key_comma.
if swap:
motion = last_search_motion['motion'].swapcase()
else:
motion = last_search_motion['motion']
func = "cb_motion_%s" % motion
vi_buffer = motion
globals()[func](False)
def key_comma(buf, input_line, cur, count):
"""Repeat last f, t, F, T in opposite direction `count` times.
See Also:
`key_base()`.
"""
key_semicolon(buf, input_line, cur, count, True)
# Vi key bindings.
# ================
# String values will be executed as normal WeeChat commands.
# For functions, see `key_base()` for reference.
VI_KEYS = {'j': "/window scroll_down",
'k': "/window scroll_up",
'G': key_G,
'gg': "/window scroll_top",
'x': "/input delete_next_char",
'X': "/input delete_previous_char",
'dd': "/input delete_line",
'D': "/input delete_end_of_line",
'cc': key_cc,
'C': key_C,
'i': key_i,
'a': key_a,
'A': key_A,
'I': key_I,
'yy': key_yy,
'p': "/input clipboard_paste",
'/': "/input search_text",
'gt': "/buffer +1",
'K': "/buffer +1",
'gT': "/buffer -1",
'J': "/buffer -1",
'r': key_r,
'R': key_R,
'~': key_tilda,
'\x01[[A': "/input history_previous",
'\x01[[B': "/input history_next",
'\x01[[C': "/input move_next_char",
'\x01[[D': "/input move_previous_char",
'\x01[[H': "/input move_beginning_of_line",
'\x01[[F': "/input move_end_of_line",
'\x01[[5~': "/window page_up",
'\x01[[6~': "/window page_down",
'\x01[[3~': "/input delete_next_char",
'\x01[[2~': key_i,
'\x01M': "/input return",
'\x01?': "/input move_previous_char",
' ': "/input move_next_char",
'\x01[j': key_alt_j,
'\x01[1': "/buffer *1",
'\x01[2': "/buffer *2",
'\x01[3': "/buffer *3",
'\x01[4': "/buffer *4",
'\x01[5': "/buffer *5",
'\x01[6': "/buffer *6",
'\x01[7': "/buffer *7",
'\x01[8': "/buffer *8",
'\x01[9': "/buffer *9",
'\x01[0': "/buffer *10",
'\x01^': "/input jump_last_buffer_displayed",
'\x01D': "/window page_down",
'\x01U': "/window page_up",
'\x01Wh': "/window left",
'\x01Wj': "/window down",
'\x01Wk': "/window up",
'\x01Wl': "/window right",
'\x01W=': "/window balance",
'\x01Wx': "/window swap",
'\x01Ws': "/window splith",
'\x01Wv': "/window splitv",
'\x01Wq': "/window merge",
';': key_semicolon,
',': key_comma}
# Add alt-j<number> bindings.
for i in range(10, 99):
VI_KEYS['\x01[j%s' % i] = "/buffer %s" % i
# Key handling.
# =============
def cb_key_pressed(data, signal, signal_data):
"""Detect potential Esc presses.
Alt and Esc are detected as the same key in most terminals. The difference
is that Alt signal is sent just before the other pressed key's signal.
We therefore use a timeout (50ms) to detect whether Alt or Esc was pressed.
"""
global last_signal_time
last_signal_time = time.time()
if signal_data == "\x01[":
# In 50ms, check if any other keys were pressed. If not, it's Esc!
weechat.hook_timer(50, 0, 1, "cb_check_esc",
"{:f}".format(last_signal_time))
return weechat.WEECHAT_RC_OK
def cb_check_esc(data, remaining_calls):
"""Check if the Esc key was pressed and change the mode accordingly."""
global esc_pressed, vi_buffer, cmd_text, catching_keys_data
if last_signal_time == float(data):
esc_pressed += 1
set_mode("NORMAL")
# Cancel any current partial commands.
vi_buffer = ""
cmd_text = ""
weechat.command("", "/bar hide vi_cmd")
catching_keys_data = {'amount': 0}
weechat.bar_item_update("vi_buffer")
return weechat.WEECHAT_RC_OK
def cb_key_combo_default(data, signal, signal_data):
"""Eat and handle key events when in Normal mode, if needed.
The key_combo_default signal is sent when a key combo is pressed. For
example, alt-k will send the "\x01[k" signal.
Esc is handled a bit differently to avoid delays, see `cb_key_pressed()`.
"""
global esc_pressed, vi_buffer, cmd_text
# If Esc was pressed, strip the Esc part from the pressed keys.
# Example: user presses Esc followed by i. This is detected as "\x01[i",
# but we only want to handle "i".
keys = signal_data
if esc_pressed or esc_pressed == -2:
if keys.startswith("\x01[" * esc_pressed):
# Multiples of 3 seem to "cancel" themselves,
# e.g. Esc-Esc-Esc-Alt-j-11 is detected as "\x01[\x01[\x01"
# followed by "\x01[j11" (two different signals).
if signal_data == "\x01[" * 3:
esc_pressed = -1 # `cb_check_esc()` will increment it to 0.
else:
esc_pressed = 0
# This can happen if a valid combination is started but interrupted
# with Esc, such as Ctrl-W→Esc→w which would send two signals:
# "\x01W\x01[" then "\x01W\x01[w".
# In that case, we still need to handle the next signal ("\x01W\x01[w")
# so we use the special value "-2".
else:
esc_pressed = -2
keys = keys.split("\x01[")[-1] # Remove the "Esc" part(s).
# Ctrl-Space.
elif keys == "\x01@":
set_mode("NORMAL")
return weechat.WEECHAT_RC_OK_EAT
# Nothing to do here.
if mode == "INSERT":
return weechat.WEECHAT_RC_OK
# We're in Replace mode — allow "normal" key presses (e.g. "a") and
# overwrite the next character with them, but let the other key presses
# pass normally (e.g. backspace, arrow keys, etc).
if mode == "REPLACE":
if len(keys) == 1:
weechat.command("", "/input delete_next_char")
elif keys == "\x01?":
weechat.command("", "/input move_previous_char")
return weechat.WEECHAT_RC_OK_EAT
return weechat.WEECHAT_RC_OK
# We're catching keys! Only "normal" key presses interest us (e.g. "a"),
# not complex ones (e.g. backspace).
if len(keys) == 1 and catching_keys_data['amount']:
catching_keys_data['keys'] += keys
catching_keys_data['amount'] -= 1
# Done catching keys, execute the callback.
if catching_keys_data['amount'] == 0:
globals()[catching_keys_data['callback']]()
vi_buffer = ""
weechat.bar_item_update("vi_buffer")
return weechat.WEECHAT_RC_OK_EAT
# We're in command-line mode.
if cmd_text:
# Backspace key.
if keys == "\x01?":
# Remove the last character from our command line.
cmd_text = list(cmd_text)
del cmd_text[-1]
cmd_text = "".join(cmd_text)
# Return key.
elif keys == "\x01M":
weechat.hook_timer(1, 0, 1, "cb_exec_cmd", cmd_text)
cmd_text = ""
# Input.
elif len(keys) == 1:
cmd_text += keys
# Update (and maybe hide) the bar item.
weechat.bar_item_update("cmd_text")
if not cmd_text:
weechat.command("", "/bar hide vi_cmd")
return weechat.WEECHAT_RC_OK_EAT
# Enter command mode.
elif keys == ":":
cmd_text += ":"
weechat.command("", "/bar show vi_cmd")
weechat.bar_item_update("cmd_text")
return weechat.WEECHAT_RC_OK_EAT
# Add key to the buffer.
vi_buffer += keys
weechat.bar_item_update("vi_buffer")
if not vi_buffer:
return weechat.WEECHAT_RC_OK
# Check if the keys have a (partial or full) match. If so, also get the
# keys without the count. (These are the actual keys we should handle.)
# After that, `vi_buffer` is only used for display purposes — only
# `vi_keys` is checked for all the handling.
# If no matches are found, the keys buffer is cleared.
matched, vi_keys, count = get_keys_and_count(vi_buffer)
if not matched:
vi_buffer = ""
return weechat.WEECHAT_RC_OK_EAT
buf = weechat.current_buffer()
input_line = weechat.buffer_get_string(buf, "input")
cur = weechat.buffer_get_integer(buf, "input_pos")
# It's a key. If the corresponding value is a string, we assume it's a
# WeeChat command. Otherwise, it's a method we'll call.
if vi_keys in VI_KEYS:
if isinstance(VI_KEYS[vi_keys], str):
for _ in range(max(count, 1)):
# This is to avoid crashing WeeChat on script reloads/unloads,
# because no hooks must still be running when a script is
# reloaded or unloaded.
if VI_KEYS[vi_keys] == "/input return":
return weechat.WEECHAT_RC_OK
weechat.command("", VI_KEYS[vi_keys])
current_cur = weechat.buffer_get_integer(buf, "input_pos")
set_cur(buf, input_line, current_cur)
else:
VI_KEYS[vi_keys](buf, input_line, cur, count)
# It's a motion (e.g. "w") — call `motion_X()` where X is the motion, then
# set the cursor's position to what that function returned.
elif vi_keys in VI_MOTIONS:
if vi_keys in SPECIAL_CHARS:
func = "motion_%s" % SPECIAL_CHARS[vi_keys]
else:
func = "motion_%s" % vi_keys
end, _, _ = globals()[func](input_line, cur, count)
set_cur(buf, input_line, end)
# It's an operator + motion (e.g. "dw") — call `motion_X()` (where X is
# the motion), then we call `operator_Y()` (where Y is the operator)
# with the position `motion_X()` returned. `operator_Y()` should then
# handle changing the input line.
elif (len(vi_keys) > 1 and
vi_keys[0] in VI_OPERATORS and
vi_keys[1:] in VI_MOTIONS):
if vi_keys[1:] in SPECIAL_CHARS:
func = "motion_%s" % SPECIAL_CHARS[vi_keys[1:]]
else:
func = "motion_%s" % vi_keys[1:]
pos, overwrite, catching = globals()[func](input_line, cur, count)
# If it's a catching motion, we don't want to call the operator just
# yet -- this code will run again when the motion is complete, at which
# point we will.
if not catching:
oper = "operator_%s" % vi_keys[0]
globals()[oper](buf, input_line, cur, pos, overwrite)
# The combo isn't completed yet (e.g. just "d").
else:
return weechat.WEECHAT_RC_OK_EAT
# We've already handled the key combo, so clear the keys buffer.
if not catching_keys_data['amount']:
vi_buffer = ""
weechat.bar_item_update("vi_buffer")
return weechat.WEECHAT_RC_OK_EAT
# Callbacks.
# ==========
# Bar items.
# ----------
def cb_vi_buffer(data, item, window):
"""Return the content of the vi buffer (pressed keys on hold)."""
return vi_buffer
def cb_cmd_text(data, item, window):
"""Return the text of the command line."""
return cmd_text
def cb_mode_indicator(data, item, window):
"""Return the current mode (INSERT/NORMAL/REPLACE)."""
    return mode[0]
def cb_line_numbers(data, item, window):
"""Fill the line numbers bar item."""
bar_height = weechat.window_get_integer(window, "win_chat_height")
content = ""
for i in range(1, bar_height + 1):
content += "%s \n" % i
return content
# Callbacks for the line numbers bar.
# ...................................
def cb_update_line_numbers(data, signal, signal_data):
"""Call `cb_timer_update_line_numbers()` when switching buffers.
A timer is required because the bar item is refreshed before the new buffer
is actually displayed, so ``win_chat_height`` would refer to the old
buffer. Using a timer refreshes the item after the new buffer is displayed.
"""
weechat.hook_timer(10, 0, 1, "cb_timer_update_line_numbers", "")
return weechat.WEECHAT_RC_OK
def cb_timer_update_line_numbers(data, remaining_calls):
"""Update the line numbers bar item."""
weechat.bar_item_update("line_numbers")
return weechat.WEECHAT_RC_OK
# Config.
# -------
def cb_config(data, option, value):
"""Script option changed, update our copy."""
option_name = option.split(".")[-1]
if option_name in vimode_settings:
vimode_settings[option_name] = value
return weechat.WEECHAT_RC_OK
# Command-line execution.
# -----------------------
def cb_exec_cmd(data, remaining_calls):
"""Translate and execute our custom commands to WeeChat command."""
# Process the entered command.
data = list(data)
del data[0]
data = "".join(data)
# s/foo/bar command.
if data.startswith("s/"):
cmd = data
parsed_cmd = next(csv.reader(StringIO(cmd), delimiter="/",
escapechar="\\"))
pattern = re.escape(parsed_cmd[1])
repl = parsed_cmd[2]
repl = re.sub(r"([^\\])&", r"\1" + pattern, repl)
flag = None
if len(parsed_cmd) == 4:
flag = parsed_cmd[3]
count = 1
if flag == "g":
count = 0
buf = weechat.current_buffer()
input_line = weechat.buffer_get_string(buf, "input")
input_line = re.sub(pattern, repl, input_line, count)
weechat.buffer_set(buf, "input", input_line)
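# Illustrative example (not from the original script): for ":s/foo/bar/g",
# parsed_cmd == ['s', 'foo', 'bar', 'g'], so pattern is "foo", repl is "bar",
# flag "g" sets count to 0 and every occurrence on the input line is replaced.
# An unescaped "&" in the replacement is expanded to the search pattern first.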
# Shell command.
elif data.startswith("!"):
weechat.command("", "/exec -buffer shell %s" % data[1:])
# Commands like `:22`. This should start cursor mode (``/cursor``) and take
# us to the relevant line.
# TODO: look into possible replacement key bindings for: ← ↑ → ↓ Q m q.
elif data.isdigit():
line_number = int(data)
hdata_window = weechat.hdata_get("window")
window = weechat.current_window()
x = weechat.hdata_integer(hdata_window, window, "win_chat_x")
y = (weechat.hdata_integer(hdata_window, window, "win_chat_y") +
(line_number - 1))
weechat.command("", "/cursor go {},{}".format(x, y))
# Check against defined commands.
else:
data = data.split(" ", 1)
cmd = data[0]
args = ""
if len(data) == 2:
args = data[1]
if cmd in VI_COMMANDS:
weechat.command("", "%s %s" % (VI_COMMANDS[cmd], args))
# No vi commands defined, run the command as a WeeChat command.
else:
weechat.command("", "/{} {}".format(cmd, args))
return weechat.WEECHAT_RC_OK
# Script commands.
# ----------------
def cb_vimode_cmd(data, buf, args):
"""Handle script commands (``/vimode <command>``)."""
# ``/vimode`` or ``/vimode help``
if not args or args == "help":
weechat.prnt("", "[vimode.py] %s" % README_URL)
# ``/vimode bind_keys`` or ``/vimode bind_keys --list``
elif args.startswith("bind_keys"):
infolist = weechat.infolist_get("key", "", "default")
weechat.infolist_reset_item_cursor(infolist)
commands = ["/key unbind ctrl-W",
"/key bind ctrl-W /input delete_previous_word",
"/key bind ctrl-^ /input jump_last_buffer_displayed",
"/key bind ctrl-Wh /window left",
"/key bind ctrl-Wj /window down",
"/key bind ctrl-Wk /window up",
"/key bind ctrl-Wl /window right",
"/key bind ctrl-W= /window balance",
"/key bind ctrl-Wx /window swap",
"/key bind ctrl-Ws /window splith",
"/key bind ctrl-Wv /window splitv",
"/key bind ctrl-Wq /window merge"]
while weechat.infolist_next(infolist):
key = weechat.infolist_string(infolist, "key")
if re.match(REGEX_PROBLEMATIC_KEYBINDINGS, key):
commands.append("/key unbind %s" % key)
if args == "bind_keys":
weechat.prnt("", "Running commands:")
for command in commands:
weechat.command("", command)
weechat.prnt("", "Done.")
elif args == "bind_keys --list":
weechat.prnt("", "Listing commands we'll run:")
for command in commands:
weechat.prnt("", " %s" % command)
weechat.prnt("", "Done.")
return weechat.WEECHAT_RC_OK
# Helpers.
# ========
# Motions/keys helpers.
# ---------------------
def get_pos(data, regex, cur, ignore_cur=False, count=0):
"""Return the position of `regex` match in `data`, starting at `cur`.
Args:
data (str): the data to search in.
regex (pattern): regex pattern to search for.
cur (int): where to start the search.
ignore_cur (bool, optional): should the first match be ignored if it's
also the character at `cur`?
Defaults to False.
count (int, optional): the index of the match to return. Defaults to 0.
Returns:
int: position of the match. -1 if no matches are found.
"""
# List of the *positions* of the found patterns.
matches = [m.start() for m in re.finditer(regex, data[cur:])]
pos = -1
if count:
if len(matches) > count - 1:
if ignore_cur and matches[0] == 0:
if len(matches) > count:
pos = matches[count]
else:
pos = matches[count - 1]
elif matches:
if ignore_cur and matches[0] == 0:
if len(matches) > 1:
pos = matches[1]
else:
pos = matches[0]
return pos
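# Illustrative usage (not part of the original script): the returned position is
# relative to `cur`, not absolute. With the input line "foo bar baz":
# get_pos("foo bar baz", re.compile(r"\s"), 2) -> 1 (the space at index 3)
# get_pos("foo bar baz", re.compile(r"\s"), 3, ignore_cur=True) -> 4 (skips the space under the cursor)
# get_pos("foo bar baz", re.compile(r"q"), 0) -> -1 (no match)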
def set_cur(buf, input_line, pos, cap=True):
"""Set the cursor's position.
Args:
buf (str): pointer to the current WeeChat buffer.
input_line (str): the content of the input line.
pos (int): the position to set the cursor to.
cap (bool, optional): if True, the `pos` will shortened to the length
of `input_line` if it's too long. Defaults to True.
"""
if cap:
pos = min(pos, len(input_line) - 1)
weechat.buffer_set(buf, "input_pos", str(pos))
def start_catching_keys(amount, callback, input_line, cur, count, buf=None):
"""Start catching keys. Used for special commands (e.g. "f", "r").
amount (int): amount of keys to catch.
callback (str): name of method to call once all keys are caught.
input_line (str): input line's content.
cur (int): cursor's position.
count (int): count, e.g. "2" for "2fs".
buf (str, optional): pointer to the current WeeChat buffer.
Defaults to None.
`catching_keys_data` is a dict with the above arguments, as well as:
keys (str): pressed keys will be added under this key.
new_cur (int): the new cursor's position, set in the callback.
When catching keys is active, normal pressed keys (e.g. "a" but not arrows)
will get added to `catching_keys_data` under the key "keys", and will not
be handled any further.
Once all keys are caught, the method defined in the "callback" key is
called, and can use the data in `catching_keys_data` to perform its action.
"""
global catching_keys_data
if "new_cur" in catching_keys_data:
new_cur = catching_keys_data['new_cur']
catching_keys_data = {'amount': 0}
return new_cur, True, False
catching_keys_data = ({'amount': amount,
'callback': callback,
'input_line': input_line,
'cur': cur,
'keys': "",
'count': count,
'new_cur': 0,
'buf': buf})
return cur, False, True
def get_keys_and_count(combo):
"""Check if `combo` is a valid combo and extract keys/counts if so.
Args:
combo (str): pressed keys combo.
Returns:
matched (bool): True if the combo has a (partial or full) match, False
otherwise.
combo (str): `combo` with the count removed. These are the actual keys
we should handle.
count (int): count for `combo`.
"""
# Look for a potential match (e.g. "d" might become "dw" or "dd" so we
# accept it, but "d9" is invalid).
matched = False
# Digits are allowed at the beginning (counts or "0").
count = 0
if combo.isdigit():
matched = True
elif combo and combo[0].isdigit():
count = ""
for char in combo:
if char.isdigit():
count += char
else:
break
combo = combo.replace(count, "", 1)
count = int(count)
# Check against defined keys.
if not matched:
for key in VI_KEYS:
if key.startswith(combo):
matched = True
break
# Check against defined motions.
if not matched:
for motion in VI_MOTIONS:
if motion.startswith(combo):
matched = True
break
# Check against defined operators + motions.
if not matched:
for operator in VI_OPERATORS:
if combo.startswith(operator):
# Check for counts before the motion (but after the operator).
vi_keys_no_op = combo[len(operator):]
# There's no motion yet.
if vi_keys_no_op.isdigit():
matched = True
break
# Get the motion count, then multiply the operator count by
# it, similar to vim's behavior.
elif vi_keys_no_op and vi_keys_no_op[0].isdigit():
motion_count = ""
for char in vi_keys_no_op:
if char.isdigit():
motion_count += char
else:
break
# Remove counts from `vi_keys_no_op`.
combo = combo.replace(motion_count, "", 1)
motion_count = int(motion_count)
count = max(count, 1) * motion_count
# Check against defined motions.
for motion in VI_MOTIONS:
if motion.startswith(combo[1:]):
matched = True
break
return matched, combo, count
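# Illustrative results (not part of the original script), assuming "w" is a defined
# motion and "d" a defined operator, as referenced in the comments above:
# get_keys_and_count("2w") -> (True, "w", 2) count stripped from the combo
# get_keys_and_count("d2w") -> (True, "dw", 2) operator count multiplied by motion count
# get_keys_and_count("d") -> (True, "d", 0) partial match, still waiting for a motion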
# Other helpers.
# --------------
def set_mode(arg):
"""Set the current mode and update the bar mode indicator."""
global mode
mode = arg
# If we're going to Normal mode, the cursor must move one character to the
# left.
if mode == "NORMAL":
buf = weechat.current_buffer()
input_line = weechat.buffer_get_string(buf, "input")
cur = weechat.buffer_get_integer(buf, "input_pos")
set_cur(buf, input_line, cur - 1, False)
weechat.bar_item_update("mode_indicator")
def print_warning(text):
"""Print warning, in red, to the current buffer."""
weechat.prnt("", ("%s[vimode.py] %s" % (weechat.color("red"), text)))
def check_warnings():
"""Warn the user about problematic key bindings and tmux/screen."""
user_warned = False
# Warn the user about problematic key bindings that may conflict with
# vimode.
# The solution is to remove these key bindings, but that's up to the user.
infolist = weechat.infolist_get("key", "", "default")
problematic_keybindings = []
while weechat.infolist_next(infolist):
key = weechat.infolist_string(infolist, "key")
command = weechat.infolist_string(infolist, "command")
if re.match(REGEX_PROBLEMATIC_KEYBINDINGS, key):
problematic_keybindings.append("%s -> %s" % (key, command))
if problematic_keybindings:
user_warned = True
print_warning("Problematic keybindings detected:")
for keybinding in problematic_keybindings:
print_warning(" %s" % keybinding)
print_warning("These keybindings may conflict with vimode.")
print_warning("You can remove problematic key bindings and add"
" recommended ones by using /vimode bind_keys, or only"
" list them with /vimode bind_keys --list")
print_warning("For help, see: %s" % FAQ_KEYBINDINGS)
del problematic_keybindings
# Warn tmux/screen users about possible Esc detection delays.
if "STY" in os.environ or "TMUX" in os.environ:
if user_warned:
weechat.prnt("", "")
user_warned = True
print_warning("tmux/screen users, see: %s" % FAQ_ESC)
if (user_warned and not
weechat.config_string_to_boolean(vimode_settings['no_warn'])):
if user_warned:
weechat.prnt("", "")
print_warning("To force disable warnings, you can set"
" plugins.var.python.vimode.no_warn to 'on'")
# Main script.
# ============
if __name__ == "__main__":
weechat.register(SCRIPT_NAME, SCRIPT_AUTHOR, SCRIPT_VERSION,
SCRIPT_LICENSE, SCRIPT_DESC, "", "")
# Warn the user if he's using an unsupported WeeChat version.
VERSION = weechat.info_get("version_number", "")
if int(VERSION) < 0x01000000:
print_warning("Please upgrade to WeeChat ≥ 1.0.0. Previous versions"
" are not supported.")
# Set up script options.
for option, value in vimode_settings.items():
if weechat.config_is_set_plugin(option):
vimode_settings[option] = weechat.config_get_plugin(option)
else:
weechat.config_set_plugin(option, value[0])
vimode_settings[option] = value[0]
weechat.config_set_desc_plugin(option,
"%s (default: \"%s\")" % (value[1],
value[0]))
# Warn the user about possible problems if necessary.
if not weechat.config_string_to_boolean(vimode_settings['no_warn']):
check_warnings()
# Create bar items and setup hooks.
weechat.bar_item_new("mode_indicator", "cb_mode_indicator", "")
weechat.bar_item_new("cmd_text", "cb_cmd_text", "")
weechat.bar_item_new("vi_buffer", "cb_vi_buffer", "")
weechat.bar_item_new("line_numbers", "cb_line_numbers", "")
weechat.bar_new("vi_cmd", "off", "0", "root", "", "bottom", "vertical",
"vertical", "0", "0", "default", "default", "default", "0",
"cmd_text")
weechat.bar_new("vi_line_numbers", "on", "0", "window", "", "left",
"vertical", "vertical", "0", "0", "default", "default",
"default", "0", "line_numbers")
weechat.hook_config("plugins.var.python.%s.*" % SCRIPT_NAME, "cb_config",
"")
weechat.hook_signal("key_pressed", "cb_key_pressed", "")
weechat.hook_signal("key_combo_default", "cb_key_combo_default", "")
weechat.hook_signal("buffer_switch", "cb_update_line_numbers", "")
weechat.hook_command("vimode", SCRIPT_DESC, "[help | bind_keys [--list]]",
" help: show help\n"
"bind_keys: unbind problematic keys, and bind"
" recommended keys to use in WeeChat\n"
" --list: only list changes",
"help || bind_keys |--list",
"cb_vimode_cmd", "")
|
py | 1a3bbcbf7a29d30345ff0acd5660e7e8e4cf21ac | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Generate docs for the TensorFlow Python API."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import inspect
import os
import sys
import tensorflow as tf
from tensorflow.python import debug as tf_debug
from tensorflow.tools.docs import generate_lib
if __name__ == '__main__':
doc_generator = generate_lib.DocGenerator()
doc_generator.add_output_dir_argument()
doc_generator.add_src_dir_argument()
# This doc generator works on the TensorFlow codebase. Since this script lives
# at tensorflow/tools/docs, and all code is defined somewhere inside
# tensorflow/, we can compute the base directory (two levels up), which is
# valid unless we're trying to apply this to a different code base, or are
# moving the script around.
script_dir = os.path.dirname(inspect.getfile(inspect.currentframe()))
default_base_dir = os.path.join(script_dir, '..', '..')
doc_generator.add_base_dir_argument(default_base_dir)
flags = doc_generator.parse_known_args()
# tf_debug is not imported with tf, it's a separate module altogether
doc_generator.set_py_modules([('tf', tf), ('tfdbg', tf_debug)])
doc_generator.load_contrib()
sys.exit(doc_generator.build(flags))
|
py | 1a3bbd682305fc07ae9076fc02ccbcd14e451ca8 | from functools import lru_cache
from findimports import ModuleGraph
from pathlib import Path
from onegov.core import LEVELS
def test_hierarchy():
""" Originally, onegov.* modules were separated into separate repositories
and deployed individually to PyPI.
This meant that each module would list the dependencies it needed,
including other onegov.* modules. As a side-effect, this ensured that
a module like onegov.core would not import from onegov.org, creating
an undesired dependency.
With the move to a single repository and a container build, we lost this
side-effect. It is now possible for onegov.core to import from onegov.org
and that is not something we want, because things like the core should
not import from modules higher up the chain.
This test ensures that this restriction is still honored.
Each module is put into a level. Modules may import from the same level
or the levels below, but not from the levels above.
The current list of levels is also used for the upgrade step order. It can
be found in `onegov.core.__init__.py`.
This is not exactly equivalent to what we had before, but it is a good
basic check to ensure that we do not add unwanted dependencies.
"""
modules = level_by_module(LEVELS)
# all modules must be defined
for module in existing_modules():
assert module in modules, f"module not defined in hierarchy: {module}"
# graph all imports
graph = ModuleGraph()
graph.parsePathname(str(sources()))
# ensure hierarchy
for id, module in graph.modules.items():
name = module_name(module.filename)
if name is None:
continue
allowed = allowed_imports(LEVELS, name)
for imported in module.imported_names:
import_name = '.'.join(imported.name.split('.')[:2])
if not import_name.startswith('onegov'):
continue
assert import_name in allowed, \
f"Invalid import {name} → {import_name} in {imported.filename}"
def allowed_imports(levels, module):
""" Given a module name, returns an imprtable set of onegov modules. """
allowed = set()
for modules in levels:
allowed.update(modules)
if module in modules:
return allowed
assert False, f"unknown module: {module}"
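# Illustrative example (hypothetical two-level hierarchy, not the real LEVELS):
# with levels = [{'onegov.core'}, {'onegov.org'}], a module may import from its own
# level and the levels below it, so:
# allowed_imports(levels, 'onegov.core') == {'onegov.core'}
# allowed_imports(levels, 'onegov.org') == {'onegov.core', 'onegov.org'}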
def sources():
""" Returns the path to 'src'. """
return Path(__file__).parent.parent / 'src'
@lru_cache(maxsize=128)
def module_name(path):
""" Given a path, returns the onegov module, or None, if not a onegov
module (and therefore not relevant to this analysis).
"""
namespace = sources() / 'onegov'
if namespace in Path(path).parents:
name = str(path).replace(str(namespace), '')\
.strip('/')\
.split('/', 1)[0]
return f'onegov.{name}'
def level_by_module(levels):
""" Returns a dictionary with modules -> level. """
result = {}
for level, modules in enumerate(levels):
for module in modules:
assert module not in result, f"duplicate module: {module}"
result[module] = level
return result
def existing_modules():
""" Yields the module names found in the src/onegov folder. """
for child in (sources() / 'onegov').iterdir():
if child.is_dir():
yield f'onegov.{child.name}'
|
py | 1a3bbe7309f6e829c67fd01849ceb8dd434876c4 | # -*- coding: utf-8 -*-
try:
from models.interface import AbstractModel
except:
from interface import AbstractModel
import torch
import torch.nn.functional as F
import torch.nn as nn
import torchvision
import torchvision.datasets as datasets
import matplotlib.pyplot as plt
import numpy as np
import pickle
from torch import Tensor
import torch.optim as optim
from torch.utils.data import DataLoader, TensorDataset
import matplotlib.pyplot as plt
import pandas as pd
from sklearn import preprocessing
from sklearn.model_selection import train_test_split
from scipy.fft import rfft, rfftfreq, fft, fftfreq
import scipy
import time
import copy
import json
from pathlib import Path
class EEGDCNNModel(AbstractModel):
DATA_PATH = "./"
OUTPUT_PATH = "./"
def __init__(self, sample_rate=1, data_frequency=128):
model = nn.Sequential(
nn.Conv2d(4, 32, [3, 1]),
nn.ReLU(),
nn.Dropout(),
nn.Conv2d(32, 64, [3, 1]),
nn.ReLU(),
nn.Dropout(),
nn.MaxPool2d([3, 3]),
nn.Flatten(),
nn.Linear(5760, 512),
nn.ReLU(),
nn.Linear(512, 256),
nn.ReLU(),
nn.Linear(256, 4)
)
self.model = model
base_path = Path(__file__).parent
self.model.load_state_dict(torch.load((base_path / 'model_multi.pth').resolve(), 'cpu'))
self.model.eval()
self.sample_rate = sample_rate
self.data_frequency = data_frequency
print("Initialized EEG DCNN Model with sample rate {} data freq {}".format(self.sample_rate, self.data_frequency))
# the data passed in is one trial containing only the 32 EEG channels, with the last 3 sec trimmed
# the sampling period (1 / sample_rate) has to be a factor of the total clip length
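# Rough numbers for the default case (an illustrative sketch, assuming a 60 s clip of
# shape (32, 7680) at data_frequency=128 and sample_rate=1): windows = 60, each slice in
# run_eeg() spans 4 windows = 512 samples (4 s), the loop yields 56 correlation tensors of
# shape (4, 32, 32), and the tail is padded with the last tensor so one output exists per window.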
def run(self, data_path):
print("Running EEG DCNN Model")
self.run_eeg(self.DATA_PATH + data_path, self.data_frequency, self.sample_rate)
def run_eeg(self, data_path, data_frequency, sample_rate):
self.data = np.array(pickle.load(open(data_path, "rb"), encoding='latin1'))
# data is 32 channel, 7680 (60 * 128)
channels_total = self.data.shape[0]
time_total = self.data.shape[1]
windows = int((time_total / data_frequency) * sample_rate)
final_data = []
# sliding window length (in windows), chosen to match the window used during training
train_sliding_window = 4
# loops through all the windows
for i in range(windows - train_sliding_window):
time_window = self.data[:, int((data_frequency * i) / sample_rate): int((data_frequency * (i + train_sliding_window)) / sample_rate)]
transformed_channel = []
# loops through all the channels
for channel_num in range(channels_total):
channel_data = time_window[channel_num]
# convert to frequency domain
fft_channel = np.abs(rfft(channel_data))
fftfreq_channel = rfftfreq(channel_data.size, 1/ data_frequency)
# fft_channel_normalized = np.fft.fftshift(fft_channel / channel_data.size)
# power_spectrum = np.square(fft_channel_normalized)
# power = np.sum(power_spectrum)
# identify frequency ranges
one_freq = np.where(fftfreq_channel == 1)[0][0]
eight_freq = np.where(fftfreq_channel == 8)[0][0]
fourteen_freq = np.where(fftfreq_channel == 14)[0][0]
thirty_freq = np.where(fftfreq_channel == 30)[0][0]
fourtyfive_freq = np.where(fftfreq_channel == 45)[0][0]
# make bins for frequency ranges
theta_bin = fft_channel[one_freq:eight_freq]
alpha_bin = fft_channel[eight_freq:fourteen_freq]
beta_bin = fft_channel[fourteen_freq:thirty_freq]
gamma_bin = fft_channel[thirty_freq:fourtyfive_freq]
all_bins = [theta_bin, alpha_bin, beta_bin, gamma_bin]
transformed_channel.append(all_bins)
binned_pcc_matrix = np.ones((4, channels_total, channels_total)) # 4, 32, 32
for bin_num in range(4):
pcc_matrix = binned_pcc_matrix[bin_num] # 32, 32
index_mover = 0
# creates correlation matrices for each bin
for channel_num_i in range(0, channels_total):
for channel_num_j in range(index_mover, channels_total):
data1 = transformed_channel[channel_num_i][bin_num]
data2 = transformed_channel[channel_num_j][bin_num]
pcc_num = scipy.stats.pearsonr(data1, data2)[0]
pcc_matrix[channel_num_i][channel_num_j] = pcc_num
pcc_matrix[channel_num_j][channel_num_i] = pcc_num
index_mover += 1
binned_pcc_matrix[bin_num] = pcc_matrix
final_data.append(binned_pcc_matrix)
# pad the remaining windows with the last correlation matrix so every window has an output
for i in range(min(windows, train_sliding_window)):
final_data.append(binned_pcc_matrix)
self.data = torch.tensor(final_data).float()
# run model
output = self.model(self.data)
_, preds = torch.max(output, 1)
# output data as json
json_data = dict()
for i in range(len(preds)):
json_data[i / sample_rate] = int(preds[i])
json_dict = dict()
json_dict["metadata"] = {"dataPath": data_path, "eegLabelFrequency": str(sample_rate), "eegModelName":"defaulteeg"}
json_dict["data"] = json_data
with open(self.OUTPUT_PATH + 'defaulteeg.json', "w+") as outfile:
json.dump(json_dict, outfile)
def test_output_format_eeg():
model = EEGDCNNModel(sample_rate=2)
model.OUTPUT_PATH = './output/'
print("Testing output format")
model.run('uploads/dev/s01_trial01.dat')
output = json.load(open('output/defaulteeg.json', 'r'))
# print(type(output), output)
assert set(output.keys()) == set(['metadata', 'data']), "Error: wrong keys in output json: " + str(output.keys())
assert "59.0" in output['data'].keys() and '58.5' in output['data'].keys(), "Error with timestamps: " + str(output['data'].keys())
print("Passed output test")
def test_parameters_eeg():
print("Testing model parameters")
model = EEGDCNNModel(sample_rate=4)
model.OUTPUT_PATH = './output/'
model.run('uploads/dev/s01_trial01.dat')
output = json.load(open('output/defaulteeg.json', 'r'))
assert str(output['metadata']['eegLabelFrequency']) == '4', "Error setting eegLabelFrequency: " + str(output['metadata'])
print("Passed parameter test")
if __name__ == "__main__":
# test_run = EEGDCNNModel(sample_rate=1, data_frequency=128)
# test_run.run('s01_trial01.dat')
test_output_format_eeg()
test_parameters_eeg() |
py | 1a3bbed20dc812e6a48d1526b20b61459b165d14 | # Copyright (c) Microsoft Corporation
#
# All rights reserved.
#
# MIT License
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
from Deadline.Cloud import HardwareType
class AzureVmSpec:
def __init__(self, vcpus, mem_mb):
self.vcpus = vcpus
self.mem_mb = mem_mb
AZURE_VM_SIZES = {
# Compute Optimised
'Standard_F2': AzureVmSpec(2, 4 * 1024),
'Standard_F4': AzureVmSpec(4, 8 * 1024),
'Standard_F8': AzureVmSpec(8, 16 * 1024),
'Standard_F16': AzureVmSpec(16, 32 * 1024),
# General purpose
'Standard_D2_v3': AzureVmSpec(2, 8 * 1024),
'Standard_D4_v3': AzureVmSpec(4, 16 * 1024),
'Standard_D8_v3': AzureVmSpec(8, 32 * 1024),
'Standard_D16_v3': AzureVmSpec(16, 64 * 1024),
'Standard_D32_v3': AzureVmSpec(32, 128 * 1024),
'Standard_D64_v3': AzureVmSpec(64, 256 * 1024),
# GPU v1
'Standard_NC6': AzureVmSpec(6, 56 * 1024),
'Standard_NC12': AzureVmSpec(12, 112 * 1024),
'Standard_NC24': AzureVmSpec(24, 224 * 1024),
}
def vm_sizes_to_hardware_types(vm_sizes):
"""
Maps Azure VM sizes to Deadline HardwareType list
:param vm_sizes: list
:return: list of Deadline.Cloud.HardwareType
:rtype: list of Deadline.Cloud.HardwareType
"""
hw_types = []
if vm_sizes:
for vm_size in vm_sizes:
hwt = HardwareType()
hwt.ID = vm_size
hwt.Name = vm_size
hwt.RamMB = 0
hwt.VCPUs = 0
if vm_size in AZURE_VM_SIZES:
vm_spec = AZURE_VM_SIZES[vm_size]
hwt.RamMB = vm_spec.mem_mb
hwt.VCPUs = vm_spec.vcpus
hw_types.append(hwt)
else:
for vm_size, vm_spec in AZURE_VM_SIZES.iteritems():
hwt = HardwareType()
hwt.ID = vm_size
hwt.Name = vm_size
hwt.RamMB = vm_spec.mem_mb
hwt.VCPUs = vm_spec.vcpus
hw_types.append(hwt)
return hw_types
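# Illustrative behaviour (not part of the original plugin): a known size gets its spec
# filled in, an unknown size falls back to zeros, and passing None/empty enumerates
# every entry in AZURE_VM_SIZES:
# vm_sizes_to_hardware_types(['Standard_F2']) -> [HardwareType(ID='Standard_F2', VCPUs=2, RamMB=4096)]
# vm_sizes_to_hardware_types(['Basic_A0']) -> [HardwareType(ID='Basic_A0', VCPUs=0, RamMB=0)]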
|
py | 1a3bbedc0b149001d9c0c0c6bfc8588e39bd8174 | import argparse
from sniffles.feature import FeatureParser
from sniffles.rule_formats import (PetabiPacketClassifierFormat, RegexFormat,
RuleFormat, SnortRuleFormat)
def main():
parser = argparse.ArgumentParser(description='Random Rule Generator')
parser.add_argument('-c', '--count', type=int, default=1,
help='the number of rules to generate (default: 1)')
parser.add_argument('-f', '--feature_file',
help='the file containing the feature set description')
parser.add_argument('-o', '--output_file', default='rules.txt',
help='the output file to which rules are written '
'(default: rules.txt)')
parser.add_argument('-r', '--rule_format',
choices=['petabipktclass', 'regex', 'snort'],
default='regex',
help='rule format')
args = parser.parse_args()
try:
myfp = FeatureParser(args.feature_file)
myfeatures = myfp.getFeatures()
myrules = generateRules(myfeatures, args.count)
printRules(myrules, args.output_file, args.rule_format)
except Exception as err:
print("RandRuleGen-main: " + str(err))
def generateRules(feature_list, count=1):
return ['; '.join(map(str, feature_list)) + '; '] * count
def printRules(rule_list=None, outfile=None, rule_format=None):
if rule_list and outfile:
fd = open(outfile, 'w', encoding='utf-8')
for rule in rule_list:
rwf = getRuleWithFormat(rule, rule_format)
fd.write(str(rwf))
fd.write("\n")
fd.close()
def getRuleWithFormat(rule=None, fmt=None):
rulefmt = None
if rule:
if fmt is not None:
if fmt == "snort":
rulefmt = SnortRuleFormat(
rule, getRuleWithFormat.rule_counter)
getRuleWithFormat.rule_counter += 1
if fmt == "petabipktclass":
rulefmt = PetabiPacketClassifierFormat(rule)
if fmt == "regex":
rulefmt = RegexFormat(rule)
if rulefmt is None:
rulefmt = RuleFormat(rule)
return rulefmt
getRuleWithFormat.rule_counter = 1
if __name__ == "__main__":
main()
|
py | 1a3bbef526c2fa4456a99dd2bf8e9db38f60b9a5 | import requests
from PIL import Image
from datainfo import file_list
for item in file_list:
item_file = '../items/'+item
items = open(item_file, 'r').read().split()
for name in items:
print('downloading', name)
url = 'https://gameinfo.albiononline.com/api/gameinfo/items/'
response = requests.get(url+name, stream=True)
if response.status_code == 200:
img = Image.open(response.raw)
img = img.resize((50, 50))
img.save('img_lowquality/'+name+'.png')
|
py | 1a3bbffeb436bd36186cc613c3252986c51627e5 | # Copyright 2017 IBM Corp. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Unit tests for _user_pattern module.
"""
from __future__ import absolute_import, print_function
import re
import copy
import pytest
from zhmcclient import Client, HTTPError, NotFound, UserPattern
from zhmcclient_mock import FakedSession
from tests.common.utils import assert_resources
class TestUserPattern(object):
"""All tests for the UserPattern and UserPatternManager classes."""
def setup_method(self):
"""
Setup that is called by pytest before each test method.
Set up a faked session, and add a faked Console without any
child resources.
"""
# pylint: disable=attribute-defined-outside-init
self.session = FakedSession('fake-host', 'fake-hmc', '2.13.1', '1.8')
self.client = Client(self.session)
self.faked_console = self.session.hmc.consoles.add({
'object-id': None,
# object-uri will be automatically set
'parent': None,
'class': 'console',
'name': 'fake-console1',
'description': 'Console #1',
})
self.console = self.client.consoles.find(name=self.faked_console.name)
def add_user_pattern(self, name, pattern, type_, user_template_uri):
"""
Add a faked user pattern object to the faked Console and return it.
"""
faked_user_pattern = self.faked_console.user_patterns.add({
'element-id': 'oid-{}'.format(name),
# element-uri will be automatically set
'parent': '/api/console',
'class': 'user-pattern',
'name': name,
'description': 'User Pattern {}'.format(name),
'pattern': pattern,
'type': type_,
'retention-time': 0,
'user-template-uri': user_template_uri,
})
return faked_user_pattern
def add_user(self, name, type_):
"""
Add a faked user object to the faked Console and return it.
"""
faked_user = self.faked_console.users.add({
'object-id': 'oid-{}'.format(name),
# object-uri will be automatically set
'parent': '/api/console',
'class': 'user',
'name': name,
'description': 'User {}'.format(name),
'type': type_,
'authentication-type': 'local',
})
return faked_user
def test_upm_repr(self):
"""Test UserPatternManager.__repr__()."""
user_pattern_mgr = self.console.user_patterns
# Execute the code to be tested
repr_str = repr(user_pattern_mgr)
repr_str = repr_str.replace('\n', '\\n')
# We check just the begin of the string:
assert re.match(r'^{classname}\s+at\s+0x{id:08x}\s+\(\\n.*'.
format(classname=user_pattern_mgr.__class__.__name__,
id=id(user_pattern_mgr)),
repr_str)
def test_upm_initial_attrs(self):
"""Test initial attributes of UserPatternManager."""
user_pattern_mgr = self.console.user_patterns
# Verify all public properties of the manager object
assert user_pattern_mgr.resource_class == UserPattern
assert user_pattern_mgr.class_name == 'user-pattern'
assert user_pattern_mgr.session is self.session
assert user_pattern_mgr.parent is self.console
assert user_pattern_mgr.console is self.console
@pytest.mark.parametrize(
"full_properties_kwargs, prop_names", [
(dict(full_properties=False),
['element-uri']),
(dict(full_properties=True),
['element-uri', 'name']),
(dict(), # test default for full_properties (True)
['element-uri', 'name']),
]
)
@pytest.mark.parametrize(
"filter_args, exp_names", [
(None,
['a', 'b']),
({},
['a', 'b']),
({'name': 'a'},
['a']),
]
)
def test_upm_list(
self, filter_args, exp_names, full_properties_kwargs, prop_names):
"""Test UserPatternManager.list()."""
faked_user1 = self.add_user(name='a', type_='standard')
faked_user2 = self.add_user(name='b', type_='standard')
faked_user_pattern1 = self.add_user_pattern(
name='a', pattern='a_*', type_='glob-like',
user_template_uri=faked_user1.uri)
faked_user_pattern2 = self.add_user_pattern(
name='b', pattern='b_.*', type_='regular-expression',
user_template_uri=faked_user2.uri)
faked_user_patterns = [faked_user_pattern1, faked_user_pattern2]
exp_faked_user_patterns = [u for u in faked_user_patterns
if u.name in exp_names]
user_pattern_mgr = self.console.user_patterns
# Execute the code to be tested
user_patterns = user_pattern_mgr.list(filter_args=filter_args,
**full_properties_kwargs)
assert_resources(user_patterns, exp_faked_user_patterns, prop_names)
@pytest.mark.parametrize(
"input_props, exp_prop_names, exp_exc", [
({}, # props missing
None,
HTTPError({'http-status': 400, 'reason': 5})),
({'description': 'fake description X'}, # props missing
None,
HTTPError({'http-status': 400, 'reason': 5})),
({'description': 'fake description X',
'name': 'a',
'pattern': 'a*'}, # several missing
None,
HTTPError({'http-status': 400, 'reason': 5})),
({'description': 'fake description X',
'name': 'a',
'pattern': 'a*'}, # several missing
None,
HTTPError({'http-status': 400, 'reason': 5})),
({'description': 'fake description X',
'name': 'a',
'pattern': 'a*',
'type': 'glob-like'}, # props missing
None,
HTTPError({'http-status': 400, 'reason': 5})),
({'description': 'fake description X',
'name': 'a',
'pattern': 'a*',
'type': 'glob-like',
'retention-time': 0}, # props missing
None,
HTTPError({'http-status': 400, 'reason': 5})),
({'description': 'fake description X',
'name': 'a',
'pattern': 'a*',
'type': 'glob-like',
'retention-time': 28,
'user-template-uri': '/api/users/oid-tpl'},
['element-uri', 'name', 'description', 'pattern', 'type',
'retention-time', 'user-template-uri'],
None),
]
)
def test_upm_create(self, input_props, exp_prop_names, exp_exc):
"""Test UserPatternManager.create()."""
faked_user_template = self.add_user(name='tpl', type_='template')
assert faked_user_template.uri == '/api/users/oid-tpl'
user_pattern_mgr = self.console.user_patterns
if exp_exc is not None:
with pytest.raises(exp_exc.__class__) as exc_info:
# Execute the code to be tested
user_pattern_mgr.create(properties=input_props)
exc = exc_info.value
if isinstance(exp_exc, HTTPError):
assert exc.http_status == exp_exc.http_status
assert exc.reason == exp_exc.reason
else:
# Execute the code to be tested.
user_pattern = user_pattern_mgr.create(properties=input_props)
# Check the resource for consistency within itself
assert isinstance(user_pattern, UserPattern)
user_pattern_name = user_pattern.name
exp_user_pattern_name = user_pattern.properties['name']
assert user_pattern_name == exp_user_pattern_name
user_pattern_uri = user_pattern.uri
exp_user_pattern_uri = user_pattern.properties['element-uri']
assert user_pattern_uri == exp_user_pattern_uri
# Check the properties against the expected names and values
for prop_name in exp_prop_names:
assert prop_name in user_pattern.properties
if prop_name in input_props:
value = user_pattern.properties[prop_name]
exp_value = input_props[prop_name]
assert value == exp_value
def test_up_repr(self):
"""Test UserPattern.__repr__()."""
faked_user1 = self.add_user(name='a', type_='standard')
faked_user_pattern1 = self.add_user_pattern(
name='a', pattern='a_*', type_='glob-like',
user_template_uri=faked_user1.uri)
user_pattern1 = self.console.user_patterns.find(
name=faked_user_pattern1.name)
# Execute the code to be tested
repr_str = repr(user_pattern1)
repr_str = repr_str.replace('\n', '\\n')
# We check just the begin of the string:
assert re.match(r'^{classname}\s+at\s+0x{id:08x}\s+\(\\n.*'.
format(classname=user_pattern1.__class__.__name__,
id=id(user_pattern1)),
repr_str)
@pytest.mark.parametrize(
"input_props, exp_exc", [
({'name': 'a',
'description': 'fake description X',
'pattern': 'a*',
'type': 'glob-like',
'retention-time': 28,
'user-template-uri': '/api/users/oid-tpl'},
None),
]
)
def test_up_delete(self, input_props, exp_exc):
"""Test UserPattern.delete()."""
faked_user_pattern = self.add_user_pattern(
name=input_props['name'],
pattern=input_props['pattern'],
type_=input_props['type'],
user_template_uri=input_props['user-template-uri'])
user_pattern_mgr = self.console.user_patterns
user_pattern = user_pattern_mgr.find(name=faked_user_pattern.name)
if exp_exc is not None:
with pytest.raises(exp_exc.__class__) as exc_info:
# Execute the code to be tested
user_pattern.delete()
exc = exc_info.value
if isinstance(exp_exc, HTTPError):
assert exc.http_status == exp_exc.http_status
assert exc.reason == exp_exc.reason
# Check that the user pattern still exists
user_pattern_mgr.find(name=faked_user_pattern.name)
else:
# Execute the code to be tested.
user_pattern.delete()
# Check that the user pattern no longer exists
with pytest.raises(NotFound) as exc_info:
user_pattern_mgr.find(name=faked_user_pattern.name)
def test_up_delete_create_same(self):
"""Test UserPattern.delete() followed by create() with same name."""
user_pattern_name = 'faked_a'
faked_user1 = self.add_user(name='a', type_='standard')
# Add the user pattern to be tested
self.add_user_pattern(
name=user_pattern_name, pattern='a_*', type_='glob-like',
user_template_uri=faked_user1.uri)
# Input properties for a user pattern with the same name
sn_user_pattern_props = {
'name': user_pattern_name,
'description': 'User Pattern with same name',
'pattern': 'a*',
'type': 'glob-like',
'retention-time': 28,
'user-template-uri': '/api/users/oid-tpl',
}
user_pattern_mgr = self.console.user_patterns
user_pattern = user_pattern_mgr.find(name=user_pattern_name)
# Execute the deletion code to be tested
user_pattern.delete()
# Check that the user pattern no longer exists
with pytest.raises(NotFound):
user_pattern_mgr.find(name=user_pattern_name)
# Execute the creation code to be tested.
user_pattern_mgr.create(sn_user_pattern_props)
# Check that the user pattern exists again under that name
sn_user_pattern = user_pattern_mgr.find(name=user_pattern_name)
description = sn_user_pattern.get_property('description')
assert description == sn_user_pattern_props['description']
@pytest.mark.parametrize(
"input_props", [
{},
{'description': 'New user pattern description'},
]
)
def test_up_update_properties(self, input_props):
"""Test UserPattern.update_properties()."""
user_pattern_name = 'faked_a'
faked_user1 = self.add_user(name='a', type_='standard')
# Add the user pattern to be tested
self.add_user_pattern(
name=user_pattern_name, pattern='a_*', type_='glob-like',
user_template_uri=faked_user1.uri)
user_pattern_mgr = self.console.user_patterns
user_pattern = user_pattern_mgr.find(name=user_pattern_name)
user_pattern.pull_full_properties()
saved_properties = copy.deepcopy(user_pattern.properties)
# Execute the code to be tested
user_pattern.update_properties(properties=input_props)
# Verify that the resource object already reflects the property
# updates.
for prop_name in saved_properties:
if prop_name in input_props:
exp_prop_value = input_props[prop_name]
else:
exp_prop_value = saved_properties[prop_name]
assert prop_name in user_pattern.properties
prop_value = user_pattern.properties[prop_name]
assert prop_value == exp_prop_value, \
"Unexpected value for property {!r}".format(prop_name)
# Refresh the resource object and verify that the resource object
# still reflects the property updates.
user_pattern.pull_full_properties()
for prop_name in saved_properties:
if prop_name in input_props:
exp_prop_value = input_props[prop_name]
else:
exp_prop_value = saved_properties[prop_name]
assert prop_name in user_pattern.properties
prop_value = user_pattern.properties[prop_name]
assert prop_value == exp_prop_value
|
py | 1a3bc0134c1e24d0c0fcec3604ae72752f38580b | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import math as m
import cplotting as cplot
n=20
w=[m.e**(2.*m.pi*1j/float(k)) for k in range(1,n+1)]
cplot.plot({z for z in w},4)
cplot.show()
|
py | 1a3bc022dcbef8704697240ac16622885ac2c0b3 | import copy
def compose(a, b, keep_null=False):
"""
Compose two operations into one.
``keep_null`` [default=false] is a boolean that controls whether None/Null
attributes are retrained.
"""
if a is None:
a = {}
if b is None:
b = {}
# deep copy b, but get rid of None values if keep_null is falsey
attributes = dict((k, copy.deepcopy(v)) for k, v in b.items() if keep_null or v is not None)
for k, v in a.items():
if k not in b:
attributes[k] = copy.deepcopy(v)
return attributes or None
def diff(a, b):
"""
Return the difference between operations a and b.
"""
if a is None:
a = {}
if b is None:
b = {}
keys = set(a.keys()).union(set(b.keys()))
attributes = {}
for k in keys:
av, bv = a.get(k, None), b.get(k, None)
if av != bv:
attributes[k] = bv
return attributes or None
def transform(a, b, priority=True):
"""
Return the transformation from operation a to b.
If ``priority`` is falsey [default=True] then just return b.
"""
if a is None:
a = {}
if b is None:
b = {}
if not priority:
return b or None
attributes = {}
for k, v in b.items():
if k not in a:
attributes[k] = v
return attributes or None
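# Illustrative results (not part of the original module):
# compose({'bold': True}, {'color': 'red'}) -> {'bold': True, 'color': 'red'}
# compose({'bold': True}, {'bold': None}) -> None (a None value removes the attribute)
# diff({'bold': True}, {'bold': True, 'italic': True}) -> {'italic': True}
# transform({'bold': True}, {'bold': False, 'color': 'red'}) -> {'color': 'red'} (a wins on conflicts when priority=True)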
def length_of(op):
typ = type_of(op)
if typ == 'delete':
return op['delete']
elif typ == 'retain':
return op['retain']
elif isinstance(op.get('insert'), str):
return len(op['insert'])
else:
return 1
def type_of(op):
if not op:
return None
if isinstance(op.get('delete'), int):
return 'delete'
if isinstance(op.get('retain'), int):
return 'retain'
return 'insert'
class Iterator(object):
"""
An iterator that enables itself to break off operations
to exactly the length needed via the ``next()`` method.
"""
def __init__(self, ops=[]):
self.ops = ops
self.reset()
def reset(self):
self.index = 0
self.offset = 0
def has_next(self):
return self.peek_length() is not None
def next(self, length=None):
offset = self.offset
op = self.peek()
op_type = type_of(op)
if op is None:
return { 'retain': None }
op_length = length_of(op)
if (length is None or length >= op_length - offset):
length = op_length - offset
self.index += 1
self.offset = 0
else:
self.offset += length
if op_type == 'delete':
return { 'delete': length }
result_op = {}
if op.get('attributes'):
result_op['attributes'] = op['attributes']
if op_type == 'retain':
result_op['retain'] = length
elif isinstance(op.get('insert'), str):
result_op['insert'] = op['insert'][offset:offset+length]
else:
assert offset == 0
assert length == 1
if 'insert' in op:
result_op['insert'] = op['insert']
return result_op
__next__ = next
def __len__(self):
return len(self.ops)
def __iter__(self):
return self
def peek(self):
try:
return self.ops[self.index]
except IndexError:
return None
def peek_length(self):
next_op = self.peek()
if next_op is None:
return None
return length_of(next_op) - self.offset
def peek_type(self):
op = self.peek()
if op is None:
return 'retain'
return type_of(op)
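# Illustrative walk-through (not part of the original module): the iterator can split
# an op to exactly the requested length:
# it = Iterator([{'insert': 'Hello'}, {'retain': 3}])
# it.next(2) -> {'insert': 'He'}
# it.next() -> {'insert': 'llo'}
# it.next() -> {'retain': 3}
# it.next() -> {'retain': None} (past the end)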
length = length_of
type = type_of
iterator = lambda x: Iterator(x) |
py | 1a3bc0c7fb40df8c6fff4c9fb62b73fa40306202 | #! /usr/bin/env python
import sys
import importlib
from collections import OrderedDict
import numpy as np
from threading import Lock, RLock, Thread
import time
import glob
import scipy.misc
import pdb
try:
import cv2
except ImportError:
print 'Error: Could not import cv2, please install it first.'
raise
from misc import WithTimer
from image_misc import cv2_imshow_rgb, FormattedString, cv2_typeset_text, to_255, gray_to_color, ensure_uint255_and_resize_without_fit
from bindings import bindings
pane_debug_clr = (255, 64, 64)
class ImproperlyConfigured(Exception):
pass
class Pane(object):
'''Hold info about one window pane (rectangular region within the main window)'''
def __init__(self, i_begin, j_begin, i_size, j_size):
self.reset(i_begin, j_begin, i_size, j_size)
def reset(self, i_begin, j_begin, i_size, j_size):
self.i_begin = i_begin
self.j_begin = j_begin
self.i_size = i_size
self.j_size = j_size
self.i_end = i_begin + i_size
self.j_end = j_begin + j_size
self.data = None # eventually contains a slice of the window buffer
class LiveVis(object):
'''Runs the demo'''
def __init__(self, settings):
self.settings = settings
self.bindings = bindings
self.app_classes = OrderedDict()
self.apps = OrderedDict()
for module_path, app_name in settings.installed_apps:
module = importlib.import_module(module_path)
print 'got module', module
app_class = getattr(module, app_name)
print 'got app', app_class
self.app_classes[app_name] = app_class
for app_name, app_class in self.app_classes.iteritems():
app = app_class(settings, self.bindings)
self.apps[app_name] = app
self.help_mode = False
self.window_name = 'Deep Visualization Toolbox | Model: %s' % (settings.model_to_load)
self.quit = False
self.debug_level = 0
self.debug_pane_defaults = {
'face': getattr(cv2, self.settings.help_face),
'fsize': self.settings.help_fsize,
'clr': pane_debug_clr,
'thick': self.settings.help_thick
}
self.help_pane_defaults = {
'face': getattr(cv2, self.settings.help_face),
'fsize': self.settings.help_fsize,
'clr': to_255(self.settings.help_clr),
'thick': self.settings.help_thick
}
def init_window(self):
cv2.namedWindow(self.window_name)
max_i, max_j = 0, 0
if len(self.settings.window_panes) == 0:
raise ImproperlyConfigured('settings.window_panes is empty.')
self.panes = OrderedDict()
for pane_name, pane_dimensions in self.settings.window_panes:
if len(pane_dimensions) != 4:
raise ImproperlyConfigured('pane dimensions should be a tuple of length 4, but it is "%s"' % repr(pane_dimensions))
i_begin, j_begin, i_size, j_size = pane_dimensions
max_i = max(max_i, i_begin + i_size)
max_j = max(max_j, j_begin + j_size)
if pane_name in self.panes:
raise Exception('Duplicate pane name in settings: %s' % pane_name)
self.panes[pane_name] = Pane(i_begin, j_begin, i_size, j_size)
self.buffer_height = max_i
self.buffer_width = max_j
self.window_buffer = np.tile(np.array(np.array(self.settings.window_background) * 255, 'uint8'),
(max_i,max_j,1))
#print 'BUFFER IS:', self.window_buffer.shape, self.window_buffer.min(), self.window_buffer.max()
for _,pane in self.panes.iteritems():
pane.data = self.window_buffer[pane.i_begin:pane.i_end, pane.j_begin:pane.j_end]
# Allocate help pane
for ll in self.settings.help_pane_loc:
assert ll >= 0 and ll <= 1, 'help_pane_loc values should be in [0,1]'
self.help_pane = Pane(int(self.settings.help_pane_loc[0]*max_i),
int(self.settings.help_pane_loc[1]*max_j),
int(self.settings.help_pane_loc[2]*max_i),
int(self.settings.help_pane_loc[3]*max_j))
self.help_buffer = self.window_buffer.copy() # For rendering help mode
self.help_pane.data = self.help_buffer[self.help_pane.i_begin:self.help_pane.i_end, self.help_pane.j_begin:self.help_pane.j_end]
# add listener for mouse clicks
cv2.setMouseCallback(self.window_name, self.on_mouse_click)
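# For reference (values are illustrative only): settings.window_panes is expected to be
# a sequence of (pane_name, (i_begin, j_begin, i_size, j_size)) tuples, e.g.
# (('input', (0, 0, 300, 300)), ('caffevis_control', (0, 300, 30, 500))), and
# settings.help_pane_loc holds four fractions of the full window size.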
def on_mouse_click(self, event, x, y, flags, param):
'''
Handle all button presses.
'''
if event == cv2.EVENT_LBUTTONUP:
for app_name, app in self.apps.iteritems():
with WithTimer('%s:on_mouse_click' % app_name, quiet=self.debug_level < 1):
key = app.handle_mouse_left_click(x, y, flags, param, self.panes)
def check_for_control_height_update(self):
if hasattr(self.settings, '_calculated_control_pane_height') and \
self.settings._calculated_control_pane_height != self.panes['caffevis_control'].i_size:
self.panes['caffevis_control'].reset(
self.settings.window_panes[4][1][0],
self.settings.window_panes[4][1][1],
self.settings._calculated_control_pane_height,
self.settings.window_panes[4][1][3])
self.panes['caffevis_layers'].reset(
self.settings._calculated_control_pane_height,
self.settings.window_panes[5][1][1],
self.settings.window_panes[5][1][2] + 3*20 - self.settings._calculated_control_pane_height,
self.settings.window_panes[5][1][3])
for _, pane in self.panes.iteritems():
pane.data = self.window_buffer[pane.i_begin:pane.i_end, pane.j_begin:pane.j_end]
return True
else:
return False
pass
def run_loop(self):
self.quit = False
# Setup
self.init_window()
#cap = cv2.VideoCapture(self.settings.capture_device)
from input_fetcher import InputImageFetcher
self.input_updater = InputImageFetcher(self.settings)
self.input_updater.bind_camera()
self.input_updater.start()
heartbeat_functions = [self.input_updater.heartbeat]
for app_name, app in self.apps.iteritems():
print 'Starting app:', app_name
app.start(self)
heartbeat_functions.extend(app.get_heartbeats())
ii = 0
since_keypress = 999
since_redraw = 999
since_imshow = 0
last_render = time.time() - 999
latest_frame_idx = None
latest_frame_data = None
frame_for_apps = None
redraw_needed = True # Force redraw the first time
imshow_needed = True
while not self.quit:
# Call any heartbeats
for heartbeat in heartbeat_functions:
#print 'Heartbeat: calling', heartbeat
heartbeat()
# Handle key presses
keys = []
# Collect key presses (multiple if len(range)>1)
for cc in range(1):
with WithTimer('LiveVis:waitKey', quiet = self.debug_level < 2):
key = cv2.waitKey(self.settings.main_loop_sleep_ms)
if key == -1:
break
else:
if (key != 255):
keys.append(key)
#print 'Got key:', key
now = time.time()
#print 'Since last:', now - last_render
skip_imshow = False
#if now - last_render > .05 and since_imshow < 1:
# skip_imshow = True
if skip_imshow:
since_imshow += 1
else:
since_imshow = 0
last_render = now
#print ' Number of keys:', len(keys)
for key in keys:
since_keypress = 0
#print 'Got Key:', key
key,do_redraw = self.handle_key_pre_apps(key)
redraw_needed |= do_redraw
imshow_needed |= do_redraw
for app_name, app in self.apps.iteritems():
with WithTimer('%s:handle_key' % app_name, quiet = self.debug_level < 1):
key = app.handle_key(key, self.panes)
key = self.handle_key_post_apps(key)
if self.quit:
break
for app_name, app in self.apps.iteritems():
redraw_needed |= app.redraw_needed()
redraw_needed |= self.check_for_control_height_update()
# Grab latest frame from input_updater thread
#for a in range (1,11):
#pdb.set_trace()
#fr_idx,fr_data,fr_label,fr_filename = self.input_updater.get_frame()
#latest_label = fr_label
#latest_frame_data = scipy.misc.imread('/home/mbm/Desktop/Aux/input_images/val_256/Places365_val_%08d.jpg'%a)
#latest_filename = ('Places365_val_%08d.jpg'%a)
#app.handle_input(latest_frame_data, latest_label, latest_filename, self.panes)
#do_handle_input = (ii == 0 or
# since_keypress >= self.settings.keypress_pause_handle_iterations)
#imshow_needed |= app.draw(self.panes)
fr_idx,fr_data,fr_label,fr_filename = self.input_updater.get_frame()
is_new_frame = (fr_idx != latest_frame_idx and fr_data is not None)
if is_new_frame:
latest_frame_idx = fr_idx
latest_frame_data = fr_data
latest_label = fr_label
latest_filename = fr_filename
frame_for_apps = fr_data
if is_new_frame:
with WithTimer('LiveVis.display_frame', quiet = self.debug_level < 1):
self.display_frame(latest_frame_data)
imshow_needed = True
do_handle_input = (ii == 0 or
since_keypress >= self.settings.keypress_pause_handle_iterations)
if frame_for_apps is not None and do_handle_input:
# Pass frame to apps for processing
for app_name, app in self.apps.iteritems():
with WithTimer('%s:handle_input' % app_name, quiet = self.debug_level < 1):
app.handle_input(latest_frame_data, latest_label, latest_filename, self.panes)
frame_for_apps = None
# Tell each app to draw
do_redraw = (redraw_needed and
(since_keypress >= self.settings.keypress_pause_redraw_iterations or
since_redraw >= self.settings.redraw_at_least_every))
if redraw_needed and do_redraw:
for app_name, app in self.apps.iteritems():
with WithTimer('%s:draw' % app_name, quiet = self.debug_level < 1):
imshow_needed |= app.draw(self.panes)
redraw_needed = False
since_redraw = 0
# Render buffer
if imshow_needed:
# Only redraw pane debug if display will be updated
if hasattr(self.settings, 'debug_window_panes') and self.settings.debug_window_panes:
for pane_name,pane in self.panes.iteritems():
print pane_name, pane
pane.data[:] = pane.data * .5
line = [FormattedString('%s |' % pane_name, self.debug_pane_defaults),
FormattedString('pos: %d,%d |' % (pane.i_begin, pane.j_begin), self.debug_pane_defaults),
FormattedString('shape: %d,%d' % (pane.i_size, pane.j_size), self.debug_pane_defaults)]
cv2_typeset_text(pane.data, line, (5,20), line_spacing = 5, wrap = True)
pane.data[:1,:] = pane_debug_clr
pane.data[-1:,:] = pane_debug_clr
pane.data[:,:1] = pane_debug_clr
pane.data[:,-1:] = pane_debug_clr
with WithTimer('LiveVis:imshow', quiet = self.debug_level < 1):
if self.help_mode:
# Copy main buffer to help buffer
self.help_buffer[:] = self.window_buffer[:]
self.draw_help()
cv2_imshow_rgb(self.window_name, self.help_buffer)
else:
cv2_imshow_rgb(self.window_name, self.window_buffer)
imshow_needed = False
ii += 1
since_keypress += 1
since_redraw += 1
if ii % 2 == 0 and self.settings.print_dots:
sys.stdout.write('.')
sys.stdout.flush()
# Extra sleep just for debugging. In production all main loop sleep should be in cv2.waitKey.
#time.sleep(2)
print '\n\nTrying to exit run_loop...'
self.input_updater.quit = True
self.input_updater.join(.01 + float(self.settings.input_updater_sleep_after_read_frame) * 5)
if self.input_updater.is_alive():
raise Exception('Could not join self.input_updater thread')
else:
self.input_updater.free_camera()
for app_name, app in self.apps.iteritems():
print 'Quitting app:', app_name
app.quit()
print 'Input thread joined and apps quit; exiting run_loop.'
def handle_key_pre_apps(self, key):
tag = self.bindings.get_tag(key)
if tag == 'freeze_cam':
self.input_updater.freeze_cam = not self.input_updater.freeze_cam
elif tag == 'toggle_input_mode':
self.input_updater.toggle_input_mode()
elif tag == 'static_file_increment':
self.input_updater.next_image()
elif tag == 'static_file_decrement':
self.input_updater.prev_image()
elif tag == 'help_mode':
self.toggle_help_mode()
elif tag == 'stretch_mode':
self.input_updater.toggle_stretch_mode()
print 'Stretch mode is now', self.input_updater.static_file_stretch_mode
elif tag == 'debug_level':
self.debug_level = (self.debug_level + 1) % 3
for app_name, app in self.apps.iteritems():
app.set_debug(self.debug_level)
else:
return key, False
return None, True
def handle_key_post_apps(self, key):
tag = self.bindings.get_tag(key)
if tag == 'quit':
self.set_quit_flag()
elif key == None:
pass
else:
key_label, masked_vals = self.bindings.get_key_label_from_keycode(key, extra_info = True)
masked_vals_pp = ', '.join(['%d (%s)' % (mv, hex(mv)) for mv in masked_vals])
if key_label is None:
print 'Got key code %d (%s), did not match any known key (masked vals tried: %s)' % (key, hex(key), masked_vals_pp)
elif tag is None:
print 'Got key code %d (%s), matched key "%s", but key is not bound to any function' % (key, hex(key), key_label)
else:
print 'Got key code %d (%s), matched key "%s", bound to "%s", but nobody handled "%s"' % (
key, hex(key), key_label, tag, tag)
def display_frame(self, frame):
full_pane_shape = self.panes['input'].data.shape[:2][::-1]
if self.settings.is_siamese and ((type(frame),len(frame)) == (tuple,2)):
frame1 = frame[0]
frame2 = frame[1]
half_pane_shape = (full_pane_shape[0], full_pane_shape[1]/2)
frame_disp1 = ensure_uint255_and_resize_without_fit(frame1[:], half_pane_shape)
frame_disp2 = ensure_uint255_and_resize_without_fit(frame2[:], half_pane_shape)
frame_disp = np.concatenate((frame_disp1, frame_disp2), axis=1)
else:
frame_disp = ensure_uint255_and_resize_without_fit(frame[:], full_pane_shape)
if self.settings._calculated_is_gray_model:
frame_disp = gray_to_color(frame_disp)
self.panes['input'].data[:] = frame_disp
def draw_help(self):
self.help_buffer[:] = self.help_buffer[:] * .7
self.help_pane.data[:] = self.help_pane.data[:] * .7
loc = self.settings.help_loc[::-1] # Reverse to OpenCV c,r order
defaults = self.help_pane_defaults
lines = []
lines.append([FormattedString('~ ~ ~ Deep Visualization Toolbox ~ ~ ~', defaults, align='center', width=self.help_pane.j_size)])
locy, boxes = cv2_typeset_text(self.help_pane.data, lines, loc,
line_spacing = self.settings.help_line_spacing)
for app_name, app in self.apps.iteritems():
locy = app.draw_help(self.help_pane, locy)
def toggle_help_mode(self):
self.help_mode = not self.help_mode
def set_quit_flag(self):
self.quit = True
if __name__ == '__main__':
print 'You probably want to run ./run_toolbox.py instead.'
|
py | 1a3bc0fed187e4c54ce08478b20bc660b4e2b309 | n, l, t = input().split()
n, l, t = int(n), int(l), int(t)
p = [int(i) for i in input().split()]
sp = sorted(p)
map_set = list()
for i in p:
map_set.append(sp.index(i))
ori = [1] * n
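# Comments on the simulation below (added for clarity): positions are processed in
# sorted order. Each time step, particles sharing a position reverse direction (the
# XOR with -1^1 == -2 flips an orientation between 1 and -1), a particle sitting on an
# end of the segment [0, l] turns back toward the inside, and then every particle moves
# one unit. map_set records where each input particle sits in the sorted list so the
# final positions can be printed in the original input order.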
for ti in range(t):
for i in range(n-1):
if sp[i] == sp[i+1]:
ori[i] ^= (-1^1)
ori[i+1] ^= (-1^1)
if sp[0] == 0 and ori[0] == -1:
ori[0] = 1
if sp[n-1] == l and ori[n-1] == 1:
ori[n-1] = -1
for i in range(n):
sp[i] += ori[i]
for i in map_set:
print(sp[i], end=" ")
print() |