max_stars_repo_path | max_stars_repo_name | max_stars_count | id | content | content_cleaned | language | language_score | comments | edu_score | edu_int_score
---|---|---|---|---|---|---|---|---|---|---|
hoomd/communicator.py | EdwardZX/hoomd-blue | 204 | 8400 | # Copyright (c) 2009-2021 The Regents of the University of Michigan
# This file is part of the HOOMD-blue project, released under the BSD 3-Clause
# License.
"""MPI communicator."""
from hoomd import _hoomd
import hoomd
import contextlib
class Communicator(object):
"""MPI communicator.
Args:
mpi_comm: Accepts an mpi4py communicator. Use this argument to perform
many independent hoomd simulations where you communicate between those
simulations using mpi4py.
ranks_per_partition (int): (MPI) Number of ranks to include in a
partition.
`Communicator` initializes MPI communications for a `hoomd.Simulation`. To
use MPI, launch your Python script with an MPI launcher (e.g. ``mpirun`` or
``mpiexec``). By default, `Communicator` uses all ranks provided by the
launcher ``num_launch_ranks`` for a single `hoomd.Simulation` object which
decomposes the state onto that many domains.
Set ``ranks_per_partition`` to an integer to partition launched ranks into
``num_launch_ranks / ranks_per_partition`` communicators, each with their
own `partition` index. Use this to perform many simulations in parallel, for
example by using `partition` as an index into an array of state points to
execute.
"""
def __init__(self, mpi_comm=None, ranks_per_partition=None):
# check ranks_per_partition
if ranks_per_partition is not None:
if not hoomd.version.mpi_enabled:
raise RuntimeError(
"The ranks_per_partition option is only available in MPI.\n"
)
mpi_available = hoomd.version.mpi_enabled
self.cpp_mpi_conf = None
# create the specified configuration
if mpi_comm is None:
self.cpp_mpi_conf = _hoomd.MPIConfiguration()
else:
if not mpi_available:
raise RuntimeError("mpi_comm is not supported in serial builds")
handled = False
# pass in pointer to MPI_Comm object provided by mpi4py
try:
import mpi4py
if isinstance(mpi_comm, mpi4py.MPI.Comm):
addr = mpi4py.MPI._addressof(mpi_comm)
self.cpp_mpi_conf = \
_hoomd.MPIConfiguration._make_mpi_conf_mpi_comm(addr)
handled = True
except ImportError:
# silently ignore when mpi4py is missing
pass
# undocumented case: handle plain integers as pointers to MPI_Comm
# objects
if not handled and isinstance(mpi_comm, int):
self.cpp_mpi_conf = \
_hoomd.MPIConfiguration._make_mpi_conf_mpi_comm(mpi_comm)
handled = True
if not handled:
raise RuntimeError(
"Invalid mpi_comm object: {}".format(mpi_comm))
if ranks_per_partition is not None:
# check validity
if (self.cpp_mpi_conf.getNRanksGlobal() % ranks_per_partition):
raise RuntimeError('Total number of ranks is not a multiple of '
'ranks_per_partition.')
# split the communicator into partitions
self.cpp_mpi_conf.splitPartitions(ranks_per_partition)
@property
def num_ranks(self):
"""int: The number of ranks in this partition.
When initialized with ``ranks_per_partition=None``, `num_ranks` is equal
to the ``num_launch_ranks`` set by the MPI launcher. When using
partitions, `num_ranks` is equal to ``ranks_per_partition``.
Note:
Returns 1 in builds with ENABLE_MPI=off.
"""
if hoomd.version.mpi_enabled:
return self.cpp_mpi_conf.getNRanks()
else:
return 1
@property
def rank(self):
"""int: The current rank within the partition.
Note:
Returns 0 in builds with ENABLE_MPI=off.
"""
if hoomd.version.mpi_enabled:
return self.cpp_mpi_conf.getRank()
else:
return 0
@property
def num_partitions(self):
"""int: The number of partitions in this execution.
Create partitions with the ``ranks_per_partition`` argument on
initialization. Then, the number of partitions is
``num_launch_ranks / ranks_per_partition``.
Note:
Returns 1 in builds with ENABLE_MPI=off.
"""
if hoomd.version.mpi_enabled:
return self.cpp_mpi_conf.getNPartitions()
else:
return 1
@property
def partition(self):
"""int: The current partition.
Note:
Returns 0 in builds with ENABLE_MPI=off.
"""
if hoomd.version.mpi_enabled:
return self.cpp_mpi_conf.getPartition()
else:
return 0
def barrier_all(self):
"""Perform a MPI barrier synchronization across all ranks.
Note:
Does nothing in builds with ENABLE_MPI=off.
"""
if hoomd.version.mpi_enabled:
_hoomd.mpi_barrier_world()
def barrier(self):
"""Perform a barrier synchronization across all ranks in the partition.
Note:
Does nothing in builds with ENABLE_MPI=off.
"""
if hoomd.version.mpi_enabled:
self.cpp_mpi_conf.barrier()
@contextlib.contextmanager
def localize_abort(self):
"""Localize MPI_Abort to this partition.
HOOMD calls ``MPI_Abort`` to tear down all running MPI processes
whenever there is an uncaught exception. By default, this will abort the
entire MPI execution. When using partitions, an uncaught exception on
one partition will therefore abort all of them.
Use the return value of :py:meth:`localize_abort()` as a context manager
to tell HOOMD that all operations within the context will use only
that MPI communicator so that an uncaught exception in one partition
will only abort that partition and leave the others running.
"""
global _current_communicator
prev = _current_communicator
_current_communicator = self
yield None
_current_communicator = prev
# store the "current" communicator to be used for MPI_Abort calls. This defaults
# to the world communicator, but users can opt in to a more specific
# communicator using the Device.localize_abort context manager
_current_communicator = Communicator()
| en | 0.822538 | | 2.704311 | 3 |
src/affinity-propagation/generate_data.py | dominc8/affinity-propagation | 1 | 8401 | <filename>src/affinity-propagation/generate_data.py
from config import DataGeneratorCfg
from sklearn.datasets.samples_generator import make_blobs
import numpy as np
def generate():
data, true_labels = make_blobs(n_samples=DataGeneratorCfg.n_samples, centers=DataGeneratorCfg.centers, cluster_std=DataGeneratorCfg.cluster_std, random_state=DataGeneratorCfg.random_state)
print("Generating new data!")
np.savetxt("data/data.txt", data)
np.savetxt("data/true_labels.txt", true_labels)
return data
| none | 1 | 2.98694 | 3 |
peon/tests/test_project/test_file/test_function_def/test_functions/test_reflection_at_line.py | roch1990/peon | 32 | 8402 | import _ast
from peon.src.project.file.function_def.function import FunctionLint
class ReflectionAtLineFixture:
empty_node = _ast.Pass
is_instance_at_first_lvl = _ast.FunctionDef(id='isinstance', lineno=1)
type_at_first_lvl = _ast.FunctionDef(id='type', lineno=1)
is_instance_at_second_lvl = _ast.FunctionDef(body=[_ast.Expr(id='isinstance', lineno=2)], lineno=1)
type_at_second_lvl = _ast.FunctionDef(body=[_ast.Expr(id='type', lineno=2)], lineno=1)
def test_empty_node():
assert FunctionLint(
definition=ReflectionAtLineFixture.empty_node,
).reflection_at_line() == tuple()
def test_is_instance_at_first_lvl():
assert FunctionLint(
definition=ReflectionAtLineFixture.is_instance_at_first_lvl,
).reflection_at_line() == (1,)
def test_type_at_first_lvl():
assert FunctionLint(
definition=ReflectionAtLineFixture.type_at_first_lvl,
).reflection_at_line() == (1,)
def test_is_instance_at_second_lvl():
assert FunctionLint(
definition=ReflectionAtLineFixture.is_instance_at_second_lvl,
).reflection_at_line() == (2,)
def test_type_at_second_lvl():
assert FunctionLint(
definition=ReflectionAtLineFixture.type_at_second_lvl,
).reflection_at_line() == (2,)
|
db2_funcs.py | Nama/A.T.S.P.-Website | 4 | 8403 | ###############################################################################
# #
'''Website Database-connection-related features''' #
# #
###############################################################################
import cymysql
from conf import website_db
from time import gmtime
from time import strftime
db_host = website_db.ip
db_port = website_db.port
db = website_db.db
db_user = website_db.user
db_pw = website_db.pw
###############################################################################
# #
'''Database connect and close''' #
# #
###############################################################################
def db_con():
conn = cymysql.connect(host=db_host, port=db_port, user=db_user, passwd=db_pw, db=db)
cur = conn.cursor()
return conn, cur
def db_close(conn, cur):
cur.close()
conn.close()
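# Typical call pattern used by every helper below (sketch; the query is illustrative):
#
#   conn, cur = db_con()
#   cur.execute('SELECT * FROM `donate`')
#   rows = cur.fetchall()
#   db_close(conn, cur)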
###############################################################################
# #
'''Donation-Page data''' #
# #
###############################################################################
def donate_save(nick):
conn, cur = db_con()
time = strftime('%Y.%m.%d - %H:%M:%S', gmtime())
cur.execute('INSERT INTO `donate` (`time`, `user`) VALUES (%s, %s)', (time, nick))
conn.commit()
db_close(conn, cur)
def donate_read():
conn, cur = db_con()
cur.execute('SELECT * FROM `donate` ORDER BY `time` DESC LIMIT 20')
nicks = list()
for r in cur.fetchall():
nicks.append([r[0], r[1]])
db_close(conn, cur)
return nicks
###############################################################################
# #
'''Short-URL data''' #
# #
###############################################################################
def shorturl_save(surl, url):
conn, cur = db_con()
cur.execute('INSERT INTO `shorturls` (`surl`, `url`) VALUES (%s, %s)', (surl, url))
conn.commit()
db_close(conn, cur)
def shorturl_read():
conn, cur = db_con()
cur.execute('SELECT * FROM `shorturls`')
urls = list()
for r in cur.fetchall():
urls.append([r[0], r[0], r[1]])
db_close(conn, cur)
return urls
###############################################################################
# #
'''Old Worlds''' #
# #
###############################################################################
def get_old_worlds(item):
conn, cur = db_con()
sql = 'SELECT * FROM `oldworlds` ORDER BY `date` DESC LIMIT {0}, {1}'.format(item, 20)
cur.execute(sql)
worlds = cur.fetchall()
db_close(conn, cur)
return worlds
###############################################################################
# #
'''Server Backup-Size in Dash''' #
# #
###############################################################################
def backup_size():
conn, cur = db_con()
dbtshock = []
tserver = []
htdocs = []
cur.execute('SELECT * FROM `backups`')
for r in cur.fetchall():
if r[1] == 'db':
dbtshock.append([r[0] * 1000, r[2]])
elif r[1] == 'tserver':
tserver.append([r[0] * 1000, r[2]])
elif r[1] == 'htdocs':
htdocs.append([r[0] * 1000, r[2]])
db_close(conn, cur)
return (dbtshock, tserver, htdocs)
| ###############################################################################
# #
'''Website Database-connection-related features''' #
# #
###############################################################################
import cymysql
from conf import website_db
from time import gmtime
from time import strftime
db_host = website_db.ip
db_port = website_db.port
db = website_db.db
db_user = website_db.user
db_pw = website_db.pw
###############################################################################
# #
'''Databse-connect and close''' #
# #
###############################################################################
def db_con():
conn = cymysql.connect(host=db_host, port=db_port, user=db_user, passwd=db_pw, db=db)
cur = conn.cursor()
return conn, cur
def db_close(conn, cur):
cur.close()
conn.close()
###############################################################################
# #
'''Donation-Page data''' #
# #
###############################################################################
def donate_save(nick):
conn, cur = db_con()
time = strftime('%Y.%m.%d - %H:%M:%S', gmtime())
cur.execute('INSERT INTO `donate` (`time`, `user`) VALUES (%s, %s)', (time, nick))
conn.commit()
db_close(conn, cur)
def donate_read():
conn, cur = db_con()
cur.execute('SELECT * FROM `donate` ORDER BY `time` DESC LIMIT 20')
nicks = list()
for r in cur.fetchall():
nicks.append([r[0], r[1]])
db_close(conn, cur)
return nicks
###############################################################################
# #
'''Short-URL data''' #
# #
###############################################################################
def shorturl_save(surl, url):
conn, cur = db_con()
cur.execute('INSERT INTO `shorturls` (`surl`, `url`) VALUES (%s, %s)', (surl, url))
conn.commit()
db_close(conn, cur)
def shorturl_read():
conn, cur = db_con()
cur.execute('SELECT * FROM `shorturls`')
urls = list()
for r in cur.fetchall():
urls.append([r[0], r[0], r[1]])
db_close(conn, cur)
return urls
###############################################################################
# #
'''Old Worlds''' #
# #
###############################################################################
def get_old_worlds(item):
conn, cur = db_con()
sql = 'SELECT * FROM `oldworlds` ORDER BY `date` DESC LIMIT {0}, {1}'.format(item, 20)
cur.execute(sql)
worlds = cur.fetchall()
db_close(conn, cur)
return worlds
###############################################################################
# #
'''Server Backup-Size in Dash''' #
# #
###############################################################################
def backup_size():
conn, cur = db_con()
dbtshock = []
tserver = []
htdocs = []
cur.execute('SELECT * FROM `backups`')
for r in cur.fetchall():
if r[1] == 'db':
dbtshock.append([r[0] * 1000, r[2]])
elif r[1] == 'tserver':
tserver.append([r[0] * 1000, r[2]])
elif r[1] == 'htdocs':
htdocs.append([r[0] * 1000, r[2]])
db_close(conn, cur)
return (dbtshock, tserver, htdocs)
| de | 0.811098 | ############################################################################### # # Website Database-connection-related features # # # ############################################################################### ############################################################################### # # Databse-connect and close # # # ############################################################################### ############################################################################### # # Donation-Page data # # # ############################################################################### ############################################################################### # # Short-URL data # # # ############################################################################### ############################################################################### # # Old Worlds # # # ############################################################################### ############################################################################### # # Server Backup-Size in Dash # # # ############################################################################### | 2.46345 | 2 |
nlp/handler.py | rgschmitz1/tcss702 | 0 | 8404 | <filename>nlp/handler.py
from minio import Minio
import json
import os
from .Inspector import Inspector
from .topic_model import topic_model
#def handle(event):
def handle(event, context):
with open("/var/openfaas/secrets/minio-access-key") as f:
access_key = f.read()
with open("/var/openfaas/secrets/minio-secret-key") as f:
secret_key = f.read()
mc = Minio(os.environ['minio_hostname'],
access_key=access_key,
secret_key=secret_key,
secure=False)
tm = topic_model(mc)
# Collect data
inspector = Inspector()
inspector.inspectAll()
# Add custom message and finish the function
# if "startWallClock" in event:
# inspector.addAttribute("startWallClock", event['startWallClock'])
body = json.loads(event.body)
print(body['fn'], flush=True)
fn = {"p": tm.preprocess,
"t": tm.train,
"q": tm.query}
fn[body['fn']]()
inspector.inspectAllDeltas()
# Include functionName
inspector.addAttribute("functionName", fn[body['fn']].__name__)
iret = inspector.finish()
ret = {
"status": 200,
"body": iret
}
return ret
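# Example request bodies for this handler (sketch; the keys come from the ``fn`` dict above):
#
#   {"fn": "p"}  # preprocess
#   {"fn": "t"}  # train
#   {"fn": "q"}  # query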
| en | 0.373447 | | 2.248833 | 2 |
src/pve_exporter/cli.py | jmangs/prometheus-pve-exporter | 0 | 8405 | """
Proxmox VE exporter for the Prometheus monitoring system.
"""
import sys
from argparse import ArgumentParser
from pve_exporter.http import start_http_server
def main(args=None):
"""
Main entry point.
"""
parser = ArgumentParser()
parser.add_argument('config', nargs='?', default='pve.yml',
help='Path to configuration file (pve.yml)')
parser.add_argument('port', nargs='?', type=int, default='9221',
help='Port on which the exporter is listening (9221)')
parser.add_argument('address', nargs='?', default='',
help='Address to which the exporter will bind')
params = parser.parse_args(args if args is None else sys.argv[1:])
start_http_server(params.config, params.port, params.address)
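# Example invocation (sketch; assumes packaging wires a console script to main()):
#
#   pve_exporter pve.yml 9221 0.0.0.0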
| """
Proxmox VE exporter for the Prometheus monitoring system.
"""
import sys
from argparse import ArgumentParser
from pve_exporter.http import start_http_server
def main(args=None):
"""
Main entry point.
"""
parser = ArgumentParser()
parser.add_argument('config', nargs='?', default='pve.yml',
help='Path to configuration file (pve.yml)')
parser.add_argument('port', nargs='?', type=int, default='9221',
help='Port on which the exporter is listening (9221)')
parser.add_argument('address', nargs='?', default='',
help='Address to which the exporter will bind')
params = parser.parse_args(args if args is None else sys.argv[1:])
start_http_server(params.config, params.port, params.address)
| en | 0.666024 | Proxmox VE exporter for the Prometheus monitoring system. Main entry point. | 2.633469 | 3 |
workers/repo_info_worker/repo_info_worker.py | vinodkahuja/augur | 2 | 8406 | #SPDX-License-Identifier: MIT
import logging, os, sys, time, requests, json
from datetime import datetime
from multiprocessing import Process, Queue
import pandas as pd
import sqlalchemy as s
from workers.worker_base import Worker
# NOTE: This worker primarily inserts rows into the REPO_INFO table, which serves the primary purposes of
# 1. Displaying discrete metadata like "number of forks" and how they change over time
# 2. Validating other workers, like those related to pull requests, issues, and commits. Our totals should be at or very near the totals in the repo_info table.
# This table also updates the REPO table in 2 cases:
# 1. Recognizing when a repository is a forked repository by updating the "forked_from" field and
# 2. Recognizing when a repository is archived, and recording the data we observed the change in status.
class RepoInfoWorker(Worker):
def __init__(self, config={}):
worker_type = "repo_info_worker"
# Define what this worker can be given and know how to interpret
given = [['github_url']]
models = ['repo_info']
# Define the tables needed to insert, update, or delete on
data_tables = ['repo_info', 'repo']
operations_tables = ['worker_history', 'worker_job']
# Run the general worker initialization
super().__init__(worker_type, config, given, models, data_tables, operations_tables)
# Define data collection info
self.tool_source = 'Repo Info Worker'
self.tool_version = '1.0.0'
self.data_source = 'GitHub API'
def repo_info_model(self, task, repo_id):
github_url = task['given']['github_url']
self.logger.info("Beginning filling the repo_info model for repo: " + github_url + "\n")
owner, repo = self.get_owner_repo(github_url)
url = 'https://api.github.com/graphql'
query = """
{
repository(owner:"%s", name:"%s"){
updatedAt
hasIssuesEnabled
issues(states:OPEN) {
totalCount
}
hasWikiEnabled
forkCount
defaultBranchRef {
name
}
watchers {
totalCount
}
id
licenseInfo {
name
url
}
stargazers {
totalCount
}
codeOfConduct {
name
url
}
issue_count: issues {
totalCount
}
issues_closed: issues(states:CLOSED) {
totalCount
}
pr_count: pullRequests {
totalCount
}
pr_open: pullRequests(states: OPEN) {
totalCount
}
pr_closed: pullRequests(states: CLOSED) {
totalCount
}
pr_merged: pullRequests(states: MERGED) {
totalCount
}
ref(qualifiedName: "master") {
target {
... on Commit {
history(first: 0){
totalCount
}
}
}
}
}
}
""" % (owner, repo)
# Hit the graphql endpoint and retry 3 times in case of failure
num_attempts = 0
success = False
data = None
while num_attempts < 3:
self.logger.info("Hitting endpoint: {} ...\n".format(url))
r = requests.post(url, json={'query': query}, headers=self.headers)
self.update_gh_rate_limit(r)
try:
data = r.json()
except:
data = json.loads(json.dumps(r.text))
if 'errors' in data:
self.logger.info("Error!: {}".format(data['errors']))
if data['errors'][0]['message'] == 'API rate limit exceeded':
self.update_gh_rate_limit(r)
continue
if 'data' in data:
success = True
data = data['data']['repository']
break
else:
self.logger.info("Request returned a non-data dict: {}\n".format(data))
if data['message'] == 'Not Found':
self.logger.info("Github repo was not found or does not exist for endpoint: {}\n".format(url))
break
if data['message'] == 'You have triggered an abuse detection mechanism. Please wait a few minutes before you try again.':
self.update_gh_rate_limit(r, temporarily_disable=True)
continue
if data['message'] == 'Bad credentials':
self.update_gh_rate_limit(r, bad_credentials=True)
continue
num_attempts += 1
if not success:
self.logger.error('Cannot hit endpoint after 3 attempts. \"Completing\" task.\n')
self.register_task_completion(self.task, repo_id, 'repo_info')
return
# Just checking that the data is accessible (would not be if repo no longer exists)
try:
data['updatedAt']
except Exception as e:
self.logger.error('Cannot access repo_info data: {}\nError: {}. \"Completing\" task.'.format(data, e))
self.register_task_completion(self.task, repo_id, 'repo_info')
return
# Get committers count info that requires a separate endpoint
committers_count = self.query_committers_count(owner, repo)
# Put all data together in format of the table
self.logger.info(f'Inserting repo info for repo with id:{repo_id}, owner:{owner}, name:{repo}\n')
rep_inf = {
'repo_id': repo_id,
'last_updated': data['updatedAt'] if 'updatedAt' in data else None,
'issues_enabled': data['hasIssuesEnabled'] if 'hasIssuesEnabled' in data else None,
'open_issues': data['issues']['totalCount'] if data['issues'] else None,
'pull_requests_enabled': None,
'wiki_enabled': data['hasWikiEnabled'] if 'hasWikiEnabled' in data else None,
'pages_enabled': None,
'fork_count': data['forkCount'] if 'forkCount' in data else None,
'default_branch': data['defaultBranchRef']['name'] if data['defaultBranchRef'] else None,
'watchers_count': data['watchers']['totalCount'] if data['watchers'] else None,
'UUID': None,
'license': data['licenseInfo']['name'] if data['licenseInfo'] else None,
'stars_count': data['stargazers']['totalCount'] if data['stargazers'] else None,
'committers_count': committers_count,
'issue_contributors_count': None,
'changelog_file': None,
'contributing_file': None,
'license_file': data['licenseInfo']['url'] if data['licenseInfo'] else None,
'code_of_conduct_file': data['codeOfConduct']['url'] if data['codeOfConduct'] else None,
'security_issue_file': None,
'security_audit_file': None,
'status': None,
'keywords': None,
'commit_count': data['ref']['target']['history']['totalCount'] if data['ref'] else None,
'issues_count': data['issue_count']['totalCount'] if data['issue_count'] else None,
'issues_closed': data['issues_closed']['totalCount'] if data['issues_closed'] else None,
'pull_request_count': data['pr_count']['totalCount'] if data['pr_count'] else None,
'pull_requests_open': data['pr_open']['totalCount'] if data['pr_open'] else None,
'pull_requests_closed': data['pr_closed']['totalCount'] if data['pr_closed'] else None,
'pull_requests_merged': data['pr_merged']['totalCount'] if data['pr_merged'] else None,
'tool_source': self.tool_source,
'tool_version': self.tool_version,
'data_source': self.data_source
}
result = self.db.execute(self.repo_info_table.insert().values(rep_inf))
self.logger.info(f"Primary Key inserted into repo_info table: {result.inserted_primary_key}\n")
self.results_counter += 1
# Note that the addition of information about where a repository may be forked from, and whether a repository is archived, updates the `repo` table, not the `repo_info` table.
forked = self.is_forked(owner, repo)
archived = self.is_archived(owner, repo)
archived_date_collected = None
if archived is not False:
archived_date_collected = archived
archived = 1
else:
archived = 0
rep_additional_data = {
'forked_from': forked,
'repo_archived': archived,
'repo_archived_date_collected': archived_date_collected
}
result = self.db.execute(self.repo_table.update().where(
self.repo_table.c.repo_id==repo_id).values(rep_additional_data))
self.logger.info(f"Inserted info for {owner}/{repo}\n")
# Register this task as completed
self.register_task_completion(self.task, repo_id, "repo_info")
def query_committers_count(self, owner, repo):
self.logger.info('Querying committers count\n')
url = f'https://api.github.com/repos/{owner}/{repo}/contributors?per_page=100'
committers = 0
try:
while True:
r = requests.get(url, headers=self.headers)
self.update_gh_rate_limit(r)
committers += len(r.json())
if 'next' not in r.links:
break
else:
url = r.links['next']['url']
except Exception:
self.logger.exception('An error occured while querying contributor count\n')
return committers
def is_forked(self, owner, repo): #/repos/:owner/:repo parent
self.logger.info('Querying parent info to verify if the repo is forked\n')
url = f'https://api.github.com/repos/{owner}/{repo}'
r = requests.get(url, headers=self.headers)
self.update_gh_rate_limit(r)
data = self.get_repo_data(url, r)
if 'fork' in data:
if 'parent' in data:
return data['parent']['full_name']
return 'Parent not available'
return False
def is_archived(self, owner, repo):
self.logger.info('Querying repository archived status\n')
url = f'https://api.github.com/repos/{owner}/{repo}'
r = requests.get(url, headers=self.headers)
self.update_gh_rate_limit(r)
data = self.get_repo_data(url, r)
if 'archived' in data:
if data['archived']:
if 'updated_at' in data:
return data['updated_at']
return 'Date not available'
return False
return False
def get_repo_data(self, url, response):
success = False
try:
data = response.json()
except:
data = json.loads(json.dumps(response.text))
if 'errors' in data:
self.logger.info("Error!: {}".format(data['errors']))
if data['errors'][0]['message'] == 'API rate limit exceeded':
self.update_gh_rate_limit(response)
if 'id' in data:
success = True
else:
self.logger.info("Request returned a non-data dict: {}\n".format(data))
if data['message'] == 'Not Found':
self.logger.info("Github repo was not found or does not exist for endpoint: {}\n".format(url))
if data['message'] == 'You have triggered an abuse detection mechanism. Please wait a few minutes before you try again.':
self.update_gh_rate_limit(response, temporarily_disable=True)
if data['message'] == 'Bad credentials':
self.update_gh_rate_limit(response, bad_credentials=True)
if not success:
self.register_task_failure(self.task, repo_id, "Failed to hit endpoint: {}".format(url))
return data
| en | 0.748817 | | 2.496588 | 2 |
benchmark/my_argparser.py | victor-estrade/SystGradDescent | 2 | 8407 | # coding: utf-8
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
from __future__ import unicode_literals
import argparse
def parse_args_tolerance():
parser = argparse.ArgumentParser(description='just for tolerance')
parser.add_argument("--tolerance", type=float,
default=0.1, help="tolerance value for Minuit migrad and simplex minimization")
args, _ = parser.parse_known_args()
return args.tolerance
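# Why parse_known_args: it lets the tolerance be read up front without failing on
# flags that only the full parsers below understand, e.g. (sketch):
#
#   tolerance = parse_args_tolerance()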
def GB_parse_args(main_description="Training launcher"):
parser = argparse.ArgumentParser(description=main_description)
parser.add_argument("--verbose", "-v", type=int, choices=[0, 1, 2],
default=0, help="increase output verbosity")
parser.add_argument("--start-cv", type=int,
default=0, help="start of i_cv for range(start, end)")
parser.add_argument("--end-cv", type=int,
default=30, help="end of i_cv for range(start, end)")
parser.add_argument("--tolerance", type=float,
default=0.1, help="tolerance value for Minuit migrad and simplex minimization")
parser.add_argument('--load-run', help='load saved runs. Do not run the models',
action='store_true')
parser.add_argument('--estimate-only', help='Turns off conditional estimation for V_stat and V_syst',
action='store_true')
parser.add_argument('--conditional-only', help='Turns off common estimation',
action='store_true')
# MODEL HYPER PARAMETERS
parser.add_argument('--n-estimators', help='number of estimators',
default=100, type=int)
parser.add_argument('--max-depth', help='maximum depth of trees',
default=3, type=int)
parser.add_argument('--learning-rate', '--lr', help='learning rate',
default=1e-1, type=float)
# OTHER
parser.add_argument('--no-cuda', '--no-gpu', help='flag to use or not the gpu',
action='store_false', dest='cuda')
parser.add_argument('--retrain', help='flag to force retraining',
action='store_true')
parser.add_argument('--skip-minuit', help='flag to skip minuit NLL minization',
action='store_true')
args = parser.parse_args()
return args
def REG_parse_args(main_description="Training launcher"):
parser = argparse.ArgumentParser(description=main_description)
parser.add_argument("--verbose", "-v", type=int, choices=[0, 1, 2],
default=0, help="increase output verbosity")
parser.add_argument("--start-cv", type=int,
default=0, help="start of i_cv for range(start, end)")
parser.add_argument("--end-cv", type=int,
default=30, help="end of i_cv for range(start, end)")
parser.add_argument("--tolerance", type=float,
default=0.1, help="tolerance value for Minuit migrad and simplex minimization")
parser.add_argument('--load-run', help='load saved runs. Do not run the models',
action='store_true')
parser.add_argument('--estimate-only', help='Turns off conditional estimation for V_stat and V_syst',
action='store_true')
parser.add_argument('--conditional-only', help='Turns off common estimation',
action='store_true')
# MODEL HYPER PARAMETERS
parser.add_argument('--learning-rate', '--lr', help='learning rate',
default=1e-4, type=float)
parser.add_argument('--beta1', help='beta 1 for Adam',
default=0.5, type=float)
parser.add_argument('--beta2', help='beta 2 for Adam',
default=0.9, type=float)
parser.add_argument('--weight-decay', help='weight decay for SGD',
default=0.0, type=float)
parser.add_argument('--optimizer', help='optimizer name', dest='optimizer_name',
default='Adam', type=str, choices=('Adam', 'SGD', 'ADAM', 'sgd', 'adam'))
parser.add_argument('--n-unit', help='Number of units in layers. Controls NN width.',
default=200, type=int)
parser.add_argument('--sample-size', help='data sample size',
default=1000, type=int)
parser.add_argument('--batch-size', help='mini-batch size',
default=20, type=int)
parser.add_argument('--n-steps', help='number of update steps',
default=1000, type=int)
# OTHER
parser.add_argument('--no-cuda', '--no-gpu', help='flag to use or not the gpu',
action='store_false', dest='cuda')
parser.add_argument('--retrain', help='flag to force retraining',
action='store_true')
args = parser.parse_args()
return args
def INFERNO_parse_args(main_description="Training launcher"):
parser = argparse.ArgumentParser(description=main_description)
parser.add_argument("--verbose", "-v", type=int, choices=[0, 1, 2],
default=0, help="increase output verbosity")
parser.add_argument("--start-cv", type=int,
default=0, help="start of i_cv for range(start, end)")
parser.add_argument("--end-cv", type=int,
default=30, help="end of i_cv for range(start, end)")
parser.add_argument("--tolerance", type=float,
default=0.1, help="tolerance value for Minuit migrad and simplex minimization")
parser.add_argument('--load-run', help='load saved runs. Do not run the models',
action='store_true')
parser.add_argument('--estimate-only', help='Turns off conditional estimation for V_stat and V_syst',
action='store_true')
parser.add_argument('--conditional-only', help='Turns off common estimation',
action='store_true')
# MODEL HYPER PARAMETERS
parser.add_argument('--learning-rate', '--lr', help='learning rate',
default=1e-3, type=float)
parser.add_argument('--temperature', help='control initial softmax steepness',
default=1.0, type=float)
parser.add_argument('--beta1', help='beta 1 for Adam',
default=0.5, type=float)
parser.add_argument('--beta2', help='beta 2 for Adam',
default=0.9, type=float)
parser.add_argument('--weight-decay', help='weight decay for SGD',
default=0.0, type=float)
parser.add_argument('--optimizer', help='optimizer name', dest='optimizer_name',
default='Adam', type=str, choices=('Adam', 'SGD', 'ADAM', 'sgd', 'adam'))
parser.add_argument('--n-unit', help='Number of units in layers. Controls NN width.',
default=200, type=int)
parser.add_argument('--n-bins', help='number of output bins',
default=10, type=int)
parser.add_argument('--sample-size', help='data sample size',
default=1000, type=int)
parser.add_argument('--batch-size', help='mini-batch size',
default=20, type=int)
parser.add_argument('--n-steps', help='number of update steps',
default=1000, type=int)
# OTHER
parser.add_argument('--no-cuda', '--no-gpu', help='flag to use or not the gpu',
action='store_false', dest='cuda')
parser.add_argument('--retrain', help='flag to force retraining',
action='store_true')
args = parser.parse_args()
return args
def NET_parse_args(main_description="Training launcher"):
parser = argparse.ArgumentParser(description=main_description)
parser.add_argument("--verbose", "-v", type=int, choices=[0, 1, 2],
default=0, help="increase output verbosity")
parser.add_argument("--start-cv", type=int,
default=0, help="start of i_cv for range(start, end)")
parser.add_argument("--end-cv", type=int,
default=30, help="end of i_cv for range(start, end)")
parser.add_argument("--tolerance", type=float,
default=0.1, help="tolerance value for Minuit migrad and simplex minimization")
parser.add_argument('--load-run', help='load saved runs. Do not run the models',
action='store_true')
parser.add_argument('--estimate-only', help='Turns off conditional estimation for V_stat and V_syst',
action='store_true')
parser.add_argument('--conditional-only', help='Turns off common estimation',
action='store_true')
# MODEL HYPER PARAMETERS
parser.add_argument('--learning-rate', '--lr', help='learning rate',
default=1e-3, type=float)
parser.add_argument('--beta1', help='beta 1 for Adam',
default=0.9, type=float)
parser.add_argument('--beta2', help='beta 2 for Adam',
default=0.999, type=float)
parser.add_argument('--weight-decay', help='weight decay for SGD',
default=0.0, type=float)
parser.add_argument('--optimizer', help='optimizer name', dest='optimizer_name',
default='Adam', type=str, choices=('Adam', 'SGD', 'ADAM', 'sgd', 'adam'))
parser.add_argument('--n-unit', help='Number of units in layers. Controls NN width.',
default=200, type=int)
parser.add_argument('--sample-size', help='data sample size',
default=1000, type=int)
parser.add_argument('--batch-size', help='mini-batch size',
default=1000, type=int)
parser.add_argument('--n-steps', help='number of update steps',
default=1000, type=int)
# OTHER
    parser.add_argument('--no-cuda', '--no-gpu', help='flag to disable GPU usage',
action='store_false', dest='cuda')
parser.add_argument('--retrain', help='flag to force retraining',
action='store_true')
args = parser.parse_args()
return args
def TP_parse_args(main_description="Training launcher"):
parser = argparse.ArgumentParser(description=main_description)
parser.add_argument("--verbose", "-v", type=int, choices=[0, 1, 2],
default=0, help="increase output verbosity")
parser.add_argument("--start-cv", type=int,
default=0, help="start of i_cv for range(start, end)")
parser.add_argument("--end-cv", type=int,
default=30, help="end of i_cv for range(start, end)")
parser.add_argument("--tolerance", type=float,
default=0.1, help="tolerance value for Minuit migrad and simplex minimization")
parser.add_argument('--load-run', help='load saved runs. Do not run the models',
action='store_true')
parser.add_argument('--estimate-only', help='Turns off conditional estimation for V_stat and V_syst',
action='store_true')
parser.add_argument('--conditional-only', help='Turns off common estimation',
action='store_true')
# MODEL HYPER PARAMETERS
parser.add_argument('--learning-rate', '--lr', help='learning rate',
default=1e-3, type=float)
parser.add_argument('--trade-off', help='trade-off between classic loss and adversarial loss',
default=1.0, type=float)
parser.add_argument('--beta1', help='beta 1 for Adam',
default=0.9, type=float)
parser.add_argument('--beta2', help='beta 2 for Adam',
default=0.999, type=float)
parser.add_argument('--weight-decay', help='weight decay for SGD',
default=0.0, type=float)
parser.add_argument('--optimizer', help='optimizer name', dest='optimizer_name',
default='Adam', type=str, choices=('Adam', 'SGD', 'ADAM', 'sgd', 'adam'))
parser.add_argument('--n-unit', help='Number of units in layers. Controls NN width.',
default=200, type=int)
parser.add_argument('--sample-size', help='data sample size',
default=1000, type=int)
parser.add_argument('--batch-size', help='mini-batch size',
default=1000, type=int)
parser.add_argument('--n-steps', help='number of update steps',
default=1000, type=int)
# OTHER
    parser.add_argument('--no-cuda', '--no-gpu', help='flag to disable GPU usage',
action='store_false', dest='cuda')
parser.add_argument('--retrain', help='flag to force retraining',
action='store_true')
args = parser.parse_args()
return args
def PIVOT_parse_args(main_description="Training launcher"):
parser = argparse.ArgumentParser(description=main_description)
parser.add_argument("--verbose", "-v", type=int, choices=[0, 1, 2],
default=0, help="increase output verbosity")
parser.add_argument("--start-cv", type=int,
default=0, help="start of i_cv for range(start, end)")
parser.add_argument("--end-cv", type=int,
default=30, help="end of i_cv for range(start, end)")
parser.add_argument("--tolerance", type=float,
default=0.1, help="tolerance value for Minuit migrad and simplex minimization")
parser.add_argument('--load-run', help='load saved runs. Do not run the models',
action='store_true')
parser.add_argument('--estimate-only', help='Turns off conditional estimation for V_stat and V_syst',
action='store_true')
parser.add_argument('--conditional-only', help='Turns off common estimation',
action='store_true')
# MODEL HYPER PARAMETERS
parser.add_argument('--learning-rate', '--lr', help='learning rate',
default=1e-3, type=float)
parser.add_argument('--trade-off', help='trade-off between classic loss and adversarial loss',
default=1.0, type=float)
parser.add_argument('--beta1', help='beta 1 for Adam',
default=0.9, type=float)
parser.add_argument('--beta2', help='beta 2 for Adam',
default=0.999, type=float)
parser.add_argument('--weight-decay', help='weight decay for SGD',
default=0.0, type=float)
parser.add_argument('--optimizer', help='optimizer name', dest='optimizer_name',
default='Adam', type=str, choices=('Adam', 'SGD', 'ADAM', 'sgd', 'adam'))
parser.add_argument('--n-unit', help='Number of units in layers. Controls NN width.',
default=200, type=int)
parser.add_argument('--sample-size', help='data sample size',
default=1000, type=int)
parser.add_argument('--batch-size', help='mini-batch size',
default=1000, type=int)
parser.add_argument('--n-steps', help='number of update steps',
default=1000, type=int)
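    # PIVOT-specific training schedule: separate pre-training steps for the classifier and the adversary, plus adversarial recovery steps.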
parser.add_argument('--n-net-pre-training-steps', help='number of update steps for pretraining the classifier',
default=1000, type=int)
parser.add_argument('--n-adv-pre-training-steps', help='number of update steps for pretraining the adversarial',
default=1000, type=int)
parser.add_argument('--n-recovery-steps', help='number of update steps for adversarial recovery',
default=1, type=int)
# OTHER
    parser.add_argument('--no-cuda', '--no-gpu', help='flag to disable GPU usage',
action='store_false', dest='cuda')
parser.add_argument('--retrain', help='flag to force retraining',
action='store_true')
args = parser.parse_args()
return args
def FF_parse_args(main_description="Training launcher"):
parser = argparse.ArgumentParser(description=main_description)
parser.add_argument("--verbose", "-v", type=int, choices=[0, 1, 2],
default=0, help="increase output verbosity")
parser.add_argument("--start-cv", type=int,
default=0, help="start of i_cv for range(start, end)")
parser.add_argument("--end-cv", type=int,
default=30, help="end of i_cv for range(start, end)")
parser.add_argument("--tolerance", type=float,
default=0.1, help="tolerance value for Minuit migrad and simplex minimization")
parser.add_argument('--load-run', help='load saved runs. Do not run the models',
action='store_true')
parser.add_argument('--estimate-only', help='Turns off conditional estimation for V_stat and V_syst',
action='store_true')
parser.add_argument('--conditional-only', help='Turns off common estimation',
action='store_true')
# MODEL HYPER PARAMETERS
parser.add_argument('--feature-id', help='feature index to filter on',
default=0, type=int)
# OTHER
    parser.add_argument('--no-cuda', '--no-gpu', help='flag to disable GPU usage',
action='store_false', dest='cuda')
parser.add_argument('--retrain', help='flag to force retraining',
action='store_true')
    parser.add_argument('--skip-minuit', help='flag to skip Minuit NLL minimization',
action='store_true')
args = parser.parse_args()
return args
| # coding: utf-8
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
from __future__ import unicode_literals
import argparse
def parse_args_tolerance():
parser = argparse.ArgumentParser(description='just for tolerance')
parser.add_argument("--tolerance", type=float,
default=0.1, help="tolerance value for Minuit migrad and simplex minimization")
args, _ = parser.parse_known_args()
return args.tolerance
def GB_parse_args(main_description="Training launcher"):
parser = argparse.ArgumentParser(description=main_description)
parser.add_argument("--verbose", "-v", type=int, choices=[0, 1, 2],
default=0, help="increase output verbosity")
parser.add_argument("--start-cv", type=int,
default=0, help="start of i_cv for range(start, end)")
parser.add_argument("--end-cv", type=int,
default=30, help="end of i_cv for range(start, end)")
parser.add_argument("--tolerance", type=float,
default=0.1, help="tolerance value for Minuit migrad and simplex minimization")
parser.add_argument('--load-run', help='load saved runs. Do not run the models',
action='store_true')
parser.add_argument('--estimate-only', help='Turns off conditional estimation for V_stat and V_syst',
action='store_true')
parser.add_argument('--conditional-only', help='Turns off common estimation',
action='store_true')
# MODEL HYPER PARAMETERS
parser.add_argument('--n-estimators', help='number of estimators',
default=100, type=int)
parser.add_argument('--max-depth', help='maximum depth of trees',
default=3, type=int)
parser.add_argument('--learning-rate', '--lr', help='learning rate',
default=1e-1, type=float)
# OTHER
    parser.add_argument('--no-cuda', '--no-gpu', help='flag to disable GPU usage',
action='store_false', dest='cuda')
parser.add_argument('--retrain', help='flag to force retraining',
action='store_true')
    parser.add_argument('--skip-minuit', help='flag to skip Minuit NLL minimization',
action='store_true')
args = parser.parse_args()
return args
def REG_parse_args(main_description="Training launcher"):
parser = argparse.ArgumentParser(description=main_description)
parser.add_argument("--verbose", "-v", type=int, choices=[0, 1, 2],
default=0, help="increase output verbosity")
parser.add_argument("--start-cv", type=int,
default=0, help="start of i_cv for range(start, end)")
parser.add_argument("--end-cv", type=int,
default=30, help="end of i_cv for range(start, end)")
parser.add_argument("--tolerance", type=float,
default=0.1, help="tolerance value for Minuit migrad and simplex minimization")
parser.add_argument('--load-run', help='load saved runs. Do not run the models',
action='store_true')
parser.add_argument('--estimate-only', help='Turns off conditional estimation for V_stat and V_syst',
action='store_true')
parser.add_argument('--conditional-only', help='Turns off common estimation',
action='store_true')
# MODEL HYPER PARAMETERS
parser.add_argument('--learning-rate', '--lr', help='learning rate',
default=1e-4, type=float)
parser.add_argument('--beta1', help='beta 1 for Adam',
default=0.5, type=float)
parser.add_argument('--beta2', help='beta 2 for Adam',
default=0.9, type=float)
parser.add_argument('--weight-decay', help='weight decay for SGD',
default=0.0, type=float)
parser.add_argument('--optimizer', help='optimizer name', dest='optimizer_name',
default='Adam', type=str, choices=('Adam', 'SGD', 'ADAM', 'sgd', 'adam'))
parser.add_argument('--n-unit', help='Number of units in layers. Controls NN width.',
default=200, type=int)
parser.add_argument('--sample-size', help='data sample size',
default=1000, type=int)
parser.add_argument('--batch-size', help='mini-batch size',
default=20, type=int)
parser.add_argument('--n-steps', help='number of update steps',
default=1000, type=int)
# OTHER
    parser.add_argument('--no-cuda', '--no-gpu', help='flag to disable GPU usage',
action='store_false', dest='cuda')
parser.add_argument('--retrain', help='flag to force retraining',
action='store_true')
args = parser.parse_args()
return args
def INFERNO_parse_args(main_description="Training launcher"):
parser = argparse.ArgumentParser(description=main_description)
parser.add_argument("--verbose", "-v", type=int, choices=[0, 1, 2],
default=0, help="increase output verbosity")
parser.add_argument("--start-cv", type=int,
default=0, help="start of i_cv for range(start, end)")
parser.add_argument("--end-cv", type=int,
default=30, help="end of i_cv for range(start, end)")
parser.add_argument("--tolerance", type=float,
default=0.1, help="tolerance value for Minuit migrad and simplex minimization")
parser.add_argument('--load-run', help='load saved runs. Do not run the models',
action='store_true')
parser.add_argument('--estimate-only', help='Turns off conditional estimation for V_stat and V_syst',
action='store_true')
parser.add_argument('--conditional-only', help='Turns off common estimation',
action='store_true')
# MODEL HYPER PARAMETERS
parser.add_argument('--learning-rate', '--lr', help='learning rate',
default=1e-3, type=float)
parser.add_argument('--temperature', help='control initial softmax steepness',
default=1.0, type=float)
parser.add_argument('--beta1', help='beta 1 for Adam',
default=0.5, type=float)
parser.add_argument('--beta2', help='beta 2 for Adam',
default=0.9, type=float)
parser.add_argument('--weight-decay', help='weight decay for SGD',
default=0.0, type=float)
parser.add_argument('--optimizer', help='optimizer name', dest='optimizer_name',
default='Adam', type=str, choices=('Adam', 'SGD', 'ADAM', 'sgd', 'adam'))
parser.add_argument('--n-unit', help='Number of units in layers. Controls NN width.',
default=200, type=int)
parser.add_argument('--n-bins', help='number of output bins',
default=10, type=int)
parser.add_argument('--sample-size', help='data sample size',
default=1000, type=int)
parser.add_argument('--batch-size', help='mini-batch size',
default=20, type=int)
parser.add_argument('--n-steps', help='number of update steps',
default=1000, type=int)
# OTHER
    parser.add_argument('--no-cuda', '--no-gpu', help='flag to disable GPU usage',
action='store_false', dest='cuda')
parser.add_argument('--retrain', help='flag to force retraining',
action='store_true')
args = parser.parse_args()
return args
def NET_parse_args(main_description="Training launcher"):
parser = argparse.ArgumentParser(description=main_description)
parser.add_argument("--verbose", "-v", type=int, choices=[0, 1, 2],
default=0, help="increase output verbosity")
parser.add_argument("--start-cv", type=int,
default=0, help="start of i_cv for range(start, end)")
parser.add_argument("--end-cv", type=int,
default=30, help="end of i_cv for range(start, end)")
parser.add_argument("--tolerance", type=float,
default=0.1, help="tolerance value for Minuit migrad and simplex minimization")
parser.add_argument('--load-run', help='load saved runs. Do not run the models',
action='store_true')
parser.add_argument('--estimate-only', help='Turns off conditional estimation for V_stat and V_syst',
action='store_true')
parser.add_argument('--conditional-only', help='Turns off common estimation',
action='store_true')
# MODEL HYPER PARAMETERS
parser.add_argument('--learning-rate', '--lr', help='learning rate',
default=1e-3, type=float)
parser.add_argument('--beta1', help='beta 1 for Adam',
default=0.9, type=float)
parser.add_argument('--beta2', help='beta 2 for Adam',
default=0.999, type=float)
parser.add_argument('--weight-decay', help='weight decay for SGD',
default=0.0, type=float)
parser.add_argument('--optimizer', help='optimizer name', dest='optimizer_name',
default='Adam', type=str, choices=('Adam', 'SGD', 'ADAM', 'sgd', 'adam'))
parser.add_argument('--n-unit', help='Number of units in layers. Controls NN width.',
default=200, type=int)
parser.add_argument('--sample-size', help='data sample size',
default=1000, type=int)
parser.add_argument('--batch-size', help='mini-batch size',
default=1000, type=int)
parser.add_argument('--n-steps', help='number of update steps',
default=1000, type=int)
# OTHER
    parser.add_argument('--no-cuda', '--no-gpu', help='flag to disable GPU usage',
action='store_false', dest='cuda')
parser.add_argument('--retrain', help='flag to force retraining',
action='store_true')
args = parser.parse_args()
return args
def TP_parse_args(main_description="Training launcher"):
parser = argparse.ArgumentParser(description=main_description)
parser.add_argument("--verbose", "-v", type=int, choices=[0, 1, 2],
default=0, help="increase output verbosity")
parser.add_argument("--start-cv", type=int,
default=0, help="start of i_cv for range(start, end)")
parser.add_argument("--end-cv", type=int,
default=30, help="end of i_cv for range(start, end)")
parser.add_argument("--tolerance", type=float,
default=0.1, help="tolerance value for Minuit migrad and simplex minimization")
parser.add_argument('--load-run', help='load saved runs. Do not run the models',
action='store_true')
parser.add_argument('--estimate-only', help='Turns off conditional estimation for V_stat and V_syst',
action='store_true')
parser.add_argument('--conditional-only', help='Turns off common estimation',
action='store_true')
# MODEL HYPER PARAMETERS
parser.add_argument('--learning-rate', '--lr', help='learning rate',
default=1e-3, type=float)
parser.add_argument('--trade-off', help='trade-off between classic loss and adversarial loss',
default=1.0, type=float)
parser.add_argument('--beta1', help='beta 1 for Adam',
default=0.9, type=float)
parser.add_argument('--beta2', help='beta 2 for Adam',
default=0.999, type=float)
parser.add_argument('--weight-decay', help='weight decay for SGD',
default=0.0, type=float)
parser.add_argument('--optimizer', help='optimizer name', dest='optimizer_name',
default='Adam', type=str, choices=('Adam', 'SGD', 'ADAM', 'sgd', 'adam'))
parser.add_argument('--n-unit', help='Number of units in layers. Controls NN width.',
default=200, type=int)
parser.add_argument('--sample-size', help='data sample size',
default=1000, type=int)
parser.add_argument('--batch-size', help='mini-batch size',
default=1000, type=int)
parser.add_argument('--n-steps', help='number of update steps',
default=1000, type=int)
# OTHER
    parser.add_argument('--no-cuda', '--no-gpu', help='flag to disable GPU usage',
action='store_false', dest='cuda')
parser.add_argument('--retrain', help='flag to force retraining',
action='store_true')
args = parser.parse_args()
return args
def PIVOT_parse_args(main_description="Training launcher"):
parser = argparse.ArgumentParser(description=main_description)
parser.add_argument("--verbose", "-v", type=int, choices=[0, 1, 2],
default=0, help="increase output verbosity")
parser.add_argument("--start-cv", type=int,
default=0, help="start of i_cv for range(start, end)")
parser.add_argument("--end-cv", type=int,
default=30, help="end of i_cv for range(start, end)")
parser.add_argument("--tolerance", type=float,
default=0.1, help="tolerance value for Minuit migrad and simplex minimization")
parser.add_argument('--load-run', help='load saved runs. Do not run the models',
action='store_true')
parser.add_argument('--estimate-only', help='Turns off conditional estimation for V_stat and V_syst',
action='store_true')
parser.add_argument('--conditional-only', help='Turns off common estimation',
action='store_true')
# MODEL HYPER PARAMETERS
parser.add_argument('--learning-rate', '--lr', help='learning rate',
default=1e-3, type=float)
parser.add_argument('--trade-off', help='trade-off between classic loss and adversarial loss',
default=1.0, type=float)
parser.add_argument('--beta1', help='beta 1 for Adam',
default=0.9, type=float)
parser.add_argument('--beta2', help='beta 2 for Adam',
default=0.999, type=float)
parser.add_argument('--weight-decay', help='weight decay for SGD',
default=0.0, type=float)
parser.add_argument('--optimizer', help='optimizer name', dest='optimizer_name',
default='Adam', type=str, choices=('Adam', 'SGD', 'ADAM', 'sgd', 'adam'))
parser.add_argument('--n-unit', help='Number of units in layers. Controls NN width.',
default=200, type=int)
parser.add_argument('--sample-size', help='data sample size',
default=1000, type=int)
parser.add_argument('--batch-size', help='mini-batch size',
default=1000, type=int)
parser.add_argument('--n-steps', help='number of update steps',
default=1000, type=int)
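    # PIVOT-specific training schedule: separate pre-training steps for the classifier and the adversary, plus adversarial recovery steps.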
parser.add_argument('--n-net-pre-training-steps', help='number of update steps for pretraining the classifier',
default=1000, type=int)
parser.add_argument('--n-adv-pre-training-steps', help='number of update steps for pretraining the adversarial',
default=1000, type=int)
parser.add_argument('--n-recovery-steps', help='number of update steps for adversarial recovery',
default=1, type=int)
# OTHER
    parser.add_argument('--no-cuda', '--no-gpu', help='flag to disable GPU usage',
action='store_false', dest='cuda')
parser.add_argument('--retrain', help='flag to force retraining',
action='store_true')
args = parser.parse_args()
return args
def FF_parse_args(main_description="Training launcher"):
parser = argparse.ArgumentParser(description=main_description)
parser.add_argument("--verbose", "-v", type=int, choices=[0, 1, 2],
default=0, help="increase output verbosity")
parser.add_argument("--start-cv", type=int,
default=0, help="start of i_cv for range(start, end)")
parser.add_argument("--end-cv", type=int,
default=30, help="end of i_cv for range(start, end)")
parser.add_argument("--tolerance", type=float,
default=0.1, help="tolerance value for Minuit migrad and simplex minimization")
parser.add_argument('--load-run', help='load saved runs. Do not run the models',
action='store_true')
parser.add_argument('--estimate-only', help='Turns off conditional estimation for V_stat and V_syst',
action='store_true')
parser.add_argument('--conditional-only', help='Turns off common estimation',
action='store_true')
# MODEL HYPER PARAMETERS
parser.add_argument('--feature-id', help='feature index to filter on',
default=0, type=int)
# OTHER
    parser.add_argument('--no-cuda', '--no-gpu', help='flag to disable GPU usage',
action='store_false', dest='cuda')
parser.add_argument('--retrain', help='flag to force retraining',
action='store_true')
    parser.add_argument('--skip-minuit', help='flag to skip Minuit NLL minimization',
action='store_true')
args = parser.parse_args()
return args
| en | 0.21762 | # coding: utf-8 # MODEL HYPER PARAMETERS # OTHER # MODEL HYPER PARAMETERS # OTHER # MODEL HYPER PARAMETERS # OTHER # MODEL HYPER PARAMETERS # OTHER # MODEL HYPER PARAMETERS # OTHER # MODEL HYPER PARAMETERS # OTHER # MODEL HYPER PARAMETERS # OTHER | 2.295434 | 2 |
src/main/python/main.py | SarthakJariwala/Shockley-Queisser-Calculator | 1 | 8408 | <reponame>SarthakJariwala/Shockley-Queisser-Calculator
from fbs_runtime.application_context.PyQt5 import ApplicationContext, cached_property
from fbs_runtime.platform import is_windows, is_mac
# system imports
import sys
# module imports
from PyQt5 import uic, QtWidgets
from PyQt5.QtWidgets import QMessageBox
import numpy as np
import matplotlib.pyplot as plt
import matplotlib
import scipy.constants as constants
from scipy.integrate import simps, quad
from scipy.interpolate import splrep, splint
from scipy.optimize import fmin
class AppContext(ApplicationContext):
def run(self):
self.main_window.show()
return self.app.exec_()
def get_design(self):
qtCreatorFile = self.get_resource("SQ_GUI.ui")
return qtCreatorFile
def get_file(self):
astmg_file = self.get_resource("ASTMG173.csv")
return astmg_file
@cached_property
def main_window(self):
return MainWindow(self.get_design(), self.get_file())
if is_windows():
matplotlib.use('Qt5Agg')
elif is_mac():
matplotlib.use('macosx')
class MainWindow(QtWidgets.QMainWindow):
def __init__(self, uiFile, astmg173_file):
super(MainWindow, self).__init__()
#Create Main Window
self.ui = uic.loadUi(uiFile, self)
#self.ui = WindowTemplate()
#self.ui.setupUi(self)
#Connect PushButtons to Functions etc
self.ui.CalcualteSQ_pushButton.clicked.connect(self.calculate_SQ)
self.ui.load_pushButton.clicked.connect(self.load_SMARTS_spectrum)
self.ui.save_pushButton.clicked.connect(self.save_bandgap_array)
#start app with checked "plot j-v curve"
self.ui.plot_checkBox.setChecked(True)
self.astmg173_file = astmg173_file
self.out_array = None
self.show()
def load_SMARTS_spectrum(self):
filename = QtWidgets.QFileDialog.getOpenFileName(self)
try:
self.SMARTS = np.genfromtxt(filename[0], skip_header=1)
self.ui.load_checkBox.setChecked(False)
except Exception as e:
QMessageBox.information(
self, None,
str(e), QMessageBox.Ok
)
def calculate_SQ(self):
h = constants.physical_constants['Planck constant'][0] # units of J*s
h_ev = constants.physical_constants['Planck constant in eV s'][0]
c_nm = (constants.physical_constants['speed of light in vacuum'][0]) * 1e9
c = (constants.physical_constants['speed of light in vacuum'][0])
e_charge = constants.physical_constants['elementary charge'][0]
kb_ev = constants.physical_constants['Boltzmann constant in eV/K'][0]
"""User settings"""
Tcell = self.ui.temp_spinBox.value() #temperature of solar cell in degrees K
bandgap = self.ui.bandgap_doubleSpinBox.value() #enter bandgap in eV
#self.ui.textBrowser.append(str('Tcell = %.3f' %(Tcell)))
plot_jv = self.ui.plot_checkBox.isChecked() #'True' if you want to plot the SQ JV curve for "bandgap"
plot_bandgap_array = self.ui.calc_SQ_array_checkBox.isChecked() #'True' if you want to plot SQ parameters for an array of bandgaps
                                            # starting from "bandgap_array_min" to "bandgap_array_max"
# with number of points "num_points_bandgap_array"
# (see below)
#'False' if you just want SQ data for one bandgap (faster)
bandgap_array_min = self.ui.bandgap_min_doubleSpinBox.value() #in eV
bandgap_array_max = self.ui.bandgap_max_doubleSpinBox.value() # in eV
num_points_bandgap_array = self.ui.no_points_spinBox.value()
"""Programming below"""
bandgap_array = np.linspace(bandgap_array_min, bandgap_array_max, num_points_bandgap_array)
#First convert AM1.5 spectrum from W/m^2/nm to W/m^2/ev
if self.ui.load_checkBox.isChecked():
astmg173 = np.loadtxt(self.astmg173_file, delimiter = ',', skiprows = 2)
am15_wav = np.copy(astmg173[:,0]) #AM1.5 wavelength axis in nm
am15 = np.copy(astmg173[:,2]) #AM1.5 in units of W/m^2/nm = J/s*m^2/nm
else:
try:
astmg173 = self.SMARTS
am15_wav = np.copy(astmg173[:,0]) #AM1.5 wavelength axis in nm
am15 = np.copy(astmg173[:,1]) #AM1.5 in units of W/m^2/nm = J/s*m^2/nm
except:
QMessageBox.information(
                    self, None,
                    "No valid spectrum file found!\n\n"+
                    "Load a valid file or check the 'Use ASTMG173' box"
)
return
total_power_nm = simps(am15, x = am15_wav) #Integrate over nm to check that total power density = 1000 W/m^2
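        # Convert the AM1.5 spectrum from W/m^2/nm to W/m^2/eV: E = hc/lambda, so the Jacobian |dlambda/dE| = hc/E^2.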
am15_ev = h_ev * (c_nm) / (am15_wav )
am15_wats_ev = am15 * (h_ev * c_nm/ ((am15_ev) ** 2.0))
am15_ev_flip = am15_ev[::-1]
am15_wats_ev_flip = am15_wats_ev[::-1]
total_power_ev = simps(am15_wats_ev_flip, x = am15_ev_flip) #Integrate over eV to check that total power density = 1000 W/m^2
am15_photons_ev = am15_wats_ev_flip / (am15_ev_flip * e_charge)
am15_photons_nm = am15 / (am15_ev * e_charge)
total_photonflux_ev = simps(am15_photons_ev, x = am15_ev_flip)
total_photonflux_nm = simps(am15_photons_nm , x = am15_wav)
total_photonflux_ev_splrep = splrep(am15_ev_flip, am15_photons_ev)
emin = am15_ev_flip[0]
emax = am15_ev_flip[len(am15_ev_flip) - 1]
def solar_photons_above_gap(Egap): #units of photons / sec *m^2
return splint(Egap, emax,total_photonflux_ev_splrep)
def RR0(Egap):
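            # Detailed-balance dark-current term: blackbody photon flux emitted by the cell above Egap at temperature Tcell.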
integrand = lambda eV : eV ** 2.0 / (np.exp(eV / (kb_ev * Tcell)) - 1)
integral = quad(integrand, Egap, emax, full_output=1)[0]
return ((2.0 * np.pi / ((c ** 2.0) * (h_ev ** 3.0)))) * integral
        def current_density(V, Egap):  # convert from A/m^2 to mA/cm^2: multiply by 1000 (A -> mA) and by 0.01**2 (per m^2 -> per cm^2)
cur_dens = e_charge * (solar_photons_above_gap(Egap) - RR0(Egap) * np.exp( V / (kb_ev * Tcell)))
return cur_dens * 1000 * (0.01 ** 2.0)
def JSC(Egap):
return current_density(0, Egap)
def VOC(Egap):
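            # Detailed-balance open-circuit voltage: Voc = kB*Tcell * ln(absorbed solar photon flux / radiative emission flux), in volts.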
return (kb_ev * Tcell) * np.log(solar_photons_above_gap(Egap) / RR0(Egap))
def fmax(func_to_maximize, initial_guess=0):
"""return the x that maximizes func_to_maximize(x)"""
func_to_minimize = lambda x : -func_to_maximize(x)
return fmin(func_to_minimize, initial_guess, disp=False)[0]
def V_mpp_Jmpp_maxpower_maxeff_ff(Egap):
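            # Find the maximum-power point by maximizing V*J(V); efficiency and fill factor follow from it.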
vmpp = fmax(lambda V : V * current_density(V, Egap))
jmpp = current_density(vmpp, Egap)
maxpower = vmpp * jmpp
max_eff = maxpower / (total_power_ev * 1000 * (0.01 ** 2.0))
jsc_return = JSC(Egap)
voc_return = VOC(Egap)
ff = maxpower / (jsc_return * voc_return)
return [vmpp, jmpp, maxpower, max_eff, ff, jsc_return, voc_return]
maxpcemeta = V_mpp_Jmpp_maxpower_maxeff_ff(bandgap)
self.ui.textBrowser.append(str('For Bandgap = %.3f eV, TCell = %.3f K:\nJSC = %.3f mA/cm^2\nVOC = %.3f V\nFF = %.3f\nPCE = %.3f' % (bandgap, Tcell, maxpcemeta[5], maxpcemeta[6],maxpcemeta[4], maxpcemeta[3] * 100)))
if plot_bandgap_array == True:
pce_array = np.empty_like(bandgap_array)
ff_array = np.empty_like(bandgap_array)
voc_array = np.empty_like(bandgap_array)
jsc_array = np.empty_like(bandgap_array)
for i in range(len(bandgap_array)):
metadata = V_mpp_Jmpp_maxpower_maxeff_ff(bandgap_array[i])
pce_array[i] = metadata[3]
ff_array[i] = metadata[4]
voc_array[i] = metadata[6]
jsc_array[i] = metadata[5]
self.out_array = np.array((bandgap_array,pce_array,ff_array, voc_array,jsc_array)).T
plt.figure(figsize=(5,4))
plt.title('Cell Temperature = %.2f K' %(Tcell))
plt.xlim(bandgap_array[0], bandgap_array[len(bandgap_array) - 1])
plt.ylabel('PCE (%)')
plt.xlabel('Bandgap (eV)')
plt.plot(bandgap_array, pce_array * 100)
plt.tight_layout()
plt.show()
plt.figure(figsize=(5,4))
plt.title('Cell Temperature = %.2f K' %(Tcell))
plt.ylim(0, 1)
plt.xlim(bandgap_array[0], bandgap_array[len(bandgap_array) - 1])
plt.ylabel('Fill Factor')
plt.xlabel('Bandgap (eV)')
plt.plot(bandgap_array, ff_array)
plt.tight_layout()
plt.show()
plt.figure(figsize=(5,4))
plt.title('Cell Temperature = %.2f K' %(Tcell))
plt.xlim(bandgap_array[0], bandgap_array[len(bandgap_array) - 1])
plt.ylabel('Jsc (mA/cm$^2$)')
plt.xlabel('Bandgap (eV)')
plt.plot(bandgap_array, jsc_array)
plt.tight_layout()
plt.show()
plt.figure(figsize=(5,4))
plt.title('Cell Temperature = %.2f K' %(Tcell))
plt.xlim(bandgap_array[0], bandgap_array[len(bandgap_array) - 1])
plt.ylabel('Voc (V)')
plt.xlabel('Bandgap (eV)')
plt.plot(bandgap_array, voc_array, label = 'S-Q Voc')
plt.plot(bandgap_array, bandgap_array, '--', label = 'Bandgap')
plt.legend(loc = 'best')
plt.tight_layout()
plt.show()
self.ui.textBrowser.append('--')
else:
self.ui.textBrowser.append('--')
def JV_curve(Egap):
volt_array = np.linspace(0, VOC(Egap), 200)
j_array = np.empty_like(volt_array)
for i in range(len(volt_array)):
j_array[i] = current_density(volt_array[i], Egap)
return [volt_array, j_array]
if plot_jv == True:
jv_meta = JV_curve(bandgap)
v_array = jv_meta[0]
jv_array = jv_meta[1]
plt.figure(figsize=(5,4))
plt.ylabel('Current Density (mA/cm$^2$)')
plt.xlabel('Voltage (V)')
plt.plot(v_array, -jv_array)
plt.title('J-V Curve for '+str(self.ui.bandgap_doubleSpinBox.value())+'eV')
plt.tight_layout()
plt.show()
self.ui.textBrowser.append('--')
else:
self.ui.textBrowser.append('--')
def save_bandgap_array(self):
if self.out_array is None:
self.ui.textBrowser.append("Calculate SQ limit before saving file!")
else:
filename = QtWidgets.QFileDialog.getSaveFileName(self)
np.savetxt(filename[0]+".txt", self.out_array, delimiter='\t', header="Bandgap, PCE, FillFactor, Voc, Jsc")
#def run():
# win = MainWindow()
# QtGui.QApplication.instance().exec_()
# return win
#run()
if __name__ == '__main__':
appctxt = AppContext() # 1. Instantiate ApplicationContext
exit_code = appctxt.run()
sys.exit(exit_code) # 2. Invoke appctxt.app.exec_() | from fbs_runtime.application_context.PyQt5 import ApplicationContext, cached_property
from fbs_runtime.platform import is_windows, is_mac
# system imports
import sys
# module imports
from PyQt5 import uic, QtWidgets
from PyQt5.QtWidgets import QMessageBox
import numpy as np
import matplotlib.pyplot as plt
import matplotlib
import scipy.constants as constants
from scipy.integrate import simps, quad
from scipy.interpolate import splrep, splint
from scipy.optimize import fmin
class AppContext(ApplicationContext):
def run(self):
self.main_window.show()
return self.app.exec_()
def get_design(self):
qtCreatorFile = self.get_resource("SQ_GUI.ui")
return qtCreatorFile
def get_file(self):
astmg_file = self.get_resource("ASTMG173.csv")
return astmg_file
@cached_property
def main_window(self):
return MainWindow(self.get_design(), self.get_file())
if is_windows():
matplotlib.use('Qt5Agg')
elif is_mac():
matplotlib.use('macosx')
class MainWindow(QtWidgets.QMainWindow):
def __init__(self, uiFile, astmg173_file):
super(MainWindow, self).__init__()
#Create Main Window
self.ui = uic.loadUi(uiFile, self)
#self.ui = WindowTemplate()
#self.ui.setupUi(self)
#Connect PushButtons to Functions etc
self.ui.CalcualteSQ_pushButton.clicked.connect(self.calculate_SQ)
self.ui.load_pushButton.clicked.connect(self.load_SMARTS_spectrum)
self.ui.save_pushButton.clicked.connect(self.save_bandgap_array)
#start app with checked "plot j-v curve"
self.ui.plot_checkBox.setChecked(True)
self.astmg173_file = astmg173_file
self.out_array = None
self.show()
def load_SMARTS_spectrum(self):
filename = QtWidgets.QFileDialog.getOpenFileName(self)
try:
self.SMARTS = np.genfromtxt(filename[0], skip_header=1)
self.ui.load_checkBox.setChecked(False)
except Exception as e:
QMessageBox.information(
self, None,
str(e), QMessageBox.Ok
)
def calculate_SQ(self):
h = constants.physical_constants['Planck constant'][0] # units of J*s
h_ev = constants.physical_constants['Planck constant in eV s'][0]
c_nm = (constants.physical_constants['speed of light in vacuum'][0]) * 1e9
c = (constants.physical_constants['speed of light in vacuum'][0])
e_charge = constants.physical_constants['elementary charge'][0]
kb_ev = constants.physical_constants['Boltzmann constant in eV/K'][0]
"""User settings"""
Tcell = self.ui.temp_spinBox.value() #temperature of solar cell in degrees K
bandgap = self.ui.bandgap_doubleSpinBox.value() #enter bandgap in eV
#self.ui.textBrowser.append(str('Tcell = %.3f' %(Tcell)))
plot_jv = self.ui.plot_checkBox.isChecked() #'True' if you want to plot the SQ JV curve for "bandgap"
plot_bandgap_array = self.ui.calc_SQ_array_checkBox.isChecked() #'True' if you want to plot SQ parameters for an array of bandgaps
                                            # starting from "bandgap_array_min" to "bandgap_array_max"
# with number of points "num_points_bandgap_array"
# (see below)
#'False' if you just want SQ data for one bandgap (faster)
bandgap_array_min = self.ui.bandgap_min_doubleSpinBox.value() #in eV
bandgap_array_max = self.ui.bandgap_max_doubleSpinBox.value() # in eV
num_points_bandgap_array = self.ui.no_points_spinBox.value()
"""Programming below"""
bandgap_array = np.linspace(bandgap_array_min, bandgap_array_max, num_points_bandgap_array)
#First convert AM1.5 spectrum from W/m^2/nm to W/m^2/ev
if self.ui.load_checkBox.isChecked():
astmg173 = np.loadtxt(self.astmg173_file, delimiter = ',', skiprows = 2)
am15_wav = np.copy(astmg173[:,0]) #AM1.5 wavelength axis in nm
am15 = np.copy(astmg173[:,2]) #AM1.5 in units of W/m^2/nm = J/s*m^2/nm
else:
try:
astmg173 = self.SMARTS
am15_wav = np.copy(astmg173[:,0]) #AM1.5 wavelength axis in nm
am15 = np.copy(astmg173[:,1]) #AM1.5 in units of W/m^2/nm = J/s*m^2/nm
except:
QMessageBox.information(
                    self, None,
                    "No valid spectrum file found!\n\n"+
                    "Load a valid file or check the 'Use ASTMG173' box"
)
return
total_power_nm = simps(am15, x = am15_wav) #Integrate over nm to check that total power density = 1000 W/m^2
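        # Convert the AM1.5 spectrum from W/m^2/nm to W/m^2/eV: E = hc/lambda, so the Jacobian |dlambda/dE| = hc/E^2.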
am15_ev = h_ev * (c_nm) / (am15_wav )
am15_wats_ev = am15 * (h_ev * c_nm/ ((am15_ev) ** 2.0))
am15_ev_flip = am15_ev[::-1]
am15_wats_ev_flip = am15_wats_ev[::-1]
total_power_ev = simps(am15_wats_ev_flip, x = am15_ev_flip) #Integrate over eV to check that total power density = 1000 W/m^2
am15_photons_ev = am15_wats_ev_flip / (am15_ev_flip * e_charge)
am15_photons_nm = am15 / (am15_ev * e_charge)
total_photonflux_ev = simps(am15_photons_ev, x = am15_ev_flip)
total_photonflux_nm = simps(am15_photons_nm , x = am15_wav)
total_photonflux_ev_splrep = splrep(am15_ev_flip, am15_photons_ev)
emin = am15_ev_flip[0]
emax = am15_ev_flip[len(am15_ev_flip) - 1]
def solar_photons_above_gap(Egap): #units of photons / sec *m^2
return splint(Egap, emax,total_photonflux_ev_splrep)
def RR0(Egap):
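            # Detailed-balance dark-current term: blackbody photon flux emitted by the cell above Egap at temperature Tcell.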
integrand = lambda eV : eV ** 2.0 / (np.exp(eV / (kb_ev * Tcell)) - 1)
integral = quad(integrand, Egap, emax, full_output=1)[0]
return ((2.0 * np.pi / ((c ** 2.0) * (h_ev ** 3.0)))) * integral
        def current_density(V, Egap):  # convert from A/m^2 to mA/cm^2: multiply by 1000 (A -> mA) and by 0.01**2 (per m^2 -> per cm^2)
cur_dens = e_charge * (solar_photons_above_gap(Egap) - RR0(Egap) * np.exp( V / (kb_ev * Tcell)))
return cur_dens * 1000 * (0.01 ** 2.0)
def JSC(Egap):
return current_density(0, Egap)
def VOC(Egap):
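            # Detailed-balance open-circuit voltage: Voc = kB*Tcell * ln(absorbed solar photon flux / radiative emission flux), in volts.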
return (kb_ev * Tcell) * np.log(solar_photons_above_gap(Egap) / RR0(Egap))
def fmax(func_to_maximize, initial_guess=0):
"""return the x that maximizes func_to_maximize(x)"""
func_to_minimize = lambda x : -func_to_maximize(x)
return fmin(func_to_minimize, initial_guess, disp=False)[0]
def V_mpp_Jmpp_maxpower_maxeff_ff(Egap):
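            # Find the maximum-power point by maximizing V*J(V); efficiency and fill factor follow from it.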
vmpp = fmax(lambda V : V * current_density(V, Egap))
jmpp = current_density(vmpp, Egap)
maxpower = vmpp * jmpp
max_eff = maxpower / (total_power_ev * 1000 * (0.01 ** 2.0))
jsc_return = JSC(Egap)
voc_return = VOC(Egap)
ff = maxpower / (jsc_return * voc_return)
return [vmpp, jmpp, maxpower, max_eff, ff, jsc_return, voc_return]
maxpcemeta = V_mpp_Jmpp_maxpower_maxeff_ff(bandgap)
self.ui.textBrowser.append(str('For Bandgap = %.3f eV, TCell = %.3f K:\nJSC = %.3f mA/cm^2\nVOC = %.3f V\nFF = %.3f\nPCE = %.3f' % (bandgap, Tcell, maxpcemeta[5], maxpcemeta[6],maxpcemeta[4], maxpcemeta[3] * 100)))
if plot_bandgap_array == True:
pce_array = np.empty_like(bandgap_array)
ff_array = np.empty_like(bandgap_array)
voc_array = np.empty_like(bandgap_array)
jsc_array = np.empty_like(bandgap_array)
for i in range(len(bandgap_array)):
metadata = V_mpp_Jmpp_maxpower_maxeff_ff(bandgap_array[i])
pce_array[i] = metadata[3]
ff_array[i] = metadata[4]
voc_array[i] = metadata[6]
jsc_array[i] = metadata[5]
self.out_array = np.array((bandgap_array,pce_array,ff_array, voc_array,jsc_array)).T
plt.figure(figsize=(5,4))
plt.title('Cell Temperature = %.2f K' %(Tcell))
plt.xlim(bandgap_array[0], bandgap_array[len(bandgap_array) - 1])
plt.ylabel('PCE (%)')
plt.xlabel('Bandgap (eV)')
plt.plot(bandgap_array, pce_array * 100)
plt.tight_layout()
plt.show()
plt.figure(figsize=(5,4))
plt.title('Cell Temperature = %.2f K' %(Tcell))
plt.ylim(0, 1)
plt.xlim(bandgap_array[0], bandgap_array[len(bandgap_array) - 1])
plt.ylabel('Fill Factor')
plt.xlabel('Bandgap (eV)')
plt.plot(bandgap_array, ff_array)
plt.tight_layout()
plt.show()
plt.figure(figsize=(5,4))
plt.title('Cell Temperature = %.2f K' %(Tcell))
plt.xlim(bandgap_array[0], bandgap_array[len(bandgap_array) - 1])
plt.ylabel('Jsc (mA/cm$^2$)')
plt.xlabel('Bandgap (eV)')
plt.plot(bandgap_array, jsc_array)
plt.tight_layout()
plt.show()
plt.figure(figsize=(5,4))
plt.title('Cell Temperature = %.2f K' %(Tcell))
plt.xlim(bandgap_array[0], bandgap_array[len(bandgap_array) - 1])
plt.ylabel('Voc (V)')
plt.xlabel('Bandgap (eV)')
plt.plot(bandgap_array, voc_array, label = 'S-Q Voc')
plt.plot(bandgap_array, bandgap_array, '--', label = 'Bandgap')
plt.legend(loc = 'best')
plt.tight_layout()
plt.show()
self.ui.textBrowser.append('--')
else:
self.ui.textBrowser.append('--')
def JV_curve(Egap):
volt_array = np.linspace(0, VOC(Egap), 200)
j_array = np.empty_like(volt_array)
for i in range(len(volt_array)):
j_array[i] = current_density(volt_array[i], Egap)
return [volt_array, j_array]
if plot_jv == True:
jv_meta = JV_curve(bandgap)
v_array = jv_meta[0]
jv_array = jv_meta[1]
plt.figure(figsize=(5,4))
plt.ylabel('Current Density (mA/cm$^2$)')
plt.xlabel('Voltage (V)')
plt.plot(v_array, -jv_array)
plt.title('J-V Curve for '+str(self.ui.bandgap_doubleSpinBox.value())+'eV')
plt.tight_layout()
plt.show()
self.ui.textBrowser.append('--')
else:
self.ui.textBrowser.append('--')
def save_bandgap_array(self):
if self.out_array is None:
self.ui.textBrowser.append("Calculate SQ limit before saving file!")
else:
filename = QtWidgets.QFileDialog.getSaveFileName(self)
np.savetxt(filename[0]+".txt", self.out_array, delimiter='\t', header="Bandgap, PCE, FillFactor, Voc, Jsc")
#def run():
# win = MainWindow()
# QtGui.QApplication.instance().exec_()
# return win
#run()
if __name__ == '__main__':
appctxt = AppContext() # 1. Instantiate ApplicationContext
exit_code = appctxt.run()
sys.exit(exit_code) # 2. Invoke appctxt.app.exec_() | en | 0.618388 | # system imports # module imports #Create Main Window #self.ui = WindowTemplate() #self.ui.setupUi(self) #Connect PushButtons to Functions etc #start app with checked "plot j-v curve" # units of J*s User settings #temperature of solar cell in degrees K #enter bandgap in eV #self.ui.textBrowser.append(str('Tcell = %.3f' %(Tcell))) #'True' if you want to plot the SQ JV curve for "bandgap" #'True' if you want to plot SQ parameters for an array of bandgaps # starting from "mbandgap_array_min" to "bandgap_array_max" # with number of points "num_points_bandgap_array" # (see below) #'False' if you just want SQ data for one bandgap (faster) #in eV # in eV Programming below #First convert AM1.5 spectrum from W/m^2/nm to W/m^2/ev #AM1.5 wavelength axis in nm #AM1.5 in units of W/m^2/nm = J/s*m^2/nm #AM1.5 wavelength axis in nm #AM1.5 in units of W/m^2/nm = J/s*m^2/nm #Integrate over nm to check that total power density = 1000 W/m^2 #Integrate over eV to check that total power density = 1000 W/m^2 #units of photons / sec *m^2 #to get from units of amps / m^2 to mA/ cm^2 ---multiply by 1000 to convert to mA ---- multiply by (0.01 ^2) to convert to cm^2 return the x that maximizes func_to_maximize(x) #def run(): # win = MainWindow() # QtGui.QApplication.instance().exec_() # return win #run() # 1. Instantiate ApplicationContext # 2. Invoke appctxt.app.exec_() | 2.023057 | 2 |
helpus/core.py | tov101/HelpUs | 0 | 8409 | <filename>helpus/core.py
import io
import logging
import os
import sys
from PyQt5 import QtGui, QtCore, QtWidgets
from helpus import icon_file_path
from helpus import __version__
LOGGER = logging.getLogger('HelpUs')
LOGGER.setLevel(logging.DEBUG)
class XStream(QtCore.QObject):
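    # Replacement for sys.stdout/sys.stderr: everything written is re-emitted as the messageWritten Qt signal so it can be shown in the GUI console.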
_stdout = None
_stderr = None
messageWritten = QtCore.pyqtSignal(str)
@staticmethod
def flush():
pass
@staticmethod
def fileno():
return -1
def write(self, msg):
if not self.signalsBlocked():
self.messageWritten.emit(msg)
@staticmethod
def stdout():
if not XStream._stdout:
XStream._stdout = XStream()
sys.stdout = XStream._stdout
return XStream._stdout
@staticmethod
def stderr():
if not XStream._stderr:
XStream._stderr = XStream()
sys.stderr = XStream._stderr
return XStream._stderr
class MyBreakPoint(QtWidgets.QDialog):
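    # Debugger front-end: a Qt dialog with an output console and pdb command buttons; it stands in for sys.stdin so pdb reads its commands from the GUI.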
_stdout = None
_stderr = None
messageWritten = QtCore.pyqtSignal(str)
HOOK_HEADER = '(Pdb) '
HOOK_INTERACT = '>>> '
HOOK_LINE_BREAK = '... '
HOOKS = [HOOK_HEADER, HOOK_INTERACT]
BUTTONS = [
'Continue',
'Next',
'Step',
'Where',
'Up',
'Down'
]
def __init__(self, parent=None):
super().__init__()
if not parent:
self.parentWidget = QtWidgets.QMainWindow()
else:
self.parentWidget = parent
# Change Window Modality, otherwise parentWidget won't let you use this widget
if self.parentWidget.windowModality() == QtCore.Qt.WindowModality.ApplicationModal:
self.parentWidget.hide()
self.parentWidget.setWindowModality(QtCore.Qt.WindowModality.NonModal)
self.parentWidget.showNormal()
# Set Icon
if icon_file_path and os.path.exists(icon_file_path):
self.setWindowIcon(QtGui.QIcon(icon_file_path))
# Set Flags
self.setWindowFlags(
QtCore.Qt.WindowSystemMenuHint |
QtCore.Qt.WindowTitleHint |
QtCore.Qt.WindowCloseButtonHint
)
# Resize
self.resize(513, 300)
# Create Layout
self.main_layout = QtWidgets.QHBoxLayout()
self.setLayout(self.main_layout)
self.setWindowTitle("HelpUs {}".format(__version__))
# Create Content Layouts
self.ConsoleLayout = QtWidgets.QVBoxLayout()
self.ButtonsLayout = QtWidgets.QVBoxLayout()
self.main_layout.addLayout(self.ButtonsLayout)
self.main_layout.addLayout(self.ConsoleLayout)
# Create OutputConsole
self.console = QtWidgets.QTextEdit(parent)
self.console.insertPlainText = self.__insert_plain_text
self.console.keyPressEvent = self.__key_press_event
self.ConsoleLayout.addWidget(self.console)
# Create buttons
for button_text in self.BUTTONS:
# Create Button Name
button_name = 'button_%s' % button_text.lower()
setattr(self, button_name, QtWidgets.QPushButton(button_text))
getattr(self, button_name).clicked.connect(self.__push_button)
# Add Button to Widget
self.ButtonsLayout.addWidget(getattr(self, button_name))
# Init Buffer
self.buffer = io.StringIO()
self.__set_enable_gui(False)
self.showNormal()
def __set_enable_gui(self, state=True):
"""
:param state:
:return:
"""
self.console.setEnabled(state)
for button_text in self.BUTTONS:
# Get Button Name
button_name = 'button_%s' % button_text.lower()
getattr(self, button_name).setEnabled(state)
if state:
self.console.setFocus()
def redirect_outerr_stream(self):
"""
:return:
"""
# Link Stream Output
XStream.stdout().messageWritten.connect(self.console.insertPlainText)
XStream.stderr().messageWritten.connect(self.console.insertPlainText)
def readline(self):
"""
:return:
"""
if not self.console.isEnabled():
self.__set_enable_gui(True)
# Reset Buffer
self.__reset_buffer()
        # pdb blocks on sys.stdin.readline(); spin the Qt event loop until the user submits a command via the console or a button
while self.buffer.tell() == 0:
QtCore.QCoreApplication.processEvents()
value = self.buffer.getvalue()
return value
def __key_press_event(self, event):
"""
:param event:
:return:
"""
# Get Last Line
document = self.console.document()
line_index = document.lineCount()
raw_last_line = document.findBlockByLineNumber(line_index - 1).text()
text = ''
current_hook = ''
        # Strip the leading pdb/interact prompt ('(Pdb) ' or '>>> ') from the last line
if raw_last_line:
for hook in self.HOOKS:
if raw_last_line.startswith(hook):
current_hook = hook
text = raw_last_line[len(hook):]
break
else:
text = raw_last_line
# Get Cursor position
line_from_zero = line_index - 1
current_cursor_line = self.console.textCursor().blockNumber()
current_cursor_column = self.console.textCursor().columnNumber()
# If Enter was pressed -> Process Expression
if event.key() == QtCore.Qt.Key.Key_Return and text:
# Consider Custom Clear Screen Command
if text == 'cls':
self.__clear_screen(raw_last_line)
return
# Replace Line Break with Enter
if self.HOOK_LINE_BREAK == text:
text = '\r\n'
elif self.HOOK_LINE_BREAK in text:
# Replace Line Break with tab
text = text.replace(self.HOOK_LINE_BREAK, '\t')
current_hook = self.HOOK_LINE_BREAK
self.__reset_buffer()
self.buffer.write(text)
self.__set_enable_gui(False)
        # Reject deletions when the cursor is not on the last line or sits inside the prompt prefix
if event.key() == QtCore.Qt.Key.Key_Backspace or event.key() == QtCore.Qt.Key.Key_Delete:
if current_cursor_line != line_from_zero or current_cursor_column <= len(current_hook):
return
if event.key() == QtCore.Qt.Key.Key_Home and current_cursor_line == line_from_zero:
if text:
temp_cursor = self.console.textCursor()
temp_cursor.movePosition(
QtGui.QTextCursor.MoveOperation.StartOfLine,
QtGui.QTextCursor.MoveMode.MoveAnchor
)
temp_cursor.movePosition(
QtGui.QTextCursor.MoveOperation.Right,
QtGui.QTextCursor.MoveMode.MoveAnchor,
len(current_hook)
)
self.console.setTextCursor(temp_cursor)
return
# Set Console Text to Black
self.console.setTextColor(QtCore.Qt.GlobalColor.black)
# Execute default method
QtWidgets.QTextEdit.keyPressEvent(self.console, event)
def __push_button(self):
# Read text from Button and use it as pdb keyword
button_scope = self.sender().text().lower()
self.__reset_buffer()
self.buffer.write(button_scope)
self.__set_enable_gui(False)
def __reset_buffer(self):
if isinstance(self.buffer, io.StringIO):
# Clear Buffer
self.buffer.truncate(0)
self.buffer.seek(0)
else:
self.buffer = io.StringIO()
def __insert_plain_text(self, message):
        # Color the output according to the message prefix (pdb prompt, interactive prompt, error text)
if message.startswith(self.HOOK_HEADER):
self.console.setTextColor(QtCore.Qt.GlobalColor.magenta)
QtWidgets.QTextEdit.insertPlainText(self.console, message)
return
elif message.startswith(self.HOOK_INTERACT):
self.console.setTextColor(QtCore.Qt.GlobalColor.darkMagenta)
QtWidgets.QTextEdit.insertPlainText(self.console, message)
return
if message.startswith('***'):
self.console.setTextColor(QtCore.Qt.GlobalColor.red)
QtWidgets.QTextEdit.insertPlainText(self.console, message)
# AutoScroll
self.console.verticalScrollBar().setValue(self.console.verticalScrollBar().maximum())
def __clear_screen(self, text):
current_hook = text
for hook in self.HOOKS:
if hook in current_hook:
current_hook = hook
break
self.console.clear()
self.console.insertPlainText(current_hook)
def get_qtconsole_object():
if isinstance(sys.stdin, MyBreakPoint):
return sys.stdin.console
else:
return MyBreakPoint.console
def setup_breakpoint_hook(parent, method, redirect_streams=False):
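    # Wrap the given method so every call first triggers breakpoint(); sys.stdin is replaced with the MyBreakPoint dialog, so pdb reads its commands from the GUI console.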
def __method(*args, **kwargs):
breakpoint()
return method(*args, **kwargs)
if not isinstance(sys.stdin, MyBreakPoint):
sys.stdin = MyBreakPoint(parent)
else:
# Restore Streams
sys.stdin = sys.__stdin__
sys.stdout = sys.__stdout__
sys.stderr = sys.__stderr__
        raise Exception(
            "Multiple instances are not allowed. It could be supported, but I'm too lazy to go deep with development."
)
if redirect_streams:
sys.stdin.redirect_outerr_stream()
return __method
if __name__ == '__main__':
p = QtWidgets.QApplication(sys.argv)
LOGGER.error('Ceva')
LOGGER.error = setup_breakpoint_hook(None, LOGGER.error, redirect_streams=True)
# LOGGER.error = setup_breakpoint_hook(None, LOGGER.error, redirect_streams=True)
x = 90
LOGGER.error('Altceva')
print(x)
| <filename>helpus/core.py
import io
import logging
import os
import sys
from PyQt5 import QtGui, QtCore, QtWidgets
from helpus import icon_file_path
from helpus import __version__
LOGGER = logging.getLogger('HelpUs')
LOGGER.setLevel(logging.DEBUG)
class XStream(QtCore.QObject):
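    # Replacement for sys.stdout/sys.stderr: everything written is re-emitted as the messageWritten Qt signal so it can be shown in the GUI console.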
_stdout = None
_stderr = None
messageWritten = QtCore.pyqtSignal(str)
@staticmethod
def flush():
pass
@staticmethod
def fileno():
return -1
def write(self, msg):
if not self.signalsBlocked():
self.messageWritten.emit(msg)
@staticmethod
def stdout():
if not XStream._stdout:
XStream._stdout = XStream()
sys.stdout = XStream._stdout
return XStream._stdout
@staticmethod
def stderr():
if not XStream._stderr:
XStream._stderr = XStream()
sys.stderr = XStream._stderr
return XStream._stderr
class MyBreakPoint(QtWidgets.QDialog):
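    # Debugger front-end: a Qt dialog with an output console and pdb command buttons; it stands in for sys.stdin so pdb reads its commands from the GUI.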
_stdout = None
_stderr = None
messageWritten = QtCore.pyqtSignal(str)
HOOK_HEADER = '(Pdb) '
HOOK_INTERACT = '>>> '
HOOK_LINE_BREAK = '... '
HOOKS = [HOOK_HEADER, HOOK_INTERACT]
BUTTONS = [
'Continue',
'Next',
'Step',
'Where',
'Up',
'Down'
]
def __init__(self, parent=None):
super().__init__()
if not parent:
self.parentWidget = QtWidgets.QMainWindow()
else:
self.parentWidget = parent
# Change Window Modality, otherwise parentWidget won't let you use this widget
if self.parentWidget.windowModality() == QtCore.Qt.WindowModality.ApplicationModal:
self.parentWidget.hide()
self.parentWidget.setWindowModality(QtCore.Qt.WindowModality.NonModal)
self.parentWidget.showNormal()
# Set Icon
if icon_file_path and os.path.exists(icon_file_path):
self.setWindowIcon(QtGui.QIcon(icon_file_path))
# Set Flags
self.setWindowFlags(
QtCore.Qt.WindowSystemMenuHint |
QtCore.Qt.WindowTitleHint |
QtCore.Qt.WindowCloseButtonHint
)
# Resize
self.resize(513, 300)
# Create Layout
self.main_layout = QtWidgets.QHBoxLayout()
self.setLayout(self.main_layout)
self.setWindowTitle("HelpUs {}".format(__version__))
# Create Content Layouts
self.ConsoleLayout = QtWidgets.QVBoxLayout()
self.ButtonsLayout = QtWidgets.QVBoxLayout()
self.main_layout.addLayout(self.ButtonsLayout)
self.main_layout.addLayout(self.ConsoleLayout)
# Create OutputConsole
self.console = QtWidgets.QTextEdit(parent)
self.console.insertPlainText = self.__insert_plain_text
self.console.keyPressEvent = self.__key_press_event
self.ConsoleLayout.addWidget(self.console)
# Create buttons
for button_text in self.BUTTONS:
# Create Button Name
button_name = 'button_%s' % button_text.lower()
setattr(self, button_name, QtWidgets.QPushButton(button_text))
getattr(self, button_name).clicked.connect(self.__push_button)
# Add Button to Widget
self.ButtonsLayout.addWidget(getattr(self, button_name))
# Init Buffer
self.buffer = io.StringIO()
self.__set_enable_gui(False)
self.showNormal()
def __set_enable_gui(self, state=True):
"""
:param state:
:return:
"""
self.console.setEnabled(state)
for button_text in self.BUTTONS:
# Get Button Name
button_name = 'button_%s' % button_text.lower()
getattr(self, button_name).setEnabled(state)
if state:
self.console.setFocus()
def redirect_outerr_stream(self):
"""
:return:
"""
# Link Stream Output
XStream.stdout().messageWritten.connect(self.console.insertPlainText)
XStream.stderr().messageWritten.connect(self.console.insertPlainText)
def readline(self):
"""
:return:
"""
if not self.console.isEnabled():
self.__set_enable_gui(True)
# Reset Buffer
self.__reset_buffer()
        # pdb blocks on sys.stdin.readline(); spin the Qt event loop until the user submits a command via the console or a button
while self.buffer.tell() == 0:
QtCore.QCoreApplication.processEvents()
value = self.buffer.getvalue()
return value
def __key_press_event(self, event):
"""
:param event:
:return:
"""
# Get Last Line
document = self.console.document()
line_index = document.lineCount()
raw_last_line = document.findBlockByLineNumber(line_index - 1).text()
text = ''
current_hook = ''
        # Strip the leading pdb/interact prompt ('(Pdb) ' or '>>> ') from the last line
if raw_last_line:
for hook in self.HOOKS:
if raw_last_line.startswith(hook):
current_hook = hook
text = raw_last_line[len(hook):]
break
else:
text = raw_last_line
# Get Cursor position
line_from_zero = line_index - 1
current_cursor_line = self.console.textCursor().blockNumber()
current_cursor_column = self.console.textCursor().columnNumber()
# If Enter was pressed -> Process Expression
if event.key() == QtCore.Qt.Key.Key_Return and text:
# Consider Custom Clear Screen Command
if text == 'cls':
self.__clear_screen(raw_last_line)
return
# Replace Line Break with Enter
if self.HOOK_LINE_BREAK == text:
text = '\r\n'
elif self.HOOK_LINE_BREAK in text:
# Replace Line Break with tab
text = text.replace(self.HOOK_LINE_BREAK, '\t')
current_hook = self.HOOK_LINE_BREAK
self.__reset_buffer()
self.buffer.write(text)
self.__set_enable_gui(False)
        # Reject deletions when the cursor is not on the last line or sits inside the prompt prefix
if event.key() == QtCore.Qt.Key.Key_Backspace or event.key() == QtCore.Qt.Key.Key_Delete:
if current_cursor_line != line_from_zero or current_cursor_column <= len(current_hook):
return
if event.key() == QtCore.Qt.Key.Key_Home and current_cursor_line == line_from_zero:
if text:
temp_cursor = self.console.textCursor()
temp_cursor.movePosition(
QtGui.QTextCursor.MoveOperation.StartOfLine,
QtGui.QTextCursor.MoveMode.MoveAnchor
)
temp_cursor.movePosition(
QtGui.QTextCursor.MoveOperation.Right,
QtGui.QTextCursor.MoveMode.MoveAnchor,
len(current_hook)
)
self.console.setTextCursor(temp_cursor)
return
# Set Console Text to Black
self.console.setTextColor(QtCore.Qt.GlobalColor.black)
# Execute default method
QtWidgets.QTextEdit.keyPressEvent(self.console, event)
def __push_button(self):
# Read text from Button and use it as pdb keyword
button_scope = self.sender().text().lower()
self.__reset_buffer()
self.buffer.write(button_scope)
self.__set_enable_gui(False)
def __reset_buffer(self):
if isinstance(self.buffer, io.StringIO):
# Clear Buffer
self.buffer.truncate(0)
self.buffer.seek(0)
else:
self.buffer = io.StringIO()
def __insert_plain_text(self, message):
        # Color the output according to the message prefix (pdb prompt, interactive prompt, error text)
if message.startswith(self.HOOK_HEADER):
self.console.setTextColor(QtCore.Qt.GlobalColor.magenta)
QtWidgets.QTextEdit.insertPlainText(self.console, message)
return
elif message.startswith(self.HOOK_INTERACT):
self.console.setTextColor(QtCore.Qt.GlobalColor.darkMagenta)
QtWidgets.QTextEdit.insertPlainText(self.console, message)
return
if message.startswith('***'):
self.console.setTextColor(QtCore.Qt.GlobalColor.red)
QtWidgets.QTextEdit.insertPlainText(self.console, message)
# AutoScroll
self.console.verticalScrollBar().setValue(self.console.verticalScrollBar().maximum())
def __clear_screen(self, text):
current_hook = text
for hook in self.HOOKS:
if hook in current_hook:
current_hook = hook
break
self.console.clear()
self.console.insertPlainText(current_hook)
def get_qtconsole_object():
if isinstance(sys.stdin, MyBreakPoint):
return sys.stdin.console
else:
return MyBreakPoint.console
def setup_breakpoint_hook(parent, method, redirect_streams=False):
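    # Wrap the given method so every call first triggers breakpoint(); sys.stdin is replaced with the MyBreakPoint dialog, so pdb reads its commands from the GUI console.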
def __method(*args, **kwargs):
breakpoint()
return method(*args, **kwargs)
if not isinstance(sys.stdin, MyBreakPoint):
sys.stdin = MyBreakPoint(parent)
else:
# Restore Streams
sys.stdin = sys.__stdin__
sys.stdout = sys.__stdout__
sys.stderr = sys.__stderr__
        raise Exception(
            "Multiple instances are not allowed. It could be supported, but I'm too lazy to go deep with development."
)
if redirect_streams:
sys.stdin.redirect_outerr_stream()
return __method
if __name__ == '__main__':
p = QtWidgets.QApplication(sys.argv)
LOGGER.error('Ceva')
LOGGER.error = setup_breakpoint_hook(None, LOGGER.error, redirect_streams=True)
# LOGGER.error = setup_breakpoint_hook(None, LOGGER.error, redirect_streams=True)
x = 90
LOGGER.error('Altceva')
print(x)
| en | 0.739553 | # Change Window Modality, otherwise parentWidget won't let you use this widget # Set Icon # Set Flags # Resize # Create Layout # Create Content Layouts # Create OutputConsole # Create buttons # Create Button Name # Add Button to Widget # Init Buffer :param state: :return: # Get Button Name :return: # Link Stream Output :return: # Reset Buffer # Check Position :param event: :return: # Get Last Line # Exclude first 6 chars: (Pdb)\s # Get Cursor position # If Enter was pressed -> Process Expression # Consider Custom Clear Screen Command # Replace Line Break with Enter # Replace Line Break with tab # If User want to delete something and there is no value in buffer -> Reject # Set Console Text to Black # Execute default method # Read text from Button and use it as pdb keyword # Clear Buffer # Do some stylistics # AutoScroll # Restore Streams # LOGGER.error = setup_breakpoint_hook(None, LOGGER.error, redirect_streams=True) | 2.116746 | 2 |
biothings/hub/dataindex/indexer_schedule.py | newgene/biothings.api | 30 | 8410 | import math
class Schedule():
def __init__(self, total, batch_size):
self._batch_size = batch_size
self._state = ""
self.total = total
self.scheduled = 0
self.finished = 0
@property
def _batch(self):
return math.ceil(self.scheduled / self._batch_size)
@property
def _batches(self):
return math.ceil(self.total / self._batch_size)
@property
def _percentage(self):
_percentage = self.scheduled / self.total * 100
return "%.1f%%" % _percentage
def suffix(self, string):
return " ".join((
string,
"#%d/%d %s" %
(
self._batch,
self._batches,
self._percentage
)
))
def completed(self):
if self.finished != self.total:
raise ValueError(self.finished, self.total)
def __iter__(self):
return self
def __next__(self):
if self.scheduled >= self.total:
self._state = "pending, waiting for completion,"
raise StopIteration()
self.scheduled += self._batch_size
if self.scheduled > self.total:
self.scheduled = self.total
self._state = self.suffix("running, on batch") + ","
return self._batch
def __str__(self):
return " ".join(f"""
<Schedule {"done" if self.finished >= self.total else self._state}
total={self.total} scheduled={self.scheduled} finished={self.finished}>
""".split())
def test_01():
schedule = Schedule(100, 10)
for batch in schedule:
print(batch)
print(schedule)
def test_02():
schedule = Schedule(25, 10)
for batch in schedule:
print(batch)
print(schedule)
print(schedule.suffix("Task"))
def test_03():
schedule = Schedule(0, 10)
for batch in schedule:
print(batch)
print(schedule)
print(schedule.suffix("Task"))
def test_04():
schedule = Schedule(1, 10)
for batch in schedule:
print(batch)
print(schedule)
print(schedule.suffix("Task"))
if __name__ == "__main__":
test_02()
| import math
class Schedule():
def __init__(self, total, batch_size):
self._batch_size = batch_size
self._state = ""
self.total = total
self.scheduled = 0
self.finished = 0
@property
def _batch(self):
return math.ceil(self.scheduled / self._batch_size)
@property
def _batches(self):
return math.ceil(self.total / self._batch_size)
@property
def _percentage(self):
_percentage = self.scheduled / self.total * 100
return "%.1f%%" % _percentage
def suffix(self, string):
return " ".join((
string,
"#%d/%d %s" %
(
self._batch,
self._batches,
self._percentage
)
))
def completed(self):
if self.finished != self.total:
raise ValueError(self.finished, self.total)
def __iter__(self):
return self
def __next__(self):
if self.scheduled >= self.total:
self._state = "pending, waiting for completion,"
raise StopIteration()
self.scheduled += self._batch_size
if self.scheduled > self.total:
self.scheduled = self.total
self._state = self.suffix("running, on batch") + ","
return self._batch
def __str__(self):
return " ".join(f"""
<Schedule {"done" if self.finished >= self.total else self._state}
total={self.total} scheduled={self.scheduled} finished={self.finished}>
""".split())
def test_01():
schedule = Schedule(100, 10)
for batch in schedule:
print(batch)
print(schedule)
def test_02():
schedule = Schedule(25, 10)
for batch in schedule:
print(batch)
print(schedule)
print(schedule.suffix("Task"))
def test_03():
schedule = Schedule(0, 10)
for batch in schedule:
print(batch)
print(schedule)
print(schedule.suffix("Task"))
def test_04():
schedule = Schedule(1, 10)
for batch in schedule:
print(batch)
print(schedule)
print(schedule.suffix("Task"))
if __name__ == "__main__":
test_02()
| en | 0.327973 | <Schedule {"done" if self.finished >= self.total else self._state} total={self.total} scheduled={self.scheduled} finished={self.finished}> | 3.510472 | 4 |
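# Hedged usage sketch (illustrative only, not part of the module above): driving an
# indexing loop with Schedule; the caller is expected to bump `finished` itself.
def run(total_docs=25, batch_size=10):
    schedule = Schedule(total_docs, batch_size)
    for batch_no in schedule:                 # yields 1, 2, 3, ...
        print(schedule.suffix("indexing"))    # e.g. "indexing #1/3 40.0%"
        done = min(batch_size, total_docs - schedule.finished)
        schedule.finished += done             # report documents actually processed
    schedule.completed()                      # raises ValueError if the counts disagree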
examples/py/async-basic.py | voBits/ccxt | 73 | 8411 | <filename>examples/py/async-basic.py
# -*- coding: utf-8 -*-
import asyncio
import os
import sys
root = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
sys.path.append(root + '/python')
import ccxt.async as ccxt # noqa: E402
async def test_gdax():
gdax = ccxt.gdax()
markets = await gdax.load_markets()
await gdax.close()
return markets
if __name__ == '__main__':
print(asyncio.get_event_loop().run_until_complete(test_gdax()))
| <filename>examples/py/async-basic.py
# -*- coding: utf-8 -*-
import asyncio
import os
import sys
root = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
sys.path.append(root + '/python')
import ccxt.async as ccxt # noqa: E402
async def test_gdax():
gdax = ccxt.gdax()
markets = await gdax.load_markets()
await gdax.close()
return markets
if __name__ == '__main__':
print(asyncio.get_event_loop().run_until_complete(test_gdax()))
| en | 0.610126 | # -*- coding: utf-8 -*- # noqa: E402 | 2.890068 | 3 |
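# Hedged note on the example above: "import ccxt.async" is no longer importable on
# Python 3.7+ (async became a reserved keyword), and newer ccxt releases expose the
# asynchronous API as ccxt.async_support; the gdax exchange id was also retired in
# later versions. A rough modern equivalent (exchange id is just an example):
import asyncio
import ccxt.async_support as ccxt  # successor of the old ccxt.async module

async def fetch_markets():
    exchange = ccxt.binance()
    try:
        return await exchange.load_markets()
    finally:
        await exchange.close()

if __name__ == '__main__':
    print(asyncio.run(fetch_markets()))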
pymclevel/test/__init__.py | bennettdc/MCEdit-Unified | 673 | 8412 | __author__ = 'Rio'
| __author__ = 'Rio'
| none | 1 | 1.004827 | 1 |
|
xview/datasets/wrapper.py | ethz-asl/modular_semantic_segmentation | 20 | 8413 | from abc import ABCMeta, abstractmethod
class DataWrapper:
"""Interface for access to datasets."""
__metaclass__ = ABCMeta
@abstractmethod
def next(self):
"""Returns next minibatch for training."""
return NotImplementedError
| from abc import ABCMeta, abstractmethod
class DataWrapper:
"""Interface for access to datasets."""
__metaclass__ = ABCMeta
@abstractmethod
def next(self):
"""Returns next minibatch for training."""
return NotImplementedError
| en | 0.819686 | Interface for access to datasets. Returns next minibatch for training. | 3.078398 | 3 |
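# Hedged sketch: no concrete DataWrapper ships in this record, so the subclass below
# is illustrative only (ListDataWrapper is not an xview class). Note that
# __metaclass__ is the Python 2 spelling -- under Python 3 the interface would be
# declared as "class DataWrapper(metaclass=ABCMeta)" -- and an abstract method would
# normally raise NotImplementedError rather than return it.
class ListDataWrapper(DataWrapper):
    """Serves fixed-size minibatches from an in-memory list of samples."""
    def __init__(self, samples, batch_size):
        self.samples = samples
        self.batch_size = batch_size
        self._pos = 0

    def next(self):
        batch = self.samples[self._pos:self._pos + self.batch_size]
        self._pos = (self._pos + self.batch_size) % max(len(self.samples), 1)
        return batch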
partd/core.py | jrbourbeau/partd | 2 | 8414 | <gh_stars>1-10
from __future__ import absolute_import
import os
import shutil
import locket
import string
from toolz import memoize
from contextlib import contextmanager
from .utils import nested_get, flatten
# http://stackoverflow.com/questions/295135/turn-a-string-into-a-valid-filename-in-python
valid_chars = "-_.() " + string.ascii_letters + string.digits + os.path.sep
def escape_filename(fn):
""" Escape text so that it is a valid filename
>>> escape_filename('Foo!bar?')
'Foobar'
"""
return ''.join(filter(valid_chars.__contains__, fn))
def filename(path, key):
return os.path.join(path, escape_filename(token(key)))
def token(key):
"""
>>> token('hello')
'hello'
>>> token(('hello', 'world')) # doctest: +SKIP
'hello/world'
"""
if isinstance(key, str):
return key
elif isinstance(key, tuple):
return os.path.join(*map(token, key))
else:
return str(key)
class Interface(object):
def __init__(self):
self._iset_seen = set()
def __setstate__(self, state):
self.__dict__.update(state)
self._iset_seen = set()
def iset(self, key, value, **kwargs):
if key in self._iset_seen:
return
else:
self._iset(key, value, **kwargs)
self._iset_seen.add(key)
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
self.drop()
def iget(self, key):
return self._get([key], lock=False)[0]
def get(self, keys, **kwargs):
if not isinstance(keys, list):
return self.get([keys], **kwargs)[0]
elif any(isinstance(key, list) for key in keys): # nested case
flatkeys = list(flatten(keys))
result = self.get(flatkeys, **kwargs)
return nested_get(keys, dict(zip(flatkeys, result)))
else:
return self._get(keys, **kwargs)
def delete(self, keys, **kwargs):
if not isinstance(keys, list):
return self._delete([keys], **kwargs)
else:
return self._delete(keys, **kwargs)
def pop(self, keys, **kwargs):
with self.partd.lock:
result = self.partd.get(keys, lock=False)
self.partd.delete(keys, lock=False)
return result
| from __future__ import absolute_import
import os
import shutil
import locket
import string
from toolz import memoize
from contextlib import contextmanager
from .utils import nested_get, flatten
# http://stackoverflow.com/questions/295135/turn-a-string-into-a-valid-filename-in-python
valid_chars = "-_.() " + string.ascii_letters + string.digits + os.path.sep
def escape_filename(fn):
""" Escape text so that it is a valid filename
>>> escape_filename('Foo!bar?')
'Foobar'
"""
return ''.join(filter(valid_chars.__contains__, fn))
def filename(path, key):
return os.path.join(path, escape_filename(token(key)))
def token(key):
"""
>>> token('hello')
'hello'
>>> token(('hello', 'world')) # doctest: +SKIP
'hello/world'
"""
if isinstance(key, str):
return key
elif isinstance(key, tuple):
return os.path.join(*map(token, key))
else:
return str(key)
class Interface(object):
def __init__(self):
self._iset_seen = set()
def __setstate__(self, state):
self.__dict__.update(state)
self._iset_seen = set()
def iset(self, key, value, **kwargs):
if key in self._iset_seen:
return
else:
self._iset(key, value, **kwargs)
self._iset_seen.add(key)
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
self.drop()
def iget(self, key):
return self._get([key], lock=False)[0]
def get(self, keys, **kwargs):
if not isinstance(keys, list):
return self.get([keys], **kwargs)[0]
elif any(isinstance(key, list) for key in keys): # nested case
flatkeys = list(flatten(keys))
result = self.get(flatkeys, **kwargs)
return nested_get(keys, dict(zip(flatkeys, result)))
else:
return self._get(keys, **kwargs)
def delete(self, keys, **kwargs):
if not isinstance(keys, list):
return self._delete([keys], **kwargs)
else:
return self._delete(keys, **kwargs)
def pop(self, keys, **kwargs):
with self.partd.lock:
result = self.partd.get(keys, lock=False)
self.partd.delete(keys, lock=False)
return result | en | 0.45349 | # http://stackoverflow.com/questions/295135/turn-a-string-into-a-valid-filename-in-python Escape text so that it is a valid filename >>> escape_filename('Foo!bar?') 'Foobar' >>> token('hello') 'hello' >>> token(('hello', 'world')) # doctest: +SKIP 'hello/world' # nested case | 2.478116 | 2 |
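# Hedged sketch: Interface defers _iset/_get/_delete/drop to concrete backends
# (partd.File, partd.Dict, ...), none of which appear in this record, so the tiny
# in-memory subclass below is illustrative only.
class DictPartd(Interface):
    def __init__(self):
        super(DictPartd, self).__init__()
        self.data = {}

    def _iset(self, key, value, **kwargs):
        self.data[key] = value

    def _get(self, keys, **kwargs):
        return [self.data.get(k, b'') for k in keys]

    def _delete(self, keys, **kwargs):
        for k in keys:
            self.data.pop(k, None)

    def drop(self):
        self.data.clear()

p = DictPartd()
p.iset('x', b'1')
p.iset('y', b'2')
p.iset('x', b'ignored')                  # iset writes each key at most once
print(p.get(['x', 'y']))                 # [b'1', b'2']
print(p.get([['x'], ['y', 'x']]))        # nested keys come back nested
print(token(('hello', 'world')))         # 'hello/world' on POSIX, per the docstring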
pretraining/model_ensemble.py | VITA-Group/Adv-SS-Pretraining | 32 | 8415 | <filename>pretraining/model_ensemble.py
'''
model ensemble for cifar10 // input size(32,32)
'''
import torch
import torchvision
import copy
import torch.nn as nn
from resnetv2 import ResNet50 as resnet50v2
def split_resnet50(model):
return nn.Sequential(
model.conv1,
model.layer1,
model.layer2,
model.layer3
)
class PretrainEnsembleModel(nn.Module):
def __init__(self):
super(PretrainEnsembleModel, self).__init__()
self.blocks = split_resnet50(resnet50v2())
self.layer4_rotation = resnet50v2().layer4
self.layer4_jigsaw = resnet50v2().layer4
self.fc_rotation = nn.Linear(2048, 4)
self.fc_jigsaw = nn.Linear(2048, 31)
self.avgpool1 = nn.AdaptiveAvgPool2d((1,1))
self.avgpool2 = nn.AdaptiveAvgPool2d((1,1))
self.avgpool3 = nn.AdaptiveAvgPool2d((1,1))
def _Normal(self,x):
mean=torch.Tensor([0.485, 0.456, 0.406])
mean=mean[None,:,None,None].cuda()
std = torch.Tensor([0.229, 0.224, 0.225])
std = std[None,:,None,None].cuda()
return x.sub(mean).div(std)
def forward(self, x):
feature_map = self.blocks(self._Normal(x))
return feature_map
| <filename>pretraining/model_ensemble.py
'''
model ensemble for cifar10 // input size(32,32)
'''
import torch
import torchvision
import copy
import torch.nn as nn
from resnetv2 import ResNet50 as resnet50v2
def split_resnet50(model):
return nn.Sequential(
model.conv1,
model.layer1,
model.layer2,
model.layer3
)
class PretrainEnsembleModel(nn.Module):
def __init__(self):
super(PretrainEnsembleModel, self).__init__()
self.blocks = split_resnet50(resnet50v2())
self.layer4_rotation = resnet50v2().layer4
self.layer4_jigsaw = resnet50v2().layer4
self.fc_rotation = nn.Linear(2048, 4)
self.fc_jigsaw = nn.Linear(2048, 31)
self.avgpool1 = nn.AdaptiveAvgPool2d((1,1))
self.avgpool2 = nn.AdaptiveAvgPool2d((1,1))
self.avgpool3 = nn.AdaptiveAvgPool2d((1,1))
def _Normal(self,x):
mean=torch.Tensor([0.485, 0.456, 0.406])
mean=mean[None,:,None,None].cuda()
std = torch.Tensor([0.229, 0.224, 0.225])
std = std[None,:,None,None].cuda()
return x.sub(mean).div(std)
def forward(self, x):
feature_map = self.blocks(self._Normal(x))
return feature_map
| en | 0.375557 | model ensemble for cifar10 // input size(32,32) | 2.601841 | 3 |
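# Hedged sketch: forward() above only returns the shared conv1..layer3 feature map;
# the rotation/jigsaw heads are declared but not wired into forward(), so applying
# them as below is an assumption for illustration, not the authors' training loop.
# _Normal() calls .cuda(), so a GPU is assumed here.
import torch

model = PretrainEnsembleModel().cuda()
x = torch.rand(4, 3, 32, 32).cuda()          # CIFAR-sized batch scaled to [0, 1]
features = model(x)                          # shared trunk output

h = model.layer4_rotation(features)          # task-specific ResNet stage
h = model.avgpool1(h).flatten(1)             # -> (4, 2048)
rotation_logits = model.fc_rotation(h)       # -> (4, 4) rotation classes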
scripts/ccdf.py | glciampaglia/HoaxyBots | 0 | 8416 | # -*- coding: utf-8 -*-
""" Function that implement Complement the Complementary Cumulative
Distribution Function (CCDF).
"""
#
# written by <NAME> <<EMAIL>>
import numpy as np
import pandas as pd
def ccdf(s):
"""
Parameters:
`s`, series, the values of s should be variable to be handled
Return:
a new series `s`, index of s will be X axis (number), value of s
will be Y axis (probability)
"""
s = s.copy()
s = s.sort_values(ascending=True, inplace=False)
s.reset_index(drop=True, inplace=True)
n = len(s)
s.drop_duplicates(keep='first', inplace=True)
X = s.values
Y = [n - i for i in s.index]
return pd.Series(data=Y, index=X) / n
def sum_cdf(s):
s = s.copy()
s = s.value_counts()
s = s.sort_index(ascending=True)
cumulative = []
for i in range(len(s)):
s0 = s.iloc[:i + 1]
cumulative.append(np.inner(s0.index, s0.values))
s = pd.Series(cumulative, index=s.index)
return s / s.max()
def sum_ccdf(s):
"""
Parameters:
`s`, series, the values of s should be variable to be handled
Return:
        a new series `s`, index of s will be X axis (number), values
will be Y axis (sum(X>=x))
"""
s = s.copy()
s = s.value_counts()
s = s.sort_index(ascending=True)
cumulative = []
for i in range(len(s)):
s1 = s.iloc[i:]
cumulative.append(np.inner(s1.index, s1.values))
return pd.Series(cumulative, index=s.index)
| # -*- coding: utf-8 -*-
""" Function that implement Complement the Complementary Cumulative
Distribution Function (CCDF).
"""
#
# written by <NAME> <<EMAIL>>
import numpy as np
import pandas as pd
def ccdf(s):
"""
Parameters:
`s`, series, the values of s should be variable to be handled
Return:
a new series `s`, index of s will be X axis (number), value of s
will be Y axis (probability)
"""
s = s.copy()
s = s.sort_values(ascending=True, inplace=False)
s.reset_index(drop=True, inplace=True)
n = len(s)
s.drop_duplicates(keep='first', inplace=True)
X = s.values
Y = [n - i for i in s.index]
return pd.Series(data=Y, index=X) / n
def sum_cdf(s):
s = s.copy()
s = s.value_counts()
s = s.sort_index(ascending=True)
cumulative = []
for i in range(len(s)):
s0 = s.iloc[:i + 1]
cumulative.append(np.inner(s0.index, s0.values))
s = pd.Series(cumulative, index=s.index)
return s / s.max()
def sum_ccdf(s):
"""
Parameters:
`s`, series, the values of s should be variable to be handled
Return:
        a new series `s`, index of s will be X axis (number), values
will be Y axis (sum(X>=x))
"""
s = s.copy()
s = s.value_counts()
s = s.sort_index(ascending=True)
cumulative = []
for i in range(len(s)):
s1 = s.iloc[i:]
cumulative.append(np.inner(s1.index, s1.values))
return pd.Series(cumulative, index=s.index)
| en | 0.732518 | # -*- coding: utf-8 -*- Function that implement Complement the Complementary Cumulative Distribution Function (CCDF). # # written by <NAME> <<EMAIL>> Parameters: `s`, series, the values of s should be variable to be handled Return: a new series `s`, index of s will be X axis (number), value of s will be Y axis (probability) Parameters: `s`, series, the values of s should be variable to be handled Return: a news series `s`, index of s will be X axis (number), values will be Y axis (sum(X>=x)) | 3.163953 | 3 |
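# Hedged sketch: quick sanity check of the helpers above on a toy sample
# (values chosen arbitrarily for illustration).
import pandas as pd

s = pd.Series([1, 1, 2, 3, 3, 3, 10])
print(ccdf(s))       # P(X >= x) per distinct x: 1.0 at x=1 down to 1/7 at x=10
print(sum_cdf(s))    # normalized cumulative sum of x * count(x)
print(sum_ccdf(s))   # sum over values >= x: 23, 21, 19, 10 indexed by 1, 2, 3, 10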
lifelines/fitters/kaplan_meier_fitter.py | eliracho37/lifelines | 0 | 8417 | # -*- coding: utf-8 -*-
from __future__ import print_function
import numpy as np
import pandas as pd
from lifelines.fitters import UnivariateFitter
from lifelines.utils import _preprocess_inputs, _additive_estimate, StatError, inv_normal_cdf,\
median_survival_times
from lifelines.plotting import plot_loglogs
class KaplanMeierFitter(UnivariateFitter):
"""
Class for fitting the Kaplan-Meier estimate for the survival function.
KaplanMeierFitter( alpha=0.95)
alpha: The alpha value associated with the confidence intervals.
"""
def fit(self, durations, event_observed=None, timeline=None, entry=None, label='KM_estimate',
alpha=None, left_censorship=False, ci_labels=None):
"""
Parameters:
duration: an array, or pd.Series, of length n -- duration subject was observed for
            timeline: return the best estimate at the values in timelines (positively increasing)
            event_observed: an array, or pd.Series, of length n -- True if the death was observed, False if the event
was lost (right-censored). Defaults all True if event_observed==None
entry: an array, or pd.Series, of length n -- relative time when a subject entered the study. This is
useful for left-truncated (not left-censored) observations. If None, all members of the population
were born at time 0.
label: a string to name the column of the estimate.
alpha: the alpha value in the confidence intervals. Overrides the initializing
alpha for this call to fit only.
left_censorship: True if durations and event_observed refer to left censorship events. Default False
ci_labels: add custom column names to the generated confidence intervals
as a length-2 list: [<lower-bound name>, <upper-bound name>]. Default: <label>_lower_<alpha>
Returns:
self, with new properties like 'survival_function_'.
"""
# if the user is interested in left-censorship, we return the cumulative_density_, no survival_function_,
estimate_name = 'survival_function_' if not left_censorship else 'cumulative_density_'
v = _preprocess_inputs(durations, event_observed, timeline, entry)
self.durations, self.event_observed, self.timeline, self.entry, self.event_table = v
self._label = label
alpha = alpha if alpha else self.alpha
log_survival_function, cumulative_sq_ = _additive_estimate(self.event_table, self.timeline,
self._additive_f, self._additive_var,
left_censorship)
if entry is not None:
# a serious problem with KM is that when the sample size is small and there are too few early
            # truncation times, it may happen that the number of patients at risk and the number of deaths are the same.
# we adjust for this using the Breslow-Fleming-Harrington estimator
n = self.event_table.shape[0]
net_population = (self.event_table['entrance'] - self.event_table['removed']).cumsum()
if net_population.iloc[:int(n / 2)].min() == 0:
ix = net_population.iloc[:int(n / 2)].argmin()
raise StatError("""There are too few early truncation times and too many events. S(t)==0 for all t>%.1f. Recommend BreslowFlemingHarringtonFitter.""" % ix)
# estimation
setattr(self, estimate_name, pd.DataFrame(np.exp(log_survival_function), columns=[self._label]))
self.__estimate = getattr(self, estimate_name)
self.confidence_interval_ = self._bounds(cumulative_sq_[:, None], alpha, ci_labels)
self.median_ = median_survival_times(self.__estimate, left_censorship=left_censorship)
# estimation methods
self.predict = self._predict(estimate_name, label)
self.subtract = self._subtract(estimate_name)
self.divide = self._divide(estimate_name)
# plotting functions
self.plot = self._plot_estimate(estimate_name)
setattr(self, "plot_" + estimate_name, self.plot)
self.plot_loglogs = plot_loglogs(self)
return self
def _bounds(self, cumulative_sq_, alpha, ci_labels):
# See http://courses.nus.edu.sg/course/stacar/internet/st3242/handouts/notes2.pdf
alpha2 = inv_normal_cdf((1. + alpha) / 2.)
df = pd.DataFrame(index=self.timeline)
v = np.log(self.__estimate.values)
if ci_labels is None:
ci_labels = ["%s_upper_%.2f" % (self._label, alpha), "%s_lower_%.2f" % (self._label, alpha)]
assert len(ci_labels) == 2, "ci_labels should be a length 2 array."
df[ci_labels[0]] = np.exp(-np.exp(np.log(-v) + alpha2 * np.sqrt(cumulative_sq_) / v))
df[ci_labels[1]] = np.exp(-np.exp(np.log(-v) - alpha2 * np.sqrt(cumulative_sq_) / v))
return df
def _additive_f(self, population, deaths):
np.seterr(invalid='ignore', divide='ignore')
return (np.log(population - deaths) - np.log(population))
def _additive_var(self, population, deaths):
np.seterr(divide='ignore')
return (1. * deaths / (population * (population - deaths))).replace([np.inf], 0)
| # -*- coding: utf-8 -*-
from __future__ import print_function
import numpy as np
import pandas as pd
from lifelines.fitters import UnivariateFitter
from lifelines.utils import _preprocess_inputs, _additive_estimate, StatError, inv_normal_cdf,\
median_survival_times
from lifelines.plotting import plot_loglogs
class KaplanMeierFitter(UnivariateFitter):
"""
Class for fitting the Kaplan-Meier estimate for the survival function.
KaplanMeierFitter( alpha=0.95)
alpha: The alpha value associated with the confidence intervals.
"""
def fit(self, durations, event_observed=None, timeline=None, entry=None, label='KM_estimate',
alpha=None, left_censorship=False, ci_labels=None):
"""
Parameters:
duration: an array, or pd.Series, of length n -- duration subject was observed for
            timeline: return the best estimate at the values in timelines (positively increasing)
            event_observed: an array, or pd.Series, of length n -- True if the death was observed, False if the event
was lost (right-censored). Defaults all True if event_observed==None
entry: an array, or pd.Series, of length n -- relative time when a subject entered the study. This is
useful for left-truncated (not left-censored) observations. If None, all members of the population
were born at time 0.
label: a string to name the column of the estimate.
alpha: the alpha value in the confidence intervals. Overrides the initializing
alpha for this call to fit only.
left_censorship: True if durations and event_observed refer to left censorship events. Default False
ci_labels: add custom column names to the generated confidence intervals
as a length-2 list: [<lower-bound name>, <upper-bound name>]. Default: <label>_lower_<alpha>
Returns:
self, with new properties like 'survival_function_'.
"""
# if the user is interested in left-censorship, we return the cumulative_density_, no survival_function_,
estimate_name = 'survival_function_' if not left_censorship else 'cumulative_density_'
v = _preprocess_inputs(durations, event_observed, timeline, entry)
self.durations, self.event_observed, self.timeline, self.entry, self.event_table = v
self._label = label
alpha = alpha if alpha else self.alpha
log_survival_function, cumulative_sq_ = _additive_estimate(self.event_table, self.timeline,
self._additive_f, self._additive_var,
left_censorship)
if entry is not None:
# a serious problem with KM is that when the sample size is small and there are too few early
            # truncation times, it may happen that the number of patients at risk and the number of deaths are the same.
# we adjust for this using the Breslow-Fleming-Harrington estimator
n = self.event_table.shape[0]
net_population = (self.event_table['entrance'] - self.event_table['removed']).cumsum()
if net_population.iloc[:int(n / 2)].min() == 0:
ix = net_population.iloc[:int(n / 2)].argmin()
raise StatError("""There are too few early truncation times and too many events. S(t)==0 for all t>%.1f. Recommend BreslowFlemingHarringtonFitter.""" % ix)
# estimation
setattr(self, estimate_name, pd.DataFrame(np.exp(log_survival_function), columns=[self._label]))
self.__estimate = getattr(self, estimate_name)
self.confidence_interval_ = self._bounds(cumulative_sq_[:, None], alpha, ci_labels)
self.median_ = median_survival_times(self.__estimate, left_censorship=left_censorship)
# estimation methods
self.predict = self._predict(estimate_name, label)
self.subtract = self._subtract(estimate_name)
self.divide = self._divide(estimate_name)
# plotting functions
self.plot = self._plot_estimate(estimate_name)
setattr(self, "plot_" + estimate_name, self.plot)
self.plot_loglogs = plot_loglogs(self)
return self
def _bounds(self, cumulative_sq_, alpha, ci_labels):
# See http://courses.nus.edu.sg/course/stacar/internet/st3242/handouts/notes2.pdf
alpha2 = inv_normal_cdf((1. + alpha) / 2.)
df = pd.DataFrame(index=self.timeline)
v = np.log(self.__estimate.values)
if ci_labels is None:
ci_labels = ["%s_upper_%.2f" % (self._label, alpha), "%s_lower_%.2f" % (self._label, alpha)]
assert len(ci_labels) == 2, "ci_labels should be a length 2 array."
df[ci_labels[0]] = np.exp(-np.exp(np.log(-v) + alpha2 * np.sqrt(cumulative_sq_) / v))
df[ci_labels[1]] = np.exp(-np.exp(np.log(-v) - alpha2 * np.sqrt(cumulative_sq_) / v))
return df
def _additive_f(self, population, deaths):
np.seterr(invalid='ignore', divide='ignore')
return (np.log(population - deaths) - np.log(population))
def _additive_var(self, population, deaths):
np.seterr(divide='ignore')
return (1. * deaths / (population * (population - deaths))).replace([np.inf], 0)
| en | 0.82713 | # -*- coding: utf-8 -*- Class for fitting the Kaplan-Meier estimate for the survival function. KaplanMeierFitter( alpha=0.95) alpha: The alpha value associated with the confidence intervals. Parameters: duration: an array, or pd.Series, of length n -- duration subject was observed for timeline: return the best estimate at the values in timelines (postively increasing) event_observed: an array, or pd.Series, of length n -- True if the the death was observed, False if the event was lost (right-censored). Defaults all True if event_observed==None entry: an array, or pd.Series, of length n -- relative time when a subject entered the study. This is useful for left-truncated (not left-censored) observations. If None, all members of the population were born at time 0. label: a string to name the column of the estimate. alpha: the alpha value in the confidence intervals. Overrides the initializing alpha for this call to fit only. left_censorship: True if durations and event_observed refer to left censorship events. Default False ci_labels: add custom column names to the generated confidence intervals as a length-2 list: [<lower-bound name>, <upper-bound name>]. Default: <label>_lower_<alpha> Returns: self, with new properties like 'survival_function_'. # if the user is interested in left-censorship, we return the cumulative_density_, no survival_function_, # a serious problem with KM is that when the sample size is small and there are too few early # truncation times, it may happen that is the number of patients at risk and the number of deaths is the same. # we adjust for this using the Breslow-Fleming-Harrington estimator There are too few early truncation times and too many events. S(t)==0 for all t>%.1f. Recommend BreslowFlemingHarringtonFitter. # estimation # estimation methods # plotting functions # See http://courses.nus.edu.sg/course/stacar/internet/st3242/handouts/notes2.pdf | 3.214865 | 3 |
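# Hedged usage sketch (toy numbers, illustrative only): fitting the estimator above
# to a handful of durations with a right-censoring indicator.
import numpy as np

durations = np.array([2.5, 4.0, 4.0, 5.0, 6.0, 6.0])
observed  = np.array([1,   1,   1,   1,   0,   0  ])   # 0 = right-censored

kmf = KaplanMeierFitter()
kmf.fit(durations, event_observed=observed, label='toy')
print(kmf.survival_function_)   # KM estimate S(t) in a column named 'toy'
print(kmf.median_)              # median survival time implied by the estimate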
pydmfet/qcwrap/pyscf_rhf.py | fishjojo/pydmfe | 3 | 8418 | import numpy as np
from pydmfet import tools
from .fermi import find_efermi, entropy_corr
from pyscf import ao2mo, gto, scf, dft, lib
from pydmfet.qcwrap import fermi
import time
from functools import reduce
def scf_oei( OEI, Norb, Nelec, smear_sigma = 0.0):
OEI = 0.5*(OEI.T + OEI)
eigenvals, eigenvecs = np.linalg.eigh( OEI )
idx = np.argmax(abs(eigenvecs), axis=0)
eigenvecs[:,eigenvecs[ idx, np.arange(len(eigenvals)) ]<0] *= -1
Nocc = Nelec//2 #closed shell
e_homo = eigenvals[Nocc-1]
e_lumo = eigenvals[Nocc]
print ('HOMO: ', e_homo, 'LUMO: ', e_lumo)
print ("mo_energy:")
print (eigenvals[:Nocc+5])
e_fermi = e_homo
mo_occ = np.zeros((Norb))
if(smear_sigma < 1e-8): #T=0
mo_occ[:Nocc] = 1.0
else: #finite T
e_fermi, mo_occ = find_efermi(eigenvals, smear_sigma, Nocc, Norb)
mo_occ*=2.0 #closed shell
Ne_error = np.sum(mo_occ) - Nelec
if(Ne_error > 1e-8):
print ('Ne error = ', Ne_error)
print ("fermi energy: ", e_fermi)
np.set_printoptions(precision=4)
flag = mo_occ > 1e-4
print (mo_occ[flag])
np.set_printoptions()
RDM1 = reduce(np.dot, (eigenvecs, np.diag(mo_occ), eigenvecs.T))
RDM1 = (RDM1.T + RDM1)/2.0
energy = np.trace(np.dot(RDM1,OEI))
es = entropy_corr(mo_occ, smear_sigma)
print ('entropy correction: ', es)
energy += es
print ('e_tot = ', energy)
return ( energy, RDM1, eigenvecs, eigenvals, mo_occ )
# The following is deprecated!
class scf_pyscf():
'''
subspace scf
wrapper for scf module of pyscf
'''
def __init__(self, Ne, Norb, mol=None, oei=None, tei=None, ovlp=1, dm0=None, coredm=0, ao2sub=None, mf_method='HF'):
self.mol = mol
self.Ne = Ne
self.Norb = Norb
self.method = mf_method
self.oei = oei
self.tei = tei
self.ovlp = ovlp
self.dm0 = dm0
self.coredm = coredm
self.ao2sub = ao2sub
self.method = mf_method.lower()
self.mf = None
if(self.mol is None):
#what molecule does not matter
self.mol = gto.Mole()
self.mol.build( verbose=0 )
self.mol.atom.append(('C', (0, 0, 0)))
#adjust number of electrons
self.mol.nelectron = Ne
if(self.tei is not None):
self.mol.incore_anyway = True
if(self.method == 'hf'):
self.mf = scf.RHF(self.mol)
self.prep_rhf()
else:
self.mf = scf.RKS(self.mol)
self.mf.xc = self.method
self.prep_rhf()
self.prep_rks()
self.elec_energy = 0.0
self.rdm1 = None
self.mo_coeff = None
self.mo_energy = None
self.mo_occ = None
def prep_rhf(self):
if(self.ovlp == 1):
self.mf.get_ovlp = lambda *args: np.eye( self.Norb )
if(self.oei is not None):
self.mf.get_hcore = lambda *args: self.oei
if(self.tei is not None):
self.mf._eri = ao2mo.restore(8, self.tei, self.Norb)
def prep_rks(self):
if(self.ao2sub is None):
return
#overload dft.rks.get_veff if necessary
self.mf.get_veff = get_veff_rks_decorator(self.ao2sub, self.coredm)
def kernel(self):
self.mf.kernel(self.dm0)
if ( self.mf.converged == False ):
raise Exception("scf not converged!")
rdm1 = self.mf.make_rdm1()
self.rdm1 = 0.5*(rdm1.T + rdm1)
self.elec_energy = self.mf.energy_elec(self.rdm1)[0]
self.mo_coeff = self.mf.mo_coeff
self.mo_energy = self.mf.mo_energy
self.mo_occ = self.mf.mo_occ
def get_veff_rks_decorator(ao2sub, coredm):
def get_veff(ks, mol=None, dm=None, dm_last=0, vhf_last=0, hermi=1):
if mol is None: mol = ks.mol
if dm is None: dm = ks.make_rdm1()
dm_sub = np.asarray(dm) + coredm
dm_ao = tools.dm_sub2ao(dm_sub, ao2sub)
if hasattr(dm, 'mo_coeff'):
mo_coeff_sub = dm.mo_coeff
mo_occ_sub = dm.mo_occ
mo_coeff_ao = tools.mo_sub2ao(mo_coeff_sub, ao2sub)
mo_occ_ao = mo_occ_sub
dm_ao = lib.tag_array(dm_ao, mo_coeff=mo_coeff_ao, mo_occ=mo_occ_ao)
n, exc, vxc_ao, hyb = get_vxc(ks, mol, dm_ao)
vxc = tools.op_ao2sub(vxc_ao, ao2sub)
vj = None
vk = None
if abs(hyb) < 1e-10:
if (ks._eri is None and ks.direct_scf and
getattr(vhf_last, 'vj', None) is not None):
                ddm = np.asarray(dm) - np.asarray(dm_last)
vj = ks.get_jk(mol, ddm, hermi)[0]
vj += vhf_last.vj
else:
vj = ks.get_jk(mol, dm, hermi)[0]
vxc += vj
else:
if (ks._eri is None and ks.direct_scf and
getattr(vhf_last, 'vk', None) is not None):
                ddm = np.asarray(dm) - np.asarray(dm_last)
vj, vk = ks.get_jk(mol, ddm, hermi)
vj += vhf_last.vj
vk += vhf_last.vk
else:
vj, vk = ks.get_jk(mol, dm, hermi)
vxc += vj - vk * (hyb * .5)
exc -= np.einsum('ij,ji', dm, vk) * .5 * hyb*.5
ecoul = np.einsum('ij,ji', dm, vj) * .5
vxc = lib.tag_array(vxc, ecoul=ecoul, exc=exc, vj=vj, vk=vk)
return vxc
return get_veff
def get_vxc(ks, mol, dm, hermi=1):
    ground_state = (isinstance(dm, np.ndarray) and dm.ndim == 2)
if(not ground_state):
raise Exception("fatal error")
if ks.grids.coords is None:
ks.grids.build(with_non0tab=True)
if ks.small_rho_cutoff > 1e-20 and ground_state:
# Filter grids the first time setup grids
t0 = (time.clock(), time.time())
ks.grids = dft.rks.prune_small_rho_grids_(ks, mol, dm, ks.grids)
t1 = tools.timer("prune grid",t0)
if hermi == 2: # because rho = 0
n, exc, vxc = 0, 0, 0
else:
n, exc, vxc = ks._numint.nr_rks(mol, ks.grids, ks.xc, dm)
hyb = ks._numint.hybrid_coeff(ks.xc, spin=mol.spin)
return n, exc, vxc, hyb
'''
def rhf(mol, OEI, TEI, Norb, Nelec, OneDM0=None ):
# Get the RHF solution
OEI = 0.5*(OEI.T + OEI)
#mol = gto.Mole()
#mol.max_memory = 8000
#mol.build( verbose=0 )
#mol.atom.append(('C', (0, 0, 0)))
mol.nelectron = Nelec
mol.incore_anyway = True
mf = pyscf_scf.RHF( mol )
mf.get_hcore = lambda *args: OEI
mf.get_ovlp = lambda *args: np.eye( Norb )
mf._eri = ao2mo.restore(8, TEI, Norb)
mf.max_cycle = 100
#mf.conv_tol = 1e-8
#adiis = pyscf_scf.diis.ADIIS()
#mf.diis = adiis
#mf.verbose = 5
mf.kernel(OneDM0)
if ( mf.converged == False ):
#RDM1 = mf.make_rdm1()
#cdiis = pyscf_scf.diis.SCF_DIIS()
#mf.diis = cdiis
#mf.max_cycle = 200
#mf.kernel(RDM1)
if ( mf.converged == False ):
raise Exception(" rhf not converged!")
return mf
def rks(mol, OEI, TEI, Norb, Nelec, xcfunc, OneDM0=None ):
# Get the RKS solution
OEI = 0.5*(OEI.T + OEI)
#mol = gto.Mole()
#mol.build( verbose=5 )
#mol.atom.append(('C', (0, 0, 0)))
mol.nelectron = Nelec
# mol.incore_anyway = True
mf = pyscf_scf.RKS( mol )
mf.xc = xcfunc.lower()
# mf.get_hcore = lambda *args: OEI
# mf.get_ovlp = lambda *args: np.eye( Norb )
# mf._eri = ao2mo.restore(8, TEI, Norb)
OneDM0 = None
mf.kernel( OneDM0 )
if ( mf.converged == False ):
raise Exception(" rks not converged!")
return mf
def scf(mol, OEI, TEI, Norb, Nelec, OneDM0=None, mf_method = 'HF' ):
# Get the mean-field solution
if(mf_method.lower() == 'hf'):
mf = rhf(mol, OEI, TEI, Norb, Nelec, OneDM0 )
else:
mf = rks(mol, OEI, TEI, Norb, Nelec, mf_method ,OneDM0 )
RDM1 = mf.make_rdm1()
RDM1 = 0.5*(RDM1.T + RDM1)
mo_coeff = mf.mo_coeff
mo_energy = mf.mo_energy
energy = mf.energy_elec(RDM1)[0]
mo = np.zeros([Norb,Norb+1],dtype=float)
mo[:,:-1] = mo_coeff
mo[:,-1] = mo_energy
#print "mo energy"
#print mf.mo_energy
#tools.MatPrint(mf.get_fock(),"fock")
#JK = mf.get_veff(None, dm=RDM1)
#tools.MatPrint(JK,"JK")
#tools.MatPrint(np.dot(mf.get_fock(), mf.mo_coeff),"test")
#tools.MatPrint(mf.mo_coeff,"mo_coeff")
return (energy, RDM1, mo)
'''
| import numpy as np
from pydmfet import tools
from .fermi import find_efermi, entropy_corr
from pyscf import ao2mo, gto, scf, dft, lib
from pydmfet.qcwrap import fermi
import time
from functools import reduce
def scf_oei( OEI, Norb, Nelec, smear_sigma = 0.0):
OEI = 0.5*(OEI.T + OEI)
eigenvals, eigenvecs = np.linalg.eigh( OEI )
idx = np.argmax(abs(eigenvecs), axis=0)
eigenvecs[:,eigenvecs[ idx, np.arange(len(eigenvals)) ]<0] *= -1
Nocc = Nelec//2 #closed shell
e_homo = eigenvals[Nocc-1]
e_lumo = eigenvals[Nocc]
print ('HOMO: ', e_homo, 'LUMO: ', e_lumo)
print ("mo_energy:")
print (eigenvals[:Nocc+5])
e_fermi = e_homo
mo_occ = np.zeros((Norb))
if(smear_sigma < 1e-8): #T=0
mo_occ[:Nocc] = 1.0
else: #finite T
e_fermi, mo_occ = find_efermi(eigenvals, smear_sigma, Nocc, Norb)
mo_occ*=2.0 #closed shell
Ne_error = np.sum(mo_occ) - Nelec
if(Ne_error > 1e-8):
print ('Ne error = ', Ne_error)
print ("fermi energy: ", e_fermi)
np.set_printoptions(precision=4)
flag = mo_occ > 1e-4
print (mo_occ[flag])
np.set_printoptions()
RDM1 = reduce(np.dot, (eigenvecs, np.diag(mo_occ), eigenvecs.T))
RDM1 = (RDM1.T + RDM1)/2.0
energy = np.trace(np.dot(RDM1,OEI))
es = entropy_corr(mo_occ, smear_sigma)
print ('entropy correction: ', es)
energy += es
print ('e_tot = ', energy)
return ( energy, RDM1, eigenvecs, eigenvals, mo_occ )
# The following is deprecated!
class scf_pyscf():
'''
subspace scf
wrapper for scf module of pyscf
'''
def __init__(self, Ne, Norb, mol=None, oei=None, tei=None, ovlp=1, dm0=None, coredm=0, ao2sub=None, mf_method='HF'):
self.mol = mol
self.Ne = Ne
self.Norb = Norb
self.method = mf_method
self.oei = oei
self.tei = tei
self.ovlp = ovlp
self.dm0 = dm0
self.coredm = coredm
self.ao2sub = ao2sub
self.method = mf_method.lower()
self.mf = None
if(self.mol is None):
#what molecule does not matter
self.mol = gto.Mole()
self.mol.build( verbose=0 )
self.mol.atom.append(('C', (0, 0, 0)))
#adjust number of electrons
self.mol.nelectron = Ne
if(self.tei is not None):
self.mol.incore_anyway = True
if(self.method == 'hf'):
self.mf = scf.RHF(self.mol)
self.prep_rhf()
else:
self.mf = scf.RKS(self.mol)
self.mf.xc = self.method
self.prep_rhf()
self.prep_rks()
self.elec_energy = 0.0
self.rdm1 = None
self.mo_coeff = None
self.mo_energy = None
self.mo_occ = None
def prep_rhf(self):
if(self.ovlp == 1):
self.mf.get_ovlp = lambda *args: np.eye( self.Norb )
if(self.oei is not None):
self.mf.get_hcore = lambda *args: self.oei
if(self.tei is not None):
self.mf._eri = ao2mo.restore(8, self.tei, self.Norb)
def prep_rks(self):
if(self.ao2sub is None):
return
#overload dft.rks.get_veff if necessary
self.mf.get_veff = get_veff_rks_decorator(self.ao2sub, self.coredm)
def kernel(self):
self.mf.kernel(self.dm0)
if ( self.mf.converged == False ):
raise Exception("scf not converged!")
rdm1 = self.mf.make_rdm1()
self.rdm1 = 0.5*(rdm1.T + rdm1)
self.elec_energy = self.mf.energy_elec(self.rdm1)[0]
self.mo_coeff = self.mf.mo_coeff
self.mo_energy = self.mf.mo_energy
self.mo_occ = self.mf.mo_occ
def get_veff_rks_decorator(ao2sub, coredm):
def get_veff(ks, mol=None, dm=None, dm_last=0, vhf_last=0, hermi=1):
if mol is None: mol = ks.mol
if dm is None: dm = ks.make_rdm1()
dm_sub = np.asarray(dm) + coredm
dm_ao = tools.dm_sub2ao(dm_sub, ao2sub)
if hasattr(dm, 'mo_coeff'):
mo_coeff_sub = dm.mo_coeff
mo_occ_sub = dm.mo_occ
mo_coeff_ao = tools.mo_sub2ao(mo_coeff_sub, ao2sub)
mo_occ_ao = mo_occ_sub
dm_ao = lib.tag_array(dm_ao, mo_coeff=mo_coeff_ao, mo_occ=mo_occ_ao)
n, exc, vxc_ao, hyb = get_vxc(ks, mol, dm_ao)
vxc = tools.op_ao2sub(vxc_ao, ao2sub)
vj = None
vk = None
if abs(hyb) < 1e-10:
if (ks._eri is None and ks.direct_scf and
getattr(vhf_last, 'vj', None) is not None):
                ddm = np.asarray(dm) - np.asarray(dm_last)
vj = ks.get_jk(mol, ddm, hermi)[0]
vj += vhf_last.vj
else:
vj = ks.get_jk(mol, dm, hermi)[0]
vxc += vj
else:
if (ks._eri is None and ks.direct_scf and
getattr(vhf_last, 'vk', None) is not None):
                ddm = np.asarray(dm) - np.asarray(dm_last)
vj, vk = ks.get_jk(mol, ddm, hermi)
vj += vhf_last.vj
vk += vhf_last.vk
else:
vj, vk = ks.get_jk(mol, dm, hermi)
vxc += vj - vk * (hyb * .5)
exc -= np.einsum('ij,ji', dm, vk) * .5 * hyb*.5
ecoul = np.einsum('ij,ji', dm, vj) * .5
vxc = lib.tag_array(vxc, ecoul=ecoul, exc=exc, vj=vj, vk=vk)
return vxc
return get_veff
def get_vxc(ks, mol, dm, hermi=1):
    ground_state = (isinstance(dm, np.ndarray) and dm.ndim == 2)
if(not ground_state):
raise Exception("fatal error")
if ks.grids.coords is None:
ks.grids.build(with_non0tab=True)
if ks.small_rho_cutoff > 1e-20 and ground_state:
# Filter grids the first time setup grids
t0 = (time.clock(), time.time())
ks.grids = dft.rks.prune_small_rho_grids_(ks, mol, dm, ks.grids)
t1 = tools.timer("prune grid",t0)
if hermi == 2: # because rho = 0
n, exc, vxc = 0, 0, 0
else:
n, exc, vxc = ks._numint.nr_rks(mol, ks.grids, ks.xc, dm)
hyb = ks._numint.hybrid_coeff(ks.xc, spin=mol.spin)
return n, exc, vxc, hyb
'''
def rhf(mol, OEI, TEI, Norb, Nelec, OneDM0=None ):
# Get the RHF solution
OEI = 0.5*(OEI.T + OEI)
#mol = gto.Mole()
#mol.max_memory = 8000
#mol.build( verbose=0 )
#mol.atom.append(('C', (0, 0, 0)))
mol.nelectron = Nelec
mol.incore_anyway = True
mf = pyscf_scf.RHF( mol )
mf.get_hcore = lambda *args: OEI
mf.get_ovlp = lambda *args: np.eye( Norb )
mf._eri = ao2mo.restore(8, TEI, Norb)
mf.max_cycle = 100
#mf.conv_tol = 1e-8
#adiis = pyscf_scf.diis.ADIIS()
#mf.diis = adiis
#mf.verbose = 5
mf.kernel(OneDM0)
if ( mf.converged == False ):
#RDM1 = mf.make_rdm1()
#cdiis = pyscf_scf.diis.SCF_DIIS()
#mf.diis = cdiis
#mf.max_cycle = 200
#mf.kernel(RDM1)
if ( mf.converged == False ):
raise Exception(" rhf not converged!")
return mf
def rks(mol, OEI, TEI, Norb, Nelec, xcfunc, OneDM0=None ):
# Get the RKS solution
OEI = 0.5*(OEI.T + OEI)
#mol = gto.Mole()
#mol.build( verbose=5 )
#mol.atom.append(('C', (0, 0, 0)))
mol.nelectron = Nelec
# mol.incore_anyway = True
mf = pyscf_scf.RKS( mol )
mf.xc = xcfunc.lower()
# mf.get_hcore = lambda *args: OEI
# mf.get_ovlp = lambda *args: np.eye( Norb )
# mf._eri = ao2mo.restore(8, TEI, Norb)
OneDM0 = None
mf.kernel( OneDM0 )
if ( mf.converged == False ):
raise Exception(" rks not converged!")
return mf
def scf(mol, OEI, TEI, Norb, Nelec, OneDM0=None, mf_method = 'HF' ):
# Get the mean-field solution
if(mf_method.lower() == 'hf'):
mf = rhf(mol, OEI, TEI, Norb, Nelec, OneDM0 )
else:
mf = rks(mol, OEI, TEI, Norb, Nelec, mf_method ,OneDM0 )
RDM1 = mf.make_rdm1()
RDM1 = 0.5*(RDM1.T + RDM1)
mo_coeff = mf.mo_coeff
mo_energy = mf.mo_energy
energy = mf.energy_elec(RDM1)[0]
mo = np.zeros([Norb,Norb+1],dtype=float)
mo[:,:-1] = mo_coeff
mo[:,-1] = mo_energy
#print "mo energy"
#print mf.mo_energy
#tools.MatPrint(mf.get_fock(),"fock")
#JK = mf.get_veff(None, dm=RDM1)
#tools.MatPrint(JK,"JK")
#tools.MatPrint(np.dot(mf.get_fock(), mf.mo_coeff),"test")
#tools.MatPrint(mf.mo_coeff,"mo_coeff")
return (energy, RDM1, mo)
'''
| en | 0.333448 | #closed shell #T=0 #finite T #closed shell # The following is deprecated! subspace scf wrapper for scf module of pyscf #what molecule does not matter #adjust number of electrons #overload dft.rks.get_veff if necessary # Filter grids the first time setup grids # because rho = 0 def rhf(mol, OEI, TEI, Norb, Nelec, OneDM0=None ): # Get the RHF solution OEI = 0.5*(OEI.T + OEI) #mol = gto.Mole() #mol.max_memory = 8000 #mol.build( verbose=0 ) #mol.atom.append(('C', (0, 0, 0))) mol.nelectron = Nelec mol.incore_anyway = True mf = pyscf_scf.RHF( mol ) mf.get_hcore = lambda *args: OEI mf.get_ovlp = lambda *args: np.eye( Norb ) mf._eri = ao2mo.restore(8, TEI, Norb) mf.max_cycle = 100 #mf.conv_tol = 1e-8 #adiis = pyscf_scf.diis.ADIIS() #mf.diis = adiis #mf.verbose = 5 mf.kernel(OneDM0) if ( mf.converged == False ): #RDM1 = mf.make_rdm1() #cdiis = pyscf_scf.diis.SCF_DIIS() #mf.diis = cdiis #mf.max_cycle = 200 #mf.kernel(RDM1) if ( mf.converged == False ): raise Exception(" rhf not converged!") return mf def rks(mol, OEI, TEI, Norb, Nelec, xcfunc, OneDM0=None ): # Get the RKS solution OEI = 0.5*(OEI.T + OEI) #mol = gto.Mole() #mol.build( verbose=5 ) #mol.atom.append(('C', (0, 0, 0))) mol.nelectron = Nelec # mol.incore_anyway = True mf = pyscf_scf.RKS( mol ) mf.xc = xcfunc.lower() # mf.get_hcore = lambda *args: OEI # mf.get_ovlp = lambda *args: np.eye( Norb ) # mf._eri = ao2mo.restore(8, TEI, Norb) OneDM0 = None mf.kernel( OneDM0 ) if ( mf.converged == False ): raise Exception(" rks not converged!") return mf def scf(mol, OEI, TEI, Norb, Nelec, OneDM0=None, mf_method = 'HF' ): # Get the mean-field solution if(mf_method.lower() == 'hf'): mf = rhf(mol, OEI, TEI, Norb, Nelec, OneDM0 ) else: mf = rks(mol, OEI, TEI, Norb, Nelec, mf_method ,OneDM0 ) RDM1 = mf.make_rdm1() RDM1 = 0.5*(RDM1.T + RDM1) mo_coeff = mf.mo_coeff mo_energy = mf.mo_energy energy = mf.energy_elec(RDM1)[0] mo = np.zeros([Norb,Norb+1],dtype=float) mo[:,:-1] = mo_coeff mo[:,-1] = mo_energy #print "mo energy" #print mf.mo_energy #tools.MatPrint(mf.get_fock(),"fock") #JK = mf.get_veff(None, dm=RDM1) #tools.MatPrint(JK,"JK") #tools.MatPrint(np.dot(mf.get_fock(), mf.mo_coeff),"test") #tools.MatPrint(mf.mo_coeff,"mo_coeff") return (energy, RDM1, mo) | 1.996903 | 2 |
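# Hedged sketch: scf_oei() above only needs a symmetric one-electron matrix, an
# orbital count and an even electron count, so a toy tight-binding-like matrix is
# enough to exercise it (numbers are illustrative only).
import numpy as np

Norb, Nelec = 4, 4
OEI = -2.0 * np.eye(Norb) - 1.0 * (np.eye(Norb, k=1) + np.eye(Norb, k=-1))

energy, rdm1, mo_coeff, mo_energy, mo_occ = scf_oei(OEI, Norb, Nelec, smear_sigma=0.0)
print(np.trace(rdm1))   # ~ Nelec for the closed-shell occupation
print(energy)           # trace(RDM1 @ OEI) plus the (here negligible) entropy term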
backends/fortify/summarize-fortify.py | tautschnig/one-line-scan | 16 | 8419 | #!/usr/bin/env python
#
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
#
# Parse the report.html of Fortify and create an ASCII summary
import os
import sys
from subprocess import call
from xml.etree import ElementTree
# print usage
if len(sys.argv) != 2:
print "usage summarizy-fortify.py LOGDIR"
sys.exit(1)
# get directory where the logs are placed
logdir=sys.argv[1]
# strip this part of the directory information of
workdirectory = os.getcwd() + '/'
# get the fortify report; first make it valid XML
filename=logdir+'/log/report.html'
call(['perl', '-p', '-i', '-e', 's#<((img|meta) [^>]+)>#<$1/>#', filename])
# make sure we can run this script multiple times on the same html file
call(['perl', '-p', '-i', '-e', 's#//>#/>#', filename])
# parse the html file and jump to the last table
data=ElementTree.parse(filename).getroot()
table=data.find('.//table')[-1]
# iterate over all rows and print their content in a more useable format
for data in table.iter('tr'):
# handle only the rows that contain results
if len(data) != 4:
continue
# extract file information, convert absolute path into relative one
location=data[2].find('a')
# header does not have <a ...>
if location is None:
continue
filename=location.get('href')
filename=filename.replace('file://','')
filename=filename.replace(workdirectory,'')
severity=data[3].text
if severity is None:
severity=data[3].find('span').text
# strip newline and space sequences
problem=data[0].text.replace('\n','').replace('\r','')
short=problem.replace(' ',' ')
while len(short) < len(problem):
problem=short
short=problem.replace(' ',' ')
column=ElementTree.tostring(data[2].findall("*")[0]).split(':')[2]
printstring = filename + ':' + column.strip() + ', ' + \
severity.strip() + ', ' + \
problem
if data[1].text is not None:
printstring = printstring + ', ' + data[1].text
print printstring
| #!/usr/bin/env python
#
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
#
# Parse the report.html of Fortify and create an ASCII summary
import os
import sys
from subprocess import call
from xml.etree import ElementTree
# print usage
if len(sys.argv) != 2:
print "usage summarizy-fortify.py LOGDIR"
sys.exit(1)
# get directory where the logs are placed
logdir=sys.argv[1]
# strip this part of the directory information of
workdirectory = os.getcwd() + '/'
# get the fortify report; first make it valid XML
filename=logdir+'/log/report.html'
call(['perl', '-p', '-i', '-e', 's#<((img|meta) [^>]+)>#<$1/>#', filename])
# make sure we can run this script multiple times on the same html file
call(['perl', '-p', '-i', '-e', 's#//>#/>#', filename])
# parse the html file and jump to the last table
data=ElementTree.parse(filename).getroot()
table=data.find('.//table')[-1]
# iterate over all rows and print their content in a more useable format
for data in table.iter('tr'):
# handle only the rows that contain results
if len(data) != 4:
continue
# extract file information, convert absolute path into relative one
location=data[2].find('a')
# header does not have <a ...>
if location is None:
continue
filename=location.get('href')
filename=filename.replace('file://','')
filename=filename.replace(workdirectory,'')
severity=data[3].text
if severity is None:
severity=data[3].find('span').text
# strip newline and space sequences
problem=data[0].text.replace('\n','').replace('\r','')
short=problem.replace(' ',' ')
while len(short) < len(problem):
problem=short
short=problem.replace(' ',' ')
column=ElementTree.tostring(data[2].findall("*")[0]).split(':')[2]
printstring = filename + ':' + column.strip() + ', ' + \
severity.strip() + ', ' + \
problem
if data[1].text is not None:
printstring = printstring + ', ' + data[1].text
print printstring
| en | 0.802159 | #!/usr/bin/env python # # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"). # You may not use this file except in compliance with the License. # A copy of the License is located at # # http://www.apache.org/licenses/LICENSE-2.0 # # or in the "license" file accompanying this file. This file is distributed # on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either # express or implied. See the License for the specific language governing # permissions and limitations under the License. # # Parse the report.html of Fortify and create an ASCII summary # print usage # get directory where the logs are placed # strip this part of the directory information of # get the fortify report; first make it valid XML #<((img|meta) [^>]+)>#<$1/>#', filename]) # make sure we can run this script multiple times on the same html file #//>#/>#', filename]) # parse the html file and jump to the last table # iterate over all rows and print their content in a more useable format # handle only the rows that contain results # extract file information, convert absolute path into relative one # header does not have <a ...> # strip newline and space sequences | 2.655877 | 3 |
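# Hedged aside (not part of the script above): the two perl calls only make
# <img ...> and <meta ...> tags self-closing so ElementTree can parse the report.
# The same normalisation can be done without perl; this stand-alone helper is
# illustrative and idempotent, mirroring the s#<((img|meta) [^>]+)>#<$1/># step.
import re

def make_report_parseable(path):
    with open(path) as fh:
        html = fh.read()
    html = re.sub(r'<((?:img|meta)\s[^>]*?)\s*/?>', r'<\1/>', html)
    with open(path, 'w') as fh:
        fh.write(html)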
aiolookin/__init__.py | bachya/aiolookin | 0 | 8420 | """Define the aiolookin package."""
from .device import async_get_device # noqa
| """Define the aiolookin package."""
from .device import async_get_device # noqa
| en | 0.321305 | Define the aiolookin package. # noqa | 1.039864 | 1 |
odepy/collision_space.py | yuemingl/ode-python-1 | 9 | 8421 | <reponame>yuemingl/ode-python-1
# -*- coding: utf-8 -*-
from .common import loadOde
from .common import dGeomID
from .common import dSpaceID
from .common import dVector3
from ctypes import POINTER
from ctypes import CFUNCTYPE
from ctypes import c_void_p
from ctypes import c_int32
dNearCallback = CFUNCTYPE(None, c_void_p, dGeomID, dGeomID)
def dSimpleSpaceCreate(space):
if isinstance(space, int):
return loadOde('dSimpleSpaceCreate', dSpaceID, c_int32)(space)
else:
return loadOde('dSimpleSpaceCreate', dSpaceID, dSpaceID)(space)
def dHashSpaceCreate(space):
if isinstance(space, int):
return loadOde('dHashSpaceCreate', dSpaceID, c_int32)(space)
else:
return loadOde('dHashSpaceCreate', dSpaceID, dSpaceID)(space)
dQuadTreeSpaceCreate = loadOde('dQuadTreeSpaceCreate', dSpaceID, dSpaceID, dVector3, dVector3, c_int32)
dSweepAndPruneSpaceCreate = loadOde('dSweepAndPruneSpaceCreate', dSpaceID, dSpaceID, c_int32)
dSpaceDestroy = loadOde('dSpaceDestroy', None, dSpaceID)
dHashSpaceSetLevels = loadOde('dHashSpaceSetLevels', None, dSpaceID, c_int32, c_int32)
dHashSpaceGetLevels = loadOde('dHashSpaceGetLevels', None, dSpaceID, POINTER(c_int32), POINTER(c_int32))
dSpaceSetCleanup = loadOde('dSpaceSetCleanup', None, dSpaceID, c_int32)
dSpaceGetCleanup = loadOde('dSpaceGetCleanup', c_int32, dSpaceID)
dSpaceSetSublevel = loadOde('dSpaceSetSublevel', None, dSpaceID, c_int32)
dSpaceGetSublevel = loadOde('dSpaceGetSublevel', c_int32, dSpaceID)
dSpaceSetManualCleanup = loadOde('dSpaceSetManualCleanup', None, dSpaceID, c_int32)
dSpaceGetManualCleanup = loadOde('dSpaceGetManualCleanup', c_int32, dSpaceID)
dSpaceAdd = loadOde('dSpaceAdd', None, dSpaceID, dGeomID)
dSpaceRemove = loadOde('dSpaceRemove', None, dSpaceID, dGeomID)
dSpaceQuery = loadOde('dSpaceQuery', c_int32, dSpaceID, dGeomID)
dSpaceClean = loadOde('dSpaceClean', None, dSpaceID)
dSpaceGetNumGeoms = loadOde('dSpaceGetNumGeoms', c_int32, dSpaceID)
dSpaceGetGeom = loadOde('dSpaceGetGeom', dGeomID, dSpaceID, c_int32)
dSpaceGetClass = loadOde('dSpaceGetClass', c_int32, dSpaceID)
| # -*- coding: utf-8 -*-
from .common import loadOde
from .common import dGeomID
from .common import dSpaceID
from .common import dVector3
from ctypes import POINTER
from ctypes import CFUNCTYPE
from ctypes import c_void_p
from ctypes import c_int32
dNearCallback = CFUNCTYPE(None, c_void_p, dGeomID, dGeomID)
def dSimpleSpaceCreate(space):
if isinstance(space, int):
return loadOde('dSimpleSpaceCreate', dSpaceID, c_int32)(space)
else:
return loadOde('dSimpleSpaceCreate', dSpaceID, dSpaceID)(space)
def dHashSpaceCreate(space):
if isinstance(space, int):
return loadOde('dHashSpaceCreate', dSpaceID, c_int32)(space)
else:
return loadOde('dHashSpaceCreate', dSpaceID, dSpaceID)(space)
dQuadTreeSpaceCreate = loadOde('dQuadTreeSpaceCreate', dSpaceID, dSpaceID, dVector3, dVector3, c_int32)
dSweepAndPruneSpaceCreate = loadOde('dSweepAndPruneSpaceCreate', dSpaceID, dSpaceID, c_int32)
dSpaceDestroy = loadOde('dSpaceDestroy', None, dSpaceID)
dHashSpaceSetLevels = loadOde('dHashSpaceSetLevels', None, dSpaceID, c_int32, c_int32)
dHashSpaceGetLevels = loadOde('dHashSpaceGetLevels', None, dSpaceID, POINTER(c_int32), POINTER(c_int32))
dSpaceSetCleanup = loadOde('dSpaceSetCleanup', None, dSpaceID, c_int32)
dSpaceGetCleanup = loadOde('dSpaceGetCleanup', c_int32, dSpaceID)
dSpaceSetSublevel = loadOde('dSpaceSetSublevel', None, dSpaceID, c_int32)
dSpaceGetSublevel = loadOde('dSpaceGetSublevel', c_int32, dSpaceID)
dSpaceSetManualCleanup = loadOde('dSpaceSetManualCleanup', None, dSpaceID, c_int32)
dSpaceGetManualCleanup = loadOde('dSpaceGetManualCleanup', c_int32, dSpaceID)
dSpaceAdd = loadOde('dSpaceAdd', None, dSpaceID, dGeomID)
dSpaceRemove = loadOde('dSpaceRemove', None, dSpaceID, dGeomID)
dSpaceQuery = loadOde('dSpaceQuery', c_int32, dSpaceID, dGeomID)
dSpaceClean = loadOde('dSpaceClean', None, dSpaceID)
dSpaceGetNumGeoms = loadOde('dSpaceGetNumGeoms', c_int32, dSpaceID)
dSpaceGetGeom = loadOde('dSpaceGetGeom', dGeomID, dSpaceID, c_int32)
dSpaceGetClass = loadOde('dSpaceGetClass', c_int32, dSpaceID) | en | 0.769321 | # -*- coding: utf-8 -*- | 2.081334 | 2 |
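# Hedged usage sketch: the calls below assume the rest of the odepy bindings expose
# the usual ODE helpers (dInitODE2, dCreateSphere, dCloseODE); exact names and
# import paths depend on the installed package, so treat this as illustrative only.
from odepy import dInitODE2, dCreateSphere, dCloseODE

dInitODE2(0)
space = dHashSpaceCreate(0)           # 0 -> top-level space with no parent
dHashSpaceSetLevels(space, -3, 8)     # hash cell sizes between 2**-3 and 2**8
geom = dCreateSphere(space, 0.5)      # geoms register themselves with the space
print(dSpaceGetNumGeoms(space))       # 1
dSpaceDestroy(space)                  # cleanup mode decides whether geoms go too
dCloseODE()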
bst.py | phildavis17/DS_A | 0 | 8422 | class BSTNode:
def __init__(self, data = None) -> None:
self.data = data
self.left = None
self.right = None
def __repr__(self) -> str:
return(f"BSTNode({self.data})")
def __str__(self) -> str:
return str(self.data)
def __eq__(self, o: object) -> bool:
pass
def __hash__(self) -> int:
pass
class BST:
def __init__(self) -> None:
pass
def insert(self, item: int) -> None:
pass
def remove(self, item: int) -> int:
pass
def swap_nodes(self, item_a: int, item_b: int) -> None:
pass
def rebalance(self) -> None:
pass
def get_min_value(self) -> int:
pass
def get_max_value(self) -> int:
pass
def clear(self) -> None:
pass
    def get_depth(self) -> int:
"""Returns the current depth of the tree."""
pass
def is_bst(self) -> bool:
"""Returns True if the tree is properly configured bst."""
pass
def is_balanced(self) -> bool:
"""
Returns True if the tree is balanced
"""
pass
def is_perfect(self) -> bool:
"""
Returns True if the tree is perfect
"""
pass
def in_order(self):
"""Returns an iterable of the nodes in the tree."""
pass
def pre_order(self):
"""Returns an iterable of the nodes in the tree."""
pass
def post_order(self):
"""Returns an iterable of the nodes in the tree."""
pass
| class BSTNode:
def __init__(self, data = None) -> None:
self.data = data
self.left = None
self.right = None
def __repr__(self) -> str:
return(f"BSTNode({self.data})")
def __str__(self) -> str:
return str(self.data)
def __eq__(self, o: object) -> bool:
pass
def __hash__(self) -> int:
pass
class BST:
def __init__(self) -> None:
pass
def insert(self, item: int) -> None:
pass
def remove(self, item: int) -> int:
pass
def swap_nodes(self, item_a: int, item_b: int) -> None:
pass
def rebalance(self) -> None:
pass
def get_min_value(self) -> int:
pass
def get_max_value(self) -> int:
pass
def clear(self) -> None:
pass
    def get_depth(self) -> int:
"""Returns the current depth of the tree."""
pass
def is_bst(self) -> bool:
"""Returns True if the tree is properly configured bst."""
pass
def is_balanced(self) -> bool:
"""
Returns True if the tree is balanced
"""
pass
def is_perfect(self) -> bool:
"""
Returns True if the tree is perfect
"""
pass
def in_order(self):
"""Returns an iterable of the nodes in the tree."""
pass
def pre_order(self):
"""Returns an iterable of the nodes in the tree."""
pass
def post_order(self):
"""Returns an iterable of the nodes in the tree."""
pass
| en | 0.796132 | Returns the current depth of the tree. Returns True if the tree is properly configured bst. Returns True if the tree is balanced Returns True if the tree is perfect Returns an iterable of the nodes in the tree. Returns an iterable of the nodes in the tree. Returns an iterable of the nodes in the tree. | 3.699344 | 4 |
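# Hedged sketch: the record above is only a stub (every method is `pass`), so one
# possible shape of the insertion logic is shown as a free function for illustration;
# it is not part of the original file.
def bst_insert(root, item):
    """Insert item into the subtree rooted at root and return the subtree root."""
    if root is None or root.data is None:
        return BSTNode(item)
    if item < root.data:
        root.left = bst_insert(root.left, item)
    elif item > root.data:
        root.right = bst_insert(root.right, item)
    return root                        # duplicates are silently ignored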
pctest/test_publish.py | DaveWK/pyth-client | 0 | 8423 | <filename>pctest/test_publish.py<gh_stars>0
#!/usr/bin/python3
# pip3 install websockets
import asyncio
import websockets
import json
import datetime
import sys
class test_publish:
idnum = 1
def __init__( self, sym, price, spread ):
self.symbol = sym
self.pidnum = test_publish.idnum
test_publish.idnum += 1
self.sidnum = test_publish.idnum
test_publish.idnum += 1
self.psubid = -1
self.ssubid = -1
self.price = price
self.spread = spread
def gen_subscribe_price(self):
req = {
'jsonrpc': '2.0',
'method' : 'subscribe_price',
'params' : {
'account': self.account,
'price_type' : 'price'
},
'id': self.sidnum
}
return json.dumps( req )
def gen_subscribe_price_sched(self):
req = {
'jsonrpc': '2.0',
'method' : 'subscribe_price_sched',
'params' : {
'account': self.account,
'price_type' : 'price'
},
'id': self.pidnum
}
return json.dumps( req )
def gen_update_price(self):
req = {
'jsonrpc': '2.0',
'method': 'update_price',
'params':{
'account': self.account,
'price_type': 'price',
'status': 'trading',
'price': self.price,
'conf': self.spread
},
'id': None
}
self.price += self.spread
return json.dumps( req )
def parse_reply( self, msg, allsub ):
# parse subscription replies
subid = msg['result']['subscription']
allsub[subid] = self
if msg['id'] == self.pidnum:
self.psubid = subid;
else:
self.ssubid = subid
async def parse_notify( self, ws, msg ):
# parse subscription notification messages
subid = msg['params']['subscription']
ts = datetime.datetime.utcnow().isoformat()
if subid == self.ssubid:
# aggregate price update
res = msg['params']['result']
price = res['price']
spread = res['conf']
status = res['status']
print( f'{ts} received aggregate price update symbol=' + self.symbol +
f',price={price}, spread={spread}, status={status}' )
else:
# request to submit price
print( f'{ts} submit price to block-chain symbol=' + self.symbol +
f',price={self.price}, spread={self.spread}, subscription={subid}')
await ws.send( self.gen_update_price() )
async def subscribe( self, acct, ws, allids ):
# submit initial subscriptions
self.account = acct
allids[self.pidnum] = self
allids[self.sidnum] = self
await ws.send( self.gen_subscribe_price() )
await ws.send( self.gen_subscribe_price_sched() )
# websocket event loop
async def poll( uri ):
# connect to pythd
ws = await websockets.connect(uri)
# submit subscriptions to pythd
allids = {}
allsub = {}
allsym = {}
sym1 = test_publish( 'SYMBOL1/USD', 10000, 100 )
sym2 = test_publish( 'SYMBOL2/USD', 2000000, 20000 )
allsym[sym1.symbol] = sym1
allsym[sym2.symbol] = sym2
# lookup accounts by symbol and subscribe
req = { 'jsonrpc': '2.0', 'method': 'get_product_list', 'id': None }
await ws.send( json.dumps( req ) )
msg = json.loads( await ws.recv() )
for prod in msg['result']:
sym = prod['attr_dict']['symbol']
for px in prod['price']:
if sym in allsym and px['price_type'] == 'price':
await allsym[sym].subscribe( px['account'], ws, allids );
# poll for updates from pythd
while True:
msg = json.loads( await ws.recv() )
# print(msg)
if 'error' in msg:
ts = datetime.datetime.utcnow().isoformat()
code = msg['error']['code']
emsg = msg['error']['message']
print( f'{ts} error code: {code} msg: {emsg}' )
sys.exit(1)
elif 'result' in msg:
msgid = msg['id']
if msgid in allids:
allids[msgid].parse_reply( msg, allsub )
else:
subid = msg['params']['subscription']
if subid in allsub:
await allsub[subid].parse_notify( ws, msg )
# connect to pythd, subscribe to and start publishing on two symbols
if __name__ == '__main__':
uri='ws://localhost:8910'
eloop = asyncio.get_event_loop()
try:
eloop.run_until_complete( poll( uri ) )
except ConnectionRefusedError:
print( f'connection refused uri={uri}' )
sys.exit(1)
| <filename>pctest/test_publish.py<gh_stars>0
#!/usr/bin/python3
# pip3 install websockets
import asyncio
import websockets
import json
import datetime
import sys
class test_publish:
idnum = 1
def __init__( self, sym, price, spread ):
self.symbol = sym
self.pidnum = test_publish.idnum
test_publish.idnum += 1
self.sidnum = test_publish.idnum
test_publish.idnum += 1
self.psubid = -1
self.ssubid = -1
self.price = price
self.spread = spread
def gen_subscribe_price(self):
req = {
'jsonrpc': '2.0',
'method' : 'subscribe_price',
'params' : {
'account': self.account,
'price_type' : 'price'
},
'id': self.sidnum
}
return json.dumps( req )
def gen_subscribe_price_sched(self):
req = {
'jsonrpc': '2.0',
'method' : 'subscribe_price_sched',
'params' : {
'account': self.account,
'price_type' : 'price'
},
'id': self.pidnum
}
return json.dumps( req )
def gen_update_price(self):
req = {
'jsonrpc': '2.0',
'method': 'update_price',
'params':{
'account': self.account,
'price_type': 'price',
'status': 'trading',
'price': self.price,
'conf': self.spread
},
'id': None
}
self.price += self.spread
return json.dumps( req )
def parse_reply( self, msg, allsub ):
# parse subscription replies
subid = msg['result']['subscription']
allsub[subid] = self
if msg['id'] == self.pidnum:
self.psubid = subid
else:
self.ssubid = subid
async def parse_notify( self, ws, msg ):
# parse subscription notification messages
subid = msg['params']['subscription']
ts = datetime.datetime.utcnow().isoformat()
if subid == self.ssubid:
# aggregate price update
res = msg['params']['result']
price = res['price']
spread = res['conf']
status = res['status']
print( f'{ts} received aggregate price update symbol=' + self.symbol +
f',price={price}, spread={spread}, status={status}' )
else:
# request to submit price
print( f'{ts} submit price to block-chain symbol=' + self.symbol +
f',price={self.price}, spread={self.spread}, subscription={subid}')
await ws.send( self.gen_update_price() )
async def subscribe( self, acct, ws, allids ):
# submit initial subscriptions
self.account = acct
allids[self.pidnum] = self
allids[self.sidnum] = self
await ws.send( self.gen_subscribe_price() )
await ws.send( self.gen_subscribe_price_sched() )
# websocket event loop
async def poll( uri ):
# connect to pythd
ws = await websockets.connect(uri)
# submit subscriptions to pythd
allids = {}
allsub = {}
allsym = {}
sym1 = test_publish( 'SYMBOL1/USD', 10000, 100 )
sym2 = test_publish( 'SYMBOL2/USD', 2000000, 20000 )
allsym[sym1.symbol] = sym1
allsym[sym2.symbol] = sym2
# lookup accounts by symbol and subscribe
req = { 'jsonrpc': '2.0', 'method': 'get_product_list', 'id': None }
await ws.send( json.dumps( req ) )
msg = json.loads( await ws.recv() )
for prod in msg['result']:
sym = prod['attr_dict']['symbol']
for px in prod['price']:
if sym in allsym and px['price_type'] == 'price':
await allsym[sym].subscribe( px['account'], ws, allids );
# poll for updates from pythd
while True:
msg = json.loads( await ws.recv() )
# print(msg)
if 'error' in msg:
ts = datetime.datetime.utcnow().isoformat()
code = msg['error']['code']
emsg = msg['error']['message']
print( f'{ts} error code: {code} msg: {emsg}' )
sys.exit(1)
elif 'result' in msg:
msgid = msg['id']
if msgid in allids:
allids[msgid].parse_reply( msg, allsub )
else:
subid = msg['params']['subscription']
if subid in allsub:
await allsub[subid].parse_notify( ws, msg )
# connect to pythd, subscribe to and start publishing on two symbols
if __name__ == '__main__':
uri='ws://localhost:8910'
eloop = asyncio.get_event_loop()
try:
eloop.run_until_complete( poll( uri ) )
except ConnectionRefusedError:
print( f'connection refused uri={uri}' )
sys.exit(1)
| en | 0.747662 | #!/usr/bin/python3 # pip3 install websockets # parse subscription replies # parse subscription notification messages # aggregate price update # request to submit price # submmit initial subscriptions # wbsocket event loop # connect to pythd # submit subscriptions to pythd # lookup accounts by symbol and subscribe # poll for updates from pythd # print(msg) # connect to pythd, subscribe to and start publishing on two symbols | 2.31827 | 2 |
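The test publisher above multiplexes two kinds of pythd messages over one websocket: replies to its own requests (matched by id in parse_reply) and asynchronous subscription notifications (matched by params.subscription in parse_notify). Below is a condensed sketch of that dispatch rule with the websocket plumbing stripped away; the message shapes are copied from the script above, while the handler tables are illustrative.

import json

def dispatch(raw_msg, handlers_by_id, handlers_by_subscription):
    """Route one pythd JSON-RPC message to the matching handler."""
    msg = json.loads(raw_msg)
    if 'error' in msg:
        raise RuntimeError(msg['error']['message'])
    if 'result' in msg and msg.get('id') in handlers_by_id:
        # reply to one of our requests, e.g. subscribe_price or get_product_list
        return handlers_by_id[msg['id']](msg)
    subid = msg.get('params', {}).get('subscription')
    if subid in handlers_by_subscription:
        # notification for an existing subscription (price update or publish request)
        return handlers_by_subscription[subid](msg)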
Python/other/merge_interval.py | TechSpiritSS/NeoAlgo | 897 | 8424 | '''
Given an array of intervals, merge all overlapping intervals,
and return an array of the non-overlapping intervals that cover all the intervals in the input.
Input: intervals = [[1,3],[2,6],[8,10],[15,18]]
Output: [[1,6],[8,10],[15,18]]
Explanation: Since intervals [1,3] and [2,6] overlaps, merge them into [1,6].
'''
def merge(intervals):
#sort the array
intervals.sort()
#take another empty list
intervals_stack = []
for pair in intervals:
if len(intervals_stack) == 0:
intervals_stack.append(pair) #adding all the number in intervals elements in empty list
#check number is equal or greater and less than pop elements
else:
current_pair = intervals_stack[-1]
if current_pair[1]>=pair[0]:
intervals_stack.pop()
if current_pair[1]<pair[1]:
new_pair = [current_pair[0],pair[1]]
intervals_stack.append(new_pair)
else:
new_pair = [current_pair[0],current_pair[1]]
intervals_stack.append(new_pair)
else:
intervals_stack.append(pair)
# result
return intervals_stack
if __name__ == '__main__':
R = int(input("Enter the number of rows:"))
C = int(input("Enter the number of columns:"))
interval = [[int(input("Enter the elements: ")) for x in range (C)] for y in range(R)]
print("Overlapping interval: ",interval)
print("Non-overlapping intervals: ",merge(interval))
"""
Time complexity : O(n log n), dominated by the sort; the merge scan is linear
Space complexity : O(n) for the output list
INPUT:-
Enter the number of rows:4
Enter the number of columns:2
Enter the elements: 1
Enter the elements: 3
Enter the elements: 2
Enter the elements: 6
Enter the elements: 8
Enter the elements: 10
Enter the elements: 15
Enter the elements: 18
OUTPUT:-
Overlapping interval: [[1, 3], [2, 6], [8, 10], [15, 18]]
Non-overlapping intervals: [[1, 6], [8, 10], [15, 18]]
"""
| '''
Given an array of intervals, merge all overlapping intervals,
and return an array of the non-overlapping intervals that cover all the intervals in the input.
Input: intervals = [[1,3],[2,6],[8,10],[15,18]]
Output: [[1,6],[8,10],[15,18]]
Explanation: Since intervals [1,3] and [2,6] overlaps, merge them into [1,6].
'''
def merge(intervals):
#sort the array
intervals.sort()
#take another empty list
intervals_stack = []
for pair in intervals:
if len(intervals_stack) == 0:
intervals_stack.append(pair) #adding all the number in intervals elements in empty list
#check number is equal or greater and less than pop elements
else:
current_pair = intervals_stack[-1]
if current_pair[1]>=pair[0]:
intervals_stack.pop()
if current_pair[1]<pair[1]:
new_pair = [current_pair[0],pair[1]]
intervals_stack.append(new_pair)
else:
new_pair = [current_pair[0],current_pair[1]]
intervals_stack.append(new_pair)
else:
intervals_stack.append(pair)
# result
return intervals_stack
if __name__ == '__main__':
R = int(input("Enter the number of rows:"))
C = int(input("Enter the number of columns:"))
interval = [[int(input("Enter the elements: ")) for x in range (C)] for y in range(R)]
print("Overlapping interval: ",interval)
print("Non-overlapping intervals: ",merge(interval))
"""
Time complexity : O(n log n), dominated by the sort; the merge scan is linear
Space complexity : O(n) for the output list
INPUT:-
Enter the number of rows:4
Enter the number of columns:2
Enter the elements: 1
Enter the elements: 3
Enter the elements: 2
Enter the elements: 6
Enter the elements: 8
Enter the elements: 10
Enter the elements: 15
Enter the elements: 18
OUTPUT:-
Overlapping interval: [[1, 3], [2, 6], [8, 10], [15, 18]]
Non-overlapping intervals: [[1, 6], [8, 10], [15, 18]]
"""
| en | 0.737803 | Given an array of intervals, merge all overlapping intervals, and return an array of the non-overlapping intervals that cover all the intervals in the input. Input: intervals = [[1,3],[2,6],[8,10],[15,18]] Output: [[1,6],[8,10],[15,18]] Explanation: Since intervals [1,3] and [2,6] overlaps, merge them into [1,6]. #sort the array #take another empty list #adding all the number in intervals elements in empty list #check number is equal or greater and less than pop elements # result Time complexity : O(n^2) Space complexity : O(n^2) INPUT:- Enter the number of rows:4 Enter the number of columns:2 Enter the elements: 1 Enter the elements: 3 Enter the elements: 2 Enter the elements: 6 Enter the elements: 8 Enter the elements: 10 Enter the elements: 15 Enter the elements: 18 OUTPUT:- Overlapping interval: [[1, 3], [2, 6], [8, 10], [15, 18]] Non-overlapping intervals: [[1, 6], [8, 10], [15, 18]] | 4.393945 | 4 |
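The merge runs in O(n log n) time (the sort dominates; the scan itself touches each interval once) and O(n) extra space for the result stack. The same idea reads a little tighter when each interval is compared directly against the last one kept, as in this equivalent sketch:

def merge_compact(intervals):
    """Equivalent single-pass merge over the sorted intervals."""
    merged = []
    for start, end in sorted(intervals):
        if merged and start <= merged[-1][1]:
            merged[-1][1] = max(merged[-1][1], end)  # overlap: extend the last kept interval
        else:
            merged.append([start, end])
    return merged

assert merge_compact([[1, 3], [2, 6], [8, 10], [15, 18]]) == [[1, 6], [8, 10], [15, 18]]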
tests/test_all.py | InnovativeTravel/humilis-lambdautils | 0 | 8425 | """Unit tests."""
import inspect
import json
from mock import Mock
import os
import sys
import uuid
import pytest
# Add the lambda directory to the python library search path
lambda_dir = os.path.join(
os.path.dirname(inspect.getfile(inspect.currentframe())), '..')
sys.path.append(lambda_dir)
import lambdautils.utils
@pytest.mark.parametrize(
"key,environment,stage,namespace,table,nkey", [
("k", "e", "s", None, "e-s-secrets", "k"),
("k", "e", None, None, "e-dummystage-secrets", "k"),
("k", "e", None, "n", "e-dummystage-secrets", "n:k"),
("k", "e", "s", "n", "e-s-secrets", "n:k")])
def test_get_secret_from_vault(key, environment, stage, namespace, table, nkey,
boto3_resource, boto3_client, monkeypatch):
"""Gets a secret from the DynamoDB secrets vault."""
# Call to the DynamoDB client to retrieve the encrypted secret
monkeypatch.setattr("boto3.resource", boto3_resource)
monkeypatch.setattr("boto3.client", boto3_client)
secret = lambdautils.utils.get_secret(key,
namespace=namespace,
environment=environment,
stage=stage)
assert secret == "dummy"
boto3_client("dynamodb").get_item.assert_called_with(
TableName=table,
Key={"id": {"S": nkey}})
# Call to the KMS client to decrypt the secret
boto3_client('kms').decrypt.assert_called_with(CiphertextBlob="encrypted")
def test_get_secret_from_env(monkeypatch):
"""Get a secret from an (encrypted) environment variable."""
key = str(uuid.uuid4()).replace('-', '.')
value = str(uuid.uuid4())
monkeypatch.setenv(key.replace('.', '_').upper(), value)
secret = lambdautils.utils.get_secret(key)
assert secret == value
def test_get_setting(monkeypatch):
"""Should be an alias for get_secret."""
resp = str(uuid.uuid4())
arg = str(uuid.uuid4())
kwarg = str(uuid.uuid4())
get_secret = Mock(return_value=resp)
monkeypatch.setattr("lambdautils.state.get_secret", get_secret)
resp2 = lambdautils.state.get_setting(arg, kwarg=kwarg)
assert resp2 == resp
get_secret.assert_called_with(arg, kwarg=kwarg)
@pytest.mark.parametrize(
"key,environment,layer,stage,shard_id,namespace,table,consistent,nkey", [
("k", "e", "l", "s", None, None, "e-l-s-state", False, "k"),
("k", "e", "l", "s", None, "n", "e-l-s-state", False, "n:k"),
("k", "e", "l", "s", "s-012", "n", "e-l-s-state", True, "s-012:n:k"),
("k", "e", "l", "s", "s-0001", None, "e-l-s-state", True, "s-0001:k")])
def test_get_state(boto3_resource, monkeypatch, key, environment, layer,
stage, shard_id, namespace, table, consistent, nkey):
"""Get a state value from DynamoDB."""
monkeypatch.setattr("boto3.resource", boto3_resource)
lambdautils.utils.get_state(key, environment=environment, layer=layer,
stage=stage, shard_id=shard_id,
namespace=namespace,
consistent=consistent)
boto3_resource("dynamodb").Table.assert_called_with(table)
if consistent is None:
# The default setting: use consistent reads
consistent = True
boto3_resource("dynamodb").Table().get_item.assert_called_with(
Key={"id": nkey}, ConsistentRead=consistent)
def test_no_state_table(boto3_resource, monkeypatch):
"""Test accessing state variable without having a state table."""
monkeypatch.setattr("boto3.resource", boto3_resource)
monkeypatch.delenv("HUMILIS_ENVIRONMENT")
with pytest.raises(lambdautils.state.StateTableError):
lambdautils.utils.set_state("sample_state_key", "sample_state_value")
with pytest.raises(lambdautils.state.StateTableError):
lambdautils.utils.delete_state("sample_state_key")
with pytest.raises(lambdautils.state.StateTableError):
lambdautils.utils.get_state("sample_state_key")
@pytest.mark.parametrize(
"key,value,environment,layer,stage,shard_id,namespace,table,nkey", [
("k", "v", "e", "l", "s", None, None, "e-l-s-state", "k"),
("k", "v", "e", "l", "s", None, "n", "e-l-s-state", "n:k"),
("k", "v", "e", "l", "s", "s1", "n", "e-l-s-state", "s1:n:k"),
("k", "v", "e", "l", "s", "s2", None, "e-l-s-state", "s2:k")])
def test_set_state(boto3_resource, monkeypatch, key, value, environment, layer,
stage, shard_id, namespace, table, nkey):
"""Tests setting a state variable."""
monkeypatch.setattr("boto3.resource", boto3_resource)
lambdautils.utils.set_state(key, value, environment=environment,
layer=layer, stage=stage, shard_id=shard_id,
namespace=namespace)
boto3_resource("dynamodb").Table.assert_called_with(table)
boto3_resource("dynamodb").Table().put_item.assert_called_with(
Item={"id": nkey, "value": json.dumps(value)})
@pytest.mark.parametrize(
"key,environment,layer,stage,shard_id,namespace,table,nkey", [
("k", "e", "l", "s", None, None, "e-l-s-state", "k"),
("k", "e", "l", "s", None, "n", "e-l-s-state", "n:k"),
("k", "e", "l", "s", "s1", "n", "e-l-s-state", "s1:n:k"),
("k", "e", "l", "s", "s2", None, "e-l-s-state", "s2:k")])
def test_delete_state(boto3_resource, monkeypatch, key, environment,
layer, stage, shard_id, namespace, table, nkey):
"""Tests setting a state variable."""
monkeypatch.setattr("boto3.resource", boto3_resource)
lambdautils.utils.delete_state(key, environment=environment,
layer=layer, stage=stage, shard_id=shard_id,
namespace=namespace)
boto3_resource("dynamodb").Table.assert_called_with(table)
boto3_resource("dynamodb").Table().delete_item.assert_called_with(
Key={"id": nkey})
def test_sentry_monitor_bad_client(boto3_client, raven_client, context,
monkeypatch):
"""Test that sentry_monitor handles raven client errors gracefully."""
class ClientError(Exception):
pass
def raise_error(dsn):
raise ClientError
monkeypatch.setattr("raven.Client", Mock(side_effect=raise_error))
monkeypatch.setattr("boto3.client", boto3_client)
@lambdautils.utils.sentry_monitor(environment="dummyenv",
stage="dummystage")
def lambda_handler(event, context):
pass
lambda_handler(None, context)
raven_client.captureException.assert_not_called()
@pytest.mark.parametrize(
"kstream, fstream, rcalls, kcalls, fcalls, ev", [
("a", "b", 1, 0, 0, {"Records": [{}]}),
(None, "b", 1, 0, 0, {"Records": [{}]}),
(None, None, 1, 0, 0, None),
(None, None, 1, 0, 0, None),
("a", "b", 1, 0, 0, None),
("a", None, 1, 0, 0, None)])
def test_sentry_monitor_exception(
kstream, fstream, rcalls, kcalls, fcalls, ev,
boto3_client, raven_client, context, kinesis_event, monkeypatch):
"""Tests the sentry_monitor decorator when throwing an exception and
lacking an error stream to dump the errors to."""
if ev is None:
# Default to a Kinesis event
ev = kinesis_event
monkeypatch.setattr("boto3.client", boto3_client)
monkeypatch.setattr("raven.Client", Mock(return_value=raven_client))
monkeypatch.setattr("lambdautils.monitor.SentryHandler", Mock())
monkeypatch.setattr("lambdautils.utils.get_secret",
Mock(return_value="dummydsn"))
error_stream = {
"kinesis_stream": kstream,
"firehose_delivery_stream": fstream}
@lambdautils.utils.sentry_monitor(error_stream=error_stream)
def lambda_handler(event, context):
"""Raise an error."""
raise KeyError
with pytest.raises(KeyError):
lambda_handler(ev, context)
# Should have captured only 1 error:
# * The original KeyError
assert raven_client.captureException.call_count == rcalls
# And should have sent the events to the Kinesis and FH error streams
assert boto3_client("kinesis").put_records.call_count == kcalls
assert boto3_client("firehose").put_record_batch.call_count == fcalls
def test_send_to_kinesis_stream(search_events, boto3_client, monkeypatch):
"""Tests sending events to a Kinesis stream."""
monkeypatch.setattr("boto3.client", boto3_client)
lambdautils.utils.send_to_kinesis_stream(search_events, "dummy_stream")
boto3_client("kinesis").put_records.call_count == 1
def test_send_to_delivery_stream(search_events, boto3_client, monkeypatch):
"""Tests sending events to a Firehose delivery stream."""
monkeypatch.setattr("boto3.client", boto3_client)
lambdautils.utils.send_to_delivery_stream(search_events, "dummy_stream")
boto3_client("firehose").put_record_batch.call_count == 1
@pytest.mark.parametrize("deserializer, embed_ts", [
[json.loads, False],
[json.loads, "kinesis_timestamp"],
[None, False]])
def test_unpack_kinesis_event(kinesis_event, deserializer, embed_ts):
"""Extracts json-serialized events from a Kinesis events."""
events, shard_id = lambdautils.utils.unpack_kinesis_event(
kinesis_event, deserializer=deserializer, embed_timestamp=embed_ts)
# There should be one event per kinesis record
assert len(events) == len(kinesis_event["Records"])
assert shard_id == kinesis_event["Records"][0]["eventID"].split(":")[0]
if embed_ts:
assert all(embed_ts in ev for ev in events)
| """Unit tests."""
import inspect
import json
from mock import Mock
import os
import sys
import uuid
import pytest
# Add the lambda directory to the python library search path
lambda_dir = os.path.join(
os.path.dirname(inspect.getfile(inspect.currentframe())), '..')
sys.path.append(lambda_dir)
import lambdautils.utils
@pytest.mark.parametrize(
"key,environment,stage,namespace,table,nkey", [
("k", "e", "s", None, "e-s-secrets", "k"),
("k", "e", None, None, "e-dummystage-secrets", "k"),
("k", "e", None, "n", "e-dummystage-secrets", "n:k"),
("k", "e", "s", "n", "e-s-secrets", "n:k")])
def test_get_secret_from_vault(key, environment, stage, namespace, table, nkey,
boto3_resource, boto3_client, monkeypatch):
"""Gets a secret from the DynamoDB secrets vault."""
# Call to the DynamoDB client to retrieve the encrypted secret
monkeypatch.setattr("boto3.resource", boto3_resource)
monkeypatch.setattr("boto3.client", boto3_client)
secret = lambdautils.utils.get_secret(key,
namespace=namespace,
environment=environment,
stage=stage)
assert secret == "dummy"
boto3_client("dynamodb").get_item.assert_called_with(
TableName=table,
Key={"id": {"S": nkey}})
# Call to the KMS client to decrypt the secret
boto3_client('kms').decrypt.assert_called_with(CiphertextBlob="encrypted")
def test_get_secret_from_env(monkeypatch):
"""Get a secret from an (encrypted) environment variable."""
key = str(uuid.uuid4()).replace('-', '.')
value = str(uuid.uuid4())
monkeypatch.setenv(key.replace('.', '_').upper(), value)
secret = lambdautils.utils.get_secret(key)
assert secret == value
def test_get_setting(monkeypatch):
"""Should be an alias for get_secret."""
resp = str(uuid.uuid4())
arg = str(uuid.uuid4())
kwarg = str(uuid.uuid4())
get_secret = Mock(return_value=resp)
monkeypatch.setattr("lambdautils.state.get_secret", get_secret)
resp2 = lambdautils.state.get_setting(arg, kwarg=kwarg)
assert resp2 == resp
get_secret.assert_called_with(arg, kwarg=kwarg)
@pytest.mark.parametrize(
"key,environment,layer,stage,shard_id,namespace,table,consistent,nkey", [
("k", "e", "l", "s", None, None, "e-l-s-state", False, "k"),
("k", "e", "l", "s", None, "n", "e-l-s-state", False, "n:k"),
("k", "e", "l", "s", "s-012", "n", "e-l-s-state", True, "s-012:n:k"),
("k", "e", "l", "s", "s-0001", None, "e-l-s-state", True, "s-0001:k")])
def test_get_state(boto3_resource, monkeypatch, key, environment, layer,
stage, shard_id, namespace, table, consistent, nkey):
"""Get a state value from DynamoDB."""
monkeypatch.setattr("boto3.resource", boto3_resource)
lambdautils.utils.get_state(key, environment=environment, layer=layer,
stage=stage, shard_id=shard_id,
namespace=namespace,
consistent=consistent)
boto3_resource("dynamodb").Table.assert_called_with(table)
if consistent is None:
# The default setting: use consistent reads
consistent = True
boto3_resource("dynamodb").Table().get_item.assert_called_with(
Key={"id": nkey}, ConsistentRead=consistent)
def test_no_state_table(boto3_resource, monkeypatch):
"""Test accessing state variable without having a state table."""
monkeypatch.setattr("boto3.resource", boto3_resource)
monkeypatch.delenv("HUMILIS_ENVIRONMENT")
with pytest.raises(lambdautils.state.StateTableError):
lambdautils.utils.set_state("sample_state_key", "sample_state_value")
with pytest.raises(lambdautils.state.StateTableError):
lambdautils.utils.delete_state("sample_state_key")
with pytest.raises(lambdautils.state.StateTableError):
lambdautils.utils.get_state("sample_state_key")
@pytest.mark.parametrize(
"key,value,environment,layer,stage,shard_id,namespace,table,nkey", [
("k", "v", "e", "l", "s", None, None, "e-l-s-state", "k"),
("k", "v", "e", "l", "s", None, "n", "e-l-s-state", "n:k"),
("k", "v", "e", "l", "s", "s1", "n", "e-l-s-state", "s1:n:k"),
("k", "v", "e", "l", "s", "s2", None, "e-l-s-state", "s2:k")])
def test_set_state(boto3_resource, monkeypatch, key, value, environment, layer,
stage, shard_id, namespace, table, nkey):
"""Tests setting a state variable."""
monkeypatch.setattr("boto3.resource", boto3_resource)
lambdautils.utils.set_state(key, value, environment=environment,
layer=layer, stage=stage, shard_id=shard_id,
namespace=namespace)
boto3_resource("dynamodb").Table.assert_called_with(table)
boto3_resource("dynamodb").Table().put_item.assert_called_with(
Item={"id": nkey, "value": json.dumps(value)})
@pytest.mark.parametrize(
"key,environment,layer,stage,shard_id,namespace,table,nkey", [
("k", "e", "l", "s", None, None, "e-l-s-state", "k"),
("k", "e", "l", "s", None, "n", "e-l-s-state", "n:k"),
("k", "e", "l", "s", "s1", "n", "e-l-s-state", "s1:n:k"),
("k", "e", "l", "s", "s2", None, "e-l-s-state", "s2:k")])
def test_delete_state(boto3_resource, monkeypatch, key, environment,
layer, stage, shard_id, namespace, table, nkey):
"""Tests setting a state variable."""
monkeypatch.setattr("boto3.resource", boto3_resource)
lambdautils.utils.delete_state(key, environment=environment,
layer=layer, stage=stage, shard_id=shard_id,
namespace=namespace)
boto3_resource("dynamodb").Table.assert_called_with(table)
boto3_resource("dynamodb").Table().delete_item.assert_called_with(
Key={"id": nkey})
def test_sentry_monitor_bad_client(boto3_client, raven_client, context,
monkeypatch):
"""Test that sentry_monitor handles raven client errors gracefully."""
class ClientError(Exception):
pass
def raise_error(dsn):
raise ClientError
monkeypatch.setattr("raven.Client", Mock(side_effect=raise_error))
monkeypatch.setattr("boto3.client", boto3_client)
@lambdautils.utils.sentry_monitor(environment="dummyenv",
stage="dummystage")
def lambda_handler(event, context):
pass
lambda_handler(None, context)
raven_client.captureException.assert_not_called()
@pytest.mark.parametrize(
"kstream, fstream, rcalls, kcalls, fcalls, ev", [
("a", "b", 1, 0, 0, {"Records": [{}]}),
(None, "b", 1, 0, 0, {"Records": [{}]}),
(None, None, 1, 0, 0, None),
(None, None, 1, 0, 0, None),
("a", "b", 1, 0, 0, None),
("a", None, 1, 0, 0, None)])
def test_sentry_monitor_exception(
kstream, fstream, rcalls, kcalls, fcalls, ev,
boto3_client, raven_client, context, kinesis_event, monkeypatch):
"""Tests the sentry_monitor decorator when throwing an exception and
lacking an error stream to dump the errors to."""
if ev is None:
# Default to a Kinesis event
ev = kinesis_event
monkeypatch.setattr("boto3.client", boto3_client)
monkeypatch.setattr("raven.Client", Mock(return_value=raven_client))
monkeypatch.setattr("lambdautils.monitor.SentryHandler", Mock())
monkeypatch.setattr("lambdautils.utils.get_secret",
Mock(return_value="dummydsn"))
error_stream = {
"kinesis_stream": kstream,
"firehose_delivery_stream": fstream}
@lambdautils.utils.sentry_monitor(error_stream=error_stream)
def lambda_handler(event, context):
"""Raise an error."""
raise KeyError
with pytest.raises(KeyError):
lambda_handler(ev, context)
# Should have captured only 1 error:
# * The original KeyError
assert raven_client.captureException.call_count == rcalls
# And should have sent the events to the Kinesis and FH error streams
assert boto3_client("kinesis").put_records.call_count == kcalls
assert boto3_client("firehose").put_record_batch.call_count == fcalls
def test_send_to_kinesis_stream(search_events, boto3_client, monkeypatch):
"""Tests sending events to a Kinesis stream."""
monkeypatch.setattr("boto3.client", boto3_client)
lambdautils.utils.send_to_kinesis_stream(search_events, "dummy_stream")
boto3_client("kinesis").put_records.call_count == 1
def test_send_to_delivery_stream(search_events, boto3_client, monkeypatch):
"""Tests sending events to a Firehose delivery stream."""
monkeypatch.setattr("boto3.client", boto3_client)
lambdautils.utils.send_to_delivery_stream(search_events, "dummy_stream")
boto3_client("firehose").put_record_batch.call_count == 1
@pytest.mark.parametrize("deserializer, embed_ts", [
[json.loads, False],
[json.loads, "kinesis_timestamp"],
[None, False]])
def test_unpack_kinesis_event(kinesis_event, deserializer, embed_ts):
"""Extracts json-serialized events from a Kinesis events."""
events, shard_id = lambdautils.utils.unpack_kinesis_event(
kinesis_event, deserializer=deserializer, embed_timestamp=embed_ts)
# There should be one event per kinesis record
assert len(events) == len(kinesis_event["Records"])
assert shard_id == kinesis_event["Records"][0]["eventID"].split(":")[0]
if embed_ts:
assert all(embed_ts in ev for ev in events)
| en | 0.790384 | Unit tests. # Add the lambda directory to the python library search path Gets a secret from the DynamoDB secrets vault. # Call to the DynamoDB client to retrieve the encrypted secret # Call to the KMS client to decrypt the secret Get a secret from an (encrypted) environment variable. Should be an alias for get_secret. Get a state value from DynamoDB. # The default setting: use consistent reads Test accessing state variable without having a state table. Tests setting a state variable. Tests setting a state variable. Test that sentry_monitor handles raven client errors gracefully. Tests the sentry_monitor decorator when throwing an exception and lacking an error stream where to dump the errors. # Default to a Kinesis event Raise an error. # Should have captured only 1 error: # * The original KeyError # And should have send the events to the Kinesis and FH error streams Tests sending events to a Kinesis stream. Tests sending events to a Firehose delivery stream. Extracts json-serialized events from a Kinesis events. # There should be one event per kinesis record | 2.327455 | 2 |
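The fixtures these tests rely on (boto3_client, boto3_resource, raven_client, context, kinesis_event, search_events) come from a conftest.py that is not part of this file. Assuming they are plain pre-wired mocks, which is how the assertions treat them, a compatible fixture could look like the hypothetical sketch below; it is not the project's actual conftest.

# Hypothetical conftest.py sketch; the real fixtures are defined elsewhere in the project.
import pytest
from mock import MagicMock

@pytest.fixture
def boto3_client():
    """Return a factory that hands out one shared mock client per service name."""
    clients = {}

    def factory(service_name, **kwargs):
        if service_name not in clients:
            clients[service_name] = MagicMock()
        return clients[service_name]

    return factory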
packages/starcheck/post_regress.py | sot/ska_testr | 0 | 8426 | import os
from testr.packages import make_regress_files
regress_files = ['starcheck.txt',
'starcheck/pcad_att_check.txt']
clean = {'starcheck.txt': [(r'\s*Run on.*[\n\r]*', ''),
(os.environ['SKA'], '')],
'starcheck/pcad_att_check.txt': [(os.environ['SKA'], '')]}
make_regress_files(regress_files, clean=clean)
| import os
from testr.packages import make_regress_files
regress_files = ['starcheck.txt',
'starcheck/pcad_att_check.txt']
clean = {'starcheck.txt': [(r'\s*Run on.*[\n\r]*', ''),
(os.environ['SKA'], '')],
'starcheck/pcad_att_check.txt': [(os.environ['SKA'], '')]}
make_regress_files(regress_files, clean=clean)
| none | 1 | 1.863682 | 2 |
|
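The clean mapping above pairs each regression file with a list of (regex, replacement) substitutions: the 'Run on ...' timestamp line and the machine-local $SKA prefix are stripped so regression diffs stay stable across hosts. The snippet below only illustrates the substitution semantics the dict encodes; it is not the testr implementation, and the sample text and path are made up.

import re

def apply_clean(text, substitutions):
    """Apply (pattern, replacement) pairs in order to one file's text."""
    for pattern, replacement in substitutions:
        text = re.sub(pattern, replacement, text)
    return text

sample = "Run on 2024-01-01 12:00 by user\n/fake/ska/root/data/starcheck.txt\n"
print(apply_clean(sample, [(r'\s*Run on.*[\n\r]*', ''), ('/fake/ska/root', '')]))
# prints "/data/starcheck.txt"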
testsite/wsgi.py | stungkit/djaodjin-saas | 0 | 8427 | <reponame>stungkit/djaodjin-saas<gh_stars>0
"""
WSGI config for testsite project.
This module contains the WSGI application used by Django's development server
and any production WSGI deployments. It should expose a module-level variable
named ``application``. Django's ``runserver`` and ``runfcgi`` commands discover
this application via the ``WSGI_APPLICATION`` setting.
Usually you will have the standard Django WSGI application here, but it also
might make sense to replace the whole Django WSGI application with a custom one
that later delegates to the Django one. For example, you could introduce WSGI
middleware here, or combine a Django application with an application of another
framework.
"""
import os, signal
#pylint: disable=invalid-name
def save_coverage(*args, **kwargs):
#pylint:disable=unused-argument
sys.stderr.write("saving coverage\n")
cov.stop()
cov.save()
if os.getenv('DJANGO_COVERAGE'):
import atexit, sys
import coverage
cov = coverage.coverage(data_file=os.path.join(os.getenv('DJANGO_COVERAGE'),
".coverage.%d" % os.getpid()))
cov.start()
atexit.register(save_coverage)
try:
signal.signal(signal.SIGTERM, save_coverage)
except ValueError as e:
# trapping signals does not work with manage
# trying to do so fails with
# ValueError: signal only works in main thread
pass
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "testsite.settings")
# This application object is used by any WSGI server configured to use this
# file. This includes Django's development server, if the WSGI_APPLICATION
# setting points here.
from django.core.wsgi import get_wsgi_application
#pylint: disable=invalid-name
application = get_wsgi_application()
| """
WSGI config for testsite project.
This module contains the WSGI application used by Django's development server
and any production WSGI deployments. It should expose a module-level variable
named ``application``. Django's ``runserver`` and ``runfcgi`` commands discover
this application via the ``WSGI_APPLICATION`` setting.
Usually you will have the standard Django WSGI application here, but it also
might make sense to replace the whole Django WSGI application with a custom one
that later delegates to the Django one. For example, you could introduce WSGI
middleware here, or combine a Django application with an application of another
framework.
"""
import os, signal
#pylint: disable=invalid-name
def save_coverage(*args, **kwargs):
#pylint:disable=unused-argument
sys.stderr.write("saving coverage\n")
cov.stop()
cov.save()
if os.getenv('DJANGO_COVERAGE'):
import atexit, sys
import coverage
cov = coverage.coverage(data_file=os.path.join(os.getenv('DJANGO_COVERAGE'),
".coverage.%d" % os.getpid()))
cov.start()
atexit.register(save_coverage)
try:
signal.signal(signal.SIGTERM, save_coverage)
except ValueError as e:
# trapping signals does not work with manage
# trying to do so fails with
# ValueError: signal only works in main thread
pass
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "testsite.settings")
# This application object is used by any WSGI server configured to use this
# file. This includes Django's development server, if the WSGI_APPLICATION
# setting points here.
from django.core.wsgi import get_wsgi_application
#pylint: disable=invalid-name
application = get_wsgi_application() | en | 0.867101 | WSGI config for testsite project. This module contains the WSGI application used by Django's development server and any production WSGI deployments. It should expose a module-level variable named ``application``. Django's ``runserver`` and ``runfcgi`` commands discover this application via the ``WSGI_APPLICATION`` setting. Usually you will have the standard Django WSGI application here, but it also might make sense to replace the whole Django WSGI application with a custom one that later delegates to the Django one. For example, you could introduce WSGI middleware here, or combine a Django application with an application of another framework. #pylint: disable=invalid-name #pylint:disable=unused-argument # trapping signals does not work with manage # trying to do so fails with # ValueError: signal only works in main thread # This application object is used by any WSGI server configured to use this # file. This includes Django's development server, if the WSGI_APPLICATION # setting points here. #pylint: disable=invalid-name | 1.902266 | 2 |
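Each process writes its own .coverage.<pid> file into the DJANGO_COVERAGE directory, so the per-process files still have to be merged before a report is meaningful. Assuming the standard coverage.py API (this merge step is not part of the file above), the post-run step could look roughly like this:

# Sketch: merge the per-process data files produced under DJANGO_COVERAGE.
import os
import coverage

data_dir = os.environ.get('DJANGO_COVERAGE', '.')
cov = coverage.Coverage(data_file=os.path.join(data_dir, '.coverage'))
cov.combine()   # folds the sibling .coverage.<pid> files into one data set
cov.save()
cov.report()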
authserver/maildaemons/forwarder/server.py | jdelic/authserver | 8 | 8428 | <reponame>jdelic/authserver
#!/usr/bin/env python3 -u
# -* encoding: utf-8 *-
import argparse
import asyncore
import json
import logging
import signal
import sys
import os
from types import FrameType
from typing import Tuple, Sequence, Any, Union, Optional, List, Dict
from concurrent.futures import ThreadPoolExecutor as Pool
import daemon
from django.db.utils import OperationalError
import authserver
from maildaemons.utils import SMTPWrapper, PatchedSMTPChannel, SaneSMTPServer
_log = logging.getLogger(__name__)
pool = Pool()
class ForwarderServer(SaneSMTPServer):
def __init__(self, remote_relay_ip: str, remote_relay_port: int, local_delivery_ip: str,
local_delivery_port: int, *args: Any, **kwargs: Any) -> None:
super().__init__(*args, **kwargs)
self.smtp = SMTPWrapper(
external_ip=remote_relay_ip, external_port=remote_relay_port,
error_relay_ip=local_delivery_ip, error_relay_port=local_delivery_port
)
# ** must be thread-safe, don't modify shared state,
# _log should be thread-safe as stated by the docs. Django ORM should be as well.
def _process_message(self, peer: Tuple[str, int], mailfrom: str, rcpttos: Sequence[str], data: bytes, *,
channel: PatchedSMTPChannel,
**kwargs: Any) -> Optional[str]:
# we can't import the Domain model before Django has been initialized
from mailauth.models import EmailAlias, Domain
data = self.add_received_header(peer, data, channel)
remaining_rcpttos = list(rcpttos) # ensure that new_rcpttos is a mutable list
combined_rcptto = {} # type: Dict[str, List[str]] # { new_mailfrom: [recipients] }
def add_rcptto(mfrom: str, rcpt: Union[str, List]) -> None:
if mfrom in combined_rcptto:
if isinstance(rcpt, list):
combined_rcptto[mfrom] += rcpt
else:
combined_rcptto[mfrom].append(rcpt)
else:
if isinstance(rcpt, list):
combined_rcptto[mfrom] = rcpt
else:
combined_rcptto[mfrom] = [rcpt]
# we're going to modify remaining_rcpttos so we start from its end
for ix in range(len(remaining_rcpttos) - 1, -1, -1):
rcptto = rcpttos[ix].lower()
rcptuser, rcptdomain = rcptto.split("@", 1)
# implement domain catch-all redirect
domain = None # type: Optional[Domain]
try:
domain = Domain.objects.get(name=rcptdomain)
except Domain.DoesNotExist:
pass
except OperationalError:
_log.exception("Database unavailable.")
return "421 Processing problem. Please try again later."
if domain:
if domain.redirect_to:
_log.debug("ix: %s - rcptto: %s - remaining rcpttos: %s", ix, rcptto, remaining_rcpttos)
del remaining_rcpttos[ix]
new_rcptto = "%s@%s" % (rcptuser, domain.redirect_to)
_log.info("%sForwarding email from <%s> to <%s> to domain @%s",
"(Retry) " if "retry" in kwargs and kwargs["retry"] else "",
mailfrom, rcptto, domain.redirect_to)
add_rcptto(mailfrom, new_rcptto)
continue
# follow the same path as the stored procedure authserver_resolve_alias(...)
if "-" in rcptuser:
# convert the first - to a +
user_mailprefix = "%s+%s" % tuple(rcptuser.split("-", 1)) # type: ignore
else:
user_mailprefix = rcptuser
if "+" in user_mailprefix:
# if we had a dashext, or a plusext, we're left with just the prefix after this
user_mailprefix = user_mailprefix.split("+", 1)[0]
try:
alias = EmailAlias.objects.get(mailprefix__iexact=user_mailprefix,
domain__name__iexact=rcptdomain) # type: EmailAlias
except EmailAlias.DoesNotExist:
# OpenSMTPD shouldn't even call us for invalid addresses if we're configured correctly
_log.error("%sUnknown mail address: %s (from: %s, prefix: %s)",
"(Retry) " if "retry" in kwargs and kwargs["retry"] else "",
rcptto, mailfrom, user_mailprefix)
continue
except OperationalError:
_log.exception("Database unavailable.")
return "421 Processing problem. Please try again later."
if alias.forward_to is not None:
# it's a mailing list, forward the email to all connected addresses
del remaining_rcpttos[ix] # remove this recipient from the list
_newmf = mailfrom
if alias.forward_to.new_mailfrom != "":
_newmf = alias.forward_to.new_mailfrom
_log.info("%sForwarding email from <%s> with new sender <%s> to <%s>",
"(Retry) " if "retry" in kwargs and kwargs["retry"] else "",
mailfrom, _newmf, alias.forward_to.addresses)
add_rcptto(_newmf, alias.forward_to.addresses)
# if there are any remaining non-list/non-forward recipients, we inject them back to OpenSMTPD here
if len(remaining_rcpttos) > 0:
_log.info("%sDelivering email from <%s> to remaining recipients <%s>",
"(Retry) " if "retry" in kwargs and kwargs["retry"] else "",
mailfrom, remaining_rcpttos)
add_rcptto(mailfrom, remaining_rcpttos)
if len(combined_rcptto.keys()) == 1:
_log.debug("Only one mail envelope sender, forwarding is atomic")
results = {k: "unsent" for k in combined_rcptto.keys()} # type: Dict[str, str]
for new_mailfrom in combined_rcptto.keys():
_log.debug("Injecting email from <%s> to <%s>", new_mailfrom, combined_rcptto[new_mailfrom])
ret = self.smtp.sendmail(new_mailfrom, combined_rcptto[new_mailfrom], data)
if ret is not None:
results[new_mailfrom] = "failure"
if len(combined_rcptto.keys()) > 1:
_log.error("Non-atomic mail sending failed from <%s> in dict(%s)", combined_rcptto.keys(),
json.dumps(results))
return ret
results[new_mailfrom] = "success"
# TODO: log results
_log.debug("Done processing.")
return None
def process_message(self, *args: Any, **kwargs: Any) -> Optional[str]:
future = pool.submit(ForwarderServer._process_message, self, *args, **kwargs)
return future.result()
def run(_args: argparse.Namespace) -> None:
server = ForwarderServer(_args.remote_relay_ip, _args.remote_relay_port,
_args.local_delivery_ip, _args.local_delivery_port,
(_args.input_ip, _args.input_port), None, decode_data=False,
daemon_name="mailforwarder")
asyncore.loop()
def _sigint_handler(sig: int, frame: FrameType) -> None:
print("CTRL+C exiting")
pool.shutdown(wait=False)
sys.exit(1)
def _main() -> None:
signal.signal(signal.SIGINT, _sigint_handler)
parser = argparse.ArgumentParser(
description="This is a SMTP daemon that is used through OpenSMTPD configuration "
"to check whether incoming emails are addressed to a forwarding email alias "
"and if they are, inject emails to all list delivery addresses / expand the alias."
)
grp_daemon = parser.add_argument_group("Daemon options")
grp_daemon.add_argument("-p", "--pidfile", dest="pidfile", default="./mailforwarder-server.pid",
help="Path to a pidfile")
grp_daemon.add_argument("-u", "--user", dest="user", default=None, help="Drop privileges and switch to this user")
grp_daemon.add_argument("-g", "--group", dest="group", default=None,
help="Drop privileges and switch to this group")
grp_daemon.add_argument("-d", "--daemonize", dest="daemonize", default=False, action="store_true",
help="If set, fork into background")
grp_daemon.add_argument("-v", "--verbose", dest="verbose", default=False, action="store_true",
help="Output extra logging (not implemented right now)")
grp_daemon.add_argument("-C", "--chdir", dest="chdir", default=".",
help="Change working directory to the provided value")
grp_network = parser.add_argument_group("Network options")
grp_network.add_argument("--input-ip", dest="input_ip", default="127.0.0.1", help="The network address to bind to")
grp_network.add_argument("--input-port", dest="input_port", metavar="PORT", type=int, default=10046,
help="The port to bind to")
grp_network.add_argument("--local-delivery-ip", dest="local_delivery_ip", default="127.0.0.1",
help="The OpenSMTPD instance IP for local email to be delivered.")
grp_network.add_argument("--local-delivery-port", dest="local_delivery_port", metavar="PORT", type=int,
default=10045, help="The port where OpenSMTPD listens for local email to be delivered")
grp_network.add_argument("--remote-relay-ip", dest="remote_relay_ip", default="127.0.0.1",
help="The OpenSMTPD instance IP that accepts mail for relay to external domains.")
grp_network.add_argument("--remote-relay-port", dest="remote_relay_port", default=10045,
help="The port where OpenSMTPD listens for mail to relay.")
grp_django = parser.add_argument_group("Django options")
grp_django.add_argument("--settings", dest="django_settings", default="authserver.settings",
help="The Django settings module to use for authserver database access (default: "
"authserver.settings)")
_args = parser.parse_args()
os.environ.setdefault("DJANGO_SETTINGS_MODULE", _args.django_settings)
# noinspection PyUnresolvedReferences
from django.conf import settings # initialize Django
import django
django.setup()
_log.info("mailforwarder v%s: Forwarding Alias Service starting" % authserver.version)
_log.info("Django ORM initialized")
pidfile = open(_args.pidfile, "w")
ctx = daemon.DaemonContext(
working_directory=_args.chdir,
pidfile=pidfile,
uid=_args.user,
gid=_args.group,
detach_process=_args.daemonize,
files_preserve=[1, 2, 3, pidfile],
stdin=sys.stdin,
stdout=sys.stdout,
stderr=sys.stderr,
)
with ctx:
run(_args)
def main() -> None:
try:
_main()
except Exception as e:
_log.critical("Unhandled exception", exc_info=True)
sys.exit(1)
if __name__ == "__main__":
main()
| #!/usr/bin/env python3 -u
# -* encoding: utf-8 *-
import argparse
import asyncore
import json
import logging
import signal
import sys
import os
from types import FrameType
from typing import Tuple, Sequence, Any, Union, Optional, List, Dict
from concurrent.futures import ThreadPoolExecutor as Pool
import daemon
from django.db.utils import OperationalError
import authserver
from maildaemons.utils import SMTPWrapper, PatchedSMTPChannel, SaneSMTPServer
_log = logging.getLogger(__name__)
pool = Pool()
class ForwarderServer(SaneSMTPServer):
def __init__(self, remote_relay_ip: str, remote_relay_port: int, local_delivery_ip: str,
local_delivery_port: int, *args: Any, **kwargs: Any) -> None:
super().__init__(*args, **kwargs)
self.smtp = SMTPWrapper(
external_ip=remote_relay_ip, external_port=remote_relay_port,
error_relay_ip=local_delivery_ip, error_relay_port=local_delivery_port
)
# ** must be thread-safe, don't modify shared state,
# _log should be thread-safe as stated by the docs. Django ORM should be as well.
def _process_message(self, peer: Tuple[str, int], mailfrom: str, rcpttos: Sequence[str], data: bytes, *,
channel: PatchedSMTPChannel,
**kwargs: Any) -> Optional[str]:
# we can't import the Domain model before Django has been initialized
from mailauth.models import EmailAlias, Domain
data = self.add_received_header(peer, data, channel)
remaining_rcpttos = list(rcpttos) # ensure that new_rcpttos is a mutable list
combined_rcptto = {} # type: Dict[str, List[str]] # { new_mailfrom: [recipients] }
def add_rcptto(mfrom: str, rcpt: Union[str, List]) -> None:
if mfrom in combined_rcptto:
if isinstance(rcpt, list):
combined_rcptto[mfrom] += rcpt
else:
combined_rcptto[mfrom].append(rcpt)
else:
if isinstance(rcpt, list):
combined_rcptto[mfrom] = rcpt
else:
combined_rcptto[mfrom] = [rcpt]
# we're going to modify remaining_rcpttos so we start from its end
for ix in range(len(remaining_rcpttos) - 1, -1, -1):
rcptto = rcpttos[ix].lower()
rcptuser, rcptdomain = rcptto.split("@", 1)
# implement domain catch-all redirect
domain = None # type: Optional[Domain]
try:
domain = Domain.objects.get(name=rcptdomain)
except Domain.DoesNotExist:
pass
except OperationalError:
_log.exception("Database unavailable.")
return "421 Processing problem. Please try again later."
if domain:
if domain.redirect_to:
_log.debug("ix: %s - rcptto: %s - remaining rcpttos: %s", ix, rcptto, remaining_rcpttos)
del remaining_rcpttos[ix]
new_rcptto = "%s@%s" % (rcptuser, domain.redirect_to)
_log.info("%sForwarding email from <%s> to <%s> to domain @%s",
"(Retry) " if "retry" in kwargs and kwargs["retry"] else "",
mailfrom, rcptto, domain.redirect_to)
add_rcptto(mailfrom, new_rcptto)
continue
# follow the same path as the stored procedure authserver_resolve_alias(...)
if "-" in rcptuser:
# convert the first - to a +
user_mailprefix = "%s+%s" % tuple(rcptuser.split("-", 1)) # type: ignore
else:
user_mailprefix = rcptuser
if "+" in user_mailprefix:
# if we had a dashext, or a plusext, we're left with just the prefix after this
user_mailprefix = user_mailprefix.split("+", 1)[0]
try:
alias = EmailAlias.objects.get(mailprefix__iexact=user_mailprefix,
domain__name__iexact=rcptdomain) # type: EmailAlias
except EmailAlias.DoesNotExist:
# OpenSMTPD shouldn't even call us for invalid addresses if we're configured correctly
_log.error("%sUnknown mail address: %s (from: %s, prefix: %s)",
"(Retry) " if "retry" in kwargs and kwargs["retry"] else "",
rcptto, mailfrom, user_mailprefix)
continue
except OperationalError:
_log.exception("Database unavailable.")
return "421 Processing problem. Please try again later."
if alias.forward_to is not None:
# it's a mailing list, forward the email to all connected addresses
del remaining_rcpttos[ix] # remove this recipient from the list
_newmf = mailfrom
if alias.forward_to.new_mailfrom != "":
_newmf = alias.forward_to.new_mailfrom
_log.info("%sForwarding email from <%s> with new sender <%s> to <%s>",
"(Retry) " if "retry" in kwargs and kwargs["retry"] else "",
mailfrom, _newmf, alias.forward_to.addresses)
add_rcptto(_newmf, alias.forward_to.addresses)
# if there are any remaining non-list/non-forward recipients, we inject them back to OpenSMTPD here
if len(remaining_rcpttos) > 0:
_log.info("%sDelivering email from <%s> to remaining recipients <%s>",
"(Retry) " if "retry" in kwargs and kwargs["retry"] else "",
mailfrom, remaining_rcpttos)
add_rcptto(mailfrom, remaining_rcpttos)
if len(combined_rcptto.keys()) == 1:
_log.debug("Only one mail envelope sender, forwarding is atomic")
results = {k: "unsent" for k in combined_rcptto.keys()} # type: Dict[str, str]
for new_mailfrom in combined_rcptto.keys():
_log.debug("Injecting email from <%s> to <%s>", new_mailfrom, combined_rcptto[new_mailfrom])
ret = self.smtp.sendmail(new_mailfrom, combined_rcptto[new_mailfrom], data)
if ret is not None:
results[new_mailfrom] = "failure"
if len(combined_rcptto.keys()) > 1:
_log.error("Non-atomic mail sending failed from <%s> in dict(%s)", combined_rcptto.keys(),
json.dumps(results))
return ret
results[new_mailfrom] = "success"
# TODO: log results
_log.debug("Done processing.")
return None
def process_message(self, *args: Any, **kwargs: Any) -> Optional[str]:
future = pool.submit(ForwarderServer._process_message, self, *args, **kwargs)
return future.result()
def run(_args: argparse.Namespace) -> None:
server = ForwarderServer(_args.remote_relay_ip, _args.remote_relay_port,
_args.local_delivery_ip, _args.local_delivery_port,
(_args.input_ip, _args.input_port), None, decode_data=False,
daemon_name="mailforwarder")
asyncore.loop()
def _sigint_handler(sig: int, frame: FrameType) -> None:
print("CTRL+C exiting")
pool.shutdown(wait=False)
sys.exit(1)
def _main() -> None:
signal.signal(signal.SIGINT, _sigint_handler)
parser = argparse.ArgumentParser(
description="This is a SMTP daemon that is used through OpenSMTPD configuration "
"to check whether incoming emails are addressed to a forwarding email alias "
"and if they are, inject emails to all list delivery addresses / expand the alias."
)
grp_daemon = parser.add_argument_group("Daemon options")
grp_daemon.add_argument("-p", "--pidfile", dest="pidfile", default="./mailforwarder-server.pid",
help="Path to a pidfile")
grp_daemon.add_argument("-u", "--user", dest="user", default=None, help="Drop privileges and switch to this user")
grp_daemon.add_argument("-g", "--group", dest="group", default=None,
help="Drop privileges and switch to this group")
grp_daemon.add_argument("-d", "--daemonize", dest="daemonize", default=False, action="store_true",
help="If set, fork into background")
grp_daemon.add_argument("-v", "--verbose", dest="verbose", default=False, action="store_true",
help="Output extra logging (not implemented right now)")
grp_daemon.add_argument("-C", "--chdir", dest="chdir", default=".",
help="Change working directory to the provided value")
grp_network = parser.add_argument_group("Network options")
grp_network.add_argument("--input-ip", dest="input_ip", default="127.0.0.1", help="The network address to bind to")
grp_network.add_argument("--input-port", dest="input_port", metavar="PORT", type=int, default=10046,
help="The port to bind to")
grp_network.add_argument("--local-delivery-ip", dest="local_delivery_ip", default="127.0.0.1",
help="The OpenSMTPD instance IP for local email to be delivered.")
grp_network.add_argument("--local-delivery-port", dest="local_delivery_port", metavar="PORT", type=int,
default=10045, help="The port where OpenSMTPD listens for local email to be delivered")
grp_network.add_argument("--remote-relay-ip", dest="remote_relay_ip", default="127.0.0.1",
help="The OpenSMTPD instance IP that accepts mail for relay to external domains.")
grp_network.add_argument("--remote-relay-port", dest="remote_relay_port", default=10045,
help="The port where OpenSMTPD listens for mail to relay.")
grp_django = parser.add_argument_group("Django options")
grp_django.add_argument("--settings", dest="django_settings", default="authserver.settings",
help="The Django settings module to use for authserver database access (default: "
"authserver.settings)")
_args = parser.parse_args()
os.environ.setdefault("DJANGO_SETTINGS_MODULE", _args.django_settings)
# noinspection PyUnresolvedReferences
from django.conf import settings # initialize Django
import django
django.setup()
_log.info("mailforwarder v%s: Forwarding Alias Service starting" % authserver.version)
_log.info("Django ORM initialized")
pidfile = open(_args.pidfile, "w")
ctx = daemon.DaemonContext(
working_directory=_args.chdir,
pidfile=pidfile,
uid=_args.user,
gid=_args.group,
detach_process=_args.daemonize,
files_preserve=[1, 2, 3, pidfile],
stdin=sys.stdin,
stdout=sys.stdout,
stderr=sys.stderr,
)
with ctx:
run(_args)
def main() -> None:
try:
_main()
except Exception as e:
_log.critical("Unhandled exception", exc_info=True)
sys.exit(1)
if __name__ == "__main__":
main() | en | 0.844925 | #!/usr/bin/env python3 -u # -* encoding: utf-8 *- # ** must be thread-safe, don't modify shared state, # _log should be thread-safe as stated by the docs. Django ORM should be as well. # we can't import the Domain model before Django has been initialized # ensure that new_rcpttos is a mutable list # type: Dict[str, List[str]] # { new_mailfrom: [recipients] } # we're going to modify remaining_rcpttos so we start from its end # implement domain catch-all redirect # type: Optional[Domain] # follow the same path like the stored procedure authserver_resolve_alias(...) # convert the first - to a + # type: ignore # if we had a dashext, or a plusext, we're left with just the prefix after this # type: EmailAlias # OpenSMTPD shouldn't even call us for invalid addresses if we're configured correctly # it's a mailing list, forward the email to all connected addresses # remove this recipient from the list # if there are any remaining non-list/non-forward recipients, we inject them back to OpenSMTPD here # type: Dict[str, str] # TODO: log results # noinspection PyUnresolvedReferences # initialize Django | 1.915445 | 2 |
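The address normalization buried in _process_message (the first '-' in the local part is treated as '+', and anything after the '+' is dropped before the alias lookup) is easier to check in isolation. The helper below restates exactly that rule as a pure function; it is an extraction for illustration, not part of the daemon.

def alias_prefix(local_part):
    """Reduce 'user-ext' / 'user+ext' style local parts to the bare alias prefix."""
    if "-" in local_part:
        local_part = "%s+%s" % tuple(local_part.split("-", 1))  # first '-' becomes '+'
    if "+" in local_part:
        local_part = local_part.split("+", 1)[0]  # drop the extension
    return local_part

assert alias_prefix("alice") == "alice"
assert alias_prefix("alice+newsletter") == "alice"
assert alias_prefix("alice-newsletter") == "alice"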
services/backend/project/api/sites.py | kzkaneoka/custom-job-search | 0 | 8429 | <gh_stars>0
import requests
from bs4 import BeautifulSoup, element
class Indeed:
def __init__(self, words, location, offset):
self.url = "https://www.indeed.com/jobs?as_and={}&l={}&sort=date&start={}".format(
"+".join(set(d.strip().lower() for d in words.split(",") if d)),
"+".join(list(d.lower() for d in location.split(" ") if d)),
int(offset),
)
def extract(self, soup):
if not soup:
return []
jobs = []
for tag in soup.find_all(name="div", attrs={"class": "jobsearch-SerpJobCard"}):
job = {}
for child in tag.children:
if child and type(child) == element.Tag and child.attrs:
if child.attrs["class"][0] == "title":
job["title"] = child.get_text().strip()
for grandchild in child.find_all(name="a"):
if grandchild.has_attr("href"):
job["link"] = (
"https://www.indeed.com" + grandchild["href"]
)
elif child.attrs["class"][0] == "sjcl":
lines = child.get_text().strip().split("\n")
job["company"] = lines[0]
job["location"] = lines[-1]
elif child.attrs["class"][0] == "jobsearch-SerpJobCard-footer":
job["date"] = "n/a"
for grandchild in child.find_all(
name="span", attrs={"class": "date"}
):
job["date"] = grandchild.get_text()
jobs.append(job)
return jobs
def fetch(self):
soup = None
try:
r = requests.get(self.url)
r.raise_for_status()
soup = BeautifulSoup(r.text, "html.parser")
finally:
return soup
def search(self):
soup = self.fetch()
jobs = self.extract(soup)
return jobs
| import requests
from bs4 import BeautifulSoup, element
class Indeed:
def __init__(self, words, location, offset):
self.url = "https://www.indeed.com/jobs?as_and={}&l={}&sort=date&start={}".format(
"+".join(set(d.strip().lower() for d in words.split(",") if d)),
"+".join(list(d.lower() for d in location.split(" ") if d)),
int(offset),
)
def extract(self, soup):
if not soup:
return []
jobs = []
for tag in soup.find_all(name="div", attrs={"class": "jobsearch-SerpJobCard"}):
job = {}
for child in tag.children:
if child and type(child) == element.Tag and child.attrs:
if child.attrs["class"][0] == "title":
job["title"] = child.get_text().strip()
for grandchild in child.find_all(name="a"):
if grandchild.has_attr("href"):
job["link"] = (
"https://www.indeed.com" + grandchild["href"]
)
elif child.attrs["class"][0] == "sjcl":
lines = child.get_text().strip().split("\n")
job["company"] = lines[0]
job["location"] = lines[-1]
elif child.attrs["class"][0] == "jobsearch-SerpJobCard-footer":
job["date"] = "n/a"
for grandchild in child.find_all(
name="span", attrs={"class": "date"}
):
job["date"] = grandchild.get_text()
jobs.append(job)
return jobs
def fetch(self):
soup = None
try:
r = requests.get(self.url)
r.raise_for_status()
soup = BeautifulSoup(r.text, "html.parser")
finally:
return soup
def search(self):
soup = self.fetch()
jobs = self.extract(soup)
return jobs | none | 1 | 3.015776 | 3 |
|
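A short usage sketch for the scraper above; the query values are made up, and the CSS classes it parses only match the Indeed markup the class was written against, so treat it as illustrative rather than guaranteed to return results.

# Illustrative usage of the Indeed scraper defined above.
scraper = Indeed(words="python, flask", location="new york", offset=0)
for job in scraper.search():
    print(job.get("date"), "-", job.get("title"), "@", job.get("company"))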
product_details/utils.py | gene1wood/django-product-details | 0 | 8430 | <reponame>gene1wood/django-product-details<filename>product_details/utils.py
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from product_details import settings_defaults
def settings_fallback(key):
"""Grab user-defined settings, or fall back to default."""
try:
return getattr(settings, key)
except (AttributeError, ImportError, ImproperlyConfigured):
return getattr(settings_defaults, key)
def get_django_cache(cache_name):
try:
from django.core.cache import caches # django 1.7+
return caches[cache_name]
except ImportError:
from django.core.cache import get_cache
return get_cache(cache_name)
except ImproperlyConfigured:
# dance to get around not-setup-django at import time
return {}
| from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from product_details import settings_defaults
def settings_fallback(key):
"""Grab user-defined settings, or fall back to default."""
try:
return getattr(settings, key)
except (AttributeError, ImportError, ImproperlyConfigured):
return getattr(settings_defaults, key)
def get_django_cache(cache_name):
try:
from django.core.cache import caches # django 1.7+
return caches[cache_name]
except ImportError:
from django.core.cache import get_cache
return get_cache(cache_name)
except ImproperlyConfigured:
# dance to get around not-setup-django at import time
return {} | en | 0.69896 | Grab user-defined settings, or fall back to default. # django 1.7+ # dance to get around not-setup-django at import time | 2.234738 | 2 |
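A rough illustration of how the two helpers in the record above are meant to be called; the setting key below is a placeholder rather than one the package is guaranteed to define, and "default" is the standard Django cache alias.

# Falls back to the value in product_details.settings_defaults when the Django
# settings module does not define the key (placeholder key name).
storage_dir = settings_fallback("PROD_DETAILS_DIR")

# Returns a configured Django cache backend, or an empty dict when Django is
# not fully set up at import time.
cache = get_django_cache("default")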
kattis/Soda Slurper.py | jaredliw/python-question-bank | 1 | 8431 | # CPU: 0.06 s
possessed, found, condition = map(int, input().split())
possessed += found
count = 0
while possessed >= condition:
div, mod = divmod(possessed, condition)
count += div
possessed = div + mod
print(count)
| # CPU: 0.06 s
possessed, found, condition = map(int, input().split())
possessed += found
count = 0
while possessed >= condition:
div, mod = divmod(possessed, condition)
count += div
possessed = div + mod
print(count)
| de | 0.232366 | # CPU: 0.06 s | 3.249164 | 3 |
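A short trace of the exchange loop in the record above, using illustrative values of 9 empties in hand (after adding the found ones) and 3 empties per new soda.

# possessed = 9, condition = 3
# divmod(9, 3) -> (3, 0): drink 3 more, keep 3 + 0 = 3 empties, count = 3
# divmod(3, 3) -> (1, 0): drink 1 more, keep 1 + 0 = 1 empty,   count = 4
# 1 < 3, so the loop ends and 4 is printed.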
efficientdet/dataset/csv_.py | HyunjiEllenPak/automl | 0 | 8432 | <gh_stars>0
"""
Copyright 2017-2018 yhenon (https://github.com/yhenon/)
Copyright 2017-2018 Fizyr (https://fizyr.com)
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
# from generators.common import Generator
import cv2
import numpy as np
from PIL import Image
from six import raise_from
import csv
import sys
import os.path as osp
from collections import OrderedDict
import os
def _parse(value, function, fmt):
"""
Parse a string into a value, and format a nice ValueError if it fails.
Returns `function(value)`.
    Any `ValueError` raised is caught and a new `ValueError` is raised
with message `fmt.format(e)`, where `e` is the caught `ValueError`.
"""
try:
return function(value)
except ValueError as e:
raise_from(ValueError(fmt.format(e)), None)
def _read_classes(csv_reader):
"""
Parse the classes file given by csv_reader.
"""
result = OrderedDict()
for line, row in enumerate(csv_reader):
line += 1
try:
class_name, class_id = row
except ValueError:
raise_from(ValueError('line {}: format should be \'class_name,class_id\''.format(line)), None)
class_id = _parse(class_id, int, 'line {}: malformed class ID: {{}}'.format(line))
if class_name in result:
raise ValueError('line {}: duplicate class name: \'{}\''.format(line, class_name))
result[class_name] = class_id
return result
def _read_quadrangle_annotations(csv_reader, classes, detect_text=False):
"""
Read annotations from the csv_reader.
Args:
csv_reader: csv reader of args.annotations_path
classes: list[str] all the class names read from args.classes_path
Returns:
result: dict, dict is like {image_path: [{'x1': x1, 'y1': y1, 'x2': x2, 'y2': y2,
'x3': x3, 'y3': y3, 'x4': x4, 'y4': y4, 'class': class_name}]}
"""
result = OrderedDict()
for line, row in enumerate(csv_reader, 1):
try:
img_file, x1, y1, x2, y2, x3, y3, x4, y4, class_name = row[:10]
if img_file not in result:
result[img_file] = []
# If a row contains only an image path, it's an image without annotations.
if (x1, y1, x2, y2, x3, y3, x4, y4, class_name) == ('', '', '', '', '', '', '', '', ''):
continue
x1 = _parse(x1, int, 'line {}: malformed x1: {{}}'.format(line))
y1 = _parse(y1, int, 'line {}: malformed y1: {{}}'.format(line))
x2 = _parse(x2, int, 'line {}: malformed x2: {{}}'.format(line))
y2 = _parse(y2, int, 'line {}: malformed y2: {{}}'.format(line))
x3 = _parse(x3, int, 'line {}: malformed x3: {{}}'.format(line))
y3 = _parse(y3, int, 'line {}: malformed y3: {{}}'.format(line))
x4 = _parse(x4, int, 'line {}: malformed x4: {{}}'.format(line))
y4 = _parse(y4, int, 'line {}: malformed y4: {{}}'.format(line))
# check if the current class name is correctly present
if detect_text:
if class_name == '###':
continue
else:
class_name = 'text'
if class_name not in classes:
raise ValueError(f'line {line}: unknown class name: \'{class_name}\' (classes: {classes})')
result[img_file].append({'x1': x1, 'y1': y1, 'x2': x2, 'y2': y2,
'x3': x3, 'y3': y3, 'x4': x4, 'y4': y4, 'class': class_name})
except ValueError:
raise_from(ValueError(
f'line {line}: format should be \'img_file,x1,y1,x2,y2,x3,y3,x4,y4,class_name\' or \'img_file,,,,,\''),
None)
return result
def _read_annotations(csv_reader, classes, base_dir):
"""
Read annotations from the csv_reader.
Args:
csv_reader: csv reader of args.annotations_path
classes: list[str] all the class names read from args.classes_path
Returns:
result: dict, dict is like {image_path: [{'x1': x1, 'y1': y1, 'x2': x2, 'y2': y2, 'class': class_name}]}
"""
result = OrderedDict()
for line, row in enumerate(csv_reader, 1):
try:
img_file, x1, y1, x2, y2 = row[:5]
class_name = img_file.split("/")[0]
if img_file not in result:
result[img_file] = []
# If a row contains only an image path, it's an image without annotations.
if (x1, y1, x2, y2, class_name) == ('', '', '', '', ''):
continue
x1 = _parse(x1, int, 'line {}: malformed x1: {{}}'.format(line))
y1 = _parse(y1, int, 'line {}: malformed y1: {{}}'.format(line))
x2 = _parse(x2, int, 'line {}: malformed x2: {{}}'.format(line))
y2 = _parse(y2, int, 'line {}: malformed y2: {{}}'.format(line))
if class_name not in classes:
raise ValueError(f'line {line}: unknown class name: \'{class_name}\' (classes: {classes})')
result[img_file].append({'x1': x1, 'y1': y1, 'x2': x2, 'y2': y2, 'class': class_name,
'filename':img_file})
except ValueError:
raise_from(ValueError(
f'line {line}: format should be \'img_file,x1,y1,x2,y2,class_name\' or \'img_file,,,,,\''),
None)
return result
def _open_for_csv(path):
"""
Open a file with flags suitable for csv.reader.
    For Python 2 this means opening with mode 'rb'; for Python 3 it means 'r' with "universal newlines".
"""
if sys.version_info[0] < 3:
return open(path, 'rb')
else:
return open(path, 'r', newline='')
def load_image(path):
"""
    Load the image at the given path and return it as an RGB array.
"""
image = cv2.imread(path)
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
return image | """
Copyright 2017-2018 yhenon (https://github.com/yhenon/)
Copyright 2017-2018 Fizyr (https://fizyr.com)
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
# from generators.common import Generator
import cv2
import numpy as np
from PIL import Image
from six import raise_from
import csv
import sys
import os.path as osp
from collections import OrderedDict
import os
def _parse(value, function, fmt):
"""
Parse a string into a value, and format a nice ValueError if it fails.
Returns `function(value)`.
    Any `ValueError` raised is caught and a new `ValueError` is raised
with message `fmt.format(e)`, where `e` is the caught `ValueError`.
"""
try:
return function(value)
except ValueError as e:
raise_from(ValueError(fmt.format(e)), None)
def _read_classes(csv_reader):
"""
Parse the classes file given by csv_reader.
"""
result = OrderedDict()
for line, row in enumerate(csv_reader):
line += 1
try:
class_name, class_id = row
except ValueError:
raise_from(ValueError('line {}: format should be \'class_name,class_id\''.format(line)), None)
class_id = _parse(class_id, int, 'line {}: malformed class ID: {{}}'.format(line))
if class_name in result:
raise ValueError('line {}: duplicate class name: \'{}\''.format(line, class_name))
result[class_name] = class_id
return result
def _read_quadrangle_annotations(csv_reader, classes, detect_text=False):
"""
Read annotations from the csv_reader.
Args:
csv_reader: csv reader of args.annotations_path
classes: list[str] all the class names read from args.classes_path
Returns:
result: dict, dict is like {image_path: [{'x1': x1, 'y1': y1, 'x2': x2, 'y2': y2,
'x3': x3, 'y3': y3, 'x4': x4, 'y4': y4, 'class': class_name}]}
"""
result = OrderedDict()
for line, row in enumerate(csv_reader, 1):
try:
img_file, x1, y1, x2, y2, x3, y3, x4, y4, class_name = row[:10]
if img_file not in result:
result[img_file] = []
# If a row contains only an image path, it's an image without annotations.
if (x1, y1, x2, y2, x3, y3, x4, y4, class_name) == ('', '', '', '', '', '', '', '', ''):
continue
x1 = _parse(x1, int, 'line {}: malformed x1: {{}}'.format(line))
y1 = _parse(y1, int, 'line {}: malformed y1: {{}}'.format(line))
x2 = _parse(x2, int, 'line {}: malformed x2: {{}}'.format(line))
y2 = _parse(y2, int, 'line {}: malformed y2: {{}}'.format(line))
x3 = _parse(x3, int, 'line {}: malformed x3: {{}}'.format(line))
y3 = _parse(y3, int, 'line {}: malformed y3: {{}}'.format(line))
x4 = _parse(x4, int, 'line {}: malformed x4: {{}}'.format(line))
y4 = _parse(y4, int, 'line {}: malformed y4: {{}}'.format(line))
# check if the current class name is correctly present
if detect_text:
if class_name == '###':
continue
else:
class_name = 'text'
if class_name not in classes:
raise ValueError(f'line {line}: unknown class name: \'{class_name}\' (classes: {classes})')
result[img_file].append({'x1': x1, 'y1': y1, 'x2': x2, 'y2': y2,
'x3': x3, 'y3': y3, 'x4': x4, 'y4': y4, 'class': class_name})
except ValueError:
raise_from(ValueError(
f'line {line}: format should be \'img_file,x1,y1,x2,y2,x3,y3,x4,y4,class_name\' or \'img_file,,,,,\''),
None)
return result
def _read_annotations(csv_reader, classes, base_dir):
"""
Read annotations from the csv_reader.
Args:
csv_reader: csv reader of args.annotations_path
classes: list[str] all the class names read from args.classes_path
Returns:
result: dict, dict is like {image_path: [{'x1': x1, 'y1': y1, 'x2': x2, 'y2': y2, 'class': class_name}]}
"""
result = OrderedDict()
for line, row in enumerate(csv_reader, 1):
try:
img_file, x1, y1, x2, y2 = row[:5]
class_name = img_file.split("/")[0]
if img_file not in result:
result[img_file] = []
# If a row contains only an image path, it's an image without annotations.
if (x1, y1, x2, y2, class_name) == ('', '', '', '', ''):
continue
x1 = _parse(x1, int, 'line {}: malformed x1: {{}}'.format(line))
y1 = _parse(y1, int, 'line {}: malformed y1: {{}}'.format(line))
x2 = _parse(x2, int, 'line {}: malformed x2: {{}}'.format(line))
y2 = _parse(y2, int, 'line {}: malformed y2: {{}}'.format(line))
if class_name not in classes:
raise ValueError(f'line {line}: unknown class name: \'{class_name}\' (classes: {classes})')
result[img_file].append({'x1': x1, 'y1': y1, 'x2': x2, 'y2': y2, 'class': class_name,
'filename':img_file})
except ValueError:
raise_from(ValueError(
f'line {line}: format should be \'img_file,x1,y1,x2,y2,class_name\' or \'img_file,,,,,\''),
None)
return result
def _open_for_csv(path):
"""
Open a file with flags suitable for csv.reader.
    For Python 2 this means opening with mode 'rb'; for Python 3 it means 'r' with "universal newlines".
"""
if sys.version_info[0] < 3:
return open(path, 'rb')
else:
return open(path, 'r', newline='')
def load_image(path):
"""
    Load the image at the given path and return it as an RGB array.
"""
image = cv2.imread(path)
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
return image | en | 0.738265 | Copyright 2017-2018 yhenon (https://github.com/yhenon/) Copyright 2017-2018 Fizyr (https://fizyr.com) Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. # from generators.common import Generator Parse a string into a value, and format a nice ValueError if it fails. Returns `function(value)`. Any `ValueError` raised is catched and a new `ValueError` is raised with message `fmt.format(e)`, where `e` is the caught `ValueError`. Parse the classes file given by csv_reader. Read annotations from the csv_reader. Args: csv_reader: csv reader of args.annotations_path classes: list[str] all the class names read from args.classes_path Returns: result: dict, dict is like {image_path: [{'x1': x1, 'y1': y1, 'x2': x2, 'y2': y2, 'x3': x3, 'y3': y3, 'x4': x4, 'y4': y4, 'class': class_name}]} # If a row contains only an image path, it's an image without annotations. # check if the current class name is correctly present ##': Read annotations from the csv_reader. Args: csv_reader: csv reader of args.annotations_path classes: list[str] all the class names read from args.classes_path Returns: result: dict, dict is like {image_path: [{'x1': x1, 'y1': y1, 'x2': x2, 'y2': y2, 'class': class_name}]} # If a row contains only an image path, it's an image without annotations. Open a file with flags suitable for csv.reader. This is different for python2 it means with mode 'rb', for python3 this means 'r' with "universal newlines". Load an image at the image_index. | 2.415767 | 2 |
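A small sketch of how the CSV helpers in the record above fit together; the file names and class labels are invented for illustration, and the annotation rows follow the img_file,x1,y1,x2,y2 layout that _read_annotations parses (with the class taken from the first path component).

import csv

# classes.csv would contain lines such as:
#   cat,0
#   dog,1
with _open_for_csv("classes.csv") as f:
    classes = _read_classes(csv.reader(f))

# annotations.csv would contain lines such as:
#   cat/img_001.jpg,10,20,110,220
#   dog/img_002.jpg,30,40,130,240
with _open_for_csv("annotations.csv") as f:
    annotations = _read_annotations(csv.reader(f), classes, base_dir="")

image = load_image("cat/img_001.jpg")  # RGB numpy array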
hoover/site/wsgi.py | hoover/hoover | 15 | 8433 | <gh_stars>10-100
from . import events # noqa
from django.core.wsgi import get_wsgi_application
import os
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "hoover.site.settings")
application = get_wsgi_application()
| from . import events # noqa
from django.core.wsgi import get_wsgi_application
import os
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "hoover.site.settings")
application = get_wsgi_application() | none | 1 | 1.425091 | 1 |
|
submodules/hal/analysis/constraintTurnover/turnoverModel.py | pbasting/cactus | 0 | 8434 | #!/usr/bin/env python
#Copyright (C) 2013 by <NAME>
#
#Released under the MIT license, see LICENSE.txt
#!/usr/bin/env python
"""This is a two-state continuous time markov model: 0: unconstratined. 1: constrained. There are two transition rates to go between states. lossRate: 1->0 and gainRate: 0->1. Probability Matrix and Stationary Distribution are computed from the two rates and a time t. (see pdf)
"""
import argparse
import os
import sys
import copy
import random
import math
from collections import defaultdict
import numpy as np
import subprocess
import tempfile
#constrained is always 1. unconstrained is always 0
# compute probability matrix from rates and time.
def computePMatrix(lossRate, gainRate, t):
assert t >= 0
assert lossRate >= 0
assert gainRate >= 0
x = gainRate / lossRate
y = gainRate + lossRate
eyt = math.exp(-y * t)
c = 1.0 / (x + 1.0)
P = [ [c * (1.0 + x * eyt), c * (x - x * eyt)],
[c * (1.0 - eyt), c * (x + eyt)] ]
assert math.fabs(P[0][0] + P[0][1] - 1.0) < 0.00001
assert math.fabs(P[1][0] + P[1][1] - 1.0) < 0.00001
return P
# compute stationary distribution from rates and time
def computeStationaryDist(lossRate, gainRate, t):
assert t >= 0
assert lossRate >= 0
assert gainRate >= 0
x = gainRate / lossRate
y = gainRate + lossRate
eyt = math.exp(-y * t)
pi0 = (eyt - 1.0) / ( x * eyt + eyt - x - 1.0)
pi1 = 1. - pi0
# assert pi0 * ( ((1.0 + x * eyt) / (x + 1.0)) -1.0) + (1.0 - pi0) * ((1.0 - eyt) / (x + 1.0)) == 0
assert pi0 >= 0 and pi0 <= 1.0
assert pi1 >= 0 and pi1 <= 1.0
return [pi0, pi1]
# compute the absolute difference between the values of the
# probability matrix and stationary distribution computed from a given
# rate, and a set of absolute values of the same. This is a sum of four
# differences, 2 for the distribution, 4 for the matrix.
def diffOnePoint(lossRate, gainRate, piEst, Pest, t):
P = computePMatrix(lossRate, gainRate, t)
pi = computeStationaryDist(lossRate, gainRate, t)
d = math.fabs(pi[0] - piEst[0])
d += math.fabs(pi[1] - piEst[1])
d += math.fabs(P[0][0] - Pest[0][0])
d += math.fabs(P[0][1] - Pest[0][1])
d += math.fabs(P[1][0] - Pest[1][0])
d += math.fabs(P[1][1] - Pest[1][1])
return d
# compute the sum of squared differences for a pair of rate parameters
# and a set of data points. Each data point is a 3 tuple:
# (1x2 stationary distribution pi, 2x2 probability matrix P, time t)
def diffSqManyPoints(lossRate, gainRate, estVals):
dtot = 0
for estVal in estVals:
piEst = estVal[0]
Pest = estVal[1]
t = estVal[2]
d = diffOnePoint(lossRate, gainRate, piEst, Pest, t)
dtot += d * d
return dtot
# use really simple gradient descent type approach to find rate values that
# minimize the squared difference with some data points. Each data point
# is a 3-tuple as described above. The gradient descent iterates over
# maxIt iterations. Each iteration it tries to add and subtract delta from
# the current best rates (4 combinations: add delta to gain, add delta to loss,
# subtract delta from gain, subtract delta from loss). The best pair
# of rate parameters are returned, along with their square difference from
# the data.
def gradDescent(lrStart, grStart, estVals, maxIt, delta):
bestDiff = diffSqManyPoints(lrStart, grStart, estVals)
bestLr = lrStart
bestGr = grStart
lastChangeIterator = 0
for i in range(maxIt):
lr = bestLr
gr = bestGr
dpl = diffSqManyPoints(lr + delta, gr, estVals)
rval = random.randint(0, 3)
if rval == 0 and dpl < bestDiff:
bestDiff = dpl
bestLr = lr + delta
bestGr = gr
lastChangeIterator = i
dpg = diffSqManyPoints(lr, gr + delta, estVals)
if rval == 1 and dpg < bestDiff:
bestDiff = dpg
bestLr = lr
bestGr = gr + delta
lastChangeIterator = i
if rval == 2 and lr > delta:
dml = diffSqManyPoints(lr - delta, gr, estVals)
if dml < bestDiff:
bestDiff = dml
bestLr = lr - delta
bestGr = gr
lastChangeIterator = i
if rval == 3 and gr > delta:
dmg = diffSqManyPoints(lr, gr - delta, estVals)
if dmg < bestDiff:
bestDiff = dmg
bestLr = lr
bestGr = gr - delta
lastChangeIterator = i
#
# Hack: if nothing happened, instead of returning, try adding
# 10x the step value and seeing what happens.
#
if i == lastChangeIterator + 8:
boostDelta = delta * 10.
dpl = diffSqManyPoints(lr + boostDelta, gr, estVals)
if rval == 0 and dpl < bestDiff:
bestDiff = dpl
bestLr = lr + boostDelta
bestGr = gr
lastChangeIterator = i
dpg = diffSqManyPoints(lr, gr + boostDelta, estVals)
if rval == 1 and dpg < bestDiff:
bestDiff = dpg
bestLr = lr
bestGr = gr + boostDelta
lastChangeIterator = i
if rval == 2 and lr > boostDelta:
dml = diffSqManyPoints(lr - boostDelta, gr, estVals)
if dml < bestDiff:
bestDiff = dml
bestLr = lr - boostDelta
bestGr = gr
lastChangeIterator = i
if rval == 3 and gr > boostDelta:
dmg = diffSqManyPoints(lr, gr - boostDelta, estVals)
if dmg < bestDiff:
bestDiff = dmg
bestLr = lr
bestGr = gr - boostDelta
lastChangeIterator = i
# we tried the 10x and now give up
elif i > lastChangeIterator + 8:
break
return (bestLr, bestGr, bestDiff)
# add some noise to parameters
def addNoise(P, pi, maxNoise):
d = random.uniform(-maxNoise, maxNoise)
P[0][0] += d
P[0][1] -= d
d = random.uniform(-maxNoise, maxNoise)
P[1][0] += d
P[1][1] -= d
d = random.uniform(-maxNoise, maxNoise)
pi[0] += d
pi[1] -= d
# generate some random "estimated" parameters for values of t
# within a given range. random noise is added as specified by maxNoise
def generateData(n, tRange, lossRate, gainRate, maxNoise):
genVals = []
for i in range(n):
t = random.uniform(tRange[0], tRange[1])
P = computePMatrix(lossRate, gainRate, t)
pi = computeStationaryDist(lossRate, gainRate, t)
addNoise(P, pi, maxNoise)
genVals.append((pi, P, t))
return genVals
def main(argv=None):
if argv is None:
argv = sys.argv
parser = argparse.ArgumentParser()
parser.add_argument("N", type=int,
help="number of simulated data sets")
parser.add_argument("size", type=int,
help="number of simulated data points per set")
parser.add_argument("minRate", type=float,
help="minimum true rate")
parser.add_argument("maxRate", type=float,
help="maximum true rate")
parser.add_argument("minT", type=float,
help="minimum true t")
parser.add_argument("maxT", type=float,
help="maximum true t")
parser.add_argument("--maxIt", type=int, default=1000,
help="number of iterations for gradient descent")
parser.add_argument("--step", type=float, default=0.001,
help="gradient descent step")
parser.add_argument("--noise", type=float, default=0,
help="max amount of noise to add")
parser.add_argument("--retries", type=int, default=5,
help="number of gradient descents to run")
args = parser.parse_args()
assert (args.N > 0 and args.size > 0 and args.minRate > 0 and
args.maxRate > 0 and args.minT > 0 and args.maxT > 0 and
args.maxIt > 0 and args.step > 0 and args.noise >= 0 and
args.retries > 1)
for n in range(args.N):
lrTrue = random.uniform(args.minRate, args.maxRate)
grTrue = random.uniform(args.minRate, args.maxRate)
genVals = generateData(args.size, (args.minT, args.maxT),
lrTrue, grTrue, args.noise)
bestLr, bestGr, bestDiff = (0, 0, 1000000)
for retry in range(args.retries):
lrStart = random.uniform(0.0001, 1.0)
grStart = random.uniform(0.0001, 1.0)
(lrEst, grEst, diff) = gradDescent(lrStart, grStart, genVals,
args.maxIt, args.step)
if diff < bestDiff:
bestLr, bestGr, bestDiff = (lrEst, grEst, diff)
print "Truth=(%f,%f), Start=(%f,%f) Est=(%f,%f), dsq=%f" % (
lrTrue, grTrue, lrStart, grStart, bestLr, bestGr,
(lrTrue - bestLr) * (lrTrue - bestLr) +
(grTrue - bestGr) * (grTrue - bestGr))
print "--------------------------------"
if __name__ == "__main__":
sys.exit(main())
| #!/usr/bin/env python
#Copyright (C) 2013 by <NAME>
#
#Released under the MIT license, see LICENSE.txt
#!/usr/bin/env python
"""This is a two-state continuous time markov model: 0: unconstratined. 1: constrained. There are two transition rates to go between states. lossRate: 1->0 and gainRate: 0->1. Probability Matrix and Stationary Distribution are computed from the two rates and a time t. (see pdf)
"""
import argparse
import os
import sys
import copy
import random
import math
from collections import defaultdict
import numpy as np
import subprocess
import tempfile
#constrained is always 1. unconstrained is always 0
# compute probability matrix from rates and time.
def computePMatrix(lossRate, gainRate, t):
assert t >= 0
assert lossRate >= 0
assert gainRate >= 0
x = gainRate / lossRate
y = gainRate + lossRate
eyt = math.exp(-y * t)
c = 1.0 / (x + 1.0)
P = [ [c * (1.0 + x * eyt), c * (x - x * eyt)],
[c * (1.0 - eyt), c * (x + eyt)] ]
assert math.fabs(P[0][0] + P[0][1] - 1.0) < 0.00001
assert math.fabs(P[1][0] + P[1][1] - 1.0) < 0.00001
return P
# compute stationary distribution from rates and time
def computeStationaryDist(lossRate, gainRate, t):
assert t >= 0
assert lossRate >= 0
assert gainRate >= 0
x = gainRate / lossRate
y = gainRate + lossRate
eyt = math.exp(-y * t)
pi0 = (eyt - 1.0) / ( x * eyt + eyt - x - 1.0)
pi1 = 1. - pi0
# assert pi0 * ( ((1.0 + x * eyt) / (x + 1.0)) -1.0) + (1.0 - pi0) * ((1.0 - eyt) / (x + 1.0)) == 0
assert pi0 >= 0 and pi0 <= 1.0
assert pi1 >= 0 and pi1 <= 1.0
return [pi0, pi1]
# compute the absolute difference between the values of the
# probability matrix and stationary distribution computed from a given
# rate, and a set of absolute values of the same. This is a sum of four
# differences, 2 for the distribution, 4 for the matrix.
def diffOnePoint(lossRate, gainRate, piEst, Pest, t):
P = computePMatrix(lossRate, gainRate, t)
pi = computeStationaryDist(lossRate, gainRate, t)
d = math.fabs(pi[0] - piEst[0])
d += math.fabs(pi[1] - piEst[1])
d += math.fabs(P[0][0] - Pest[0][0])
d += math.fabs(P[0][1] - Pest[0][1])
d += math.fabs(P[1][0] - Pest[1][0])
d += math.fabs(P[1][1] - Pest[1][1])
return d
# compute the sum of squared differences for a pair of rate parameters
# and a set of data points. Each data point is a 3 tuple:
# (1x2 stationary distribution pi, 2x2 probability matrix P, time t)
def diffSqManyPoints(lossRate, gainRate, estVals):
dtot = 0
for estVal in estVals:
piEst = estVal[0]
Pest = estVal[1]
t = estVal[2]
d = diffOnePoint(lossRate, gainRate, piEst, Pest, t)
dtot += d * d
return dtot
# use really simple gradient descent type approach to find rate values that
# minimize the squared difference with some data points. Each data point
# is a 3-tuple as described above. The gradient descent iterates over
# maxIt iterations. Each iteration it tries to add and subtract delta from
# the current best rates (4 combinations: add delta to gain, add delta to loss,
# subtract delta from gain, subtract delta from loss). The best pair
# of rate parameters are returned, along with their square difference from
# the data.
def gradDescent(lrStart, grStart, estVals, maxIt, delta):
bestDiff = diffSqManyPoints(lrStart, grStart, estVals)
bestLr = lrStart
bestGr = grStart
lastChangeIterator = 0
for i in range(maxIt):
lr = bestLr
gr = bestGr
dpl = diffSqManyPoints(lr + delta, gr, estVals)
rval = random.randint(0, 3)
if rval == 0 and dpl < bestDiff:
bestDiff = dpl
bestLr = lr + delta
bestGr = gr
lastChangeIterator = i
dpg = diffSqManyPoints(lr, gr + delta, estVals)
if rval == 1 and dpg < bestDiff:
bestDiff = dpg
bestLr = lr
bestGr = gr + delta
lastChangeIterator = i
if rval == 2 and lr > delta:
dml = diffSqManyPoints(lr - delta, gr, estVals)
if dml < bestDiff:
bestDiff = dml
bestLr = lr - delta
bestGr = gr
lastChangeIterator = i
if rval == 3 and gr > delta:
dmg = diffSqManyPoints(lr, gr - delta, estVals)
if dmg < bestDiff:
bestDiff = dmg
bestLr = lr
bestGr = gr - delta
lastChangeIterator = i
#
# Hack: if nothing happened, instead of returning, try adding
# 10x the step value and seeing what happens.
#
if i == lastChangeIterator + 8:
boostDelta = delta * 10.
dpl = diffSqManyPoints(lr + boostDelta, gr, estVals)
if rval == 0 and dpl < bestDiff:
bestDiff = dpl
bestLr = lr + boostDelta
bestGr = gr
lastChangeIterator = i
dpg = diffSqManyPoints(lr, gr + boostDelta, estVals)
if rval == 1 and dpg < bestDiff:
bestDiff = dpg
bestLr = lr
bestGr = gr + boostDelta
lastChangeIterator = i
if rval == 2 and lr > boostDelta:
dml = diffSqManyPoints(lr - boostDelta, gr, estVals)
if dml < bestDiff:
bestDiff = dml
bestLr = lr - boostDelta
bestGr = gr
lastChangeIterator = i
if rval == 3 and gr > boostDelta:
dmg = diffSqManyPoints(lr, gr - boostDelta, estVals)
if dmg < bestDiff:
bestDiff = dmg
bestLr = lr
bestGr = gr - boostDelta
lastChangeIterator = i
# we tried the 10x and now give up
elif i > lastChangeIterator + 8:
break
return (bestLr, bestGr, bestDiff)
# add some noise to parameters
def addNoise(P, pi, maxNoise):
d = random.uniform(-maxNoise, maxNoise)
P[0][0] += d
P[0][1] -= d
d = random.uniform(-maxNoise, maxNoise)
P[1][0] += d
P[1][1] -= d
d = random.uniform(-maxNoise, maxNoise)
pi[0] += d
pi[1] -= d
# generate some random "estimated" parameters for values of t
# within a given range. random noise is added as specified by maxNoise
def generateData(n, tRange, lossRate, gainRate, maxNoise):
genVals = []
for i in range(n):
t = random.uniform(tRange[0], tRange[1])
P = computePMatrix(lossRate, gainRate, t)
pi = computeStationaryDist(lossRate, gainRate, t)
addNoise(P, pi, maxNoise)
genVals.append((pi, P, t))
return genVals
def main(argv=None):
if argv is None:
argv = sys.argv
parser = argparse.ArgumentParser()
parser.add_argument("N", type=int,
help="number of simulated data sets")
parser.add_argument("size", type=int,
help="number of simulated data points per set")
parser.add_argument("minRate", type=float,
help="minimum true rate")
parser.add_argument("maxRate", type=float,
help="maximum true rate")
parser.add_argument("minT", type=float,
help="minimum true t")
parser.add_argument("maxT", type=float,
help="maximum true t")
parser.add_argument("--maxIt", type=int, default=1000,
help="number of iterations for gradient descent")
parser.add_argument("--step", type=float, default=0.001,
help="gradient descent step")
parser.add_argument("--noise", type=float, default=0,
help="max amount of noise to add")
parser.add_argument("--retries", type=int, default=5,
help="number of gradient descents to run")
args = parser.parse_args()
assert (args.N > 0 and args.size > 0 and args.minRate > 0 and
args.maxRate > 0 and args.minT > 0 and args.maxT > 0 and
args.maxIt > 0 and args.step > 0 and args.noise >= 0 and
args.retries > 1)
for n in range(args.N):
lrTrue = random.uniform(args.minRate, args.maxRate)
grTrue = random.uniform(args.minRate, args.maxRate)
genVals = generateData(args.size, (args.minT, args.maxT),
lrTrue, grTrue, args.noise)
bestLr, bestGr, bestDiff = (0, 0, 1000000)
for retry in range(args.retries):
lrStart = random.uniform(0.0001, 1.0)
grStart = random.uniform(0.0001, 1.0)
(lrEst, grEst, diff) = gradDescent(lrStart, grStart, genVals,
args.maxIt, args.step)
if diff < bestDiff:
bestLr, bestGr, bestDiff = (lrEst, grEst, diff)
print "Truth=(%f,%f), Start=(%f,%f) Est=(%f,%f), dsq=%f" % (
lrTrue, grTrue, lrStart, grStart, bestLr, bestGr,
(lrTrue - bestLr) * (lrTrue - bestLr) +
(grTrue - bestGr) * (grTrue - bestGr))
print "--------------------------------"
if __name__ == "__main__":
sys.exit(main())
| en | 0.860659 | #!/usr/bin/env python #Copyright (C) 2013 by <NAME> # #Released under the MIT license, see LICENSE.txt #!/usr/bin/env python This is a two-state continuous time markov model: 0: unconstratined. 1: constrained. There are two transition rates to go between states. lossRate: 1->0 and gainRate: 0->1. Probability Matrix and Stationary Distribution are computed from the two rates and a time t. (see pdf) #constrained is always 1. unconstrained is always 0 # compute probability matrix from rates and time. # compute stationary distribution from rates and time # assert pi0 * ( ((1.0 + x * eyt) / (x + 1.0)) -1.0) + (1.0 - pi0) * ((1.0 - eyt) / (x + 1.0)) == 0 # compute the absolute difference between the values of the # probability matrix and stationary distribution computed from a given # rate, and a set of absolute values of the same. This is a sum of four # differences, 2 for the distribution, 4 for the matrix. # compute the sum of squared differences for a pair of rate parameters # and a set of data points. Each data point is a 3 tuple: # (1x2 stationary distribution pi, 2x2 probability matrix P, time t) # use really simple gradient descent type approach to find rate values that # minimize the squared difference with some data points. Each data point # is a 3-tuple as described above. The gradient descent iteratres over # maxIt iterations. Each iteration it tries to add and subtract delta from # the current best rates (4 combinations: add delta to gain, add delta to loss, # subtract delta from gain, subtract delta from loss). The best pair # of rate parameters are returned, along with their square difference from # the data. # # Hack: if nothing happened, instead of returning, try adding # 10x the step value and seeing what happens. # # we tried the 10x and now give up # add some noise to parameters # generate some random "estimated" parameters for values of t # within a given range. random noise is added as specifed by maxNoise | 2.782039 | 3 |
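A quick numerical check of the closed-form expressions in the record above, with arbitrary rates; each row of P must sum to 1 and the stationary probabilities must lie in [0, 1], which is exactly what the assertions in the two functions enforce.

lossRate, gainRate, t = 0.2, 0.1, 1.0
P = computePMatrix(lossRate, gainRate, t)
pi = computeStationaryDist(lossRate, gainRate, t)
# P[0][0] is the probability of staying unconstrained over time t and
# P[1][1] of staying constrained; pi is the stationary distribution [pi0, pi1].
print(P[0][0] + P[0][1], P[1][0] + P[1][1])  # both 1.0 up to floating point error
print(pi)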
SimpleBudget/SimpleBudget/budgets/tests.py | speratus/SimpleBudget | 0 | 8435 | from django.test import TestCase
from .validators import validate_budget_period
from .models import Budget, Expense, Payment
from django.contrib.auth.models import User
from django.core.exceptions import ValidationError
class ExpenseTestCases(TestCase):
def setUp(self) -> None:
user = User.objects.create_user('joe', email='<EMAIL>', password='<PASSWORD>')
budget = Budget.objects.create(name='My budget',
creation_date='2019-05-03',
owner=user,
description='The budget of champions.'
)
Expense.objects.create(name='Water park visit',
amount=30.00,
period='1-monthly',
payee='Super awesome Water parks',
description='I will go to the water park.',
date='2019-06-04',
budget=budget
)
Payment.objects.create(name='Paycheck',
amount=4000.0,
period='1-monthly',
description='Where the Mullah comes from',
date='2017-01-12',
                               origin='The big boss from up top in HR.',
budget=budget
)
def test_proper_str_formation(self):
budget = Budget.objects.get(pk=1)
expense = Expense.objects.get(pk=1)
payment = Payment.objects.get(pk=1)
self.assertEquals(budget.__str__(), 'My budget: joe', 'The budget was not created properly.')
        self.assertEquals(expense.__str__(), 'Water park visit: 30.0', 'The expense was not created properly.')
        self.assertEquals(payment.__str__(), 'Paycheck: 4000.0', 'The string function on payment is not working properly.')
class BudgetPeriodValidatorTestCase(TestCase):
valid_cases = [
'1-daily',
'1-onetime',
'1-annually',
'5-quarterly',
'7-weekly',
'3-annually',
'10-monthly',
'19-weekly',
'99-daily'
]
invalid_cases = [
'0.4-daily',
'0-weekly',
'ad-annually',
'100-weekly',
'4.6-quarterly',
'-31-daily',
'whoot-quarterly',
'59-zoobly',
'5-onetime',
'03-monthly',
]
def test_budget_period_validator(self):
for c in self.valid_cases:
self.assertEquals(validate_budget_period(c), None, f'failed on {c}')
def test_budget_period_validator_fail(self):
for c in self.invalid_cases:
self.assertRaises(ValidationError, validate_budget_period, c)
def test_validator_in_expense_model_creation_invalid(self):
user = User.objects.create(username='joe', email='<EMAIL>', password='<PASSWORD>')
budget = Budget.objects.create(name='My Budget',
creation_date='2019-04-13',
owner=user,
)
for c in self.invalid_cases:
self.assertRaises(Exception, Expense.objects.create,
name=c + '1',
amount=15.0,
date='2014-05-06',
period=c,
budget=budget
)
| from django.test import TestCase
from .validators import validate_budget_period
from .models import Budget, Expense, Payment
from django.contrib.auth.models import User
from django.core.exceptions import ValidationError
class ExpenseTestCases(TestCase):
def setUp(self) -> None:
user = User.objects.create_user('joe', email='<EMAIL>', password='<PASSWORD>')
budget = Budget.objects.create(name='My budget',
creation_date='2019-05-03',
owner=user,
description='The budget of champions.'
)
Expense.objects.create(name='Water park visit',
amount=30.00,
period='1-monthly',
payee='Super awesome Water parks',
description='I will go to the water park.',
date='2019-06-04',
budget=budget
)
Payment.objects.create(name='Paycheck',
amount=4000.0,
period='1-monthly',
description='Where the Mullah comes from',
date='2017-01-12',
                               origin='The big boss from up top in HR.',
budget=budget
)
def test_proper_str_formation(self):
budget = Budget.objects.get(pk=1)
expense = Expense.objects.get(pk=1)
payment = Payment.objects.get(pk=1)
self.assertEquals(budget.__str__(), 'My budget: joe', 'The budget was not created properly.')
        self.assertEquals(expense.__str__(), 'Water park visit: 30.0', 'The expense was not created properly.')
        self.assertEquals(payment.__str__(), 'Paycheck: 4000.0', 'The string function on payment is not working properly.')
class BudgetPeriodValidatorTestCase(TestCase):
valid_cases = [
'1-daily',
'1-onetime',
'1-annually',
'5-quarterly',
'7-weekly',
'3-annually',
'10-monthly',
'19-weekly',
'99-daily'
]
invalid_cases = [
'0.4-daily',
'0-weekly',
'ad-annually',
'100-weekly',
'4.6-quarterly',
'-31-daily',
'whoot-quarterly',
'59-zoobly',
'5-onetime',
'03-monthly',
]
def test_budget_period_validator(self):
for c in self.valid_cases:
self.assertEquals(validate_budget_period(c), None, f'failed on {c}')
def test_budget_period_validator_fail(self):
for c in self.invalid_cases:
self.assertRaises(ValidationError, validate_budget_period, c)
def test_validator_in_expense_model_creation_invalid(self):
user = User.objects.create(username='joe', email='<EMAIL>', password='<PASSWORD>')
budget = Budget.objects.create(name='My Budget',
creation_date='2019-04-13',
owner=user,
)
for c in self.invalid_cases:
self.assertRaises(Exception, Expense.objects.create,
name=c + '1',
amount=15.0,
date='2014-05-06',
period=c,
budget=budget
)
| none | 1 | 2.539899 | 3 |
|
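The period strings exercised by the tests in the record above follow a count-unit pattern; a rough sketch of how the validator is expected to behave on one value from each list, assuming it raises ValidationError on rejection as the tests assert.

from django.core.exceptions import ValidationError

validate_budget_period("19-weekly")       # listed as valid: returns None
try:
    validate_budget_period("03-monthly")  # listed as invalid: leading zero
except ValidationError as exc:
    print("rejected:", exc)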
feed/migrations/0002_remove_player_finished_decks.py | kubapi/hater | 0 | 8436 | <reponame>kubapi/hater
# Generated by Django 3.2.3 on 2021-06-13 19:58
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('feed', '0001_initial'),
]
operations = [
migrations.RemoveField(
model_name='player',
name='finished_decks',
),
]
| # Generated by Django 3.2.3 on 2021-06-13 19:58
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('feed', '0001_initial'),
]
operations = [
migrations.RemoveField(
model_name='player',
name='finished_decks',
),
] | en | 0.848412 | # Generated by Django 3.2.3 on 2021-06-13 19:58 | 1.386483 | 1 |
var/spack/repos/builtin/packages/abacus/package.py | jeanbez/spack | 0 | 8437 | # Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
import re
from spack.package import *
class Abacus(MakefilePackage):
"""ABACUS (Atomic-orbital Based Ab-initio Computation at UStc)
is an open-source computer code package aiming
for large-scale electronic-structure simulations
from first principles"""
maintainers = ["bitllion"]
homepage = "http://abacus.ustc.edu.cn/"
git = "https://github.com/abacusmodeling/abacus-develop.git"
url = "https://github.com/abacusmodeling/abacus-develop/archive/refs/tags/v2.2.1.tar.gz"
version("develop", branch="develop")
version(
"2.2.3",
sha256="88dbf6a3bdd907df3e097637ec8e51fde13e2f5e0b44f3667443195481320edf",
)
version(
"2.2.2",
sha256="4a7cf2ec6e43dd5c53d5f877a941367074f4714d93c1977a719782957916169e",
)
version(
"2.2.1",
sha256="14feca1d8d1ce025d3f263b85ebfbebc1a1efff704b6490e95b07603c55c1d63",
)
version(
"2.2.0",
sha256="09d4a2508d903121d29813a85791eeb3a905acbe1c5664b8a88903f8eda64b8f",
)
variant("openmp", default=True, description="Enable OpenMP support")
depends_on("elpa+openmp", when="+openmp")
depends_on("elpa~openmp", when="~openmp")
depends_on("cereal")
depends_on("libxc")
depends_on("fftw")
# MPI is a necessary dependency
depends_on("mpi", type=("build", "link", "run"))
depends_on("mkl")
build_directory = "source"
def edit(self, spec, prefix):
if "+openmp" in spec:
inc_var = "_openmp-"
system_var = (
"ELPA_LIB = -L${ELPA_LIB_DIR} -lelpa_openmp -Wl, -rpath=${ELPA_LIB_DIR}"
)
else:
inc_var = "-"
system_var = (
"ELPA_LIB = -L${ELPA_LIB_DIR} -lelpa -Wl,-rpath=${ELPA_LIB_DIR}"
)
tempInc = (
"\
FORTRAN = ifort\n\
CPLUSPLUS = icpc\n\
CPLUSPLUS_MPI = mpiicpc\n\
LAPACK_DIR = $(MKLROOT)\n\
FFTW_DIR = %s\n\
ELPA_DIR = %s\n\
ELPA_INCLUDE = -I${ELPA_DIR}/include/elpa%s%s\n\
CEREAL_DIR = %s\n\
OBJ_DIR = obj\n\
OBJ_DIR_serial = obj\n\
NP = 14\n"
% (
spec["fftw"].prefix,
spec["elpa"].prefix,
inc_var,
"{0}".format(spec["elpa"].version),
spec["cereal"].prefix,
)
)
with open(self.build_directory + "/Makefile.vars", "w") as f:
f.write(tempInc)
lineList = []
Pattern1 = re.compile("^ELPA_INCLUDE_DIR")
Pattern2 = re.compile("^ELPA_LIB\\s*= ")
with open(self.build_directory + "/Makefile.system", "r") as f:
while True:
line = f.readline()
if not line:
break
elif Pattern1.search(line):
pass
elif Pattern2.search(line):
pass
else:
lineList.append(line)
with open(self.build_directory + "/Makefile.system", "w") as f:
for i in lineList:
f.write(i)
with open(self.build_directory + "/Makefile.system", "a") as f:
f.write(system_var)
def install(self, spec, prefix):
install_tree("bin", prefix.bin)
| # Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
import re
from spack.package import *
class Abacus(MakefilePackage):
"""ABACUS (Atomic-orbital Based Ab-initio Computation at UStc)
is an open-source computer code package aiming
for large-scale electronic-structure simulations
from first principles"""
maintainers = ["bitllion"]
homepage = "http://abacus.ustc.edu.cn/"
git = "https://github.com/abacusmodeling/abacus-develop.git"
url = "https://github.com/abacusmodeling/abacus-develop/archive/refs/tags/v2.2.1.tar.gz"
version("develop", branch="develop")
version(
"2.2.3",
sha256="88dbf6a3bdd907df3e097637ec8e51fde13e2f5e0b44f3667443195481320edf",
)
version(
"2.2.2",
sha256="4a7cf2ec6e43dd5c53d5f877a941367074f4714d93c1977a719782957916169e",
)
version(
"2.2.1",
sha256="14feca1d8d1ce025d3f263b85ebfbebc1a1efff704b6490e95b07603c55c1d63",
)
version(
"2.2.0",
sha256="09d4a2508d903121d29813a85791eeb3a905acbe1c5664b8a88903f8eda64b8f",
)
variant("openmp", default=True, description="Enable OpenMP support")
depends_on("elpa+openmp", when="+openmp")
depends_on("elpa~openmp", when="~openmp")
depends_on("cereal")
depends_on("libxc")
depends_on("fftw")
# MPI is a necessary dependency
depends_on("mpi", type=("build", "link", "run"))
depends_on("mkl")
build_directory = "source"
def edit(self, spec, prefix):
if "+openmp" in spec:
inc_var = "_openmp-"
system_var = (
"ELPA_LIB = -L${ELPA_LIB_DIR} -lelpa_openmp -Wl, -rpath=${ELPA_LIB_DIR}"
)
else:
inc_var = "-"
system_var = (
"ELPA_LIB = -L${ELPA_LIB_DIR} -lelpa -Wl,-rpath=${ELPA_LIB_DIR}"
)
tempInc = (
"\
FORTRAN = ifort\n\
CPLUSPLUS = icpc\n\
CPLUSPLUS_MPI = mpiicpc\n\
LAPACK_DIR = $(MKLROOT)\n\
FFTW_DIR = %s\n\
ELPA_DIR = %s\n\
ELPA_INCLUDE = -I${ELPA_DIR}/include/elpa%s%s\n\
CEREAL_DIR = %s\n\
OBJ_DIR = obj\n\
OBJ_DIR_serial = obj\n\
NP = 14\n"
% (
spec["fftw"].prefix,
spec["elpa"].prefix,
inc_var,
"{0}".format(spec["elpa"].version),
spec["cereal"].prefix,
)
)
with open(self.build_directory + "/Makefile.vars", "w") as f:
f.write(tempInc)
lineList = []
Pattern1 = re.compile("^ELPA_INCLUDE_DIR")
Pattern2 = re.compile("^ELPA_LIB\\s*= ")
with open(self.build_directory + "/Makefile.system", "r") as f:
while True:
line = f.readline()
if not line:
break
elif Pattern1.search(line):
pass
elif Pattern2.search(line):
pass
else:
lineList.append(line)
with open(self.build_directory + "/Makefile.system", "w") as f:
for i in lineList:
f.write(i)
with open(self.build_directory + "/Makefile.system", "a") as f:
f.write(system_var)
def install(self, spec, prefix):
install_tree("bin", prefix.bin)
| en | 0.731168 | # Copyright 2013-2022 Lawrence Livermore National Security, LLC and other # Spack Project Developers. See the top-level COPYRIGHT file for details. # # SPDX-License-Identifier: (Apache-2.0 OR MIT) ABACUS (Atomic-orbital Based Ab-initio Computation at UStc) is an open-source computer code package aiming for large-scale electronic-structure simulations from first principles # MPI is a necessary dependency | 1.640955 | 2 |
test/regression/features/arithmetic/mult.py | ppelleti/berp | 137 | 8438 | print(18 * 1234)
print(18 * 1234 * 2)
print(0 * 1)
print(1 * 0)
print(0.0 * 1.0)
print(1.0 * 0.0)
| print(18 * 1234)
print(18 * 1234 * 2)
print(0 * 1)
print(1 * 0)
print(0.0 * 1.0)
print(1.0 * 0.0)
| none | 1 | 2.300952 | 2 |
|
001146StepikPyBegin/Stepik001146PyBeginсh02p05st15C09_20200411.py | SafonovMikhail/python_000577 | 0 | 8439 | num = int(input())
d1 = (num % 10 ** 4) // 10 ** 3
d2 = (num % 10 ** 3) // 10 ** 2
d3 = (num % 10 ** 2) // 10
d4 = num % 10
print("Цифра в позиции тысяч равна", d1)
print("Цифра в позиции сотен равна", d2)
print("Цифра в позиции десятков равна", d3)
print("Цифра в позиции единиц равна", d4)
# print("Python", , "is the best")
_quit = 1
print(_quit * 2)
| num = int(input())
d1 = (num % 10 ** 4) // 10 ** 3
d2 = (num % 10 ** 3) // 10 ** 2
d3 = (num % 10 ** 2) // 10
d4 = num % 10
print("Цифра в позиции тысяч равна", d1)
print("Цифра в позиции сотен равна", d2)
print("Цифра в позиции десятков равна", d3)
print("Цифра в позиции единиц равна", d4)
# print("Python", , "is the best")
_quit = 1
print(_quit * 2)
| la | 0.128741 | # print("Python", , "is the best") | 3.786788 | 4 |
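A worked example of the digit extraction in the record above for the arbitrarily chosen four-digit input 7049.

# num = 7049
# d1 = (7049 % 10000) // 1000 = 7049 // 1000 = 7   (thousands)
# d2 = (7049 % 1000) // 100   = 49 // 100    = 0   (hundreds)
# d3 = (7049 % 100) // 10     = 49 // 10     = 4   (tens)
# d4 = 7049 % 10                             = 9   (units)
# _quit = 1, so the final line prints 2.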
WP3/Task3.2/spark/shared/addcountry2dataset.py | on-merrit/ON-MERRIT | 2 | 8440 | <filename>WP3/Task3.2/spark/shared/addcountry2dataset.py
import csv
from os import listdir
from os.path import isfile, join
from osgeo import ogr
from multiprocessing import Pool
driver = ogr.GetDriverByName('GeoJSON')
countryFile = driver.Open("../data/external/countries.json")
layer = countryFile.GetLayer()
class Point(object):
""" Wrapper for ogr point """
def __init__(self, lat, lng):
""" Coordinates are in degrees """
self.point = ogr.Geometry(ogr.wkbPoint)
self.point.AddPoint(lng, lat)
def getOgr(self):
return self.point
ogr = property(getOgr)
class Country(object):
""" Wrapper for ogr country shape. Not meant to be instantiated directly. """
def __init__(self, shape):
self.shape = shape
def getIso(self):
return self.shape.GetField('ISO_A3')
iso = property(getIso)
def __str__(self):
return self.shape.GetField('ADMIN')
def contains(self, point):
return self.shape.geometry().Contains(point.ogr)
def getCountry(lat, lng):
"""
    Checks the given GPS coordinates against the loaded country layer.
    Output is either the country's ISO A3 code or None
"""
point = ogr.Geometry(ogr.wkbPoint)
point.AddPoint(lng, lat)
for i in range(layer.GetFeatureCount()):
country = layer.GetFeature(i)
if country.geometry().Contains(point):
return Country(country).iso
# nothing found
return None
def process_chunk(file):
with open(file, 'r') as read_obj, open(f"{file}_done.csv", 'w') as write_obj:
# pass the file object to reader() to get the reader object
csv_reader = csv.reader(read_obj)
csv_writer = csv.writer(write_obj)
# Iterate over each row in the csv using reader object
count=0
for row in csv_reader:
# row variable is a list that represents a row in csv
if row[2] and row[3]:
country = getCountry(float(row[2]), float(row[3]))
row.append(country)
csv_writer.writerow(row)
count+=1
if count%100==0:
print(f"File {file} progress: {count}/100000")
print(f"Processing {file} terminated")
allfiles = [join("q1a_latlon_split", f) for f in listdir("q1a_latlon_split") if isfile(join("q1a_latlon_split", f))]
with Pool(32) as p:
p.map(process_chunk, allfiles)
| <filename>WP3/Task3.2/spark/shared/addcountry2dataset.py
import csv
from os import listdir
from os.path import isfile, join
from osgeo import ogr
from multiprocessing import Pool
driver = ogr.GetDriverByName('GeoJSON')
countryFile = driver.Open("../data/external/countries.json")
layer = countryFile.GetLayer()
class Point(object):
""" Wrapper for ogr point """
def __init__(self, lat, lng):
""" Coordinates are in degrees """
self.point = ogr.Geometry(ogr.wkbPoint)
self.point.AddPoint(lng, lat)
def getOgr(self):
return self.point
ogr = property(getOgr)
class Country(object):
""" Wrapper for ogr country shape. Not meant to be instantiated directly. """
def __init__(self, shape):
self.shape = shape
def getIso(self):
return self.shape.GetField('ISO_A3')
iso = property(getIso)
def __str__(self):
return self.shape.GetField('ADMIN')
def contains(self, point):
return self.shape.geometry().Contains(point.ogr)
def getCountry(lat, lng):
"""
    Checks the given GPS coordinates against the loaded country layer.
    Output is either the country's ISO A3 code or None
"""
point = ogr.Geometry(ogr.wkbPoint)
point.AddPoint(lng, lat)
for i in range(layer.GetFeatureCount()):
country = layer.GetFeature(i)
if country.geometry().Contains(point):
return Country(country).iso
# nothing found
return None
def process_chunk(file):
with open(file, 'r') as read_obj, open(f"{file}_done.csv", 'w') as write_obj:
# pass the file object to reader() to get the reader object
csv_reader = csv.reader(read_obj)
csv_writer = csv.writer(write_obj)
# Iterate over each row in the csv using reader object
count=0
for row in csv_reader:
# row variable is a list that represents a row in csv
if row[2] and row[3]:
country = getCountry(float(row[2]), float(row[3]))
row.append(country)
csv_writer.writerow(row)
count+=1
if count%100==0:
print(f"File {file} progress: {count}/100000")
print(f"Processing {file} terminated")
allfiles = [join("q1a_latlon_split", f) for f in listdir("q1a_latlon_split") if isfile(join("q1a_latlon_split", f))]
with Pool(32) as p:
p.map(process_chunk, allfiles)
| en | 0.846803 | Wrapper for ogr point Coordinates are in degrees Wrapper for ogr country shape. Not meant to be instantiated directly. Checks given gps-incoming coordinates for country. Output is either country shape index or None # nothing found # pass the file object to reader() to get the reader object # Iterate over each row in the csv using reader object # row variable is a list that represents a row in csv | 2.739059 | 3 |
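A minimal sketch of the country lookup in the record above on a single coordinate pair; the coordinates are illustrative, and the ISO code returned depends entirely on the countries.json layer loaded at import time.

# Latitude/longitude of a point in central France (illustrative values).
iso = getCountry(46.2, 2.2)
print(iso)  # expected to print the ISO_A3 code for France ("FRA"), or None if no polygon matches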
ai-experiments/sudoku/rdisplay.py | Henchel-Santillan/open-ai | 0 | 8441 | import cv2
import numpy as np
def process_core(image):
'''
Returns an inverted preprocessed binary image, with noise
reduction achieved with greyscaling, Gaussian Blur, Otsu's Threshold, and
an open morph.
'''
#apply greyscaling, Gaussian Blur, and Otsu's Threshold
greyscale = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
blur = cv2.GaussianBlur(greyscale, (3, 3), 0)
threshold = cv2.threshold(blur, 0, 255, cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)[1]
#apply an open morph to invert image to remove noise
kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (3, 3))
invert = 255 - cv2.morphologyEx(threshold, cv2.MORPH_OPEN, kernel, iterations=1)
return invert
def find_houghlines(image, width, height):
hough_lines = None
lines = cv2.HoughLinesP(image, 1, np.pi/180, 50, minLineLength=50, maxLineGap=5)
#generates blank black image with single color layer
if lines is not None and len(lines) != 0:
hough_lines = np.zeros((height, width), dtype=np.uint8)
for line in lines:
x1, y1, x2, y2 = line[0]
cv2.line(hough_lines, (x1, y1), (x2, y2), (255, 255, 255), 2)
return hough_lines
def find_bounds(image):
rect_bounds = None
#Run contour recognition
contours, _ = cv2.findContours(image, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
#Take list of sorted contours by largest area to smallest area
#If at least one contour is identified, can process visual approx. of contour bounds
if len(sorted(contours, key=cv2.contourArea, reverse=True)) > 0:
contour_bounds = None
#Pre-determined image size factor constant
SFACTOR = 20
for contour in contours:
#Minimum intended size of a single cell is not reached, likely a cutoff, not worth approx.
            if (image.shape[0] * image.shape[1]) / SFACTOR > cv2.contourArea(contour):
break
approximation = cv2.approxPolyDP(contour, cv2.arcLength(contour, True), True)
#This means that the approximated polygon is a quad
if len(approximation) == 4:
contour_bounds = approximation
break
if contour_bounds is not None:
rect_bounds = np.zeros((4, 2), dtype=np.float32)
corners = contour_bounds.reshape(-1, 2)
rect_bounds[0] = corners[np.argmin(contour_bounds.sum(axis=1))]
rect_bounds[2] = corners[np.argmax(contour_bounds.sum(axis=1))]
rect_bounds[1] = corners[np.argmin(np.diff(corners, axis=1))]
rect_bounds[3] = corners[np.argmax(np.diff(corners, axis=1))]
return rect_bounds
#Transform the perspective to render as if looking down on paper (top-down view)
def transform(image, perspective):
pass
#Process the grid based on expected clean binary image input
def process_grid(image, width, height):
grid = None
detected = False
hough_lines = find_houghlines(image, width, height)
| import cv2
import numpy as np
def process_core(image):
'''
Returns an inverted preprocessed binary image, with noise
reduction achieved with greyscaling, Gaussian Blur, Otsu's Threshold, and
an open morph.
'''
#apply greyscaling, Gaussian Blur, and Otsu's Threshold
greyscale = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
blur = cv2.GaussianBlur(greyscale, (3, 3), 0)
threshold = cv2.threshold(blur, 0, 255, cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)[1]
#apply an open morph to invert image to remove noise
kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (3, 3))
invert = 255 - cv2.morphologyEx(threshold, cv2.MORPH_OPEN, kernel, iterations=1)
return invert
def find_houghlines(image, width, height):
hough_lines = None
lines = cv2.HoughLinesP(image, 1, np.pi/180, 50, minLineLength=50, maxLineGap=5)
#generates blank black image with single color layer
if lines is not None and len(lines) != 0:
hough_lines = np.zeros((height, width), dtype=np.uint8)
for line in lines:
x1, y1, x2, y2 = line[0]
cv2.line(hough_lines, (x1, y1), (x2, y2), (255, 255, 255), 2)
return hough_lines
def find_bounds(image):
rect_bounds = None
#Run contour recognition
contours, _ = cv2.findContours(image, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
#Take list of sorted contours by largest area to smallest area
#If at least one contour is identified, can process visual approx. of contour bounds
if len(sorted(contours, key=cv2.contourArea, reverse=True)) > 0:
contour_bounds = None
#Pre-determined image size factor constant
SFACTOR = 20
for contour in contours:
#Minimum intended size of a single cell is not reached, likely a cutoff, not worth approx.
if (image[0] * image[1]) / SFACTOR > cv2.contourArea(contour):
break
approximation = cv2.approxPolyDP(contour, cv2.arcLength(contour, True), True)
#This means that the approximated polygon is a quad
if len(approximation) == 4:
contour_bounds = approximation
break
if contour_bounds is not None:
rect_bounds = np.zeros((4, 2), dtype=np.float32)
corners = contour_bounds.reshape(-1, 2)
rect_bounds[0] = corners[np.argmin(contour_bounds.sum(axis=1))]
rect_bounds[2] = corners[np.argmax(contour_bounds.sum(axis=1))]
rect_bounds[1] = corners[np.argmin(np.diff(corners, axis=1))]
rect_bounds[3] = corners[np.argmax(np.diff(corners, axis=1))]
return rect_bounds
#Transform the perspective to render as if looking down on paper (top-down view)
def transform(image, perspective):
pass
#Process the grid based on expected clean binary image input
def process_grid(image, width, height):
grid = None
detected = False
hough_lines = find_houghlines(image, width, height)
| en | 0.88194 | Returns an inverted preprocessed binary image, with noise reduction achieved with greyscaling, Gaussian Blur, Otsu's Threshold, and an open morph. #apply greyscaling, Gaussian Blur, and Otsu's Threshold #apply an open morph to invert image to remove noise #generates blank black image with single color layer #Run contour recognition #Take list of sorted contours by largest area to smallest area #If at least one contour is identified, can process visual approx. of contour bounds #Pre-determined image size factor constant #Minimum intended size of a single cell is not reached, likely a cutoff, not worth approx. #This means that the approximated polygon is a quad #Transform the perspective to render as if looking down on paper (top-down view) #Process the grid based on expected clean binary image input | 3.096577 | 3 |
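A short sketch of how the preprocessing helpers in the record above would be chained on an input photo; the file name is a placeholder, and process_grid is still a stub in this file, so only the earlier stages are exercised.

image = cv2.imread("sudoku.jpg")      # placeholder path
binary = process_core(image)          # inverted, denoised binary image
corners = find_bounds(binary)         # 4x2 float32 array of grid corners, or None
height, width = binary.shape[:2]
lines = find_houghlines(binary, width, height)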
pythia/tasks/base_task.py | abhiskk/pythia | 2 | 8442 | <filename>pythia/tasks/base_task.py
# Copyright (c) Facebook, Inc. and its affiliates.
"""
Tasks sit above datasets in the hierarchy. To implement a new task, you need to
inherit from the ``BaseTask`` class and implement the ``_get_available_datasets``
and ``_preprocess_item`` functions
to complete the implementation. You can check the source to see if you need
to override any other methods like ``prepare_batch``.
Check example of ``VQATask`` here_.
Example::
from pythia.tasks.base_task import BaseTask
from pythia.common.registry import registry
@registry.register_task("my")
class MyTask(BaseTask):
def __init__(self):
super().__init__("my")
def _get_available_datasets(self):
return ["my"]
def _preprocess_item(self):
item.text = None
return item
.. _here: https://github.com/facebookresearch/pythia/blob/v0.3/pythia/tasks/vqa/vqa_task.py
"""
import sys
import numpy as np
from torch.utils.data import Dataset
from pythia.common.registry import registry
class BaseTask(Dataset):
"""
BaseTask that task classes need to inherit in order to create a new task.
Users must implement ``_get_available_datasets`` and ``_preprocess_item``
in order to complete implementation.
Args:
task_name (str): Name of the task with which it will be registered
"""
def __init__(self, task_name):
super(BaseTask, self).__init__()
self.task_name = task_name
self.writer = registry.get("writer")
def _process_datasets(self):
if "datasets" not in self.opts:
self.writer.write(
"No datasets attribute present for task: %s."
" Defaulting to all" % (self.task_name),
"warning",
)
datasets = "all"
else:
datasets = self.opts["datasets"]
if datasets is None or datasets == "all":
datasets = self._get_available_datasets()
if type(datasets) == str:
datasets = list(map(lambda x: x.strip(), datasets.split(",")))
if len(datasets) == 0 and datasets[0] == "all":
datasets = self._get_available_datasets()
self.given_datasets = datasets
def load(self, **opts):
self.opts = opts
self._process_datasets()
self.datasets = []
self.builders = []
available_datasets = self._get_available_datasets()
self.total_length = 0
self.per_dataset_lengths = []
self.num_datasets = 0
for dataset in self.given_datasets:
if dataset in available_datasets:
builder_class = registry.get_builder_class(dataset)
if builder_class is None:
print("No builder class found for %s." % dataset)
continue
builder_instance = builder_class()
if dataset in self.opts["dataset_attributes"]:
attributes = self.opts["dataset_attributes"][dataset]
else:
self.writer.write(
"Dataset %s is missing from "
"dataset_attributes in config." % dataset,
"error",
)
sys.exit(1)
dataset_type = self.opts.get("dataset_type", "train")
builder_instance.build(dataset_type, attributes)
dataset_instance = builder_instance.load(dataset_type, attributes)
self.builders.append(builder_instance)
self.datasets.append(dataset_instance)
self.per_dataset_lengths.append(len(dataset_instance))
self.total_length += len(dataset_instance)
else:
print(
"Dataset %s is not a valid dataset for task %s. Skipping"
% (dataset, self.task_name)
)
self.num_datasets = len(self.datasets)
self.dataset_probablities = [1 for _ in range(self.num_datasets)]
sampling = self.opts.get("dataset_size_proportional_sampling", None)
if sampling is True:
self.dataset_probablities = self.per_dataset_lengths[:]
self.dataset_probablities = [
prob / self.total_length for prob in self.dataset_probablities
]
self.change_dataset()
def _get_available_datasets(self):
"""Set available datasets for this task here.
Override in your child task class
Temporary solution, later we will use decorators to easily register
datasets with a task
Returns:
List - List of available datasets for this particular task
"""
return []
def get_datasets(self):
return self.datasets
def __len__(self):
return self.total_length
def __getitem__(self, idx):
idx = idx % self.per_dataset_lengths[self.dataset_choice]
item = self.chosen_dataset[idx]
return self._preprocess_item(item)
def change_dataset(self):
self.dataset_choice = np.random.choice(
self.num_datasets, 1, p=self.dataset_probablities
)[0]
self.chosen_dataset = self.datasets[self.dataset_choice]
def verbose_dump(self, *args, **kwargs):
self.chosen_dataset.verbose_dump(*args, **kwargs)
def prepare_batch(self, batch):
return self.chosen_dataset.prepare_batch(batch)
def _preprocess_item(self, item):
"""Preprocess an item to be returned from __getitem__.
Override in your child task class, so you have control on what you are
returning
Args:
item (Sample): Sample returned by a particular dataset
Returns:
Sample: Preprocessed item
"""
raise NotImplementedError(
"This task doesn't implement preprocess_item" " method"
)
def update_registry_for_model(self, config):
"""
Use this if there is some specific configuration required by model
which must be inferred at runtime.
"""
for builder in self.builders:
builder.update_registry_for_model(config)
def init_args(self, parser):
parser.add_argument_group("General Task Arguments")
parser.add_argument(
"-dsp",
"--dataset_size_proportional_sampling",
type=bool,
default=0,
help="Pass if you want to sample from"
" dataset according to its size. Default: Equal "
" weighted sampling",
)
# TODO: Figure out later if we want to init args from datasets
# self._init_args(parser)
def _init_args(self, parser):
"""Override this function to add extra parameters to
parser in your child task class.
Parameters
----------
parser : ArgumentParser
Original parser object passed from the higher level classes like
trainer
Returns
-------
type
Description of returned object.
"""
for builder in self.builders:
builder.init_args(parser)
def clean_config(self, config):
"""
Override this in case you want to clean the config you updated earlier
in update_registry_for_model
"""
return config
| <filename>pythia/tasks/base_task.py
# Copyright (c) Facebook, Inc. and its affiliates.
"""
Tasks come above datasets in hierarchy level. In case you want to
implement a new task, you need to inherit ``BaseTask`` class. You need
to implement ``_get_available_datasets`` and ``_preprocess_item`` functions
to complete the implementation. You can check the source to see if you need
to override any other methods like ``prepare_batch``.
Check example of ``VQATask`` here_.
Example::
from pythia.tasks.base_task import BaseTask
from pythia.common.registry import registry
@registry.register_task("my")
class MyTask(BaseTask):
def __init__(self):
super().__init__("my")
def _get_available_datasets(self):
return ["my"]
def _preprocess_item(self):
item.text = None
return item
.. _here: https://github.com/facebookresearch/pythia/blob/v0.3/pythia/tasks/vqa/vqa_task.py
"""
import sys
import numpy as np
from torch.utils.data import Dataset
from pythia.common.registry import registry
class BaseTask(Dataset):
"""
BaseTask that task classes need to inherit in order to create a new task.
Users must implement ``_get_available_datasets`` and ``_preprocess_item``
in order to complete implementation.
Args:
task_name (str): Name of the task with which it will be registered
"""
def __init__(self, task_name):
super(BaseTask, self).__init__()
self.task_name = task_name
self.writer = registry.get("writer")
def _process_datasets(self):
if "datasets" not in self.opts:
self.writer.write(
"No datasets attribute present for task: %s."
" Defaulting to all" % (self.task_name),
"warning",
)
datasets = "all"
else:
datasets = self.opts["datasets"]
if datasets is None or datasets == "all":
datasets = self._get_available_datasets()
if type(datasets) == str:
datasets = list(map(lambda x: x.strip(), datasets.split(",")))
if len(datasets) == 0 and datasets[0] == "all":
datasets = self._get_available_datasets()
self.given_datasets = datasets
def load(self, **opts):
self.opts = opts
self._process_datasets()
self.datasets = []
self.builders = []
available_datasets = self._get_available_datasets()
self.total_length = 0
self.per_dataset_lengths = []
self.num_datasets = 0
for dataset in self.given_datasets:
if dataset in available_datasets:
builder_class = registry.get_builder_class(dataset)
if builder_class is None:
print("No builder class found for %s." % dataset)
continue
builder_instance = builder_class()
if dataset in self.opts["dataset_attributes"]:
attributes = self.opts["dataset_attributes"][dataset]
else:
self.writer.write(
"Dataset %s is missing from "
"dataset_attributes in config." % dataset,
"error",
)
sys.exit(1)
dataset_type = self.opts.get("dataset_type", "train")
builder_instance.build(dataset_type, attributes)
dataset_instance = builder_instance.load(dataset_type, attributes)
self.builders.append(builder_instance)
self.datasets.append(dataset_instance)
self.per_dataset_lengths.append(len(dataset_instance))
self.total_length += len(dataset_instance)
else:
print(
"Dataset %s is not a valid dataset for task %s. Skipping"
% (dataset, self.task_name)
)
self.num_datasets = len(self.datasets)
self.dataset_probablities = [1 for _ in range(self.num_datasets)]
sampling = self.opts.get("dataset_size_proportional_sampling", None)
if sampling is True:
self.dataset_probablities = self.per_dataset_lengths[:]
self.dataset_probablities = [
prob / self.total_length for prob in self.dataset_probablities
]
self.change_dataset()
def _get_available_datasets(self):
"""Set available datasets for this task here.
Override in your child task class
Temporary solution, later we will use decorators to easily register
datasets with a task
Returns:
List - List of available datasets for this particular task
"""
return []
def get_datasets(self):
return self.datasets
def __len__(self):
return self.total_length
def __getitem__(self, idx):
idx = idx % self.per_dataset_lengths[self.dataset_choice]
item = self.chosen_dataset[idx]
return self._preprocess_item(item)
def change_dataset(self):
self.dataset_choice = np.random.choice(
self.num_datasets, 1, p=self.dataset_probablities
)[0]
self.chosen_dataset = self.datasets[self.dataset_choice]
def verbose_dump(self, *args, **kwargs):
self.chosen_dataset.verbose_dump(*args, **kwargs)
def prepare_batch(self, batch):
return self.chosen_dataset.prepare_batch(batch)
def _preprocess_item(self, item):
"""Preprocess an item to be returned from __getitem__.
Override in your child task class, so you have control on what you are
returning
Args:
item (Sample): Sample returned by a particular dataset
Returns:
Sample: Preprocessed item
"""
raise NotImplementedError(
"This task doesn't implement preprocess_item" " method"
)
def update_registry_for_model(self, config):
"""
Use this if there is some specific configuration required by model
which must be inferred at runtime.
"""
for builder in self.builders:
builder.update_registry_for_model(config)
def init_args(self, parser):
parser.add_argument_group("General Task Arguments")
parser.add_argument(
"-dsp",
"--dataset_size_proportional_sampling",
type=bool,
default=0,
help="Pass if you want to sample from"
" dataset according to its size. Default: Equal "
" weighted sampling",
)
# TODO: Figure out later if we want to init args from datasets
# self._init_args(parser)
def _init_args(self, parser):
"""Override this function to add extra parameters to
parser in your child task class.
Parameters
----------
parser : ArgumentParser
Original parser object passed from the higher level classes like
trainer
Returns
-------
type
Description of returned object.
"""
for builder in self.builders:
builder.init_args(parser)
def clean_config(self, config):
"""
Override this in case you want to clean the config you updated earlier
in update_registry_for_model
"""
return config
| en | 0.679702 | # Copyright (c) Facebook, Inc. and its affiliates. Tasks come above datasets in hierarchy level. In case you want to implement a new task, you need to inherit ``BaseTask`` class. You need to implement ``_get_available_datasets`` and ``_preprocess_item`` functions to complete the implementation. You can check the source to see if you need to override any other methods like ``prepare_batch``. Check example of ``VQATask`` here_. Example:: from pythia.tasks.base_task import BaseTask from pythia.common.registry import registry @registry.register_task("my") class MyTask(BaseTask): def __init__(self): super().__init__("my") def _get_available_datasets(self): return ["my"] def _preprocess_item(self): item.text = None return item .. _here: https://github.com/facebookresearch/pythia/blob/v0.3/pythia/tasks/vqa/vqa_task.py BaseTask that task classes need to inherit in order to create a new task. Users must implement ``_get_available_datasets`` and ``_preprocess_item`` in order to complete implementation. Args: task_name (str): Name of the task with which it will be registered Set available datasets for this task here. Override in your child task class Temporary solution, later we will use decorators to easily register datasets with a task Returns: List - List of available datasets for this particular task Preprocess an item to be returned from __getitem__. Override in your child task class, so you have control on what you are returning Args: item (Sample): Sample returned by a particular dataset Returns: Sample: Preprocessed item Use this if there is some specific configuration required by model which must be inferred at runtime. # TODO: Figure out later if we want to init args from datasets # self._init_args(parser) Override this function to add extra parameters to parser in your child task class. Parameters ---------- parser : ArgumentParser Original parser object passed from the higher level classes like trainer Returns ------- type Description of returned object. Override this in case you want to clean the config you updated earlier in update_registry_for_model | 2.331426 | 2 |
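In the record above, load() converts per-dataset lengths into the sampling probabilities that change_dataset() feeds to np.random.choice when dataset_size_proportional_sampling is enabled. A standalone numeric sketch of just that step; the dataset sizes are made up for illustration.

import numpy as np

per_dataset_lengths = [8000, 2000]                # two hypothetical datasets
total_length = sum(per_dataset_lengths)
dataset_probabilities = [length / total_length for length in per_dataset_lengths]   # [0.8, 0.2]
# change_dataset() draws one dataset index with these weights before serving a batch
dataset_choice = np.random.choice(len(per_dataset_lengths), 1, p=dataset_probabilities)[0]
print(dataset_probabilities, dataset_choice)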
src/gauss_n.py | Konstantysz/InterGen | 0 | 8443 | <reponame>Konstantysz/InterGen
from numba import jit
import numpy as np
@jit(nopython=True, parallel=True)
def gauss_n(X, Y, mu_x = 0.0, mu_y = 0.0, amp = 1.0, sigma = 3.0):
'''
Function that generates 2D discrete gaussian distribution.
Boosted with Numba: works in C and with parallel computing.
Parameters
----------
X : numpy.ndarray
meshgrided values in X axis
Y : numpy.ndarray
meshgrided values in Y axis
mu_x : float
Displacement in X axis
mu_y : float
Displacement in Y axis
amp : float
Amplitude of gaussian distribution
sigma : float
Std dev of gaussian distribution
Returns:
----------
val : numpy.ndarray
matrix of 2D gaussian distribution
'''
    exponent = ((X - mu_x)**2 + (Y - mu_y)**2) / (2 * sigma**2)  # divide by 2*sigma**2 so sigma acts as the documented std dev
val = (amp*np.exp(-exponent))
return val | from numba import jit
import numpy as np
@jit(nopython=True, parallel=True)
def gauss_n(X, Y, mu_x = 0.0, mu_y = 0.0, amp = 1.0, sigma = 3.0):
'''
Function that generates 2D discrete gaussian distribution.
Boosted with Numba: works in C and with parallel computing.
Parameters
----------
X : numpy.ndarray
meshgrided values in X axis
Y : numpy.ndarray
meshgrided values in Y axis
mu_x : float
Displacement in X axis
mu_y : float
Displacement in Y axis
amp : float
Amplitude of gaussian distribution
sigma : float
Std dev of gaussian distribution
Returns:
----------
val : numpy.ndarray
matrix of 2D gaussian distribution
'''
    exponent = ((X - mu_x)**2 + (Y - mu_y)**2) / (2 * sigma**2)  # divide by 2*sigma**2 so sigma acts as the documented std dev
val = (amp*np.exp(-exponent))
return val | en | 0.61707 | Function that generates 2D discrete gaussian distribution. Boosted with Numba: works in C and with parallel computing. Parameters ---------- X : numpy.ndarray meshgrided values in X axis Y : numpy.ndarray meshgrided values in Y axis mu_x : float Displacement in X axis mu_y : float Displacement in Y axis amp : float Amplitude of gaussian distribution sigma : float Std dev of gaussian distribution Returns: ---------- val : numpy.ndarray matrix of 2D gaussian distribution | 3.176053 | 3 |
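A minimal call of the gauss_n function above, showing the meshgrid inputs its docstring expects; the grid extent and parameter values are arbitrary examples, and the import assumes the src/gauss_n.py layout of the record.

import numpy as np
from gauss_n import gauss_n    # assuming src/ is on sys.path, per the record's layout

x = np.linspace(-10.0, 10.0, 256)
y = np.linspace(-10.0, 10.0, 256)
X, Y = np.meshgrid(x, y)                                    # the "meshgrided" axes
Z = gauss_n(X, Y, mu_x=2.0, mu_y=-1.0, amp=1.0, sigma=3.0)
print(Z.shape, float(Z.max()))                              # (256, 256), peak value near amp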
satori.core/satori/core/export/pc.py | Cloud11665/satori-git | 4 | 8444 | <reponame>Cloud11665/satori-git<gh_stars>1-10
# vim:ts=4:sts=4:sw=4:expandtab
from token import token_container
from satori.core.export.type_helpers import DefineException
AccessDenied = DefineException('AccessDenied', 'You don\'t have rights to call this procedure')
class PCDeny(object):
def __call__(__pc__self, **kwargs):
return False
def __str__(__pc__self):
        return 'impossible'
class PCPermit(object):
def __call__(__pc__self, **kwargs):
return True
def __str__(__pc__self):
return 'none'
class PCArg(object):
def __init__(__pc__self, name, perm):
super(PCArg, __pc__self).__init__()
__pc__self.name = name
__pc__self.perm = perm
def __call__(__pc__self, **kwargs):
return Privilege.demand(kwargs[__pc__self.name], __pc__self.perm)
def __str__(__pc__self):
return '{0} on {1}'.format(__pc__self.perm, __pc__self.name)
class PCArgField(object):
def __init__(__pc__self, name, field_name, perm):
super(PCArgField, __pc__self).__init__()
__pc__self.name = name
__pc__self.field_name = field_name
__pc__self.perm = perm
def __call__(__pc__self, **kwargs):
field = getattr(kwargs[__pc__self.name], __pc__self.field_name, None)
if field is None:
return False
return Privilege.demand(field, __pc__self.perm)
def __str__(__pc__self):
return '{0} on {1}.{2}'.format(__pc__self.perm, __pc__self.name, __pc__self.field_name)
class PCGlobal(object):
def __init__(__pc__self, perm):
super(PCGlobal, __pc__self).__init__()
__pc__self.perm = perm
def __call__(__pc__self, **kwargs):
return Privilege.global_demand(__pc__self.perm)
def __str__(__pc__self):
return 'global {0}'.format(__pc__self.perm)
class PCAnd(object):
def __init__(__pc__self, *subs):
super(PCAnd, __pc__self).__init__()
__pc__self.subs = subs
def __call__(__pc__self, **kwargs):
return all(x(**kwargs) for x in __pc__self.subs)
def __str__(__pc__self):
return '(' + ') and ('.join(str(p) for p in __pc__self.subs) + ')'
class PCOr(object):
def __init__(__pc__self, *subs):
super(PCOr, __pc__self).__init__()
__pc__self.subs = subs
def __call__(__pc__self, **kwargs):
return any(x(**kwargs) for x in __pc__self.subs)
def __str__(__pc__self):
return '(' + ') or ('.join(str(p) for p in __pc__self.subs) + ')'
class PCEach(object):
def __init__(__pc__self, name, sub):
super(PCEach, __pc__self).__init__()
__pc__self.name = name
__pc__self.sub = sub
def __call__(__pc__self, **kwargs):
if kwargs[__pc__self.name] is None:
return True
return all(__pc__self.sub(item=x) for x in kwargs[__pc__self.name])
def __str__(__pc__self):
return 'for every item in {0}: {1}'.format(__pc__self.name, str(__pc__self.sub))
class PCEachKey(object):
def __init__(__pc__self, name, sub):
super(PCEachKey, __pc__self).__init__()
__pc__self.name = name
__pc__self.sub = sub
def __call__(__pc__self, **kwargs):
if kwargs[__pc__self.name] is None:
return True
return all(__pc__self.sub(item=x) for x in kwargs[__pc__self.name].keys())
def __str__(__pc__self):
return 'for every item in {0}.keys(): {1}'.format(__pc__self.name, str(__pc__self.sub))
class PCEachValue(object):
def __init__(__pc__self, name, sub):
super(PCEachValue, __pc__self).__init__()
__pc__self.name = name
__pc__self.sub = sub
def __call__(__pc__self, **kwargs):
if kwargs[__pc__self.name] is None:
return True
return all(__pc__self.sub(item=x) for x in kwargs[__pc__self.name].values())
def __str__(__pc__self):
return 'for every item in {0}.values(): {1}'.format(__pc__self.name, str(__pc__self.sub))
class PCTokenUser(object):
def __init__(__pc__self, name):
super(PCTokenUser, __pc__self).__init__()
__pc__self.name = name
def __call__(__pc__self, **kwargs):
return token_container.token.role and token_container.token.role.id == kwargs[__pc__self.name].id
def __str__(__pc__self):
return '{0} equals to calling user'.format(__pc__self.name)
class PCTokenIsUser(object):
def __init__(__pc__self):
super(PCTokenIsUser, __pc__self).__init__()
def __call__(__pc__self, **kwargs):
return token_container.token.user is not None
def __str__(__pc__self):
return 'calling role is user'
class PCTokenIsMachine(object):
def __init__(__pc__self):
super(PCTokenIsMachine, __pc__self).__init__()
def __call__(__pc__self, **kwargs):
return token_container.token.machine is not None
def __str__(__pc__self):
return 'calling role is machine'
class PCRawBlob(object):
def __init__(__pc__self, name):
super(PCRawBlob, __pc__self).__init__()
__pc__self.name = name
def __call__(__pc__self, **kwargs):
if kwargs[__pc__self.name].is_blob:
return Privilege.global_demand('RAW_BLOB')
else:
return True
def __str__(__pc__self):
return 'global RAW_BLOB if {0}.is_blob = True'.format(__pc__self.name)
def init():
global Privilege
from satori.core.models import Privilege
| # vim:ts=4:sts=4:sw=4:expandtab
from token import token_container
from satori.core.export.type_helpers import DefineException
AccessDenied = DefineException('AccessDenied', 'You don\'t have rights to call this procedure')
class PCDeny(object):
def __call__(__pc__self, **kwargs):
return False
def __str__(__pc__self):
        return 'impossible'
class PCPermit(object):
def __call__(__pc__self, **kwargs):
return True
def __str__(__pc__self):
return 'none'
class PCArg(object):
def __init__(__pc__self, name, perm):
super(PCArg, __pc__self).__init__()
__pc__self.name = name
__pc__self.perm = perm
def __call__(__pc__self, **kwargs):
return Privilege.demand(kwargs[__pc__self.name], __pc__self.perm)
def __str__(__pc__self):
return '{0} on {1}'.format(__pc__self.perm, __pc__self.name)
class PCArgField(object):
def __init__(__pc__self, name, field_name, perm):
super(PCArgField, __pc__self).__init__()
__pc__self.name = name
__pc__self.field_name = field_name
__pc__self.perm = perm
def __call__(__pc__self, **kwargs):
field = getattr(kwargs[__pc__self.name], __pc__self.field_name, None)
if field is None:
return False
return Privilege.demand(field, __pc__self.perm)
def __str__(__pc__self):
return '{0} on {1}.{2}'.format(__pc__self.perm, __pc__self.name, __pc__self.field_name)
class PCGlobal(object):
def __init__(__pc__self, perm):
super(PCGlobal, __pc__self).__init__()
__pc__self.perm = perm
def __call__(__pc__self, **kwargs):
return Privilege.global_demand(__pc__self.perm)
def __str__(__pc__self):
return 'global {0}'.format(__pc__self.perm)
class PCAnd(object):
def __init__(__pc__self, *subs):
super(PCAnd, __pc__self).__init__()
__pc__self.subs = subs
def __call__(__pc__self, **kwargs):
return all(x(**kwargs) for x in __pc__self.subs)
def __str__(__pc__self):
return '(' + ') and ('.join(str(p) for p in __pc__self.subs) + ')'
class PCOr(object):
def __init__(__pc__self, *subs):
super(PCOr, __pc__self).__init__()
__pc__self.subs = subs
def __call__(__pc__self, **kwargs):
return any(x(**kwargs) for x in __pc__self.subs)
def __str__(__pc__self):
return '(' + ') or ('.join(str(p) for p in __pc__self.subs) + ')'
class PCEach(object):
def __init__(__pc__self, name, sub):
super(PCEach, __pc__self).__init__()
__pc__self.name = name
__pc__self.sub = sub
def __call__(__pc__self, **kwargs):
if kwargs[__pc__self.name] is None:
return True
return all(__pc__self.sub(item=x) for x in kwargs[__pc__self.name])
def __str__(__pc__self):
return 'for every item in {0}: {1}'.format(__pc__self.name, str(__pc__self.sub))
class PCEachKey(object):
def __init__(__pc__self, name, sub):
super(PCEachKey, __pc__self).__init__()
__pc__self.name = name
__pc__self.sub = sub
def __call__(__pc__self, **kwargs):
if kwargs[__pc__self.name] is None:
return True
return all(__pc__self.sub(item=x) for x in kwargs[__pc__self.name].keys())
def __str__(__pc__self):
return 'for every item in {0}.keys(): {1}'.format(__pc__self.name, str(__pc__self.sub))
class PCEachValue(object):
def __init__(__pc__self, name, sub):
super(PCEachValue, __pc__self).__init__()
__pc__self.name = name
__pc__self.sub = sub
def __call__(__pc__self, **kwargs):
if kwargs[__pc__self.name] is None:
return True
return all(__pc__self.sub(item=x) for x in kwargs[__pc__self.name].values())
def __str__(__pc__self):
return 'for every item in {0}.values(): {1}'.format(__pc__self.name, str(__pc__self.sub))
class PCTokenUser(object):
def __init__(__pc__self, name):
super(PCTokenUser, __pc__self).__init__()
__pc__self.name = name
def __call__(__pc__self, **kwargs):
return token_container.token.role and token_container.token.role.id == kwargs[__pc__self.name].id
def __str__(__pc__self):
return '{0} equals to calling user'.format(__pc__self.name)
class PCTokenIsUser(object):
def __init__(__pc__self):
super(PCTokenIsUser, __pc__self).__init__()
def __call__(__pc__self, **kwargs):
return token_container.token.user is not None
def __str__(__pc__self):
return 'calling role is user'
class PCTokenIsMachine(object):
def __init__(__pc__self):
super(PCTokenIsMachine, __pc__self).__init__()
def __call__(__pc__self, **kwargs):
return token_container.token.machine is not None
def __str__(__pc__self):
return 'calling role is machine'
class PCRawBlob(object):
def __init__(__pc__self, name):
super(PCRawBlob, __pc__self).__init__()
__pc__self.name = name
def __call__(__pc__self, **kwargs):
if kwargs[__pc__self.name].is_blob:
return Privilege.global_demand('RAW_BLOB')
else:
return True
def __str__(__pc__self):
return 'global RAW_BLOB if {0}.is_blob = True'.format(__pc__self.name)
def init():
global Privilege
from satori.core.models import Privilege | ru | 0.270263 | # vim:ts=4:sts=4:sw=4:expandtab | 2.095348 | 2 |
appimagebuilder/orchestrator.py | AppImageCrafters/AppImageBuilder | 0 | 8445 | <reponame>AppImageCrafters/AppImageBuilder<filename>appimagebuilder/orchestrator.py
# Copyright 2021 <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
import os
import pathlib
from appimagebuilder.utils.finder import Finder
from appimagebuilder.context import AppInfo, Context, BundleInfo
from appimagebuilder.commands.apt_deploy import AptDeployCommand
from appimagebuilder.commands.create_appimage import CreateAppImageCommand
from appimagebuilder.commands.file_deploy import FileDeployCommand
from appimagebuilder.commands.pacman_deploy import PacmanDeployCommand
from appimagebuilder.commands.run_script import RunScriptCommand
from appimagebuilder.commands.run_test import RunTestCommand
from appimagebuilder.commands.setup_app_info import SetupAppInfoCommand
from appimagebuilder.commands.setup_runtime import SetupRuntimeCommand
from appimagebuilder.commands.setup_symlinks import SetupSymlinksCommand
from appimagebuilder.commands.deploy_record import (
WriteDeployRecordCommand,
)
from appimagebuilder.recipe.roamer import Roamer
class Orchestrator:
"""Transforms a recipe into a command list"""
def process(self, recipe: Roamer, args):
if recipe.version() == 1:
return self._prepare_commands_for_recipe_v1(args, recipe)
raise RuntimeError("Unknown recipe version: %s" % recipe.version())
def _prepare_commands_for_recipe_v1(self, args, recipe):
context = self._extract_v1_recipe_context(args, recipe)
commands = []
if not args.skip_script:
command = RunScriptCommand(context, recipe.script, "main script")
commands.append(command)
if not args.skip_build:
commands.extend(self._create_app_dir_commands(context, recipe))
if not args.skip_tests and recipe.AppDir.test:
command = RunTestCommand(context, recipe.AppDir.test)
commands.append(command)
if not args.skip_appimage:
command = CreateAppImageCommand(context, recipe)
commands.append(command)
return commands
def _create_app_dir_commands(self, context, recipe):
commands = []
commands.extend(self._create_deploy_commands(context, recipe))
commands.extend(self._create_setup_commands(context, recipe))
commands.append(WriteDeployRecordCommand(context))
return commands
def _create_deploy_commands(self, context, recipe):
commands = []
if recipe.AppDir.before_bundle:
command = RunScriptCommand(
context, recipe.AppDir.before_bundle, "before bundle script"
)
commands.append(command)
apt_section = recipe.AppDir.apt
if apt_section:
command = self._generate_apt_deploy_command(context, apt_section)
commands.append(command)
pacman_section = recipe.AppDir.pacman
if pacman_section:
command = self._generate_pacman_deploy_command(context, pacman_section)
commands.append(command)
files_section = recipe.AppDir.files
if files_section:
command = FileDeployCommand(
context,
files_section.include() or [],
files_section.exclude() or [],
)
commands.append(command)
if recipe.AppDir.after_bundle:
command = RunScriptCommand(
context, recipe.AppDir.after_bundle, "after bundle script"
)
commands.append(command)
return commands
def _create_setup_commands(self, context, recipe):
commands = []
if recipe.AppDir.before_runtime:
command = RunScriptCommand(
context, recipe.AppDir.before_runtime, "before runtime script"
)
commands.append(command)
finder = Finder(context.app_dir)
commands.append(SetupSymlinksCommand(context, recipe, finder))
commands.append(SetupRuntimeCommand(context, recipe, finder))
commands.append(SetupAppInfoCommand(context))
if recipe.AppDir.after_runtime:
command = RunScriptCommand(
context, recipe.AppDir.after_runtime, "after runtime script"
)
commands.append(command)
return commands
def _generate_apt_deploy_command(self, context, apt_section):
apt_archs = apt_section.arch()
if isinstance(apt_archs, str):
apt_archs = [apt_archs]
sources = []
keys = []
for item in apt_section.sources():
if "sourceline" in item:
sources.append(item["sourceline"])
if "key_url" in item:
keys.append(item["key_url"])
return AptDeployCommand(
context,
apt_section.include(),
apt_section.exclude() or [],
apt_archs,
sources,
keys,
apt_section.allow_unauthenticated() or False,
)
def _generate_pacman_deploy_command(self, context, pacman_section):
return PacmanDeployCommand(
context,
pacman_section.include(),
pacman_section.exclude(),
pacman_section["Architecture"](),
pacman_section.repositories(),
pacman_section.options(),
)
def _extract_v1_recipe_context(self, args, recipe):
app_dir_path = pathlib.Path(args.appdir).absolute()
build_dir_path = pathlib.Path(args.build_dir).absolute()
app_info_section = recipe.AppDir.app_info
app_info = AppInfo(
app_info_section.id(),
app_info_section.name() or app_info_section.id(),
app_info_section.icon() or "application-vnd.appimage",
app_info_section.version(),
app_info_section.exec(),
app_info_section.exec_args(),
)
bundle_info = BundleInfo(
app_dir=app_dir_path,
app_info=app_info,
update_string=recipe.AppImage["update-information"]() or "guess",
runtime_arch=recipe.AppImage.arch(),
sign_key=recipe.AppImage["sign-key"]() or None,
file_name=recipe.AppImage["file_name"] or None,
)
return Context(
recipe=recipe,
recipe_path=pathlib.Path(args.recipe),
app_info=app_info,
bundle_info=bundle_info,
app_dir=app_dir_path,
build_dir=build_dir_path,
)
| # Copyright 2021 <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
import os
import pathlib
from appimagebuilder.utils.finder import Finder
from appimagebuilder.context import AppInfo, Context, BundleInfo
from appimagebuilder.commands.apt_deploy import AptDeployCommand
from appimagebuilder.commands.create_appimage import CreateAppImageCommand
from appimagebuilder.commands.file_deploy import FileDeployCommand
from appimagebuilder.commands.pacman_deploy import PacmanDeployCommand
from appimagebuilder.commands.run_script import RunScriptCommand
from appimagebuilder.commands.run_test import RunTestCommand
from appimagebuilder.commands.setup_app_info import SetupAppInfoCommand
from appimagebuilder.commands.setup_runtime import SetupRuntimeCommand
from appimagebuilder.commands.setup_symlinks import SetupSymlinksCommand
from appimagebuilder.commands.deploy_record import (
WriteDeployRecordCommand,
)
from appimagebuilder.recipe.roamer import Roamer
class Orchestrator:
"""Transforms a recipe into a command list"""
def process(self, recipe: Roamer, args):
if recipe.version() == 1:
return self._prepare_commands_for_recipe_v1(args, recipe)
raise RuntimeError("Unknown recipe version: %s" % recipe.version())
def _prepare_commands_for_recipe_v1(self, args, recipe):
context = self._extract_v1_recipe_context(args, recipe)
commands = []
if not args.skip_script:
command = RunScriptCommand(context, recipe.script, "main script")
commands.append(command)
if not args.skip_build:
commands.extend(self._create_app_dir_commands(context, recipe))
if not args.skip_tests and recipe.AppDir.test:
command = RunTestCommand(context, recipe.AppDir.test)
commands.append(command)
if not args.skip_appimage:
command = CreateAppImageCommand(context, recipe)
commands.append(command)
return commands
def _create_app_dir_commands(self, context, recipe):
commands = []
commands.extend(self._create_deploy_commands(context, recipe))
commands.extend(self._create_setup_commands(context, recipe))
commands.append(WriteDeployRecordCommand(context))
return commands
def _create_deploy_commands(self, context, recipe):
commands = []
if recipe.AppDir.before_bundle:
command = RunScriptCommand(
context, recipe.AppDir.before_bundle, "before bundle script"
)
commands.append(command)
apt_section = recipe.AppDir.apt
if apt_section:
command = self._generate_apt_deploy_command(context, apt_section)
commands.append(command)
pacman_section = recipe.AppDir.pacman
if pacman_section:
command = self._generate_pacman_deploy_command(context, pacman_section)
commands.append(command)
files_section = recipe.AppDir.files
if files_section:
command = FileDeployCommand(
context,
files_section.include() or [],
files_section.exclude() or [],
)
commands.append(command)
if recipe.AppDir.after_bundle:
command = RunScriptCommand(
context, recipe.AppDir.after_bundle, "after bundle script"
)
commands.append(command)
return commands
def _create_setup_commands(self, context, recipe):
commands = []
if recipe.AppDir.before_runtime:
command = RunScriptCommand(
context, recipe.AppDir.before_runtime, "before runtime script"
)
commands.append(command)
finder = Finder(context.app_dir)
commands.append(SetupSymlinksCommand(context, recipe, finder))
commands.append(SetupRuntimeCommand(context, recipe, finder))
commands.append(SetupAppInfoCommand(context))
if recipe.AppDir.after_runtime:
command = RunScriptCommand(
context, recipe.AppDir.after_runtime, "after runtime script"
)
commands.append(command)
return commands
def _generate_apt_deploy_command(self, context, apt_section):
apt_archs = apt_section.arch()
if isinstance(apt_archs, str):
apt_archs = [apt_archs]
sources = []
keys = []
for item in apt_section.sources():
if "sourceline" in item:
sources.append(item["sourceline"])
if "key_url" in item:
keys.append(item["key_url"])
return AptDeployCommand(
context,
apt_section.include(),
apt_section.exclude() or [],
apt_archs,
sources,
keys,
apt_section.allow_unauthenticated() or False,
)
def _generate_pacman_deploy_command(self, context, pacman_section):
return PacmanDeployCommand(
context,
pacman_section.include(),
pacman_section.exclude(),
pacman_section["Architecture"](),
pacman_section.repositories(),
pacman_section.options(),
)
def _extract_v1_recipe_context(self, args, recipe):
app_dir_path = pathlib.Path(args.appdir).absolute()
build_dir_path = pathlib.Path(args.build_dir).absolute()
app_info_section = recipe.AppDir.app_info
app_info = AppInfo(
app_info_section.id(),
app_info_section.name() or app_info_section.id(),
app_info_section.icon() or "application-vnd.appimage",
app_info_section.version(),
app_info_section.exec(),
app_info_section.exec_args(),
)
bundle_info = BundleInfo(
app_dir=app_dir_path,
app_info=app_info,
update_string=recipe.AppImage["update-information"]() or "guess",
runtime_arch=recipe.AppImage.arch(),
sign_key=recipe.AppImage["sign-key"]() or None,
file_name=recipe.AppImage["file_name"] or None,
)
return Context(
recipe=recipe,
recipe_path=pathlib.Path(args.recipe),
app_info=app_info,
bundle_info=bundle_info,
app_dir=app_dir_path,
build_dir=build_dir_path,
) | en | 0.883429 | # Copyright 2021 <NAME> # # Permission is hereby granted, free of charge, to any person obtaining a # copy of this software and associated documentation files (the "Software"), # to deal in the Software without restriction, including without limitation the # rights to use, copy, modify, merge, publish, distribute, sublicense, and/or # sell copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. Transforms a recipe into a command list | 1.878579 | 2 |
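Orchestrator.process() only reads a few attributes off args — the recipe path, appdir, build_dir and the four skip_* flags — plus a Roamer-wrapped recipe. A rough driving sketch under those assumptions; how the recipe is loaded into a Roamer and how the returned command list is executed both live elsewhere in appimagebuilder and are only hinted at in comments.

import argparse
# from appimagebuilder.orchestrator import Orchestrator    # assumed import path
# from appimagebuilder.recipe.roamer import Roamer          # recipe wrapper used above

args = argparse.Namespace(
    recipe="AppImageBuilder.yml",       # example file name
    appdir="AppDir",
    build_dir="appimage-build",
    skip_script=False,
    skip_build=False,
    skip_tests=False,
    skip_appimage=False,
)
# recipe = ...load the YAML and wrap it in a Roamer (not shown here)...
# commands = Orchestrator().process(recipe, args)   # returns the ordered command list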
API_Collections/googlemap_geocode.py | Musketeer-Liu/Auto_Coding_Tools_Box | 0 | 8446 | <gh_stars>0
# python3 --> Enter Python Shell
# from geocode import getGeocodeLocation
# getGeocodeLocation("Place you wanto to query")
import httplib2
import json
def getGeocodeLocation(inputString):
google_api_key = "<KEY>"
locatationString = inputString.replace(" ", "+")
url = ('https://maps.googleapis.com/maps/api/geocode/json?address=%s&key=%s'%(locatationString, google_api_key))
h = httplib2.Http()
response, content = h.request(url, 'GET')
result = json.loads(content)
latitude = result['results'][0]['geometry']['location']['lat']
longitude = result['results'][0]['geometry']['location']['lng']
# print(latitude, longitude)
return (latitude, longitude)
# print("response header: %s \n \n" % response)
# return result
# san_francisco = getGeocodeLocation("San Francisco, CA")
# response header: {'content-type': 'application/json; charset=UTF-8', 'date': 'Sat, 27 Jan 2018 06:25:35 GMT', 'expires': 'Sun, 28 Jan 2018 06:25:35 GMT', 'cache-control': 'public, max-age=86400', 'vary': 'Accept-Language', 'access-control-allow-origin': '*', 'server': 'mafe', 'content-length': '1749', 'x-xss-protection': '1; mode=block', 'x-frame-options': 'SAMEORIGIN', 'alt-svc': 'hq=":443"; ma=2592000; quic=51303431; quic=51303339; quic=51303338; quic=51303337; quic=51303335,quic=":443"; ma=2592000; v="41,39,38,37,35"', 'status': '200', '-content-encoding': 'gzip', 'content-location': 'https://maps.googleapis.com/maps/api/geocode/json?address=San+Francisco,+CA&key=<KEY>'}
# san_francisco
# {'results': [{'address_components': [{'long_name': 'San Francisco', 'short_name': 'SF', 'types': ['locality', 'political']}, {'long_name': 'San Francisco County', 'short_name': 'San Francisco County', 'types': ['administrative_area_level_2', 'political']}, {'long_name': 'California', 'short_name': 'CA', 'types': ['administrative_area_level_1', 'political']}, {'long_name': 'United States', 'short_name': 'US', 'types': ['country', 'political']}], 'formatted_address': 'San Francisco, CA, USA', 'geometry': {'bounds': {'northeast': {'lat': 37.9298239, 'lng': -122.28178}, 'southwest': {'lat': 37.6398299, 'lng': -123.173825}}, 'location': {'lat': 37.7749295, 'lng': -122.4194155}, 'location_type': 'APPROXIMATE', 'viewport': {'northeast': {'lat': 37.812,'lng': -122.3482}, 'southwest': {'lat': 37.70339999999999, 'lng': -122.527}}}, 'place_id': 'ChIJIQBpAG2ahYAR_6128GcTUEo', 'types': ['locality', 'political']}], 'status': 'OK'}
# san_francisco.keys()
# dict_keys(['results', 'status'])
# san_francisco['results'][0]['geometry']['location']['lat']
# 37.7749295
# san_francisco['results'][0]['geometry']['location']['lng']
# -122.4194155
| # python3 --> Enter Python Shell
# from geocode import getGeocodeLocation
# getGeocodeLocation("Place you wanto to query")
import httplib2
import json
def getGeocodeLocation(inputString):
google_api_key = "<KEY>"
locatationString = inputString.replace(" ", "+")
url = ('https://maps.googleapis.com/maps/api/geocode/json?address=%s&key=%s'%(locatationString, google_api_key))
h = httplib2.Http()
response, content = h.request(url, 'GET')
result = json.loads(content)
latitude = result['results'][0]['geometry']['location']['lat']
longitude = result['results'][0]['geometry']['location']['lng']
# print(latitude, longitude)
return (latitude, longitude)
# print("response header: %s \n \n" % response)
# return result
# san_francisco = getGeocodeLocation("San Francisco, CA")
# response header: {'content-type': 'application/json; charset=UTF-8', 'date': 'Sat, 27 Jan 2018 06:25:35 GMT', 'expires': 'Sun, 28 Jan 2018 06:25:35 GMT', 'cache-control': 'public, max-age=86400', 'vary': 'Accept-Language', 'access-control-allow-origin': '*', 'server': 'mafe', 'content-length': '1749', 'x-xss-protection': '1; mode=block', 'x-frame-options': 'SAMEORIGIN', 'alt-svc': 'hq=":443"; ma=2592000; quic=51303431; quic=51303339; quic=51303338; quic=51303337; quic=51303335,quic=":443"; ma=2592000; v="41,39,38,37,35"', 'status': '200', '-content-encoding': 'gzip', 'content-location': 'https://maps.googleapis.com/maps/api/geocode/json?address=San+Francisco,+CA&key=<KEY>'}
# san_francisco
# {'results': [{'address_components': [{'long_name': 'San Francisco', 'short_name': 'SF', 'types': ['locality', 'political']}, {'long_name': 'San Francisco County', 'short_name': 'San Francisco County', 'types': ['administrative_area_level_2', 'political']}, {'long_name': 'California', 'short_name': 'CA', 'types': ['administrative_area_level_1', 'political']}, {'long_name': 'United States', 'short_name': 'US', 'types': ['country', 'political']}], 'formatted_address': 'San Francisco, CA, USA', 'geometry': {'bounds': {'northeast': {'lat': 37.9298239, 'lng': -122.28178}, 'southwest': {'lat': 37.6398299, 'lng': -123.173825}}, 'location': {'lat': 37.7749295, 'lng': -122.4194155}, 'location_type': 'APPROXIMATE', 'viewport': {'northeast': {'lat': 37.812,'lng': -122.3482}, 'southwest': {'lat': 37.70339999999999, 'lng': -122.527}}}, 'place_id': 'ChIJIQBpAG2ahYAR_6128GcTUEo', 'types': ['locality', 'political']}], 'status': 'OK'}
# san_francisco.keys()
# dict_keys(['results', 'status'])
# san_francisco['results'][0]['geometry']['location']['lat']
# 37.7749295
# san_francisco['results'][0]['geometry']['location']['lng']
# -122.4194155 | en | 0.213054 | # python3 --> Enter Python Shell # from geocode import getGeocodeLocation # getGeocodeLocation("Place you wanto to query") # print(latitude, longitude) # print("response header: %s \n \n" % response) # return result # san_francisco = getGeocodeLocation("San Francisco, CA") # response header: {'content-type': 'application/json; charset=UTF-8', 'date': 'Sat, 27 Jan 2018 06:25:35 GMT', 'expires': 'Sun, 28 Jan 2018 06:25:35 GMT', 'cache-control': 'public, max-age=86400', 'vary': 'Accept-Language', 'access-control-allow-origin': '*', 'server': 'mafe', 'content-length': '1749', 'x-xss-protection': '1; mode=block', 'x-frame-options': 'SAMEORIGIN', 'alt-svc': 'hq=":443"; ma=2592000; quic=51303431; quic=51303339; quic=51303338; quic=51303337; quic=51303335,quic=":443"; ma=2592000; v="41,39,38,37,35"', 'status': '200', '-content-encoding': 'gzip', 'content-location': 'https://maps.googleapis.com/maps/api/geocode/json?address=San+Francisco,+CA&key=<KEY>'} # san_francisco # {'results': [{'address_components': [{'long_name': 'San Francisco', 'short_name': 'SF', 'types': ['locality', 'political']}, {'long_name': 'San Francisco County', 'short_name': 'San Francisco County', 'types': ['administrative_area_level_2', 'political']}, {'long_name': 'California', 'short_name': 'CA', 'types': ['administrative_area_level_1', 'political']}, {'long_name': 'United States', 'short_name': 'US', 'types': ['country', 'political']}], 'formatted_address': 'San Francisco, CA, USA', 'geometry': {'bounds': {'northeast': {'lat': 37.9298239, 'lng': -122.28178}, 'southwest': {'lat': 37.6398299, 'lng': -123.173825}}, 'location': {'lat': 37.7749295, 'lng': -122.4194155}, 'location_type': 'APPROXIMATE', 'viewport': {'northeast': {'lat': 37.812,'lng': -122.3482}, 'southwest': {'lat': 37.70339999999999, 'lng': -122.527}}}, 'place_id': 'ChIJIQBpAG2ahYAR_6128GcTUEo', 'types': ['locality', 'political']}], 'status': 'OK'} # san_francisco.keys() # dict_keys(['results', 'status']) # san_francisco['results'][0]['geometry']['location']['lat'] # 37.7749295 # san_francisco['results'][0]['geometry']['location']['lng'] # -122.4194155 | 3.370024 | 3 |
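The function above boils down to two dictionary lookups into Google's geocode JSON. The sketch below replays that extraction on a response trimmed to the fields actually used, shaped like the sample quoted in the record's comments, so no network call or API key is needed.

import json

sample = ('{"results": [{"geometry": {"location":'
          ' {"lat": 37.7749295, "lng": -122.4194155}}}], "status": "OK"}')
result = json.loads(sample)
latitude = result['results'][0]['geometry']['location']['lat']
longitude = result['results'][0]['geometry']['location']['lng']
print(latitude, longitude)    # 37.7749295 -122.4194155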
backend/core/actions/actionGenerator.py | makakken/roseguarden | 0 | 8447 | <reponame>makakken/roseguarden<gh_stars>0
"""
The roseguarden project
Copyright (C) 2018-2020 <NAME>,
This program is free software: you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
Foundation, either version 3 of the License, or (at your option) any later
version.
This program is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
You should have received a copy of the GNU General Public License along with
this program. If not, see <http://www.gnu.org/licenses/>.
"""
__authors__ = ["<NAME>"]
__contact__ = "<EMAIL>"
__credits__ = []
__license__ = "GPLv3"
class BaseAction(object):
action = 'undefined'
target = 'undefined'
source = 'server'
version = '1.0.0'
def __init__(self, ):
print("Instance of BaseAction created")
def execute(self, ):
print("Execute not defined")
@classmethod
def generate(cls, delay=0.0):
action = {}
action['action'] = cls.action
action['target'] = cls.target
action['version'] = cls.version
action['source'] = cls.source
action['delay'] = delay
return action
class BaseNodeAction(object):
action = 'undefined'
version = '1.0.0'
def __init__(self, ):
print("Instance of BaseAction created")
def execute(self, ):
print("Execute not defined")
@classmethod
def generate(cls):
action = {}
action['action'] = cls.action
action['version'] = cls.version
return action
| """
The roseguarden project
Copyright (C) 2018-2020 <NAME>,
This program is free software: you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
Foundation, either version 3 of the License, or (at your option) any later
version.
This program is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
You should have received a copy of the GNU General Public License along with
this program. If not, see <http://www.gnu.org/licenses/>.
"""
__authors__ = ["<NAME>"]
__contact__ = "<EMAIL>"
__credits__ = []
__license__ = "GPLv3"
class BaseAction(object):
action = 'undefined'
target = 'undefined'
source = 'server'
version = '1.0.0'
def __init__(self, ):
print("Instance of BaseAction created")
def execute(self, ):
print("Execute not defined")
@classmethod
def generate(cls, delay=0.0):
action = {}
action['action'] = cls.action
action['target'] = cls.target
action['version'] = cls.version
action['source'] = cls.source
action['delay'] = delay
return action
class BaseNodeAction(object):
action = 'undefined'
version = '1.0.0'
def __init__(self, ):
print("Instance of BaseAction created")
def execute(self, ):
print("Execute not defined")
@classmethod
def generate(cls):
action = {}
action['action'] = cls.action
action['version'] = cls.version
return action | en | 0.875064 | The roseguarden project Copyright (C) 2018-2020 <NAME>, This program is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program. If not, see <http://www.gnu.org/licenses/>. | 2.466266 | 2 |
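generate() is a classmethod, so concrete actions only override the class attributes and get the wire dict for free. A hedged sketch with an invented subclass name and target, assuming BaseAction from the module above is in scope.

class OpenDoorAction(BaseAction):      # hypothetical action for illustration
    action = 'openDoor'
    target = 'node'

payload = OpenDoorAction.generate(delay=1.5)
print(payload)
# {'action': 'openDoor', 'target': 'node', 'version': '1.0.0', 'source': 'server', 'delay': 1.5}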
lib/csv/csv.py | arnscott/gcounter | 0 | 8448 | """
MIT License
Copyright (c) 2018 <NAME>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import datetime
import csv
import os
class CSVReader(object):
"""Wrapper for reading csv files.
Takes just the filepath as an argument.
Use the iterrecords() generator method for large data sets for increased performance.
"""
def __init__(self, file_path, delimiter=','):
self.file_path = file_path
self.delimiter = delimiter
def read_to_list(self):
"""Returns the records in the csv as a list[]
Each record is a dictionary
"""
records = []
with open(self.file_path) as source:
reader = csv.DictReader(source,
delimiter=self.delimiter)
for row in reader:
records.append(row)
return records
def read_to_dict(self, key_field):
"""Returns the records in the csv as a dictionary.
The key value is specified by the key_field argument for each record
"""
records = {}
with open(self.file_path) as source:
reader = csv.DictReader(source,
delimiter=self.delimiter)
self.headers = reader.fieldnames
if key_field in self.headers:
for row in reader:
if not row[key_field] in records:
records[row[key_field]] = row
else:
raise Exception('The key provided does not have unique values.')
else:
raise KeyError('The key provided does not exist')
return records
def iterrecords(self):
"""Generator method that provides a more efficient way to iterate records.
for record in instance.iterrecords():
print(record)
"""
records = []
with open(self.file_path) as source:
reader = csv.DictReader(source,
delimiter=self.delimiter)
for row in reader:
yield row
class CSVWriter(object):
"""Wrapper for writing csv files.
takes the file path and a list of headers as arguments
"""
def __init__(self, file_path, headers):
self.headers = headers
self.file_path = file_path
def write_from_list(self, records=[]):
"""Writes the csv to the indicated file_path
taking a list[] of records as the argument
where each record is a dictionary.
Only the fields in self.headers will be written to the csv.
But extra fields can be passed, they will just be skipped over.
"""
if isinstance(records, list):
with open(self.file_path, 'w') as csvfile:
writer = csv.DictWriter(csvfile,
fieldnames=self.headers)
writer.writeheader()
for record in records:
if isinstance(record, dict):
row = {field: record[field] for field in self.headers}
writer.writerow(row)
else:
raise Exception('Items in list must be of type dict')
else:
raise Exception('Must pass a list object as the records list')
return self.file_path
def write_from_dict(self, records={}):
"""Writes the csv to the indicated file_path
taking a dict{} of records as the argument
where each item in the dict{} is also a dict{}
"""
with open(self.file_path, 'w') as csvfile:
writer = csv.DictWriter(csvfile,
fieldnames=self.headers)
writer.writeheader()
for key, record in records.items():
row = {field: record[field] for field in self.headers}
writer.writerow(row)
return self.file_path
def reader(file_path='', delimiter=','):
"""Returns a CSVReader object
"""
if os.path.isfile(file_path):
if os.access(file_path, os.R_OK):
return CSVReader(file_path, delimiter=delimiter)
else:
raise Exception('{fname} exists but is not readable.'.format(fname=file_path))
else:
raise Exception('{fname} does not exist'.format(fname=file_path))
def writer(file_path='', headers=[]):
"""Returns a CSVWriter object
"""
if not os.path.isfile(file_path):
if isinstance(headers, list):
return CSVWriter(file_path=file_path, headers=headers)
else:
raise Exception('Headers need to be in a list object.')
else:
raise Exception('{fname} is already a file. Please write to a new location.'.format(fname=file_path))
def the_date():
return datetime.date.today().strftime('%m_%d_%Y')
| """
MIT License
Copyright (c) 2018 <NAME>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import datetime
import csv
import os
class CSVReader(object):
"""Wrapper for reading csv files.
Takes just the filepath as an argument.
Use the iterrecords() generator method for large data sets for increased performance.
"""
def __init__(self, file_path, delimiter=','):
self.file_path = file_path
self.delimiter = delimiter
def read_to_list(self):
"""Returns the records in the csv as a list[]
Each record is a dictionary
"""
records = []
with open(self.file_path) as source:
reader = csv.DictReader(source,
delimiter=self.delimiter)
for row in reader:
records.append(row)
return records
def read_to_dict(self, key_field):
"""Returns the records in the csv as a dictionary.
The key value is specified by the key_field argument for each record
"""
records = {}
with open(self.file_path) as source:
reader = csv.DictReader(source,
delimiter=self.delimiter)
self.headers = reader.fieldnames
if key_field in self.headers:
for row in reader:
if not row[key_field] in records:
records[row[key_field]] = row
else:
raise Exception('The key provided does not have unique values.')
else:
raise KeyError('The key provided does not exist')
return records
def iterrecords(self):
"""Generator method that provides a more efficient way to iterate records.
for record in instance.iterrecords():
print(record)
"""
records = []
with open(self.file_path) as source:
reader = csv.DictReader(source,
delimiter=self.delimiter)
for row in reader:
yield row
class CSVWriter(object):
"""Wrapper for writing csv files.
takes the file path and a list of headers as arguments
"""
def __init__(self, file_path, headers):
self.headers = headers
self.file_path = file_path
def write_from_list(self, records=[]):
"""Writes the csv to the indicated file_path
taking a list[] of records as the argument
where each record is a dictionary.
Only the fields in self.headers will be written to the csv.
But extra fields can be passed, they will just be skipped over.
"""
if isinstance(records, list):
with open(self.file_path, 'w') as csvfile:
writer = csv.DictWriter(csvfile,
fieldnames=self.headers)
writer.writeheader()
for record in records:
if isinstance(record, dict):
row = {field: record[field] for field in self.headers}
writer.writerow(row)
else:
raise Exception('Items in list must be of type dict')
else:
raise Exception('Must pass a list object as the records list')
return self.file_path
def write_from_dict(self, records={}):
"""Writes the csv to the indicated file_path
taking a dict{} of records as the argument
where each item in the dict{} is also a dict{}
"""
with open(self.file_path, 'w') as csvfile:
writer = csv.DictWriter(csvfile,
fieldnames=self.headers)
writer.writeheader()
for key, record in records.items():
row = {field: record[field] for field in self.headers}
writer.writerow(row)
return self.file_path
def reader(file_path='', delimiter=','):
"""Returns a CSVReader object
"""
if os.path.isfile(file_path):
if os.access(file_path, os.R_OK):
return CSVReader(file_path, delimiter=delimiter)
else:
raise Exception('{fname} exists but is not readable.'.format(fname=file_path))
else:
raise Exception('{fname} does not exist'.format(fname=file_path))
def writer(file_path='', headers=[]):
"""Returns a CSVWriter object
"""
if not os.path.isfile(file_path):
if isinstance(headers, list):
return CSVWriter(file_path=file_path, headers=headers)
else:
raise Exception('Headers need to be in a list object.')
else:
raise Exception('{fname} is already a file. Please write to a new location.'.format(fname=file_path))
def the_date():
return datetime.date.today().strftime('%m_%d_%Y')
| en | 0.81453 | MIT License Copyright (c) 2018 <NAME> Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. Wrapper for reading csv files. Takes just the filepath as an argument. Use the iterrecords() generator method for large data sets for increased performance. Returns the records in the csv as a list[] Each record is a dictionary Returns the records in the csv as a dictionary. The key value is specified by the key_field argument for each record Generator method that provides a more efficient way to iterate records. for record in instance.iterrecords(): print(record) Wrapper for writing csv files. takes the file path and a list of headers as arguments Writes the csv to the indicated file_path taking a list[] of records as the argument where each record is a dictionary. Only the fields in self.headers will be written to the csv. But extra fields can be passed, they will just be skipped over. Writes the csv to the indicated file_path taking a dict{} of records as the argument where each item in the dict{} is also a dict{} Returns a CSVReader object Returns a CSVWriter object | 2.534572 | 3 |
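A short round-trip with the reader()/writer() helpers above; the file name and fields are arbitrary examples, and note that writer() refuses to overwrite an existing file.

# assuming the module above is imported, e.g. from lib.csv import csv as csvwrap
headers = ['name', 'count']
records = [{'name': 'alpha', 'count': 3}, {'name': 'beta', 'count': 5}]

out = writer(file_path='counts.csv', headers=headers)
out.write_from_list(records)

for record in reader(file_path='counts.csv').iterrecords():
    print(record['name'], record['count'])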
Module01/LearningQGIS_ThirdEdition_Code/Chapter6_code/export_map.py | karant17/Test | 7 | 8449 | from PyQt4.QtGui import QImage, QPainter
from PyQt4.QtCore import QSize
# NOTE: this script is meant to be run from the QGIS Python console, where
# `iface` and the qgis.core classes (QgsMapSettings, QgsMapRendererCustomPainterJob)
# are already available without explicit imports.
# configure the output image
width = 800
height = 600
dpi = 92
img = QImage(QSize(width, height), QImage.Format_RGB32)
img.setDotsPerMeterX(dpi / 25.4 * 1000)
img.setDotsPerMeterY(dpi / 25.4 * 1000)
# get the map layers and extent
layers = [ layer.id() for layer in iface.legendInterface().layers() ]
extent = iface.mapCanvas().extent()
# configure map settings for export
mapSettings = QgsMapSettings()
mapSettings.setMapUnits(0)  # 0 corresponds to meters (QGis.Meters) in the QGIS 2.x API
mapSettings.setExtent(extent)
mapSettings.setOutputDpi(dpi)
mapSettings.setOutputSize(QSize(width, height))
mapSettings.setLayers(layers)
mapSettings.setFlags(QgsMapSettings.Antialiasing | QgsMapSettings.UseAdvancedEffects | QgsMapSettings.ForceVectorOutput | QgsMapSettings.DrawLabeling)
# configure and run painter
p = QPainter()
p.begin(img)
mapRenderer = QgsMapRendererCustomPainterJob(mapSettings, p)
mapRenderer.start()
mapRenderer.waitForFinished()
p.end()
# save the result
img.save("C:/temp/custom_export.png","png") | from PyQt4.QtGui import QImage, QPainter
from PyQt4.QtCore import QSize
# configure the output image
width = 800
height = 600
dpi = 92
img = QImage(QSize(width, height), QImage.Format_RGB32)
img.setDotsPerMeterX(dpi / 25.4 * 1000)
img.setDotsPerMeterY(dpi / 25.4 * 1000)
# get the map layers and extent
layers = [ layer.id() for layer in iface.legendInterface().layers() ]
extent = iface.mapCanvas().extent()
# configure map settings for export
mapSettings = QgsMapSettings()
mapSettings.setMapUnits(0)
mapSettings.setExtent(extent)
mapSettings.setOutputDpi(dpi)
mapSettings.setOutputSize(QSize(width, height))
mapSettings.setLayers(layers)
mapSettings.setFlags(QgsMapSettings.Antialiasing | QgsMapSettings.UseAdvancedEffects | QgsMapSettings.ForceVectorOutput | QgsMapSettings.DrawLabeling)
# configure and run painter
p = QPainter()
p.begin(img)
mapRenderer = QgsMapRendererCustomPainterJob(mapSettings, p)
mapRenderer.start()
mapRenderer.waitForFinished()
p.end()
# save the result
img.save("C:/temp/custom_export.png","png") | en | 0.751412 | # configure the output image # get the map layers and extent # configure map settings for export # configure and run painter # save the result | 2.490481 | 2 |
tools/generate_cropped_dataset.py | DIVA-DIA/DIVA-DAF | 3 | 8450 |
"""
Load a dataset of historic documents by specifying the folder where it is located.
"""
import argparse
# Utils
import itertools
import logging
import math
from datetime import datetime
from pathlib import Path
from torchvision.datasets.folder import has_file_allowed_extension, pil_loader
from torchvision.transforms import functional as F
from tqdm import tqdm
IMG_EXTENSIONS = ('.jpg', '.jpeg', '.png', '.ppm', '.bmp', '.pgm', '.gif')
JPG_EXTENSIONS = ('.jpg', '.jpeg')
def get_img_paths_uncropped(directory):
"""
Parameters
----------
directory: string
parent directory with images inside
Returns
-------
paths: list of paths
"""
paths = []
directory = Path(directory).expanduser()
if not directory.is_dir():
logging.error(f'Directory not found ({directory})')
for subdir in sorted(directory.iterdir()):
if not subdir.is_dir():
continue
for img_name in sorted(subdir.iterdir()):
if has_file_allowed_extension(str(img_name), IMG_EXTENSIONS):
paths.append((subdir / img_name, str(subdir.stem)))
return paths
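# Illustrative only: a quick sanity check of the helper above, assuming a
# hypothetical folder layout <root>/train/<page_folder>/<image files>.
#
#   paths = get_img_paths_uncropped('/data/historic_docs/train')
#   print(len(paths))   # number of images found
#   print(paths[0])     # (path_to_image, page_folder_name)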
class ImageCrop(object):
"""
    Crop an image at the specified coordinates to the specified square size
    and convert it to a tensor.
"""
def __init__(self, crop_size):
self.crop_size = crop_size
def __call__(self, img, coordinates):
"""
        Args:
            img (PIL Image): Image to be cropped and converted to a tensor.
            coordinates (tuple): (x, y) of the top-left corner of the crop.

        Returns:
            Tensor: the cropped image converted to a tensor.
"""
x_position = coordinates[0]
y_position = coordinates[1]
img_crop = F.to_tensor(
F.crop(img=img, left=x_position, top=y_position, width=self.crop_size, height=self.crop_size))
return img_crop
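# Illustrative only: cropping a single page with the callable above; the file
# name is hypothetical.
#
#   crop = ImageCrop(crop_size=256)
#   page = pil_loader('page_0001.png')
#   patch = crop(page, coordinates=(0, 0))   # tensor of shape (C, 256, 256)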
class CroppedDatasetGenerator:
def __init__(self, input_path: Path, output_path, crop_size_train, crop_size_val, crop_size_test, overlap=0.5,
leading_zeros_length=4, override_existing=False):
# Init list
self.input_path = input_path
self.output_path = output_path
self.crop_size_train = crop_size_train
self.crop_size_val = crop_size_val
self.crop_size_test = crop_size_test
self.overlap = overlap
self.leading_zeros_length = leading_zeros_length
self.override_existing = override_existing
self.generator_train = CropGenerator(input_path=input_path / 'train',
output_path=output_path / 'train',
crop_size=crop_size_train,
overlap=overlap,
leading_zeros_length=leading_zeros_length,
override_existing=override_existing,
progress_title='Cropping "train"')
self.generator_val = CropGenerator(input_path=input_path / 'val',
output_path=output_path / 'val',
crop_size=crop_size_val,
overlap=overlap,
leading_zeros_length=leading_zeros_length,
override_existing=override_existing,
progress_title='Cropping "val"')
self.generator_test = CropGenerator(input_path=input_path / 'test',
output_path=output_path / 'test',
crop_size=crop_size_test,
overlap=overlap,
leading_zeros_length=leading_zeros_length,
override_existing=override_existing,
progress_title='Cropping "test"')
def write_crops(self):
info_list = ['Running CroppedDatasetGenerator.write_crops():',
f'- full_command:',
f'python tools/generate_cropped_dataset.py -i {self.input_path} -o {self.output_path} '
f'-tr {self.crop_size_train} -v {self.crop_size_val} -te {self.crop_size_test} -ov {self.overlap} '
f'-l {self.leading_zeros_length}',
f'',
f'- start_time: \t{datetime.now():%Y-%m-%d_%H-%M-%S}',
f'- input_path: \t{self.input_path}',
f'- output_path: \t{self.output_path}',
f'- crop_size_train: \t{self.crop_size_train}',
f'- crop_size_val: \t{self.crop_size_val}',
f'- crop_size_test: \t{self.crop_size_test}',
f'- overlap: \t{self.overlap}',
f'- leading_zeros_len:\t{self.leading_zeros_length}',
f'- override_existing:\t{self.override_existing}',
''] # empty string to get linebreak at the end when using join
info_str = '\n'.join(info_list)
print(info_str)
# Write info_cropped_dataset.txt
self.output_path.mkdir(parents=True, exist_ok=True)
info_file = self.output_path / 'info_cropped_dataset.txt'
with info_file.open('a') as f:
f.write(info_str)
print(f'Start cropping:')
self.generator_train.write_crops()
self.generator_val.write_crops()
self.generator_test.write_crops()
with info_file.open('a') as f:
f.write(f'- end_time: \t{datetime.now():%Y-%m-%d_%H-%M-%S}\n\n')
class CropGenerator:
def __init__(self, input_path, output_path, crop_size, overlap=0.5, leading_zeros_length=4,
override_existing=False, progress_title=''):
# Init list
self.input_path = input_path
self.output_path = output_path
self.crop_size = crop_size
self.overlap = overlap
self.leading_zeros_length = leading_zeros_length
self.override_existing = override_existing
self.progress_title = progress_title
self.step_size = int(self.crop_size * (1 - self.overlap))
# List of tuples that contain the path to the gt and image that belong together
self.img_paths = get_img_paths_uncropped(input_path)
self.num_imgs_in_set = len(self.img_paths)
if self.num_imgs_in_set == 0:
raise RuntimeError("Found 0 images in subfolders of: {} \n Supported image extensions are: {}".format(
input_path, ",".join(IMG_EXTENSIONS)))
self.current_split = ''
self.current_img_index = -1
self.img_names_sizes, self.num_horiz_crops, self.num_vert_crops = self._get_img_size_and_crop_numbers()
self.crop_list = self._get_crop_list()
def write_crops(self):
crop_function = ImageCrop(self.crop_size)
for img_index, x, y in tqdm(self.crop_list, desc=self.progress_title):
self._load_image(img_index=img_index)
coordinates = (x, y)
split_name = self.img_names_sizes[img_index][0]
img_full_name = self.img_names_sizes[img_index][1]
img_full_name = Path(img_full_name)
img_name = img_full_name.stem
dest_folder = self.output_path / split_name / img_name
dest_folder.mkdir(parents=True, exist_ok=True)
extension = img_full_name.suffix
filename = f'{img_name}_x{x:0{self.leading_zeros_length}d}_y{y:0{self.leading_zeros_length}d}{extension}'
dest_filename = dest_folder / filename
if not self.override_existing:
if dest_filename.exists():
continue
img = self.get_crop(self.current_img, coordinates=coordinates, crop_function=crop_function)
pil_img = F.to_pil_image(img, mode='RGB')
if extension in JPG_EXTENSIONS:
pil_img.save(dest_filename, quality=95)
else:
# save_image(img, dest_filename)
pil_img.save(dest_filename)
def _load_image(self, img_index):
"""
        Initializes the variables responsible for tracking which crop should be taken next,
        the current image and the like. This should be run every time a new page gets loaded.
"""
if self.current_img_index == img_index:
return
# Load image
self.current_img = pil_loader(self.img_paths[img_index][0])
# Update pointer to current image
self.current_img_index = img_index
self.current_split = self.img_paths[img_index][1]
def get_crop(self, img, coordinates, crop_function):
img = crop_function(img, coordinates)
return img
def _get_img_size_and_crop_numbers(self):
        img_names_sizes = []  # list of tuples -> (split_name, img_name, img_size (W, H))
num_horiz_crops = []
num_vert_crops = []
for img_path, split_name in self.img_paths:
data_img = pil_loader(img_path)
img_names_sizes.append((split_name, img_path.name, data_img.size))
num_horiz_crops.append(math.ceil((data_img.size[0] - self.crop_size) / self.step_size + 1))
num_vert_crops.append(math.ceil((data_img.size[1] - self.crop_size) / self.step_size + 1))
return img_names_sizes, num_horiz_crops, num_vert_crops
def _get_crop_list(self):
return [self._convert_crop_id_to_coordinates(img_index, hcrop_index, vcrop_index) for img_index in
range(self.num_imgs_in_set) for hcrop_index, vcrop_index in
itertools.product(range(self.num_horiz_crops[img_index]),
range(self.num_vert_crops[img_index]))]
def _convert_crop_id_to_coordinates(self, img_index, hcrop_index, vcrop_index):
# X coordinate
if hcrop_index == self.num_horiz_crops[img_index] - 1:
# We are at the end of a line
x_position = self.img_names_sizes[img_index][2][0] - self.crop_size
else:
x_position = self.step_size * hcrop_index
assert x_position < self.img_names_sizes[img_index][2][0] - self.crop_size
# Y coordinate
if vcrop_index == self.num_vert_crops[img_index] - 1:
# We are at the bottom end
y_position = self.img_names_sizes[img_index][2][1] - self.crop_size
else:
y_position = self.step_size * vcrop_index
assert y_position < self.img_names_sizes[img_index][2][1] - self.crop_size
return img_index, x_position, y_position
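# Worked example (illustrative numbers) for the crop grid above: with
# crop_size=300 and overlap=0.5, step_size = int(300 * 0.5) = 150. For an
# image 960 px wide, num_horiz_crops = ceil((960 - 300) / 150 + 1) = 6; the
# first five crops start at x = 0, 150, 300, 450, 600 and the last one is
# anchored at x = 960 - 300 = 660 so it ends exactly at the right border.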
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('-i', '--input_path',
help='Path to the root folder of the dataset (contains train/val/test)',
type=Path,
required=True)
parser.add_argument('-o', '--output_path',
help='Path to the output folder',
type=Path,
required=True)
parser.add_argument('-tr', '--crop_size_train',
help='Size of the crops in the training set',
type=int,
required=True)
parser.add_argument('-v', '--crop_size_val',
help='Size of the crops in the validation set',
type=int,
required=True)
parser.add_argument('-te', '--crop_size_test',
help='Size of the crops in the test set',
type=int,
required=True)
parser.add_argument('-ov', '--overlap',
help='Overlap of the different crops (between 0-1)',
type=float,
default=0.5)
parser.add_argument('-l', '--leading_zeros_length',
help='amount of leading zeros to encode the coordinates',
type=int,
default=4)
    parser.add_argument('-oe', '--override_existing',
                        help='If set, overrides existing cropped images',
                        action='store_true')
args = parser.parse_args()
dataset_generator = CroppedDatasetGenerator(**args.__dict__)
dataset_generator.write_crops()
# example call arguments
# -i
# /Users/voegtlil/Documents/04_Datasets/003-DataSet/CB55-10-segmentation
# -o
# /Users/voegtlil/Desktop/fun
# -tr
# 300
# -v
# 300
# -te
# 256
# example call arguments
# -i
# /dataset/DIVA-HisDB/segmentation/CB55
# -o
# /net/research-hisdoc/datasets/semantic_segmentation/datasets_cropped/temp-CB55
# -tr
# 300
# -v
# 300
# -te
# 256
# dataset_generator = CroppedDatasetGenerator(
# input_path=Path('/dataset/DIVA-HisDB/segmentation/CB55'),
# output_path=Path('/net/research-hisdoc/datasets/semantic_segmentation/datasets_cropped/CB55'),
# crop_size_train=300,
# crop_size_val=300,
# crop_size_test=256,
# overlap=0.5,
# leading_zeros_length=4,
# override_existing=False)
# dataset_generator.write_crops()
| """
Load a dataset of historic documents by specifying the folder where its located.
"""
import argparse
# Utils
import itertools
import logging
import math
from datetime import datetime
from pathlib import Path
from torchvision.datasets.folder import has_file_allowed_extension, pil_loader
from torchvision.transforms import functional as F
from tqdm import tqdm
IMG_EXTENSIONS = ('.jpg', '.jpeg', '.png', '.ppm', '.bmp', '.pgm', '.gif')
JPG_EXTENSIONS = ('.jpg', '.jpeg')
def get_img_paths_uncropped(directory):
"""
Parameters
----------
directory: string
parent directory with images inside
Returns
-------
paths: list of paths
"""
paths = []
directory = Path(directory).expanduser()
if not directory.is_dir():
logging.error(f'Directory not found ({directory})')
for subdir in sorted(directory.iterdir()):
if not subdir.is_dir():
continue
for img_name in sorted(subdir.iterdir()):
if has_file_allowed_extension(str(img_name), IMG_EXTENSIONS):
paths.append((subdir / img_name, str(subdir.stem)))
return paths
class ImageCrop(object):
"""
Crop the data and ground truth image at the specified coordinates to the specified size and convert
them to a tensor.
"""
def __init__(self, crop_size):
self.crop_size = crop_size
def __call__(self, img, coordinates):
"""
Args:
img (PIL Image): Data image to be cropped and converted to tensor.
gt (PIL Image): Ground truth image to be cropped and converted to tensor.
Returns:
Data tensor, gt tensor (tuple of tensors): cropped and converted images
"""
x_position = coordinates[0]
y_position = coordinates[1]
img_crop = F.to_tensor(
F.crop(img=img, left=x_position, top=y_position, width=self.crop_size, height=self.crop_size))
return img_crop
class CroppedDatasetGenerator:
def __init__(self, input_path: Path, output_path, crop_size_train, crop_size_val, crop_size_test, overlap=0.5,
leading_zeros_length=4, override_existing=False):
# Init list
self.input_path = input_path
self.output_path = output_path
self.crop_size_train = crop_size_train
self.crop_size_val = crop_size_val
self.crop_size_test = crop_size_test
self.overlap = overlap
self.leading_zeros_length = leading_zeros_length
self.override_existing = override_existing
self.generator_train = CropGenerator(input_path=input_path / 'train',
output_path=output_path / 'train',
crop_size=crop_size_train,
overlap=overlap,
leading_zeros_length=leading_zeros_length,
override_existing=override_existing,
progress_title='Cropping "train"')
self.generator_val = CropGenerator(input_path=input_path / 'val',
output_path=output_path / 'val',
crop_size=crop_size_val,
overlap=overlap,
leading_zeros_length=leading_zeros_length,
override_existing=override_existing,
progress_title='Cropping "val"')
self.generator_test = CropGenerator(input_path=input_path / 'test',
output_path=output_path / 'test',
crop_size=crop_size_test,
overlap=overlap,
leading_zeros_length=leading_zeros_length,
override_existing=override_existing,
progress_title='Cropping "test"')
def write_crops(self):
info_list = ['Running CroppedDatasetGenerator.write_crops():',
f'- full_command:',
f'python tools/generate_cropped_dataset.py -i {self.input_path} -o {self.output_path} '
f'-tr {self.crop_size_train} -v {self.crop_size_val} -te {self.crop_size_test} -ov {self.overlap} '
f'-l {self.leading_zeros_length}',
f'',
f'- start_time: \t{datetime.now():%Y-%m-%d_%H-%M-%S}',
f'- input_path: \t{self.input_path}',
f'- output_path: \t{self.output_path}',
f'- crop_size_train: \t{self.crop_size_train}',
f'- crop_size_val: \t{self.crop_size_val}',
f'- crop_size_test: \t{self.crop_size_test}',
f'- overlap: \t{self.overlap}',
f'- leading_zeros_len:\t{self.leading_zeros_length}',
f'- override_existing:\t{self.override_existing}',
''] # empty string to get linebreak at the end when using join
info_str = '\n'.join(info_list)
print(info_str)
# Write info_cropped_dataset.txt
self.output_path.mkdir(parents=True, exist_ok=True)
info_file = self.output_path / 'info_cropped_dataset.txt'
with info_file.open('a') as f:
f.write(info_str)
print(f'Start cropping:')
self.generator_train.write_crops()
self.generator_val.write_crops()
self.generator_test.write_crops()
with info_file.open('a') as f:
f.write(f'- end_time: \t{datetime.now():%Y-%m-%d_%H-%M-%S}\n\n')
class CropGenerator:
def __init__(self, input_path, output_path, crop_size, overlap=0.5, leading_zeros_length=4,
override_existing=False, progress_title=''):
# Init list
self.input_path = input_path
self.output_path = output_path
self.crop_size = crop_size
self.overlap = overlap
self.leading_zeros_length = leading_zeros_length
self.override_existing = override_existing
self.progress_title = progress_title
self.step_size = int(self.crop_size * (1 - self.overlap))
# List of tuples that contain the path to the gt and image that belong together
self.img_paths = get_img_paths_uncropped(input_path)
self.num_imgs_in_set = len(self.img_paths)
if self.num_imgs_in_set == 0:
raise RuntimeError("Found 0 images in subfolders of: {} \n Supported image extensions are: {}".format(
input_path, ",".join(IMG_EXTENSIONS)))
self.current_split = ''
self.current_img_index = -1
self.img_names_sizes, self.num_horiz_crops, self.num_vert_crops = self._get_img_size_and_crop_numbers()
self.crop_list = self._get_crop_list()
def write_crops(self):
crop_function = ImageCrop(self.crop_size)
for img_index, x, y in tqdm(self.crop_list, desc=self.progress_title):
self._load_image(img_index=img_index)
coordinates = (x, y)
split_name = self.img_names_sizes[img_index][0]
img_full_name = self.img_names_sizes[img_index][1]
img_full_name = Path(img_full_name)
img_name = img_full_name.stem
dest_folder = self.output_path / split_name / img_name
dest_folder.mkdir(parents=True, exist_ok=True)
extension = img_full_name.suffix
filename = f'{img_name}_x{x:0{self.leading_zeros_length}d}_y{y:0{self.leading_zeros_length}d}{extension}'
dest_filename = dest_folder / filename
if not self.override_existing:
if dest_filename.exists():
continue
img = self.get_crop(self.current_img, coordinates=coordinates, crop_function=crop_function)
pil_img = F.to_pil_image(img, mode='RGB')
if extension in JPG_EXTENSIONS:
pil_img.save(dest_filename, quality=95)
else:
# save_image(img, dest_filename)
pil_img.save(dest_filename)
def _load_image(self, img_index):
"""
Inits the variables responsible of tracking which crop should be taken next, the current images and the like.
This should be run every time a new page gets loaded for the test-set
"""
if self.current_img_index == img_index:
return
# Load image
self.current_img = pil_loader(self.img_paths[img_index][0])
# Update pointer to current image
self.current_img_index = img_index
self.current_split = self.img_paths[img_index][1]
def get_crop(self, img, coordinates, crop_function):
img = crop_function(img, coordinates)
return img
def _get_img_size_and_crop_numbers(self):
img_names_sizes = [] # list of tuples -> (split_name, img_name, img_size (H, W))
num_horiz_crops = []
num_vert_crops = []
for img_path, split_name in self.img_paths:
data_img = pil_loader(img_path)
img_names_sizes.append((split_name, img_path.name, data_img.size))
num_horiz_crops.append(math.ceil((data_img.size[0] - self.crop_size) / self.step_size + 1))
num_vert_crops.append(math.ceil((data_img.size[1] - self.crop_size) / self.step_size + 1))
return img_names_sizes, num_horiz_crops, num_vert_crops
def _get_crop_list(self):
return [self._convert_crop_id_to_coordinates(img_index, hcrop_index, vcrop_index) for img_index in
range(self.num_imgs_in_set) for hcrop_index, vcrop_index in
itertools.product(range(self.num_horiz_crops[img_index]),
range(self.num_vert_crops[img_index]))]
def _convert_crop_id_to_coordinates(self, img_index, hcrop_index, vcrop_index):
# X coordinate
if hcrop_index == self.num_horiz_crops[img_index] - 1:
# We are at the end of a line
x_position = self.img_names_sizes[img_index][2][0] - self.crop_size
else:
x_position = self.step_size * hcrop_index
assert x_position < self.img_names_sizes[img_index][2][0] - self.crop_size
# Y coordinate
if vcrop_index == self.num_vert_crops[img_index] - 1:
# We are at the bottom end
y_position = self.img_names_sizes[img_index][2][1] - self.crop_size
else:
y_position = self.step_size * vcrop_index
assert y_position < self.img_names_sizes[img_index][2][1] - self.crop_size
return img_index, x_position, y_position
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('-i', '--input_path',
help='Path to the root folder of the dataset (contains train/val/test)',
type=Path,
required=True)
parser.add_argument('-o', '--output_path',
help='Path to the output folder',
type=Path,
required=True)
parser.add_argument('-tr', '--crop_size_train',
help='Size of the crops in the training set',
type=int,
required=True)
parser.add_argument('-v', '--crop_size_val',
help='Size of the crops in the validation set',
type=int,
required=True)
parser.add_argument('-te', '--crop_size_test',
help='Size of the crops in the test set',
type=int,
required=True)
parser.add_argument('-ov', '--overlap',
help='Overlap of the different crops (between 0-1)',
type=float,
default=0.5)
parser.add_argument('-l', '--leading_zeros_length',
help='amount of leading zeros to encode the coordinates',
type=int,
default=4)
parser.add_argument('-oe', '--override_existing',
help='If true overrides the images ',
type=bool,
default=False)
args = parser.parse_args()
dataset_generator = CroppedDatasetGenerator(**args.__dict__)
dataset_generator.write_crops()
# example call arguments
# -i
# /Users/voegtlil/Documents/04_Datasets/003-DataSet/CB55-10-segmentation
# -o
# /Users/voegtlil/Desktop/fun
# -tr
# 300
# -v
# 300
# -te
# 256
# example call arguments
# -i
# /dataset/DIVA-HisDB/segmentation/CB55
# -o
# /net/research-hisdoc/datasets/semantic_segmentation/datasets_cropped/temp-CB55
# -tr
# 300
# -v
# 300
# -te
# 256
# dataset_generator = CroppedDatasetGenerator(
# input_path=Path('/dataset/DIVA-HisDB/segmentation/CB55'),
# output_path=Path('/net/research-hisdoc/datasets/semantic_segmentation/datasets_cropped/CB55'),
# crop_size_train=300,
# crop_size_val=300,
# crop_size_test=256,
# overlap=0.5,
# leading_zeros_length=4,
# override_existing=False)
# dataset_generator.write_crops() | en | 0.642525 | Load a dataset of historic documents by specifying the folder where its located. # Utils Parameters ---------- directory: string parent directory with images inside Returns ------- paths: list of paths Crop the data and ground truth image at the specified coordinates to the specified size and convert them to a tensor. Args: img (PIL Image): Data image to be cropped and converted to tensor. gt (PIL Image): Ground truth image to be cropped and converted to tensor. Returns: Data tensor, gt tensor (tuple of tensors): cropped and converted images # Init list # empty string to get linebreak at the end when using join # Write info_cropped_dataset.txt # Init list # List of tuples that contain the path to the gt and image that belong together # save_image(img, dest_filename) Inits the variables responsible of tracking which crop should be taken next, the current images and the like. This should be run every time a new page gets loaded for the test-set # Load image # Update pointer to current image # list of tuples -> (split_name, img_name, img_size (H, W)) # X coordinate # We are at the end of a line # Y coordinate # We are at the bottom end # example call arguments # -i # /Users/voegtlil/Documents/04_Datasets/003-DataSet/CB55-10-segmentation # -o # /Users/voegtlil/Desktop/fun # -tr # 300 # -v # 300 # -te # 256 # example call arguments # -i # /dataset/DIVA-HisDB/segmentation/CB55 # -o # /net/research-hisdoc/datasets/semantic_segmentation/datasets_cropped/temp-CB55 # -tr # 300 # -v # 300 # -te # 256 # dataset_generator = CroppedDatasetGenerator( # input_path=Path('/dataset/DIVA-HisDB/segmentation/CB55'), # output_path=Path('/net/research-hisdoc/datasets/semantic_segmentation/datasets_cropped/CB55'), # crop_size_train=300, # crop_size_val=300, # crop_size_test=256, # overlap=0.5, # leading_zeros_length=4, # override_existing=False) # dataset_generator.write_crops() | 2.898984 | 3 |
run.py | seanzhangJM/torch_model_demo | 0 | 8451 |
#!/usr/bin/env python
# _*_ coding: utf-8 _*_
# @Time : 2021/12/27 14:04
# @Author : zhangjianming
# @Email : <EMAIL>
# @File : run_task.py
# @Software: PyCharm
import sys
sys.path.extend(["."])
from torch_model_demo.task.run_task import train_fashion_demo
if __name__ == '__main__':
train_fashion_demo()
practice/4_tracking/tracker.py | OrangeRedeng/CV-SUMMER-CAMP-2021 | 13 | 8452 |
import numpy as np
import math
import logging as log
import sys
from tqdm import tqdm
from common.feature_distance import calc_features_similarity
from common.common_objects import DetectedObject, validate_detected_object, Bbox
from common.common_objects import get_bbox_center, get_dist, calc_bbox_area
from common.find_best_assignment import solve_assignment_problem
from common.annotation import AnnotationObject, AnnotationStorage
class Track:
__next_track_id = 0
def __init__(self, first_obj):
self.objects = []
self._track_id = Track.__next_track_id
Track.__next_track_id += 1
self.objects.append(first_obj)
def _validate(self):
assert len(self.objects) > 0
for o in self.objects:
validate_detected_object(o)
        for i in range(len(self.objects) - 1):
            assert self.objects[i].frame_index < self.objects[i+1].frame_index
def add_object(self, o):
self._validate()
validate_detected_object(o)
last_frame_index = self.objects[-1].frame_index
if not last_frame_index < o.frame_index:
raise RuntimeError("Add object={} to track with the last_frame_index={}".format(o, last_frame_index))
self.objects.append(o)
def last(self):
return self.objects[-1]
def get_id(self):
return self._track_id
def get_bbox_for_frame(self, cur_frame_ind):
"""Finds bbox for frame index using linear approximation"""
self._validate()
i_found = None
for i, o in enumerate(self.objects):
if o.frame_index == cur_frame_ind:
return o.bbox
if o.frame_index > cur_frame_ind:
i_found = i
break
if i_found is None: # cur_frame_ind after the last frame_index in track
return None
if i_found == 0: # cur_frame_ind before the first frame_index in track
return None
log.debug("using linear approximation for track id={}, frame_index={}".format(self._track_id, cur_frame_ind))
o1 = self.objects[i_found-1]
o2 = self.objects[i_found]
assert o1.frame_index < cur_frame_ind < o2.frame_index
dindex = o2.frame_index - o1.frame_index
d_cur_index1 = cur_frame_ind - o1.frame_index
d_cur_index2 = o2.frame_index - cur_frame_ind
bbox1 = o1.bbox
bbox2 = o2.bbox
res_bbox = [None, None, None, None]
for k in range(4):
# linear approximation for all bbox fields
res_bbox[k] = (bbox1[k] * d_cur_index2 + bbox2[k] * d_cur_index1) / dindex
res_bbox = Bbox(res_bbox[0], res_bbox[1], res_bbox[2], res_bbox[3])
return res_bbox
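    # Worked example (illustrative numbers) for the interpolation above: with
    # detections at frame 10 (x = 100) and frame 20 (x = 200), a query at
    # frame 15 gives dindex = 10, d_cur_index1 = 5, d_cur_index2 = 5, so the
    # interpolated coordinate is (100 * 5 + 200 * 5) / 10 = 150.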
class Tracker:
def __init__(self, num_frames_to_remove_track, num_objects_to_make_track_valid, affinity_threshold):
self.tracks = []
self.track_archive = []
self.num_frames_to_remove_track = num_frames_to_remove_track
self.num_objects_to_make_track_valid = num_objects_to_make_track_valid
self.affinity_threshold = affinity_threshold
def add_objects(self, det_objs):
log.debug("begin: handling {} objects".format(len(det_objs)))
if len(det_objs) == 0:
return
frame_index = det_objs[0].frame_index
assert all(o.frame_index == frame_index for o in det_objs), "All det_objs should have the same frame_index"
affinity_matrix = self._build_affinity_matrix(det_objs)
self._validate_affinity_matrix(affinity_matrix, len(self.tracks), len(det_objs))
self._log_affinity_matrix(affinity_matrix)
decision, best_affinity = self._solve_assignment_problem(affinity_matrix)
self._log_decision(decision, best_affinity, det_objs, frame_index)
self._apply_decision(decision, det_objs, frame_index)
self._move_obsolete_tracks_to_archive(frame_index)
log.debug("end: handling {} objects".format(len(det_objs)))
@staticmethod
def _validate_affinity_matrix(affinity_matrix, num_tracks, num_det_objs):
assert isinstance(affinity_matrix, list)
assert len(affinity_matrix) == num_tracks
for affinity_row in affinity_matrix:
assert isinstance(affinity_row, list)
assert len(affinity_row) == num_det_objs
assert all(isinstance(v, float) for v in affinity_row)
assert all(v >= 0 for v in affinity_row)
def _build_affinity_matrix(self, det_objs):
affinity_matrix = []
for t in self.tracks:
affinity_row = []
for o in det_objs:
cur_affinity = self._calc_affinity(t, o)
affinity_row.append(cur_affinity)
affinity_matrix.append(affinity_row)
return affinity_matrix
def _calc_affinity(self, track, obj):
affinity_appearance = self._calc_affinity_appearance(track, obj)
affinity_position = self._calc_affinity_position(track, obj)
affinity_shape = self._calc_affinity_shape(track, obj)
return affinity_appearance * affinity_position * affinity_shape
def _calc_affinity_appearance(self, track, obj):
raise NotImplementedError("The function _calc_affinity_appearance is not implemented -- implement it by yourself")
def _calc_affinity_position(self, track, obj):
raise NotImplementedError("The function _calc_affinity_position is not implemented -- implement it by yourself")
def _calc_affinity_shape(self, track, obj):
raise NotImplementedError("The function _calc_affinity_shape is not implemented -- implement it by yourself")
@staticmethod
def _log_affinity_matrix(affinity_matrix):
with np.printoptions(precision=2, suppress=True, threshold=sys.maxsize, linewidth=sys.maxsize):
log.debug("Affinity matrix =\n{}".format(np.array(affinity_matrix)))
def _solve_assignment_problem(self, affinity_matrix):
decision, best_affinity = solve_assignment_problem(affinity_matrix, self.affinity_threshold)
return decision, best_affinity
def _log_decision(self, decision, best_affinity, det_objs, frame_index):
log.debug("Logging decision for frame index={}".format(frame_index))
num_tracks = len(self.tracks)
for track_index in range(num_tracks):
assert track_index in decision
obj_index = decision[track_index] # index of the object assigned to the track
if obj_index is not None:
assert 0 <= obj_index < len(det_objs)
obj_bbox = det_objs[obj_index].bbox
else:
obj_bbox = None
cur_best_affinity = best_affinity[track_index]
if cur_best_affinity is not None:
best_affinity_str = "{:.3f}".format(cur_best_affinity)
else:
best_affinity_str = str(cur_best_affinity)
log.debug("track_index={}, track id={}, last_bbox={}, decision={}, best_affinity={} => {}".format(
track_index, self.tracks[track_index].get_id(),
self.tracks[track_index].last().bbox,
decision[track_index],
best_affinity_str,
obj_bbox))
def _apply_decision(self, decision, det_objs, frame_index):
set_updated_tracks_indexes = set()
num_det_objs = len(det_objs)
num_tracks = len(self.tracks)
object_indexes_not_mapped_to_tracks = set(range(num_det_objs)) # all indexes from 0 to num_det_objs-1
for track_index in range(num_tracks):
assert track_index in decision
obj_index = decision[track_index] # index of the object assigned to the track
if obj_index is None:
# no objects are mapped for this track
continue
assert 0 <= obj_index < num_det_objs
if obj_index not in object_indexes_not_mapped_to_tracks:
raise RuntimeError("ERROR: Algorithm assigned the object {} to several tracks".format(obj_index))
object_indexes_not_mapped_to_tracks.remove(obj_index)
o = det_objs[obj_index]
self.tracks[track_index].add_object(o)
# create new tracks for all the objects not mapped to tracks
for obj_index in object_indexes_not_mapped_to_tracks:
o = det_objs[obj_index]
self._create_new_track(o)
def _create_new_track(self, o):
new_track = Track(o)
self.tracks.append(new_track)
log.debug("created new track: id={} object: frame_index={}, {}".format(
new_track.get_id(), o.frame_index, o.bbox))
def _move_obsolete_tracks_to_archive(self, frame_index):
new_tracks = []
for t in self.tracks:
last_frame_index = t.last().frame_index
if frame_index - last_frame_index >= self.num_frames_to_remove_track:
log.debug("Move the track id={} to archive: the current frame_index={}, "
"the last frame_index in track={}".format(
t.get_id(), frame_index, last_frame_index))
self.track_archive.append(t)
else:
new_tracks.append(t)
self.tracks = new_tracks
def is_track_valid(self, track):
assert isinstance(track, Track)
return len(track.objects) > self.num_objects_to_make_track_valid
def get_all_valid_tracks(self):
res = []
for t in self.track_archive:
if self.is_track_valid(t):
res.append(t)
for t in self.tracks:
if self.is_track_valid(t):
res.append(t)
return res
def convert_tracks_to_annotation_storage(tracks):
ann_objects_by_frame_index = {}
for cur_track in tqdm(tracks, desc="Converting"):
track_id = cur_track.get_id()
first_frame_index = cur_track.objects[0].frame_index
last_frame_index = cur_track.objects[-1].frame_index
for frame_index in range(first_frame_index, last_frame_index+1):
bbox = cur_track.get_bbox_for_frame(frame_index)
tl_x = math.floor(bbox.tl_x)
tl_y = math.floor(bbox.tl_y)
br_x = math.ceil(bbox.br_x)
br_y = math.ceil(bbox.br_y)
detect_obj = DetectedObject(frame_index=frame_index,
bbox=Bbox(tl_x, tl_y, br_x, br_y),
appearance_feature=[])
ann_obj = AnnotationObject(detect_obj=detect_obj,
track_id=track_id)
if frame_index not in ann_objects_by_frame_index:
ann_objects_by_frame_index[frame_index] = {}
ann_objects_by_frame_index[frame_index][track_id] = ann_obj
annotation_objects = []
for frame_index in sorted(ann_objects_by_frame_index.keys()):
cur_ann_objects = ann_objects_by_frame_index[frame_index]
for track_id in sorted(cur_ann_objects.keys()):
annotation_objects.append(cur_ann_objects[track_id])
annotation_storage = AnnotationStorage.create_annotation_storage_from_list(annotation_objects)
return annotation_storage
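# Illustrative only: typical wiring of the classes above inside a detection
# loop; `frames` and `detect_objects` are hypothetical placeholders.
#
#   tracker = Tracker(num_frames_to_remove_track=30,
#                     num_objects_to_make_track_valid=5,
#                     affinity_threshold=0.2)
#   for frame_index, frame in enumerate(frames):
#       det_objs = detect_objects(frame, frame_index)  # list of DetectedObject
#       tracker.add_objects(det_objs)
#   storage = convert_tracks_to_annotation_storage(tracker.get_all_valid_tracks())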
gm2m/managers.py | mikewolfd/django-gm2m | 0 | 8453 | from django.db import router
from django.db.models import Q, Manager
from django.db import connections
from .contenttypes import ct, get_content_type
from .query import GM2MTgtQuerySet
class GM2MBaseManager(Manager):
use_in_migration = True
def __init__(self, instance):
super(GM2MBaseManager, self).__init__()
self.model = self._model # see create_gm2m_related_manager
self.instance = instance
self.pk = instance.pk
self.core_filters = {}
def get_queryset(self):
try:
return self.instance \
._prefetched_objects_cache[self.prefetch_cache_name]
except (AttributeError, KeyError):
db = self._db or router.db_for_read(self.instance.__class__,
instance=self.instance)
return self._get_queryset(using=db)._next_is_sticky() \
.filter(**self.core_filters)
def _get_queryset(self, using):
return super(GM2MBaseManager, self).get_queryset().using(using)
def get_prefetch_queryset(self, instances, queryset=None):
db = self._db or router.db_for_read(self.model,
instance=instances[0])
if queryset is None:
queryset = self._get_queryset(db)
qs, rel_obj_attr, instance_attr = \
self._get_prefetch_queryset_params(instances, queryset, db)
return (qs,
rel_obj_attr,
instance_attr,
False,
self.prefetch_cache_name)
def _get_extra_queryset(self, queryset, q, extra_fields, db):
join_table = self.through._meta.db_table
connection = connections[db]
qn = connection.ops.quote_name
extra = dict(select=dict(
('_prefetch_related_val_%s' % f.attname,
'%s.%s' % (qn(join_table), qn(f.column)))
for f in extra_fields))
return queryset.using(db)._next_is_sticky().filter(q).extra(**extra)
def _check_through_model(self, method_name):
# If the GM2M relation has an intermediary model,
# the add and remove methods are not available.
if not self.through._meta.auto_created:
opts = self.through._meta
raise AttributeError(
'Cannot use %s() on a ManyToManyField which specifies an '
'intermediary model. Use %s.%s\'s Manager instead.'
% (method_name, opts.app_label, opts.object_name))
def _do_add(self, db, through_objs):
"""
Performs items addition
"""
# Add the new entries in the db table
self.through._default_manager.using(db).bulk_create(through_objs)
def add(self, *objs):
"""
Adds objects to the GM2M field
:param *objs: object instances to add
"""
#
self._check_through_model('add')
if not objs:
return
db = router.db_for_write(self.through, instance=self.instance)
self._do_add(db, self._to_add(objs, db))
add.alters_data = True
def _do_remove(self, db, q):
"""
        Performs items removal matching a Q object
"""
self.through._default_manager.using(db).filter(q).delete()
def remove(self, *objs):
"""
Removes objects from the GM2M field
"""
# *objs - objects to remove
self._check_through_model('remove')
if not objs:
return
db = router.db_for_write(self.through, instance=self.instance)
self._do_remove(db, self._to_remove(objs))
remove.alters_data = True
def _do_clear(self, db, filter=None):
self.through._default_manager.using(db).filter(**(filter or {})) \
.delete()
def set(self, objs, **kwargs):
"""
Sets the objs iterable as the set of related objects
(Added for compatibility with Django 1.9)
"""
self._check_through_model('set')
objs = tuple(objs)
clear = kwargs.pop('clear', False)
db = router.db_for_write(self.through, instance=self.instance)
        if clear:
            # clears all and re-adds
            self._do_clear(db)
            self._do_add(db, self._to_add(objs, db))
else:
# just removes the necessary items and adds the missing ones
to_add, to_remove = self._to_change(objs, db)
self._do_remove(db, to_remove)
self._do_add(db, to_add)
set.alters_data = True
def clear(self):
db = router.db_for_write(self.through, instance=self.instance)
self._do_clear(db, self._to_clear())
clear.alters_data = True
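# Illustrative only: how the manager methods above are reached in practice
# through a GM2MField; the models and field name below are hypothetical.
#
#   from gm2m import GM2MField
#
#   class Project(models.Model):
#       links = GM2MField()
#
#   project.links.add(task, document)    # -> add()
#   project.links.remove(task)           # -> remove()
#   project.links.set([document])        # -> set()
#   project.links.clear()                # -> clear()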
class GM2MBaseSrcManager(Manager):
def __init__(self, instance):
# the manager's model is the source model
super(GM2MBaseSrcManager, self).__init__(instance)
self.core_filters['%s__%s' % (self.query_field_name,
self.field_names['tgt_ct'])] = \
get_content_type(self.instance)
self.core_filters['%s__%s' % (self.query_field_name,
self.field_names['tgt_fk'])] = \
self.instance.pk
def _get_prefetch_queryset_params(self, instances, queryset, db):
# we're looking for generic target instances, which should be
# converted to (content_type, primary_key) tuples
q = Q()
for obj in instances:
q = q | Q(**{
'%s__%s' % (self.query_field_name,
self.field_names['tgt_ct']):get_content_type(obj),
'%s__%s' % (self.query_field_name,
self.field_names['tgt_fk']): obj.pk
})
# Annotating the query in order to retrieve the primary model
# content type and id in the same query
# content type must be the 1st element, see rel_obj_attr below
extra_fields = (
self.through._meta.get_field(self.field_names['tgt_ct']),
self.through._meta.get_field(self.field_names['tgt_fk'])
)
qs = self._get_extra_queryset(queryset, q, extra_fields, db)
# primary model retrieval function
def rel_obj_attr(relobj):
t = []
for f in extra_fields:
try:
# t already contains the content type id
# we use get_for_id to retrieve the cached content type
model = ct.ContentType.objects.get_for_id(t[0]) \
.model_class()
except IndexError:
# t is empty
model = ct.ContentType
t.append(model._meta.pk.to_python(
getattr(relobj, '_prefetch_related_val_%s' % f.attname)
))
return tuple(t)
# model attribute retrieval function
instance_attr = lambda inst: \
(get_content_type(inst).pk, inst.pk)
return qs, rel_obj_attr, instance_attr
def _to_add(self, objs, db):
# we're using the reverse relation to add source model
# instances
inst_ct = get_content_type(self.instance)
vals = self.through._default_manager.using(db) \
.values_list(self.field_names['src'],
flat=True) \
.filter(**{
self.field_names['tgt_ct']: inst_ct,
self.field_names['tgt_fk']: self.pk
})
to_add = []
for obj in objs:
if obj.pk not in vals:
to_add.append(self.through(**{
'%s_id' % self.field_names['src']:
obj.pk,
self.field_names['tgt_ct']: inst_ct,
self.field_names['tgt_fk']: self.pk
}))
return to_add
def _to_remove(self, objs):
# we're using the reverse relation to delete source model
# instances
inst_ct = get_content_type(self.instance)
return Q(**{
'%s_id__in' % self.field_names['src']:
[obj.pk for obj in objs],
self.field_names['tgt_ct']: inst_ct,
self.field_names['tgt_fk']: self.pk
})
def _to_change(self, objs, db):
"""
Returns the sets of items to be added and a Q object for removal
"""
inst_ct = get_content_type(self.instance)
vals = list(self.through._default_manager.using(db)
.values_list(self.field_names['src'], flat=True)
.filter(**{
self.field_names['tgt_ct']: inst_ct,
self.field_names['tgt_fk']: self.pk
}))
to_add = set()
to_remove = set()
for obj in objs:
try:
vals.remove(obj.pk)
except ValueError:
# obj.pk is not in vals and must be added
to_add.add(self.through(**{
'%s_id' % self.field_names['src']:
obj.pk,
self.field_names['tgt_ct']: inst_ct,
self.field_names['tgt_fk']: self.pk
}))
for v in vals:
to_remove.add(v)
return to_add, Q(pk__in=to_remove)
def _to_clear(self):
return {
self.field_names['tgt_ct']: get_content_type(self.instance),
self.field_names['tgt_fk']: self.instance.pk
}
class GM2MBaseTgtManager(Manager):
def __init__(self, instance):
# the manager's model is the through model
super(GM2MBaseTgtManager, self).__init__(instance)
source_field = self.through._meta.get_field(
self.field_names['src'])
self.source_related_fields = source_field.related_fields
for __, rh_field in self.source_related_fields:
key = '%s__%s' % (self.query_field_name, rh_field.name)
self.core_filters[key] = getattr(self.instance,
rh_field.attname)
def _get_queryset(self, using):
return GM2MTgtQuerySet(self.model, using=using)
def _get_prefetch_queryset_params(self, instances, queryset, db):
# we're looking for through model instances
query = {}
for lh_field, rh_field in self.source_related_fields:
query['%s__in' % lh_field.name] = \
set(getattr(obj, rh_field.attname)
for obj in instances)
q = Q(**query)
# Annotating the query in order to retrieve the primary model
# id in the same query
fk = self.through._meta.get_field(self.field_names['src'])
extra_fields = fk.local_related_fields
qs = self._get_extra_queryset(queryset, q, extra_fields, db)
# marking the queryset so that the original queryset should
# be returned when evaluated the first time
qs._related_prefetching = True
# primary model retrieval function
def rel_obj_attr(relobj):
t = []
for f in extra_fields:
v = getattr(relobj,
'_prefetch_related_val_%s' % f.attname)
try:
v = v.pop()
except AttributeError: # v is not a list
pass
t.append(f.related_model._meta.pk.to_python(v))
return tuple(t)
# model attribute retrieval function
select_fields = fk.foreign_related_fields
instance_attr = lambda inst: tuple([getattr(inst, f.attname)
for f in select_fields])
return qs, rel_obj_attr, instance_attr
def _to_add(self, objs, db):
models = []
objs_set = set()
for obj in objs:
# extract content type and primary key for each object
objs_set.add((get_content_type(obj),
obj.pk))
m = obj.__class__
if m not in models:
# call field.add_relation for each model
models.append(m)
self.field.add_relation(m, auto=True)
vals = self.through._default_manager.using(db) \
.filter(**{self.field_names['src']: self.pk}) \
.values_list(self.field_names['tgt_ct'],
self.field_names['tgt_fk'])
to_add = []
for ct, pk in objs_set.difference(vals):
to_add.append(self.through(**{
'%s_id' % self.field_names['src']: self.pk,
self.field_names['tgt_ct']: ct,
self.field_names['tgt_fk']: pk
}))
return to_add
def _to_remove(self, objs):
q = Q()
for obj in objs:
# Convert the obj to (content_type, primary_key)
q = q | Q(**{
self.field_names['tgt_ct']: get_content_type(obj),
self.field_names['tgt_fk']: obj.pk
})
return q & Q(**{
'%s_id' % self.field_names['src']: self.pk
})
def _to_clear(self):
return {
'%s_id' % self.field_names['src']: self.pk
}
def _to_change(self, objs, db):
"""
Returns the sets of items to be added and a Q object for removal
"""
to_add = set()
src_fname = self.field_names['src']
ct_fname = self.field_names['tgt_ct']
fk_fname = self.field_names['tgt_fk']
vals = list(self.through._default_manager.using(db)
.filter(**{self.field_names['src']: self.pk})
.values_list(ct_fname, fk_fname))
known_cts = set(v[0] for v in vals)
for obj in objs:
ct = get_content_type(obj)
val = (ct, obj.pk)
try:
vals.remove(val)
except ValueError:
# val is not in vals
# extract content type and primary key for each object
to_add.add((ct, obj.pk))
if ct.pk not in known_cts:
# call field.add_relation for each unknown model
self.field.add_relation(obj.__class__, auto=True)
known_cts.add(ct.pk)
rem_q = Q()
for val in vals:
# Convert the obj to (content_type, primary_key)
rem_q = rem_q | Q(**{
ct_fname: val[0],
fk_fname: val[1]
})
return [
self.through(**{
'%s_id' % src_fname: self.pk,
ct_fname: t[0],
fk_fname: t[1]
}) for t in to_add
], \
rem_q & Q(**{
'%s_id' % src_fname: self.pk
})
def create_gm2m_related_manager(superclass=None, **kwargs):
"""
Dynamically create a manager class that only concerns an instance (source
or target)
"""
bases = [GM2MBaseManager]
if superclass is None:
# no superclass provided, the manager is a generic target model manager
bases.insert(0, GM2MBaseTgtManager)
else:
# superclass provided, the manager is a source model manager and also
# derives from superclass
bases.insert(0, GM2MBaseSrcManager)
bases.append(superclass)
# Django's Manager constructor sets model to None, we store it under the
# class's attribute '_model' and it is retrieved in __init__
kwargs['_model'] = kwargs.pop('model')
return type(Manager)('GM2MManager', tuple(bases), kwargs)
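# Illustrative only: the factory above is normally invoked by the field's
# descriptor machinery; every keyword passed ends up as a class attribute of
# the generated manager (names below mirror the attributes the managers use,
# exact call sites may differ):
#
#   manager_cls = create_gm2m_related_manager(
#       superclass=None,              # None -> target-side manager
#       model=through_model,          # stored as _model, becomes self.model
#       through=through_model,
#       field=gm2m_field,
#       field_names=field_names,
#       query_field_name=query_field_name,
#       prefetch_cache_name=prefetch_cache_name,
#   )
#   related_manager = manager_cls(instance)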
| from django.db import router
from django.db.models import Q, Manager
from django.db import connections
from .contenttypes import ct, get_content_type
from .query import GM2MTgtQuerySet
class GM2MBaseManager(Manager):
use_in_migration = True
def __init__(self, instance):
super(GM2MBaseManager, self).__init__()
self.model = self._model # see create_gm2m_related_manager
self.instance = instance
self.pk = instance.pk
self.core_filters = {}
def get_queryset(self):
try:
return self.instance \
._prefetched_objects_cache[self.prefetch_cache_name]
except (AttributeError, KeyError):
db = self._db or router.db_for_read(self.instance.__class__,
instance=self.instance)
return self._get_queryset(using=db)._next_is_sticky() \
.filter(**self.core_filters)
def _get_queryset(self, using):
return super(GM2MBaseManager, self).get_queryset().using(using)
def get_prefetch_queryset(self, instances, queryset=None):
db = self._db or router.db_for_read(self.model,
instance=instances[0])
if queryset is None:
queryset = self._get_queryset(db)
qs, rel_obj_attr, instance_attr = \
self._get_prefetch_queryset_params(instances, queryset, db)
return (qs,
rel_obj_attr,
instance_attr,
False,
self.prefetch_cache_name)
def _get_extra_queryset(self, queryset, q, extra_fields, db):
join_table = self.through._meta.db_table
connection = connections[db]
qn = connection.ops.quote_name
extra = dict(select=dict(
('_prefetch_related_val_%s' % f.attname,
'%s.%s' % (qn(join_table), qn(f.column)))
for f in extra_fields))
return queryset.using(db)._next_is_sticky().filter(q).extra(**extra)
def _check_through_model(self, method_name):
# If the GM2M relation has an intermediary model,
# the add and remove methods are not available.
if not self.through._meta.auto_created:
opts = self.through._meta
raise AttributeError(
'Cannot use %s() on a ManyToManyField which specifies an '
'intermediary model. Use %s.%s\'s Manager instead.'
% (method_name, opts.app_label, opts.object_name))
def _do_add(self, db, through_objs):
"""
Performs items addition
"""
# Add the new entries in the db table
self.through._default_manager.using(db).bulk_create(through_objs)
def add(self, *objs):
"""
Adds objects to the GM2M field
:param *objs: object instances to add
"""
#
self._check_through_model('add')
if not objs:
return
db = router.db_for_write(self.through, instance=self.instance)
self._do_add(db, self._to_add(objs, db))
add.alters_data = True
def _do_remove(self, db, q):
"""
Performs items removal from a Q object
"""
self.through._default_manager.using(db).filter(q).delete()
def remove(self, *objs):
"""
Removes objects from the GM2M field
"""
# *objs - objects to remove
self._check_through_model('remove')
if not objs:
return
db = router.db_for_write(self.through, instance=self.instance)
self._do_remove(db, self._to_remove(objs))
remove.alters_data = True
def _do_clear(self, db, filter=None):
self.through._default_manager.using(db).filter(**(filter or {})) \
.delete()
def set(self, objs, **kwargs):
"""
Sets the objs iterable as the set of related objects
(Added for compatibility with Django 1.9)
"""
self._check_through_model('set')
objs = tuple(objs)
clear = kwargs.pop('clear', False)
db = router.db_for_write(self.through, instance=self.instance)
if clear:
# clears all and re-adds
self._do_clear(db)
self._do_add(db, *objs)
else:
# just removes the necessary items and adds the missing ones
to_add, to_remove = self._to_change(objs, db)
self._do_remove(db, to_remove)
self._do_add(db, to_add)
set.alters_data = True
def clear(self):
db = router.db_for_write(self.through, instance=self.instance)
self._do_clear(db, self._to_clear())
clear.alters_data = True
class GM2MBaseSrcManager(Manager):
def __init__(self, instance):
# the manager's model is the source model
super(GM2MBaseSrcManager, self).__init__(instance)
self.core_filters['%s__%s' % (self.query_field_name,
self.field_names['tgt_ct'])] = \
get_content_type(self.instance)
self.core_filters['%s__%s' % (self.query_field_name,
self.field_names['tgt_fk'])] = \
self.instance.pk
def _get_prefetch_queryset_params(self, instances, queryset, db):
# we're looking for generic target instances, which should be
# converted to (content_type, primary_key) tuples
q = Q()
for obj in instances:
q = q | Q(**{
'%s__%s' % (self.query_field_name,
self.field_names['tgt_ct']):get_content_type(obj),
'%s__%s' % (self.query_field_name,
self.field_names['tgt_fk']): obj.pk
})
# Annotating the query in order to retrieve the primary model
# content type and id in the same query
# content type must be the 1st element, see rel_obj_attr below
extra_fields = (
self.through._meta.get_field(self.field_names['tgt_ct']),
self.through._meta.get_field(self.field_names['tgt_fk'])
)
qs = self._get_extra_queryset(queryset, q, extra_fields, db)
# primary model retrieval function
def rel_obj_attr(relobj):
t = []
for f in extra_fields:
try:
# t already contains the content type id
# we use get_for_id to retrieve the cached content type
model = ct.ContentType.objects.get_for_id(t[0]) \
.model_class()
except IndexError:
# t is empty
model = ct.ContentType
t.append(model._meta.pk.to_python(
getattr(relobj, '_prefetch_related_val_%s' % f.attname)
))
return tuple(t)
# model attribute retrieval function
instance_attr = lambda inst: \
(get_content_type(inst).pk, inst.pk)
return qs, rel_obj_attr, instance_attr
def _to_add(self, objs, db):
# we're using the reverse relation to add source model
# instances
inst_ct = get_content_type(self.instance)
vals = self.through._default_manager.using(db) \
.values_list(self.field_names['src'],
flat=True) \
.filter(**{
self.field_names['tgt_ct']: inst_ct,
self.field_names['tgt_fk']: self.pk
})
to_add = []
for obj in objs:
if obj.pk not in vals:
to_add.append(self.through(**{
'%s_id' % self.field_names['src']:
obj.pk,
self.field_names['tgt_ct']: inst_ct,
self.field_names['tgt_fk']: self.pk
}))
return to_add
def _to_remove(self, objs):
# we're using the reverse relation to delete source model
# instances
inst_ct = get_content_type(self.instance)
return Q(**{
'%s_id__in' % self.field_names['src']:
[obj.pk for obj in objs],
self.field_names['tgt_ct']: inst_ct,
self.field_names['tgt_fk']: self.pk
})
def _to_change(self, objs, db):
"""
Returns the sets of items to be added and a Q object for removal
"""
inst_ct = get_content_type(self.instance)
vals = list(self.through._default_manager.using(db)
.values_list(self.field_names['src'], flat=True)
.filter(**{
self.field_names['tgt_ct']: inst_ct,
self.field_names['tgt_fk']: self.pk
}))
to_add = set()
to_remove = set()
for obj in objs:
try:
vals.remove(obj.pk)
except ValueError:
# obj.pk is not in vals and must be added
to_add.add(self.through(**{
'%s_id' % self.field_names['src']:
obj.pk,
self.field_names['tgt_ct']: inst_ct,
self.field_names['tgt_fk']: self.pk
}))
for v in vals:
to_remove.add(v)
return to_add, Q(pk__in=to_remove)
def _to_clear(self):
return {
self.field_names['tgt_ct']: get_content_type(self.instance),
self.field_names['tgt_fk']: self.instance.pk
}
class GM2MBaseTgtManager(Manager):
def __init__(self, instance):
# the manager's model is the through model
super(GM2MBaseTgtManager, self).__init__(instance)
source_field = self.through._meta.get_field(
self.field_names['src'])
self.source_related_fields = source_field.related_fields
for __, rh_field in self.source_related_fields:
key = '%s__%s' % (self.query_field_name, rh_field.name)
self.core_filters[key] = getattr(self.instance,
rh_field.attname)
def _get_queryset(self, using):
return GM2MTgtQuerySet(self.model, using=using)
def _get_prefetch_queryset_params(self, instances, queryset, db):
# we're looking for through model instances
query = {}
for lh_field, rh_field in self.source_related_fields:
query['%s__in' % lh_field.name] = \
set(getattr(obj, rh_field.attname)
for obj in instances)
q = Q(**query)
# Annotating the query in order to retrieve the primary model
# id in the same query
fk = self.through._meta.get_field(self.field_names['src'])
extra_fields = fk.local_related_fields
qs = self._get_extra_queryset(queryset, q, extra_fields, db)
# marking the queryset so that the original queryset should
# be returned when evaluated the first time
qs._related_prefetching = True
# primary model retrieval function
def rel_obj_attr(relobj):
t = []
for f in extra_fields:
v = getattr(relobj,
'_prefetch_related_val_%s' % f.attname)
try:
v = v.pop()
except AttributeError: # v is not a list
pass
t.append(f.related_model._meta.pk.to_python(v))
return tuple(t)
# model attribute retrieval function
select_fields = fk.foreign_related_fields
instance_attr = lambda inst: tuple([getattr(inst, f.attname)
for f in select_fields])
return qs, rel_obj_attr, instance_attr
def _to_add(self, objs, db):
models = []
objs_set = set()
for obj in objs:
# extract content type and primary key for each object
objs_set.add((get_content_type(obj),
obj.pk))
m = obj.__class__
if m not in models:
# call field.add_relation for each model
models.append(m)
self.field.add_relation(m, auto=True)
vals = self.through._default_manager.using(db) \
.filter(**{self.field_names['src']: self.pk}) \
.values_list(self.field_names['tgt_ct'],
self.field_names['tgt_fk'])
to_add = []
for ct, pk in objs_set.difference(vals):
to_add.append(self.through(**{
'%s_id' % self.field_names['src']: self.pk,
self.field_names['tgt_ct']: ct,
self.field_names['tgt_fk']: pk
}))
return to_add
def _to_remove(self, objs):
q = Q()
for obj in objs:
# Convert the obj to (content_type, primary_key)
q = q | Q(**{
self.field_names['tgt_ct']: get_content_type(obj),
self.field_names['tgt_fk']: obj.pk
})
return q & Q(**{
'%s_id' % self.field_names['src']: self.pk
})
def _to_clear(self):
return {
'%s_id' % self.field_names['src']: self.pk
}
def _to_change(self, objs, db):
"""
Returns the sets of items to be added and a Q object for removal
"""
to_add = set()
src_fname = self.field_names['src']
ct_fname = self.field_names['tgt_ct']
fk_fname = self.field_names['tgt_fk']
vals = list(self.through._default_manager.using(db)
.filter(**{self.field_names['src']: self.pk})
.values_list(ct_fname, fk_fname))
known_cts = set(v[0] for v in vals)
for obj in objs:
ct = get_content_type(obj)
val = (ct, obj.pk)
try:
vals.remove(val)
except ValueError:
# val is not in vals
# extract content type and primary key for each object
to_add.add((ct, obj.pk))
if ct.pk not in known_cts:
# call field.add_relation for each unknown model
self.field.add_relation(obj.__class__, auto=True)
known_cts.add(ct.pk)
rem_q = Q()
for val in vals:
# Convert the obj to (content_type, primary_key)
rem_q = rem_q | Q(**{
ct_fname: val[0],
fk_fname: val[1]
})
return [
self.through(**{
'%s_id' % src_fname: self.pk,
ct_fname: t[0],
fk_fname: t[1]
}) for t in to_add
], \
rem_q & Q(**{
'%s_id' % src_fname: self.pk
})
def create_gm2m_related_manager(superclass=None, **kwargs):
"""
Dynamically create a manager class that only concerns an instance (source
or target)
"""
bases = [GM2MBaseManager]
if superclass is None:
# no superclass provided, the manager is a generic target model manager
bases.insert(0, GM2MBaseTgtManager)
else:
# superclass provided, the manager is a source model manager and also
# derives from superclass
bases.insert(0, GM2MBaseSrcManager)
bases.append(superclass)
# Django's Manager constructor sets model to None, we store it under the
# class's attribute '_model' and it is retrieved in __init__
kwargs['_model'] = kwargs.pop('model')
return type(Manager)('GM2MManager', tuple(bases), kwargs)
| en | 0.82015 | # see create_gm2m_related_manager # If the GM2M relation has an intermediary model, # the add and remove methods are not available. Performs items addition # Add the new entries in the db table Adds objects to the GM2M field
:param *objs: object instances to add # Perfoms items removal from a Q object Removes objects from the GM2M field # *objs - objects to remove Sets the objs iterable as the set of related objects
(Added for compatibility with Django 1.9) # clears all and re-adds # just removes the necessary items and adds the missing ones # the manager's model is the source model # we're looking for generic target instances, which should be # converted to (content_type, primary_key) tuples # Annotating the query in order to retrieve the primary model # content type and id in the same query # content type must be the 1st element, see rel_obj_attr below # primary model retrieval function # t already contains the content type id # we use get_for_id to retrieve the cached content type # t is empty # model attribute retrieval function # we're using the reverse relation to add source model # instances # we're using the reverse relation to delete source model # instances Returns the sets of items to be added and a Q object for removal # obj.pk is not in vals and must be added # the manager's model is the through model # we're looking for through model instances # Annotating the query in order to retrieve the primary model # id in the same query # marking the queryset so that the original queryset should # be returned when evaluated the first time # primary model retrieval function # v is not a list # model attribute retrieval function # extract content type and primary key for each object # call field.add_relation for each model # Convert the obj to (content_type, primary_key) Returns the sets of items to be added and a Q object for removal # val is not in vals # extract content type and primary key for each object # call field.add_relation for each unknown model # Convert the obj to (content_type, primary_key) Dynamically create a manager class that only concerns an instance (source
or target) # no superclass provided, the manager is a generic target model manager # superclass provided, the manager is a source model manager and also # derives from superclass # Django's Manager constructor sets model to None, we store it under the # class's attribute '_model' and it is retrieved in __init__ | 2.026506 | 2 |
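The create_gm2m_related_manager factory above builds its manager class at runtime rather than declaring it statically. Below is a minimal, dependency-free sketch of that pattern; the names (Base, DynamicManager, _model) are illustrative only, and the real factory calls the metaclass of Django's Manager (type(Manager)) instead of plain type so Django's class machinery still runs.

# Sketch of runtime class creation with type(name, bases, attrs); names are illustrative.
class Base:
    def describe(self):
        return "instance of %s, model=%r" % (type(self).__name__, type(self)._model)

def make_manager_class(model):
    # the third argument becomes the class dict, mirroring kwargs['_model'] above
    return type('DynamicManager', (Base,), {'_model': model})

DynamicManager = make_manager_class('FakeModel')
print(DynamicManager().describe())  # instance of DynamicManager, model='FakeModel'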
rastreador-de-bolso/TwitterListener.py | vitorduarte/RastreadorDeBolso | 1 | 8454 | from selenium.webdriver.chrome.options import Options
from selenium import webdriver
import logging
import coloredlogs
import os
import pathlib
import time
import twitter as tt
from utils import retry
from fetch_likes import get_user_likes, login
from conf.settings import USER_ID, USERNAME, PASSWORD
CURR_PATH = pathlib.Path(__file__).parent.absolute()
TWEETS_FOLDER = os.path.join(CURR_PATH, 'screenshots')
LIKED_FOLDER = os.path.join(CURR_PATH, 'screenshots', 'liked')
class TwitterListener():
def __init__(self, user_id=USER_ID, search_base=40):
# Configure log
coloredlogs.install()
logging.basicConfig()
self.logger = logging.getLogger('TwitterListener')
self.logger.setLevel(logging.DEBUG)
# Set chrome options
chrome_options = Options()
chrome_options.add_argument('--headless')
chrome_options.add_argument("--no-sandbox")
self.driver = webdriver.Chrome(options=chrome_options)
# Create formatter, file handler and add they to the handlers
formatter = logging.Formatter(
'%(asctime)s - %(name)s - %(levelname)s - %(message)s')
fh = logging.FileHandler('twitter.log')
fh.setFormatter(formatter)
self.logger.addHandler(fh)
self.search_base = search_base
self.user_id = user_id
self.target = tt.get_username_from_id(user_id)
self.is_logged = False
self.has_previous_tweets = False
self.has_previous_friends = False
self.has_previous_likes = False
def _get_new_tweets(self):
if(not self.has_previous_tweets):
self.previous_tweets_ids = tt.get_ids_from_tweets(
tt.get_tweets(user_id=self.user_id, count=self.search_base))
self.has_previous_tweets = True
last_tweets = tt.get_tweets(user_id=self.user_id,
count=self.search_base)
last_tweets_ids = tt.get_ids_from_tweets(last_tweets)
diff_tweets = self._get_new_diff(
last_tweets_ids, self.previous_tweets_ids)
if diff_tweets:
new_tweets = [last_tweets[i] for i in range(len(diff_tweets))]
self.previous_tweets_ids = last_tweets_ids
new_tweets.reverse()
return new_tweets
return []
def _get_new_likes(self):
count = self.search_base/2
if(not self.is_logged):
login(self.driver, USERNAME, PASSWORD)
self.is_logged = True
if(not self.has_previous_likes):
self.previous_likes_ids = get_user_likes(
self.driver, self.target, count=count)
self.has_previous_likes = True
new_likes_ids = get_user_likes(
self.driver, self.target, count=count)
diff_tweets = self._get_new_diff(
new_likes_ids, self.previous_likes_ids)
if diff_tweets:
self.previous_likes_ids = new_likes_ids
diff_tweets.reverse()
return diff_tweets
return []
def _get_new_diff(self, curr, old):
count = len(old)
return list(set(curr[:count//2]) -
set(old))
def _get_abs_diff(self, first_list, second_list):
return list(set(first_list) - set(second_list))
def print_new_tweets(self):
try:
new_tweets = self._get_new_tweets()
for tweet in new_tweets:
tweet_id = str(tweet['id'])
tweet_url = tt.get_url(tweet)
# Get image
self.logger.info('New tweet %s', tweet_url)
img_path = os.path.join(TWEETS_FOLDER, f'{tweet_id}.png')
retry(tt.print_tweet, tweet_url,
self.driver, output_path=img_path)
self.logger.debug('Take a screenshot of tweet')
# Tweet image
tweet_msg = '<NAME> acabou de twittar'
self.logger.debug(
f'Is a retweet: {"retweeted_status" in tweet}')
if('retweeted_status' in tweet):
tweet_msg = '<NAME> acabou de retweetar'
tt.tweet_print(img_path, tweet_url, tweet_msg)
self.logger.debug('Tweet the screenshot')
except Exception as e:
self.logger.error(e)
def print_new_likes(self):
try:
new_likes = self._get_new_likes()
for t_id in new_likes:
t_url = f'https://twitter.com/{self.target}/status/{t_id}'
# Get image
self.logger.info('New like %s', t_url)
img_path = os.path.join(LIKED_FOLDER, f'{t_id}.png')
retry(tt.print_tweet, t_url, self.driver, output_path=img_path)
self.logger.debug('Take a screenshot of tweet')
# Tweet image
t_msg = '<NAME> acabou de curtir esse tweet'
tt.tweet_print(img_path, t_url, t_msg)
self.logger.debug('Tweet the screenshot')
except Exception as e:
self.logger.error(e)
def watch_friends(self):
try:
if(not self.has_previous_friends):
self.previous_friends = tt.get_friends_ids(
user_id=self.user_id)
self.has_previous_friends = True
last_friends = tt.get_friends_ids(user_id=self.user_id)
new_friends = self._get_abs_diff(
last_friends, self.previous_friends)
unfriends = self._get_abs_diff(self.previous_friends, last_friends)
for user_id in new_friends:
username = tt.get_username_from_id(user_id=user_id)
self.logger.info(f'New friend: @{username}')
retry(
tt.update_status,
status=(
f'<NAME> aparentemente está seguindo @{username}.'
'\n(Esse bot não consegue verificar se essa atualização foi gerada '
'por um follow ou por uma reativação de conta)'
)
)
for user_id in unfriends:
username = tt.get_username_from_id(user_id=user_id)
self.logger.info(f'Unfriend: @{username}')
retry(
tt.update_status,
status=(
f'<NAME> aparentemente deixou de seguir @{username}.'
'\n(Esse bot não consegue verificar se essa atualização foi gerada '
'por um unfollow, suspensão ou block.)'
)
)
self.previous_friends = last_friends
except Exception as e:
self.logger.error(e)
| from selenium.webdriver.chrome.options import Options
from selenium import webdriver
import logging
import coloredlogs
import os
import pathlib
import time
import twitter as tt
from utils import retry
from fetch_likes import get_user_likes, login
from conf.settings import USER_ID, USERNAME, PASSWORD
CURR_PATH = pathlib.Path(__file__).parent.absolute()
TWEETS_FOLDER = os.path.join(CURR_PATH, 'screenshots')
LIKED_FOLDER = os.path.join(CURR_PATH, 'screenshots', 'liked')
class TwitterListener():
def __init__(self, user_id=USER_ID, search_base=40):
# Configure log
coloredlogs.install()
logging.basicConfig()
self.logger = logging.getLogger('TwitterListener')
self.logger.setLevel(logging.DEBUG)
# Set chrome options
chrome_options = Options()
chrome_options.add_argument('--headless')
chrome_options.add_argument("--no-sandbox")
self.driver = webdriver.Chrome(options=chrome_options)
# Create formatter, file handler and add they to the handlers
formatter = logging.Formatter(
'%(asctime)s - %(name)s - %(levelname)s - %(message)s')
fh = logging.FileHandler('twitter.log')
fh.setFormatter(formatter)
self.logger.addHandler(fh)
self.search_base = search_base
self.user_id = user_id
self.target = tt.get_username_from_id(user_id)
self.is_logged = False
self.has_previous_tweets = False
self.has_previous_friends = False
self.has_previous_likes = False
def _get_new_tweets(self):
if(not self.has_previous_tweets):
self.previous_tweets_ids = tt.get_ids_from_tweets(
tt.get_tweets(user_id=self.user_id, count=self.search_base))
self.has_previous_tweets = True
last_tweets = tt.get_tweets(user_id=self.user_id,
count=self.search_base)
last_tweets_ids = tt.get_ids_from_tweets(last_tweets)
diff_tweets = self._get_new_diff(
last_tweets_ids, self.previous_tweets_ids)
if diff_tweets:
new_tweets = [last_tweets[i] for i in range(len(diff_tweets))]
self.previous_tweets_ids = last_tweets_ids
new_tweets.reverse()
return new_tweets
return []
def _get_new_likes(self):
count = self.search_base/2
if(not self.is_logged):
login(self.driver, USERNAME, PASSWORD)
self.is_logged = True
if(not self.has_previous_likes):
self.previous_likes_ids = get_user_likes(
self.driver, self.target, count=count)
self.has_previous_likes = True
new_likes_ids = get_user_likes(
self.driver, self.target, count=count)
diff_tweets = self._get_new_diff(
new_likes_ids, self.previous_likes_ids)
if diff_tweets:
self.previous_likes_ids = new_likes_ids
diff_tweets.reverse()
return diff_tweets
return []
def _get_new_diff(self, curr, old):
count = len(old)
return list(set(curr[:count//2]) -
set(old))
def _get_abs_diff(self, first_list, second_list):
return list(set(first_list) - set(second_list))
def print_new_tweets(self):
try:
new_tweets = self._get_new_tweets()
for tweet in new_tweets:
tweet_id = str(tweet['id'])
tweet_url = tt.get_url(tweet)
# Get image
self.logger.info('New tweet %s', tweet_url)
img_path = os.path.join(TWEETS_FOLDER, f'{tweet_id}.png')
retry(tt.print_tweet, tweet_url,
self.driver, output_path=img_path)
self.logger.debug('Take a screenshot of tweet')
# Tweet image
tweet_msg = '<NAME> acabou de twittar'
self.logger.debug(
f'Is a retweet: {"retweeted_status" in tweet}')
if('retweeted_status' in tweet):
tweet_msg = '<NAME> acabou de retweetar'
tt.tweet_print(img_path, tweet_url, tweet_msg)
self.logger.debug('Tweet the screenshot')
except Exception as e:
self.logger.error(e)
def print_new_likes(self):
try:
new_likes = self._get_new_likes()
for t_id in new_likes:
t_url = f'https://twitter.com/{self.target}/status/{t_id}'
# Get image
self.logger.info('New like %s', t_url)
img_path = os.path.join(LIKED_FOLDER, f'{t_id}.png')
retry(tt.print_tweet, t_url, self.driver, output_path=img_path)
self.logger.debug('Take a screenshot of tweet')
# Tweet image
t_msg = '<NAME> acabou de curtir esse tweet'
tt.tweet_print(img_path, t_url, t_msg)
self.logger.debug('Tweet the screenshot')
except Exception as e:
self.logger.error(e)
def watch_friends(self):
try:
if(not self.has_previous_friends):
self.previous_friends = tt.get_friends_ids(
user_id=self.user_id)
self.has_previous_friends = True
last_friends = tt.get_friends_ids(user_id=self.user_id)
new_friends = self._get_abs_diff(
last_friends, self.previous_friends)
unfriends = self._get_abs_diff(self.previous_friends, last_friends)
for user_id in new_friends:
username = tt.get_username_from_id(user_id=user_id)
self.logger.info(f'New friend: @{username}')
retry(
tt.update_status,
status=(
f'<NAME> aparentemente está seguindo @{username}.'
'\n(Esse bot não consegue verificar se essa atualização foi gerada '
'por um follow ou por uma reativação de conta)'
)
)
for user_id in unfriends:
username = tt.get_username_from_id(user_id=user_id)
self.logger.info(f'Unfriend: @{username}')
retry(
tt.update_status,
status=(
f'<NAME> aparentemente deixou de seguir @{username}.'
'\n(Esse bot não consegue verificar se essa atualização foi gerada '
'por um unfollow, suspensão ou block.)'
)
)
self.previous_friends = last_friends
except Exception as e:
self.logger.error(e)
| en | 0.6865 | # Configure log # Set chrome options # Create formatter, file handler and add they to the handlers # Get image # Tweet image # Get image # Tweet image | 2.349149 | 2 |
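_get_new_diff above detects fresh tweet and like IDs by comparing only the newest half of the just-fetched list against the previously stored IDs, so deletions further down the timeline are not reported as new items. A standalone sketch of that logic follows; the IDs are made up.

# Standalone illustration of the _get_new_diff logic above; IDs are made up.
def get_new_diff(curr, old):
    count = len(old)
    return list(set(curr[:count // 2]) - set(old))

old_ids = [105, 104, 103, 102, 101, 100]
curr_ids = [107, 106, 105, 104, 103, 102]  # two new tweets arrived since the last poll
print(sorted(get_new_diff(curr_ids, old_ids)))  # [106, 107]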
smartexcel/tests/data/data_models/dummy.py | pierrealixt/SmartExcel | 0 | 8455 | class Dummy():
def __init__(self, data):
self.name = data['name']
self.age = data['age']
self.city = data['city']
class DummyData():
def __init__(self):
self.results = [
Dummy({
'name': 'PA',
'age': 29,
'city': 'Paris'
}),
Dummy({
'name': 'Cairo',
'age': 0,
'city': 'Muizenberg'
}),
Dummy({
'name': 'Carina',
'age': 26,
'city': 'Windhoek'
})
]
def write_name(self, instance, kwargs={}):
return instance.name
def write_age(self, instance, kwargs={}):
return instance.age
def write_city(self, instance, kwargs={}):
return instance.city
def get_age_list(self):
return [i for i in range(0, 99)]
def get_city_list(self):
return [
'Paris',
'Muizenberg',
'Windhoek',
'Saint-Dizier'
]
def write_get_repeat_func(self):
return len(self.results)
def write_get_name_func(self, instance, kwargs={}):
return self.results[kwargs['index']].name
| class Dummy():
def __init__(self, data):
self.name = data['name']
self.age = data['age']
self.city = data['city']
class DummyData():
def __init__(self):
self.results = [
Dummy({
'name': 'PA',
'age': 29,
'city': 'Paris'
}),
Dummy({
'name': 'Cairo',
'age': 0,
'city': 'Muizenberg'
}),
Dummy({
'name': 'Carina',
'age': 26,
'city': 'Windhoek'
})
]
def write_name(self, instance, kwargs={}):
return instance.name
def write_age(self, instance, kwargs={}):
return instance.age
def write_city(self, instance, kwargs={}):
return instance.city
def get_age_list(self):
return [i for i in range(0, 99)]
def get_city_list(self):
return [
'Paris',
'Muizenberg',
'Windhoek',
'Saint-Dizier'
]
def write_get_repeat_func(self):
return len(self.results)
def write_get_name_func(self, instance, kwargs={}):
return self.results[kwargs['index']].name
| none | 1 | 3.504238 | 4 |
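DummyData above is a test fixture whose write_<field> methods are looked up and called once per instance by a spreadsheet writer. A hypothetical driver loop (not part of the fixture) shows how those hooks resolve, assuming the classes above are in scope.

# Hypothetical driver for the DummyData fixture above.
data = DummyData()
columns = ['name', 'age', 'city']
for row_index, instance in enumerate(data.results):
    cells = [getattr(data, 'write_%s' % col)(instance) for col in columns]
    print(row_index, cells)
# 0 ['PA', 29, 'Paris']
# 1 ['Cairo', 0, 'Muizenberg']
# 2 ['Carina', 26, 'Windhoek']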
|
ASR_TransV1/Load_sp_model.py | HariKrishna-Vydana/ASR_Transformer | 1 | 8456 |
#!/usr/bin/python
import sys
import os
from os.path import join, isdir
import sentencepiece as spm
#--------------------------
def Load_sp_models(PATH):
PATH_model = spm.SentencePieceProcessor()
PATH_model.Load(join(PATH))
return PATH_model
#--------------------------
| #!/usr/bin/python
import sys
import os
from os.path import join, isdir
import sentencepiece as spm
#--------------------------
def Load_sp_models(PATH):
PATH_model = spm.SentencePieceProcessor()
PATH_model.Load(join(PATH))
return PATH_model
#-------------------------- | pt | 0.093462 | #!/usr/bin/python #-------------------------- #-------------------------- | 2.122915 | 2 |
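Load_sp_models above is a thin wrapper around sentencepiece.SentencePieceProcessor.Load. A usage sketch follows; the model path is a placeholder for any trained SentencePiece .model file.

# Usage sketch for Load_sp_models above; 'bpe_1000.model' is a placeholder path.
sp = Load_sp_models('bpe_1000.model')
print(sp.EncodeAsIds('hello world'))     # subword token ids
print(sp.EncodeAsPieces('hello world'))  # subword pieces
print(sp.GetPieceSize())                 # vocabulary size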
fiepipedesktoplib/gitlabserver/shell/manager.py | leith-bartrich/fiepipe_desktop | 0 | 8457 |
import typing
from fiepipelib.gitlabserver.data.gitlab_server import GitLabServer
from fiepipelib.gitlabserver.routines.manager import GitLabServerManagerInteractiveRoutines
from fiepipedesktoplib.gitlabserver.shell.gitlab_hostname_input_ui import GitLabHostnameInputDefaultShellUI
from fiepipedesktoplib.gitlabserver.shell.gitlab_username_input_ui import GitLabUsernameInputDefaultShellUI
from fiepipedesktoplib.gitlabserver.shell.gitlab_private_token_input_ui import GitLabPrivateTokenInputDefaultShellUI
from fiepipedesktoplib.gitlabserver.shell.gitlabserver import GitLabServerShell
from fiepipedesktoplib.gitlabserver.shell.server_name_var_command import GitLabServerNameVar
from fiepipedesktoplib.locallymanagedtypes.shells.AbstractLocalManagedTypeCommand import LocalManagedTypeCommand
from fiepipedesktoplib.shells.AbstractShell import AbstractShell
from fiepipedesktoplib.shells.variables.fqdn_var_command import FQDNVarCommand
class GitLabServerManagerShell(LocalManagedTypeCommand[GitLabServer]):
def get_routines(self) -> GitLabServerManagerInteractiveRoutines:
return GitLabServerManagerInteractiveRoutines(feedback_ui=self.get_feedback_ui(),
hostname_input_default_ui=GitLabHostnameInputDefaultShellUI(self),
username_input_default_ui=GitLabUsernameInputDefaultShellUI(self),
private_token_input_default_ui=GitLabPrivateTokenInputDefaultShellUI(self))
def get_shell(self, item: GitLabServer) -> AbstractShell:
# Wrap the server in a GitLabServerShell keyed by its name.
server_name = GitLabServerNameVar()
server_name.set_value(item.get_name())
return GitLabServerShell(server_name)
def get_plugin_names_v1(self) -> typing.List[str]:
ret = super(GitLabServerManagerShell, self).get_plugin_names_v1()
ret.append("gitlabserver.manager")
return ret
def get_prompt_text(self) -> str:
return self.prompt_separator.join(['GitLabServer', 'Manager'])
def main():
shell = GitLabServerManagerShell()
shell.cmdloop()
if __name__ == '__main__':
main()
import typing
from fiepipelib.gitlabserver.data.gitlab_server import GitLabServer
from fiepipelib.gitlabserver.routines.manager import GitLabServerManagerInteractiveRoutines
from fiepipedesktoplib.gitlabserver.shell.gitlab_hostname_input_ui import GitLabHostnameInputDefaultShellUI
from fiepipedesktoplib.gitlabserver.shell.gitlab_username_input_ui import GitLabUsernameInputDefaultShellUI
from fiepipedesktoplib.gitlabserver.shell.gitlab_private_token_input_ui import GitLabPrivateTokenInputDefaultShellUI
from fiepipedesktoplib.gitlabserver.shell.gitlabserver import GitLabServerShell
from fiepipedesktoplib.gitlabserver.shell.server_name_var_command import GitLabServerNameVar
from fiepipedesktoplib.locallymanagedtypes.shells.AbstractLocalManagedTypeCommand import LocalManagedTypeCommand
from fiepipedesktoplib.shells.AbstractShell import AbstractShell
from fiepipedesktoplib.shells.variables.fqdn_var_command import FQDNVarCommand
class GitLabServerManagerShell(LocalManagedTypeCommand[GitLabServer]):
def get_routines(self) -> GitLabServerManagerInteractiveRoutines:
return GitLabServerManagerInteractiveRoutines(feedback_ui=self.get_feedback_ui(),
hostname_input_default_ui=GitLabHostnameInputDefaultShellUI(self),
username_input_default_ui=GitLabUsernameInputDefaultShellUI(self),
private_token_input_default_ui=GitLabPrivateTokenInputDefaultShellUI(self))
def get_shell(self, item: GitLabServer) -> AbstractShell:
# Wrap the server in a GitLabServerShell keyed by its name.
server_name = GitLabServerNameVar()
server_name.set_value(item.get_name())
return GitLabServerShell(server_name)
def get_plugin_names_v1(self) -> typing.List[str]:
ret = super(GitLabServerManagerShell, self).get_plugin_names_v1()
ret.append("gitlabserver.manager")
return ret
def get_prompt_text(self) -> str:
return self.prompt_separator.join(['GitLabServer', 'Manager'])
def main():
shell = GitLabServerManagerShell()
shell.cmdloop()
if __name__ == '__main__':
main()
| en | 0.899689 | # no shell currently. We call super instead. | 1.69608 | 2 |
fairseq/models/wav2vec/eteh_model/transformer/repeat.py | gaochangfeng/fairseq | 0 | 8458 | import torch
class MultiSequential(torch.nn.Sequential):
"""Multi-input multi-output torch.nn.Sequential"""
def forward(self, *args):
for m in self:
args = m(*args)
return args
def repeat(N, fn):
"""repeat module N times
:param int N: repeat time
:param function fn: function to generate module
:return: repeated modules
:rtype: MultiSequential
"""
return MultiSequential(*[fn(n) for n in range(N)])
| import torch
class MultiSequential(torch.nn.Sequential):
"""Multi-input multi-output torch.nn.Sequential"""
def forward(self, *args):
for m in self:
args = m(*args)
return args
def repeat(N, fn):
"""repeat module N times
:param int N: repeat time
:param function fn: function to generate module
:return: repeated modules
:rtype: MultiSequential
"""
return MultiSequential(*[fn(n) for n in range(N)])
| en | 0.394626 | Multi-input multi-output torch.nn.Sequential repeat module N times :param int N: repeat time :param function fn: function to generate module :return: repeated modules :rtype: MultiSequential | 3.06099 | 3 |
torch_lib/Nets.py | troncosoae/jetson-exp | 0 | 8459 |
import torch
import torch.nn as nn
import torch.nn.functional as F
class MediumNet(nn.Module):
def __init__(self):
super().__init__()
self.conv1 = nn.Conv2d(
3, out_channels=6, kernel_size=5, padding=0)
self.pool1 = nn.MaxPool2d(kernel_size=2, stride=2)
self.conv2 = nn.Conv2d(
6, out_channels=16, kernel_size=5, padding=0)
self.pool2 = nn.MaxPool2d(kernel_size=2, stride=2)
self.fc1 = nn.Linear(16*5*5, 120)
self.fc2 = nn.Linear(120, 84)
self.fc3 = nn.Linear(84, 10)
def forward(self, x):
x = self.pool1(F.relu(self.conv1(x)))
x = self.pool2(F.relu(self.conv2(x)))
x = torch.flatten(x, 1) # flatten all dimensions except batch
x = F.relu(self.fc1(x))
x = F.relu(self.fc2(x))
x = self.fc3(x)
return x
import torch
import torch.nn as nn
import torch.nn.functional as F
class MediumNet(nn.Module):
def __init__(self):
super().__init__()
self.conv1 = nn.Conv2d(
3, out_channels=6, kernel_size=5, padding=0)
self.pool1 = nn.MaxPool2d(kernel_size=2, stride=2)
self.conv2 = nn.Conv2d(
6, out_channels=16, kernel_size=5, padding=0)
self.pool2 = nn.MaxPool2d(kernel_size=2, stride=2)
self.fc1 = nn.Linear(16*5*5, 120)
self.fc2 = nn.Linear(120, 84)
self.fc3 = nn.Linear(84, 10)
def forward(self, x):
x = self.pool1(F.relu(self.conv1(x)))
x = self.pool2(F.relu(self.conv2(x)))
x = torch.flatten(x, 1) # flatten all dimensions except batch
x = F.relu(self.fc1(x))
x = F.relu(self.fc2(x))
x = self.fc3(x)
return x
| en | 0.604152 | # flatten all dimensions except batch | 2.672536 | 3 |
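MediumNet above is the classic LeNet-style classifier; the 16*5*5 flatten size only works out for 3x32x32 inputs (CIFAR-10-like). A quick shape check follows, assuming the class above is importable; the batch size is arbitrary.

# Shape check for MediumNet above; assumes 3x32x32 inputs.
import torch

net = MediumNet()
dummy = torch.randn(8, 3, 32, 32)  # batch of 8 RGB 32x32 images
logits = net(dummy)
print(logits.shape)  # torch.Size([8, 10])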
test123.py | umousesonic/zinc | 0 | 8460 | from runner import runner
if __name__ == '__main__':
r = runner()
p = 'public class main{public static void main (String[] args){' \
'public String StudentAnswer(String myInput){' \
'return "myOutput"; ' \
'}System.out.println("hello world!");}}'
print (r.sendCode(p, '')) | from runner import runner
if __name__ == '__main__':
r = runner()
p = 'public class main{public static void main (String[] args){' \
'public String StudentAnswer(String myInput){' \
'return "myOutput"; ' \
'}System.out.println("hello world!");}}'
print (r.sendCode(p, '')) | none | 1 | 2.732657 | 3 |
|
beancount_bot/bot.py | dumbPy/beancount_bot | 0 | 8461 |
import traceback
import telebot
from telebot import apihelper
from telebot.types import InlineKeyboardMarkup, InlineKeyboardButton, MessageEntity, Message, CallbackQuery
from beancount_bot import transaction
from beancount_bot.config import get_config, load_config
from beancount_bot.dispatcher import Dispatcher
from beancount_bot.i18n import _
from beancount_bot.session import get_session, SESS_AUTH, get_session_for, set_session
from beancount_bot.task import load_task, get_task
from beancount_bot.transaction import get_manager
from beancount_bot.util import logger
apihelper.ENABLE_MIDDLEWARE = True
bot = telebot.TeleBot(token=None, parse_mode=None)
@bot.middleware_handler(update_types=['message'])
def session_middleware(bot_instance, message):
"""
Session middleware
:param bot_instance:
:param message:
:return:
"""
bot_instance.session = get_session_for(message.from_user.id)
#######
# Authentication #
#######
def check_auth() -> bool:
"""
Check if you log in
:return:
"""
return SESS_AUTH in bot.session and bot.session[SESS_AUTH]
@bot.message_handler(commands=['start'])
def start_handler(message: Message):
"""
First chat time authentication
:param message:
:return:
"""
auth = get_session(message.from_user.id, SESS_AUTH, False)
if auth:
bot.reply_to(message, _("Have been authenticated!"))
return
# Require authentication
bot.reply_to(message, _("Welcome to the accounting robot!Please enter the authentication token:"))
def auth_token_handler(message: Message):
"""
Login token callback
:param message:
:return:
"""
if check_auth():
return
# Any message from an unauthenticated user is treated as an authentication token
auth_token = get_config('bot.auth_token')
if auth_token == message.text:
set_session(message.from_user.id, SESS_AUTH, True)
bot.reply_to(message, _("Authentic success!"))
else:
bot.reply_to(message, _("Authentication token error!"))
#######
# instruction #
#######
@bot.message_handler(commands=['reload'])
def reload_handler(message):
"""
Overload configuration instruction
:param message:
:return:
"""
if not check_auth():
bot.reply_to(message, _("Please conduct authentication first!"))
return
load_config()
load_task()
bot.reply_to(message, _("Successful overload configuration!"))
@bot.message_handler(commands=['help'])
def help_handler(message):
"""
Help instruction
:param message:
:return:
"""
cmd = message.text
dispatchers = get_manager().dispatchers
if cmd == '/help':
# Create a message button
markup = InlineKeyboardMarkup()
for ind, d in zip(range(len(dispatchers)), dispatchers):
help_btn = _("help:{name}").format(name=d.get_name())
markup.add(InlineKeyboardButton(help_btn, callback_data=f'help:{ind}'))
# Help message
command_usage = [
_("/start - Authentication"),
_("/help - Using help"),
_("/reload - Reload the configuration file"),
_("/task - View, run the task"),
]
help_text = \
_("Account bill Bot\n\nAvailable instruction list:\n{command}\n\nTrade statement syntax help, select the corresponding module,Use /help [Module name] Check.").format(
command='\n'.join(command_usage))
bot.reply_to(message, help_text, reply_markup=markup)
else:
# Display detailed help
name: str = cmd[6:]
flag_found = False
for d in dispatchers:
if name.lower() == d.get_name().lower():
show_usage_for(message, d)
flag_found = True
if not flag_found:
bot.reply_to(message, _("The corresponding name of the transaction statement processor does not exist!"))
def show_usage_for(message: Message, d: Dispatcher):
"""
Show the method of use of a specific processor
:param message:
:param d:
:return:
"""
usage = _("help:{name}\n\n{usage}").format(name=d.get_name(), usage=d.get_usage())
bot.reply_to(message, usage)
@bot.callback_query_handler(func=lambda call: call.data[:4] == 'help')
def callback_help(call: CallbackQuery):
"""
Help statement detailed help
:param call:
:return:
"""
try:
d_id = int(call.data[5:])
dispatchers = get_manager().dispatchers
show_usage_for(call.message, dispatchers[d_id])
except Exception as e:
logger.error(f'{call.id}:Unknown error!', e)
logger.error(traceback.format_exc())
bot.answer_callback_query(call.id, _("Unknown error!\n"+traceback.format_exc()))
@bot.message_handler(commands=['task'])
def task_handler(message):
"""
Task instruction
:param message:
:return:
"""
if not check_auth():
bot.reply_to(message, _("Please conduct authentication first!"))
return
cmd = message.text
tasks = get_task()
if cmd == '/task':
# Show all tasks
all_tasks = ', '.join(tasks.keys())
bot.reply_to(message,
_("Current registration task:{all_tasks}\n"
"able to pass /task [Task Name] Active trigger").format(all_tasks=all_tasks))
else:
# Run task
dest = cmd[6:]
if dest not in tasks:
bot.reply_to(message, _("Task does not exist!"))
return
task = tasks[dest]
task.trigger(bot)
#######
# trade #
#######
@bot.message_handler(func=lambda m: True)
def transaction_query_handler(message: Message):
"""
Trading statement processing
:param message:
:return:
"""
if not check_auth():
auth_token_handler(message)
return
# Parse the message as a transaction statement
manager = get_manager()
try:
tx_uuid, tx = manager.create_from_str(message.text)
# Create a message button
markup = InlineKeyboardMarkup()
markup.add(InlineKeyboardButton(_("Revoke trading"), callback_data=f'withdraw:{tx_uuid}'))
# Reply
bot.reply_to(message, transaction.stringfy(tx), reply_markup=markup)
except ValueError as e:
logger.info(f'{message.from_user.id}:Unable to add transactions', e)
bot.reply_to(message, e.args[0])
except Exception as e:
logger.error(f'{message.from_user.id}:An unknown mistake!Adding a transaction failed.', e)
bot.reply_to(message, _("An unknown mistake!Adding a transaction failed.\n"+traceback.format_exc()))
@bot.callback_query_handler(func=lambda call: call.data[:8] == 'withdraw')
def callback_withdraw(call: CallbackQuery):
"""
Transaction withdrawal callback
:param call:
:return:
"""
auth = get_session(call.from_user.id, SESS_AUTH, False)
if not auth:
bot.answer_callback_query(call.id, _("Please conduct authentication first!"))
return
tx_uuid = call.data[9:]
manager = get_manager()
try:
manager.remove(tx_uuid)
# Modify the original message reply
message = _("Transaction has been withdrawn")
code_format = MessageEntity('code', 0, len(message))
bot.edit_message_text(message,
chat_id=call.message.chat.id,
message_id=call.message.message_id,
entities=[code_format])
except ValueError as e:
logger.info(f'{call.id}:Unable to create trading', e)
bot.answer_callback_query(call.id, e.args[0])
except Exception as e:
logger.error(f'{call.id}:An unknown mistake!Withdrawal of the transaction failed.', e)
bot.answer_callback_query(call.id, _("An unknown mistake!Withdrawal of the transaction failed."))
def serving():
"""
start up Bot
:return:
"""
# set up Token
token = get_config('bot.token')
bot.token = token
# Set a proxy
proxy = get_config('bot.proxy')
if proxy is not None:
apihelper.proxy = {'https': proxy}
# start up
bot.infinity_polling()
| import traceback
import telebot
from telebot import apihelper
from telebot.types import InlineKeyboardMarkup, InlineKeyboardButton, MessageEntity, Message, CallbackQuery
from beancount_bot import transaction
from beancount_bot.config import get_config, load_config
from beancount_bot.dispatcher import Dispatcher
from beancount_bot.i18n import _
from beancount_bot.session import get_session, SESS_AUTH, get_session_for, set_session
from beancount_bot.task import load_task, get_task
from beancount_bot.transaction import get_manager
from beancount_bot.util import logger
apihelper.ENABLE_MIDDLEWARE = True
bot = telebot.TeleBot(token=None, parse_mode=None)
@bot.middleware_handler(update_types=['message'])
def session_middleware(bot_instance, message):
"""
Session middleware
:param bot_instance:
:param message:
:return:
"""
bot_instance.session = get_session_for(message.from_user.id)
#######
# Authentication #
#######
def check_auth() -> bool:
"""
Check if you log in
:return:
"""
return SESS_AUTH in bot.session and bot.session[SESS_AUTH]
@bot.message_handler(commands=['start'])
def start_handler(message: Message):
"""
First chat time authentication
:param message:
:return:
"""
auth = get_session(message.from_user.id, SESS_AUTH, False)
if auth:
bot.reply_to(message, _("Have been authenticated!"))
return
# Require authentication
bot.reply_to(message, _("Welcome to the accounting robot!Please enter the authentication token:"))
def auth_token_handler(message: Message):
"""
Login token callback
:param message:
:return:
"""
if check_auth():
return
# Any message from an unauthenticated user is treated as an authentication token
auth_token = get_config('bot.auth_token')
if auth_token == message.text:
set_session(message.from_user.id, SESS_AUTH, True)
bot.reply_to(message, _("Authentic success!"))
else:
bot.reply_to(message, _("Authentication token error!"))
#######
# instruction #
#######
@bot.message_handler(commands=['reload'])
def reload_handler(message):
"""
Overload configuration instruction
:param message:
:return:
"""
if not check_auth():
bot.reply_to(message, _("Please conduct authentication first!"))
return
load_config()
load_task()
bot.reply_to(message, _("Successful overload configuration!"))
@bot.message_handler(commands=['help'])
def help_handler(message):
"""
Help instruction
:param message:
:return:
"""
cmd = message.text
dispatchers = get_manager().dispatchers
if cmd == '/help':
# Create a message button
markup = InlineKeyboardMarkup()
for ind, d in zip(range(len(dispatchers)), dispatchers):
help_btn = _("help:{name}").format(name=d.get_name())
markup.add(InlineKeyboardButton(help_btn, callback_data=f'help:{ind}'))
# Help message
command_usage = [
_("/start - Authentication"),
_("/help - Using help"),
_("/reload - Reload the configuration file"),
_("/task - View, run the task"),
]
help_text = \
_("Account bill Bot\n\nAvailable instruction list:\n{command}\n\nTrade statement syntax help, select the corresponding module,Use /help [Module name] Check.").format(
command='\n'.join(command_usage))
bot.reply_to(message, help_text, reply_markup=markup)
else:
# Display detailed help
name: str = cmd[6:]
flag_found = False
for d in dispatchers:
if name.lower() == d.get_name().lower():
show_usage_for(message, d)
flag_found = True
if not flag_found:
bot.reply_to(message, _("The corresponding name of the transaction statement processor does not exist!"))
def show_usage_for(message: Message, d: Dispatcher):
"""
Show the method of use of a specific processor
:param message:
:param d:
:return:
"""
usage = _("help:{name}\n\n{usage}").format(name=d.get_name(), usage=d.get_usage())
bot.reply_to(message, usage)
@bot.callback_query_handler(func=lambda call: call.data[:4] == 'help')
def callback_help(call: CallbackQuery):
"""
Help statement detailed help
:param call:
:return:
"""
try:
d_id = int(call.data[5:])
dispatchers = get_manager().dispatchers
show_usage_for(call.message, dispatchers[d_id])
except Exception as e:
logger.error(f'{call.id}:Unknown error!', e)
logger.error(traceback.format_exc())
bot.answer_callback_query(call.id, _("Unknown error!\n"+traceback.format_exc()))
@bot.message_handler(commands=['task'])
def task_handler(message):
"""
Task instruction
:param message:
:return:
"""
if not check_auth():
bot.reply_to(message, _("Please conduct authentication first!"))
return
cmd = message.text
tasks = get_task()
if cmd == '/task':
# Show all tasks
all_tasks = ', '.join(tasks.keys())
bot.reply_to(message,
_("Current registration task:{all_tasks}\n"
"able to pass /task [Task Name] Active trigger").format(all_tasks=all_tasks))
else:
# Run task
dest = cmd[6:]
if dest not in tasks:
bot.reply_to(message, _("Task does not exist!"))
return
task = tasks[dest]
task.trigger(bot)
#######
# trade #
#######
@bot.message_handler(func=lambda m: True)
def transaction_query_handler(message: Message):
"""
Trading statement processing
:param message:
:return:
"""
if not check_auth():
auth_token_handler(message)
return
# Parse the message as a transaction statement
manager = get_manager()
try:
tx_uuid, tx = manager.create_from_str(message.text)
# Create a message button
markup = InlineKeyboardMarkup()
markup.add(InlineKeyboardButton(_("Revoke trading"), callback_data=f'withdraw:{tx_uuid}'))
# Reply
bot.reply_to(message, transaction.stringfy(tx), reply_markup=markup)
except ValueError as e:
logger.info(f'{message.from_user.id}:Unable to add transactions', e)
bot.reply_to(message, e.args[0])
except Exception as e:
logger.error(f'{message.from_user.id}:An unknown mistake!Adding a transaction failed.', e)
bot.reply_to(message, _("An unknown mistake!Adding a transaction failed.\n"+traceback.format_exc()))
@bot.callback_query_handler(func=lambda call: call.data[:8] == 'withdraw')
def callback_withdraw(call: CallbackQuery):
"""
Transaction withdrawal callback
:param call:
:return:
"""
auth = get_session(call.from_user.id, SESS_AUTH, False)
if not auth:
bot.answer_callback_query(call.id, _("Please conduct authentication first!"))
return
tx_uuid = call.data[9:]
manager = get_manager()
try:
manager.remove(tx_uuid)
# Modify the original message reply
message = _("Transaction has been withdrawn")
code_format = MessageEntity('code', 0, len(message))
bot.edit_message_text(message,
chat_id=call.message.chat.id,
message_id=call.message.message_id,
entities=[code_format])
except ValueError as e:
logger.info(f'{call.id}:Unable to create trading', e)
bot.answer_callback_query(call.id, e.args[0])
except Exception as e:
logger.error(f'{call.id}:An unknown mistake!Withdrawal of the transaction failed.', e)
bot.answer_callback_query(call.id, _("An unknown mistake!Withdrawal of the transaction failed."))
def serving():
"""
start up Bot
:return:
"""
# set up Token
token = get_config('bot.token')
bot.token = token
# Set a proxy
proxy = get_config('bot.proxy')
if proxy is not None:
apihelper.proxy = {'https': proxy}
# start up
bot.infinity_polling() | en | 0.489824 | Session middleware :param bot_instance: :param message: :return: ####### # Authentication # ####### Check if you log in :return: First chat time authentication :param message: :return: # 要求鉴权 Login token callback :param message: :return: # Unconfirmation is considered an authentication token ####### # instruction # ####### Overload configuration instruction :param message: :return: Help instruction :param message: :return: # Create a message button # 帮助信息 # Display detailed help Show the method of use of a specific processor :param message: :param d: :return: Help statement detailed help :param call: :return: Task instruction :param message: :return: # Show all tasks # Run task ####### # trade # ####### Trading statement processing :param message: :return: # Treated # Create a message button # 回复 Transaction withdrawal callback :param call: :return: # Modify the original message reply start up Bot :return: # set up Token # Set a proxy # start up | 2.055054 | 2 |
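The callback handlers above route button presses by a 'prefix:payload' convention in callback_data ('help:<index>', 'withdraw:<uuid>') and slice the prefix off before acting. A dependency-free sketch of that parsing follows; the payload values are made up.

# Standalone sketch of the 'prefix:payload' callback_data convention used above.
def parse_callback(data):
    prefix, _, payload = data.partition(':')
    return prefix, payload

print(parse_callback('help:2'))              # ('help', '2')
print(parse_callback('withdraw:3f2c-uuid'))  # ('withdraw', '3f2c-uuid')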
test/unit/metrics/test_group_sklearn_wrappers.py | GeGao2014/fairlearn | 2 | 8462 | # Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
import pytest
import numpy as np
import sklearn.metrics as skm
import fairlearn.metrics as metrics
# ======================================================
a = "a"
b = "b"
c = "c"
Y_true = [0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1]
Y_pred = [1, 0, 1, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1]
Y_true_ternary = [a, b, c, c, c, b, b, b, c, c, a, a, a, a, a, b, c, c]
Y_pred_ternary = [b, c, c, c, b, b, b, b, b, c, a, a, c, a, a, b, c, c]
groups = [3, 4, 1, 0, 0, 0, 3, 2, 0, 1, 2, 3, 4, 0, 1, 2, 3, 4]
weight = [1, 2, 3, 1, 2, 3, 4, 2, 3, 3, 2, 1, 2, 3, 1, 2, 3, 4]
group2 = [0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1]
# =======================================================
# Define as a dictionary so that the actual name can be seen
# when pytest builds the tests
supported_metrics_weighted = [(skm.accuracy_score, metrics.group_accuracy_score),
(skm.confusion_matrix, metrics.group_confusion_matrix),
(skm.zero_one_loss, metrics.group_zero_one_loss)]
# The following only work with binary data when called with their default arguments
supported_metrics_weighted_binary = [(skm.precision_score, metrics.group_precision_score),
(skm.recall_score, metrics.group_recall_score),
(skm.roc_auc_score, metrics.group_roc_auc_score),
(skm.mean_squared_error, metrics.group_mean_squared_error),
(skm.r2_score, metrics.group_r2_score)]
supported_metrics_weighted_binary = supported_metrics_weighted_binary + supported_metrics_weighted
metrics_no_sample_weights = [(skm.max_error, metrics.group_max_error),
(skm.mean_absolute_error, metrics.group_mean_absolute_error),
(skm.mean_squared_log_error, metrics.group_mean_squared_log_error),
(skm.median_absolute_error, metrics.group_median_absolute_error)]
supported_metrics_unweighted = metrics_no_sample_weights + supported_metrics_weighted_binary
# =======================================================
@pytest.mark.parametrize("func_tuple", supported_metrics_unweighted)
def test_metric_unweighted(func_tuple):
metric_func = func_tuple[0]
group_metric_func = func_tuple[1]
result = group_metric_func(Y_true, Y_pred, groups)
# We don't really care about the numbers (sklearn is responsible)
# We just want to make sure we got a result
assert len(result.by_group) == 5
expected_overall = metric_func(Y_true, Y_pred)
if isinstance(expected_overall, np.ndarray):
assert np.array_equal(expected_overall, result.overall)
else:
assert expected_overall == result.overall
@pytest.mark.parametrize("func_tuple", supported_metrics_weighted_binary)
def test_metric_weighted(func_tuple):
metric_func = func_tuple[0]
group_metric_func = func_tuple[1]
result = group_metric_func(Y_true, Y_pred, groups, sample_weight=weight)
assert len(result.by_group) == 5
expected_overall = metric_func(Y_true, Y_pred, sample_weight=weight)
if isinstance(expected_overall, np.ndarray):
assert np.array_equal(expected_overall, result.overall)
else:
assert expected_overall == result.overall
@pytest.mark.parametrize("func_tuple", supported_metrics_weighted)
def test_metric_weighted_ternary(func_tuple):
metric_func = func_tuple[0]
group_metric_func = func_tuple[1]
result = group_metric_func(Y_true_ternary, Y_pred_ternary, groups, sample_weight=weight)
assert len(result.by_group) == 5
expected_overall = metric_func(Y_true_ternary, Y_pred_ternary, sample_weight=weight)
if isinstance(expected_overall, np.ndarray):
assert np.array_equal(expected_overall, result.overall)
else:
assert expected_overall == result.overall
# ======================================================================================
def test_group_accuracy_score_unnormalized():
result = metrics.group_accuracy_score(Y_true, Y_pred, groups, normalize=False)
expected_overall = skm.accuracy_score(Y_true, Y_pred, False)
assert result.overall == expected_overall
# ======================================================================================
def test_group_confusion_matrix_labels():
labels = [0, 4]
result = metrics.group_confusion_matrix(Y_true, Y_pred, groups, labels=labels)
expected_overall = skm.confusion_matrix(Y_true, Y_pred, labels=labels)
assert np.array_equal(result.overall, expected_overall)
# ======================================================================================
def test_group_precision_score_ternary():
result = metrics.group_precision_score(Y_true_ternary, Y_pred_ternary, group2, average=None)
expected_overall = skm.precision_score(Y_true_ternary, Y_pred_ternary, average=None)
assert np.array_equal(result.overall, expected_overall)
def test_group_precision_score_pos_label():
result = metrics.group_precision_score(Y_true, Y_pred, groups, pos_label=0)
expected_overall = skm.precision_score(Y_true, Y_pred, pos_label=0)
assert np.array_equal(result.overall, expected_overall)
# ======================================================================================
def test_group_recall_score_ternary():
result = metrics.group_recall_score(Y_true_ternary, Y_pred_ternary, group2, average=None)
expected_overall = skm.recall_score(Y_true_ternary, Y_pred_ternary, average=None)
assert np.array_equal(result.overall, expected_overall)
def test_group_recall_score_pos_label():
result = metrics.group_recall_score(Y_true, Y_pred, groups, pos_label=0)
expected_overall = skm.recall_score(Y_true, Y_pred, pos_label=0)
assert np.array_equal(result.overall, expected_overall)
# ======================================================================================
def test_group_roc_auc_score_average():
result = metrics.group_roc_auc_score(Y_true, Y_pred, groups, average='samples')
expected_overall = skm.roc_auc_score(Y_true, Y_pred, average='samples')
assert expected_overall == result.overall
def test_group_roc_auc_score_max_fpr():
result = metrics.group_roc_auc_score(Y_true, Y_pred, groups, max_fpr=0.5)
expected_overall = skm.roc_auc_score(Y_true, Y_pred, max_fpr=0.5)
assert expected_overall == result.overall
# ======================================================================================
def test_group_zero_one_loss_unnormalized():
result = metrics.group_zero_one_loss(Y_true, Y_pred, groups, normalize=False)
expected_overall = skm.zero_one_loss(Y_true, Y_pred, False)
assert result.overall == expected_overall
# =============================================================================================
def test_group_mean_squared_error_multioutput_single_ndarray():
y_t = np.random.rand(len(groups), 2)
y_p = np.random.rand(len(groups), 2)
result = metrics.group_mean_squared_error(y_t, y_p, groups, multioutput='raw_values')
expected_overall = skm.mean_squared_error(y_t, y_p, multioutput='raw_values')
assert np.array_equal(result.overall, expected_overall)
# =============================================================================================
def test_group_r2_score_multioutput():
y_t = np.random.rand(len(groups), 2)
y_p = np.random.rand(len(groups), 2)
result = metrics.group_r2_score(y_t, y_p, groups, multioutput='raw_values')
expected_overall = skm.r2_score(y_t, y_p, multioutput='raw_values')
assert np.array_equal(result.overall, expected_overall)
for target_group in np.unique(groups):
mask = np.asarray(groups) == target_group
expected = skm.r2_score(y_t[mask], y_p[mask], multioutput='raw_values')
assert np.array_equal(result.by_group[target_group], expected)
# =============================================================================================
def test_group_mean_squared_error_multioutput_list_ndarray():
y_t = [np.random.rand(2) for x in groups]
y_p = [np.random.rand(2) for x in groups]
result = metrics.group_mean_squared_error(y_t, y_p, groups, multioutput='raw_values')
expected_overall = skm.mean_squared_error(y_t, y_p, multioutput='raw_values')
assert np.array_equal(result.overall, expected_overall)
for target_group in np.unique(groups):
y_true = []
y_pred = []
for i in range(len(groups)):
if groups[i] == target_group:
y_true.append(y_t[i])
y_pred.append(y_p[i])
expected = skm.mean_squared_error(y_true, y_pred, multioutput='raw_values')
assert np.array_equal(result.by_group[target_group], expected)
| # Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
import pytest
import numpy as np
import sklearn.metrics as skm
import fairlearn.metrics as metrics
# ======================================================
a = "a"
b = "b"
c = "c"
Y_true = [0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1]
Y_pred = [1, 0, 1, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1]
Y_true_ternary = [a, b, c, c, c, b, b, b, c, c, a, a, a, a, a, b, c, c]
Y_pred_ternary = [b, c, c, c, b, b, b, b, b, c, a, a, c, a, a, b, c, c]
groups = [3, 4, 1, 0, 0, 0, 3, 2, 0, 1, 2, 3, 4, 0, 1, 2, 3, 4]
weight = [1, 2, 3, 1, 2, 3, 4, 2, 3, 3, 2, 1, 2, 3, 1, 2, 3, 4]
group2 = [0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1]
# =======================================================
# Define as a list of (metric, group_metric) pairs so that the actual name can be seen
# when pytest builds the tests
supported_metrics_weighted = [(skm.accuracy_score, metrics.group_accuracy_score),
(skm.confusion_matrix, metrics.group_confusion_matrix),
(skm.zero_one_loss, metrics.group_zero_one_loss)]
# The following only work with binary data when called with their default arguments
supported_metrics_weighted_binary = [(skm.precision_score, metrics.group_precision_score),
(skm.recall_score, metrics.group_recall_score),
(skm.roc_auc_score, metrics.group_roc_auc_score),
(skm.mean_squared_error, metrics.group_mean_squared_error),
(skm.r2_score, metrics.group_r2_score)]
supported_metrics_weighted_binary = supported_metrics_weighted_binary + supported_metrics_weighted
metrics_no_sample_weights = [(skm.max_error, metrics.group_max_error),
(skm.mean_absolute_error, metrics.group_mean_absolute_error),
(skm.mean_squared_log_error, metrics.group_mean_squared_log_error),
(skm.median_absolute_error, metrics.group_median_absolute_error)]
supported_metrics_unweighted = metrics_no_sample_weights + supported_metrics_weighted_binary
# =======================================================
@pytest.mark.parametrize("func_tuple", supported_metrics_unweighted)
def test_metric_unweighted(func_tuple):
metric_func = func_tuple[0]
group_metric_func = func_tuple[1]
result = group_metric_func(Y_true, Y_pred, groups)
# We don't really care about the numbers (sklearn is responsible)
# We just want to make sure we got a result
assert len(result.by_group) == 5
expected_overall = metric_func(Y_true, Y_pred)
if isinstance(expected_overall, np.ndarray):
assert np.array_equal(expected_overall, result.overall)
else:
assert expected_overall == result.overall
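# Illustrative sketch of the result shape these tests rely on: each group_*
# metric is assumed to return an object with an `overall` value computed on
# the full sample and a `by_group` mapping with one entry per distinct group.
def _example_group_metric_result():
    result = metrics.group_accuracy_score(Y_true, Y_pred, groups)
    return result.overall, len(result.by_group)  # (overall accuracy, 5 groups)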
@pytest.mark.parametrize("func_tuple", supported_metrics_weighted_binary)
def test_metric_weighted(func_tuple):
metric_func = func_tuple[0]
group_metric_func = func_tuple[1]
result = group_metric_func(Y_true, Y_pred, groups, sample_weight=weight)
assert len(result.by_group) == 5
expected_overall = metric_func(Y_true, Y_pred, sample_weight=weight)
if isinstance(expected_overall, np.ndarray):
assert np.array_equal(expected_overall, result.overall)
else:
assert expected_overall == result.overall
@pytest.mark.parametrize("func_tuple", supported_metrics_weighted)
def test_metric_weighted_ternary(func_tuple):
metric_func = func_tuple[0]
group_metric_func = func_tuple[1]
result = group_metric_func(Y_true_ternary, Y_pred_ternary, groups, sample_weight=weight)
assert len(result.by_group) == 5
expected_overall = metric_func(Y_true_ternary, Y_pred_ternary, sample_weight=weight)
if isinstance(expected_overall, np.ndarray):
assert np.array_equal(expected_overall, result.overall)
else:
assert expected_overall == result.overall
# ======================================================================================
def test_group_accuracy_score_unnormalized():
result = metrics.group_accuracy_score(Y_true, Y_pred, groups, normalize=False)
expected_overall = skm.accuracy_score(Y_true, Y_pred, False)
assert result.overall == expected_overall
# ======================================================================================
def test_group_confusion_matrix_labels():
labels = [0, 4]
result = metrics.group_confusion_matrix(Y_true, Y_pred, groups, labels=labels)
expected_overall = skm.confusion_matrix(Y_true, Y_pred, labels=labels)
assert np.array_equal(result.overall, expected_overall)
# ======================================================================================
def test_group_precision_score_ternary():
result = metrics.group_precision_score(Y_true_ternary, Y_pred_ternary, group2, average=None)
expected_overall = skm.precision_score(Y_true_ternary, Y_pred_ternary, average=None)
assert np.array_equal(result.overall, expected_overall)
def test_group_precision_score_pos_label():
result = metrics.group_precision_score(Y_true, Y_pred, groups, pos_label=0)
expected_overall = skm.precision_score(Y_true, Y_pred, pos_label=0)
assert np.array_equal(result.overall, expected_overall)
# ======================================================================================
def test_group_recall_score_ternary():
result = metrics.group_recall_score(Y_true_ternary, Y_pred_ternary, group2, average=None)
expected_overall = skm.recall_score(Y_true_ternary, Y_pred_ternary, average=None)
assert np.array_equal(result.overall, expected_overall)
def test_group_recall_score_pos_label():
result = metrics.group_recall_score(Y_true, Y_pred, groups, pos_label=0)
expected_overall = skm.recall_score(Y_true, Y_pred, pos_label=0)
assert np.array_equal(result.overall, expected_overall)
# ======================================================================================
def test_group_roc_auc_score_average():
result = metrics.group_roc_auc_score(Y_true, Y_pred, groups, average='samples')
expected_overall = skm.roc_auc_score(Y_true, Y_pred, average='samples')
assert expected_overall == result.overall
def test_group_roc_auc_score_max_fpr():
result = metrics.group_roc_auc_score(Y_true, Y_pred, groups, max_fpr=0.5)
expected_overall = skm.roc_auc_score(Y_true, Y_pred, max_fpr=0.5)
assert expected_overall == result.overall
# ======================================================================================
def test_group_zero_one_loss_unnormalized():
result = metrics.group_zero_one_loss(Y_true, Y_pred, groups, normalize=False)
expected_overall = skm.zero_one_loss(Y_true, Y_pred, False)
assert result.overall == expected_overall
# =============================================================================================
def test_group_mean_squared_error_multioutput_single_ndarray():
y_t = np.random.rand(len(groups), 2)
y_p = np.random.rand(len(groups), 2)
result = metrics.group_mean_squared_error(y_t, y_p, groups, multioutput='raw_values')
expected_overall = skm.mean_squared_error(y_t, y_p, multioutput='raw_values')
assert np.array_equal(result.overall, expected_overall)
# =============================================================================================
def test_group_r2_score_multioutput():
y_t = np.random.rand(len(groups), 2)
y_p = np.random.rand(len(groups), 2)
result = metrics.group_r2_score(y_t, y_p, groups, multioutput='raw_values')
expected_overall = skm.r2_score(y_t, y_p, multioutput='raw_values')
assert np.array_equal(result.overall, expected_overall)
for target_group in np.unique(groups):
mask = np.asarray(groups) == target_group
expected = skm.r2_score(y_t[mask], y_p[mask], multioutput='raw_values')
assert np.array_equal(result.by_group[target_group], expected)
# =============================================================================================
def test_group_mean_squared_error_multioutput_list_ndarray():
y_t = [np.random.rand(2) for x in groups]
y_p = [np.random.rand(2) for x in groups]
result = metrics.group_mean_squared_error(y_t, y_p, groups, multioutput='raw_values')
expected_overall = skm.mean_squared_error(y_t, y_p, multioutput='raw_values')
assert np.array_equal(result.overall, expected_overall)
for target_group in np.unique(groups):
y_true = []
y_pred = []
for i in range(len(groups)):
if groups[i] == target_group:
y_true.append(y_t[i])
y_pred.append(y_p[i])
expected = skm.mean_squared_error(y_true, y_pred, multioutput='raw_values')
assert np.array_equal(result.by_group[target_group], expected)
| en | 0.54425 | # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. # ====================================================== # ======================================================= # Define as a dictionary so that the actual name can be seen # when pytest builds the tests # The following only work with binary data when called with their default arguments # ======================================================= # We don't really care about the numbers (sklearn is responsible) # We just want to make sure we got a result # ====================================================================================== # ====================================================================================== # ====================================================================================== # ====================================================================================== # ====================================================================================== # ====================================================================================== # ============================================================================================= # ============================================================================================= # ============================================================================================= | 2.319763 | 2 |
deeplearning/tf_util.py | cbschaff/nlimb | 12 | 8463 | """
Adapted from OpenAI Baselines.
"""
import numpy as np
import tensorflow as tf # pylint: ignore-module
import random
import copy
import os
import functools
import collections
import multiprocessing
def switch(condition, then_expression, else_expression):
"""Switches between two operations depending on a scalar value (int or bool).
Note that both `then_expression` and `else_expression`
should be symbolic tensors of the *same shape*.
# Arguments
condition: scalar tensor.
then_expression: TensorFlow operation.
else_expression: TensorFlow operation.
"""
x_shape = copy.copy(then_expression.get_shape())
x = tf.cond(tf.cast(condition, 'bool'),
lambda: then_expression,
lambda: else_expression)
x.set_shape(x_shape)
return x
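# Usage sketch for switch(): both branches must have the same shape and the
# condition is a scalar tensor fed at run time.
def _example_switch():
    cond = tf.placeholder(tf.bool, (), name="example_cond")
    out = switch(cond, tf.constant([1.0, 2.0]), tf.constant([3.0, 4.0]))
    with tf.Session() as sess:
        return sess.run(out, feed_dict={cond: True})  # -> [1.0, 2.0]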
# ================================================================
# Extras
# ================================================================
def lrelu(x, leak=0.2):
f1 = 0.5 * (1 + leak)
f2 = 0.5 * (1 - leak)
return f1 * x + f2 * abs(x)
# ================================================================
# Mathematical utils
# ================================================================
def huber_loss(x, delta=1.0):
"""Reference: https://en.wikipedia.org/wiki/Huber_loss"""
return tf.where(
tf.abs(x) < delta,
tf.square(x) * 0.5,
delta * (tf.abs(x) - 0.5 * delta)
)
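# Quick numeric check of huber_loss with delta=1.0: quadratic inside the
# threshold (0.5 -> 0.125) and linear outside it (2.0 -> 1.5).
def _example_huber_loss():
    with tf.Session() as sess:
        return sess.run(huber_loss(tf.constant([0.5, 2.0])))  # ~[0.125, 1.5]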
# ================================================================
# Global session
# ================================================================
def make_session(num_cpu=None, make_default=False):
"""Returns a session that will use <num_cpu> CPU's only"""
if num_cpu is None:
num_cpu = int(os.getenv('RCALL_NUM_CPU', multiprocessing.cpu_count()))
tf_config = tf.ConfigProto(
inter_op_parallelism_threads=num_cpu,
intra_op_parallelism_threads=num_cpu)
tf_config.gpu_options.allocator_type = 'BFC'
if make_default:
return tf.InteractiveSession(config=tf_config)
else:
return tf.Session(config=tf_config)
def single_threaded_session():
"""Returns a session which will only use a single CPU"""
return make_session(num_cpu=1)
def in_session(f):
@functools.wraps(f)
def newfunc(*args, **kwargs):
with tf.Session():
f(*args, **kwargs)
return newfunc
ALREADY_INITIALIZED = set()
def initialize():
"""Initialize all the uninitialized variables in the global scope."""
new_variables = set(tf.global_variables()) - ALREADY_INITIALIZED
tf.get_default_session().run(tf.variables_initializer(new_variables))
ALREADY_INITIALIZED.update(new_variables)
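# Typical setup sketch: open a session, build the graph, then call
# initialize() so only not-yet-initialized variables get their initializers run.
def _example_initialize():
    with single_threaded_session():
        v = tf.get_variable("example_v", shape=(), initializer=tf.constant_initializer(3.0))
        initialize()
        return v.eval()  # -> 3.0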
# ================================================================
# Saving variables and setting up experiment directories
# ================================================================
def load_state(fname):
saver = tf.train.Saver()
saver.restore(tf.get_default_session(), fname)
def save_state(fname):
os.makedirs(os.path.dirname(fname), exist_ok=True)
saver = tf.train.Saver()
saver.save(tf.get_default_session(), fname)
def load(fname):
import cloudpickle
with open(fname, 'rb') as f:
return cloudpickle.load(f)
def save(fname, obj):
import cloudpickle
os.makedirs(os.path.dirname(fname), exist_ok=True)
with open(fname, 'wb') as fh:
cloudpickle.dump(obj, fh)
class Experiment(object):
def __init__(self, logdir):
self.logdir = logdir
os.makedirs(os.path.join(logdir, 'checkpoints'), exist_ok=True)
def load(self, timestep=None):
if timestep is None:
# get latest ckpt
import glob
fs = glob.glob(os.path.join(self.logdir, 'checkpoints/*'))
timesteps = []
for f in fs:
try: timesteps.append(int(os.path.basename(f)))
except: pass
if len(timesteps) == 0:
return 0
timestep = max(timesteps)
fname = os.path.join(self.logdir, 'checkpoints', str(timestep), 'model')
load_state(fname)
return timestep
def save(self, timestep):
fname = os.path.join(self.logdir, 'checkpoints', str(timestep), 'model')
save_state(fname)
def load_model_fn(self):
fname = os.path.join(self.logdir, 'checkpoints/model_fn.pkl')
assert os.path.exists(fname), "No model file saved."
return load(fname)
def save_model_fn(self, model_fn):
fname = os.path.join(self.logdir, 'checkpoints/model_fn.pkl')
save(fname, model_fn)
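# Usage sketch for Experiment: checkpoints are written to
# <logdir>/checkpoints/<timestep>/model and load() returns the newest
# timestep found (0 when the directory is still empty). The logdir below is
# just an illustrative path.
def _example_experiment(logdir='/tmp/example_logdir'):
    exp = Experiment(logdir)
    return exp.load()  # -> 0 on a fresh logdir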
# ================================================================
# Model components
# ================================================================
def batch_to_seq(h, nbatch, nsteps, flat=False):
"""
Assumes Time major data!!
x.shape = [nsteps, nbatch, *obs_shape]
    h = x.reshape([-1, *x.shape[2:]])
"""
if flat:
h = tf.reshape(h, [nsteps, nbatch])
else:
h = tf.reshape(h, [nsteps, nbatch, -1])
return [tf.squeeze(v, [0]) for v in tf.split(axis=0, num_or_size_splits=nsteps, value=h)]
def seq_to_batch(h, flat = False):
"""
Assumes Time major data!!
x.shape = [nsteps, nbatch, *obs_shape]
x = output.reshape(nsteps, nbatch, *obs_shape), where output is the output of this function.
"""
shape = h[0].get_shape().as_list()
if not flat:
assert(len(shape) > 1)
nh = h[0].get_shape()[-1].value
return tf.reshape(tf.concat(axis=0, values=h), [-1, nh])
else:
return tf.reshape(tf.stack(values=h, axis=0), [-1])
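# Round-trip sketch: batch_to_seq splits a time-major tensor into a list of
# per-step tensors and seq_to_batch stacks such a list back together.
def _example_batch_seq_roundtrip(nsteps=3, nbatch=2, nfeat=4):
    x = tf.placeholder(tf.float32, [nsteps, nbatch, nfeat])
    steps = batch_to_seq(x, nbatch, nsteps)  # list of nsteps tensors, each [nbatch, nfeat]
    y = seq_to_batch(steps)                  # single tensor [nsteps * nbatch, nfeat]
    return steps, y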
def ortho_init(scale=1.0):
def _ortho_init(shape, dtype, partition_info=None):
#lasagne ortho init for tf
shape = tuple(shape)
if len(shape) == 2:
flat_shape = shape
elif len(shape) == 4: # assumes NHWC
flat_shape = (np.prod(shape[:-1]), shape[-1])
else:
raise NotImplementedError
a = np.random.normal(0.0, 1.0, flat_shape)
u, _, v = np.linalg.svd(a, full_matrices=False)
q = u if u.shape == flat_shape else v # pick the one with the correct shape
q = q.reshape(shape)
return (scale * q[:shape[0], :shape[1]]).astype(np.float32)
return _ortho_init
def normc_initializer(std=1.0):
def _initializer(shape, dtype=None, partition_info=None): # pylint: disable=W0613
out = np.random.randn(*shape).astype(np.float32)
out *= std / np.sqrt(np.square(out).sum(axis=0, keepdims=True))
return tf.constant(out)
return _initializer
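# Sanity-check sketch for ortho_init: for a square 2-D shape and scale=1.0
# the returned matrix should be (numerically) orthogonal, i.e. W^T W ~= I.
def _example_ortho_init(n=4):
    w = ortho_init(1.0)((n, n), np.float32)
    return np.allclose(w.T @ w, np.eye(n), atol=1e-5)  # -> True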
def lstm(xs, ms, s, scope, nh, init_scale=1.0):
nbatch, nin = [v.value for v in xs[0].get_shape()]
nsteps = len(xs)
with tf.variable_scope(scope):
wx = tf.get_variable("wx", [nin, nh*4], initializer=ortho_init(init_scale))
wh = tf.get_variable("wh", [nh, nh*4], initializer=ortho_init(init_scale))
b = tf.get_variable("b", [nh*4], initializer=tf.constant_initializer(0.0))
c, h = tf.split(axis=1, num_or_size_splits=2, value=s)
for idx, (x, m) in enumerate(zip(xs, ms)):
c = c*(1-m)
h = h*(1-m)
z = tf.matmul(x, wx) + tf.matmul(h, wh) + b
i, f, o, u = tf.split(axis=1, num_or_size_splits=4, value=z)
i = tf.nn.sigmoid(i)
f = tf.nn.sigmoid(f)
o = tf.nn.sigmoid(o)
u = tf.tanh(u)
c = f*c + i*u
h = o*tf.tanh(c)
xs[idx] = h
s = tf.concat(axis=1, values=[c, h])
return xs, s
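# Shape sketch for lstm(): xs is a length-nsteps list of [nbatch, nin] inputs,
# ms a matching list of [nbatch, 1] reset masks (1 when an episode restarts),
# and s packs cell and hidden state side by side as [nbatch, 2*nh].
def _example_lstm(nbatch=2, nin=5, nh=8, nsteps=3):
    xs = [tf.placeholder(tf.float32, [nbatch, nin]) for _ in range(nsteps)]
    ms = [tf.placeholder(tf.float32, [nbatch, 1]) for _ in range(nsteps)]
    s = tf.placeholder(tf.float32, [nbatch, nh * 2])
    return lstm(xs, ms, s, scope='example_lstm', nh=nh)  # (list of [nbatch, nh], new state)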
# ================================================================
# Theano-like Function
# ================================================================
def function(inputs, outputs, updates=None, givens=None):
"""Just like Theano function. Take a bunch of tensorflow placeholders and expressions
computed based on those placeholders and produces f(inputs) -> outputs. Function f takes
values to be fed to the input's placeholders and produces the values of the expressions
in outputs.
Input values can be passed in the same order as inputs or can be provided as kwargs based
on placeholder name (passed to constructor or accessible via placeholder.op.name).
Example:
x = tf.placeholder(tf.int32, (), name="x")
y = tf.placeholder(tf.int32, (), name="y")
z = 3 * x + 2 * y
lin = function([x, y], z, givens={y: 0})
with single_threaded_session():
initialize()
assert lin(2) == 6
assert lin(x=3) == 9
assert lin(2, 2) == 10
assert lin(x=2, y=3) == 12
Parameters
----------
inputs: [tf.placeholder, tf.constant, or object with make_feed_dict method]
list of input arguments
outputs: [tf.Variable] or tf.Variable
list of outputs or a single output to be returned from function. Returned
value will also have the same shape.
"""
if isinstance(outputs, list):
return _Function(inputs, outputs, updates, givens=givens)
elif isinstance(outputs, (dict, collections.OrderedDict)):
f = _Function(inputs, outputs.values(), updates, givens=givens)
return lambda *args, **kwargs: type(outputs)(zip(outputs.keys(), f(*args, **kwargs)))
else:
f = _Function(inputs, [outputs], updates, givens=givens)
return lambda *args, **kwargs: f(*args, **kwargs)[0]
class _Function(object):
def __init__(self, inputs, outputs, updates, givens):
# for inpt in inputs:
# if not hasattr(inpt, 'make_feed_dict') and not (type(inpt) is tf.Tensor and len(inpt.op.inputs) == 0):
# assert False, "inputs should all be placeholders, constants, or have a make_feed_dict method"
self.inputs = inputs
updates = updates or []
self.update_group = tf.group(*updates)
self.outputs_update = list(outputs) + [self.update_group]
self.givens = {} if givens is None else givens
def _feed_input(self, feed_dict, inpt, value):
if hasattr(inpt, 'make_feed_dict'):
feed_dict.update(inpt.make_feed_dict(value))
else:
feed_dict[inpt] = value
def __call__(self, *args):
assert len(args) <= len(self.inputs), "Too many arguments provided"
feed_dict = {}
# Update the args
for inpt, value in zip(self.inputs, args):
if value is not None:
self._feed_input(feed_dict, inpt, value)
# Update feed dict with givens.
for inpt in self.givens:
feed_dict[inpt] = feed_dict.get(inpt, self.givens[inpt])
results = tf.get_default_session().run(self.outputs_update, feed_dict=feed_dict)[:-1]
return results
# ================================================================
# Flat vectors
# ================================================================
def var_shape(x):
out = x.get_shape().as_list()
assert all(isinstance(a, int) for a in out), \
"shape function assumes that shape is fully known"
return out
def numel(x):
return intprod(var_shape(x))
def intprod(x):
return int(np.prod(x))
def flatgrad(loss, var_list, clip_norm=None):
grads = tf.gradients(loss, var_list)
if clip_norm is not None:
grads, _ = tf.clip_by_global_norm(grads, clip_norm=clip_norm)
return tf.concat(axis=0, values=[
tf.reshape(grad if grad is not None else tf.zeros_like(v), [numel(v)])
for (v, grad) in zip(var_list, grads)
])
class SetFromFlat(object):
def __init__(self, var_list, dtype=tf.float32):
assigns = []
shapes = list(map(var_shape, var_list))
total_size = np.sum([intprod(shape) for shape in shapes])
self.theta = theta = tf.placeholder(dtype, [total_size])
start = 0
assigns = []
for (shape, v) in zip(shapes, var_list):
size = intprod(shape)
assigns.append(tf.assign(v, tf.reshape(theta[start:start + size], shape)))
start += size
self.op = tf.group(*assigns)
def __call__(self, theta):
tf.get_default_session().run(self.op, feed_dict={self.theta: theta})
class GetFlat(object):
def __init__(self, var_list):
self.op = tf.concat(axis=0, values=[tf.reshape(v, [numel(v)]) for v in var_list])
def __call__(self):
return tf.get_default_session().run(self.op)
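# Round-trip sketch for GetFlat / SetFromFlat: read every variable as one flat
# vector, shift it, and write it back into the same variables.
def _example_flat_roundtrip():
    with single_threaded_session():
        v = tf.get_variable("flat_demo", shape=(2, 3), initializer=tf.zeros_initializer())
        initialize()
        get_flat, set_from_flat = GetFlat([v]), SetFromFlat([v])
        theta = get_flat()          # shape (6,), all zeros
        set_from_flat(theta + 1.0)
        return get_flat()           # all ones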
def flattenallbut0(x):
return tf.reshape(x, [-1, intprod(x.get_shape().as_list()[1:])])
def reset():
global ALREADY_INITIALIZED
ALREADY_INITIALIZED = set()
tf.reset_default_graph()
"""
Random Seeds
"""
def set_global_seeds(i):
try:
import tensorflow as tf
except ImportError:
pass
else:
tf.set_random_seed(i)
np.random.seed(i)
random.seed(i)
| """
Adapted from OpenAI Baselines.
"""
import numpy as np
import tensorflow as tf # pylint: ignore-module
import random
import copy
import os
import functools
import collections
import multiprocessing
def switch(condition, then_expression, else_expression):
"""Switches between two operations depending on a scalar value (int or bool).
Note that both `then_expression` and `else_expression`
should be symbolic tensors of the *same shape*.
# Arguments
condition: scalar tensor.
then_expression: TensorFlow operation.
else_expression: TensorFlow operation.
"""
x_shape = copy.copy(then_expression.get_shape())
x = tf.cond(tf.cast(condition, 'bool'),
lambda: then_expression,
lambda: else_expression)
x.set_shape(x_shape)
return x
# ================================================================
# Extras
# ================================================================
def lrelu(x, leak=0.2):
f1 = 0.5 * (1 + leak)
f2 = 0.5 * (1 - leak)
return f1 * x + f2 * abs(x)
# ================================================================
# Mathematical utils
# ================================================================
def huber_loss(x, delta=1.0):
"""Reference: https://en.wikipedia.org/wiki/Huber_loss"""
return tf.where(
tf.abs(x) < delta,
tf.square(x) * 0.5,
delta * (tf.abs(x) - 0.5 * delta)
)
# ================================================================
# Global session
# ================================================================
def make_session(num_cpu=None, make_default=False):
"""Returns a session that will use <num_cpu> CPU's only"""
if num_cpu is None:
num_cpu = int(os.getenv('RCALL_NUM_CPU', multiprocessing.cpu_count()))
tf_config = tf.ConfigProto(
inter_op_parallelism_threads=num_cpu,
intra_op_parallelism_threads=num_cpu)
tf_config.gpu_options.allocator_type = 'BFC'
if make_default:
return tf.InteractiveSession(config=tf_config)
else:
return tf.Session(config=tf_config)
def single_threaded_session():
"""Returns a session which will only use a single CPU"""
return make_session(num_cpu=1)
def in_session(f):
@functools.wraps(f)
def newfunc(*args, **kwargs):
with tf.Session():
f(*args, **kwargs)
return newfunc
ALREADY_INITIALIZED = set()
def initialize():
"""Initialize all the uninitialized variables in the global scope."""
new_variables = set(tf.global_variables()) - ALREADY_INITIALIZED
tf.get_default_session().run(tf.variables_initializer(new_variables))
ALREADY_INITIALIZED.update(new_variables)
# ================================================================
# Saving variables and setting up experiment directories
# ================================================================
def load_state(fname):
saver = tf.train.Saver()
saver.restore(tf.get_default_session(), fname)
def save_state(fname):
os.makedirs(os.path.dirname(fname), exist_ok=True)
saver = tf.train.Saver()
saver.save(tf.get_default_session(), fname)
def load(fname):
import cloudpickle
with open(fname, 'rb') as f:
return cloudpickle.load(f)
def save(fname, obj):
import cloudpickle
os.makedirs(os.path.dirname(fname), exist_ok=True)
with open(fname, 'wb') as fh:
cloudpickle.dump(obj, fh)
class Experiment(object):
def __init__(self, logdir):
self.logdir = logdir
os.makedirs(os.path.join(logdir, 'checkpoints'), exist_ok=True)
def load(self, timestep=None):
if timestep is None:
# get latest ckpt
import glob
fs = glob.glob(os.path.join(self.logdir, 'checkpoints/*'))
timesteps = []
for f in fs:
try: timesteps.append(int(os.path.basename(f)))
except: pass
if len(timesteps) == 0:
return 0
timestep = max(timesteps)
fname = os.path.join(self.logdir, 'checkpoints', str(timestep), 'model')
load_state(fname)
return timestep
def save(self, timestep):
fname = os.path.join(self.logdir, 'checkpoints', str(timestep), 'model')
save_state(fname)
def load_model_fn(self):
fname = os.path.join(self.logdir, 'checkpoints/model_fn.pkl')
assert os.path.exists(fname), "No model file saved."
return load(fname)
def save_model_fn(self, model_fn):
fname = os.path.join(self.logdir, 'checkpoints/model_fn.pkl')
save(fname, model_fn)
# ================================================================
# Model components
# ================================================================
def batch_to_seq(h, nbatch, nsteps, flat=False):
"""
Assumes Time major data!!
x.shape = [nsteps, nbatch, *obs_shape]
    h = x.reshape([-1, *x.shape[2:]])
"""
if flat:
h = tf.reshape(h, [nsteps, nbatch])
else:
h = tf.reshape(h, [nsteps, nbatch, -1])
return [tf.squeeze(v, [0]) for v in tf.split(axis=0, num_or_size_splits=nsteps, value=h)]
def seq_to_batch(h, flat = False):
"""
Assumes Time major data!!
x.shape = [nsteps, nbatch, *obs_shape]
x = output.reshape(nsteps, nbatch, *obs_shape), where output is the output of this function.
"""
shape = h[0].get_shape().as_list()
if not flat:
assert(len(shape) > 1)
nh = h[0].get_shape()[-1].value
return tf.reshape(tf.concat(axis=0, values=h), [-1, nh])
else:
return tf.reshape(tf.stack(values=h, axis=0), [-1])
def ortho_init(scale=1.0):
def _ortho_init(shape, dtype, partition_info=None):
#lasagne ortho init for tf
shape = tuple(shape)
if len(shape) == 2:
flat_shape = shape
elif len(shape) == 4: # assumes NHWC
flat_shape = (np.prod(shape[:-1]), shape[-1])
else:
raise NotImplementedError
a = np.random.normal(0.0, 1.0, flat_shape)
u, _, v = np.linalg.svd(a, full_matrices=False)
q = u if u.shape == flat_shape else v # pick the one with the correct shape
q = q.reshape(shape)
return (scale * q[:shape[0], :shape[1]]).astype(np.float32)
return _ortho_init
def normc_initializer(std=1.0):
def _initializer(shape, dtype=None, partition_info=None): # pylint: disable=W0613
out = np.random.randn(*shape).astype(np.float32)
out *= std / np.sqrt(np.square(out).sum(axis=0, keepdims=True))
return tf.constant(out)
return _initializer
def lstm(xs, ms, s, scope, nh, init_scale=1.0):
nbatch, nin = [v.value for v in xs[0].get_shape()]
nsteps = len(xs)
with tf.variable_scope(scope):
wx = tf.get_variable("wx", [nin, nh*4], initializer=ortho_init(init_scale))
wh = tf.get_variable("wh", [nh, nh*4], initializer=ortho_init(init_scale))
b = tf.get_variable("b", [nh*4], initializer=tf.constant_initializer(0.0))
c, h = tf.split(axis=1, num_or_size_splits=2, value=s)
for idx, (x, m) in enumerate(zip(xs, ms)):
c = c*(1-m)
h = h*(1-m)
z = tf.matmul(x, wx) + tf.matmul(h, wh) + b
i, f, o, u = tf.split(axis=1, num_or_size_splits=4, value=z)
i = tf.nn.sigmoid(i)
f = tf.nn.sigmoid(f)
o = tf.nn.sigmoid(o)
u = tf.tanh(u)
c = f*c + i*u
h = o*tf.tanh(c)
xs[idx] = h
s = tf.concat(axis=1, values=[c, h])
return xs, s
# ================================================================
# Theano-like Function
# ================================================================
def function(inputs, outputs, updates=None, givens=None):
"""Just like Theano function. Take a bunch of tensorflow placeholders and expressions
computed based on those placeholders and produces f(inputs) -> outputs. Function f takes
values to be fed to the input's placeholders and produces the values of the expressions
in outputs.
Input values can be passed in the same order as inputs or can be provided as kwargs based
on placeholder name (passed to constructor or accessible via placeholder.op.name).
Example:
x = tf.placeholder(tf.int32, (), name="x")
y = tf.placeholder(tf.int32, (), name="y")
z = 3 * x + 2 * y
lin = function([x, y], z, givens={y: 0})
with single_threaded_session():
initialize()
assert lin(2) == 6
assert lin(x=3) == 9
assert lin(2, 2) == 10
assert lin(x=2, y=3) == 12
Parameters
----------
inputs: [tf.placeholder, tf.constant, or object with make_feed_dict method]
list of input arguments
outputs: [tf.Variable] or tf.Variable
list of outputs or a single output to be returned from function. Returned
value will also have the same shape.
"""
if isinstance(outputs, list):
return _Function(inputs, outputs, updates, givens=givens)
elif isinstance(outputs, (dict, collections.OrderedDict)):
f = _Function(inputs, outputs.values(), updates, givens=givens)
return lambda *args, **kwargs: type(outputs)(zip(outputs.keys(), f(*args, **kwargs)))
else:
f = _Function(inputs, [outputs], updates, givens=givens)
return lambda *args, **kwargs: f(*args, **kwargs)[0]
class _Function(object):
def __init__(self, inputs, outputs, updates, givens):
# for inpt in inputs:
# if not hasattr(inpt, 'make_feed_dict') and not (type(inpt) is tf.Tensor and len(inpt.op.inputs) == 0):
# assert False, "inputs should all be placeholders, constants, or have a make_feed_dict method"
self.inputs = inputs
updates = updates or []
self.update_group = tf.group(*updates)
self.outputs_update = list(outputs) + [self.update_group]
self.givens = {} if givens is None else givens
def _feed_input(self, feed_dict, inpt, value):
if hasattr(inpt, 'make_feed_dict'):
feed_dict.update(inpt.make_feed_dict(value))
else:
feed_dict[inpt] = value
def __call__(self, *args):
assert len(args) <= len(self.inputs), "Too many arguments provided"
feed_dict = {}
# Update the args
for inpt, value in zip(self.inputs, args):
if value is not None:
self._feed_input(feed_dict, inpt, value)
# Update feed dict with givens.
for inpt in self.givens:
feed_dict[inpt] = feed_dict.get(inpt, self.givens[inpt])
results = tf.get_default_session().run(self.outputs_update, feed_dict=feed_dict)[:-1]
return results
# ================================================================
# Flat vectors
# ================================================================
def var_shape(x):
out = x.get_shape().as_list()
assert all(isinstance(a, int) for a in out), \
"shape function assumes that shape is fully known"
return out
def numel(x):
return intprod(var_shape(x))
def intprod(x):
return int(np.prod(x))
def flatgrad(loss, var_list, clip_norm=None):
grads = tf.gradients(loss, var_list)
if clip_norm is not None:
grads, _ = tf.clip_by_global_norm(grads, clip_norm=clip_norm)
return tf.concat(axis=0, values=[
tf.reshape(grad if grad is not None else tf.zeros_like(v), [numel(v)])
for (v, grad) in zip(var_list, grads)
])
class SetFromFlat(object):
def __init__(self, var_list, dtype=tf.float32):
assigns = []
shapes = list(map(var_shape, var_list))
total_size = np.sum([intprod(shape) for shape in shapes])
self.theta = theta = tf.placeholder(dtype, [total_size])
start = 0
assigns = []
for (shape, v) in zip(shapes, var_list):
size = intprod(shape)
assigns.append(tf.assign(v, tf.reshape(theta[start:start + size], shape)))
start += size
self.op = tf.group(*assigns)
def __call__(self, theta):
tf.get_default_session().run(self.op, feed_dict={self.theta: theta})
class GetFlat(object):
def __init__(self, var_list):
self.op = tf.concat(axis=0, values=[tf.reshape(v, [numel(v)]) for v in var_list])
def __call__(self):
return tf.get_default_session().run(self.op)
def flattenallbut0(x):
return tf.reshape(x, [-1, intprod(x.get_shape().as_list()[1:])])
def reset():
global ALREADY_INITIALIZED
ALREADY_INITIALIZED = set()
tf.reset_default_graph()
"""
Random Seeds
"""
def set_global_seeds(i):
try:
import tensorflow as tf
except ImportError:
pass
else:
tf.set_random_seed(i)
np.random.seed(i)
random.seed(i)
| en | 0.605814 | Adapted from OpenAI Baselines. # pylint: ignore-module Switches between two operations depending on a scalar value (int or bool). Note that both `then_expression` and `else_expression` should be symbolic tensors of the *same shape*. # Arguments condition: scalar tensor. then_expression: TensorFlow operation. else_expression: TensorFlow operation. # ================================================================ # Extras # ================================================================ # ================================================================ # Mathematical utils # ================================================================ Reference: https://en.wikipedia.org/wiki/Huber_loss # ================================================================ # Global session # ================================================================ Returns a session that will use <num_cpu> CPU's only Returns a session which will only use a single CPU Initialize all the uninitialized variables in the global scope. # ================================================================ # Saving variables and setting up experiment directories # ================================================================ # get latest ckpt # ================================================================ # Model components # ================================================================ Assumes Time major data!! x.shape = [nsteps, nbatch, *obs_shape] h = x.reshape([-1, *x.shape[2:]])) Assumes Time major data!! x.shape = [nsteps, nbatch, *obs_shape] x = output.reshape(nsteps, nbatch, *obs_shape), where output is the output of this function. #lasagne ortho init for tf # assumes NHWC # pick the one with the correct shape # pylint: disable=W0613 # ================================================================ # Theano-like Function # ================================================================ Just like Theano function. Take a bunch of tensorflow placeholders and expressions computed based on those placeholders and produces f(inputs) -> outputs. Function f takes values to be fed to the input's placeholders and produces the values of the expressions in outputs. Input values can be passed in the same order as inputs or can be provided as kwargs based on placeholder name (passed to constructor or accessible via placeholder.op.name). Example: x = tf.placeholder(tf.int32, (), name="x") y = tf.placeholder(tf.int32, (), name="y") z = 3 * x + 2 * y lin = function([x, y], z, givens={y: 0}) with single_threaded_session(): initialize() assert lin(2) == 6 assert lin(x=3) == 9 assert lin(2, 2) == 10 assert lin(x=2, y=3) == 12 Parameters ---------- inputs: [tf.placeholder, tf.constant, or object with make_feed_dict method] list of input arguments outputs: [tf.Variable] or tf.Variable list of outputs or a single output to be returned from function. Returned value will also have the same shape. # for inpt in inputs: # if not hasattr(inpt, 'make_feed_dict') and not (type(inpt) is tf.Tensor and len(inpt.op.inputs) == 0): # assert False, "inputs should all be placeholders, constants, or have a make_feed_dict method" # Update the args # Update feed dict with givens. # ================================================================ # Flat vectors # ================================================================ Random Seeds | 2.80497 | 3 |
Util/constant.py | RoboCupULaval/StrategyAI | 13 | 8464 | # Under MIT License, see LICENSE.txt
""" Module définissant des constantes de programmations python pour l'IA """
from enum import Enum
ROBOT_RADIUS = 90
ROBOT_DIAMETER = ROBOT_RADIUS * 2
ROBOT_CENTER_TO_KICKER = 60
BALL_RADIUS = 21
MAX_PLAYER_ON_FIELD_PER_TEAM = 6
BALL_OUTSIDE_FIELD_BUFFER = 200
# Radius and angles for tactics
DISTANCE_BEHIND = ROBOT_RADIUS + 30 # in millimeters
ANGLE_TO_GRAB_BALL = 1 # in radians; must be large in case ball moves fast
RADIUS_TO_GRAB_BALL = ROBOT_RADIUS + 30
ANGLE_TO_HALT = 0.05 # 3 degrees
RADIUS_TO_HALT = ROBOT_RADIUS + BALL_RADIUS
REASONABLE_OFFSET = 50 # To take into account the camera precision and other things
# Rules
KEEPOUT_DISTANCE_FROM_BALL = 500 + ROBOT_RADIUS + REASONABLE_OFFSET
KEEPOUT_DISTANCE_FROM_GOAL = ROBOT_RADIUS + REASONABLE_OFFSET
PADDING_DEFENSE_AREA = 100
# Rule 5.2: Minimum movement before a ball is "in play"
IN_PLAY_MIN_DISTANCE = 50
# Rule 8.2.1: Distance from the opposing team defending zone
INDIRECT_KICK_OFFSET = 200
# Deadzones
POSITION_DEADZONE = ROBOT_RADIUS * 0.1
# Orientation abs_tol
ORIENTATION_ABSOLUTE_TOLERANCE = 0.01 # 0.5 degree
# TeamColor
class TeamColor(Enum):
def __str__(self):
return 'blue' if self == TeamColor.BLUE else 'yellow'
YELLOW = 0
BLUE = 1
class FieldSide(Enum):
POSITIVE = 0
NEGATIVE = 1
class KickForce(Enum):
NONE = 0
LOW = 1
MEDIUM = 2
HIGH = 3
@classmethod
def for_dist(cls, dist, seconds_to_reach=1.0):
speed = (dist / 1000) / seconds_to_reach
return speed
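# Usage sketch: KickForce.for_dist converts a distance in millimetres and a
# time budget into a rough kick speed in m/s; note that it returns the speed
# itself rather than one of the enum members.
def _example_kick_force():
    return KickForce.for_dist(3000, seconds_to_reach=1.5)  # -> 2.0 (m/s)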
class KickType(Enum):
DIRECT = 0
CHIP = 1
class DribbleState(Enum):
AUTOMATIC = 0
FORCE_STOP = 1
FORCE_SPIN = 2
| # Under MIT License, see LICENSE.txt
""" Module définissant des constantes de programmations python pour l'IA """
from enum import Enum
ROBOT_RADIUS = 90
ROBOT_DIAMETER = ROBOT_RADIUS * 2
ROBOT_CENTER_TO_KICKER = 60
BALL_RADIUS = 21
MAX_PLAYER_ON_FIELD_PER_TEAM = 6
BALL_OUTSIDE_FIELD_BUFFER = 200
# Radius and angles for tactics
DISTANCE_BEHIND = ROBOT_RADIUS + 30 # in millimeters
ANGLE_TO_GRAB_BALL = 1 # in radians; must be large in case ball moves fast
RADIUS_TO_GRAB_BALL = ROBOT_RADIUS + 30
ANGLE_TO_HALT = 0.05 # 3 degrees
RADIUS_TO_HALT = ROBOT_RADIUS + BALL_RADIUS
REASONABLE_OFFSET = 50 # To take into account the camera precision and other things
# Rules
KEEPOUT_DISTANCE_FROM_BALL = 500 + ROBOT_RADIUS + REASONABLE_OFFSET
KEEPOUT_DISTANCE_FROM_GOAL = ROBOT_RADIUS + REASONABLE_OFFSET
PADDING_DEFENSE_AREA = 100
# Rule 5.2: Minimum movement before a ball is "in play"
IN_PLAY_MIN_DISTANCE = 50
# Rule 8.2.1: Distance from the opposing team defending zone
INDIRECT_KICK_OFFSET = 200
# Deadzones
POSITION_DEADZONE = ROBOT_RADIUS * 0.1
# Orientation abs_tol
ORIENTATION_ABSOLUTE_TOLERANCE = 0.01 # 0.5 degree
# TeamColor
class TeamColor(Enum):
def __str__(self):
return 'blue' if self == TeamColor.BLUE else 'yellow'
YELLOW = 0
BLUE = 1
class FieldSide(Enum):
POSITIVE = 0
NEGATIVE = 1
class KickForce(Enum):
NONE = 0
LOW = 1
MEDIUM = 2
HIGH = 3
@classmethod
def for_dist(cls, dist, seconds_to_reach=1.0):
speed = (dist / 1000) / seconds_to_reach
return speed
class KickType(Enum):
DIRECT = 0
CHIP = 1
class DribbleState(Enum):
AUTOMATIC = 0
FORCE_STOP = 1
FORCE_SPIN = 2
| en | 0.639576 | # Under MIT License, see LICENSE.txt Module définissant des constantes de programmations python pour l'IA # Radius and angles for tactics # in millimeters # in radians; must be large in case ball moves fast # 3 degrees # To take into account the camera precision and other things # Rules # Rule 5.2: Minimum movement before a ball is "in play" # Rule 8.2.1: Distance from the opposing team defending zone # Deadzones # Orientation abs_tol # 0.5 degree # TeamColor | 3.313501 | 3 |
src/transbigdata/CoordinatesConverter.py | cirno1w/transport | 1 | 8465 |
import numpy as np
x_pi = 3.14159265358979324 * 3000.0 / 180.0
pi = 3.1415926535897932384626
a = 6378245.0
ee = 0.00669342162296594323
def gcj02tobd09(lng, lat):
"""
Convert coordinates from GCJ02 to BD09
Parameters
-------
lng : Series or number
Longitude
lat : Series or number
Latitude
return
-------
lng : Series or number
Longitude (Converted)
lat : Series or number
Latitude (Converted)
"""
try:
lng = lng.astype(float)
lat = lat.astype(float)
except:
lng = float(lng)
lat = float(lat)
z = np.sqrt(lng * lng + lat * lat) + 0.00002 * np.sin(lat * x_pi)
theta = np.arctan2(lat, lng) + 0.000003 * np.cos(lng * x_pi)
bd_lng = z * np.cos(theta) + 0.0065
bd_lat = z * np.sin(theta) + 0.006
return bd_lng, bd_lat
def bd09togcj02(bd_lon, bd_lat):
"""
Convert coordinates from BD09 to GCJ02
Parameters
-------
lng : Series or number
Longitude
lat : Series or number
Latitude
return
-------
lng : Series or number
Longitude (Converted)
lat : Series or number
Latitude (Converted)
"""
try:
bd_lon = bd_lon.astype(float)
bd_lat = bd_lat.astype(float)
except:
bd_lon = float(bd_lon)
bd_lat = float(bd_lat)
x = bd_lon - 0.0065
y = bd_lat - 0.006
z = np.sqrt(x * x + y * y) - 0.00002 * np.sin(y * x_pi)
theta = np.arctan2(y, x) - 0.000003 * np.cos(x * x_pi)
gg_lng = z * np.cos(theta)
gg_lat = z * np.sin(theta)
return gg_lng, gg_lat
def wgs84togcj02(lng, lat):
"""
Convert coordinates from WGS84 to GCJ02
Parameters
-------
lng : Series or number
Longitude
lat : Series or number
Latitude
return
-------
lng : Series or number
Longitude (Converted)
lat : Series or number
Latitude (Converted)
"""
try:
lng = lng.astype(float)
lat = lat.astype(float)
except:
lng = float(lng)
lat = float(lat)
dlat = transformlat(lng - 105.0, lat - 35.0)
dlng = transformlng(lng - 105.0, lat - 35.0)
radlat = lat / 180.0 * pi
magic = np.sin(radlat)
magic = 1 - ee * magic * magic
sqrtmagic = np.sqrt(magic)
dlat = (dlat * 180.0) / ((a * (1 - ee)) / (magic * sqrtmagic) * pi)
dlng = (dlng * 180.0) / (a / sqrtmagic * np.cos(radlat) * pi)
mglat = lat + dlat
mglng = lng + dlng
return mglng, mglat
def gcj02towgs84(lng, lat):
"""
Convert coordinates from GCJ02 to WGS84
Parameters
-------
lng : Series or number
Longitude
lat : Series or number
Latitude
return
-------
lng : Series or number
Longitude (Converted)
lat : Series or number
Latitude (Converted)
"""
try:
lng = lng.astype(float)
lat = lat.astype(float)
except:
lng = float(lng)
lat = float(lat)
dlat = transformlat(lng - 105.0, lat - 35.0)
dlng = transformlng(lng - 105.0, lat - 35.0)
radlat = lat / 180.0 * pi
magic = np.sin(radlat)
magic = 1 - ee * magic * magic
sqrtmagic = np.sqrt(magic)
dlat = (dlat * 180.0) / ((a * (1 - ee)) / (magic * sqrtmagic) * pi)
dlng = (dlng * 180.0) / (a / sqrtmagic * np.cos(radlat) * pi)
mglat = lat + dlat
mglng = lng + dlng
return lng * 2 - mglng, lat * 2 - mglat
def wgs84tobd09(lon,lat):
"""
Convert coordinates from WGS84 to BD09
Parameters
-------
lng : Series or number
Longitude
lat : Series or number
Latitude
return
-------
lng : Series or number
Longitude (Converted)
lat : Series or number
Latitude (Converted)
"""
try:
lon = lon.astype(float)
lat = lat.astype(float)
except:
lon = float(lon)
lat = float(lat)
lon,lat = wgs84togcj02(lon,lat)
lon,lat = gcj02tobd09(lon,lat)
return lon,lat
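# Usage sketch: a WGS84 point (illustrative coordinates near central Shanghai)
# converted to BD09 via the GCJ02 intermediate step; both longitude and
# latitude come back shifted by a few thousandths of a degree.
def _example_wgs84tobd09():
    return wgs84tobd09(121.47, 31.23)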
def bd09towgs84(lon,lat):
"""
Convert coordinates from BD09 to WGS84
Parameters
-------
lng : Series or number
Longitude
lat : Series or number
Latitude
return
-------
lng : Series or number
Longitude (Converted)
lat : Series or number
Latitude (Converted)
"""
try:
lon = lon.astype(float)
lat = lat.astype(float)
except:
lon = float(lon)
lat = float(lat)
lon,lat = bd09togcj02(lon,lat)
lon,lat = gcj02towgs84(lon,lat)
return lon,lat
def bd09mctobd09(x,y):
"""
Convert coordinates from BD09MC to BD09
Parameters
-------
x : Series or number
x coordinates
y : Series or number
y coordinates
return
-------
lng : Series or number
Longitude (Converted)
lat : Series or number
Latitude (Converted)
"""
MCBAND = [12890594.86, 8362377.87, 5591021, 3481989.83, 1678043.12, 0]
MC2LL = [
[1.410526172116255e-8, 0.00000898305509648872, -1.9939833816331, 200.9824383106796, -187.2403703815547, 91.6087516669843, -23.38765649603339, 2.57121317296198, -0.03801003308653, 17337981.2],
[-7.435856389565537e-9, 0.000008983055097726239, -0.78625201886289, 96.32687599759846, -1.85204757529826, -59.36935905485877, 47.40033549296737, -16.50741931063887, 2.28786674699375, 10260144.86],
[-3.030883460898826e-8, 0.00000898305509983578, 0.30071316287616, 59.74293618442277, 7.357984074871, -25.38371002664745, 13.45380521110908, -3.29883767235584, 0.32710905363475, 6856817.37],
[-1.981981304930552e-8, 0.000008983055099779535, 0.03278182852591, 40.31678527705744, 0.65659298677277, -4.44255534477492, 0.85341911805263, 0.12923347998204, -0.04625736007561, 4482777.06],
[3.09191371068437e-9, 0.000008983055096812155, 0.00006995724062, 23.10934304144901, -0.00023663490511, -0.6321817810242, -0.00663494467273, 0.03430082397953, -0.00466043876332, 2555164.4],
[2.890871144776878e-9, 0.000008983055095805407, -3.068298e-8, 7.47137025468032, -0.00000353937994, -0.02145144861037, -0.00001234426596, 0.00010322952773, -0.00000323890364, 826088.5]
]
    y1 = y.iloc[0] if hasattr(y, 'iloc') else y  # accept both Series and scalar input, as documented
for cD in range(len(MCBAND)):
if y1 >= MCBAND[cD]:
cE = MC2LL[cD]
break
cD = cE
T = cD[0] + cD[1] * np.abs(x);
cB = np.abs(y) / cD[9]
cE = cD[2] + cD[3] * cB + cD[4] * cB * cB +\
cD[5] * cB * cB * cB + cD[6] * cB * cB * cB * cB +\
cD[7] * cB * cB * cB * cB * cB +\
cD[8] * cB * cB * cB * cB * cB * cB
return T,cE
def transformlat(lng, lat):
ret = -100.0 + 2.0 * lng + 3.0 * lat + 0.2 * lat * lat + \
0.1 * lng * lat + 0.2 * np.sqrt(np.fabs(lng))
ret += (20.0 * np.sin(6.0 * lng * pi) + 20.0 *
np.sin(2.0 * lng * pi)) * 2.0 / 3.0
ret += (20.0 * np.sin(lat * pi) + 40.0 *
np.sin(lat / 3.0 * pi)) * 2.0 / 3.0
ret += (160.0 * np.sin(lat / 12.0 * pi) + 320 *
np.sin(lat * pi / 30.0)) * 2.0 / 3.0
return ret
def transformlng(lng, lat):
import numpy as np
ret = 300.0 + lng + 2.0 * lat + 0.1 * lng * lng + \
0.1 * lng * lat + 0.1 * np.sqrt(np.abs(lng))
ret += (20.0 * np.sin(6.0 * lng * pi) + 20.0 *
np.sin(2.0 * lng * pi)) * 2.0 / 3.0
ret += (20.0 * np.sin(lng * pi) + 40.0 *
np.sin(lng / 3.0 * pi)) * 2.0 / 3.0
ret += (150.0 * np.sin(lng / 12.0 * pi) + 300.0 *
np.sin(lng / 30.0 * pi)) * 2.0 / 3.0
return ret
def getdistance(lon1, lat1, lon2, lat2):
'''
Input the origin/destination location in the sequence of [lon1, lat1, lon2, lat2] (in decimal) from DataFrame. The output is the distance (m).
Parameters
-------
lon1 : Series or number
Start longitude
lat1 : Series or number
Start latitude
lon2 : Series or number
End longitude
lat2 : Series or number
End latitude
return
-------
distance : Series or number
The distance
'''
try:
lon1 = lon1.astype(float)
lat1 = lat1.astype(float)
lon2 = lon2.astype(float)
lat2 = lat2.astype(float)
except:
lon1 = float(lon1)
lat1 = float(lat1)
lon2 = float(lon2)
lat2 = float(lat2)
lon1, lat1, lon2, lat2 = map(lambda r:r*pi/180, [lon1, lat1, lon2, lat2])
dlon = lon2 - lon1
dlat = lat2 - lat1
a = np.sin(dlat/2)**2 + np.cos(lat1) * np.cos(lat2) * np.sin(dlon/2)**2
c = 2 * np.arcsin(a**0.5)
    r = 6371  # mean Earth radius, in kilometres
return c * r * 1000
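# Worked example for getdistance (haversine on a 6371 km sphere): one degree
# of latitude spans 6371 * pi / 180 km, so the pair below returns ~111195 m.
def _example_getdistance():
    return getdistance(121.0, 31.0, 121.0, 32.0)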
def transform_shape(gdf,method):
'''
Convert coordinates of all data. The input is the geographic elements’ DataFrame.
Parameters
-------
gdf : GeoDataFrame
Geographic elements
method : function
The coordinate converting function
return
-------
gdf : GeoDataFrame
The result of converting
'''
from shapely.ops import transform
gdf1 = gdf.copy()
gdf1['geometry'] = gdf1['geometry'].apply(lambda r:transform(method, r))
return gdf1 | import numpy as np
x_pi = 3.14159265358979324 * 3000.0 / 180.0
pi = 3.1415926535897932384626
a = 6378245.0
ee = 0.00669342162296594323
def gcj02tobd09(lng, lat):
"""
Convert coordinates from GCJ02 to BD09
Parameters
-------
lng : Series or number
Longitude
lat : Series or number
Latitude
return
-------
lng : Series or number
Longitude (Converted)
lat : Series or number
Latitude (Converted)
"""
try:
lng = lng.astype(float)
lat = lat.astype(float)
except:
lng = float(lng)
lat = float(lat)
z = np.sqrt(lng * lng + lat * lat) + 0.00002 * np.sin(lat * x_pi)
theta = np.arctan2(lat, lng) + 0.000003 * np.cos(lng * x_pi)
bd_lng = z * np.cos(theta) + 0.0065
bd_lat = z * np.sin(theta) + 0.006
return bd_lng, bd_lat
def bd09togcj02(bd_lon, bd_lat):
"""
Convert coordinates from BD09 to GCJ02
Parameters
-------
lng : Series or number
Longitude
lat : Series or number
Latitude
return
-------
lng : Series or number
Longitude (Converted)
lat : Series or number
Latitude (Converted)
"""
try:
bd_lon = bd_lon.astype(float)
bd_lat = bd_lat.astype(float)
except:
bd_lon = float(bd_lon)
bd_lat = float(bd_lat)
x = bd_lon - 0.0065
y = bd_lat - 0.006
z = np.sqrt(x * x + y * y) - 0.00002 * np.sin(y * x_pi)
theta = np.arctan2(y, x) - 0.000003 * np.cos(x * x_pi)
gg_lng = z * np.cos(theta)
gg_lat = z * np.sin(theta)
return gg_lng, gg_lat
def wgs84togcj02(lng, lat):
"""
Convert coordinates from WGS84 to GCJ02
Parameters
-------
lng : Series or number
Longitude
lat : Series or number
Latitude
return
-------
lng : Series or number
Longitude (Converted)
lat : Series or number
Latitude (Converted)
"""
try:
lng = lng.astype(float)
lat = lat.astype(float)
except:
lng = float(lng)
lat = float(lat)
dlat = transformlat(lng - 105.0, lat - 35.0)
dlng = transformlng(lng - 105.0, lat - 35.0)
radlat = lat / 180.0 * pi
magic = np.sin(radlat)
magic = 1 - ee * magic * magic
sqrtmagic = np.sqrt(magic)
dlat = (dlat * 180.0) / ((a * (1 - ee)) / (magic * sqrtmagic) * pi)
dlng = (dlng * 180.0) / (a / sqrtmagic * np.cos(radlat) * pi)
mglat = lat + dlat
mglng = lng + dlng
return mglng, mglat
def gcj02towgs84(lng, lat):
"""
Convert coordinates from GCJ02 to WGS84
Parameters
-------
lng : Series or number
Longitude
lat : Series or number
Latitude
return
-------
lng : Series or number
Longitude (Converted)
lat : Series or number
Latitude (Converted)
"""
try:
lng = lng.astype(float)
lat = lat.astype(float)
except:
lng = float(lng)
lat = float(lat)
dlat = transformlat(lng - 105.0, lat - 35.0)
dlng = transformlng(lng - 105.0, lat - 35.0)
radlat = lat / 180.0 * pi
magic = np.sin(radlat)
magic = 1 - ee * magic * magic
sqrtmagic = np.sqrt(magic)
dlat = (dlat * 180.0) / ((a * (1 - ee)) / (magic * sqrtmagic) * pi)
dlng = (dlng * 180.0) / (a / sqrtmagic * np.cos(radlat) * pi)
mglat = lat + dlat
mglng = lng + dlng
return lng * 2 - mglng, lat * 2 - mglat
def wgs84tobd09(lon,lat):
"""
Convert coordinates from WGS84 to BD09
Parameters
-------
lng : Series or number
Longitude
lat : Series or number
Latitude
return
-------
lng : Series or number
Longitude (Converted)
lat : Series or number
Latitude (Converted)
"""
try:
lon = lon.astype(float)
lat = lat.astype(float)
except:
lon = float(lon)
lat = float(lat)
lon,lat = wgs84togcj02(lon,lat)
lon,lat = gcj02tobd09(lon,lat)
return lon,lat
def bd09towgs84(lon,lat):
"""
Convert coordinates from BD09 to WGS84
Parameters
-------
lng : Series or number
Longitude
lat : Series or number
Latitude
return
-------
lng : Series or number
Longitude (Converted)
lat : Series or number
Latitude (Converted)
"""
try:
lon = lon.astype(float)
lat = lat.astype(float)
except:
lon = float(lon)
lat = float(lat)
lon,lat = bd09togcj02(lon,lat)
lon,lat = gcj02towgs84(lon,lat)
return lon,lat
def bd09mctobd09(x,y):
"""
Convert coordinates from BD09MC to BD09
Parameters
-------
x : Series or number
x coordinates
y : Series or number
y coordinates
return
-------
lng : Series or number
Longitude (Converted)
lat : Series or number
Latitude (Converted)
"""
MCBAND = [12890594.86, 8362377.87, 5591021, 3481989.83, 1678043.12, 0]
MC2LL = [
[1.410526172116255e-8, 0.00000898305509648872, -1.9939833816331, 200.9824383106796, -187.2403703815547, 91.6087516669843, -23.38765649603339, 2.57121317296198, -0.03801003308653, 17337981.2],
[-7.435856389565537e-9, 0.000008983055097726239, -0.78625201886289, 96.32687599759846, -1.85204757529826, -59.36935905485877, 47.40033549296737, -16.50741931063887, 2.28786674699375, 10260144.86],
[-3.030883460898826e-8, 0.00000898305509983578, 0.30071316287616, 59.74293618442277, 7.357984074871, -25.38371002664745, 13.45380521110908, -3.29883767235584, 0.32710905363475, 6856817.37],
[-1.981981304930552e-8, 0.000008983055099779535, 0.03278182852591, 40.31678527705744, 0.65659298677277, -4.44255534477492, 0.85341911805263, 0.12923347998204, -0.04625736007561, 4482777.06],
[3.09191371068437e-9, 0.000008983055096812155, 0.00006995724062, 23.10934304144901, -0.00023663490511, -0.6321817810242, -0.00663494467273, 0.03430082397953, -0.00466043876332, 2555164.4],
[2.890871144776878e-9, 0.000008983055095805407, -3.068298e-8, 7.47137025468032, -0.00000353937994, -0.02145144861037, -0.00001234426596, 0.00010322952773, -0.00000323890364, 826088.5]
]
    y1 = y.iloc[0] if hasattr(y, 'iloc') else y  # accept both Series and scalar input, as documented
for cD in range(len(MCBAND)):
if y1 >= MCBAND[cD]:
cE = MC2LL[cD]
break
cD = cE
T = cD[0] + cD[1] * np.abs(x);
cB = np.abs(y) / cD[9]
cE = cD[2] + cD[3] * cB + cD[4] * cB * cB +\
cD[5] * cB * cB * cB + cD[6] * cB * cB * cB * cB +\
cD[7] * cB * cB * cB * cB * cB +\
cD[8] * cB * cB * cB * cB * cB * cB
return T,cE
def transformlat(lng, lat):
ret = -100.0 + 2.0 * lng + 3.0 * lat + 0.2 * lat * lat + \
0.1 * lng * lat + 0.2 * np.sqrt(np.fabs(lng))
ret += (20.0 * np.sin(6.0 * lng * pi) + 20.0 *
np.sin(2.0 * lng * pi)) * 2.0 / 3.0
ret += (20.0 * np.sin(lat * pi) + 40.0 *
np.sin(lat / 3.0 * pi)) * 2.0 / 3.0
ret += (160.0 * np.sin(lat / 12.0 * pi) + 320 *
np.sin(lat * pi / 30.0)) * 2.0 / 3.0
return ret
def transformlng(lng, lat):
import numpy as np
ret = 300.0 + lng + 2.0 * lat + 0.1 * lng * lng + \
0.1 * lng * lat + 0.1 * np.sqrt(np.abs(lng))
ret += (20.0 * np.sin(6.0 * lng * pi) + 20.0 *
np.sin(2.0 * lng * pi)) * 2.0 / 3.0
ret += (20.0 * np.sin(lng * pi) + 40.0 *
np.sin(lng / 3.0 * pi)) * 2.0 / 3.0
ret += (150.0 * np.sin(lng / 12.0 * pi) + 300.0 *
np.sin(lng / 30.0 * pi)) * 2.0 / 3.0
return ret
def getdistance(lon1, lat1, lon2, lat2):
'''
Input the origin/destination location in the sequence of [lon1, lat1, lon2, lat2] (in decimal) from DataFrame. The output is the distance (m).
Parameters
-------
lon1 : Series or number
Start longitude
lat1 : Series or number
Start latitude
lon2 : Series or number
End longitude
lat2 : Series or number
End latitude
return
-------
distance : Series or number
The distance
'''
try:
lon1 = lon1.astype(float)
lat1 = lat1.astype(float)
lon2 = lon2.astype(float)
lat2 = lat2.astype(float)
except:
lon1 = float(lon1)
lat1 = float(lat1)
lon2 = float(lon2)
lat2 = float(lat2)
lon1, lat1, lon2, lat2 = map(lambda r:r*pi/180, [lon1, lat1, lon2, lat2])
dlon = lon2 - lon1
dlat = lat2 - lat1
a = np.sin(dlat/2)**2 + np.cos(lat1) * np.cos(lat2) * np.sin(dlon/2)**2
c = 2 * np.arcsin(a**0.5)
    r = 6371  # mean Earth radius, in kilometres
return c * r * 1000
def transform_shape(gdf,method):
'''
Convert coordinates of all data. The input is the geographic elements’ DataFrame.
Parameters
-------
gdf : GeoDataFrame
Geographic elements
method : function
The coordinate converting function
return
-------
gdf : GeoDataFrame
The result of converting
'''
from shapely.ops import transform
gdf1 = gdf.copy()
gdf1['geometry'] = gdf1['geometry'].apply(lambda r:transform(method, r))
return gdf1 | en | 0.245646 | Convert coordinates from GCJ02 to BD09 Parameters ------- lng : Series or number Longitude lat : Series or number Latitude return ------- lng : Series or number Longitude (Converted) lat : Series or number Latitude (Converted) Convert coordinates from BD09 to GCJ02 Parameters ------- lng : Series or number Longitude lat : Series or number Latitude return ------- lng : Series or number Longitude (Converted) lat : Series or number Latitude (Converted) Convert coordinates from WGS84 to GCJ02 Parameters ------- lng : Series or number Longitude lat : Series or number Latitude return ------- lng : Series or number Longitude (Converted) lat : Series or number Latitude (Converted) Convert coordinates from GCJ02 to WGS84 Parameters ------- lng : Series or number Longitude lat : Series or number Latitude return ------- lng : Series or number Longitude (Converted) lat : Series or number Latitude (Converted) Convert coordinates from WGS84 to BD09 Parameters ------- lng : Series or number Longitude lat : Series or number Latitude return ------- lng : Series or number Longitude (Converted) lat : Series or number Latitude (Converted) Convert coordinates from BD09 to WGS84 Parameters ------- lng : Series or number Longitude lat : Series or number Latitude return ------- lng : Series or number Longitude (Converted) lat : Series or number Latitude (Converted) Convert coordinates from BD09MC to BD09 Parameters ------- x : Series or number x coordinates y : Series or number y coordinates return ------- lng : Series or number Longitude (Converted) lat : Series or number Latitude (Converted) Input the origin/destination location in the sequence of [lon1, lat1, lon2, lat2] (in decimal) from DataFrame. The output is the distance (m). Parameters ------- lon1 : Series or number Start longitude lat1 : Series or number Start latitude lon2 : Series or number End longitude lat2 : Series or number End latitude return ------- distance : Series or number The distance # 地球平均半径,单位为公里 Convert coordinates of all data. The input is the geographic elements’ DataFrame. Parameters ------- gdf : GeoDataFrame Geographic elements method : function The coordinate converting function return ------- gdf : GeoDataFrame The result of converting | 3.017498 | 3 |
cloudify_rest_client/exceptions.py | aleixsanchis/cloudify-rest-client | 0 | 8466 | ########
# Copyright (c) 2014 GigaSpaces Technologies Ltd. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# * See the License for the specific language governing permissions and
# * limitations under the License.
class CloudifyClientError(Exception):
def __init__(self, message, server_traceback=None,
status_code=-1, error_code=None, response=None):
super(CloudifyClientError, self).__init__(message)
self.status_code = status_code
self.error_code = error_code
self.server_traceback = server_traceback
self.response = response
self.message = message
def __str__(self):
if self.status_code != -1:
formatted_error = '{0}: {1}'.format(self.status_code, self.message)
return formatted_error
return self.message
class DeploymentEnvironmentCreationInProgressError(CloudifyClientError):
"""
Raised when there's attempt to execute a deployment workflow and
deployment environment creation workflow execution is still running.
In such a case, workflow execution should be retried after a reasonable
time or after the execution of deployment environment creation workflow
has terminated.
"""
ERROR_CODE = 'deployment_environment_creation_in_progress_error'
class DeploymentEnvironmentCreationPendingError(CloudifyClientError):
"""
Raised when there's attempt to execute a deployment workflow and
deployment environment creation workflow execution is pending.
In such a case, workflow execution should be retried after a reasonable
time or after the execution of deployment environment creation workflow
has terminated.
"""
ERROR_CODE = 'deployment_environment_creation_pending_error'
class IllegalExecutionParametersError(CloudifyClientError):
"""
Raised when an attempt to execute a workflow with wrong/missing parameters
has been made.
"""
ERROR_CODE = 'illegal_execution_parameters_error'
class NoSuchIncludeFieldError(CloudifyClientError):
"""
Raised when an _include query parameter contains a field which does not
exist for the queried data model.
"""
ERROR_CODE = 'no_such_include_field_error'
class MissingRequiredDeploymentInputError(CloudifyClientError):
"""
Raised when a required deployment input was not specified on deployment
creation.
"""
ERROR_CODE = 'missing_required_deployment_input_error'
class UnknownDeploymentInputError(CloudifyClientError):
"""
Raised when an unexpected input was specified on deployment creation.
"""
ERROR_CODE = 'unknown_deployment_input_error'
class UnknownDeploymentSecretError(CloudifyClientError):
"""
Raised when a required secret was not found on deployment creation.
"""
ERROR_CODE = 'unknown_deployment_secret_error'
class UnsupportedDeploymentGetSecretError(CloudifyClientError):
"""
Raised when an unsupported get_secret intrinsic function appears in
the blueprint on deployment creation.
"""
ERROR_CODE = 'unsupported_deployment_get_secret_error'
class FunctionsEvaluationError(CloudifyClientError):
"""
Raised when function evaluation failed.
"""
ERROR_CODE = 'functions_evaluation_error'
class UnknownModificationStageError(CloudifyClientError):
"""
Raised when an unknown modification stage was provided.
"""
ERROR_CODE = 'unknown_modification_stage_error'
class ExistingStartedDeploymentModificationError(CloudifyClientError):
"""
Raised when a deployment modification start is attempted while another
deployment modification is currently started
"""
ERROR_CODE = 'existing_started_deployment_modification_error'
class DeploymentModificationAlreadyEndedError(CloudifyClientError):
"""
Raised when a deployment modification finish/rollback is attempted on
    a deployment modification that has already been finished/rolled back
"""
ERROR_CODE = 'deployment_modification_already_ended_error'
class UserUnauthorizedError(CloudifyClientError):
"""
Raised when a call has been made to a secured resource with an
unauthorized user (no credentials / bad credentials)
"""
ERROR_CODE = 'unauthorized_error'
class ForbiddenError(CloudifyClientError):
"""
Raised when a call has been made by a user that is not permitted to
perform it
"""
ERROR_CODE = 'forbidden_error'
class PluginInUseError(CloudifyClientError):
"""
Raised if a central deployment agent plugin deletion is attempted and at
least one deployment is currently using this plugin.
"""
ERROR_CODE = 'plugin_in_use'
class PluginInstallationError(CloudifyClientError):
"""
Raised if a central deployment agent plugin installation fails.
"""
ERROR_CODE = 'plugin_installation_error'
class PluginInstallationTimeout(CloudifyClientError):
"""
Raised if a central deployment agent plugin installation times out.
"""
ERROR_CODE = 'plugin_installation_timeout'
class MaintenanceModeActiveError(CloudifyClientError):
"""
Raised when a call has been blocked due to maintenance mode being active.
"""
ERROR_CODE = 'maintenance_mode_active'
def __str__(self):
return self.message
class MaintenanceModeActivatingError(CloudifyClientError):
"""
Raised when a call has been blocked while maintenance mode is activating.
"""
ERROR_CODE = 'entering_maintenance_mode'
def __str__(self):
return self.message
class NotModifiedError(CloudifyClientError):
"""
Raised when a 304 not modified error was returned
"""
ERROR_CODE = 'not_modified'
def __str__(self):
return self.message
class InvalidExecutionUpdateStatus(CloudifyClientError):
"""
    Raised when execution update failed due to invalid status update
"""
ERROR_CODE = 'invalid_exception_status_update'
class NotClusterMaster(CloudifyClientError):
"""
Raised when the request was served by a manager that is not the master
node of a manager cluster.
The client should query for the cluster status to learn the master's
address, and retry the request.
If the client stores the server address, it should update the storage
with the new master node address.
"""
ERROR_CODE = 'not_cluster_master'
class RemovedFromCluster(CloudifyClientError):
"""
Raised when attempting to contact a manager that was removed from a
cluster.
The client should retry the request with another manager in the cluster.
If the client stores the server address, it should remove this node's
address from storage.
"""
ERROR_CODE = 'removed_from_cluster'
class DeploymentPluginNotFound(CloudifyClientError):
"""
Raised when a plugin is listed in the blueprint but is not
installed on the manager.
"""
ERROR_CODE = 'deployment_plugin_not_found'
ERROR_MAPPING = dict([
(error.ERROR_CODE, error)
for error in [
DeploymentEnvironmentCreationInProgressError,
DeploymentEnvironmentCreationPendingError,
IllegalExecutionParametersError,
NoSuchIncludeFieldError,
MissingRequiredDeploymentInputError,
UnknownDeploymentInputError,
UnknownDeploymentSecretError,
UnsupportedDeploymentGetSecretError,
FunctionsEvaluationError,
UnknownModificationStageError,
ExistingStartedDeploymentModificationError,
DeploymentModificationAlreadyEndedError,
UserUnauthorizedError,
ForbiddenError,
MaintenanceModeActiveError,
MaintenanceModeActivatingError,
NotModifiedError,
InvalidExecutionUpdateStatus,
PluginInUseError,
PluginInstallationError,
PluginInstallationTimeout,
NotClusterMaster,
RemovedFromCluster,
DeploymentPluginNotFound]])
| ########
# Copyright (c) 2014 GigaSpaces Technologies Ltd. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# * See the License for the specific language governing permissions and
# * limitations under the License.
class CloudifyClientError(Exception):
def __init__(self, message, server_traceback=None,
status_code=-1, error_code=None, response=None):
super(CloudifyClientError, self).__init__(message)
self.status_code = status_code
self.error_code = error_code
self.server_traceback = server_traceback
self.response = response
self.message = message
def __str__(self):
if self.status_code != -1:
formatted_error = '{0}: {1}'.format(self.status_code, self.message)
return formatted_error
return self.message
class DeploymentEnvironmentCreationInProgressError(CloudifyClientError):
"""
Raised when there's attempt to execute a deployment workflow and
deployment environment creation workflow execution is still running.
In such a case, workflow execution should be retried after a reasonable
time or after the execution of deployment environment creation workflow
has terminated.
"""
ERROR_CODE = 'deployment_environment_creation_in_progress_error'
class DeploymentEnvironmentCreationPendingError(CloudifyClientError):
"""
Raised when there's attempt to execute a deployment workflow and
deployment environment creation workflow execution is pending.
In such a case, workflow execution should be retried after a reasonable
time or after the execution of deployment environment creation workflow
has terminated.
"""
ERROR_CODE = 'deployment_environment_creation_pending_error'
class IllegalExecutionParametersError(CloudifyClientError):
"""
Raised when an attempt to execute a workflow with wrong/missing parameters
has been made.
"""
ERROR_CODE = 'illegal_execution_parameters_error'
class NoSuchIncludeFieldError(CloudifyClientError):
"""
Raised when an _include query parameter contains a field which does not
exist for the queried data model.
"""
ERROR_CODE = 'no_such_include_field_error'
class MissingRequiredDeploymentInputError(CloudifyClientError):
"""
Raised when a required deployment input was not specified on deployment
creation.
"""
ERROR_CODE = 'missing_required_deployment_input_error'
class UnknownDeploymentInputError(CloudifyClientError):
"""
Raised when an unexpected input was specified on deployment creation.
"""
ERROR_CODE = 'unknown_deployment_input_error'
class UnknownDeploymentSecretError(CloudifyClientError):
"""
Raised when a required secret was not found on deployment creation.
"""
ERROR_CODE = 'unknown_deployment_secret_error'
class UnsupportedDeploymentGetSecretError(CloudifyClientError):
"""
Raised when an unsupported get_secret intrinsic function appears in
the blueprint on deployment creation.
"""
ERROR_CODE = 'unsupported_deployment_get_secret_error'
class FunctionsEvaluationError(CloudifyClientError):
"""
Raised when function evaluation failed.
"""
ERROR_CODE = 'functions_evaluation_error'
class UnknownModificationStageError(CloudifyClientError):
"""
Raised when an unknown modification stage was provided.
"""
ERROR_CODE = 'unknown_modification_stage_error'
class ExistingStartedDeploymentModificationError(CloudifyClientError):
"""
Raised when a deployment modification start is attempted while another
deployment modification is currently started
"""
ERROR_CODE = 'existing_started_deployment_modification_error'
class DeploymentModificationAlreadyEndedError(CloudifyClientError):
"""
Raised when a deployment modification finish/rollback is attempted on
    a deployment modification that has already been finished/rolled back
"""
ERROR_CODE = 'deployment_modification_already_ended_error'
class UserUnauthorizedError(CloudifyClientError):
"""
Raised when a call has been made to a secured resource with an
unauthorized user (no credentials / bad credentials)
"""
ERROR_CODE = 'unauthorized_error'
class ForbiddenError(CloudifyClientError):
"""
Raised when a call has been made by a user that is not permitted to
perform it
"""
ERROR_CODE = 'forbidden_error'
class PluginInUseError(CloudifyClientError):
"""
Raised if a central deployment agent plugin deletion is attempted and at
least one deployment is currently using this plugin.
"""
ERROR_CODE = 'plugin_in_use'
class PluginInstallationError(CloudifyClientError):
"""
Raised if a central deployment agent plugin installation fails.
"""
ERROR_CODE = 'plugin_installation_error'
class PluginInstallationTimeout(CloudifyClientError):
"""
Raised if a central deployment agent plugin installation times out.
"""
ERROR_CODE = 'plugin_installation_timeout'
class MaintenanceModeActiveError(CloudifyClientError):
"""
Raised when a call has been blocked due to maintenance mode being active.
"""
ERROR_CODE = 'maintenance_mode_active'
def __str__(self):
return self.message
class MaintenanceModeActivatingError(CloudifyClientError):
"""
Raised when a call has been blocked while maintenance mode is activating.
"""
ERROR_CODE = 'entering_maintenance_mode'
def __str__(self):
return self.message
class NotModifiedError(CloudifyClientError):
"""
Raised when a 304 not modified error was returned
"""
ERROR_CODE = 'not_modified'
def __str__(self):
return self.message
class InvalidExecutionUpdateStatus(CloudifyClientError):
"""
    Raised when execution update failed due to invalid status update
"""
ERROR_CODE = 'invalid_exception_status_update'
class NotClusterMaster(CloudifyClientError):
"""
Raised when the request was served by a manager that is not the master
node of a manager cluster.
The client should query for the cluster status to learn the master's
address, and retry the request.
If the client stores the server address, it should update the storage
with the new master node address.
"""
ERROR_CODE = 'not_cluster_master'
class RemovedFromCluster(CloudifyClientError):
"""
Raised when attempting to contact a manager that was removed from a
cluster.
The client should retry the request with another manager in the cluster.
If the client stores the server address, it should remove this node's
address from storage.
"""
ERROR_CODE = 'removed_from_cluster'
class DeploymentPluginNotFound(CloudifyClientError):
"""
Raised when a plugin is listed in the blueprint but is not
installed on the manager.
"""
ERROR_CODE = 'deployment_plugin_not_found'
ERROR_MAPPING = dict([
(error.ERROR_CODE, error)
for error in [
DeploymentEnvironmentCreationInProgressError,
DeploymentEnvironmentCreationPendingError,
IllegalExecutionParametersError,
NoSuchIncludeFieldError,
MissingRequiredDeploymentInputError,
UnknownDeploymentInputError,
UnknownDeploymentSecretError,
UnsupportedDeploymentGetSecretError,
FunctionsEvaluationError,
UnknownModificationStageError,
ExistingStartedDeploymentModificationError,
DeploymentModificationAlreadyEndedError,
UserUnauthorizedError,
ForbiddenError,
MaintenanceModeActiveError,
MaintenanceModeActivatingError,
NotModifiedError,
InvalidExecutionUpdateStatus,
PluginInUseError,
PluginInstallationError,
PluginInstallationTimeout,
NotClusterMaster,
RemovedFromCluster,
DeploymentPluginNotFound]])
| en | 0.941013 | ######## # Copyright (c) 2014 GigaSpaces Technologies Ltd. All rights reserved # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # * See the License for the specific language governing permissions and # * limitations under the License. Raised when there's attempt to execute a deployment workflow and deployment environment creation workflow execution is still running. In such a case, workflow execution should be retried after a reasonable time or after the execution of deployment environment creation workflow has terminated. Raised when there's attempt to execute a deployment workflow and deployment environment creation workflow execution is pending. In such a case, workflow execution should be retried after a reasonable time or after the execution of deployment environment creation workflow has terminated. Raised when an attempt to execute a workflow with wrong/missing parameters has been made. Raised when an _include query parameter contains a field which does not exist for the queried data model. Raised when a required deployment input was not specified on deployment creation. Raised when an unexpected input was specified on deployment creation. Raised when a required secret was not found on deployment creation. Raised when an unsupported get_secret intrinsic function appears in the blueprint on deployment creation. Raised when function evaluation failed. Raised when an unknown modification stage was provided. Raised when a deployment modification start is attempted while another deployment modification is currently started Raised when a deployment modification finish/rollback is attempted on a deployment modification that has already been finished/rolledback Raised when a call has been made to a secured resource with an unauthorized user (no credentials / bad credentials) Raised when a call has been made by a user that is not permitted to perform it Raised if a central deployment agent plugin deletion is attempted and at least one deployment is currently using this plugin. Raised if a central deployment agent plugin installation fails. Raised if a central deployment agent plugin installation times out. Raised when a call has been blocked due to maintenance mode being active. Raised when a call has been blocked while maintenance mode is activating. Raised when a 304 not modified error was returned Raised when execution update failed do to invalid status update Raised when the request was served by a manager that is not the master node of a manager cluster. The client should query for the cluster status to learn the master's address, and retry the request. If the client stores the server address, it should update the storage with the new master node address. Raised when attempting to contact a manager that was removed from a cluster. The client should retry the request with another manager in the cluster. If the client stores the server address, it should remove this node's address from storage. Raised when a plugin is listed in the blueprint but is not installed on the manager. | 1.958218 | 2 |
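A minimal usage sketch for the ERROR_MAPPING table defined above: given an error payload from the REST API, look up the matching exception class and raise it. The payload keys used here ('error_code', 'message', 'server_traceback') are assumptions for illustration, not the client's documented wire format.

from cloudify_rest_client.exceptions import (
    ERROR_MAPPING, CloudifyClientError, UserUnauthorizedError)

def raise_from_response(status_code, payload):
    # Fall back to the generic CloudifyClientError for unknown error codes.
    error_code = payload.get('error_code')
    error_class = ERROR_MAPPING.get(error_code, CloudifyClientError)
    raise error_class(payload.get('message', ''),
                      server_traceback=payload.get('server_traceback'),
                      status_code=status_code,
                      error_code=error_code)

try:
    raise_from_response(401, {'error_code': 'unauthorized_error',
                              'message': 'bad credentials'})
except UserUnauthorizedError as e:
    print(e)  # prints "401: bad credentials" via CloudifyClientError.__str__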
sample-demo-lambda-app/lambda_function.py | sriharshams-aws/aws-codeguru-profiler-python-demo-application | 6 | 8467 | <gh_stars>1-10
import boto3
import logging
import os
from random import randrange
from urllib.request import urlopen
# It is not recommended to enable DEBUG logs in production,
# this is just to show an example of a recommendation
# by Amazon CodeGuru Profiler.
logging.getLogger('botocore').setLevel(logging.DEBUG)
SITE = 'http://www.python.org/'
CW_NAMESPACE = 'ProfilerPythonDemo'
S3_BUCKET = os.environ['S3_BUCKET']
def lambda_handler(event, context):
# Make some network calls using urllib and s3 client.
with urlopen(SITE) as response:
s3_client = boto3.client('s3')
s3_client.put_object(Body=response.read(),
Bucket=S3_BUCKET,
Key='response.txt')
# Publish metrics.
content_length = int(response.headers['Content-Length'])
put_metric('ResponseContentLength', content_length)
put_metric(str(response.status)[0] + 'xxStatus', 1)
# Generate some CPU-intensive work.
num = randrange(content_length)
count = 0
for _ in range(num):
x = randrange(num)
if check_prime(x):
count += 1
return count
def put_metric(name, value):
cw_client = boto3.client('cloudwatch')
metric_data_num = [{'MetricName': name, 'Value': value}]
cw_client.put_metric_data(Namespace=CW_NAMESPACE, MetricData=metric_data_num)
def check_prime(num):
if num == 1 or num == 0:
return False
sq_root = 2
while sq_root * sq_root <= num:
if num % sq_root == 0:
return False
sq_root += 1
return True
| import boto3
import logging
import os
from random import randrange
from urllib.request import urlopen
# It is not recommended to enable DEBUG logs in production,
# this is just to show an example of a recommendation
# by Amazon CodeGuru Profiler.
logging.getLogger('botocore').setLevel(logging.DEBUG)
SITE = 'http://www.python.org/'
CW_NAMESPACE = 'ProfilerPythonDemo'
S3_BUCKET = os.environ['S3_BUCKET']
def lambda_handler(event, context):
# Make some network calls using urllib and s3 client.
with urlopen(SITE) as response:
s3_client = boto3.client('s3')
s3_client.put_object(Body=response.read(),
Bucket=S3_BUCKET,
Key='response.txt')
# Publish metrics.
content_length = int(response.headers['Content-Length'])
put_metric('ResponseContentLength', content_length)
put_metric(str(response.status)[0] + 'xxStatus', 1)
# Generate some CPU-intensive work.
num = randrange(content_length)
count = 0
for _ in range(num):
x = randrange(num)
if check_prime(x):
count += 1
return count
def put_metric(name, value):
cw_client = boto3.client('cloudwatch')
metric_data_num = [{'MetricName': name, 'Value': value}]
cw_client.put_metric_data(Namespace=CW_NAMESPACE, MetricData=metric_data_num)
def check_prime(num):
if num == 1 or num == 0:
return False
sq_root = 2
while sq_root * sq_root <= num:
if num % sq_root == 0:
return False
sq_root += 1
return True | en | 0.868165 | # It is not recommended to enable DEBUG logs in production, # this is just to show an example of a recommendation # by Amazon CodeGuru Profiler. # Make some network calls using urllib and s3 client. # Publish metrics. # Generate some CPU-intensive work. | 2.452254 | 2 |
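The CPU-bound part of the handler above is plain trial division; a local sanity check of that logic, with the function copied out so the module-level boto3/S3_BUCKET setup is never triggered, might look like this sketch.

def check_prime(num):
    # Same trial-division test as in lambda_function.py above.
    if num == 1 or num == 0:
        return False
    sq_root = 2
    while sq_root * sq_root <= num:
        if num % sq_root == 0:
            return False
        sq_root += 1
    return True

assert [n for n in range(20) if check_prime(n)] == [2, 3, 5, 7, 11, 13, 17, 19]
print("check_prime behaves as expected on 0..19")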
api/error_handler.py | chuo06/palindrome | 0 | 8468 | from functools import wraps
from werkzeug.exceptions import HTTPException
from api.exceptions import MessageNotFound
def api_error_handler(func):
@wraps(func)
def handle_errors(*args, **kwargs):
try:
return func(*args, **kwargs)
except MessageNotFound as e:
return e.message, 404
except HTTPException:
raise
except Exception:
return "API Internal error", 500
return handle_errors
| from functools import wraps
from werkzeug.exceptions import HTTPException
from api.exceptions import MessageNotFound
def api_error_handler(func):
@wraps(func)
def handle_errors(*args, **kwargs):
try:
return func(*args, **kwargs)
except MessageNotFound as e:
return e.message, 404
except HTTPException:
raise
except Exception:
return "API Internal error", 500
return handle_errors
| none | 1 | 2.495349 | 2 |
|
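As a sketch of intended use, the decorator above would typically sit between the route registration and the view function, so a missing message becomes a 404 and any unexpected failure a 500. The Flask route and the way MessageNotFound is constructed below are assumptions for illustration.

from flask import Flask
from api.error_handler import api_error_handler
from api.exceptions import MessageNotFound

app = Flask(__name__)

@app.route("/messages/<message_id>")
@api_error_handler
def get_message(message_id):
    # Raising MessageNotFound is converted into a (message, 404) response;
    # the constructor argument shown here is hypothetical.
    raise MessageNotFound("message %s not found" % message_id)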
src/nile/core/run.py | kootsZhin/nile | 121 | 8469 | """Command to run Nile scripts."""
import logging
from importlib.machinery import SourceFileLoader
from nile.nre import NileRuntimeEnvironment
def run(path, network):
"""Run nile scripts passing on the NRE object."""
logger = logging.getLogger()
logger.disabled = True
script = SourceFileLoader("script", path).load_module()
nre = NileRuntimeEnvironment(network)
script.run(nre)
| """Command to run Nile scripts."""
import logging
from importlib.machinery import SourceFileLoader
from nile.nre import NileRuntimeEnvironment
def run(path, network):
"""Run nile scripts passing on the NRE object."""
logger = logging.getLogger()
logger.disabled = True
script = SourceFileLoader("script", path).load_module()
nre = NileRuntimeEnvironment(network)
script.run(nre)
| en | 0.84179 | Command to run Nile scripts. Run nile scripts passing on the NRE object. | 2.045 | 2 |
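A script consumed by this runner only has to expose a module-level run(nre) function; a minimal sketch is below. The nre attribute accessed here is an assumption about NileRuntimeEnvironment's interface, hence the defensive getattr.

# scripts/example.py -- executed via run("scripts/example.py", network)
def run(nre):
    print("running against network:", getattr(nre, "network", "<unknown>"))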
Python/Basic Data Types/Lists/Solution.py | PawarAditi/HackerRank | 219 | 8470 | <reponame>PawarAditi/HackerRank
array = []
for _ in range(int(input())):
command = input().strip().split(" ")
cmd_type = command[0]
if (cmd_type == "print"):
print(array)
elif (cmd_type == "sort"):
array.sort()
elif (cmd_type == "reverse"):
array.reverse()
elif (cmd_type == "pop"):
array.pop()
elif (cmd_type == "remove"):
array.remove(int(command[1]))
elif (cmd_type == "append"):
array.append(int(command[1]))
elif (cmd_type == "insert"):
array.insert(int(command[1]), int(command[2])) | array = []
for _ in range(int(input())):
command = input().strip().split(" ")
cmd_type = command[0]
if (cmd_type == "print"):
print(array)
elif (cmd_type == "sort"):
array.sort()
elif (cmd_type == "reverse"):
array.reverse()
elif (cmd_type == "pop"):
array.pop()
elif (cmd_type == "remove"):
array.remove(int(command[1]))
elif (cmd_type == "append"):
array.append(int(command[1]))
elif (cmd_type == "insert"):
array.insert(int(command[1]), int(command[2])) | none | 1 | 3.735368 | 4 |
|
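For context, a short worked trace of the command loop above (illustrative input, not an official test case):

4              (number of commands read by range(int(input())))
append 5       -> array becomes [5]
append 7       -> array becomes [5, 7]
insert 1 6     -> array becomes [5, 6, 7]
print          -> prints [5, 6, 7]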
dbestclient/ml/density.py | horeapinca/DBEstClient | 0 | 8471 | <filename>dbestclient/ml/density.py
# Created by <NAME> at 2019-07-23
# All right reserved
# Department of Computer Science
# the University of Warwick
# <EMAIL>
from sklearn.neighbors import KernelDensity
class DBEstDensity:
def __init__(self, kernel=None):
        if kernel is None:
            kernel = 'gaussian'
        self.kernel = kernel
        self.kde = None
def fit(self, x):
self.kde = KernelDensity(kernel=self.kernel).fit(x)
return self.kde | <filename>dbestclient/ml/density.py
# Created by <NAME> at 2019-07-23
# All right reserved
# Department of Computer Science
# the University of Warwick
# <EMAIL>
from sklearn.neighbors import KernelDensity
class DBEstDensity:
def __init__(self, kernel=None):
        if kernel is None:
            kernel = 'gaussian'
        self.kernel = kernel
        self.kde = None
def fit(self, x):
self.kde = KernelDensity(kernel=self.kernel).fit(x)
return self.kde | en | 0.731361 | # Created by <NAME> at 2019-07-23 # All right reserved # Department of Computer Science # the University of Warwick # <EMAIL> | 2.510916 | 3 |
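scikit-learn's KernelDensity expects a 2-D array of shape (n_samples, n_features), so a caller of the wrapper above would reshape 1-D data first; a small sketch (module path taken from this file's location):

import numpy as np
from dbestclient.ml.density import DBEstDensity

x = np.random.normal(loc=0.0, scale=1.0, size=500).reshape(-1, 1)
kde = DBEstDensity().fit(x)
# score_samples returns the log-density; exponentiate to get the estimate itself.
log_dens = kde.score_samples(np.array([[0.0], [1.0], [2.0]]))
print(np.exp(log_dens))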
setup.py | panchambanerjee/access_spotify | 4 | 8472 | #!/usr/bin/env python
import setuptools
from setuptools import setup
from os import path
# Read the package requirements
with open("requirements.txt", "r") as f:
requirements = [line.rstrip("\n") for line in f if line != "\n"]
# Read the contents of the README file
this_directory = path.abspath(path.dirname(__file__))
with open(path.join(this_directory, 'README.md'), encoding='utf-8') as f:
long_description = f.read()
setup(name='access-spotify',
version="1.1",
author="pancham_banerjee",
author_email="<EMAIL>",
packages=setuptools.find_packages(),
scripts=["./bin/access_script.py"],
install_requires=requirements,
license="MIT",
description="A package to get all album and track info for an artist by querying the Spotify API",
long_description=long_description,
long_description_content_type='text/markdown'
)
| #!/usr/bin/env python
import setuptools
from setuptools import setup
from os import path
# Read the package requirements
with open("requirements.txt", "r") as f:
requirements = [line.rstrip("\n") for line in f if line != "\n"]
# Read the contents of the README file
this_directory = path.abspath(path.dirname(__file__))
with open(path.join(this_directory, 'README.md'), encoding='utf-8') as f:
long_description = f.read()
setup(name='access-spotify',
version="1.1",
author="pancham_banerjee",
author_email="<EMAIL>",
packages=setuptools.find_packages(),
scripts=["./bin/access_script.py"],
install_requires=requirements,
license="MIT",
description="A package to get all album and track info for an artist by querying the Spotify API",
long_description=long_description,
long_description_content_type='text/markdown'
)
| en | 0.567881 | #!/usr/bin/env python # Read the package requirements # Read the contents of the README file | 1.904984 | 2 |
mundiapi/models/update_plan_request.py | hugocpolos/MundiAPI-PYTHON | 10 | 8473 | # -*- coding: utf-8 -*-
"""
mundiapi
This file was automatically generated by APIMATIC v2.0 ( https://apimatic.io ).
"""
class UpdatePlanRequest(object):
"""Implementation of the 'UpdatePlanRequest' model.
Request for updating a plan
Attributes:
name (string): Plan's name
description (string): Description
        installments (list of int): Number of installments
statement_descriptor (string): Text that will be shown on the credit
card's statement
currency (string): Currency
interval (string): Interval
interval_count (int): Interval count
payment_methods (list of string): Payment methods accepted by the
plan
billing_type (string): Billing type
status (string): Plan status
shippable (bool): Indicates if the plan is shippable
billing_days (list of int): Billing days accepted by the plan
metadata (dict<object, string>): Metadata
minimum_price (int): Minimum price
trial_period_days (int): Number of trial period in days, where the
customer will not be charged
"""
# Create a mapping from Model property names to API property names
_names = {
"name":'name',
"description":'description',
"installments":'installments',
"statement_descriptor":'statement_descriptor',
"currency":'currency',
"interval":'interval',
"interval_count":'interval_count',
"payment_methods":'payment_methods',
"billing_type":'billing_type',
"status":'status',
"shippable":'shippable',
"billing_days":'billing_days',
"metadata":'metadata',
"minimum_price":'minimum_price',
"trial_period_days":'trial_period_days'
}
def __init__(self,
name=None,
description=None,
installments=None,
statement_descriptor=None,
currency=None,
interval=None,
interval_count=None,
payment_methods=None,
billing_type=None,
status=None,
shippable=None,
billing_days=None,
metadata=None,
minimum_price=None,
trial_period_days=None):
"""Constructor for the UpdatePlanRequest class"""
# Initialize members of the class
self.name = name
self.description = description
self.installments = installments
self.statement_descriptor = statement_descriptor
self.currency = currency
self.interval = interval
self.interval_count = interval_count
self.payment_methods = payment_methods
self.billing_type = billing_type
self.status = status
self.shippable = shippable
self.billing_days = billing_days
self.metadata = metadata
self.minimum_price = minimum_price
self.trial_period_days = trial_period_days
@classmethod
def from_dictionary(cls,
dictionary):
"""Creates an instance of this model from a dictionary
Args:
dictionary (dictionary): A dictionary representation of the object as
obtained from the deserialization of the server's response. The keys
MUST match property names in the API description.
Returns:
object: An instance of this structure class.
"""
if dictionary is None:
return None
# Extract variables from the dictionary
name = dictionary.get('name')
description = dictionary.get('description')
installments = dictionary.get('installments')
statement_descriptor = dictionary.get('statement_descriptor')
currency = dictionary.get('currency')
interval = dictionary.get('interval')
interval_count = dictionary.get('interval_count')
payment_methods = dictionary.get('payment_methods')
billing_type = dictionary.get('billing_type')
status = dictionary.get('status')
shippable = dictionary.get('shippable')
billing_days = dictionary.get('billing_days')
metadata = dictionary.get('metadata')
minimum_price = dictionary.get('minimum_price')
trial_period_days = dictionary.get('trial_period_days')
# Return an object of this model
return cls(name,
description,
installments,
statement_descriptor,
currency,
interval,
interval_count,
payment_methods,
billing_type,
status,
shippable,
billing_days,
metadata,
minimum_price,
trial_period_days)
| # -*- coding: utf-8 -*-
"""
mundiapi
This file was automatically generated by APIMATIC v2.0 ( https://apimatic.io ).
"""
class UpdatePlanRequest(object):
"""Implementation of the 'UpdatePlanRequest' model.
Request for updating a plan
Attributes:
name (string): Plan's name
description (string): Description
        installments (list of int): Number of installments
statement_descriptor (string): Text that will be shown on the credit
card's statement
currency (string): Currency
interval (string): Interval
interval_count (int): Interval count
payment_methods (list of string): Payment methods accepted by the
plan
billing_type (string): Billing type
status (string): Plan status
shippable (bool): Indicates if the plan is shippable
billing_days (list of int): Billing days accepted by the plan
metadata (dict<object, string>): Metadata
minimum_price (int): Minimum price
trial_period_days (int): Number of trial period in days, where the
customer will not be charged
"""
# Create a mapping from Model property names to API property names
_names = {
"name":'name',
"description":'description',
"installments":'installments',
"statement_descriptor":'statement_descriptor',
"currency":'currency',
"interval":'interval',
"interval_count":'interval_count',
"payment_methods":'payment_methods',
"billing_type":'billing_type',
"status":'status',
"shippable":'shippable',
"billing_days":'billing_days',
"metadata":'metadata',
"minimum_price":'minimum_price',
"trial_period_days":'trial_period_days'
}
def __init__(self,
name=None,
description=None,
installments=None,
statement_descriptor=None,
currency=None,
interval=None,
interval_count=None,
payment_methods=None,
billing_type=None,
status=None,
shippable=None,
billing_days=None,
metadata=None,
minimum_price=None,
trial_period_days=None):
"""Constructor for the UpdatePlanRequest class"""
# Initialize members of the class
self.name = name
self.description = description
self.installments = installments
self.statement_descriptor = statement_descriptor
self.currency = currency
self.interval = interval
self.interval_count = interval_count
self.payment_methods = payment_methods
self.billing_type = billing_type
self.status = status
self.shippable = shippable
self.billing_days = billing_days
self.metadata = metadata
self.minimum_price = minimum_price
self.trial_period_days = trial_period_days
@classmethod
def from_dictionary(cls,
dictionary):
"""Creates an instance of this model from a dictionary
Args:
dictionary (dictionary): A dictionary representation of the object as
obtained from the deserialization of the server's response. The keys
MUST match property names in the API description.
Returns:
object: An instance of this structure class.
"""
if dictionary is None:
return None
# Extract variables from the dictionary
name = dictionary.get('name')
description = dictionary.get('description')
installments = dictionary.get('installments')
statement_descriptor = dictionary.get('statement_descriptor')
currency = dictionary.get('currency')
interval = dictionary.get('interval')
interval_count = dictionary.get('interval_count')
payment_methods = dictionary.get('payment_methods')
billing_type = dictionary.get('billing_type')
status = dictionary.get('status')
shippable = dictionary.get('shippable')
billing_days = dictionary.get('billing_days')
metadata = dictionary.get('metadata')
minimum_price = dictionary.get('minimum_price')
trial_period_days = dictionary.get('trial_period_days')
# Return an object of this model
return cls(name,
description,
installments,
statement_descriptor,
currency,
interval,
interval_count,
payment_methods,
billing_type,
status,
shippable,
billing_days,
metadata,
minimum_price,
trial_period_days)
| en | 0.764967 | # -*- coding: utf-8 -*- mundiapi
This file was automatically generated by APIMATIC v2.0 ( https://apimatic.io ). Implementation of the 'UpdatePlanRequest' model.
Request for updating a plan
Attributes:
name (string): Plan's name
description (string): Description
installments (list of int): Number of installments
statement_descriptor (string): Text that will be shown on the credit
card's statement
currency (string): Currency
interval (string): Interval
interval_count (int): Interval count
payment_methods (list of string): Payment methods accepted by the
plan
billing_type (string): Billing type
status (string): Plan status
shippable (bool): Indicates if the plan is shippable
billing_days (list of int): Billing days accepted by the plan
metadata (dict<object, string>): Metadata
minimum_price (int): Minimum price
trial_period_days (int): Number of trial period in days, where the
customer will not be charged # Create a mapping from Model property names to API property names Constructor for the UpdatePlanRequest class # Initialize members of the class Creates an instance of this model from a dictionary
Args:
dictionary (dictionary): A dictionary representation of the object as
obtained from the deserialization of the server's response. The keys
MUST match property names in the API description.
Returns:
object: An instance of this structure class. # Extract variables from the dictionary # Return an object of this model | 2.359237 | 2 |
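A round-trip sketch for the model above: build an instance from a plain dict (as deserialized from an API response) and read attributes back. Keys missing from the dict simply come back as None; the field values shown are illustrative.

from mundiapi.models.update_plan_request import UpdatePlanRequest

payload = {
    "name": "Monthly plan",
    "interval": "month",
    "interval_count": 1,
    "payment_methods": ["credit_card"],
}

plan = UpdatePlanRequest.from_dictionary(payload)
print(plan.name, plan.interval_count)  # Monthly plan 1
print(plan.currency)                   # None (not supplied)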
hearthstone/hslog/utils.py | bertokhoury/python-hearthstone | 1 | 8474 | <filename>hearthstone/hslog/utils.py
from hearthstone.enums import GameTag, TAG_TYPES
def parse_enum(enum, value):
if value.isdigit():
value = int(value)
elif hasattr(enum, value):
value = getattr(enum, value)
else:
raise Exception("Unhandled %s: %r" % (enum, value))
return value
def parse_tag(tag, value):
tag = parse_enum(GameTag, tag)
if tag in TAG_TYPES:
value = parse_enum(TAG_TYPES[tag], value)
elif value.isdigit():
value = int(value)
else:
raise NotImplementedError("Invalid string value %r = %r" % (tag, value))
return tag, value
| <filename>hearthstone/hslog/utils.py
from hearthstone.enums import GameTag, TAG_TYPES
def parse_enum(enum, value):
if value.isdigit():
value = int(value)
elif hasattr(enum, value):
value = getattr(enum, value)
else:
raise Exception("Unhandled %s: %r" % (enum, value))
return value
def parse_tag(tag, value):
tag = parse_enum(GameTag, tag)
if tag in TAG_TYPES:
value = parse_enum(TAG_TYPES[tag], value)
elif value.isdigit():
value = int(value)
else:
raise NotImplementedError("Invalid string value %r = %r" % (tag, value))
return tag, value
| none | 1 | 2.887113 | 3 |
|
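A short sketch of how parse_enum and parse_tag above resolve values; the concrete GameTag member used ('ZONE') is an assumption about what the installed hearthstone package defines.

from hearthstone.enums import GameTag
from hearthstone.hslog.utils import parse_enum, parse_tag

print(parse_enum(GameTag, "49"))     # numeric strings pass through as plain ints
tag, value = parse_tag("ZONE", "1")  # name resolved on GameTag, value via TAG_TYPES
print(tag, value)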
ejemplo_clase_00.py | ernestoarzabala/Curso-Python-Utch | 0 | 8475 | <gh_stars>0
# Archivo ejemplo 00 de creacion de clases en Python
from math import gcd # greatest common denominator = Maximo Comun Divisor (MCD)
class Fraccion:
""" La clase Fraccion: Una fraccion es un part de enteros: un numerador (num)
y un denominador (den !=0 ) cuyo MCD es 1.
"""
def __init__(self,numerador,denominador):
""" Constructor de la clase. Construye una fracción a partir de dos enteros:
un numerador y un denominador.
¡El constructor se enloquece si el denominador es cero!
Nota mental:Agregar manejo de error para denominador igual a cero.
"""
numerador = int(numerador)
denominador = int(denominador)
hcf = gcd(numerador,denominador)
self.num, self.den = numerador/hcf, denominador/hcf
def __str__(self):
""" Generador de strings para representar una fracción.
Se necesita si se desea convertir ,o mostrar, una fraccion a string.
"""
return "%d/%d" % (self.num,self.den)
def __mul__(self,otrafraccion):
""" Función necesaria para el operador de multiplicación.
Multiplica dos fracciones para obtener una fraccion resultante
"""
return Fraccion(self.num*otrafraccion.num,self.den*otrafraccion.den)
def __add__(self,otrafraccion):
"""Función necesaria para el operador de suma.
Suma dos fracciones para obtener una fraccion resultante
"""
return Fraccion(self.num*otrafraccion.den+self.den*otrafraccion.num,self.den*otrafraccion.den)
def a_numero_real(self):
""" Función para convertir la fracción a un numero de punto flotante.
El equivalente numérico con punto decimal de la fracción.
"""
return float(self.num)/float(self.den)
if __name__ == "__main__":
a = Fraccion(5,12)
print(a)
b = Fraccion(3,5)
c = a*b
c_real = c.a_numero_real()
print("Multiplicar la fraccion {} por la fraccion {} da como resultado la fraccion {} que es equivalente a {}".format(a,b,c,c_real))# Escribe tu código aquí :-)
a = Fraccion(1,2)
print(a)
b = Fraccion(1,4)
c = a+b
c_real = c.a_numero_real()
print("Sumar la fraccion {} con la fraccion {} da como resultado la fraccion {} que es equivalente a {}".format(a,b,c,c_real))# Escribe tu código aquí :-)
| # Archivo ejemplo 00 de creacion de clases en Python
from math import gcd # greatest common denominator = Maximo Comun Divisor (MCD)
class Fraccion:
""" La clase Fraccion: Una fraccion es un part de enteros: un numerador (num)
y un denominador (den !=0 ) cuyo MCD es 1.
"""
def __init__(self,numerador,denominador):
""" Constructor de la clase. Construye una fracción a partir de dos enteros:
un numerador y un denominador.
¡El constructor se enloquece si el denominador es cero!
Nota mental:Agregar manejo de error para denominador igual a cero.
"""
numerador = int(numerador)
denominador = int(denominador)
hcf = gcd(numerador,denominador)
self.num, self.den = numerador/hcf, denominador/hcf
def __str__(self):
""" Generador de strings para representar una fracción.
Se necesita si se desea convertir ,o mostrar, una fraccion a string.
"""
return "%d/%d" % (self.num,self.den)
def __mul__(self,otrafraccion):
""" Función necesaria para el operador de multiplicación.
Multiplica dos fracciones para obtener una fraccion resultante
"""
return Fraccion(self.num*otrafraccion.num,self.den*otrafraccion.den)
def __add__(self,otrafraccion):
"""Función necesaria para el operador de suma.
Suma dos fracciones para obtener una fraccion resultante
"""
return Fraccion(self.num*otrafraccion.den+self.den*otrafraccion.num,self.den*otrafraccion.den)
def a_numero_real(self):
""" Función para convertir la fracción a un numero de punto flotante.
El equivalente numérico con punto decimal de la fracción.
"""
return float(self.num)/float(self.den)
if __name__ == "__main__":
a = Fraccion(5,12)
print(a)
b = Fraccion(3,5)
c = a*b
c_real = c.a_numero_real()
print("Multiplicar la fraccion {} por la fraccion {} da como resultado la fraccion {} que es equivalente a {}".format(a,b,c,c_real))# Escribe tu código aquí :-)
a = Fraccion(1,2)
print(a)
b = Fraccion(1,4)
c = a+b
c_real = c.a_numero_real()
print("Sumar la fraccion {} con la fraccion {} da como resultado la fraccion {} que es equivalente a {}".format(a,b,c,c_real))# Escribe tu código aquí :-) | es | 0.927926 | # Archivo ejemplo 00 de creacion de clases en Python # greatest common denominator = Maximo Comun Divisor (MCD) La clase Fraccion: Una fraccion es un part de enteros: un numerador (num) y un denominador (den !=0 ) cuyo MCD es 1. Constructor de la clase. Construye una fracción a partir de dos enteros: un numerador y un denominador. ¡El constructor se enloquece si el denominador es cero! Nota mental:Agregar manejo de error para denominador igual a cero. Generador de strings para representar una fracción. Se necesita si se desea convertir ,o mostrar, una fraccion a string. Función necesaria para el operador de multiplicación. Multiplica dos fracciones para obtener una fraccion resultante Función necesaria para el operador de suma. Suma dos fracciones para obtener una fraccion resultante Función para convertir la fracción a un numero de punto flotante. El equivalente numérico con punto decimal de la fracción. # Escribe tu código aquí :-) # Escribe tu código aquí :-) | 3.557716 | 4 |
addons14/base_rest/__init__.py | odoochain/addons_oca | 1 | 8476 | from . import models
from . import components
from . import http
| from . import models
from . import components
from . import http
| none | 1 | 1.129688 | 1 |
|
recs/live_project_popularity_recommender.py | WingCode/live-project | 0 | 8477 | import os
import pandas as pd
class LiveProjectPopularityBasedRecs:
def __init__(self):
self.charts = {}
charts_folder = "charts"
if os.path.isdir(charts_folder):
for file in os.listdir("charts"):
name, ext = file.split('.')
if ext == "csv" and len(name) > 0:
self.charts[name] = pd.read_csv("{}/{}".format(charts_folder, file), index_col=0)
else:
print("Genre Global and Charts not implemented!")
def genre_chart(self, genre):
if genre in self.charts:
return self.charts[genre]
elif "Top" in self.charts:
return self.charts["Top"]
else:
return ""
| import os
import pandas as pd
class LiveProjectPopularityBasedRecs:
def __init__(self):
self.charts = {}
charts_folder = "charts"
if os.path.isdir(charts_folder):
for file in os.listdir("charts"):
name, ext = file.split('.')
if ext == "csv" and len(name) > 0:
self.charts[name] = pd.read_csv("{}/{}".format(charts_folder, file), index_col=0)
else:
print("Genre Global and Charts not implemented!")
def genre_chart(self, genre):
if genre in self.charts:
return self.charts[genre]
elif "Top" in self.charts:
return self.charts["Top"]
else:
return ""
| none | 1 | 3.054117 | 3 |
|
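The class above only needs a charts/ directory of CSV files with an index column; a sketch of preparing one chart and querying it (the column names are arbitrary assumptions):

import os
import pandas as pd
from recs.live_project_popularity_recommender import LiveProjectPopularityBasedRecs

os.makedirs("charts", exist_ok=True)
pd.DataFrame({"title": ["A", "B", "C"], "popularity": [120, 75, 40]},
             index=[101, 102, 103]).to_csv("charts/Top.csv")

recs = LiveProjectPopularityBasedRecs()
print(recs.genre_chart("drama"))  # unknown genre falls back to the "Top" chart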
resource/pypi/cffi-1.9.1/testing/cffi0/snippets/distutils_module/setup.py | hipnusleo/Laserjet | 0 | 8478 | <filename>resource/pypi/cffi-1.9.1/testing/cffi0/snippets/distutils_module/setup.py<gh_stars>0
from distutils.core import setup
import snip_basic_verify
setup(
py_modules=['snip_basic_verify'],
ext_modules=[snip_basic_verify.ffi.verifier.get_extension()])
| <filename>resource/pypi/cffi-1.9.1/testing/cffi0/snippets/distutils_module/setup.py<gh_stars>0
from distutils.core import setup
import snip_basic_verify
setup(
py_modules=['snip_basic_verify'],
ext_modules=[snip_basic_verify.ffi.verifier.get_extension()])
| none | 1 | 1.271719 | 1 |
|
pce/src/testing/test_pce.py | elise-baumgartner/onramp | 2 | 8479 | #!../env/bin/python
"""A simple test script for the PCE portion of OnRamp.
Usage: ./test_pce.py
This script is only intended to be run in a fresh install of the repository. It
has side-effects that could corrupt module and user data if run in a production
setting.
Prior to running this script, ensure that onramp/pce/bin/onramp_pce_install.py
has been called and that the server is running. Also Ensure
./test_pce_config.cfg contains the proper settings.
"""
import nose
import sys
if __name__ == '__main__':
print (__doc__)
response = raw_input('(C)ontinue or (A)bort? ')
if response != 'C':
sys.exit(0)
nose.main()
| #!../env/bin/python
"""A simple test script for the PCE portion of OnRamp.
Usage: ./test_pce.py
This script is only intended to be run in a fresh install of the repository. It
has side-effects that could corrupt module and user data if run in a production
setting.
Prior to running this script, ensure that onramp/pce/bin/onramp_pce_install.py
has been called and that the server is running. Also Ensure
./test_pce_config.cfg contains the proper settings.
"""
import nose
import sys
if __name__ == '__main__':
print (__doc__)
response = raw_input('(C)ontinue or (A)bort? ')
if response != 'C':
sys.exit(0)
nose.main()
| en | 0.869723 | #!../env/bin/python A simple test script for the PCE portion of OnRamp. Usage: ./test_pce.py This script is only intended to be run in a fresh install of the repository. It has side-effects that could corrupt module and user data if run in a production setting. Prior to running this script, ensure that onramp/pce/bin/onramp_pce_install.py has been called and that the server is running. Also Ensure ./test_pce_config.cfg contains the proper settings. | 2.113826 | 2 |
tobac/plotting.py | w-herbst/tobac | 36 | 8480 | <filename>tobac/plotting.py<gh_stars>10-100
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as plt
import logging
from .analysis import lifetime_histogram
from .analysis import histogram_cellwise,histogram_featurewise
import numpy as np
def plot_tracks_mask_field_loop(track,field,mask,features,axes=None,name=None,plot_dir='./',
figsize=(10./2.54,10./2.54),dpi=300,
margin_left=0.05,margin_right=0.05,margin_bottom=0.05,margin_top=0.05,
**kwargs):
import cartopy.crs as ccrs
import os
from iris import Constraint
os.makedirs(plot_dir,exist_ok=True)
time=mask.coord('time')
if name is None:
name=field.name()
for time_i in time.points:
datetime_i=time.units.num2date(time_i)
constraint_time = Constraint(time=datetime_i)
fig1,ax1=plt.subplots(ncols=1, nrows=1,figsize=figsize, subplot_kw={'projection': ccrs.PlateCarree()})
datestring_file=datetime_i.strftime('%Y-%m-%d_%H:%M:%S')
field_i=field.extract(constraint_time)
mask_i=mask.extract(constraint_time)
track_i=track[track['time']==datetime_i]
features_i=features[features['time']==datetime_i]
ax1=plot_tracks_mask_field(track=track_i,field=field_i,mask=mask_i,features=features_i,
axes=ax1,**kwargs)
fig1.subplots_adjust(left=margin_left, bottom=margin_bottom, right=1-margin_right, top=1-margin_top)
os.makedirs(plot_dir, exist_ok=True)
savepath_png=os.path.join(plot_dir,name+'_'+datestring_file+'.png')
fig1.savefig(savepath_png,dpi=dpi)
logging.debug('Figure plotted to ' + str(savepath_png))
plt.close()
def plot_tracks_mask_field(track,field,mask,features,axes=None,axis_extent=None,
plot_outline=True,
plot_marker=True,marker_track='x',markersize_track=4,
plot_number=True,
plot_features=False,marker_feature=None,markersize_feature=None,
title=None,title_str=None,
vmin=None,vmax=None,n_levels=50,
cmap='viridis',extend='neither',
orientation_colorbar='horizontal',pad_colorbar=0.05,
label_colorbar=None,fraction_colorbar=0.046,
rasterized=True,linewidth_contour=1
):
import cartopy
from cartopy.mpl.gridliner import LONGITUDE_FORMATTER, LATITUDE_FORMATTER
import iris.plot as iplt
from matplotlib.ticker import MaxNLocator
import cartopy.feature as cfeature
from .utils import mask_features,mask_features_surface
from matplotlib import ticker
if type(axes) is not cartopy.mpl.geoaxes.GeoAxesSubplot:
raise ValueError('axes had to be cartopy.mpl.geoaxes.GeoAxesSubplot')
datestr=field.coord('time').units.num2date(field.coord('time').points[0]).strftime('%Y-%m-%d %H:%M:%S')
    if title=='datestr':
        if title_str is None:
            titlestring=datestr
        elif isinstance(title_str, str):
            titlestring=title_str + ' ' + datestr
axes.set_title(titlestring,horizontalalignment='left',loc='left')
gl = axes.gridlines(draw_labels=True)
majorLocator = MaxNLocator(nbins=5,steps=[1,2,5,10])
gl.xlocator=majorLocator
gl.ylocator=majorLocator
gl.xformatter = LONGITUDE_FORMATTER
axes.tick_params(axis='both', which='major')
gl.yformatter = LATITUDE_FORMATTER
gl.xlabels_top = False
gl.ylabels_right = False
axes.coastlines('10m')
# rivers=cfeature.NaturalEarthFeature(category='physical', name='rivers_lake_centerlines',scale='10m',facecolor='none')
lakes=cfeature.NaturalEarthFeature(category='physical', name='lakes',scale='10m',facecolor='none')
axes.add_feature(lakes, edgecolor='black')
axes.set_xlabel('longitude')
axes.set_ylabel('latitude')
# Plot the background field
if np.any(~np.isnan(field.data)): # check if field to plot is not only nan, which causes error:
plot_field=iplt.contourf(field,coords=['longitude','latitude'],
levels=np.linspace(vmin,vmax,num=n_levels),extend=extend,
axes=axes,
cmap=cmap,vmin=vmin,vmax=vmax,zorder=1
)
if rasterized:
axes.set_rasterization_zorder(1)
# create colorbar for background field:
cbar=plt.colorbar(plot_field,orientation=orientation_colorbar, pad=pad_colorbar,fraction=fraction_colorbar,ax=axes)
if label_colorbar is None:
label_colorbar=field.name()+ '('+field.units.symbol +')'
        if orientation_colorbar=='horizontal':
            cbar.ax.set_xlabel(label_colorbar)
        elif orientation_colorbar=='vertical':
            cbar.ax.set_ylabel(label_colorbar)
tick_locator = ticker.MaxNLocator(nbins=5)
cbar.locator = tick_locator
cbar.update_ticks()
colors_mask=['darkred','orange','crimson','red','darkorange']
#if marker_feature is not explicitly given, set it to marker_track (will then be overwritten by the coloured markers)
if marker_feature is None:
        marker_feature=marker_track
if markersize_feature is None:
        markersize_feature=markersize_track
#Plot the identified features by looping over rows of DataFrame:
if plot_features:
for i_row,row in features.iterrows():
axes.plot(row['longitude'],row['latitude'],
                      color='grey',marker=marker_feature,markersize=markersize_feature)
    # restrict track to the features inside the axis extent
track=track.loc[(track['longitude'] > axis_extent[0])
& (track['longitude'] < axis_extent[1])
& (track['latitude'] > axis_extent[2])
& (track['latitude'] < axis_extent[3])]
#Plot tracked features by looping over rows of Dataframe
for i_row,row in track.iterrows():
feature=row['feature']
cell=row['cell']
if not np.isnan(cell):
color=colors_mask[int(cell%len(colors_mask))]
if plot_number:
cell_string=' '+str(int(row['cell']))
axes.text(row['longitude'],row['latitude'],cell_string,
color=color,fontsize=6, clip_on=True)
else:
color='grey'
if plot_outline:
mask_i=None
# if mask is 3D, create surface projection, if mask is 2D keep the mask
if mask.ndim==2:
mask_i=mask_features(mask,feature,masked=False)
elif mask.ndim==3:
mask_i=mask_features_surface(mask,feature,masked=False,z_coord='model_level_number')
else:
raise ValueError('mask has shape that cannot be understood')
            # plot contour lines around the edges of the mask
iplt.contour(mask_i,coords=['longitude','latitude'],
levels=[0,feature],
colors=color,linewidths=linewidth_contour,
axes=axes)
if plot_marker:
axes.plot(row['longitude'],row['latitude'],
color=color,marker=marker_track,markersize=markersize_track)
axes.set_extent(axis_extent)
return axes
def animation_mask_field(track,features,field,mask,interval=500,figsize=(10,10),**kwargs):
import cartopy.crs as ccrs
import matplotlib.pyplot as plt
import matplotlib.animation
from iris import Constraint
fig=plt.figure(figsize=figsize)
plt.close()
def update(time_in):
fig.clf()
ax=fig.add_subplot(111,projection=ccrs.PlateCarree())
constraint_time = Constraint(time=time_in)
field_i=field.extract(constraint_time)
mask_i=mask.extract(constraint_time)
track_i=track[track['time']==time_in]
features_i=features[features['time']==time_in]
#fig1,ax1=plt.subplots(ncols=1, nrows=1,figsize=figsize, subplot_kw={'projection': ccrs.PlateCarree()})
plot_tobac=plot_tracks_mask_field(track_i,field=field_i,mask=mask_i,features=features_i,
axes=ax,
**kwargs)
ax.set_title('{}'.format(time_in))
time=field.coord('time')
datetimes=time.units.num2date(time.points)
animation = matplotlib.animation.FuncAnimation(fig, update,init_func=None, frames=datetimes,interval=interval, blit=False)
return animation
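# Usage sketch: the return value is a plain matplotlib FuncAnimation, so a
# caller could e.g. save it to disk or embed it in a notebook:
#   anim = animation_mask_field(track, features, field, mask, interval=500,
#                               axis_extent=axis_extent, vmin=0, vmax=50)
#   anim.save('tracks.mp4', dpi=200)           # or anim.to_html5_video()
# The keyword arguments shown are illustrative; they are passed through
# unchanged to plot_tracks_mask_field.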
def plot_mask_cell_track_follow(cell,track, cog, features, mask_total,
field_contour, field_filled,
width=10000,
name= 'test', plotdir='./',
file_format=['png'],figsize=(10/2.54, 10/2.54),dpi=300,
**kwargs):
    '''Make plots for all cells centred around cell and with one background field as filling and one background field as contours
Input:
Output:
'''
from iris import Constraint
from numpy import unique
import os
track_cell=track[track['cell']==cell]
for i_row,row in track_cell.iterrows():
constraint_time = Constraint(time=row['time'])
constraint_x = Constraint(projection_x_coordinate = lambda cell: row['projection_x_coordinate']-width < cell < row['projection_x_coordinate']+width)
constraint_y = Constraint(projection_y_coordinate = lambda cell: row['projection_y_coordinate']-width < cell < row['projection_y_coordinate']+width)
constraint = constraint_time & constraint_x & constraint_y
mask_total_i=mask_total.extract(constraint)
if field_contour is None:
field_contour_i=None
else:
field_contour_i=field_contour.extract(constraint)
if field_filled is None:
field_filled_i=None
else:
field_filled_i=field_filled.extract(constraint)
cells=list(unique(mask_total_i.core_data()))
if cell not in cells:
cells.append(cell)
if 0 in cells:
cells.remove(0)
track_i=track[track['cell'].isin(cells)]
track_i=track_i[track_i['time']==row['time']]
if cog is None:
cog_i=None
else:
cog_i=cog[cog['cell'].isin(cells)]
cog_i=cog_i[cog_i['time']==row['time']]
if features is None:
features_i=None
else:
features_i=features[features['time']==row['time']]
fig1, ax1 = plt.subplots(ncols=1, nrows=1, figsize=figsize)
fig1.subplots_adjust(left=0.2, bottom=0.15, right=0.85, top=0.80)
datestring_stamp = row['time'].strftime('%Y-%m-%d %H:%M:%S')
celltime_stamp = "%02d:%02d:%02d" % (row['time_cell'].dt.total_seconds() // 3600,(row['time_cell'].dt.total_seconds() % 3600) // 60, row['time_cell'].dt.total_seconds() % 60 )
title=datestring_stamp + ' , ' + celltime_stamp
datestring_file = row['time'].strftime('%Y-%m-%d_%H%M%S')
ax1=plot_mask_cell_individual_follow(cell_i=cell,track=track_i, cog=cog_i,features=features_i,
mask_total=mask_total_i,
field_contour=field_contour_i, field_filled=field_filled_i,
width=width,
axes=ax1,title=title,
**kwargs)
out_dir = os.path.join(plotdir, name)
os.makedirs(out_dir, exist_ok=True)
if 'png' in file_format:
savepath_png = os.path.join(out_dir, name + '_' + datestring_file + '.png')
fig1.savefig(savepath_png, dpi=dpi)
logging.debug('field_contour field_filled Mask plot saved to ' + savepath_png)
if 'pdf' in file_format:
savepath_pdf = os.path.join(out_dir, name + '_' + datestring_file + '.pdf')
fig1.savefig(savepath_pdf, dpi=dpi)
logging.debug('field_contour field_filled Mask plot saved to ' + savepath_pdf)
plt.close()
plt.clf()
def plot_mask_cell_individual_follow(cell_i,track, cog,features, mask_total,
field_contour, field_filled,
axes=None,width=10000,
label_field_contour=None, cmap_field_contour='Blues',norm_field_contour=None,
linewidths_contour=0.8,contour_labels=False,
vmin_field_contour=0,vmax_field_contour=50,levels_field_contour=None,nlevels_field_contour=10,
label_field_filled=None,cmap_field_filled='summer',norm_field_filled=None,
vmin_field_filled=0,vmax_field_filled=100,levels_field_filled=None,nlevels_field_filled=10,
title=None
):
    '''Make individual plot for cell centred around cell and with one background field as filling and one background field as contours
Input:
Output:
'''
import numpy as np
from .utils import mask_cell_surface
from mpl_toolkits.axes_grid1 import make_axes_locatable
from matplotlib.colors import Normalize
divider = make_axes_locatable(axes)
x_pos=track[track['cell']==cell_i]['projection_x_coordinate'].item()
y_pos=track[track['cell']==cell_i]['projection_y_coordinate'].item()
if field_filled is not None:
if levels_field_filled is None:
levels_field_filled=np.linspace(vmin_field_filled,vmax_field_filled, nlevels_field_filled)
plot_field_filled = axes.contourf((field_filled.coord('projection_x_coordinate').points-x_pos)/1000,
(field_filled.coord('projection_y_coordinate').points-y_pos)/1000,
field_filled.data,
cmap=cmap_field_filled,norm=norm_field_filled,
levels=levels_field_filled,vmin=vmin_field_filled, vmax=vmax_field_filled)
cax_filled = divider.append_axes("right", size="5%", pad=0.1)
norm_filled= Normalize(vmin=vmin_field_filled, vmax=vmax_field_filled)
sm_filled= plt.cm.ScalarMappable(norm=norm_filled, cmap = plot_field_filled.cmap)
sm_filled.set_array([])
cbar_field_filled = plt.colorbar(sm_filled, orientation='vertical',cax=cax_filled)
cbar_field_filled.ax.set_ylabel(label_field_filled)
cbar_field_filled.set_clim(vmin_field_filled, vmax_field_filled)
if field_contour is not None:
if levels_field_contour is None:
levels_field_contour=np.linspace(vmin_field_contour, vmax_field_contour, nlevels_field_contour)
if norm_field_contour:
            vmin_field_contour=None
            vmax_field_contour=None
plot_field_contour = axes.contour((field_contour.coord('projection_x_coordinate').points-x_pos)/1000,
(field_contour.coord('projection_y_coordinate').points-y_pos)/1000,
field_contour.data,
cmap=cmap_field_contour,norm=norm_field_contour,
levels=levels_field_contour,vmin=vmin_field_contour, vmax=vmax_field_contour,
linewidths=linewidths_contour)
if contour_labels:
axes.clabel(plot_field_contour, fontsize=10)
cax_contour = divider.append_axes("bottom", size="5%", pad=0.1)
if norm_field_contour:
vmin_field_contour=None
vmax_field_contour=None
norm_contour=norm_field_contour
else:
norm_contour= Normalize(vmin=vmin_field_contour, vmax=vmax_field_contour)
sm_contour= plt.cm.ScalarMappable(norm=norm_contour, cmap = plot_field_contour.cmap)
sm_contour.set_array([])
cbar_field_contour = plt.colorbar(sm_contour, orientation='horizontal',ticks=levels_field_contour,cax=cax_contour)
cbar_field_contour.ax.set_xlabel(label_field_contour)
cbar_field_contour.set_clim(vmin_field_contour, vmax_field_contour)
for i_row, row in track.iterrows():
cell = int(row['cell'])
if cell==cell_i:
color='darkred'
else:
color='darkorange'
cell_string=' '+str(int(row['cell']))
axes.text((row['projection_x_coordinate']-x_pos)/1000,
(row['projection_y_coordinate']-y_pos)/1000,
cell_string,color=color,fontsize=6, clip_on=True)
# Plot marker for tracked cell centre as a cross
axes.plot((row['projection_x_coordinate']-x_pos)/1000,
(row['projection_y_coordinate']-y_pos)/1000,
'x', color=color,markersize=4)
#Create surface projection of mask for the respective cell and plot it in the right color
z_coord = 'model_level_number'
if len(mask_total.shape)==3:
mask_total_i_surface = mask_cell_surface(mask_total, cell, track, masked=False, z_coord=z_coord)
elif len(mask_total.shape)==2:
mask_total_i_surface=mask_total
axes.contour((mask_total_i_surface.coord('projection_x_coordinate').points-x_pos)/1000,
(mask_total_i_surface.coord('projection_y_coordinate').points-y_pos)/1000,
mask_total_i_surface.data,
                     levels=[0, cell], colors=color, linestyles=':', linewidths=1)
if cog is not None:
for i_row, row in cog.iterrows():
cell = row['cell']
if cell==cell_i:
color='darkred'
else:
color='darkorange'
# plot marker for centre of gravity as a circle
axes.plot((row['x_M']-x_pos)/1000, (row['y_M']-y_pos)/1000,
'o', markeredgecolor=color, markerfacecolor='None',markersize=4)
if features is not None:
for i_row, row in features.iterrows():
color='purple'
axes.plot((row['projection_x_coordinate']-x_pos)/1000,
(row['projection_y_coordinate']-y_pos)/1000,
'+', color=color,markersize=3)
axes.set_xlabel('x (km)')
axes.set_ylabel('y (km)')
axes.set_xlim([-1*width/1000, width/1000])
axes.set_ylim([-1*width/1000, width/1000])
axes.xaxis.set_label_position('top')
axes.xaxis.set_ticks_position('top')
axes.set_title(title,pad=35,fontsize=10,horizontalalignment='left',loc='left')
return axes
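# Hypothetical illustration (not part of tobac) of the coordinate transform used in
# plot_mask_cell_individual_follow: projection coordinates in metres are shifted to the
# tracked cell centre and converted to kilometres before plotting.
def _example_centre_relative_km():
    '''Minimal sketch: shift projection coordinates to a cell centre and convert m -> km.'''
    import numpy as np
    x_points = np.array([10000., 12000., 14000.])  # projection_x_coordinate in m
    x_pos = 12000.                                 # cell centre in m
    return (x_points - x_pos) / 1000               # array([-2., 0., 2.]) in km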
def plot_mask_cell_track_static(cell,track, cog, features, mask_total,
field_contour, field_filled,
width=10000,n_extend=1,
name= 'test', plotdir='./',
file_format=['png'],figsize=(10/2.54, 10/2.54),dpi=300,
**kwargs):
    '''Make plots for all cells in a fixed frame covering the entire development of the cell, with one background field shown filled and one as contours
    Input:
    Output:
    '''
from iris import Constraint
from numpy import unique
import os
track_cell=track[track['cell']==cell]
x_min=track_cell['projection_x_coordinate'].min()-width
x_max=track_cell['projection_x_coordinate'].max()+width
y_min=track_cell['projection_y_coordinate'].min()-width
y_max=track_cell['projection_y_coordinate'].max()+width
#set up looping over time based on mask's time coordinate to allow for one timestep before and after the track
time_coord=mask_total.coord('time')
time=time_coord.units.num2date(time_coord.points)
i_start=max(0,np.where(time==track_cell['time'].values[0])[0][0]-n_extend)
i_end=min(len(time)-1,np.where(time==track_cell['time'].values[-1])[0][0]+n_extend+1)
time_cell=time[slice(i_start,i_end)]
for time_i in time_cell:
# for i_row,row in track_cell.iterrows():
# time_i=row['time']
# constraint_time = Constraint(time=row['time'])
constraint_time = Constraint(time=time_i)
constraint_x = Constraint(projection_x_coordinate = lambda cell: x_min < cell < x_max)
constraint_y = Constraint(projection_y_coordinate = lambda cell: y_min < cell < y_max)
constraint = constraint_time & constraint_x & constraint_y
mask_total_i=mask_total.extract(constraint)
if field_contour is None:
field_contour_i=None
else:
field_contour_i=field_contour.extract(constraint)
if field_filled is None:
field_filled_i=None
else:
field_filled_i=field_filled.extract(constraint)
track_i=track[track['time']==time_i]
cells_mask=list(unique(mask_total_i.core_data()))
track_cells=track_i.loc[(track_i['projection_x_coordinate'] > x_min) & (track_i['projection_x_coordinate'] < x_max) & (track_i['projection_y_coordinate'] > y_min) & (track_i['projection_y_coordinate'] < y_max)]
cells_track=list(track_cells['cell'].values)
cells=list(set( cells_mask + cells_track ))
if cell not in cells:
cells.append(cell)
if 0 in cells:
cells.remove(0)
track_i=track_i[track_i['cell'].isin(cells)]
if cog is None:
cog_i=None
else:
cog_i=cog[cog['cell'].isin(cells)]
cog_i=cog_i[cog_i['time']==time_i]
if features is None:
features_i=None
else:
features_i=features[features['time']==time_i]
fig1, ax1 = plt.subplots(ncols=1, nrows=1, figsize=figsize)
fig1.subplots_adjust(left=0.2, bottom=0.15, right=0.80, top=0.85)
datestring_stamp = time_i.strftime('%Y-%m-%d %H:%M:%S')
if time_i in track_cell['time'].values:
time_cell_i=track_cell[track_cell['time'].values==time_i]['time_cell']
celltime_stamp = "%02d:%02d:%02d" % (time_cell_i.dt.total_seconds() // 3600,
(time_cell_i.dt.total_seconds() % 3600) // 60,
time_cell_i.dt.total_seconds() % 60 )
else:
celltime_stamp=' - '
title=datestring_stamp + ' , ' + celltime_stamp
datestring_file = time_i.strftime('%Y-%m-%d_%H%M%S')
ax1=plot_mask_cell_individual_static(cell_i=cell,
track=track_i, cog=cog_i,features=features_i,
mask_total=mask_total_i,
field_contour=field_contour_i, field_filled=field_filled_i,
xlim=[x_min/1000,x_max/1000],ylim=[y_min/1000,y_max/1000],
axes=ax1,title=title,**kwargs)
out_dir = os.path.join(plotdir, name)
os.makedirs(out_dir, exist_ok=True)
if 'png' in file_format:
savepath_png = os.path.join(out_dir, name + '_' + datestring_file + '.png')
fig1.savefig(savepath_png, dpi=dpi)
logging.debug('Mask static plot saved to ' + savepath_png)
if 'pdf' in file_format:
savepath_pdf = os.path.join(out_dir, name + '_' + datestring_file + '.pdf')
fig1.savefig(savepath_pdf, dpi=dpi)
logging.debug('Mask static plot saved to ' + savepath_pdf)
plt.close()
plt.clf()
def plot_mask_cell_individual_static(cell_i,track, cog, features, mask_total,
field_contour, field_filled,
axes=None,xlim=None,ylim=None,
label_field_contour=None, cmap_field_contour='Blues',norm_field_contour=None,
linewidths_contour=0.8,contour_labels=False,
vmin_field_contour=0,vmax_field_contour=50,levels_field_contour=None,nlevels_field_contour=10,
label_field_filled=None,cmap_field_filled='summer',norm_field_filled=None,
vmin_field_filled=0,vmax_field_filled=100,levels_field_filled=None,nlevels_field_filled=10,
title=None,feature_number=False
):
    '''Make a plot for one cell in a fixed frame, with one background field shown filled and one as contours
    Input:
    Output:
    '''
import numpy as np
from .utils import mask_features,mask_features_surface
from mpl_toolkits.axes_grid1 import make_axes_locatable
from matplotlib.colors import Normalize
divider = make_axes_locatable(axes)
if field_filled is not None:
if levels_field_filled is None:
levels_field_filled=np.linspace(vmin_field_filled,vmax_field_filled, 10)
plot_field_filled = axes.contourf(field_filled.coord('projection_x_coordinate').points/1000,
field_filled.coord('projection_y_coordinate').points/1000,
field_filled.data,
levels=levels_field_filled, norm=norm_field_filled,
cmap=cmap_field_filled, vmin=vmin_field_filled, vmax=vmax_field_filled)
cax_filled = divider.append_axes("right", size="5%", pad=0.1)
norm_filled= Normalize(vmin=vmin_field_filled, vmax=vmax_field_filled)
sm1= plt.cm.ScalarMappable(norm=norm_filled, cmap = plot_field_filled.cmap)
sm1.set_array([])
cbar_field_filled = plt.colorbar(sm1, orientation='vertical',cax=cax_filled)
cbar_field_filled.ax.set_ylabel(label_field_filled)
cbar_field_filled.set_clim(vmin_field_filled, vmax_field_filled)
if field_contour is not None:
if levels_field_contour is None:
levels_field_contour=np.linspace(vmin_field_contour, vmax_field_contour, 5)
plot_field_contour = axes.contour(field_contour.coord('projection_x_coordinate').points/1000,
field_contour.coord('projection_y_coordinate').points/1000,
field_contour.data,
cmap=cmap_field_contour,norm=norm_field_contour,
levels=levels_field_contour,vmin=vmin_field_contour, vmax=vmax_field_contour,
linewidths=linewidths_contour)
if contour_labels:
axes.clabel(plot_field_contour, fontsize=10)
cax_contour = divider.append_axes("bottom", size="5%", pad=0.1)
if norm_field_contour:
vmin_field_contour=None
vmax_field_contour=None
norm_contour=norm_field_contour
else:
norm_contour= Normalize(vmin=vmin_field_contour, vmax=vmax_field_contour)
sm_contour= plt.cm.ScalarMappable(norm=norm_contour, cmap = plot_field_contour.cmap)
sm_contour.set_array([])
cbar_field_contour = plt.colorbar(sm_contour, orientation='horizontal',ticks=levels_field_contour,cax=cax_contour)
cbar_field_contour.ax.set_xlabel(label_field_contour)
cbar_field_contour.set_clim(vmin_field_contour, vmax_field_contour)
for i_row, row in track.iterrows():
cell = row['cell']
feature = row['feature']
# logging.debug("cell: "+ str(row['cell']))
# logging.debug("feature: "+ str(row['feature']))
if cell==cell_i:
color='darkred'
if feature_number:
cell_string=' '+str(int(cell))+' ('+str(int(feature))+')'
else:
cell_string=' '+str(int(cell))
elif np.isnan(cell):
color='gray'
if feature_number:
cell_string=' '+'('+str(int(feature))+')'
else:
cell_string=' '
else:
color='darkorange'
if feature_number:
cell_string=' '+str(int(cell))+' ('+str(int(feature))+')'
else:
cell_string=' '+str(int(cell))
axes.text(row['projection_x_coordinate']/1000,
row['projection_y_coordinate']/1000,
cell_string,color=color,fontsize=6, clip_on=True)
# Plot marker for tracked cell centre as a cross
axes.plot(row['projection_x_coordinate']/1000,
row['projection_y_coordinate']/1000,
'x', color=color,markersize=4)
#Create surface projection of mask for the respective cell and plot it in the right color
z_coord = 'model_level_number'
if len(mask_total.shape)==3:
mask_total_i_surface = mask_features_surface(mask_total, feature, masked=False, z_coord=z_coord)
elif len(mask_total.shape)==2:
mask_total_i_surface=mask_features(mask_total, feature, masked=False, z_coord=z_coord)
axes.contour(mask_total_i_surface.coord('projection_x_coordinate').points/1000,
mask_total_i_surface.coord('projection_y_coordinate').points/1000,
mask_total_i_surface.data,
                     levels=[0, feature], colors=color, linestyles=':', linewidths=1)
if cog is not None:
for i_row, row in cog.iterrows():
cell = row['cell']
if cell==cell_i:
color='darkred'
else:
color='darkorange'
# plot marker for centre of gravity as a circle
axes.plot(row['x_M']/1000, row['y_M']/1000,
'o', markeredgecolor=color, markerfacecolor='None',markersize=4)
if features is not None:
for i_row, row in features.iterrows():
color='purple'
axes.plot(row['projection_x_coordinate']/1000,
row['projection_y_coordinate']/1000,
'+', color=color,markersize=3)
axes.set_xlabel('x (km)')
axes.set_ylabel('y (km)')
axes.set_xlim(xlim)
axes.set_ylim(ylim)
axes.xaxis.set_label_position('top')
axes.xaxis.set_ticks_position('top')
axes.set_title(title,pad=35,fontsize=10,horizontalalignment='left',loc='left')
return axes
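# Hypothetical, self-contained sketch (not part of tobac) of the colorbar pattern used in
# plot_mask_cell_individual_static: an axes divider provides a slim colorbar axes and a
# ScalarMappable with an explicit Normalize supplies the colour scale.
def _example_divider_colorbar():
    '''Minimal sketch: contourf with a colorbar in an appended axes.'''
    import numpy as np
    import matplotlib.pyplot as plt
    from mpl_toolkits.axes_grid1 import make_axes_locatable
    from matplotlib.colors import Normalize
    fig, ax = plt.subplots()
    data = np.random.rand(10, 10) * 100
    cf = ax.contourf(data, levels=np.linspace(0, 100, 10), cmap='summer')
    cax = make_axes_locatable(ax).append_axes("right", size="5%", pad=0.1)
    sm = plt.cm.ScalarMappable(norm=Normalize(vmin=0, vmax=100), cmap=cf.cmap)
    sm.set_array([])
    cbar = plt.colorbar(sm, orientation='vertical', cax=cax)
    cbar.ax.set_ylabel('filled field')
    return fig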
def plot_mask_cell_track_2D3Dstatic(cell,track, cog, features, mask_total,
field_contour, field_filled,
width=10000,n_extend=1,
name= 'test', plotdir='./',
file_format=['png'],figsize=(10/2.54, 10/2.54),dpi=300,
ele=10,azim=30,
**kwargs):
    '''Make plots for all cells in a fixed frame covering the entire development of the cell, with a 2D panel and a 3D panel, one background field shown filled and one as contours
    Input:
    Output:
    '''
from iris import Constraint
from numpy import unique
import os
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.gridspec as gridspec
track_cell=track[track['cell']==cell]
x_min=track_cell['projection_x_coordinate'].min()-width
x_max=track_cell['projection_x_coordinate'].max()+width
y_min=track_cell['projection_y_coordinate'].min()-width
y_max=track_cell['projection_y_coordinate'].max()+width
#set up looping over time based on mask's time coordinate to allow for one timestep before and after the track
time_coord=mask_total.coord('time')
time=time_coord.units.num2date(time_coord.points)
i_start=max(0,np.where(time==track_cell['time'].values[0])[0][0]-n_extend)
i_end=min(len(time)-1,np.where(time==track_cell['time'].values[-1])[0][0]+n_extend+1)
time_cell=time[slice(i_start,i_end)]
for time_i in time_cell:
# for i_row,row in track_cell.iterrows():
# time_i=row['time']
# constraint_time = Constraint(time=row['time'])
constraint_time = Constraint(time=time_i)
constraint_x = Constraint(projection_x_coordinate = lambda cell: x_min < cell < x_max)
constraint_y = Constraint(projection_y_coordinate = lambda cell: y_min < cell < y_max)
constraint = constraint_time & constraint_x & constraint_y
mask_total_i=mask_total.extract(constraint)
if field_contour is None:
field_contour_i=None
else:
field_contour_i=field_contour.extract(constraint)
if field_filled is None:
field_filled_i=None
else:
field_filled_i=field_filled.extract(constraint)
track_i=track[track['time']==time_i]
cells_mask=list(unique(mask_total_i.core_data()))
track_cells=track_i.loc[(track_i['projection_x_coordinate'] > x_min) & (track_i['projection_x_coordinate'] < x_max) & (track_i['projection_y_coordinate'] > y_min) & (track_i['projection_y_coordinate'] < y_max)]
cells_track=list(track_cells['cell'].values)
cells=list(set( cells_mask + cells_track ))
if cell not in cells:
cells.append(cell)
if 0 in cells:
cells.remove(0)
track_i=track_i[track_i['cell'].isin(cells)]
if cog is None:
cog_i=None
else:
cog_i=cog[cog['cell'].isin(cells)]
cog_i=cog_i[cog_i['time']==time_i]
if features is None:
features_i=None
else:
features_i=features[features['time']==time_i]
fig1=plt.figure(figsize=(20 / 2.54, 10 / 2.54))
fig1.subplots_adjust(left=0.1, bottom=0.15, right=0.9, top=0.9,wspace=0.3, hspace=0.25)
# make two subplots for figure:
gs1 = gridspec.GridSpec(1, 2,width_ratios=[1,1.2])
fig1.add_subplot(gs1[0])
fig1.add_subplot(gs1[1], projection='3d')
ax1 = fig1.get_axes()
datestring_stamp = time_i.strftime('%Y-%m-%d %H:%M:%S')
if time_i in track_cell['time'].values:
time_cell_i=track_cell[track_cell['time'].values==time_i]['time_cell']
celltime_stamp = "%02d:%02d:%02d" % (time_cell_i.dt.total_seconds() // 3600,
(time_cell_i.dt.total_seconds() % 3600) // 60,
time_cell_i.dt.total_seconds() % 60 )
else:
celltime_stamp=' - '
title=datestring_stamp + ' , ' + celltime_stamp
datestring_file = time_i.strftime('%Y-%m-%d_%H%M%S')
ax1[0]=plot_mask_cell_individual_static(cell_i=cell,
track=track_i, cog=cog_i,features=features_i,
mask_total=mask_total_i,
field_contour=field_contour_i, field_filled=field_filled_i,
xlim=[x_min/1000,x_max/1000],ylim=[y_min/1000,y_max/1000],
axes=ax1[0],title=title,**kwargs)
ax1[1]=plot_mask_cell_individual_3Dstatic(cell_i=cell,
track=track_i, cog=cog_i,features=features_i,
mask_total=mask_total_i,
field_contour=field_contour_i, field_filled=field_filled_i,
xlim=[x_min/1000,x_max/1000],ylim=[y_min/1000,y_max/1000],
axes=ax1[1],title=title,
ele=ele,azim=azim,
**kwargs)
out_dir = os.path.join(plotdir, name)
os.makedirs(out_dir, exist_ok=True)
if 'png' in file_format:
savepath_png = os.path.join(out_dir, name + '_' + datestring_file + '.png')
fig1.savefig(savepath_png, dpi=dpi)
logging.debug('Mask static 2d/3D plot saved to ' + savepath_png)
if 'pdf' in file_format:
savepath_pdf = os.path.join(out_dir, name + '_' + datestring_file + '.pdf')
fig1.savefig(savepath_pdf, dpi=dpi)
logging.debug('Mask static 2d/3D plot saved to ' + savepath_pdf)
plt.close()
plt.clf()
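# Hypothetical sketch (not part of tobac) of the figure layout used in
# plot_mask_cell_track_2D3Dstatic: a GridSpec with one regular 2D panel on the left and
# one 3D panel on the right.
def _example_2d3d_layout():
    '''Minimal sketch: side-by-side 2D and 3D axes via GridSpec.'''
    import matplotlib.pyplot as plt
    import matplotlib.gridspec as gridspec
    from mpl_toolkits.mplot3d import Axes3D  # noqa: F401 (registers the 3d projection)
    fig = plt.figure(figsize=(20 / 2.54, 10 / 2.54))
    gs = gridspec.GridSpec(1, 2, width_ratios=[1, 1.2])
    ax_2d = fig.add_subplot(gs[0])
    ax_3d = fig.add_subplot(gs[1], projection='3d')
    ax_3d.view_init(elev=10, azim=30)
    return fig, (ax_2d, ax_3d)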
def plot_mask_cell_track_3Dstatic(cell,track, cog, features, mask_total,
field_contour, field_filled,
width=10000,n_extend=1,
name= 'test', plotdir='./',
file_format=['png'],figsize=(10/2.54, 10/2.54),dpi=300,
**kwargs):
    '''Make 3D plots for all cells in a fixed frame covering the entire development of the cell, with one background field shown filled and one as contours
    Input:
    Output:
    '''
from iris import Constraint
from numpy import unique
import os
from mpl_toolkits.mplot3d import Axes3D
track_cell=track[track['cell']==cell]
x_min=track_cell['projection_x_coordinate'].min()-width
x_max=track_cell['projection_x_coordinate'].max()+width
y_min=track_cell['projection_y_coordinate'].min()-width
y_max=track_cell['projection_y_coordinate'].max()+width
#set up looping over time based on mask's time coordinate to allow for one timestep before and after the track
time_coord=mask_total.coord('time')
time=time_coord.units.num2date(time_coord.points)
i_start=max(0,np.where(time==track_cell['time'].values[0])[0][0]-n_extend)
i_end=min(len(time)-1,np.where(time==track_cell['time'].values[-1])[0][0]+n_extend+1)
time_cell=time[slice(i_start,i_end)]
for time_i in time_cell:
# for i_row,row in track_cell.iterrows():
# time_i=row['time']
# constraint_time = Constraint(time=row['time'])
constraint_time = Constraint(time=time_i)
constraint_x = Constraint(projection_x_coordinate = lambda cell: x_min < cell < x_max)
constraint_y = Constraint(projection_y_coordinate = lambda cell: y_min < cell < y_max)
constraint = constraint_time & constraint_x & constraint_y
mask_total_i=mask_total.extract(constraint)
if field_contour is None:
field_contour_i=None
else:
field_contour_i=field_contour.extract(constraint)
if field_filled is None:
field_filled_i=None
else:
field_filled_i=field_filled.extract(constraint)
track_i=track[track['time']==time_i]
cells_mask=list(unique(mask_total_i.core_data()))
track_cells=track_i.loc[(track_i['projection_x_coordinate'] > x_min) & (track_i['projection_x_coordinate'] < x_max) & (track_i['projection_y_coordinate'] > y_min) & (track_i['projection_y_coordinate'] < y_max)]
cells_track=list(track_cells['cell'].values)
cells=list(set( cells_mask + cells_track ))
if cell not in cells:
cells.append(cell)
if 0 in cells:
cells.remove(0)
track_i=track_i[track_i['cell'].isin(cells)]
if cog is None:
cog_i=None
else:
cog_i=cog[cog['cell'].isin(cells)]
cog_i=cog_i[cog_i['time']==time_i]
if features is None:
features_i=None
else:
features_i=features[features['time']==time_i]
# fig1, ax1 = plt.subplots(ncols=1, nrows=1, figsize=figsize)
# fig1.subplots_adjust(left=0.2, bottom=0.15, right=0.80, top=0.85)
fig1, ax1 = plt.subplots(ncols=1, nrows=1, figsize=(10/2.54, 10/2.54), subplot_kw={'projection': '3d'})
datestring_stamp = time_i.strftime('%Y-%m-%d %H:%M:%S')
if time_i in track_cell['time'].values:
time_cell_i=track_cell[track_cell['time'].values==time_i]['time_cell']
celltime_stamp = "%02d:%02d:%02d" % (time_cell_i.dt.total_seconds() // 3600,
(time_cell_i.dt.total_seconds() % 3600) // 60,
time_cell_i.dt.total_seconds() % 60 )
else:
celltime_stamp=' - '
title=datestring_stamp + ' , ' + celltime_stamp
datestring_file = time_i.strftime('%Y-%m-%d_%H%M%S')
ax1=plot_mask_cell_individual_3Dstatic(cell_i=cell,
track=track_i, cog=cog_i,features=features_i,
mask_total=mask_total_i,
field_contour=field_contour_i, field_filled=field_filled_i,
xlim=[x_min/1000,x_max/1000],ylim=[y_min/1000,y_max/1000],
axes=ax1,title=title,**kwargs)
out_dir = os.path.join(plotdir, name)
os.makedirs(out_dir, exist_ok=True)
if 'png' in file_format:
savepath_png = os.path.join(out_dir, name + '_' + datestring_file + '.png')
fig1.savefig(savepath_png, dpi=dpi)
logging.debug('Mask static plot saved to ' + savepath_png)
if 'pdf' in file_format:
savepath_pdf = os.path.join(out_dir, name + '_' + datestring_file + '.pdf')
fig1.savefig(savepath_pdf, dpi=dpi)
logging.debug('Mask static plot saved to ' + savepath_pdf)
plt.close()
plt.clf()
def plot_mask_cell_individual_3Dstatic(cell_i,track, cog, features, mask_total,
field_contour, field_filled,
axes=None,xlim=None,ylim=None,
label_field_contour=None, cmap_field_contour='Blues',norm_field_contour=None,
linewidths_contour=0.8,contour_labels=False,
vmin_field_contour=0,vmax_field_contour=50,levels_field_contour=None,nlevels_field_contour=10,
label_field_filled=None,cmap_field_filled='summer',norm_field_filled=None,
vmin_field_filled=0,vmax_field_filled=100,levels_field_filled=None,nlevels_field_filled=10,
title=None,feature_number=False,
ele=10.,azim=210.
):
    '''Make a 3D plot for one cell in a fixed frame, with one background field shown filled and one as contours
    Input:
    Output:
    '''
import numpy as np
from .utils import mask_features,mask_features_surface
# from mpl_toolkits.axes_grid1 import make_axes_locatable
# from matplotlib.colors import Normalize
from mpl_toolkits.mplot3d import Axes3D
axes.view_init(elev=ele, azim=azim)
axes.grid(b=False)
axes.set_frame_on(False)
# make the panes transparent
axes.xaxis.set_pane_color((1.0, 1.0, 1.0, 0.0))
axes.yaxis.set_pane_color((1.0, 1.0, 1.0, 0.0))
axes.zaxis.set_pane_color((1.0, 1.0, 1.0, 0.0))
# make the grid lines transparent
axes.xaxis._axinfo["grid"]['color'] = (1,1,1,0)
axes.yaxis._axinfo["grid"]['color'] = (1,1,1,0)
axes.zaxis._axinfo["grid"]['color'] = (1,1,1,0)
if title is not None:
axes.set_title(title,horizontalalignment='left',loc='left')
# colors_mask = ['pink','darkred', 'orange', 'darkred', 'red', 'darkorange']
x = mask_total.coord('projection_x_coordinate').points
y = mask_total.coord('projection_y_coordinate').points
z = mask_total.coord('model_level_number').points
# z = mask_total.coord('geopotential_height').points
zz, yy, xx = np.meshgrid(z, y, x, indexing='ij')
# z_alt = mask_total.coord('geopotential_height').points
# divider = make_axes_locatable(axes)
# if field_filled is not None:
# if levels_field_filled is None:
# levels_field_filled=np.linspace(vmin_field_filled,vmax_field_filled, 10)
# plot_field_filled = axes.contourf(field_filled.coord('projection_x_coordinate').points/1000,
# field_filled.coord('projection_y_coordinate').points/1000,
# field_filled.data,
# levels=levels_field_filled, norm=norm_field_filled,
# cmap=cmap_field_filled, vmin=vmin_field_filled, vmax=vmax_field_filled)
# cax_filled = divider.append_axes("right", size="5%", pad=0.1)
# norm_filled= Normalize(vmin=vmin_field_filled, vmax=vmax_field_filled)
# sm1= plt.cm.ScalarMappable(norm=norm_filled, cmap = plot_field_filled.cmap)
# sm1.set_array([])
# cbar_field_filled = plt.colorbar(sm1, orientation='vertical',cax=cax_filled)
# cbar_field_filled.ax.set_ylabel(label_field_filled)
# cbar_field_filled.set_clim(vmin_field_filled, vmax_field_filled)
# if field_contour is not None:
# if levels_field_contour is None:
# levels_field_contour=np.linspace(vmin_field_contour, vmax_field_contour, 5)
# plot_field_contour = axes.contour(field_contour.coord('projection_x_coordinate').points/1000,
# field_contour.coord('projection_y_coordinate').points/1000,
# field_contour.data,
# cmap=cmap_field_contour,norm=norm_field_contour,
# levels=levels_field_contour,vmin=vmin_field_contour, vmax=vmax_field_contour,
# linewidths=linewidths_contour)
# if contour_labels:
# axes.clabel(plot_field_contour, fontsize=10)
# cax_contour = divider.append_axes("bottom", size="5%", pad=0.1)
# if norm_field_contour:
# vmin_field_contour=None
# vmax_field_contour=None
# norm_contour=norm_field_contour
# else:
# norm_contour= Normalize(vmin=vmin_field_contour, vmax=vmax_field_contour)
#
# sm_contour= plt.cm.ScalarMappable(norm=norm_contour, cmap = plot_field_contour.cmap)
# sm_contour.set_array([])
#
# cbar_field_contour = plt.colorbar(sm_contour, orientation='horizontal',ticks=levels_field_contour,cax=cax_contour)
# cbar_field_contour.ax.set_xlabel(label_field_contour)
# cbar_field_contour.set_clim(vmin_field_contour, vmax_field_contour)
#
for i_row, row in track.iterrows():
cell = row['cell']
feature = row['feature']
# logging.debug("cell: "+ str(row['cell']))
# logging.debug("feature: "+ str(row['feature']))
if cell==cell_i:
color='darkred'
if feature_number:
cell_string=' '+str(int(cell))+' ('+str(int(feature))+')'
else:
cell_string=' '+str(int(cell))
elif np.isnan(cell):
color='gray'
if feature_number:
cell_string=' '+'('+str(int(feature))+')'
else:
cell_string=' '
else:
color='darkorange'
if feature_number:
cell_string=' '+str(int(cell))+' ('+str(int(feature))+')'
else:
cell_string=' '+str(int(cell))
# axes.text(row['projection_x_coordinate']/1000,
# row['projection_y_coordinate']/1000,
# 0,
# cell_string,color=color,fontsize=6, clip_on=True)
# # Plot marker for tracked cell centre as a cross
# axes.plot(row['projection_x_coordinate']/1000,
# row['projection_y_coordinate']/1000,
# 0,
# 'x', color=color,markersize=4)
#Create surface projection of mask for the respective cell and plot it in the right color
# z_coord = 'model_level_number'
# if len(mask_total.shape)==3:
# mask_total_i_surface = mask_features_surface(mask_total, feature, masked=False, z_coord=z_coord)
# elif len(mask_total.shape)==2:
# mask_total_i_surface=mask_features(mask_total, feature, masked=False, z_coord=z_coord)
# axes.contour(mask_total_i_surface.coord('projection_x_coordinate').points/1000,
# mask_total_i_surface.coord('projection_y_coordinate').points/1000,
# 0,
# mask_total_i_surface.data,
# levels=[0, feature], colors=color, linestyles=':',linewidth=1)
mask_feature = mask_total.data == feature
axes.scatter(
# xx[mask_feature]/1000, yy[mask_feature]/1000, zz[mask_feature]/1000,
xx[mask_feature]/1000, yy[mask_feature]/1000, zz[mask_feature],
c=color, marker=',',
s=5,#60000.0 * TWC_i[Mask_particle],
alpha=0.3, cmap='inferno', label=cell_string,rasterized=True)
axes.set_xlim(xlim)
axes.set_ylim(ylim)
axes.set_zlim([0, 100])
# axes.set_zlim([0, 20])
# axes.set_zticks([0, 5,10,15, 20])
axes.set_xlabel('x (km)')
axes.set_ylabel('y (km)')
axes.zaxis.set_rotate_label(False) # disable automatic rotation
# axes.set_zlabel('z (km)', rotation=90)
axes.set_zlabel('model level', rotation=90)
return axes
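# Hypothetical sketch (not part of tobac) of the 3D mask rendering used in
# plot_mask_cell_individual_3Dstatic: the (z, y, x) grid is expanded with meshgrid and
# only the points belonging to one feature are scattered.
def _example_mask_scatter_3d():
    '''Minimal sketch: scatter the True points of a 3D boolean mask.'''
    import numpy as np
    import matplotlib.pyplot as plt
    from mpl_toolkits.mplot3d import Axes3D  # noqa: F401 (registers the 3d projection)
    z = np.arange(5)
    y = np.arange(4) * 1000.   # projection_y_coordinate in m
    x = np.arange(6) * 1000.   # projection_x_coordinate in m
    zz, yy, xx = np.meshgrid(z, y, x, indexing='ij')
    mask_feature = np.zeros((5, 4, 6), dtype=bool)
    mask_feature[1:3, 1:3, 2:4] = True
    fig = plt.figure()
    ax = fig.add_subplot(111, projection='3d')
    ax.scatter(xx[mask_feature] / 1000, yy[mask_feature] / 1000, zz[mask_feature],
               c='darkred', marker=',', s=5, alpha=0.3, rasterized=True)
    return fig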
def plot_mask_cell_track_static_timeseries(cell,track, cog, features, mask_total,
field_contour, field_filled,
track_variable=None,variable=None,variable_ylabel=None,variable_label=[None],variable_legend=False,variable_color=None,
width=10000,n_extend=1,
name= 'test', plotdir='./',
file_format=['png'],figsize=(20/2.54, 10/2.54),dpi=300,
**kwargs):
    '''Make plots for all cells in a fixed frame covering the entire development of the cell, with one background field shown filled and one as contours, plus a time series of cell-track variables in a second panel
    Input:
    Output:
    '''
from iris import Constraint
from numpy import unique
import os
import pandas as pd
track_cell=track[track['cell']==cell]
x_min=track_cell['projection_x_coordinate'].min()-width
x_max=track_cell['projection_x_coordinate'].max()+width
y_min=track_cell['projection_y_coordinate'].min()-width
y_max=track_cell['projection_y_coordinate'].max()+width
time_min=track_cell['time'].min()
# time_max=track_cell['time'].max()
track_variable_cell=track_variable[track_variable['cell']==cell]
track_variable_cell['time_cell']=pd.to_timedelta(track_variable_cell['time_cell'])
# track_variable_cell=track_variable_cell[(track_variable_cell['time']>=time_min) & (track_variable_cell['time']<=time_max)]
#set up looping over time based on mask's time coordinate to allow for one timestep before and after the track
time_coord=mask_total.coord('time')
time=time_coord.units.num2date(time_coord.points)
i_start=max(0,np.where(time==track_cell['time'].values[0])[0][0]-n_extend)
i_end=min(len(time)-1,np.where(time==track_cell['time'].values[-1])[0][0]+n_extend+1)
time_cell=time[slice(i_start,i_end)]
for time_i in time_cell:
constraint_time = Constraint(time=time_i)
constraint_x = Constraint(projection_x_coordinate = lambda cell: x_min < cell < x_max)
constraint_y = Constraint(projection_y_coordinate = lambda cell: y_min < cell < y_max)
constraint = constraint_time & constraint_x & constraint_y
mask_total_i=mask_total.extract(constraint)
if field_contour is None:
field_contour_i=None
else:
field_contour_i=field_contour.extract(constraint)
if field_filled is None:
field_filled_i=None
else:
field_filled_i=field_filled.extract(constraint)
track_i=track[track['time']==time_i]
cells_mask=list(unique(mask_total_i.core_data()))
track_cells=track_i.loc[(track_i['projection_x_coordinate'] > x_min) & (track_i['projection_x_coordinate'] < x_max) & (track_i['projection_y_coordinate'] > y_min) & (track_i['projection_y_coordinate'] < y_max)]
cells_track=list(track_cells['cell'].values)
cells=list(set( cells_mask + cells_track ))
if cell not in cells:
cells.append(cell)
if 0 in cells:
cells.remove(0)
track_i=track_i[track_i['cell'].isin(cells)]
if cog is None:
cog_i=None
else:
cog_i=cog[cog['cell'].isin(cells)]
cog_i=cog_i[cog_i['time']==time_i]
if features is None:
features_i=None
else:
features_i=features[features['time']==time_i]
fig1, ax1 = plt.subplots(ncols=2, nrows=1, figsize=figsize)
fig1.subplots_adjust(left=0.1, bottom=0.15, right=0.90, top=0.85,wspace=0.3)
datestring_stamp = time_i.strftime('%Y-%m-%d %H:%M:%S')
if time_i in track_cell['time'].values:
time_cell_i=track_cell[track_cell['time'].values==time_i]['time_cell']
celltime_stamp = "%02d:%02d:%02d" % (time_cell_i.dt.total_seconds() // 3600,
(time_cell_i.dt.total_seconds() % 3600) // 60,
time_cell_i.dt.total_seconds() % 60 )
else:
celltime_stamp=' - '
title=celltime_stamp + ' , ' + datestring_stamp
datestring_file = time_i.strftime('%Y-%m-%d_%H%M%S')
# plot evolving timeseries of variable to second axis:
ax1[0]=plot_mask_cell_individual_static(cell_i=cell,
track=track_i, cog=cog_i,features=features_i,
mask_total=mask_total_i,
field_contour=field_contour_i, field_filled=field_filled_i,
xlim=[x_min/1000,x_max/1000],ylim=[y_min/1000,y_max/1000],
axes=ax1[0],title=title,**kwargs)
track_variable_past=track_variable_cell[(track_variable_cell['time']>=time_min) & (track_variable_cell['time']<=time_i)]
track_variable_current=track_variable_cell[track_variable_cell['time']==time_i]
if variable_color is None:
variable_color='navy'
if type(variable) is str:
# logging.debug('variable: '+str(variable))
if type(variable_color) is str:
variable_color={variable:variable_color}
variable=[variable]
for i_variable,variable_i in enumerate(variable):
color=variable_color[variable_i]
ax1[1].plot(track_variable_past['time_cell'].dt.total_seconds()/ 60.,track_variable_past[variable_i].values,color=color,linestyle='-',label=variable_label[i_variable])
ax1[1].plot(track_variable_current['time_cell'].dt.total_seconds()/ 60.,track_variable_current[variable_i].values,color=color,marker='o',markersize=4,fillstyle='full')
ax1[1].yaxis.tick_right()
ax1[1].yaxis.set_label_position("right")
ax1[1].set_xlim([0,2*60])
ax1[1].set_xticks(np.arange(0,120,15))
ax1[1].set_ylim([0,max(10,1.25*track_variable_cell[variable].max().max())])
ax1[1].set_xlabel('cell lifetime (min)')
if variable_ylabel==None:
variable_ylabel=variable
ax1[1].set_ylabel(variable_ylabel)
ax1[1].set_title(title)
# insert legend, if flag is True
if variable_legend:
if (len(variable_label)<5):
ncol=1
else:
ncol=2
ax1[1].legend(loc='upper right', bbox_to_anchor=(1, 1),ncol=ncol,fontsize=8)
out_dir = os.path.join(plotdir, name)
os.makedirs(out_dir, exist_ok=True)
if 'png' in file_format:
savepath_png = os.path.join(out_dir, name + '_' + datestring_file + '.png')
fig1.savefig(savepath_png, dpi=dpi)
logging.debug('Mask static plot saved to ' + savepath_png)
if 'pdf' in file_format:
savepath_pdf = os.path.join(out_dir, name + '_' + datestring_file + '.pdf')
fig1.savefig(savepath_pdf, dpi=dpi)
logging.debug('Mask static plot saved to ' + savepath_pdf)
plt.close()
plt.clf()
def map_tracks(track,axis_extent=None,figsize=(10,10),axes=None):
for cell in track['cell'].dropna().unique():
track_i=track[track['cell']==cell]
axes.plot(track_i['longitude'],track_i['latitude'],'-')
if axis_extent:
axes.set_extent(axis_extent)
axes=make_map(axes)
return axes
def make_map(axes):
import matplotlib.ticker as mticker
import cartopy.crs as ccrs
from cartopy.mpl.gridliner import LONGITUDE_FORMATTER, LATITUDE_FORMATTER
gl = axes.gridlines(crs=ccrs.PlateCarree(), draw_labels=True,
linewidth=2, color='gray', alpha=0.5, linestyle='-')
axes.coastlines('10m')
gl.xlabels_top = False
gl.ylabels_right = False
gl.xlocator = mticker.MaxNLocator(nbins=5,min_n_ticks=3,steps=None)
gl.ylocator = mticker.MaxNLocator(nbins=5,min_n_ticks=3,steps=None)
gl.xformatter = LONGITUDE_FORMATTER
gl.yformatter = LATITUDE_FORMATTER
#gl.xlabel_style = {'size': 15, 'color': 'gray'}
#gl.xlabel_style = {'color': 'red', 'weight': 'bold'}
return axes
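# Hypothetical usage sketch (not part of tobac): map_tracks expects a cartopy GeoAxes and a
# track DataFrame with 'cell', 'longitude' and 'latitude' columns, as produced by the
# tracking step; make_map then adds gridlines and coastlines (fetched from Natural Earth
# on first use).
def _example_map_tracks():
    '''Minimal sketch: draw two short tracks on a PlateCarree map.'''
    import pandas as pd
    import matplotlib.pyplot as plt
    import cartopy.crs as ccrs
    track = pd.DataFrame({'cell': [1, 1, 2, 2],
                          'longitude': [10.0, 10.5, 11.0, 11.2],
                          'latitude': [50.0, 50.2, 50.5, 50.6]})
    fig = plt.figure(figsize=(10, 10))
    ax = fig.add_subplot(111, projection=ccrs.PlateCarree())
    ax = map_tracks(track, axis_extent=[9, 12, 49, 51], axes=ax)
    return fig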
def plot_lifetime_histogram(track,axes=None,bin_edges=np.arange(0,200,20),density=False,**kwargs):
hist, bin_edges,bin_centers = lifetime_histogram(track,bin_edges=bin_edges,density=density)
plot_hist=axes.plot(bin_centers, hist,**kwargs)
return plot_hist
def plot_lifetime_histogram_bar(track,axes=None,bin_edges=np.arange(0,200,20),density=False,width_bar=1,shift=0.5,**kwargs):
hist, bin_edges, bin_centers = lifetime_histogram(track,bin_edges=bin_edges,density=density)
plot_hist=axes.bar(bin_centers+shift,hist,width=width_bar,**kwargs)
return plot_hist
def plot_histogram_cellwise(track,bin_edges,variable,quantity,axes=None,density=False,**kwargs):
hist, bin_edges,bin_centers = histogram_cellwise(track,bin_edges=bin_edges,variable=variable,quantity=quantity,density=density)
plot_hist=axes.plot(bin_centers, hist,**kwargs)
return plot_hist
def plot_histogram_featurewise(Track,bin_edges,variable,axes=None,density=False,**kwargs):
hist, bin_edges, bin_centers = histogram_featurewise(Track,bin_edges=bin_edges,variable=variable,density=density)
plot_hist=axes.plot(bin_centers, hist,**kwargs)
return plot_hist
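# Hypothetical usage sketch (not part of tobac) for the histogram helpers above; it assumes
# a track DataFrame carrying 'cell' and 'time_cell' (Timedelta) columns, which is an
# assumption based on what lifetime_histogram in .analysis is written against.
def _example_lifetime_histogram_plot():
    '''Minimal sketch: plot a cell-lifetime histogram for a toy track.'''
    import numpy as np
    import pandas as pd
    import matplotlib.pyplot as plt
    track = pd.DataFrame({'cell': [1, 1, 1, 2, 2],
                          'time_cell': pd.to_timedelta([0, 10, 20, 0, 30], unit='m')})
    fig, ax = plt.subplots()
    plot_lifetime_histogram(track, axes=ax, bin_edges=np.arange(0, 200, 20))
    ax.set_xlabel('cell lifetime (min)')
    ax.set_ylabel('counts')
    return fig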
# filename: tobac/plotting.py (gh_stars: 10-100)
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as plt
import logging
from .analysis import lifetime_histogram
from .analysis import histogram_cellwise,histogram_featurewise
import numpy as np
def plot_tracks_mask_field_loop(track,field,mask,features,axes=None,name=None,plot_dir='./',
figsize=(10./2.54,10./2.54),dpi=300,
margin_left=0.05,margin_right=0.05,margin_bottom=0.05,margin_top=0.05,
**kwargs):
import cartopy.crs as ccrs
import os
from iris import Constraint
os.makedirs(plot_dir,exist_ok=True)
time=mask.coord('time')
if name is None:
name=field.name()
for time_i in time.points:
datetime_i=time.units.num2date(time_i)
constraint_time = Constraint(time=datetime_i)
fig1,ax1=plt.subplots(ncols=1, nrows=1,figsize=figsize, subplot_kw={'projection': ccrs.PlateCarree()})
datestring_file=datetime_i.strftime('%Y-%m-%d_%H:%M:%S')
field_i=field.extract(constraint_time)
mask_i=mask.extract(constraint_time)
track_i=track[track['time']==datetime_i]
features_i=features[features['time']==datetime_i]
ax1=plot_tracks_mask_field(track=track_i,field=field_i,mask=mask_i,features=features_i,
axes=ax1,**kwargs)
fig1.subplots_adjust(left=margin_left, bottom=margin_bottom, right=1-margin_right, top=1-margin_top)
os.makedirs(plot_dir, exist_ok=True)
savepath_png=os.path.join(plot_dir,name+'_'+datestring_file+'.png')
fig1.savefig(savepath_png,dpi=dpi)
logging.debug('Figure plotted to ' + str(savepath_png))
plt.close()
def plot_tracks_mask_field(track,field,mask,features,axes=None,axis_extent=None,
plot_outline=True,
plot_marker=True,marker_track='x',markersize_track=4,
plot_number=True,
plot_features=False,marker_feature=None,markersize_feature=None,
title=None,title_str=None,
vmin=None,vmax=None,n_levels=50,
cmap='viridis',extend='neither',
orientation_colorbar='horizontal',pad_colorbar=0.05,
label_colorbar=None,fraction_colorbar=0.046,
rasterized=True,linewidth_contour=1
):
import cartopy
from cartopy.mpl.gridliner import LONGITUDE_FORMATTER, LATITUDE_FORMATTER
import iris.plot as iplt
from matplotlib.ticker import MaxNLocator
import cartopy.feature as cfeature
from .utils import mask_features,mask_features_surface
from matplotlib import ticker
    if not isinstance(axes, cartopy.mpl.geoaxes.GeoAxesSubplot):
        raise ValueError('axes must be a cartopy.mpl.geoaxes.GeoAxesSubplot')
datestr=field.coord('time').units.num2date(field.coord('time').points[0]).strftime('%Y-%m-%d %H:%M:%S')
    if title == 'datestr':
        if title_str is None:
            titlestring=datestr
        elif isinstance(title_str, str):
            titlestring=title_str + ' ' + datestr
        else:
            titlestring=datestr
        axes.set_title(titlestring,horizontalalignment='left',loc='left')
gl = axes.gridlines(draw_labels=True)
majorLocator = MaxNLocator(nbins=5,steps=[1,2,5,10])
gl.xlocator=majorLocator
gl.ylocator=majorLocator
gl.xformatter = LONGITUDE_FORMATTER
axes.tick_params(axis='both', which='major')
gl.yformatter = LATITUDE_FORMATTER
gl.xlabels_top = False
gl.ylabels_right = False
axes.coastlines('10m')
# rivers=cfeature.NaturalEarthFeature(category='physical', name='rivers_lake_centerlines',scale='10m',facecolor='none')
lakes=cfeature.NaturalEarthFeature(category='physical', name='lakes',scale='10m',facecolor='none')
axes.add_feature(lakes, edgecolor='black')
axes.set_xlabel('longitude')
axes.set_ylabel('latitude')
# Plot the background field
if np.any(~np.isnan(field.data)): # check if field to plot is not only nan, which causes error:
plot_field=iplt.contourf(field,coords=['longitude','latitude'],
levels=np.linspace(vmin,vmax,num=n_levels),extend=extend,
axes=axes,
cmap=cmap,vmin=vmin,vmax=vmax,zorder=1
)
if rasterized:
axes.set_rasterization_zorder(1)
# create colorbar for background field:
cbar=plt.colorbar(plot_field,orientation=orientation_colorbar, pad=pad_colorbar,fraction=fraction_colorbar,ax=axes)
if label_colorbar is None:
label_colorbar=field.name()+ '('+field.units.symbol +')'
        if orientation_colorbar == 'horizontal':
            cbar.ax.set_xlabel(label_colorbar)
        elif orientation_colorbar == 'vertical':
cbar.ax.set_ylabel(label_colorbar)
tick_locator = ticker.MaxNLocator(nbins=5)
cbar.locator = tick_locator
cbar.update_ticks()
colors_mask=['darkred','orange','crimson','red','darkorange']
#if marker_feature is not explicitly given, set it to marker_track (will then be overwritten by the coloured markers)
    if marker_feature is None:
        marker_feature=marker_track
    if markersize_feature is None:
        markersize_feature=markersize_track
#Plot the identified features by looping over rows of DataFrame:
if plot_features:
for i_row,row in features.iterrows():
            axes.plot(row['longitude'],row['latitude'],
                      color='grey',marker=marker_feature,markersize=markersize_feature)
    # restrict tracked features to those inside the axis extent
track=track.loc[(track['longitude'] > axis_extent[0])
& (track['longitude'] < axis_extent[1])
& (track['latitude'] > axis_extent[2])
& (track['latitude'] < axis_extent[3])]
#Plot tracked features by looping over rows of Dataframe
for i_row,row in track.iterrows():
feature=row['feature']
cell=row['cell']
if not np.isnan(cell):
color=colors_mask[int(cell%len(colors_mask))]
if plot_number:
cell_string=' '+str(int(row['cell']))
axes.text(row['longitude'],row['latitude'],cell_string,
color=color,fontsize=6, clip_on=True)
else:
color='grey'
if plot_outline:
mask_i=None
# if mask is 3D, create surface projection, if mask is 2D keep the mask
if mask.ndim==2:
mask_i=mask_features(mask,feature,masked=False)
elif mask.ndim==3:
mask_i=mask_features_surface(mask,feature,masked=False,z_coord='model_level_number')
else:
raise ValueError('mask has shape that cannot be understood')
            # plot contour lines around the edges of the mask
iplt.contour(mask_i,coords=['longitude','latitude'],
levels=[0,feature],
colors=color,linewidths=linewidth_contour,
axes=axes)
if plot_marker:
axes.plot(row['longitude'],row['latitude'],
color=color,marker=marker_track,markersize=markersize_track)
axes.set_extent(axis_extent)
return axes
def animation_mask_field(track,features,field,mask,interval=500,figsize=(10,10),**kwargs):
import cartopy.crs as ccrs
import matplotlib.pyplot as plt
import matplotlib.animation
from iris import Constraint
fig=plt.figure(figsize=figsize)
plt.close()
def update(time_in):
fig.clf()
ax=fig.add_subplot(111,projection=ccrs.PlateCarree())
constraint_time = Constraint(time=time_in)
field_i=field.extract(constraint_time)
mask_i=mask.extract(constraint_time)
track_i=track[track['time']==time_in]
features_i=features[features['time']==time_in]
#fig1,ax1=plt.subplots(ncols=1, nrows=1,figsize=figsize, subplot_kw={'projection': ccrs.PlateCarree()})
plot_tobac=plot_tracks_mask_field(track_i,field=field_i,mask=mask_i,features=features_i,
axes=ax,
**kwargs)
ax.set_title('{}'.format(time_in))
time=field.coord('time')
datetimes=time.units.num2date(time.points)
animation = matplotlib.animation.FuncAnimation(fig, update,init_func=None, frames=datetimes,interval=interval, blit=False)
return animation
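# Hypothetical usage sketch (not part of tobac): animation_mask_field returns a
# matplotlib FuncAnimation built lazily from the cube's time coordinate, so saving is
# left to the caller and requires an installed writer (e.g. ffmpeg for mp4, pillow for
# gif). The data objects below (track, features, field, mask) are the caller's own
# tracking output and iris cubes.
#
#   anim = animation_mask_field(track, features, field, mask, interval=500,
#                               axis_extent=axis_extent, vmin=0, vmax=100)
#   anim.save('tracks.mp4', dpi=200)            # requires ffmpeg
#   # or: anim.save('tracks.gif', writer='pillow')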
def plot_mask_cell_track_follow(cell,track, cog, features, mask_total,
field_contour, field_filled,
width=10000,
name= 'test', plotdir='./',
file_format=['png'],figsize=(10/2.54, 10/2.54),dpi=300,
**kwargs):
    '''Make plots for all cells, with the frame centred on the cell, with one background field shown filled and one as contours
    Input:
    Output:
    '''
from iris import Constraint
from numpy import unique
import os
track_cell=track[track['cell']==cell]
for i_row,row in track_cell.iterrows():
constraint_time = Constraint(time=row['time'])
constraint_x = Constraint(projection_x_coordinate = lambda cell: row['projection_x_coordinate']-width < cell < row['projection_x_coordinate']+width)
constraint_y = Constraint(projection_y_coordinate = lambda cell: row['projection_y_coordinate']-width < cell < row['projection_y_coordinate']+width)
constraint = constraint_time & constraint_x & constraint_y
mask_total_i=mask_total.extract(constraint)
if field_contour is None:
field_contour_i=None
else:
field_contour_i=field_contour.extract(constraint)
if field_filled is None:
field_filled_i=None
else:
field_filled_i=field_filled.extract(constraint)
cells=list(unique(mask_total_i.core_data()))
if cell not in cells:
cells.append(cell)
if 0 in cells:
cells.remove(0)
track_i=track[track['cell'].isin(cells)]
track_i=track_i[track_i['time']==row['time']]
if cog is None:
cog_i=None
else:
cog_i=cog[cog['cell'].isin(cells)]
cog_i=cog_i[cog_i['time']==row['time']]
if features is None:
features_i=None
else:
features_i=features[features['time']==row['time']]
fig1, ax1 = plt.subplots(ncols=1, nrows=1, figsize=figsize)
fig1.subplots_adjust(left=0.2, bottom=0.15, right=0.85, top=0.80)
datestring_stamp = row['time'].strftime('%Y-%m-%d %H:%M:%S')
celltime_stamp = "%02d:%02d:%02d" % (row['time_cell'].dt.total_seconds() // 3600,(row['time_cell'].dt.total_seconds() % 3600) // 60, row['time_cell'].dt.total_seconds() % 60 )
title=datestring_stamp + ' , ' + celltime_stamp
datestring_file = row['time'].strftime('%Y-%m-%d_%H%M%S')
ax1=plot_mask_cell_individual_follow(cell_i=cell,track=track_i, cog=cog_i,features=features_i,
mask_total=mask_total_i,
field_contour=field_contour_i, field_filled=field_filled_i,
width=width,
axes=ax1,title=title,
**kwargs)
out_dir = os.path.join(plotdir, name)
os.makedirs(out_dir, exist_ok=True)
if 'png' in file_format:
savepath_png = os.path.join(out_dir, name + '_' + datestring_file + '.png')
fig1.savefig(savepath_png, dpi=dpi)
logging.debug('field_contour field_filled Mask plot saved to ' + savepath_png)
if 'pdf' in file_format:
savepath_pdf = os.path.join(out_dir, name + '_' + datestring_file + '.pdf')
fig1.savefig(savepath_pdf, dpi=dpi)
logging.debug('field_contour field_filled Mask plot saved to ' + savepath_pdf)
plt.close()
plt.clf()
def plot_mask_cell_individual_follow(cell_i,track, cog,features, mask_total,
field_contour, field_filled,
axes=None,width=10000,
label_field_contour=None, cmap_field_contour='Blues',norm_field_contour=None,
linewidths_contour=0.8,contour_labels=False,
vmin_field_contour=0,vmax_field_contour=50,levels_field_contour=None,nlevels_field_contour=10,
label_field_filled=None,cmap_field_filled='summer',norm_field_filled=None,
vmin_field_filled=0,vmax_field_filled=100,levels_field_filled=None,nlevels_field_filled=10,
title=None
):
    '''Make an individual plot for one cell, centred on the cell, with one background field shown filled and one as contours
    Input:
    Output:
    '''
import numpy as np
from .utils import mask_cell_surface
from mpl_toolkits.axes_grid1 import make_axes_locatable
from matplotlib.colors import Normalize
divider = make_axes_locatable(axes)
x_pos=track[track['cell']==cell_i]['projection_x_coordinate'].item()
y_pos=track[track['cell']==cell_i]['projection_y_coordinate'].item()
if field_filled is not None:
if levels_field_filled is None:
levels_field_filled=np.linspace(vmin_field_filled,vmax_field_filled, nlevels_field_filled)
plot_field_filled = axes.contourf((field_filled.coord('projection_x_coordinate').points-x_pos)/1000,
(field_filled.coord('projection_y_coordinate').points-y_pos)/1000,
field_filled.data,
cmap=cmap_field_filled,norm=norm_field_filled,
levels=levels_field_filled,vmin=vmin_field_filled, vmax=vmax_field_filled)
cax_filled = divider.append_axes("right", size="5%", pad=0.1)
norm_filled= Normalize(vmin=vmin_field_filled, vmax=vmax_field_filled)
sm_filled= plt.cm.ScalarMappable(norm=norm_filled, cmap = plot_field_filled.cmap)
sm_filled.set_array([])
cbar_field_filled = plt.colorbar(sm_filled, orientation='vertical',cax=cax_filled)
cbar_field_filled.ax.set_ylabel(label_field_filled)
cbar_field_filled.set_clim(vmin_field_filled, vmax_field_filled)
if field_contour is not None:
if levels_field_contour is None:
levels_field_contour=np.linspace(vmin_field_contour, vmax_field_contour, nlevels_field_contour)
if norm_field_contour:
            vmin_field_contour=None
            vmax_field_contour=None
plot_field_contour = axes.contour((field_contour.coord('projection_x_coordinate').points-x_pos)/1000,
(field_contour.coord('projection_y_coordinate').points-y_pos)/1000,
field_contour.data,
cmap=cmap_field_contour,norm=norm_field_contour,
levels=levels_field_contour,vmin=vmin_field_contour, vmax=vmax_field_contour,
linewidths=linewidths_contour)
if contour_labels:
axes.clabel(plot_field_contour, fontsize=10)
cax_contour = divider.append_axes("bottom", size="5%", pad=0.1)
if norm_field_contour:
vmin_field_contour=None
vmax_field_contour=None
norm_contour=norm_field_contour
else:
norm_contour= Normalize(vmin=vmin_field_contour, vmax=vmax_field_contour)
sm_contour= plt.cm.ScalarMappable(norm=norm_contour, cmap = plot_field_contour.cmap)
sm_contour.set_array([])
cbar_field_contour = plt.colorbar(sm_contour, orientation='horizontal',ticks=levels_field_contour,cax=cax_contour)
cbar_field_contour.ax.set_xlabel(label_field_contour)
cbar_field_contour.set_clim(vmin_field_contour, vmax_field_contour)
for i_row, row in track.iterrows():
cell = int(row['cell'])
if cell==cell_i:
color='darkred'
else:
color='darkorange'
cell_string=' '+str(int(row['cell']))
axes.text((row['projection_x_coordinate']-x_pos)/1000,
(row['projection_y_coordinate']-y_pos)/1000,
cell_string,color=color,fontsize=6, clip_on=True)
# Plot marker for tracked cell centre as a cross
axes.plot((row['projection_x_coordinate']-x_pos)/1000,
(row['projection_y_coordinate']-y_pos)/1000,
'x', color=color,markersize=4)
#Create surface projection of mask for the respective cell and plot it in the right color
z_coord = 'model_level_number'
if len(mask_total.shape)==3:
mask_total_i_surface = mask_cell_surface(mask_total, cell, track, masked=False, z_coord=z_coord)
elif len(mask_total.shape)==2:
mask_total_i_surface=mask_total
axes.contour((mask_total_i_surface.coord('projection_x_coordinate').points-x_pos)/1000,
(mask_total_i_surface.coord('projection_y_coordinate').points-y_pos)/1000,
mask_total_i_surface.data,
                     levels=[0, cell], colors=color, linestyles=':', linewidths=1)
if cog is not None:
for i_row, row in cog.iterrows():
cell = row['cell']
if cell==cell_i:
color='darkred'
else:
color='darkorange'
# plot marker for centre of gravity as a circle
axes.plot((row['x_M']-x_pos)/1000, (row['y_M']-y_pos)/1000,
'o', markeredgecolor=color, markerfacecolor='None',markersize=4)
if features is not None:
for i_row, row in features.iterrows():
color='purple'
axes.plot((row['projection_x_coordinate']-x_pos)/1000,
(row['projection_y_coordinate']-y_pos)/1000,
'+', color=color,markersize=3)
axes.set_xlabel('x (km)')
axes.set_ylabel('y (km)')
axes.set_xlim([-1*width/1000, width/1000])
axes.set_ylim([-1*width/1000, width/1000])
axes.xaxis.set_label_position('top')
axes.xaxis.set_ticks_position('top')
axes.set_title(title,pad=35,fontsize=10,horizontalalignment='left',loc='left')
return axes
def plot_mask_cell_track_static(cell,track, cog, features, mask_total,
field_contour, field_filled,
width=10000,n_extend=1,
name= 'test', plotdir='./',
file_format=['png'],figsize=(10/2.54, 10/2.54),dpi=300,
**kwargs):
    '''Make plots for all cells in a fixed frame covering the entire development of the cell, with one background field shown filled and one as contours
    Input:
    Output:
    '''
from iris import Constraint
from numpy import unique
import os
track_cell=track[track['cell']==cell]
x_min=track_cell['projection_x_coordinate'].min()-width
x_max=track_cell['projection_x_coordinate'].max()+width
y_min=track_cell['projection_y_coordinate'].min()-width
y_max=track_cell['projection_y_coordinate'].max()+width
#set up looping over time based on mask's time coordinate to allow for one timestep before and after the track
time_coord=mask_total.coord('time')
time=time_coord.units.num2date(time_coord.points)
i_start=max(0,np.where(time==track_cell['time'].values[0])[0][0]-n_extend)
i_end=min(len(time)-1,np.where(time==track_cell['time'].values[-1])[0][0]+n_extend+1)
time_cell=time[slice(i_start,i_end)]
for time_i in time_cell:
# for i_row,row in track_cell.iterrows():
# time_i=row['time']
# constraint_time = Constraint(time=row['time'])
constraint_time = Constraint(time=time_i)
constraint_x = Constraint(projection_x_coordinate = lambda cell: x_min < cell < x_max)
constraint_y = Constraint(projection_y_coordinate = lambda cell: y_min < cell < y_max)
constraint = constraint_time & constraint_x & constraint_y
mask_total_i=mask_total.extract(constraint)
if field_contour is None:
field_contour_i=None
else:
field_contour_i=field_contour.extract(constraint)
if field_filled is None:
field_filled_i=None
else:
field_filled_i=field_filled.extract(constraint)
track_i=track[track['time']==time_i]
cells_mask=list(unique(mask_total_i.core_data()))
track_cells=track_i.loc[(track_i['projection_x_coordinate'] > x_min) & (track_i['projection_x_coordinate'] < x_max) & (track_i['projection_y_coordinate'] > y_min) & (track_i['projection_y_coordinate'] < y_max)]
cells_track=list(track_cells['cell'].values)
cells=list(set( cells_mask + cells_track ))
if cell not in cells:
cells.append(cell)
if 0 in cells:
cells.remove(0)
track_i=track_i[track_i['cell'].isin(cells)]
if cog is None:
cog_i=None
else:
cog_i=cog[cog['cell'].isin(cells)]
cog_i=cog_i[cog_i['time']==time_i]
if features is None:
features_i=None
else:
features_i=features[features['time']==time_i]
fig1, ax1 = plt.subplots(ncols=1, nrows=1, figsize=figsize)
fig1.subplots_adjust(left=0.2, bottom=0.15, right=0.80, top=0.85)
datestring_stamp = time_i.strftime('%Y-%m-%d %H:%M:%S')
if time_i in track_cell['time'].values:
time_cell_i=track_cell[track_cell['time'].values==time_i]['time_cell']
celltime_stamp = "%02d:%02d:%02d" % (time_cell_i.dt.total_seconds() // 3600,
(time_cell_i.dt.total_seconds() % 3600) // 60,
time_cell_i.dt.total_seconds() % 60 )
else:
celltime_stamp=' - '
title=datestring_stamp + ' , ' + celltime_stamp
datestring_file = time_i.strftime('%Y-%m-%d_%H%M%S')
ax1=plot_mask_cell_individual_static(cell_i=cell,
track=track_i, cog=cog_i,features=features_i,
mask_total=mask_total_i,
field_contour=field_contour_i, field_filled=field_filled_i,
xlim=[x_min/1000,x_max/1000],ylim=[y_min/1000,y_max/1000],
axes=ax1,title=title,**kwargs)
out_dir = os.path.join(plotdir, name)
os.makedirs(out_dir, exist_ok=True)
if 'png' in file_format:
savepath_png = os.path.join(out_dir, name + '_' + datestring_file + '.png')
fig1.savefig(savepath_png, dpi=dpi)
logging.debug('Mask static plot saved to ' + savepath_png)
if 'pdf' in file_format:
savepath_pdf = os.path.join(out_dir, name + '_' + datestring_file + '.pdf')
fig1.savefig(savepath_pdf, dpi=dpi)
logging.debug('Mask static plot saved to ' + savepath_pdf)
plt.close()
plt.clf()
def plot_mask_cell_individual_static(cell_i,track, cog, features, mask_total,
field_contour, field_filled,
axes=None,xlim=None,ylim=None,
label_field_contour=None, cmap_field_contour='Blues',norm_field_contour=None,
linewidths_contour=0.8,contour_labels=False,
vmin_field_contour=0,vmax_field_contour=50,levels_field_contour=None,nlevels_field_contour=10,
label_field_filled=None,cmap_field_filled='summer',norm_field_filled=None,
vmin_field_filled=0,vmax_field_filled=100,levels_field_filled=None,nlevels_field_filled=10,
title=None,feature_number=False
):
    '''Make a plot for one cell in a fixed frame, with one background field shown filled and one as contours
    Input:
    Output:
    '''
import numpy as np
from .utils import mask_features,mask_features_surface
from mpl_toolkits.axes_grid1 import make_axes_locatable
from matplotlib.colors import Normalize
divider = make_axes_locatable(axes)
if field_filled is not None:
if levels_field_filled is None:
levels_field_filled=np.linspace(vmin_field_filled,vmax_field_filled, 10)
plot_field_filled = axes.contourf(field_filled.coord('projection_x_coordinate').points/1000,
field_filled.coord('projection_y_coordinate').points/1000,
field_filled.data,
levels=levels_field_filled, norm=norm_field_filled,
cmap=cmap_field_filled, vmin=vmin_field_filled, vmax=vmax_field_filled)
cax_filled = divider.append_axes("right", size="5%", pad=0.1)
norm_filled= Normalize(vmin=vmin_field_filled, vmax=vmax_field_filled)
sm1= plt.cm.ScalarMappable(norm=norm_filled, cmap = plot_field_filled.cmap)
sm1.set_array([])
cbar_field_filled = plt.colorbar(sm1, orientation='vertical',cax=cax_filled)
cbar_field_filled.ax.set_ylabel(label_field_filled)
cbar_field_filled.set_clim(vmin_field_filled, vmax_field_filled)
if field_contour is not None:
if levels_field_contour is None:
levels_field_contour=np.linspace(vmin_field_contour, vmax_field_contour, 5)
plot_field_contour = axes.contour(field_contour.coord('projection_x_coordinate').points/1000,
field_contour.coord('projection_y_coordinate').points/1000,
field_contour.data,
cmap=cmap_field_contour,norm=norm_field_contour,
levels=levels_field_contour,vmin=vmin_field_contour, vmax=vmax_field_contour,
linewidths=linewidths_contour)
if contour_labels:
axes.clabel(plot_field_contour, fontsize=10)
cax_contour = divider.append_axes("bottom", size="5%", pad=0.1)
if norm_field_contour:
vmin_field_contour=None
vmax_field_contour=None
norm_contour=norm_field_contour
else:
norm_contour= Normalize(vmin=vmin_field_contour, vmax=vmax_field_contour)
sm_contour= plt.cm.ScalarMappable(norm=norm_contour, cmap = plot_field_contour.cmap)
sm_contour.set_array([])
cbar_field_contour = plt.colorbar(sm_contour, orientation='horizontal',ticks=levels_field_contour,cax=cax_contour)
cbar_field_contour.ax.set_xlabel(label_field_contour)
cbar_field_contour.set_clim(vmin_field_contour, vmax_field_contour)
for i_row, row in track.iterrows():
cell = row['cell']
feature = row['feature']
# logging.debug("cell: "+ str(row['cell']))
# logging.debug("feature: "+ str(row['feature']))
if cell==cell_i:
color='darkred'
if feature_number:
cell_string=' '+str(int(cell))+' ('+str(int(feature))+')'
else:
cell_string=' '+str(int(cell))
elif np.isnan(cell):
color='gray'
if feature_number:
cell_string=' '+'('+str(int(feature))+')'
else:
cell_string=' '
else:
color='darkorange'
if feature_number:
cell_string=' '+str(int(cell))+' ('+str(int(feature))+')'
else:
cell_string=' '+str(int(cell))
axes.text(row['projection_x_coordinate']/1000,
row['projection_y_coordinate']/1000,
cell_string,color=color,fontsize=6, clip_on=True)
# Plot marker for tracked cell centre as a cross
axes.plot(row['projection_x_coordinate']/1000,
row['projection_y_coordinate']/1000,
'x', color=color,markersize=4)
#Create surface projection of mask for the respective cell and plot it in the right color
z_coord = 'model_level_number'
if len(mask_total.shape)==3:
mask_total_i_surface = mask_features_surface(mask_total, feature, masked=False, z_coord=z_coord)
elif len(mask_total.shape)==2:
mask_total_i_surface=mask_features(mask_total, feature, masked=False, z_coord=z_coord)
axes.contour(mask_total_i_surface.coord('projection_x_coordinate').points/1000,
mask_total_i_surface.coord('projection_y_coordinate').points/1000,
mask_total_i_surface.data,
levels=[0, feature], colors=color, linestyles=':', linewidths=1)
if cog is not None:
for i_row, row in cog.iterrows():
cell = row['cell']
if cell==cell_i:
color='darkred'
else:
color='darkorange'
# plot marker for centre of gravity as a circle
axes.plot(row['x_M']/1000, row['y_M']/1000,
'o', markeredgecolor=color, markerfacecolor='None',markersize=4)
if features is not None:
for i_row, row in features.iterrows():
color='purple'
axes.plot(row['projection_x_coordinate']/1000,
row['projection_y_coordinate']/1000,
'+', color=color,markersize=3)
axes.set_xlabel('x (km)')
axes.set_ylabel('y (km)')
axes.set_xlim(xlim)
axes.set_ylim(ylim)
axes.xaxis.set_label_position('top')
axes.xaxis.set_ticks_position('top')
axes.set_title(title,pad=35,fontsize=10,horizontalalignment='left',loc='left')
return axes
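# A minimal usage sketch, assuming tobac-style inputs: track_frame, mask_cube and
# w_cube are hypothetical placeholders for a track DataFrame (with 'cell',
# 'feature' and projection coordinate columns) and iris cubes already subset to a
# single timestep; limits and filenames are arbitrary.
def _example_plot_mask_cell_individual_static(track_frame, mask_cube, w_cube):
    import matplotlib.pyplot as plt
    fig, ax = plt.subplots(figsize=(10 / 2.54, 10 / 2.54))
    ax = plot_mask_cell_individual_static(cell_i=1,
                                          track=track_frame, cog=None, features=None,
                                          mask_total=mask_cube,
                                          field_contour=None, field_filled=w_cube,
                                          axes=ax, xlim=[0, 100], ylim=[0, 100],
                                          title='example frame')
    fig.savefig('cell_1_static.png', dpi=300)
    plt.close(fig)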
def plot_mask_cell_track_2D3Dstatic(cell,track, cog, features, mask_total,
field_contour, field_filled,
width=10000,n_extend=1,
name= 'test', plotdir='./',
file_format=['png'],figsize=(10/2.54, 10/2.54),dpi=300,
ele=10,azim=30,
**kwargs):
'''Make plots for one cell over its entire development in a fixed frame (2D map plus 3D view), with one background field as filling and one background field as contours
Input:
Output:
'''
from iris import Constraint
from numpy import unique
import os
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.gridspec as gridspec
track_cell=track[track['cell']==cell]
x_min=track_cell['projection_x_coordinate'].min()-width
x_max=track_cell['projection_x_coordinate'].max()+width
y_min=track_cell['projection_y_coordinate'].min()-width
y_max=track_cell['projection_y_coordinate'].max()+width
#set up looping over time based on mask's time coordinate to allow for one timestep before and after the track
time_coord=mask_total.coord('time')
time=time_coord.units.num2date(time_coord.points)
i_start=max(0,np.where(time==track_cell['time'].values[0])[0][0]-n_extend)
i_end=min(len(time)-1,np.where(time==track_cell['time'].values[-1])[0][0]+n_extend+1)
time_cell=time[slice(i_start,i_end)]
for time_i in time_cell:
# for i_row,row in track_cell.iterrows():
# time_i=row['time']
# constraint_time = Constraint(time=row['time'])
constraint_time = Constraint(time=time_i)
constraint_x = Constraint(projection_x_coordinate = lambda cell: x_min < cell < x_max)
constraint_y = Constraint(projection_y_coordinate = lambda cell: y_min < cell < y_max)
constraint = constraint_time & constraint_x & constraint_y
mask_total_i=mask_total.extract(constraint)
if field_contour is None:
field_contour_i=None
else:
field_contour_i=field_contour.extract(constraint)
if field_filled is None:
field_filled_i=None
else:
field_filled_i=field_filled.extract(constraint)
track_i=track[track['time']==time_i]
cells_mask=list(unique(mask_total_i.core_data()))
track_cells=track_i.loc[(track_i['projection_x_coordinate'] > x_min) & (track_i['projection_x_coordinate'] < x_max) & (track_i['projection_y_coordinate'] > y_min) & (track_i['projection_y_coordinate'] < y_max)]
cells_track=list(track_cells['cell'].values)
cells=list(set( cells_mask + cells_track ))
if cell not in cells:
cells.append(cell)
if 0 in cells:
cells.remove(0)
track_i=track_i[track_i['cell'].isin(cells)]
if cog is None:
cog_i=None
else:
cog_i=cog[cog['cell'].isin(cells)]
cog_i=cog_i[cog_i['time']==time_i]
if features is None:
features_i=None
else:
features_i=features[features['time']==time_i]
fig1=plt.figure(figsize=(20 / 2.54, 10 / 2.54))
fig1.subplots_adjust(left=0.1, bottom=0.15, right=0.9, top=0.9,wspace=0.3, hspace=0.25)
# make two subplots for figure:
gs1 = gridspec.GridSpec(1, 2,width_ratios=[1,1.2])
fig1.add_subplot(gs1[0])
fig1.add_subplot(gs1[1], projection='3d')
ax1 = fig1.get_axes()
datestring_stamp = time_i.strftime('%Y-%m-%d %H:%M:%S')
if time_i in track_cell['time'].values:
time_cell_i=track_cell[track_cell['time'].values==time_i]['time_cell']
celltime_stamp = "%02d:%02d:%02d" % (time_cell_i.dt.total_seconds() // 3600,
(time_cell_i.dt.total_seconds() % 3600) // 60,
time_cell_i.dt.total_seconds() % 60 )
else:
celltime_stamp=' - '
title=datestring_stamp + ' , ' + celltime_stamp
datestring_file = time_i.strftime('%Y-%m-%d_%H%M%S')
ax1[0]=plot_mask_cell_individual_static(cell_i=cell,
track=track_i, cog=cog_i,features=features_i,
mask_total=mask_total_i,
field_contour=field_contour_i, field_filled=field_filled_i,
xlim=[x_min/1000,x_max/1000],ylim=[y_min/1000,y_max/1000],
axes=ax1[0],title=title,**kwargs)
ax1[1]=plot_mask_cell_individual_3Dstatic(cell_i=cell,
track=track_i, cog=cog_i,features=features_i,
mask_total=mask_total_i,
field_contour=field_contour_i, field_filled=field_filled_i,
xlim=[x_min/1000,x_max/1000],ylim=[y_min/1000,y_max/1000],
axes=ax1[1],title=title,
ele=ele,azim=azim,
**kwargs)
out_dir = os.path.join(plotdir, name)
os.makedirs(out_dir, exist_ok=True)
if 'png' in file_format:
savepath_png = os.path.join(out_dir, name + '_' + datestring_file + '.png')
fig1.savefig(savepath_png, dpi=dpi)
logging.debug('Mask static 2d/3D plot saved to ' + savepath_png)
if 'pdf' in file_format:
savepath_pdf = os.path.join(out_dir, name + '_' + datestring_file + '.pdf')
fig1.savefig(savepath_pdf, dpi=dpi)
logging.debug('Mask static 2d/3D plot saved to ' + savepath_pdf)
plt.close()
plt.clf()
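# A minimal usage sketch, assuming tobac-style inputs covering the whole analysis
# domain and period: loop the combined 2D/3D static plot over the lifetime of
# cell 1 and write one PNG per timestep into plots/cell_1/. The names track_df,
# mask_cube, w_cube and precip_cube are hypothetical.
def _example_plot_mask_cell_track_2D3Dstatic(track_df, mask_cube, w_cube, precip_cube):
    plot_mask_cell_track_2D3Dstatic(cell=1, track=track_df, cog=None, features=None,
                                    mask_total=mask_cube,
                                    field_contour=precip_cube, field_filled=w_cube,
                                    width=10000, n_extend=1,
                                    name='cell_1', plotdir='plots',
                                    file_format=['png'], dpi=300,
                                    ele=10, azim=30)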
def plot_mask_cell_track_3Dstatic(cell,track, cog, features, mask_total,
field_contour, field_filled,
width=10000,n_extend=1,
name= 'test', plotdir='./',
file_format=['png'],figsize=(10/2.54, 10/2.54),dpi=300,
**kwargs):
'''Make 3D plots for one cell over its entire development in a fixed frame, with one background field as filling and one background field as contours
Input:
Output:
'''
from iris import Constraint
from numpy import unique
import os
from mpl_toolkits.mplot3d import Axes3D
track_cell=track[track['cell']==cell]
x_min=track_cell['projection_x_coordinate'].min()-width
x_max=track_cell['projection_x_coordinate'].max()+width
y_min=track_cell['projection_y_coordinate'].min()-width
y_max=track_cell['projection_y_coordinate'].max()+width
#set up looping over time based on mask's time coordinate to allow for one timestep before and after the track
time_coord=mask_total.coord('time')
time=time_coord.units.num2date(time_coord.points)
i_start=max(0,np.where(time==track_cell['time'].values[0])[0][0]-n_extend)
i_end=min(len(time)-1,np.where(time==track_cell['time'].values[-1])[0][0]+n_extend+1)
time_cell=time[slice(i_start,i_end)]
for time_i in time_cell:
# for i_row,row in track_cell.iterrows():
# time_i=row['time']
# constraint_time = Constraint(time=row['time'])
constraint_time = Constraint(time=time_i)
constraint_x = Constraint(projection_x_coordinate = lambda cell: x_min < cell < x_max)
constraint_y = Constraint(projection_y_coordinate = lambda cell: y_min < cell < y_max)
constraint = constraint_time & constraint_x & constraint_y
mask_total_i=mask_total.extract(constraint)
if field_contour is None:
field_contour_i=None
else:
field_contour_i=field_contour.extract(constraint)
if field_filled is None:
field_filled_i=None
else:
field_filled_i=field_filled.extract(constraint)
track_i=track[track['time']==time_i]
cells_mask=list(unique(mask_total_i.core_data()))
track_cells=track_i.loc[(track_i['projection_x_coordinate'] > x_min) & (track_i['projection_x_coordinate'] < x_max) & (track_i['projection_y_coordinate'] > y_min) & (track_i['projection_y_coordinate'] < y_max)]
cells_track=list(track_cells['cell'].values)
cells=list(set( cells_mask + cells_track ))
if cell not in cells:
cells.append(cell)
if 0 in cells:
cells.remove(0)
track_i=track_i[track_i['cell'].isin(cells)]
if cog is None:
cog_i=None
else:
cog_i=cog[cog['cell'].isin(cells)]
cog_i=cog_i[cog_i['time']==time_i]
if features is None:
features_i=None
else:
features_i=features[features['time']==time_i]
# fig1, ax1 = plt.subplots(ncols=1, nrows=1, figsize=figsize)
# fig1.subplots_adjust(left=0.2, bottom=0.15, right=0.80, top=0.85)
fig1, ax1 = plt.subplots(ncols=1, nrows=1, figsize=(10/2.54, 10/2.54), subplot_kw={'projection': '3d'})
datestring_stamp = time_i.strftime('%Y-%m-%d %H:%M:%S')
if time_i in track_cell['time'].values:
time_cell_i=track_cell[track_cell['time'].values==time_i]['time_cell']
celltime_stamp = "%02d:%02d:%02d" % (time_cell_i.dt.total_seconds() // 3600,
(time_cell_i.dt.total_seconds() % 3600) // 60,
time_cell_i.dt.total_seconds() % 60 )
else:
celltime_stamp=' - '
title=datestring_stamp + ' , ' + celltime_stamp
datestring_file = time_i.strftime('%Y-%m-%d_%H%M%S')
ax1=plot_mask_cell_individual_3Dstatic(cell_i=cell,
track=track_i, cog=cog_i,features=features_i,
mask_total=mask_total_i,
field_contour=field_contour_i, field_filled=field_filled_i,
xlim=[x_min/1000,x_max/1000],ylim=[y_min/1000,y_max/1000],
axes=ax1,title=title,**kwargs)
out_dir = os.path.join(plotdir, name)
os.makedirs(out_dir, exist_ok=True)
if 'png' in file_format:
savepath_png = os.path.join(out_dir, name + '_' + datestring_file + '.png')
fig1.savefig(savepath_png, dpi=dpi)
logging.debug('Mask static plot saved to ' + savepath_png)
if 'pdf' in file_format:
savepath_pdf = os.path.join(out_dir, name + '_' + datestring_file + '.pdf')
fig1.savefig(savepath_pdf, dpi=dpi)
logging.debug('Mask static plot saved to ' + savepath_pdf)
plt.close()
plt.clf()
def plot_mask_cell_individual_3Dstatic(cell_i,track, cog, features, mask_total,
field_contour, field_filled,
axes=None,xlim=None,ylim=None,
label_field_contour=None, cmap_field_contour='Blues',norm_field_contour=None,
linewidths_contour=0.8,contour_labels=False,
vmin_field_contour=0,vmax_field_contour=50,levels_field_contour=None,nlevels_field_contour=10,
label_field_filled=None,cmap_field_filled='summer',norm_field_filled=None,
vmin_field_filled=0,vmax_field_filled=100,levels_field_filled=None,nlevels_field_filled=10,
title=None,feature_number=False,
ele=10.,azim=210.
):
'''Make 3D plot for one cell in a fixed frame, with one background field as filling and one background field as contours
Input:
Output:
'''
import numpy as np
from .utils import mask_features,mask_features_surface
# from mpl_toolkits.axes_grid1 import make_axes_locatable
# from matplotlib.colors import Normalize
from mpl_toolkits.mplot3d import Axes3D
axes.view_init(elev=ele, azim=azim)
axes.grid(b=False)
axes.set_frame_on(False)
# make the panes transparent
axes.xaxis.set_pane_color((1.0, 1.0, 1.0, 0.0))
axes.yaxis.set_pane_color((1.0, 1.0, 1.0, 0.0))
axes.zaxis.set_pane_color((1.0, 1.0, 1.0, 0.0))
# make the grid lines transparent
axes.xaxis._axinfo["grid"]['color'] = (1,1,1,0)
axes.yaxis._axinfo["grid"]['color'] = (1,1,1,0)
axes.zaxis._axinfo["grid"]['color'] = (1,1,1,0)
if title is not None:
axes.set_title(title,horizontalalignment='left',loc='left')
# colors_mask = ['pink','darkred', 'orange', 'darkred', 'red', 'darkorange']
x = mask_total.coord('projection_x_coordinate').points
y = mask_total.coord('projection_y_coordinate').points
z = mask_total.coord('model_level_number').points
# z = mask_total.coord('geopotential_height').points
zz, yy, xx = np.meshgrid(z, y, x, indexing='ij')
# z_alt = mask_total.coord('geopotential_height').points
# divider = make_axes_locatable(axes)
# if field_filled is not None:
# if levels_field_filled is None:
# levels_field_filled=np.linspace(vmin_field_filled,vmax_field_filled, 10)
# plot_field_filled = axes.contourf(field_filled.coord('projection_x_coordinate').points/1000,
# field_filled.coord('projection_y_coordinate').points/1000,
# field_filled.data,
# levels=levels_field_filled, norm=norm_field_filled,
# cmap=cmap_field_filled, vmin=vmin_field_filled, vmax=vmax_field_filled)
# cax_filled = divider.append_axes("right", size="5%", pad=0.1)
# norm_filled= Normalize(vmin=vmin_field_filled, vmax=vmax_field_filled)
# sm1= plt.cm.ScalarMappable(norm=norm_filled, cmap = plot_field_filled.cmap)
# sm1.set_array([])
# cbar_field_filled = plt.colorbar(sm1, orientation='vertical',cax=cax_filled)
# cbar_field_filled.ax.set_ylabel(label_field_filled)
# cbar_field_filled.set_clim(vmin_field_filled, vmax_field_filled)
# if field_contour is not None:
# if levels_field_contour is None:
# levels_field_contour=np.linspace(vmin_field_contour, vmax_field_contour, 5)
# plot_field_contour = axes.contour(field_contour.coord('projection_x_coordinate').points/1000,
# field_contour.coord('projection_y_coordinate').points/1000,
# field_contour.data,
# cmap=cmap_field_contour,norm=norm_field_contour,
# levels=levels_field_contour,vmin=vmin_field_contour, vmax=vmax_field_contour,
# linewidths=linewidths_contour)
# if contour_labels:
# axes.clabel(plot_field_contour, fontsize=10)
# cax_contour = divider.append_axes("bottom", size="5%", pad=0.1)
# if norm_field_contour:
# vmin_field_contour=None
# vmax_field_contour=None
# norm_contour=norm_field_contour
# else:
# norm_contour= Normalize(vmin=vmin_field_contour, vmax=vmax_field_contour)
#
# sm_contour= plt.cm.ScalarMappable(norm=norm_contour, cmap = plot_field_contour.cmap)
# sm_contour.set_array([])
#
# cbar_field_contour = plt.colorbar(sm_contour, orientation='horizontal',ticks=levels_field_contour,cax=cax_contour)
# cbar_field_contour.ax.set_xlabel(label_field_contour)
# cbar_field_contour.set_clim(vmin_field_contour, vmax_field_contour)
#
for i_row, row in track.iterrows():
cell = row['cell']
feature = row['feature']
# logging.debug("cell: "+ str(row['cell']))
# logging.debug("feature: "+ str(row['feature']))
if cell==cell_i:
color='darkred'
if feature_number:
cell_string=' '+str(int(cell))+' ('+str(int(feature))+')'
else:
cell_string=' '+str(int(cell))
elif np.isnan(cell):
color='gray'
if feature_number:
cell_string=' '+'('+str(int(feature))+')'
else:
cell_string=' '
else:
color='darkorange'
if feature_number:
cell_string=' '+str(int(cell))+' ('+str(int(feature))+')'
else:
cell_string=' '+str(int(cell))
# axes.text(row['projection_x_coordinate']/1000,
# row['projection_y_coordinate']/1000,
# 0,
# cell_string,color=color,fontsize=6, clip_on=True)
# # Plot marker for tracked cell centre as a cross
# axes.plot(row['projection_x_coordinate']/1000,
# row['projection_y_coordinate']/1000,
# 0,
# 'x', color=color,markersize=4)
#Create surface projection of mask for the respective cell and plot it in the right color
# z_coord = 'model_level_number'
# if len(mask_total.shape)==3:
# mask_total_i_surface = mask_features_surface(mask_total, feature, masked=False, z_coord=z_coord)
# elif len(mask_total.shape)==2:
# mask_total_i_surface=mask_features(mask_total, feature, masked=False, z_coord=z_coord)
# axes.contour(mask_total_i_surface.coord('projection_x_coordinate').points/1000,
# mask_total_i_surface.coord('projection_y_coordinate').points/1000,
# 0,
# mask_total_i_surface.data,
# levels=[0, feature], colors=color, linestyles=':',linewidth=1)
mask_feature = mask_total.data == feature
axes.scatter(
# xx[mask_feature]/1000, yy[mask_feature]/1000, zz[mask_feature]/1000,
xx[mask_feature]/1000, yy[mask_feature]/1000, zz[mask_feature],
c=color, marker=',',
s=5,#60000.0 * TWC_i[Mask_particle],
alpha=0.3, cmap='inferno', label=cell_string,rasterized=True)
axes.set_xlim(xlim)
axes.set_ylim(ylim)
axes.set_zlim([0, 100])
# axes.set_zlim([0, 20])
# axes.set_zticks([0, 5,10,15, 20])
axes.set_xlabel('x (km)')
axes.set_ylabel('y (km)')
axes.zaxis.set_rotate_label(False) # disable automatic rotation
# axes.set_zlabel('z (km)', rotation=90)
axes.set_zlabel('model level', rotation=90)
return axes
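# A minimal usage sketch: the 3D variant scatters the feature mask in model-level
# space, so it needs an Axes3D (projection='3d') and a 3D mask cube carrying a
# 'model_level_number' coordinate. track_frame and mask_cube are hypothetical
# tobac-style inputs for a single timestep.
def _example_plot_mask_cell_individual_3Dstatic(track_frame, mask_cube):
    import matplotlib.pyplot as plt
    fig, ax = plt.subplots(figsize=(10 / 2.54, 10 / 2.54), subplot_kw={'projection': '3d'})
    ax = plot_mask_cell_individual_3Dstatic(cell_i=1,
                                            track=track_frame, cog=None, features=None,
                                            mask_total=mask_cube,
                                            field_contour=None, field_filled=None,
                                            axes=ax, xlim=[0, 100], ylim=[0, 100],
                                            title='example frame', ele=10., azim=210.)
    fig.savefig('cell_1_3d.png', dpi=300)
    plt.close(fig)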
def plot_mask_cell_track_static_timeseries(cell,track, cog, features, mask_total,
field_contour, field_filled,
track_variable=None,variable=None,variable_ylabel=None,variable_label=[None],variable_legend=False,variable_color=None,
width=10000,n_extend=1,
name= 'test', plotdir='./',
file_format=['png'],figsize=(20/2.54, 10/2.54),dpi=300,
**kwargs):
'''Make plots for one cell over its entire development in a fixed frame, alongside a time series of tracked cell variables, with one background field as filling and one background field as contours
Input:
Output:
'''
from iris import Constraint
from numpy import unique
import os
import pandas as pd
track_cell=track[track['cell']==cell]
x_min=track_cell['projection_x_coordinate'].min()-width
x_max=track_cell['projection_x_coordinate'].max()+width
y_min=track_cell['projection_y_coordinate'].min()-width
y_max=track_cell['projection_y_coordinate'].max()+width
time_min=track_cell['time'].min()
# time_max=track_cell['time'].max()
track_variable_cell=track_variable[track_variable['cell']==cell]
track_variable_cell['time_cell']=pd.to_timedelta(track_variable_cell['time_cell'])
# track_variable_cell=track_variable_cell[(track_variable_cell['time']>=time_min) & (track_variable_cell['time']<=time_max)]
#set up looping over time based on mask's time coordinate to allow for one timestep before and after the track
time_coord=mask_total.coord('time')
time=time_coord.units.num2date(time_coord.points)
i_start=max(0,np.where(time==track_cell['time'].values[0])[0][0]-n_extend)
i_end=min(len(time)-1,np.where(time==track_cell['time'].values[-1])[0][0]+n_extend+1)
time_cell=time[slice(i_start,i_end)]
for time_i in time_cell:
constraint_time = Constraint(time=time_i)
constraint_x = Constraint(projection_x_coordinate = lambda cell: x_min < cell < x_max)
constraint_y = Constraint(projection_y_coordinate = lambda cell: y_min < cell < y_max)
constraint = constraint_time & constraint_x & constraint_y
mask_total_i=mask_total.extract(constraint)
if field_contour is None:
field_contour_i=None
else:
field_contour_i=field_contour.extract(constraint)
if field_filled is None:
field_filled_i=None
else:
field_filled_i=field_filled.extract(constraint)
track_i=track[track['time']==time_i]
cells_mask=list(unique(mask_total_i.core_data()))
track_cells=track_i.loc[(track_i['projection_x_coordinate'] > x_min) & (track_i['projection_x_coordinate'] < x_max) & (track_i['projection_y_coordinate'] > y_min) & (track_i['projection_y_coordinate'] < y_max)]
cells_track=list(track_cells['cell'].values)
cells=list(set( cells_mask + cells_track ))
if cell not in cells:
cells.append(cell)
if 0 in cells:
cells.remove(0)
track_i=track_i[track_i['cell'].isin(cells)]
if cog is None:
cog_i=None
else:
cog_i=cog[cog['cell'].isin(cells)]
cog_i=cog_i[cog_i['time']==time_i]
if features is None:
features_i=None
else:
features_i=features[features['time']==time_i]
fig1, ax1 = plt.subplots(ncols=2, nrows=1, figsize=figsize)
fig1.subplots_adjust(left=0.1, bottom=0.15, right=0.90, top=0.85,wspace=0.3)
datestring_stamp = time_i.strftime('%Y-%m-%d %H:%M:%S')
if time_i in track_cell['time'].values:
time_cell_i=track_cell[track_cell['time'].values==time_i]['time_cell']
celltime_stamp = "%02d:%02d:%02d" % (time_cell_i.dt.total_seconds() // 3600,
(time_cell_i.dt.total_seconds() % 3600) // 60,
time_cell_i.dt.total_seconds() % 60 )
else:
celltime_stamp=' - '
title=celltime_stamp + ' , ' + datestring_stamp
datestring_file = time_i.strftime('%Y-%m-%d_%H%M%S')
# plot evolving timeseries of variable to second axis:
ax1[0]=plot_mask_cell_individual_static(cell_i=cell,
track=track_i, cog=cog_i,features=features_i,
mask_total=mask_total_i,
field_contour=field_contour_i, field_filled=field_filled_i,
xlim=[x_min/1000,x_max/1000],ylim=[y_min/1000,y_max/1000],
axes=ax1[0],title=title,**kwargs)
track_variable_past=track_variable_cell[(track_variable_cell['time']>=time_min) & (track_variable_cell['time']<=time_i)]
track_variable_current=track_variable_cell[track_variable_cell['time']==time_i]
if variable_color is None:
variable_color='navy'
if type(variable) is str:
# logging.debug('variable: '+str(variable))
if type(variable_color) is str:
variable_color={variable:variable_color}
variable=[variable]
for i_variable,variable_i in enumerate(variable):
color=variable_color[variable_i]
ax1[1].plot(track_variable_past['time_cell'].dt.total_seconds()/ 60.,track_variable_past[variable_i].values,color=color,linestyle='-',label=variable_label[i_variable])
ax1[1].plot(track_variable_current['time_cell'].dt.total_seconds()/ 60.,track_variable_current[variable_i].values,color=color,marker='o',markersize=4,fillstyle='full')
ax1[1].yaxis.tick_right()
ax1[1].yaxis.set_label_position("right")
ax1[1].set_xlim([0,2*60])
ax1[1].set_xticks(np.arange(0,120,15))
ax1[1].set_ylim([0,max(10,1.25*track_variable_cell[variable].max().max())])
ax1[1].set_xlabel('cell lifetime (min)')
if variable_ylabel is None:
variable_ylabel=variable
ax1[1].set_ylabel(variable_ylabel)
ax1[1].set_title(title)
# insert legend, if flag is True
if variable_legend:
if (len(variable_label)<5):
ncol=1
else:
ncol=2
ax1[1].legend(loc='upper right', bbox_to_anchor=(1, 1),ncol=ncol,fontsize=8)
out_dir = os.path.join(plotdir, name)
os.makedirs(out_dir, exist_ok=True)
if 'png' in file_format:
savepath_png = os.path.join(out_dir, name + '_' + datestring_file + '.png')
fig1.savefig(savepath_png, dpi=dpi)
logging.debug('Mask static plot saved to ' + savepath_png)
if 'pdf' in file_format:
savepath_pdf = os.path.join(out_dir, name + '_' + datestring_file + '.pdf')
fig1.savefig(savepath_pdf, dpi=dpi)
logging.debug('Mask static plot saved to ' + savepath_pdf)
plt.close()
plt.clf()
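# A minimal usage sketch: static map next to an evolving per-cell time series.
# The variable column name 'max_w' is a placeholder; track_variable only needs
# the columns 'cell', 'time', 'time_cell' plus the plotted variable(s), so the
# track DataFrame itself can be reused for it.
def _example_plot_mask_cell_track_static_timeseries(track_df, mask_cube, w_cube):
    plot_mask_cell_track_static_timeseries(cell=1, track=track_df, cog=None, features=None,
                                           mask_total=mask_cube,
                                           field_contour=None, field_filled=w_cube,
                                           track_variable=track_df, variable='max_w',
                                           variable_ylabel='w_max (m s-1)',
                                           variable_label=['w_max'], variable_legend=True,
                                           name='cell_1_timeseries', plotdir='plots')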
def map_tracks(track,axis_extent=None,figsize=(10,10),axes=None):
for cell in track['cell'].dropna().unique():
track_i=track[track['cell']==cell]
axes.plot(track_i['longitude'],track_i['latitude'],'-')
if axis_extent:
axes.set_extent(axis_extent)
axes=make_map(axes)
return axes
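# A minimal usage sketch: map_tracks draws onto an existing cartopy GeoAxes, so
# the axes must be created with a map projection first; the extent values below
# are arbitrary lon/lat bounds and track_df needs 'longitude'/'latitude' columns.
def _example_map_tracks(track_df):
    import matplotlib.pyplot as plt
    import cartopy.crs as ccrs
    fig = plt.figure(figsize=(10, 10))
    ax = fig.add_subplot(1, 1, 1, projection=ccrs.PlateCarree())
    ax = map_tracks(track_df, axis_extent=[-95, -89, 28, 32], axes=ax)
    fig.savefig('tracks_map.png', dpi=300)
    plt.close(fig)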
def make_map(axes):
import matplotlib.ticker as mticker
import cartopy.crs as ccrs
from cartopy.mpl.gridliner import LONGITUDE_FORMATTER, LATITUDE_FORMATTER
gl = axes.gridlines(crs=ccrs.PlateCarree(), draw_labels=True,
linewidth=2, color='gray', alpha=0.5, linestyle='-')
axes.coastlines('10m')
gl.xlabels_top = False
gl.ylabels_right = False
gl.xlocator = mticker.MaxNLocator(nbins=5,min_n_ticks=3,steps=None)
gl.ylocator = mticker.MaxNLocator(nbins=5,min_n_ticks=3,steps=None)
gl.xformatter = LONGITUDE_FORMATTER
gl.yformatter = LATITUDE_FORMATTER
#gl.xlabel_style = {'size': 15, 'color': 'gray'}
#gl.xlabel_style = {'color': 'red', 'weight': 'bold'}
return axes
def plot_lifetime_histogram(track,axes=None,bin_edges=np.arange(0,200,20),density=False,**kwargs):
hist, bin_edges,bin_centers = lifetime_histogram(track,bin_edges=bin_edges,density=density)
plot_hist=axes.plot(bin_centers, hist,**kwargs)
return plot_hist
def plot_lifetime_histogram_bar(track,axes=None,bin_edges=np.arange(0,200,20),density=False,width_bar=1,shift=0.5,**kwargs):
hist, bin_edges, bin_centers = lifetime_histogram(track,bin_edges=bin_edges,density=density)
plot_hist=axes.bar(bin_centers+shift,hist,width=width_bar,**kwargs)
return plot_hist
def plot_histogram_cellwise(track,bin_edges,variable,quantity,axes=None,density=False,**kwargs):
hist, bin_edges,bin_centers = histogram_cellwise(track,bin_edges=bin_edges,variable=variable,quantity=quantity,density=density)
plot_hist=axes.plot(bin_centers, hist,**kwargs)
return plot_hist
def plot_histogram_featurewise(Track,bin_edges,variable,axes=None,density=False,**kwargs):
hist, bin_edges, bin_centers = histogram_featurewise(Track,bin_edges=bin_edges,variable=variable,density=density)
plot_hist=axes.plot(bin_centers, hist,**kwargs)
return plot_hist
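# A minimal usage sketch: draw the cell-lifetime distribution with the bar and
# line helpers above. This assumes the lifetime_histogram() helper imported
# earlier in this module and lifetimes expressed in minutes.
def _example_plot_lifetime_histogram(track_df):
    import numpy as np
    import matplotlib.pyplot as plt
    fig, ax = plt.subplots()
    plot_lifetime_histogram_bar(track_df, axes=ax, bin_edges=np.arange(0, 200, 20),
                                width_bar=15, shift=10, alpha=0.4)
    plot_lifetime_histogram(track_df, axes=ax, bin_edges=np.arange(0, 200, 20), color='k')
    ax.set_xlabel('cell lifetime (min)')
    ax.set_ylabel('counts')
    fig.savefig('lifetime_histogram.png', dpi=300)
    plt.close(fig)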
| en | 0.641111 | | 1.770418 | 2 |
api/urls.py | nf1s/covid-backend | 0 | 8481 | from sanic import Blueprint
from sanic_transmute import add_route
from .views import (
get_all,
get_status_by_country_id,
get_status_by_country_name,
get_deaths,
get_active_cases,
get_recovered_cases,
get_confirmed_cases,
list_countries,
)
cases = Blueprint("cases", url_prefix="/cases")
add_route(cases, get_all)
add_route(cases, get_status_by_country_id)
add_route(cases, get_status_by_country_name)
add_route(cases, get_deaths)
add_route(cases, get_active_cases)
add_route(cases, get_recovered_cases)
add_route(cases, get_confirmed_cases)
add_route(cases, list_countries)
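# A minimal usage sketch of how this blueprint is typically mounted; the app name
# below is an assumption, not taken from the repository.
def _create_example_app():
    from sanic import Sanic
    app = Sanic("covid_backend_example")
    app.blueprint(cases)  # exposes the routes above under the /cases prefix
    return app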
| none | 1 | 1.843838 | 2 |
|
scribdl/test/test_download.py | fatshotty/scribd-downloader | 182 | 8482 | <filename>scribdl/test/test_download.py
from ..downloader import Downloader
import os
import pytest
@pytest.fixture
def cwd_to_tmpdir(tmpdir):
os.chdir(str(tmpdir))
def test_audiobook_download(cwd_to_tmpdir, monkeypatch):
audiobook_url = "https://www.scribd.com/audiobook/237606860/100-Ways-to-Motivate-Yourself-Change-Your-Life-Forever"
audiobook_downloader = Downloader(audiobook_url)
audio = audiobook_downloader.download()
assert audio[0] == "100_Ways_to_Motivate_Yourself__Change_Your_Life_Forever_preview.mp3"
assert os.path.getsize(audio[0]) == 2127830
def test_text_document_download(cwd_to_tmpdir):
text_doc_url = "https://www.scribd.com/document/96882378/Trademark-License-Agreement"
text_downloader = Downloader(text_doc_url)
md_doc = text_downloader.download(is_image_document=False)
assert os.path.getsize(md_doc.input_content) in range(1000, 2000)
md_doc.to_pdf()
assert os.path.getsize(md_doc.pdf_path) in range(20000, 31000)
def test_img_document_download(cwd_to_tmpdir):
img_doc_url = "https://www.scribd.com/doc/136711944/Signature-Scanning-and-Verification-in-Finacle"
img_downloader = Downloader(img_doc_url)
imgs = img_downloader.download(is_image_document=True)
assert len(imgs.input_content) == 2
imgs.to_pdf()
assert os.path.getsize(imgs.pdf_path) in range(140000, 150000)
def test_book_download(cwd_to_tmpdir, monkeypatch):
book_url = "https://www.scribd.com/read/262694921/Acting-The-First-Six-Lessons"
book_downloader = Downloader(book_url)
# We don't want to clutter stdout with book contents if this test fails
monkeypatch.setattr("builtins.print", lambda x: None)
md_book = book_downloader.download()
assert os.path.getsize(md_book.input_content) in range(10000, 20000)
md_book.to_pdf()
assert os.path.getsize(md_book.pdf_path) in range(200000, 2500000)
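# A minimal usage sketch of the flow these tests assert on, outside pytest.
# The import path scribdl.downloader is assumed from the relative import above;
# the URL is the same public document used in test_text_document_download.
def _example_direct_download():
    from scribdl.downloader import Downloader
    url = "https://www.scribd.com/document/96882378/Trademark-License-Agreement"
    md_doc = Downloader(url).download(is_image_document=False)
    md_doc.to_pdf()
    return md_doc.pdf_path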
| en | 0.945928 | # We don't want to clutter stdout with book contents if this test fails | 2.355061 | 2 |
app/migrations/0005_auto_20210619_2310.py | hungitptit/boecdjango | 0 | 8483 | <gh_stars>0
# Generated by Django 3.2.4 on 2021-06-19 16:10
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
dependencies = [
('app', '0004_auto_20210619_1802'),
]
operations = [
migrations.AddField(
model_name='comment',
name='create_at',
field=models.DateTimeField(auto_now_add=True, db_column='create_at', default=django.utils.timezone.now),
preserve_default=False,
),
migrations.AddField(
model_name='comment',
name='subject',
field=models.CharField(blank=True, max_length=255),
),
migrations.AddField(
model_name='comment',
name='update_at',
field=models.DateTimeField(auto_now=True, db_column='update_at'),
),
]
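# Typical commands to preview and apply this migration (standard Django tooling;
# the app label 'app' matches the dependency declared above):
#   python manage.py sqlmigrate app 0005_auto_20210619_2310
#   python manage.py migrate app 0005_auto_20210619_2310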
| en | 0.824446 | # Generated by Django 3.2.4 on 2021-06-19 16:10 | 1.796234 | 2 |
vision_datasets/common/dataset_registry.py | shonohs/vision-datasets | 0 | 8484 | <filename>vision_datasets/common/dataset_registry.py
import copy
import json
from .dataset_info import DatasetInfoFactory
class DatasetRegistry:
"""
A central registry of all available datasets
"""
def __init__(self, datasets_json: str):
self.datasets = [DatasetInfoFactory.create(d) for d in json.loads(datasets_json)]
def get_dataset_info(self, dataset_name, dataset_version=None):
datasets = [d for d in self.datasets if d.name == dataset_name and (not dataset_version or d.version == dataset_version)]
if not datasets:
return None
sorted_datasets = sorted(datasets, key=lambda d: d.version)
return copy.deepcopy(sorted_datasets[-1])
def list_data_version_and_types(self):
return [{'name': d.name, 'version': d.version, 'type': d.type, 'description': d.description} for d in self.datasets]
@staticmethod
def _get_default_dataset_json(json_file_name):
import sys
py_version = sys.version_info
if py_version.minor >= 7:
import importlib.resources as pkg_resources
from vision_datasets import resources
datasets_json = pkg_resources.read_text(resources, json_file_name)
else:
import pkgutil
resource_package = 'vision_datasets'
resource_path = '/'.join(('resources', json_file_name))
datasets_json = pkgutil.get_data(resource_package, resource_path)
return datasets_json
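# A minimal usage sketch: build a registry from an in-line JSON payload and query
# it. The keys shown ('name', 'version', 'type', 'description') are the ones read
# back by list_data_version_and_types(); a real manifest may need additional
# fields for DatasetInfoFactory.create() to accept it.
def _example_registry_usage():
    payload = json.dumps([{
        "name": "example-dataset",
        "version": 1,
        "type": "classification_multiclass",
        "description": "toy entry for illustration",
    }])
    registry = DatasetRegistry(payload)
    print(registry.list_data_version_and_types())
    return registry.get_dataset_info("example-dataset", dataset_version=1)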
| en | 0.647582 | A central registry of all available datasets | 2.396988 | 2 |
yasql/apps/sqlorders/views.py | Fanduzi/YaSQL | 443 | 8485 | # -*- coding:utf-8 -*-
# edit by fuzongfei
import base64
import datetime
# Create your views here.
import json
from django.http import Http404, HttpResponse
from django.utils import timezone
from django_filters.rest_framework import DjangoFilterBackend
from rest_framework import filters
from rest_framework.exceptions import PermissionDenied
from rest_framework.generics import ListAPIView, GenericAPIView, CreateAPIView, UpdateAPIView, DestroyAPIView
from rest_framework.views import APIView
from rest_framework.viewsets import ViewSet
from libs import permissions
from libs.Pagination import Pagination
from libs.RenderColumns import render_dynamic_columns
from libs.response import JsonResponseV1
from sqlorders import models, serializers
from sqlorders.filters import SqlOrderListFilter, GetTasksListFilter
class GetDBEnvironment(ListAPIView):
queryset = models.DbEnvironment.objects.all()
serializer_class = serializers.DbEnvironmentSerializer
# Get the order environments
def get(self, request, *args, **kwargs):
serializer = self.get_serializer(self.get_queryset(), many=True)
return JsonResponseV1(data=serializer.data)
class GetDbSchemas(APIView):
# Get the list of schemas for the specified environment and purpose
def get(self, request):
serializer = serializers.DbSchemasSerializer(data=request.query_params)
if serializer.is_valid():
return JsonResponseV1(data=serializer.query)
return JsonResponseV1(message=serializer.errors, code='0001')
class IncepSyntaxCheckView(APIView):
def post(self, request, *args, **kwargs):
serializer = serializers.IncepSyntaxCheckSerializer(data=request.data)
if serializer.is_valid():
s, data = serializer.check()
render_columns = [
{'key': 'order_id', 'value': '序号'},
{'key': 'stage', 'value': '阶段'},
{'key': 'stage_status', 'value': '阶段状态'},
{'key': 'error_level', 'value': '错误级别'},
{'key': 'error_message', 'value': '错误信息', 'width': '35%'},
{'key': 'sql', 'value': 'SQL内容', 'width': '25%', 'ellipsis': True},
{'key': 'affected_rows', 'value': '影响/扫描行数'}
]
columns = render_dynamic_columns(render_columns)
message = '语法检查未发现异常,可以提交'
if not s:
message = '语法检查发现异常,详情请查看输出,更正后在提交'
d = {
'status': 0 if s else 1,
'data': data
}
data = {'columns': columns, 'data': d}
return JsonResponseV1(data=data, message=message)
return JsonResponseV1(message=serializer.errors, code='0001', flat=True)
class SqlOrdersCommit(GenericAPIView):
permission_classes = (permissions.CanCommitOrdersPermission,)
serializer_class = serializers.SqlOrdersCommitSerializer
def post(self, request, *args, **kwargs):
serializer = self.get_serializer(data=request.data)
if serializer.is_valid():
serializer.save()
return JsonResponseV1(message="提交成功")
return JsonResponseV1(message=serializer.errors, code='0001', flat=True)
class SqlOrdersList(ListAPIView):
permission_classes = (permissions.CanViewOrdersPermission,)
queryset = models.DbOrders.objects.all()
serializer_class = serializers.SqlOrdersListSerializer
pagination_class = Pagination
filter_backends = [DjangoFilterBackend, filters.SearchFilter, filters.OrderingFilter]
filter_class = SqlOrderListFilter
ordering = ['-created_at']
search_fields = ['title', 'database', 'remark', 'applicant', 'progress', 'contents']
def get(self, request, *args, **kwargs):
queryset = self.filter_queryset(self.get_queryset())
page = self.paginate_queryset(queryset)
serializer = self.get_serializer(page, many=True)
render_columns = [
{'key': 'progress', 'value': '进度', 'width': '8%'},
{'key': 'applicant', 'value': '申请人'},
{'key': 'department', 'value': '部门'},
{'key': 'env_name', 'value': '环境'},
{'key': 'escape_title', 'value': '标题', 'width': '18%', 'ellipsis': True},
{'key': 'sql_type', 'value': '类型'},
{'key': 'remark', 'value': '备注'},
{'key': 'version', 'value': '版本'},
{'key': 'host', 'value': '实例/库'},
{'key': 'auditor', 'value': '审核人'},
{'key': 'reviewer', 'value': '复核人'},
]
columns = render_dynamic_columns(render_columns)
data = {'columns': columns, 'data': serializer.data}
return self.get_paginated_response(data)
class SqlOrdersDetail(ListAPIView):
"""SQL order details"""
permission_classes = (permissions.CanViewOrdersPermission,)
queryset = models.DbOrders.objects.all()
serializer_class = serializers.SqlOrderDetailSerializer
lookup_field = 'order_id'
def get(self, request, *args, **kwargs):
queryset = self.get_object()
serializer = self.get_serializer(queryset, context={"request": request})
return JsonResponseV1(data=serializer.data)
class OpSqlOrderView(ViewSet):
"""Update the status of a SQL order, e.g. approve, close, etc."""
permission_classes = (permissions.CanViewOrdersPermission,)
def get_obj(self, pk):
try:
obj = models.DbOrders.objects.get(pk=pk)
return obj
except models.DbOrders.DoesNotExist:
raise Http404
def approve(self, request, pk):
serializer = serializers.OpSqlOrderSerializer(instance=self.get_obj(pk),
data=request.data,
context={"request": request, "handler": "_approve"})
if serializer.is_valid():
serializer.save()
return JsonResponseV1(data=serializer.data, message="操作成功")
return JsonResponseV1(message=serializer.errors, code='0001')
def feedback(self, request, pk):
serializer = serializers.OpSqlOrderSerializer(instance=self.get_obj(pk),
data=request.data,
context={"request": request, "handler": "_feedback"})
if serializer.is_valid():
serializer.save()
return JsonResponseV1(data=serializer.data, message="操作成功")
return JsonResponseV1(message=serializer.errors, code='0001')
def close(self, request, pk):
serializer = serializers.OpSqlOrderSerializer(instance=self.get_obj(pk),
data=request.data,
context={"request": request, "handler": "_close"})
if serializer.is_valid():
serializer.save()
return JsonResponseV1(data=serializer.data, message="操作成功")
return JsonResponseV1(message=serializer.errors, code='0001')
def review(self, request, pk):
serializer = serializers.OpSqlOrderSerializer(instance=self.get_obj(pk),
data=request.data,
context={"request": request, "handler": "_review"})
if serializer.is_valid():
serializer.save()
return JsonResponseV1(data=serializer.data, message="操作成功")
return JsonResponseV1(message=serializer.errors, code='0001')
class GenerateTasksView(APIView):
permission_classes = (permissions.CanExecuteOrdersPermission,)
def post(self, request, *args, **kwargs):
serializer = serializers.GenerateSqlOrdersTasksSerializer(data=request.data)
if serializer.is_valid():
data = serializer.save(request)
return JsonResponseV1(data=data)
return JsonResponseV1(message=serializer.errors, code='0001', flat=True)
class GetTaskIdView(APIView):
def get(self, request, *args, **kwargs):
"""Return the task id for the given order id"""
order_id = kwargs.get('order_id')
task_id = models.DbOrdersExecuteTasks.objects.filter(order_id=order_id).first().task_id
return JsonResponseV1(data=task_id)
class GetTasksPreviewView(ListAPIView):
permission_classes = (permissions.CanViewOrdersPermission,)
queryset = models.DbOrdersExecuteTasks.objects.all()
serializer_class = serializers.SqlOrdersTasksListSerializer
pagination_class = Pagination
filter_backends = [DjangoFilterBackend, filters.SearchFilter, filters.OrderingFilter]
filter_class = GetTasksListFilter
search_fields = ['sql']
ordering = ['created_time']
def get(self, request, *args, **kwargs):
task_id = kwargs.get('task_id')
queryset = self.filter_queryset(self.get_queryset().filter(task_id=task_id))
# The data-hiding switch is enabled
# Only the applicant, the auditors, the reviewers and superusers are allowed to view the data
obj = models.DbOrders.objects.get(
pk=models.DbOrdersExecuteTasks.objects.filter(task_id=task_id).first().order_id
)
if obj.is_hide == 'ON' and not request.user.is_superuser:
allowed_view_users = [obj.applicant]
allowed_view_users.extend([x['user'] for x in json.loads(obj.auditor)])
allowed_view_users.extend([x['user'] for x in json.loads(obj.reviewer)])
if request.user.username not in allowed_view_users:
raise PermissionDenied(detail='您没有权限查看该工单的数据,5s后,自动跳转到工单列表页面')
origin_queryset = self.queryset.filter(task_id=task_id)
total = origin_queryset.count()
progress_0 = origin_queryset.filter(progress=0).count()
progress_1 = origin_queryset.filter(progress=1).count()
progress_3 = origin_queryset.filter(progress=3).count()
page = self.paginate_queryset(queryset)
serializer = self.get_serializer(page, context={'request': request}, many=True)
render_columns = [
{'key': 'num', 'value': '序号'}, # custom "num" column, used by the frontend to display the row number
{'key': 'applicant', 'value': '申请人'},
{'key': 'sql', 'value': 'SQL', 'ellipsis': True, 'width': '50%'},
{'key': 'progress', 'value': '进度'},
{'key': 'result', 'value': '查看结果'}, # custom "result" column
]
columns = render_dynamic_columns(render_columns)
data = {'columns': columns,
'data': {'data': serializer.data,
'total': total,
'progress_0': progress_0,
'progress_1': progress_1,
'progress_3': progress_3}}
return self.get_paginated_response(data)
class GetTasksListView(ListAPIView):
permission_classes = (permissions.CanViewOrdersPermission,)
queryset = models.DbOrdersExecuteTasks.objects.all()
serializer_class = serializers.SqlOrdersTasksListSerializer
pagination_class = Pagination
filter_backends = [DjangoFilterBackend, filters.SearchFilter, filters.OrderingFilter]
filter_class = GetTasksListFilter
search_fields = ['sql']
ordering = ['created_time']
def get(self, request, *args, **kwargs):
task_id = kwargs.get('task_id')
queryset = self.filter_queryset(self.get_queryset().filter(task_id=task_id))
# The data-hiding switch is enabled
# Only the applicant, the auditors, the reviewers and superusers are allowed to view the data
obj = models.DbOrders.objects.get(
pk=models.DbOrdersExecuteTasks.objects.filter(task_id=task_id).first().order_id
)
if obj.is_hide == 'ON' and not request.user.is_superuser:
allowed_view_users = [obj.applicant]
allowed_view_users.extend([x['user'] for x in json.loads(obj.auditor)])
allowed_view_users.extend([x['user'] for x in json.loads(obj.reviewer)])
if request.user.username not in allowed_view_users:
raise PermissionDenied(detail='您没有权限查看该工单的数据,5s后,自动跳转到工单列表页面')
page = self.paginate_queryset(queryset)
serializer = self.get_serializer(page, context={'request': request}, many=True)
render_columns = [
{'key': 'num', 'value': '序号'}, # custom "num" column, used by the frontend to display the row number
{'key': 'applicant', 'value': '申请人'},
{'key': 'sql', 'value': 'SQL', 'ellipsis': True, 'width': '50%'},
{'key': 'progress', 'value': '进度'},
{'key': 'execute', 'value': '执行'}, # custom "execute" column
{'key': 'result', 'value': '查看结果'}, # custom "result" column
]
if queryset.exists():
if queryset.first().sql_type == 'DDL':
render_columns.insert(-1, {'key': 'ghost_pause', 'value': '暂停(gh-ost)'})
render_columns.insert(-1, {'key': 'ghost_recovery', 'value': '恢复(gh-ost)'})
columns = render_dynamic_columns(render_columns)
data = {'columns': columns, 'data': serializer.data}
return self.get_paginated_response(data)
class ExecuteSingleTaskView(APIView):
permission_classes = (permissions.CanExecuteOrdersPermission,)
def post(self, request, *args, **kwargs):
serializer = serializers.ExecuteSingleTaskSerializer(data=request.data)
if serializer.is_valid():
serializer.execute(request)
return JsonResponseV1(message="任务提交成功,请查看输出")
return JsonResponseV1(message=serializer.errors, code='0001', flat=True)
class ExecuteMultiTasksView(APIView):
permission_classes = (permissions.CanExecuteOrdersPermission,)
def post(self, request, *args, **kwargs):
serializer = serializers.ExecuteMultiTasksSerializer(data=request.data)
if serializer.is_valid():
serializer.execute(request)
return JsonResponseV1(message="任务提交成功,请查看输出")
return JsonResponseV1(message=serializer.errors, code='0001', flat=True)
class ThrottleTaskView(APIView):
permission_classes = (permissions.CanExecuteOrdersPermission,)
def post(self, request, *args, **kwargs):
serializer = serializers.ThrottleTaskSerializer(data=request.data)
if serializer.is_valid():
message = serializer.execute(request)
return JsonResponseV1(message=message)
return JsonResponseV1(message=serializer.errors, code='0001', flat=True)
class GetTasksResultView(ListAPIView):
"""Result details of an executed SQL order task"""
permission_classes = (permissions.CanViewOrdersPermission,)
queryset = models.DbOrdersExecuteTasks.objects.all()
serializer_class = serializers.GetTasksResultSerializer
lookup_field = 'id'
def get(self, request, *args, **kwargs):
queryset = self.get_object()
serializer = self.get_serializer(queryset, context={"request": request})
return JsonResponseV1(data=serializer.data)
class HookSqlOrdersView(APIView):
permission_classes = (permissions.anyof(permissions.CanCommitOrdersPermission,
permissions.CanViewOrdersPermission,
permissions.CanExecuteOrdersPermission,
permissions.CanAuditOrdersPermission),
)
def post(self, request, *args, **kwargs):
serializer = serializers.HookSqlOrdersSerializer(data=request.data)
if serializer.is_valid():
serializer.save()
return JsonResponseV1(message="任务提交成功,请查看输出")
return JsonResponseV1(message=serializer.errors, code='0001', flat=True)
class DownloadExportFilesView(APIView):
"""Download exported files"""
permission_classes = (permissions.CanViewOrdersPermission,)
def get(self, request, base64_filename):
file_name = base64.b64decode(base64_filename).decode()
if not models.DbExportFiles.objects.filter(file_name=file_name).exists():
raise Http404
obj = models.DbExportFiles.objects.get(file_name=file_name)
if not models.DbOrdersExecuteTasks.objects.get(pk=obj.task_id).applicant == request.user.username:
raise PermissionDenied(detail='您没有权限')
fsock = open(f"media/{obj.files}", 'rb')
response = HttpResponse(fsock, content_type="application/zip")
response['Content-Disposition'] = f'attachment; filename={file_name}'
return response
class ReleaseVersionsGet(APIView):
"""Get release versions, used when submitting an order"""
def get(self, request):
before_30_days = (timezone.now() - datetime.timedelta(days=30))
queryset = models.ReleaseVersions.objects.filter(
expire_time__gte=before_30_days
).values('id', 'version', 'expire_time').order_by('-created_at')
for row in queryset:
row['disabled'] = 0
if row['expire_time'] < datetime.datetime.date(timezone.now()):
row['disabled'] = 1
return JsonResponseV1(data=queryset)
class ReleaseVersionsList(ListAPIView):
"""List release versions, used for managing release versions"""
permission_classes = (permissions.CanViewVersionPermission,)
queryset = models.ReleaseVersions.objects.all()
serializer_class = serializers.ReleaseVersionsListSerializer
pagination_class = Pagination
filter_backends = [filters.SearchFilter, filters.OrderingFilter]
search_fields = ['username', 'version', 'expire_time']
ordering = ['-created_at']
def get(self, request, *args, **kwargs):
queryset = self.filter_queryset(self.get_queryset())
page = self.paginate_queryset(queryset)
serializer = self.get_serializer(page, many=True)
render_columns = [
{'key': 'version', 'value': '版本'},
{'key': 'username', 'value': '创建人'},
{'key': 'expire_time', 'value': '截止日期'},
{'key': 'created_at', 'value': '创建时间'},
{'key': 'key', 'value': '操作'},
{'key': 'id', 'value': '详情'},
]
columns = render_dynamic_columns(render_columns)
data = {'columns': columns, 'data': serializer.data}
return self.get_paginated_response(data)
class ReleaseVersionsCreate(CreateAPIView):
"""Create a release version"""
permission_classes = (permissions.CanCreateVersionsPermission,)
serializer_class = serializers.ReleaseVersionsCreateSerializer
def create(self, request, *args, **kwargs):
serializer = self.get_serializer(data=request.data)
if serializer.is_valid():
self.perform_create(serializer)
return JsonResponseV1(message="创建成功")
return JsonResponseV1(code='0001', message=serializer.errors, flat=True)
class ReleaseVersionsUpdate(UpdateAPIView):
"""Update a release version; this view updates a single record only"""
permission_classes = (permissions.CanUpdateVersionsPermission,)
def put(self, request, *args, **kwargs):
serializer = serializers.ReleaseVersionsSerializer(
instance=models.ReleaseVersions.objects.get(pk=kwargs['key']), # fetch the single record to update
data=request.data
)
if serializer.is_valid():
serializer.save()
return JsonResponseV1(message="更新成功")
return JsonResponseV1(code='0001', message=serializer.errors, flat=True)
class ReleaseVersionsDelete(DestroyAPIView):
"""Delete a release version"""
permission_classes = (permissions.CanDeleteVersionsPermission,)
queryset = models.ReleaseVersions.objects.all()
lookup_field = 'id' # defaults to the primary key, so this line is optional
def destroy(self, request, *args, **kwargs):
instance = self.get_object()
self.perform_destroy(instance)
return JsonResponseV1(message="删除成功")
class ReleaseVersionsView(APIView):
"""Get the progress of the orders of a given version across all environments"""
def get(self, request, *args, **kwargs):
# Get the primary key of the version
version = kwargs.get('version')
version_id = models.ReleaseVersions.objects.get(version=version).pk
# Get the environments and pivot the rows into dynamic columns
obj = models.DbEnvironment.objects.values('id', 'name')
row2columns = ''
for row in obj:
row2columns += f"max(if(env_id={row['id']}, progress, -1)) as {row['name']},"
# Get the status of every order under this version in each environment (the environments are dynamic columns)
# id has no real meaning here
query = f"select " + row2columns + \
f"substring(MD5(RAND()),1,20) as id,title as escape_title,order_id, applicant " \
f"from yasql_dborders where version_id='{version_id}' group by escape_title,order_id,applicant"
rawquery = models.DbOrders.objects.raw(query)
# Get the environment column names
dynamic_columns = list(rawquery.columns)[:-4]
data = []
for row in rawquery:
columns = {
'id': row.id,
'escape_title': row.escape_title,
'order_id': row.order_id,
'applicant': row.applicant,
}
for col in dynamic_columns:
columns[col] = getattr(row, col)
data.append(columns)
render_columns = [
{'key': 'escape_title', 'ellipsis': True, 'value': '标题'},
{'key': 'applicant', 'value': '申请人'},
]
render_columns.extend([{'key': x, 'value': x} for x in dynamic_columns])
columns = render_dynamic_columns(render_columns)
data = {'columns': columns, 'data': data}
return JsonResponseV1(data=data)
| # -*- coding:utf-8 -*-
# edit by fuzongfei
import base64
import datetime
# Create your views here.
import json
from django.http import Http404, HttpResponse
from django.utils import timezone
from django_filters.rest_framework import DjangoFilterBackend
from rest_framework import filters
from rest_framework.exceptions import PermissionDenied
from rest_framework.generics import ListAPIView, GenericAPIView, CreateAPIView, UpdateAPIView, DestroyAPIView
from rest_framework.views import APIView
from rest_framework.viewsets import ViewSet
from libs import permissions
from libs.Pagination import Pagination
from libs.RenderColumns import render_dynamic_columns
from libs.response import JsonResponseV1
from sqlorders import models, serializers
from sqlorders.filters import SqlOrderListFilter, GetTasksListFilter
class GetDBEnvironment(ListAPIView):
queryset = models.DbEnvironment.objects.all()
serializer_class = serializers.DbEnvironmentSerializer
# 获取工单环境
def get(self, request, *args, **kwargs):
serializer = self.get_serializer(self.get_queryset(), many=True)
return JsonResponseV1(data=serializer.data)
class GetDbSchemas(APIView):
# 获取指定环境指定用途的schemas列表
def get(self, request):
serializer = serializers.DbSchemasSerializer(data=request.query_params)
if serializer.is_valid():
return JsonResponseV1(data=serializer.query)
return JsonResponseV1(message=serializer.errors, code='0001')
class IncepSyntaxCheckView(APIView):
def post(self, request, *args, **kwargs):
serializer = serializers.IncepSyntaxCheckSerializer(data=request.data)
if serializer.is_valid():
s, data = serializer.check()
render_columns = [
{'key': 'order_id', 'value': '序号'},
{'key': 'stage', 'value': '阶段'},
{'key': 'stage_status', 'value': '阶段状态'},
{'key': 'error_level', 'value': '错误级别'},
{'key': 'error_message', 'value': '错误信息', 'width': '35%'},
{'key': 'sql', 'value': 'SQL内容', 'width': '25%', 'ellipsis': True},
{'key': 'affected_rows', 'value': '影响/扫描行数'}
]
columns = render_dynamic_columns(render_columns)
message = '语法检查未发现异常,可以提交'
if not s:
message = '语法检查发现异常,详情请查看输出,更正后在提交'
d = {
'status': 0 if s else 1,
'data': data
}
data = {'columns': columns, 'data': d}
return JsonResponseV1(data=data, message=message)
return JsonResponseV1(message=serializer.errors, code='0001', flat=True)
class SqlOrdersCommit(GenericAPIView):
permission_classes = (permissions.CanCommitOrdersPermission,)
serializer_class = serializers.SqlOrdersCommitSerializer
def post(self, request, *args, **kwargs):
serializer = self.get_serializer(data=request.data)
if serializer.is_valid():
serializer.save()
return JsonResponseV1(message="提交成功")
return JsonResponseV1(message=serializer.errors, code='0001', flat=True)
class SqlOrdersList(ListAPIView):
permission_classes = (permissions.CanViewOrdersPermission,)
queryset = models.DbOrders.objects.all()
serializer_class = serializers.SqlOrdersListSerializer
pagination_class = Pagination
filter_backends = [DjangoFilterBackend, filters.SearchFilter, filters.OrderingFilter]
filter_class = SqlOrderListFilter
ordering = ['-created_at']
search_fields = ['title', 'database', 'remark', 'applicant', 'progress', 'contents']
def get(self, request, *args, **kwargs):
queryset = self.filter_queryset(self.get_queryset())
page = self.paginate_queryset(queryset)
serializer = self.get_serializer(page, many=True)
render_columns = [
{'key': 'progress', 'value': '进度', 'width': '8%'},
{'key': 'applicant', 'value': '申请人'},
{'key': 'department', 'value': '部门'},
{'key': 'env_name', 'value': '环境'},
{'key': 'escape_title', 'value': '标题', 'width': '18%', 'ellipsis': True},
{'key': 'sql_type', 'value': '类型'},
{'key': 'remark', 'value': '备注'},
{'key': 'version', 'value': '版本'},
{'key': 'host', 'value': '实例/库'},
{'key': 'auditor', 'value': '审核人'},
{'key': 'reviewer', 'value': '复核人'},
]
columns = render_dynamic_columns(render_columns)
data = {'columns': columns, 'data': serializer.data}
return self.get_paginated_response(data)
class SqlOrdersDetail(ListAPIView):
"""SQL工单详情"""
permission_classes = (permissions.CanViewOrdersPermission,)
queryset = models.DbOrders.objects.all()
serializer_class = serializers.SqlOrderDetailSerializer
lookup_field = 'order_id'
def get(self, request, *args, **kwargs):
queryset = self.get_object()
serializer = self.get_serializer(queryset, context={"request": request})
return JsonResponseV1(data=serializer.data)
class OpSqlOrderView(ViewSet):
"""更新SQL工单状态,如:审核,关闭等"""
permission_classes = (permissions.CanViewOrdersPermission,)
def get_obj(self, pk):
try:
obj = models.DbOrders.objects.get(pk=pk)
return obj
except models.DbOrders.DoesNotExist:
raise Http404
def approve(self, request, pk):
serializer = serializers.OpSqlOrderSerializer(instance=self.get_obj(pk),
data=request.data,
context={"request": request, "handler": "_approve"})
if serializer.is_valid():
serializer.save()
return JsonResponseV1(data=serializer.data, message="操作成功")
return JsonResponseV1(message=serializer.errors, code='0001')
def feedback(self, request, pk):
serializer = serializers.OpSqlOrderSerializer(instance=self.get_obj(pk),
data=request.data,
context={"request": request, "handler": "_feedback"})
if serializer.is_valid():
serializer.save()
return JsonResponseV1(data=serializer.data, message="操作成功")
return JsonResponseV1(message=serializer.errors, code='0001')
def close(self, request, pk):
serializer = serializers.OpSqlOrderSerializer(instance=self.get_obj(pk),
data=request.data,
context={"request": request, "handler": "_close"})
if serializer.is_valid():
serializer.save()
return JsonResponseV1(data=serializer.data, message="操作成功")
return JsonResponseV1(message=serializer.errors, code='0001')
def review(self, request, pk):
serializer = serializers.OpSqlOrderSerializer(instance=self.get_obj(pk),
data=request.data,
context={"request": request, "handler": "_review"})
if serializer.is_valid():
serializer.save()
return JsonResponseV1(data=serializer.data, message="操作成功")
return JsonResponseV1(message=serializer.errors, code='0001')
class GenerateTasksView(APIView):
permission_classes = (permissions.CanExecuteOrdersPermission,)
def post(self, request, *args, **kwargs):
serializer = serializers.GenerateSqlOrdersTasksSerializer(data=request.data)
if serializer.is_valid():
data = serializer.save(request)
return JsonResponseV1(data=data)
return JsonResponseV1(message=serializer.errors, code='0001', flat=True)
class GetTaskIdView(APIView):
def get(self, request, *args, **kwargs):
"""根据order id返回taskid"""
order_id = kwargs.get('order_id')
task_id = models.DbOrdersExecuteTasks.objects.filter(order_id=order_id).first().task_id
return JsonResponseV1(data=task_id)
class GetTasksPreviewView(ListAPIView):
permission_classes = (permissions.CanViewOrdersPermission,)
queryset = models.DbOrdersExecuteTasks.objects.all()
serializer_class = serializers.SqlOrdersTasksListSerializer
pagination_class = Pagination
filter_backends = [DjangoFilterBackend, filters.SearchFilter, filters.OrderingFilter]
filter_class = GetTasksListFilter
search_fields = ['sql']
ordering = ['created_time']
def get(self, request, *args, **kwargs):
task_id = kwargs.get('task_id')
queryset = self.filter_queryset(self.get_queryset().filter(task_id=task_id))
# 数据隐藏按钮打开了
# 仅允许申请人、审核人、复核人和超权用户查看数据
obj = models.DbOrders.objects.get(
pk=models.DbOrdersExecuteTasks.objects.filter(task_id=task_id).first().order_id
)
if obj.is_hide == 'ON' and not request.user.is_superuser:
allowed_view_users = [obj.applicant]
allowed_view_users.extend([x['user'] for x in json.loads(obj.auditor)])
allowed_view_users.extend([x['user'] for x in json.loads(obj.reviewer)])
if request.user.username not in allowed_view_users:
raise PermissionDenied(detail='您没有权限查看该工单的数据,5s后,自动跳转到工单列表页面')
origin_queryset = self.queryset.filter(task_id=task_id)
total = origin_queryset.count()
progress_0 = origin_queryset.filter(progress=0).count()
progress_1 = origin_queryset.filter(progress=1).count()
progress_3 = origin_queryset.filter(progress=3).count()
page = self.paginate_queryset(queryset)
serializer = self.get_serializer(page, context={'request': request}, many=True)
render_columns = [
{'key': 'num', 'value': '序号'}, # 自定义num,前台显示序号使用
{'key': 'applicant', 'value': '申请人'},
{'key': 'sql', 'value': 'SQL', 'ellipsis': True, 'width': '50%'},
{'key': 'progress', 'value': '进度'},
{'key': 'result', 'value': '查看结果'}, # 自定义result
]
columns = render_dynamic_columns(render_columns)
data = {'columns': columns,
'data': {'data': serializer.data,
'total': total,
'progress_0': progress_0,
'progress_1': progress_1,
'progress_3': progress_3}}
return self.get_paginated_response(data)
class GetTasksListView(ListAPIView):
permission_classes = (permissions.CanViewOrdersPermission,)
queryset = models.DbOrdersExecuteTasks.objects.all()
serializer_class = serializers.SqlOrdersTasksListSerializer
pagination_class = Pagination
filter_backends = [DjangoFilterBackend, filters.SearchFilter, filters.OrderingFilter]
filter_class = GetTasksListFilter
search_fields = ['sql']
ordering = ['created_time']
def get(self, request, *args, **kwargs):
task_id = kwargs.get('task_id')
queryset = self.filter_queryset(self.get_queryset().filter(task_id=task_id))
# 数据隐藏按钮打开了
# 仅允许申请人、审核人、复核人和超权用户查看数据
obj = models.DbOrders.objects.get(
pk=models.DbOrdersExecuteTasks.objects.filter(task_id=task_id).first().order_id
)
if obj.is_hide == 'ON' and not request.user.is_superuser:
allowed_view_users = [obj.applicant]
allowed_view_users.extend([x['user'] for x in json.loads(obj.auditor)])
allowed_view_users.extend([x['user'] for x in json.loads(obj.reviewer)])
if request.user.username not in allowed_view_users:
raise PermissionDenied(detail='您没有权限查看该工单的数据,5s后,自动跳转到工单列表页面')
page = self.paginate_queryset(queryset)
serializer = self.get_serializer(page, context={'request': request}, many=True)
render_columns = [
{'key': 'num', 'value': '序号'}, # 自定义num,前台显示序号使用
{'key': 'applicant', 'value': '申请人'},
{'key': 'sql', 'value': 'SQL', 'ellipsis': True, 'width': '50%'},
{'key': 'progress', 'value': '进度'},
{'key': 'execute', 'value': '执行'}, # 自定义execute
{'key': 'result', 'value': '查看结果'}, # 自定义result
]
if queryset.exists():
if queryset.first().sql_type == 'DDL':
render_columns.insert(-1, {'key': 'ghost_pause', 'value': '暂停(gh-ost)'})
render_columns.insert(-1, {'key': 'ghost_recovery', 'value': '恢复(gh-ost)'})
columns = render_dynamic_columns(render_columns)
data = {'columns': columns, 'data': serializer.data}
return self.get_paginated_response(data)
class ExecuteSingleTaskView(APIView):
permission_classes = (permissions.CanExecuteOrdersPermission,)
def post(self, request, *args, **kwargs):
serializer = serializers.ExecuteSingleTaskSerializer(data=request.data)
if serializer.is_valid():
serializer.execute(request)
return JsonResponseV1(message="任务提交成功,请查看输出")
return JsonResponseV1(message=serializer.errors, code='0001', flat=True)
class ExecuteMultiTasksView(APIView):
permission_classes = (permissions.CanExecuteOrdersPermission,)
def post(self, request, *args, **kwargs):
serializer = serializers.ExecuteMultiTasksSerializer(data=request.data)
if serializer.is_valid():
serializer.execute(request)
return JsonResponseV1(message="任务提交成功,请查看输出")
return JsonResponseV1(message=serializer.errors, code='0001', flat=True)
class ThrottleTaskView(APIView):
permission_classes = (permissions.CanExecuteOrdersPermission,)
def post(self, request, *args, **kwargs):
serializer = serializers.ThrottleTaskSerializer(data=request.data)
if serializer.is_valid():
message = serializer.execute(request)
return JsonResponseV1(message=message)
return JsonResponseV1(message=serializer.errors, code='0001', flat=True)
class GetTasksResultView(ListAPIView):
"""SQL工单详情"""
permission_classes = (permissions.CanViewOrdersPermission,)
queryset = models.DbOrdersExecuteTasks.objects.all()
serializer_class = serializers.GetTasksResultSerializer
lookup_field = 'id'
def get(self, request, *args, **kwargs):
queryset = self.get_object()
serializer = self.get_serializer(queryset, context={"request": request})
return JsonResponseV1(data=serializer.data)
class HookSqlOrdersView(APIView):
permission_classes = (permissions.anyof(permissions.CanCommitOrdersPermission,
permissions.CanViewOrdersPermission,
permissions.CanExecuteOrdersPermission,
permissions.CanAuditOrdersPermission),
)
def post(self, request, *args, **kwargs):
serializer = serializers.HookSqlOrdersSerializer(data=request.data)
if serializer.is_valid():
serializer.save()
return JsonResponseV1(message="任务提交成功,请查看输出")
return JsonResponseV1(message=serializer.errors, code='0001', flat=True)
class DownloadExportFilesView(APIView):
"""下载导出文件"""
permission_classes = (permissions.CanViewOrdersPermission,)
def get(self, request, base64_filename):
file_name = base64.b64decode(base64_filename).decode()
if not models.DbExportFiles.objects.filter(file_name=file_name).exists():
raise Http404
obj = models.DbExportFiles.objects.get(file_name=file_name)
if not models.DbOrdersExecuteTasks.objects.get(pk=obj.task_id).applicant == request.user.username:
raise PermissionDenied(detail='您没有权限')
fsock = open(f"media/{obj.files}", 'rb')
response = HttpResponse(fsock, content_type="application/zip")
response['Content-Disposition'] = f'attachment; filename={file_name}'
return response
class ReleaseVersionsGet(APIView):
"""获取上线版本号,提交工单使用"""
def get(self, request):
before_30_days = (timezone.now() - datetime.timedelta(days=30))
queryset = models.ReleaseVersions.objects.filter(
expire_time__gte=before_30_days
).values('id', 'version', 'expire_time').order_by('-created_at')
for row in queryset:
row['disabled'] = 0
if row['expire_time'] < datetime.datetime.date(timezone.now()):
row['disabled'] = 1
return JsonResponseV1(data=queryset)
class ReleaseVersionsList(ListAPIView):
"""获取上线版本号列表,管理上线版本号使用"""
permission_classes = (permissions.CanViewVersionPermission,)
queryset = models.ReleaseVersions.objects.all()
serializer_class = serializers.ReleaseVersionsListSerializer
pagination_class = Pagination
filter_backends = [filters.SearchFilter, filters.OrderingFilter]
search_fields = ['username', 'version', 'expire_time']
ordering = ['-created_at']
def get(self, request, *args, **kwargs):
queryset = self.filter_queryset(self.get_queryset())
page = self.paginate_queryset(queryset)
serializer = self.get_serializer(page, many=True)
render_columns = [
{'key': 'version', 'value': '版本'},
{'key': 'username', 'value': '创建人'},
{'key': 'expire_time', 'value': '截止日期'},
{'key': 'created_at', 'value': '创建时间'},
{'key': 'key', 'value': '操作'},
{'key': 'id', 'value': '详情'},
]
columns = render_dynamic_columns(render_columns)
data = {'columns': columns, 'data': serializer.data}
return self.get_paginated_response(data)
class ReleaseVersionsCreate(CreateAPIView):
"""创建版本"""
permission_classes = (permissions.CanCreateVersionsPermission,)
serializer_class = serializers.ReleaseVersionsCreateSerializer
def create(self, request, *args, **kwargs):
serializer = self.get_serializer(data=request.data)
if serializer.is_valid():
self.perform_create(serializer)
return JsonResponseV1(message="创建成功")
return JsonResponseV1(code='0001', message=serializer.errors, flat=True)
class ReleaseVersionsUpdate(UpdateAPIView):
"""更新版本号,该类只更新单条记录"""
permission_classes = (permissions.CanUpdateVersionsPermission,)
def put(self, request, *args, **kwargs):
serializer = serializers.ReleaseVersionsSerializer(
instance=models.ReleaseVersions.objects.get(pk=kwargs['key']), # 返回单条记录
data=request.data
)
if serializer.is_valid():
serializer.save()
return JsonResponseV1(message="更新成功")
return JsonResponseV1(code='0001', message=serializer.errors, flat=True)
class ReleaseVersionsDelete(DestroyAPIView):
"""删除版本"""
permission_classes = (permissions.CanDeleteVersionsPermission,)
queryset = models.ReleaseVersions.objects.all()
lookup_field = 'id' # 默认为主键,可不写
def destroy(self, request, *args, **kwargs):
instance = self.get_object()
self.perform_destroy(instance)
return JsonResponseV1(message="删除成功")
class ReleaseVersionsView(APIView):
"""获取指定版本内工单在所有环境的进度"""
def get(self, request, *args, **kwargs):
# 获取版本对应的主键
version = kwargs.get('version')
version_id = models.ReleaseVersions.objects.get(version=version).pk
# 获取环境,行转为动态列
obj = models.DbEnvironment.objects.values('id', 'name')
row2columns = ''
for row in obj:
row2columns += f"max(if(env_id={row['id']}, progress, -1)) as {row['name']},"
# 获取任务下所有工单分别在各个环境中的状态,此处的环境为动态环境
# id没有实际意义
query = f"select " + row2columns + \
f"substring(MD5(RAND()),1,20) as id,title as escape_title,order_id, applicant " \
f"from yasql_dborders where version_id='{version_id}' group by escape_title,order_id,applicant"
rawquery = models.DbOrders.objects.raw(query)
# 获取环境列名
dynamic_columns = list(rawquery.columns)[:-4]
data = []
for row in rawquery:
columns = {
'id': row.id,
'escape_title': row.escape_title,
'order_id': row.order_id,
'applicant': row.applicant,
}
for col in dynamic_columns:
columns[col] = getattr(row, col)
data.append(columns)
render_columns = [
{'key': 'escape_title', 'ellipsis': True, 'value': '标题'},
{'key': 'applicant', 'value': '申请人'},
]
render_columns.extend([{'key': x, 'value': x} for x in dynamic_columns])
columns = render_dynamic_columns(render_columns)
data = {'columns': columns, 'data': data}
return JsonResponseV1(data=data)
| zh | 0.974127 | # -*- coding:utf-8 -*- # edit by fuzongfei # Create your views here. # 获取工单环境 # 获取指定环境指定用途的schemas列表 SQL工单详情 更新SQL工单状态,如:审核,关闭等 根据order id返回taskid # 数据隐藏按钮打开了 # 仅允许申请人、审核人、复核人和超权用户查看数据 # 自定义num,前台显示序号使用 # 自定义result # 数据隐藏按钮打开了 # 仅允许申请人、审核人、复核人和超权用户查看数据 # 自定义num,前台显示序号使用 # 自定义execute # 自定义result SQL工单详情 下载导出文件 获取上线版本号,提交工单使用 获取上线版本号列表,管理上线版本号使用 创建版本 更新版本号,该类只更新单条记录 # 返回单条记录 删除版本 # 默认为主键,可不写 获取指定版本内工单在所有环境的进度 # 获取版本对应的主键 # 获取环境,行转为动态列 # 获取任务下所有工单分别在各个环境中的状态,此处的环境为动态环境 # id没有实际意义 # 获取环境列名 | 2.036521 | 2 |
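ReleaseVersionsView above pivots each work order's per-environment progress into columns with a raw max(if(env_id=..., progress, -1)) query. The snippet below is only an illustrative sketch of that same row-to-column pivot in plain Python; the names pivot_progress, rows and environments are invented for the example and do not appear in the project.

# Illustrative sketch (not project code): the row-to-column pivot that
# ReleaseVersionsView performs in SQL, done in plain Python.
def pivot_progress(rows, environments):
    """rows: dicts with 'order_id', 'title', 'applicant', 'env' and 'progress' keys."""
    pivoted = {}
    for row in rows:
        entry = pivoted.setdefault(row['order_id'], {
            'order_id': row['order_id'],
            'escape_title': row['title'],
            'applicant': row['applicant'],
            # -1 marks "no work order in this environment", mirroring max(if(...), -1)
            **{env: -1 for env in environments},
        })
        entry[row['env']] = max(entry[row['env']], row['progress'])
    return list(pivoted.values())

if __name__ == '__main__':
    demo = [
        {'order_id': 1, 'title': 'add index', 'applicant': 'alice', 'env': 'test', 'progress': 4},
        {'order_id': 1, 'title': 'add index', 'applicant': 'alice', 'env': 'prod', 'progress': 1},
    ]
    print(pivot_progress(demo, ['test', 'staging', 'prod']))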
perp_adj.py | shmakn99/Knowledge-Graph-VG | 0 | 8486 | <gh_stars>0
import glove_util as gut
import numpy as np
from sklearn.decomposition import TruncatedSVD
import json
with open('freq_count_pred.json') as f:
freq_count_pred = json.load(f)
def get_pc(sentences):
svd = TruncatedSVD(n_components=1, n_iter=7, random_state=0)
svd.fit(sentences)
return svd.components_
def weighted_avg(predicate,a,dim):
predicate = predicate.lower().strip().split()
if len(predicate) == 1:
return gut.glove(predicate[0],dim)
else:
support = np.zeros(dim)
for word in predicate:
vector = gut.glove(word,dim)
if len(vector) == 0:
vector = np.zeros(300)
support += (a/(a+freq_count_pred[word]))*vector
return support
with open('relationships.json') as f:
relationships = json.load(f)
predicate_embedding = {}
sentences = []
i = 0
for image in relationships:
i+=1
if i%1000 == 0:
print (i)
for relation in image['relationships']:
w_avg = weighted_avg(relation['predicate'],0.001,300)
sentences.append(w_avg)
predicate_embedding[relation['relationship_id']] = w_avg
pc = get_pc(np.array(sentences))[0]
projection_space = np.outer(pc,pc)
i = 0
for image in relationships:
i+=1
if i%1000 == 0:
print (i)
for relation in image['relationships']:
predicate_embedding[relation['relationship_id']] = predicate_embedding[relation['relationship_id']] - np.matmul(projection_space,predicate_embedding[relation['relationship_id']])
with open('predicate_embedding_300.json','w') as f:
json.dump(predicate_embedding,f)
| import glove_util as gut
import numpy as np
from sklearn.decomposition import TruncatedSVD
import json
with open('freq_count_pred.json') as f:
freq_count_pred = json.load(f)
def get_pc(sentences):
svd = TruncatedSVD(n_components=1, n_iter=7, random_state=0)
svd.fit(sentences)
return svd.components_
def weighted_avg(predicate,a,dim):
predicate = predicate.lower().strip().split()
if len(predicate) == 1:
return gut.glove(predicate[0],dim)
else:
support = np.zeros(dim)
for word in predicate:
vector = gut.glove(word,dim)
if len(vector) == 0:
vector = np.zeros(300)
support += (a/(a+freq_count_pred[word]))*vector
return support
with open('relationships.json') as f:
relationships = json.load(f)
predicate_embedding = {}
sentences = []
i = 0
for image in relationships:
i+=1
if i%1000 == 0:
print (i)
for relation in image['relationships']:
w_avg = weighted_avg(relation['predicate'],0.001,300)
sentences.append(w_avg)
predicate_embedding[relation['relationship_id']] = w_avg
pc = get_pc(np.array(sentences))[0]
projection_space = np.outer(pc,pc)
i = 0
for image in relationships:
i+=1
if i%1000 == 0:
print (i)
for relation in image['relationships']:
predicate_embedding[relation['relationship_id']] = predicate_embedding[relation['relationship_id']] - np.matmul(projection_space,predicate_embedding[relation['relationship_id']])
with open('predicate_embedding_300.json','w') as f:
json.dump(predicate_embedding,f) | none | 1 | 2.219702 | 2 |
|
crypt.py | ElyTgy/VaultDB | 2 | 8487 | # Importing Fernet class
from cryptography.fernet import Fernet
# Importing dump and load function
from pickle import dump,load
# To generate a strong pw
def generate_pw():
from random import choice
choices = list("ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789!@#$%^&*()_-+=.,/<>?;:\\|[]}{")
pw = ""
for i in range(25):
pw += choice(choices)
return pw
del pw,choice
# To get master pw from the file
def get_masterpw():
# Opening the file storing master pw
with open("key.key",'rb') as file:
# Loading data
keys = load(file)
# Master pw is converted from bytes to string
key = keys[0].decode()
del keys
# Return keys
return key
# To get key from the file
def get_key():
# Opening the file storing master pw
with open("key.key",'rb') as file:
# Loading data
keys = load(file)
# Key is converted from bytes to string
key = keys[1].decode()
del keys
# Return keys
return key
# To store master pw in the file
def add_keys(masterpw,key):
# Opening the file to store master pw
with open("key.key",'wb') as file:
# Making list of value to upload
# key is already in bytes # Converting to bytes is not necessary
keys = [masterpw.encode(),key]
# Dumping the master pw to file
dump(keys,file)
# Deleting the variable
del masterpw,key,keys
# Checking if user is running program for first time
def is_1st_time():
# Trying to open bytes file
# If file is opened means program was executed once or more
try:
with open("key.key",'rb') as file:
pass
return False
# FileNotFound means its first time
# Or either its not in directory of this file or user deleted it :) #
except FileNotFoundError:
return True
# Function to copy pw to clipboard
def copy2clip(pw):
# Importing copy function
from pyperclip import copy
# Copying pw to clipboard
copy(pw)
del pw,copy
# Encrypting the text
def encrypt(text, key):
try:
# Defining Fernet(class) using the key
fernet = Fernet(key)
# Encryption # Text is converted to bytes
encrypted_text = fernet.encrypt(text.encode())
del key
# Return encrypted text
return encrypted_text
# Error message if any
except Exception as e:
print(f"Error occured:{e}\nProcess failed!")
# Decrypting the text
def decrypt(text, key):
try:
# Defining Fernet(class) using the key
fernet = Fernet(key)
# Decryption # Text is converted from bytes to string
decrypted_text = fernet.decrypt(text).decode()
del key
# Return decrypted text
return decrypted_text
# Error message if any
except Exception as e:
print(f"Error occured:{e}\nProcess failed!") | # Importing Fernet class
from cryptography.fernet import Fernet
# Importing dump and load function
from pickle import dump,load
# To generate a strong pw
def generate_pw():
from random import choice
choices = list("ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789!@#$%^&*()_-+=.,/<>?;:\\|[]}{")
pw = ""
for i in range(25):
pw += choice(choices)
return pw
del pw,choice
# To get master pw from the file
def get_masterpw():
# Opening the file storing master pw
with open("key.key",'rb') as file:
# Loading data
keys = load(file)
# Master pw is converted from bytes to string
key = keys[0].decode()
del keys
# Return keys
return key
# To get key from the file
def get_key():
# Opening the file storing master pw
with open("key.key",'rb') as file:
# Loading data
keys = load(file)
# Key is converted from bytes to string
key = keys[1].decode()
del keys
# Return keys
return key
# To store master pw in the file
def add_keys(masterpw,key):
# Opening the file to store master pw
with open("key.key",'wb') as file:
# Making list of value to upload
# key is already in bytes # Converting to bytes is not necessary
keys = [masterpw.encode(),key]
# Dumping the master pw to file
dump(keys,file)
# Deleting the variable
del masterpw,key,keys
# Checking if user is running program for first time
def is_1st_time():
# Trying to open bytes file
# If file is opened means program was executed once or more
try:
with open("key.key",'rb') as file:
pass
return False
# FileNotFound means its first time
# Or either its not in directory of this file or user deleted it :) #
except FileNotFoundError:
return True
# Function to copy pw to clipboard
def copy2clip(pw):
# Importing copy function
from pyperclip import copy
# Copying pw to clipboard
copy(pw)
del pw,copy
# Encrypting the text
def encrypt(text, key):
try:
# Defining Fernet(class) using the key
fernet = Fernet(key)
# Encryption # Text is converted to bytes
encrypted_text = fernet.encrypt(text.encode())
del key
# Return encrypted text
return encrypted_text
# Error message if any
except Exception as e:
print(f"Error occured:{e}\nProcess failed!")
# Decrypting the text
def decrypt(text, key):
try:
# Defining Fernet(class) using the key
fernet = Fernet(key)
# Decryption # Text is converted from bytes to string
decrypted_text = fernet.decrypt(text).decode()
del key
# Return decrypted text
return decrypted_text
# Error message if any
except Exception as e:
print(f"Error occured:{e}\nProcess failed!") | en | 0.825916 | # Importing Fernet class # Importing dump and load function # To generate a strong pw #$%^&*()_-+=.,/<>?;:\\|[]}{") # To get master pw from the file # Opening the file storing master pw # Loading data # Master pw is converted from bytes to string # Return keys # To get key from the file # Opening the file storing master pw # Loading data # Key is converted from bytes to string # Return keys # To store master pw in the file # Opening the file to store master pw # Making list of value to upload # key is already in bytes # Converting to bytes is not necessary # Dumping the master pw to file # Deleting the variable # Checking if user is running program for first time # Trying to open bytes file # If file is opened means program was executed once or more # FileNotFound means its first time # Or either its not in directory of this file or user deleted it :) # # Function to copy pw to clipboard # Importing copy function # Copying pw to clipboard # Encrypting the text # Defining Fernet(class) using the key # Encryption # Text is converted to bytes # Return encrypted text # Error message if any # Decrypting the text # Defining Fernet(class) using the key # Decryption # Text is converted from bytes to string # Return decrypted text # Error message if any | 3.486279 | 3 |
oecp/executor/null.py | openeuler-mirror/oecp | 0 | 8488 | # -*- encoding=utf-8 -*-
"""
# **********************************************************************************
# Copyright (c) Huawei Technologies Co., Ltd. 2020-2020. All rights reserved.
# [oecp] is licensed under the Mulan PSL v1.
# You can use this software according to the terms and conditions of the Mulan PSL v1.
# You may obtain a copy of Mulan PSL v1 at:
# http://license.coscl.org.cn/MulanPSL
# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR FIT FOR A PARTICULAR
# PURPOSE.
# See the Mulan PSL v1 for more details.
# **********************************************************************************
"""
from oecp.executor.base import CompareExecutor
class NullExecutor(CompareExecutor):
def __init__(self, dump_a, dump_b, config=None):
super(NullExecutor, self).__init__(dump_a, dump_b, config)
if hasattr(dump_a, 'run') and hasattr(dump_b, 'run'):
dump_a.run()
dump_b.run()
def run(self):
return []
| # -*- encoding=utf-8 -*-
"""
# **********************************************************************************
# Copyright (c) Huawei Technologies Co., Ltd. 2020-2020. All rights reserved.
# [oecp] is licensed under the Mulan PSL v1.
# You can use this software according to the terms and conditions of the Mulan PSL v1.
# You may obtain a copy of Mulan PSL v1 at:
# http://license.coscl.org.cn/MulanPSL
# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR FIT FOR A PARTICULAR
# PURPOSE.
# See the Mulan PSL v1 for more details.
# **********************************************************************************
"""
from oecp.executor.base import CompareExecutor
class NullExecutor(CompareExecutor):
def __init__(self, dump_a, dump_b, config=None):
super(NullExecutor, self).__init__(dump_a, dump_b, config)
if hasattr(dump_a, 'run') and hasattr(dump_b, 'run'):
dump_a.run()
dump_b.run()
def run(self):
return []
| en | 0.585607 | # -*- encoding=utf-8 -*- # ********************************************************************************** # Copyright (c) Huawei Technologies Co., Ltd. 2020-2020. All rights reserved. # [oecp] is licensed under the Mulan PSL v1. # You can use this software according to the terms and conditions of the Mulan PSL v1. # You may obtain a copy of Mulan PSL v1 at: # http://license.coscl.org.cn/MulanPSL # THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR FIT FOR A PARTICULAR # PURPOSE. # See the Mulan PSL v1 for more details. # ********************************************************************************** | 1.914289 | 2 |
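NullExecutor above only triggers both dumps and reports no differences. As a rough illustration of the contract it relies on, anything exposing a run() method can be passed as dump_a or dump_b; the FakeDump class below is invented for the example and is not part of oecp.

# Invented stand-in showing the minimal interface NullExecutor expects
# from dump_a / dump_b: an object with a run() method.
class FakeDump:
    def __init__(self, name):
        self.name = name

    def run(self):
        print(f"collecting dump for {self.name}")

# NullExecutor(FakeDump("rpm-a"), FakeDump("rpm-b")).run() would run both
# dumps and then return [], i.e. "nothing to compare".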
courses/models.py | Biswa5812/CaramelIT-Django-Backend | 1 | 8489 | from django.db import models
from django.utils import timezone
# Course Category
class Course_category(models.Model):
category_id = models.AutoField(primary_key=True)
category_name = models.CharField(max_length=100)
date_of_creation = models.DateTimeField(default=timezone.now)
# Course Subcategory
class Course_subcategory(models.Model):
subcategory_id = models.AutoField(primary_key=True)
category = models.ForeignKey(Course_category, on_delete=models.CASCADE)
subcategory_name = models.CharField(max_length=100)
date_of_creation = models.DateTimeField(default=timezone.now)
# Course
class Course(models.Model):
course_id = models.AutoField(primary_key=True)
subcategory = models.ForeignKey(Course_subcategory, on_delete=models.CASCADE)
subcategory_name = models.CharField(max_length=100)
category_name = models.CharField(max_length=100)
course_name = models.CharField(max_length=100)
date_of_creation = models.DateTimeField(default=timezone.now)
course_description = models.TextField(default="")
course_difficulty = models.CharField(max_length=30)
# Course resources
class Course_resource(models.Model):
course = models.ForeignKey(Course, on_delete=models.CASCADE)
resourse_content = models.TextField(default="NIL")
resourse_name = models.CharField(max_length=100)
resourse_link = models.CharField(max_length=200)
resourse_length = models.CharField(max_length=10)
date_of_creation = models.DateTimeField(default=timezone.now)
| from django.db import models
from django.utils import timezone
# Course Category
class Course_category(models.Model):
category_id = models.AutoField(primary_key=True)
category_name = models.CharField(max_length=100)
date_of_creation = models.DateTimeField(default=timezone.now)
# Course Subcategory
class Course_subcategory(models.Model):
subcategory_id = models.AutoField(primary_key=True)
category = models.ForeignKey(Course_category, on_delete=models.CASCADE)
subcategory_name = models.CharField(max_length=100)
date_of_creation = models.DateTimeField(default=timezone.now)
# Course
class Course(models.Model):
course_id = models.AutoField(primary_key=True)
subcategory = models.ForeignKey(Course_subcategory, on_delete=models.CASCADE)
subcategory_name = models.CharField(max_length=100)
category_name = models.CharField(max_length=100)
course_name = models.CharField(max_length=100)
date_of_creation = models.DateTimeField(default=timezone.now)
course_description = models.TextField(default="")
course_difficulty = models.CharField(max_length=30)
# Course resources
class Course_resource(models.Model):
course = models.ForeignKey(Course, on_delete=models.CASCADE)
resourse_content = models.TextField(default="NIL")
resourse_name = models.CharField(max_length=100)
resourse_link = models.CharField(max_length=200)
resourse_length = models.CharField(max_length=10)
date_of_creation = models.DateTimeField(default=timezone.now)
| en | 0.599675 | # Course Category # Course Subcategory # Course # Course resources | 2.207999 | 2 |
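The models above form a category → subcategory → course → resource chain. The sketch below creates one linked row of each; every field value is made up, and it assumes the app is importable as courses with migrations already applied.

# Illustrative only: creating one linked row of each model above
# (values invented; requires a configured Django project).
from courses.models import Course_category, Course_subcategory, Course, Course_resource

category = Course_category.objects.create(category_name="Programming")
subcategory = Course_subcategory.objects.create(category=category, subcategory_name="Python")
course = Course.objects.create(
    subcategory=subcategory,
    subcategory_name=subcategory.subcategory_name,  # the schema keeps denormalised name copies
    category_name=category.category_name,
    course_name="Django Basics",
    course_description="Introductory course",
    course_difficulty="Beginner",
)
Course_resource.objects.create(
    course=course,
    resourse_name="Lesson 1",
    resourse_link="https://example.com/lesson1",
    resourse_length="10:00",
)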
dino/validation/events/message/limit_msg_length.py | thenetcircle/dino | 150 | 8490 | <reponame>thenetcircle/dino
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import traceback
from yapsy.IPlugin import IPlugin
from activitystreams.models.activity import Activity
from dino import utils
from dino.config import ErrorCodes
from dino.config import ConfigKeys
from dino.environ import GNEnvironment
logger = logging.getLogger(__name__)
__author__ = '<NAME> <<EMAIL>>'
class OnMessageCheckContentLength(IPlugin):
def __init__(self):
super(OnMessageCheckContentLength, self).__init__()
self.env = None
self.enabled = False
self.max_length = 1000
def setup(self, env: GNEnvironment):
self.env = env
validation_config = self.env.config.get(ConfigKeys.VALIDATION)
if 'on_message' not in validation_config or 'limit_msg_length' not in validation_config.get('on_message'):
logger.info('no config enabled for plugin not_full, ignoring plugin')
return
on_create_config = validation_config.get('on_message').get('limit_msg_length')
self.enabled = True
self.max_length = on_create_config.get(ConfigKeys.MAX_MSG_LENGTH, 1000)
def _process(self, data: dict, activity: Activity):
message = activity.object.content
if message is None or len(message.strip()) == 0:
return True, None, None
if not utils.is_base64(message):
return False, ErrorCodes.NOT_BASE64, \
'invalid message content, not base64 encoded'
message = utils.b64d(message)
if len(message) > self.max_length:
return False, ErrorCodes.MSG_TOO_LONG, \
'message content needs to be shorter than %s characters' % self.max_length
return True, None, None
def __call__(self, *args, **kwargs) -> (bool, str):
if not self.enabled:
return
data, activity = args[0], args[1]
try:
return self._process(data, activity)
except Exception as e:
logger.error('could not execute plugin not_full: %s' % str(e))
logger.exception(traceback.format_exc())
return False, ErrorCodes.VALIDATION_ERROR, 'could not execute validation plugin not_full'
| # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import traceback
from yapsy.IPlugin import IPlugin
from activitystreams.models.activity import Activity
from dino import utils
from dino.config import ErrorCodes
from dino.config import ConfigKeys
from dino.environ import GNEnvironment
logger = logging.getLogger(__name__)
__author__ = '<NAME> <<EMAIL>>'
class OnMessageCheckContentLength(IPlugin):
def __init__(self):
super(OnMessageCheckContentLength, self).__init__()
self.env = None
self.enabled = False
self.max_length = 1000
def setup(self, env: GNEnvironment):
self.env = env
validation_config = self.env.config.get(ConfigKeys.VALIDATION)
if 'on_message' not in validation_config or 'limit_msg_length' not in validation_config.get('on_message'):
logger.info('no config enabled for plugin not_full, ignoring plugin')
return
on_create_config = validation_config.get('on_message').get('limit_msg_length')
self.enabled = True
self.max_length = on_create_config.get(ConfigKeys.MAX_MSG_LENGTH, 1000)
def _process(self, data: dict, activity: Activity):
message = activity.object.content
if message is None or len(message.strip()) == 0:
return True, None, None
if not utils.is_base64(message):
return False, ErrorCodes.NOT_BASE64, \
'invalid message content, not base64 encoded'
message = utils.b64d(message)
if len(message) > self.max_length:
return False, ErrorCodes.MSG_TOO_LONG, \
'message content needs to be shorter than %s characters' % self.max_length
return True, None, None
def __call__(self, *args, **kwargs) -> (bool, str):
if not self.enabled:
return
data, activity = args[0], args[1]
try:
return self._process(data, activity)
except Exception as e:
logger.error('could not execute plugin not_full: %s' % str(e))
logger.exception(traceback.format_exc())
return False, ErrorCodes.VALIDATION_ERROR, 'could not execute validation plugin not_full' | en | 0.859654 | # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. | 1.732624 | 2 |
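OnMessageCheckContentLength above verifies that the message body is base64 and, once decoded, no longer than the configured maximum. Below is a standalone approximation of that check using only the standard library; dino's own utils.is_base64 and utils.b64d helpers are not reproduced, so the stdlib calls are an assumption about their behaviour.

# Standalone sketch of the length check, using the stdlib in place of
# dino's utils.is_base64 / utils.b64d helpers.
import base64

MAX_MSG_LENGTH = 1000

def check_message(b64_content):
    if not b64_content or not b64_content.strip():
        return True, None
    try:
        decoded = base64.b64decode(b64_content, validate=True).decode('utf-8')
    except ValueError:   # covers binascii.Error and UnicodeDecodeError
        return False, 'invalid message content, not base64 encoded'
    if len(decoded) > MAX_MSG_LENGTH:
        return False, 'message content needs to be shorter than %s characters' % MAX_MSG_LENGTH
    return True, None

print(check_message(base64.b64encode(b'hello').decode()))   # (True, None)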
zabbix/prom2zabbix.py | tldr-devops/telegraf-monitoring-agent-setup | 0 | 8491 | <filename>zabbix/prom2zabbix.py<gh_stars>0
#!/usr/bin/env python
# Script for parsing prometheus metrics format and send it into zabbix server
# MIT License
# https://github.com/Friz-zy/telegraf-monitoring-agent-setup
import re
import os
import sys
import time
import json
import socket
import optparse
try:
from urllib.request import urlopen
except ImportError:
from urllib import urlopen
METRICS = {
'default': {
'sort_labels': ['name', 'id', 'host', 'path', 'device', 'source', 'cpu'],
},
'docker_container_': {
'sort_labels': ['host', 'source', 'device', 'cpu'],
},
}
def parse(source='http://127.0.0.1:9273/metrics'):
# https://prometheus.io/docs/practices/naming/
# https://prometheus.io/docs/concepts/data_model/#metric-names-and-labels
regex = re.compile(r'^(?P<metric>[a-zA-Z_:][a-zA-Z0-9_:]*)(?P<labels>{.*})?\s+(?P<value>.+)(\s+(?P<timestamp>\w+))?$')
help_line = ''
type_line = ''
metrics = []
text = urlopen(source).read()
for line in text.splitlines():
line = line.decode("utf-8")
if line[0:6] == '# HELP':
help_line = line
continue
elif line[0:6] == '# TYPE':
type_line = line
continue
elif line[0] == '#':
continue
metric = regex.match(line).groupdict()
metric['line_raw'] = line
metric['help'] = help_line
metric['type'] = type_line
metric['source'] = source
metrics.append(metric)
return metrics
def main():
parser = optparse.OptionParser()
source = 'http://127.0.0.1:9273/metrics'
destination = '/tmp/prom2zabbix'
parser.set_defaults(source=source,
destination=destination,
hostname='')
parser.add_option("-s", "--source", dest="source",
help="Prometheus source, default is " + source)
parser.add_option("-d", "--destination", dest="destination",
help="Output .keys and .metrics files pattern, default is " + destination)
(options, args) = parser.parse_args()
seconds = int(time.time())
metrics = parse(options.source)
data = {"data": []}
keys = {}
# fill and prepare metric
for metric in metrics:
if not metric['timestamp']:
metric['timestamp'] = seconds
if not metric['labels']:
metric['labels'] = '{}'
else:
            # limit length of metric because of zabbix limit
# for graph name even 132 char is too long
if len(metric['metric']) + len(metric['labels']) > 200:
metric['original_labels'] = metric['labels'].replace(',', ';')
short_labels = []
for label in metric['labels'].lstrip('{').rstrip('}').split(','):
for key in METRICS.keys():
if key in metric['metric'] and key != 'default':
for l in METRICS[key]['sort_labels']:
if l in label:
short_labels.append(label)
break
metric['labels'] = '{' + ';'.join(short_labels) + '}'
else:
metric['labels'] = metric['labels'].replace(',', ';')
# hacks
if metric['metric'] == 'procstat_created_at':
metric['value'] = metric['value'].replace('e+18', 'e+09')
m = {}
for k, v in metric.items():
m["{#%s}" % k.upper()] = v
data["data"].append(m)
# addition for metric labels macro
if metric['metric'] not in keys:
keys[metric['metric']] = {"data": []}
keys[metric['metric']]["data"].append({
"{#LABELS}": metric['labels']})
# write metrics
with open(options.destination + '.metrics', 'w') as f:
for metric in metrics:
# https://www.zabbix.com/documentation/3.0/manpages/zabbix_sender
escaped_labels = metric['labels'].replace('\\', '\\\\').replace('"', '\\"')
f.write('- "telegraf[%s,%s]" %s %s\n' % (
metric['metric'],
escaped_labels,
metric['timestamp'],
metric['value']))
# write keys
with open(options.destination + '.keys', 'w') as f:
for metric in keys:
f.write('- "telegraf[keys, %s]" %s "%s"\n' % (
metric,
seconds,
json.dumps(keys[metric]
).replace('\\', '\\\\').replace('"', '\\"')))
data = json.dumps(data)
escaped_data = data.replace('\\', '\\\\').replace('"', '\\"')
f.write('- "telegraf[keys]" %s "%s"\n' % (
seconds,
escaped_data))
# print(data)
if __name__ == "__main__":
main()
| <filename>zabbix/prom2zabbix.py<gh_stars>0
#!/usr/bin/env python
# Script for parsing prometheus metrics format and send it into zabbix server
# MIT License
# https://github.com/Friz-zy/telegraf-monitoring-agent-setup
import re
import os
import sys
import time
import json
import socket
import optparse
try:
from urllib.request import urlopen
except ImportError:
from urllib import urlopen
METRICS = {
'default': {
'sort_labels': ['name', 'id', 'host', 'path', 'device', 'source', 'cpu'],
},
'docker_container_': {
'sort_labels': ['host', 'source', 'device', 'cpu'],
},
}
def parse(source='http://127.0.0.1:9273/metrics'):
# https://prometheus.io/docs/practices/naming/
# https://prometheus.io/docs/concepts/data_model/#metric-names-and-labels
regex = re.compile(r'^(?P<metric>[a-zA-Z_:][a-zA-Z0-9_:]*)(?P<labels>{.*})?\s+(?P<value>.+)(\s+(?P<timestamp>\w+))?$')
help_line = ''
type_line = ''
metrics = []
text = urlopen(source).read()
for line in text.splitlines():
line = line.decode("utf-8")
if line[0:6] == '# HELP':
help_line = line
continue
elif line[0:6] == '# TYPE':
type_line = line
continue
elif line[0] == '#':
continue
metric = regex.match(line).groupdict()
metric['line_raw'] = line
metric['help'] = help_line
metric['type'] = type_line
metric['source'] = source
metrics.append(metric)
return metrics
def main():
parser = optparse.OptionParser()
source = 'http://127.0.0.1:9273/metrics'
destination = '/tmp/prom2zabbix'
parser.set_defaults(source=source,
destination=destination,
hostname='')
parser.add_option("-s", "--source", dest="source",
help="Prometheus source, default is " + source)
parser.add_option("-d", "--destination", dest="destination",
help="Output .keys and .metrics files pattern, default is " + destination)
(options, args) = parser.parse_args()
seconds = int(time.time())
metrics = parse(options.source)
data = {"data": []}
keys = {}
# fill and prepare metric
for metric in metrics:
if not metric['timestamp']:
metric['timestamp'] = seconds
if not metric['labels']:
metric['labels'] = '{}'
else:
            # limit length of metric because of zabbix limit
# for graph name even 132 char is too long
if len(metric['metric']) + len(metric['labels']) > 200:
metric['original_labels'] = metric['labels'].replace(',', ';')
short_labels = []
for label in metric['labels'].lstrip('{').rstrip('}').split(','):
for key in METRICS.keys():
if key in metric['metric'] and key != 'default':
for l in METRICS[key]['sort_labels']:
if l in label:
short_labels.append(label)
break
metric['labels'] = '{' + ';'.join(short_labels) + '}'
else:
metric['labels'] = metric['labels'].replace(',', ';')
# hacks
if metric['metric'] == 'procstat_created_at':
metric['value'] = metric['value'].replace('e+18', 'e+09')
m = {}
for k, v in metric.items():
m["{#%s}" % k.upper()] = v
data["data"].append(m)
# addition for metric labels macro
if metric['metric'] not in keys:
keys[metric['metric']] = {"data": []}
keys[metric['metric']]["data"].append({
"{#LABELS}": metric['labels']})
# write metrics
with open(options.destination + '.metrics', 'w') as f:
for metric in metrics:
# https://www.zabbix.com/documentation/3.0/manpages/zabbix_sender
escaped_labels = metric['labels'].replace('\\', '\\\\').replace('"', '\\"')
f.write('- "telegraf[%s,%s]" %s %s\n' % (
metric['metric'],
escaped_labels,
metric['timestamp'],
metric['value']))
# write keys
with open(options.destination + '.keys', 'w') as f:
for metric in keys:
f.write('- "telegraf[keys, %s]" %s "%s"\n' % (
metric,
seconds,
json.dumps(keys[metric]
).replace('\\', '\\\\').replace('"', '\\"')))
data = json.dumps(data)
escaped_data = data.replace('\\', '\\\\').replace('"', '\\"')
f.write('- "telegraf[keys]" %s "%s"\n' % (
seconds,
escaped_data))
# print(data)
if __name__ == "__main__":
main()
| en | 0.559381 | #!/usr/bin/env python # Script for parsing prometheus metrics format and send it into zabbix server # MIT License # https://github.com/Friz-zy/telegraf-monitoring-agent-setup # https://prometheus.io/docs/practices/naming/ # https://prometheus.io/docs/concepts/data_model/#metric-names-and-labels # fill and prepare metric # limit lenght of metric because of zabbix limit # for graph name even 132 char is too long # hacks #%s}" % k.upper()] = v # addition for metric labels macro #LABELS}": metric['labels']}) # write metrics # https://www.zabbix.com/documentation/3.0/manpages/zabbix_sender # write keys # print(data) | 2.425052 | 2 |
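prom2zabbix.py above relies on a single regular expression to split each Prometheus exposition line into metric name, labels and value. The short demonstration below runs that same regex on a sample line; the sample metric is invented.

# Demonstration of the exposition-format regex used above on a sample line.
import re

regex = re.compile(r'^(?P<metric>[a-zA-Z_:][a-zA-Z0-9_:]*)(?P<labels>{.*})?\s+(?P<value>.+)(\s+(?P<timestamp>\w+))?$')
line = 'cpu_usage_idle{cpu="cpu-total",host="web01"} 97.5'
print(regex.match(line).groupdict())
# {'metric': 'cpu_usage_idle', 'labels': '{cpu="cpu-total",host="web01"}',
#  'value': '97.5', 'timestamp': None}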
NAS/run_NAS.py | gatech-sysml/CompOFA | 20 | 8492 | <reponame>gatech-sysml/CompOFA<gh_stars>10-100
# CompOFA – Compound Once-For-All Networks for Faster Multi-Platform Deployment
# Under blind review at ICLR 2021: https://openreview.net/forum?id=IgIk8RRT-Z
#
# Implementation based on:
# Once for All: Train One Network and Specialize it for Efficient Deployment
# <NAME>, <NAME>, <NAME>, <NAME>, <NAME>
# International Conference on Learning Representations (ICLR), 2020.
import os
import sys
import torch
import time
import math
import copy
import random
import argparse
import torch.nn as nn
import numpy as np
import pandas as pd
from torchvision import transforms, datasets
from matplotlib import pyplot as plt
sys.path.append("..")
from ofa.model_zoo import ofa_net
from ofa.utils import download_url
from accuracy_predictor import AccuracyPredictor
from flops_table import FLOPsTable
from latency_table import LatencyTable
from evolution_finder import EvolutionFinder
from imagenet_eval_helper import evaluate_ofa_subnet, evaluate_ofa_specialized
parser = argparse.ArgumentParser()
parser.add_argument(
'-n',
'--net',
metavar='OFANET',
help='OFA network',
required=True)
parser.add_argument(
'-t',
'--target-hardware',
metavar='TARGET_HARDWARE',
help='Target Hardware',
required=True)
parser.add_argument(
'--imagenet-path',
metavar='IMAGENET_PATH',
help='The path of ImageNet',
type=str,
required=True)
args = parser.parse_args()
arch = {'compofa' : ('compofa', 'model_best_compofa_simple.pth.tar'),
'compofa-elastic' : ('compofa-elastic', 'model_best_compofa_simple_elastic.pth.tar'),
'ofa_mbv3_d234_e346_k357_w1.0' : ('ofa', 'ofa_mbv3_d234_e346_k357_w1.0'),
}
hardware_latency = {'note10' : [15, 20, 25, 30],
'gpu' : [15, 25, 35, 45],
'cpu' : [12, 15, 18, 21]}
MODEL_DIR = '../ofa/checkpoints/%s' % (arch[args.net][1])
imagenet_data_path = args.imagenet_path
# imagenet_data_path = '/srv/data/datasets/ImageNet/'
# set random seed
random_seed = 3
random.seed(random_seed)
np.random.seed(random_seed)
torch.manual_seed(random_seed)
print('Successfully imported all packages and configured random seed to %d!'%random_seed)
os.environ['CUDA_VISIBLE_DEVICES'] = '0'
cuda_available = torch.cuda.is_available()
if cuda_available:
torch.backends.cudnn.enabled = True
torch.backends.cudnn.benchmark = True
torch.cuda.manual_seed(random_seed)
print('Using GPU.')
else:
print('Using CPU.')
# Initialize the OFA Network
ofa_network = ofa_net(args.net, model_dir=MODEL_DIR, pretrained=True)
if args.target_hardware == 'cpu':
ofa_network = ofa_network.cpu()
else:
ofa_network = ofa_network.cuda()
print('The OFA Network is ready.')
# Carry out data transforms
if cuda_available:
def build_val_transform(size):
return transforms.Compose([
transforms.Resize(int(math.ceil(size / 0.875))),
transforms.CenterCrop(size),
transforms.ToTensor(),
transforms.Normalize(
mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225]
),
])
data_loader = torch.utils.data.DataLoader(
datasets.ImageFolder(
root=os.path.join(imagenet_data_path, 'val'),
transform=build_val_transform(224)
),
batch_size=250, # test batch size
shuffle=True,
num_workers=16, # number of workers for the data loader
pin_memory=True,
drop_last=False,
)
print('The ImageNet dataloader is ready.')
else:
data_loader = None
print('Since GPU is not found in the environment, we skip all scripts related to ImageNet evaluation.')
# set up the accuracy predictor
accuracy_predictor = AccuracyPredictor(
pretrained=True,
device='cuda:0' if cuda_available else 'cpu'
)
print('The accuracy predictor is ready!')
print(accuracy_predictor.model)
# set up the latency table
target_hardware = args.target_hardware
use_latency_table = True if target_hardware == 'note10' else False
latency_table = LatencyTable(device=target_hardware,
use_latency_table=use_latency_table,
network=args.net)
""" Hyper-parameters for the evolutionary search process
You can modify these hyper-parameters to see how they influence the final ImageNet accuracy of the search sub-net.
"""
latency_constraint = hardware_latency[args.target_hardware][0] # ms
P = 100 # The size of population in each generation
N = 500 # How many generations of population to be searched
r = 0.25 # The ratio of networks that are used as parents for next generation
params = {
'constraint_type': target_hardware, # Let's do FLOPs-constrained search
'efficiency_constraint': latency_constraint,
'mutate_prob': 0.1, # The probability of mutation in evolutionary search
'mutation_ratio': 0.5, # The ratio of networks that are generated through mutation in generation n >= 2.
'efficiency_predictor': latency_table, # To use a predefined efficiency predictor.
'accuracy_predictor': accuracy_predictor, # To use a predefined accuracy_predictor predictor.
'population_size': P,
'max_time_budget': N,
'parent_ratio': r,
'arch' : arch[args.net][0],
}
# initialize the evolution finder and run NAS
finder = EvolutionFinder(**params)
result_lis = []
for latency in hardware_latency[args.target_hardware]:
finder.set_efficiency_constraint(latency)
best_valids, best_info = finder.run_evolution_search()
result_lis.append(best_info)
print("NAS Completed!")
# evaluate the searched model on ImageNet
models = []
if cuda_available:
for result in result_lis:
_, net_config, latency = result
print('Evaluating the sub-network with latency = %.1f ms on %s' % (latency, target_hardware))
top1 = evaluate_ofa_subnet(
ofa_network,
imagenet_data_path,
net_config,
data_loader,
batch_size=250,
device='cuda:0' if cuda_available else 'cpu')
models.append([net_config, top1, latency])
df = pd.DataFrame(models, columns=['Model', 'Accuracy', 'Latency'])
df.to_csv('NAS_results.csv')
print('NAS results saved to NAS_results.csv')
| # CompOFA – Compound Once-For-All Networks for Faster Multi-Platform Deployment
# Under blind review at ICLR 2021: https://openreview.net/forum?id=IgIk8RRT-Z
#
# Implementation based on:
# Once for All: Train One Network and Specialize it for Efficient Deployment
# <NAME>, <NAME>, <NAME>, <NAME>, <NAME>
# International Conference on Learning Representations (ICLR), 2020.
import os
import sys
import torch
import time
import math
import copy
import random
import argparse
import torch.nn as nn
import numpy as np
import pandas as pd
from torchvision import transforms, datasets
from matplotlib import pyplot as plt
sys.path.append("..")
from ofa.model_zoo import ofa_net
from ofa.utils import download_url
from accuracy_predictor import AccuracyPredictor
from flops_table import FLOPsTable
from latency_table import LatencyTable
from evolution_finder import EvolutionFinder
from imagenet_eval_helper import evaluate_ofa_subnet, evaluate_ofa_specialized
parser = argparse.ArgumentParser()
parser.add_argument(
'-n',
'--net',
metavar='OFANET',
help='OFA network',
required=True)
parser.add_argument(
'-t',
'--target-hardware',
metavar='TARGET_HARDWARE',
help='Target Hardware',
required=True)
parser.add_argument(
'--imagenet-path',
metavar='IMAGENET_PATH',
help='The path of ImageNet',
type=str,
required=True)
args = parser.parse_args()
arch = {'compofa' : ('compofa', 'model_best_compofa_simple.pth.tar'),
'compofa-elastic' : ('compofa-elastic', 'model_best_compofa_simple_elastic.pth.tar'),
'ofa_mbv3_d234_e346_k357_w1.0' : ('ofa', 'ofa_mbv3_d234_e346_k357_w1.0'),
}
hardware_latency = {'note10' : [15, 20, 25, 30],
'gpu' : [15, 25, 35, 45],
'cpu' : [12, 15, 18, 21]}
MODEL_DIR = '../ofa/checkpoints/%s' % (arch[args.net][1])
imagenet_data_path = args.imagenet_path
# imagenet_data_path = '/srv/data/datasets/ImageNet/'
# set random seed
random_seed = 3
random.seed(random_seed)
np.random.seed(random_seed)
torch.manual_seed(random_seed)
print('Successfully imported all packages and configured random seed to %d!'%random_seed)
os.environ['CUDA_VISIBLE_DEVICES'] = '0'
cuda_available = torch.cuda.is_available()
if cuda_available:
torch.backends.cudnn.enabled = True
torch.backends.cudnn.benchmark = True
torch.cuda.manual_seed(random_seed)
print('Using GPU.')
else:
print('Using CPU.')
# Initialize the OFA Network
ofa_network = ofa_net(args.net, model_dir=MODEL_DIR, pretrained=True)
if args.target_hardware == 'cpu':
ofa_network = ofa_network.cpu()
else:
ofa_network = ofa_network.cuda()
print('The OFA Network is ready.')
# Carry out data transforms
if cuda_available:
def build_val_transform(size):
return transforms.Compose([
transforms.Resize(int(math.ceil(size / 0.875))),
transforms.CenterCrop(size),
transforms.ToTensor(),
transforms.Normalize(
mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225]
),
])
data_loader = torch.utils.data.DataLoader(
datasets.ImageFolder(
root=os.path.join(imagenet_data_path, 'val'),
transform=build_val_transform(224)
),
batch_size=250, # test batch size
shuffle=True,
num_workers=16, # number of workers for the data loader
pin_memory=True,
drop_last=False,
)
print('The ImageNet dataloader is ready.')
else:
data_loader = None
print('Since GPU is not found in the environment, we skip all scripts related to ImageNet evaluation.')
# set up the accuracy predictor
accuracy_predictor = AccuracyPredictor(
pretrained=True,
device='cuda:0' if cuda_available else 'cpu'
)
print('The accuracy predictor is ready!')
print(accuracy_predictor.model)
# set up the latency table
target_hardware = args.target_hardware
use_latency_table = True if target_hardware == 'note10' else False
latency_table = LatencyTable(device=target_hardware,
use_latency_table=use_latency_table,
network=args.net)
""" Hyper-parameters for the evolutionary search process
You can modify these hyper-parameters to see how they influence the final ImageNet accuracy of the search sub-net.
"""
latency_constraint = hardware_latency[args.target_hardware][0] # ms
P = 100 # The size of population in each generation
N = 500 # How many generations of population to be searched
r = 0.25 # The ratio of networks that are used as parents for next generation
params = {
'constraint_type': target_hardware, # Let's do FLOPs-constrained search
'efficiency_constraint': latency_constraint,
'mutate_prob': 0.1, # The probability of mutation in evolutionary search
'mutation_ratio': 0.5, # The ratio of networks that are generated through mutation in generation n >= 2.
'efficiency_predictor': latency_table, # To use a predefined efficiency predictor.
'accuracy_predictor': accuracy_predictor, # To use a predefined accuracy predictor.
'population_size': P,
'max_time_budget': N,
'parent_ratio': r,
'arch' : arch[args.net][0],
}
# initialize the evolution finder and run NAS
finder = EvolutionFinder(**params)
result_lis = []
for latency in hardware_latency[args.target_hardware]:
finder.set_efficiency_constraint(latency)
best_valids, best_info = finder.run_evolution_search()
result_lis.append(best_info)
print("NAS Completed!")
# evaluate the searched model on ImageNet
models = []
if cuda_available:
for result in result_lis:
_, net_config, latency = result
print('Evaluating the sub-network with latency = %.1f ms on %s' % (latency, target_hardware))
top1 = evaluate_ofa_subnet(
ofa_network,
imagenet_data_path,
net_config,
data_loader,
batch_size=250,
device='cuda:0' if cuda_available else 'cpu')
models.append([net_config, top1, latency])
df = pd.DataFrame(models, columns=['Model', 'Accuracy', 'Latency'])
df.to_csv('NAS_results.csv')
print('NAS results saved to NAS_results.csv') | en | 0.77423 | # CompOFA – Compound Once-For-All Networks for Faster Multi-Platform Deployment # Under blind review at ICLR 2021: https://openreview.net/forum?id=IgIk8RRT-Z # # Implementation based on: # Once for All: Train One Network and Specialize it for Efficient Deployment # <NAME>, <NAME>, <NAME>, <NAME>, <NAME> # International Conference on Learning Representations (ICLR), 2020. # imagenet_data_path = '/srv/data/datasets/ImageNet/' # set random seed # Initialize the OFA Network # Carry out data transforms # test batch size # number of workers for the data loader # set up the accuracy predictor # set up the latency table Hyper-parameters for the evolutionary search process You can modify these hyper-parameters to see how they influence the final ImageNet accuracy of the search sub-net. # ms # The size of population in each generation # How many generations of population to be searched # The ratio of networks that are used as parents for next generation # Let's do FLOPs-constrained search # The probability of mutation in evolutionary search # The ratio of networks that are generated through mutation in generation n >= 2. # To use a predefined efficiency predictor. # To use a predefined accuracy_predictor predictor. # initialize the evolution finder and run NAS # evaluate the searched model on ImageNet | 2.216557 | 2 |
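The loop above reuses one EvolutionFinder across every latency target in hardware_latency. The finder's internals are not shown in this file; the following is a minimal, self-contained sketch of the kind of constrained evolutionary loop that the hyper-parameters above (population_size, max_time_budget, parent_ratio, mutation_ratio, mutate_prob) drive, using a toy 8-gene search space and toy accuracy/latency stand-ins rather than the real OFA predictors.

import random

def toy_accuracy(arch):
    # stand-in for the accuracy predictor: larger sub-nets score higher
    return 1.0 - 1.0 / (1.0 + sum(arch))

def toy_latency(arch):
    # stand-in for the latency table: cost grows with sub-net size
    return 2.0 * sum(arch)

def toy_evolution_search(constraint, population_size=100, max_time_budget=500,
                         parent_ratio=0.25, mutation_ratio=0.5, mutate_prob=0.1,
                         dims=8):
    def sample():
        return [random.randint(1, 4) for _ in range(dims)]

    def mutate(arch):
        return [random.randint(1, 4) if random.random() < mutate_prob else g
                for g in arch]

    def crossover(a, b):
        return [random.choice(pair) for pair in zip(a, b)]

    # initial population: keep only candidates that satisfy the constraint
    population = []
    while len(population) < population_size:
        arch = sample()
        if toy_latency(arch) <= constraint:
            population.append((toy_accuracy(arch), arch))

    n_parents = int(parent_ratio * population_size)
    n_mutants = int(mutation_ratio * population_size)
    best = max(population)
    for _ in range(max_time_budget):
        parents = sorted(population, reverse=True)[:n_parents]
        population = []
        while len(population) < population_size:
            if len(population) < n_mutants:
                child = mutate(random.choice(parents)[1])
            else:
                child = crossover(random.choice(parents)[1],
                                  random.choice(parents)[1])
            if toy_latency(child) <= constraint:
                population.append((toy_accuracy(child), child))
        best = max(best, max(population))
    return best

acc, arch = toy_evolution_search(constraint=50, max_time_budget=50)  # 50 generations keeps the demo fast
print('best toy accuracy %.4f at toy latency %.1f' % (acc, toy_latency(arch)))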
application/model/radar_score_20180117/score_calculate.py | ace-gabriel/chrome-extension | 4 | 8493 | <gh_stars>1-10
# coding: utf-8
import pickle
# import json
# import types
path = 'application/model/radar_score_20180117/'
def f(x, x_range, score):
bottom = 20
y = []
for i in x:
if i < x_range[0]:
pos = 0
else:
for j in range(len(x_range)):
if j == len(x_range) - 1 or \
i >= x_range[j] and i < x_range[j + 1]:
pos = j
break
s = sum(score[:pos]) + score[pos] * (i - x_range[pos])
y.append(s + bottom)
return y
def process_score(house):
# with open('radar.json', 'r') as fj:
# house = json.load(fj)
# print radar
# print house
score = {
'score_appreciation': 60,
'score_cost': 60,
'score_rental': 60,
'score_airbnb': 60,
'score_anti_risk': 60
}
with open(path+'scoremodel.pkl', 'rb') as fp:
# pickle.dump([radar, factor, x_range, score], fopen)
N = 4
a = pickle.load(fp)
if 'increase_ratio' in house and house['increase_ratio'] != None:
# house value appreciation
x = house['increase_ratio'] * a[1]
score['score_appreciation'] = f([x], a[2], a[3])[0]
# print x, score['score_appreciation']
a = pickle.load(fp)
if 'house_price_dollar' in house and house['house_price_dollar'] != None:
# holding cost
x = a[1] / house['house_price_dollar']
# print 'house_price_dollar', house['house_price_dollar']
score['score_cost'] = f([x], a[2], a[3])[0]
# print score['score_cost']
if 'airbnb_rent' in house and house['airbnb_rent'] != None:
# short-term (Airbnb) rental yield
a = pickle.load(fp)
x = house['airbnb_rent'] * 12.0 / house['house_price_dollar'] * a[1]
score['score_airbnb'] = f([x], a[2], a[3])[0]
# print score['score_airbnb']
a = pickle.load(fp)
if 'rental_income_ratio' in house and house['rental_income_ratio'] != None:
# long-term rental yield
x = house['rental_income_ratio'] * a[1]
score['score_rental'] = f([x], a[2], a[3])[0]
# print score['score_rental']
if 'neighborhood' in house and 'id' in house['neighborhood'] and house['neighborhood']['id'] != None:
with open(path+'region_anti_drop.pkl', 'r') as fp:
# resilience to price drops
region = pickle.load(fp)
score_anti = pickle.load(fp)
if house['neighborhood']['id'] in region:
# print house['neighborhood']['id']
i = region.index(house['neighborhood']['id'])
score['score_anti_risk'] = score_anti[i]
# for i in score:
# print '%20s %2.3f ' % (i, score[i])
# check: make sure score in range(20, 100)
for i in score:
if score[i] < 20:
score[i] = 20
if score[i] > 100:
score[i] = 100
return score
if __name__ == '__main__':
# README
print "This is a program calculating house's 5 scores: " \
"Anti Drop Score, " \
"House Appreciation, " \
"Possess Cost, " \
"Long-term Income, " \
"Short-term Income"
| # coding: utf-8
import pickle
# import json
# import types
path = 'application/model/radar_score_20180117/'
def f(x, x_range, score):
bottom = 20
y = []
for i in x:
if i < x_range[0]:
pos = 0
else:
for j in range(len(x_range)):
if j == len(x_range) - 1 or \
i >= x_range[j] and i < x_range[j + 1]:
pos = j
break
s = sum(score[:pos]) + score[pos] * (i - x_range[pos])
y.append(s + bottom)
return y
def process_score(house):
# with open('radar.json', 'r') as fj:
# house = json.load(fj)
# print radar
# print house
score = {
'score_appreciation': 60,
'score_cost': 60,
'score_rental': 60,
'score_airbnb': 60,
'score_anti_risk': 60
}
with open(path+'scoremodel.pkl', 'rb') as fp:
# pickle.dump([radar, factor, x_range, score], fopen)
N = 4
a = pickle.load(fp)
if 'increase_ratio' in house and house['increase_ratio'] != None:
# house value appreciation
x = house['increase_ratio'] * a[1]
score['score_appreciation'] = f([x], a[2], a[3])[0]
# print x, score['score_appreciation']
a = pickle.load(fp)
if 'house_price_dollar' in house and house['house_price_dollar'] != None:
# holding cost
x = a[1] / house['house_price_dollar']
# print 'house_price_dollar', house['house_price_dollar']
score['score_cost'] = f([x], a[2], a[3])[0]
# print score['score_cost']
if 'airbnb_rent' in house and house['airbnb_rent'] != None:
# short-term (Airbnb) rental yield
a = pickle.load(fp)
x = house['airbnb_rent'] * 12.0 / house['house_price_dollar'] * a[1]
score['score_airbnb'] = f([x], a[2], a[3])[0]
# print score['score_airbnb']
a = pickle.load(fp)
if 'rental_income_ratio' in house and house['rental_income_ratio'] != None:
# long-term rental yield
x = house['rental_income_ratio'] * a[1]
score['score_rental'] = f([x], a[2], a[3])[0]
# print score['score_rental']
if 'neighborhood' in house and 'id' in house['neighborhood'] and house['neighborhood']['id'] != None:
with open(path+'region_anti_drop.pkl', 'r') as fp:
# resilience to price drops
region = pickle.load(fp)
score_anti = pickle.load(fp)
if house['neighborhood']['id'] in region:
# print house['neighborhood']['id']
i = region.index(house['neighborhood']['id'])
score['score_anti_risk'] = score_anti[i]
# for i in score:
# print '%20s %2.3f ' % (i, score[i])
# check: make sure score in range(20, 100)
for i in score:
if score[i] < 20:
score[i] = 20
if score[i] > 100:
score[i] = 100
return score
if __name__ == '__main__':
# README
print "This is a program calculating house's 5 scores: " \
"Anti Drop Score, " \
"House Appreciation, " \
"Possess Cost, " \
"Long-term Income, " \
"Short-term Income" | en | 0.588978 | # coding: utf-8 # import json # import types # with open('radar.json', 'r') as fj: # house = json.load(fj) # print radar # print house # pickle.dump([radar, factor, x_range, score], fopen) # 房屋增值 # print x, score['score_appreciation'] # 持有成本 # print 'house_price_dollar', house['house_price_dollar'] # print score['score_cost'] # 短租收益 # print score['score_airbnb'] # 长租收益 # print score['score_rental'] # 抗跌能力 # print house['neighborhood']['id'] # for i in score: # print '%20s %2.3f ' % (i, score[i]) # check: make sure score in range(20, 100) # README | 2.746624 | 3 |
Dominant_cell.py | xi6th/Python_Algorithm | 0 | 8494 | <filename>Dominant_cell.py
#!/bin/python3
import math
import os
import random
import re
import sys
from typing import Counter
#
# Complete the 'numCells' function below.
#
# The function is expected to return an INTEGER.
# The function accepts 2D_INTEGER_ARRAY grid as parameter.
#
def numCells(grid):
# Write your code here
n = []
m = []
for neigbours in grid:
individual = max(neigbours)
n.append(individual)
m = len(n)
return(m)
# for individuals in neigbours:
# print(individuals)
grid = [[1, 2, 7], [4, 5, 6], [8, 8, 9]]
print(numCells(grid))
# if __name__ == '__main__':
# fptr = open(os.environ['OUTPUT_PATH'], 'w')
# grid_rows = int(input().strip())
# grid_columns = int(input().strip())
# grid = []
# for _ in range(grid_rows):
# grid.append(list(map(int, input().rstrip().split())))
# result = numCells(grid)
# fptr.write(str(result) + '\n')
# fptr.close()
| <filename>Dominant_cell.py
#!/bin/python3
import math
import os
import random
import re
import sys
from typing import Counter
#
# Complete the 'numCells' function below.
#
# The function is expected to return an INTEGER.
# The function accepts 2D_INTEGER_ARRAY grid as parameter.
#
def numCells(grid):
# Write your code here
n = []
m = []
for neigbours in grid:
individual = max(neigbours)
n.append(individual)
m = len(n)
return(m)
# for individuals in neigbours:
# print(individuals)
grid = [[1, 2, 7], [4, 5, 6], [8, 8, 9]]
print(numCells(grid))
# if __name__ == '__main__':
# fptr = open(os.environ['OUTPUT_PATH'], 'w')
# grid_rows = int(input().strip())
# grid_columns = int(input().strip())
# grid = []
# for _ in range(grid_rows):
# grid.append(list(map(int, input().rstrip().split())))
# result = numCells(grid)
# fptr.write(str(result) + '\n')
# fptr.close()
| en | 0.509885 | #!/bin/python3 # # Complete the 'numCells' function below. # # The function is expected to return an INTEGER. # The function accepts 2D_INTEGER_ARRAY grid as parameter. # # Write your code here # for individuals in neigbours: # print(individuals) # if __name__ == '__main__': # fptr = open(os.environ['OUTPUT_PATH'], 'w') # grid_rows = int(input().strip()) # grid_columns = int(input().strip()) # grid = [] # for _ in range(grid_rows): # grid.append(list(map(int, input().rstrip().split()))) # result = numCells(grid) # fptr.write(str(result) + '\n') # fptr.close() | 3.724768 | 4 |
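The stored solution above appends each row's maximum and returns len(n), i.e. the number of rows, which happens to print 3 for the sample grid. Assuming the usual "dominant cell" definition, a cell that is strictly greater than all of its horizontal, vertical, and diagonal neighbours, a corrected self-contained sketch follows; under that definition the sample grid contains 2 dominant cells (the 7 and the 9).

def count_dominant_cells(grid):
    rows, cols = len(grid), len(grid[0])
    count = 0
    for r in range(rows):
        for c in range(cols):
            neighbours = [
                grid[r + dr][c + dc]
                for dr in (-1, 0, 1)
                for dc in (-1, 0, 1)
                if (dr, dc) != (0, 0)
                and 0 <= r + dr < rows
                and 0 <= c + dc < cols
            ]
            if all(grid[r][c] > v for v in neighbours):
                count += 1
    return count

print(count_dominant_cells([[1, 2, 7], [4, 5, 6], [8, 8, 9]]))  # 2: the 7 and the 9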
evetool/urls.py | Sult/evetool | 0 | 8495 | from django.conf import settings
from django.conf.urls import include, url
from django.conf.urls.static import static
urlpatterns = [
# Examples:
# url(r'^$', 'evetool.views.home', name='home'),
url(r'^', include('users.urls')),
url(r'^', include('apis.urls')),
] + static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)
| from django.conf import settings
from django.conf.urls import include, url
from django.conf.urls.static import static
urlpatterns = [
# Examples:
# url(r'^$', 'evetool.views.home', name='home'),
url(r'^', include('users.urls')),
url(r'^', include('apis.urls')),
] + static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)
| en | 0.114272 | # Examples: # url(r'^$', 'evetool.views.home', name='home'), | 1.643015 | 2 |
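url(r'^', include('users.urls')) and include('apis.urls') delegate matching to urlpatterns lists defined in those apps. A minimal, hypothetical users/urls.py that would satisfy such an include for this Django version; the view names are illustrative, not taken from the actual repository.

# users/urls.py (hypothetical example)
from django.conf.urls import url

from . import views  # assumes a users/views.py defining these views

urlpatterns = [
    url(r'^login/$', views.login_view, name='login'),
    url(r'^logout/$', views.logout_view, name='logout'),
]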
actvenv.py | lastone9182/console-keep | 0 | 8496 | <filename>actvenv.py<gh_stars>0
import os
# virtualenv
SCRIPTDIR = os.path.realpath(os.path.dirname(__file__))
venv_name = '_ck'
osdir = 'Scripts' if os.name == 'nt' else 'bin'
venv = os.path.join(venv_name, osdir, 'activate_this.py')
activate_this = (os.path.join(SCRIPTDIR, venv))
# Python 3: exec(open(...).read()), Python 2: execfile(...)
exec(open(activate_this).read(), dict(__file__=activate_this)) | <filename>actvenv.py<gh_stars>0
import os
# virtualenv
SCRIPTDIR = os.path.realpath(os.path.dirname(__file__))
venv_name = '_ck'
osdir = 'Scripts' if os.name == 'nt' else 'bin'
venv = os.path.join(venv_name, osdir, 'activate_this.py')
activate_this = (os.path.join(SCRIPTDIR, venv))
# Python 3: exec(open(...).read()), Python 2: execfile(...)
exec(open(activate_this).read(), dict(__file__=activate_this)) | en | 0.125276 | # virtualenv # Python 3: exec(open(...).read()), Python 2: execfile(...) | 2.015448 | 2 |
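actvenv.py activates the bundled _ck virtualenv as an import side effect, so a script only needs to import it before anything else. A small hedged usage sketch; the entry-point file and the sys.prefix printout are illustrative, not part of the original project.

# hypothetical entry point relying on actvenv.py for activation
import actvenv  # noqa: F401 -- executing this runs _ck/<bin|Scripts>/activate_this.py

import sys
print('active interpreter prefix:', sys.prefix)  # now points inside the _ck virtualenv
# later imports resolve against the virtualenv's site-packages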
testing/scripts/checklicenses.py | zealoussnow/chromium | 14,668 | 8497 | <reponame>zealoussnow/chromium
#!/usr/bin/env python
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import json
import os
import sys
import common
def main_run(args):
with common.temporary_file() as tempfile_path:
rc = common.run_command([
os.path.join(common.SRC_DIR, 'tools', 'checklicenses',
'checklicenses.py'),
'--json', tempfile_path
])
with open(tempfile_path) as f:
checklicenses_results = json.load(f)
result_set = set()
for result in checklicenses_results:
result_set.add((result['filename'], result['license']))
json.dump({
'valid': True,
'failures': ['%s: %s' % (r[0], r[1]) for r in result_set],
}, args.output)
return rc
def main_compile_targets(args):
json.dump([], args.output)
if __name__ == '__main__':
funcs = {
'run': main_run,
'compile_targets': main_compile_targets,
}
sys.exit(common.run_script(sys.argv[1:], funcs))
| #!/usr/bin/env python
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import json
import os
import sys
import common
def main_run(args):
with common.temporary_file() as tempfile_path:
rc = common.run_command([
os.path.join(common.SRC_DIR, 'tools', 'checklicenses',
'checklicenses.py'),
'--json', tempfile_path
])
with open(tempfile_path) as f:
checklicenses_results = json.load(f)
result_set = set()
for result in checklicenses_results:
result_set.add((result['filename'], result['license']))
json.dump({
'valid': True,
'failures': ['%s: %s' % (r[0], r[1]) for r in result_set],
}, args.output)
return rc
def main_compile_targets(args):
json.dump([], args.output)
if __name__ == '__main__':
funcs = {
'run': main_run,
'compile_targets': main_compile_targets,
}
sys.exit(common.run_script(sys.argv[1:], funcs)) | en | 0.86726 | #!/usr/bin/env python # Copyright 2015 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. | 2.328012 | 2 |
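main_run above reshapes the per-file records produced by tools/checklicenses into the {'valid': ..., 'failures': [...]} payload that the harness reads from args.output. A tiny offline sketch of that reshaping; the file name and license label are made up, and the sort is only there to give this sketch a stable output order.

import json
import sys

# made-up records in the shape main_run loads from the temporary JSON file
checklicenses_results = [
    {'filename': 'third_party/foo/bar.cc', 'license': 'UNKNOWN'},
    {'filename': 'third_party/foo/bar.cc', 'license': 'UNKNOWN'},  # duplicates collapse
]

result_set = {(r['filename'], r['license']) for r in checklicenses_results}
json.dump({
    'valid': True,
    'failures': ['%s: %s' % (f, lic) for f, lic in sorted(result_set)],
}, sys.stdout)
# -> {"valid": true, "failures": ["third_party/foo/bar.cc: UNKNOWN"]}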
autoPyTorch/utils/benchmarking/benchmark_pipeline/for_autonet_config.py | gaohuan2015/Auto-PyTorch | 1 | 8498 | <reponame>gaohuan2015/Auto-PyTorch
from autoPyTorch.utils.config.config_option import ConfigOption
from autoPyTorch.pipeline.base.sub_pipeline_node import SubPipelineNode
import traceback
class ForAutoNetConfig(SubPipelineNode):
def fit(self, pipeline_config, autonet, instance, data_manager, run_id, task_id):
for config_file in self.get_config_files(pipeline_config):
try:
self.sub_pipeline.fit_pipeline(pipeline_config=pipeline_config,
autonet=autonet, instance=instance, data_manager=data_manager,
autonet_config_file=config_file, run_id=run_id, task_id=task_id)
except Exception as e:
print(e)
traceback.print_exc()
return dict()
def get_pipeline_config_options(self):
options = [
ConfigOption("autonet_configs", default=None, type='directory', list=True, required=True),
ConfigOption("autonet_config_slice", default=None, type=str)
]
return options
@staticmethod
def get_config_files(pipeline_config, parse_slice=True):
config_files = pipeline_config['autonet_configs']
autonet_config_slice = ForAutoNetConfig.parse_slice(pipeline_config['autonet_config_slice'])
if autonet_config_slice is not None and parse_slice:
return config_files[autonet_config_slice]
return config_files
@staticmethod
def parse_slice(splice_string):
if (splice_string is None):
return None
split = splice_string.split(":")
if len(split) == 1:
start = int(split[0]) if split[0] != "" else 0
stop = (int(split[0]) + 1) if split[0] != "" else None
step = 1
elif len(split) == 2:
start = int(split[0]) if split[0] != "" else 0
stop = int(split[1]) if split[1] != "" else None
step = 1
elif len(split) == 3:
start = int(split[0]) if split[0] != "" else 0
stop = int(split[1]) if split[1] != "" else None
step = int(split[2]) if split[2] != "" else 1
return slice(start, stop, step) | from autoPyTorch.utils.config.config_option import ConfigOption
from autoPyTorch.pipeline.base.sub_pipeline_node import SubPipelineNode
import traceback
class ForAutoNetConfig(SubPipelineNode):
def fit(self, pipeline_config, autonet, instance, data_manager, run_id, task_id):
for config_file in self.get_config_files(pipeline_config):
try:
self.sub_pipeline.fit_pipeline(pipeline_config=pipeline_config,
autonet=autonet, instance=instance, data_manager=data_manager,
autonet_config_file=config_file, run_id=run_id, task_id=task_id)
except Exception as e:
print(e)
traceback.print_exc()
return dict()
def get_pipeline_config_options(self):
options = [
ConfigOption("autonet_configs", default=None, type='directory', list=True, required=True),
ConfigOption("autonet_config_slice", default=None, type=str)
]
return options
@staticmethod
def get_config_files(pipeline_config, parse_slice=True):
config_files = pipeline_config['autonet_configs']
autonet_config_slice = ForAutoNetConfig.parse_slice(pipeline_config['autonet_config_slice'])
if autonet_config_slice is not None and parse_slice:
return config_files[autonet_config_slice]
return config_files
@staticmethod
def parse_slice(splice_string):
if (splice_string is None):
return None
split = splice_string.split(":")
if len(split) == 1:
start = int(split[0]) if split[0] != "" else 0
stop = (int(split[0]) + 1) if split[0] != "" else None
step = 1
elif len(split) == 2:
start = int(split[0]) if split[0] != "" else 0
stop = int(split[1]) if split[1] != "" else None
step = 1
elif len(split) == 3:
start = int(split[0]) if split[0] != "" else 0
stop = int(split[1]) if split[1] != "" else None
step = int(split[2]) if split[2] != "" else 1
return slice(start, stop, step) | none | 1 | 2.364712 | 2 |
|
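parse_slice above turns an autonet_config_slice string such as "1:4" into a Python slice that get_config_files applies to the autonet_configs list. A short standalone sketch of the resulting selection; the config file names are placeholders.

def parse_slice(spec):
    # same behaviour as ForAutoNetConfig.parse_slice for 1-, 2- and 3-part specs
    parts = spec.split(':')
    start = int(parts[0]) if parts[0] else 0
    if len(parts) == 1:
        stop = int(parts[0]) + 1 if parts[0] else None
        step = 1
    else:
        stop = int(parts[1]) if parts[1] else None
        step = int(parts[2]) if len(parts) == 3 and parts[2] else 1
    return slice(start, stop, step)

configs = ['cfg0', 'cfg1', 'cfg2', 'cfg3', 'cfg4']  # placeholder config files
for spec in ('2', '1:4', '::2'):
    print(spec, '->', configs[parse_slice(spec)])
# 2   -> ['cfg2']
# 1:4 -> ['cfg1', 'cfg2', 'cfg3']
# ::2 -> ['cfg0', 'cfg2', 'cfg4']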
csv/query_csv.py | RobustPerception/python_examples | 31 | 8499 | <gh_stars>10-100
import csv
import requests
import sys
"""
A simple program to print the result of a Prometheus query as CSV.
"""
if len(sys.argv) != 3:
print('Usage: {0} http://prometheus:9090 a_query'.format(sys.argv[0]))
sys.exit(1)
response = requests.get('{0}/api/v1/query'.format(sys.argv[1]),
params={'query': sys.argv[2]})
results = response.json()['data']['result']
# Build a list of all labelnames used.
labelnames = set()
for result in results:
labelnames.update(result['metric'].keys())
# Canonicalize
labelnames.discard('__name__')
labelnames = sorted(labelnames)
writer = csv.writer(sys.stdout)
# Write the header,
writer.writerow(['name', 'timestamp', 'value'] + labelnames)
# Write the samples.
for result in results:
l = [result['metric'].get('__name__', '')] + result['value']
for label in labelnames:
l.append(result['metric'].get(label, ''))
writer.writerow(l)
| import csv
import requests
import sys
"""
A simple program to print the result of a Prometheus query as CSV.
"""
if len(sys.argv) != 3:
print('Usage: {0} http://prometheus:9090 a_query'.format(sys.argv[0]))
sys.exit(1)
response = requests.get('{0}/api/v1/query'.format(sys.argv[1]),
params={'query': sys.argv[2]})
results = response.json()['data']['result']
# Build a list of all labelnames used.
labelnames = set()
for result in results:
labelnames.update(result['metric'].keys())
# Canonicalize
labelnames.discard('__name__')
labelnames = sorted(labelnames)
writer = csv.writer(sys.stdout)
# Write the header,
writer.writerow(['name', 'timestamp', 'value'] + labelnames)
# Write the samples.
for result in results:
l = [result['metric'].get('__name__', '')] + result['value']
for label in labelnames:
l.append(result['metric'].get(label, ''))
writer.writerow(l) | en | 0.781717 | A simple program to print the result of a Prometheus query as CSV. # Build a list of all labelnames used. # Canonicalize # Write the header, # Write the samples. | 3.354007 | 3 |
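The script expects a standard Prometheus instant-query response, where data.result is a list of {"metric": {...}, "value": [timestamp, value]} samples, and it is invoked as shown in its usage string, e.g. python query_csv.py http://prometheus:9090 'up' (the query is just an example). A small offline sketch of the CSV-writing step on a hand-written response; the metric names, labels and values below are made up.

import csv
import sys

# hand-written stand-in for response.json()['data']['result']
results = [
    {'metric': {'__name__': 'up', 'job': 'prometheus', 'instance': 'localhost:9090'},
     'value': [1518181818.898, '1']},
    {'metric': {'__name__': 'up', 'job': 'node', 'instance': 'localhost:9100'},
     'value': [1518181818.898, '0']},
]

labelnames = sorted({k for r in results for k in r['metric']} - {'__name__'})
writer = csv.writer(sys.stdout)
writer.writerow(['name', 'timestamp', 'value'] + labelnames)
for r in results:
    row = [r['metric'].get('__name__', '')] + list(r['value'])
    row += [r['metric'].get(label, '') for label in labelnames]
    writer.writerow(row)
# name,timestamp,value,instance,job
# up,1518181818.898,1,localhost:9090,prometheus
# up,1518181818.898,0,localhost:9100,node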