ext (stringclasses · 9 values) | sha (stringlengths · 40) | content (stringlengths · 3–1.04M)
---|---|---|
py | 1a5df5068b268699a46c88bc9d6d5d59355a428d |
"""
This file contains environment defined constants that are set at the start
of the program
"""
import os
API_PORT = os.environ.get('SHH_API_PORT')
API_KEY = os.environ.get('SHH_API_KEY')
CONFIG_BASE_DIR = os.environ.get('SHH_CONFIG_BASE_DIR')
if CONFIG_BASE_DIR is None:
CONFIG_BASE_DIR = 'config/'
try:
MIC_DEVICE_NDX = int(os.environ.get('SHH_MIC_DEVICE_INDEX'))
except (TypeError, ValueError):
MIC_DEVICE_NDX = None
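# Example (illustrative only): the environment variables this module reads can
# be set before launching the program, e.g. in a shell. The values below are
# placeholders, not defaults required by the program.
#   export SHH_API_PORT=8080
#   export SHH_API_KEY=changeme
#   export SHH_CONFIG_BASE_DIR=/etc/shh/config/
#   export SHH_MIC_DEVICE_INDEX=2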
py | 1a5df55bccfb21687a75f08e0a5a4ceda57cd226 |
"""
Administration.py allows users to control ArcGIS for Server 10.1+
through the Administration REST API
"""
from __future__ import absolute_import
from .._common import BaseServer
from . import _machines, _clusters
from . import _data, _info
from . import _kml, _logs
from . import _security, _services
from . import _system
from . import _uploads, _usagereports
from . import _mode
from .. import ServicesDirectory
from arcgis._impl.connection import _ArcGISConnection
from .._common import ServerConnection
########################################################################
class Server(BaseServer):
"""
An ArcGIS Enterprise Server site used for hosting GIS Web services.
================== ====================================================================
**Argument** **Description**
------------------ --------------------------------------------------------------------
url Required string. The URL to the ArcGIS Server administration
endpoint for the ArcGIS Server Site.
Example: https://gis.mysite.com/arcgis/admin
The URL should be formatted as follows:
<scheme>://<fully_qualified_domain_name>:<port (optional)>/<web adaptor>/admin
Note: Using the fully qualified domain name of the server (also known as the
Web Context URL) is recommended, since the SSL certificate binding for the
web server generally uses this hostname.
------------------ --------------------------------------------------------------------
gis Optional GIS object. The GIS object representing the Portal which this
Server is federated with. The GIS object should be logged in with a username
in the publisher or administrator role in order to administer the Server.
================== ====================================================================
===================== ====================================================================
**Optional Argument** **Description**
--------------------- --------------------------------------------------------------------
baseurl Optional string. The root URL to a site.
Example: https://mysite.com/arcgis
--------------------- --------------------------------------------------------------------
tokenurl Optional string. Used when a site is federated or when the token
URL differs from the site's baseurl. If a site is federated, the
token URL will return as the Portal token and ArcGIS Server users
will not validate correctly.
--------------------- --------------------------------------------------------------------
username Optional string. The login username for BUILT-IN GIS Server security.
--------------------- --------------------------------------------------------------------
password Optional string. A secret word or phrase that must be used to gain
access to the account above.
--------------------- --------------------------------------------------------------------
key_file Optional string. The path to a PKI key file used to authenticate the
user to the Web Server in front of the ArcGIS Server site.
--------------------- --------------------------------------------------------------------
cert_file Optional string. The path to PKI cert file used to authenticate the
user to the Web Server in front of the ArcGIS Server site.
--------------------- --------------------------------------------------------------------
proxy_host Optional string. The web address to the proxy host if the environment
where the Python API is running requires a proxy host for access to the
Site URL or GIS URL.
Example: proxy.mysite.com
--------------------- --------------------------------------------------------------------
proxy_port Optional integer. The port which the proxy is accessed through,
default is 80.
--------------------- --------------------------------------------------------------------
expiration Optional integer. This is the length of time in minutes that a token
requested through this login process will be valid for.
Example: 1440 is one day. The Default is 60.
--------------------- --------------------------------------------------------------------
all_ssl Optional boolean. If True, all calls will be made over HTTPS instead
of HTTP. The default is False.
--------------------- --------------------------------------------------------------------
portal_connection Optional string. This is used when a site is federated. It is the
ArcGIS Online or Portal GIS object used.
--------------------- --------------------------------------------------------------------
initialize Optional boolean. If True, the object will attempt to reach out to
the URL resource and populate at creation time. The default is False.
===================== ====================================================================
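Example (illustrative sketch only; the URL and credentials below are
placeholders, and the keyword arguments follow the optional-argument table
above):
.. code-block:: python

    server = Server(url="https://gis.mysite.com/arcgis/admin",
                    username="admin",
                    password="my.secret.password")
    print(server.services)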
"""
_url = None
_con = None
_json_dict = None
_json = None
_catalog = None
_sitemanager = None
#----------------------------------------------------------------------
def __init__(self,
url,
gis=None,
**kwargs):
"""Constructor"""
if gis is None and len(kwargs) > 0:
if 'baseurl' not in kwargs:
kwargs['baseurl'] = url
gis = ServerConnection(**kwargs)
initialize = kwargs.pop('initialize', False)
super(Server, self).__init__(gis=gis,
url=url,
initialize=initialize,
**kwargs)
self._catalog = kwargs.pop('servicesdirectory', None)
if not url.lower().endswith('/admin'):
url = "%s/admin" % url
self._url = url
#else:
# raise ValueError("You must provide either a GIS or login credentials to use this object.")
if hasattr(gis, '_con'):
self._con = gis._con
elif hasattr(gis, '_portal'):
self._con = gis._portal._con
elif isinstance(gis, (_ArcGISConnection,
ServerConnection)):
self._con = gis
else:
raise ValueError("Invalid gis Type: Must be GIS/ServicesDirectory Object")
if initialize:
self._init(self._con)
#----------------------------------------------------------------------
def __str__(self):
return '<%s at %s>' % (type(self).__name__, self._url)
#----------------------------------------------------------------------
def __repr__(self):
return '<%s at %s>' % (type(self).__name__, self._url)
#----------------------------------------------------------------------
def publish_sd(self,
sd_file,
folder=None):
"""
Publishes a service definition file to ArcGIS Server.
================== ====================================================================
**Argument** **Description**
------------------ --------------------------------------------------------------------
sd_file Required string. The service definition file to be uploaded and published.
------------------ --------------------------------------------------------------------
folder Optional string. The folder to publish the service definition
file to. If this folder is not present, it will be created. The
default is None, in which case the service definition is published
to the System folder.
================== ====================================================================
:return:
A boolean indicating success (True) or failure (False).
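Example (illustrative sketch only; the file path and folder name are
placeholders):
.. code-block:: python

    published = server.publish_sd(sd_file="C:/temp/roads.sd",
                                  folder="Transportation")
    if not published:
        print("Publishing the service definition failed.")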
"""
import json
if not sd_file.lower().endswith('.sd'):
return False
catalog = self._catalog
if 'System' not in catalog.folders:
return False
if folder and \
folder.lower() not in [f.lower() for f in catalog.folders]:
self.services.create_folder(folder)
service = catalog.get(name="PublishingTools", folder='System')
if service is None:
service = catalog.get(name="PublishingToolsEx", folder='System')
if service is None:
return False
status, res = self._uploads.upload(path=sd_file, description="sd file")
if status:
uid = res['item']['itemID']
if folder:
config = self._uploads._service_configuration(uid)
if 'folderName' in config:
config['folderName'] = folder
res = service.publish_service_definition(in_sdp_id=uid,
in_config_overwrite=json.dumps(config))
else:
res = service.publish_service_definition(in_sdp_id=uid)
return True
return False
#----------------------------------------------------------------------
@staticmethod
def _create(url,
username,
password,
config_store_connection,
directories,
cluster=None,
logs_settings=None,
run_async=False,
**kwargs):
"""
This is the first operation that you must invoke when you install
ArcGIS Server for the first time. Creating a new site involves:
-Allocating a store to save the site configuration
-Configuring the server machine and registering it with the site
-Creating a new cluster configuration that includes the server
machine
-Configuring server directories
-Deploying the services that are marked to auto-deploy
Because of the sheer number of tasks, it usually takes some time
for this operation to complete. Once a site has been created,
you can publish GIS services and deploy them to your server
machines.
====================== ====================================================================
**Argument** **Description**
---------------------- --------------------------------------------------------------------
url Required string. URI string to the site.
---------------------- --------------------------------------------------------------------
username Required string. The name of the administrative account to be used by
the site. This can be changed at a later stage.
---------------------- --------------------------------------------------------------------
password Required string. The password to the administrative account.
---------------------- --------------------------------------------------------------------
configStoreConnection Required string. A JSON object representing the connection to the
configuration store. By default, the configuration store will be
maintained in the ArcGIS Server installation directory.
---------------------- --------------------------------------------------------------------
directories Required string. A JSON object representing a collection of server
directories to create. By default, the server directories will be
created locally.
---------------------- --------------------------------------------------------------------
cluster Optional string. An optional cluster configuration. By default, the
site will create a cluster called 'default' with the first available
port numbers starting from 4004.
---------------------- --------------------------------------------------------------------
logsSettings Optional string. Optional log settings, see http://resources.arcgis.com/en/help/arcgis-rest-api/index.html#/Log_Settings/02r3000001t6000000/ .
---------------------- --------------------------------------------------------------------
runAsync Optional boolean. A flag to indicate if the operation needs to be run
asynchronously.
====================== ====================================================================
===================== ====================================================================
**Optional Argument** **Description**
--------------------- --------------------------------------------------------------------
baseurl Optional string. The root URL to a site.
Example: https://mysite.com/arcgis
--------------------- --------------------------------------------------------------------
tokenurl Optional string. Used when a site is federated or when the token
URL differs from the site's baseurl. If a site is federated, the
token URL will return as the Portal token and ArcGIS Server users
will not validate correctly.
--------------------- --------------------------------------------------------------------
username Optional string. The login username for BUILT-IN security.
--------------------- --------------------------------------------------------------------
password Optional string. A secret word or phrase that must be used to gain
access to the account above.
--------------------- --------------------------------------------------------------------
key_file Optional string. The path to PKI key file.
--------------------- --------------------------------------------------------------------
cert_file Optional string. The path to PKI cert file.
--------------------- --------------------------------------------------------------------
proxy_host Optional string. The web address to the proxy host.
Example: proxy.mysite.com
--------------------- --------------------------------------------------------------------
proxy_port Optional integer. The port where the proxy resides on, default is 80.
--------------------- --------------------------------------------------------------------
expiration Optional integer. This is the length of time in minutes that a
token is valid for. Example: 1440 is one day. The default is 60.
--------------------- --------------------------------------------------------------------
all_ssl Optional boolean. If True, all calls will be made over HTTPS instead
of HTTP. The default is False.
--------------------- --------------------------------------------------------------------
portal_connection Optional string. This is used when a site is federated. It is the
ArcGIS Online or Portal GIS object used.
--------------------- --------------------------------------------------------------------
initialize Optional boolean. If True, the object will attempt to reach out to
the URL resource and populate at creation time. The default is False.
===================== ====================================================================
:return:
Success statement.
"""
url = url + "/createNewSite"
params = {
"f" : "json",
"cluster" : cluster,
"directories" : directories,
"username" : username,
"password" : password,
"configStoreConnection" : config_store_connection,
"logSettings" : logs_settings,
"runAsync" : run_async
}
con = ServerConnection(**kwargs)
return con.post(path=url,
postdata=params)
#----------------------------------------------------------------------
def _join(self, admin_url, username, password):
"""
The Join Site operation is used to connect a server machine to an
existing site. This is considered a 'push' mechanism, in which a
server machine pushes its configuration to the site. For the
operation to be successful, you need to provide an account with
administrative privileges to the site.
When an attempt is made to join a site, the site validates the
administrative credentials, then returns connection information
about its configuration store back to the server machine. The
server machine then uses the connection information to work with
the configuration store.
If this is the first server machine in your site, use the Create
Site operation instead.
====================== ====================================================================
**Argument** **Description**
---------------------- --------------------------------------------------------------------
admin_url Required string. The site URL of the currently live site. This is
typically the Administrator Directory URL of one of the server
machines of a site.
---------------------- --------------------------------------------------------------------
username Required string. The name of the administrative account for this site.
---------------------- --------------------------------------------------------------------
password Required string. The password to the administrative account.
====================== ====================================================================
:return:
Success statement.
"""
url = self._url + "/joinSite"
params = {
"f" : "json",
"adminURL" : admin_url,
"username" : username,
"password" : password
}
return self._con.post(path=url,
postdata=params)
#----------------------------------------------------------------------
def _delete(self):
"""
Deletes the site configuration and releases all server resources.
This is an unrecoverable operation. This operation is well suited
for development or test servers that need to be cleaned up
regularly. It can also be performed prior to uninstall. Use caution
with this option because it deletes all services, settings, and
other configurations.
This operation performs the following tasks:
- Stops all server machines participating in the site. This in
turn stops all GIS services hosted on the server machines.
- All services and cluster configurations are deleted.
- All server machines are unregistered from the site.
- The configuration store is deleted.
:return:
Success statement.
"""
url = self._url + "/deleteSite"
params = {
"f" : "json"
}
return self._con.post(path=url,
postdata=params)
#----------------------------------------------------------------------
def _export(self, location=None):
"""
Exports the site configuration to a location you specify as input
to this operation.
================== ====================================================================
**Argument** **Description**
------------------ --------------------------------------------------------------------
location Optional string. A path to a folder accessible to the server where
the exported site configuration will be written. If a location is
not specified, the server writes the exported site configuration
file to a directory owned by the server and returns a virtual path
(an HTTP URL) to that location from where it can be downloaded.
================== ====================================================================
:return:
Success statement.
"""
url = self._url + "/exportSite"
params = {
"f" : "json"
}
if location is not None:
params['location'] = location
return self._con.post(path=url,
postdata=params)
#----------------------------------------------------------------------
def _import_site(self, location):
"""
This operation imports a site configuration into the currently
running site. Importing a site means replacing all site
configurations (including GIS services, security configurations,
and so on) of the currently running site with those contained in
the site configuration file you supply as input. The input site
configuration file can be obtained through the exportSite
operation.
This operation will restore all information included in the backup,
as noted in exportSite. When it is complete, this operation returns
a report as the response. You should review this report and fix any
problems it lists to ensure your site is fully functioning again.
The importSite operation lets you restore your site from a backup
that you created using the exportSite operation.
================== ====================================================================
**Argument** **Description**
------------------ --------------------------------------------------------------------
location Required string. A file path to an exported configuration or an ID
referencing the stored configuration on the server.
================== ====================================================================
:return:
A report.
"""
url = self._url + "/importSite"
params = {
"f" : "json",
"location" : location
}
return self._con.post(path=url,
postdata=params)
#----------------------------------------------------------------------
def _upgrade(self, run_async=False):
"""
This is the first operation that must be invoked during an ArcGIS
Server upgrade. Once the new software version has been installed
and the setup has completed, this operation will be available. A
successful run of this operation will complete the upgrade of
ArcGIS Server.
.. note::
**caution** If errors are returned with the upgrade operation,
you must address the errors before you can continue. For example,
if you encounter an error about an invalid license, you will need
to re-authorize the software using a valid license and you may
then retry this operation.
This operation is available only when a server machine is
currently being upgraded. It will not be available after a
successful upgrade of a server machine.
================== ====================================================================
**Argument** **Description**
------------------ --------------------------------------------------------------------
run_async Optional boolean. A flag to indicate if the operation needs to be run
asynchronously. The default value is False.
================== ====================================================================
:return:
Success statement.
"""
url = self._url + "/upgrade"
params = {
"f" : "json",
"runAsync" : run_async
}
return self._con.post(path=url,
postdata=params)
#----------------------------------------------------------------------
@property
def _public_key(self):
"""Gets the public key."""
url = self._url + "/publicKey"
params = {
"f" : "json",
}
return self._con.get(path=url,
params=params)
#----------------------------------------------------------------------
@property
def machines(self):
"""
Gets the list of server machines registered with the site.
This resource represents a collection of all the server machines that
have been registered with the site. In other words, it represents
the total computing power of your site. A site will continue to run
as long as there is one server machine online.
For a server machine to start hosting GIS services, it must be
grouped (or clustered). When you create a new site, a cluster called
'default' is created for you.
The list of server machines in your site can be dynamic. You can
register additional server machines when you need to increase the
computing power of your site or unregister them if you no longer
need them.
"""
if self.resources is None:
self._init()
if isinstance(self.resources, list) and \
'machines' in self.resources:
url = self._url + "/machines"
return _machines.MachineManager(url,
gis=self._con,
initialize=False)
else:
return None
#----------------------------------------------------------------------
@property
def datastores(self):
"""
Gets the information about the data holdings of the server.
Data items are used by ArcGIS for Desktop and other clients
to validate data paths referenced by GIS services.
You can register new data items with the server by using the
Register Data Item operation. Use the Find Data Items operation to
search through the hierarchy of data items.
A relational data store type represents a database platform that
has been registered for use on a portal's hosting server by the
ArcGIS Server administrator. Each relational data store type
describes the properties ArcGIS Server requires in order to connect
to an instance of a database for a particular platform. At least
one registered relational data store type is required before client
applications such as Insights for ArcGIS can create Relational
Database Connection portal items.
The Compute Ref Count operation counts and lists all references to
a specific data item. This operation helps you determine if a
particular data item can be safely deleted or refreshed.
"""
if self.properties is None:
self._init()
if isinstance(self.resources, list) and \
"data" in self.resources:
url = self._url + "/data"
return _data.DataStoreManager(url=url,
gis=self._con)
else:
return None
#----------------------------------------------------------------------
@property
def _info(self):
"""
A read-only resource that returns meta information about the server.
"""
url = self._url + "/info"
return _info.Info(url=url,
gis=self._con,
initialize=True)
#----------------------------------------------------------------------
@property
def site(self):
"""
Gets the site's collection of server resources. This collection
includes server machines that are installed with ArcGIS Server,
including GIS services, data and so on. The site resource also
lists the current version of the software.
When you install ArcGIS Server on a server machine for the first
time, you must create a new site. Subsequently, newer server
machines can join your site and increase its computing power. Once
a site is no longer required, you can delete the site, which will
cause all of the resources to be cleaned up.
"""
if self._sitemanager is None:
self._sitemanager = SiteManager(self)
return self._sitemanager
#----------------------------------------------------------------------
@property
def _clusters(self):
"""Gets the clusters functions if supported in resources."""
if self.properties is None:
self._init()
if isinstance(self.resources, list) and \
"clusters" in self.resources:
url = self._url + "/clusters"
return _clusters.Cluster(url=url,
gis=self._con,
initialize=True)
else:
return None
#----------------------------------------------------------------------
@property
def services(self):
"""
Gives the administrator access to the services on ArcGIS Server as a
ServiceManager object.
"""
if self.resources is None:
self._init()
if isinstance(self.resources, list) and \
'services' in self.resources:
url = self._url + "/services"
return _services.ServiceManager(url=url,
gis=self._con,
initialize=True,
sm=self)
else:
return None
#----------------------------------------------------------------------
@property
def usage(self):
"""
Gets the collection of all the usage reports created
within your site. The Create Usage Report operation lets you define
a new usage report.
"""
if self.resources is None:
self._init()
if isinstance(self.resources, list) and \
'usagereports' in self.resources:
url = self._url + "/usagereports"
return _usagereports.ReportManager(url=url,
gis=self._con,
initialize=True)
else:
return None
#----------------------------------------------------------------------
@property
def _kml(self):
"""Gets the KML functions for a server."""
url = self._url + "/kml"
return _kml.KML(url=url,
gis=self._con,
initialize=True)
#----------------------------------------------------------------------
@property
def logs(self):
"""
Gives users access to the ArcGIS Server's logs and lets
administrators query and find errors and/or problems related to
the server or a service.
Logs are the records written by the various components of ArcGIS
Server. You can query the logs and change various log settings.
**Note**
ArcGIS Server Only
"""
if self.resources is None:
self._init()
if isinstance(self.resources, list) and \
'logs' in self.resources:
url = self._url + "/logs"
return _logs.LogManager(url=url,
gis=self._con,
initialize=True)
else:
return None
#----------------------------------------------------------------------
@property
def _security(self):
"""Gets an object to work with the site security."""
if self.resources is None:
self._init()
if isinstance(self.resources, list) and \
"security" in self.resources:
url = self._url + "/security"
return _security.Security(url=url,
gis=self._con,
initialize=True)
else:
return None
#----------------------------------------------------------------------
@property
def users(self):
"""Gets operations to work with users."""
return self._security.users
#----------------------------------------------------------------------
@property
def content(self):
"""
Gets the Services Directory which can help you discover information about
services available on a particular server. A service represents a
local GIS resource whose functionality has been made available on
the server to a wider audience. For example, an ArcGIS Server
administrator can publish an ArcMap document (.mxd) as a map
service. Developers and clients can display the map service and
query its contents.
The Services Directory is available as part of the REST services
infrastructure available with ArcGIS Server installations. It
enables you to list the services available, including secured
services when you provide a proper login. For each service, a set
of general properties are displayed. For map services, these
properties include the spatial extent, spatial reference
(coordinate system) and supported operations. Layers are also
listed, with links to details about layers, which includes layer
fields and extent. The Services Directory can execute simple
queries on layers.
The Services Directory is also useful for finding information about
non-map service types. For example, you can use the Services
Directory to determine the required address format for a geocode
service, or the necessary model inputs for a geoprocessing service.
"""
from .. import ServicesDirectory
if self._catalog is None:
url = self._url.lower().replace("/admin", "")
self._catalog = ServicesDirectory(url=url,
con=self._con)
return self._catalog
#----------------------------------------------------------------------
@property
def system(self):
"""
Provides access to common system configuration settings.
"""
if self.resources is None:
self._init()
if isinstance(self.resources, list) and \
"system" in self.resources:
url = self._url + "/system"
return _system.SystemManager(url=url,
gis=self._con,
initialize=True)
else:
return None
#----------------------------------------------------------------------
@property
def _uploads(self):
"""Gets an object to work with the site uploads."""
if self.resources is None:
self._init()
if isinstance(self.resources, list) and \
"uploads" in self.resources:
url = self._url + "/uploads"
return _uploads.Uploads(url=url,
gis=self._con,
initialize=True)
else:
return None
#----------------------------------------------------------------------
@property
def _mode(self):
"""Gets the class that works with Mode."""
if self.resources is None:
self._init()
if isinstance(self.resources, list) and \
'mode' in self.resources:
url = self._url + "/mode"
return _mode.Mode(url=url,
gis=self._con,
initialize=True)
return None
########################################################################
class SiteManager(object):
"""
A site is a collection of server resources. This collection includes
server machines that are installed with ArcGIS Server, including GIS
services, data and so on. The site resource also lists the current
version of the software.
When you install ArcGIS Server on a server machine for the first time,
you must create a new site. Subsequently, newer server machines can
join your site and increase its computing power. Once a site is no
longer required, you can delete the site, which will cause all of
the resources to be cleaned up.
================== ====================================================================
**Argument** **Description**
------------------ --------------------------------------------------------------------
server Required string. The arcgis.gis.server object.
================== ====================================================================
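Example (illustrative only; assumes an existing ``Server`` object named
``server`` and uses a placeholder backup path):
.. code-block:: python

    site = server.site
    result = site.export(location=r"\\fileserver\backups")
    print(result)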
"""
_sm = None
#----------------------------------------------------------------------
def __init__(self, server, initialize=False):
"""Constructor"""
self._sm = server
if initialize:
self._sm._init()
#----------------------------------------------------------------------
def __str__(self):
return '<%s at %s>' % (type(self).__name__, self._sm._url)
#----------------------------------------------------------------------
def __repr__(self):
return '<%s at %s>' % (type(self).__name__, self._sm._url)
#----------------------------------------------------------------------
@property
def properties(self):
"""Gets the site properties. """
return self._sm.properties
#----------------------------------------------------------------------
@staticmethod
def create(username,
password,
config_store_connection,
directories,
cluster=None,
logs_settings=None,
run_async=False,
**kwargs):
"""
This is the first operation that you must invoke when you install
ArcGIS Server for the first time. Creating a new site involves:
-Allocating a store to save the site configuration
-Configuring the server machine and registering it with the site
-Creating a new cluster configuration that includes the server
machine
-Configuring server directories
-Deploying the services that are marked to auto-deploy
Because of the sheer number of tasks, it usually takes some time
for this operation to complete. Once a site has been created,
you can publish GIS services and deploy them to your server
machines.
====================== ====================================================================
**Argument** **Description**
---------------------- --------------------------------------------------------------------
url Required string. URI string to the site.
---------------------- --------------------------------------------------------------------
username Required string. The name of the administrative account to be used by
the site. This can be changed at a later stage.
---------------------- --------------------------------------------------------------------
password Required string. The password to the administrative account.
---------------------- --------------------------------------------------------------------
configStoreConnection Required string. A JSON object representing the connection to the
configuration store. By default, the configuration store will be
maintained in the ArcGIS Server installation directory.
---------------------- --------------------------------------------------------------------
directories Required string. A JSON object representing a collection of server
directories to create. By default, the server directories will be
created locally.
---------------------- --------------------------------------------------------------------
cluster Optional string. An optional cluster configuration. By default, the
site will create a cluster called 'default' with the first available
port numbers starting from 4004.
---------------------- --------------------------------------------------------------------
logsSettings Optional string. Optional log settings, see http://resources.arcgis.com/en/help/arcgis-rest-api/index.html#/Log_Settings/02r3000001t6000000/ .
---------------------- --------------------------------------------------------------------
runAsync Optional boolean. A flag to indicate if the operation needs to be run
asynchronously.
====================== ====================================================================
===================== ====================================================================
**Optional Argument** **Description**
--------------------- --------------------------------------------------------------------
baseurl Optional string. The root URL to a site.
Example: https://mysite.com/arcgis
--------------------- --------------------------------------------------------------------
tokenurl Optional string. Used when a site is federated or when the token
URL differs from the site's baseurl. If a site is federated, the
token URL will return as the Portal token and ArcGIS Server users
will not validate correctly.
--------------------- --------------------------------------------------------------------
username Optional string. The login username for BUILT-IN security.
--------------------- --------------------------------------------------------------------
password Optional string. A secret word or phrase that must be used to gain
access to the account above.
--------------------- --------------------------------------------------------------------
key_file Optional string. The path to PKI key file.
--------------------- --------------------------------------------------------------------
cert_file Optional string. The path to PKI cert file.
--------------------- --------------------------------------------------------------------
proxy_host Optional string. The web address to the proxy host.
Example: proxy.mysite.com
--------------------- --------------------------------------------------------------------
proxy_port Optional integer. The port where the proxy resides on, default is 80.
--------------------- --------------------------------------------------------------------
expiration Optional integer. This is the length of time in minutes that a
token is valid for. Example: 1440 is one day. The default is 60.
--------------------- --------------------------------------------------------------------
all_ssl Optional boolean. If True, all calls will be made over HTTPS instead
of HTTP. The default is False.
--------------------- --------------------------------------------------------------------
portal_connection Optional string. This is used when a site is federated. It is the
ArcGIS Online or Portal GIS object used.
--------------------- --------------------------------------------------------------------
initialize Optional boolean. If True, the object will attempt to reach out to
the URL resource and populate at creation time. The default is False.
===================== ====================================================================
:return:
Success statement.
"""
return Server._create(username,
password,
config_store_connection,
directories,
cluster,
logs_settings,
run_async)
#----------------------------------------------------------------------
def join(self, admin_url, username, password):
"""
The Join Site operation is used to connect a server machine to an
existing site. This is considered a 'push' mechanism, in which a
server machine pushes its configuration to the site. For the
operation to be successful, you need to provide an account with
administrative privileges to the site.
When an attempt is made to join a site, the site validates the
administrative credentials, then returns connection information
about its configuration store back to the server machine. The
server machine then uses the connection information to work with
the configuration store.
If this is the first server machine in your site, use the Create
Site operation instead.
================== ====================================================================
**Argument** **Description**
------------------ --------------------------------------------------------------------
admin_url Required string. The site URL of the currently live site. This is
typically the Administrator Directory URL of one of the server
machines of a site.
------------------ --------------------------------------------------------------------
username Required string. The name of an administrative account for the site.
------------------ --------------------------------------------------------------------
password Required string. The password of the administrative account.
================== ====================================================================
:return:
A status indicating success or failure.
"""
return self._sm._join(admin_url, username, password)
#----------------------------------------------------------------------
def delete(self):
"""
Deletes the site configuration and releases all server resources.
This operation is well suited
for development or test servers that need to be cleaned up
regularly. It can also be performed prior to uninstall. Use caution
with this option because it deletes all services, settings, and
other configurations.
This operation performs the following tasks:
- Stops all server machines participating in the site. This in
turn stops all GIS services hosted on the server machines.
- All services and cluster configurations are deleted.
- All server machines are unregistered from the site.
- The configuration store is deleted.
.. note::
This is an unrecoverable operation!
:return:
A status indicating success or failure.
"""
return self._sm._delete()
#----------------------------------------------------------------------
def export(self, location=None):
"""
Exports the site configuration to a location you specify as input
to this operation.
================== ====================================================================
**Argument** **Description**
------------------ --------------------------------------------------------------------
location Optional string. A path to a folder accessible to the server
where the exported site configuration will be written. If a location
is not specified, the server writes the exported site configuration
file to a directory owned by the server and returns a virtual path
(an HTTP URL) to that location from where it can be downloaded.
================== ====================================================================
:return:
A status indicating success (along with the folder location) or failure.
"""
return self._sm._export(location)
#----------------------------------------------------------------------
def import_site(self, location):
"""
This operation imports a site configuration into the currently
running site. Importing a site means replacing all site
configurations (including GIS services, security configurations,
and so on) of the currently running site with those contained in
the site configuration file you supply as input. The input site
configuration file can be obtained through the exportSite
operation.
This operation will restore all information included in the backup,
as noted in exportSite. When it is complete, this operation returns
a report as the response. You should review this report and fix any
problems it lists to ensure your site is fully functioning again.
The importSite operation lets you restore your site from a backup
that you created using the exportSite operation.
================== ====================================================================
**Argument** **Description**
------------------ --------------------------------------------------------------------
location Required string. A file path to an exported configuration or an ID
referencing the stored configuration on the server.
================== ====================================================================
:return:
A status indicating success (along with site details) or failure.
"""
return self._sm._import_site(location=location)
#----------------------------------------------------------------------
def upgrade(self, run_async=False):
"""
This is the first operation that must be invoked during an ArcGIS
Server upgrade. Once the new software version has been installed
and the setup has completed, this operation will be available. A
successful run of this operation will complete the upgrade of
ArcGIS Server.
.. note::
If errors are returned with the upgrade operation, you must address
the errors before you can continue. For example, if you encounter
an error about an invalid license, you will need to re-authorize
the software using a valid license and you may then retry this
operation.
This operation is available only when a server machine is currently
being upgraded. It will not be available after a successful upgrade
of a server machine.
================== ====================================================================
**Argument** **Description**
------------------ --------------------------------------------------------------------
run_async Optional boolean. A flag to indicate if the operation needs to be run
asynchronously. The default value is False.
================== ====================================================================
:return:
A status indicating success or failure.
"""
return self._sm._upgrade(run_async)
#----------------------------------------------------------------------
@property
def public_key(self):
"""Gets the public key."""
return self._sm._public_key
py | 1a5df676f1ae57831b073b609254802a198ab429 |
# coding: utf-8
#
# Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file
# except in compliance with the License. A copy of the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for
# the specific language governing permissions and limitations under the License.
#
import pprint
import re # noqa: F401
import six
import typing
from enum import Enum
from ask_sdk_model.interfaces.alexa.presentation.apl.command import Command
if typing.TYPE_CHECKING:
from typing import Dict, List, Optional, Union
from datetime import datetime
from ask_sdk_model.interfaces.alexa.presentation.apl.component_state import ComponentState
class SetStateCommand(Command):
"""
The SetState command changes one of the component’s state settings. The SetState command can be used to change the checked, disabled, and focused states. The karaoke and pressed states may not be directly set; use the Select command or SpeakItem commands to change those states. Also, note that the focused state may only be set - it can’t be cleared.
:param delay: The delay in milliseconds before this command starts executing; must be non-negative. Defaults to 0.
:type delay: (optional) int
:param description: A user-provided description of this command.
:type description: (optional) str
:param when: If false, the execution of the command is skipped. Defaults to true.
:type when: (optional) bool
:param component_id: The id of the component whose value should be set.
:type component_id: (optional) str
:param state: The name of the state to set. Must be one of “checked”, “disabled”, and “focused”.
:type state: (optional) ask_sdk_model.interfaces.alexa.presentation.apl.component_state.ComponentState
:param value: The value to set on the property
:type value: (optional) bool
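Example (illustrative only; the component id is a placeholder and the
``ComponentState.checked`` member name is assumed from the state names listed
above):
.. code-block:: python

    from ask_sdk_model.interfaces.alexa.presentation.apl.component_state import ComponentState

    command = SetStateCommand(component_id="myCheckbox",
                              state=ComponentState.checked,
                              value=True)
    print(command.to_dict())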
"""
deserialized_types = {
'object_type': 'str',
'delay': 'int',
'description': 'str',
'when': 'bool',
'component_id': 'str',
'state': 'ask_sdk_model.interfaces.alexa.presentation.apl.component_state.ComponentState',
'value': 'bool'
} # type: Dict
attribute_map = {
'object_type': 'type',
'delay': 'delay',
'description': 'description',
'when': 'when',
'component_id': 'componentId',
'state': 'state',
'value': 'value'
} # type: Dict
supports_multiple_types = False
def __init__(self, delay=None, description=None, when=None, component_id=None, state=None, value=None):
# type: (Union[int, str, None], Optional[str], Optional[bool], Optional[str], Optional[ComponentState], Union[bool, str, None]) -> None
"""The SetState command changes one of the component’s state settings. The SetState command can be used to change the checked, disabled, and focused states. The karaoke and pressed states may not be directly set; use the Select command or SpeakItem commands to change those states. Also, note that the focused state may only be set - it can’t be cleared.
:param delay: The delay in milliseconds before this command starts executing; must be non-negative. Defaults to 0.
:type delay: (optional) int
:param description: A user-provided description of this command.
:type description: (optional) str
:param when: If false, the execution of the command is skipped. Defaults to true.
:type when: (optional) bool
:param component_id: The id of the component whose value should be set.
:type component_id: (optional) str
:param state: The name of the state to set. Must be one of “checked”, “disabled”, and “focused”.
:type state: (optional) ask_sdk_model.interfaces.alexa.presentation.apl.component_state.ComponentState
:param value: The value to set on the property
:type value: (optional) bool
"""
self.__discriminator_value = "SetState" # type: str
self.object_type = self.__discriminator_value
super(SetStateCommand, self).__init__(object_type=self.__discriminator_value, delay=delay, description=description, when=when)
self.component_id = component_id
self.state = state
self.value = value
def to_dict(self):
# type: () -> Dict[str, object]
"""Returns the model properties as a dict"""
result = {} # type: Dict
for attr, _ in six.iteritems(self.deserialized_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else
x.value if isinstance(x, Enum) else x,
value
))
elif isinstance(value, Enum):
result[attr] = value.value
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else
(item[0], item[1].value)
if isinstance(item[1], Enum) else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
# type: () -> str
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
# type: () -> str
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
# type: (object) -> bool
"""Returns true if both objects are equal"""
if not isinstance(other, SetStateCommand):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
# type: (object) -> bool
"""Returns true if both objects are not equal"""
return not self == other
py | 1a5df79f6f3d3f3bca3b4e1c801ec81efa95c0cb |
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# [START job_search_commute_search]
from google.cloud import talent
from google.cloud.talent import enums
import six
def search_jobs(project_id, tenant_id):
"""Search Jobs using commute distance"""
client = talent.JobServiceClient()
# project_id = 'Your Google Cloud Project ID'
# tenant_id = 'Your Tenant ID (using tenancy is optional)'
if isinstance(project_id, six.binary_type):
project_id = project_id.decode("utf-8")
if isinstance(tenant_id, six.binary_type):
tenant_id = tenant_id.decode("utf-8")
parent = client.tenant_path(project_id, tenant_id)
domain = "www.example.com"
session_id = "Hashed session identifier"
user_id = "Hashed user identifier"
request_metadata = {"domain": domain, "session_id": session_id, "user_id": user_id}
commute_method = enums.CommuteMethod.TRANSIT
seconds = 1800
travel_duration = {"seconds": seconds}
latitude = 37.422408
longitude = -122.084068
start_coordinates = {"latitude": latitude, "longitude": longitude}
commute_filter = {
"commute_method": commute_method,
"travel_duration": travel_duration,
"start_coordinates": start_coordinates,
}
job_query = {"commute_filter": commute_filter}
# Iterate over all results
results = []
for response_item in client.search_jobs(
parent, request_metadata, job_query=job_query
):
print("Job summary: {}".format(response_item.job_summary))
print("Job title snippet: {}".format(response_item.job_title_snippet))
job = response_item.job
results.append(job.name)
print("Job name: {}".format(job.name))
print("Job title: {}".format(job.title))
return results
# [END job_search_commute_search]
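if __name__ == "__main__":
    # Illustrative invocation only: the project and tenant IDs below are
    # placeholders and must be replaced with real values before running.
    search_jobs(project_id="your-project-id", tenant_id="your-tenant-id")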
py | 1a5df7a6c38ee04226b885fd1a8de6491b3dd107 |
import torch
from torch import nn
# Define model
class TermScorer(nn.Module):
def __init__(self, d_hidden=768, max_sentence_length=40, num_of_class=3):
super(TermScorer, self).__init__()
self.dropout = nn.Dropout(0.10)
self.hidden = nn.Sequential(
nn.Linear(d_hidden, 768),
nn.ReLU()
)
self.linear = nn.Linear(768, num_of_class)
def forward(self, x):
# input x (batch_size, num_of_span, d_hidden)
x = self.dropout(x)
h = self.hidden(x)
logits = self.linear(h)
return logits
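if __name__ == "__main__":
    # Minimal smoke test (illustrative only): the batch size and span count
    # below are arbitrary; the hidden size matches the default d_hidden=768.
    model = TermScorer()
    spans = torch.randn(2, 5, 768)  # (batch_size, num_of_span, d_hidden)
    logits = model(spans)
    print(logits.shape)  # expected: torch.Size([2, 5, 3])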
py | 1a5df7fecd8965f1977da9f1f0cca3919e510828 |
#!/usr/bin/env python3
import glob
import os.path
from datetime import datetime
from typing import Generator, Optional
import filetype
from pysymphony import SymphonyClient
from ..common.data_class import Document, Location, SiteSurvey
from ..graphql.enum.image_entity import ImageEntity
from ..graphql.input.add_image import AddImageInput
from ..graphql.mutation.add_image import AddImageMutation
from ..graphql.mutation.delete_image import DeleteImageMutation
def _add_image(
client: SymphonyClient,
local_file_path: str,
entity_type: ImageEntity,
entity_id: str,
category: Optional[str] = None,
) -> None:
file_type = filetype.guess(local_file_path)
file_type = file_type.MIME if file_type is not None else ""
img_key = client.store_file(local_file_path, file_type, False)
file_size = os.path.getsize(local_file_path)
AddImageMutation.execute(
client,
AddImageInput(
entityType=entity_type,
entityId=entity_id,
imgKey=img_key,
fileName=os.path.basename(local_file_path),
fileSize=file_size,
modified=datetime.utcnow(),
contentType=file_type,
category=category,
),
)
def list_dir(directory_path: str) -> Generator[str, None, None]:
files = list(glob.glob(os.path.join(directory_path, "**/**"), recursive=True))
for file_path in set(files):
if os.path.isfile(file_path):
yield file_path
def add_file(
client: SymphonyClient,
local_file_path: str,
entity_type: str,
entity_id: str,
category: Optional[str] = None,
) -> None:
"""This function adds file to an entity of a given type.
Args:
local_file_path (str): local system path to the file
entity_type (str): one of existing options ["LOCATION", "WORK_ORDER", "SITE_SURVEY", "EQUIPMENT"]
entity_id (string): valid entity ID
category (Optional[string]): file category name
Raises:
FailedOperationException: on operation failure
Example:
```
location = client.get_location({("Country", "LS_IND_Prod_Copy")})
client.add_file(
local_file_path="./document.pdf",
entity_type="LOCATION",
entity_id=location.id,
category="category_name",
)
```
"""
entity = {
"LOCATION": ImageEntity.LOCATION,
"WORK_ORDER": ImageEntity.WORK_ORDER,
"SITE_SURVEY": ImageEntity.SITE_SURVEY,
"EQUIPMENT": ImageEntity.EQUIPMENT,
}.get(entity_type, ImageEntity.LOCATION)
_add_image(client, local_file_path, entity, entity_id, category)
def add_files(
client: SymphonyClient,
local_directory_path: str,
entity_type: str,
entity_id: str,
category: Optional[str] = None,
) -> None:
"""This function adds all files located in folder to an entity of a given type.
Args:
local_directory_path (str): local system path to the directory
entity_type (str): one of existing options ["LOCATION", "WORK_ORDER", "SITE_SURVEY", "EQUIPMENT"]
entity_id (string): valid entity ID
category (Optional[string]): file category name
Example:
```
location = client.get_location({("Country", "LS_IND_Prod_Copy")})
client.add_files(
local_directory_path="./documents_folder/",
entity_type="LOCATION",
entity_id=location.id,
category="category_name",
)
```
"""
for file in list_dir(local_directory_path):
add_file(client, file, entity_type, entity_id, category)
def add_location_image(
client: SymphonyClient, local_file_path: str, location: Location
) -> None:
"""This function adds image to existing location.
Args:
local_file_path (str): local system path to the file
location ( `pyinventory.common.data_class.Location` ): existing location object
Raises:
FailedOperationException: on operation failure
Example:
```
location = client.get_location({("Country", "LS_IND_Prod_Copy")})
client.add_location_image(
local_file_path="./document.pdf",
location=location,
)
```
"""
_add_image(client, local_file_path, ImageEntity.LOCATION, location.id)
def add_site_survey_image(
client: SymphonyClient, local_file_path: str, id: str
) -> None:
"""This function adds image to existing site survey.
Args:
local_file_path (str): local system path to the file
id (str): site survey ID
Raises:
FailedOperationException: on operation failure
Example:
```
client.add_site_survey_image(
local_file_path="./document.pdf",
id="123456"
)
```
"""
_add_image(client, local_file_path, ImageEntity.SITE_SURVEY, id)
def _delete_image(
client: SymphonyClient, entity_type: ImageEntity, entity_id: str, image_id: str
) -> None:
DeleteImageMutation.execute(
client, entityType=entity_type, entityId=entity_id, id=image_id
)
def delete_site_survey_image(client: SymphonyClient, survey: SiteSurvey) -> None:
"""This function deletes image from existing site survey.
Args:
survey ( `pyinventory.common.data_class.SiteSurvey` ): site survey object
Raises:
FailedOperationException: on operation failure
Example:
```
client.delete_site_survey_image(survey=survey)
```
"""
source_file_key = survey.sourceFileKey
source_file_id = survey.sourceFileId
if source_file_key is not None:
client.delete_file(source_file_key, False)
if source_file_id is not None:
_delete_image(client, ImageEntity.SITE_SURVEY, survey.id, source_file_id)
def delete_document(client: SymphonyClient, document: Document) -> None:
"""This function deletes existing document.
Args:
document ( `pyinventory.common.data_class.Document` ): document object
Raises:
FailedOperationException: on operation failure
Example:
```
client.delete_document(document=document)
```
"""
_delete_image(client, document.parentEntity, document.parentId, document.id)
|
py
|
1a5df851de77ca62774ed07a1db9889e68916381
|
'''
Created on May 19, 2019
@author: ballance
'''
# TODO: implement simulation-access methods
# - yield
# - get sim time
# - ...
#
# The launcher will ultimately implement these methods
#
|
py
|
1a5df870900a7f4e96521278565cb35424579b1e
|
from .base import APITestCase
from rest_framework import status
from tests.rest_app.models import (
RootModel, OneToOneModel, ForeignKeyModel, ExtraModel, UserManagedModel,
Parent, Child, ItemType, Item, SlugModel, SlugRefParent, ChoiceModel,
)
from django.contrib.auth.models import User
from django.conf import settings
class TemplateTestCase(APITestCase):
def setUp(self):
instance = RootModel.objects.create(
slug='instance',
description="Test"
)
for cls in OneToOneModel, ForeignKeyModel, ExtraModel:
cls.objects.create(
root=instance,
)
user = User.objects.create(username="testuser", is_superuser=True)
self.client.force_authenticate(user)
UserManagedModel.objects.create(id=1, user=user)
parent = Parent.objects.create(name="Test", pk=1)
parent.children.create(name="Test 1")
parent.children.create(name="Test 2")
itype = ItemType.objects.create(name="Test", pk=1)
itype.item_set.create(name="Test 1")
itype.item_set.create(name="Test 2")
slugref = SlugModel.objects.create(
code="test",
name="Test",
)
SlugRefParent.objects.create(
ref=slugref,
pk=1,
name="Test Slug Ref"
)
SlugRefParent.objects.create(
ref=SlugModel.objects.create(
code="other",
name="Other",
),
pk=2,
name="Test Another Ref",
)
ItemType.objects.create(
name="Inactive",
pk=2,
active=False
)
ChoiceModel.objects.create(
name="Test",
pk=1,
choice="two"
)
def assertHTMLEqual(self, expected_html, html, auto_replace=True):
if settings.WITH_NONROOT and auto_replace:
html = html.replace('/wqsite/', '/')
super().assertHTMLEqual(expected_html, html)
def check_html(self, url, expected_html):
response = self.client.get(url)
self.assertTrue(status.is_success(response.status_code), response.data)
html = response.content.decode('utf-8')
self.assertHTMLEqual(expected_html, html)
# Test url="" use case
def test_template_list_at_root(self):
self.check_html("/", """
<ul>
<li><a href="/instance">instance</a></li>
</ul>
""")
def test_template_detail_at_root(self):
instance = RootModel.objects.get(slug='instance')
self.check_html("/instance", """
<h1>instance</h1>
<p>Test</p>
<h3>OneToOneModel</h3>
<p>
<a href="/onetoonemodels/{onetoone_pk}">
onetoonemodel for instance
</a>
</p>
<h3>ExtraModels</h3>
<ul>
<li>
<a href="/extramodels/{extra_pk}">
extramodel for instance
</a>
</li>
</ul>
<p><a href="/instance/edit">Edit</a></p>
""".format(
onetoone_pk=instance.onetoonemodel.pk,
extra_pk=instance.extramodels.all()[0].pk,
))
def test_template_filter_by_parent(self):
childs = Parent.objects.get(pk=1).children.order_by('pk')
self.check_html('/parents/1/children', """
<p>2 Records</p>
<h3>Childs for <a href="/parents/1">Test</a></h3>
<ul>
<li><a href="/children/{c1_pk}">Test 1</a></li>
<li><a href="/children/{c2_pk}">Test 2</a></li>
</ul>
""".format(
c1_pk=childs[0].pk,
c2_pk=childs[1].pk,
))
items = ItemType.objects.get(pk=1).item_set.order_by('pk')
self.check_html('/itemtypes/1/items', """
<h3><a href="/itemtypes/1">Test</a> Items</h3>
<ul>
<li><a href="/items/{i1_pk}">Test 1</a></li>
<li><a href="/items/{i2_pk}">Test 2</a></li>
</ul>
""".format(
i1_pk=items[0].pk,
i2_pk=items[1].pk,
))
def test_template_detail_user_serializer(self):
self.check_html('/usermanagedmodels/1', """
<h1>Object #1</h1>
<p>Created by testuser</p>
<p></p>
""")
def test_template_custom_lookup(self):
self.check_html('/slugmodels/test', "<h1>Test</h1>")
def test_template_default_per_page(self):
parent = Parent.objects.get(pk=1)
parent.name = "Test 1"
parent.save()
for i in range(2, 101):
Parent.objects.create(
id=i,
name="Test %s" % i,
)
html = """
<p>100 Records</p>
<div>
<h3>Page 1 of 2</h3>
<a href="http://testserver/parents/?page=2">Next 50</a>
</div>
<ul>
"""
for i in range(1, 51):
html += """
<li><a href="/parents/{pk}">Test {pk}</a></li>
""".format(pk=i)
html += """
</ul>
"""
self.check_html("/parents/", html)
def test_template_custom_per_page(self):
for i in range(3, 102):
child = Child.objects.create(
name="Test %s" % i,
parent_id=1,
)
self.check_html("/children/?page=2", """
<p>101 Records</p>
<div>
<a href="http://testserver/children/">Prev 100</a>
<h3>Page 2 of 2</h3>
</div>
<ul>
<li><a href="/children/{pk}">Test 101</a></li>
</ul>
""".format(pk=child.pk))
def test_template_limit(self):
for i in range(3, 101):
child = Child.objects.create(
name="Test %s" % i,
parent_id=1,
)
html = """
<p>100 Records</p>
<div>
<h3>Page 1 of 10</h3>
<a href="http://testserver/children/?limit=10&page=2">Next 10</a>
</div>
<ul>
"""
for child in Child.objects.all()[:10]:
html += """
<li><a href="/children/{pk}">{label}</a></li>
""".format(pk=child.pk, label=child.name)
html += """
</ul>
"""
self.check_html("/children/?limit=10", html)
def test_template_context_processors(self):
response = self.client.get('/rest_context')
html = response.content.decode('utf-8')
token = html.split('value="')[1].split('"')[0]
self.assertTrue(len(token) >= 32)
if settings.WITH_NONROOT:
base_url = '/wqsite'
else:
base_url = ''
self.assertHTMLEqual("""
<p>{base_url}/rest_context</p>
<p>rest_context</p>
<p>{base_url}/</p>
<p>{base_url}/</p>
<p>0.0.0</p>
<p>
<input name="csrfmiddlewaretoken" type="hidden" value="{csrf}">
</p>
<p>rest_context</p>
<p>Can Edit Items</p>
""".format(csrf=token, base_url=base_url), html, auto_replace=False)
def test_template_page_config(self):
item = Item.objects.get(name="Test 1")
self.check_html('/items/%s' % item.pk, """
<h3>Test 1</h3>
<a href="/itemtypes/1">Test</a>
<a href="/items/{pk}/edit">Edit</a>
""".format(pk=item.pk))
def test_template_edit_fk(self):
item = Item.objects.get(name="Test 1")
self.check_html('/items/%s/edit' % item.pk, """
<form>
<input name="name" required value="Test 1">
<select name="type_id" required>
<option value="">Select one...</option>
<option value="1" selected>Test</option>
<option value="2">Inactive</option>
</select>
<button>Submit</button>
</form>
""")
def test_template_edit_choice(self):
self.check_html('/choicemodels/1/edit', """
<form>
<input name="name" required value="Test">
<fieldset>
<legend>Choice</legend>
<input type="radio" id="choicemodel-choice-one"
name="choice" value="one">
<label for="choicemodel-choice-one">Choice One</label>
<input type="radio" id="choicemodel-choice-two"
name="choice" value="two" checked>
<label for="choicemodel-choice-two">Choice Two</label>
<input type="radio" id="choicemodel-choice-three"
name="choice" value="three">
<label for="choicemodel-choice-three">Choice Three</label>
</fieldset>
<button>Submit</button>
</form>
""")
def test_template_new_fk(self):
self.check_html('/children/new', """
<form>
<input name="name" required value="">
<select name="parent_id" required>
<option value="">Select one...</option>
<option value="1">Test</option>
</select>
<button>Submit</button>
</form>
""")
def test_template_new_fk_filtered(self):
self.check_html('/items/new', """
<form>
<input name="name" required value="">
<select name="type_id" required>
<option value="">Select one...</option>
<option value="1">Test</option>
</select>
<button>Submit</button>
</form>
""")
def test_template_new_fk_defaults(self):
self.check_html('/items/new?type_id=1', """
<form>
<input name="name" required value="">
<select name="type_id" required>
<option value="">Select one...</option>
<option value="1" selected>Test</option>
</select>
<button>Submit</button>
</form>
""")
def test_template_new_fk_slug(self):
self.check_html('/slugrefparents/new?ref_id=test', """
<form>
<input name="name" required value="">
<select name="ref_id" required>
<option value="">Select one...</option>
<option value="test" selected>Test</option>
<option value="other">Other</option>
</select>
<button>Submit</button>
</form>
""")
def test_template_new_fk_slug_filtered(self):
self.check_html('/slugrefchildren/new', """
<form>
<input name="name" required value="">
<select name="parent_id" required>
<option value="">Select one...</option>
<option value="1">Test Slug Ref (Test)</option>
</select>
<button>Submit</button>
</form>
""")
def test_template_new_choice(self):
self.check_html('/choicemodels/new', """
<form>
<input name="name" required value="">
<fieldset>
<legend>Choice</legend>
<input type="radio" id="choicemodel-choice-one"
name="choice" value="one">
<label for="choicemodel-choice-one">Choice One</label>
<input type="radio" id="choicemodel-choice-two"
name="choice" value="two">
<label for="choicemodel-choice-two">Choice Two</label>
<input type="radio" id="choicemodel-choice-three"
name="choice" value="three">
<label for="choicemodel-choice-three">Choice Three</label>
</fieldset>
<button>Submit</button>
</form>
""")
def test_template_new_choice_defaults(self):
self.check_html('/choicemodels/new?choice=three', """
<form>
<input name="name" required value="">
<fieldset>
<legend>Choice</legend>
<input type="radio" id="choicemodel-choice-one"
name="choice" value="one">
<label for="choicemodel-choice-one">Choice One</label>
<input type="radio" id="choicemodel-choice-two"
name="choice" value="two">
<label for="choicemodel-choice-two">Choice Two</label>
<input type="radio" id="choicemodel-choice-three"
name="choice" value="three" checked>
<label for="choicemodel-choice-three">Choice Three</label>
</fieldset>
<button>Submit</button>
</form>
""")
|
py
|
1a5dfa6b9e58b25f4c9acd0c27ae317e045ff0d0
|
from hwtypes import BitVector
import random
NTESTS = 10
MAX_BITS = 128
def test_concat_const():
a = BitVector[4](4)
b = BitVector[4](1)
c = a.concat(b)
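    # concat keeps `a` in the low-order bits and places `b` above it,
    # so c == a | (b << a.size) == 4 + 16 == 20 (bit lists below are LSB-first)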
print(a.binary_string())
print(c.binary_string())
expected = BitVector[8]([0,0,1,0,1,0,0,0])
assert expected == c
def test_concat_random():
for _ in range(NTESTS):
n1 = random.randint(1, MAX_BITS)
n2 = random.randint(1, MAX_BITS)
a = BitVector.random(n1)
b = BitVector.random(n2)
c = a.concat(b)
assert c.size == a.size + b.size
assert c == BitVector[n1 + n2](a.bits() + b.bits())
assert c.binary_string() == b.binary_string() + a.binary_string()
|
py
|
1a5dfba18fad09707a5fba2ea544191189d149c5
|
"""XML-RPC methods of Zinnia metaWeblog API"""
import os
from datetime import datetime
from xmlrpclib import Fault
from xmlrpclib import DateTime
from django.conf import settings
from django.utils import timezone
from django.contrib.sites.models import Site
from django.core.urlresolvers import reverse
from django.utils.translation import gettext as _
from django.utils.text import Truncator
from django.utils.html import strip_tags
from django.core.files.base import ContentFile
from django.core.files.storage import default_storage
from django.template.defaultfilters import slugify
from zinnia.models.entry import Entry
from zinnia.models.author import Author
from zinnia.models.category import Category
from zinnia.settings import PROTOCOL
from zinnia.settings import UPLOAD_TO
from zinnia.managers import DRAFT, PUBLISHED
from django_xmlrpc.decorators import xmlrpc_func
# http://docs.nucleuscms.org/blog/12#errorcodes
LOGIN_ERROR = 801
PERMISSION_DENIED = 803
def authenticate(username, password, permission=None):
"""Authenticate staff_user with permission"""
try:
author = Author.objects.get(username__exact=username)
except Author.DoesNotExist:
raise Fault(LOGIN_ERROR, _('Username is incorrect.'))
if not author.check_password(password):
raise Fault(LOGIN_ERROR, _('Password is invalid.'))
if not author.is_staff or not author.is_active:
raise Fault(PERMISSION_DENIED, _('User account unavailable.'))
if permission:
if not author.has_perm(permission):
raise Fault(PERMISSION_DENIED, _('User cannot %s.') % permission)
return author
def blog_structure(site):
"""A blog structure"""
return {'blogid': settings.SITE_ID,
'blogName': site.name,
'url': '%s://%s%s' % (
PROTOCOL, site.domain,
reverse('zinnia_entry_archive_index'))}
def user_structure(user, site):
"""An user structure"""
return {'userid': user.pk,
'email': user.email,
'nickname': user.username,
'lastname': user.last_name,
'firstname': user.first_name,
'url': '%s://%s%s' % (
PROTOCOL, site.domain,
reverse('zinnia_author_detail', args=[user.username]))}
def author_structure(user):
"""An author structure"""
return {'user_id': user.pk,
'user_login': user.username,
'display_name': user.username,
'user_email': user.email}
def category_structure(category, site):
"""A category structure"""
return {'description': category.title,
'htmlUrl': '%s://%s%s' % (
PROTOCOL, site.domain,
category.get_absolute_url()),
'rssUrl': '%s://%s%s' % (
PROTOCOL, site.domain,
reverse('zinnia_category_feed', args=[category.tree_path])),
# Useful Wordpress Extensions
'categoryId': category.pk,
'parentId': category.parent and category.parent.pk or 0,
'categoryDescription': category.description,
'categoryName': category.title}
def post_structure(entry, site):
"""A post structure with extensions"""
author = entry.authors.all()[0]
return {'title': entry.title,
'description': unicode(entry.html_content),
'link': '%s://%s%s' % (PROTOCOL, site.domain,
entry.get_absolute_url()),
# Basic Extensions
'permaLink': '%s://%s%s' % (PROTOCOL, site.domain,
entry.get_absolute_url()),
'categories': [cat.title for cat in entry.categories.all()],
'dateCreated': DateTime(entry.creation_date.isoformat()),
'postid': entry.pk,
'userid': author.username,
# Useful Movable Type Extensions
'mt_excerpt': entry.excerpt,
'mt_allow_comments': int(entry.comment_enabled),
'mt_allow_pings': (int(entry.pingback_enabled) or
int(entry.trackback_enabled)),
'mt_keywords': entry.tags,
# Useful Wordpress Extensions
'wp_author': author.username,
'wp_author_id': author.pk,
'wp_author_display_name': author.username,
'wp_password': entry.password,
'wp_slug': entry.slug,
'sticky': entry.featured}
@xmlrpc_func(returns='struct[]', args=['string', 'string', 'string'])
def get_users_blogs(apikey, username, password):
"""blogger.getUsersBlogs(api_key, username, password)
=> blog structure[]"""
authenticate(username, password)
site = Site.objects.get_current()
return [blog_structure(site)]
@xmlrpc_func(returns='struct', args=['string', 'string', 'string'])
def get_user_info(apikey, username, password):
"""blogger.getUserInfo(api_key, username, password)
=> user structure"""
user = authenticate(username, password)
site = Site.objects.get_current()
return user_structure(user, site)
@xmlrpc_func(returns='struct[]', args=['string', 'string', 'string'])
def get_authors(apikey, username, password):
"""wp.getAuthors(api_key, username, password)
=> author structure[]"""
authenticate(username, password)
return [author_structure(author)
for author in Author.objects.filter(is_staff=True)]
@xmlrpc_func(returns='boolean', args=['string', 'string',
'string', 'string', 'string'])
def delete_post(apikey, post_id, username, password, publish):
"""blogger.deletePost(api_key, post_id, username, password, 'publish')
=> boolean"""
user = authenticate(username, password, 'zinnia.delete_entry')
entry = Entry.objects.get(id=post_id, authors=user)
entry.delete()
return True
@xmlrpc_func(returns='struct', args=['string', 'string', 'string'])
def get_post(post_id, username, password):
"""metaWeblog.getPost(post_id, username, password)
=> post structure"""
user = authenticate(username, password)
site = Site.objects.get_current()
return post_structure(Entry.objects.get(id=post_id, authors=user), site)
@xmlrpc_func(returns='struct[]',
args=['string', 'string', 'string', 'integer'])
def get_recent_posts(blog_id, username, password, number):
"""metaWeblog.getRecentPosts(blog_id, username, password, number)
=> post structure[]"""
user = authenticate(username, password)
site = Site.objects.get_current()
return [post_structure(entry, site)
for entry in Entry.objects.filter(authors=user)[:number]]
@xmlrpc_func(returns='struct[]', args=['string', 'string', 'string'])
def get_categories(blog_id, username, password):
"""metaWeblog.getCategories(blog_id, username, password)
=> category structure[]"""
authenticate(username, password)
site = Site.objects.get_current()
return [category_structure(category, site)
for category in Category.objects.all()]
@xmlrpc_func(returns='string', args=['string', 'string', 'string', 'struct'])
def new_category(blog_id, username, password, category_struct):
"""wp.newCategory(blog_id, username, password, category)
=> category_id"""
authenticate(username, password, 'zinnia.add_category')
category_dict = {'title': category_struct['name'],
'description': category_struct['description'],
'slug': category_struct['slug']}
if int(category_struct['parent_id']):
category_dict['parent'] = Category.objects.get(
pk=category_struct['parent_id'])
category = Category.objects.create(**category_dict)
return category.pk
@xmlrpc_func(returns='string', args=['string', 'string', 'string',
'struct', 'boolean'])
def new_post(blog_id, username, password, post, publish):
"""metaWeblog.newPost(blog_id, username, password, post, publish)
=> post_id"""
user = authenticate(username, password, 'zinnia.add_entry')
if post.get('dateCreated'):
creation_date = datetime.strptime(
post['dateCreated'].value[:18], '%Y-%m-%dT%H:%M:%S')
if settings.USE_TZ:
creation_date = timezone.make_aware(
creation_date, timezone.utc)
else:
creation_date = timezone.now()
entry_dict = {'title': post['title'],
'content': post['description'],
'excerpt': post.get('mt_excerpt', Truncator(
strip_tags(post['description'])).words(50)),
'creation_date': creation_date,
'last_update': creation_date,
'comment_enabled': post.get('mt_allow_comments', 1) == 1,
'pingback_enabled': post.get('mt_allow_pings', 1) == 1,
'trackback_enabled': post.get('mt_allow_pings', 1) == 1,
'featured': post.get('sticky', 0) == 1,
'tags': 'mt_keywords' in post and post['mt_keywords'] or '',
'slug': 'wp_slug' in post and post['wp_slug'] or slugify(
post['title']),
'password': post.get('wp_password', '')}
if user.has_perm('zinnia.can_change_status'):
entry_dict['status'] = publish and PUBLISHED or DRAFT
entry = Entry.objects.create(**entry_dict)
author = user
if 'wp_author_id' in post and user.has_perm('zinnia.can_change_author'):
if int(post['wp_author_id']) != user.pk:
author = Author.objects.get(pk=post['wp_author_id'])
entry.authors.add(author)
entry.sites.add(Site.objects.get_current())
if 'categories' in post:
entry.categories.add(*[
Category.objects.get_or_create(
title=cat, slug=slugify(cat))[0]
for cat in post['categories']])
return entry.pk
@xmlrpc_func(returns='boolean', args=['string', 'string', 'string',
'struct', 'boolean'])
def edit_post(post_id, username, password, post, publish):
"""metaWeblog.editPost(post_id, username, password, post, publish)
=> boolean"""
user = authenticate(username, password, 'zinnia.change_entry')
entry = Entry.objects.get(id=post_id, authors=user)
if post.get('dateCreated'):
creation_date = datetime.strptime(
post['dateCreated'].value[:18], '%Y-%m-%dT%H:%M:%S')
if settings.USE_TZ:
creation_date = timezone.make_aware(
creation_date, timezone.utc)
else:
creation_date = entry.creation_date
entry.title = post['title']
entry.content = post['description']
entry.excerpt = post.get('mt_excerpt', Truncator(
strip_tags(post['description'])).words(50))
entry.creation_date = creation_date
entry.last_update = timezone.now()
entry.comment_enabled = post.get('mt_allow_comments', 1) == 1
entry.pingback_enabled = post.get('mt_allow_pings', 1) == 1
entry.trackback_enabled = post.get('mt_allow_pings', 1) == 1
entry.featured = post.get('sticky', 0) == 1
entry.tags = 'mt_keywords' in post and post['mt_keywords'] or ''
entry.slug = 'wp_slug' in post and post['wp_slug'] or slugify(
post['title'])
if user.has_perm('zinnia.can_change_status'):
entry.status = publish and PUBLISHED or DRAFT
entry.password = post.get('wp_password', '')
entry.save()
if 'wp_author_id' in post and user.has_perm('zinnia.can_change_author'):
if int(post['wp_author_id']) != user.pk:
author = Author.objects.get(pk=post['wp_author_id'])
entry.authors.clear()
entry.authors.add(author)
if 'categories' in post:
entry.categories.clear()
entry.categories.add(*[
Category.objects.get_or_create(
title=cat, slug=slugify(cat))[0]
for cat in post['categories']])
return True
@xmlrpc_func(returns='struct', args=['string', 'string', 'string', 'struct'])
def new_media_object(blog_id, username, password, media):
"""metaWeblog.newMediaObject(blog_id, username, password, media)
=> media structure"""
authenticate(username, password)
path = default_storage.save(os.path.join(UPLOAD_TO, media['name']),
ContentFile(media['bits'].data))
return {'url': default_storage.url(path)}
|
py
|
1a5dfbaec588200f35139e93de175e7e4eb8e917
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from runner.koan import *
class AboutControlStatements(Koan):
def test_if_then_else_statements(self):
if True:
result = 'true value'
else:
result = 'false value'
self.assertEqual("true value", result)
def test_if_then_statements(self):
result = 'default value'
if True:
result = 'true value'
self.assertEqual("true value", result)
def test_while_statement(self):
i = 1
result = 1
while i <= 10:
result = result * i
i += 1
self.assertEqual(3628800, result)
def test_break_statement(self):
i = 1
result = 1
while True:
if i > 10: break
result = result * i
i += 1
self.assertEqual(3628800, result)
def test_continue_statement(self):
i = 0
result = []
while i < 10:
i += 1
if (i % 2) == 0: continue
result.append(i)
self.assertEqual([1, 3, 5, 7, 9], result)
def test_for_statement(self):
phrase = ["fish", "and", "chips"]
result = []
for item in phrase:
result.append(item.upper())
self.assertEqual(["FISH", "AND", "CHIPS"], result)
def test_for_statement_with_tuples(self):
round_table = [
("Lancelot", "Blue"),
("Galahad", "I don't know!"),
("Robin", "Blue! I mean Green!"),
("Arthur", "Is that an African Swallow or Amazonian Swallow?")
]
result = []
for knight, answer in round_table:
result.append("Contestant: '" + knight + "' Answer: '" + answer + "'")
text = "Contestant: 'Robin' Answer: 'Blue! I mean Green!'"
self.assertMatch(text, result[2])
self.assertNoMatch(text, result[0])
self.assertNoMatch(text, result[1])
self.assertNoMatch(text, result[3])
|
py
|
1a5dfc47ba53b23cc0e10ec1d14bda42f9025a6f
|
"""
Human Module
"""
from player import Player
from six import PY2
__author__ = "Matthew 'MasterOdin' Peveler"
__license__ = "The MIT License (MIT)"
class Human(Player):
"""
this is the AI base class?
"""
def get_move(self, board):
"""
:return:
"""
while True:
col = input("Player "+self.piece+" Column ==> ").strip()
if not col.isdigit() or not board.can_add_piece(int(col)):
continue
else:
return int(col)
def get_type(self):
"""
return that this is a human player
:return: (str) "Human"
"""
return "Human (Player "+self.piece+")"
# fix for input so works same as Python 2 & 3
if PY2:
# pylint: disable=undefined-variable, invalid-name, redefined-builtin
input = raw_input
else:
pass
|
py
|
1a5dfd80715d38ac3f161822a79053c7dc077322
|
CONFIG_VERSION = 1
config = None
class Config:
def __init__(self, obj):
if 'CONFIG_VERSION' not in obj:
raise Exception("Configuration file must contain 'CONFIG_VERSION' field")
if obj['CONFIG_VERSION'] != CONFIG_VERSION:
raise Exception("Expected CONFIG_VERSION to be %s found %s" % (
CONFIG_VERSION, obj['CONFIG_VERSION']))
if 'DEFAULT_USER' not in obj:
raise Exception("Configuration file must contain 'DEFAULT_USER' field")
self._defaultUser = obj['DEFAULT_USER']
if 'DEFAULT_PURPOSE' not in obj:
raise Exception("Configuration file must contain 'DEFAULT_PURPOSE' field")
self._defaultPurpose = obj['DEFAULT_PURPOSE']
if 'PROVIDER' not in obj:
raise Exception("Configuration file must contain 'PROVIDER' field")
self._provider = obj['PROVIDER']
if 'DEFAULT_CONTINENT' not in obj:
raise Exception("Configuration file must contain 'DEFAULT_CONTINENT' field")
self._defaultContinent = obj['DEFAULT_CONTINENT']
global config
config = self
def defaultUser(self):
return self._defaultUser
def defaultContinent(self):
return self._defaultContinent
def defaultPurpose(self):
return self._defaultPurpose
def provider(self):
return self._provider
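
# A minimal configuration accepted by the checks above (illustrative sketch; the
# field values are assumptions, only the required keys come from this class):
#
#   Config({
#       'CONFIG_VERSION': CONFIG_VERSION,
#       'DEFAULT_USER': 'alice',
#       'DEFAULT_PURPOSE': 'testing',
#       'PROVIDER': 'aws',
#       'DEFAULT_CONTINENT': 'europe',
#   })
#
# Constructing a Config also stores the instance in the module-level `config` global.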
|
py
|
1a5dfe04bb4052d0a37fa27ad0b250facf2adc60
|
import os
from datetime import datetime, timedelta, timezone
import discord
from Core.error import InteractionError
from discord import ApplicationContext
from discord.commands import slash_command
from discord.ext import commands
from dotenv import load_dotenv
from SongDBCore import SongDBClient
from SongDB.embed_builder import EmbedBuilder as EB
from SongDB.many_page import PagePage
from SongDB.match import match_url
load_dotenv()
req_url = "https://script.google.com/macros/s/AKfycbybEQO66Ui5AbgaPvisluBbWMqxayLM2iyPCNeipXUOvn__Jp4SQsm7X8Z4w3HQvxja/exec"
guild_id = int(os.environ["GUILD_ID"])
utc = timezone.utc
jst = timezone(timedelta(hours=9), "Asia/Tokyo")
class SongDB(commands.Cog):
def __init__(self, bot):
self.bot = bot
@slash_command(guild_ids=[guild_id], name="song")
async def _song(self, ctx: ApplicationContext):
await ctx.interaction.response.defer(ephemeral=True)
embed = EB()._start()
view = ProdDropdownView()
await ctx.interaction.followup.send(embed=embed, view=view, ephemeral=True)
return
class ProdDropdown(discord.ui.Select):
def __init__(self) -> None:
options = [
discord.SelectOption(
label="データベース検索",
value="multi",
description="曲名、アーティスト名、配信URLなどの条件で検索",
default=False,
),
discord.SelectOption(
label="最近歌われていない曲",
value="no_recent",
description="最近歌われていない曲の一覧を検索できます。",
default=False,
),
]
super().__init__(
placeholder="検索方式を指定してください。", min_values=1, max_values=1, options=options
)
async def callback(self, interaction: discord.Interaction) -> None:
if self.values[0] == "multi":
await interaction.response.send_modal(modal=ProdSearch())
return
elif self.values[0] == "no_recent":
embed = EB()._start()
view = ProdRecentDropdownView()
await interaction.response.send_message(
embed=embed, view=view, ephemeral=True
)
return
raise InteractionError(interaction=interaction, cls=self)
class DateSelect(discord.ui.Select):
def __init__(self) -> None:
options = [
discord.SelectOption(
label="1ヶ月",
value="1",
description="1ヶ月歌われていない曲を検索します。",
default=False,
),
discord.SelectOption(
label="3ヶ月",
value="3",
description="3ヶ月歌われていない曲を検索します。",
default=False,
),
discord.SelectOption(
label="6ヶ月",
value="6",
description="6ヶ月歌われていない曲を検索します。",
default=False,
),
discord.SelectOption(
label="1年",
value="12",
description="1年歌われていない曲を検索します。",
default=False,
),
]
super().__init__(
placeholder="検索する期間を選択してください。", min_values=1, max_values=1, options=options
)
async def callback(self, interaction: discord.Interaction):
mth = int(self.values[0]) # 1 or 3 or 6 or 12
now = datetime.now().astimezone(jst)
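        # go back roughly `mth` months (30-day blocks) and format the cutoff as
        # year/month/day (month zero-padded) for the date search below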
__to = (now - timedelta(days=30 * mth)).date()
if __to.month < 10:
month = f"0{str(__to.month)}"
else:
month = str(__to.month)
_to = f"{__to.year}/{month}/{__to.day}"
print(_to)
client = SongDBClient(url=req_url)
_date = await client.search_by_date(_to=_to)
if _date.songs == []: # no result found
embed = EB()._empty_recent(_to=_to)
await interaction.response.send_message(embed=embed, ephemeral=True)
return
embeds = EB()._recent(_to=_to, songs=_date.songs)
# await interaction.response.send_message(embed=embeds[0])
await PagePage(embeds=embeds)._send(interaction)
return
class ProdSearch(discord.ui.Modal):
def __init__(self) -> None:
super().__init__(title="歌枠データベース")
self.add_item(
discord.ui.InputText(
label="検索したい曲名を入力してください。",
style=discord.InputTextStyle.short,
required=False,
row=0,
placeholder="曲名",
)
)
self.add_item(
discord.ui.InputText(
label="検索したいアーティスト名や作曲者名を入力してください。",
style=discord.InputTextStyle.short,
required=False,
row=1,
placeholder="アーティスト名/作曲者名",
),
)
self.add_item(
discord.ui.InputText(
label="検索したい歌枠のURLを入力してください。",
style=discord.InputTextStyle.short,
required=False,
row=2,
placeholder="youtube.comとyoutu.beに対応しています",
)
)
async def callback(self, interaction: discord.Interaction):
# await interaction.response.defer(ephemeral=True)
if self.children[2].value:
matched_id = match_url(self.children[2].value)
if not matched_id:
print("Invalid url inputted.")
await interaction.response.send_message(
content="対応していないURLが入力されました。", ephemeral=True
)
return
client = SongDBClient(url=req_url)
d = {
"song_name": self.children[0].value,
"artist_name": self.children[1].value,
"stream_id": self.children[2].value,
}
if not any(d.values()):
await interaction.response.send_message(
content="一つ以上の検索条件を指定してください。", ephemeral=True
)
return
songs = await client.multi_search(**d)
if songs.songs == []: # no result found
embed = EB()._empty(_input=d)
await interaction.response.send_message(embed=embed, ephemeral=True)
return
embeds = EB()._rawsong(_input=d, songs=songs.songs)
# await interaction.response.send_message(embed=embeds[0])
await PagePage(embeds=embeds)._send(interaction)
return
class ProdDropdownView(discord.ui.View):
def __init__(self):
super().__init__(timeout=None)
self.add_item(ProdDropdown())
class ProdRecentDropdownView(discord.ui.View):
def __init__(self):
super().__init__(timeout=None)
self.add_item(DateSelect())
def setup(bot):
return bot.add_cog(SongDB(bot))
|
py
|
1a5e00545204b5615f139d9eaa1e03e6d4a9b52b
|
import unittest
from cricket.model import TestModule, TestCase
# Use Unittest as a template for TestSuite behavior.
from cricket.unittest.model import UnittestTestSuite as TestSuite
class TestTestSuite(unittest.TestCase):
"""Tests for the process of converting the output of the Discoverer
into an internal tree.
"""
def _full_tree(self, node):
"Internal method generating a simple tree version of a test_suite node"
if isinstance(node, TestCase):
return (type(node), node._child_labels)
else:
return dict(
((type(sub_tree), sub_node), self._full_tree(sub_tree))
for sub_node, sub_tree in node._child_nodes.items()
)
def test_no_tests(self):
"If there are no tests, an empty tree is generated"
test_suite = TestSuite()
test_suite.refresh(test_list=[])
self.assertEqual(test_suite.errors, [])
self.assertEqual(sorted(self._full_tree(test_suite)), sorted({}))
def test_with_tests(self):
"If tests are found, the right tree is created"
test_suite = TestSuite()
test_suite.refresh([
'tests.FunkyTestCase.test_something_unnecessary',
'more_tests.FunkyTestCase.test_this_does_make_sense',
'more_tests.FunkyTestCase.test_this_doesnt_make_sense',
'more_tests.JankyTestCase.test_things',
'deep_tests.package.DeepTestCase.test_doo_hickey',
])
self.assertEqual(test_suite.errors, [])
self.assertEqual(sorted(self._full_tree(test_suite)), sorted({
(TestModule, 'tests'): {
(TestCase, 'FunkyTestCase'): [
'test_something_unnecessary'
]
},
(TestModule, 'more_tests'): {
(TestCase, 'FunkyTestCase'): [
                    'test_this_does_make_sense',
'test_this_doesnt_make_sense'
],
(TestCase, 'JankyTestCase'): [
'test_things'
]
},
(TestModule, 'deep_tests'): {
(TestModule, 'package'): {
(TestCase, 'DeepTestCase'): [
'test_doo_hickey'
]
}
}
}))
def test_with_tests_and_errors(self):
"If tests *and* errors are found, the tree is still created."
test_suite = TestSuite()
test_suite.refresh([
'tests.FunkyTestCase.test_something_unnecessary',
],
errors=[
'ERROR: you broke it, fool!',
]
)
self.assertEqual(test_suite.errors, [
'ERROR: you broke it, fool!',
])
self.assertEqual(sorted(self._full_tree(test_suite)), sorted({
(TestModule, 'tests'): {
(TestCase, 'FunkyTestCase'): [
'test_something_unnecessary'
]
}
}))
class FindLabelTests(unittest.TestCase):
"Check that naming tests by labels reduces to the right runtime list."
def setUp(self):
self.maxDiff = None
self.test_suite = TestSuite()
self.test_suite.refresh([
'app1.TestCase.test_method',
'app2.TestCase1.test_method',
'app2.TestCase2.test_method1',
'app2.TestCase2.test_method2',
'app3.tests.TestCase.test_method',
'app4.tests1.TestCase.test_method',
'app4.tests2.TestCase1.test_method',
'app4.tests2.TestCase2.test_method1',
'app4.tests2.TestCase2.test_method2',
'app5.package.tests.TestCase.test_method',
'app6.package1.tests.TestCase.test_method',
'app6.package2.tests1.TestCase.test_method',
'app6.package2.tests2.TestCase1.test_method',
'app6.package2.tests2.TestCase2.test_method1',
'app6.package2.tests2.TestCase2.test_method2',
'app7.package.subpackage.tests.TestCase.test_method',
'app8.package1.subpackage.tests.TestCase.test_method',
'app8.package2.subpackage1.tests.TestCase.test_method',
'app8.package2.subpackage2.tests1.TestCase.test_method',
'app8.package2.subpackage2.tests2.TestCase1.test_method',
'app8.package2.subpackage2.tests2.TestCase2.test_method1',
'app8.package2.subpackage2.tests2.TestCase2.test_method2',
])
def test_single_test_test_suite(self):
"If the test_suite only contains a single test, the reduction is always the full suite"
self.test_suite = TestSuite()
self.test_suite.refresh([
'app.package.tests.TestCase.test_method',
])
self.assertEqual(self.test_suite.find_tests(labels=[
'app.package.tests.TestCase.test_method'
]),
(1, None))
self.assertEqual(self.test_suite.find_tests(labels=[
'app.package.tests.TestCase'
]),
(1, None))
self.assertEqual(self.test_suite.find_tests(labels=[
'app.package.tests'
]),
(1, None))
self.assertEqual(self.test_suite.find_tests(labels=[
'app.package'
]),
(1, None))
self.assertEqual(self.test_suite.find_tests(labels=[
'app'
]),
(1, None))
def test_all_tests(self):
"Without any qualifiers, all tests are run"
self.assertEqual(self.test_suite.find_tests(), (22, None))
def test_method_selection(self):
"Explicitly named test method paths may be trimmed if they are unique"
self.assertEqual(self.test_suite.find_tests(labels=[
'app1.TestCase.test_method'
]),
(1, ['app1']))
self.assertEqual(self.test_suite.find_tests(labels=[
'app2.TestCase1.test_method'
]),
(1, ['app2.TestCase1']))
self.assertEqual(self.test_suite.find_tests(labels=[
'app2.TestCase2.test_method1'
]),
(1, ['app2.TestCase2.test_method1']))
self.assertEqual(self.test_suite.find_tests(labels=[
'app3.tests.TestCase.test_method'
]),
(1, ['app3']))
self.assertEqual(self.test_suite.find_tests(labels=[
'app4.tests1.TestCase.test_method'
]),
(1, ['app4.tests1']))
self.assertEqual(self.test_suite.find_tests(labels=[
'app4.tests2.TestCase1.test_method'
]),
(1, ['app4.tests2.TestCase1']))
self.assertEqual(self.test_suite.find_tests(labels=[
'app4.tests2.TestCase2.test_method1'
]),
(1, ['app4.tests2.TestCase2.test_method1']))
self.assertEqual(self.test_suite.find_tests(labels=[
'app5.package.tests.TestCase.test_method'
]),
(1, ['app5']))
self.assertEqual(self.test_suite.find_tests(labels=[
'app6.package1.tests.TestCase.test_method'
]),
(1, ['app6.package1']))
self.assertEqual(self.test_suite.find_tests(labels=[
'app6.package2.tests1.TestCase.test_method'
]),
(1, ['app6.package2.tests1']))
self.assertEqual(self.test_suite.find_tests(labels=[
'app6.package2.tests2.TestCase1.test_method'
]),
(1, ['app6.package2.tests2.TestCase1']))
self.assertEqual(self.test_suite.find_tests(labels=[
'app6.package2.tests2.TestCase2.test_method1'
]),
(1, ['app6.package2.tests2.TestCase2.test_method1']))
self.assertEqual(self.test_suite.find_tests(labels=[
'app7.package.subpackage.tests.TestCase.test_method'
]),
(1, ['app7']))
self.assertEqual(self.test_suite.find_tests(labels=[
'app8.package1.subpackage.tests.TestCase.test_method'
]),
(1, ['app8.package1']))
self.assertEqual(self.test_suite.find_tests(labels=[
'app8.package2.subpackage1.tests.TestCase.test_method'
]),
(1, ['app8.package2.subpackage1']))
self.assertEqual(self.test_suite.find_tests(labels=[
'app8.package2.subpackage2.tests1.TestCase.test_method'
]),
(1, ['app8.package2.subpackage2.tests1']))
self.assertEqual(self.test_suite.find_tests(labels=[
'app8.package2.subpackage2.tests2.TestCase1.test_method'
]),
(1, ['app8.package2.subpackage2.tests2.TestCase1']))
self.assertEqual(self.test_suite.find_tests(labels=[
'app8.package2.subpackage2.tests2.TestCase2.test_method1'
]),
(1, ['app8.package2.subpackage2.tests2.TestCase2.test_method1']))
def test_testcase_selection(self):
"Explicitly named test case paths may be trimmed if they are unique"
self.assertEqual(self.test_suite.find_tests(labels=[
'app1.TestCase'
]),
(1, ['app1']))
self.assertEqual(self.test_suite.find_tests(labels=[
'app2.TestCase1'
]),
(1, ['app2.TestCase1']))
self.assertEqual(self.test_suite.find_tests(labels=[
'app2.TestCase2'
]),
(2, ['app2.TestCase2']))
self.assertEqual(self.test_suite.find_tests(labels=[
'app3.tests.TestCase'
]),
(1, ['app3']))
self.assertEqual(self.test_suite.find_tests(labels=[
'app4.tests1.TestCase'
]),
(1, ['app4.tests1']))
self.assertEqual(self.test_suite.find_tests(labels=[
'app4.tests2.TestCase1'
]),
(1, ['app4.tests2.TestCase1']))
self.assertEqual(self.test_suite.find_tests(labels=[
'app4.tests2.TestCase2'
]),
(2, ['app4.tests2.TestCase2']))
self.assertEqual(self.test_suite.find_tests(labels=[
'app5.package.tests.TestCase'
]),
(1, ['app5']))
self.assertEqual(self.test_suite.find_tests(labels=[
'app6.package1.tests.TestCase'
]),
(1, ['app6.package1']))
self.assertEqual(self.test_suite.find_tests(labels=[
'app6.package2.tests1.TestCase'
]),
(1, ['app6.package2.tests1']))
self.assertEqual(self.test_suite.find_tests(labels=[
'app6.package2.tests2.TestCase1'
]),
(1, ['app6.package2.tests2.TestCase1']))
self.assertEqual(self.test_suite.find_tests(labels=[
'app6.package2.tests2.TestCase2'
]),
(2, ['app6.package2.tests2.TestCase2']))
self.assertEqual(self.test_suite.find_tests(labels=[
'app7.package.subpackage.tests.TestCase'
]),
(1, ['app7']))
self.assertEqual(self.test_suite.find_tests(labels=[
'app8.package1.subpackage.tests.TestCase'
]),
(1, ['app8.package1']))
self.assertEqual(self.test_suite.find_tests(labels=[
'app8.package2.subpackage1.tests.TestCase'
]),
(1, ['app8.package2.subpackage1']))
self.assertEqual(self.test_suite.find_tests(labels=[
'app8.package2.subpackage2.tests1.TestCase'
]),
(1, ['app8.package2.subpackage2.tests1']))
self.assertEqual(self.test_suite.find_tests(labels=[
'app8.package2.subpackage2.tests2.TestCase1'
]),
(1, ['app8.package2.subpackage2.tests2.TestCase1']))
self.assertEqual(self.test_suite.find_tests(labels=[
'app8.package2.subpackage2.tests2.TestCase2'
]),
(2, ['app8.package2.subpackage2.tests2.TestCase2']))
def test_testmodule_selection(self):
"Explicitly named test module paths may be trimmed if they are unique"
self.assertEqual(self.test_suite.find_tests(labels=[
'app3.tests'
]),
(1, ['app3']))
self.assertEqual(self.test_suite.find_tests(labels=[
'app4.tests1'
]),
(1, ['app4.tests1']))
self.assertEqual(self.test_suite.find_tests(labels=[
'app4.tests2'
]),
(3, ['app4.tests2']))
self.assertEqual(self.test_suite.find_tests(labels=[
'app5.package.tests'
]),
(1, ['app5']))
self.assertEqual(self.test_suite.find_tests(labels=[
'app6.package1.tests'
]),
(1, ['app6.package1']))
self.assertEqual(self.test_suite.find_tests(labels=[
'app6.package2.tests1'
]),
(1, ['app6.package2.tests1']))
self.assertEqual(self.test_suite.find_tests(labels=[
'app6.package2.tests2'
]),
(3, ['app6.package2.tests2']))
self.assertEqual(self.test_suite.find_tests(labels=[
'app7.package.subpackage.tests'
]),
(1, ['app7']))
self.assertEqual(self.test_suite.find_tests(labels=[
'app8.package1.subpackage.tests'
]),
(1, ['app8.package1']))
self.assertEqual(self.test_suite.find_tests(labels=[
'app8.package2.subpackage1.tests'
]),
(1, ['app8.package2.subpackage1']))
self.assertEqual(self.test_suite.find_tests(labels=[
'app8.package2.subpackage2.tests1'
]),
(1, ['app8.package2.subpackage2.tests1']))
self.assertEqual(self.test_suite.find_tests(labels=[
'app8.package2.subpackage2.tests2'
]),
(3, ['app8.package2.subpackage2.tests2']))
def test_package_selection(self):
"Explicitly named test package paths may be trimmed if they are unique"
self.assertEqual(self.test_suite.find_tests(labels=[
'app5.package'
]),
(1, ['app5']))
self.assertEqual(self.test_suite.find_tests(labels=[
'app6.package1'
]),
(1, ['app6.package1']))
self.assertEqual(self.test_suite.find_tests(labels=[
'app6.package2'
]),
(4, ['app6.package2']))
self.assertEqual(self.test_suite.find_tests(labels=[
'app7.package'
]),
(1, ['app7']))
self.assertEqual(self.test_suite.find_tests(labels=[
'app8.package1'
]),
(1, ['app8.package1']))
self.assertEqual(self.test_suite.find_tests(labels=[
'app8.package2'
]),
(5, ['app8.package2']))
def test_subpackage_selection(self):
"Explicitly named test subpackage paths may be trimmed if they are unique"
self.assertEqual(self.test_suite.find_tests(labels=[
'app7.package.subpackage'
]),
(1, ['app7']))
self.assertEqual(self.test_suite.find_tests(labels=[
'app8.package1.subpackage'
]),
(1, ['app8.package1']))
self.assertEqual(self.test_suite.find_tests(labels=[
'app8.package2.subpackage1'
]),
(1, ['app8.package2.subpackage1']))
self.assertEqual(self.test_suite.find_tests(labels=[
'app8.package2.subpackage2'
]),
(4, ['app8.package2.subpackage2']))
def test_app_selection(self):
"Explicitly named app paths return a count of all tests in the app"
self.assertEqual(self.test_suite.find_tests(labels=[
'app1'
]),
(1, ['app1']))
self.assertEqual(self.test_suite.find_tests(labels=[
'app2'
]),
(3, ['app2']))
self.assertEqual(self.test_suite.find_tests(labels=[
'app3'
]),
(1, ['app3']))
self.assertEqual(self.test_suite.find_tests(labels=[
'app4'
]),
(4, ['app4']))
self.assertEqual(self.test_suite.find_tests(labels=[
'app5'
]),
(1, ['app5']))
self.assertEqual(self.test_suite.find_tests(labels=[
'app6'
]),
(5, ['app6']))
self.assertEqual(self.test_suite.find_tests(labels=[
'app7'
]),
(1, ['app7']))
self.assertEqual(self.test_suite.find_tests(labels=[
'app8'
]),
(6, ['app8']))
def test_testcase_collapse(self):
"If all methods in a test are selected, path is trimmed to the case"
self.assertEqual(self.test_suite.find_tests(labels=[
'app2.TestCase2.test_method1',
'app2.TestCase2.test_method2',
]),
(2, ['app2.TestCase2']))
self.assertEqual(self.test_suite.find_tests(labels=[
'app4.tests2.TestCase2.test_method1',
'app4.tests2.TestCase2.test_method2',
]),
(2, ['app4.tests2.TestCase2']))
self.assertEqual(self.test_suite.find_tests(labels=[
'app6.package2.tests2.TestCase2.test_method1',
'app6.package2.tests2.TestCase2.test_method2',
]),
(2, ['app6.package2.tests2.TestCase2']))
self.assertEqual(self.test_suite.find_tests(labels=[
'app8.package2.subpackage2.tests2.TestCase2.test_method1',
'app8.package2.subpackage2.tests2.TestCase2.test_method2',
]),
(2, ['app8.package2.subpackage2.tests2.TestCase2']))
def test_testmethod_collapse(self):
"If all test cases in a test are selected, path is trimmed to the testmethod"
self.assertEqual(self.test_suite.find_tests(labels=[
'app2.TestCase1.test_method',
'app2.TestCase2.test_method1',
'app2.TestCase2.test_method2',
]),
(3, ['app2']))
self.assertEqual(self.test_suite.find_tests(labels=[
'app2.TestCase1.test_method',
'app2.TestCase2',
]),
(3, ['app2']))
self.assertEqual(self.test_suite.find_tests(labels=[
'app2.TestCase1',
'app2.TestCase2',
]),
(3, ['app2']))
self.assertEqual(self.test_suite.find_tests(labels=[
'app4.tests2.TestCase1.test_method',
'app4.tests2.TestCase2.test_method1',
'app4.tests2.TestCase2.test_method2',
]),
(3, ['app4.tests2']))
self.assertEqual(self.test_suite.find_tests(labels=[
'app4.tests2.TestCase1.test_method',
'app4.tests2.TestCase2',
]),
(3, ['app4.tests2']))
self.assertEqual(self.test_suite.find_tests(labels=[
'app4.tests2.TestCase1',
'app4.tests2.TestCase2',
]),
(3, ['app4.tests2']))
self.assertEqual(self.test_suite.find_tests(labels=[
'app6.package2.tests2.TestCase1.test_method',
'app6.package2.tests2.TestCase2.test_method1',
'app6.package2.tests2.TestCase2.test_method2',
]),
(3, ['app6.package2.tests2']))
self.assertEqual(self.test_suite.find_tests(labels=[
'app6.package2.tests2.TestCase1.test_method',
'app6.package2.tests2.TestCase2',
'app6.package2.tests2',
]),
(3, ['app6.package2.tests2']))
self.assertEqual(self.test_suite.find_tests(labels=[
'app6.package2.tests2.TestCase1',
'app6.package2.tests2.TestCase2',
]),
(3, ['app6.package2.tests2']))
self.assertEqual(self.test_suite.find_tests(labels=[
'app8.package2.subpackage2.tests2.TestCase1.test_method',
'app8.package2.subpackage2.tests2.TestCase2.test_method1',
'app8.package2.subpackage2.tests2.TestCase2.test_method2',
]),
(3, ['app8.package2.subpackage2.tests2']))
self.assertEqual(self.test_suite.find_tests(labels=[
'app8.package2.subpackage2.tests2.TestCase1.test_method',
'app8.package2.subpackage2.tests2.TestCase2',
]),
(3, ['app8.package2.subpackage2.tests2']))
self.assertEqual(self.test_suite.find_tests(labels=[
'app8.package2.subpackage2.tests2.TestCase1',
'app8.package2.subpackage2.tests2.TestCase2',
]),
(3, ['app8.package2.subpackage2.tests2']))
def test_package_collapse(self):
"If all test cases in a test package are selected, path is trimmed to the testmethod"
self.assertEqual(self.test_suite.find_tests(labels=[
'app6.package2.tests1.TestCase.test_method',
'app6.package2.tests2.TestCase1.test_method',
'app6.package2.tests2.TestCase2.test_method1',
'app6.package2.tests2.TestCase2.test_method2',
]),
(4, ['app6.package2']))
self.assertEqual(self.test_suite.find_tests(labels=[
'app6.package2.tests1.TestCase.test_method',
'app6.package2.tests2.TestCase1.test_method',
'app6.package2.tests2.TestCase2',
]),
(4, ['app6.package2']))
self.assertEqual(self.test_suite.find_tests(labels=[
'app6.package2.tests1.TestCase',
'app6.package2.tests2.TestCase1',
'app6.package2.tests2.TestCase2',
]),
(4, ['app6.package2']))
self.assertEqual(self.test_suite.find_tests(labels=[
'app8.package2.subpackage1.tests.TestCase.test_method',
'app8.package2.subpackage2.tests1.TestCase.test_method',
'app8.package2.subpackage2.tests2.TestCase1.test_method',
'app8.package2.subpackage2.tests2.TestCase2.test_method1',
'app8.package2.subpackage2.tests2.TestCase2.test_method2',
]),
(5, ['app8.package2']))
self.assertEqual(self.test_suite.find_tests(labels=[
'app8.package2.subpackage1.tests.TestCase.test_method',
'app8.package2.subpackage2.tests1.TestCase.test_method',
'app8.package2.subpackage2.tests2.TestCase1.test_method',
'app8.package2.subpackage2.tests2.TestCase2',
]),
(5, ['app8.package2']))
self.assertEqual(self.test_suite.find_tests(labels=[
'app8.package2.subpackage1.tests.TestCase',
'app8.package2.subpackage2.tests1.TestCase',
'app8.package2.subpackage2.tests2.TestCase1',
'app8.package2.subpackage2.tests2.TestCase2',
]),
(5, ['app8.package2']))
def test_subpackage_collapse(self):
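        "If all tests in a subpackage are selected, path is trimmed to the subpackage"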
self.assertEqual(self.test_suite.find_tests(labels=[
'app8.package2.subpackage2.tests1.TestCase.test_method',
'app8.package2.subpackage2.tests2.TestCase1.test_method',
'app8.package2.subpackage2.tests2.TestCase2.test_method1',
'app8.package2.subpackage2.tests2.TestCase2.test_method2',
]),
(4, ['app8.package2.subpackage2']))
self.assertEqual(self.test_suite.find_tests(labels=[
'app8.package2.subpackage2.tests1.TestCase.test_method',
'app8.package2.subpackage2.tests2.TestCase1.test_method',
'app8.package2.subpackage2.tests2.TestCase2',
]),
(4, ['app8.package2.subpackage2']))
self.assertEqual(self.test_suite.find_tests(labels=[
'app8.package2.subpackage2.tests1.TestCase',
'app8.package2.subpackage2.tests2.TestCase1',
'app8.package2.subpackage2.tests2.TestCase2',
]),
(4, ['app8.package2.subpackage2']))
def test_app_collapse(self):
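        "If all tests in an app are selected, path is trimmed to the app"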
self.assertEqual(self.test_suite.find_tests(labels=[
'app2.TestCase1.test_method',
'app2.TestCase2.test_method1',
'app2.TestCase2.test_method2',
]),
(3, ['app2']))
self.assertEqual(self.test_suite.find_tests(labels=[
'app2.TestCase1.test_method',
'app2.TestCase2',
]),
(3, ['app2']))
self.assertEqual(self.test_suite.find_tests(labels=[
'app2.TestCase1',
'app2.TestCase2',
]),
(3, ['app2']))
self.assertEqual(self.test_suite.find_tests(labels=[
'app4.tests1.TestCase.test_method',
'app4.tests2.TestCase1.test_method',
'app4.tests2.TestCase2.test_method1',
'app4.tests2.TestCase2.test_method2',
]),
(4, ['app4']))
self.assertEqual(self.test_suite.find_tests(labels=[
'app4.tests1.TestCase.test_method',
'app4.tests2.TestCase1.test_method',
'app4.tests2.TestCase2',
]),
(4, ['app4']))
self.assertEqual(self.test_suite.find_tests(labels=[
'app4.tests1.TestCase.test_method',
'app4.tests2.TestCase1',
'app4.tests2.TestCase2',
]),
(4, ['app4']))
self.assertEqual(self.test_suite.find_tests(labels=[
'app4.tests1.TestCase.test_method',
'app4.tests2',
]),
(4, ['app4']))
self.assertEqual(self.test_suite.find_tests(labels=[
'app4.tests1.TestCase',
'app4.tests2',
]),
(4, ['app4']))
self.assertEqual(self.test_suite.find_tests(labels=[
'app4.tests1',
'app4.tests2',
]),
(4, ['app4']))
self.assertEqual(self.test_suite.find_tests(labels=[
'app6.package1.tests.TestCase.test_method',
'app6.package2.tests1.TestCase.test_method',
'app6.package2.tests2.TestCase1.test_method',
'app6.package2.tests2.TestCase2.test_method1',
'app6.package2.tests2.TestCase2.test_method2',
]),
(5, ['app6']))
self.assertEqual(self.test_suite.find_tests(labels=[
'app6.package1.tests.TestCase.test_method',
'app6.package2.tests1.TestCase.test_method',
'app6.package2.tests2.TestCase1.test_method',
'app6.package2.tests2.TestCase2',
]),
(5, ['app6']))
self.assertEqual(self.test_suite.find_tests(labels=[
'app6.package1.tests.TestCase.test_method',
'app6.package2.tests1.TestCase.test_method',
'app6.package2.tests2.TestCase1',
'app6.package2.tests2.TestCase2',
]),
(5, ['app6']))
self.assertEqual(self.test_suite.find_tests(labels=[
'app6.package1.tests.TestCase.test_method',
'app6.package2.tests1.TestCase.test_method',
'app6.package2.tests2',
]),
(5, ['app6']))
self.assertEqual(self.test_suite.find_tests(labels=[
'app6.package1.tests.TestCase.test_method',
'app6.package2.tests1.TestCase.test_method',
'app6.package2.tests2.TestCase1',
'app6.package2.tests2.TestCase2',
]),
(5, ['app6']))
self.assertEqual(self.test_suite.find_tests(labels=[
'app6.package1.tests.TestCase.test_method',
'app6.package2.tests1.TestCase.test_method',
'app6.package2.tests2',
]),
(5, ['app6']))
self.assertEqual(self.test_suite.find_tests(labels=[
'app6.package1.tests.TestCase.test_method',
'app6.package2.tests1.TestCase',
'app6.package2.tests2',
]),
(5, ['app6']))
self.assertEqual(self.test_suite.find_tests(labels=[
'app6.package1.tests.TestCase.test_method',
'app6.package2.tests1',
'app6.package2.tests2',
]),
(5, ['app6']))
self.assertEqual(self.test_suite.find_tests(labels=[
'app6.package1.tests.TestCase.test_method',
'app6.package2',
]),
(5, ['app6']))
self.assertEqual(self.test_suite.find_tests(labels=[
'app6.package1.tests.TestCase',
'app6.package2',
]),
(5, ['app6']))
self.assertEqual(self.test_suite.find_tests(labels=[
'app6.package1.tests',
'app6.package2',
]),
(5, ['app6']))
self.assertEqual(self.test_suite.find_tests(labels=[
'app6.package1',
'app6.package2',
]),
(5, ['app6']))
self.assertEqual(self.test_suite.find_tests(labels=[
'app8.package1.subpackage.tests.TestCase.test_method',
'app8.package2.subpackage1.tests.TestCase.test_method',
'app8.package2.subpackage2.tests1.TestCase.test_method',
'app8.package2.subpackage2.tests2.TestCase1.test_method',
'app8.package2.subpackage2.tests2.TestCase2.test_method1',
'app8.package2.subpackage2.tests2.TestCase2.test_method2',
]),
(6, ['app8']))
self.assertEqual(self.test_suite.find_tests(labels=[
'app8.package1.subpackage.tests.TestCase.test_method',
'app8.package2.subpackage1.tests.TestCase.test_method',
'app8.package2.subpackage2.tests1.TestCase.test_method',
'app8.package2.subpackage2.tests2.TestCase1.test_method',
'app8.package2.subpackage2.tests2.TestCase2',
]),
(6, ['app8']))
self.assertEqual(self.test_suite.find_tests(labels=[
'app8.package1.subpackage.tests.TestCase.test_method',
'app8.package2.subpackage1.tests.TestCase.test_method',
'app8.package2.subpackage2.tests1.TestCase.test_method',
'app8.package2.subpackage2.tests2.TestCase1',
'app8.package2.subpackage2.tests2.TestCase2',
]),
(6, ['app8']))
self.assertEqual(self.test_suite.find_tests(labels=[
'app8.package1.subpackage.tests.TestCase.test_method',
'app8.package2.subpackage1.tests.TestCase.test_method',
'app8.package2.subpackage2.tests1.TestCase.test_method',
'app8.package2.subpackage2.tests2',
]),
(6, ['app8']))
self.assertEqual(self.test_suite.find_tests(labels=[
'app8.package1.subpackage.tests.TestCase.test_method',
'app8.package2.subpackage1.tests.TestCase.test_method',
'app8.package2.subpackage2.tests1.TestCase',
'app8.package2.subpackage2.tests2',
]),
(6, ['app8']))
self.assertEqual(self.test_suite.find_tests(labels=[
'app8.package1.subpackage.tests.TestCase.test_method',
'app8.package2.subpackage1.tests.TestCase.test_method',
'app8.package2.subpackage2.tests1',
'app8.package2.subpackage2.tests2',
]),
(6, ['app8']))
self.assertEqual(self.test_suite.find_tests(labels=[
'app8.package1.subpackage.tests.TestCase.test_method',
'app8.package2.subpackage1.tests.TestCase.test_method',
'app8.package2.subpackage2',
]),
(6, ['app8']))
self.assertEqual(self.test_suite.find_tests(labels=[
'app8.package1.subpackage.tests.TestCase.test_method',
'app8.package2.subpackage1.tests.TestCase',
'app8.package2.subpackage2',
]),
(6, ['app8']))
self.assertEqual(self.test_suite.find_tests(labels=[
'app8.package1.subpackage.tests.TestCase.test_method',
'app8.package2.subpackage1.tests',
'app8.package2.subpackage2',
]),
(6, ['app8']))
self.assertEqual(self.test_suite.find_tests(labels=[
'app8.package1.subpackage.tests.TestCase.test_method',
'app8.package2.subpackage1',
'app8.package2.subpackage2',
]),
(6, ['app8']))
self.assertEqual(self.test_suite.find_tests(labels=[
'app8.package1.subpackage.tests.TestCase.test_method',
'app8.package2',
]),
(6, ['app8']))
self.assertEqual(self.test_suite.find_tests(labels=[
'app8.package1.subpackage.tests.TestCase',
'app8.package2',
]),
(6, ['app8']))
self.assertEqual(self.test_suite.find_tests(labels=[
'app8.package1.subpackage.tests',
'app8.package2',
]),
(6, ['app8']))
self.assertEqual(self.test_suite.find_tests(labels=[
'app8.package1.subpackage',
'app8.package2',
]),
(6, ['app8']))
self.assertEqual(self.test_suite.find_tests(labels=[
'app8.package1',
'app8.package2',
]),
(6, ['app8']))
|
py
|
1a5e02b48465661d2e4eb67560b4d7a34943918e
|
# Generated by Django 3.0.8 on 2020-11-08 19:57
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("translations", "0024_auto_20201107_2252"),
]
operations = [
migrations.AddField(
model_name="review",
name="closeness_rating",
field=models.IntegerField(
blank=True,
choices=[(1, "Low"), (2, "Average"), (3, "Excellent")],
null=True,
),
),
migrations.AddField(
model_name="review",
name="readability_rating",
field=models.IntegerField(
blank=True,
choices=[(1, "Low"), (2, "Average"), (3, "Excellent")],
null=True,
),
),
]
|
py
|
1a5e03ae4c1dc8b6af2613c0798e1422da1085b0
|
##########################################################################
#
# pgAdmin 4 - PostgreSQL Tools
#
# Copyright (C) 2013 - 2018, The pgAdmin Development Team
# This software is released under the PostgreSQL Licence
#
##########################################################################
import json
import logging
from abc import ABCMeta, abstractmethod, abstractproperty
import six
from socket import error as SOCKETErrorException
from smtplib import SMTPConnectError, SMTPResponseException,\
    SMTPServerDisconnected, SMTPDataError, SMTPHeloError, SMTPException, \
SMTPAuthenticationError, SMTPSenderRefused, SMTPRecipientsRefused
from flask import current_app, render_template, url_for, make_response, flash,\
Response, request, after_this_request, redirect
from flask_babel import gettext
from flask_login import current_user, login_required
from flask_security.decorators import anonymous_user_required
from flask_gravatar import Gravatar
from pgadmin.settings import get_setting
from pgadmin.utils import PgAdminModule
from pgadmin.utils.ajax import make_json_response
from pgadmin.utils.preferences import Preferences
from werkzeug.datastructures import MultiDict
from flask_security.views import _security, _commit, _render_json, _ctx
from flask_security.changeable import change_user_password
from flask_security.recoverable import reset_password_token_status, \
generate_reset_password_token, update_password
from flask_security.utils import config_value, do_flash, get_url, get_message,\
slash_url_suffix, login_user, send_mail
from flask_security.signals import reset_password_instructions_sent
import config
from pgadmin import current_blueprint
try:
import urllib.request as urlreq
except ImportError:
import urllib2 as urlreq
MODULE_NAME = 'browser'
class BrowserModule(PgAdminModule):
LABEL = gettext('Browser')
def get_own_stylesheets(self):
stylesheets = []
# Add browser stylesheets
for (endpoint, filename) in [
('static', 'vendor/codemirror/codemirror.css'),
('static', 'vendor/codemirror/addon/dialog/dialog.css'),
('static', 'vendor/jQuery-contextMenu/jquery.contextMenu.css' if current_app.debug
else 'vendor/jQuery-contextMenu/jquery.contextMenu.min.css'),
('static', 'vendor/wcDocker/wcDocker.css' if current_app.debug
else 'vendor/wcDocker/wcDocker.min.css'),
('browser.static', 'css/browser.css'),
('browser.static', 'vendor/aciTree/css/aciTree.css')
]:
stylesheets.append(url_for(endpoint, filename=filename))
stylesheets.append(url_for('browser.browser_css'))
return stylesheets
def get_own_javascripts(self):
scripts = list()
scripts.append({
'name': 'alertify',
'path': url_for(
'static',
filename='vendor/alertifyjs/alertify' if current_app.debug
else 'vendor/alertifyjs/alertify.min'
),
'exports': 'alertify',
'preloaded': True
})
scripts.append({
'name': 'jqueryui.position',
'path': url_for(
'static',
filename='vendor/jQuery-contextMenu/jquery.ui.position' if \
current_app.debug else \
'vendor/jQuery-contextMenu/jquery.ui.position.min'
),
'deps': ['jquery'],
'exports': 'jQuery.ui.position',
'preloaded': True
})
scripts.append({
'name': 'jquery.contextmenu',
'path': url_for(
'static',
filename='vendor/jQuery-contextMenu/jquery.contextMenu' if \
current_app.debug else \
'vendor/jQuery-contextMenu/jquery.contextMenu.min'
),
'deps': ['jquery', 'jqueryui.position'],
'exports': 'jQuery.contextMenu',
'preloaded': True
})
scripts.append({
'name': 'jquery.aciplugin',
'path': url_for(
'browser.static',
filename='vendor/aciTree/jquery.aciPlugin.min'
),
'deps': ['jquery'],
'exports': 'aciPluginClass',
'preloaded': True
})
scripts.append({
'name': 'jquery.acitree',
'path': url_for(
'browser.static',
filename='vendor/aciTree/jquery.aciTree' if
current_app.debug else 'vendor/aciTree/jquery.aciTree.min'
),
'deps': ['jquery', 'jquery.aciplugin'],
'exports': 'aciPluginClass.plugins.aciTree',
'preloaded': True
})
scripts.append({
'name': 'jquery.acisortable',
'path': url_for(
'browser.static',
filename='vendor/aciTree/jquery.aciSortable.min'
),
'deps': ['jquery', 'jquery.aciplugin'],
'exports': 'aciPluginClass.plugins.aciSortable',
'when': None,
'preloaded': True
})
scripts.append({
'name': 'jquery.acifragment',
'path': url_for(
'browser.static',
filename='vendor/aciTree/jquery.aciFragment.min'
),
'deps': ['jquery', 'jquery.aciplugin'],
'exports': 'aciPluginClass.plugins.aciFragment',
'when': None,
'preloaded': True
})
scripts.append({
'name': 'wcdocker',
'path': url_for(
'static',
filename='vendor/wcDocker/wcDocker' if current_app.debug
else 'vendor/wcDocker/wcDocker.min'
),
'deps': ['jquery.contextmenu'],
'exports': '',
'preloaded': True
})
scripts.append({
'name': 'pgadmin.browser.datamodel',
'path': url_for('browser.static', filename='js/datamodel'),
'preloaded': True
})
for name, script in [
['pgadmin.browser', 'js/browser'],
['pgadmin.browser.endpoints', 'js/endpoints'],
['pgadmin.browser.error', 'js/error']]:
scripts.append({
'name': name,
'path': url_for('browser.index') + script,
'preloaded': True
})
for name, script in [
['pgadmin.browser.node', 'js/node'],
['pgadmin.browser.messages', 'js/messages'],
['pgadmin.browser.collection', 'js/collection']]:
scripts.append({
'name': name,
'path': url_for('browser.index') + script,
'preloaded': True,
'deps': ['pgadmin.browser.datamodel']
})
for name, end in [
['pgadmin.browser.menu', 'js/menu'],
['pgadmin.browser.panel', 'js/panel'],
['pgadmin.browser.frame', 'js/frame']]:
scripts.append({
'name': name, 'path': url_for('browser.static', filename=end),
'preloaded': True})
scripts.append({
'name': 'pgadmin.browser.node.ui',
'path': url_for('browser.static', filename='js/node.ui'),
'when': 'server_group'
})
for module in self.submodules:
scripts.extend(module.get_own_javascripts())
return scripts
def register_preferences(self):
self.show_system_objects = self.preference.register(
'display', 'show_system_objects',
gettext("Show system objects?"), 'boolean', False,
category_label=gettext('Display')
)
self.table_row_count_threshold = self.preference.register(
'properties', 'table_row_count_threshold',
gettext("Count rows if estimated less than"), 'integer', 2000,
category_label=gettext('Properties')
)
def get_exposed_url_endpoints(self):
"""
Returns:
list: a list of url endpoints exposed to the client.
"""
return ['browser.index', 'browser.nodes']
blueprint = BrowserModule(MODULE_NAME, __name__)
@six.add_metaclass(ABCMeta)
class BrowserPluginModule(PgAdminModule):
"""
Abstract base class for browser submodules.
    It helps to define a node for every item that appears under the browser
    tree. It makes sure every module under the browser carries the '/browser'
    prefix, and sets the 'url_prefix', 'static_url_path', etc.
    It also creates some of the preferences to be used by the node.
"""
browser_url_prefix = blueprint.url_prefix + '/'
SHOW_ON_BROWSER = True
def __init__(self, import_name, **kwargs):
"""
Construct a new 'BrowserPluginModule' object.
:param import_name: Name of the module
:param **kwargs: Extra parameters passed to the base class
pgAdminModule.
:return: returns nothing
        It sets the url_prefix based on the 'node_path', and the
        static_url_path to the relative path '/static'.
        Every module extended from this will be identified as 'NODE-<type>'.
        It also creates a preference 'show_node_<type>' that determines whether
        the node can be shown in the browser, referring to the browser preferences.
"""
kwargs.setdefault("url_prefix", self.node_path)
kwargs.setdefault("static_url_path", '/static')
self.browser_preference = None
self.pref_show_system_objects = None
self.pref_show_node = None
super(BrowserPluginModule, self).__init__(
"NODE-%s" % self.node_type,
import_name,
**kwargs
)
@property
def jssnippets(self):
"""
Returns a snippet of javascript to include in the page
"""
return []
@property
def module_use_template_javascript(self):
"""
Returns whether Jinja2 template is used for generating the javascript
module.
"""
return False
def get_own_javascripts(self):
"""
        Returns the list of javascript information used by the module.
        Each entry must contain the name and the path of the script.
        The name must be unique for each module, hence we use
        'pgadmin.node.<type>' as the naming convention.
        An entry can also specify when to load the script.
        i.e.
        We may not need to load the javascript of the table node when we're
        not yet connected to a server and no database is loaded. Hence it
        makes sense to load it only once a database is loaded.
        We may also add 'deps', the list of javascripts the entry depends on.
"""
scripts = []
if self.module_use_template_javascript:
scripts.extend([{
'name': 'pgadmin.node.%s' % self.node_type,
'path': url_for('browser.index') + '%s/module' % self.node_type,
'when': self.script_load,
'is_template': True
}])
else:
scripts.extend([{
'name': 'pgadmin.node.%s' % self.node_type,
'path': url_for(
'%s.static'% self.name, filename=('js/%s' % self.node_type)
),
'when': self.script_load,
'is_template': False
}])
for module in self.submodules:
scripts.extend(module.get_own_javascripts())
return scripts
def generate_browser_node(
self, node_id, parent_id, label, icon, inode, node_type, **kwargs
):
"""
Helper function to create a browser node for this particular subnode.
:param node_id: Unique Id for each node
:param parent_id: Id of the parent.
:param label: Label for the node
        :param icon: Icon for displaying along with this node on the browser
            tree. Icon refers to a CSS class name.
:param inode: True/False.
Used by the browser tree node to check, if the
current node will have children or not.
:param node_type: String to refer to the node type.
:param **kwargs: A node can have extra information other than this
data, which can be passed as key-value pair as
argument here.
i.e. A database, server node can have extra
information like connected, or not.
Returns a dictionary object representing this node object for the
browser tree.
"""
obj = {
"id": "%s/%s" % (node_type, node_id),
"label": label,
"icon": icon,
"inode": inode,
"_type": node_type,
"_id": node_id,
"_pid": parent_id,
"module": 'pgadmin.node.%s' % node_type
}
for key in kwargs:
obj.setdefault(key, kwargs[key])
return obj
@property
def csssnippets(self):
"""
Returns a snippet of css to include in the page
"""
snippets = [
render_template(
"browser/css/node.css",
node_type=self.node_type,
_=gettext
)]
for submodule in self.submodules:
snippets.extend(submodule.csssnippets)
return snippets
@abstractmethod
def get_nodes(self):
"""
Each browser module is responsible for fetching
its own tree subnodes.
"""
return []
@abstractproperty
def node_type(self):
pass
@abstractproperty
def script_load(self):
"""
        This property defines when to load this script.
        In order to allow creation of an object, we need to load the script
        for a node at its parent level.
        i.e.
        - In order to allow creating a server object, the script should be
          loaded at the server-group node.
"""
pass
@property
def node_path(self):
"""
Defines the url path prefix for this submodule.
"""
return self.browser_url_prefix + self.node_type
@property
def javascripts(self):
"""
        Override the javascripts property of PgAdminModule, so that we don't
        return the scripts from get_own_javascripts a second time.
"""
return []
@property
def label(self):
"""
Module label.
"""
return self.LABEL
@property
def show_node(self):
"""
        A property to check whether to show the node for this module on the
        browser tree or not.
Relies on show_node preference object, otherwise on the SHOW_ON_BROWSER
default value.
"""
if self.pref_show_node:
return self.pref_show_node.get()
else:
return self.SHOW_ON_BROWSER
@property
def show_system_objects(self):
"""
Show/Hide the system objects in the database server.
"""
if self.pref_show_system_objects:
return self.pref_show_system_objects.get()
else:
return False
def register_preferences(self):
"""
Registers the preferences object for this module.
Sets the browser_preference, show_system_objects, show_node preference
objects for this submodule.
"""
        # Add the node information for the browser, not in the respective node preferences
self.browser_preference = blueprint.preference
self.pref_show_system_objects = blueprint.preference.preference(
'display', 'show_system_objects'
)
self.pref_show_node = self.browser_preference.preference(
'node', 'show_node_' + self.node_type,
self.label, 'boolean', self.SHOW_ON_BROWSER, category_label=gettext('Nodes')
)
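# Illustrative sketch (assumption: not part of pgAdmin): a concrete submodule
# derived from BrowserPluginModule typically only has to provide the abstract
# members declared above. The class name and return values below are hypothetical.
class _ExampleNodeModule(BrowserPluginModule):
    LABEL = gettext('Examples')
    @property
    def node_type(self):
        # Unique string identifying the node type in the browser tree.
        return 'example'
    @property
    def script_load(self):
        # Load the node's script once the browser itself has been loaded.
        return 'browser'
    def get_nodes(self):
        # A real module would yield one generate_browser_node(...) dict per item.
        return []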
@blueprint.route("/")
@login_required
def index():
"""Render and process the main browser window."""
# Get the Gravatar
Gravatar(
current_app,
size=100,
rating='g',
default='retro',
force_default=False,
use_ssl=True,
base_url=None
)
msg = None
# Get the current version info from the website, and flash a message if
# the user is out of date, and the check is enabled.
if config.UPGRADE_CHECK_ENABLED:
data = None
url = '%s?version=%s' % (config.UPGRADE_CHECK_URL, config.APP_VERSION)
current_app.logger.debug('Checking version data at: %s' % url)
try:
# Do not wait for more than 5 seconds.
            # Otherwise rendering browser.html gets stuck while working on a
            # broken network.
response = urlreq.urlopen(url, data, 5)
current_app.logger.debug(
'Version check HTTP response code: %d' % response.getcode()
)
if response.getcode() == 200:
data = json.loads(response.read().decode('utf-8'))
current_app.logger.debug('Response data: %s' % data)
except:
current_app.logger.exception('Exception when checking for update')
if data is not None:
if data['pgadmin4']['version_int'] > config.APP_VERSION_INT:
msg = render_template(
MODULE_NAME + "/upgrade.html",
current_version=config.APP_VERSION,
upgrade_version=data['pgadmin4']['version'],
product_name=config.APP_NAME,
download_url=data['pgadmin4']['download_url']
)
flash(msg, 'warning')
response = Response(render_template(
MODULE_NAME + "/index.html",
username=current_user.email,
is_admin=current_user.has_role("Administrator"),
_=gettext
))
    # Set the language cookie after login, so that the same option is
    # pre-selected the next time the user logs in.
misc_preference = Preferences.module('miscellaneous')
user_languages = misc_preference.preference(
'user_language'
)
language = 'en'
if user_languages:
language = user_languages.get() or 'en'
response.set_cookie("PGADMIN_LANGUAGE", language)
return response
@blueprint.route("/js/utils.js")
@login_required
def utils():
layout = get_setting('Browser/Layout', default='')
snippets = []
prefs = Preferences.module('paths')
pg_help_path_pref = prefs.preference('pg_help_path')
pg_help_path = pg_help_path_pref.get()
edbas_help_path_pref = prefs.preference('edbas_help_path')
edbas_help_path = edbas_help_path_pref.get()
# Get sqleditor options
prefs = Preferences.module('sqleditor')
editor_tab_size_pref = prefs.preference('tab_size')
editor_tab_size = editor_tab_size_pref.get()
editor_use_spaces_pref = prefs.preference('use_spaces')
editor_use_spaces = editor_use_spaces_pref.get()
editor_wrap_code_pref = prefs.preference('wrap_code')
editor_wrap_code = editor_wrap_code_pref.get()
brace_matching_pref = prefs.preference('brace_matching')
brace_matching = brace_matching_pref.get()
insert_pair_brackets_perf = prefs.preference('insert_pair_brackets')
insert_pair_brackets = insert_pair_brackets_perf.get()
    # This will be the opposite of the use_spaces option
    editor_indent_with_tabs = not editor_use_spaces
# Try to fetch current libpq version from the driver
try:
from config import PG_DEFAULT_DRIVER
from pgadmin.utils.driver import get_driver
driver = get_driver(PG_DEFAULT_DRIVER)
pg_libpq_version = driver.libpq_version()
except:
pg_libpq_version = 0
for submodule in current_blueprint.submodules:
snippets.extend(submodule.jssnippets)
return make_response(
render_template(
'browser/js/utils.js',
layout=layout,
jssnippets=snippets,
pg_help_path=pg_help_path,
edbas_help_path=edbas_help_path,
editor_tab_size=editor_tab_size,
editor_use_spaces=editor_use_spaces,
editor_wrap_code=editor_wrap_code,
editor_brace_matching=brace_matching,
editor_insert_pair_brackets=insert_pair_brackets,
editor_indent_with_tabs=editor_indent_with_tabs,
app_name=config.PGADMIN_APP_NAME,
pg_libpq_version=pg_libpq_version
),
200, {'Content-Type': 'application/x-javascript'})
@blueprint.route("/js/endpoints.js")
def exposed_urls():
return make_response(
render_template('browser/js/endpoints.js'),
200, {'Content-Type': 'application/x-javascript'}
)
@blueprint.route("/js/error.js")
@login_required
def error_js():
return make_response(
render_template('browser/js/error.js', _=gettext),
200, {'Content-Type': 'application/x-javascript'})
@blueprint.route("/js/node.js")
@login_required
def node_js():
prefs = Preferences.module('paths')
pg_help_path_pref = prefs.preference('pg_help_path')
pg_help_path = pg_help_path_pref.get()
edbas_help_path_pref = prefs.preference('edbas_help_path')
edbas_help_path = edbas_help_path_pref.get()
return make_response(
render_template('browser/js/node.js',
pg_help_path=pg_help_path,
edbas_help_path=edbas_help_path,
_=gettext
),
200, {'Content-Type': 'application/x-javascript'})
@blueprint.route("/js/messages.js")
def messages_js():
return make_response(
render_template('browser/js/messages.js', _=gettext),
200, {'Content-Type': 'application/x-javascript'})
@blueprint.route("/js/collection.js")
@login_required
def collection_js():
return make_response(
render_template('browser/js/collection.js', _=gettext),
200, {'Content-Type': 'application/x-javascript'})
@blueprint.route("/browser.css")
@login_required
def browser_css():
"""Render and return CSS snippets from the nodes and modules."""
snippets = []
# Get configurable options
prefs = Preferences.module('sqleditor')
sql_font_size_pref = prefs.preference('sql_font_size')
sql_font_size = round(float(sql_font_size_pref.get()), 2)
if sql_font_size != 0:
snippets.append('.CodeMirror { font-size: %sem; }' % str(sql_font_size))
for submodule in blueprint.submodules:
snippets.extend(submodule.csssnippets)
return make_response(
render_template(
'browser/css/browser.css', snippets=snippets, _=gettext
),
200, {'Content-Type': 'text/css'})
@blueprint.route("/nodes/", endpoint="nodes")
@login_required
def get_nodes():
"""Build a list of treeview nodes from the child nodes."""
nodes = []
for submodule in current_blueprint.submodules:
nodes.extend(submodule.get_nodes())
return make_json_response(data=nodes)
# Only register route if SECURITY_CHANGEABLE is set to True
# We can't access app context here so cannot
# use app.config['SECURITY_CHANGEABLE']
if hasattr(config, 'SECURITY_CHANGEABLE') and config.SECURITY_CHANGEABLE:
@blueprint.route("/change_password", endpoint="change_password",
methods=['GET', 'POST'])
@login_required
def change_password():
"""View function which handles a change password request."""
has_error = False
form_class = _security.change_password_form
if request.json:
form = form_class(MultiDict(request.json))
else:
form = form_class()
if form.validate_on_submit():
try:
change_user_password(current_user, form.new_password.data)
except SOCKETErrorException as e:
# Handle socket errors which are not covered by SMTPExceptions.
logging.exception(str(e), exc_info=True)
flash(gettext(u'SMTP Socket error: {}\nYour password has not been changed.').format(e), 'danger')
has_error = True
except (SMTPConnectError, SMTPResponseException,
SMTPServerDisconnected, SMTPDataError, SMTPHeloError,
SMTPException, SMTPAuthenticationError, SMTPSenderRefused,
SMTPRecipientsRefused) as e:
# Handle smtp specific exceptions.
logging.exception(str(e), exc_info=True)
flash(gettext(u'SMTP error: {}\nYour password has not been changed.').format(e), 'danger')
has_error = True
except Exception as e:
# Handle other exceptions.
logging.exception(str(e), exc_info=True)
flash(gettext(u'Error: {}\nYour password has not been changed.').format(e), 'danger')
has_error = True
if request.json is None and not has_error:
after_this_request(_commit)
do_flash(*get_message('PASSWORD_CHANGE'))
return redirect(get_url(_security.post_change_view) or
get_url(_security.post_login_view))
if request.json and not has_error:
form.user = current_user
return _render_json(form)
return _security.render_template(
config_value('CHANGE_PASSWORD_TEMPLATE'),
change_password_form=form,
**_ctx('change_password'))
# Only register route if SECURITY_RECOVERABLE is set to True
if hasattr(config, 'SECURITY_RECOVERABLE') and config.SECURITY_RECOVERABLE:
def send_reset_password_instructions(user):
"""Sends the reset password instructions email for the specified user.
:param user: The user to send the instructions to
"""
token = generate_reset_password_token(user)
reset_link = url_for('browser.reset_password', token=token,
_external=True)
send_mail(config_value('EMAIL_SUBJECT_PASSWORD_RESET'), user.email,
'reset_instructions',
user=user, reset_link=reset_link)
reset_password_instructions_sent.send(
current_app._get_current_object(),
user=user, token=token)
@blueprint.route("/reset_password", endpoint="forgot_password",
methods=['GET', 'POST'])
@anonymous_user_required
def forgot_password():
"""View function that handles a forgotten password request."""
has_error = False
form_class = _security.forgot_password_form
if request.json:
form = form_class(MultiDict(request.json))
else:
form = form_class()
if form.validate_on_submit():
try:
send_reset_password_instructions(form.user)
except SOCKETErrorException as e:
# Handle socket errors which are not covered by SMTPExceptions.
logging.exception(str(e), exc_info=True)
flash(gettext(u'SMTP Socket error: {}\nYour password has not been changed.').format(e), 'danger')
has_error = True
except (SMTPConnectError, SMTPResponseException,
SMTPServerDisconnected, SMTPDataError, SMTPHeloError,
SMTPException, SMTPAuthenticationError, SMTPSenderRefused,
SMTPRecipientsRefused) as e:
# Handle smtp specific exceptions.
logging.exception(str(e), exc_info=True)
flash(gettext(u'SMTP error: {}\nYour password has not been changed.').format(e), 'danger')
has_error = True
except Exception as e:
# Handle other exceptions.
logging.exception(str(e), exc_info=True)
flash(gettext(u'Error: {}\nYour password has not been changed.').format(e), 'danger')
has_error = True
if request.json is None and not has_error:
do_flash(*get_message('PASSWORD_RESET_REQUEST',
email=form.user.email))
if request.json and not has_error:
return _render_json(form, include_user=False)
return _security.render_template(
config_value('FORGOT_PASSWORD_TEMPLATE'),
forgot_password_form=form,
**_ctx('forgot_password'))
# We are not in app context so cannot use url_for('browser.forgot_password')
# So hard code the url '/browser/reset_password' while passing as
# parameter to slash_url_suffix function.
@blueprint.route('/reset_password' + slash_url_suffix(
'/browser/reset_password', '<token>'),
methods=['GET', 'POST'],
endpoint='reset_password')
@anonymous_user_required
def reset_password(token):
"""View function that handles a reset password request."""
expired, invalid, user = reset_password_token_status(token)
if invalid:
do_flash(*get_message('INVALID_RESET_PASSWORD_TOKEN'))
if expired:
do_flash(*get_message('PASSWORD_RESET_EXPIRED', email=user.email,
within=_security.reset_password_within))
if invalid or expired:
return redirect(url_for('browser.forgot_password'))
has_error = False
form = _security.reset_password_form()
if form.validate_on_submit():
try:
update_password(user, form.password.data)
except SOCKETErrorException as e:
# Handle socket errors which are not covered by SMTPExceptions.
logging.exception(str(e), exc_info=True)
flash(gettext(u'SMTP Socket error: {}\nYour password has not been changed.').format(e), 'danger')
has_error = True
except (SMTPConnectError, SMTPResponseException,
SMTPServerDisconnected, SMTPDataError, SMTPHeloError,
SMTPException, SMTPAuthenticationError, SMTPSenderRefused,
SMTPRecipientsRefused) as e:
# Handle smtp specific exceptions.
logging.exception(str(e), exc_info=True)
flash(gettext(u'SMTP error: {}\nYour password has not been changed.').format(e), 'danger')
has_error = True
except Exception as e:
# Handle other exceptions.
logging.exception(str(e), exc_info=True)
flash(gettext(u'Error: {}\nYour password has not been changed.').format(e), 'danger')
has_error = True
if not has_error:
after_this_request(_commit)
do_flash(*get_message('PASSWORD_RESET'))
login_user(user)
return redirect(get_url(_security.post_reset_view) or
get_url(_security.post_login_view))
return _security.render_template(
config_value('RESET_PASSWORD_TEMPLATE'),
reset_password_form=form,
reset_password_token=token,
**_ctx('reset_password'))
|
py
|
1a5e03bc71da510aa9890758838a3aa2e006f006
|
# coding: utf8
# scikit-learn provides three naive Bayes classifiers: GaussianNB (Gaussian naive Bayes), MultinomialNB (multinomial naive Bayes) and BernoulliNB (Bernoulli naive Bayes).
import numpy as np
from sklearn.naive_bayes import GaussianNB, MultinomialNB, BernoulliNB
X = np.asarray([[-1, -1], [-2, -2], [-3, -3], [-4, -4], [-5, -5], [1, 1], [2, 2], [3, 3]])
y = np.asarray([1, 1, 1, 1, 1, 2, 2, 2])
# Pass the class priors via the constructor; assigning clf.class_prior_ before
# fit() has no effect, since fit() recomputes the priors from the data.
clf = GaussianNB(priors=[0.675, 0.325])
clf.fit(X, y)
print(clf.predict([[-1, -1], [2,3]]))
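# Illustrative sketch (not part of the original snippet): MultinomialNB and
# BernoulliNB are imported above but never used; they expect non-negative count
# or binary features, so small made-up data of that shape is used here.
X_counts = np.asarray([[2, 1, 0], [3, 0, 1], [0, 4, 2], [1, 0, 5]])
X_binary = (X_counts > 0).astype(int)
y2 = np.asarray([1, 1, 2, 2])
print(MultinomialNB().fit(X_counts, y2).predict([[1, 1, 0]]))
print(BernoulliNB().fit(X_binary, y2).predict([[1, 1, 0]]))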
|
py
|
1a5e06238346415afd00032f8f03d60f5aebb730
|
# pipeline example modified from David Johnson of the Michigan SPIDER team's regression example, originally at
# https://gitlab.datadrivendiscovery.org/michigan/spider/blob/master/spider/pipelines/supervised_learning_owl.py
from d3m.metadata import pipeline as d3m_pipeline
from d3m.metadata import base as d3m_base
from d3m.metadata.base import ArgumentType, Context
from realML.pipelines.base import BasePipeline
#from realML.kernel import RFMPreconditionedGaussianKRR
from common_primitives.dataframe_to_ndarray import DataFrameToNDArrayPrimitive
from common_primitives.ndarray_to_dataframe import NDArrayToDataFramePrimitive
from common_primitives.dataset_to_dataframe import DatasetToDataFramePrimitive
from common_primitives.column_parser import ColumnParserPrimitive
from common_primitives.construct_predictions import ConstructPredictionsPrimitive
from common_primitives.extract_columns_semantic_types import ExtractColumnsBySemanticTypesPrimitive
from d3m.primitives.data_transformation.encoder import DistilBinaryEncoder as BinaryEncoderPrimitive
from d3m import index
import d3m.primitives.data_cleaning.imputer
#import d3m.primitives.data_preprocessing.horizontal_concat
import os.path
#from d3m.primitives.data_preprocessing.horizontal_concat import HorizontalConcat as HorizontalConcat
from common_primitives.horizontal_concat import HorizontalConcatPrimitive
import pandas as pd
import d3m.primitives.regression.gradient_boosting
from d3m import index
import numpy as np
from realML.matrix import SparsePCA
class sparsepcaPipeline2(BasePipeline):
def __init__(self):
super().__init__()
#specify one seed dataset on which this pipeline can operate
dataset = '534_cps_85_wages'
self.meta_info = self.genmeta(dataset)
#define pipeline object
def _gen_pipeline(self):
pipeline = d3m_pipeline.Pipeline()
#define inputs. This will be read in automatically as a Dataset object.
pipeline.add_input(name = 'inputs')
#step 0: Denormalize: join multiple tabular resource?
# Why is there no entry point for Denormalize?
#step 0: Dataset -> Dataframe
step_0 = d3m_pipeline.PrimitiveStep(primitive_description = DatasetToDataFramePrimitive.metadata.query())
step_0.add_argument(
name = 'inputs',
argument_type = d3m_base.ArgumentType.CONTAINER,
data_reference = 'inputs.0')
step_0.add_output('produce')
pipeline.add_step(step_0)
# Step 1: Simple Profiler Column Role Annotation
step_1 = d3m_pipeline.PrimitiveStep(
primitive=index.get_primitive("d3m.primitives.schema_discovery.profiler.Common")
)
step_1.add_argument(
name="inputs",
argument_type=d3m_base.ArgumentType.CONTAINER,
data_reference="steps.0.produce",
)
step_1.add_output("produce")
pipeline.add_step(step_1)
#step 2: ColumnParser
step_2 = d3m_pipeline.PrimitiveStep(primitive_description=ColumnParserPrimitive.metadata.query())
step_2.add_argument(
name='inputs',
argument_type=d3m_base.ArgumentType.CONTAINER,
data_reference='steps.1.produce')
step_2.add_output('produce')
pipeline.add_step(step_2)
#step 3: Extract attributes from dataset into a dedicated dataframe
step_3 = d3m_pipeline.PrimitiveStep(primitive_description = ExtractColumnsBySemanticTypesPrimitive.metadata.query())
step_3.add_argument(
name = 'inputs',
argument_type = d3m_base.ArgumentType.CONTAINER,
data_reference = 'steps.2.produce')
step_3.add_output('produce')
step_3.add_hyperparameter(
name='semantic_types',
argument_type=d3m_base.ArgumentType.VALUE,
data=['https://metadata.datadrivendiscovery.org/types/Attribute'])
pipeline.add_step(step_3)
#step 4: Binary encoding for categorical features
step_4 = d3m_pipeline.PrimitiveStep(primitive_description = BinaryEncoderPrimitive.metadata.query())
step_4.add_hyperparameter(
name = 'min_binary',
argument_type = d3m_base.ArgumentType.VALUE,
data = 2
)
step_4.add_argument(
name = 'inputs',
argument_type = d3m_base.ArgumentType.CONTAINER,
data_reference = 'steps.3.produce')
step_4.add_output('produce')
pipeline.add_step(step_4)
#step 5: Extract Targets
step_5 = d3m_pipeline.PrimitiveStep(primitive_description = ExtractColumnsBySemanticTypesPrimitive.metadata.query())
step_5.add_argument(
name='inputs',
argument_type=d3m_base.ArgumentType.CONTAINER,
data_reference='steps.2.produce'
)
step_5.add_hyperparameter(
name='semantic_types',
argument_type=d3m_base.ArgumentType.VALUE,
data=['https://metadata.datadrivendiscovery.org/types/TrueTarget'])
step_5.add_output('produce')
pipeline.add_step(step_5)
#step 6: transform targets dataframe into an ndarray
step_6 = d3m_pipeline.PrimitiveStep(primitive_description = DataFrameToNDArrayPrimitive.metadata.query())
step_6.add_argument(
name = 'inputs',
argument_type = d3m_base.ArgumentType.CONTAINER,
data_reference = 'steps.5.produce'
)
step_6.add_output('produce')
pipeline.add_step(step_6)
#step 7 : transform features dataframe into an ndarray
step_7 = d3m_pipeline.PrimitiveStep(primitive_description = DataFrameToNDArrayPrimitive.metadata.query())
step_7.add_argument(
name = 'inputs',
argument_type = d3m_base.ArgumentType.CONTAINER,
data_reference = 'steps.4.produce'
)
step_7.add_output('produce')
pipeline.add_step(step_7)
attributes = 'steps.7.produce'
targets = 'steps.6.produce'
        #step 8: run SparsePCA on the attribute ndarray
step_8 = d3m_pipeline.PrimitiveStep(primitive_description = SparsePCA.metadata.query())
step_8.add_argument(
name = 'inputs',
argument_type = d3m_base.ArgumentType.CONTAINER,
data_reference = attributes #inputs here are the outputs from step 7
)
step_8.add_hyperparameter(
name = 'n_components',
argument_type = d3m_base.ArgumentType.VALUE,
data = 9
)
step_8.add_hyperparameter(
name = 'beta',
argument_type = d3m_base.ArgumentType.VALUE,
data = 1e-9
)
step_8.add_hyperparameter(
name = 'alpha',
argument_type = d3m_base.ArgumentType.VALUE,
data = 1e-4
)
step_8.add_hyperparameter(
name = 'degree',
argument_type = d3m_base.ArgumentType.VALUE,
data = 2
)
step_8.add_output('produce')
pipeline.add_step(step_8)
        #step 9: convert the SparsePCA output ndarray back to a dataframe
step_9 = d3m_pipeline.PrimitiveStep(primitive_description = NDArrayToDataFramePrimitive.metadata.query())
step_9.add_argument(
name = 'inputs',
argument_type = d3m_base.ArgumentType.CONTAINER,
data_reference = 'steps.8.produce'
)
step_9.add_output('produce')
pipeline.add_step(step_9)
        #step 10: horizontally concatenate the encoded attributes with the SparsePCA features
step_10 = d3m_pipeline.PrimitiveStep(primitive_description = HorizontalConcatPrimitive.metadata.query())
step_10.add_argument(
name = 'left',
argument_type = d3m_base.ArgumentType.CONTAINER,
data_reference = 'steps.4.produce'
)
step_10.add_argument(
name = 'right',
argument_type = d3m_base.ArgumentType.CONTAINER,
data_reference = 'steps.9.produce'
)
step_10.add_output('produce')
pipeline.add_step(step_10)
        #step 11: gradient boosting regression on the concatenated data (inputs and outputs for sklearn primitives are both dataframes)
step_11 = d3m_pipeline.PrimitiveStep(primitive_description = d3m.primitives.regression.gradient_boosting.SKlearn.metadata.query())
step_11.add_argument(
name = 'inputs',
argument_type = d3m_base.ArgumentType.CONTAINER,
data_reference = 'steps.10.produce'
)
step_11.add_argument(
name = 'outputs',
argument_type = d3m_base.ArgumentType.CONTAINER,
data_reference = 'steps.5.produce'
)
step_11.add_hyperparameter(
name = 'n_estimators',
argument_type = d3m_base.ArgumentType.VALUE,
data = 63000
)
step_11.add_hyperparameter(
name = 'learning_rate',
argument_type = d3m_base.ArgumentType.VALUE,
data = 0.0001
)
step_11.add_hyperparameter(
name = 'max_depth',
argument_type = d3m_base.ArgumentType.VALUE,
data = 3
)
step_11.add_output('produce')
pipeline.add_step(step_11)
        #step 12: generate a properly-formatted output dataframe from the dataframed prediction outputs using the input dataframe as a reference
step_12 = d3m_pipeline.PrimitiveStep(primitive_description = ConstructPredictionsPrimitive.metadata.query())
step_12.add_argument(
name = 'inputs',
argument_type = d3m_base.ArgumentType.CONTAINER,
data_reference = 'steps.11.produce' #inputs here are the prediction column
)
step_12.add_argument(
name = 'reference',
argument_type = d3m_base.ArgumentType.CONTAINER,
data_reference = 'steps.1.produce' #inputs here are the dataframe input dataset
)
step_12.add_output('produce')
pipeline.add_step(step_12)
# Final Output
pipeline.add_output(
name='output',
data_reference='steps.12.produce')
return pipeline
if __name__ == '__main__':
instance = sparsepcaPipeline2()
json_info = instance.get_json()
instanceid = instance.get_id()
instancepath = os.path.join(".", instanceid)
with open(instancepath + ".json", 'w') as file:
file.write(json_info)
file.close()
|
py
|
1a5e0700ebefed6d9f13f728ed65fd5ffa91e028
|
# Copyright (C) 2020 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions
# and limitations under the License.
# pylint: disable=all
import torch
from ...core import multiclass_nms
from ...core.bbox.coder.delta_xywh_bbox_coder import delta2bbox
from ...core.anchor.anchor_generator import SSDAnchorGeneratorClustered
def get_proposals(img_metas, cls_scores, bbox_preds, priors,
cfg, rescale, cls_out_channels, use_sigmoid_cls,
target_means, target_stds):
result_list = []
cls_score_list = cls_scores.tolist()
bbox_pred_list = bbox_preds.tolist()
assert len(cls_score_list) == len(bbox_pred_list)
for img_id in range(len(img_metas)):
cls_score = \
torch.Tensor(cls_score_list[img_id]).detach().to(priors.device)
bbox_pred = \
torch.Tensor(bbox_pred_list[img_id]).detach().to(priors.device)
img_shape = img_metas[img_id]['img_shape']
scale_factor = img_metas[img_id]['scale_factor']
proposals = get_bboxes_single(cls_score, bbox_pred, priors, img_shape,
scale_factor, cfg, rescale,
cls_out_channels, use_sigmoid_cls,
target_means, target_stds)
result_list.append(proposals)
return result_list
def get_bboxes_single(cls_scores, bbox_preds, priors, img_shape, scale_factor,
cfg, rescale, cls_out_channels, use_sigmoid_cls,
target_means, target_stds):
cls_scores = cls_scores.view(-1, cls_out_channels)
bbox_preds = bbox_preds.view(-1, 4)
priors = priors.view(-1, 4)
nms_pre = cfg.get('nms_pre', -1)
if nms_pre > 0 and cls_scores.shape[0] > nms_pre:
if use_sigmoid_cls:
max_scores, _ = cls_scores.max(dim=1)
else:
max_scores, _ = cls_scores[:, :-1].max(dim=1)
_, topk_inds = max_scores.topk(nms_pre)
priors = priors[topk_inds, :]
bbox_preds = bbox_preds[topk_inds, :]
cls_scores = cls_scores[topk_inds, :]
mlvl_bboxes = delta2bbox(priors, bbox_preds, target_means,
target_stds, img_shape)
if rescale:
mlvl_bboxes /= mlvl_bboxes.new_tensor(scale_factor)
if use_sigmoid_cls:
padding = cls_scores.new_zeros(cls_scores.shape[0], 1)
cls_scores = torch.cat([padding, cls_scores], dim=1)
det_bboxes, det_labels = multiclass_nms(
mlvl_bboxes, cls_scores, cfg.score_thr, cfg.nms, cfg.max_per_img)
return det_bboxes, det_labels
class PriorBox(torch.autograd.Function):
"""Compute priorbox coordinates in point form for each source
feature map.
"""
@staticmethod
def symbolic(g, single_level_grid_anchors, base_anchors, base_size, scales, ratios,
anchor_stride, feat, img_tensor, target_stds):
min_size = base_size
max_sizes = []
ars = []
for scale in scales[1:]:
max_sizes.append(scale * scale * min_size)
for ar in ratios:
if ar > 1:
ars.append(ar)
return g.op("PriorBox", feat, img_tensor, min_size_f=[min_size],
max_size_f=max_sizes, aspect_ratio_f=ars, flip_i=1,
clip_i=0, variance_f=list(target_stds),
step_f=anchor_stride[0], offset_f=0.5, step_h_f=0,
step_w_f=0, img_size_i=0, img_h_i=0, img_w_i=0)
@staticmethod
def forward(ctx, single_level_grid_anchors, base_anchors, base_size, scales, ratios,
anchor_stride, feat, img_tensor, target_stds):
assert anchor_stride[0] == anchor_stride[1]
mlvl_anchor = single_level_grid_anchors(base_anchors, feat.size()[-2:], anchor_stride)
mlvl_anchor = mlvl_anchor.view(1, -1).unsqueeze(0)
return mlvl_anchor
class PriorBoxClustered(torch.autograd.Function):
"""Compute priorbox coordinates in point form for each source
feature map.
"""
@staticmethod
def symbolic(g, single_level_grid_anchors, base_anchors, anchors_heights, anchors_widths,
anchor_stride, feat, img_tensor, target_stds):
return g.op("PriorBoxClustered", feat, img_tensor,
height_f=anchors_heights, width_f=anchors_widths,
flip_i=0, clip_i=0, variance_f=list(target_stds),
step_f=anchor_stride[0], offset_f=0.5, step_h_f=0,
step_w_f=0, img_size_i=0, img_h_i=0, img_w_i=0)
@staticmethod
def forward(ctx, single_level_grid_anchors, base_anchors, anchors_heights, anchors_widths,
anchor_stride, feat, img_tensor, target_stds):
assert anchor_stride[0] == anchor_stride[1]
mlvl_anchor = single_level_grid_anchors(base_anchors, feat.size()[-2:], anchor_stride, base_anchors.device)
mlvl_anchor = mlvl_anchor.view(1, -1).unsqueeze(0)
return mlvl_anchor
class DetectionOutput(torch.autograd.Function):
"""At test time, Detect is the final layer of SSD. Decode location preds,
apply non-maximum suppression to location predictions based on conf
scores and threshold to a top_k number of output predictions for both
confidence score and locations.
"""
@staticmethod
def symbolic(g, cls_scores, bbox_preds, img_metas, cfg,
rescale, priors, cls_out_channels, use_sigmoid_cls,
target_means, target_stds):
return g.op("DetectionOutput", bbox_preds, cls_scores, priors,
num_classes_i=cls_out_channels, background_label_id_i=cls_out_channels - 1,
top_k_i=cfg['max_per_img'],
keep_top_k_i=cfg['max_per_img'],
confidence_threshold_f=cfg['score_thr'],
nms_threshold_f=cfg['nms']['iou_thr'],
eta_f=1, share_location_i=1,
code_type_s="CENTER_SIZE", variance_encoded_in_target_i=0)
@staticmethod
def forward(ctx, cls_scores, bbox_preds, img_metas, cfg,
rescale, priors, cls_out_channels, use_sigmoid_cls,
target_means, target_stds):
proposals = get_proposals(img_metas, cls_scores, bbox_preds, priors,
cfg, rescale, cls_out_channels,
use_sigmoid_cls, target_means, target_stds)
b_s = len(proposals)
output = \
torch.zeros(b_s, 1, cfg.max_per_img, 7).to(cls_scores.device)
for img_id in range(0, b_s):
bboxes, labels = proposals[img_id]
coords = bboxes[:, :4]
scores = bboxes[:, 4]
labels = labels.float()
output_for_img = \
torch.zeros(scores.size()[0], 7).to(cls_scores.device)
output_for_img[:, 0] = img_id
output_for_img[:, 1] = labels
output_for_img[:, 2] = scores
output_for_img[:, 3:] = coords
output[img_id, 0, :output_for_img.size()[0]] = output_for_img
return output
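# Illustrative sketch (assumption: not part of the original code): each row of the
# (batch, 1, max_per_img, 7) tensor produced by DetectionOutput.forward packs
# [image_id, class_label, score, x1, y1, x2, y2]; unpacking one image could look like:
def _example_unpack_detections(output, img_id=0):
    dets = output[img_id, 0]          # (max_per_img, 7)
    dets = dets[dets[:, 2] > 0]       # drop zero-score padding rows
    return dets[:, 3:], dets[:, 1], dets[:, 2]   # boxes, labels, scores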
def onnx_export(self, img, img_metas, export_name='', **kwargs):
self._export_mode = True
self.img_metas = img_metas
torch.onnx.export(self, img, export_name, **kwargs)
def forward(self, img, img_meta=[None], return_loss=True,
**kwargs): # passing None here is a hack to fool the jit engine
if self._export_mode:
return self.forward_export(img)
if return_loss:
return self.forward_train(img, img_meta, **kwargs)
else:
return self.forward_test(img, img_meta, **kwargs)
def forward_export_detector(self, img):
x = self.extract_feat(img)
outs = self.bbox_head(x)
bbox_result = self.bbox_head.export_forward(*outs, self.test_cfg, True,
self.img_metas, x, img)
return bbox_result
def export_forward_ssd_head(self, cls_scores, bbox_preds, cfg, rescale,
img_metas, feats, img_tensor):
num_levels = len(cls_scores)
anchors = []
for i in range(num_levels):
if isinstance(self.anchor_generator, SSDAnchorGeneratorClustered):
anchors.append(PriorBoxClustered.apply(
self.anchor_generator.single_level_grid_anchors,
self.anchor_generator.base_anchors[i],
self.anchor_generator.heights[i],
self.anchor_generator.widths[i],
self.anchor_generator.strides[i],
feats[i], img_tensor, self.bbox_coder.stds))
else:
anchors.append(PriorBox.apply(
self.anchor_generator.single_level_grid_anchors,
self.anchor_generator.base_anchors[i],
self.anchor_generator.base_sizes[i],
self.anchor_generator.scales[i].tolist(),
self.anchor_generator.ratios[i].tolist(),
self.anchor_generator.strides[i],
feats[i],
img_tensor, self.bbox_coder.stds))
anchors = torch.cat(anchors, 2)
cls_scores, bbox_preds = self._prepare_cls_scores_bbox_preds(cls_scores, bbox_preds)
return DetectionOutput.apply(cls_scores, bbox_preds, img_metas, cfg,
rescale, anchors, self.cls_out_channels,
self.use_sigmoid_cls, self.bbox_coder.means,
self.bbox_coder.stds)
def prepare_cls_scores_bbox_preds_ssd_head(self, cls_scores, bbox_preds):
scores_list = []
for o in cls_scores:
score = o.permute(0, 2, 3, 1).contiguous().view(o.size(0), -1)
scores_list.append(score)
cls_scores = torch.cat(scores_list, 1)
cls_scores = cls_scores.view(cls_scores.size(0), -1, self.cls_out_channels)
if self.use_sigmoid_cls:
cls_scores = cls_scores.sigmoid()
else:
cls_scores = cls_scores.softmax(-1)
cls_scores = cls_scores.view(cls_scores.size(0), -1)
bbox_list = []
for o in bbox_preds:
boxes = o.permute(0, 2, 3, 1).contiguous().view(o.size(0), -1)
bbox_list.append(boxes)
bbox_preds = torch.cat(bbox_list, 1)
return cls_scores, bbox_preds
|
py
|
1a5e07bb8105774a74e89f19bff389946c150846
|
import os
with open(os.path.join(os.path.dirname(__file__), 'VERSION')) as f:
__version__ = f.read().strip()
|
py
|
1a5e07d756e662d5b6cfdc0461cd979b958df54a
|
import _dk_core as core
from weakref import WeakKeyDictionary
from . import view
class Control(view.View):
STATE_NORMAL = 0
STATE_HIGHLIGHTED = 1
STATE_ACTIVATED = 2
STATE_DISABLED = 3
STATE_ALL = 0xff
removeAllTargetsWhenUnload = True
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.__targets = WeakKeyDictionary()
def addTarget(self, key, callback):
if callable(callback):
self.__targets[key] = callback
else:
            raise TypeError('callback is not callable')
def removeTarget(self, key):
try:
del self.__targets[key]
except KeyError:
pass
def removeAllTargets(self):
self.__targets = WeakKeyDictionary()
def invokeAllTargets(self, *args):
for cb in self.__targets.values():
cb(*args)
def invokeOneTarget(self, key, *args):
cb = self.__targets[key]
cb(*args)
def onUnload(self):
if self.removeAllTargetsWhenUnload:
self.removeAllTargets()
return super().onUnload()
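# Illustrative usage sketch (assumption: not part of the original module).
# 'control' stands for any Control instance and 'owner' for any weak-referenceable key.
def _example_wire_targets(control, owner):
    # Register a callback under 'owner'; the key is held only via weak reference.
    control.addTarget(owner, lambda *args: print('invoked with', args))
    # Fire every registered callback with an arbitrary payload.
    control.invokeAllTargets('payload')
    # Remove the callback explicitly; it is also dropped once 'owner' is garbage collected.
    control.removeTarget(owner)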
|
py
|
1a5e08cd304ec885ebd6bd2a86685acc4dcdb686
|
#!/usr/bin/env python
import os
import glob
import yaml
import flask
from flask_hal import HAL
from flask_hal.document import Document, Embedded
from flask_hal.link import Collection, Link
from versions import Version
app = flask.Flask(__name__)
HAL(app)
def filter_versions():
"""
Filters versions for the current request.
"""
snapshots = flask.request.args.get('snapshots', flask.request.args.get('snapshot'))
platform = flask.request.args.get('platform')
if snapshots == 'true':
snapshots = True
else:
snapshots = False
return Version.objects.filter(snapshots=snapshots, platform=platform)
@app.route('/')
def root():
return Document(links=Collection(
Link('versions', '/versions'),
)).to_dict()
@app.route('/versions/<name>')
def version_detail(name):
version = Version.objects.get(version=name)
links = [Link(rel, url) for (rel, url) in version.binaries.items()]
return Document(data={'version': version.version}, links=Collection(*links)).to_dict()
@app.route('/versions/<name>/binaries/<platform>')
def binary_detail(name, platform):
version = Version.objects.get(version=name)
binary = version.binaries.get(platform)
if not binary:
raise flask.abort(404)
response = app.make_response(binary)
response.headers['Content-Type'] = 'text/plain'
return response
@app.route('/versions')
def list_text_versions():
versions = filter_versions().versions
if 'text/plain' in flask.request.accept_mimetypes.values():
names = [str(v) for v in versions]
response = app.make_response('\n'.join(names) + '\n')
response.headers['Content-Type'] = 'text/plain'
return response
def to_embedded(v):
return Embedded(links=Collection(Link('self', '/versions/{}'.format(v.version))))
return Document(embedded=dict([(v.version, to_embedded(v)) for v in versions])).to_dict()
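# Illustrative sketch (assumption: not part of the original app): exercising the
# endpoints with Flask's built-in test client instead of a running server.
def _example_requests():
    client = app.test_client()
    print(client.get('/').data)                                        # HAL root with a 'versions' link
    print(client.get('/versions', headers={'Accept': 'text/plain'}).data)
    print(client.get('/versions?snapshots=true', headers={'Accept': 'text/plain'}).data)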
if __name__ == '__main__':
app.run(debug=True)
|
py
|
1a5e097079efd5d7c95990540cbd9a4af1298a60
|
# -*- coding: utf-8 -*-
# This code is part of Qiskit.
#
# (C) Copyright IBM 2019.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
from collections import OrderedDict
import numpy as np
from qiskit.quantum_info import Pauli
from qiskit.aqua import Operator
def get_portfoliodiversification_qubitops(rho, n, q):
"""Converts an instnance of portfolio optimization into a list of Paulis.
Args:
rho (numpy.ndarray) : an asset-to-asset similarity matrix, such as the covariance matrix.
n (integer) : the number of assets.
q (integer) : the number of clusters of assets to output.
Returns:
operator.Operator: operator for the Hamiltonian.
"""
#N = (n + 1) * n # number of qubits
N = n**2 + n
A = np.max(np.abs(rho)) * 1000 # A parameter of cost function
# Determine the weights w
instance_vec = rho.reshape(n ** 2)
# quadratic term Q
q0 = np.zeros([N,1])
Q1 = np.zeros([N,N])
Q2 = np.zeros([N,N])
Q3 = np.zeros([N, N])
for x in range(n**2,n**2+n):
q0[x] = 1
Q0 = A*np.dot(q0,q0.T)
for ii in range(0,n):
v0 = np.zeros([N,1])
for jj in range(n*ii,n*(ii+1)):
v0[jj] = 1
Q1 = Q1 + np.dot(v0,v0.T)
Q1 = A*Q1
for jj in range(0,n):
v0 = np.zeros([N,1])
v0[n*jj+jj] = 1
v0[n**2+jj] = -1
Q2 = Q2 + np.dot(v0, v0.T)
Q2 = A*Q2
for ii in range(0, n):
for jj in range(0,n):
Q3[ii*n + jj, n**2+jj] = -0.5
Q3[n ** 2 + jj,ii * n + jj] = -0.5
Q3 = A * Q3
Q = Q0+Q1+Q2+Q3
# linear term c:
c0 = np.zeros(N)
c1 = np.zeros(N)
c2 = np.zeros(N)
c3 = np.zeros(N)
for x in range(n**2):
c0[x] = instance_vec[x]
for x in range(n**2,n**2+n):
c1[x] = -2*A*q
for x in range(n**2):
c2[x] = -2*A
for x in range(n**2):
c3[x] = A
g = c0+c1+c2+c3
# constant term r
c = A*(q**2 + n)
# Defining the new matrices in the Z-basis
Iv = np.ones(N)
Qz = (Q / 4)
gz = (-g / 2 - np.dot(Iv, Q / 4) - np.dot(Q / 4, Iv))
cz = (c + np.dot(g / 2, Iv) + np.dot(Iv, np.dot(Q / 4, Iv)))
cz = cz + np.trace(Qz)
Qz = Qz - np.diag(np.diag(Qz))
# Getting the Hamiltonian in the form of a list of Pauli terms
pauli_list = []
for i in range(N):
if gz[i] != 0:
wp = np.zeros(N)
vp = np.zeros(N)
vp[i] = 1
pauli_list.append((gz[i], Pauli(vp, wp)))
for i in range(N):
for j in range(i):
if Qz[i, j] != 0:
wp = np.zeros(N)
vp = np.zeros(N)
vp[i] = 1
vp[j] = 1
pauli_list.append((2 * Qz[i, j], Pauli(vp, wp)))
pauli_list.append((cz, Pauli(np.zeros(N), np.zeros(N))))
return Operator(paulis=pauli_list)
def get_portfoliodiversification_solution(rho, n, q, result):
"""Tries to obtain a feasible solution (in vector form) of an instnance of portfolio diversification from the results dictionary.
Args:
rho (numpy.ndarray) : an asset-to-asset similarity matrix, such as the covariance matrix.
n (integer) : the number of assets.
q (integer) : the number of clusters of assets to output.
result (dictionary) : a dictionary obtained by QAOA.run or VQE.run containing key 'eigvecs'.
Returns:
x_state (numpy.ndarray) : a vector describing the solution.
"""
v = result['eigvecs'][0]
# N = (n + 1) * n # number of qubits
N = n ** 2 + n
index_value = [x for x in range(len(v)) if v[x] == max(v)][0]
string_value = "{0:b}".format(index_value)
while len(string_value)<N:
string_value = '0'+string_value
x_state = list()
for elements in string_value:
if elements == '0':
x_state.append(0)
else:
x_state.append(1)
x_state = np.flip(x_state, axis=0)
return x_state
def get_portfoliodiversification_value(rho, n, q, x_state):
"""Evaluates an objective function of an instnance of portfolio diversification and its solution (in vector form).
Args:
rho (numpy.ndarray) : an asset-to-asset similarity matrix, such as the covariance matrix.
n (integer) : the number of assets.
q (integer) : the number of clusters of assets to output.
x_state (numpy.ndarray) : a vector describing the solution.
Returns:
float: cost of the solution.
"""
# N = (n + 1) * n # number of qubits
N = n ** 2 + n
A = np.max(np.abs(rho)) * 1000 # A parameter of cost function
# Determine the weights w
instance_vec = rho.reshape(n ** 2)
# quadratic term Q
q0 = np.zeros([N, 1])
Q1 = np.zeros([N, N])
Q2 = np.zeros([N, N])
Q3 = np.zeros([N, N])
for x in range(n ** 2, n ** 2 + n):
q0[x] = 1
Q0 = A * np.dot(q0, q0.T)
for ii in range(0, n):
v0 = np.zeros([N, 1])
for jj in range(n * ii, n * (ii + 1)):
v0[jj] = 1
Q1 = Q1 + np.dot(v0, v0.T)
Q1 = A * Q1
for jj in range(0, n):
v0 = np.zeros([N, 1])
v0[n * jj + jj] = 1
v0[n ** 2 + jj] = -1
Q2 = Q2 + np.dot(v0, v0.T)
Q2 = A * Q2
for ii in range(0, n):
for jj in range(0, n):
Q3[ii * n + jj, n ** 2 + jj] = -0.5
Q3[n ** 2 + jj, ii * n + jj] = -0.5
Q3 = A * Q3
Q = Q0 + Q1 + Q2 + Q3
# linear term c:
c0 = np.zeros(N)
c1 = np.zeros(N)
c2 = np.zeros(N)
c3 = np.zeros(N)
for x in range(n ** 2):
c0[x] = instance_vec[x]
for x in range(n ** 2, n ** 2 + n):
c1[x] = -2 * A * q
for x in range(n ** 2):
c2[x] = -2 * A
for x in range(n ** 2):
c3[x] = A
g = c0 + c1 + c2 + c3
# constant term r
c = A * (q ** 2 + n)
# Evaluates the cost distance from a binary representation
fun = lambda x: np.dot(np.around(x), np.dot(Q, np.around(x))) + np.dot(g, np.around(x)) + c
return fun(x_state)
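# Minimal usage sketch (assumption: illustrative only, not part of the original
# module): build the qubit operator for a toy 2-asset, 1-cluster instance and
# score a hand-written candidate solution. A solution vector has n**2 + n
# entries: the x_ij assignment variables followed by the y_j cluster flags.
if __name__ == '__main__':
    rho_example = np.array([[1.0, 0.8], [0.8, 1.0]])
    qubit_op = get_portfoliodiversification_qubitops(rho_example, n=2, q=1)
    candidate = np.array([1, 0, 1, 0, 1, 0])
    print(get_portfoliodiversification_value(rho_example, n=2, q=1, x_state=candidate))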
|
py
|
1a5e0a4b2d373d9a5beed86779091d25d79049a3
|
"""
Functions for preference fusion.
"""
#Author: Yiru Zhang <[email protected]>
#License: Unlicense
import numpy as np
from baseClass import *
import math
def mergePairs(pairs):
merged = []
for pair in pairs:
        merged = list(set(merged) | set(pair))
return merged
def fusion(users):
#in our first step, we only consider the ideal case, which is:
#1. all users contain same number of alternatives, and
#2. all mass values are properly given
pairs = mergePairs([_.relationDict_.keys() for _ in users])
#create mass matrix on all users for each pair
#all users provide the same contribution to the final result
    nbSingleton = 4  # In our experiment, the frame of discernment of the mass function consists of 16 focal elements based on 4 singletons.
    omega = 2 ** nbSingleton  # kept as an int, since it is used as an array dimension below
#TODO propose a more general way for mass matrix initialisation
fRelationDict = {} # initialize the final relation dictionary
for pair in pairs:
massMat = np.empty((0,omega),dtype = float)
for user in users:
if pair in user.relationDict_.keys():
#add the mean mass vector into the mass matrix.
#In this step, we do the first combination of masses on a single relation pair of one user.
                massMat = np.vstack((massMat, user.relationDict_[pair].getMeanMassVect()))
        massCom = DST(massMat.T, 1)  # using Smets' rule for combination; note that massIn for DST stores each mass in a column (vertically)
fRelationDict[pair] = massCom.reshape(1,nbSingleton)
    fUser = User(fRelationDict, m=1)  # construct the final user with its relation dictionary
    return fUser
|
py
|
1a5e0a6d4347be48e1f6d6fb5f5fdafef5623672
|
# -*- coding: utf-8 -*-
# Copyright (c) 2006-2014 LOGILAB S.A. (Paris, FRANCE) <[email protected]>
# Copyright (c) 2009 Mads Kiilerich <[email protected]>
# Copyright (c) 2010 Daniel Harding <[email protected]>
# Copyright (c) 2011-2014, 2017 Google, Inc.
# Copyright (c) 2012 FELD Boris <[email protected]>
# Copyright (c) 2013-2018 Claudiu Popa <[email protected]>
# Copyright (c) 2014 Michal Nowikowski <[email protected]>
# Copyright (c) 2014 Brett Cannon <[email protected]>
# Copyright (c) 2014 Ricardo Gemignani <[email protected]>
# Copyright (c) 2014 Arun Persaud <[email protected]>
# Copyright (c) 2015 Dmitry Pribysh <[email protected]>
# Copyright (c) 2015 Radu Ciorba <[email protected]>
# Copyright (c) 2015 Simu Toni <[email protected]>
# Copyright (c) 2015 Ionel Cristian Maries <[email protected]>
# Copyright (c) 2016, 2018 Ashley Whetter <[email protected]>
# Copyright (c) 2016-2017 Derek Gustafson <[email protected]>
# Copyright (c) 2016-2017 Łukasz Rogalski <[email protected]>
# Copyright (c) 2016 Grant Welch <[email protected]>
# Copyright (c) 2016 Jakub Wilk <[email protected]>
# Copyright (c) 2017-2018 hippo91 <[email protected]>
# Copyright (c) 2017 Dan Garrette <[email protected]>
# Copyright (c) 2017 Ville Skyttä <[email protected]>
# Copyright (c) 2018 Bryce Guinta <[email protected]>
# Licensed under the GPL: https://www.gnu.org/licenses/old-licenses/gpl-2.0.html
# For details: https://github.com/PyCQA/pylint/blob/master/COPYING
"""variables checkers for Python code
"""
import copy
import itertools
import collections
import os
import sys
import re
try:
from functools import lru_cache
except ImportError:
from backports.functools_lru_cache import lru_cache
import six
import astroid
from astroid import decorators
from astroid import modutils
from pylint.interfaces import IAstroidChecker, INFERENCE, INFERENCE_FAILURE, HIGH
from pylint.utils import get_global_option
from pylint.checkers import BaseChecker
from pylint.checkers import utils
SPECIAL_OBJ = re.compile("^_{2}[a-z]+_{2}$")
FUTURE = '__future__'
# regexp for ignored argument name
IGNORED_ARGUMENT_NAMES = re.compile('_.*|^ignored_|^unused_')
PY3K = sys.version_info >= (3, 0)
def _is_from_future_import(stmt, name):
"""Check if the name is a future import from another module."""
try:
module = stmt.do_import_module(stmt.modname)
except astroid.AstroidBuildingException:
return None
for local_node in module.locals.get(name, []):
if (isinstance(local_node, astroid.ImportFrom)
and local_node.modname == FUTURE):
return True
return None
def in_for_else_branch(parent, stmt):
"""Returns True if stmt in inside the else branch for a parent For stmt."""
return (isinstance(parent, astroid.For) and
any(else_stmt.parent_of(stmt) or else_stmt == stmt
for else_stmt in parent.orelse))
@lru_cache(maxsize=1000)
def overridden_method(klass, name):
"""get overridden method if any"""
try:
parent = next(klass.local_attr_ancestors(name))
except (StopIteration, KeyError):
return None
try:
meth_node = parent[name]
except KeyError:
# We have found an ancestor defining <name> but it's not in the local
# dictionary. This may happen with astroid built from living objects.
return None
if isinstance(meth_node, astroid.FunctionDef):
return meth_node
return None
def _get_unpacking_extra_info(node, infered):
"""return extra information to add to the message for unpacking-non-sequence
and unbalanced-tuple-unpacking errors
"""
more = ''
infered_module = infered.root().name
if node.root().name == infered_module:
if node.lineno == infered.lineno:
more = ' %s' % infered.as_string()
elif infered.lineno:
more = ' defined at line %s' % infered.lineno
elif infered.lineno:
more = ' defined at line %s of %s' % (infered.lineno, infered_module)
return more
def _detect_global_scope(node, frame, defframe):
""" Detect that the given frames shares a global
scope.
Two frames shares a global scope when neither
of them are hidden under a function scope, as well
as any of parent scope of them, until the root scope.
In this case, depending from something defined later on
will not work, because it is still undefined.
Example:
class A:
# B has the same global scope as `C`, leading to a NameError.
class B(C): ...
class C: ...
"""
def_scope = scope = None
if frame and frame.parent:
scope = frame.parent.scope()
if defframe and defframe.parent:
def_scope = defframe.parent.scope()
if isinstance(frame, astroid.FunctionDef):
# If the parent of the current node is a
# function, then it can be under its scope
# (defined in, which doesn't concern us) or
# the `->` part of annotations. The same goes
# for annotations of function arguments, they'll have
# their parent the Arguments node.
if not isinstance(node.parent,
(astroid.FunctionDef, astroid.Arguments)):
return False
elif any(not isinstance(f, (astroid.ClassDef, astroid.Module))
for f in (frame, defframe)):
# Not interested in other frames, since they are already
# not in a global scope.
return False
break_scopes = []
for s in (scope, def_scope):
# Look for parent scopes. If there is anything different
        # than a module or a class scope, then the frames don't
# share a global scope.
parent_scope = s
while parent_scope:
if not isinstance(parent_scope, (astroid.ClassDef, astroid.Module)):
break_scopes.append(parent_scope)
break
if parent_scope.parent:
parent_scope = parent_scope.parent.scope()
else:
break
if break_scopes and len(set(break_scopes)) != 1:
# Store different scopes than expected.
# If the stored scopes are, in fact, the very same, then it means
        # that the two frames (frame and defframe) share the same scope,
# and we could apply our lineno analysis over them.
# For instance, this works when they are inside a function, the node
# that uses a definition and the definition itself.
return False
    # At this point, we are certain that frame and defframe share a scope
# and the definition of the first depends on the second.
return frame.lineno < defframe.lineno
def _fix_dot_imports(not_consumed):
""" Try to fix imports with multiple dots, by returning a dictionary
with the import names expanded. The function unflattens root imports,
like 'xml' (when we have both 'xml.etree' and 'xml.sax'), to 'xml.etree'
and 'xml.sax' respectively.
"""
# TODO: this should be improved in issue astroid #46
names = {}
for name, stmts in six.iteritems(not_consumed):
if any(isinstance(stmt, astroid.AssignName)
and isinstance(stmt.assign_type(), astroid.AugAssign)
for stmt in stmts):
continue
for stmt in stmts:
if not isinstance(stmt, (astroid.ImportFrom, astroid.Import)):
continue
for imports in stmt.names:
second_name = None
if imports[0] == "*":
# In case of wildcard imports,
# pick the name from inside the imported module.
second_name = name
else:
if imports[0].find(".") > -1 or name in imports:
# Most likely something like 'xml.etree',
# which will appear in the .locals as 'xml'.
# Only pick the name if it wasn't consumed.
second_name = imports[0]
if second_name and second_name not in names:
names[second_name] = stmt
return sorted(names.items(), key=lambda a: a[1].fromlineno)
def _find_frame_imports(name, frame):
"""
Detect imports in the frame, with the required
*name*. Such imports can be considered assignments.
Returns True if an import for the given name was found.
"""
imports = frame.nodes_of_class((astroid.Import, astroid.ImportFrom))
for import_node in imports:
for import_name, import_alias in import_node.names:
# If the import uses an alias, check only that.
# Otherwise, check only the import name.
if import_alias:
if import_alias == name:
return True
elif import_name and import_name == name:
return True
return None
def _import_name_is_global(stmt, global_names):
for import_name, import_alias in stmt.names:
# If the import uses an alias, check only that.
# Otherwise, check only the import name.
if import_alias:
if import_alias in global_names:
return True
elif import_name in global_names:
return True
return False
def _flattened_scope_names(iterator):
values = (set(stmt.names) for stmt in iterator)
return set(itertools.chain.from_iterable(values))
def _assigned_locally(name_node):
"""
Checks if name_node has corresponding assign statement in same scope
"""
assign_stmts = name_node.scope().nodes_of_class(astroid.AssignName)
return any(a.name == name_node.name for a in assign_stmts)
MSGS = {
'E0601': ('Using variable %r before assignment',
'used-before-assignment',
'Used when a local variable is accessed before its \
assignment.'),
'E0602': ('Undefined variable %r',
'undefined-variable',
'Used when an undefined variable is accessed.'),
'E0603': ('Undefined variable name %r in __all__',
'undefined-all-variable',
'Used when an undefined variable name is referenced in __all__.'),
'E0604': ('Invalid object %r in __all__, must contain only strings',
'invalid-all-object',
'Used when an invalid (non-string) object occurs in __all__.'),
'E0611': ('No name %r in module %r',
'no-name-in-module',
'Used when a name cannot be found in a module.'),
'W0601': ('Global variable %r undefined at the module level',
'global-variable-undefined',
'Used when a variable is defined through the "global" statement \
but the variable is not defined in the module scope.'),
'W0602': ('Using global for %r but no assignment is done',
'global-variable-not-assigned',
'Used when a variable is defined through the "global" statement \
but no assignment to this variable is done.'),
'W0603': ('Using the global statement', # W0121
'global-statement',
'Used when you use the "global" statement to update a global \
variable. Pylint just tries to discourage this \
usage. That doesn\'t mean you cannot use it!'),
'W0604': ('Using the global statement at the module level', # W0103
'global-at-module-level',
'Used when you use the "global" statement at the module level \
since it has no effect'),
'W0611': ('Unused %s',
'unused-import',
'Used when an imported module or variable is not used.'),
'W0612': ('Unused variable %r',
'unused-variable',
'Used when a variable is defined but not used.'),
'W0613': ('Unused argument %r',
'unused-argument',
'Used when a function or method argument is not used.'),
'W0614': ('Unused import %s from wildcard import',
'unused-wildcard-import',
'Used when an imported module or variable is not used from a \
`\'from X import *\'` style import.'),
'W0621': ('Redefining name %r from outer scope (line %s)',
'redefined-outer-name',
'Used when a variable\'s name hides a name defined in the outer \
scope.'),
'W0622': ('Redefining built-in %r',
'redefined-builtin',
'Used when a variable or function override a built-in.'),
'W0623': ('Redefining name %r from %s in exception handler',
'redefine-in-handler',
'Used when an exception handler assigns the exception \
to an existing name'),
'W0631': ('Using possibly undefined loop variable %r',
'undefined-loop-variable',
'Used when a loop variable (i.e. defined by a for loop or \
a list comprehension or a generator expression) is used outside \
the loop.'),
'E0632': ('Possible unbalanced tuple unpacking with '
'sequence%s: '
'left side has %d label(s), right side has %d value(s)',
'unbalanced-tuple-unpacking',
'Used when there is an unbalanced tuple unpacking in assignment',
{'old_names': [('W0632', 'unbalanced-tuple-unpacking')]}),
'E0633': ('Attempting to unpack a non-sequence%s',
'unpacking-non-sequence',
'Used when something which is not '
'a sequence is used in an unpack assignment',
{'old_names': [('W0633', 'unpacking-non-sequence')]}),
'W0640': ('Cell variable %s defined in loop',
'cell-var-from-loop',
'A variable used in a closure is defined in a loop. '
'This will result in all closures using the same value for '
'the closed-over variable.'),
}
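# Tiny illustrative snippets (not part of the checker) for a few of the
# messages above:
#   undefined-variable (E0602):      print(spam)           # `spam` was never defined
#   used-before-assignment (E0601):  def f(): print(x); x = 1
#   undefined-loop-variable (W0631): for i in []: pass
#                                    print(i)              # `i` may never be bound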
ScopeConsumer = collections.namedtuple("ScopeConsumer", "to_consume consumed scope_type")
class NamesConsumer(object):
"""
A simple class to handle consumed, to consume and scope type info of node locals
"""
def __init__(self, node, scope_type):
self._atomic = ScopeConsumer(copy.copy(node.locals), {}, scope_type)
def __repr__(self):
msg = "\nto_consume : {:s}\n".format(
", ".join(["{}->{}".format(key, val)
for key, val in self._atomic.to_consume.items()]))
msg += "consumed : {:s}\n".format(
", ".join(["{}->{}".format(key, val)
for key, val in self._atomic.consumed.items()]))
msg += "scope_type : {:s}\n".format(self._atomic.scope_type)
return msg
def __iter__(self):
return iter(self._atomic)
@property
def to_consume(self):
return self._atomic.to_consume
@property
def consumed(self):
return self._atomic.consumed
@property
def scope_type(self):
return self._atomic.scope_type
def mark_as_consumed(self, name, new_node):
"""
Mark the name as consumed and delete it from
the to_consume dictionary
"""
self.consumed[name] = new_node
del self.to_consume[name]
def get_next_to_consume(self, node):
# mark the name as consumed if it's defined in this scope
name = node.name
parent_node = node.parent
found_node = self.to_consume.get(name)
if (found_node and isinstance(parent_node, astroid.Assign)
and parent_node == found_node[0].parent):
lhs = found_node[0].parent.targets[0]
if lhs.name == name: # this name is defined in this very statement
found_node = None
return found_node
class VariablesChecker(BaseChecker):
"""checks for
* unused variables / imports
* undefined variables
* redefinition of variable from builtins or from an outer scope
* use of variable before assignment
* __all__ consistency
"""
__implements__ = IAstroidChecker
name = 'variables'
msgs = MSGS
priority = -1
options = (("init-import",
{'default': 0, 'type' : 'yn', 'metavar' : '<y_or_n>',
'help' : 'Tells whether we should check for unused import in '
'__init__ files.'}),
("dummy-variables-rgx",
{'default': '_+$|(_[a-zA-Z0-9_]*[a-zA-Z0-9]+?$)|dummy|^ignored_|^unused_',
'type' :'regexp', 'metavar' : '<regexp>',
'help' : 'A regular expression matching the name of dummy '
'variables (i.e. expectedly not used).'}),
("additional-builtins",
{'default': (), 'type' : 'csv',
'metavar' : '<comma separated list>',
'help' : 'List of additional names supposed to be defined in '
'builtins. Remember that you should avoid defining new builtins '
'when possible.'
}),
("callbacks",
{'default' : ('cb_', '_cb'), 'type' : 'csv',
'metavar' : '<callbacks>',
'help' : 'List of strings which can identify a callback '
'function by name. A callback name must start or '
'end with one of those strings.'}
),
("redefining-builtins-modules",
{'default': ('six.moves', 'past.builtins', 'future.builtins', 'io', 'builtins'),
'type': 'csv',
'metavar': '<comma separated list>',
'help': 'List of qualified module names which can have objects '
'that can redefine builtins.'}
),
('ignored-argument-names',
{'default' : IGNORED_ARGUMENT_NAMES,
'type' :'regexp', 'metavar' : '<regexp>',
'help' : 'Argument names that match this expression will be '
'ignored. Defaults to names with a leading underscore.'}
),
('allow-global-unused-variables',
{'default': True,
'type': 'yn', 'metavar': '<y_or_n>',
'help': 'Tells whether unused global variables should be treated as a violation.'}
),
)
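# A matching rcfile section could look like this (illustrative values only):
#   [VARIABLES]
#   init-import=no
#   dummy-variables-rgx=_+$|dummy
#   callbacks=cb_,_cb
#   allow-global-unused-variables=yes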
def __init__(self, linter=None):
BaseChecker.__init__(self, linter)
self._to_consume = None # list of tuples: (to_consume:dict, consumed:dict, scope_type:str)
self._checking_mod_attr = None
self._loop_variables = []
# Relying on other checker's options, which might not have been initialized yet.
@decorators.cachedproperty
def _analyse_fallback_blocks(self):
return get_global_option(self, 'analyse-fallback-blocks', default=False)
@decorators.cachedproperty
def _ignored_modules(self):
return get_global_option(self, 'ignored-modules', default=[])
@decorators.cachedproperty
def _allow_global_unused_variables(self):
return get_global_option(self, 'allow-global-unused-variables', default=True)
@utils.check_messages('redefined-outer-name')
def visit_for(self, node):
assigned_to = [var.name for var in node.target.nodes_of_class(astroid.AssignName)]
# Only check variables that are used
dummy_rgx = self.config.dummy_variables_rgx
assigned_to = [var for var in assigned_to if not dummy_rgx.match(var)]
for variable in assigned_to:
for outer_for, outer_variables in self._loop_variables:
if (variable in outer_variables
and not in_for_else_branch(outer_for, node)):
self.add_message(
'redefined-outer-name',
args=(variable, outer_for.fromlineno),
node=node
)
break
self._loop_variables.append((node, assigned_to))
@utils.check_messages('redefined-outer-name')
def leave_for(self, _):
self._loop_variables.pop()
def visit_module(self, node):
"""visit module : update consumption analysis variable
check that globals don't override builtins
"""
self._to_consume = [NamesConsumer(node, 'module')]
for name, stmts in six.iteritems(node.locals):
if utils.is_builtin(name) and not utils.is_inside_except(stmts[0]):
if self._should_ignore_redefined_builtin(stmts[0]) or name == '__doc__':
continue
self.add_message('redefined-builtin', args=name, node=stmts[0])
@utils.check_messages('unused-import', 'unused-wildcard-import',
'redefined-builtin', 'undefined-all-variable',
'invalid-all-object', 'unused-variable')
def leave_module(self, node):
"""leave module: check globals
"""
assert len(self._to_consume) == 1
not_consumed = self._to_consume.pop().to_consume
# attempt to check for __all__ if defined
if '__all__' in node.locals:
self._check_all(node, not_consumed)
# check for unused globals
self._check_globals(not_consumed)
# don't check unused imports in __init__ files
if not self.config.init_import and node.package:
return
self._check_imports(not_consumed)
def _check_all(self, node, not_consumed):
assigned = next(node.igetattr('__all__'))
if assigned is astroid.YES:
return
for elt in getattr(assigned, 'elts', ()):
try:
elt_name = next(elt.infer())
except astroid.InferenceError:
continue
if elt_name is astroid.Uninferable:
continue
if not elt_name.parent:
continue
if (not isinstance(elt_name, astroid.Const)
or not isinstance(elt_name.value, six.string_types)):
self.add_message('invalid-all-object',
args=elt.as_string(), node=elt)
continue
elt_name = elt_name.value
# If elt is in not_consumed, remove it from not_consumed
if elt_name in not_consumed:
del not_consumed[elt_name]
continue
if elt_name not in node.locals:
if not node.package:
self.add_message('undefined-all-variable',
args=(elt_name, ),
node=elt)
else:
basename = os.path.splitext(node.file)[0]
if os.path.basename(basename) == '__init__':
name = node.name + "." + elt_name
try:
modutils.file_from_modpath(name.split("."))
except ImportError:
self.add_message('undefined-all-variable',
args=(elt_name, ),
node=elt)
except SyntaxError:
# don't yield a syntax-error warning,
# because it will be yielded later
# when the file is checked
pass
def _check_globals(self, not_consumed):
if self._allow_global_unused_variables:
return
for name, nodes in six.iteritems(not_consumed):
for node in nodes:
self.add_message('unused-variable', args=(name,), node=node)
def _check_imports(self, not_consumed):
local_names = _fix_dot_imports(not_consumed)
checked = set()
for name, stmt in local_names:
for imports in stmt.names:
real_name = imported_name = imports[0]
if imported_name == "*":
real_name = name
as_name = imports[1]
if real_name in checked:
continue
if name not in (real_name, as_name):
continue
checked.add(real_name)
if (isinstance(stmt, astroid.Import) or
(isinstance(stmt, astroid.ImportFrom) and
not stmt.modname)):
if (isinstance(stmt, astroid.ImportFrom) and
SPECIAL_OBJ.search(imported_name)):
# Filter special objects (__doc__, __all__) etc.,
# because they can be imported for exporting.
continue
if as_name == "_":
continue
if as_name is None:
msg = "import %s" % imported_name
else:
msg = "%s imported as %s" % (imported_name, as_name)
self.add_message('unused-import', args=msg, node=stmt)
elif (isinstance(stmt, astroid.ImportFrom)
and stmt.modname != FUTURE):
if SPECIAL_OBJ.search(imported_name):
# Filter special objects (__doc__, __all__) etc.,
# because they can be imported for exporting.
continue
if _is_from_future_import(stmt, name):
# Check if the name is in fact loaded from a
# __future__ import in another module.
continue
if imported_name == '*':
self.add_message('unused-wildcard-import',
args=name, node=stmt)
else:
if as_name is None:
msg = "%s imported from %s" % (imported_name, stmt.modname)
else:
fields = (imported_name, stmt.modname, as_name)
msg = "%s imported from %s as %s" % fields
self.add_message('unused-import', args=msg, node=stmt)
del self._to_consume
def visit_classdef(self, node):
"""visit class: update consumption analysis variable
"""
self._to_consume.append(NamesConsumer(node, 'class'))
def leave_classdef(self, _):
"""leave class: update consumption analysis variable
"""
# do not check for not used locals here (no sense)
self._to_consume.pop()
def visit_lambda(self, node):
"""visit lambda: update consumption analysis variable
"""
self._to_consume.append(NamesConsumer(node, 'lambda'))
def leave_lambda(self, _):
"""leave lambda: update consumption analysis variable
"""
# do not check for not used locals here
self._to_consume.pop()
def visit_generatorexp(self, node):
"""visit genexpr: update consumption analysis variable
"""
self._to_consume.append(NamesConsumer(node, 'comprehension'))
def leave_generatorexp(self, _):
"""leave genexpr: update consumption analysis variable
"""
# do not check for not used locals here
self._to_consume.pop()
def visit_dictcomp(self, node):
"""visit dictcomp: update consumption analysis variable
"""
self._to_consume.append(NamesConsumer(node, 'comprehension'))
def leave_dictcomp(self, _):
"""leave dictcomp: update consumption analysis variable
"""
# do not check for not used locals here
self._to_consume.pop()
def visit_setcomp(self, node):
"""visit setcomp: update consumption analysis variable
"""
self._to_consume.append(NamesConsumer(node, 'comprehension'))
def leave_setcomp(self, _):
"""leave setcomp: update consumption analysis variable
"""
# do not check for not used locals here
self._to_consume.pop()
def visit_functiondef(self, node):
"""visit function: update consumption analysis variable and check locals
"""
self._to_consume.append(NamesConsumer(node, 'function'))
if not (self.linter.is_message_enabled('redefined-outer-name') or
self.linter.is_message_enabled('redefined-builtin')):
return
globs = node.root().globals
for name, stmt in node.items():
if utils.is_inside_except(stmt):
continue
if name in globs and not isinstance(stmt, astroid.Global):
definition = globs[name][0]
if (isinstance(definition, astroid.ImportFrom)
and definition.modname == FUTURE):
# It is a __future__ directive, not a symbol.
continue
line = definition.fromlineno
if not self._is_name_ignored(stmt, name):
self.add_message('redefined-outer-name',
args=(name, line), node=stmt)
elif utils.is_builtin(name) and not self._should_ignore_redefined_builtin(stmt):
# do not print Redefining builtin for additional builtins
self.add_message('redefined-builtin', args=name, node=stmt)
def _is_name_ignored(self, stmt, name):
authorized_rgx = self.config.dummy_variables_rgx
if (isinstance(stmt, astroid.AssignName)
and isinstance(stmt.parent, astroid.Arguments)):
regex = self.config.ignored_argument_names
else:
regex = authorized_rgx
return regex and regex.match(name)
def _check_is_unused(self, name, node, stmt, global_names, nonlocal_names):
# Ignore some special names specified by user configuration.
if self._is_name_ignored(stmt, name):
return
# Ignore names that were added dynamically to the Function scope
if (isinstance(node, astroid.FunctionDef)
and name == '__class__'
and len(node.locals['__class__']) == 1
and isinstance(node.locals['__class__'][0], astroid.ClassDef)):
return
# Ignore names imported by the global statement.
# FIXME: should only ignore them if it's assigned later
if isinstance(stmt, astroid.Global):
return
if isinstance(stmt, (astroid.Import, astroid.ImportFrom)):
# Detect imports, assigned to global statements.
if global_names and _import_name_is_global(stmt, global_names):
return
argnames = list(itertools.chain(
node.argnames(),
[arg.name for arg in node.args.kwonlyargs]
))
is_method = node.is_method()
klass = node.parent.frame()
if is_method and isinstance(klass, astroid.ClassDef):
confidence = INFERENCE if utils.has_known_bases(klass) else INFERENCE_FAILURE
else:
confidence = HIGH
# Care about functions with unknown argument (builtins)
if name in argnames:
if is_method:
# Don't warn for the first argument of a (non static) method
if node.type != 'staticmethod' and name == argnames[0]:
return
# Don't warn for argument of an overridden method
overridden = overridden_method(klass, node.name)
if overridden is not None and name in overridden.argnames():
return
if node.name in utils.PYMETHODS and node.name not in ('__init__', '__new__'):
return
# Don't check callback arguments
if any(node.name.startswith(cb) or node.name.endswith(cb)
for cb in self.config.callbacks):
return
# Don't check arguments of singledispatch.register function.
if utils.is_registered_in_singledispatch_function(node):
return
self.add_message('unused-argument', args=name, node=stmt,
confidence=confidence)
else:
if stmt.parent and isinstance(stmt.parent, astroid.Assign):
if name in nonlocal_names:
return
if isinstance(stmt, astroid.Import):
# Need the complete name, which we don't have in .locals.
qname, asname = stmt.names[0]
name = asname or qname
self.add_message('unused-variable', args=name, node=stmt)
def leave_functiondef(self, node):
"""leave function: check function's locals are consumed"""
not_consumed = self._to_consume.pop().to_consume
if not (self.linter.is_message_enabled('unused-variable') or
self.linter.is_message_enabled('unused-argument')):
return
# Don't check arguments of function which are only raising an exception.
if utils.is_error(node):
return
# Don't check arguments of abstract methods or within an interface.
is_method = node.is_method()
if is_method and node.is_abstract():
return
global_names = _flattened_scope_names(node.nodes_of_class(astroid.Global))
nonlocal_names = _flattened_scope_names(node.nodes_of_class(astroid.Nonlocal))
for name, stmts in six.iteritems(not_consumed):
self._check_is_unused(name, node, stmts[0], global_names, nonlocal_names)
visit_asyncfunctiondef = visit_functiondef
leave_asyncfunctiondef = leave_functiondef
@utils.check_messages('global-variable-undefined', 'global-variable-not-assigned',
'global-statement', 'global-at-module-level',
'redefined-builtin')
def visit_global(self, node):
"""check names imported exists in the global scope"""
frame = node.frame()
if isinstance(frame, astroid.Module):
self.add_message('global-at-module-level', node=node)
return
module = frame.root()
default_message = True
for name in node.names:
try:
assign_nodes = module.getattr(name)
except astroid.NotFoundError:
# unassigned global, skip
assign_nodes = []
if not assign_nodes:
self.add_message('global-variable-not-assigned',
args=name, node=node)
default_message = False
continue
for anode in assign_nodes:
if (isinstance(anode, astroid.AssignName)
and anode.name in module.special_attributes):
self.add_message('redefined-builtin', args=name, node=node)
break
if anode.frame() is module:
# module level assignment
break
else:
# global undefined at the module scope
self.add_message('global-variable-undefined', args=name, node=node)
default_message = False
if default_message:
self.add_message('global-statement', node=node)
def _check_late_binding_closure(self, node, assignment_node):
def _is_direct_lambda_call():
return (isinstance(node_scope.parent, astroid.Call)
and node_scope.parent.func is node_scope)
node_scope = node.scope()
if not isinstance(node_scope, (astroid.Lambda, astroid.FunctionDef)):
return
if isinstance(node.parent, astroid.Arguments):
return
if isinstance(assignment_node, astroid.Comprehension):
if assignment_node.parent.parent_of(node.scope()):
self.add_message('cell-var-from-loop', node=node, args=node.name)
else:
assign_scope = assignment_node.scope()
maybe_for = assignment_node
while not isinstance(maybe_for, astroid.For):
if maybe_for is assign_scope:
break
maybe_for = maybe_for.parent
else:
if (maybe_for.parent_of(node_scope)
and not _is_direct_lambda_call()
and not isinstance(node_scope.statement(), astroid.Return)):
self.add_message('cell-var-from-loop', node=node, args=node.name)
def _loopvar_name(self, node, name):
# filter variables according to node's scope
# XXX used to filter parents but don't remember why, and removing this
# fixes a W0631 false positive reported by Paul Hachmann on 2008/12 on
# python-projects (added to func_use_for_or_listcomp_var test)
#astmts = [stmt for stmt in node.lookup(name)[1]
# if hasattr(stmt, 'ass_type')] and
# not stmt.statement().parent_of(node)]
if not self.linter.is_message_enabled('undefined-loop-variable'):
return
astmts = [stmt for stmt in node.lookup(name)[1]
if hasattr(stmt, 'ass_type')]
# filter variables according to their respective scope; test is_statement
# and parent to avoid #74747. This is not a total fix, which would
# introduce a mechanism similar to special attribute lookup in
# modules. Also, in order to get correct inference in this case, the
# scope lookup rules would need to be changed to return the initial
# assignment (which does not exist in code per se) as well as any later
# modifications.
if not astmts or (astmts[0].is_statement or astmts[0].parent) \
and astmts[0].statement().parent_of(node):
_astmts = []
else:
_astmts = astmts[:1]
for i, stmt in enumerate(astmts[1:]):
if (astmts[i].statement().parent_of(stmt)
and not in_for_else_branch(astmts[i].statement(), stmt)):
continue
_astmts.append(stmt)
astmts = _astmts
if len(astmts) == 1:
assign = astmts[0].assign_type()
if (isinstance(assign, (astroid.For, astroid.Comprehension,
astroid.GeneratorExp))
and assign.statement() is not node.statement()):
self.add_message('undefined-loop-variable', args=name, node=node)
def _should_ignore_redefined_builtin(self, stmt):
if not isinstance(stmt, astroid.ImportFrom):
return False
return stmt.modname in self.config.redefining_builtins_modules
@utils.check_messages('redefine-in-handler')
def visit_excepthandler(self, node):
for name in utils.get_all_elements(node.name):
clobbering, args = utils.clobber_in_except(name)
if clobbering:
self.add_message('redefine-in-handler', args=args, node=name)
def visit_assignname(self, node):
if isinstance(node.assign_type(), astroid.AugAssign):
self.visit_name(node)
def visit_delname(self, node):
self.visit_name(node)
@staticmethod
def _defined_in_function_definition(node, frame):
in_annotation_or_default = False
if (isinstance(frame, astroid.FunctionDef) and
node.statement() is frame):
in_annotation_or_default = (
(
PY3K and (node in frame.args.annotations
or node in frame.args.kwonlyargs_annotations
or node is frame.args.varargannotation
or node is frame.args.kwargannotation)
)
or
frame.args.parent_of(node)
)
return in_annotation_or_default
@staticmethod
def _is_variable_violation(node, name, defnode, stmt, defstmt,
frame, defframe, base_scope_type,
recursive_klass):
# node: Node to check for violation
# name: name of node to check violation for
# frame: Scope of statement of node
# base_scope_type: local scope type
maybee0601 = True
annotation_return = False
use_outer_definition = False
if frame is not defframe:
maybee0601 = _detect_global_scope(node, frame, defframe)
elif defframe.parent is None:
# we are at the module level, check the name is not
# defined in builtins
if name in defframe.scope_attrs or astroid.builtin_lookup(name)[1]:
maybee0601 = False
else:
# we are in a local scope, check the name is not
# defined in global or builtin scope
# skip this lookup if name is assigned later in function scope/lambda
# Note: the node.frame() is not the same as the `frame` argument which is
# equivalent to frame.statement().scope()
forbid_lookup = ((isinstance(frame, astroid.FunctionDef) or
isinstance(node.frame(), astroid.Lambda)) and
_assigned_locally(node))
if not forbid_lookup and defframe.root().lookup(name)[1]:
maybee0601 = False
use_outer_definition = (
stmt == defstmt
and not isinstance(defnode, astroid.node_classes.Comprehension)
)
else:
# check if we have a nonlocal
if name in defframe.locals:
maybee0601 = not any(isinstance(child, astroid.Nonlocal)
and name in child.names
for child in defframe.get_children())
if (base_scope_type == 'lambda' and
isinstance(frame, astroid.ClassDef)
and name in frame.locals):
# This rule verifies that if the definition node of the
# checked name is an Arguments node and if the name
# is used as a default value in the arguments defaults
# and the actual definition of the variable label
# is happening before the Arguments definition.
#
# bar = None
# foo = lambda bar=bar: bar
#
# In this case, maybee0601 should be False, otherwise
# it should be True.
maybee0601 = not (isinstance(defnode, astroid.Arguments) and
node in defnode.defaults and
frame.locals[name][0].fromlineno < defstmt.fromlineno)
elif (isinstance(defframe, astroid.ClassDef) and
isinstance(frame, astroid.FunctionDef)):
# Special rule for function return annotations,
# which uses the same name as the class where
# the function lives.
if (PY3K and node is frame.returns and
defframe.parent_of(frame.returns)):
maybee0601 = annotation_return = True
if (maybee0601 and defframe.name in defframe.locals and
defframe.locals[name][0].lineno < frame.lineno):
# Detect class assignments with the same
# name as the class. In this case, no warning
# should be raised.
maybee0601 = False
if isinstance(node.parent, astroid.Arguments):
maybee0601 = stmt.fromlineno <= defstmt.fromlineno
elif recursive_klass:
maybee0601 = True
else:
maybee0601 = maybee0601 and stmt.fromlineno <= defstmt.fromlineno
if maybee0601 and stmt.fromlineno == defstmt.fromlineno:
if (isinstance(defframe, astroid.FunctionDef)
and frame is defframe
and defframe.parent_of(node)
and stmt is not defstmt):
# Single statement function, with the statement on the
# same line as the function definition
maybee0601 = False
return maybee0601, annotation_return, use_outer_definition
def _ignore_class_scope(self, node):
"""
Return True if the node is in a local class scope, as an assignment.
:param node: Node considered
:type node: astroid.Node
:return: True if the node is in a local class scope, as an assignment. False otherwise.
:rtype: bool
"""
# Detect if we are in a local class scope, as an assignment.
# For example, the following is fair game.
#
# class A:
# b = 1
# c = lambda b=b: b * b
#
# class B:
# tp = 1
# def func(self, arg: tp):
# ...
# class C:
# tp = 2
# def func(self, arg=tp):
# ...
name = node.name
frame = node.statement().scope()
in_annotation_or_default = self._defined_in_function_definition(node, frame)
if in_annotation_or_default:
frame_locals = frame.parent.scope().locals
else:
frame_locals = frame.locals
return not ((isinstance(frame, astroid.ClassDef) or in_annotation_or_default) and
name in frame_locals)
@utils.check_messages(*(MSGS.keys()))
def visit_name(self, node):
"""check that a name is defined if the current scope and doesn't
redefine a built-in
"""
stmt = node.statement()
if stmt.fromlineno is None:
# name node from a astroid built from live code, skip
assert not stmt.root().file.endswith('.py')
return
name = node.name
frame = stmt.scope()
# if the name node is used as a function default argument's value or as
# a decorator, then start from the parent frame of the function instead
# of the function frame - and thus open an inner class scope
if ((utils.is_func_default(node) and not utils.in_comprehension(node)) or
utils.is_func_decorator(node) or utils.is_ancestor_name(frame, node)):
# Do not use the highest scope to look for variable name consumption in this case
# If the name is used in the function default, or as a decorator, then it
# cannot be defined there
# (except for list comprehensions in function defaults)
start_index = len(self._to_consume) - 2
else:
start_index = len(self._to_consume) - 1
# iterates through parent scopes, from the inner to the outer
base_scope_type = self._to_consume[start_index].scope_type
# pylint: disable=too-many-nested-blocks; refactoring this block is a pain.
for i in range(start_index, -1, -1):
current_consumer = self._to_consume[i]
# if the current scope is a class scope but it's not the inner
# scope, ignore it. This prevents accessing this scope instead of
# the global one in function members when there are some common
# names. The only exception is when the starting scope is a
# comprehension and its direct outer scope is a class
if current_consumer.scope_type == 'class' and i != start_index and not (
base_scope_type == 'comprehension' and i == start_index-1):
if self._ignore_class_scope(node):
continue
# the name has already been consumed, only check it's not a loop
# variable used outside the loop
# avoid the case where there are homonyms inside function scope and
# comprehension current scope (avoid bug #1731)
if name in current_consumer.consumed and not (
current_consumer.scope_type == 'comprehension'
and self._has_homonym_in_upper_function_scope(node, i)):
defnode = utils.assign_parent(current_consumer.consumed[name][0])
self._check_late_binding_closure(node, defnode)
self._loopvar_name(node, name)
break
found_node = current_consumer.get_next_to_consume(node)
if found_node is None:
continue
# checks for use before assignment
defnode = utils.assign_parent(current_consumer.to_consume[name][0])
if defnode is not None:
self._check_late_binding_closure(node, defnode)
defstmt = defnode.statement()
defframe = defstmt.frame()
# The class reuses itself in the class scope.
recursive_klass = (frame is defframe and
defframe.parent_of(node) and
isinstance(defframe, astroid.ClassDef) and
node.name == defframe.name)
maybee0601, annotation_return, use_outer_definition = self._is_variable_violation(
node, name, defnode, stmt, defstmt,
frame, defframe,
base_scope_type, recursive_klass)
if use_outer_definition:
continue
if (maybee0601
and not utils.is_defined_before(node)
and not astroid.are_exclusive(stmt, defstmt, ('NameError',))):
# Used and defined in the same place, e.g `x += 1` and `del x`
defined_by_stmt = (
defstmt is stmt
and isinstance(node, (astroid.DelName, astroid.AssignName))
)
if (recursive_klass
or defined_by_stmt
or annotation_return
or isinstance(defstmt, astroid.Delete)):
if not utils.node_ignores_exception(node, NameError):
self.add_message('undefined-variable', args=name,
node=node)
elif base_scope_type != 'lambda':
# E0601 may *not* occur in lambda scope.
self.add_message('used-before-assignment', args=name, node=node)
elif base_scope_type == 'lambda':
# E0601 can occur in class-level scope in lambdas, as in
# the following example:
# class A:
# x = lambda attr: f + attr
# f = 42
if isinstance(frame, astroid.ClassDef) and name in frame.locals:
if isinstance(node.parent, astroid.Arguments):
if stmt.fromlineno <= defstmt.fromlineno:
# Doing the following is fine:
# class A:
# x = 42
# y = lambda attr=x: attr
self.add_message('used-before-assignment',
args=name, node=node)
else:
self.add_message('undefined-variable',
args=name, node=node)
elif current_consumer.scope_type == 'lambda':
self.add_message('undefined-variable',
node=node, args=name)
current_consumer.mark_as_consumed(name, found_node)
# check it's not a loop variable used outside the loop
self._loopvar_name(node, name)
break
else:
# we have not found the name; if it isn't a builtin, that's an
# undefined name!
if not (name in astroid.Module.scope_attrs or utils.is_builtin(name)
or name in self.config.additional_builtins):
if not utils.node_ignores_exception(node, NameError):
self.add_message('undefined-variable', args=name, node=node)
def _has_homonym_in_upper_function_scope(self, node, index):
"""
Return True if there is a node with the same name in the to_consume dict of an upper scope
and if that scope is a function
:param node: node to check for
:type node: astroid.Node
:param index: index of the current consumer inside self._to_consume
:type index: int
:return: True if there is a node with the same name in the to_consume dict of an upper scope
and if that scope is a function
:rtype: bool
"""
for _consumer in self._to_consume[index-1::-1]:
if _consumer.scope_type == 'function' and node.name in _consumer.to_consume:
return True
return False
@utils.check_messages('no-name-in-module')
def visit_import(self, node):
"""check modules attribute accesses"""
if not self._analyse_fallback_blocks and utils.is_from_fallback_block(node):
# No need to verify this, since ImportError is already
# handled by the client code.
return
for name, _ in node.names:
parts = name.split('.')
try:
module = next(node.infer_name_module(parts[0]))
except astroid.ResolveError:
continue
self._check_module_attrs(node, module, parts[1:])
@utils.check_messages('no-name-in-module')
def visit_importfrom(self, node):
"""check modules attribute accesses"""
if not self._analyse_fallback_blocks and utils.is_from_fallback_block(node):
# No need to verify this, since ImportError is already
# handled by the client code.
return
name_parts = node.modname.split('.')
try:
module = node.do_import_module(name_parts[0])
except astroid.AstroidBuildingException:
return
module = self._check_module_attrs(node, module, name_parts[1:])
if not module:
return
for name, _ in node.names:
if name == '*':
continue
self._check_module_attrs(node, module, name.split('.'))
@utils.check_messages('unbalanced-tuple-unpacking', 'unpacking-non-sequence')
def visit_assign(self, node):
"""Check unbalanced tuple unpacking for assignments
and unpacking non-sequences.
"""
if not isinstance(node.targets[0], (astroid.Tuple, astroid.List)):
return
targets = node.targets[0].itered()
try:
infered = utils.safe_infer(node.value)
if infered is not None:
self._check_unpacking(infered, node, targets)
except astroid.InferenceError:
return
def _check_unpacking(self, infered, node, targets):
""" Check for unbalanced tuple unpacking
and unpacking non sequences.
"""
if utils.is_inside_abstract_class(node):
return
if utils.is_comprehension(node):
return
if infered is astroid.YES:
return
if (isinstance(infered.parent, astroid.Arguments) and
isinstance(node.value, astroid.Name) and
node.value.name == infered.parent.vararg):
# Variable-length argument, we can't determine the length.
return
if isinstance(infered, (astroid.Tuple, astroid.List)):
# attempt to check unpacking is properly balanced
values = infered.itered()
if len(targets) != len(values):
# Check if we have starred nodes.
if any(isinstance(target, astroid.Starred)
for target in targets):
return
self.add_message('unbalanced-tuple-unpacking', node=node,
args=(_get_unpacking_extra_info(node, infered),
len(targets),
len(values)))
# attempt to check unpacking may be possible (ie RHS is iterable)
else:
if not utils.is_iterable(infered):
self.add_message('unpacking-non-sequence', node=node,
args=(_get_unpacking_extra_info(node, infered),))
def _check_module_attrs(self, node, module, module_names):
"""check that module_names (list of string) are accessible through the
given module
if the latest access name corresponds to a module, return it
"""
assert isinstance(module, astroid.Module), module
while module_names:
name = module_names.pop(0)
if name == '__dict__':
module = None
break
try:
module = next(module.getattr(name)[0].infer())
if module is astroid.Uninferable:
return None
except astroid.NotFoundError:
if module.name in self._ignored_modules:
return None
self.add_message('no-name-in-module',
args=(name, module.name), node=node)
return None
except astroid.InferenceError:
return None
if module_names:
# FIXME: other message if name is not the latest part of
# module_names ?
modname = module.name if module else '__dict__'
self.add_message('no-name-in-module', node=node,
args=('.'.join(module_names), modname))
return None
if isinstance(module, astroid.Module):
return module
return None
class VariablesChecker3k(VariablesChecker):
'''Modified variables checker for 3k'''
# list comprehensions now also have their own scope
def visit_listcomp(self, node):
"""visit dictcomp: update consumption analysis variable
"""
self._to_consume.append(NamesConsumer(node, 'comprehension'))
def leave_listcomp(self, _):
"""leave dictcomp: update consumption analysis variable
"""
# do not check for not used locals here
self._to_consume.pop()
def leave_functiondef(self, node):
self._check_metaclasses(node)
super(VariablesChecker3k, self).leave_functiondef(node)
def leave_module(self, node):
self._check_metaclasses(node)
super(VariablesChecker3k, self).leave_module(node)
def _check_metaclasses(self, node):
""" Update consumption analysis for metaclasses. """
consumed = [] # [(scope_locals, consumed_key)]
for child_node in node.get_children():
if isinstance(child_node, astroid.ClassDef):
consumed.extend(self._check_classdef_metaclasses(child_node, node))
# Pop the consumed items, in order to avoid having
# unused-import and unused-variable false positives
for scope_locals, name in consumed:
scope_locals.pop(name, None)
def _check_classdef_metaclasses(self, klass, parent_node):
if not klass._metaclass:
# Skip if this class doesn't explicitly use a metaclass, but inherits it from its ancestors
return []
consumed = [] # [(scope_locals, consumed_key)]
metaclass = klass.metaclass()
name = None
if isinstance(klass._metaclass, astroid.Name):
name = klass._metaclass.name
elif metaclass:
name = metaclass.root().name
found = None
if name:
# check enclosing scopes starting from most local
for scope_locals, _, _ in self._to_consume[::-1]:
found = scope_locals.get(name)
if found:
consumed.append((scope_locals, name))
break
if found is None and not metaclass:
name = None
if isinstance(klass._metaclass, astroid.Name):
name = klass._metaclass.name
elif isinstance(klass._metaclass, astroid.Attribute):
name = klass._metaclass.as_string()
if name is not None:
if not (name in astroid.Module.scope_attrs or
utils.is_builtin(name) or
name in self.config.additional_builtins or
name in parent_node.locals):
self.add_message('undefined-variable',
node=klass,
args=(name,))
return consumed
if sys.version_info >= (3, 0):
VariablesChecker = VariablesChecker3k
def register(linter):
"""required method to auto register this checker"""
linter.register_checker(VariablesChecker(linter))
|
py
|
1a5e0b10c9418cc142315207988feb56049d9f27
|
from . import utils, wsj0mix
|
py
|
1a5e0b5560bd890521026f3f552249c88dd1221a
|
# -*- coding: utf-8 -*-
"""Example of using Linear Method Deviation-base outlier detection (LMDD)
"""
# Author: Yahya Almardeny <[email protected]>
# License: BSD 2 clause
from __future__ import division
from __future__ import print_function
import os
import sys
# temporary solution for relative imports in case pyod is not installed
# if pyod is installed, no need to use the following line
sys.path.append(
os.path.abspath(os.path.join(os.path.dirname("__file__"), '..')))
from pyod.models.lmdd import LMDD
from pyod.utils.data import generate_data
from pyod.utils.data import evaluate_print
from pyod.utils.example import visualize
if __name__ == "__main__":
contamination = 0.1 # percentage of outliers
n_train = 200 # number of training points
n_test = 100 # number of testing points
# Generate sample data
X_train, y_train, X_test, y_test = \
generate_data(n_train=n_train,
n_test=n_test,
n_features=2,
contamination=contamination,
random_state=42)
# train LMDD detector
clf_name = 'LMDD'
clf = LMDD(random_state=42)
clf.fit(X_train)
# get the prediction labels and outlier scores of the training data
y_train_pred = clf.labels_ # binary labels (0: inliers, 1: outliers)
y_train_scores = clf.decision_scores_ # raw outlier scores
# get the prediction on the test data
y_test_pred = clf.predict(X_test) # outlier labels (0 or 1)
y_test_scores = clf.decision_function(X_test) # outlier scores
# evaluate and print the results
print("\nOn Training Data:")
evaluate_print(clf_name, y_train, y_train_scores)
print("\nOn Test Data:")
evaluate_print(clf_name, y_test, y_test_scores)
# visualize the results
visualize(clf_name, X_train, y_train, X_test, y_test, y_train_pred,
y_test_pred, show_figure=True, save_figure=False)
|
py
|
1a5e0bbb58527b1c713db635676e5da2d3228ca1
|
#!/share/apps/canopy-1.4.1/Canopy_64bit/User/bin/python
import numpy as np
import matplotlib
#matplotlib.use('Agg')
import matplotlib.pyplot as plt
import matplotlib.colors as colors
import matplotlib.cm as cmx
from matplotlib.ticker import (MultipleLocator, FormatStrFormatter,AutoMinorLocator)
import colormaps as cmaps
from matplotlib import rcParams
from matplotlib.ticker import MaxNLocator
import os
import IO.reader
fig_width = 3.0 # width in inches
fig_height = fig_width/1.333 # height in inches
fig_size = [fig_width,fig_height]
params = {'backend': 'Agg',
'axes.labelsize': 8,
'axes.titlesize': 8,
'font.size': 8,
'xtick.labelsize': 8,
'ytick.labelsize': 8,
'figure.figsize': fig_size,
'savefig.dpi' : 600,
'font.family': 'sans-serif',
'axes.linewidth' : 0.5,
'xtick.major.size' : 2,
'ytick.major.size' : 2,
'svg.fonttype' : 'none',
'pdf.fonttype' : 42
}
rcParams.update(params)
# Create inset
fig = plt.figure()
ax1 = fig.add_subplot(111)
lwidth=0.8
msize=4
#left, bottom, width, height = [0.3, 0.3, 0.3, 0.3]
#ax2 = fig.add_axes([left, bottom, width, height])
# colormap
n_curves = 11
values = list(range(n_curves))
plt.register_cmap(name='magma', cmap=cmaps.magma)
jet = cm = plt.get_cmap('magma')
cNorm = colors.Normalize(vmin=0, vmax=values[-1])
scalarMap = cmx.ScalarMappable(norm=cNorm, cmap=jet)
# Data
# column 1: q, column 2: P(q)
#Colors
col=list(range(n_curves))
col[0] = scalarMap.to_rgba(values[0])
col[1] = scalarMap.to_rgba(values[1])
col[2] = scalarMap.to_rgba(values[2])
col[3] = scalarMap.to_rgba(values[3])
col[4] = scalarMap.to_rgba(values[4])
col[5] = scalarMap.to_rgba(values[5])
col[6] = scalarMap.to_rgba(values[6])
col[7] = scalarMap.to_rgba(values[7])
col[8] = scalarMap.to_rgba(values[8])
col[9] = scalarMap.to_rgba(values[9])
#Labels
# Force data:
job_folder = "Force_mathcing_442301"
ref_data_path = "../Tutorial_04_preparation/ReferenceData"
guess_force_data_address = os.path.join(job_folder,"Output/mW_300K_1bar_500_guess.force")
ref_force_data_address = os.path.join(ref_data_path,"force/mW_300K_1bar_500/Ref.force")
best_force_data_address = os.path.join(job_folder,"Output/mW_300K_1bar_500_best.force")
# Force matching:
# read the force data in parallel:
# ----- Modify the following depending on the size your data -----
start_at = 1 # The nth configuration to start with (by default, starts with the 1st configuration)
work_load = 500 # total number of configurations
num_cores = 1 # total number of cores assigned
buffer_size = 2000 # total number of configuration read into memory at once for each time
total_atoms = 512
# ---------------------------------------------------------------
work_flow = IO.reader.parallel_assignment(start_at,work_load,num_cores,buffer_size)
# work_flow: A python list containing
# [((start nconfigs),(start,nconfigs)),((start,nconfigs) ...]
ref_output_lst = IO.reader.read_LAMMPS_traj_in_parallel(ref_force_data_address,num_cores,total_atoms,work_load,first=1,buffer_size=buffer_size)
guess_output_lst = IO.reader.read_LAMMPS_traj_in_parallel(guess_force_data_address,num_cores,total_atoms,work_load,first=1,buffer_size=buffer_size)
best_output_lst = IO.reader.read_LAMMPS_traj_in_parallel(best_force_data_address,num_cores,total_atoms,work_load,first=1,buffer_size=buffer_size)
x_force = np.arange(-100,100)
y_force = x_force
# Loop over each chunk of data and plot it
for ref_output,guess_output,best_output in zip(ref_output_lst, guess_output_lst, best_output_lst):
ref_data = ref_output.get()
guess_data = guess_output.get()
best_data = best_output.get()
ax1.scatter(ref_data, best_data,color="r")
ax1.set_xlim([-40,40])
ax1.set_ylim([-40,40])
ax1.set_xlabel("Reference forces: " + "$kcal \cdot (mol \cdot \AA)^{-1}$")
#ax1.set_ylabel("Guess forces: "+ "$kcal \cdot (mol \cdot \AA)^{-1}$")
ax1.set_ylabel("Best forces: "+ "$kcal \cdot (mol \cdot \AA)^{-1}$")
plt.plot(x_force,y_force,label="forces are equal",color="k")
#ax1.plot(ref_gr_data[:,0],ref_gr_data[:,1],color="k",label="Ref")
#ax1.plot(guess_gr_data[:,0],guess_gr_data[:,1],color="r",label="Guess")
#plt.plot(best_gr_data[:,0],best_gr_data[:,1],color="r",label="Best predicted")
#ax1.scatter(T,predicted_data_T,color="r",label="Best Predicted")
#plt.ylim([0.99,1.48])
#ax1.set_ylim([0.995,1.01])
#ax1.set_ylim([0.8,1.05])
# Plot P(q) vs q:
#ax1.set_title("production")
#ax1.scatter(tersoff[:,0],tersoff [:,1],s=6,label=plot_ID[0],color=col[0])
#ax1.scatter(tersoff_table[:,0],tersoff_table[:,1],s=6,label=plot_ID[1],color=col[5])
minorLocator = MultipleLocator(0.5)
majorLocator=MultipleLocator(5)
ax = plt.subplot(111)
handles, labels = ax.get_legend_handles_labels()
plt.legend(handles[::-1],labels[::-1],loc="upper center",fontsize=5,frameon=False, labelspacing=0.07,ncol=2)
#plt.legend(loc="upper right")
#plt.legend(loc=(0.1,0.385),fontsize=7,frameon=False, labelspacing=0.15,ncol=1)
left, bottom, width, height = [0.48, 0.62, 0.48, 0.3]
plt.subplots_adjust(left=0.2, bottom=0.22, right=0.95, top=0.90, wspace=0.0, hspace=0.0)
#plt.savefig('fig1a.pdf',transparent=True)
plt.savefig('force_mW_300K_best_ref.png',transparent=False)
#plt.savefig('fig1a.eps',transparent=True)
plt.show()
|
py
|
1a5e0c6f78ecfb72de58c2d9a53a86eca57f73fd
|
import pytest
from pymyenergi.client import MyenergiClient
from pymyenergi.eddi import Eddi
from pymyenergi.harvi import Harvi
from pymyenergi.zappi import Zappi
# All test coroutines will be treated as marked.
pytestmark = pytest.mark.asyncio
conn = {}
async def test_init(bypass_client_fetch_data):
client = MyenergiClient(conn)
await client.refresh()
assert len(client.devices) == 0
async def test_init_error(error_on_client_fetch_data):
client = MyenergiClient(conn)
with pytest.raises(Exception):
assert await client.refresh()
async def test_get_all_devices(client_fetch_data_fixture):
client = MyenergiClient(conn)
devices = await client.get_devices()
assert len(devices) == 5
async def test_get_eddi_devices(client_fetch_data_fixture):
client = MyenergiClient(conn)
devices = await client.get_devices("eddi")
assert len(devices) == 1
assert isinstance(devices[0], Eddi)
async def test_get_zappi_devices(client_fetch_data_fixture):
client = MyenergiClient(conn)
devices = await client.get_devices("zappi")
assert len(devices) == 2
assert isinstance(devices[1], Zappi)
async def test_get_harvi_devices(client_fetch_data_fixture):
client = MyenergiClient(conn)
devices = await client.get_devices("harvi")
assert len(devices) == 2
assert isinstance(devices[1], Harvi)
async def test_1p_harvi_eddi_solar_battery(client_1p_zappi_harvi_solar_battery_fixture):
client = MyenergiClient(conn)
devices = await client.get_devices("harvi")
assert len(devices) == 1
assert isinstance(devices[0], Harvi)
devices = await client.get_devices("zappi")
assert len(devices) == 1
assert isinstance(devices[0], Zappi)
assert client.power_grid == 10000
assert client.power_generation == 5000
assert client.power_battery == 3000
assert client.power_charging == 2000
assert client.consumption_home == 16000
|
py
|
1a5e0d1935a9776750b0a60cf223c1e785b38fef
|
txtFile=open("/Users/chenchaoyang/Desktop/python/Python/content/content3.txt","w")
txtFile.write("春晓\n")
txtFile.write("唐 孟浩然\n")
txtFile.write("春眠不觉晓,\n")
txtFile.write("处处闻啼鸟。\n")
txtFile.write("夜来风雨声,\n")
txtFile.write("花落知多少。\n")
txtFile.close()
|
py
|
1a5e0d82ebb4fcea00071e79dfef4753fa6602a8
|
"""Color edit tool."""
import sublime
import sublime_plugin
from .lib.coloraide import Color
import mdpopups
from . import ch_util as util
from .ch_mixin import _ColorMixin
import copy
from . import ch_tools as tools
DEF_EDIT = """---
markdown_extensions:
- markdown.extensions.attr_list
- markdown.extensions.def_list
- pymdownx.betterem
...
{}
## Format
<code>Source( + Backdrop)?( !blendmode)?( @colorspace)?</code>
## Instructions
Colors can be specified in any supported color space, but blend modes work best on<br>
RGB-ish color spaces. They can be converted and output to another color space with<br>
<code>@colorspace</code>.
If two colors are provided, joined with <code>+</code>, the colors will be blended.<br>
Default blend mode is <code>normal</code>, but can be changed with<br>
<code>!blendmode</code>.
Transparent backdrops will be <code>normal</code> blended with white.
"""
def parse_color(string, start=0, second=False):
"""
Parse colors.
The return of `more`:
- `None`: there is no more colors to process
- `True`: there are more colors to process
- `False`: there are more colors to process, but we failed to find them.
"""
length = len(string)
more = None
space = None
blend_mode = 'normal'
# First color
color = Color.match(string, start=start, fullmatch=False)
if color:
start = color.end
if color.end != length:
more = True
# Is the first color in the input or the second?
if not second:
# Plus sign indicating we have an additional color to mix
m = tools.RE_PLUS.match(string, start)
if m:
start = m.end(0)
more = start != length
else:
m = tools.RE_MODE.match(string, start)
if m:
blend_mode = m.group(1)
start = m.end(0)
m = tools.RE_SPACE.match(string, start)
if m:
text = m.group(1).lower()
if text in color.color.CS_MAP:
space = text
start = m.end(0)
more = None if start == length else False
else:
m = tools.RE_MODE.match(string, start)
if m:
blend_mode = m.group(1)
start = m.end(0)
# Color space indicator
m = tools.RE_SPACE.match(string, start)
if m:
text = m.group(1).lower()
if text in color.color.CS_MAP:
space = text
start = m.end(0)
more = None if start == length else False
if color:
color.end = start
return color, more, space, blend_mode
def evaluate(string):
"""Evaluate color."""
colors = []
try:
color = string.strip()
second = None
blend_mode = 'normal'
space = None
# Try to capture the color or the two colors to mix
first, more, space, blend_mode = parse_color(color)
if first and more is not None:
if more is False:
first = None
else:
second, more, space, blend_mode = parse_color(color, start=first.end, second=True)
if not second or more is False:
first = None
second = None
# Package up the color, or the two reference colors along with the mixed.
if first:
colors.append(first.color)
if second is None and space is not None and space != first.color.space():
colors[0] = first.color.convert(space)
if second:
colors.append(second.color)
colors.append(first.color.compose(second.color, blend=blend_mode, space=space, out_space=space))
except Exception:
colors = []
return colors
class ColorHelperBlendModeInputHandler(tools._ColorInputHandler):
"""Handle color inputs."""
def __init__(self, view, initial=None, **kwargs):
"""Initialize."""
self.color = initial
super().__init__(view, **kwargs)
def placeholder(self):
"""Placeholder."""
return "Color"
def initial_text(self):
"""Initial text."""
if self.color is not None:
return self.color
elif len(self.view.sel()) == 1:
self.setup_color_class()
text = self.view.substr(self.view.sel()[0])
if text:
color = None
try:
color = self.custom_color_class(text, filters=self.filters)
except Exception:
pass
if color is not None:
color = Color(color)
return color.to_string(**util.DEFAULT)
return ''
def preview(self, text):
"""Preview."""
style = self.get_html_style()
try:
colors = evaluate(text)
html = ""
for color in colors:
orig = Color(color)
message = ""
color_string = ""
check_space = 'srgb' if orig.space() not in util.SRGB_SPACES else orig.space()
if not orig.in_gamut(check_space):
orig = orig.fit("srgb")
message = '<br><em style="font-size: 0.9em;">* preview out of gamut</em>'
color_string = "<strong>Gamut Mapped</strong>: {}<br>".format(orig.to_string())
srgb = orig.convert('srgb', fit=True)
color_string += "<strong>Color</strong>: {}".format(color.to_string(**util.DEFAULT))
preview = srgb.to_string(**util.HEX_NA)
preview_alpha = srgb.to_string(**util.HEX)
preview_border = self.default_border
temp = Color(preview_border)
if temp.luminance() < 0.5:
second_border = temp.mix('white', 0.25, space="srgb").to_string(**util.HEX_NA)
else:
second_border = temp.mix('black', 0.25, space="srgb").to_string(**util.HEX_NA)
height = self.height * 3
width = self.width * 3
check_size = self.check_size(height, scale=8)
html += tools.PREVIEW_IMG.format(
mdpopups.color_box(
[preview, preview_alpha],
preview_border, second_border,
border_size=2, height=height, width=width, check_size=check_size
),
message,
color_string
)
if html:
return sublime.Html('<html><body>{}</body></html>'.format(style + html))
else:
return sublime.Html(
'<html><body>{}</body></html>'.format(mdpopups.md2html(self.view, DEF_EDIT.format(style)))
)
except Exception:
return sublime.Html(mdpopups.md2html(self.view, DEF_EDIT.format(style)))
def validate(self, color):
"""Validate."""
try:
color = evaluate(color)
return len(color) > 0
except Exception:
return False
class ColorHelperBlendModeCommand(_ColorMixin, sublime_plugin.TextCommand):
"""Open edit a color directly."""
def run(
self, edit, color_helper_blend_mode, initial=None, on_done=None, **kwargs
):
"""Run command."""
colors = evaluate(color_helper_blend_mode)
color = None
if colors:
color = colors[-1]
if color is not None:
if on_done is None:
on_done = {
'command': 'color_helper',
'args': {'mode': "result", "result_type": "__tool__:__blend__"}
}
call = on_done.get('command')
if call is None:
return
args = copy.deepcopy(on_done.get('args', {}))
args['color'] = color.to_string(**util.COLOR_FULL_PREC)
self.view.run_command(call, args)
def input(self, kwargs): # noqa: A003
"""Input."""
return ColorHelperBlendModeInputHandler(self.view, **kwargs)
|
py
|
1a5e0de647ef014672a59f0788a74144694185e3
|
import argparse
import os
import math
# import time
import numpy as np
import cv2
import matplotlib.pyplot as plt
import tensorflow as tf
import progressbar
from waymo_open_dataset.utils import range_image_utils
from waymo_open_dataset.utils import transform_utils
from waymo_open_dataset.utils import test_utils
from waymo_open_dataset.utils import box_utils
from waymo_open_dataset import dataset_pb2 as open_dataset
from adapter_lib import *
import pdb
############################Config###########################################
# path to waymo dataset "folder" (all .tfrecord files in that folder will
# be converted)
DATA_PATH = '/media/trail/harddrive/datasets/Waymo/original/validation'
# path to save kitti dataset
KITTI_PATH = '/media/trail/harddrive/datasets/Waymo/waymo/validation'
# location filter, use this to convert your preferred location
LOCATION_FILTER = False
LOCATION_NAME = ['location_sf']
# max indexing length
INDEX_LENGTH = 15
# as name
IMAGE_FORMAT = 'png'
# do not change
LABEL_PATH = KITTI_PATH + '/label_0'
LABEL_ALL_PATH = KITTI_PATH + '/label_all'
IMAGE_PATH = KITTI_PATH + '/image_0'
CALIB_PATH = KITTI_PATH + '/calib'
LIDAR_PATH = KITTI_PATH + '/velodyne'
IMG_CALIB_PATH = KITTI_PATH + '/img_calib'
###############################################################################
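# With the constants above, the converted KITTI-style tree ends up as (sketch):
#   KITTI_PATH/
#     image_0/  label_0/  label_all/  calib/  velodyne/  img_calib/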
class Adapter:
def __init__(self):
self.__lidar_list = ['_FRONT', '_FRONT_RIGHT',
'_FRONT_LEFT', '_SIDE_RIGHT', '_SIDE_LEFT']
self.__type_list = ['UNKNOWN', 'VEHICLE',
'PEDESTRIAN', 'SIGN', 'CYCLIST']
self.__file_names = []
self.T_front_cam_to_ref = []
self.T_vehicle_to_front_cam = []
def cvt(self, args, folder, start_ind):
""" convert dataset from Waymo to KITTI
Args:
args: parsed command-line options (camera type, keyframe interval, test flag)
folder: name of the Waymo record folder (under DATA_PATH) to convert
start_ind: frame index assigned to the first converted frame
return:
the index of the next frame that would be written
"""
self.start_ind = start_ind
self.get_file_names(DATA_PATH + '/' + folder)
print("Converting ..." + folder)
self.create_folder(args.camera_type)
bar = progressbar.ProgressBar(maxval=len(self.__file_names) + 1,
widgets=[progressbar.Percentage(), ' ',
progressbar.Bar(
marker='>', left='[', right=']'), ' ',
progressbar.ETA()])
tf.enable_eager_execution()
file_num = 1
frame_num = 0
frame_name = self.start_ind
label_exists = False
print("start converting ...")
bar.start()
for file_idx, file_name in enumerate(self.__file_names):
print('File {}/{}'.format(file_idx, len(self.__file_names)))
dataset = tf.data.TFRecordDataset(file_name, compression_type='')
for data in dataset:
frame = open_dataset.Frame()
frame.ParseFromString(bytearray(data.numpy()))
if (frame_num % args.keyframe) == 0:
if LOCATION_FILTER and frame.context.stats.location not in LOCATION_NAME:
continue
if not args.test:
label_exists = self.save_label(frame, frame_name, args.camera_type, False, True)
if args.test == label_exists:
frame_num += 1
continue
self.save_calib(frame, frame_name)
self.save_label(
frame, frame_name, args.camera_type)
self.save_image(frame, frame_name, args.camera_type)
self.save_lidar(frame, frame_name)
self.save_image_calib(frame, frame_name)
# print("image:{}\ncalib:{}\nlidar:{}\nlabel:{}\n".format(str(s1-e1),str(s2-e2),str(s3-e3),str(s4-e4)))
frame_name += 1
frame_num += 1
bar.update(file_num)
file_num += 1
bar.finish()
print("\nfinished ...")
return frame_name
def save_image(self, frame, frame_num, cam_type):
""" parse and save the images in png format
:param frame: open dataset frame proto
:param frame_num: the current frame number
:return:
"""
for img in frame.images:
if cam_type == 'all' or cam_type == str(img.name - 1):
img_path = IMAGE_PATH + '/' + \
str(frame_num).zfill(INDEX_LENGTH) + '.' + IMAGE_FORMAT
img = cv2.imdecode(np.frombuffer(
img.image, np.uint8), cv2.IMREAD_COLOR)
# cv2.imdecode returns BGR; convert to RGB before saving with matplotlib
rgb_img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
plt.imsave(img_path, rgb_img, format=IMAGE_FORMAT)
def save_calib(self, frame, frame_num, kitti_format=True):
""" parse and save the calibration data
:param frame: open dataset frame proto
:param frame_num: the current frame number
:return:
"""
fp_calib = open(CALIB_PATH + '/' +
str(frame_num).zfill(INDEX_LENGTH) + '.txt', 'w+')
self.T_front_cam_to_ref = np.array([
[0.0, -1.0, 0.0],
[0.0, 0.0, -1.0],
[1.0, 0.0, 0.0]
])
camera_calib = []
R0_rect = ["%e" % i for i in np.eye(3).flatten()]
Tr_velo_to_cam = []
calib_context = ''
for camera in frame.context.camera_calibrations:
tmp = np.array(camera.extrinsic.transform).reshape(4, 4)
tmp = self.cart_to_homo(self.T_front_cam_to_ref) @ np.linalg.inv(tmp)
Tr_velo_to_cam.append(["%e" % i for i in tmp[:3,:].reshape(12)])
for cam in frame.context.camera_calibrations:
tmp = np.zeros((3, 4))
tmp[0, 0] = cam.intrinsic[0]
tmp[1, 1] = cam.intrinsic[1]
tmp[0, 2] = cam.intrinsic[2]
tmp[1, 2] = cam.intrinsic[3]
tmp[2, 2] = 1
tmp = list(tmp.reshape(12))
tmp = ["%e" % i for i in tmp]
camera_calib.append(tmp)
T_front_cam_to_vehicle = np.array(frame.context.camera_calibrations[0].extrinsic.transform).reshape(4, 4)
self.T_vehicle_to_front_cam = np.linalg.inv(T_front_cam_to_vehicle)
for i in range(5):
calib_context += "P" + str(i) + ": " + \
" ".join(camera_calib[i]) + '\n'
calib_context += "R0_rect" + ": " + " ".join(R0_rect) + '\n'
for i in range(5):
calib_context += "Tr_velo_to_cam_" + \
str(i) + ": " + " ".join(Tr_velo_to_cam[i]) + '\n'
calib_context += "timestamp_micros: " + \
str(frame.timestamp_micros) + '\n'
calib_context += "context_name: " + str(frame.context.name) + '\n'
fp_calib.write(calib_context)
fp_calib.close()
def save_lidar(self, frame, frame_num):
""" parse and save the lidar data in psd format
:param frame: open dataset frame proto
:param frame_num: the current frame number
:return:
"""
range_images, range_image_top_pose = self.parse_range_image_and_camera_projection(
frame)
points, intensity = self.convert_range_image_to_point_cloud(
frame,
range_images,
range_image_top_pose)
points_all = np.concatenate(points, axis=0)
intensity_all = np.concatenate(intensity, axis=0)
point_cloud = np.column_stack((points_all, intensity_all))
pc_path = LIDAR_PATH + '/' + \
str(frame_num).zfill(INDEX_LENGTH) + '.bin'
point_cloud.tofile(pc_path)
def save_label(self, frame, frame_num, cam_type, kitti_format=False, check_label_exists = False):
""" parse and save the label data in .txt format
:param frame: open dataset frame proto
:param frame_num: the current frame number
:return:
"""
# get point cloud in the frame
range_images, range_image_top_pose = self.parse_range_image_and_camera_projection(
frame)
points, intensity = self.convert_range_image_to_point_cloud(
frame,
range_images,
range_image_top_pose)
points_all = tf.convert_to_tensor(
np.concatenate(points, axis=0), dtype=np.float32)
# preprocess bounding box data
id_to_bbox = dict()
id_to_name = dict()
for labels in frame.projected_lidar_labels:
name = labels.name
for label in labels.labels:
bbox = [label.box.center_x - label.box.length / 2, label.box.center_y - label.box.width / 2,
label.box.center_x + label.box.length / 2, label.box.center_y + label.box.width / 2]
id_to_bbox[label.id] = bbox
id_to_name[label.id] = name - 1
Tr_velo_to_cam = []
recorded_label = []
label_lines = ''
label_all_lines = ''
"""
if kitti_format:
for camera in frame.context.camera_calibrations:
tmp = np.array(camera.extrinsic.transform).reshape(4, 4)
tmp = np.linalg.inv(tmp)
axes_transformation = np.array([[0, -1, 0, 0],
[0, 0, -1, 0],
[1, 0, 0, 0],
[0, 0, 0, 1]])
tmp = np.matmul(axes_transformation, tmp)
Tr_velo_to_cam.append(tmp)
"""
for obj in frame.laser_labels:
# calculate bounding box
bounding_box = None
name = None
id = obj.id
for lidar in self.__lidar_list:
if id + lidar in id_to_bbox:
bounding_box = id_to_bbox.get(id + lidar)
name = str(id_to_name.get(id + lidar))
break
if bounding_box is None or name is None:
continue
box = tf.convert_to_tensor(
[obj.box.center_x, obj.box.center_y, obj.box.center_z, obj.box.length, obj.box.width, obj.box.height, obj.box.heading], dtype=np.float32)
box = tf.reshape(box, (1, 7))
num_points = box_utils.compute_num_points_in_box_3d(
points_all, box)
num_points = num_points.numpy()[0]
detection_difficulty = obj.detection_difficulty_level
my_type = self.__type_list[obj.type]
truncated = 0
occluded = 0
height = obj.box.height
width = obj.box.width
length = obj.box.length
x = obj.box.center_x
y = obj.box.center_y
z = obj.box.center_z - height/2
if not check_label_exists:
pt_ref = self.cart_to_homo(self.T_front_cam_to_ref) @ self.T_vehicle_to_front_cam @ np.array([x,y,z,1]).reshape((4,1))
x, y, z, _ = pt_ref.flatten().tolist()
rotation_y = -obj.box.heading - np.pi/2
beta = math.atan2(x, z)
alpha = (rotation_y + beta - math.pi / 2) % (2 * math.pi)
# save the labels
line = my_type + ' {} {} {} {} {} {} {} {} {} {} {} {} {} {} {} {}\n'.format(round(truncated, 2),
occluded,
round(
alpha, 2),
round(
bounding_box[0], 2),
round(
bounding_box[1], 2),
round(
bounding_box[2], 2),
round(
bounding_box[3], 2),
round(
height, 2),
round(
width, 2),
round(
length, 2),
round(
x, 2),
round(
y, 2),
round(
z, 2),
round(
rotation_y, 2),
num_points,
detection_difficulty)
line_all = line[:-1] + ' ' + name + '\n'
# store the label
label_all_lines += line_all
if (name == cam_type):
label_lines += line
recorded_label.append(line)
if len(recorded_label) == 0:
return False
else:
fp_label_all = open(LABEL_ALL_PATH + '/' +
str(frame_num).zfill(INDEX_LENGTH) + '.txt', 'w+')
fp_label = open(LABEL_PATH + '/' +
str(frame_num).zfill(INDEX_LENGTH) + '.txt', 'w+')
fp_label.write(label_lines)
fp_label.close()
fp_label_all.write(label_all_lines)
fp_label_all.close()
return True
def save_image_calib(self, frame, frame_num):
fp_image_calib = open(IMG_CALIB_PATH + '/' +
str(frame_num).zfill(INDEX_LENGTH) + '.txt', 'w+')
camera_calib = []
pose = []
velocity = []
timestamp = []
shutter = []
trigger_time = []
readout_done_time = []
calib_context = ''
for camera in frame.images:
tmp = np.array(camera.pose.transform).reshape((16,))
pose.append(["%e" % i for i in tmp])
tmp = np.zeros(6)
tmp[0] = camera.velocity.v_x
tmp[1] = camera.velocity.v_y
tmp[2] = camera.velocity.v_z
tmp[3] = camera.velocity.w_x
tmp[4] = camera.velocity.w_y
tmp[5] = camera.velocity.w_z
velocity.append(["%e" % i for i in tmp])
timestamp.append(camera.pose_timestamp)
shutter.append(camera.shutter)
trigger_time.append(camera.camera_trigger_time)
readout_done_time.append(camera.camera_readout_done_time)
for i in range(5):
calib_context += "Pose_" + str(i) + ": " + \
" ".join(pose[i]) + '\n'
for i in range(5):
calib_context += "Velocity_" + str(i) + ": " + \
" ".join(velocity[i]) + '\n'
for i in range(5):
calib_context += "Timestamp_" + str(i) + ": " + \
str(timestamp[i]) + '\n'
for i in range(5):
calib_context += "Shutter_" + str(i) + ": " + \
str(shutter[i]) + '\n'
for i in range(5):
calib_context += "Trigger_" + str(i) + ": " + \
str(trigger_time[i]) + '\n'
for i in range(5):
calib_context += "Readout_" + str(i) + ": " + \
str(readout_done_time[i]) + '\n'
fp_image_calib.write(calib_context)
fp_image_calib.close()
def get_file_names(self, folder):
for i in os.listdir(folder):
if i.split('.')[-1] == 'tfrecord':
self.__file_names.append(folder + '/' + i)
def cart_to_homo(self, mat):
ret = np.eye(4)
if mat.shape == (3, 3):
ret[:3, :3] = mat
elif mat.shape == (3, 4):
ret[:3, :] = mat
else:
raise ValueError(mat.shape)
return ret
def create_folder(self, cam_type):
if not os.path.exists(KITTI_PATH):
os.mkdir(KITTI_PATH)
if not os.path.exists(CALIB_PATH):
os.mkdir(CALIB_PATH)
if not os.path.exists(LIDAR_PATH):
os.mkdir(LIDAR_PATH)
if not os.path.exists(LABEL_ALL_PATH):
os.mkdir(LABEL_ALL_PATH)
if not os.path.exists(IMG_CALIB_PATH):
os.mkdir(IMG_CALIB_PATH)
if not os.path.exists(IMAGE_PATH):
os.mkdir(IMAGE_PATH)
if not os.path.exists(LABEL_PATH):
os.mkdir(LABEL_PATH)
def extract_intensity(self, frame, range_images, lidar_num):
""" extract the intensity from the original range image
:param frame: open dataset frame proto
:param range_images: dict of range images keyed by laser name
:param lidar_num: the number of current lidar
:return:
"""
intensity_0 = np.array(range_images[lidar_num][0].data).reshape(-1, 4)
intensity_0 = intensity_0[:, 1]
intensity_1 = np.array(range_images[lidar_num][
1].data).reshape(-1, 4)[:, 1]
return intensity_0, intensity_1
def image_show(self, data, name, layout, cmap=None):
"""Show an image."""
plt.subplot(*layout)
plt.imshow(tf.image.decode_jpeg(data), cmap=cmap)
plt.title(name)
plt.grid(False)
plt.axis('off')
def parse_range_image_and_camera_projection(self, frame):
"""Parse range images and camera projections given a frame.
Args:
frame: open dataset frame proto
Returns:
range_images: A dict of {laser_name,
[range_image_first_return, range_image_second_return]}.
camera_projections: A dict of {laser_name,
[camera_projection_from_first_return,
camera_projection_from_second_return]}.
range_image_top_pose: range image pixel pose for top lidar.
"""
self.__range_images = {}
# camera_projections = {}
# range_image_top_pose = None
for laser in frame.lasers:
if len(laser.ri_return1.range_image_compressed) > 0:
range_image_str_tensor = tf.decode_compressed(
laser.ri_return1.range_image_compressed, 'ZLIB')
ri = open_dataset.MatrixFloat()
ri.ParseFromString(bytearray(range_image_str_tensor.numpy()))
self.__range_images[laser.name] = [ri]
if laser.name == open_dataset.LaserName.TOP:
range_image_top_pose_str_tensor = tf.decode_compressed(
laser.ri_return1.range_image_pose_compressed, 'ZLIB')
range_image_top_pose = open_dataset.MatrixFloat()
range_image_top_pose.ParseFromString(
bytearray(range_image_top_pose_str_tensor.numpy()))
# camera_projection_str_tensor = tf.decode_compressed(
# laser.ri_return1.camera_projection_compressed, 'ZLIB')
# cp = open_dataset.MatrixInt32()
# cp.ParseFromString(bytearray(camera_projection_str_tensor.numpy()))
# camera_projections[laser.name] = [cp]
if len(laser.ri_return2.range_image_compressed) > 0:
range_image_str_tensor = tf.decode_compressed(
laser.ri_return2.range_image_compressed, 'ZLIB')
ri = open_dataset.MatrixFloat()
ri.ParseFromString(bytearray(range_image_str_tensor.numpy()))
self.__range_images[laser.name].append(ri)
#
# camera_projection_str_tensor = tf.decode_compressed(
# laser.ri_return2.camera_projection_compressed, 'ZLIB')
# cp = open_dataset.MatrixInt32()
# cp.ParseFromString(bytearray(camera_projection_str_tensor.numpy()))
# camera_projections[laser.name].append(cp)
return self.__range_images, range_image_top_pose
def plot_range_image_helper(self, data, name, layout, vmin=0, vmax=1, cmap='gray'):
"""Plots range image.
Args:
data: range image data
name: the image title
layout: plt layout
vmin: minimum value of the passed data
vmax: maximum value of the passed data
cmap: color map
"""
plt.subplot(*layout)
plt.imshow(data, cmap=cmap, vmin=vmin, vmax=vmax)
plt.title(name)
plt.grid(False)
plt.axis('off')
def get_range_image(self, laser_name, return_index):
"""Returns range image given a laser name and its return index."""
return self.__range_images[laser_name][return_index]
def show_range_image(self, range_image, layout_index_start=1):
"""Shows range image.
Args:
range_image: the range image data from a given lidar of type MatrixFloat.
layout_index_start: layout offset
"""
range_image_tensor = tf.convert_to_tensor(range_image.data)
range_image_tensor = tf.reshape(
range_image_tensor, range_image.shape.dims)
lidar_image_mask = tf.greater_equal(range_image_tensor, 0)
range_image_tensor = tf.where(lidar_image_mask, range_image_tensor,
tf.ones_like(range_image_tensor) * 1e10)
range_image_range = range_image_tensor[..., 0]
range_image_intensity = range_image_tensor[..., 1]
range_image_elongation = range_image_tensor[..., 2]
self.plot_range_image_helper(range_image_range.numpy(), 'range',
[8, 1, layout_index_start], vmax=75, cmap='gray')
self.plot_range_image_helper(range_image_intensity.numpy(), 'intensity',
[8, 1, layout_index_start + 1], vmax=1.5, cmap='gray')
self.plot_range_image_helper(range_image_elongation.numpy(), 'elongation',
[8, 1, layout_index_start + 2], vmax=1.5, cmap='gray')
def convert_range_image_to_point_cloud(self, frame, range_images, range_image_top_pose, ri_index=0):
"""Convert range images to point cloud.
Args:
frame: open dataset frame
range_images: A dict of {laser_name,
[range_image_first_return, range_image_second_return]}.
camera_projections: A dict of {laser_name,
[camera_projection_from_first_return,
camera_projection_from_second_return]}.
range_image_top_pose: range image pixel pose for top lidar.
ri_index: 0 for the first return, 1 for the second return.
Returns:
points: {[N, 3]} list of 3d lidar points of length 5 (number of lidars).
cp_points: {[N, 6]} list of camera projections of length 5
(number of lidars).
intensity: {[N, 1]} list of intensity of length 5 (number of lidars).
"""
calibrations = sorted(
frame.context.laser_calibrations, key=lambda c: c.name)
# lasers = sorted(frame.lasers, key=lambda laser: laser.name)
points = []
# cp_points = []
intensity = []
frame_pose = tf.convert_to_tensor(
np.reshape(np.array(frame.pose.transform), [4, 4]))
# [H, W, 6]
range_image_top_pose_tensor = tf.reshape(
tf.convert_to_tensor(range_image_top_pose.data),
range_image_top_pose.shape.dims)
# [H, W, 3, 3]
range_image_top_pose_tensor_rotation = transform_utils.get_rotation_matrix(
range_image_top_pose_tensor[...,
0], range_image_top_pose_tensor[..., 1],
range_image_top_pose_tensor[..., 2])
range_image_top_pose_tensor_translation = range_image_top_pose_tensor[
..., 3:]
range_image_top_pose_tensor = transform_utils.get_transform(
range_image_top_pose_tensor_rotation,
range_image_top_pose_tensor_translation)
for c in calibrations:
range_image = range_images[c.name][ri_index]
if len(c.beam_inclinations) == 0:
beam_inclinations = range_image_utils.compute_inclination(
tf.constant([c.beam_inclination_min,
c.beam_inclination_max]),
height=range_image.shape.dims[0])
else:
beam_inclinations = tf.constant(c.beam_inclinations)
beam_inclinations = tf.reverse(beam_inclinations, axis=[-1])
extrinsic = np.reshape(np.array(c.extrinsic.transform), [4, 4])
range_image_tensor = tf.reshape(
tf.convert_to_tensor(range_image.data), range_image.shape.dims)
pixel_pose_local = None
frame_pose_local = None
if c.name == open_dataset.LaserName.TOP:
pixel_pose_local = range_image_top_pose_tensor
pixel_pose_local = tf.expand_dims(pixel_pose_local, axis=0)
frame_pose_local = tf.expand_dims(frame_pose, axis=0)
range_image_mask = range_image_tensor[..., 0] > 0
range_image_cartesian = range_image_utils.extract_point_cloud_from_range_image(
tf.expand_dims(range_image_tensor[..., 0], axis=0),
tf.expand_dims(extrinsic, axis=0),
tf.expand_dims(tf.convert_to_tensor(
beam_inclinations), axis=0),
pixel_pose=pixel_pose_local,
frame_pose=frame_pose_local)
range_image_cartesian = tf.squeeze(range_image_cartesian, axis=0)
points_tensor = tf.gather_nd(range_image_cartesian,
tf.where(range_image_mask))
intensity_tensor = tf.gather_nd(range_image_tensor,
tf.where(range_image_mask))
# cp = camera_projections[c.name][0]
# cp_tensor = tf.reshape(tf.convert_to_tensor(cp.data), cp.shape.dims)
# cp_points_tensor = tf.gather_nd(cp_tensor, tf.where(range_image_mask))
points.append(points_tensor.numpy())
# cp_points.append(cp_points_tensor.numpy())
intensity.append(intensity_tensor.numpy()[:, 1])
return points, intensity
def rgba(self, r):
"""Generates a color based on range.
Args:
r: the range value of a given point.
Returns:
The color for a given range
"""
c = plt.get_cmap('jet')((r % 20.0) / 20.0)
c = list(c)
c[-1] = 0.5 # alpha
return c
def plot_image(self, camera_image):
"""Plot a cmaera image."""
plt.figure(figsize=(20, 12))
plt.imshow(tf.image.decode_jpeg(camera_image.image))
plt.grid("off")
def plot_points_on_image(self, projected_points, camera_image, rgba_func, point_size=5.0):
"""Plots points on a camera image.
Args:
projected_points: [N, 3] numpy array. The inner dims are
[camera_x, camera_y, range].
camera_image: jpeg encoded camera image.
rgba_func: a function that generates a color from a range value.
point_size: the point size.
"""
self.plot_image(camera_image)
xs = []
ys = []
colors = []
for point in projected_points:
xs.append(point[0]) # width, col
ys.append(point[1]) # height, row
colors.append(rgba_func(point[2]))
plt.scatter(xs, ys, c=colors, s=point_size, edgecolors="none")
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description='Save Waymo dataset into Kitti format')
parser.add_argument('--keyframe',
type=int,
default=10,
help='Save one frame out of every specified number of frames. Default is 10.')
parser.add_argument('--camera_type',
type=str,
default="0",
help='Select camera views to save. Input argument from 0 to 4 or all')
parser.add_argument('--start_ind',
type=int,
default=0,
help='File number starts counting from this index')
parser.add_argument('--test',
type=bool,
default=False,
help='if true, does not save any ground truth data')
args = parser.parse_args()
start_ind = args.start_ind
path, dirs, files = next(os.walk(DATA_PATH))
dirs.sort()
for directory in dirs:
adapter = Adapter()
last_ind = adapter.cvt(args, directory, start_ind)
start_ind = last_ind
|
py
|
1a5e0e5df59035c3fd0a9db692d15231a3f4b8c4
|
# Copyright (C) 2014 Universidad Politecnica de Madrid
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import horizon
from openstack_dashboard.dashboards.idm import dashboard
class myApplications(horizon.Panel):
name = ("My Applications")
slug = "myApplications"
dashboard.Idm.register(myApplications)
|
py
|
1a5e0f6f67818a56755389d92f57b1ed85935915
|
# Global program step time in seconds
STEP_TIME = 0.004
# Left Motor pins
PINS_MOTOR_LEFT = [2, 3, 4, 17]
# Right Motor pins
PINS_MOTOR_RIGHT = [27, 22, 10, 9]
# IR Sensor Pins
PINS_SENSOR = [14, 15, 18]
# Button pin
PIN_BUTTON = 26
# Led Pins
PINS_FRONT_LED = [12, 16, 20, 21]
# Left indicator led
PINS_LEFT_LED = 13
# Right indicator led
PINS_RIGHT_LED = 19
# Degrees the wheel turns per motor step
MOTOR_DEG_PER_STEP = 5.625 / 64
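# Worked example (illustrative only): a full 360-degree wheel revolution therefore
# takes 360 / MOTOR_DEG_PER_STEP = 360 / (5.625 / 64) = 4096 motor steps.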
# Url to get new deliveries
DELIVERY_URL = "http://rutgeruijtendaal.com/PiRiders/api.php?function=deliveryInfo"
# Url to say a delivery is done
DELIVERED_URL = "http://rutgeruijtendaal.com/PiRiders/api.php?function=deliveryUpdate"
|
py
|
1a5e1012db3006c556ccf8b391aef68a24b3baa7
|
# Everything is better with friends: Executing SAS® code in Python scripts with
# SASPy, and turbocharging your SAS programming with open-source tooling
#
# Half-day class, Western Users of SAS Software (WUSS) 2019
###############################################################################
# Exercises 6-9: SASPy Data Round Trip #
###############################################################################
# Lines 12-13 load modules needed for exercises and should be left as-is
from class_setup import print_with_title
from saspy import SASsession
###############################################################################
# #
# Exercise 6. [Python w/ saspy] Connect to a SAS kernel #
# #
# Instructions: Uncomment the code immediately below, and then execute #
# #
###############################################################################
sas = SASsession()
print_with_title(type(sas), 'The type of SAS session object sas:')
# Notes:
#
# 1. A SASsession object named sas is created, and the following are printed:
# * confirmation a SAS session has been established
# * the type of object sas (which is saspy.sasbase.SASsession)
#
# 2. As with the DataFrame object type above, SASsession is not built into
# Python, so we had to import its definition from the saspy module at the
# beginning of this file.
#
# 3. All subsequent exercises in this file will assume the object sas exists,
# so please don't comment out the line creating it.
###############################################################################
# #
# Exercise 7. [Python w/ pandas & saspy] Load a SAS dataset into a DataFrame #
# #
# Instructions: Uncomment the code immediately below, and then execute #
# #
###############################################################################
# Original Version
fish_df = sas.sasdata2dataframe(table='fish', libref='sashelp')
print_with_title(fish_df, 'The value of fish_df:')
print_with_title(
fish_df.describe(),
'The Python equivalent of PROC MEANS using fish_df:'
)
print_with_title(fish_df.head(), 'The first five rows of fish_df:')
# Pass a numerical parameter to the head method
print(fish_df.head(4))
# Change the head method to tail
print(fish_df.tail())
# View other portions of fish_df
print(fish_df.iloc[0:2, 1:4])
# Notes:
#
# 1. A DataFrame object named fish_df with dimensions 159x7 (159 rows and 7
# columns) is created from the SAS dataset fish in the sashelp library, and
# the following are printed:
# * the type of object fish_df (which is
# <class 'pandas.core.frame.DataFrame'>)
# * the first five rows of fish_df, which are at row indices 0 through 4
# since Python uses zero-based indexing
# * summary information about the 6 numerical columns of fish_df, which is
# obtained by fish_df calling its describe method (the pandas equivalent
# of the SAS MEANS procedure)
#
# 2. The sas object represents a connection to a SAS session and was created
# in a previous exercise. Here, sas calls its sasdata2dataframe method to
# access the SAS library sashelp defined within this SAS session and to load
# the entire contents of SAS dataset sashelp.fish into the DataFrame
# fish_df.
#
# 3. All subsequent exercises in this file will assume the object fish_df
# exists, so please don't comment out the line creating it.
#
# 4. For additional practice, try any or all of the following:
# * Pass a numerical parameter to the head method to see a different number
# of rows (e.g., fish_df.head(4)).
# * Change the head method to tail to see a different part of the dataset.
# * To view other portions of fish_df, explore the more advanced indexing
# methods loc and iloc explained at
# https://brohrer.github.io/dataframe_indexing.html.
###############################################################################
# #
# Exercise 8. [Python w/ pandas] Manipulate a DataFrame #
# #
# Instructions: Uncomment the code immediately below, and then execute #
# #
###############################################################################
# Original Version
fish_df_g = fish_df.groupby('Species')
fish_df_gs = fish_df_g['Weight']
fish_df_gsa = fish_df_gs.agg(['count', 'std', 'mean', 'min', 'max'])
print_with_title(
fish_df_gsa,
'The Python equivalent of PROC MEANS with CLASS and VAR statements:'
)
# Move around and/or remove functions used for aggregation
fish_df_gsa = fish_df_gs.agg(['min', 'mean', 'max'])
print(fish_df_gsa)
# Change the variable whose values are summarized to 'Width'
fish_df_gs = fish_df_g['Width']
fish_df_gsa = fish_df_gs.agg(['count', 'std', 'mean', 'min', 'max'])
print(fish_df_gsa)
# Print out the results of using the one-liner version
print(
fish_df.groupby('Species')['Weight'].agg(
['count', 'std', 'mean', 'min', 'max']
)
)
# Notes:
#
# 1. The DataFrame fish_df, which was created in an exercise above from the SAS
# dataset sashelp.fish, is manipulated, and the following is printed:
# * a table giving the number of rows, standard deviation, mean, min, and
# max of Weight in fish_df when aggregated by Species
#
# 2. This is accomplished by creating a series of new DataFrames:
# * The DataFrame fish_df_g is created from fish_df using the groupby method
# to group rows by values in column 'Species'.
# * The DataFrame fish_df_gs is created from fish_df_g by extracting the
# 'Weight' column using bracket notation.
# * The DataFrame fish_df_gsa is created from fish_df_gs using the agg
# method to aggregate by the functions in the list ['count', 'std',
# 'mean', 'min', 'max'].
#
# 3. Identical results could be obtained using the following SAS code:
# proc means data=sashelp.fish std mean min max;
# class species;
# var Weight;
# run;
# However, while PROC MEANS operates on SAS datasets row-by-row from disk,
# DataFrames are stored entirely in main memory. This allows any number of
# DataFrame operations to be combined for on-the-fly reshaping using "method
# chaining." In other words, fish_df_gsa could have instead been created
# with the following one-liner, which avoids the need for intermediate
# DataFrames (and thus executes much more quickly):
# fish_df_gsa = fish_df.groupby('Species')['Weight'].agg(
# ['count', 'std', 'mean', 'min', 'max']
# )
#
# 4. All subsequent exercises in this file will assume the object fish_df_gsa
# exists, so please don't comment out the line(s) creating it.
#
# 5. For additional practice, try any or all of the following:
# * Move around and/or remove functions used for aggregation, and see how
# the output changes.
# * Change the variable whose values are summarized to 'Width'.
# * Print out the results of using the one-liner version.
###############################################################################
# #
# Exercise 9. [Python w/ pandas & saspy] Load a DataFrame into a SAS dataset #
# #
# Instructions: Uncomment the code immediately below, and then execute #
# #
###############################################################################
# Original Version
sas.dataframe2sasdata(fish_df_gsa, table="fish_sds_gsa", libref="Work")
sas_submit_return_value = sas.submit(
'''
PROC PRINT DATA=fish_sds_gsa;
RUN;
''',
results='TEXT'
)
sas_submit_results = sas_submit_return_value['LST']
print_with_title(
sas_submit_results,
'SAS results from PROC PRINT applied to the new SAS dataset Work.fish_sds_gsa:'
)
# Print out the SAS log
print(sas_submit_return_value['LOG'])
# Change the SAS procedure
print(
sas.submit(
'''
PROC CONTENTS DATA=fish_sds_gsa;
RUN;
''',
results='TEXT'
)['LST']
)
# Notes:
#
# 1. The DataFrame fish_df_gsa, which was created in an exercise above from the
# SAS dataset sashelp.fish, is used to create the new SAS dataset
# Work.fish_sds_gsa. The SAS PRINT procedure is then called, and the
# following is printed:
# * the output returned by PROC PRINT
#
# 2. The sas object, which was created in a cell above, is a persistent
# connection to a SAS session, and two of its methods are used as follows:
# * The dataframe2sasdata method writes the contents of the DataFrame
# fish_df_gsa to the SAS dataset fish_sds_gsa stored in the Work library.
# (Note: The row indexes of the DataFrame fish_df_gsa are lost when the
# SAS dataset fish_sds_gsa is created.)
# * The submit method is used to submit the PROC PRINT step to the SAS
# kernel, and a dictionary is returned with the following two key-value
# pairs:
# - sas_submit_return_value['LST'] is a string comprising the results from
# executing PROC PRINT, which will be in plain text because the
# results='TEXT' was used
# - sas_submit_return_value['LOG'] is a string comprising the plain-text
# log resulting from executing PROC PRINT
#
# 3. Python strings surrounded by single quotes (e.g., 'Hello, World!') cannot
# be written across multiple lines of code, whereas strings surrounded by
# triple quotes (e.g., the argument to the submit method) can.
#
# 4. For additional practice, try any or all of the following:
# * Print out the SAS log.
# * Change the SAS procedure used to interact with SAS dataset
# Work.fish_sds_gsa (e.g., try PROC CONTENTS).
|
py
|
1a5e1183f2eb5107b4351c1cb4e0df79a4389c36
|
import asyncio
import docopt
import logging
import pymongo
import pymongo.errors
import sys
# from zonlib.scripts.utils import create_app, get_host_by_db_name
from tycho.app import create_app
from tycho.app import init_app
# from zonlib.async_db.connection import COLLECTIONS
LOGGER = logging.getLogger(__name__)
LOGGER.setLevel(logging.DEBUG)
async def create_indexes(app):
# the check can not be replaced with None check because
# pymongo/motor returns collection object if any property is missing
# hence explicitly checking its type as list
if not isinstance(app['db'].event.indexes, list):
LOGGER.warn("Collection {0} on {1} has no attribute 'indexes'".format(
'event', app.async_db.name))
# not throwing exception here but continue to run script with other
# valid collections
return
for index in app['db'].event.indexes:
LOGGER.debug(
"Creating index {0} for {1} collection with unique constraint as {2} \n".format(
index['keys'], app['db'].event, index['unique'])
)
try:
await app['db'].event.collection.create_index(index['keys'],
unique=index['unique'])
except pymongo.errors.OperationFailure as e:
LOGGER.exception(
"Error occured while creating the index {0} on collection {1}".format(
index, 'event')
)
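# For reference, each entry in app['db'].event.indexes is assumed (based on how the
# entries are used above) to be a dict shaped roughly like:
#     {'keys': [('some_field', pymongo.ASCENDING)], 'unique': False}
# i.e. anything accepted by pymongo's create_index() as its first argument, plus a
# 'unique' flag.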
def main(argv=sys.argv[1:]):
'''
Script to create indexes for collections in given DB
Usage:
create_indexes
'''
loop = asyncio.get_event_loop()
from tycho.main import app
init_app(app, app['config'])
loop.run_until_complete(create_indexes(app))
|
py
|
1a5e12e60050ac02cf1aea6ee14242bad5c2f6c0
|
import numpy as np
import os
os.environ.setdefault('PATH', '')
from collections import deque
import gym
from gym import spaces
import cv2
cv2.ocl.setUseOpenCL(False)
from wrappers import TimeLimit
class NoopResetEnv(gym.Wrapper):
def __init__(self, env, noop_max=30):
"""Sample initial states by taking random number of no-ops on reset.
No-op is assumed to be action 0.
"""
gym.Wrapper.__init__(self, env)
self.noop_max = noop_max
self.override_num_noops = None
self.noop_action = 0
assert env.unwrapped.get_action_meanings()[0] == 'NOOP'
def reset(self, **kwargs):
""" Do no-op action for a number of steps in [1, noop_max]."""
self.env.reset(**kwargs)
if self.override_num_noops is not None:
noops = self.override_num_noops
else:
noops = self.unwrapped.np_random.randint(1, self.noop_max + 1) #pylint: disable=E1101
assert noops > 0
obs = None
for _ in range(noops):
obs, _, done, _ = self.env.step(self.noop_action)
if done:
obs = self.env.reset(**kwargs)
return obs
def step(self, ac):
return self.env.step(ac)
class FireResetEnv(gym.Wrapper):
def __init__(self, env):
"""Take action on reset for environments that are fixed until firing."""
gym.Wrapper.__init__(self, env)
assert env.unwrapped.get_action_meanings()[1] == 'FIRE'
assert len(env.unwrapped.get_action_meanings()) >= 3
def reset(self, **kwargs):
self.env.reset(**kwargs)
obs, _, done, _ = self.env.step(1)
if done:
self.env.reset(**kwargs)
obs, _, done, _ = self.env.step(2)
if done:
self.env.reset(**kwargs)
return obs
def step(self, ac):
return self.env.step(ac)
class EpisodicLifeEnv(gym.Wrapper):
def __init__(self, env):
"""Make end-of-life == end-of-episode, but only reset on true game over.
Done by DeepMind for the DQN and co. since it helps value estimation.
"""
gym.Wrapper.__init__(self, env)
self.lives = 0
self.was_real_done = True
def step(self, action):
obs, reward, done, info = self.env.step(action)
self.was_real_done = done
# check current lives, make loss of life terminal,
# then update lives to handle bonus lives
lives = self.env.unwrapped.ale.lives()
if lives < self.lives and lives > 0:
# for Qbert sometimes we stay in lives == 0 condition for a few frames
# so it's important to keep lives > 0, so that we only reset once
# the environment advertises done.
done = True
self.lives = lives
return obs, reward, done, info
def reset(self, **kwargs):
"""Reset only when lives are exhausted.
This way all states are still reachable even though lives are episodic,
and the learner need not know about any of this behind-the-scenes.
"""
if self.was_real_done:
obs = self.env.reset(**kwargs)
else:
# no-op step to advance from terminal/lost life state
obs, _, _, _ = self.env.step(0)
self.lives = self.env.unwrapped.ale.lives()
return obs
class MaxAndSkipEnv(gym.Wrapper):
def __init__(self, env, skip=4):
"""Return only every `skip`-th frame"""
gym.Wrapper.__init__(self, env)
# most recent raw observations (for max pooling across time steps)
self._obs_buffer = np.zeros((2,)+env.observation_space.shape, dtype=np.uint8)
self._skip = skip
def step(self, action):
"""Repeat action, sum reward, and max over last observations."""
total_reward = 0.0
done = None
for i in range(self._skip):
obs, reward, done, info = self.env.step(action)
if i == self._skip - 2: self._obs_buffer[0] = obs
if i == self._skip - 1: self._obs_buffer[1] = obs
total_reward += reward
if done:
break
# Note that the observation on the done=True frame
# doesn't matter
max_frame = self._obs_buffer.max(axis=0)
return max_frame, total_reward, done, info
def reset(self, **kwargs):
return self.env.reset(**kwargs)
class ClipRewardEnv(gym.RewardWrapper):
def __init__(self, env):
gym.RewardWrapper.__init__(self, env)
def reward(self, reward):
"""Bin reward to {+1, 0, -1} by its sign."""
return np.sign(reward)
class WarpFrame(gym.ObservationWrapper):
def __init__(self, env, width=84, height=84, grayscale=True, dict_space_key=None):
"""
Warp frames to 84x84 as done in the Nature paper and later work.
If the environment uses dictionary observations, `dict_space_key` can be specified which indicates which
observation should be warped.
"""
super().__init__(env)
self._width = width
self._height = height
self._grayscale = grayscale
self._key = dict_space_key
if self._grayscale:
num_colors = 1
else:
num_colors = 3
new_space = gym.spaces.Box(
low=0,
high=255,
shape=(self._height, self._width, num_colors),
dtype=np.uint8,
)
if self._key is None:
original_space = self.observation_space
self.observation_space = new_space
else:
original_space = self.observation_space.spaces[self._key]
self.observation_space.spaces[self._key] = new_space
assert original_space.dtype == np.uint8 and len(original_space.shape) == 3
def observation(self, obs):
if self._key is None:
frame = obs
else:
frame = obs[self._key]
if self._grayscale:
frame = cv2.cvtColor(frame, cv2.COLOR_RGB2GRAY)
frame = cv2.resize(
frame, (self._width, self._height), interpolation=cv2.INTER_AREA
)
if self._grayscale:
frame = np.expand_dims(frame, -1)
if self._key is None:
obs = frame
else:
obs = obs.copy()
obs[self._key] = frame
return obs
class FrameStack(gym.Wrapper):
def __init__(self, env, k):
"""Stack k last frames.
Returns lazy array, which is much more memory efficient.
See Also
--------
baselines.common.atari_wrappers.LazyFrames
"""
gym.Wrapper.__init__(self, env)
self.k = k
self.frames = deque([], maxlen=k)
shp = env.observation_space.shape
self.observation_space = spaces.Box(low=0, high=255, shape=(shp[:-1] + (shp[-1] * k,)), dtype=env.observation_space.dtype)
def reset(self):
ob = self.env.reset()
for _ in range(self.k):
self.frames.append(ob)
return self._get_ob()
def step(self, action):
ob, reward, done, info = self.env.step(action)
self.frames.append(ob)
return self._get_ob(), reward, done, info
def _get_ob(self):
assert len(self.frames) == self.k
return LazyFrames(list(self.frames))
class ScaledFloatFrame(gym.ObservationWrapper):
def __init__(self, env):
gym.ObservationWrapper.__init__(self, env)
self.observation_space = gym.spaces.Box(low=0, high=1, shape=env.observation_space.shape, dtype=np.float32)
def observation(self, observation):
# careful! This undoes the memory optimization, use
# with smaller replay buffers only.
return np.array(observation).astype(np.float32) / 255.0
class LazyFrames(object):
def __init__(self, frames):
"""This object ensures that common frames between the observations are only stored once.
It exists purely to optimize memory usage which can be huge for DQN's 1M frames replay
buffers.
This object should only be converted to numpy array before being passed to the model.
You'd not believe how complex the previous solution was."""
self._frames = frames
self._out = None
def _force(self):
if self._out is None:
self._out = np.concatenate(self._frames, axis=-1)
self._frames = None
return self._out
def __array__(self, dtype=None):
out = self._force()
if dtype is not None:
out = out.astype(dtype)
return out
def __len__(self):
return len(self._force())
def __getitem__(self, i):
return self._force()[i]
def count(self):
frames = self._force()
return frames.shape[frames.ndim - 1]
def frame(self, i):
return self._force()[..., i]
def make_atari(env_id, max_episode_steps=None):
env = gym.make(env_id)
assert 'NoFrameskip' in env.spec.id
env = NoopResetEnv(env, noop_max=30)
env = MaxAndSkipEnv(env, skip=4)
if max_episode_steps is not None:
env = TimeLimit(env, max_episode_steps=max_episode_steps)
return env
def wrap_deepmind(env, episode_life=True, clip_rewards=True, frame_stack=False, scale=False):
"""Configure environment for DeepMind-style Atari.
"""
if episode_life:
env = EpisodicLifeEnv(env)
if 'FIRE' in env.unwrapped.get_action_meanings():
env = FireResetEnv(env)
env = WarpFrame(env)
if scale:
env = ScaledFloatFrame(env)
if clip_rewards:
env = ClipRewardEnv(env)
if frame_stack:
env = FrameStack(env, 4)
return env
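# Minimal usage sketch (not part of the original module): assumes a gym installation
# with the Atari extras; 'PongNoFrameskip-v4' is only an example environment id.
if __name__ == "__main__":
    env = make_atari('PongNoFrameskip-v4', max_episode_steps=1000)
    env = wrap_deepmind(env, frame_stack=True)
    obs = env.reset()
    # With frame_stack=True the observation is a LazyFrames wrapper of shape (84, 84, 4).
    print(np.array(obs).shape)
    obs, reward, done, info = env.step(env.action_space.sample())
    env.close()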
|
py
|
1a5e12fea24c268c4b2f3755f30f61b8f05d468b
|
from django.test import TestCase
from django.urls import reverse
from django.contrib.auth import get_user_model
from rest_framework import status
from rest_framework.test import APIClient
from .serializers import UserSerializer
from profiles.models import Profile
USER_LIST_URL = reverse('users:user-list')
REGISTER_URL = reverse('users:register')
LOGIN_URL = reverse('users:login')
LOGOUT_URL = reverse('users:knox-logout')
USER_DETAIL_URL = reverse('users:user-detail')
USER_UPDATE_URL = reverse('users:user-update')
class UserModelTests(TestCase):
@classmethod
def setUpTestData(cls):
cls.admin = get_user_model().objects.create_superuser(
'[email protected]',
'1212qwqw'
)
cls.user = get_user_model().objects.create_user(
'[email protected]',
'1212qwqw'
)
def setUp(self):
self.admin.refresh_from_db()
self.user.refresh_from_db()
def test_str(self):
"""Test display from user model"""
self.assertEqual(str(self.user), '[email protected]')
self.assertEqual(str(self.admin), '[email protected]')
def test_create_admin(self):
"""Test create superuser"""
self.assertTrue(self.admin.is_staff)
self.assertTrue(self.admin.is_superuser)
class PublicUserAPITest(TestCase):
"""Test public api for user"""
def setUp(self):
self.client = APIClient()
self.user = get_user_model().objects.create_user(
username="user1",
email="[email protected]",
password="1212qwqw"
)
def test_get_user_list(self):
"""Test get list user API"""
res = self.client.get(USER_LIST_URL)
users = get_user_model().objects.all()
serializer = UserSerializer(users, many=True)
self.assertEqual(res.status_code, status.HTTP_200_OK)
self.assertEqual(res.data, serializer.data)
def test_user_register(self):
"""Test user register api"""
payload = {
"username": "a",
"email": "[email protected]",
"password": "1212qwqw"
}
res = self.client.post(REGISTER_URL, payload)
user = get_user_model().objects.get(email=payload["email"])
profile = Profile.objects.get(user=user)
self.assertEqual(res.status_code, status.HTTP_201_CREATED)
self.assertIsNotNone(user)
self.assertIsNotNone(profile)
self.assertEqual(str(profile), payload["email"])
def test_user_login_with_email(self):
"""Test user login with email api"""
payload = {
"username": "[email protected]",
"password": "1212qwqw"
}
res = self.client.post(LOGIN_URL, payload)
self.assertEqual(res.status_code, status.HTTP_200_OK)
def test_user_login_with_username(self):
"""Test user login with username api"""
payload = {
"username": "user1",
"password": "1212qwqw"
}
res = self.client.post(LOGIN_URL, payload)
self.assertEqual(res.status_code, status.HTTP_200_OK)
def test_user_login_with_wrong_credentials(self):
"""Test user login api"""
payload = {
"username": "[email protected]",
"password": "1212"
}
res = self.client.post(LOGIN_URL, payload)
self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)
class PrivateUserAPITests(TestCase):
def setUp(self):
self.client = APIClient()
self.user = get_user_model().objects.create_user(
email='[email protected]', username='test', password='1212')
self.client.force_authenticate(user=self.user)
def test_retrieve_user_detail(self):
"""Test retrieve user detail"""
res = self.client.get(USER_DETAIL_URL)
self.assertEqual(res.status_code, status.HTTP_200_OK)
self.assertEqual(res.data['username'], self.user.username)
self.assertFalse(res.data['is_social_account'])
def test_update_user(self):
payload = {'username': 'testtest',
'email': '[email protected]',
'old_password': '1212',
'password': 'qwqw'}
res = self.client.patch(USER_UPDATE_URL, payload)
self.user.refresh_from_db()
self.assertEqual(self.user.username, payload['username'])
self.assertEqual(self.user.email, payload['email'])
self.assertTrue(self.user.check_password(payload['password']))
self.assertEqual(res.status_code, status.HTTP_200_OK)
|
py
|
1a5e139d73ce608c3f6833a41648a94c77144a55
|
from datetime import timedelta, datetime
from django.contrib.auth import get_user_model
from django.contrib.auth.models import AbstractUser
from django.core.mail import send_mail
from django.core.validators import MaxValueValidator, MinValueValidator
from django.db import models
from django.db.models import Count
from django.dispatch import receiver
from django.urls import reverse
from django.utils import timezone
from django.utils.translation import gettext_lazy as _
from django_rest_passwordreset.signals import reset_password_token_created
from phonenumber_field.modelfields import PhoneNumberField
class User(AbstractUser):
email = models.EmailField(_('email address'), unique=True)
USERNAME_FIELD = 'email'
REQUIRED_FIELDS = ['username']
class Badge(models.Model):
label = models.CharField(max_length=50)
def __str__(self):
return self.label
class Supplier(models.Model):
name = models.CharField(max_length=100)
phone = PhoneNumberField(blank=True, null=True, default=None)
email = models.EmailField(blank=True, null=True, default=None)
website = models.CharField(blank=True, null=True, default=None, max_length=200)
class Location(models.Model):
TYPE_ROOM = 'room'
TYPE_SHELF = 'shelf'
TYPES = [(TYPE_ROOM, 'Room'), (TYPE_SHELF, 'Shelf')]
name = models.CharField(max_length=20, unique=True)
type = models.CharField(max_length=32, choices=TYPES, default=TYPE_SHELF)
def __str__(self):
return self.name
class BggGame(models.Model):
bggid = models.CharField(max_length=100, primary_key=True)
name = models.CharField(max_length=200)
badges = models.ManyToManyField(Badge, blank=True, default=None)
rank = models.IntegerField(null=True)
min_players = models.IntegerField(null=True)
max_players = models.IntegerField(null=True)
min_playtime = models.IntegerField(null=True)
max_playtime = models.IntegerField(null=True)
thumbnail = models.CharField(blank=True, max_length=500, default='')
image = models.CharField(blank=True, max_length=500)
other_names = models.JSONField(blank=True, default=list)
@staticmethod
def most_withdraws(number):
return Withdraw.objects.all().values('game__game__name', 'game__game__image').annotate(
total=Count('game')).order_by('-total')[:number]
def __unicode__(self):
return self.name
def __str__(self):
return self.name
class Game(models.Model):
owner = models.ForeignKey(User, on_delete=models.CASCADE)
game = models.ForeignKey(BggGame, null=True, blank=True, on_delete=models.CASCADE)
notes = models.TextField(blank=True)
date_checkin = models.DateTimeField(default=None, blank=True, null=True)
date_checkout = models.DateTimeField(default=None, blank=True, null=True)
class Meta:
abstract = True
def __unicode__(self):
return u"%s" % self.name
class LibraryGame(Game):
location = models.ForeignKey(Location, null=True, blank=True, default=None, on_delete=models.CASCADE)
def status(self):
if not self.location or self.date_checkin is None:
return 'not-checked-in'
elif self.date_checkout is not None:
return 'checked-out'
elif self.withdraw_set.filter(date_returned=None).count() == 0:
return 'available'
else:
return 'not-available'
def current_withdraw(self):
if self.status() == "not-available":
return self.withdraw_set.filter(date_returned=None).first()
else:
return None
def save(self, *args, **kwargs):
if self.location:
self.date_checkin = timezone.now()
super(LibraryGame, self).save()
class UsedGame(Game):
price = models.FloatField(default=0.0, blank=False, null=False)
class StoreGame(models.Model):
game = models.ForeignKey(BggGame, null=True, blank=True, on_delete=models.CASCADE)
supplier = models.ForeignKey(Supplier, null=True, blank=True, on_delete=models.CASCADE)
selling_price = models.FloatField(default=0.0, blank=False, null=False)
buying_price = models.FloatField(default=0.0, blank=False, null=False)
stock = models.FloatField(default=0.0, blank=False, null=False)
class Withdraw(models.Model):
requisitor = models.ForeignKey(User, on_delete=models.CASCADE, default=None)
game = models.ForeignKey(LibraryGame, on_delete=models.CASCADE)
notes = models.TextField(blank=True)
date_withdrawn = models.DateTimeField(auto_now_add=True)
date_returned = models.DateTimeField(default=None, blank=True, null=True)
def returned(self):
return self.date_returned is not None
returned.boolean = True
def __unicode__(self):
return u"%s with %s" % (self.game.name, self.requisitor.name)
def __str__(self):
return u"%s with %s" % (self.game.name, self.requisitor.name)
@staticmethod
def last(days):
items = Withdraw.objects.filter(date_withdrawn__range=(datetime.now() - timedelta(days=days), datetime.now())) \
.extra({'date_withdrawn': "date(date_withdrawn)"}) \
.values('date_withdrawn') \
.annotate(created_count=Count('id'))
items = list(items)
dates = [x.get('date_withdrawn') for x in items]
for d in (datetime.today() - timedelta(days=x) for x in range(0, days)):
if d.date().isoformat() not in dates:
items.append({'date_withdrawn': d.date().isoformat(), 'created_count': 0})
items.sort(key=lambda o: o['date_withdrawn'])
return items
class Configuration(models.Model):
class Types(models.TextChoices):
STORE = 'store', 'Store'
LIBRARY = 'library', 'Library'
key = models.fields.CharField(unique=True, max_length=100)
type = models.TextField(choices=Types.choices)
value = models.TextField()
@receiver(reset_password_token_created)
def password_reset_token_created(sender, instance, reset_password_token, *args, **kwargs):
email_plaintext_message = "{}?token={}".format(reverse('password_reset:reset-password-request'),
reset_password_token.key)
send_mail(
# title:
"Password Reset for {title}".format(title="Some website title"),
# message:
email_plaintext_message,
# from:
"[email protected]",
# to:
[reset_password_token.user.email]
)
|
py
|
1a5e1439a8d0ccba42de62b8a05b5f21e349ca3b
|
import configparser
import os
from load_config_file import locate_config_file
from default_colors import default_print_info, default_print_error, default_print_instruction
from colored_output import ColoredOutput
from pkg_resources import Requirement, resource_filename
from shutil import copyfile
def write_example_config():
garrick_dir, config_file_name = locate_config_file()
default_print_info(
'Your config file is {}.'.format(os.path.join(garrick_dir, config_file_name))
)
default_print_info(
'I am writing a file called {}.example into the same directory.'.format(config_file_name)
)
default_print_instruction(
'You can work from this file to restore your garrick.conf file to a valid state.'
)
print()
example_config_file = resource_filename(Requirement.parse('garrick'), 'garrick.conf.example')
copyfile(example_config_file, os.path.join(garrick_dir, '{}.example'.format(config_file_name)))
raise Exception('Invalid or incomplete config file.')
def get_config():
garrick_dir, config_file_name = locate_config_file()
config_file = os.path.join(garrick_dir, config_file_name)
config = configparser.ConfigParser(allow_no_value = True)
try:
config.read(config_file)
except Exception as exception:
print()
default_print_error('Something is wrong with your config file.')
default_print_error('ConfigParser has thrown the following exception:')
print()
print(exception)
print()
write_example_config()
return config
def parse_db_files():
config = get_config()
if not 'database_files' in config.sections():
print()
default_print_error(
'Error: There is no [database_files] section in your config file.'
)
print()
write_example_config()
db_files = []
for db_file in config['database_files']:
db_files.append(db_file)
if len(db_files) == 0:
print()
default_print_error(
'Error: No databases are listed in your config file.'
)
default_print_instruction(
'Write a name for a database file into its [database_files] section.'
)
default_print_info('This file will be created the next time you run garrick,')
default_print_info('or it will be used if it already exists.')
print()
write_example_config()
return db_files
def parse_editor():
config = get_config()
if not 'config' in config.sections():
print()
default_print_error('Error: There is no [config] section in your config file.')
print()
write_example_config()
if not 'editor' in config['config']:
print()
default_print_error(
'Error: There is no "editor" variable in the [config] section of your config file.'
)
print()
write_example_config()
editor = config['config']['editor']
if editor == '' or editor is None:
editor = os.getenv('EDITOR')
if editor is None:
print()
default_print_error('Error: No editor is defined in your config file.')
default_print_instruction(
'Add the name of your favourite editor at the end of the line "editor = "'
)
default_print_instruction('so you can use it to edit your cards.')
default_print_info(
"(This is normal if you haven't set the editor variable before.)"
)
print()
write_example_config()
return editor
def parse_colors():
config = get_config()
if not 'config' in config.sections():
print()
default_print_error('Error: There is no [config] section in your config file.')
print()
write_example_config()
if 'info' in config['config']:
info_color = config['config']['info']
else:
info_color = 'brightgreen'
if 'error' in config['config']:
error_color = config['config']['error']
else:
error_color = 'brightred'
if 'instruction' in config['config']:
instruction_color = config['config']['instruction']
else:
instruction_color = 'brightmagenta'
if 'side_of_card' in config['config']:
side_color = config['config']['side_of_card']
else:
side_color = 'brightyellow'
if 'prompt' in config['config']:
prompt_color = config['config']['prompt']
else:
prompt_color = 'brightcyan'
if 'silent_prompt' in config['config']:
silent_prompt_color = config['config']['silent_prompt']
else:
silent_prompt_color = 'brightyellow'
return ColoredOutput(
info_color,
error_color,
instruction_color,
side_color,
prompt_color,
silent_prompt_color
)
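# For reference, a config file that satisfies the parsers above could look roughly
# like the sketch below (section and key names come from the code above; the values
# are only examples):
#
#   [config]
#   editor = vim
#   info = brightgreen
#   error = brightred
#
#   [database_files]
#   default.db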
|
py
|
1a5e144a0f61c3b74fcb93c2932584b016f10bcc
|
from django.contrib import admin
from suspects.models import Suspect
# Register your models here.
admin.site.register(Suspect)
|
py
|
1a5e153c0dec800e76dd634d9bc0806784e8937e
|
from machinetranslation import translator
from flask import Flask, render_template, request
import json
app = Flask("Web Translator")
@app.route("/englishToFrench")
def englishToFrench():
textToTranslate = request.args.get('textToTranslate')
txt = translator.english_to_french(textToTranslate)
return txt
@app.route("/frenchToEnglish")
def frenchToEnglish():
textToTranslate = request.args.get('textToTranslate')
txt = translator.french_to_english(textToTranslate)
return txt
@app.route("/")
def renderIndexPage():
return render_template("index.html")
if __name__ == "__main__":
app.run(host="0.0.0.0", port=8080)
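# Minimal usage sketch (assumes the app is running locally on port 8080; the requests
# library is not part of this project and is used here purely for illustration):
#
#   import requests
#   r = requests.get("http://localhost:8080/englishToFrench",
#                    params={"textToTranslate": "Hello"})
#   print(r.text)  # French translation produced by translator.english_to_french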
|
py
|
1a5e17d06f4c26fac69a5a8763df7759de8bfb65
|
"""
THIS CLASS IS NOT A LOGGER, but it plays one on TV.
The purpose of this class is to make it easy for other code to get a scoped-logger with just an import statement.
And while I was at it, I added a TRACE log level (below DEBUG).
USAGE:
If you want to do some logging...
put this at the top of your file:
from mi.logging import log
then log out:
log.info("PI is an irrational number")
log.debug("after the 3 i found a point 14")
log.trace("what the heck is trace for?")
WARNING: DO NOT stray from the path. Some evil trickery is going on here and you could get hurt!
DO NOT import as something else:
from mi.logging import log as foo_bar
DO NOT use the fully qualified name:
import mi.logging
DO NOT use the internal classes directly
from mi.logging.logger import _ScopedLogger
cant_stop_me = _ScopedLogger()
DO NOT pass "log" around for other things to use:
from mi.logging import log
import other.module
other.module.write_some_messages(log)
Yes, this is hacky. I would rather just stick with plain python logging. But if this is the only way to quit the
imported monkey patch habit, then give me hacky.
EVOLUTION / CREATION:
In the early days of the project, it seems like this was just too much work:
import logging
log = logging.getLogger('python.module.name')
To save those extra bytes, we monkey-patched import itself to save a few characters with this replacement:
from pyon.util.log import log
Unfortunately, this makes troubleshooting import problems much more difficult. And solving a mundane problem by
monkey-patching import kind of makes me taste my lunch a little bit, even well into the afternoon.
If I've done this correctly, nobody should notice anything different. But I'll sleep better with one less monkey
running loose.
"""
import logging
import inspect
import threading
# invent a new log level called "trace". hope that people will use it.
# lifted from http://stackoverflow.com/questions/2183233/how-to-add-a-custom-loglevel-to-pythons-logging-facility
#
TRACE=5
logging.TRACE = TRACE
logging.addLevelName(TRACE, 'TRACE')
def trace(self, message, *args, **kws):
if self.isEnabledFor(TRACE):
self._log(TRACE, message, args, **kws) # resist the urge to stick your * in my args
logging.Logger.trace = trace
## next bit filched from 1.5.2's inspect.py
#def currentframe():
# """Return the frame object for the caller's stack frame."""
# try:
# raise Exception
# except:
# return sys.exc_info()[2].tb_frame.f_back
#
#if hasattr(sys, '_getframe'): currentframe = lambda: sys._getframe(3)
## done filching
# here the magic happens
#
# the _ScopedLogger object has the same functions as a logger,
# but as soon as one of them is called:
# - it figures out what module is the caller
# - creates a logger for that module
# - installs the logger as "module.log"
# - invokes whatever the original function was on that new logger
#
# this should happen exactly once in each module. after that, the installed logger would be called:
# # say i'm inside x.y.py
# from mi.logging import log # x.y.log is a _ScopedLogger()
# log.info('first message') # sets x.y.log = logging.getLogger('x.y') [and then runs .info() on it]
# log.info('second message') # this isn't a _ScopedLogger any more, its logging.getLogger('x.y'), remember?
#
class _ScopedLogger(object):
_filters = []
def _add_filter(self, filter):
""" set this filter on each new logger created (does not affect loggers already created)
not intended to be called directly by client code (interface is supposed to look like a Logger).
instead, call mi.logging.config.add_filter(filter)
"""
self._filters.append(filter)
def _install_logger(self):
name = "UNKNOWN_MODULE_NAME"
module = None
stack = inspect.stack()
# stack[0]: call to inspect.stack() on the line above
# stack[1]: call to _install_logger() by one of the delegate methods below
frame=stack[2] # call to the delegate method from some outside calling module
if frame and frame[0]:
module = inspect.getmodule(frame[0])
if module:
name = module.__name__
elif frame[1]:
name = frame[1]
true_caller_tuple = (name, frame[2], frame[3])
logger = logging.getLogger(name)
# fix bug -- first message logged was reporting line number from this file
def first_time_find_caller():
logger.findCaller = logger._original_find_caller
return true_caller_tuple
logger._original_find_caller = logger.findCaller
logger.findCaller = first_time_find_caller
for filter in self._filters:
logger.addFilter(filter)
if module:
module.log = logger
return logger
# all Logger methods quietly install the true logger object and then delegate
def setLevel(self,*a,**b): return self._install_logger().setLevel(*a,**b)
def isEnabledFor(self,*a,**b): return self._install_logger().isEnabledFor(*a,**b)
def getEffectiveLevel(self,*a,**b): return self._install_logger().getEffectiveLevel(*a,**b)
def getChild(self,*a,**b): return self._install_logger().getChild(*a,**b)
def trace(self,*a,**b): return self._install_logger().trace(*a,**b)
def debug(self,*a,**b): return self._install_logger().debug(*a,**b)
def info(self,*a,**b): return self._install_logger().info(*a,**b)
def warning(self,*a,**b): return self._install_logger().warning(*a,**b)
def warn(self,*a,**b): return self._install_logger().warn(*a,**b)
def error(self,*a,**b): return self._install_logger().error(*a,**b)
def critical(self,*a,**b): return self._install_logger().critical(*a,**b)
def log(self,*a,**b): return self._install_logger().log(*a,**b)
def exception(self,*a,**b): return self._install_logger().exception(*a,**b)
def addFilter(self,*a,**b): return self._install_logger().addFilter(*a,**b)
def removeFilter(self,*a,**b): return self._install_logger().removeFilter(*a,**b)
def filter(self,*a,**b): return self._install_logger().filter(*a,**b)
def addHandler(self,*a,**b): return self._install_logger().addHandler(*a,**b)
def removeHandler(self,*a,**b): return self._install_logger().removeHandler(*a,**b)
def findCaller(self,*a,**b): return self._install_logger().findCaller(*a,**b)
def handle(self,*a,**b): return self._install_logger().handle(*a,**b)
def makeRecord(self,*a,**b): return self._install_logger().makeRecord(*a,**b)
class AddFields(logging.Filter):
""" add custom fields to messages for graypy to forward to graylog
if the values are constant, they may be added as a dictionary when the filter is created.
if they change, the values can be copied from thread-local fields with the given names.
NOTE: graypy will automatically also add: function, pid, process_name, thread_name
"""
def __init__(self, attribute_name, thread_local_field_names, constant_field_values):
"""
@param thread_local_field_names is a dictionary mapping the name of the thread-local field
to the name it should have in the logging record. for example,
if the dictionary has an entry 'a': 'b', then the logging record
will be set: record.b = threading.local().a # direct, no local context
or: record.b = threading.local().<attribute_name>.a # attr-style
or: record.b = threading.local().<attribute_name>['a'] # dict-style
@param constant_field_values is a dictionary mapping logging field names to string values.
if this dictionary has an entry 'a': 'b', then: record.a = 'b'
"""
self.attribute_name = attribute_name
self.thread_local_field_names = thread_local_field_names
self.constant_field_values = constant_field_values
def filter(self, record):
# add values from thread local context
values = threading.local()
if self.attribute_name:
values = getattr(values, self.attribute_name)
for local_field_name, logging_field_name in self.thread_local_field_names.iteritems():
if hasattr(values, local_field_name):
setattr(record, logging_field_name, getattr(values, local_field_name))
elif isinstance(values, dict) and local_field_name in values:
setattr(record, logging_field_name, values[local_field_name])
# add values constant for the container
for key,value in self.constant_field_values.iteritems():
setattr(record,key,value)
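
# --- Illustrative sketch only (not part of the original mi.logging module) ---
# Minimal demonstration of what AddFields does: attach a constant field to every
# record emitted through a logger so a downstream handler (e.g. graypy, as noted
# above) can forward it. The logger name, field name and value are made up for
# illustration, and this assumes the module's Python 2 runtime (AddFields uses
# iteritems()). Real code would register the filter via
# mi.logging.config.add_filter() as described in _add_filter() above.
if __name__ == "__main__":
    _demo_log = logging.getLogger("mi.logging.demo")
    _handler = logging.StreamHandler()
    _handler.setFormatter(logging.Formatter("%(levelname)s [%(origin_system)s] %(message)s"))
    _demo_log.addHandler(_handler)
    _demo_log.addFilter(AddFields(None, {}, {"origin_system": "demo-container"}))
    _demo_log.warning("constant field attached by AddFields")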
|
py
|
1a5e1a600432dc53a5e118831c3f116a42b25090
|
_base_ = [
'../_base_/models/ssd300.py', '../_base_/datasets/coco_detection.py',
'../_base_/default_runtime.py'
]
# model settings
input_size = 300
model = dict(
type='SingleStageDetector',
pretrained='open-mmlab://vgg16_caffe',
backbone=dict(
type='CCB',
input_size=input_size,
depth=16,
with_last_pool=False,
ceil_mode=True,
out_indices=(3, 4),
out_feature_indices=(22, 34),
l2_norm_scale=20,
assist = dict(depth=50,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
norm_eval=True,
style='pytorch')),
neck=None,
bbox_head=dict(
type='PRSHead',
in_channels=(512, 1024, 512, 256, 256, 256),
num_classes=4,
anchor_generator=dict(
type='SSDAnchorGenerator',
scale_major=False,
input_size=input_size,
basesize_ratio_range=(0.15, 0.9),
strides=[8, 16, 32, 64, 100, 300],
ratios=[[2, 3], [2, 3], [2, 3], [2, 3], [2], [2]]),
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[.0, .0, .0, .0],
target_stds=[0.1, 0.1, 0.2, 0.2])))
cudnn_benchmark = True
train_cfg = dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.5,
neg_iou_thr=0.5,
min_pos_iou=0.,
ignore_iof_thr=-1,
gt_max_assign_all=False),
smoothl1_beta=1.,
allowed_border=-1,
pos_weight=-1,
neg_pos_ratio=3,
debug=False)
test_cfg = dict(
nms=dict(type='nms', iou_threshold=0.45),
min_bbox_size=0,
score_thr=0.02,
max_per_img=200)
# dataset settings
dataset_type = 'CocoDataset'
data_root = 'data/coco_new/'
classes = ('echinus','starfish','holothurian','scallop','waterweeds')
img_norm_cfg = dict(mean=[123.675, 116.28, 103.53], std=[1, 1, 1], to_rgb=True)
train_pipeline = [
dict(type='LoadImageFromFile', to_float32=True),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='PhotoMetricDistortion',
brightness_delta=32,
contrast_range=(0.5, 1.5),
saturation_range=(0.5, 1.5),
hue_delta=18),
dict(
type='Expand',
mean=img_norm_cfg['mean'],
to_rgb=img_norm_cfg['to_rgb'],
ratio_range=(1, 4)),
dict(
type='MinIoURandomCrop',
min_ious=(0.1, 0.3, 0.5, 0.7, 0.9),
min_crop_size=0.3),
dict(type='Resize', img_scale=(300, 300), keep_ratio=False),
dict(type='Normalize', **img_norm_cfg),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(300, 300),
flip=False,
transforms=[
dict(type='Resize', keep_ratio=False),
dict(type='Normalize', **img_norm_cfg),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img']),
])
]
data = dict(
samples_per_gpu=8,
workers_per_gpu=2,
train=dict(
_delete_=True,
type='RepeatDataset',
times=5,
dataset=dict(
type=dataset_type,
ann_file=data_root + 'annotations/instances_train2017.json',
img_prefix=data_root + 'train2017/',
pipeline=train_pipeline)),
val=dict(pipeline=test_pipeline),
test=dict(pipeline=test_pipeline))
# optimizer
optimizer = dict(type='SGD', lr=1e-3, momentum=0.9, weight_decay=0.0001)
optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2))
# learning policy
lr_config = dict(
policy='step',
warmup='linear',
warmup_iters=500,
warmup_ratio=0.0001,
step=[8, 11])
total_epochs = 24
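# Usage note (not part of the original config): with a standard mmdetection
# checkout, a config like this is normally launched through the stock training
# entry point, e.g.:
#   python tools/train.py path/to/this_config.py
# This assumes the custom 'CCB' backbone and 'PRSHead' bbox head referenced
# above are registered in the local codebase; they are not in stock mmdetection.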
|
py
|
1a5e1b68f3a73c719a87dd6dc5af47514583b9c4
|
import logging
import claripy
from rex import Vulnerability
from rex.exploit import CannotExploit
from rex.exploit.cgc import CGCType1CircumstantialExploit
from ..technique import Technique
l = logging.getLogger("rex.exploit.techniques.circumstantial_set_register")
class CircumstantialSetRegister(Technique):
name = "circumstantially_set_register"
applicable_to = ['cgc']
cgc_registers = ["eax", "ecx", "edx", "ebx", "esp", "ebp", "esi", "edi"]
bitmask_threshold = 20
# this technique should create an exploit which is a type1 pov
pov_type = 1
generates_pov = True
def __init__(self, crash, rop, shellcode):
super(CircumstantialSetRegister, self).__init__(crash, rop, shellcode)
self._ip_bitmask = None
self._ip_bitcnt = None
def set_register(self, register):
"""
set a register with shellcode on cgc
:param register: the register to set
"""
# can only exploit ip overwrites
if not self.crash.one_of([Vulnerability.IP_OVERWRITE, Vulnerability.PARTIAL_IP_OVERWRITE]):
raise CannotExploit("[%s] cannot control ip" % self.name)
state = self.crash.state
if self._ip_bitcnt < CircumstantialSetRegister.bitmask_threshold:
raise CannotExploit("not enough controlled bits of ip")
# see if the register value is nearly unconstrained
reg = getattr(state.regs, register)
# we need to make sure that the pc and this register don't conflict
conflict = not state.satisfiable(extra_constraints=(reg != state.regs.pc,))
if conflict:
raise CannotExploit("register %s conflicts with pc, pc and register must be equal" % register)
# get the register's bitmask
reg_bitmask, reg_bitcnt = self.get_bitmask_for_var(state, reg)
if reg_bitcnt >= CircumstantialSetRegister.bitmask_threshold:
if not any([v.startswith('aeg_stdin') for v in reg.variables]):
raise CannotExploit("register %s was symbolic but was not tainted by user input" % register)
l.info("can circumstantially set register %s", register)
ccp = self.crash.copy()
value_var = claripy.BVS('value_var', 32, explicit_name=True)
ip_var = claripy.BVS('ip_var', 32, explicit_name=True)
reg = getattr(ccp.state.regs, register)
ccp.state.add_constraints(reg == value_var)
ccp.state.add_constraints(ccp.state.regs.ip == ip_var)
mem = [reg] + [ccp.state.regs.ip]
return CGCType1CircumstantialExploit(ccp, register, reg_bitmask,
self._ip_bitmask, mem, value_var, ip_var)
else:
raise CannotExploit("register %s's value does not appear to be unconstrained" % register)
def apply(self, **kwargs):
ip = self.crash.state.regs.ip
self._ip_bitmask, self._ip_bitcnt = self.get_bitmask_for_var(self.crash.state, ip)
for register in CircumstantialSetRegister.cgc_registers:
try:
reg_setter = self.set_register(register)
l.info("was able to set register [%s] circumstantially", register)
return reg_setter
except CannotExploit as e:
l.debug("could not set register %s circumstantially (%s)", register, e)
|
py
|
1a5e1b6f90043799e50969fd16bf3e4192d0f05c
|
#! python3
tableData = [['apples', 'oranges', 'cherries', 'banana'],
['Alice', 'Bob', 'Carol', 'David'],
['dogs', 'cats', 'moose', 'goose']]
def printTable(inputList):
colWidths = [0] * len(inputList)
for i in range(len(inputList)):
for j in range(len(inputList[i])):
if len(inputList[i][j]) > colWidths[i]:
colWidths[i] = len(inputList[i][j])
for x in range(len(inputList[0])):
for y in range(len(inputList)):
print(inputList[y][x].rjust(colWidths[y]), end = ' ')
print('')
printTable(tableData)
|
py
|
1a5e1bb6d420a8ff149ffd1acf95d734d01b478e
|
from .ssladapter import SSLAdapter # flake8: noqa
|
py
|
1a5e1c269461c15d89af8c45169de5d1638ee839
|
from collections import OrderedDict
import torch
import torch.nn as nn
import torch.nn.functional as F
class StackedFullyConnected(nn.Module):
def __init__(self, FC_List=[500, 200, 100]):
super(StackedFullyConnected, self).__init__()
self.FC_List = FC_List
self.FCs = nn.ModuleList()
self.__get_fc()
def __get_fc(self):
s = self.FC_List[0]
num = self.FC_List[1]
self.FCs.append(nn.Linear(s, num))
s = num
for num in self.FC_List[2:]:
self.FCs.append(nn.Dropout(p=0.5))
self.FCs.append(nn.Linear(s, num))
s = num
def forward(self, inputs):
x = inputs
for layer in self.FCs:
x = F.softsign(layer(x))
return x
class erfh5_Distributed_Autoencoder(nn.Module):
def __init__(self, dgx_mode=True, layers_size_list=[69366, 15000]):
super(erfh5_Distributed_Autoencoder, self).__init__()
self.encoder = StackedFullyConnected(layers_size_list)
self.decoder = StackedFullyConnected(list(reversed(layers_size_list)))
print(self.encoder)
print(self.decoder)
if dgx_mode:
self.encoder = nn.DataParallel(self.encoder, device_ids=[
0, 1, 2, 3]).to('cuda:0')
self.decoder = nn.DataParallel(self.decoder, device_ids=[
4, 5, 6, 7]).to('cuda:4')
def forward(self, x):
x = self.encoder(x)
x = self.decoder(x)
return x.to('cuda:0')
def save_encoder(self, path):
torch.save(self.encoder.state_dict(), path)
class erfh5_Autoencoder(nn.Module):
def __init__(self, input_size, FC_List=[500, 200, 100]):
super(erfh5_Autoencoder, self).__init__()
self.FC_List = FC_List
self.input_size = input_size
self.FCs = nn.ModuleList()
self.__get_fc()
# self.weightList =
# nn.ParameterList([nn.Parameter(f.weight) for f in self.FCs])
# self.biasList =
# nn.ParameterList([nn.Parameter(f.bias) for f in self.FCs])
[print(f) for f in self.FCs]
def __get_fc(self):
s = self.input_size
for num in self.FC_List:
self.FCs.append(nn.Linear(s, num))
self.FCs.append(nn.Dropout(p=0.5))
s = num
for num in reversed(self.FC_List[:-1]):
self.FCs.append(nn.Linear(s, num))
self.FCs.append(nn.Dropout(p=0.5))
s = num
self.FCs.append(nn.Linear(s, self.input_size))
def forward(self, inputs):
x = inputs
for layer in self.FCs:
x = F.relu(layer(x))
return x
def get_encoding(self):
return self.FCs[int((self.FCs.__len__() - 1) / 2)]
# '/home/lodes/Sim_Results'
# '/cfs/share/data/RTM/Lautern/clean_erfh5/'
def load_stacked_fc(path, layer_sizes=(69366, 15000, 8192)):
state_dict = torch.load(path)
new_state_dict = OrderedDict()
model = StackedFullyConnected(layer_sizes)
for k, v in state_dict.items():
name = k[7:] # remove `module.`
new_state_dict[name] = v
# load params
model.load_state_dict(new_state_dict)
return model
if __name__ == "__main__":
pass
# half_encoder = load_stacked_fc(path)
""" print(">>>INFO: Loading State dict finished.")
half_encoder.to(device)
with torch.no_grad():
half_encoder.eval()
loss = 0
counter = 0
for i in validation_samples:
i = torch.FloatTensor(i)
i = i.to(device)
i = torch.unsqueeze(i, 0)
output = half_encoder(i)
#output = output.to(device)
#loss = loss + loss_criterion(output, i).item()
output = output.cpu().numpy()
i = i.cpu().numpy()
plt.figure()
plt.subplot(211)
plt.plot(i, 'bo')
plt.subplot(212)
plt.plot(output, 'ro')
plt.savefig('/cfs/home/l/o/lodesluk/models/' +
str(counter) + '.png')
print("plot saved")
counter = counter + 1
#loss = loss / len(validation_samples)
#print(">>>Loss on loaded model:", "{:8.4f}".format(loss))
half_encoder.train()
"""
print(">>>INFO: Finished.")
|
py
|
1a5e1c7a72818cd555202419d089b39ea76867c3
|
# Copyright 2015: Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from rally.plugins.openstack.context.heat import stacks
from rally.plugins.openstack.scenarios.heat import utils as heat_utils
from tests.unit import fakes
from tests.unit import test
CTX = "rally.plugins.openstack.context"
SCN = "rally.plugins.openstack.scenarios"
class TestStackGenerator(test.ScenarioTestCase):
def _gen_tenants(self, count):
tenants = {}
for id_ in range(count):
tenants[str(id_)] = dict(name=str(id_))
return tenants
def test_init(self):
self.context.update({
"config": {
"stacks": {
"stacks_per_tenant": 1,
"resources_per_stack": 1
}
}
})
inst = stacks.StackGenerator(self.context)
self.assertEqual(inst.config, self.context["config"]["stacks"])
@mock.patch("%s.heat.utils.HeatScenario._create_stack" % SCN,
return_value=fakes.FakeStack(id="uuid"))
def test_setup(self, mock_heat_scenario__create_stack):
tenants_count = 2
users_per_tenant = 5
stacks_per_tenant = 1
tenants = self._gen_tenants(tenants_count)
users = []
for ten_id in tenants:
for i in range(users_per_tenant):
users.append({"id": i, "tenant_id": ten_id,
"credential": mock.MagicMock()})
self.context.update({
"config": {
"users": {
"tenants": tenants_count,
"users_per_tenant": users_per_tenant,
"concurrent": 10,
},
"stacks": {
"stacks_per_tenant": stacks_per_tenant,
"resources_per_stack": 1
}
},
"users": users,
"tenants": tenants
})
stack_ctx = stacks.StackGenerator(self.context)
stack_ctx.setup()
self.assertEqual(tenants_count * stacks_per_tenant,
mock_heat_scenario__create_stack.call_count)
# check that stack ids have been saved in context
for ten_id in self.context["tenants"].keys():
self.assertEqual(stacks_per_tenant,
len(self.context["tenants"][ten_id]["stacks"]))
@mock.patch("%s.heat.stacks.resource_manager.cleanup" % CTX)
def test_cleanup(self, mock_cleanup):
self.context.update({
"users": mock.MagicMock()
})
stack_ctx = stacks.StackGenerator(self.context)
stack_ctx.cleanup()
mock_cleanup.assert_called_once_with(
names=["heat.stacks"],
users=self.context["users"],
superclass=heat_utils.HeatScenario,
task_id=self.context["owner_id"])
|
py
|
1a5e1d1699152e815b3b535bfac3d3a07aa29380
|
import sys
import os
from os import path, mkdir, listdir, rmdir
from getpass import getpass as inputHidden
import math
##############
# USER INPUT #
##############
"""
Asks the user a question and returns the number of the response. If an invalid answer is given, the question is repeated.
Parameters
----------
question : str
The question that is asked.
options : list (str)
An array of the different possible answers.
allowMultiple : bool
If True, the user may give multiple answers, each separated by a space. An array of these answers is returned.
Returns
-------
If allowMultiple is False:
int
The chosen answer.
Else:
list (int)
An array of ints representing the chosen answers.
"""
def makeChoice(question, options, allowMultiple=False):
numChoices = len(options)
if numChoices == 0:
print("Warning: A question was asked with no valid answers. Returning None.")
return None
if numChoices == 1:
print("A question was asked with only one valid answer. Returning this answer.")
return 1
print("\n"+question)
for i in range(numChoices):
print(str(i+1)+": "+options[i])
cInput = input("\n").split(" ")
if not allowMultiple:
try:
assert len(cInput) == 1
choice = int(cInput[0])
assert choice > 0 and choice <= numChoices
return choice
except:
print("\nInvalid input.")
return makeChoice(question, options, allowMultiple)
else:
try:
choices = [int(c) for c in cInput]
for choice in choices:
assert choice > 0 and choice <= numChoices
return choices
except:
print("\nInvalid input.")
return makeChoice(question, options, allowMultiple)
"""
Asks the user a question. The answer can be any number between the given minVal and maxVal. If an invalid answer is given, the question is repeated.
Parameters
----------
question : str
The question that is asked.
minVal : float
The minimum allowed value.
maxVal : float
The maximum allowed value.
Returns
-------
float
The given value.
"""
def makeChoiceNumInput(question, minVal, maxVal):
while True:
print("\n"+question)
try:
var = float(input())
assert minVal <= var <= maxVal
return var
except:
print("Invalid input.")
###########
# SEEDING #
###########
"""
Encodes an array of variable values into a seed according to a given max value array.
Parameters
----------
varArray : list (int)
The array of values
maxValueArray:
An array of the (number of possible values - 1) of each variable. For example, if you have three variables with the possible values...
var1 : [0, 1, 2, 3]
var2 : [0, 1]
var3 : [0, 1, 2, 3, 4]
... then the maxValueArray should be [4, 2, 5].
Note that the maxValueArray's implementation assumes that possible values start at 0 and each increment by 1. For example, if a variable is stated to have 4 possible values, it assumes those values are [0, 1, 2, 3].
base : int
Between 2 and 36. The numerical base used by the seed (in other words, how many values are possible for each character, such as 0-9 and a-z).
Returns
-------
int
The seed in base-10 numerical form.
str
The seed in the given base.
"""
def encodeSeed(varArray, maxValueArray, base=10):
if base > 36:
print("Base must be between 2 and 36. Lowering to 36.")
base = 36
seed = 0
baseShift = 0
for i in range(len(varArray)):
seed += varArray[i]<<baseShift
baseShift += maxValueArray[i].bit_length()
return seed, dec_to_base(seed, base)
"""
Decodes a string or non-base-10 number into an array of variable values according to a given max value array.
Parameters
----------
seed : str or int
The seed that will be decoded.
maxValueArray:
An array of the (number of possible values - 1) of each variable. For example, if you have three variables with the possible values...
var1 : [0, 1, 2, 3]
var2 : [0, 1]
var3 : [0, 1, 2, 3, 4]
... then the maxValueArray should be [4, 2, 5].
Note that the maxValueArray's implementation assumes that possible values start at 0 and each increment by 1. For example, if a variable is stated to have 4 possible values, it assumes those values are [0, 1, 2, 3].
base : int
Unused if seed is an int (base-10 is assumed). Between 2 and 36. The numerical base used by the seed (in other words, how many values are possible for each character, such as 0-9 and a-z).
Returns
-------
list (int)
An array of variable values decoded from the string. For example, if there are 3 variables, the returned array is [var1's value, var2's value, var3's value]
"""
def decodeSeed(seed, maxValueArray, base=10):
if type(seed) is str:
if base > 36:
print("Base must be between 2 and 36. Lowering to 36.")
base = 36
elif base < 2:
print("Base must be between 2 and 36. Increasing to 2.")
base = 2
seed = int(seed, base)
baseShift = 0
varArray = []
for i in range(len(maxValueArray)):
bitLength = maxValueArray[i].bit_length()
varArray.append((seed>>baseShift) & ((2**bitLength)-1))
baseShift += bitLength
return varArray
"""
Returns whether or not a seed is possible given a maxValueArray and base.
Parameters
----------
seed : str or int
The seed that will be verified.
maxValueArray:
An array of the (number of possible values - 1) of each variable. For example, if you have three variables with the possible values...
var1 : [0, 1, 2, 3]
var2 : [0, 1]
var3 : [0, 1, 2, 3, 4]
... then the maxValueArray should be [4, 2, 5].
Note that the maxValueArray's implementation assumes that possible values start at 0 and each increment by 1. For example, if a variable is stated to have 4 possible values, it assumes those values are [0, 1, 2, 3].
base : int
Between 2 and 36. The numerical base used by the seed (in other words, how many values are possible for each character, such as 0-9 and a-z).
Returns
-------
bool
Whether or not the seed is valid.
list (int)
An array of variable values decoded from the string. For example, if there are 3 variables, the returned array is [var1's value, var2's value, var3's value]
"""
def verifySeed(seed, maxValueArray, base=10):
if base > 36:
print("Base must be between 2 and 36. Lowering to 36.")
base = 36
elif base < 2:
print("Base must be between 2 and 36. Increasing to 2.")
base = 2
if type(seed) is int:
base = 10
seed = dec_to_base(seed,base)
seed = seed.upper().strip()
try:
maxSeed = 0
baseShift = 0
for i in range(len(maxValueArray)):
maxSeed += maxValueArray[i]<<baseShift
baseShift += maxValueArray[i].bit_length()
assert int(seed, 36) <= maxSeed
varsInSeed = decodeSeed(seed, maxValueArray, base)
for i in range(len(varsInSeed)):
assert 0 <= varsInSeed[i] <= maxValueArray[i]
return True, varsInSeed
except:
return False, None
"""
From https://www.codespeedy.com/inter-convert-decimal-and-any-base-using-python/
Converts a base-10 int into a different base.
Parameters
----------
num : int
The number that will be converted.
base : int
Between 2 and 36. The numerical base used by the seed (in other words, how many values are possible for each character, such as 0-9 and a-z).
Returns
-------
str
The number in the given base.
"""
def dec_to_base(num,base): #Maximum base - 36
base_num = ""
while num>0:
dig = int(num%base)
if dig<10:
base_num += str(dig)
else:
base_num += chr(ord('A')+dig-10) #Using uppercase letters
num //= base
base_num = base_num[::-1] #To reverse the string
return base_num
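
# --- Illustrative sketch only (not part of the original helpers) ---
# Round-trip demo for the seeding helpers above; the variable values and
# maxValueArray below are made up for illustration.
def _demo_seed_round_trip():
    demo_vars = [3, 1, 4]
    demo_max = [4, 2, 5]   # see the maxValueArray description above
    seed_dec, seed_str = encodeSeed(demo_vars, demo_max, base=16)
    print(seed_dec, seed_str)                       # 139 8B
    print(decodeSeed(seed_str, demo_max, base=16))  # [3, 1, 4]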
########################
# FILE/PATH MANAGEMENT #
########################
"""
Writes a value to a file at a given address. Supports multi-byte addresses.
Parameters
----------
file : str
The file that will be modified.
address : int
The value (ideally, a hex value such as 0x12345) that will be modified.
val : int
The value that will be written to this address.
numBytes : int
The number of bytes that this value will take up.
Returns
-------
False if the value is too large to be written within the given number of bytes; True otherwise.
Examples
--------
Example 1
writeToAddress(file.exe, 0x12345, 0x41, 1) will write the following value:
0x12345 = 41
Example 2
writeToAddress(file.exe, 0x12345, 0x6D18, 2) will write the following values:
0x12345 = 6D
0x12346 = 18
Example 3
writeToAddress(file.exe, 0x12345, 0x1C, 2) will write the following values:
0x12345 = 00
0x12346 = 1C
"""
def writeToAddress(file, address, val, numBytes=1):
if val.bit_length() > numBytes*8:
print("Given value is greater than "+str(numBytes)+" bytes.")
return False
address += (numBytes-1)
for i in range(numBytes):
file.seek(address)
currByte = val & 0xFF
file.write(bytes([currByte]))
address -= 1
val = val>>8
return True
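
# --- Illustrative sketch only (not part of the original helpers) ---
# Patch two bytes inside a throwaway binary file using writeToAddress; the
# offsets and values below are made up for illustration.
def _demo_write_to_address():
    import tempfile
    with tempfile.TemporaryFile() as f:    # opened in binary mode by default
        f.write(bytes(8))                  # start with eight 0x00 bytes
        writeToAddress(f, 0x2, 0x6D18, 2)  # offset 0x2 -> 0x6D, offset 0x3 -> 0x18
        f.seek(0)
        print(f.read().hex())              # -> '00006d1800000000'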
"""
From https://gist.github.com/jacobtomlinson/9031697
Removes all empty folders, including nested empty folders, in a directory.
Parameters
----------
p : str
The path of the starting directory; all empty folders that are children (or grandchildren, etc) of this directory are removed.
"""
def removeEmptyFolders(p):
if not path.isdir(p):
return
files = listdir(p)
if len(files):
for f in files:
fullpath = path.join(p, f)
if path.isdir(fullpath):
removeEmptyFolders(fullpath)
files = listdir(p)
if len(files) == 0:
rmdir(p)
"""
Returns an array of the individual components of a given path.
Parameters
----------
p : str
The path.
Returns
-------
list (str)
The path array.
Example
-------
Input
"C:/early folder/test2/thing.exe"
Output
["C:", "early folder", "test2", "thing.exe"]
"""
def getPathArray(p):
p1, p2 = path.split(p)
if p2 == "":
p = p1
pathArray = []
while True:
p1, p2 = path.split(p)
pathArray = [p2] + pathArray
if p2 == "":
pathArray = [p1] + pathArray
try:
while pathArray[0] == "":
del pathArray[0]
except:
pass
return pathArray
p = p1
"""
Creates the given directory. Unlike mkdir, this will also create any necessary parent directories that do not already exist.
Parameters
----------
p : str
The path of the folder that will be created.
Returns
-------
True if the folder was created, False if it already exists.
"""
def createDir(p):
if path.isdir(p):
return False
pathArray = getPathArray(p)
currPath = pathArray[0]
for i in range(1, len(pathArray)):
currPath = path.join(currPath, pathArray[i])
if not path.isdir(currPath):
mkdir(currPath)
return True
"""
Returns the directory containing the current program, regardless of whether it is a standalone script or a wrapped executable.
Returns
-------
str
The directory containing the current program.
"""
def getCurrFolder():
if getattr(sys, 'frozen', False):
mainFolder = path.dirname(sys.executable) # EXE (executable) file
else:
mainFolder = path.dirname(path.realpath(__file__)) # PY (source) file
sys.path.append(mainFolder)
return mainFolder
"""
Returns the file extension (including the ".") of the first file found in the given folder that matches the given file name.
Parameters
----------
folder : str
The given folder.
fileName : str
The given file name.
Returns
-------
str
The file extension (including the ".") of the first file found in folder named fileName (with any extension); if no file with that name is found, return an empty string.
"""
def getFileExt(folder, fileName):
for f in listdir(folder):
fName, fExt = path.splitext(f)
if fName == fileName:
return fExt
return ""
"""
From https://stackoverflow.com/questions/1392413/calculating-a-directorys-size-using-python
Returns the total number of bytes taken up by the given directory and its subdirectories.
Parameters
----------
startPath : str
The given directory.
Returns
-------
int
The number of bytes taken up by the directory.
"""
def getDirSize(startPath = '.'):
totalSize = 0
for dirpath, dirnames, filenames in os.walk(startPath):
for f in filenames:
fp = os.path.join(dirpath, f)
# skip if it is symbolic link
if not os.path.islink(fp):
totalSize += os.path.getsize(fp)
return totalSize
####################
# ARRAY MANAGEMENT #
####################
"""
Returns the number of elements (including duplicates) that exist in two different given arrays.
Parameters
----------
arr1 : list
The first array.
arr2 : list
The second array.
Returns
-------
int
The number of elements in the overlap
"""
def arrayOverlap(arr1, arr2):
count = 0
for a in arr1:
if a in arr2:
count += 1
return count
"""
Merges a nested array into a single one-dimensional array.
Parameters
----------
arr : list
The nested array that will be merged.
finalArr : list (str)
Should be ignored (only used in recursion). The created array so far.
Returns
-------
list (str):
The merged array.
Example
-------
Input
[item1, [item2, item3], item4, [item 5, [item6, item7], item8]]
Output
[item1, item2, item3, item4, item5, item6, item7, item8]
"""
def mergeNestedArray(arr, finalArr=None):
    if finalArr is None:  # avoid reusing a mutable default list across calls
        finalArr = []
for val in arr:
if not isinstance(val, list):
finalArr.append(val)
else:
finalArr = mergeNestedArray(val, finalArr)
return finalArr
"""
From https://www.geeksforgeeks.org/python-find-most-frequent-element-in-a-list/
Returns the most common element in a list, along with how many times it occurs.
Parameters
----------
arr : list
The array.
Returns
-------
anything
The most frequently-occurring element.
int
How many instances of this element there are in the array.
"""
def most_frequent(arr):
counter = 0
elem = arr[0]
for i in arr:
curr_frequency = arr.count(i)
if (curr_frequency > counter):
counter = curr_frequency
elem = i
return elem, counter
"""
Returns whether or not arr1 is an ordered subset of arr2.
Parameters
----------
arr1 : list
The first array.
arr2: list
The second array.
Returns
-------
bool
Whether or not arr1 is an ordered subset of arr2.
Examples
--------
Input 1
[3, 5], [1, 3, 5, 7, 9]
Output 1
True
Input 2
[3, 5], [1, 2, 3, 4, 5, 6, 7]
Output 2
False
"""
def arrayInArray(arr1, arr2):
for i in range(len(arr2)-len(arr1)+1):
passed = True
for j in range(len(arr1)):
if arr1[j] != arr2[i+j]:
passed = False
break
if passed:
return True
return False
###############################
# CONSOLE/TERMINAL MANAGEMENT #
###############################
"""
Clears the console screen.
"""
def clearScreen():
os.system('clear' if os.name =='posix' else 'cls')
"""
From https://www.quora.com/How-can-I-delete-the-last-printed-line-in-Python-language
Clears ("backspaces") the last n console lines.
Parameters
----------
n : int
The number of lines to clear.
"""
def delete_last_lines(n=1):
for _ in range(n):
sys.stdout.write('\x1b[1A')
sys.stdout.write('\x1b[2K')
#######################
# STRING MANIPULATION #
#######################
"""
Prints a title surrounded by a certain character.
Parameters
----------
string : str
The string that is printed.
char : str
The one-character string that surrounds the string.
Example
-------
Input
"MY TITLE", "#"
Output
############
# MY TITLE #
############
"""
def printTitle(string, topBottomChar="#", sideChar="#", cornerChar="#"):
topBottom = cornerChar+(topBottomChar*(len(string)+2))+cornerChar
print(topBottom)
print(sideChar+" "+string+" "+sideChar)
print(topBottom)
"""
Returns the base string with either the singular or plural suffix depending on the value of num.
Parameters
----------
base : str
The base of the word.
num : int
The quantity of the desired word.
singularSuffix : str
The suffix of the word's singular form
pluralSuffix : str
The suffix of the word's plural form
Returns
-------
str
The resulting string
Examples
--------
Input 1
pluralize("ind", 1, "ex", "ices")
Output 1
"index"
Input 2
pluralize("ind", 2, "ex", "ices")
Output 2
"indices"
"""
def pluralize(base, num, singularSuffix="", pluralSuffix="s"):
return base+singularSuffix if num == 1 else base+pluralSuffix
"""
Creates a copy of a given string, automatically adding line breaks and indenting lines, without splitting any words in two.
A line's length will only exceed the given limit if a single word in the string exceeds it.
Parameters
----------
string : str
The string to be printed.
lineLength : int
The max length of each printed line.
firstLineIndent : str
The start of the first line.
lineIndent : str
The start of all subsequent lines.
Returns
-------
The output string.
Examples
--------
Input 1
limitedString("Strong Bad's test sentence is as follows: The fish was delish, and it made quite a dish.", 40, "? ", ". ! ")
Output 1
"? Strong Bad's test sentence is as\n. ! follows: The fish was delish, and it\n. ! made quite a dish."
(Which would look like the following when printed):
? Strong Bad's test sentence is as
. ! follows: The fish was delish, and it
. ! made quite a dish.
Input 2
limitedString("THIS_WORD_IS_VERY_LONG there", 15, "", "")
Output 2:
"THIS_WORD_IS_VERY_LONG\nthere"
(Which would look like the following when printed):
THIS_WORD_IS_VERY_LONG
there
"""
def limitedString(string, lineLength=80, firstLineIndent="", lineIndent=" "):
printArray = string.split(" ")
totalString = ""
currString = firstLineIndent
isStartOfLine = True
while len(printArray) > 0:
if isStartOfLine or (len(printArray[0]) + (not isStartOfLine) <= lineLength - len(currString)):
currString += (" " if not isStartOfLine else "")+printArray.pop(0)
isStartOfLine = False
else:
totalString += currString+"\n"
currString = lineIndent
isStartOfLine = True
totalString += currString
return totalString
"""
Shortens a string to a maximum length, padding the last few characters with a given character if necessary.
You have the option of whether or not the string can cut off mid-word.
Parameters
----------
string : str
The string to be shortened.
maxLength : int
The maximum length of the output.
suffixChar : str
The character that will pad a long string
suffixLength : int
The length of the padding
cutoff : bool
If True, the string can be cut mid-word; else, it will be cut at the end of the previous word.
Returns
-------
The (possibly) shortened string, with spaces stripped from the right side of the pre-padded output.
Examples
--------
Input 1
shorten("this string is too long", 20, '.', 3, True)
Output 1
"This string is to..."
Input 2
shorten("this string is too long", 20, '.', 3, False)
Output 2
"This string is..."
Input 3
shorten("this is short", 15, '.', 3, True)
Output 3
"this is short"
"""
def shorten(string, maxLength=10, suffixChar='.', suffixLength=3, cutoff=True):
if len(string) <= maxLength:
return string
if cutoff:
return string[:(maxLength-suffixLength)].rstrip()+(suffixChar*suffixLength)
shortened = string.rstrip()
while len(shortened) > maxLength-suffixLength:
shortened = " ".join(shortened.split(" ")[:-1]).rstrip()
return shortened+(suffixChar*suffixLength)
"""
Splits a string into multiple parts, with each part being about equal in length, and no words cut off in the middle.
Parameters
----------
string : str
The string to be split.
numParts : int
The number of parts to split the string into.
reverse : bool
Decide if the last part (False) or first part (True) is likely to be the longest part.
Returns
-------
list
The split string.
Examples
--------
Input 1
splitStringIntoParts("This string is split into three whole parts", 3, True)
Output 1
['This string is split', 'into three', 'whole parts']
Input 2
splitStringIntoParts("This string is split into three whole parts", 3, False)
Output 2
['This string', 'is split into', 'three whole parts']
"""
def splitStringIntoParts(string, numParts=2, reverse=False):
totalLen = len(string) - (numParts-1)
maxSubStringLength = math.ceil(totalLen/numParts)
stringArray = string.split(" ")
if reverse:
stringArray.reverse()
splitArray = []
currString = ""
offset = 0
while len(stringArray) > 0:
if len(currString) + (currString != "") + len(stringArray[0]) < maxSubStringLength + offset:
currString += (" " if currString != "" else "")+stringArray.pop(0)
else:
offset = (maxSubStringLength + offset) - (len(currString) + (currString != ""))
splitArray.append(currString)
currString = ""
splitArray[-1] += " "+currString
if reverse:
newSplitArray = []
while len(splitArray) > 0:
curr = splitArray.pop(-1).split(" ")
curr.reverse()
curr = " ".join(curr)
newSplitArray.append(curr)
return newSplitArray
return splitArray
"""
Returns a string indicating the input number of bytes in its most significant form, rounding up to the indicated number of decimal places.
For example, if numBytes is at least 1 MB but less than 1 GB, it will be displayed in MB.
Parameters
----------
numBytes : int
The number of bytes.
decimalPlaces : int
The number of decimal places to round to.
Returns
-------
str
The value expressed in the largest applicable unit, together with that unit.
Examples
--------
Input 1
5000000, 3
Output 1
4.769 MB
Input 2
2048, 1
Output 2
2 KB
Input 3
2049, 1
Output 3
2.1 KB
"""
def simplifyNumBytes(numBytes, decimalPlaces=2):
numBytes = float(numBytes)
byteTypeArray = ["B", "KB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB"]
temp = (10.0**decimalPlaces)
for byteType in byteTypeArray:
if numBytes < 1024:
num = math.ceil(numBytes * temp) / temp
if num == int(num):
num = int(num)
return str(num)+" "+byteType
numBytes /= 1024.0
numBytes *= 1024
num = math.ceil(numBytes * temp) / temp
if num == int(num):
num = int(num)
return str(num)+" YB"
"""
SOURCES
dec_to_base
https://www.codespeedy.com/inter-convert-decimal-and-any-base-using-python/
removeEmptyFolders
https://gist.github.com/jacobtomlinson/9031697
getDirSize
https://stackoverflow.com/questions/1392413/calculating-a-directorys-size-using-python
most_frequent
https://www.geeksforgeeks.org/python-find-most-frequent-element-in-a-list/
delete_last_lines
https://www.quora.com/How-can-I-delete-the-last-printed-line-in-Python-language
All other functions made by GateGuy
"""
|
py
|
1a5e1d7d0770e99a0cd05499330f4ae8055cc9b1
|
"""Test scikit-optimize based implementation of hyperparameter
search with interface similar to those of GridSearchCV
"""
import pytest
import time
from sklearn.datasets import load_iris, make_classification
from sklearn.model_selection import train_test_split
from sklearn.pipeline import Pipeline
from sklearn.svm import SVC, LinearSVC
from sklearn.tree import DecisionTreeClassifier
from sklearn.base import clone
from sklearn.base import BaseEstimator
from joblib import cpu_count
from scipy.stats import rankdata
import numpy as np
from numpy.testing import assert_array_equal
from skopt.space import Real, Categorical, Integer
from skopt import BayesSearchCV
def _fit_svc(n_jobs=1, n_points=1, cv=None):
"""
Utility function to fit a larger classification task with SVC
"""
X, y = make_classification(n_samples=1000, n_features=20, n_redundant=0,
n_informative=18, random_state=1,
n_clusters_per_class=1)
opt = BayesSearchCV(
SVC(),
{
'C': Real(1e-3, 1e+3, prior='log-uniform'),
'gamma': Real(1e-3, 1e+1, prior='log-uniform'),
'degree': Integer(1, 3),
},
n_jobs=n_jobs, n_iter=11, n_points=n_points, cv=cv,
random_state=42,
)
opt.fit(X, y)
assert opt.score(X, y) > 0.9
def test_raise_errors():
# check if empty search space is raising errors
with pytest.raises(ValueError):
BayesSearchCV(SVC(), {})
# check if invalid dimensions are raising errors
with pytest.raises(ValueError):
BayesSearchCV(SVC(), {'C': '1 ... 100.0'})
with pytest.raises(TypeError):
BayesSearchCV(SVC(), ['C', (1.0, 1)])
@pytest.mark.parametrize("surrogate", ['gp', None])
@pytest.mark.parametrize("n_jobs", [1, -1]) # test sequential and parallel
@pytest.mark.parametrize("n_points", [1, 3]) # test query of multiple points
def test_searchcv_runs(surrogate, n_jobs, n_points, cv=None):
"""
Test whether the cross validation search wrapper around sklearn
models runs properly with available surrogates and with single
or multiple workers and different number of parameter settings
to ask from the optimizer in parallel.
Parameters
----------
* `surrogate` [str or None]:
A class of the scikit-optimize surrogate used. None means
to use default surrogate.
* `n_jobs` [int]:
Number of parallel processes to use for computations.
"""
X, y = load_iris(True)
X_train, X_test, y_train, y_test = train_test_split(
X, y, train_size=0.75, random_state=0
)
# create an instance of a surrogate if it is not a string
if surrogate is not None:
optimizer_kwargs = {'base_estimator': surrogate}
else:
optimizer_kwargs = None
opt = BayesSearchCV(
SVC(),
{
'C': Real(1e-6, 1e+6, prior='log-uniform'),
'gamma': Real(1e-6, 1e+1, prior='log-uniform'),
'degree': Integer(1, 8),
'kernel': Categorical(['linear', 'poly', 'rbf']),
},
n_jobs=n_jobs, n_iter=11, n_points=n_points, cv=cv,
optimizer_kwargs=optimizer_kwargs
)
opt.fit(X_train, y_train)
# this assertion normally holds; it fails only if something is wrong
# with the optimization procedure itself
assert opt.score(X_test, y_test) > 0.9
@pytest.mark.slow_test
def test_parallel_cv():
"""
Test whether parallel jobs work
"""
_fit_svc(n_jobs=1, cv=5)
_fit_svc(n_jobs=2, cv=5)
def test_searchcv_runs_multiple_subspaces():
"""
Test whether the BayesSearchCV runs without exceptions when
multiple subspaces are given.
"""
X, y = load_iris(True)
X_train, X_test, y_train, y_test = train_test_split(
X, y, train_size=0.75, random_state=0
)
# used to try different model classes
pipe = Pipeline([
('model', SVC())
])
# single categorical value of 'model' parameter sets the model class
lin_search = {
'model': Categorical([LinearSVC()]),
'model__C': Real(1e-6, 1e+6, prior='log-uniform'),
}
dtc_search = {
'model': Categorical([DecisionTreeClassifier()]),
'model__max_depth': Integer(1, 32),
'model__min_samples_split': Real(1e-3, 1.0, prior='log-uniform'),
}
svc_search = {
'model': Categorical([SVC()]),
'model__C': Real(1e-6, 1e+6, prior='log-uniform'),
'model__gamma': Real(1e-6, 1e+1, prior='log-uniform'),
'model__degree': Integer(1, 8),
'model__kernel': Categorical(['linear', 'poly', 'rbf']),
}
opt = BayesSearchCV(
pipe,
[(lin_search, 1), (dtc_search, 1), svc_search],
n_iter=2
)
opt.fit(X_train, y_train)
# test if all subspaces are explored
total_evaluations = len(opt.cv_results_['mean_test_score'])
assert total_evaluations == 1+1+2, "Not all spaces were explored!"
def test_searchcv_sklearn_compatibility():
"""
Test whether the BayesSearchCV is compatible with base sklearn methods
such as clone, set_params, get_params.
"""
X, y = load_iris(True)
X_train, X_test, y_train, y_test = train_test_split(
X, y, train_size=0.75, random_state=0
)
# used to try different model classes
pipe = Pipeline([
('model', SVC())
])
# single categorical value of 'model' parameter sets the model class
lin_search = {
'model': Categorical([LinearSVC()]),
'model__C': Real(1e-6, 1e+6, prior='log-uniform'),
}
dtc_search = {
'model': Categorical([DecisionTreeClassifier()]),
'model__max_depth': Integer(1, 32),
'model__min_samples_split': Real(1e-3, 1.0, prior='log-uniform'),
}
svc_search = {
'model': Categorical([SVC()]),
'model__C': Real(1e-6, 1e+6, prior='log-uniform'),
'model__gamma': Real(1e-6, 1e+1, prior='log-uniform'),
'model__degree': Integer(1, 8),
'model__kernel': Categorical(['linear', 'poly', 'rbf']),
}
opt = BayesSearchCV(
pipe,
[(lin_search, 1), svc_search],
n_iter=2
)
opt_clone = clone(opt)
params, params_clone = opt.get_params(), opt_clone.get_params()
assert params.keys() == params_clone.keys()
for param, param_clone in zip(params.items(), params_clone.items()):
assert param[0] == param_clone[0]
assert isinstance(param[1], type(param_clone[1]))
opt.set_params(search_spaces=[(dtc_search, 1)])
opt.fit(X_train, y_train)
opt_clone.fit(X_train, y_train)
total_evaluations = len(opt.cv_results_['mean_test_score'])
total_evaluations_clone = len(opt_clone.cv_results_['mean_test_score'])
# test if expected number of subspaces is explored
assert total_evaluations == 1
assert total_evaluations_clone == 1+2
def test_searchcv_reproducibility():
"""
Test whether results of BayesSearchCV can be reproduced with a fixed
random state.
"""
X, y = load_iris(True)
X_train, X_test, y_train, y_test = train_test_split(
X, y, train_size=0.75, random_state=0
)
random_state = 42
opt = BayesSearchCV(
SVC(random_state=random_state),
{
'C': Real(1e-6, 1e+6, prior='log-uniform'),
'gamma': Real(1e-6, 1e+1, prior='log-uniform'),
'degree': Integer(1, 8),
'kernel': Categorical(['linear', 'poly', 'rbf']),
},
n_iter=11, random_state=random_state
)
opt.fit(X_train, y_train)
best_est = opt.best_estimator_
opt2 = clone(opt).fit(X_train, y_train)
best_est2 = opt2.best_estimator_
assert getattr(best_est, 'C') == getattr(best_est2, 'C')
assert getattr(best_est, 'gamma') == getattr(best_est2, 'gamma')
assert getattr(best_est, 'degree') == getattr(best_est2, 'degree')
assert getattr(best_est, 'kernel') == getattr(best_est2, 'kernel')
@pytest.mark.fast_test
def test_searchcv_rank():
"""
Test whether the rank_test_score and rank_train_score entries in
cv_results_ are consistent with the corresponding mean scores.
"""
X, y = load_iris(True)
X_train, X_test, y_train, y_test = train_test_split(
X, y, train_size=0.75, random_state=0
)
random_state = 42
opt = BayesSearchCV(
SVC(random_state=random_state),
{
'C': Real(1e-6, 1e+6, prior='log-uniform'),
'gamma': Real(1e-6, 1e+1, prior='log-uniform'),
'degree': Integer(1, 8),
'kernel': Categorical(['linear', 'poly', 'rbf']),
},
n_iter=11, random_state=random_state, return_train_score=True
)
opt.fit(X_train, y_train)
results = opt.cv_results_
test_rank = np.asarray(rankdata(-np.array(results["mean_test_score"]),
method='min'), dtype=np.int32)
train_rank = np.asarray(rankdata(-np.array(results["mean_train_score"]),
method='min'), dtype=np.int32)
assert_array_equal(np.array(results['rank_test_score']), test_rank)
assert_array_equal(np.array(results['rank_train_score']), train_rank)
def test_searchcv_refit():
"""
Test that a BayesSearchCV with refit=True can reuse a best_estimator_
obtained from an earlier search and still score well on held-out data.
"""
X, y = load_iris(True)
X_train, X_test, y_train, y_test = train_test_split(
X, y, train_size=0.75, random_state=0
)
random_state = 42
opt = BayesSearchCV(
SVC(random_state=random_state),
{
'C': Real(1e-6, 1e+6, prior='log-uniform'),
'gamma': Real(1e-6, 1e+1, prior='log-uniform'),
'degree': Integer(1, 8),
'kernel': Categorical(['linear', 'poly', 'rbf']),
},
n_iter=11, random_state=random_state
)
opt2 = BayesSearchCV(
SVC(random_state=random_state),
{
'C': Real(1e-6, 1e+6, prior='log-uniform'),
'gamma': Real(1e-6, 1e+1, prior='log-uniform'),
'degree': Integer(1, 8),
'kernel': Categorical(['linear', 'poly', 'rbf']),
},
n_iter=11, random_state=random_state, refit=True
)
opt.fit(X_train, y_train)
opt2.best_estimator_ = opt.best_estimator_
opt2.fit(X_train, y_train)
# this assertion normally holds; it fails only if something is wrong
# with the optimization procedure itself
assert opt2.score(X_test, y_test) > 0.9
def test_searchcv_callback():
# Test whether callback is used in BayesSearchCV and
# whether it can be used to interrupt the search loop
X, y = load_iris(True)
opt = BayesSearchCV(
DecisionTreeClassifier(),
{
'max_depth': [3], # additional test for single dimension
'min_samples_split': Real(0.1, 0.9),
},
n_iter=5
)
total_iterations = [0]
def callback(opt_result):
# this simply counts iterations
total_iterations[0] += 1
# break the optimization loop at some point
if total_iterations[0] > 2:
return True # True == stop optimization
return False
opt.fit(X, y, callback=callback)
assert total_iterations[0] == 3
# test whether final model was fit
opt.score(X, y)
def test_searchcv_total_iterations():
# Test the total iterations counting property of BayesSearchCV
opt = BayesSearchCV(
DecisionTreeClassifier(),
[
({'max_depth': (1, 32)}, 10), # 10 iterations here
{'min_samples_split': Real(0.1, 0.9)} # 5 (default) iters here
],
n_iter=5
)
assert opt.total_iterations == 10 + 5
def test_search_cv_internal_parameter_types():
# Test whether the parameters passed to the
# estimator of the BayesSearchCV are of standard python
# types - float, int, str
# This estimator is used to check whether the types provided
# are native python types.
class TypeCheckEstimator(BaseEstimator):
def __init__(self, float_param=0.0, int_param=0, str_param=""):
self.float_param = float_param
self.int_param = int_param
self.str_param = str_param
def fit(self, X, y):
assert isinstance(self.float_param, float)
assert isinstance(self.int_param, int)
assert isinstance(self.str_param, str)
return self
def score(self, X, y):
return 0.0
# Below is example code that used to not work.
X, y = make_classification(10, 4)
model = BayesSearchCV(
estimator=TypeCheckEstimator(),
search_spaces={
'float_param': [0.0, 1.0],
'int_param': [0, 10],
'str_param': ["one", "two", "three"],
},
n_iter=11
)
model.fit(X, y)
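
# A minimal sketch (not one of the test cases above) of the end-user call pattern
# these tests exercise: wrap an estimator in BayesSearchCV, fit, and read back the
# best parameters. The tiny synthetic problem and search space are made up for
# illustration.
def _demo_bayes_search_usage():
    X, y = make_classification(n_samples=60, n_features=5, random_state=0)
    opt = BayesSearchCV(
        DecisionTreeClassifier(random_state=0),
        {'max_depth': Integer(1, 8)},
        n_iter=5, cv=3, random_state=0)
    opt.fit(X, y)
    return opt.best_params_, opt.best_score_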
|
py
|
1a5e1dbbc7d7e9192abe3fa8e83d34d0f68c47cb
|
#!/bin/env python
#
# Copyright (c) 2012-2017 The Khronos Group Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import sys
import traceback
import datetime
import time
import subprocess
import threading
import re
from pprint import pprint
class BackgroundProcess(threading.Thread):
def __init__(self, **args):
self.args = args
self.stdout = None
self.stderr = None
self.process = None
threading.Thread.__init__(self)
def run(self):
self.process = subprocess.Popen(
stderr=subprocess.PIPE,
**self.args)
self.stdout, self.stderr = self.process.communicate()
report_re = re.compile(
r'#REPORT:'
r' (?P<timestamp>.{14})'
r' (?P<testid>.*)'
r' (?P<total>\d+)'
r' (?P<disabled>\d+)'
r' (?P<started>\d+)'
r' (?P<completed>\d+)'
r' (?P<passed>\d+)'
r' (?P<failed>\d+)'
r' (?P<version>[^$]+)'
)
class TestRunner(object):
tests = []
timeout = float(os.environ.get('VX_TEST_TIMEOUT', '65')) # seconds
testid = '?'
total_tests = 0
total_disabled_tests = 0
total_started_tests = 0
total_completed_tests = 0
total_passed_tests = 0
total_failed_tests = 0
tests_version = 'unknown'
def get_test_list(self):
p = subprocess.Popen(
args=self.launch_args + ['--quiet', '--list_tests', '--run_disabled'],
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = p.communicate()
if p.returncode != 0:
raise Exception("Can't get list of tests")
lines = stdout.replace('\r\n', '\n').split('\n')
for line in lines:
if line == "":
continue
self.tests.append(line)
m = re.search(report_re, stderr)
if m:
self.testid = m.group('testid')
def run_test(self, test):
self.total_started_tests += 1
bp = BackgroundProcess(
args=self.launch_args + ['--quiet', '--filter=%s' % test.replace(':', '*')]
)
bp.start()
bp.join(self.timeout)
if bp.is_alive():
print('#TIMEOUT on test "%s". TERMINATING' % test)
sys.stdout.flush()
try:
bp.process.terminate()
except:
pass
bp.join()
m = re.search(report_re, bp.stderr)
if m:
timestamp = m.group('timestamp')
testid = m.group('testid')
total = m.group('total')
disabled = m.group('disabled')
started = m.group('started')
completed = m.group('completed')
passed = m.group('passed')
failed = m.group('failed')
if m.group('version'):
self.tests_version = m.group('version')
if str(started) == '0' and str(disabled) == '0':
print("#CHECK FILTER: %s" % test)
self.total_failed_tests += 1
if str(disabled) != '0':
self.total_disabled_tests += 1
self.total_started_tests -= 1
if str(completed) != '0':
self.total_completed_tests += 1
if str(passed) != '0':
self.total_passed_tests += 1
if str(failed) != '0':
self.total_failed_tests += 1
else:
self.total_failed_tests += 1
sys.stdout.write(bp.stderr)
if bp.process.returncode != 0:
print('Process exit code: %d' % bp.process.returncode)
def printUsage(self):
print('''\
Usage:
run_tests.py <vx_test_conformance executable> <filter and other parameters>
Environment variables:
VX_TEST_DATA_PATH - path to test_data directory (used by vx_test_conformance)
VX_TEST_TIMEOUT - single test timeout (in seconds)
Example:
run_tests.py ./bin/vx_test_conformance
run_tests.py ./bin/vx_test_conformance --filter=*Canny*\
''')
def run(self):
try:
if len(sys.argv) < 2:
print("Missed executable path")
self.printUsage()
return 2
if sys.argv[1] in ['-h', '--help', '/?']:
self.printUsage()
return 0
self.launch_args = sys.argv[1:]
self.get_test_list()
self.launch_args = [a for a in self.launch_args if not a.startswith('--filter=')]
self.total_tests = len(self.tests)
print('#FOUND %d tests' % self.total_tests)
print('Test timeout=%s' % self.timeout)
print('')
sys.stdout.flush()
prev = 0
i = 0
for t in self.tests:
if (self.total_tests >= 500):
next = i * 100 / self.total_tests
if int(next) != prev:
print('# %02d%%' % next)
prev = next
i += 1
sys.stdout.flush()
sys.stderr.flush()
try:
self.run_test(t)
except KeyboardInterrupt:
break
except:
print(traceback.format_exc())
print('')
print('ALL DONE')
timestamp = datetime.datetime.now().strftime('%Y%m%d%H%M%S')
print('')
print('#REPORT: %s %s %d %d %d %d %d %d %s' % (
timestamp, self.testid,
self.total_tests,
self.total_disabled_tests,
self.total_started_tests,
self.total_completed_tests,
self.total_passed_tests,
self.total_failed_tests,
self.tests_version))
return 0 if (self.total_tests == (self.total_started_tests + self.total_disabled_tests) and self.total_failed_tests == 0) else 1
except:
print(traceback.format_exc())
if __name__ == "__main__":
sys.exit(TestRunner().run())
|
py
|
1a5e1eb2304558d5263cc8e5ba8e4c0839c7a892
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('tutor', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Album',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(max_length=100)),
('release_date', models.DateField()),
('num_stars', models.IntegerField()),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='Musician',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('first_name', models.CharField(max_length=50)),
('last_name', models.CharField(max_length=50)),
('instrument', models.CharField(max_length=100)),
],
options={
},
bases=(models.Model,),
),
migrations.AddField(
model_name='album',
name='artist',
field=models.ForeignKey(to='tutor.Musician'),
preserve_default=True,
),
]
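# Usage note (not part of the auto-generated migration): in a standard Django
# project this migration would be applied with the usual management command,
# e.g. `python manage.py migrate tutor`, assuming the app label 'tutor' from
# the dependency above.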
|
py
|
1a5e1f18ac965d658ddbe4ecbfda7341e3a1a30e
|
# -*- coding: utf-8 -*-
"""
Created on Tue Jun 20 22:14:07 2017
@author: YSu
"""
from pyomo.opt import SolverFactory
from CA_dispatch_equal_pathflows import model as m1
from CA_dispatchLP_equal_pathflows import model as m2
from pyomo.core import Var
from pyomo.core import Constraint
from pyomo.core import Param
from operator import itemgetter
import pandas as pd
import numpy as np
from datetime import datetime
import pyomo.environ as pyo
def sim(days):
instance = m1.create_instance('data.dat')
instance2 = m2.create_instance('data.dat')
instance2.dual = pyo.Suffix(direction=pyo.Suffix.IMPORT)
opt = SolverFactory("cplex")
H = instance.HorizonHours
D = int(H/24)
K=range(1,H+1)
#Space to store results
mwh_1=[]
mwh_2=[]
mwh_3=[]
on=[]
switch=[]
srsv=[]
nrsv=[]
solar=[]
wind=[]
flow=[]
Generator=[]
Duals=[]
System_cost = []
df_generators = pd.read_csv('generators.csv',header=0)
# forecast days
forecast_days = []
for f in range(1,D+1):
a = 'fd%d' % f
forecast_days.append(a)
pge_pool = 0
sce_pool = 0
#max here can be (1,358)
for day in range(1,days+1):
#load time series data
for z in instance.zones:
instance.GasPrice[z] = instance.SimGasPrice[z,day]
instance2.GasPrice[z] = instance.SimGasPrice[z,day]
for i in K:
instance.HorizonDemand[z,i] = instance.SimDemand[z,(day-1)*24+i]
instance.HorizonWind[z,i] = instance.SimWind[z,(day-1)*24+i]
instance.HorizonSolar[z,i] = instance.SimSolar[z,(day-1)*24+i]
instance.HorizonMustRun[z,i] = instance.SimMustRun[z,(day-1)*24+i]
instance2.HorizonDemand[z,i] = instance.SimDemand[z,(day-1)*24+i]
instance2.HorizonWind[z,i] = instance.SimWind[z,(day-1)*24+i]
instance2.HorizonSolar[z,i] = instance.SimSolar[z,(day-1)*24+i]
instance2.HorizonMustRun[z,i] = instance.SimMustRun[z,(day-1)*24+i]
for d in range(1,D+1):
instance.HorizonPath66_imports[d] = instance.SimPath66_imports[day-1+d]
instance.HorizonPath46_SCE_imports[d] = instance.SimPath46_SCE_imports[day-1+d]
instance.HorizonPath61_imports[d] = instance.SimPath61_imports[day-1+d]
instance.HorizonPath42_imports[d] = instance.SimPath42_imports[day-1+d]
instance.HorizonPath24_imports[d] = instance.SimPath24_imports[day-1+d]
instance.HorizonPath45_imports[d] = instance.SimPath45_imports[day-1+d]
instance2.HorizonPath66_imports[d] = instance2.SimPath66_imports[day-1+d]
instance2.HorizonPath46_SCE_imports[d] = instance2.SimPath46_SCE_imports[day-1+d]
instance2.HorizonPath61_imports[d] = instance2.SimPath61_imports[day-1+d]
instance2.HorizonPath42_imports[d] = instance2.SimPath42_imports[day-1+d]
instance2.HorizonPath24_imports[d] = instance2.SimPath24_imports[day-1+d]
instance2.HorizonPath45_imports[d] = instance2.SimPath45_imports[day-1+d]
pge = 0
sce = 0
for d in range(1,D+1):
fd = forecast_days[d-1]
sce = sce + instance.SimSCE_hydro[fd,day]
pge = pge + instance.SimPGE_valley_hydro[fd,day]
if day < 2:
instance.HorizonPGE_valley_hydro = pge
instance.HorizonSCE_hydro = sce
instance2.HorizonPGE_valley_hydro = pge
instance2.HorizonSCE_hydro = sce
else:
# amount of generation shifted to 1st day in previous model solution
pge_shift = np.sum(pgeH_first) - instance.SimPGE_valley_hydro['fd1',day-1]
sce_shift = np.sum(sceH_first) - instance.SimSCE_hydro['fd1',day-1]
# update "pool" storage
pge_pool = pge_pool - pge_shift
sce_pool = sce_pool - sce_shift
# correct for this difference in new amount to be scheduled, using "pooled" deficit/storage if there is any
instance.HorizonPGE_valley_hydro = np.max((0,pge + pge_pool))
instance.HorizonSCE_hydro = np.max((0,sce + sce_pool))
instance2.HorizonPGE_valley_hydro = np.max((0,pge + pge_pool))
instance2.HorizonSCE_hydro = np.max((0,sce + sce_pool))
# pge_deficit = np.max((0,pge_deficit - pge - np.sum(pgeH_first) + instance.SimPGE_valley_hydro['fd1',day-1]))
# sce_deficit = np.max((0,sce_deficit - sce - np.sum(sceH_first) + instance.SimSCE_hydro['fd1',day-1]))
for i in K:
instance.HorizonReserves[i] = instance.SimReserves[(day-1)*24+i]
instance2.HorizonReserves[i] = instance.SimReserves[(day-1)*24+i]
instance.HorizonPath42_exports[i] = instance.SimPath42_exports[(day-1)*24+i]
instance.HorizonPath24_exports[i] = instance.SimPath24_exports[(day-1)*24+i]
instance.HorizonPath45_exports[i] = instance.SimPath45_exports[(day-1)*24+i]
instance.HorizonPath66_exports[i] = instance.SimPath66_exports[(day-1)*24+i]
instance.HorizonPath46_SCE_minflow[i] = instance.SimPath46_SCE_imports_minflow[(day-1)*24+i]
instance.HorizonPath66_minflow[i] = instance.SimPath66_imports_minflow[(day-1)*24+i]
instance.HorizonPath42_minflow[i] = instance.SimPath42_imports_minflow[(day-1)*24+i]
instance.HorizonPath61_minflow[i] = instance.SimPath61_imports_minflow[(day-1)*24+i]
instance2.HorizonPath42_exports[i] = instance2.SimPath42_exports[(day-1)*24+i]
instance2.HorizonPath24_exports[i] = instance2.SimPath24_exports[(day-1)*24+i]
instance2.HorizonPath45_exports[i] = instance2.SimPath45_exports[(day-1)*24+i]
instance2.HorizonPath66_exports[i] = instance2.SimPath66_exports[(day-1)*24+i]
instance2.HorizonPath46_SCE_minflow[i] = instance2.SimPath46_SCE_imports_minflow[(day-1)*24+i]
instance2.HorizonPath66_minflow[i] = instance2.SimPath66_imports_minflow[(day-1)*24+i]
instance2.HorizonPath42_minflow[i] = instance2.SimPath42_imports_minflow[(day-1)*24+i]
instance2.HorizonPath61_minflow[i] = instance2.SimPath61_imports_minflow[(day-1)*24+i]
for d in range(1,D+1):
fd = forecast_days[d-1]
for j in range(1,25):
instance.HorizonPGE_valley_hydro_minflow[(d-1)*24+j] = instance.SimPGE_valley_hydro_minflow[fd,(day-1)*24+j]
instance.HorizonSCE_hydro_minflow[(d-1)*24+j] = instance.SimSCE_hydro_minflow[fd,(day-1)*24+j]
instance2.HorizonPGE_valley_hydro_minflow[(d-1)*24+j] = instance.SimPGE_valley_hydro_minflow[fd,(day-1)*24+j]
instance2.HorizonSCE_hydro_minflow[(d-1)*24+j] = instance.SimSCE_hydro_minflow[fd,(day-1)*24+j]
CAISO_result = opt.solve(instance,tee=True,symbolic_solver_labels=True)
instance.solutions.load_from(CAISO_result)
coal = 0
gas11 = 0
gas21 = 0
gas31 = 0
gas12 = 0
gas22 = 0
gas32 = 0
gas13 = 0
gas23 = 0
gas33 = 0
gas14 = 0
gas24 = 0
gas34 = 0
oil = 0
psh = 0
slack = 0
f_gas1 = 0
f_gas2 = 0
f_gas3 = 0
f_oil = 0
f_coal = 0
st = 0
sdgei = 0
scei = 0
pgei = 0
f = 0
for i in range(1,25):
for j in instance.Coal:
coal = coal + instance.mwh_1[j,i].value*(instance.seg1[j]*2 + instance.var_om[j]) + instance.mwh_2[j,i].value*(instance.seg2[j]*2 + instance.var_om[j]) + instance.mwh_3[j,i].value*(instance.seg3[j]*2 + instance.var_om[j])
for j in instance.Zone1Gas:
gas11 = gas11 + instance.mwh_1[j,i].value*(instance.seg1[j]*instance.GasPrice['PGE_valley'].value + instance.var_om[j])
gas21 = gas21 + instance.mwh_2[j,i].value*(instance.seg2[j]*instance.GasPrice['PGE_valley'].value + instance.var_om[j])
gas31 = gas31 + instance.mwh_3[j,i].value*(instance.seg3[j]*instance.GasPrice['PGE_valley'].value + instance.var_om[j])
for j in instance.Zone2Gas:
gas12 = gas12 + instance.mwh_1[j,i].value*(instance.seg1[j]*instance.GasPrice['PGE_bay'].value + instance.var_om[j])
gas22 = gas22 + instance.mwh_2[j,i].value*(instance.seg2[j]*instance.GasPrice['PGE_bay'].value + instance.var_om[j])
gas32 = gas32 + instance.mwh_3[j,i].value*(instance.seg3[j]*instance.GasPrice['PGE_bay'].value + instance.var_om[j])
for j in instance.Zone3Gas:
gas13 = gas13 + instance.mwh_1[j,i].value*(instance.seg1[j]*instance.GasPrice['SCE'].value + instance.var_om[j])
gas23 = gas23 + instance.mwh_2[j,i].value*(instance.seg2[j]*instance.GasPrice['SCE'].value + instance.var_om[j])
gas33 = gas33 + instance.mwh_3[j,i].value*(instance.seg3[j]*instance.GasPrice['SCE'].value + instance.var_om[j])
for j in instance.Zone4Gas:
gas14 = gas14 + instance.mwh_1[j,i].value*(instance.seg1[j]*instance.GasPrice['SDGE'].value + instance.var_om[j])
gas24 = gas24 + instance.mwh_2[j,i].value*(instance.seg2[j]*instance.GasPrice['SDGE'].value + instance.var_om[j])
gas34 = gas34 + instance.mwh_3[j,i].value*(instance.seg3[j]*instance.GasPrice['SDGE'].value + instance.var_om[j])
for j in instance.Oil:
oil = oil + instance.mwh_1[j,i].value*(instance.seg1[j]*20 + instance.var_om[j]) + instance.mwh_2[j,i].value*(instance.seg2[j]*20 + instance.var_om[j]) + instance.mwh_3[j,i].value*(instance.seg3[j]*20 + instance.var_om[j])
for j in instance.PSH:
psh = psh + instance.mwh_1[j,i].value*(instance.seg1[j]*10 + instance.var_om[j]) + instance.mwh_2[j,i].value*(instance.seg2[j]*10 + instance.var_om[j]) + instance.mwh_3[j,i].value*(instance.seg3[j]*10 + instance.var_om[j])
for j in instance.Slack:
slack = slack + instance.mwh_1[j,i].value*(instance.seg1[j]*2000 + instance.var_om[j]) + instance.mwh_2[j,i].value*(instance.seg2[j]*2000 + instance.var_om[j]) + instance.mwh_3[j,i].value*(instance.seg3[j]*2000 + instance.var_om[j])
for j in instance.Zone1Gas:
f_gas1 = f_gas1 + instance.no_load[j]*instance.on[j,i].value*2
for j in instance.Zone2Gas:
f_gas2 = f_gas2 + instance.no_load[j]*instance.on[j,i].value*2
for j in instance.Zone3Gas:
f_gas3 = f_gas3 + instance.no_load[j]*instance.on[j,i].value*2
for j in instance.Coal:
f_coal = f_coal + instance.no_load[j]*instance.on[j,i].value*2
for j in instance.Oil:
f_oil = f_oil + instance.no_load[j]*instance.on[j,i].value*2
for j in instance.Generators:
st = st + instance.st_cost[j]*instance.switch[j,i].value
for j in instance.WECCImportsSDGE:
sdgei =sdgei + instance.mwh_1[j,i].value*(14.5 + 2.76*instance.GasPrice['SDGE'].value) + instance.mwh_2[j,i].value*(14.5 + 2.76*instance.GasPrice['SDGE'].value) + instance.mwh_3[j,i].value*(14.5 + 2.76*instance.GasPrice['SDGE'].value)
for j in instance.WECCImportsSCE:
scei =scei + instance.mwh_1[j,i].value*(14.5 + 2.76*instance.GasPrice['SCE'].value) + instance.mwh_2[j,i].value*(14.5 + 2.76*instance.GasPrice['SCE'].value) + instance.mwh_3[j,i].value*(14.5 + 2.76*instance.GasPrice['SCE'].value)
for j in instance.WECCImportsPGEV:
pgei =pgei + instance.mwh_1[j,i].value*5 + instance.mwh_2[j,i].value*5 + instance.mwh_3[j,i].value*5
for s in instance.sources:
for k in instance.sinks:
f = f + instance.flow[s,k,i].value*instance.hurdle[s,k]
S = f + oil + coal + slack + psh + st + sdgei + scei + pgei + f_gas1 + f_gas2 + f_gas3 + f_oil + gas11 + gas21 + gas31 + gas12 + gas22 + gas32 + gas13 + gas23 + gas33 + gas14 + gas24 + gas34
System_cost.append(S)
for j in instance.Generators:
for t in K:
if instance.on[j,t] == 1:
instance2.on[j,t] = 1
instance2.on[j,t].fixed = True
else:
instance.on[j,t] = 0
instance2.on[j,t] = 0
instance2.on[j,t].fixed = True
if instance.switch[j,t] == 1:
instance2.switch[j,t] = 1
instance2.switch[j,t].fixed = True
else:
instance.switch[j,t] = 0
instance2.switch[j,t] = 0
instance2.switch[j,t].fixed = True
results = opt.solve(instance2,tee=True,symbolic_solver_labels=True)
instance2.solutions.load_from(results)
pgeH_first = []
sceH_first = []
p66_first = []
p46_first = []
p61_first = []
p42_first = []
p24_first = []
p45_first = []
print ("Duals")
for c in instance2.component_objects(Constraint, active=True):
# print (" Constraint",c)
cobject = getattr(instance2, str(c))
if str(c) in ['Bal1Constraint','Bal2Constraint','Bal3Constraint','Bal4Constraint']:
for index in cobject:
if int(index>0 and index<25):
# print (" Constraint",c)
Duals.append((str(c),index+((day-1)*24), instance2.dual[cobject[index]]))
# print (" ", index, instance2.dual[cobject[index]])
#The following section is for storing and sorting results
for v in instance.component_objects(Var, active=True):
varobject = getattr(instance, str(v))
a=str(v)
if a=='mwh_1':
for index in varobject:
name = index[0]
g = df_generators[df_generators['name']==name]
seg1 = g['seg1'].values
seg1 = seg1[0]
if int(index[1]>0 and index[1]<25):
if index[0] in instance.Zone1Generators:
gas_price = instance.GasPrice['PGE_valley'].value
if index[0] in instance.Gas:
marginal_cost = seg1*gas_price
mwh_1.append((index[0],index[1]+((day-1)*24),varobject[index].value,'PGE_valley','Gas',marginal_cost))
elif index[0] in instance.Coal:
marginal_cost = seg1*2
mwh_1.append((index[0],index[1]+((day-1)*24),varobject[index].value,'PGE_valley','Coal',marginal_cost))
elif index[0] in instance.Oil:
marginal_cost = seg1*20
mwh_1.append((index[0],index[1]+((day-1)*24),varobject[index].value,'PGE_valley','Oil',marginal_cost))
elif index[0] in instance.PSH:
marginal_cost = 10
mwh_1.append((index[0],index[1]+((day-1)*24),varobject[index].value,'PGE_valley','PSH',marginal_cost))
elif index[0] in instance.Slack:
marginal_cost = 700
mwh_1.append((index[0],index[1]+((day-1)*24),varobject[index].value,'PGE_valley','Slack',marginal_cost))
elif index[0] in instance.Hydro:
marginal_cost = 0
mwh_1.append((index[0],index[1]+((day-1)*24),varobject[index].value,'PGE_valley','Hydro',marginal_cost))
pgeH_first.append(varobject[index].value)
elif index[0] in instance.Zone2Generators:
gas_price = instance.GasPrice['PGE_bay'].value
if index[0] in instance.Gas:
marginal_cost = seg1*gas_price
mwh_1.append((index[0],index[1]+((day-1)*24),varobject[index].value,'PGE_bay','Gas',marginal_cost))
elif index[0] in instance.Coal:
marginal_cost = seg1*2
mwh_1.append((index[0],index[1]+((day-1)*24),varobject[index].value,'PGE_bay','Coal',marginal_cost))
elif index[0] in instance.Oil:
marginal_cost = seg1*20
mwh_1.append((index[0],index[1]+((day-1)*24),varobject[index].value,'PGE_bay','Oil',marginal_cost))
elif index[0] in instance.PSH:
marginal_cost = 10
mwh_1.append((index[0],index[1]+((day-1)*24),varobject[index].value,'PGE_bay','PSH',marginal_cost))
elif index[0] in instance.Slack:
marginal_cost = 700
mwh_1.append((index[0],index[1]+((day-1)*24),varobject[index].value,'PGE_bay','Slack',marginal_cost))
elif index[0] in instance.Zone3Generators:
gas_price = instance.GasPrice['SCE'].value
if index[0] in instance.Gas:
marginal_cost = seg1*gas_price
mwh_1.append((index[0],index[1]+((day-1)*24),varobject[index].value,'SCE','Gas',marginal_cost))
elif index[0] in instance.Coal:
marginal_cost = seg1*2
mwh_1.append((index[0],index[1]+((day-1)*24),varobject[index].value,'SCE','Coal',marginal_cost))
elif index[0] in instance.Oil:
marginal_cost = seg1*20
mwh_1.append((index[0],index[1]+((day-1)*24),varobject[index].value,'SCE','Oil',marginal_cost))
elif index[0] in instance.PSH:
marginal_cost = 10
mwh_1.append((index[0],index[1]+((day-1)*24),varobject[index].value,'SCE','PSH',marginal_cost))
elif index[0] in instance.Slack:
marginal_cost = 700
mwh_1.append((index[0],index[1]+((day-1)*24),varobject[index].value,'SCE','Slack',marginal_cost))
elif index[0] in instance.Hydro:
marginal_cost = 0
mwh_1.append((index[0],index[1]+((day-1)*24),varobject[index].value,'SCE','Hydro',marginal_cost))
sceH_first.append(varobject[index].value)
elif index[0] in instance.Zone4Generators:
gas_price = instance.GasPrice['SDGE'].value
if index[0] in instance.Gas:
marginal_cost = seg1*gas_price
mwh_1.append((index[0],index[1]+((day-1)*24),varobject[index].value,'SDGE','Gas',marginal_cost))
elif index[0] in instance.Coal:
marginal_cost = seg1*2
mwh_1.append((index[0],index[1]+((day-1)*24),varobject[index].value,'SDGE','Coal',marginal_cost))
elif index[0] in instance.Oil:
marginal_cost = seg1*20
mwh_1.append((index[0],index[1]+((day-1)*24),varobject[index].value,'SDGE','Oil',marginal_cost))
elif index[0] in instance.PSH:
marginal_cost = 10
mwh_1.append((index[0],index[1]+((day-1)*24),varobject[index].value,'SDGE','PSH',marginal_cost))
elif index[0] in instance.Slack:
marginal_cost = 700
mwh_1.append((index[0],index[1]+((day-1)*24),varobject[index].value,'SDGE','Slack',marginal_cost))
elif index[0] in instance.WECCImportsSDGE:
gas_price = instance.GasPrice['SDGE'].value
marginal_cost = 14.5+2.76*gas_price
mwh_1.append((index[0],index[1]+((day-1)*24),varobject[index].value,'SDGE','imports',marginal_cost))
if index[0] == 'P45I':
p45_first.append(varobject[index].value)
elif index[0] in instance.WECCImportsSCE:
gas_price = instance.GasPrice['SCE'].value
marginal_cost = 14.5+2.76*gas_price
mwh_1.append((index[0],index[1]+((day-1)*24),varobject[index].value,'SCE','imports',marginal_cost))
if index[0] == 'P46I_SCE':
p46_first.append(varobject[index].value)
elif index[0] == 'P42I':
p42_first.append(varobject[index].value)
elif index[0] == 'P61I':
p61_first.append(varobject[index].value)
elif index[0] in instance.WECCImportsPGEV:
marginal_cost = 5
mwh_1.append((index[0],index[1]+((day-1)*24),varobject[index].value,'PGE_valley','imports',marginal_cost))
if index[0] == 'P66I':
p66_first.append(varobject[index].value)
elif index[0] == 'P24I':
p24_first.append(varobject[index].value)
if a=='mwh_2':
for index in varobject:
name = index[0]
g = df_generators[df_generators['name']==name]
seg2 = g['seg2'].values
seg2 = seg2[0]
if int(index[1]>0 and index[1]<25):
if index[0] in instance.Zone1Generators:
gas_price = instance.GasPrice['PGE_valley'].value
if index[0] in instance.Gas:
marginal_cost = seg2*gas_price
mwh_2.append((index[0],index[1]+((day-1)*24),varobject[index].value,'PGE_valley','Gas',marginal_cost))
elif index[0] in instance.Coal:
marginal_cost = seg2*2
mwh_2.append((index[0],index[1]+((day-1)*24),varobject[index].value,'PGE_valley','Coal',marginal_cost))
elif index[0] in instance.Oil:
marginal_cost = seg2*20
mwh_2.append((index[0],index[1]+((day-1)*24),varobject[index].value,'PGE_valley','Oil',marginal_cost))
elif index[0] in instance.PSH:
marginal_cost = 10
mwh_2.append((index[0],index[1]+((day-1)*24),varobject[index].value,'PGE_valley','PSH',marginal_cost))
elif index[0] in instance.Slack:
marginal_cost = 700
mwh_2.append((index[0],index[1]+((day-1)*24),varobject[index].value,'PGE_valley','Slack',marginal_cost))
elif index[0] in instance.Hydro:
marginal_cost = 0
mwh_2.append((index[0],index[1]+((day-1)*24),varobject[index].value,'PGE_valley','Hydro',marginal_cost))
pgeH_first.append(varobject[index].value)
elif index[0] in instance.Zone2Generators:
gas_price = instance.GasPrice['PGE_bay'].value
if index[0] in instance.Gas:
marginal_cost = seg2*gas_price
mwh_2.append((index[0],index[1]+((day-1)*24),varobject[index].value,'PGE_bay','Gas',marginal_cost))
elif index[0] in instance.Coal:
marginal_cost = seg2*2
mwh_2.append((index[0],index[1]+((day-1)*24),varobject[index].value,'PGE_bay','Coal',marginal_cost))
elif index[0] in instance.Oil:
marginal_cost = seg2*20
mwh_2.append((index[0],index[1]+((day-1)*24),varobject[index].value,'PGE_bay','Oil',marginal_cost))
elif index[0] in instance.PSH:
marginal_cost = 10
mwh_2.append((index[0],index[1]+((day-1)*24),varobject[index].value,'PGE_bay','PSH',marginal_cost))
elif index[0] in instance.Slack:
marginal_cost = 700
mwh_2.append((index[0],index[1]+((day-1)*24),varobject[index].value,'PGE_bay','Slack',marginal_cost))
elif index[0] in instance.Zone3Generators:
gas_price = instance.GasPrice['SCE'].value
if index[0] in instance.Gas:
marginal_cost = seg2*gas_price
mwh_2.append((index[0],index[1]+((day-1)*24),varobject[index].value,'SCE','Gas',marginal_cost))
elif index[0] in instance.Coal:
marginal_cost = seg2*2
mwh_2.append((index[0],index[1]+((day-1)*24),varobject[index].value,'SCE','Coal',marginal_cost))
elif index[0] in instance.Oil:
marginal_cost = seg2*20
mwh_2.append((index[0],index[1]+((day-1)*24),varobject[index].value,'SCE','Oil',marginal_cost))
elif index[0] in instance.PSH:
marginal_cost = 10
mwh_2.append((index[0],index[1]+((day-1)*24),varobject[index].value,'SCE','PSH',marginal_cost))
elif index[0] in instance.Slack:
marginal_cost = 700
mwh_2.append((index[0],index[1]+((day-1)*24),varobject[index].value,'SCE','Slack',marginal_cost))
elif index[0] in instance.Hydro:
marginal_cost = 0
mwh_2.append((index[0],index[1]+((day-1)*24),varobject[index].value,'SCE','Hydro',marginal_cost))
sceH_first.append(varobject[index].value)
elif index[0] in instance.Zone4Generators:
gas_price = instance.GasPrice['SDGE'].value
if index[0] in instance.Gas:
marginal_cost = seg2*gas_price
mwh_2.append((index[0],index[1]+((day-1)*24),varobject[index].value,'SDGE','Gas',marginal_cost))
elif index[0] in instance.Coal:
marginal_cost = seg2*2
mwh_2.append((index[0],index[1]+((day-1)*24),varobject[index].value,'SDGE','Coal',marginal_cost))
elif index[0] in instance.Oil:
marginal_cost = seg2*20
mwh_2.append((index[0],index[1]+((day-1)*24),varobject[index].value,'SDGE','Oil',marginal_cost))
elif index[0] in instance.PSH:
marginal_cost = 10
mwh_2.append((index[0],index[1]+((day-1)*24),varobject[index].value,'SDGE','PSH',marginal_cost))
elif index[0] in instance.Slack:
marginal_cost = 700
mwh_2.append((index[0],index[1]+((day-1)*24),varobject[index].value,'SDGE','Slack',marginal_cost))
elif index[0] in instance.WECCImportsSDGE:
gas_price = instance.GasPrice['SDGE'].value
marginal_cost = 14.5+2.76*gas_price
mwh_2.append((index[0],index[1]+((day-1)*24),varobject[index].value,'SDGE','imports',marginal_cost))
if index[0] == 'P45I':
p45_first.append(varobject[index].value)
elif index[0] in instance.WECCImportsSCE:
gas_price = instance.GasPrice['SCE'].value
marginal_cost = 14.5+2.76*gas_price
mwh_2.append((index[0],index[1]+((day-1)*24),varobject[index].value,'SCE','imports',marginal_cost))
if index[0] == 'P46I_SCE':
p46_first.append(varobject[index].value)
elif index[0] == 'P42I':
p42_first.append(varobject[index].value)
elif index[0] == 'P61I':
p61_first.append(varobject[index].value)
elif index[0] in instance.WECCImportsPGEV:
marginal_cost = 5
mwh_2.append((index[0],index[1]+((day-1)*24),varobject[index].value,'PGE_valley','imports',marginal_cost))
if index[0] == 'P66I':
p66_first.append(varobject[index].value)
elif index[0] == 'P24I':
p24_first.append(varobject[index].value)
if a=='mwh_3':
for index in varobject:
name = index[0]
g = df_generators[df_generators['name']==name]
seg3 = g['seg3'].values
seg3 = seg3[0]
if int(index[1]>0 and index[1]<25):
if index[0] in instance.Zone1Generators:
gas_price = instance.GasPrice['PGE_valley'].value
if index[0] in instance.Gas:
marginal_cost = seg3*gas_price
mwh_3.append((index[0],index[1]+((day-1)*24),varobject[index].value,'PGE_valley','Gas',marginal_cost))
elif index[0] in instance.Coal:
marginal_cost = seg3*2
mwh_3.append((index[0],index[1]+((day-1)*24),varobject[index].value,'PGE_valley','Coal',marginal_cost))
elif index[0] in instance.Oil:
marginal_cost = seg3*20
mwh_3.append((index[0],index[1]+((day-1)*24),varobject[index].value,'PGE_valley','Oil',marginal_cost))
elif index[0] in instance.PSH:
marginal_cost = 10
mwh_3.append((index[0],index[1]+((day-1)*24),varobject[index].value,'PGE_valley','PSH',marginal_cost))
elif index[0] in instance.Slack:
marginal_cost = 700
mwh_3.append((index[0],index[1]+((day-1)*24),varobject[index].value,'PGE_valley','Slack',marginal_cost))
elif index[0] in instance.Hydro:
marginal_cost = 0
mwh_3.append((index[0],index[1]+((day-1)*24),varobject[index].value,'PGE_valley','Hydro',marginal_cost))
pgeH_first.append(varobject[index].value)
elif index[0] in instance.Zone2Generators:
gas_price = instance.GasPrice['PGE_bay'].value
if index[0] in instance.Gas:
marginal_cost = seg3*gas_price
mwh_3.append((index[0],index[1]+((day-1)*24),varobject[index].value,'PGE_bay','Gas',marginal_cost))
elif index[0] in instance.Coal:
marginal_cost = seg3*2
mwh_3.append((index[0],index[1]+((day-1)*24),varobject[index].value,'PGE_bay','Coal',marginal_cost))
elif index[0] in instance.Oil:
marginal_cost = seg3*20
mwh_3.append((index[0],index[1]+((day-1)*24),varobject[index].value,'PGE_bay','Oil',marginal_cost))
elif index[0] in instance.PSH:
marginal_cost = 10
mwh_3.append((index[0],index[1]+((day-1)*24),varobject[index].value,'PGE_bay','PSH',marginal_cost))
elif index[0] in instance.Slack:
marginal_cost = 700
mwh_3.append((index[0],index[1]+((day-1)*24),varobject[index].value,'PGE_bay','Slack',marginal_cost))
elif index[0] in instance.Zone3Generators:
gas_price = instance.GasPrice['SCE'].value
if index[0] in instance.Gas:
marginal_cost = seg3*gas_price
mwh_3.append((index[0],index[1]+((day-1)*24),varobject[index].value,'SCE','Gas',marginal_cost))
elif index[0] in instance.Coal:
marginal_cost = seg3*2
mwh_3.append((index[0],index[1]+((day-1)*24),varobject[index].value,'SCE','Coal',marginal_cost))
elif index[0] in instance.Oil:
marginal_cost = seg3*20
mwh_3.append((index[0],index[1]+((day-1)*24),varobject[index].value,'SCE','Oil',marginal_cost))
elif index[0] in instance.PSH:
marginal_cost = 10
mwh_3.append((index[0],index[1]+((day-1)*24),varobject[index].value,'SCE','PSH',marginal_cost))
elif index[0] in instance.Slack:
marginal_cost = 700
mwh_3.append((index[0],index[1]+((day-1)*24),varobject[index].value,'SCE','Slack',marginal_cost))
elif index[0] in instance.Hydro:
marginal_cost = 0
mwh_3.append((index[0],index[1]+((day-1)*24),varobject[index].value,'SCE','Hydro',marginal_cost))
sceH_first.append(varobject[index].value)
elif index[0] in instance.Zone4Generators:
gas_price = instance.GasPrice['SDGE'].value
if index[0] in instance.Gas:
marginal_cost = seg3*gas_price
mwh_3.append((index[0],index[1]+((day-1)*24),varobject[index].value,'SDGE','Gas',marginal_cost))
elif index[0] in instance.Coal:
marginal_cost = seg3*2
mwh_3.append((index[0],index[1]+((day-1)*24),varobject[index].value,'SDGE','Coal',marginal_cost))
elif index[0] in instance.Oil:
marginal_cost = seg3*20
mwh_3.append((index[0],index[1]+((day-1)*24),varobject[index].value,'SDGE','Oil',marginal_cost))
elif index[0] in instance.PSH:
marginal_cost = 10
mwh_3.append((index[0],index[1]+((day-1)*24),varobject[index].value,'SDGE','PSH',marginal_cost))
elif index[0] in instance.Slack:
marginal_cost = 700
mwh_3.append((index[0],index[1]+((day-1)*24),varobject[index].value,'SDGE','Slack',marginal_cost))
elif index[0] in instance.WECCImportsSDGE:
gas_price = instance.GasPrice['SDGE'].value
marginal_cost = 14.5+2.76*gas_price
mwh_3.append((index[0],index[1]+((day-1)*24),varobject[index].value,'SDGE','imports',marginal_cost))
if index[0] == 'P45I':
p45_first.append(varobject[index].value)
elif index[0] in instance.WECCImportsSCE:
gas_price = instance.GasPrice['SCE'].value
marginal_cost = 14.5+2.76*gas_price
mwh_3.append((index[0],index[1]+((day-1)*24),varobject[index].value,'SCE','imports',marginal_cost))
if index[0] == 'P46I_SCE':
p46_first.append(varobject[index].value)
elif index[0] == 'P42I':
p42_first.append(varobject[index].value)
elif index[0] == 'P61I':
p61_first.append(varobject[index].value)
elif index[0] in instance.WECCImportsPGEV:
marginal_cost = 5
mwh_3.append((index[0],index[1]+((day-1)*24),varobject[index].value,'PGE_valley','imports',marginal_cost))
if index[0] == 'P66I':
p66_first.append(varobject[index].value)
elif index[0] == 'P24I':
p24_first.append(varobject[index].value)
if a=='on':
for index in varobject:
if int(index[1]>0 and index[1]<25):
if index[0] in instance.Zone1Generators:
on.append((index[0],index[1]+((day-1)*24),varobject[index].value,'PGE_valley'))
elif index[0] in instance.Zone2Generators:
on.append((index[0],index[1]+((day-1)*24),varobject[index].value,'PGE_bay'))
elif index[0] in instance.Zone3Generators:
on.append((index[0],index[1]+((day-1)*24),varobject[index].value,'SCE'))
elif index[0] in instance.Zone4Generators:
on.append((index[0],index[1]+((day-1)*24),varobject[index].value,'SDGE'))
if a=='switch':
for index in varobject:
if int(index[1]>0 and index[1]<25):
if index[0] in instance.Zone1Generators:
switch.append((index[0],index[1]+((day-1)*24),varobject[index].value,'PGE_valley'))
elif index[0] in instance.Zone2Generators:
switch.append((index[0],index[1]+((day-1)*24),varobject[index].value,'PGE_bay'))
elif index[0] in instance.Zone3Generators:
switch.append((index[0],index[1]+((day-1)*24),varobject[index].value,'SCE'))
elif index[0] in instance.Zone4Generators:
switch.append((index[0],index[1]+((day-1)*24),varobject[index].value,'SDGE'))
if a=='srsv':
for index in varobject:
if int(index[1]>0 and index[1]<25):
if index[0] in instance.Zone1Generators:
srsv.append((index[0],index[1]+((day-1)*24),varobject[index].value,'PGE_valley'))
elif index[0] in instance.Zone2Generators:
srsv.append((index[0],index[1]+((day-1)*24),varobject[index].value,'PGE_bay'))
elif index[0] in instance.Zone3Generators:
srsv.append((index[0],index[1]+((day-1)*24),varobject[index].value,'SCE'))
elif index[0] in instance.Zone4Generators:
srsv.append((index[0],index[1]+((day-1)*24),varobject[index].value,'SDGE'))
if a=='nrsv':
for index in varobject:
if int(index[1]>0 and index[1]<25):
if index[0] in instance.Zone1Generators:
nrsv.append((index[0],index[1]+((day-1)*24),varobject[index].value,'PGE_valley'))
elif index[0] in instance.Zone2Generators:
nrsv.append((index[0],index[1]+((day-1)*24),varobject[index].value,'PGE_bay'))
elif index[0] in instance.Zone3Generators:
nrsv.append((index[0],index[1]+((day-1)*24),varobject[index].value,'SCE'))
elif index[0] in instance.Zone4Generators:
nrsv.append((index[0],index[1]+((day-1)*24),varobject[index].value,'SDGE'))
if a=='solar':
for index in varobject:
if int(index[1]>0 and index[1]<25):
solar.append((index[0],index[1]+((day-1)*24),varobject[index].value))
if a=='wind':
for index in varobject:
if int(index[1]>0 and index[1]<25):
wind.append((index[0],index[1]+((day-1)*24),varobject[index].value))
if a=='flow':
for index in varobject:
if int(index[2]>0 and index[2]<25):
flow.append((index[0],index[1],index[2]+((day-1)*24),varobject[index].value))
#
#
for j in instance.Generators:
if instance.on[j,24] == 1:
instance.on[j,0] = 1
else:
instance.on[j,0] = 0
instance.on[j,0].fixed = True
if instance.mwh_1[j,24].value <=0 and instance.mwh_1[j,24].value>= -0.0001:
newval_1=0
else:
newval_1=instance.mwh_1[j,24].value
instance.mwh_1[j,0] = newval_1
instance.mwh_1[j,0].fixed = True
if instance.mwh_2[j,24].value <=0 and instance.mwh_2[j,24].value>= -0.0001:
newval=0
else:
newval=instance.mwh_2[j,24].value
if instance.mwh_3[j,24].value <=0 and instance.mwh_3[j,24].value>= -0.0001:
newval2=0
else:
newval2=instance.mwh_3[j,24].value
instance.mwh_2[j,0] = newval
instance.mwh_2[j,0].fixed = True
instance.mwh_3[j,0] = newval2
instance.mwh_3[j,0].fixed = True
if instance.switch[j,24] == 1:
instance.switch[j,0] = 1
else:
instance.switch[j,0] = 0
instance.switch[j,0].fixed = True
if instance.srsv[j,24].value <=0 and instance.srsv[j,24].value>= -0.0001:
newval_srsv=0
else:
newval_srsv=instance.srsv[j,24].value
instance.srsv[j,0] = newval_srsv
instance.srsv[j,0].fixed = True
if instance.nrsv[j,24].value <=0 and instance.nrsv[j,24].value>= -0.0001:
newval_nrsv=0
else:
newval_nrsv=instance.nrsv[j,24].value
instance.nrsv[j,0] = newval_nrsv
instance.nrsv[j,0].fixed = True
#
print(day)
#
mwh_1_pd=pd.DataFrame(mwh_1,columns=('Generator','Time','Value','Zones','Type','$/MWh'))
mwh_2_pd=pd.DataFrame(mwh_2,columns=('Generator','Time','Value','Zones','Type','$/MWh'))
mwh_3_pd=pd.DataFrame(mwh_3,columns=('Generator','Time','Value','Zones','Type','$/MWh'))
on_pd=pd.DataFrame(on,columns=('Generator','Time','Value','Zones'))
switch_pd=pd.DataFrame(switch,columns=('Generator','Time','Value','Zones'))
srsv_pd=pd.DataFrame(srsv,columns=('Generator','Time','Value','Zones'))
nrsv_pd=pd.DataFrame(nrsv,columns=('Generator','Time','Value','Zones'))
solar_pd=pd.DataFrame(solar,columns=('Zone','Time','Value'))
wind_pd=pd.DataFrame(wind,columns=('Zone','Time','Value'))
flow_pd=pd.DataFrame(flow,columns=('Source','Sink','Time','Value'))
shadow_price=pd.DataFrame(Duals,columns=('Constraint','Time','Value'))
objective = pd.DataFrame(System_cost)
flow_pd.to_csv('flow.csv')
mwh_1_pd.to_csv('mwh_1.csv')
mwh_2_pd.to_csv('mwh_2.csv')
mwh_3_pd.to_csv('mwh_3.csv')
on_pd.to_csv('on.csv')
switch_pd.to_csv('switch.csv')
srsv_pd.to_csv('srsv.csv')
nrsv_pd.to_csv('nrsv.csv')
solar_pd.to_csv('solar_out.csv')
wind_pd.to_csv('wind_out.csv')
shadow_price.to_csv('shadow_price.csv')
objective.to_csv('obj_function.csv')
return None
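# --- Illustrative sketch (not part of the original dispatch model) ---
# The inline comments in the day loop describe how hydro energy that the
# previous solve shifted onto (or off) its first day is carried through a
# running "pool" before the next horizon is scheduled. The toy function
# below reproduces only that bookkeeping; the names and numbers are made-up
# assumptions for illustration, not model data.
def _illustrate_hydro_pool_carryover():
    pool = 0.0                    # running surplus/deficit carried between days
    scheduled_first_day = 95.0    # energy the previous solve placed on its first day
    forecast_first_day = 100.0    # energy originally forecast for that same day
    new_forecast_total = 400.0    # total forecast energy for the new multi-day horizon
    # generation shifted onto (positive) or off (negative) the first day last time
    shift = scheduled_first_day - forecast_first_day
    pool = pool - shift
    # the new horizon target is corrected by the pooled surplus/deficit, floored at zero,
    # mirroring np.max((0, pge + pge_pool)) in sim() above
    horizon_target = np.max((0, new_forecast_total + pool))
    return pool, horizon_target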
|
py
|
1a5e204c1a088454b04e7501197e2868a35c1456
|
import _plotly_utils.basevalidators
class UidValidator(_plotly_utils.basevalidators.StringValidator):
def __init__(self, plotly_name="uid", parent_name="parcoords", **kwargs):
super(UidValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "plot"),
**kwargs
)
|
py
|
1a5e20a7b9349079b060b5317317256e9aa24b16
|
#!/usr/bin/env python
"""MODULE DOCSTRING WILL BE DYNAMICALLY OVERRIDED."""
from argparse import ArgumentParser
from functools import wraps
import sys
from secret import twitter_instance
from twmods import (EPILOG, output)
DESCRIPTION = "Demonstrate Twitter's GET help/xxx endpoints."
USAGE = """
twhelp.py [--version] [--help]
twhelp.py configuration | languages | privacy | tos
"""
# pylint: disable=redefined-builtin
__doc__ = '\n'.join((DESCRIPTION, USAGE, EPILOG))
__version__ = '1.0.2'
def parse_args(args):
"""Parse the command line parameters."""
root_parser = ArgumentParser(
description=DESCRIPTION,
epilog=EPILOG,
usage=USAGE)
root_parser.add_argument(
'--version',
action='version',
version=__version__)
commands = (
dict(func=request_help_configuration,
command='help/configuration',
aliases=['configuration', 'config'],
help='print the current configuration used by Twitter'),
dict(func=request_help_languages,
command='help/languages',
aliases=['languages', 'lang'],
help='print the list of languages supported by Twitter'),
dict(func=request_help_privacy,
command='help/privacy',
aliases=['privacy'],
help='print Twitter\'s Privacy Policy'),
dict(func=request_help_tos,
command='help/tos',
aliases=['tos'],
help='print Twitter Terms of Service'))
subparsers = root_parser.add_subparsers(help='commands')
for cmd in commands:
parser = subparsers.add_parser(
cmd['command'],
aliases=cmd['aliases'],
help=cmd['help'])
parser.set_defaults(func=cmd['func'])
return root_parser.parse_args(args=args or ('--help',))
def request_decorator(request):
"""Decorate a function that returns an endpoint."""
@wraps(request)
def request_wrapper(stdout, stderr):
"""Output the response received from Twitter."""
output(request(twitter_instance())(), stdout)
return request_wrapper
@request_decorator
def request_help_configuration(twhandler):
"""Return the handler for GET help/configuration."""
return twhandler.help.configuration
@request_decorator
def request_help_languages(twhandler):
"""Return the handler for GET help/languages."""
return twhandler.help.languages
@request_decorator
def request_help_privacy(twhandler):
"""Return the handler for GET help/privacy."""
return twhandler.help.privacy
@request_decorator
def request_help_tos(twhandler):
"""Return the handler for GET help/tos."""
return twhandler.help.tos
def run(args, stdout=sys.stdout, stderr=sys.stderr):
args.func(stdout, stderr)
def main(args=sys.argv[1:]):
sys.exit(run(parse_args(args)))
if __name__ == '__main__':
main()
|
py
|
1a5e20a887bc02fe7a31af79d8485b931afe5408
|
# -*- coding: utf-8 -*-
from openerp import tools
from openerp import models,fields,api
from openerp.tools.translate import _
class is_res_partner(models.Model):
_name='is.res.partner'
_order='partner_id'
_auto = False
partner_id = fields.Many2one('res.partner' , 'Fournisseur')
segment_id = fields.Many2one('is.segment.achat', 'Segment')
cde_ouverte_id = fields.Many2one('is.cde.ouverte.fournisseur', 'Commande prévisionnelle')
cde_ferme_cadencee_id = fields.Many2one('is.cde.ferme.cadencee', 'Commande ferme cadencée')
supplier = fields.Boolean('Est un fournisseur')
customer = fields.Boolean('Est un client')
def init(self, cr):
tools.drop_view_if_exists(cr, 'is_res_partner')
cr.execute("""
CREATE OR REPLACE view is_res_partner AS (
select
rp.id,
rp.id partner_id,
rp.is_segment_achat segment_id,
rp.supplier,
rp.customer,
isa.name,
(
select icof.id
from is_cde_ouverte_fournisseur icof
where icof.partner_id=rp.id
limit 1
) cde_ouverte_id,
(
select icfc.id
from is_cde_ferme_cadencee icfc
where icfc.partner_id=rp.id
limit 1
) cde_ferme_cadencee_id
from res_partner rp left outer join is_segment_achat isa on rp.is_segment_achat=isa.id
where is_company='t' and active='t' and isa.name not in ('frais généraux','mouliste','Transport')
)
""")
|
py
|
1a5e20d0b4fec6616a981aeca3434fec2c6cfc4b
|
# coding: utf-8
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from .uri import ApiUri
from .utils import method_name
from .utils import check_uri_security
from .errors import api_server_error
from .errors import api_response_error
from .errors import ParameterRequiredError
class Client(object):
""" API Client for the Zaif REST API.
Entry point for making requests to the Zaif REST API.
Provides helper methods for common API endpoints, as well as niceties around response handling.
Any errors will be raised as exceptions. These exceptions will always be subclasses of `zaif.errors.ZaifError`. HTTP-related errors will be subclasses of `zaif.errors.APIServerError`, and errors in responses will be subclasses of `zaif.errors.APIResponseError`.
Full API docs, including descriptions of each API and its parameters, are available here: http://techbureau-api-document.readthedocs.io/ja/latest/index.html
"""
BASE_API_URI = 'https://api.zaif.jp/'
def __init__(self,key,secret,base_api_uri=None):
if not key:
raise ValueError("Missing API 'key'")
if not secret:
raise ValueError("Missing API 'secret'")
self._key = key
self._secret = secret
# Allow passing in a different API base and warn if it is insecure.
self.BASE_API_URI = check_uri_security(base_api_uri or self.BASE_API_URI)
self._api_uri = ApiUri(key,secret,self.BASE_API_URI)
def _handle_response(self,response):
"""
Internal helper for handling API responses from the Zaif server. Raises the appropriate server errors when response is not 200; otherwise, parses the response.
"""
if response.status_code != 200:
raise api_server_error(response)
return self._parse_response(response)
def _parse_response(self,response):
"""
Returns the json data for PUBLIC API and FUTURES API responses.
For TRADING API and LEVERAGE API responses, returns the json data if the call succeeded; otherwise raises APIResponseError.
"""
response_json = response.json()
if isinstance(response_json,dict):
success = response_json.pop('success',None)
if success==1:
return response_json['return']
elif success==0:
raise api_response_error(response_json)
return response_json
def _check_req_params(self,req_params,params):
"""
Internal helper to check if all required parameters for the method have been provided. Raises ParameterRequiredError if any of the required parameters is missing.
"""
if not all(req_p in params for req_p in req_params):
raise ParameterRequiredError('Missing required parameter(s) %s' % req_params)
# --------------------
# PUBLIC API
# --------------------
def _public(self,*dirs):
"""
Helper method to execute get request to public API URI
"""
return self._api_uri.get('api','1',*dirs)
def get_currencies(self):
response = self._public('currencies','all')
return self._handle_response(response)
def get_currency(self,currency):
response = self._public('currencies',currency)
return self._handle_response(response)
def get_currency_pairs(self):
response = self._public('currency_pairs','all')
return self._handle_response(response)
def get_currency_pair(self,currency_pair):
response = self._public('currency_pairs',currency_pair)
return self._handle_response(response)
def get_last_price(self,currency_pair):
response = self._public('last_price',currency_pair)
return self._handle_response(response)
def get_ticker(self,currency_pair):
response = self._public('ticker',currency_pair)
return self._handle_response(response)
def get_trades(self,currency_pair):
response = self._public('trades',currency_pair)
return self._handle_response(response)
def get_depth(self,currency_pair):
response = self._public('depth',currency_pair)
return self._handle_response(response)
# --------------------
# TRADING API
# --------------------
def _trading(self,func_name,**params):
"""
Helper method to execute post request to trading API URI
"""
return self._api_uri.post(func_name,'tapi',**params)
def get_info(self):
response = self._trading(method_name())
return self._handle_response(response)
def get_info2(self):
response = self._trading(method_name())
return self._handle_response(response)
def get_personal_info(self):
response = self._trading(method_name())
return self._handle_response(response)
def get_id_info(self):
response = self._trading(method_name())
return self._handle_response(response)
def get_trade_history(self,**params):
response = self._trading('trade_history',**params)
return self._handle_response(response)
def get_active_orders(self,**params):
response = self._trading('active_orders',**params)
return self._handle_response(response)
def trade(self,**params):
req_params = ['currency_pair','action','price','amount']
self._check_req_params(req_params,params)
response = self._trading(method_name(),**params)
return self._handle_response(response)
def buy(self,**params):
return self.trade(action='bid',**params)
def sell(self,**params):
return self.trade(action='ask',**params)
def cancel_order(self,**params):
req_params = ['order_id']
self._check_req_params(req_params,params)
response = self._trading(method_name(),**params)
return self._handle_response(response)
def withdraw(self,**params):
req_params = ['currency','address','amount']
self._check_req_params(req_params,params)
response = self._trading(method_name(),**params)
return self._handle_response(response)
def get_deposit_history(self,**params):
req_params = ['currency']
self._check_req_params(req_params,params)
response = self._trading('deposit_history',**params)
return self._handle_response(response)
def get_withdraw_history(self,**params):
req_params = ['currency']
self._check_req_params(req_params,params)
response = self._trading('withdraw_history',**params)
return self._handle_response(response)
# --------------------
# FUTURES API
# --------------------
def _futures(self,*dirs):
"""
Helper method to execute get request to futures API URI
"""
return self._api_uri.get('fapi','1',*dirs)
def get_groups(self):
response = self._futures('groups','all')
return self._handle_response(response)
def get_group(self,group_id):
response = self._futures('groups',group_id)
return self._handle_response(response)
def get_group_last_price(self,group_id,currency_pair):
response = self._futures('last_price',group_id,currency_pair)
return self._handle_response(response)
def get_group_ticker(self,group_id,currency_pair):
response = self._futures('ticker',group_id,currency_pair)
return self._handle_response(response)
def get_group_trades(self,group_id,currency_pair):
response = self._futures('trades',group_id,currency_pair)
return self._handle_response(response)
def get_group_depth(self,group_id,currency_pair):
response = self._futures('depth',group_id,currency_pair)
return self._handle_response(response)
# --------------------
# LEVERAGE API
# --------------------
def _leverage(self,func_name,**params):
"""
Helper method to execute post request to leverage API URI
"""
return self._api_uri.post(func_name,'tlapi',**params)
def get_positions(self,**params):
req_params = ['type','group_id']
self._check_req_params(req_params,params)
response = self._leverage(method_name(),**params)
return self._handle_response(response)
def get_position_history(self,**params):
req_params = ['type','group_id','leverage_id']
self._check_req_params(req_params,params)
response = self._leverage('position_history',**params)
return self._handle_response(response)
def get_active_positions(self,**params):
req_params = ['type','group_id']
self._check_req_params(req_params,params)
response = self._leverage('active_positions',**params)
return self._handle_response(response)
def create_position(self,**params):
req_params = ['type', 'group_id', 'currency_pair', 'action', 'price', 'amount', 'leverage']
self._check_req_params(req_params,params)
response = self._leverage(method_name(),**params)
return self._handle_response(response)
def create_buy_position(self,**params):
return self.create_position(action='bid',**params)
def create_sell_position(self,**params):
return self.create_position(action='ask',**params)
def change_position(self,**params):
req_params = ['type','group_id','leverage_id','price']
self._check_req_params(req_params,params)
response = self._leverage(method_name(),**params)
return self._handle_response(response)
def cancel_position(self,**params):
req_params = ['type','group_id','leverage_id']
self._check_req_params(req_params,params)
response = self._leverage(method_name(),**params)
return self._handle_response(response)
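# --- Hedged usage sketch (not part of the original module) ---
# The class docstring above describes how requests and errors surface; the
# helper below only illustrates that flow. The key/secret values are
# placeholders and 'btc_jpy' is an assumed currency pair, so treat this as
# a sketch rather than a documented entry point of the library.
def _example_usage(key='YOUR_API_KEY', secret='YOUR_API_SECRET'):
    client = Client(key, secret)
    # PUBLIC API call: the parsed JSON is returned directly
    ticker = client.get_ticker('btc_jpy')
    # TRADING API call: the 'return' payload is unwrapped on success == 1,
    # otherwise an APIResponseError subclass is raised
    balances = client.get_info()
    return ticker, balances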
|
py
|
1a5e2321b90557b99dbb0892bf8f2e0b85ca3972
|
import pickle
import os
import numpy as np
import pandas as pd
from .dataset import GeneExpressionDataset, arrange_categories
from .dataset10X import Dataset10X
class PbmcDataset(GeneExpressionDataset):
r""" Loads pbmc dataset.
We considered scRNA-seq data from two batches of peripheral blood mononuclear cells (PBMCs) from a healthy donor
(4K PBMCs and 8K PBMCs). We derived quality control metrics using the cellrangerRkit R package (v. 1.1.0).
Quality metrics were extracted from CellRanger through the molecule-specific information file. After filtering,
we extract 12,039 cells with 10,310 sampled genes and get biologically meaningful clusters with the
software Seurat. We then filter genes that we could not match with the bulk data used for differential
expression to be left with g = 3346.
Args:
:save_path: Save path of raw data file. Default: ``'data/'``.
Examples:
>>> gene_dataset = PbmcDataset()
"""
def __init__(self, save_path="data/"):
self.save_path = save_path
self.urls = [
"https://github.com/YosefLab/scVI-data/raw/master/gene_info.csv",
"https://github.com/YosefLab/scVI-data/raw/master/pbmc_metadata.pickle",
]
self.download_names = ["gene_info_pbmc.csv", "pbmc_metadata.pickle"]
self.download()
self.de_metadata = pd.read_csv(
os.path.join(self.save_path, "gene_info_pbmc.csv"), sep=","
)
pbmc_metadata = pickle.load(
open(os.path.join(self.save_path, "pbmc_metadata.pickle"), "rb")
)
pbmc = GeneExpressionDataset.concat_datasets(
Dataset10X("pbmc8k", save_path=save_path),
Dataset10X("pbmc4k", save_path=save_path),
)
self.barcodes = pd.concat(pbmc.barcodes).values.ravel().astype(str)
super().__init__(
pbmc.X,
pbmc.local_means,
pbmc.local_vars,
pbmc.batch_indices,
pbmc.labels,
pbmc.gene_names,
)
dict_barcodes = dict(zip(self.barcodes, np.arange(len(self.barcodes))))
subset_cells = []
barcodes_metadata = (
pbmc_metadata["barcodes"].index.values.ravel().astype(np.str)
)
for barcode in barcodes_metadata:
if (
barcode in dict_barcodes
): # barcodes with end -11 filtered on 10X website (49 cells)
subset_cells += [dict_barcodes[barcode]]
self.update_cells(subset_cells=np.array(subset_cells))
idx_metadata = np.array(
[not barcode.endswith("11") for barcode in barcodes_metadata], dtype=bool
)
self.design = pbmc_metadata["design"][idx_metadata]
self.raw_qc = pbmc_metadata["raw_qc"][idx_metadata]
self.qc_names = self.raw_qc.columns
self.qc = self.raw_qc.values
self.qc_pc = pbmc_metadata["qc_pc"][idx_metadata]
self.normalized_qc = pbmc_metadata["normalized_qc"][idx_metadata]
labels = pbmc_metadata["clusters"][idx_metadata].reshape(-1, 1)[: len(self)]
self.labels, self.n_labels = arrange_categories(labels)
self.cell_types = pbmc_metadata["list_clusters"][: self.n_labels]
genes_to_keep = list(
self.de_metadata["ENSG"].values
) # only keep the genes for which we have de data
difference = list(
set(genes_to_keep).difference(set(pbmc.gene_names))
) # Non empty only for unit tests
for gene in difference:
genes_to_keep.remove(gene)
self.filter_genes(genes_to_keep)
self.de_metadata = self.de_metadata.head(
len(genes_to_keep)
) # this would only affect the unit tests
class PurifiedPBMCDataset(GeneExpressionDataset):
r""" The purified PBMC dataset from: "Massively parallel digital transcriptional profiling of single cells".
Args:
:save_path: Save path of raw data file. Default: ``'data/'``.
Examples:
>>> gene_dataset = PurifiedPBMCDataset()
"""
def __init__(self, save_path="data/", filter_cell_types=None):
cell_types = np.array(
[
"cd4_t_helper",
"regulatory_t",
"naive_t",
"memory_t",
"cytotoxic_t",
"naive_cytotoxic",
"b_cells",
"cd4_t_helper",
"cd34",
"cd56_nk",
"cd14_monocytes",
]
)
if (
filter_cell_types
): # filter = np.arange(6) - for T cells: np.arange(4) for T/CD4 cells
cell_types = cell_types[np.array(filter_cell_types)]
datasets = []
for cell_type in cell_types:
dataset = Dataset10X(cell_type, save_path=save_path)
dataset.cell_types = np.array([cell_type])
datasets += [dataset]
pbmc = GeneExpressionDataset.concat_datasets(*datasets, shared_batches=True)
pbmc.subsample_genes(subset_genes=(np.array(pbmc.X.sum(axis=0)) > 0).ravel())
super().__init__(
pbmc.X,
pbmc.local_means,
pbmc.local_vars,
pbmc.batch_indices,
pbmc.labels,
gene_names=pbmc.gene_names,
cell_types=pbmc.cell_types,
)
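# --- Hedged usage sketch (not from the original module) ---
# The inline comment in PurifiedPBMCDataset.__init__ notes that
# filter_cell_types indexes into the cell_types array (e.g. the first six
# entries for T cells, the first four for T/CD4 cells). The guard below only
# illustrates that call; the save path and the printed attribute (assumed to
# be exposed by GeneExpressionDataset) are assumptions.
if __name__ == "__main__":
    t_cells_only = PurifiedPBMCDataset(save_path="data/", filter_cell_types=list(range(6)))
    print(t_cells_only.cell_types)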
|
py
|
1a5e248f467371097abfec8eee63ab3eae625282
|
"""
Django settings for AwardsService project.
Generated by 'django-admin startproject' using Django 3.0.3.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.0/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'g@at9%1$^2gx877hekn!!2&f%f$2+beq4dki-$i$all&$ip+=-'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = False
ALLOWED_HOSTS = []
# Application definition
DJANGO_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
THIRD_PARTY_APPS = [
'rest_framework',
]
DEV_APPS = [
'Pins',
'Achievements',
]
INSTALLED_APPS = DJANGO_APPS + THIRD_PARTY_APPS + DEV_APPS
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'AwardsService.urls'
REST_FRAMEWORK = {
'DEFAULT_RENDERER_CLASSES': [
'rest_framework.renderers.JSONRenderer',
]
}
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'AwardsService.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/3.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.0/howto/static-files/
STATIC_ROOT = os.path.join(BASE_DIR, 'staticfiles/')
STATIC_URL = '/static/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
MEDIA_URL = '/media/'
try:
from .settings_local import *
except ImportError:
pass
try:
from ApiRequesters.settings import *
except ImportError as e:
raise e
if DEBUG:
REST_FRAMEWORK['DEFAULT_RENDERER_CLASSES'] = [
'rest_framework.renderers.JSONRenderer',
'rest_framework.renderers.BrowsableAPIRenderer',
]
APP_ID = ENV['AWARDS_APP_ID']
APP_SECRET = ENV['AWARDS_SECRET']
ALLOW_REQUESTS = True
ON_HEROKU = not (os.getenv('ON_HEROKU', '0') == '0')
if not DEBUG:
import django_heroku
django_heroku.settings(locals(), databases=ON_HEROKU, test_runner=False, secret_key=False)
|
py
|
1a5e24b60c3c9350680660df569105b4f17cd45a
|
"""
This is where I will actually make my
daily dashboard to monitor my stocks.
"""
#%%
import ingest
with open("../data/stocks.txt") as f:
raw = f.read()
df = ingest.ingest(raw)
# %%
import dash
import dash_core_components as dcc
import dash_html_components as html
from dash.dependencies import Input, Output, State
# Add an import for pandas_datareader and datetime
import pandas_datareader.data as web
from datetime import datetime
#%%
app = dash.Dash()
portfolio = ["TSLA", "PLTR", "JMIA"]
names = ["Tesla", "Palantir", "Jumia"]
options = []
for tic, name in zip(portfolio, names):
mydict = {} # label: user sees, value: script sees
mydict["label"] = tic + " " + name
mydict["value"] = tic
options.append(mydict)
app.layout = html.Div(
[
html.H1("Stock Ticker Dashboard"),
html.Div(
[
html.H3("Enter a stock symbol:", style={"paddingRight": "30px"}),
dcc.Dropdown(
id="my_ticker_symbol", options=options, value=["TSLA"], multi=True
),
],
style={"display": "inline-block", "verticalAlign": "top", "width": "30%"},
),
html.Div(
[
html.H3("Select a start and end date"),
dcc.DatePickerRange(
id="my_date_picker",
min_date_allowed=datetime(2021, 1, 1),
max_date_allowed=datetime.today(),
start_date=datetime(2021, 1, 1),
end_date=datetime.today(),
),
],
style={"display": "inline-block"},
),
html.Div(
[
html.Button(
id="submit-button",
n_clicks=0,
children="Submit",
style={"fontSize": 24, "marginLeft": "30px"},
)
]
),
dcc.Graph(id="my_graph", figure={"data": [{"x": [1, 2], "y": [3, 1]}]}),
]
)
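# --- Hypothetical helper (assumption, not from the original script) ---
# The callback below calls price_action(), which is neither defined nor
# imported in this file; it presumably lives in a local module that is not
# shown. The sketch here is one plausible implementation using the
# pandas_datareader import above with Tiingo, where the token file simply
# holds an API key. The name, signature, and data source are all assumptions.
def price_action(ticker, token_path, start_date, end_date):
    with open(token_path) as token_file:
        api_key = token_file.read().strip()
    # Tiingo data comes back indexed by (symbol, date) with a 'close' column,
    # so the callback's df.reset_index()["date"] / df["close"] lookups work.
    return web.DataReader(ticker, "tiingo", start_date, end_date, api_key=api_key)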
@app.callback(
Output("my_graph", "figure"),
[Input("submit-button", "n_clicks")],
[
State("my_ticker_symbol", "value"),
State("my_date_picker", "start_date"),
State("my_date_picker", "end_date"),
],
)
def update_graph(n_clicks, stock_ticker, start_date, end_date):
# Use datareader and datetime to define a DataFrame
start = datetime.strptime(str(start_date)[:10], "%Y-%m-%d")
end = datetime.strptime(str(end_date)[:10], "%Y-%m-%d")
# creating trace for every stock ticker
traces = []
for tic in stock_ticker:
df = price_action(
tic, token_path="../token.txt", start_date=start, end_date=end
)
traces.append({"x": df.reset_index()["date"], "y": df["close"], "name": tic})
# change the output data
fig = {
"data": traces,
"layout": {"title": ", ".join(stock_ticker) + " Closing Prices"},
}
return fig
if __name__ == "__main__":
app.run_server()
# %%
|
py
|
1a5e24bf8e61f507be0f0553250dff482195339f
|
#!/usr/bin/env python
from setuptools import setup, find_packages
setup(name='deep_sort',
version='1.0.0',
description='Deep sort for object tracking.',
url='[email protected]:williammc/deep_sort.git',
author='ZQPei',
author_email='',
packages=find_packages(exclude=["*.tests", "*.tests.*", "tests.*", "tests"]),
license='LICENSE',
)
|
py
|
1a5e2500bc5e5ea6d3bbe22fb26f05e872cd558a
|
from nose.tools import (
assert_equal, assert_in, assert_not_in,
assert_false, assert_true
)
from app.main.services.query_builder import construct_query, is_filtered
from app.main.services.query_builder import (
field_is_or_filter, field_filters,
or_field_filters, and_field_filters,
filter_clause
)
from werkzeug.datastructures import MultiDict
def test_should_have_correct_root_element():
assert_equal("query" in construct_query(build_query_params()), True)
def test_should_have_page_size_set():
assert_equal(construct_query(build_query_params())["size"], 100)
def test_should_be_able_to_override_pagesize():
assert_equal(construct_query(build_query_params(), 10)["size"], 10)
def test_page_should_set_from_parameter():
assert_equal(
construct_query(build_query_params(page=2))["from"], 100)
def test_should_have_no_from_by_default():
assert_false("from" in construct_query(build_query_params()))
def test_should_have_match_all_query_if_no_params():
assert_equal("query" in construct_query(build_query_params()), True)
assert_equal("match_all" in
construct_query(build_query_params())["query"], True)
def test_should_make_multi_match_query_if_keywords_supplied():
keywords = "these are my keywords"
query = construct_query(build_query_params(keywords))
assert_equal("query" in query, True)
assert_in("simple_query_string", query["query"])
query_string_clause = query["query"]["simple_query_string"]
assert_equal(query_string_clause["query"], keywords)
assert_equal(query_string_clause["default_operator"], "and")
assert_equal(query_string_clause["fields"], [
"frameworkName",
"id",
"lot",
"serviceBenefits",
"serviceFeatures",
"serviceName",
"serviceSummary",
"serviceTypes",
"supplierName",
])
def test_should_identify_filter_search_from_query_params():
cases = (
(build_query_params(), False),
(build_query_params(keywords="lot"), False),
(build_query_params(lot="lot"), True),
(build_query_params(keywords="something", lot="lot"), True),
(build_query_params(service_types=["serviceTypes"]), True)
)
for query, expected in cases:
yield assert_equal, is_filtered(query), expected
def test_should_have_filtered_root_element_if_service_types_search():
query = construct_query(build_query_params(
service_types=["my serviceTypes"]))
assert_equal("query" in query, True)
assert_equal("filtered" in query["query"], True)
def test_should_have_filtered_root_element_if_lot_search():
query = construct_query(build_query_params(lot="SaaS"))
assert_equal("query" in query, True)
assert_equal("filtered" in query["query"], True)
def test_should_have_filtered_root_element_and_match_all_if_no_keywords():
query = construct_query(build_query_params(
service_types=["my serviceTypes"]))
assert_equal("match_all" in query["query"]["filtered"]["query"], True)
def test_should_have_filtered_root_element_and_match_keywords():
query = construct_query(
build_query_params(keywords="some keywords",
service_types=["my serviceTypes"])
)["query"]["filtered"]["query"]
assert_in("simple_query_string", query)
query_string_clause = query["simple_query_string"]
assert_equal(query_string_clause["query"], "some keywords")
assert_equal(query_string_clause["default_operator"], "and")
assert_equal(query_string_clause["fields"], [
"frameworkName",
"id",
"lot",
"serviceBenefits",
"serviceFeatures",
"serviceName",
"serviceSummary",
"serviceTypes",
"supplierName",
])
def test_should_have_filtered_term_service_types_clause():
query = construct_query(build_query_params(service_types=["serviceTypes"]))
assert_equal("term" in
query["query"]["filtered"]["filter"]["bool"]["must"][0], True)
assert_equal(
query["query"]["filtered"]["filter"]
["bool"]["must"][0]["term"]["filter_serviceTypes"],
"servicetypes")
def test_should_have_filtered_term_lot_clause():
query = construct_query(build_query_params(lot="SaaS"))
assert_equal(
"term" in query["query"]["filtered"]["filter"]["bool"]["must"][0],
True)
assert_equal(
query["query"]["filtered"]["filter"]
["bool"]["must"][0]["term"]["filter_lot"],
"saas")
def test_should_have_filtered_term_for_lot_and_service_types_clause():
query = construct_query(
build_query_params(lot="SaaS", service_types=["serviceTypes"]))
terms = query["query"]["filtered"]["filter"]["bool"]["must"]
assert_in({"term": {'filter_serviceTypes': 'servicetypes'}}, terms)
assert_in({"term": {'filter_lot': 'saas'}}, terms)
def test_should_not_filter_on_unknown_keys():
params = build_query_params(lot="SaaS", service_types=["serviceTypes"])
params.add("this", "that")
query = construct_query(params)
terms = query["query"]["filtered"]["filter"]["bool"]["must"]
assert_in({"term": {'filter_serviceTypes': 'servicetypes'}}, terms)
assert_in({"term": {'filter_lot': 'saas'}}, terms)
assert_not_in({"term": {'unknown': 'something to ignore'}}, terms)
def test_should_have_filtered_term_for_multiple_service_types_clauses():
query = construct_query(
build_query_params(
service_types=["serviceTypes1", "serviceTypes2", "serviceTypes3"]))
terms = query["query"]["filtered"]["filter"]["bool"]["must"]
assert_in({"term": {'filter_serviceTypes': 'servicetypes1'}}, terms)
assert_in({"term": {'filter_serviceTypes': 'servicetypes2'}}, terms)
assert_in({"term": {'filter_serviceTypes': 'servicetypes3'}}, terms)
def test_should_use_whitespace_stripped_lowercased_service_types():
query = construct_query(build_query_params(
service_types=["My serviceTypes"]))
assert_equal(
"term" in query["query"]["filtered"]["filter"]["bool"]["must"][0],
True)
assert_equal(
query["query"]["filtered"]["filter"]
["bool"]["must"][0]["term"]["filter_serviceTypes"],
"myservicetypes")
def test_should_use_no_non_alphanumeric_characters_in_service_types():
query = construct_query(
build_query_params(service_types=["Mys Service TYPes"]))
assert_equal(
"term" in query["query"]["filtered"]["filter"]["bool"]["must"][0],
True)
assert_equal(
query["query"]["filtered"]["filter"]["bool"]["must"][0]
["term"]["filter_serviceTypes"],
"mysservicetypes")
def test_should_have_highlight_block_on_keyword_search():
query = construct_query(build_query_params(keywords="some keywords"))
assert_equal("highlight" in query, True)
def test_should_have_highlight_block_on_filtered_search():
query = construct_query(
build_query_params(keywords="some keywords",
service_types=["some serviceTypes"]))
assert_equal("highlight" in query, True)
def test_highlight_block_sets_encoder_to_html():
query = construct_query(
build_query_params(keywords="some keywords",
service_types=["some serviceTypes"]))
assert_equal(query["highlight"]["encoder"], "html")
def test_highlight_block_contains_correct_fields():
query = construct_query(
build_query_params(keywords="some keywords",
service_types=["some serviceTypes"]))
assert_equal("highlight" in query, True)
cases = [
("id", True),
("lot", True),
("serviceName", True),
("serviceSummary", True),
("serviceFeatures", True),
("serviceBenefits", True),
("serviceTypes", True),
("supplierName", True)
]
for example, expected in cases:
yield \
assert_equal, \
example in query["highlight"]["fields"], \
expected, \
example
def build_query_params(keywords=None, service_types=None, lot=None, page=None):
query_params = MultiDict()
if keywords:
query_params["q"] = keywords
if service_types:
for service_type in service_types:
query_params.add("filter_serviceTypes", service_type)
if lot:
query_params["filter_lot"] = lot
if page:
query_params["page"] = page
return query_params
class TestFieldFilters(object):
def test_field_is_or_filter(self):
assert_true(field_is_or_filter(['a,b']))
def test_field_is_or_filter_no_comma(self):
assert_false(field_is_or_filter(['a']))
def test_field_is_or_filter_multiple_values_no_comma(self):
assert_false(field_is_or_filter(['a', 'b']))
def test_field_is_or_filter_multiple_values(self):
assert_false(field_is_or_filter(['a,b', 'b,c']))
def test_or_field_filters(self):
assert_equal(
or_field_filters('filterName', ['Aa bb', 'Bb cc']),
[{"terms": {"filterName": ['aabb', 'bbcc'], "execution": "bool"}}]
)
def test_or_field_filters_single_value(self):
assert_equal(
or_field_filters('filterName', ['Aa bb']),
[{"terms": {"filterName": ['aabb'], "execution": "bool"}}]
)
def test_and_field_filters(self):
assert_equal(
and_field_filters('filterName', ['Aa bb', 'Bb cc']),
[
{"term": {"filterName": 'aabb'}},
{"term": {"filterName": 'bbcc'}}
]
)
def test_and_field_filters_single_value(self):
assert_equal(
and_field_filters('filterName', ['Aa bb']),
[{"term": {"filterName": 'aabb'}}]
)
def test_field_filters_single_value(self):
assert_equal(
field_filters('filterName', ['Aa Bb']),
[{"term": {"filterName": 'aabb'}}]
)
def test_field_filters_multiple_and_values(self):
assert_equal(
field_filters('filterName', ['Aa bb', 'Bb,Cc']),
[
{"term": {"filterName": 'aabb'}},
{"term": {"filterName": 'bbcc'}}
]
)
def test_field_filters_or_value(self):
assert_equal(
field_filters('filterName', ['Aa,Bb']),
[{"terms": {"filterName": ['aa', 'bb'], "execution": "bool"}}]
)
class TestFilterClause(object):
def test_filter_ignores_non_filter_query_args(self):
assert_equal(
filter_clause(
MultiDict({'fieldName': ['Aa bb'], 'lot': ['saas']})
),
{'bool': {'must': []}}
)
def test_single_and_field(self):
assert_equal(
filter_clause(MultiDict(
{'filter_fieldName': ['Aa bb'], 'lot': 'saas'}
)),
{'bool': {
'must': [
{"term": {"filter_fieldName": 'aabb'}},
]
}}
)
def test_single_or_field(self):
assert_equal(
filter_clause(MultiDict({'filter_fieldName': ['Aa,Bb']})),
{'bool': {
'must': [
{"terms": {"filter_fieldName": ['aa', 'bb'], "execution": "bool"}},
]
}}
)
def test_or_and_combination(self):
bool_filter = filter_clause(MultiDict({
'filter_andFieldName': ['Aa', 'bb'],
'filter_orFieldName': ['Aa,Bb']
}))
assert_in(
{"terms": {"filter_orFieldName": ['aa', 'bb'], "execution": "bool"}},
bool_filter['bool']['must']
)
assert_in(
{"term": {"filter_andFieldName": 'aa'}},
bool_filter['bool']['must']
)
assert_in(
{"term": {"filter_andFieldName": 'bb'}},
bool_filter['bool']['must']
)
|
py
|
1a5e25093d040bdd99a026069f19a4dd3f4fe7c6
|
import logging
from typing import Optional, Union
from .MatchMSDataBuilder import MatchMSDataBuilder
from .PandasDataBuilder import PandasDataBuilder
logging.getLogger(__name__).addHandler(logging.NullHandler())
def get_builder(filetype) -> Optional[Union[PandasDataBuilder, MatchMSDataBuilder]]:
if (filetype in ['csv', 'tsv']):
return PandasDataBuilder().with_filetype(filetype)
if (filetype in ['msp']):
return MatchMSDataBuilder().with_filetype(filetype)
return None
__all__ = [
"MatchMSDataBuilder",
"PandasDataBuilder",
]
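# Usage sketch (added for illustration; not part of the original module): the factory
# maps a file extension to the matching builder and returns None for unsupported types.
# The assertions assume with_filetype() follows the usual builder convention of
# returning the builder instance itself.
#
#     assert isinstance(get_builder("csv"), PandasDataBuilder)
#     assert isinstance(get_builder("msp"), MatchMSDataBuilder)
#     assert get_builder("json") is None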
|
py
|
1a5e2592521ddee6e7fc2947f2f709f171c2e4b8
|
"""
This module is meant to compare results with those expected from papers, or create figures illustrating the
behavior of sdba methods and utilities.
"""
import numpy as np
from scipy.stats import scoreatpercentile
from scipy.stats.kde import gaussian_kde
from xclim.sdba.adjustment import (
DetrendedQuantileMapping,
EmpiricalQuantileMapping,
QuantileDeltaMapping,
)
from xclim.sdba.processing import adapt_freq
from . import utils as tu
try:
from matplotlib import pyplot as plt
except ModuleNotFoundError:
plt = False
__all__ = ["synth_rainfall", "cannon_2015_figure_2", "adapt_freq_graph"]
def synth_rainfall(shape, scale=1, wet_freq=0.25, size=1):
"""Return gamma distributed rainfall values for wet days.
Notes
-----
The probability density for the Gamma distribution is:
.. math::
p(x) = x^{k-1}\frac{e^{-x/\theta}}{\theta^k\\Gamma(k)},
where :math:`k` is the shape and :math:`\theta` the scale, and :math:`\\Gamma` is the Gamma function.
"""
is_wet = np.random.binomial(1, p=wet_freq, size=size)
wet_intensity = np.random.gamma(shape, scale, size)
return np.where(is_wet, wet_intensity, 0)
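# Illustrative check (added for this write-up; not part of the original module): with
# wet_freq=0.25 roughly a quarter of the returned values are gamma-distributed
# intensities and the rest are exactly zero.
#
#     r = synth_rainfall(shape=2, scale=2, wet_freq=0.25, size=10_000)
#     assert abs((r > 0).mean() - 0.25) < 0.05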
def cannon_2015_figure_2():
n = 10000
ref, hist, sim = tu.cannon_2015_rvs(n, random=False)
QM = EmpiricalQuantileMapping(kind="*", group="time", interp="linear")
QM.train(ref, hist)
sim_eqm = QM.predict(sim)
DQM = DetrendedQuantileMapping(kind="*", group="time", interp="linear")
DQM.train(ref, hist)
sim_dqm = DQM.predict(sim, degree=0)
QDM = QuantileDeltaMapping(kind="*", group="time", interp="linear")
QDM.train(ref, hist)
sim_qdm = QDM.predict(sim)
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(11, 4))
x = np.linspace(0, 105, 50)
ax1.plot(x, gaussian_kde(ref)(x), color="r", label="Obs hist")
ax1.plot(x, gaussian_kde(hist)(x), color="k", label="GCM hist")
ax1.plot(x, gaussian_kde(sim)(x), color="blue", label="GCM future")
ax1.plot(x, gaussian_kde(sim_qdm)(x), color="lime", label="QDM future")
ax1.plot(x, gaussian_kde(sim_eqm)(x), color="darkgreen", ls="--", label="QM future")
ax1.plot(x, gaussian_kde(sim_dqm)(x), color="lime", ls=":", label="DQM future")
ax1.legend(frameon=False)
ax1.set_xlabel("Value")
ax1.set_ylabel("Density")
tau = np.array([0.25, 0.5, 0.75, 0.95, 0.99]) * 100
bc_gcm = (
scoreatpercentile(sim, tau) - scoreatpercentile(hist, tau)
) / scoreatpercentile(hist, tau)
bc_qdm = (
scoreatpercentile(sim_qdm, tau) - scoreatpercentile(ref, tau)
) / scoreatpercentile(ref, tau)
bc_eqm = (
scoreatpercentile(sim_eqm, tau) - scoreatpercentile(ref, tau)
) / scoreatpercentile(ref, tau)
bc_dqm = (
scoreatpercentile(sim_dqm, tau) - scoreatpercentile(ref, tau)
) / scoreatpercentile(ref, tau)
ax2.plot([0, 1], [0, 1], ls=":", color="blue")
ax2.plot(bc_gcm, bc_gcm, "-", color="blue", label="GCM")
ax2.plot(bc_gcm, bc_qdm, marker="o", mfc="lime", label="QDM")
ax2.plot(
bc_gcm,
bc_eqm,
marker="o",
mfc="darkgreen",
ls=":",
color="darkgreen",
label="QM",
)
ax2.plot(
bc_gcm,
bc_dqm,
marker="s",
mec="lime",
mfc="w",
ls="--",
color="lime",
label="DQM",
)
for i, s in enumerate(tau / 100):
ax2.text(bc_gcm[i], bc_eqm[i], f"{s} ", ha="right", va="center", fontsize=9)
ax2.set_xlabel("GCM relative change")
ax2.set_ylabel("Bias adjusted relative change")
ax2.legend(loc="upper left", frameon=False)
ax2.set_aspect("equal")
plt.tight_layout()
return fig
def adapt_freq_graph():
"""
Create a graphic with the additive adjustment factors estimated after applying the adapt_freq method.
"""
n = 10000
x = tu.series(synth_rainfall(2, 2, wet_freq=0.25, size=n), "pr") # sim
y = tu.series(synth_rainfall(2, 2, wet_freq=0.5, size=n), "pr") # ref
xp = adapt_freq(x, y, thresh=0).sim_ad
fig, (ax1, ax2) = plt.subplots(2, 1)
sx = x.sortby(x)
sy = y.sortby(y)
sxp = xp.sortby(xp)
# Original and corrected series
ax1.plot(sx.values, color="blue", lw=1.5, label="x : sim")
ax1.plot(sxp.values, color="pink", label="xp : sim corrected")
ax1.plot(sy.values, color="k", label="y : ref")
ax1.legend()
# Compute qm factors
qm_add = QuantileDeltaMapping(kind="+", group="time").train(y, x).ds
qm_mul = QuantileDeltaMapping(kind="*", group="time").train(y, x).ds
qm_add_p = QuantileDeltaMapping(kind="+", group="time").train(y, xp).ds
qm_mul_p = QuantileDeltaMapping(kind="*", group="time").train(y, xp).ds
qm_add.cf.plot(ax=ax2, color="cyan", ls="--", label="+: y-x")
qm_add_p.cf.plot(ax=ax2, color="cyan", label="+: y-xp")
qm_mul.cf.plot(ax=ax2, color="brown", ls="--", label="*: y/x")
qm_mul_p.cf.plot(ax=ax2, color="brown", label="*: y/xp")
ax2.legend(loc="upper left", frameon=False)
return fig
|
py
|
1a5e25ac9a739e3a3e021a6ec6e9ab047e787dab
|
from collections import deque
import weakref
import py4j.protocol as proto
from py4j.clientserver import (
ClientServerConnection, ClientServer, JavaClient, PythonServer)
from py4j.java_gateway import (
CallbackServer, JavaGateway, GatewayClient, GatewayProperty,
PythonProxyPool, GatewayConnection, CallbackConnection)
from py4j.tests.py4j_callback_recursive_example import PythonPing
# Use deque to be thread-safe
MEMORY_HOOKS = deque()
CREATED = deque()
FINALIZED = deque()
def register_creation(obj):
obj_str = str(obj)
CREATED.append(obj_str)
MEMORY_HOOKS.append(weakref.ref(
obj,
lambda wr: FINALIZED.append(obj_str)
))
class InstrumentedPythonPing(PythonPing):
def __init__(self, fail=False):
super(InstrumentedPythonPing, self).__init__(fail)
register_creation(self)
class InstrJavaGateway(JavaGateway):
def __init__(self, *args, **kwargs):
super(InstrJavaGateway, self).__init__(*args, **kwargs)
register_creation(self)
def _create_gateway_client(self):
gateway_client = InstrGatewayClient(
gateway_parameters=self.gateway_parameters)
return gateway_client
def _create_callback_server(self, callback_server_parameters):
callback_server = InstrCallbackServer(
self.gateway_property.pool, self._gateway_client,
callback_server_parameters=callback_server_parameters)
return callback_server
def _create_gateway_property(self):
gateway_property = InstrGatewayProperty(
self.gateway_parameters.auto_field, PythonProxyPool(),
self.gateway_parameters.enable_memory_management)
if self.python_server_entry_point:
gateway_property.pool.put(
self.python_server_entry_point, proto.ENTRY_POINT_OBJECT_ID)
return gateway_property
class InstrGatewayClient(GatewayClient):
def __init__(self, *args, **kwargs):
super(InstrGatewayClient, self).__init__(*args, **kwargs)
register_creation(self)
def _create_connection(self):
connection = InstrGatewayConnection(
self.gateway_parameters, self.gateway_property)
connection.start()
return connection
class InstrGatewayProperty(GatewayProperty):
"""Object shared by callbackserver, gateway, and connections.
"""
def __init__(self, *args, **kwargs):
super(InstrGatewayProperty, self).__init__(*args, **kwargs)
register_creation(self)
class InstrGatewayConnection(GatewayConnection):
def __init__(self, *args, **kwargs):
super(InstrGatewayConnection, self).__init__(*args, **kwargs)
register_creation(self)
class InstrCallbackServer(CallbackServer):
def __init__(self, *args, **kwargs):
super(InstrCallbackServer, self).__init__(*args, **kwargs)
register_creation(self)
def _create_connection(self, socket_instance, stream):
connection = InstrCallbackConnection(
self.pool, stream, socket_instance, self.gateway_client,
self.callback_server_parameters, self)
return connection
class InstrCallbackConnection(CallbackConnection):
def __init__(self, *args, **kwargs):
super(InstrCallbackConnection, self).__init__(*args, **kwargs)
register_creation(self)
class InstrClientServerConnection(ClientServerConnection):
def __init__(self, *args, **kwargs):
super(InstrClientServerConnection, self).__init__(*args, **kwargs)
register_creation(self)
class InstrPythonServer(PythonServer):
def __init__(self, *args, **kwargs):
super(InstrPythonServer, self).__init__(*args, **kwargs)
register_creation(self)
def _create_connection(self, socket, stream):
connection = InstrClientServerConnection(
self.java_parameters, self.python_parameters,
self.gateway_property, self.gateway_client, self)
connection.init_socket_from_python_server(socket, stream)
return connection
class InstrJavaClient(JavaClient):
def __init__(self, *args, **kwargs):
super(InstrJavaClient, self).__init__(*args, **kwargs)
register_creation(self)
def _create_new_connection(self):
connection = InstrClientServerConnection(
self.java_parameters, self.python_parameters,
self.gateway_property, self)
connection.connect_to_java_server()
self.set_thread_connection(connection)
self.deque.append(connection)
return connection
class InstrClientServer(ClientServer):
def __init__(self, *args, **kwargs):
super(InstrClientServer, self).__init__(*args, **kwargs)
register_creation(self)
def _create_gateway_client(self):
java_client = InstrJavaClient(
self.java_parameters, self.python_parameters)
return java_client
def _create_callback_server(self, callback_server_parameters):
callback_server = InstrPythonServer(
self._gateway_client, self.java_parameters, self.python_parameters,
self.gateway_property)
return callback_server
def _create_gateway_property(self):
gateway_property = InstrGatewayProperty(
self.java_parameters.auto_field, PythonProxyPool(),
self.java_parameters.enable_memory_management)
if self.python_server_entry_point:
gateway_property.pool.put(
self.python_server_entry_point, proto.ENTRY_POINT_OBJECT_ID)
return gateway_property
|
py
|
1a5e25d5cbb1ad6ef7226ef2bf853558acc64dfc
|
""" Basic tests for idact-gui.
"""
import pytest
from pytestqt.qt_compat import qt_api
from gui.functionality.running_notebooks import RunningNotebooks
from gui.helpers.configuration_provider import ConfigurationProvider
from gui.functionality.main_window import MainWindow
from gui.functionality.idact_notebook import IdactNotebook
from gui.functionality.manage_jobs import ManageJobs
from gui.functionality.add_cluster import AddCluster
from gui.functionality.remove_cluster import RemoveCluster
from gui.functionality.adjust_timeouts import AdjustTimeouts
@pytest.fixture()
def window():
assert qt_api.QApplication.instance() is not None
conf_provider = ConfigurationProvider()
if not conf_provider.check_if_conf_file_exists():
conf_provider.create_conf_file()
if not conf_provider.check_if_args_files_exist():
conf_provider.create_args_files()
window = MainWindow()
return window
def test_basics(window, qtbot):
""" Tests if idact-gui renders itself.
"""
window.show()
assert window.isVisible()
assert window.windowTitle() == 'Idact GUI'
def test_deploy_notebook_window(window, qtbot):
""" Tests if it is possible to open deploy notebook window.
"""
window.show()
window.deploy_notebook_action.trigger()
assert window.centralWidget().__class__ == IdactNotebook(window).__class__
def test_manage_jobs_window(window, qtbot):
""" Tests if it is possible to open manage jobs window.
"""
window.show()
window.manage_jobs_action.trigger()
assert window.centralWidget().__class__ == ManageJobs(window).__class__
def test_running_notebooks_window(window, qtbot):
""" Tests if it is possible to open running notebooks window.
"""
window.show()
window.running_notebooks_action.trigger()
assert window.centralWidget().__class__ == RunningNotebooks(window).__class__
def test_add_cluster_window(window, qtbot):
""" Tests if it is possible to open add cluster window.
"""
window.show()
window.add_cluster_action.trigger()
assert window.centralWidget().__class__ == AddCluster(window).__class__
def test_remove_cluster_window(window, qtbot):
""" Tests if it is possible to open remove cluster window.
"""
window.show()
window.remove_cluster_action.trigger()
assert window.centralWidget().__class__ == RemoveCluster(window).__class__
def test_edit_configuration_window(window, qtbot):
""" Tests if it is possible to open edit configuration window.
"""
window.show()
window.edit_configuration_action.trigger()
assert window.centralWidget().__class__ == AdjustTimeouts(window).__class__
def test_logs_window(window, qtbot):
""" Tests if it is possible to open logs window.
"""
window.show()
window.show_logs_action.trigger()
assert window.show_logs_window.isVisible()
assert window.show_logs_window.windowTitle() == 'Logs'
def test_help_window(window, qtbot):
""" Tests if it is possible to open help window.
"""
window.show()
window.see_help_action.trigger()
assert window.help_window.isVisible()
assert window.help_window.windowTitle() == 'Help'
def test_about_window(window, qtbot):
""" Tests if it is possible to open help window.
"""
window.show()
window.about_the_program_action.trigger()
assert window.program_info_window.isVisible()
assert window.program_info_window.windowTitle() == 'About'
|
py
|
1a5e2791353fef2380a66b461d94432dab307aa9
|
""" A simple module for caching a single object on disk"""
import cPickle as pickle
import errno
import logging
import os
import simpleflock
__LOCK_FILE = 'dmsa.cache.lockfile'
__CACHE_FILE = 'dmsa.cache'
__DIR = None
def set_cache_dir(cache_dir):
"""Set the directory to use for holding cache and lock files
If the directory is never set, the current directory is used (see below).
"""
global __DIR
__DIR = cache_dir
def _pathname(name):
"""Ensure directory `__DIR` exists and return path name
If `__DIR` is falsy, simply return `name` as the pathname.
Otherwise, create `__DIR` if necessary, and return the pathname.
Return: the pathname resulting from path-joining `__DIR` and `name`
(or just `name`).
"""
if not __DIR:
return name
try:
os.makedirs(__DIR)
except OSError:
pass
return os.path.join(__DIR, name)
def _pickle_and_cache_models(obj):
pathname = _pathname(__CACHE_FILE)
try:
with open(pathname, mode='w') as f:
pickle.dump(obj, f)
except pickle.PicklingError as e:
logging.error('pickling object: {}'.format(e))
raise
except IOError as e:
logging.error('opening {} for writing: {}'.format(pathname, e))
raise
def set_cache(obj):
"""Update the cache with an object (dict, e.g.)
The object is cached on disk. A lock file is used to coordinate
updates to this cache among threads and processes.
If another process has the cache locked (only used for writing),
then this function does nothing; i.e. it assumes that somebody
else is taking care of the update ....
Arguments:
obj - object to write to the cache
Return:
none
"""
lock_path = _pathname(__LOCK_FILE)
try:
with simpleflock.SimpleFlock(lock_path, timeout=0):
_pickle_and_cache_models(obj)
except IOError as e:
if e.errno != errno.EWOULDBLOCK:
logging.error('creating lock file {}: {}'.format(lock_path, e))
raise
def get_cache():
"""Fetch the object from disk cache
Return: cached object as written by set_cache, or None if no cache file
"""
pathname = _pathname(__CACHE_FILE)
try:
with open(pathname, mode='r') as f:
try:
obj = pickle.load(f)
except pickle.UnpicklingError as e:
logging.error('unpickling object: {}'.format(e))
raise
except IOError as e:
if e.errno == errno.ENOENT:
return None
logging.error('opening {} for reading: {}'.format(pathname, e))
raise
return obj
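# Usage sketch (added for illustration; not part of the original module): cache a dict
# and read it back. set_cache silently skips the write when another process holds the
# lock, so get_cache may return an older value (or None before any successful write).
# The directory and payload below are hypothetical.
#
#     set_cache_dir('/tmp/dmsa-cache')
#     set_cache({'models': ['model_a', 'model_b']})
#     cached = get_cache()  # -> the dict above, or None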
|
py
|
1a5e2a597cf0b49f3fc49bae189ad9c0d3f108b7
|
"""Initialization of ATAG One climate platform."""
from __future__ import annotations
from homeassistant.components.climate import ClimateEntity
from homeassistant.components.climate.const import (
CURRENT_HVAC_HEAT,
CURRENT_HVAC_IDLE,
HVAC_MODE_AUTO,
HVAC_MODE_HEAT,
PRESET_AWAY,
PRESET_BOOST,
SUPPORT_PRESET_MODE,
SUPPORT_TARGET_TEMPERATURE,
)
from homeassistant.const import ATTR_TEMPERATURE
from . import CLIMATE, DOMAIN, AtagEntity
PRESET_MAP = {
"Manual": "manual",
"Auto": "automatic",
"Extend": "extend",
PRESET_AWAY: "vacation",
PRESET_BOOST: "fireplace",
}
PRESET_INVERTED = {v: k for k, v in PRESET_MAP.items()}
SUPPORT_FLAGS = SUPPORT_TARGET_TEMPERATURE | SUPPORT_PRESET_MODE
HVAC_MODES = [HVAC_MODE_AUTO, HVAC_MODE_HEAT]
async def async_setup_entry(hass, entry, async_add_entities):
"""Load a config entry."""
coordinator = hass.data[DOMAIN][entry.entry_id]
async_add_entities([AtagThermostat(coordinator, CLIMATE)])
class AtagThermostat(AtagEntity, ClimateEntity):
"""Atag climate device."""
_attr_hvac_modes = HVAC_MODES
_attr_preset_modes = list(PRESET_MAP.keys())
_attr_supported_features = SUPPORT_FLAGS
def __init__(self, coordinator, atag_id):
"""Initialize an Atag climate device."""
super().__init__(coordinator, atag_id)
self._attr_temperature_unit = coordinator.data.climate.temp_unit
@property
def hvac_mode(self) -> str | None: # type: ignore[override]
"""Return hvac operation ie. heat, cool mode."""
if self.coordinator.data.climate.hvac_mode in HVAC_MODES:
return self.coordinator.data.climate.hvac_mode
return None
@property
def hvac_action(self) -> str | None:
"""Return the current running hvac operation."""
is_active = self.coordinator.data.climate.status
return CURRENT_HVAC_HEAT if is_active else CURRENT_HVAC_IDLE
@property
def current_temperature(self) -> float | None:
"""Return the current temperature."""
return self.coordinator.data.climate.temperature
@property
def target_temperature(self) -> float | None:
"""Return the temperature we try to reach."""
return self.coordinator.data.climate.target_temperature
@property
def preset_mode(self) -> str | None:
"""Return the current preset mode, e.g., auto, manual, fireplace, extend, etc."""
preset = self.coordinator.data.climate.preset_mode
return PRESET_INVERTED.get(preset)
async def async_set_temperature(self, **kwargs) -> None:
"""Set new target temperature."""
await self.coordinator.data.climate.set_temp(kwargs.get(ATTR_TEMPERATURE))
self.async_write_ha_state()
async def async_set_hvac_mode(self, hvac_mode: str) -> None:
"""Set new target hvac mode."""
await self.coordinator.data.climate.set_hvac_mode(hvac_mode)
self.async_write_ha_state()
async def async_set_preset_mode(self, preset_mode: str) -> None:
"""Set new preset mode."""
await self.coordinator.data.climate.set_preset_mode(PRESET_MAP[preset_mode])
self.async_write_ha_state()
|
py
|
1a5e2ab6bc0e059872294442d33df9fb6fc4f021
|
import torch
from torch.nn import Parameter
from torch_geometric.nn import ChebConv
from torch_geometric.nn.inits import glorot, zeros
class GConvLSTM(torch.nn.Module):
r"""An implementation of the Chebyshev Graph Convolutional Long Short Term Memory
Cell. For details see this paper: `"Structured Sequence Modeling with Graph
Convolutional Recurrent Networks." <https://arxiv.org/abs/1612.07659>`_
Args:
in_channels (int): Number of input features.
out_channels (int): Number of output features.
K (int): Chebyshev filter size :math:`K`.
normalization (str, optional): The normalization scheme for the graph
Laplacian (default: :obj:`"sym"`):
1. :obj:`None`: No normalization
:math:`\mathbf{L} = \mathbf{D} - \mathbf{A}`
2. :obj:`"sym"`: Symmetric normalization
:math:`\mathbf{L} = \mathbf{I} - \mathbf{D}^{-1/2} \mathbf{A}
\mathbf{D}^{-1/2}`
3. :obj:`"rw"`: Random-walk normalization
:math:`\mathbf{L} = \mathbf{I} - \mathbf{D}^{-1} \mathbf{A}`
You need to pass :obj:`lambda_max` to the :meth:`forward` method of
this operator in case the normalization is non-symmetric.
:obj:`\lambda_max` should be a :class:`torch.Tensor` of size
:obj:`[num_graphs]` in a mini-batch scenario and a
scalar/zero-dimensional tensor when operating on single graphs.
You can pre-compute :obj:`lambda_max` via the
:class:`torch_geometric.transforms.LaplacianLambdaMax` transform.
bias (bool, optional): If set to :obj:`False`, the layer will not learn
an additive bias. (default: :obj:`True`)
"""
def __init__(self, in_channels: int, out_channels: int, K: int,
normalization: str="sym", bias: bool=True):
super(GConvLSTM, self).__init__()
self.in_channels = in_channels
self.out_channels = out_channels
self.K = K
self.normalization = normalization
self.bias = bias
self._create_parameters_and_layers()
self._set_parameters()
def _create_input_gate_parameters_and_layers(self):
self.conv_x_i = ChebConv(in_channels=self.in_channels,
out_channels=self.out_channels,
K=self.K,
normalization=self.normalization,
bias=self.bias)
self.conv_h_i = ChebConv(in_channels=self.out_channels,
out_channels=self.out_channels,
K=self.K,
normalization=self.normalization,
bias=self.bias)
self.w_c_i = Parameter(torch.Tensor(1, self.out_channels))
self.b_i = Parameter(torch.Tensor(1, self.out_channels))
def _create_forget_gate_parameters_and_layers(self):
self.conv_x_f = ChebConv(in_channels=self.in_channels,
out_channels=self.out_channels,
K=self.K,
normalization=self.normalization,
bias=self.bias)
self.conv_h_f = ChebConv(in_channels=self.out_channels,
out_channels=self.out_channels,
K=self.K,
normalization=self.normalization,
bias=self.bias)
self.w_c_f = Parameter(torch.Tensor(1, self.out_channels))
self.b_f = Parameter(torch.Tensor(1, self.out_channels))
def _create_cell_state_parameters_and_layers(self):
self.conv_x_c = ChebConv(in_channels=self.in_channels,
out_channels=self.out_channels,
K=self.K,
normalization=self.normalization,
bias=self.bias)
self.conv_h_c = ChebConv(in_channels=self.out_channels,
out_channels=self.out_channels,
K=self.K,
normalization=self.normalization,
bias=self.bias)
self.b_c = Parameter(torch.Tensor(1, self.out_channels))
def _create_output_gate_parameters_and_layers(self):
self.conv_x_o = ChebConv(in_channels=self.in_channels,
out_channels=self.out_channels,
K=self.K,
normalization=self.normalization,
bias=self.bias)
self.conv_h_o = ChebConv(in_channels=self.out_channels,
out_channels=self.out_channels,
K=self.K,
normalization=self.normalization,
bias=self.bias)
self.w_c_o = Parameter(torch.Tensor(1, self.out_channels))
self.b_o = Parameter(torch.Tensor(1, self.out_channels))
def _create_parameters_and_layers(self):
self._create_input_gate_parameters_and_layers()
self._create_forget_gate_parameters_and_layers()
self._create_cell_state_parameters_and_layers()
self._create_output_gate_parameters_and_layers()
def _set_parameters(self):
glorot(self.w_c_i)
glorot(self.w_c_f)
glorot(self.w_c_o)
zeros(self.b_i)
zeros(self.b_f)
zeros(self.b_c)
zeros(self.b_o)
def _set_hidden_state(self, X, H):
if H is None:
H = torch.zeros(X.shape[0], self.out_channels)
return H
def _set_cell_state(self, X, C):
if C is None:
C = torch.zeros(X.shape[0], self.out_channels)
return C
def _calculate_input_gate(self, X, edge_index, edge_weight, H, C):
I = self.conv_x_i(X, edge_index, edge_weight)
I = I + self.conv_h_i(H, edge_index, edge_weight)
I = I + (self.w_c_i*C)
I = I + self.b_i
I = torch.sigmoid(I)
return I
def _calculate_forget_gate(self, X, edge_index, edge_weight, H, C):
F = self.conv_x_f(X, edge_index, edge_weight)
F = F + self.conv_h_f(H, edge_index, edge_weight)
F = F + (self.w_c_f*C)
F = F + self.b_f
F = torch.sigmoid(F)
return F
def _calculate_cell_state(self, X, edge_index, edge_weight, H, C, I, F):
T = self.conv_x_c(X, edge_index, edge_weight)
T = T + self.conv_h_c(H, edge_index, edge_weight)
T = T + self.b_c
T = torch.tanh(T)
C = F*C + I*T
return C
def _calculate_output_gate(self, X, edge_index, edge_weight, H, C):
O = self.conv_x_o(X, edge_index, edge_weight)
O = O + self.conv_h_o(H, edge_index, edge_weight)
O = O + (self.w_c_o*C)
O = O + self.b_o
O = torch.sigmoid(O)
return O
def _calculate_hidden_state(self, O, C):
H = O * torch.tanh(C)
return H
def forward(self, X: torch.FloatTensor, edge_index: torch.LongTensor, edge_weight: torch.FloatTensor=None,
H: torch.FloatTensor=None, C: torch.FloatTensor=None) -> torch.FloatTensor:
"""
Making a forward pass. If edge weights are not present the forward pass
defaults to an unweighted graph. If the hidden state and cell state
matrices are not present when the forward pass is called these are
initialized with zeros.
Arg types:
* **X** *(PyTorch Float Tensor)* - Node features.
* **edge_index** *(PyTorch Long Tensor)* - Graph edge indices.
* **edge_weight** *(PyTorch Long Tensor, optional)* - Edge weight vector.
* **H** *(PyTorch Float Tensor, optional)* - Hidden state matrix for all nodes.
* **C** *(PyTorch Float Tensor, optional)* - Cell state matrix for all nodes.
Return types:
* **H** *(PyTorch Float Tensor)* - Hidden state matrix for all nodes.
* **C** *(PyTorch Float Tensor)* - Cell state matrix for all nodes.
"""
H = self._set_hidden_state(X, H)
C = self._set_cell_state(X, C)
I = self._calculate_input_gate(X, edge_index, edge_weight, H, C)
F = self._calculate_forget_gate(X, edge_index, edge_weight, H, C)
C = self._calculate_cell_state(X, edge_index, edge_weight, H, C, I, F)
O = self._calculate_output_gate(X, edge_index, edge_weight, H, C)
H = self._calculate_hidden_state(O, C)
return H, C
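# Usage sketch (added for illustration; not part of the original file). The forward pass
# takes node features and graph connectivity plus, optionally, the previous hidden and
# cell states; passing H=None / C=None zero-initializes them as documented above. The
# toy ring graph and tensor sizes below are assumptions made only for this demo.
if __name__ == "__main__":
    num_nodes, in_channels, out_channels = 4, 8, 16
    cell = GConvLSTM(in_channels, out_channels, K=2)
    edge_index = torch.tensor([[0, 1, 2, 3], [1, 2, 3, 0]])  # 4-node directed ring
    H, C = None, None
    for _ in range(3):  # unroll the cell over three time steps
        X = torch.randn(num_nodes, in_channels)
        H, C = cell(X, edge_index, H=H, C=C)
    assert H.shape == (num_nodes, out_channels) and C.shape == (num_nodes, out_channels)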
|
py
|
1a5e2b178ed36d362526a9911802c3b9c93480b6
|
import argparse
import os
import random
import shutil
import time
import numpy as np
import torch
import torch.backends.cudnn as cudnn
import torch.nn as nn
import torch.optim
import torch.utils.data
from torch.utils.data import DataLoader
from torchvision import transforms
import data
import new_data
import resnet
def get_arguments():
parser = argparse.ArgumentParser(description='RecycleNet')
parser.add_argument('--b', '--batch', type=int, default=16)
parser.add_argument('--gpu', type=str, help='0; 0,1; 0,3; etc', required=True)
parser.add_argument('--root_dir', type=str, default='data/')
parser.add_argument('--save_dir', type=str, default='save/')
parser.add_argument('--resume', type=str, default=None)
parser.add_argument('--lr', type=float, default=2e-4)
parser.add_argument('--momentum', default=0.9, type=float, metavar='M', help='momentum')
parser.add_argument('--weight-decay', '--wd', default=1e-4, type=float, metavar='W',
help='weight decay (default: 1e-4)')
parser.add_argument('--arch', type=str, default='resnet18_base', help='resnet18, 34, 50, 101, 152')
# parser.add_argument('--lr_finetune', type=float, default=5e-5)
# parser.add_argument('--save_model_interval', type=int, default=5000)
# parser.add_argument('--save_training_img_interval', type=int, default=5000)
# parser.add_argument('--vis_interval', type=int, default=5)
# parser.add_argument('--max_iter', type=int, default=1000000)
# parser.add_argument('--display_id', type=int, default=10)
parser.add_argument('--att_mode', type=str, default='ours', help='attention module mode: ours, cbam, se')
parser.add_argument('--use_att', action='store_true', help='use attention module')
parser.add_argument('--no_pretrain', action='store_false', help='training from scratch')
parser.add_argument('-e', '--evaluate', dest='evaluate', action='store_true',
help='evaluate model on validation set')
parser.add_argument('--epochs', default=100, type=int, metavar='N', help='number of total epochs to run')
parser.add_argument('--start-epoch', default=0, type=int, metavar='N',
help='manual epoch number (useful on restarts)')
parser.add_argument('--adjust-freq', type=int, default=40, help='learning rate adjustment frequency (default: 40)')
parser.add_argument('--print-freq', '-p', default=10, type=int, metavar='N', help='print frequency (default: 10)')
parser.add_argument('-j', '--workers', default=4, type=int, metavar='N',
help='number of data loading workers (default: 4)')
parser.add_argument('--seed', default=1234, type=int, help='seed for initializing training. ')
parser.add_argument('--new_data', action='store_true', help='use scott\'s relabelled dataset')
return parser.parse_args()
def main():
args = get_arguments()
if args.seed is not None:
random.seed(args.seed)
torch.manual_seed(args.seed)
cudnn.deterministic = True
BATCH_SIZE = args.b
GPU = args.gpu
ROOT_DIR = args.root_dir
MEAN = [0.485, 0.456, 0.406]
STD = [0.229, 0.224, 0.225]
os.environ['CUDA_VISIBLE_DEVICES'] = GPU
if torch.cuda.is_available():
print('using Cuda devices, num:', torch.cuda.device_count())
if not args.evaluate:
if not os.path.exists(args.save_dir):
os.makedirs(args.save_dir)
'''def ToCudaVariable(xs, volatile=False, requires_grad=True):
if torch.cuda.is_available():
return [Variable(x.cuda(), volatile=volatile, requires_grad=requires_grad) for x in xs]
else:
return [Variable(x, volatile=volatile, requires_grad=requires_grad) for x in xs]
'''
if torch.cuda.is_available():
device = torch.device('cuda')
else:
device = torch.device('cpu')
if args.new_data:
n_classes = 9
else:
n_classes = 6
if args.arch == 'resnet18_base':
model = nn.DataParallel(
resnet.resnet18(pretrained=True if not args.resume else False, num_classes=n_classes, use_att=args.use_att,
att_mode=args.att_mode).to(device))
elif args.arch == 'resnet34_base':
model = nn.DataParallel(
resnet.resnet34(pretrained=not args.no_pretrain if not args.resume else False, num_classes=n_classes,
use_att=args.use_att, att_mode=args.att_mode).to(device))
elif args.arch == 'resnet50_base':
model = nn.DataParallel(
resnet.resnet50(pretrained=not args.no_pretrain if not args.resume else False, num_classes=n_classes,
use_att=args.use_att, att_mode=args.att_mode).to(device))
elif args.arch == 'resnet101_base':
model = nn.DataParallel(
resnet.resnet101(pretrained=not args.no_pretrain if not args.resume else False, num_classes=n_classes,
use_att=args.use_att, att_mode=args.att_mode).to(device))
elif args.arch == 'resnet152_base':
model = nn.DataParallel(
resnet.resnet152(pretrained=not args.no_pretrain if not args.resume else False, num_classes=n_classes,
use_att=args.use_att, att_mode=args.att_mode).to(device))
else:
model = nn.DataParallel(resnet.resnet18(pretrained=True, num_classes=5, use_att=False).to(device))
print(model)
print('Number of model parameters: {}'.format(
sum([p.data.nelement() for p in model.parameters()])))
criterion = nn.CrossEntropyLoss().to(device)
# att_params = [p for n,p in model.named_parameters() if n.startswith('module.att') and p.requires_grad]
# non_att_params = [p for n,p in model.named_parameters() if not n.startswith('module.att') and p.requires_grad]
# params = [{'params': non_att_params, 'lr': args.lr / 10.0}, {'params': att_params}]
optimizer = torch.optim.SGD(model.parameters(), args.lr,
momentum=args.momentum,
weight_decay=args.weight_decay)
if args.resume:
if os.path.isfile(args.resume):
print("=> loading checkpoint '{}'".format(args.resume))
checkpoint = torch.load(args.resume, map_location=device)
args.start_epoch = checkpoint['epoch']
best_acc1 = checkpoint['best_acc1']
model.load_state_dict(checkpoint['state_dict'])
optimizer.load_state_dict(checkpoint['optimizer'])
print("=> loaded checkpoint '{}' (epoch {})"
.format(args.resume, checkpoint['epoch']))
print('=> best accuracy {}'.format(best_acc1))
else:
print("=> no checkpoint found at '{}'".format(args.resume))
cudnn.benchmark = True
train_img_transform = transforms.Compose([
transforms.RandomResizedCrop(224),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize(mean=MEAN, std=STD)])
if args.new_data is True:
train_dataset = new_data.TrashDataset(ROOT_DIR, train_img_transform, 'train')
else:
train_dataset = data.TrashDataset(ROOT_DIR, train_img_transform, 'train')
train_loader = DataLoader(train_dataset, batch_size=BATCH_SIZE, shuffle=True, num_workers=args.workers,
pin_memory=True)
val_img_transform = transforms.Compose([
transforms.Resize(256),
transforms.CenterCrop(224),
transforms.ToTensor(),
transforms.Normalize(mean=MEAN, std=STD)])
if args.new_data is True:
val_dataset = new_data.TrashDataset(ROOT_DIR, val_img_transform, 'val')
else:
val_dataset = data.TrashDataset(ROOT_DIR, val_img_transform, 'val')
val_loader = DataLoader(val_dataset, batch_size=BATCH_SIZE, shuffle=False, num_workers=args.workers,
pin_memory=True)
test_img_transform = transforms.Compose([
transforms.Resize(256),
transforms.CenterCrop(224),
transforms.ToTensor(),
transforms.Normalize(mean=MEAN, std=STD)])
if args.new_data is True:
test_dataset = new_data.TrashDataset(ROOT_DIR, test_img_transform, 'test')
else:
test_dataset = data.TrashDataset(ROOT_DIR, test_img_transform, 'test')
test_loader = DataLoader(test_dataset, batch_size=BATCH_SIZE, shuffle=False, num_workers=args.workers,
pin_memory=True)
if args.evaluate:
# validate(args, val_loader, model, criterion, device)
test(args, test_loader, model, criterion, device)
return
best_acc1 = 0
for epoch in range(args.start_epoch, args.epochs):
adjust_learning_rate(args, optimizer, epoch, args.adjust_freq)
train(args, train_loader, model, criterion, optimizer, epoch, device)
acc1 = validate(args, val_loader, model, criterion, device)
is_best = acc1 > best_acc1
best_acc1 = max(acc1, best_acc1)
save_checkpoint({
'epoch': epoch + 1,
'arch': args.arch,
'state_dict': model.state_dict(),
'best_acc1': best_acc1,
'optimizer': optimizer.state_dict(),
}, is_best, args.save_dir)
def train(args, train_loader, model, criterion, optimizer, epoch, device):
batch_time = AverageMeter()
data_time = AverageMeter()
losses = AverageMeter()
top1 = AverageMeter()
model.train()
end = time.time()
for i, (input, target) in enumerate(train_loader):
data_time.update(time.time() - end)
if args.gpu is not None:
input = input.to(device)
target = torch.from_numpy(np.asarray(target))
target = target.to(device)
output = model(input)
loss = criterion(output[0], target)
acc1 = accuracy(output[0], target)
losses.update(loss.item(), input.size(0))
top1.update(acc1[0].item(), input.size(0))
optimizer.zero_grad()
loss.backward()
optimizer.step()
batch_time.update(time.time() - end)
end = time.time()
# import pdb
# pdb.set_trace()
if i % args.print_freq == 0:
print('Epoch: [{0}][{1}/{2}]\t'
'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
'Data {data_time.val:.3f} ({data_time.avg:.3f})\t'
'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
'Acc@1 {top1.val:.3f} ({top1.avg:.3f})'.format(
epoch, i, len(train_loader), batch_time=batch_time,
data_time=data_time, loss=losses, top1=top1))
def validate(args, val_loader, model, criterion, device):
batch_time = AverageMeter()
losses = AverageMeter()
top1 = AverageMeter()
model.eval()
with torch.no_grad():
end = time.time()
for i, (input, target) in enumerate(val_loader):
if args.gpu is not None:
input = input.to(device)
target = torch.from_numpy(np.asarray(target))
target = target.to(device)
output = model(input)
loss = criterion(output[0], target)
acc1 = accuracy(output[0], target)
losses.update(loss.item(), input.size(0))
top1.update(acc1[0].item(), input.size(0))
batch_time.update(time.time() - end)
end = time.time()
if i % args.print_freq == 0:
print('Test: [{0}/{1}]\t'
'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
'Acc@1 {top1.val:.3f} ({top1.avg:.3f})'.format(
i, len(val_loader), batch_time=batch_time, loss=losses,
top1=top1))
print(' * Acc@1 {top1.avg:.3f}'.format(top1=top1))
return top1.avg
def test(args, val_loader, model, criterion, device):
batch_time = AverageMeter()
losses = AverageMeter()
top1 = AverageMeter()
model.eval()
with torch.no_grad():
end = time.time()
for i, (input, target, input_path) in enumerate(val_loader):
if args.gpu is not None:
input = input.to(device)
target = torch.from_numpy(np.asarray(target))
target = target.to(device)
output = model(input)
# import pdb
# pdb.set_trace()
loss = criterion(output[0], target)
acc1 = accuracy(output[0], target)
losses.update(loss.item(), input.size(0))
top1.update(acc1[0].item(), input.size(0))
batch_time.update(time.time() - end)
end = time.time()
if i % args.print_freq == 0:
print('Test: [{0}/{1}]\t'
'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
'Acc@1 {top1.val:.3f} ({top1.avg:.3f})'.format(
i, len(val_loader), batch_time=batch_time, loss=losses,
top1=top1))
print(' * Acc@1 {top1.avg:.3f}'.format(top1=top1))
return top1.avg
def save_checkpoint(state, is_best, save_dir, filename='checkpoint.pth.tar'):
torch.save(state, os.path.join(save_dir, filename))
if is_best:
shutil.copyfile(os.path.join(save_dir, filename), os.path.join(save_dir, 'model_best.pth.tar'))
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
def adjust_learning_rate(args, optimizer, epoch, N):
"""Sets the learning rate to the initial LR decayed by 10 every N epochs"""
lr = args.lr * (0.1 ** (epoch // N))
for param_group in optimizer.param_groups:
param_group['lr'] = lr
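# Worked example (added for illustration): with the defaults --lr 2e-4 and
# --adjust-freq 40, this schedule gives lr = 2e-4 for epochs 0-39, 2e-5 for epochs
# 40-79 and 2e-6 for epochs 80-99, i.e. a step decay by a factor of 10 every N epochs.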
def accuracy(output, target, topk=(1,)):
"""Computes the accuracy over the k top predictions for the specified values of k"""
with torch.no_grad():
maxk = max(topk)
batch_size = target.size(0)
_, pred = output.topk(maxk, 1, True, True)
pred = pred.t()
correct = pred.eq(target.view(1, -1).expand_as(pred))
res = []
for k in topk:
correct_k = correct[:k].view(-1).float().sum(0, keepdim=True)
res.append(correct_k.mul_(100.0 / batch_size))
return res
if __name__ == '__main__':
main()
|
py
|
1a5e2b855dffa1de6f9dbe6b58de0deae750790d
|
import random
UA = """
Mozilla/5.0 (Windows NT 6.1; WOW64; rv:41.0) Gecko/20100101 Firefox/41.0
Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/45.0.2454.101 Safari/537.36
Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/46.0.2490.80 Safari/537.36
Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/46.0.2490.71 Safari/537.36
Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11) AppleWebKit/601.1.56 (KHTML, like Gecko) Version/9.0 Safari/601.1.56
Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/46.0.2490.80 Safari/537.36
Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_1) AppleWebKit/601.2.7 (KHTML, like Gecko) Version/9.0.1 Safari/601.2.7
Mozilla/5.0 (Windows NT 6.1; WOW64; Trident/7.0; rv:11.0) like Gecko
Mozilla/5.0 (Windows NT 6.1; WOW64; rv:41.0) Gecko/20100101 Firefox/41.0
Mozilla/5.0 (Windows NT 10.0; WOW64; rv:41.0) Gecko/20100101 Firefox/41.0
Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:41.0) Gecko/20100101 Firefox/41.0
Mozilla/5.0 (Windows NT 6.3; WOW64; rv:41.0) Gecko/20100101 Firefox/41.0
Mozilla/5.0 (Macintosh; Intel Mac OS X 10.10; rv:41.0) Gecko/20100101 Firefox/41.0
Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/45.0.2454.101 Safari/537.36
Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/46.0.2490.80 Safari/537.36
Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/46.0.2490.71 Safari/537.36
Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/46.0.2490.80 Safari/537.36
Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/45.0.2454.101 Safari/537.36
Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; FSL 7.0.6.01001)
Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; FSL 7.0.7.01001)
Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; FSL 7.0.5.01003)
Mozilla/5.0 (Windows NT 6.1; WOW64; rv:12.0) Gecko/20100101 Firefox/12.0
Mozilla/5.0 (X11; U; Linux x86_64; de; rv:1.9.2.8) Gecko/20100723 Ubuntu/10.04 (lucid) Firefox/3.6.8
Mozilla/5.0 (Windows NT 5.1; rv:13.0) Gecko/20100101 Firefox/13.0.1
Mozilla/5.0 (Windows NT 6.1; WOW64; rv:11.0) Gecko/20100101 Firefox/11.0
Mozilla/5.0 (X11; U; Linux x86_64; de; rv:1.9.2.8) Gecko/20100723 Ubuntu/10.04 (lucid) Firefox/3.6.8
Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.0; .NET CLR 1.0.3705)
Mozilla/5.0 (Windows NT 5.1; rv:13.0) Gecko/20100101 Firefox/13.0.1
Mozilla/5.0 (Windows NT 6.1; WOW64; rv:13.0) Gecko/20100101 Firefox/13.0.1
Mozilla/5.0 (compatible; Baiduspider/2.0; +http://www.baidu.com/search/spider.html)
Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; WOW64; Trident/5.0)
Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; Trident/4.0; .NET CLR 2.0.50727; .NET CLR 3.0.4506.2152; .NET CLR 3.5.30729)
Opera/9.80 (Windows NT 5.1; U; en) Presto/2.10.289 Version/12.01
Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; SV1; .NET CLR 2.0.50727)
Mozilla/5.0 (Windows NT 5.1; rv:5.0.1) Gecko/20100101 Firefox/5.0.1
Mozilla/5.0 (Windows NT 6.1; rv:5.0) Gecko/20100101 Firefox/5.02
Mozilla/5.0 (Windows NT 6.0) AppleWebKit/535.1 (KHTML, like Gecko) Chrome/13.0.782.112 Safari/535.1
Mozilla/4.0 (compatible; MSIE 6.0; MSIE 5.5; Windows NT 5.0) Opera 7.02 Bork-edition [en]
"""
UAS = [ua.strip() for ua in UA.strip().split('\n')]
def get_ua():
return random.choice(UAS)
|
py
|
1a5e2c5d301c18fce5079eebff26031619479bf3
|
# -*- coding: utf-8 -*-
import time
import numpy as np
from pyodesys import ODESys as _ODESys
from pyodesys.results import Result
from chempy.units import get_derived_unit, unitless_in_registry, uniform, patched_numpy as pnp
from .integrate import run
from ._chemreac import cvode_predefined_durations_fields
class ODESys(_ODESys):
def __init__(self, rd, k_from_params=None, variables_from_params=None):
if rd.N > 1:
raise NotImplementedError("ODESys expects single bin for now")
self.rd = rd
self.k_from_params = k_from_params
self.variables_from_params = variables_from_params
ny = property(lambda self: self.rd.n*self.rd.N)
names = property(lambda self: self.rd.substance_names)
latex_names = property(lambda self: self.rd.substance_latex_names)
param_names = property(lambda self: self.rd.param_names)
autonomous_interface = property(lambda self: not self.rd.logt)
numpy = pnp
# dep_by_name = True
# par_by_name = True
def _get_units_util(self):
if self.rd.unit_registry is None:
_dedim = lambda x: np.array(x)
time_u = 1
conc_u = 1
dr_u = 1
else:
_dedim = lambda x: unitless_in_registry(x, self.rd.unit_registry)
time_u = get_derived_unit(self.rd.unit_registry, 'time')
conc_u = get_derived_unit(self.rd.unit_registry, 'concentration')
dr_u = get_derived_unit(self.rd.unit_registry, 'doserate')
return locals()
def integrate(self, x, y0, params=None, integrator='cvode', **kwargs):
if params is not None and self.k_from_params is not None:
self.rd.k = self.k_from_params(self, params)
if 'doserate' in (params or {}):
self.rd.set_with_units(
'fields', [[self.variables_from_params['density'](self, params)*params['doserate']]])
if 'atol' in kwargs and isinstance(kwargs['atol'], dict):
kwargs['atol'] = [kwargs['atol'][k] for k in self.names]
integr = run(self.rd, [y0[k] for k in self.names] if isinstance(y0, dict) else y0,
x, integrator=integrator, **kwargs)
pout = [params[k] for k in self.param_names] if self.param_names else None
return Result(integr.with_units('tout'), integr.with_units('Cout')[:, 0, :],
pout, integr.info, self)
def chained_parameter_variation(self, durations, y0, varied_params, default_params=None,
integrate_kwargs=None, x0=None, npoints=1, numpy=None):
if list(varied_params) != ['doserate']:
raise NotImplementedError("For now only varied doserate is supported")
if self.param_names != ['doserate']:
raise NotImplementedError("We expect doserate to be varied for now")
uutil = self._get_units_util()
_dedim, time_u, conc_u, dr_u = [uutil[k] for k in '_dedim time_u conc_u dr_u'.split()]
density = _dedim(self.variables_from_params['density'](self, default_params))
if default_params:
self.rd.k = _dedim(self.k_from_params(self, default_params))
if x0 is not None:
assert x0 == 0*time_u
integrate_kwargs = integrate_kwargs or {}
atol = integrate_kwargs.pop('atol', 1e-8)
if isinstance(atol, float):
atol = [atol]
elif isinstance(atol, dict):
atol = [atol[k] for k in self.names]
rtol = integrate_kwargs.pop('rtol', 1e-8)
method = integrate_kwargs.pop('method', 'bdf')
integrator = integrate_kwargs.pop('integrator', 'cvode')
if integrator != 'cvode':
raise NotImplementedError("chained_parameter_variation requires cvode for now")
drate = uniform(varied_params['doserate'])
time_cpu = time.process_time()
time_wall = time.time()
tout, yout = cvode_predefined_durations_fields(
self.rd, _dedim([y0[k] for k in self.names]),
_dedim(durations),
_dedim(drate*density),
atol=atol, rtol=rtol, method=method, npoints=npoints, **integrate_kwargs)
info = dict(
nsteps=-1,
nfev=self.rd.nfev,
njev=self.rd.njev,
time_wall=time.time() - time_wall,
time_cpu=time.process_time() - time_cpu,
success=True,
integrator=[integrator],
t0_set=False,
linear_solver=0, # pyodesys.results.Result work-around for now (not important)
)
info.update(self.rd.last_integration_info)
dr_out = np.concatenate((np.repeat(drate, npoints), drate[-1:]))
return Result(tout*time_u, yout[:, 0, :]*conc_u, dr_out.reshape((-1, 1))*dr_u, info, self)
|
py
|
1a5e2cdaf1acb4e7e4dd08a8a957b89d8254902c
|
"""
CryptoAPIs
Crypto APIs 2.0 is a complex and innovative infrastructure layer that radically simplifies the development of any Blockchain and Crypto related applications. Organized around REST, Crypto APIs 2.0 can assist both novice Bitcoin/Ethereum enthusiasts and crypto experts with the development of their blockchain applications. Crypto APIs 2.0 provides unified endpoints and data, raw data, automatic tokens and coins forwardings, callback functionalities, and much more. # noqa: E501
The version of the OpenAPI document: 2.0.0
Contact: [email protected]
Generated by: https://openapi-generator.tech
"""
import sys
import unittest
import cryptoapis
from cryptoapis.model.banned_ip_address_details import BannedIpAddressDetails
from cryptoapis.model.invalid_api_key import InvalidApiKey
from cryptoapis.model.missing_api_key import MissingApiKey
globals()['BannedIpAddressDetails'] = BannedIpAddressDetails
globals()['InvalidApiKey'] = InvalidApiKey
globals()['MissingApiKey'] = MissingApiKey
from cryptoapis.model.new_confirmed_tokens_transactions_and_each_confirmation_e401 import NewConfirmedTokensTransactionsAndEachConfirmationE401
class TestNewConfirmedTokensTransactionsAndEachConfirmationE401(unittest.TestCase):
"""NewConfirmedTokensTransactionsAndEachConfirmationE401 unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testNewConfirmedTokensTransactionsAndEachConfirmationE401(self):
"""Test NewConfirmedTokensTransactionsAndEachConfirmationE401"""
# FIXME: construct object with mandatory attributes with example values
# model = NewConfirmedTokensTransactionsAndEachConfirmationE401() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
|
py
|
1a5e2ce56d3ce6d055e1b1763045eb8e35ffd69d
|
import os
import sys
import argparse
here = os.path.abspath(os.path.dirname(__file__))
sys.path.insert(0, here + '/../../')
import test_util
from tree_influence.explainers import BoostInW2
def main(args):
# explainer arguments
kwargs = {}
# tests
test_util.test_local_influence_regression(args, BoostInW2, 'BoostInW2', kwargs)
test_util.test_local_influence_binary(args, BoostInW2, 'BoostInW2', kwargs)
test_util.test_local_influence_multiclass(args, BoostInW2, 'BoostInW2', kwargs)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
# data settings
parser.add_argument('--n_train', type=int, default=100)
parser.add_argument('--n_test', type=int, default=100)
parser.add_argument('--n_local', type=int, default=2)
parser.add_argument('--n_class', type=int, default=3)
parser.add_argument('--n_feat', type=int, default=10)
# tree-ensemble settings
parser.add_argument('--n_tree', type=int, default=100)
parser.add_argument('--n_leaf', type=int, default=31)
parser.add_argument('--max_depth', type=int, default=7)
parser.add_argument('--tree_type', type=str, default='lgb')
parser.add_argument('--model_type', type=str, default='dummy')
parser.add_argument('--rs', type=int, default=1)
args = parser.parse_args()
main(args)
|
py
|
1a5e2d592923f1f6e43f3a5d77bf946881cac30d
|
#!/usr/bin/env python3
# controls individual pis locally based on LAN commands
import socket
import sys
import os
import time
import picamera
# server connection and client identification
SERVER = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
SERVER.connect((sys.argv[1], int(sys.argv[2])))
# utf-8 byte encoder with injected header
def msgEncode(message):
msg = str(message)
msgLength = len(msg)
msg = "{:<4}".format(msgLength) + msg
msg = msg.encode()
return msg
# utf-8 byte reciever, buffer, and decoder
def msgDecode():
chunk = SERVER.recv(1)
while len(chunk) < 4:
chunk += SERVER.recv(1)
chunk = chunk.decode()
msgLength = int(chunk[:4])
msg = chunk[4:]
while len(msg) < msgLength:
chunk = SERVER.recv(1)
chunk = chunk.decode()
msg += chunk
return msg
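# Framing example (added for illustration; not part of the original script): each message
# is a 4-character, left-aligned decimal length followed by the payload, so "CAPTURED"
# travels on the wire as b"8   CAPTURED". msgDecode first accumulates the 4-byte header,
# then keeps reading single bytes until the full payload has arrived.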
# send encoded message to server
def msgSend(message):
msg = msgEncode(message)
SERVER.send(msg)
msgSend(socket.gethostname()) # confirm connection to server with host name
# calibrate camera
camera = picamera.PiCamera()
camera.resolution = (3280, 2464)
camera.meter_mode = 'spot'
camera.image_denoise = False
fileName = msgDecode()
imgFormat = msgDecode()
FILE_NAME = str(socket.gethostname()) + "_" + fileName + "." + imgFormat
def cameraCalibration(iso=0, shutter=0):
camera.start_preview()
camera.iso = iso
camera.shutter_speed = shutter
camera.exposure_mode = 'auto'
camera.awb_mode = "auto"
time.sleep(2) # pause for exposure adjustments
camera.exposure_mode = 'off'
time.sleep(0.25) # allow white balance to adjust based on locked exposure
whiteBal = camera.awb_gains
camera.awb_mode = "off"
time.sleep(0.25) # allow gains to settle
camera.awb_gains = whiteBal
camera.stop_preview()
def profileAnnotation(profile):
string = '''PROFILE {}\nShutter: {:.3f} ISO: {}\nGain: {:.3f} :: {:.3f}
White Balance: {:.3f} :: {:.3f}'''.format(profile, camera.exposure_speed * 0.000001,
camera.iso, float(camera.digital_gain), float(camera.analog_gain),
float(camera.awb_gains[0]), float(camera.awb_gains[1]))
return string
def profileCycle(count, path, iso, shutter):
cameraCalibration(iso, shutter)
camera.annotate_text = profileAnnotation(count)
camera.capture("{}/{}.jpeg".format(path, count))
def generateProfiles():
path = fileName + "_Profiles"
os.mkdir(path, 0o777)
camera.resolution = (1280, 720) # adjust camera resolution for preview images
profileCycle(1, path, 0, 0)
profileCycle(2, path, 100, 0)
profileCycle(3, path, 100, 10000)
profileCycle(4, path, 200, 10000)
profileCycle(5, path, 400, 10000)
camera.resolution = (3280, 2464) # restore camera resolution to full quality
camera.annotate_text = ""
# generate exposure profiles
msg = msgDecode()
if msg == "EXPOSURE":
generateProfiles()
msgSend("GENERATED")
msg = msgDecode()
# set exposure
while True:
if msg == "1":
cameraCalibration(0, 0)
break
if msg == "2":
cameraCalibration(100, 0)
break
if msg == "3":
cameraCalibration(100, 10000)
break
if msg == "4":
cameraCalibration(200, 10000)
break
if msg == "5":
cameraCalibration(400, 10000)
break
msgSend("EXPOSED")
# create workspace
os.mkdir(fileName, 0o777)
directory = fileName + "/"
imgName = directory + FILE_NAME
cycle = 1
# capture sequence
while True:
msg = msgDecode()
if msg == "CAPTURE":
img = imgName + "_{:0>3}.{}".format(cycle, imgFormat)
camera.capture(img, format=imgFormat, quality=100)
cycle += 1
msgSend("CAPTURED")
if msg == "DONE":
break
# exit
camera.close()
SERVER.close()
|
py
|
1a5e2dffaadd2db06a7ef7fead728c7e01af7849
|
import datetime
import logging
import math
import sys
import textwrap
import time
from pathlib import Path
from typing import Union
from amset.constants import output_width
__author__ = "Alex Ganose"
__maintainer__ = "Alex Ganose"
__email__ = "[email protected]"
logger = logging.getLogger(__name__)
def initialize_amset_logger(
directory: Union[str, Path] = ".",
filename: Union[str, Path, bool] = "amset.log",
level: int = logging.INFO,
print_log: bool = True,
) -> logging.Logger:
"""Initialize the default logger with stdout and file handlers.
Args:
directory: Path to the folder where the log file will be written.
filename: The log filename. If False, no log will be written.
level: The log level.
print_log: Whether to print the log to the screen.
Returns:
A logging instance with customized formatter and handlers.
"""
log = logging.getLogger("amset")
log.setLevel(level)
log.handlers = [] # reset logging handlers if they already exist
screen_formatter = WrappingFormatter(fmt="%(message)s")
file_formatter = WrappingFormatter(fmt="%(message)s", simple_ascii=True)
if filename is not False:
handler = logging.FileHandler(Path(directory) / filename, mode="w")
handler.setFormatter(file_formatter)
log.addHandler(handler)
if print_log:
screen_handler = logging.StreamHandler(stream=sys.stdout)
screen_handler.setFormatter(screen_formatter)
log.addHandler(screen_handler)
def handle_exception(exc_type, exc_value, exc_traceback):
if issubclass(exc_type, KeyboardInterrupt):
sys.__excepthook__(exc_type, exc_value, exc_traceback)
return
now = datetime.datetime.now()
exit_msg = "amset exiting on {} at {}".format(
now.strftime("%d %b %Y"), now.strftime("%H:%M")
)
log.error(
f"\n ERROR: {exit_msg}",
exc_info=(exc_type, exc_value, exc_traceback),
)
sys.excepthook = handle_exception
return log
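# Illustrative usage (assumed call pattern, not part of the original module):
#     log = initialize_amset_logger(directory=".", filename="amset.log")
#     log.info("starting amset run")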
class WrappingFormatter(logging.Formatter):
def __init__(
self, fmt=None, datefmt=None, style="%", width=output_width, simple_ascii=False
):
super().__init__(fmt=fmt, datefmt=datefmt, style=style)
self.simple_ascii = simple_ascii
self.wrapper = textwrap.TextWrapper(
width=width,
subsequent_indent=" ",
replace_whitespace=True,
drop_whitespace=False,
)
def format(self, record):
text = super().format(record)
if "└" in text or "├" in text:
            # don't insert a blank line when reporting list items
text = " " + text
else:
text = "\n" + "\n".join(
[self.wrapper.fill(" " + s) for s in text.splitlines()]
)
if self.simple_ascii:
return self.make_simple_ascii(text)
return text
@staticmethod
def make_simple_ascii(text):
        replacements = {
            "├──": "-",
            "│": " ",
            "└──": "-",
            fancy_logo: simple_logo,
            "ᵢᵢ": "_i",
            "ħω": "hbar.omega",
            "cm²/Vs": "cm2/Vs",
            "β²": "b2",
            "a₀⁻²": "a^-2",
            "cm⁻³": "cm-3",
            "–": "-",
            "₀": "0",
            "₁": "1",
            "₂": "2",
            "₃": "3",
            "₄": "4",
            "₅": "5",
            "₆": "6",
            "₇": "7",
            "₈": "8",
            "₉": "9",
"\u0305": "-",
"π": "pi",
"ħ": "h",
"ω": "w",
"α": "a",
"β": "b",
"γ": "y",
"°": "deg",
"Å": "angstrom",
}
for initial, final in replacements.items():
text = text.replace(initial, final)
return text
def log_time_taken(t0: float):
logger.info(f" └── time: {time.perf_counter() - t0:.4f} s")
def log_banner(text):
width = output_width - 2
nstars = (width - (len(text) + 2)) / 2
logger.info(
"\n{} {} {}".format("~" * math.ceil(nstars), text, "~" * math.floor(nstars))
)
def log_list(list_strings, prefix=" ", level=logging.INFO):
for i, text in enumerate(list_strings):
if i == len(list_strings) - 1:
pipe = "└"
else:
pipe = "├"
logger.log(level, f"{prefix}{pipe}── {text}")
fancy_logo = """ █████╗ ███╗ ███╗███████╗███████╗████████╗
██╔══██╗████╗ ████║██╔════╝██╔════╝╚══██╔══╝
███████║██╔████╔██║███████╗█████╗ ██║
██╔══██║██║╚██╔╝██║╚════██║██╔══╝ ██║
██║ ██║██║ ╚═╝ ██║███████║███████╗ ██║
╚═╝ ╚═╝╚═╝ ╚═╝╚══════╝╚══════╝ ╚═╝
"""
simple_logo = r""" /$$$$$$ /$$ /$$ /$$$$$$ /$$$$$$$$ /$$$$$$$$
/$$__ $$| $$$ /$$$ /$$__ $$| $$_____/|__ $$__/
| $$ \ $$| $$$$ /$$$$| $$ \__/| $$ | $$
| $$$$$$$$| $$ $$/$$ $$| $$$$$$ | $$$$$ | $$
| $$__ $$| $$ $$$| $$ \____ $$| $$__/ | $$
| $$ | $$| $$\ $ | $$ /$$ \ $$| $$ | $$
| $$ | $$| $$ \/ | $$| $$$$$$/| $$$$$$$$ | $$
|__/ |__/|__/ |__/ \______/ |________/ |__/
"""
|
py
|
1a5e2e14f470de665c249c666cea8e24128f4e1a
|
"""Represents an invitation returned to the introduction service."""
from marshmallow import EXCLUDE, fields
from .....messaging.agent_message import AgentMessage, AgentMessageSchema
from ....connections.v1_0.messages.connection_invitation import (
ConnectionInvitation,
ConnectionInvitationSchema,
)
from ..message_types import INVITATION, PROTOCOL_PACKAGE
HANDLER_CLASS = f"{PROTOCOL_PACKAGE}.handlers.invitation_handler.InvitationHandler"
class Invitation(AgentMessage):
"""Class representing an invitation returned to the introduction service."""
class Meta:
"""Metadata for an invitation."""
handler_class = HANDLER_CLASS
message_type = INVITATION
schema_class = "InvitationSchema"
def __init__(
self, *, invitation: ConnectionInvitation = None, message: str = None, **kwargs
):
"""
Initialize invitation object.
Args:
invitation: The connection invitation
message: Comments on the introduction
"""
super().__init__(**kwargs)
self.invitation = invitation
self.message = message
class InvitationSchema(AgentMessageSchema):
"""Invitation request schema class."""
class Meta:
"""Invitation request schema metadata."""
model_class = Invitation
unknown = EXCLUDE
invitation = fields.Nested(ConnectionInvitationSchema(), required=True)
message = fields.Str(
required=False,
description="Comments on the introduction",
example="Hello Bob, it's Charlie as Alice mentioned",
allow_none=True,
)
|
py
|
1a5e2e3f2b02348a23b9359881debddbbe87545d
|
# Script to make a Rigify rig compatible with the Unity humanoid (Mecanim) setup
# HOWTO: right after generating the rig using Rigify,
# press Armature -> Rigify To Unity Converter -> (Prepare rig for unity) button
bl_info = {
"name": "Rigify to Unity",
"category": "Rigging",
"description": "Change Rigify rig into Mecanim-ready rig for Unity",
"location": "At the bottom of Rigify rig data/armature tab",
"blender":(2,80,0)
}
import bpy
import re
class UnityMecanim_Panel(bpy.types.Panel):
bl_label = "Rigify to Unity converter"
bl_space_type = "PROPERTIES"
bl_region_type = "WINDOW"
bl_context = "data"
@classmethod
def poll(self, context):
return context.object.type == 'ARMATURE' and "DEF-upper_arm.L.001" in bpy.context.object.data.bones
def draw(self, context):
self.layout.operator("rig4mec.convert2unity")
class UnityMecanim_Convert2Unity(bpy.types.Operator):
bl_idname = "rig4mec.convert2unity"
bl_label = "Prepare rig for unity"
def execute(self, context):
ob = bpy.context.object
bpy.ops.object.mode_set(mode='OBJECT')
if 'DEF-breast.L' in ob.data.bones :
ob.data.bones['DEF-breast.L'].use_deform = False
if 'DEF-breast.R' in ob.data.bones :
ob.data.bones['DEF-breast.R'].use_deform = False
if 'DEF-pelvis.L' in ob.data.bones :
ob.data.bones['DEF-pelvis.L'].use_deform = False
if 'DEF-pelvis.R' in ob.data.bones :
ob.data.bones['DEF-pelvis.R'].use_deform = False
bpy.ops.object.mode_set(mode='EDIT')
ob.data.edit_bones['DEF-shoulder.L'].parent = ob.data.edit_bones['DEF-spine.003']
ob.data.edit_bones['DEF-shoulder.R'].parent = ob.data.edit_bones['DEF-spine.003']
ob.data.edit_bones['DEF-upper_arm.L'].parent = ob.data.edit_bones['DEF-shoulder.L']
ob.data.edit_bones['DEF-upper_arm.R'].parent = ob.data.edit_bones['DEF-shoulder.R']
ob.data.edit_bones['DEF-thigh.L'].parent = ob.data.edit_bones['DEF-spine']
ob.data.edit_bones['DEF-thigh.R'].parent = ob.data.edit_bones['DEF-spine']
ob.data.edit_bones['DEF-upper_arm.L'].tail = ob.data.edit_bones['DEF-upper_arm.L.001'].tail
ob.data.edit_bones['DEF-forearm.L'].tail = ob.data.edit_bones['DEF-forearm.L.001'].tail
ob.data.edit_bones['DEF-forearm.L'].parent = ob.data.edit_bones['DEF-upper_arm.L.001'].parent
ob.data.edit_bones['DEF-hand.L'].parent = ob.data.edit_bones['DEF-forearm.L.001'].parent
ob.data.edit_bones.remove(ob.data.edit_bones['DEF-upper_arm.L.001'])
ob.data.edit_bones.remove(ob.data.edit_bones['DEF-forearm.L.001'])
ob.data.edit_bones['DEF-upper_arm.R'].tail = ob.data.edit_bones['DEF-upper_arm.R.001'].tail
ob.data.edit_bones['DEF-forearm.R'].tail = ob.data.edit_bones['DEF-forearm.R.001'].tail
ob.data.edit_bones['DEF-forearm.R'].parent = ob.data.edit_bones['DEF-upper_arm.R.001'].parent
ob.data.edit_bones['DEF-hand.R'].parent = ob.data.edit_bones['DEF-forearm.R.001'].parent
ob.data.edit_bones.remove(ob.data.edit_bones['DEF-upper_arm.R.001'])
ob.data.edit_bones.remove(ob.data.edit_bones['DEF-forearm.R.001'])
ob.data.edit_bones['DEF-thigh.L'].tail = ob.data.edit_bones['DEF-thigh.L.001'].tail
ob.data.edit_bones['DEF-shin.L'].tail = ob.data.edit_bones['DEF-shin.L.001'].tail
ob.data.edit_bones['DEF-shin.L'].parent = ob.data.edit_bones['DEF-thigh.L.001'].parent
ob.data.edit_bones['DEF-foot.L'].parent = ob.data.edit_bones['DEF-shin.L.001'].parent
ob.data.edit_bones.remove(ob.data.edit_bones['DEF-thigh.L.001'])
ob.data.edit_bones.remove(ob.data.edit_bones['DEF-shin.L.001'])
ob.data.edit_bones['DEF-thigh.R'].tail = ob.data.edit_bones['DEF-thigh.R.001'].tail
ob.data.edit_bones['DEF-shin.R'].tail = ob.data.edit_bones['DEF-shin.R.001'].tail
ob.data.edit_bones['DEF-shin.R'].parent = ob.data.edit_bones['DEF-thigh.R.001'].parent
ob.data.edit_bones['DEF-foot.R'].parent = ob.data.edit_bones['DEF-shin.R.001'].parent
ob.data.edit_bones.remove(ob.data.edit_bones['DEF-thigh.R.001'])
ob.data.edit_bones.remove(ob.data.edit_bones['DEF-shin.R.001'])
if 'DEF-pelvis.L' in ob.data.bones :
ob.data.edit_bones.remove(ob.data.edit_bones['DEF-pelvis.L'])
if 'DEF-pelvis.R' in ob.data.bones :
ob.data.edit_bones.remove(ob.data.edit_bones['DEF-pelvis.R'])
if 'DEF-breast.L' in ob.data.bones :
ob.data.edit_bones.remove(ob.data.edit_bones['DEF-breast.L'])
if 'DEF-breast.R' in ob.data.bones :
ob.data.edit_bones.remove(ob.data.edit_bones['DEF-breast.R'])
bpy.ops.object.mode_set(mode='OBJECT')
namelist = [("DEF-spine.006", "DEF-head"),("DEF-spine.005","DEF-neck"),("DEF-spine","DEF-Hips"),("DEF-spine.001","DEF-spine"),("DEF-spine.002","DEF-spine.001"),("DEF-spine.003","DEF-spine.002"),("DEF-spine.004","DEF-spine.003")]
for name, newname in namelist:
# get the pose bone with name
pb = ob.pose.bones.get(name)
# continue if no bone of that name
if pb is None:
continue
# rename
pb.name = newname
self.report({'INFO'}, 'Unity ready rig!')
return{'FINISHED'}
def register():
#classes
bpy.utils.register_class(UnityMecanim_Panel)
bpy.utils.register_class(UnityMecanim_Convert2Unity)
def unregister():
#classes
bpy.utils.unregister_class(UnityMecanim_Panel)
bpy.utils.unregister_class(UnityMecanim_Convert2Unity)
|
py
|
1a5e2f4dc964caae20fca0ae9a8a93f553a0d743
|
from .common import Benchmark, get_squares
import numpy_demo as np
from io import StringIO
class Copy(Benchmark):
params = ["int8", "int16", "float32", "float64",
"complex64", "complex128"]
param_names = ['type']
def setup(self, typename):
dtype = np.dtype(typename)
self.d = np.arange((50 * 500), dtype=dtype).reshape((500, 50))
self.e = np.arange((50 * 500), dtype=dtype).reshape((50, 500))
self.e_d = self.e.reshape(self.d.shape)
self.dflat = np.arange((50 * 500), dtype=dtype)
def time_memcpy(self, typename):
self.d[...] = self.e_d
def time_memcpy_large_out_of_place(self, typename):
l = np.ones(1024**2, dtype=np.dtype(typename))
l.copy()
def time_cont_assign(self, typename):
self.d[...] = 1
def time_strided_copy(self, typename):
self.d[...] = self.e.T
def time_strided_assign(self, typename):
self.dflat[::2] = 2
class CopyTo(Benchmark):
def setup(self):
self.d = np.ones(50000)
self.e = self.d.copy()
self.m = (self.d == 1)
self.im = (~ self.m)
self.m8 = self.m.copy()
self.m8[::8] = (~ self.m[::8])
self.im8 = (~ self.m8)
def time_copyto(self):
np.copyto(self.d, self.e)
def time_copyto_sparse(self):
np.copyto(self.d, self.e, where=self.m)
def time_copyto_dense(self):
np.copyto(self.d, self.e, where=self.im)
def time_copyto_8_sparse(self):
np.copyto(self.d, self.e, where=self.m8)
def time_copyto_8_dense(self):
np.copyto(self.d, self.e, where=self.im8)
class Savez(Benchmark):
def setup(self):
self.squares = get_squares()
def time_vb_savez_squares(self):
np.savez('tmp.npz', **self.squares)
class LoadtxtCSVComments(Benchmark):
# benchmarks for np.loadtxt comment handling
# when reading in CSV files
params = [10, int(1e2), int(1e4), int(1e5)]
param_names = ['num_lines']
def setup(self, num_lines):
data = [u'1,2,3 # comment'] * num_lines
        # unfortunately, timeit will only run setup()
        # between repeat events, but not for iterations
        # within repeats, so the StringIO object
        # will have to be rewound in the benchmark proper
self.data_comments = StringIO(u'\n'.join(data))
def time_comment_loadtxt_csv(self, num_lines):
# benchmark handling of lines with comments
# when loading in from csv files
# inspired by similar benchmark in pandas
# for read_csv
# need to rewind StringIO object (unfortunately
# confounding timing result somewhat) for every
# call to timing test proper
np.loadtxt(self.data_comments,
delimiter=u',')
self.data_comments.seek(0)
class LoadtxtCSVdtypes(Benchmark):
# benchmarks for np.loadtxt operating with
# different dtypes parsed / cast from CSV files
params = (['float32', 'float64', 'int32', 'int64',
'complex128', 'str', 'object'],
[10, int(1e2), int(1e4), int(1e5)])
param_names = ['dtype', 'num_lines']
def setup(self, dtype, num_lines):
data = [u'5, 7, 888'] * num_lines
self.csv_data = StringIO(u'\n'.join(data))
def time_loadtxt_dtypes_csv(self, dtype, num_lines):
# benchmark loading arrays of various dtypes
# from csv files
# state-dependent timing benchmark requires
# rewind of StringIO object
np.loadtxt(self.csv_data,
delimiter=u',',
dtype=dtype)
self.csv_data.seek(0)
class LoadtxtCSVStructured(Benchmark):
# benchmarks for np.loadtxt operating with
# a structured data type & CSV file
def setup(self):
num_lines = 50000
data = [u"M, 21, 72, X, 155"] * num_lines
self.csv_data = StringIO(u'\n'.join(data))
def time_loadtxt_csv_struct_dtype(self):
# obligate rewind of StringIO object
# between iterations of a repeat:
np.loadtxt(self.csv_data,
delimiter=u',',
dtype=[('category_1', 'S1'),
('category_2', 'i4'),
('category_3', 'f8'),
('category_4', 'S1'),
('category_5', 'f8')])
self.csv_data.seek(0)
class LoadtxtCSVSkipRows(Benchmark):
# benchmarks for loadtxt row skipping when
# reading in csv file data; a similar benchmark
# is present in the pandas asv suite
params = [0, 500, 10000]
param_names = ['skiprows']
def setup(self, skiprows):
np.random.seed(123)
test_array = np.random.rand(100000, 3)
self.fname = 'test_array.csv'
np.savetxt(fname=self.fname,
X=test_array,
delimiter=',')
def time_skiprows_csv(self, skiprows):
np.loadtxt(self.fname,
delimiter=',',
skiprows=skiprows)
class LoadtxtReadUint64Integers(Benchmark):
# pandas has a similar CSV reading benchmark
# modified to suit np.loadtxt
params = [550, 1000, 10000]
param_names = ['size']
def setup(self, size):
arr = np.arange(size).astype('uint64') + 2**63
self.data1 = StringIO(u'\n'.join(arr.astype(str).tolist()))
arr = arr.astype(object)
arr[500] = -1
self.data2 = StringIO(u'\n'.join(arr.astype(str).tolist()))
def time_read_uint64(self, size):
# mandatory rewind of StringIO object
# between iterations of a repeat:
np.loadtxt(self.data1)
self.data1.seek(0)
def time_read_uint64_neg_values(self, size):
# mandatory rewind of StringIO object
# between iterations of a repeat:
np.loadtxt(self.data2)
self.data2.seek(0)
class LoadtxtUseColsCSV(Benchmark):
# benchmark selective column reading from CSV files
# using np.loadtxt
params = [2, [1, 3], [1, 3, 5, 7]]
param_names = ['usecols']
def setup(self, usecols):
num_lines = 5000
data = [u'0, 1, 2, 3, 4, 5, 6, 7, 8, 9'] * num_lines
self.csv_data = StringIO(u'\n'.join(data))
def time_loadtxt_usecols_csv(self, usecols):
# must rewind StringIO because of state
# dependence of file reading
np.loadtxt(self.csv_data,
delimiter=u',',
usecols=usecols)
self.csv_data.seek(0)
class LoadtxtCSVDateTime(Benchmark):
# benchmarks for np.loadtxt operating with
# datetime data in a CSV file
params = [20, 200, 2000, 20000]
param_names = ['num_lines']
def setup(self, num_lines):
# create the equivalent of a two-column CSV file
# with date strings in the first column and random
# floating point data in the second column
dates = np.arange('today', 20, dtype=np.datetime64)
np.random.seed(123)
values = np.random.rand(20)
date_line = u''
for date, value in zip(dates, values):
date_line += (str(date) + ',' + str(value) + '\n')
# expand data to specified number of lines
data = date_line * (num_lines // 20)
self.csv_data = StringIO(data)
def time_loadtxt_csv_datetime(self, num_lines):
# rewind StringIO object -- the timing iterations
# are state-dependent
X = np.loadtxt(self.csv_data,
delimiter=u',',
dtype=([('dates', 'M8[us]'),
('values', 'float64')]))
self.csv_data.seek(0)
|
py
|
1a5e2f8ca608f131d6ca5ae1fd1e5da58edda4ff
|
from abc import ABC, abstractmethod
from data_generators.basic_generator import *
from data_generators.standard_generator import StandardDataGenerator
class Environment(ABC):
"""Environment abstract base class.
    The constructor loads all the basic data from the given JSON source in the given mode."""
def __init__(self, mode='all', bid=None, src='src/basic003.json', generator='basic'):
if generator == 'basic':
self.data_gen = BasicDataGenerator(src)
elif generator == 'standard':
self.data_gen = StandardDataGenerator(src)
else:
raise NotImplementedError
self.bids = self.data_gen.get_bids()
self.prices = self.data_gen.get_prices()
self.margins = self.data_gen.get_margins()
self.n_clicks = self.data_gen.get_daily_clicks(mode=mode)
if bid is not None:
self.cpc = self.data_gen.get_costs_per_click(mode=mode, bid=bid)
self.conv_rates = self.data_gen.get_conversion_rates(mode=mode, bid=bid)
self.tau = self.data_gen.get_future_purchases(mode=mode, bid=bid)
self.features = self.data_gen.get_features()
self.customer_classes = self.data_gen.get_classes()
@abstractmethod
def round(self, pulled_arm):
"""Play a single round of the environment"""
pass
|
py
|
1a5e31793b7adf2840adebfa84e895479cd33211
|
import itertools
import numpy as np
import pytest
import matplotlib.pyplot as plt
import cirq
import examples.basic_arithmetic
import examples.bell_inequality
import examples.bernstein_vazirani
import examples.bcs_mean_field
import examples.bristlecone_heatmap_example
import examples.cross_entropy_benchmarking_example
import examples.deutsch
import examples.grover
import examples.hello_qubit
import examples.hhl
import examples.noisy_simulation_example
import examples.phase_estimator
import examples.place_on_bristlecone
import examples.qaoa
import examples.quantum_fourier_transform
import examples.quantum_teleportation
import examples.qubit_characterizations_example
import examples.shor
import examples.superdense_coding
import examples.swap_networks
def test_example_runs_bernstein_vazirani():
examples.bernstein_vazirani.main(qubit_count=3)
# Check empty oracle case. Cover both biases.
a = cirq.NamedQubit('a')
assert list(examples.bernstein_vazirani.make_oracle(
[], a, [], False)) == []
assert list(examples.bernstein_vazirani.make_oracle(
[], a, [], True)) == [cirq.X(a)]
def test_example_runs_deutsch():
examples.deutsch.main()
def test_example_runs_hello_line():
examples.place_on_bristlecone.main()
def test_example_runs_hello_qubit():
examples.hello_qubit.main()
def test_example_runs_bell_inequality():
examples.bell_inequality.main()
def test_example_runs_quantum_fourier_transform():
examples.quantum_fourier_transform.main()
def test_example_runs_bcs_mean_field():
examples.bcs_mean_field.main()
def test_example_runs_grover():
examples.grover.main()
def test_example_runs_basic_arithmetic():
examples.basic_arithmetic.main(n=2)
def test_example_runs_phase_estimator():
examples.phase_estimator.main(qnums=(2,), repetitions=2)
def test_example_runs_bristlecone_heatmap():
plt.switch_backend('agg')
examples.bristlecone_heatmap_example.main()
def test_example_runs_qaoa():
examples.qaoa.main(repetitions=10, maxiter=5)
def test_example_runs_quantum_teleportation():
expected, teleported = examples.quantum_teleportation.main()
assert np.all(np.isclose(expected, teleported, atol=1e-4))
def test_example_runs_superdense_coding():
examples.superdense_coding.main()
def test_example_runs_hhl():
examples.hhl.main()
def test_example_runs_qubit_characterizations():
examples.qubit_characterizations_example.main()
def test_example_swap_networks():
examples.swap_networks.main()
def test_example_cross_entropy_benchmarking():
examples.cross_entropy_benchmarking_example.main(repetitions=10,
num_circuits=2,
cycles=[2, 3, 4])
def test_example_noisy_simulation():
examples.noisy_simulation_example.main()
def test_example_shor_modular_exp_register_size():
with pytest.raises(ValueError):
_ = examples.shor.ModularExp(target=cirq.LineQubit.range(2),
exponent=cirq.LineQubit.range(2, 5),
base=4,
modulus=5)
def test_example_shor_modular_exp_register_type():
operation = examples.shor.ModularExp(target=cirq.LineQubit.range(3),
exponent=cirq.LineQubit.range(3, 5),
base=4,
modulus=5)
with pytest.raises(ValueError):
_ = operation.with_registers(cirq.LineQubit.range(3))
with pytest.raises(ValueError):
_ = operation.with_registers(1, cirq.LineQubit.range(3, 6), 4, 5)
with pytest.raises(ValueError):
_ = operation.with_registers(cirq.LineQubit.range(3),
cirq.LineQubit.range(3, 6),
cirq.LineQubit.range(6, 9), 5)
with pytest.raises(ValueError):
_ = operation.with_registers(cirq.LineQubit.range(3),
cirq.LineQubit.range(3, 6), 4,
cirq.LineQubit.range(6, 9))
def test_example_shor_modular_exp_registers():
target = cirq.LineQubit.range(3)
exponent = cirq.LineQubit.range(3, 5)
operation = examples.shor.ModularExp(target, exponent, 4, 5)
assert operation.registers() == (target, exponent, 4, 5)
new_target = cirq.LineQubit.range(5, 8)
new_exponent = cirq.LineQubit.range(8, 12)
new_operation = operation.with_registers(new_target, new_exponent, 6, 7)
assert new_operation.registers() == (new_target, new_exponent, 6, 7)
def test_example_shor_modular_exp_diagram():
target = cirq.LineQubit.range(3)
exponent = cirq.LineQubit.range(3, 5)
operation = examples.shor.ModularExp(target, exponent, 4, 5)
circuit = cirq.Circuit(operation)
cirq.testing.assert_has_diagram(
circuit, """
0: ───ModularExp(t*4**e % 5)───
│
1: ───t1───────────────────────
│
2: ───t2───────────────────────
│
3: ───e0───────────────────────
│
4: ───e1───────────────────────
""")
operation = operation.with_registers(target, 2, 4, 5)
circuit = cirq.Circuit(operation)
cirq.testing.assert_has_diagram(
circuit, """
0: ───ModularExp(t*4**2 % 5)───
│
1: ───t1───────────────────────
│
2: ───t2───────────────────────
""")
def assert_order(r: int, x: int, n: int) -> None:
"""Assert that r is the order of x modulo n."""
y = x
for _ in range(1, r):
assert y % n != 1
y *= x
assert y % n == 1
@pytest.mark.parametrize('x, n', ((2, 3), (5, 6), (2, 7), (6, 7), (5, 8),
(6, 11), (6, 49), (7, 810)))
def test_example_shor_naive_order_finder(x, n):
r = examples.shor.naive_order_finder(x, n)
assert_order(r, x, n)
@pytest.mark.parametrize('x, n', ((2, 3), (5, 6), (2, 7), (6, 7), (5, 8)))
def test_example_shor_quantum_order_finder(x, n):
r = None
for _ in range(15):
r = examples.shor.quantum_order_finder(x, n)
if r is not None:
break
assert_order(r, x, n)
@pytest.mark.parametrize('x, n', ((1, 7), (7, 7)))
def test_example_shor_naive_order_finder_invalid_x(x, n):
with pytest.raises(ValueError):
_ = examples.shor.naive_order_finder(x, n)
@pytest.mark.parametrize('x, n', ((1, 7), (7, 7)))
def test_example_shor_quantum_order_finder_invalid_x(x, n):
with pytest.raises(ValueError):
_ = examples.shor.quantum_order_finder(x, n)
@pytest.mark.parametrize('n', (4, 6, 15, 125, 101 * 103, 127 * 127))
def test_example_shor_find_factor_with_composite_n_and_naive_order_finder(n):
d = examples.shor.find_factor(n, examples.shor.naive_order_finder)
assert 1 < d < n
assert n % d == 0
@pytest.mark.parametrize('n', (4, 6, 15, 125))
def test_example_shor_find_factor_with_composite_n_and_quantum_order_finder(n):
d = examples.shor.find_factor(n, examples.shor.naive_order_finder)
assert 1 < d < n
assert n % d == 0
@pytest.mark.parametrize(
'n, order_finder',
itertools.product(
(2, 3, 5, 11, 101, 127, 907),
(examples.shor.naive_order_finder, examples.shor.quantum_order_finder)))
def test_example_shor_find_factor_with_prime_n(n, order_finder):
d = examples.shor.find_factor(n, order_finder)
assert d is None
@pytest.mark.parametrize('n', (2, 3, 15, 17, 2**89 - 1))
def test_example_runs_shor_valid(n):
examples.shor.main(n=n)
@pytest.mark.parametrize('n', (-1, 0, 1))
def test_example_runs_shor_invalid(n):
with pytest.raises(ValueError):
examples.shor.main(n=n)
|
py
|
1a5e32e8ba36da6d62f07928344a23bb7c60b0ae
|
# coding: utf-8
from django.urls import path,re_path,include
from . import views
app_name = 'reviews.conducting'
urlpatterns = [
re_path(r'^add_source_string/$', views.add_source_string, name='add_source_string'),
re_path(r'^save_source_string/$', views.save_source_string, name='save_source_string'),
re_path(r'^remove_source_string/$', views.remove_source_string, name='remove_source_string'),
re_path(r'^import_base_string/$', views.import_base_string, name='import_base_string'),
re_path(r'^search_scopus/$', views.search_scopus, name='search_scopus'),
re_path(r'^search_science_direct/$', views.search_science_direct, name='search_science_direct'),
re_path(r'^new_article/$', views.new_article, name='new_article'),
re_path(r'^import/bibtex_file/$', views.import_bibtex, name='import_bibtex'),
re_path(r'^import/bibtex_raw_content/$', views.import_bibtex_raw_content, name='import_bibtex_raw_content'),
re_path(r'^source_articles/$', views.source_articles, name='source_articles'),
re_path(r'^article_details/$', views.article_details, name='article_details'),
re_path(r'^find_duplicates/$', views.find_duplicates, name='find_duplicates'),
re_path(r'^resolve_duplicated/$', views.resolve_duplicated, name='resolve_duplicated'),
re_path(r'^export_results/$', views.export_results, name='export_results'),
re_path(r'^resolve_all/$', views.resolve_all, name='resolve_all'),
re_path(r'^save_article_details/$', views.save_article_details, name='save_article_details'),
re_path(r'^save_quality_assessment/$', views.save_quality_assessment, name='save_quality_assessment'),
re_path(r'^quality_assessment_detailed/$', views.quality_assessment_detailed, name='quality_assessment_detailed'),
re_path(r'^quality_assessment_summary/$', views.quality_assessment_summary, name='quality_assessment_summary'),
re_path(r'^multiple_articles_action/remove/$', views.multiple_articles_action_remove, name='multiple_articles_action_remove'),
re_path(r'^multiple_articles_action/accept/$', views.multiple_articles_action_accept, name='multiple_articles_action_accept'),
re_path(r'^multiple_articles_action/reject/$', views.multiple_articles_action_reject, name='multiple_articles_action_reject'),
re_path(r'^multiple_articles_action/duplicated/$', views.multiple_articles_action_duplicated, name='multiple_articles_action_duplicated'),
#re_path(r'^articles/upload/$', 'articles_upload', name='articles_upload'),
re_path(r'^save_data_extraction/$', views.save_data_extraction, name='save_data_extraction'),
re_path(r'^save_data_extraction_status/$', views.save_data_extraction_status, name='save_data_extraction_status'),
re_path(r'^articles_selection_chart/$', views.articles_selection_chart, name='articles_selection_chart'),
re_path(r'^articles_per_year/$', views.articles_per_year, name='articles_per_year'),
re_path(r'^export_data_extraction/$', views.export_data_extraction, name='export_data_extraction')
]
|
py
|
1a5e339438011642a41f733c8ee3e18940b04141
|
# from .frequency_series import FrequencySeries
|
py
|
1a5e33b19d87e973cfabee4b18d5f5b1044d9662
|
from logging import getLogger
from drf_yasg.utils import swagger_auto_schema
from hexbytes import HexBytes
from rest_framework import status
from rest_framework.generics import CreateAPIView
from rest_framework.permissions import AllowAny
from rest_framework.response import Response
from gnosis.eth.constants import NULL_ADDRESS
from .serializers import (SafeCreation2ResponseSerializer,
SafeCreation2Serializer,
SafeCreationEstimateResponseSerializer,
SafeCreationEstimateV2Serializer)
from .services.safe_creation_service import SafeCreationServiceProvider
logger = getLogger(__name__)
class SafeCreationEstimateView(CreateAPIView):
permission_classes = (AllowAny,)
serializer_class = SafeCreationEstimateV2Serializer
@swagger_auto_schema(responses={201: SafeCreationEstimateResponseSerializer(),
400: 'Invalid data',
422: 'Cannot process data'})
def post(self, request, *args, **kwargs):
"""
Estimates creation of a Safe
"""
serializer = self.serializer_class(data=request.data)
if serializer.is_valid():
number_owners = serializer.data['number_owners']
safe_creation_estimates = SafeCreationServiceProvider().estimate_safe_creation_for_all_tokens(number_owners)
safe_creation_estimate_response_data = SafeCreationEstimateResponseSerializer(safe_creation_estimates,
many=True)
return Response(status=status.HTTP_200_OK, data=safe_creation_estimate_response_data.data)
else:
return Response(status=status.HTTP_422_UNPROCESSABLE_ENTITY, data=serializer.errors)
class SafeCreationView(CreateAPIView):
permission_classes = (AllowAny,)
serializer_class = SafeCreation2Serializer
@swagger_auto_schema(responses={201: SafeCreation2ResponseSerializer(),
400: 'Invalid data',
422: 'Cannot process data'})
def post(self, request, *args, **kwargs):
"""
Begins creation of a Safe
"""
serializer = self.serializer_class(data=request.data)
if serializer.is_valid():
salt_nonce, owners, threshold, payment_token = (serializer.data['salt_nonce'], serializer.data['owners'],
serializer.data['threshold'],
serializer.data['payment_token'])
safe_creation_service = SafeCreationServiceProvider()
safe_creation = safe_creation_service.create2_safe_tx(salt_nonce, owners, threshold, payment_token)
safe_creation_response_data = SafeCreation2ResponseSerializer(data={
'safe': safe_creation.safe.address,
'master_copy': safe_creation.master_copy,
'proxy_factory': safe_creation.proxy_factory,
'payment': safe_creation.payment,
'payment_token': safe_creation.payment_token or NULL_ADDRESS,
'payment_receiver': safe_creation.payment_receiver or NULL_ADDRESS,
'setup_data': HexBytes(safe_creation.setup_data).hex(),
'gas_estimated': safe_creation.gas_estimated,
'gas_price_estimated': safe_creation.gas_price_estimated,
})
safe_creation_response_data.is_valid(raise_exception=True)
return Response(status=status.HTTP_201_CREATED, data=safe_creation_response_data.data)
else:
return Response(status=status.HTTP_422_UNPROCESSABLE_ENTITY,
data=serializer.errors)
|
py
|
1a5e34e8332c47f51712f7db61f34e5ef35e7ee0
|
#!/usr/bin/env python
import os
import sys
import re
# from distutils.core import setup
from setuptools import setup
VERSION = "0.9.8"
if __name__ == "__main__":
if "--format=msi" in sys.argv or "bdist_msi" in sys.argv:
# hack the version name to a format msi doesn't have trouble with
VERSION = VERSION.replace("-alpha", "a")
VERSION = VERSION.replace("-beta", "b")
VERSION = VERSION.replace("-rc", "r")
fname = os.path.join(os.path.dirname(os.path.abspath(__file__)), "README.md")
with open(fname, "r") as readme:
long_desc = readme.read()
# Strip out CI badges for PyPI releases
long_desc = re.sub(r"\[!\[Build Status(.*?)\n", "", long_desc)
setupdata = {
"name": "PySDL2",
"version": VERSION,
"description": "Python SDL2 bindings",
"long_description": long_desc,
"long_description_content_type": "text/markdown",
"author": "Marcus von Appen",
"author_email": "[email protected]",
"license": "Public Domain / zlib",
"url": "https://github.com/marcusva/py-sdl2",
"download_url": "https://pypi.python.org/pypi/PySDL2",
"package_dir": {"sdl2.examples": "examples"},
"package_data": {"sdl2.test": ["resources/*.*"],
"sdl2.examples": ["resources/*.*"]},
"packages": ["sdl2",
"sdl2.ext",
"sdl2.test",
"sdl2.examples"
],
"classifiers": [
"Development Status :: 4 - Beta",
"Intended Audience :: Developers",
"License :: Public Domain",
"License :: OSI Approved :: zlib/libpng License",
"Operating System :: OS Independent",
"Programming Language :: Python",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: Implementation :: CPython",
"Programming Language :: Python :: Implementation :: PyPy",
"Topic :: Software Development :: Libraries :: Python Modules",
],
}
setup(**setupdata)
|
py
|
1a5e370cfbef0eac638f2117f6ae481999306019
|
from distutils.core import setup
from setuptools import find_packages
setup(
name='autokeras',
packages=find_packages(exclude=('tests',)),
install_requires=['scipy==1.1.0',
'torch==0.4.1',
'torchvision==0.2.1',
'numpy==1.14.5',
'keras==2.2.2',
'scikit-learn==0.20.1',
'scikit-image==0.13.1',
'tqdm==4.25.0',
'tensorflow==1.10.0',
'imageio==2.4.1',
'requests==2.20.1',
'lightgbm==2.2.2',
'pandas==0.23.4',
'opencv-python==3.4.4.19'],
version='0.3.5',
description='AutoML for deep learning',
author='DATA Lab at Texas A&M University',
author_email='[email protected]',
url='http://autokeras.com',
download_url='https://github.com/jhfjhfj1/autokeras/archive/0.3.5.tar.gz',
keywords=['AutoML', 'keras'],
classifiers=[]
)
|
py
|
1a5e37a22a7272084e30ddabee70f2d6052e8bd1
|
import torch.nn.functional as F
from torch import nn
class Classifier(nn.Module):
"""
    A class used to build a neural network for classification of MNIST digits
...
Methods
-------
forward()
        Forward pass through the network, returns the output log-probabilities and penultimate features
"""
def __init__(self):
super().__init__()
# Define fully connected layers
self.fc1 = nn.Linear(28*28, 256)
self.fc2 = nn.Linear(256, 128)
self.fc3 = nn.Linear(128, 64)
self.fc4 = nn.Linear(64, 10)
# Dropout module with 0.2 drop probability
self.dropout = nn.Dropout(p=0.2)
    def forward(self, x):
        """ Forward pass through the network, returns the output log-probabilities and penultimate features """
# Flattening input tensor except for the minibatch dimension
x = x.view(x.shape[0], -1)
# Fully connected layers with dropout
x = self.dropout(F.relu(self.fc1(x)))
x = self.dropout(F.relu(self.fc2(x)))
x = self.dropout(F.relu(self.fc3(x)))
features = x
        # Output layer, so no dropout here
x = F.log_softmax(self.fc4(x), dim=1)
return x, features
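# Illustrative usage (assumed shapes, not part of the original module; needs `import torch`):
#     model = Classifier()
#     log_probs, features = model(torch.randn(64, 1, 28, 28))
#     # log_probs: (64, 10) log-probabilities, features: (64, 64) penultimate activations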
|
py
|
1a5e39c555aed7b5d521441df43732dba85ef0ab
|
import argparse
parser = argparse.ArgumentParser()
def setarg(parser, argname, dfl):
parser.add_argument('-'+argname, dest=argname,
action='store_true')
parser.add_argument('-no_'+argname, dest=argname,
action='store_false')
    parser.set_defaults(**{argname: dfl})
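# Illustrative example (not in the original script): setarg(parser, 'minmax', False)
# registers paired '-minmax' / '-no_minmax' switches that write to args.minmax,
# with the default set to False.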
parser.add_argument('-bs', type=int, default=4)
parser.add_argument('-epoch', type=int, default=8)
parser.add_argument('-lr', type=float, default=5e-5)
# normalize output on the tmean matrix, to have min = 0 and max = 1
setarg(parser, 'minmax',False)
# normalize input point cloud to have every coordinate between 0 and 1
setarg(parser, 'minmax3dimage',False)
# normalize input point cloud so that it is in the canonical view
setarg(parser, 'normalize',False)
# center the input point cloud so that its center of mass is at the origin
setarg(parser, 'center',False)
# linearly downsample input point cloud
parser.add_argument('-downsample', type=int, default=1)
# use f_n or f that was obtained with normalization to the canonical view
# before processing
setarg(parser, 'classicnorm',False)
# cap the maximum number of SH amplitudes to regress
parser.add_argument('-ampl', type=int, default=441)
# center the seed on the input image and crop to this width
parser.add_argument('-cmscrop', type=int, default=0)
parser.add_argument('-cencrop', type=int, default=700)
# rescale input image
parser.add_argument('-rescale', type=int, default=500)
setarg(parser, 'use_adasum',False)
parser.add_argument(
'-gradient_predivide_factor', type=float, default=1.0,
help='apply gradient predivide factor in optimizer (default: 1.0)')
# name of experiment directory
parser.add_argument('-expnum', type=str, default='111')
# hidden_dim - size of appendix FC layers
parser.add_argument(
'-hidden_dim', nargs='+', type=int, default=[5000,2500,1000,441])
parser.add_argument(
'-chidden_dim', nargs='+', type=int, default=[96, 128, 256, 256, 256])
parser.add_argument('-kernel_sizes', nargs='+', default=[7, 3, 3, 3, 3, 3])
# number of input images that will be loaded
parser.add_argument('-num_input_images', type=int, default=1)
# name of standard model
parser.add_argument('-model_name', type=str, default='')
parser.add_argument('-netname', nargs='+', default=['cnet'])
setarg(parser, 'use_pretrained',False)
parser.add_argument('-weight_decay', type=float, default=0)
# decides whether images are loaded all in parallel or merged after the output.
# "separate"-style merging means the DataLoader yields a tensor stacked like
# color channels, e.g. [15, 3, 1000, 1800], which is then reshaped to
# [45, 1, 1000, 1800] so that each view is treated as a separate data point
parser.add_argument('-merging', type=str,
choices=['color', 'latent', 'batch'], default='batch')
# take the input image from a random angle; if not, the image will
# be taken relative to the horizontal pose
setarg(parser, 'rand_angle',False)
# number of experiment from phenoseeder
parser.add_argument('-specie', type=str, default='598')
# number of sampled directions used for subsampling after f_n
parser.add_argument('-num_sam_points', type=int, default=500)
# compute the loss on 'pc', 'f' or 'f_n'
parser.add_argument('-lb', type=str, default='f')
# short description of what exactly this job is for
parser.add_argument('-expdescr', type=str, default='')
# use a csv file with paths to all input files together with the
# horizontal image index
setarg(parser, 'use_existing_csv',True)
setarg(parser, 'use_sep_csv',True)
# instead of input files, noise is generated with random numbers
setarg(parser, 'noise_input',False)
# use convolutional part of the network or not
setarg(parser, 'haf',True)
# type of input data. can be 'img', 'f' or 'pc'
parser.add_argument('-inputt', type=str, default='img')
# normalize to make min = 0 and max = 1 for input f
setarg(parser, 'minmax_f',True)
# criterion to calculate loss
parser.add_argument('-criterion', type=str, default='L1')
# number of GPUs is used in the job
parser.add_argument('-ngpu', type=int, default=4)
# type of parallelization: 'hvd' means horovod, 't' means torch
parser.add_argument('-parallel', type=str, choices=['horovod', 'torch'],
default='hvd')
# in case of loading a standard model, it can be used for feature extraction
# (freezing all layers except the last one)
setarg(parser, 'feature_extract',False)
# if only one image is loaded as input, it will always be the image with index
# 000_rotation
# if more than 1 image is loaded, the images will be spread evenly over
# the range (0,36)
# if false, images will be taken such that the first image in views has the
# horizontal pose
setarg(parser, 'zero_angle',True)
# used for testing computing time,
# where all needed files, including the data, are in one folder
parser.add_argument('-single_folder',
dest='single_folder', action='store_true')
parser.set_defaults(single_folder=False)
parser.add_argument('-noise_output', dest='noise_output',
action='store_true')
parser.set_defaults(noise_output=False)
# only log will be in the output
setarg(parser, 'save_output',True)
# type of data that is loaded for the GT. For example, single_f_n
# means that only *f_n files will be used for the GT in the dataloader,
# possibly with a singular loading of y_n.
# A separate transform_f_n.py is used so that nothing more than
# needed is loaded.
# In case the GT is loaded not from the dataloader but from a csv or h5 file,
# there is the option "single_file"
parser.add_argument('-gttype', type=str,
choices=['single_file'],
default='single_file')
# name of csv that will be used for loading GT
# it can be 598csv9 for original pose and 598csv11 for normalized pose
parser.add_argument('-csvname', type=str, default='598csv9')
# name of the csv which will be used for loading data
# choices are : 598frame for full or 598frame_dummy
parser.add_argument('-dfname', type=str, default='598frame')
# factor by which all output point cloud data will be normalized
parser.add_argument('-pscale', type=int, default=100)
# if view_sep = True and more than one image is loaded,
# all input images will be treated as separate data elements
# and a new dataframe will be created
setarg(parser, 'view_sep',False)
# rotate directions together with the angle from which the
# current image was taken
setarg(parser, 'rot_dirs',False)
# for dataloader
parser.add_argument('-num_workers', type=int, default=0)
setarg(parser, 'pin_memory',False)
# manually calculate distance vector F out of point cloud output
setarg(parser, 'man_dist',False)
setarg(parser, 'use_cuda',True)
parser.add_argument('-machine', type=str,
choices=['jureca', 'workstation', 'lenovo', 'huawei'],
default='jureca')
setarg(parser, 'maintain',False)
setarg(parser, 'maintain_line',False)
parser.add_argument('-wandb', type=str, default="")
setarg(parser, 'measure_time',False)
setarg(parser, 'rotate_output',False)
parser.add_argument('-transappendix', type=str, default="_image")
# how often to save intermediate batch output within an epoch
parser.add_argument('-batch_output', type=int, default=2)
# min/max function used for ground-truth preparation before training
parser.add_argument('-minmax_fn', type=str,
choices=['min,max','mean,std', ''], default='')
parser.add_argument('-updateFraction', type=float, default=3)
parser.add_argument('-standardize', nargs='+', default=255)
# parser.add_argument('-standardize', default=(18.31589541, 39.63290785))
# if rmdirname is True, delete dirname content and use this directory again
# for saving output
setarg(parser, 'rmdirname', False)
parser.add_argument('-steplr', nargs='+', type=float, default=(30,1))
parser.add_argument('-outputt', type=str,
choices=['points','pose6', 'eul', 'orient', 'cms'],
default='points')
parser.add_argument('-ufmodel', type=int, default=100000)
parser.add_argument('-framelim', type=int, default=int(1e20))
parser.add_argument('-conTrain', type=str, default='')
# how often to print loss in the log output
parser.add_argument('-print_minibatch', type=int, default=10)
# for orientation there are two correct GTs, because it is a ray. That is why
# augmentation of the ground truth is needed for evaluation
parser.add_argument('-aug_gt', nargs='+', type=str, default='')
parser.add_argument('-datapath', type=str,
default='C:/cherepashkin1/phenoseed')
# job name is used to create corresponding subdirectory
parser.add_argument('-jobname', type=str, default='')
# real job name of the executed sh file; it is needed to copy the sh file to
# the new directory
parser.add_argument('-realjobname', type=str, default='')
parser.add_argument('-jobdir', type=str, default='')
setarg(parser, 'loadh5', False)
opt = parser.parse_args()
|
py
|
1a5e3a96aab5c68cc4a11406895ef2eb0617609b
|
import random
import matplotlib
import numpy as np
from sklearn.model_selection import KFold
matplotlib.use('Agg') # todo: remove or change if not working
def augment(X):
if X.ndim == 1:
return np.concatenate((X, [1]))
else:
pad = np.ones((1, X.shape[1]))
return np.concatenate((X, pad), axis=0)
def onehot_decode(X, axis):
return np.argmax(X, axis=axis)
def onehot_encode(L, c):
if isinstance(L, int):
L = [L]
n = len(L)
out = np.zeros((c, n))
out[L, range(n)] = 1
return np.squeeze(out)
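# Illustrative examples (assumed shapes, not part of the original module):
#     onehot_encode([0, 2], 3) -> a (3, 2) array whose columns are one-hot vectors
#     onehot_encode(1, 3)      -> the squeezed vector [0., 1., 0.]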
# normalize inputs
def normalize(x, axis=1):
"""
    Standardize along the given axis (by rows by default) to zero mean and unit std, e.g.
x1 - 5 4 68 0
x2 - 8 6 5 0
"""
_avg = x.mean(axis=axis, keepdims=True)
_std = x.std(axis=axis, keepdims=True)
return (x - _avg) / _std
def load_data(path):
"""
Load data, convert classes to ints, split inputs and labels.
:param path: path to data
:return:
"""
letter_map = {
'A': 0,
'B': 1,
'C': 2
}
convert_letter = lambda x: letter_map[x.decode('UTF-8')]
data = np.loadtxt(path, skiprows=1, converters={2: convert_letter}).T
inputs = data[:-1]
labels = data[-1].astype(int)
return inputs, labels
def split_train_test(inputs, labels, ratio=0.8):
"""
Randomly shuffle dataset and split it to training and testing.
:return: tuple with training/testing inputs/labels
"""
count = inputs.shape[1]
ind = np.arange(count)
random.shuffle(ind)
split = int(count * ratio)
train_ind = ind[:split]
test_ind = ind[split:]
train_inputs = inputs[:, train_ind]
train_labels = labels[train_ind]
test_inputs = inputs[:, test_ind]
test_labels = labels[test_ind]
return train_inputs, train_labels, test_inputs, test_labels
def k_fold_cross_validation(clf, inputs, labels, n, verbosity):
kf = KFold(n_splits=n)
i = 1
train_acc, train_rmse = [], []
test_acc, test_rmse = [], []
for train, validate in kf.split(inputs.T):
train_fold_inputs, train_fold_labels = inputs[:, train], labels[train]
validate_fold_inputs, validate_fold_labels = inputs[:, validate], labels[validate]
trainCE, trainRE = clf.train(train_fold_inputs, train_fold_labels)
testCE, testRE = clf.test(validate_fold_inputs, validate_fold_labels)
if verbosity > 1:
print('Fold n.{}: CE = {:6.2%}, RE = {:.5f}'.format(i, testCE, testRE))
train_acc.append(trainCE)
train_rmse.append(trainRE)
test_acc.append(testCE)
test_rmse.append(testRE)
i += 1
# reset weights on classifier for evaluating next fold
clf.init_weights()
if verbosity > 0:
print('After {n}-fold cross-validation'.format(n=n))
print('CEs - AVG - {avg:.5f}, STD - {std:.5f}'.format(avg=np.mean(test_acc),
std=np.std(test_acc)))
print('REs - AVG - {avg:.5f}, STD - {std:.5f}'.format(avg=np.mean(test_rmse),
std=np.std(test_rmse)))
train_acc = np.mean(train_acc, axis=0)
train_rmse = np.mean(train_rmse, axis=0)
return list(train_acc), list(train_rmse), np.mean(test_acc), np.mean(test_rmse)
def save_confusion_matrix(true_labels, predicted_labels, n_classes):
confusion_matrix = np.zeros((n_classes, n_classes))
for g_true, predict in zip(true_labels, predicted_labels):
confusion_matrix[g_true, predict] += 1
with open('results/confusion.txt', 'w') as f:
for row in confusion_matrix:
f.write(str(row) + '\n')
|
py
|
1a5e3b2dc87ce65466c1fce0f29b706f7957519f
|
from werkzeug.datastructures import ImmutableMultiDict
from overseed_tests.overseed_test_case import OverseedTestCase
# Assign user test
# ---------------
# This test case covers all the Assign User tests, where admins and supervisors assign
# users to companies.
class TestAssignUser(OverseedTestCase):
def test_assign_user_admin(self):
self.client.post("/login",
data=dict(email='[email protected]', password='admin', remember=False),
follow_redirects=True)
data = {
'companies' : ['<Company 1>', '<Company 2>']
}
data = ImmutableMultiDict(data)
# now navigate to the assign user page and assign to valid companies.
result = self.client.post("/assign_user/3",
data=data,
follow_redirects=True)
self.assert_template_used('account_list.html')
self.assert_message_flashed('Successfully assigned user to the selected companies.', 'success')
self.assertIn(b'company_x.png', result.data)
self.assertIn(b'company_y.png', result.data)
def test_assign_user_admin_no_assignments(self):
self.client.post("/login",
data=dict(email='[email protected]', password='admin', remember=False),
follow_redirects=True)
data = {
'companies' : []
}
data = ImmutableMultiDict(data)
# now navigate to the assign user page and assign to valid companies.
result = self.client.post("/assign_user/3",
data=data,
follow_redirects=True)
self.assert_template_used('assign_user.html')
self.assertIn(b'This field is required.', result.data)
def test_assign_admin_admin(self):
self.client.post("/login",
data=dict(email='[email protected]', password='admin', remember=False),
follow_redirects=True)
data = {
'companies' : ['<Company 1>', '<Company 2>']
}
data = ImmutableMultiDict(data)
# now navigate to the assign user page and assign to valid companies.
result = self.client.post("/assign_user/1",
data=data,
follow_redirects=True)
self.assert_message_flashed('You cannot assign companies to this account.', 'danger')
    def test_assign_supervisor_admin(self):
self.client.post("/login",
data=dict(email='[email protected]', password='admin', remember=False),
follow_redirects=True)
data = {
'companies' : ['<Company 1>', '<Company 2>']
}
data = ImmutableMultiDict(data)
# now navigate to the assign user page and assign to valid companies.
result = self.client.post("/assign_user/2",
data=data,
follow_redirects=True)
self.assert_message_flashed('You cannot assign companies to this account.', 'danger')
def test_assign_user_supervisor(self):
self.client.post("/login",
data=dict(email='[email protected]', password='supervisor', remember=False),
follow_redirects=True)
data = {
'companies' : ['<Company 1>', '<Company 2>']
}
data = ImmutableMultiDict(data)
# now navigate to the assign user page and assign to valid companies.
result = self.client.post("/assign_user/3",
data=data,
follow_redirects=True)
self.assert_template_used('account_list.html')
self.assert_message_flashed('Successfully assigned user to the selected companies.', 'success')
self.assertIn(b'company_x.png', result.data)
self.assertIn(b'company_y.png', result.data)
def test_assign_user_user(self):
self.client.post("/login",
data=dict(email='[email protected]', password='user', remember=False),
follow_redirects=True)
data = {
'companies' : ['<Company 1>', '<Company 2>']
}
data = ImmutableMultiDict(data)
# now navigate to the assign user page and assign to valid companies.
result = self.client.post("/assign_user/3",
data=data,
follow_redirects=True)
self.assert403(result)
def test_assign_user_logged_out(self):
data = {
'companies' : ['<Company 1>', '<Company 2>']
}
data = ImmutableMultiDict(data)
# now navigate to the assign user page and assign to valid companies.
result = self.client.post("/assign_user/3",
data=data,
follow_redirects=True)
self.assert403(result)
|
py
|
1a5e3c4fe80ce3cec6eea0d6d03eb18d94048f39
|
class Employee:
# name=''
# salary=0
def insertDetails(self):
        self.empId=int(input('Enter the empid:'))
self.name=input('Enter the name:')
self.salary=int(input('Enter the salary:'))
def display(self):
print(self.empId,"\n ",self.name," ",self.salary)
|
py
|
1a5e3ce88d2431a100c1567b49348ec3948aaee7
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import html.parser
class ContentParser(html.parser.HTMLParser):
def __init__(self, begin_tag, stop_tag):
html.parser.HTMLParser.__init__(self)
        tag_template = ('type', 'name', 'attrs', 'contains_me')
        self.begin_tag = dict(zip(tag_template, begin_tag))
        self.begin_tag.setdefault('contains_me', False)
        self.stop_tag = dict(zip(tag_template, stop_tag))
self.stop_tag.setdefault('contains_me', False)
def reset(self):
html.parser.HTMLParser.reset(self)
self.switch_flag = False
self.content = ['']
def begin_now(self):
self.switch_flag = True
return
def stop_now(self):
self.switch_flag = False
return
@staticmethod
def tag_process(tag_type, target_tag, target_action, tag, attrs):
def has_attr(match_attrs, source_attrs):
match_dict = dict(match_attrs)
source_dict = dict(source_attrs)
if 'class' in match_dict:
if 'class' in source_dict:
if set(str.split(match_dict.pop('class'))).issubset(set(str.split(source_dict.pop('class')))):
pass
else:
return False
else:
return False
return set(match_dict.items()).issubset(set(source_dict.items()))
if target_tag['type'] == tag_type:
if tag == target_tag['name']:
if target_tag['attrs'] is None or len(target_tag['attrs']) == 0 or tag_type == 'endtag':
target_action()
return True
else:
if len(target_tag['attrs']) > len(attrs):
return False
else:
if has_attr(target_tag['attrs'], attrs):
target_action()
return True
else:
return False
else:
return False
else:
return False
def pre_tag_process(self, tag_type, tag, attrs = None):
def get_tag_text():
if tag_type == 'endtag':
return '</{0}>'.format(tag)
else:
return self.get_starttag_text()
if self.switch_flag == False:
if self.tag_process(tag_type, self.begin_tag, self.begin_now, tag, attrs) == True:
if self.begin_tag['contains_me'] == False:
self.content = []
else:
self.content = [get_tag_text()]
return True
else:
return False
else:
if self.tag_process(tag_type, self.stop_tag, self.stop_now, tag, attrs) == True:
if self.stop_tag['contains_me'] == False:
return False
else:
self.content.append(get_tag_text())
return True
else:
self.content.append(get_tag_text())
return True
def handle_starttag(self, tag, attrs):
self.pre_tag_process('starttag', tag, attrs)
def handle_endtag(self, tag):
self.pre_tag_process('endtag', tag)
def handle_startendtag(self, tag, attrs):
self.pre_tag_process('startendtag', tag, attrs)
def handle_data(self, data):
if self.switch_flag == False:
return False
else:
self.content.append(data)
return True
def main():
page = '<html><h1 id="q" class="a c b">Title</h1><p>Im a paragraph!</p><p>Another paragraph</p></html>'
myparser = ContentParser(['starttag', 'h1', [('class','a b'), ('id', 'q')], True], ['endtag', 'p', None, True])
myparser.feed(page)
print(''.join(myparser.content))
myparser.reset()
print(myparser.content)
if __name__ == '__main__':
main()
|
py
|
1a5e3d461ee182b5035731d3ea137ac67dc1fff7
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('proposals', '0009_auto_20170209_2326'),
]
operations = [
migrations.RenameField('proposalbase', 'submitted', 'submitted_at'),
migrations.AlterField(
model_name='proposalbase',
name='submitted_at',
field=models.DateTimeField(null=True, editable=False, blank=True),
),
migrations.AddField(
model_name='proposalbase',
name='submitted',
field=models.BooleanField(default=False),
),
]
|
py
|
1a5e3d5866923f5207c5db29ba95177f8f163cab
|
import sys
import os
sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
sys.path.append(os.path.join(os.path.dirname(__file__), '..', 'lib'))
import base58
import hashlib
import re
from decimal import Decimal
import simplejson
import binascii
from misc import printdbg, epoch2str
import time
def is_valid_moondex_address(address, network='mainnet'):
# Only public key addresses are allowed
# A valid address is a RIPEMD-160 hash which contains 20 bytes
# Prior to base58 encoding 1 version byte is prepended and
# 4 checksum bytes are appended so the total number of
# base58 encoded bytes should be 25. This means the number of characters
# in the encoding should be about 34 ( 25 * log2( 256 ) / log2( 58 ) ).
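    # (worked out: 25 * log2(256) / log2(58) = 200 / 5.858 ≈ 34.1 characters)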
moondex_version = 140 if network == 'testnet' else 76
    # Check length (this is important because the base58 library has problems
    # with long addresses, which are invalid anyway).
if ((len(address) < 26) or (len(address) > 35)):
return False
address_version = None
try:
decoded = base58.b58decode_chk(address)
address_version = ord(decoded[0:1])
except:
# rescue from exception, not a valid Moondex address
return False
if (address_version != moondex_version):
return False
return True
def hashit(data):
return int(hashlib.sha256(data.encode('utf-8')).hexdigest(), 16)
# returns the masternode VIN of the elected winner
def elect_mn(**kwargs):
current_block_hash = kwargs['block_hash']
mn_list = kwargs['mnlist']
# filter only enabled MNs
enabled = [mn for mn in mn_list if mn.status == 'ENABLED']
block_hash_hash = hashit(current_block_hash)
candidates = []
for mn in enabled:
mn_vin_hash = hashit(mn.vin)
diff = mn_vin_hash - block_hash_hash
absdiff = abs(diff)
candidates.append({'vin': mn.vin, 'diff': absdiff})
candidates.sort(key=lambda k: k['diff'])
try:
winner = candidates[0]['vin']
except:
winner = None
return winner
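# Note (added for clarity): the winner is simply the enabled masternode whose hashed
# vin has the smallest absolute difference from the hash of the current block hash.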
def parse_masternode_status_vin(status_vin_string):
    status_vin_string_regex = re.compile(r'CTxIn\(COutPoint\(([0-9a-zA-Z]+),\s*(\d+)\),')
m = status_vin_string_regex.match(status_vin_string)
# To Support additional format of string return from masternode status rpc.
if m is None:
        status_output_string_regex = re.compile(r'([0-9a-zA-Z]+)\-(\d+)')
m = status_output_string_regex.match(status_vin_string)
txid = m.group(1)
index = m.group(2)
vin = txid + '-' + index
if (txid == '0000000000000000000000000000000000000000000000000000000000000000'):
vin = None
return vin
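# Illustrative example (hypothetical txid):
#     parse_masternode_status_vin('CTxIn(COutPoint(abcd1234, 0), scriptSig=)') -> 'abcd1234-0'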
def create_superblock(proposals, event_block_height, budget_max, sb_epoch_time):
from models import Superblock, GovernanceObject, Proposal
from constants import SUPERBLOCK_FUDGE_WINDOW
# don't create an empty superblock
if (len(proposals) == 0):
printdbg("No proposals, cannot create an empty superblock.")
return None
budget_allocated = Decimal(0)
    fudge = SUPERBLOCK_FUDGE_WINDOW  # fudge-factor to allow for slightly incorrect estimates
payments = []
for proposal in proposals:
fmt_string = "name: %s, rank: %4d, hash: %s, amount: %s <= %s"
# skip proposals that are too expensive...
if (budget_allocated + proposal.payment_amount) > budget_max:
printdbg(
fmt_string % (
proposal.name,
proposal.rank,
proposal.object_hash,
proposal.payment_amount,
"skipped (blows the budget)",
)
)
continue
# skip proposals if the SB isn't within the Proposal time window...
window_start = proposal.start_epoch - fudge
window_end = proposal.end_epoch + fudge
printdbg("\twindow_start: %s" % epoch2str(window_start))
printdbg("\twindow_end: %s" % epoch2str(window_end))
printdbg("\tsb_epoch_time: %s" % epoch2str(sb_epoch_time))
if (sb_epoch_time < window_start or sb_epoch_time > window_end):
printdbg(
fmt_string % (
proposal.name,
proposal.rank,
proposal.object_hash,
proposal.payment_amount,
"skipped (SB time is outside of Proposal window)",
)
)
continue
printdbg(
fmt_string % (
proposal.name,
proposal.rank,
proposal.object_hash,
proposal.payment_amount,
"adding",
)
)
# else add proposal and keep track of total budget allocation
budget_allocated += proposal.payment_amount
payment = {'address': proposal.payment_address,
'amount': "{0:.8f}".format(proposal.payment_amount),
'proposal': "{}".format(proposal.object_hash)}
payments.append(payment)
# don't create an empty superblock
if not payments:
printdbg("No proposals made the cut!")
return None
# 'payments' now contains all the proposals for inclusion in the
# Superblock, but needs to be sorted by proposal hash descending
payments.sort(key=lambda k: k['proposal'], reverse=True)
sb = Superblock(
event_block_height=event_block_height,
payment_addresses='|'.join([pd['address'] for pd in payments]),
payment_amounts='|'.join([pd['amount'] for pd in payments]),
proposal_hashes='|'.join([pd['proposal'] for pd in payments]),
)
printdbg("generated superblock: %s" % sb.__dict__)
return sb
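# Illustrative call (added; not in the original module). 'ranked_proposals' would be
# Proposal model instances carrying name, rank, object_hash, payment_amount,
# payment_address, start_epoch and end_epoch; the numbers here are placeholders.
#
#   sb = create_superblock(proposals=ranked_proposals,
#                          event_block_height=100000,
#                          budget_max=Decimal('6000'),
#                          sb_epoch_time=int(time.time()))
#   # returns None when no proposal fits both the budget and its time window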
# shims 'til we can fix the moondexd side
def SHIM_serialise_for_moondexd(sentinel_hex):
from models import DASHD_GOVOBJ_TYPES
# unpack
obj = deserialise(sentinel_hex)
# shim for moondexd
govtype = obj[0]
# add 'type' attribute
obj[1]['type'] = DASHD_GOVOBJ_TYPES[govtype]
# superblock => "trigger" in moondexd
if govtype == 'superblock':
obj[0] = 'trigger'
# moondexd expects an array (even though there is only a 1:1 relationship between govobj->class)
obj = [obj]
# re-pack
moondexd_hex = serialise(obj)
return moondexd_hex
# shims 'til we can fix the moondexd side
def SHIM_deserialise_from_moondexd(moondexd_hex):
from models import DASHD_GOVOBJ_TYPES
# unpack
obj = deserialise(moondexd_hex)
# shim from moondexd
# only one element in the array...
obj = obj[0]
# extract the govobj type
govtype = obj[0]
# superblock => "trigger" in moondexd
if govtype == 'trigger':
obj[0] = govtype = 'superblock'
# remove redundant 'type' attribute
if 'type' in obj[1]:
del obj[1]['type']
# re-pack
sentinel_hex = serialise(obj)
return sentinel_hex
# convenience
def deserialise(hexdata):
json = binascii.unhexlify(hexdata)
obj = simplejson.loads(json, use_decimal=True)
return obj
def serialise(dikt):
json = simplejson.dumps(dikt, sort_keys=True, use_decimal=True)
hexdata = binascii.hexlify(json.encode('utf-8')).decode('utf-8')
return hexdata
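# Round-trip sketch (added illustration): governance objects are plain JSON,
# hex-encoded, with Decimal preserved via simplejson's use_decimal flag.
#
#   hexdata = serialise(['proposal', {'name': 'example', 'payment_amount': Decimal('10')}])
#   deserialise(hexdata)
#   # -> ['proposal', {'name': 'example', 'payment_amount': Decimal('10')}]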
def did_we_vote(output):
from bitcoinrpc.authproxy import JSONRPCException
# sentinel
voted = False
err_msg = ''
try:
detail = output.get('detail').get('moondex.conf')
result = detail.get('result')
if 'errorMessage' in detail:
err_msg = detail.get('errorMessage')
except JSONRPCException as e:
result = 'failed'
err_msg = e.message
# success, failed
printdbg("result = [%s]" % result)
if err_msg:
printdbg("err_msg = [%s]" % err_msg)
voted = False
if result == 'success':
voted = True
# in case we spin up a new instance or server, but have already voted
# on the network and network has recorded those votes
m_old = re.match(r'^time between votes is too soon', err_msg)
m_new = re.search(r'Masternode voting too often', err_msg, re.M)
if result == 'failed' and (m_old or m_new):
printdbg("DEBUG: Voting too often, need to sync w/network")
voted = False
return voted
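# Shape of the 'output' dict the parsing above expects (added illustration,
# inferred from the lookups; the real structure comes from the voting RPC call):
#
#   did_we_vote({'detail': {'moondex.conf': {'result': 'success'}}})
#   # -> True
#   did_we_vote({'detail': {'moondex.conf': {'result': 'failed',
#                                            'errorMessage': 'Masternode voting too often'}}})
#   # -> False (treated as "already voted, need to sync with the network")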
def parse_raw_votes(raw_votes):
votes = []
for v in list(raw_votes.values()):
(outpoint, ntime, outcome, signal) = v.split(':')
signal = signal.lower()
outcome = outcome.lower()
mn_collateral_outpoint = parse_masternode_status_vin(outpoint)
v = {
'mn_collateral_outpoint': mn_collateral_outpoint,
'signal': signal,
'outcome': outcome,
'ntime': ntime,
}
votes.append(v)
return votes
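# Each raw vote value splits as 'outpoint:ntime:outcome:signal' (added illustration):
#
#   parse_raw_votes({'votehash1': 'abcd1234-0:1614000000:YES:FUNDING'})
#   # -> [{'mn_collateral_outpoint': 'abcd1234-0', 'signal': 'funding',
#   #      'outcome': 'yes', 'ntime': '1614000000'}]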
def blocks_to_seconds(blocks):
"""
Return the estimated number of seconds which will transpire for a given
number of blocks.
"""
return blocks * 2.62 * 60
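# Worked example of the 2.62-minute block-time assumption baked in above:
# blocks_to_seconds(576) == 576 * 2.62 * 60 == 90547.2 seconds, i.e. roughly 25 hours.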
|
py
|
1a5e3ddd0ee291e05344871fcc39e4055bc5865e
|
import os
import numpy as np
from PIL import Image
data_path = '../data/red-lights'
template_imgs_dir = './templates/red-light'
template_img_files = sorted(os.listdir(template_imgs_dir))
template_img_files = [f for f in template_img_files if '.jpg' in f]
DATA_MEAN = 90
DATA_STD = 65
for i, filename in enumerate(template_img_files):
I = Image.open(os.path.join(data_path, filename))
template = Image.open(os.path.join(template_imgs_dir, filename))
I = np.asarray(I)
template = np.asarray(template)
mean = np.mean(I, axis=(0, 1))
std = np.std(I, axis=(0, 1))
template = (template - mean) / std
# template = (template - np.mean(I)) / np.std(I)
# template = (template - DATA_MEAN) / DATA
print(filename, mean, std, np.mean(template), np.std(template))
np.save(os.path.join(template_imgs_dir, f'template{i}'), # chop off '.jpg'
template)
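# Added note: np.save appends '.npy', so a normalized kernel can be reloaded later with e.g.
#
#   template0 = np.load(os.path.join(template_imgs_dir, 'template0.npy'))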
|
py
|
1a5e3efe219fa5e84dc4b3eddee1249835ff6287
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# reana documentation build configuration file, created by
# sphinx-quickstart on Mon Jan 23 14:17:34 2017.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
from __future__ import print_function
import os
import sphinx.environment
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Do not warn on external images.
suppress_warnings = ["image.nonlocal_uri"]
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
"sphinx.ext.autodoc",
"sphinx.ext.coverage",
"sphinx.ext.doctest",
"sphinx.ext.graphviz",
"sphinx.ext.intersphinx",
"sphinx.ext.viewcode",
"sphinx_click.ext",
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = ".rst"
# The master toctree document.
master_doc = "index"
# General information about the project.
project = "reana"
copyright = "2017-2020 [email protected]"
author = "[email protected]"
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
# Get the version string. Cannot be done with import!
g = {}
with open(os.path.join("..", "reana_client", "version.py"), "rt") as fp:
exec(fp.read(), g)
version = g["__version__"]
# The full version, including alpha/beta/rc tags.
release = version
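# For reference (assumption, not the actual file contents): reana_client/version.py is
# expected to define the version as a plain string, e.g.
#
#   __version__ = "0.7.0"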
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ["_build", "Thumbs.db", ".DS_Store"]
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = "sphinx"
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = "alabaster"
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
html_theme_options = {
"description": """<p>REANA-Client is a component of the <a
href="http://www.reana.io">REANA</a> reusable and
reproducible research data analysis
platform.</p><p>REANA-Client provides a command-line tool
that allows researchers to submit, run, and manage their
computational workflows.</p>""",
"github_user": "reanahub",
"github_repo": "reana-client",
"github_button": False,
"github_banner": True,
"show_powered_by": False,
"extra_nav_links": {
"REANA@DockerHub": "https://hub.docker.com/u/reanahub/",
"REANA@GitHub": "https://github.com/reanahub",
"REANA@Twitter": "https://twitter.com/reanahub",
"REANA@Web": "http://www.reana.io",
},
"nosidebar": True,
}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
# html_static_path = ["_static"]
# Custom sidebar templates, maps document names to template names.
html_sidebars = {
"**": [
"about.html",
"navigation.html",
"relations.html",
"searchbox.html",
"donate.html",
]
}
# -- Options for HTMLHelp output ------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = "reanadoc"
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, "reana.tex", "reana Documentation", "[email protected]", "manual"),
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [(master_doc, "reana", "reana Documentation", [author], 1)]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(
master_doc,
"reana",
"reana Documentation",
author,
"reana",
"One line description of project.",
"Miscellaneous",
),
]
|
py
|
1a5e3f8f2014aeca023511c6ceec27f2bbe6e0e8
|
# fa19-516-170 E.Cloudmesh.Shell.1
# run cms in terminal
|
py
|
1a5e404c98b785c7fe6ddea1b20bd3ec3b6e691b
|
"""
Originally a mod of ajpalkovic's plugin https://github.com/ajpalkovic, though it doesn't appear to be available anymore.
Highlights all other instances of the selected word (or optional word under cursor):
```
// Require the word to be selected.
// Disable to highlight word with no selection.
"require_word_select": true,
```
Can be configured to highlight multiple word sets simultaneously with multiple cursors.
When doing multiple cursors, you can highlight each in their own color
(limited by available theme colors):
```
// Define scopes for highlights
// The more you define, the more selections you can do
"highlight_scopes": ["string", "keyword", "constant.language"],
```
Style of highlights can also be controlled:
```
// Highlight style (solid|outline|underline|thin_underline|squiggly|stippled)
"highlight_style": "outline",
```
Optionally can disable highlight if the number of selections in the view is greater than a certain value:
```
// If the number of selections in the view is greater than
// the specified setting, don't highlight words.
// -1 means no threshold.
"selection_threshold": -1
```
"""
import sublime
import sublime_plugin
from time import time, sleep
import threading
KEY = "HighlightCurrentWord"
SCOPE = 'comment'
reload_flag = False
highlight_word = None
settings = None
if 'hw_thread' not in globals():
hw_thread = None
def debug(s):
"""Debug logging."""
print("HighlightWord: " + s)
def highlight_style(option):
"""Configure style of region based on option."""
style = 0
if option == "outline":
style |= sublime.DRAW_NO_FILL
elif option == "none":
style |= sublime.HIDDEN
elif option == "underline":
style |= sublime.DRAW_EMPTY_AS_OVERWRITE
elif option == "thin_underline":
style |= sublime.DRAW_NO_FILL
style |= sublime.DRAW_NO_OUTLINE
style |= sublime.DRAW_SOLID_UNDERLINE
elif option == "squiggly":
style |= sublime.DRAW_NO_FILL
style |= sublime.DRAW_NO_OUTLINE
style |= sublime.DRAW_SQUIGGLY_UNDERLINE
elif option == "stippled":
style |= sublime.DRAW_NO_FILL
style |= sublime.DRAW_NO_OUTLINE
style |= sublime.DRAW_STIPPLED_UNDERLINE
return style
def clear_regions(view=None):
"""Clear regions."""
if view is None:
win = sublime.active_window()
if win is not None:
view = win.active_view()
if view is not None:
regions = view.settings().get('highlight_word.regions', 0)
if highlight_word is not None:
for count in range(0, regions):
view.erase_regions(KEY + str(count))
view.settings().set('highlight_word.regions', 0)
def underline(regions):
"""Convert to empty regions."""
new_regions = []
for region in regions:
start = region.begin()
end = region.end()
while start < end:
new_regions.append(sublime.Region(start))
start += 1
return new_regions
# The search is performed half a second after the most recent event
# in order to prevent the search happening on every key press.
# Each of the event handlers simply marks the time of the most recent
# event and a timer periodically executes do_search
class HighlightWord(object):
"""HighlightWord."""
def __init__(self):
"""Setup."""
self.previous_region = sublime.Region(0, 0)
self.theme_selectors = tuple(settings.get('highlight_scopes', [SCOPE]))
self.word_select = settings.get('require_word_select', False)
style = settings.get('highlight_style', 'outline')
self.style = highlight_style(style)
self.underline = style == 'underline'
self.max_selections = len(self.theme_selectors)
self.sel_threshold = int(settings.get('selection_threshold', -1))
def do_search(self, view, force=True):
"""Perform the search for the highlighted word."""
global reload_flag
if view is None:
return
if reload_flag:
reload_flag = False
self.theme_selectors = tuple(settings.get('highlight_scopes', [SCOPE]))
self.max_selections = len(self.theme_selectors)
self.word_select = settings.get('require_word_select', False)
style = settings.get('highlight_style', 'outline')
self.style = highlight_style(style)
self.underline = style == 'underline'
self.sel_threshold = int(settings.get('selection_threshold', -1))
force = True
visible_region = view.visible_region()
if not force and self.previous_region == visible_region:
return
clear_regions()
# The default separator does not include whitespace, so I add that here no matter what
separator_string = view.settings().get('word_separators', "") + " \n\r\t"
current_words = []
current_regions = []
good_words = set()
words = []
selections = view.sel()
sel_len = len(selections)
if sel_len > 0 and (self.sel_threshold == -1 or self.sel_threshold >= sel_len):
self.previous_region = visible_region
# Reduce m*n search to just n by mapping each word
# separator character into a dictionary
self.separators = {}
for c in separator_string:
self.separators[c] = True
for selection in selections:
current_regions.append(view.word(selection))
current_words.append(
view.substr(current_regions[-1]).strip(separator_string)
)
count = 0
for word in current_words:
if word not in good_words:
if count != self.max_selections:
good_words.add(word)
words.append((word, current_regions[count]))
count += 1
else:
return
count = 0
for word in words:
key = KEY + str(count)
selector = self.theme_selectors[count]
# See if a word is selected or if you are just in a word
if self.word_select and word[1].size() != selections[count].size():
continue
# remove leading/trailing separator characters just in case
if len(word[0]) == 0:
continue
# ignore the selection if it spans multiple words
abort = False
for c in word[0]:
if c in self.separators:
abort = True
break
if abort:
continue
self.highlight_word(view, key, selector, word[1], word[0])
count += 1
def highlight_word(self, view, key, selector, current_region, current_word):
"""Find and highlight word."""
size = view.size() - 1
search_start = max(0, self.previous_region.begin() - len(current_word))
search_end = min(size, self.previous_region.end() + len(current_word))
valid_regions = []
while True:
found_region = view.find(current_word, search_start, sublime.LITERAL)
if found_region is None:
break
# regions can have reversed start/ends so normalize them
start = max(0, found_region.begin())
end = min(size, found_region.end())
if search_start == end:
search_start += 1
continue
search_start = end
if search_start >= size:
break
if found_region.empty():
break
if found_region.intersects(current_region):
continue
# check if the character before and after the region is a separator character
# if it is not, then the region is part of a larger word and shouldn't match
# this can't be done in a regex because we would be unable to use the word_separators setting string
if start == 0 or view.substr(sublime.Region(start - 1, start)) in self.separators:
if end == size or view.substr(sublime.Region(end, end + 1)) in self.separators:
valid_regions.append(found_region)
if search_start > search_end:
break
view.add_regions(
key,
valid_regions if not self.underline else underline(valid_regions),
selector,
"",
self.style
)
view.settings().set('highlight_word.regions', self.max_selections)
class HighlightWordListenerCommand(sublime_plugin.EventListener):
"""Handle listener events."""
def on_selection_modified(self, view):
"""Handle selection events for highlighting."""
if hw_thread is None or hw_thread.ignore_all:
return
now = time()
hw_thread.modified = True
hw_thread.time = now
class HighlightWordSelectCommand(sublime_plugin.TextCommand):
"""Select all instances of the selected word(s)."""
def run(self, edit):
"""Run the command."""
theme_selectors = tuple(settings.get('highlight_scopes', [SCOPE]))
max_selections = len(theme_selectors)
word_select = settings.get('require_word_select', False)
current_words = []
current_regions = []
good_words = set()
words = []
separator_string = self.view.settings().get('word_separators', "") + " \n\r\t"
selections = self.view.sel()
sel_len = len(selections)
if sel_len > 0:
# Reduce m*n search to just n by mapping each word
# separator character into a dictionary
self.separators = {}
for c in separator_string:
self.separators[c] = True
for selection in selections:
current_regions.append(self.view.word(selection))
current_words.append(
self.view.substr(current_regions[-1]).strip(separator_string)
)
count = 0
for word in current_words:
if word not in good_words:
if count != max_selections:
good_words.add(word)
words.append((word, current_regions[count]))
count += 1
else:
return
count = 0
select_regions = []
for word in words:
key = KEY + str(count)
selector = theme_selectors[count]
# See if a word is selected or if you are just in a word
if word_select and word[1].size() != selections[count].size():
continue
# remove leading/trailing separator characters just in case
if len(word[0]) == 0:
continue
# ignore the selection if it spans multiple words
abort = False
for c in word[0]:
if c in self.separators:
abort = True
break
if abort:
continue
select_regions += self.select_word(key, selector, word[0])
count += 1
if select_regions:
self.view.sel().clear()
self.view.sel().add_all(select_regions)
def select_word(self, key, selector, current_word):
"""Find and highlight word."""
size = self.view.size() - 1
search_start = 0
valid_regions = []
while True:
found_region = self.view.find(current_word, search_start, sublime.LITERAL)
if found_region is None:
break
# regions can have reversed start/ends so normalize them
start = max(0, found_region.begin())
end = min(size, found_region.end())
if search_start == end:
search_start += 1
continue
search_start = end
if search_start >= size:
break
if found_region.empty():
break
# check if the character before and after the region is a separator character
# if it is not, then the region is part of a larger word and shouldn't match
# this can't be done in a regex because we would be unable to use the word_separators setting string
if start == 0 or self.view.substr(sublime.Region(start - 1, start)) in self.separators:
if end == size or self.view.substr(sublime.Region(end, end + 1)) in self.separators:
valid_regions.append(found_region)
return valid_regions
class HwThread(threading.Thread):
"""Load up defaults."""
def __init__(self):
"""Setup the thread."""
self.reset()
threading.Thread.__init__(self)
def reset(self):
"""Reset the thread variables."""
self.wait_time = 0.12
self.time = time()
self.modified = False
self.ignore_all = False
self.abort = False
def payload(self, force=False):
"""Code to run."""
self.modified = False
# Ignore selection and edit events inside the routine
self.ignore_all = True
if highlight_word is not None:
highlight_word.do_search(sublime.active_window().active_view(), force)
self.ignore_all = False
self.time = time()
def kill(self):
"""Kill thread."""
self.abort = True
while self.is_alive():
pass
self.reset()
def run(self):
"""Thread loop."""
while not self.abort:
if self.modified is True and time() - self.time > self.wait_time:
sublime.set_timeout(lambda: self.payload(force=True), 0)
elif not self.modified:
sublime.set_timeout(self.payload, 0)
sleep(0.5)
def set_reload():
"""Set reload events."""
global reload_flag
global settings
reload_flag = True
settings = sublime.load_settings("highlight_word.sublime-settings")
settings.clear_on_change('reload')
settings.add_on_change('reload', set_reload)
def plugin_loaded():
"""Setup plugin."""
global highlight_word
global hw_thread
set_reload()
highlight_word = HighlightWord()
if hw_thread is not None:
hw_thread.kill()
hw_thread = HwThread()
hw_thread.start()
def plugin_unloaded():
"""Kill thread."""
hw_thread.kill()
clear_regions()
|