#! /usr/bin/env python3
from typing import Dict, List, Optional, Tuple
# Linked List
class Node :
val : int
nxt : Optional["Node"]
def __init__(self, val) :
self.val = val
self.nxt = None
class CupGame :
head: Node
ndes: Dict[int, Node]
def __init__(self, ls: List[int]) -> None :
self.ndes = {}
prev = None
for x in ls :
curr = Node(x)
if prev is not None :
prev.nxt = curr
prev = curr
self.ndes[x] = curr
self.head = self.ndes[ls[0]]
prev.nxt = self.head
def run(self, i) -> None :
if(i % 1000000 == 0) : print(f"{i // 1000000}/9")
a, b, c = self.step1()
dst = self.step2(a, b, c)
self.step3(a, b, c, dst)
def step1(self) -> Tuple[Node, Node, Node] :
a = self.head.nxt
b = a.nxt
c = b.nxt
self.head.nxt = c.nxt
return (a, b, c)
def step2(self, a: Node, b: Node, c: Node) -> Node :
val = self.head.val - 1 or 1000000
while val in (a.val, b.val, c.val) :
val = val - 1 or 1000000
return self.ndes[val]
def step3(self, a: Node, b: Node, c: Node, dst: Node) -> None :
c.nxt = dst.nxt
dst.nxt = a
self.head = self.head.nxt
def answer(self) -> None :
node = self.ndes[1]
print(f"{node.nxt.val} * {node.nxt.nxt.val} : {node.nxt.val * node.nxt.nxt.val}")
with open("input", "r") as fd :
base = [int(w) for w in fd.read().strip()]
for i in range(max(base) + 1, 1000001) :
base.append(i)
game = CupGame(base)
for i in range(10000000) : game.run(i)
game.answer()
|
#!/usr/bin/env python
u"""
test_coordinates.py (08/2020)
Verify forward and backwards coordinate conversions
"""
import warnings
import pytest
import numpy as np
import pyTMD.convert_ll_xy
#-- parameterize projections
@pytest.mark.parametrize("PROJ", ['3031','CATS2008','3976','PSNorth','4326'])
#-- PURPOSE: verify forward and backwards coordinate conversions
def test_coordinates(PROJ):
startlat = {'3031':-60,'CATS2008':-60,'3976':-60,'PSNorth':60,'4326':90}
endlat = {'3031':-70,'CATS2008':-70,'3976':-70,'PSNorth':70,'4326':-90}
i1 = np.arange(-180,180+1,1)
i2 = np.linspace(startlat[PROJ],endlat[PROJ],len(i1))
#-- convert latitude and longitude to and from projection
o1,o2 = pyTMD.convert_ll_xy(i1,i2,PROJ,'F')
lon,lat = pyTMD.convert_ll_xy(o1,o2,PROJ,'B')
#-- calculate great circle distance between inputs and outputs
cdist = np.arccos(np.sin(i2*np.pi/180.0)*np.sin(lat*np.pi/180.0) +
np.cos(i2*np.pi/180.0)*np.cos(lat*np.pi/180.0)*
np.cos((lon-i1)*np.pi/180.0),dtype=np.float32)
#-- test that forward and backwards conversions are within tolerance
eps = np.finfo(np.float32).eps
assert np.all(cdist < eps)
|
#
# PySNMP MIB module CISCO-UBE-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/CISCO-UBE-MIB
# Produced by pysmi-0.3.4 at Wed May 1 12:14:45 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
Integer, OctetString, ObjectIdentifier = mibBuilder.importSymbols("ASN1", "Integer", "OctetString", "ObjectIdentifier")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ValueRangeConstraint, ConstraintsIntersection, ConstraintsUnion, ValueSizeConstraint, SingleValueConstraint = mibBuilder.importSymbols("ASN1-REFINEMENT", "ValueRangeConstraint", "ConstraintsIntersection", "ConstraintsUnion", "ValueSizeConstraint", "SingleValueConstraint")
ciscoMgmt, = mibBuilder.importSymbols("CISCO-SMI", "ciscoMgmt")
SnmpAdminString, = mibBuilder.importSymbols("SNMP-FRAMEWORK-MIB", "SnmpAdminString")
ObjectGroup, NotificationGroup, ModuleCompliance = mibBuilder.importSymbols("SNMPv2-CONF", "ObjectGroup", "NotificationGroup", "ModuleCompliance")
Gauge32, MibScalar, MibTable, MibTableRow, MibTableColumn, Bits, Integer32, MibIdentifier, IpAddress, ObjectIdentity, TimeTicks, Counter64, ModuleIdentity, Unsigned32, Counter32, iso, NotificationType = mibBuilder.importSymbols("SNMPv2-SMI", "Gauge32", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "Bits", "Integer32", "MibIdentifier", "IpAddress", "ObjectIdentity", "TimeTicks", "Counter64", "ModuleIdentity", "Unsigned32", "Counter32", "iso", "NotificationType")
TruthValue, DisplayString, TextualConvention = mibBuilder.importSymbols("SNMPv2-TC", "TruthValue", "DisplayString", "TextualConvention")
ciscoUbeMIB = ModuleIdentity((1, 3, 6, 1, 4, 1, 9, 9, 764))
ciscoUbeMIB.setRevisions(('2010-11-29 00:00',))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
if mibBuilder.loadTexts: ciscoUbeMIB.setRevisionsDescriptions(('Initial version of this MIB module.',))
if mibBuilder.loadTexts: ciscoUbeMIB.setLastUpdated('201011290000Z')
if mibBuilder.loadTexts: ciscoUbeMIB.setOrganization('Cisco Systems, Inc.')
if mibBuilder.loadTexts: ciscoUbeMIB.setContactInfo('Cisco Systems Customer Service Postal: 170 W Tasman Drive San Jose, CA 95134 USA Tel: +1 800 553-NETS E-mail: [email protected]')
if mibBuilder.loadTexts: ciscoUbeMIB.setDescription('This MIB describes objects used for managing Cisco Unified Border Element (CUBE). The Cisco Unified Border Element (CUBE) is a Cisco IOS Session Border Controller (SBC) that interconnects independent voice over IP (VoIP) and video over IP networks for data, voice, and video transport')
ciscoUbeMIBObjects = MibIdentifier((1, 3, 6, 1, 4, 1, 9, 9, 764, 0))
ciscoUbeMIBConform = MibIdentifier((1, 3, 6, 1, 4, 1, 9, 9, 764, 1))
cubeEnabled = MibScalar((1, 3, 6, 1, 4, 1, 9, 9, 764, 0, 1), TruthValue()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: cubeEnabled.setStatus('current')
if mibBuilder.loadTexts: cubeEnabled.setDescription("This object represents, whether the Cisco Unified Border Element (CUBE) is enabled on the device or not. The value 'true' means that the CUBE feature is enabled on the device. The value 'false' means that the CUBE feature is disabled.")
cubeVersion = MibScalar((1, 3, 6, 1, 4, 1, 9, 9, 764, 0, 2), SnmpAdminString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cubeVersion.setStatus('current')
if mibBuilder.loadTexts: cubeVersion.setDescription('This object represents the version of Cisco Unified Border Element on the device.')
cubeTotalSessionAllowed = MibScalar((1, 3, 6, 1, 4, 1, 9, 9, 764, 0, 3), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(0, 999999))).setUnits('session').setMaxAccess("readwrite")
if mibBuilder.loadTexts: cubeTotalSessionAllowed.setStatus('current')
if mibBuilder.loadTexts: cubeTotalSessionAllowed.setDescription('This object provides the total number of CUBE session allowed on the device. The value zero means no sessions are allowed with CUBE.')
ciscoUbeMIBCompliances = MibIdentifier((1, 3, 6, 1, 4, 1, 9, 9, 764, 1, 1))
ciscoUbeMIBGroups = MibIdentifier((1, 3, 6, 1, 4, 1, 9, 9, 764, 1, 2))
ciscoCubeMIBCompliance = ModuleCompliance((1, 3, 6, 1, 4, 1, 9, 9, 764, 1, 1, 1)).setObjects(("CISCO-UBE-MIB", "ciscoUbeMIBGroup"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
ciscoCubeMIBCompliance = ciscoCubeMIBCompliance.setStatus('current')
if mibBuilder.loadTexts: ciscoCubeMIBCompliance.setDescription('The compliance statement for Cisco Unified Border Element (CUBE) MIB.')
ciscoUbeMIBGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 9, 9, 764, 1, 2, 1)).setObjects(("CISCO-UBE-MIB", "cubeEnabled"), ("CISCO-UBE-MIB", "cubeVersion"), ("CISCO-UBE-MIB", "cubeTotalSessionAllowed"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
ciscoUbeMIBGroup = ciscoUbeMIBGroup.setStatus('current')
if mibBuilder.loadTexts: ciscoUbeMIBGroup.setDescription('A collection of objects which provides the capabilities of the CUBE feature.')
mibBuilder.exportSymbols("CISCO-UBE-MIB", ciscoUbeMIBGroups=ciscoUbeMIBGroups, cubeVersion=cubeVersion, ciscoUbeMIBConform=ciscoUbeMIBConform, ciscoUbeMIBCompliances=ciscoUbeMIBCompliances, PYSNMP_MODULE_ID=ciscoUbeMIB, cubeTotalSessionAllowed=cubeTotalSessionAllowed, ciscoCubeMIBCompliance=ciscoCubeMIBCompliance, ciscoUbeMIB=ciscoUbeMIB, ciscoUbeMIBGroup=ciscoUbeMIBGroup, ciscoUbeMIBObjects=ciscoUbeMIBObjects, cubeEnabled=cubeEnabled)
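#
# Illustrative usage sketch (not part of the generated module): reading the
# cubeEnabled scalar with the pysnmp high-level API. The agent address and
# community string are placeholders, and 1.3.6.1.4.1.9.9.764.0.1.0 is the
# cubeEnabled.0 instance defined above.
#
# from pysnmp.hlapi import (SnmpEngine, CommunityData, UdpTransportTarget,
#                           ContextData, ObjectType, ObjectIdentity, getCmd)
#
# errorIndication, errorStatus, errorIndex, varBinds = next(getCmd(
#     SnmpEngine(),
#     CommunityData('public'),
#     UdpTransportTarget(('192.0.2.1', 161)),
#     ContextData(),
#     ObjectType(ObjectIdentity('1.3.6.1.4.1.9.9.764.0.1.0'))))
# for varBind in varBinds:
#     print(varBind)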
|
# coding: utf-8
"""
Metal API
This is the API for Equinix Metal. The API allows you to programmatically interact with all of your Equinix Metal resources, including devices, networks, addresses, organizations, projects, and your user account. The official API docs are hosted at <https://metal.equinix.com/developers/api>. # noqa: E501
The version of the OpenAPI document: 1.0.0
Contact: [email protected]
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import unittest
import metal
from metal.api.connections_api import ConnectionsApi  # noqa: E501
from metal.rest import ApiException
class TestConnectionsApi(unittest.TestCase):
"""ConnectionsApi unit test stubs"""
def setUp(self):
self.api = ConnectionsApi()  # noqa: E501
def tearDown(self):
pass
def test_create_connection_port_virtual_circuit(self):
"""Test case for create_connection_port_virtual_circuit
Create a new Virtual Circuit # noqa: E501
"""
pass
def test_create_organization_interconnection(self):
"""Test case for create_organization_interconnection
Request a new connection for the organization # noqa: E501
"""
pass
def test_create_project_interconnection(self):
"""Test case for create_project_interconnection
Request a new connection for the project's organization # noqa: E501
"""
pass
def test_delete_interconnection(self):
"""Test case for delete_interconnection
Delete connection # noqa: E501
"""
pass
def test_delete_virtual_circuit(self):
"""Test case for delete_virtual_circuit
Delete a virtual circuit # noqa: E501
"""
pass
def test_find_connection_events(self):
"""Test case for find_connection_events
Retrieve connection events # noqa: E501
"""
pass
def test_find_connection_port_events(self):
"""Test case for find_connection_port_events
Retrieve connection port events # noqa: E501
"""
pass
def test_find_virtual_circuit_events(self):
"""Test case for find_virtual_circuit_events
Retrieve connection events # noqa: E501
"""
pass
def test_get_connection_port(self):
"""Test case for get_connection_port
Get a connection port # noqa: E501
"""
pass
def test_get_interconnection(self):
"""Test case for get_interconnection
Get connection # noqa: E501
"""
pass
def test_get_virtual_circuit(self):
"""Test case for get_virtual_circuit
Get a virtual circuit # noqa: E501
"""
pass
def test_list_connection_port_virtual_circuits(self):
"""Test case for list_connection_port_virtual_circuits
List a connection port's virtual circuits # noqa: E501
"""
pass
def test_list_connection_ports(self):
"""Test case for list_connection_ports
List a connection's ports # noqa: E501
"""
pass
def test_organization_list_interconnections(self):
"""Test case for organization_list_interconnections
List organization connections # noqa: E501
"""
pass
def test_project_list_interconnections(self):
"""Test case for project_list_interconnections
List project connections # noqa: E501
"""
pass
def test_update_interconnection(self):
"""Test case for update_interconnection
Update connection # noqa: E501
"""
pass
def test_update_virtual_circuit(self):
"""Test case for update_virtual_circuit
Update a virtual circuit # noqa: E501
"""
pass
if __name__ == '__main__':
unittest.main()
|
# -*- coding: utf-8 -*-
#------------------------------------------------------------------------------
# file: $Id$
# lib: genemail.sender
# auth: Philip J Grabner <[email protected]>
# date: 2013/07/09
# copy: (C) Copyright 2013 Cadit Health Inc., All Rights Reserved.
#------------------------------------------------------------------------------
'''
The low-level mail sending agent. Used by the manager to delegate the
actual sending of the composed email. Note that the primary reason to
use a delegated object for this (rather than just using
smtplib.SMTP()) is so that an email can be serialized into another
form, such as for entry into a database or for unit testing and
comparison.
'''
from __future__ import absolute_import
__all__ = ('Sender', 'StoredSender', 'SmtpSender', 'DebugSender')
import smtplib
import email.parser
from templatealchemy.util import adict
#------------------------------------------------------------------------------
class Sender(object):
'''
Abstract interface for an object capable of sending a genemail email
out, usually to an SMTP MTA.
'''
#----------------------------------------------------------------------------
def send(self, mailfrom, recipients, message):
'''
Sends the specified `message` (in SMTP format) to the specified
`recipients` coming from the email address `mailfrom`.
:Parameters:
mailfrom : str
equivalent to the SMTP ``MAIL FROM`` command.
recipients : list(str)
equivalent to the SMTP ``RCPT TO`` command.
message : str
the actual message to be transferred, equivalent to the payload of
the SMTP ``DATA`` command.
'''
raise NotImplementedError()
#------------------------------------------------------------------------------
class SmtpSender(Sender):
'''
An implementation of the :class:`genemail.sender.Sender` interface that
connects to a local or remote SMTP server and submits the message for
transfer or delivery.
:Parameters:
host : str, optional, default: 'localhost'
the SMTP server to connect to.
port : int, optional, default: 25
the SMTP server port to connect to.
ssl : bool, optional, default: false
indicates whether or not to connect using SSL.
starttls : bool, optional, default: false
indicates that a STARTTLS command should be sent after connecting.
username : str, optional
set the SMTP username to authenticate as.
password : str, optional
set the password for the `username`.
'''
#----------------------------------------------------------------------------
def __init__(self,
host='localhost', port=25, ssl=False, starttls=False,
username=None, password=None, *args, **kwargs):
super(SmtpSender, self).__init__(*args, **kwargs)
self.smtpHost = host or 'localhost'
self.smtpPort = port or 25
self.username = username
self.password = password
self.starttls = starttls
self.ssl = ssl
#----------------------------------------------------------------------------
def send(self, mailfrom, recipients, message):
smtp = smtplib.SMTP_SSL() if self.ssl else smtplib.SMTP()
smtp.connect(self.smtpHost, self.smtpPort)
if self.starttls:
smtp.starttls()
if self.username is not None:
smtp.login(self.username, self.password)
smtp.sendmail(mailfrom, recipients, message)
smtp.quit()
#------------------------------------------------------------------------------
class StoredSender(Sender):
'''
An implementation of the :class:`genemail.sender.Sender` interface
that simply stores all messages in local memory in the
:attr:`emails` attribute. Most useful when unit testing email
generation.
'''
#----------------------------------------------------------------------------
def __init__(self, *args, **kwargs):
super(StoredSender, self).__init__(*args, **kwargs)
self.emails = []
#----------------------------------------------------------------------------
def send(self, mailfrom, recipients, message):
self.emails.append(
adict(mailfrom=mailfrom, recipients=recipients, message=message))
#------------------------------------------------------------------------------
class DebugSender(StoredSender):
'''
An extension to the :class:`StoredSender` class that parses each
email into its MIME components, which simplifies unit testing. Each
element in the `emails` attribute has the following attributes:
* `mailfrom`: SMTP-level `MAIL FROM` value (string)
* `recipients`: SMTP-level `RCPT TO` value (list)
* `message`: raw SMTP `DATA` value (string)
* `mime`: the parsed :class:`email.message.Message` object
* `from`: email "From" header - not used by SMTP
* `to`: email "To" header - not used by SMTP
* `date`: email "Date" header
* `message-id`: email "Message-ID" header
* `subject`: email "Subject" header
* `plain`: text/plain version of the email (or None)
* `html`: text/html version of the email (or None)
* `calendar`: text/calendar attachment of the email (or None)
'''
#----------------------------------------------------------------------------
def send(self, mailfrom, recipients, message):
eml = adict(mailfrom=mailfrom, recipients=recipients, message=message)
mime = email.parser.Parser().parsestr(message)
eml['mime'] = mime
eml['from'] = mime.get('from')
eml['to'] = mime.get('to')
eml['date'] = mime.get('date')
eml['message-id'] = mime.get('message-id')
eml['subject'] = mime.get('subject')
for part in mime.walk():
ct = part.get_content_type()
if not ct.startswith('text/'):
continue
ct = ct.split('/')[1]
if eml.get(ct) is None:
eml[ct] = part.get_payload()
elif isinstance(eml[ct], list):
eml[ct].append(part.get_payload())
else:
eml[ct] = [eml[ct], part.get_payload()]
self.emails.append(eml)
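#------------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original module): exercising
# DebugSender in a unit test. The addresses and message text below are made up
# for the example; only the classes defined above are assumed.
#
#   sender = DebugSender()
#   sender.send(
#     '[email protected]', ['[email protected]'],
#     'From: [email protected]\r\nTo: [email protected]\r\n'
#     'Subject: hello\r\nContent-Type: text/plain\r\n\r\nhi there\r\n')
#   eml = sender.emails[0]
#   assert eml.subject == 'hello'
#   assert 'hi there' in eml.plain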
#------------------------------------------------------------------------------
# end of $Id$
#------------------------------------------------------------------------------
|
# -*- coding: utf-8 -*-
#
# Copyright (C) 2020 alzp.
#
# testInvenio is free software; you can redistribute it and/or modify it
# under the terms of the MIT License; see LICENSE file for more details.
"""Default configuration for testInvenio.
You overwrite and set instance-specific configuration by either:
- Configuration file: ``<virtualenv prefix>/var/instance/invenio.cfg``
- Environment variables: ``APP_<variable name>``
"""
from __future__ import absolute_import, print_function
from datetime import timedelta
from invenio_app.config import APP_DEFAULT_SECURE_HEADERS
from invenio_previewer.config import PREVIEWER_PREFERENCE as BASE_PREFERENCE
def _(x):
"""Identity function used to trigger string extraction."""
return x
# Rate limiting
# =============
#: Storage for ratelimiter.
RATELIMIT_STORAGE_URL = 'redis://localhost:6379/3'
# I18N
# ====
#: Default language
BABEL_DEFAULT_LANGUAGE = 'en'
#: Default time zone
BABEL_DEFAULT_TIMEZONE = 'Europe/Zurich'
#: Other supported languages (do not include the default language in list).
I18N_LANGUAGES = [
# ('fr', _('French'))
]
# Base templates
# ==============
#: Global base template.
BASE_TEMPLATE = 'testinvenio/page.html'
#: Cover page base template (used for e.g. login/sign-up).
COVER_TEMPLATE = 'invenio_theme/page_cover.html'
#: Footer base template.
FOOTER_TEMPLATE = 'invenio_theme/footer.html'
#: Header base template.
HEADER_TEMPLATE = 'invenio_theme/header.html'
#: Settings base template.
SETTINGS_TEMPLATE = 'invenio_theme/page_settings.html'
# Theme configuration
# ===================
#: Site name
THEME_SITENAME = _('testInvenio')
#: Use default frontpage.
THEME_FRONTPAGE = True
#: Frontpage title.
THEME_FRONTPAGE_TITLE = _('testInvenio')
#: Frontpage template.
THEME_FRONTPAGE_TEMPLATE = 'testinvenio/frontpage.html'
# Email configuration
# ===================
#: Email address for support.
SUPPORT_EMAIL = "[email protected]"
#: Disable email sending by default.
MAIL_SUPPRESS_SEND = True
# Assets
# ======
#: Static files collection method (defaults to copying files).
COLLECT_STORAGE = 'flask_collect.storage.file'
# Accounts
# ========
#: Email address used as sender of account registration emails.
SECURITY_EMAIL_SENDER = SUPPORT_EMAIL
#: Email subject for account registration emails.
SECURITY_EMAIL_SUBJECT_REGISTER = _(
"Welcome to testInvenio!")
#: Redis session storage URL.
ACCOUNTS_SESSION_REDIS_URL = 'redis://localhost:6379/1'
#: Enable session/user id request tracing. This feature will add X-Session-ID
#: and X-User-ID headers to HTTP response. You MUST ensure that NGINX (or other
#: proxies) removes these headers again before sending the response to the
#: client. Set to False, in case of doubt.
ACCOUNTS_USERINFO_HEADERS = True
# Celery configuration
# ====================
BROKER_URL = 'amqp://guest:guest@localhost:5672/'
#: URL of message broker for Celery (default is RabbitMQ).
CELERY_BROKER_URL = 'amqp://guest:guest@localhost:5672/'
#: URL of backend for result storage (default is Redis).
CELERY_RESULT_BACKEND = 'redis://localhost:6379/2'
#: Scheduled tasks configuration (aka cronjobs).
CELERY_BEAT_SCHEDULE = {
'indexer': {
'task': 'invenio_indexer.tasks.process_bulk_queue',
'schedule': timedelta(minutes=5),
},
'accounts': {
'task': 'invenio_accounts.tasks.clean_session_table',
'schedule': timedelta(minutes=60),
},
}
# Database
# ========
#: Database URI including user and password
SQLALCHEMY_DATABASE_URI = \
'postgresql+psycopg2://testinvenio:testinvenio@localhost/testinvenio'
# JSONSchemas
# ===========
#: Hostname used in URLs for local JSONSchemas.
JSONSCHEMAS_HOST = 'testinvenio.com'
# Flask configuration
# ===================
# See details on
# http://flask.pocoo.org/docs/0.12/config/#builtin-configuration-values
#: Secret key - each installation (dev, production, ...) needs a separate key.
#: It should be changed before deploying.
SECRET_KEY = 'CHANGE_ME'
#: Max upload size for form data via multipart/form-data.
MAX_CONTENT_LENGTH = 100 * 1024 * 1024 # 100 MiB
#: Sets cookie with the secure flag by default
SESSION_COOKIE_SECURE = True
#: Since HAProxy and Nginx route all requests no matter the host header
#: provided, the allowed hosts variable is set to localhost. In production it
#: should be set to the correct host and it is strongly recommended to only
#: route correct hosts to the application.
APP_ALLOWED_HOSTS = ['testinvenio.com', 'localhost', '127.0.0.1']
# OAI-PMH
# =======
OAISERVER_ID_PREFIX = 'oai:testinvenio.com:'
# Previewers
# ==========
#: Include IIIF preview for images.
PREVIEWER_PREFERENCE = ['iiif_image'] + BASE_PREFERENCE
# Debug
# =====
# Flask-DebugToolbar is by default enabled when the application is running in
# debug mode. More configuration options are available at
# https://flask-debugtoolbar.readthedocs.io/en/latest/#configuration
#: Switches off interception of redirects by Flask-DebugToolbar.
DEBUG_TB_INTERCEPT_REDIRECTS = False
# Configures Content Security Policy for PDF Previewer
# Remove it if you are not using PDF Previewer
APP_DEFAULT_SECURE_HEADERS['content_security_policy'] = {
'default-src': ["'self'", "'unsafe-inline'"],
'object-src': ["'none'"],
'style-src': ["'self'", "'unsafe-inline'"],
'font-src': ["'self'", "data:", "https://fonts.gstatic.com",
"https://fonts.googleapis.com"],
}
|
# -*- coding: utf-8 -*-
# This code is part of Qiskit.
#
# (C) Copyright IBM 2017, 2021.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
import numpy as np
from qiskit_metal import draw, Dict
from qiskit_metal.qlibrary.core import BaseQubit
class TunableCoupler01(BaseQubit):
"""One of the tunable couplers Based off the implementation in
https://arxiv.org/pdf/2011.01261.pdf.
WIP - initial test structure
Inherits `BaseQubit` class
Description:
Creates a tunable coupler, interdigitated capacitor to ground, with a junction to ground and a coupler arm.
The shape's origin is shown with 0. X marks the location of the SQUID.
::
connection claw
_____
X | |
| | | | | | | |
| | | | | | | | charge island
| | | | | |
--------------------0--------------------
Options:
Convention: Values (unless noted) are strings with units included,
(e.g., '30um')
BaseQubit Default Options:
* pos_x: '0um' -- Origin of the component (see above figure)
* pos_y: '0um' -- Origin of the component (see above figure)
* orientation: '0' -- Degree of component rotation
* layer: '1' -- layer info, gds and other applications
* connection_pads: empty Dict -- Currently not used, connection count is static. (WIP)
* _default_connection_pads: empty Dict -- The default values for the (if any) connection lines of the qubit.
Default Options:
* c_width: '400um' -- The width (x-axis) of the interdigitated charge island
* l_width: '20um' -- The width of lines forming the body and arms of the charge island
* l_gap: '10um' -- The dielectric gap of the charge island to ground
* a_height: '60um' -- The length of the arms forming the 'fingers' of the charge island
* cp_height: '15um' -- The thickness (y-axis) of the connection claw
* cp_arm_length: '30um' -- The length of the 'fingers' of the connection claw (Warning: can break
the component if they are too long)
* cp_arm_width: '6um' -- The width of the 'fingers' of the connection claw (Warning: can break
the component if too wide)
* cp_gap: '6um' -- The dielectric gap of the connection claw
* cp_gspace: '3um' -- How much ground remains between the connection claw and the charge island
* fl_width: '5um' -- Width of the flux line
* fl_gap: '3um' -- Dielectric gap of the flux line
* fl_length: '10um' -- Length of the flux line for mutual inductance to the SQUID
* fl_ground: '2um' -- Amount of ground between the SQUID and the flux line
* _default_connection_pads: Currently empty
"""
default_options = Dict(pos_x='0um',
pos_y='0um',
orientation='0',
layer='1',
c_width='400um',
l_width='20um',
l_gap='10um',
a_height='60um',
cp_height='15um',
cp_arm_length='30um',
cp_arm_width='6um',
cp_gap='6um',
cp_gspace='3um',
fl_width='5um',
fl_gap='3um',
fl_length='10um',
fl_ground='2um')
component_metadata = Dict(short_name='Pocket',
_qgeometry_table_path='True',
_qgeometry_table_poly='True',
_qgeometry_table_junction='True')
TOOLTIP = """One of the tunable couplers"""
def make(self):
"""Builds the component."""
p = self.p
#Draw the charge island
btm = draw.shapely.geometry.box(-p.c_width / 2, -p.l_width / 2, 0,
p.l_width / 2)
x_spot = p.c_width / 2 - p.l_width / 2
arm1 = draw.shapely.geometry.box(-(x_spot + p.l_width / 2),
p.l_width / 2,
-(x_spot - p.l_width / 2), p.a_height)
arm2 = draw.shapely.geometry.box(-((x_spot) * 3 / 5 + p.l_width / 2),
p.l_width / 2,
-((x_spot) * 3 / 5 - p.l_width / 2),
p.a_height)
arm3 = draw.shapely.geometry.box(-((x_spot) * 1 / 5 + p.l_width / 2),
p.l_width / 2,
-((x_spot) * 1 / 5 - p.l_width / 2),
p.a_height)
left_side = draw.shapely.ops.cascaded_union([btm, arm1, arm2, arm3])
cap_island = draw.shapely.ops.cascaded_union([
left_side,
draw.shapely.affinity.scale(left_side,
xfact=-1,
yfact=1,
origin=(0, 0))
])
cap_subtract = cap_island.buffer(p.l_gap, cap_style=3, join_style=2)
#Reference coordinates
cpl_x = 1 / 5 * x_spot
cpl_y = p.a_height + p.l_gap + p.cp_gap + p.cp_gspace
fl_y = p.a_height + p.l_gap + p.fl_ground + p.fl_gap + p.fl_width / 2
#Draw the junction and flux line
rect_jj = draw.LineString([(-cpl_x * 3, p.a_height),
(-cpl_x * 3, p.a_height + p.l_gap)])
flux_line = draw.LineString([[-cpl_x * 3 - p.fl_length, fl_y],
[-cpl_x * 3, fl_y],
[-cpl_x * 3, fl_y + 0.01]])
#Draw the connector
cpl_x = 1 / 5 * x_spot
cpl_y = p.a_height + p.l_gap + p.cp_gap + p.cp_gspace
con_pad = draw.shapely.geometry.box(
cpl_x - 1 / 5 * x_spot - p.cp_arm_width / 2, cpl_y,
cpl_x + 1 / 5 * x_spot + p.cp_arm_width / 2, cpl_y + p.cp_height)
con_arm_l = draw.shapely.geometry.box(
cpl_x - 1 / 5 * x_spot - p.cp_arm_width / 2,
cpl_y - p.cp_arm_length,
cpl_x - 1 / 5 * x_spot + p.cp_arm_width / 2, cpl_y)
con_arm_r = draw.shapely.geometry.box(
cpl_x + 1 / 5 * x_spot - p.cp_arm_width / 2,
cpl_y - p.cp_arm_length,
cpl_x + 1 / 5 * x_spot + p.cp_arm_width / 2, cpl_y)
con_body = draw.shapely.ops.cascaded_union(
[con_pad, con_arm_l, con_arm_r])
con_sub = con_body.buffer(p.cp_gap, cap_style=3, join_style=2)
con_pin = draw.LineString([[cpl_x, cpl_y], [cpl_x,
cpl_y + p.cp_height]])
#Rotate and translate.
c_items = [
cap_island, cap_subtract, rect_jj, con_body, con_sub, flux_line,
con_pin
]
c_items = draw.rotate(c_items, p.orientation, origin=(0, 0))
c_items = draw.translate(c_items, p.pos_x, p.pos_y)
[
cap_island, cap_subtract, rect_jj, con_body, con_sub, flux_line,
con_pin
] = c_items
#Add to qgeometry
self.add_qgeometry('poly', {
'cap_island': cap_island,
'connector_body': con_body
},
layer=p.layer)
self.add_qgeometry('poly', {
'cap_subtract': cap_subtract,
'connector_sub': con_sub
},
layer=p.layer,
subtract=True)
self.add_qgeometry('path', {'flux_line': flux_line},
width=p.fl_width,
layer=p.layer)
self.add_qgeometry('path', {'flux_line_sub': flux_line},
width=p.fl_width + 2 * p.fl_gap,
subtract=True,
layer=p.layer)
self.add_qgeometry('junction', dict(rect_jj=rect_jj), width=p.l_width)
#Add pin
self.add_pin('Control',
points=np.array(con_pin),
width=p.l_width,
input_as_norm=True)
self.add_pin('Flux',
points=np.array(flux_line.coords[-2:]),
width=p.l_width,
input_as_norm=True)
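# Illustrative usage sketch (not part of this module), assuming a qiskit-metal
# planar design; the design, component name and option overrides are made up:
#
#   from qiskit_metal import designs
#
#   design = designs.DesignPlanar()
#   coupler = TunableCoupler01(design, 'TC_1',
#                              options=dict(pos_x='1mm', pos_y='0mm'))
#   design.rebuild()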
|
# -*- coding: utf-8 -*-
## Demonstration of Link with multiple outputs: Combined-Heat-and-Power (CHP) with fixed heat-power ratio
#
# For a CHP with a more complicated heat-power feasible operational area, see https://pypsa.readthedocs.io/en/latest/examples/power-to-gas-boiler-chp.html.
#
# This example demonstrates a Link component with more than one bus output ("bus2" in this case). In general links can have many output buses.
#
# In this example a CHP must be heat-following because there is no other supply of heat to the bus "Frankfurt heat".
import numpy as np
import pypsa
# First tell PyPSA that links will have a 2nd bus by
# overriding the component_attrs. This can be done for
# as many buses as you need, using the names bus2, bus3, bus4, ...
# (a sketch of a hypothetical bus3 follows the p2 definition below).
override_component_attrs = pypsa.descriptors.Dict(
{k: v.copy() for k, v in pypsa.components.component_attrs.items()}
)
override_component_attrs["Link"].loc["bus2"] = [
"string",
np.nan,
np.nan,
"2nd bus",
"Input (optional)",
]
override_component_attrs["Link"].loc["efficiency2"] = [
"static or series",
"per unit",
1.0,
"2nd bus efficiency",
"Input (optional)",
]
override_component_attrs["Link"].loc["p2"] = [
"series",
"MW",
0.0,
"2nd bus output",
"Output",
]
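# The same pattern would extend to further buses; a hedged sketch of a
# hypothetical third bus is shown below (commented out, not used in this
# example):
#
# override_component_attrs["Link"].loc["bus3"] = [
#     "string", np.nan, np.nan, "3rd bus", "Input (optional)"]
# override_component_attrs["Link"].loc["efficiency3"] = [
#     "static or series", "per unit", 1.0, "3rd bus efficiency", "Input (optional)"]
# override_component_attrs["Link"].loc["p3"] = [
#     "series", "MW", 0.0, "3rd bus output", "Output"]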
network = pypsa.Network(override_component_attrs=override_component_attrs)
network.add("Bus", "Frankfurt", carrier="AC")
network.add("Load", "Frankfurt", bus="Frankfurt", p_set=5)
network.add("Bus", "Frankfurt heat", carrier="heat")
network.add("Load", "Frankfurt heat", bus="Frankfurt heat", p_set=3)
network.add("Bus", "Frankfurt gas", carrier="gas")
network.add("Store", "Frankfurt gas", e_initial=1e6, e_nom=1e6, bus="Frankfurt gas")
network.add(
"Link",
"OCGT",
bus0="Frankfurt gas",
bus1="Frankfurt",
p_nom_extendable=True,
capital_cost=600,
efficiency=0.4,
)
network.add(
"Link",
"CHP",
bus0="Frankfurt gas",
bus1="Frankfurt",
bus2="Frankfurt heat",
p_nom_extendable=True,
capital_cost=1400,
efficiency=0.3,
efficiency2=0.3,
)
network.lopf()
network.loads_t.p
network.links_t.p0
network.links_t.p1
network.links_t.p2
|
import argparse
import math
import numpy as np
import torch
from torch import nn
from basicsr.archs.stylegan2_arch import StyleGAN2Generator
from basicsr.metrics.fid import (calculate_fid, extract_inception_features,
load_patched_inception_v3)
def calculate_stylegan2_fid():
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
parser = argparse.ArgumentParser()
parser.add_argument(
'ckpt', type=str, help='Path to the stylegan2 checkpoint.')
parser.add_argument(
'fid_stats', type=str, help='Path to the dataset fid statistics.')
parser.add_argument('--size', type=int, default=256)
parser.add_argument('--channel_multiplier', type=int, default=2)
parser.add_argument('--batch_size', type=int, default=64)
parser.add_argument('--num_sample', type=int, default=50000)
parser.add_argument('--truncation', type=float, default=1)
parser.add_argument('--truncation_mean', type=int, default=4096)
args = parser.parse_args()
# create stylegan2 model
generator = StyleGAN2Generator(
out_size=args.size,
num_style_feat=512,
num_mlp=8,
channel_multiplier=args.channel_multiplier,
resample_kernel=(1, 3, 3, 1))
generator.load_state_dict(torch.load(args.ckpt)['params_ema'])
generator = nn.DataParallel(generator).eval().to(device)
if args.truncation < 1:
with torch.no_grad():
truncation_latent = generator.mean_latent(args.truncation_mean)
else:
truncation_latent = None
# inception model
inception = load_patched_inception_v3(device)
total_batch = math.ceil(args.num_sample / args.batch_size)
def sample_generator(total_batch):
for i in range(total_batch):
with torch.no_grad():
latent = torch.randn(args.batch_size, 512, device=device)
samples, _ = generator([latent],
truncation=args.truncation,
truncation_latent=truncation_latent)
yield samples
features = extract_inception_features(
sample_generator(total_batch), inception, total_batch, device)
features = features.numpy()
total_len = features.shape[0]
features = features[:args.num_sample]
print(f'Extracted {total_len} features, '
f'use the first {features.shape[0]} features to calculate stats.')
sample_mean = np.mean(features, 0)
sample_cov = np.cov(features, rowvar=False)
# load the dataset stats
stats = torch.load(args.fid_stats)
real_mean = stats['mean']
real_cov = stats['cov']
# calculate FID metric
fid = calculate_fid(sample_mean, sample_cov, real_mean, real_cov)
print('fid:', fid)
if __name__ == '__main__':
calculate_stylegan2_fid()
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
r"""Creates and runs `Estimator` for object detection model on TPUs.
This uses the TPUEstimator API to define and run a model in TRAIN/EVAL modes.
"""
# pylint: enable=line-too-long
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl import flags
import tensorflow.compat.v1 as tf
from tensorflow.compat.v1 import estimator as tf_estimator
from object_detection import model_lib
tf.flags.DEFINE_bool('use_tpu', True, 'Use TPUs rather than plain CPUs')
# Cloud TPU Cluster Resolvers
flags.DEFINE_string(
'gcp_project',
default=None,
help='Project name for the Cloud TPU-enabled project. If not specified, we '
'will attempt to automatically detect the GCE project from metadata.')
flags.DEFINE_string(
'tpu_zone',
default=None,
help='GCE zone where the Cloud TPU is located in. If not specified, we '
'will attempt to automatically detect the GCE project from metadata.')
flags.DEFINE_string(
'tpu_name',
default=None,
help='Name of the Cloud TPU for Cluster Resolvers.')
flags.DEFINE_integer('num_shards', 8, 'Number of shards (TPU cores).')
flags.DEFINE_integer('iterations_per_loop', 100,
'Number of iterations per TPU training loop.')
# For mode=train_and_eval, evaluation occurs after training is finished.
# Note: independently of steps_per_checkpoint, estimator will save the most
# recent checkpoint every 10 minutes by default for train_and_eval
flags.DEFINE_string('mode', 'train',
'Mode to run: train, eval')
flags.DEFINE_integer('train_batch_size', None, 'Batch size for training. If '
'this is not provided, batch size is read from training '
'config.')
flags.DEFINE_integer('num_train_steps', None, 'Number of train steps.')
flags.DEFINE_boolean('eval_training_data', False,
'If training data should be evaluated for this job.')
flags.DEFINE_integer('sample_1_of_n_eval_examples', 1, 'Will sample one of '
'every n eval input examples, where n is provided.')
flags.DEFINE_integer('sample_1_of_n_eval_on_train_examples', 5, 'Will sample '
'one of every n train input examples for evaluation, '
'where n is provided. This is only used if '
'`eval_training_data` is True.')
flags.DEFINE_string(
'model_dir', None, 'Path to output model directory '
'where event and checkpoint files will be written.')
flags.DEFINE_string('pipeline_config_path', None, 'Path to pipeline config '
'file.')
flags.DEFINE_integer(
'max_eval_retries', 0, 'If running continuous eval, the maximum number of '
'retries upon encountering tf.errors.InvalidArgumentError. If negative, '
'will always retry the evaluation.'
)
FLAGS = tf.flags.FLAGS
def main(unused_argv):
flags.mark_flag_as_required('model_dir')
flags.mark_flag_as_required('pipeline_config_path')
tpu_cluster_resolver = (
tf.distribute.cluster_resolver.TPUClusterResolver(
tpu=[FLAGS.tpu_name], zone=FLAGS.tpu_zone, project=FLAGS.gcp_project))
tpu_grpc_url = tpu_cluster_resolver.get_master()
config = tf_estimator.tpu.RunConfig(
master=tpu_grpc_url,
evaluation_master=tpu_grpc_url,
model_dir=FLAGS.model_dir,
tpu_config=tf_estimator.tpu.TPUConfig(
iterations_per_loop=FLAGS.iterations_per_loop,
num_shards=FLAGS.num_shards))
kwargs = {}
if FLAGS.train_batch_size:
kwargs['batch_size'] = FLAGS.train_batch_size
train_and_eval_dict = model_lib.create_estimator_and_inputs(
run_config=config,
pipeline_config_path=FLAGS.pipeline_config_path,
train_steps=FLAGS.num_train_steps,
sample_1_of_n_eval_examples=FLAGS.sample_1_of_n_eval_examples,
sample_1_of_n_eval_on_train_examples=(
FLAGS.sample_1_of_n_eval_on_train_examples),
use_tpu_estimator=True,
use_tpu=FLAGS.use_tpu,
num_shards=FLAGS.num_shards,
save_final_config=FLAGS.mode == 'train',
**kwargs)
estimator = train_and_eval_dict['estimator']
train_input_fn = train_and_eval_dict['train_input_fn']
eval_input_fns = train_and_eval_dict['eval_input_fns']
eval_on_train_input_fn = train_and_eval_dict['eval_on_train_input_fn']
train_steps = train_and_eval_dict['train_steps']
if FLAGS.mode == 'train':
estimator.train(input_fn=train_input_fn, max_steps=train_steps)
# Continuously evaluating.
if FLAGS.mode == 'eval':
if FLAGS.eval_training_data:
name = 'training_data'
input_fn = eval_on_train_input_fn
else:
name = 'validation_data'
# Currently only a single eval input is allowed.
input_fn = eval_input_fns[0]
model_lib.continuous_eval(estimator, FLAGS.model_dir, input_fn, train_steps,
name, FLAGS.max_eval_retries)
if __name__ == '__main__':
tf.app.run()
|
import unittest
import networkx as nx
from core.placement.swsolver import RecShortestWalkSolver
class TestShorthestWalkSolverMethods(unittest.TestCase):
def setUp(self):
self.g1 = nx.read_weighted_edgelist('tests/test_graph_2.txt', create_using=nx.MultiDiGraph, nodetype=int)
def test_shortest_walk(self):
u = 0
v = 3
k = 1
weight_shortest_walk = 2
(weight, walk) = RecShortestWalkSolver.shortest_walk(self.g1, u, v, k)
print(walk)
self.assertEqual(weight, weight_shortest_walk)
self.assertEqual(walk, [0, 3])
u = 0
v = 3
k = 2
weight_shortest_walk = 3
(weight, walk) = RecShortestWalkSolver.shortest_walk(self.g1, u, v, k)
print(walk)
self.assertEqual(weight, weight_shortest_walk)
self.assertEqual(walk, [0, 0, 3])
if __name__ == '__main__':
unittest.main()
|
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import testtools
from webob import headers as wb_headers
import microversion_parse
class TestWebobHeaders(testtools.TestCase):
"""Webob uses a dict-like header which is not actually a dict."""
def test_simple_headers(self):
headers = wb_headers.EnvironHeaders({
'HTTP_HEADER_ONE': 'alpha',
'HTTP_HEADER_TWO': 'beta',
'HTTP_HEADER_THREE': 'gamma',
})
folded_headers = microversion_parse.fold_headers(headers)
self.assertEqual(3, len(folded_headers))
self.assertEqual(set(['header-one', 'header-three', 'header-two']),
set(folded_headers.keys()))
self.assertEqual('gamma', folded_headers['header-three'])
def test_simple_match(self):
headers = wb_headers.EnvironHeaders({
'HTTP_HEADER_ONE': 'alpha',
'HTTP_OPENSTACK_API_VERSION': 'compute 2.1',
'HTTP_HEADER_TWO': 'beta',
})
version = microversion_parse.check_standard_header(headers, 'compute')
self.assertEqual('2.1', version)
def test_match_multiple_services(self):
headers = wb_headers.EnvironHeaders({
'HTTP_HEADER_ONE': 'alpha',
'HTTP_OPENSTACK_API_VERSION':
'network 5.9 ,compute 2.1,telemetry 7.8',
'HTTP_HEADER_TWO': 'beta',
})
version = microversion_parse.check_standard_header(
headers, 'compute')
self.assertEqual('2.1', version)
version = microversion_parse.check_standard_header(
headers, 'telemetry')
self.assertEqual('7.8', version)
def test_legacy_headers_straight(self):
headers = wb_headers.EnvironHeaders({
'HTTP_HEADER_ONE': 'alpha',
'HTTP_X_OPENSTACK_NOVA_API_VERSION': ' 2.1 ',
'HTTP_HEADER_TWO': 'beta',
})
version = microversion_parse.get_version(
headers, service_type='compute',
legacy_headers=['x-openstack-nova-api-version'])
self.assertEqual('2.1', version)
def test_legacy_headers_folded(self):
headers = wb_headers.EnvironHeaders({
'HTTP_HEADER_ONE': 'alpha',
'HTTP_X_OPENSTACK_NOVA_API_VERSION': ' 2.1, 9.2 ',
'HTTP_HEADER_TWO': 'beta',
})
version = microversion_parse.get_version(
headers, service_type='compute',
legacy_headers=['x-openstack-nova-api-version'])
self.assertEqual('9.2', version)
|
from fourparts.commons.Orbit import Orbit
import pytest
def test_cases():
orbit_1 = [1, 2, 3]
orbit_2 = ("X", "Y", "Z", "T")
return [
([1], 0, [1]),
(orbit_1, 0, [1, 2, 3]),
(orbit_1, 1, [2, 3, 1]),
(orbit_2, 3, ["T", "X", "Y", "Z"]),
]
@pytest.mark.parametrize("orbit, index, expected", test_cases())
def test_eval(orbit, index, expected):
assert Orbit(orbit, index).get_curr_orbit() == expected
|
from flask import jsonify
from flask import Flask,request
from flaskext.mysql import MySQL
from flask_cors import CORS, cross_origin
mysql = MySQL()
app = Flask(__name__)
CORS(app)
app.config['MYSQL_DATABASE_USER'] = 'demo'
app.config['MYSQL_DATABASE_PASSWORD'] = '37cnit73'
app.config['MYSQL_DATABASE_DB'] = 'mysql'
app.config['MYSQL_DATABASE_HOST'] = 'localhost'
mysql.init_app(app)
@app.route("/realtime")
def real():
try:
cursor = mysql.connect().cursor()
cursor.execute("SELECT * from LAST;")
data = cursor.fetchall()
dic_ret = {"samples":[{"ip":i[1],"pps":i[2], "ts":i[3]} for i in data]}
except Exception as e:
return str(e)
return jsonify(dic_ret)
@app.route("/flowhistory")
def fhistory():
try:
flow = request.args.get('ip')
ts = request.args.get('ts')
cursor = mysql.connect().cursor()
cursor.execute("SELECT * from HISTORY where FLOW='"+flow+"' AND TIMESTAMP > '"+ts+"';")
data = cursor.fetchall()
dic_ret = {"samples":[{"ip":i[1],"pps":i[2], "ts":i[3]} for i in data]}
except Exception as e:
return str(e)
return jsonify(dic_ret)
@app.route("/history")
def history():
try:
cursor = mysql.connect().cursor()
cursor.execute("SELECT COUNT(FLOW),FLOW FROM HISTORY GROUP BY FLOW ORDER BY COUNT(FLOW) DESC;")
data = cursor.fetchall()
dic_ret={}
dic_ret["ip_list"] = [i[1] for i in data]
except Exception as e:
return str(e)
return jsonify(dic_ret)
@app.route("/historyCounter")
def historyCounter():
try:
cursor = mysql.connect().cursor()
cursor.execute("SELECT COUNT(FLOW),FLOW FROM HISTORY GROUP BY FLOW ORDER BY COUNT(FLOW) DESC;")
data = cursor.fetchall()
dic_ret={}
dic_ret["ip_list"] = [{"ip":i[1],"counter":i[0]} for i in data]
except Exception as e:
return str(e)
return jsonify(dic_ret)
if __name__ == "__main__":
app.run(threaded=True)
|
texto = ''
print(f'Result: {texto}')
texto = str(input('Write something: '))
print(f'Result: {texto}')
texto = 4
print(f'Result: {texto}')
|
import json
import logging
import faker
from .coordinator import CoordinatorAgent
from .passenger import PassengerAgent
from .taxi import TaxiAgent
logger = logging.getLogger()
faker_factory = faker.Factory.create()
class Scenario(object):
"""
A scenario object reads a file with a JSON representation of a scenario and is used to create the participant agents.
"""
def __init__(self, filename):
"""
The Scenario constructor reads the JSON file and creates the defined agents found in that file.
Args:
filename (str): the name of the scenario file
"""
self.taxis = []
self.passengers = []
self.scenario = None
with open(filename, 'r') as f:
logger.info("Reading scenario {}".format(filename))
self.scenario = json.load(f)
def load(self, coordinator: CoordinatorAgent):
logger.info("Loading scenario...")
for taxi in self.scenario["taxis"]:
password = taxi["password"] if "password" in taxi else faker_factory.password()
speed = taxi["speed"] if "speed" in taxi else None
coordinator.create_agent(TaxiAgent, taxi["name"], password, taxi["position"], speed=speed)
for passenger in self.scenario["passengers"]:
password = passenger["password"] if "password" in passenger else faker_factory.password()
coordinator.create_agent(PassengerAgent,
passenger["name"], password,
passenger["position"],
target=passenger["dest"])
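# Illustrative scenario file (not shipped with this module): a minimal JSON
# structure consistent with the keys read by Scenario.load() above. The names,
# positions and destination are made up; "password" and "speed" are optional.
#
# {
#     "taxis": [
#         {"name": "taxi1", "position": [39.457, -0.401], "speed": 2000}
#     ],
#     "passengers": [
#         {"name": "passenger1", "position": [39.463, -0.376], "dest": [39.492, -0.399]}
#     ]
# }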
|
# -*- coding: utf-8 -*-
{
"name" : "POS Repair Order",
"summary" : "POS Repair Order.",
"category" : "Point Of Sale",
"version" : "1.0.0",
"author" : "Prolitus Technologies Pvt. Ltd.",
"license" : "Other proprietary",
"depends" : ['point_of_sale', 'repair', 'hr', 'pos_orders', 'inter_pos_warehouse_tranfer', 'pos_product_detail'],
"data" : [
'reports/report_file.xml',
'reports/quotation_report.xml',
'reports/pos_order_quotation_report.xml',
'data/send_quotation_template.xml',
'data/ir_sequence_data.xml',
'security/ir.model.access.csv',
'wizard/assign_wizard_view.xml',
'wizard/add_repair_line_wizard_view.xml',
'wizard/remove_repair_line_wizard_view.xml',
'wizard/repair_order_pickings_wizard_view.xml',
'views/pos_config_view.xml',
'views/template.xml',
'views/pos_quotes_view.xml',
'views/customer_vehicle_view.xml',
'views/service_history_view.xml',
'views/product_template_view.xml',
'views/bay.xml',
'views/removed_repair_line_view.xml',
'views/stock_warehouse_view.xml',
'views/stock_picking_view.xml'
],
"qweb" : ['static/src/xml/pos_orders.xml'],
"application" : True,
"installable" : True,
"auto_install" : False,
"pre_init_hook" : "pre_init_check",
}
|
# Created byMartin.cz
# Copyright (c) Martin Strohalm. All rights reserved.
import pero
class DrawTest(pero.Graphics):
"""Test case for text properties drawing."""
def draw(self, canvas, *args, **kwargs):
"""Draws the test."""
# clear canvas
canvas.fill(pero.colors.White)
# init glyphs
line = pero.Line(
line_width = 1,
line_color = pero.colors.Red)
label = pero.Text(
font_size = 12,
font_name = "Arial")
# init coords
x = 20
y = 20
# test family
label.draw(canvas, x=x, y=y, text="serif", font_family=pero.FONT_FAMILY_SERIF, font_name=pero.UNDEF)
x += 60
label.draw(canvas, x=x, y=y, text="sans-serif", font_family=pero.FONT_FAMILY_SANS, font_name=pero.UNDEF)
x += 60
label.draw(canvas, x=x, y=y, text="monospace", font_family=pero.FONT_FAMILY_MONO, font_name=pero.UNDEF)
x = 20
y += 30
# test name
label.draw(canvas, x=x, y=y, text="arial", font_name='Arial', font_size=12)
x += 60
label.draw(canvas, x=x, y=y, text="times", font_name="Times New Roman", font_size=12)
x += 60
label.draw(canvas, x=x, y=y, text="courier", font_name="Courier New", font_size=12)
x = 20
y += 30
# test style
label.draw(canvas, x=x, y=y, text="normal", font_style=pero.FONT_STYLE_NORMAL)
x += 60
label.draw(canvas, x=x, y=y, text="italic", font_style=pero.FONT_STYLE_ITALIC)
x = 20
y += 30
# test weight
label.draw(canvas, x=x, y=y, text="normal", font_weight=pero.FONT_WEIGHT_NORMAL)
x += 60
label.draw(canvas, x=x, y=y, text="light", font_weight=pero.FONT_WEIGHT_LIGHT)
x += 60
label.draw(canvas, x=x, y=y, text="bold", font_weight=pero.FONT_WEIGHT_BOLD)
x = 20
y += 30
# test size
label.draw(canvas, x=x, y=y, text="size 10", font_size=10)
x += 60
label.draw(canvas, x=x, y=y, text="size 12", font_size=12)
x += 60
label.draw(canvas, x=x, y=y, text="size 14", font_size=14)
x = 20
y += 30
# test color
label.draw(canvas, x=x, y=y, text="black", text_color=pero.colors.Black, text_bgr_color=None)
x += 50
label.draw(canvas, x=x, y=y, text="blue", text_color=pero.colors.Blue, text_bgr_color=None)
x += 50
label.draw(canvas, x=x, y=y, text="background", text_color=pero.colors.LightGrey, text_bgr_color=pero.colors.Black)
x = 20
y += 30
# test alignment
line.draw(canvas, x1=x, y1=y-5, x2=x, y2=y+17)
label.draw(canvas, x=x, y=y, text="LEFT", text_align=pero.TEXT_ALIGN_LEFT)
x += 100
line.draw(canvas, x1=x, y1=y-5, x2=x, y2=y+17)
label.draw(canvas, x=x, y=y, text="CENTER", text_align=pero.TEXT_ALIGN_CENTER)
x += 100
line.draw(canvas, x1=x, y1=y-5, x2=x, y2=y+17)
label.draw(canvas, x=x, y=y, text="RIGHT", text_align=pero.TEXT_ALIGN_RIGHT)
x = 20
y += 50
# test baseline
line.draw(canvas, x1=x-5, y1=y, x2=x+50, y2=y)
label.draw(canvas, x=x, y=y, text="TOP", text_base=pero.TEXT_BASE_TOP)
x += 100
line.draw(canvas, x1=x-5, y1=y, x2=x+50, y2=y)
label.draw(canvas, x=x, y=y, text="MIDDLE", text_base=pero.TEXT_BASE_MIDDLE)
x += 100
line.draw(canvas, x1=x-5, y1=y, x2=x+55, y2=y)
label.draw(canvas, x=x, y=y, text="BOTTOM", text_base=pero.TEXT_BASE_BOTTOM)
# run test
if __name__ == '__main__':
pero.debug(DrawTest(), 'show', "Text", 350, 280)
|
# Generated by Django 3.1 on 2021-01-21 10:40
import django.db.models.deletion
from django.db import migrations, models
import elearn.models
class Migration(migrations.Migration):
dependencies = [
('elearn', '0011_auto_20210108_0028'),
]
operations = [
migrations.CreateModel(
name='AssignmentSolution',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('file_url', models.ImageField(upload_to=elearn.models.file_directory_path)),
('post', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='elearn.classworkpost')),
('student', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='elearn.student')),
],
),
]
|
from typing import Callable, Optional
import socketio
import config
class SocketClient():
def __init__(self) -> None:
super().__init__()
self.on_exercise_data_received: Optional[Callable[[dict], None]] = None
self.on_duration_received: Optional[Callable[[int], None]] = None
self.on_idle_received: Optional[Callable[[bool], None]] = None
sio = socketio.Client()
self.sio = sio
@sio.event
def exercise_data(data):
self.on_exercise_data_received(data)
@sio.event
def idle_state(is_idle):
self.on_idle_received(is_idle)
@sio.event
def duration(duration):
self.on_duration_received(duration)
def emit(self, event: str):
self.sio.emit(event)
def start(self):
self.sio.connect(config.properties.server_address)
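# Illustrative wiring sketch (not part of this module): register the callbacks
# before starting the client. The handler bodies and the emitted event name are
# assumptions for the example.
#
#   client = SocketClient()
#   client.on_exercise_data_received = lambda data: print('exercise:', data)
#   client.on_duration_received = lambda seconds: print('duration:', seconds)
#   client.on_idle_received = lambda is_idle: print('idle:', is_idle)
#   client.start()
#   client.emit('start_session')  # hypothetical event name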
|
# MIT License
#
# Copyright (c) 2019 Nihaal Sangha (Orangutan)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import re
from setuptools import find_packages, setup
with open('requirements.txt') as f:
requirements = f.read().splitlines()
with open('mixer/__init__.py') as f:
version = re.search(r'^__version__\s*=\s*[\'"]([^\'"]*)[\'"]', f.read(), re.MULTILINE).group(1)
if not version:
raise RuntimeError('Version is not set')
if version.endswith(('a', 'b', 'rc')):
# Append version identifier based on commit count
try:
import subprocess
p = subprocess.Popen(['git', 'rev-list', '--count', 'HEAD'],
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
out, err = p.communicate()
if out:
version += out.decode('utf-8').strip()
p = subprocess.Popen(['git', 'rev-parse', '--short', 'HEAD'],
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
out, err = p.communicate()
if out:
version += '+g' + out.decode('utf-8').strip()
except Exception:
pass
with open('README.md') as f:
readme = f.read()
setup(
name="Mixer.py",
author="Nihaal Sangha (Orangutan)",
url="https://github.com/OrangutanGaming/Mixer.py",
project_urls={
"Issue tracker": "https://github.com/OrangutanGaming/Mixer.py/issues",
},
version=version,
packages=find_packages(),
license="MIT",
description="An async Mixer library",
long_description=readme,
long_description_content_type="text/markdown",
include_package_data=True,
install_requires=requirements,
python_requires='>=3.6',
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
]
)
|
"""
Copyright (c) 2016-present, Facebook, Inc.
All rights reserved.
This source code is licensed under the BSD-style license found in the
LICENSE file in the root directory of this source tree. An additional grant
of patent rights can be found in the PATENTS file in the same directory.
"""
import grpc
import logging
from magma.common.grpc_client_manager import GRPCClientManager
from magma.common.rpc_utils import grpc_async_wrapper
from magma.common.redis.containers import RedisFlatDict
from magma.common.service import MagmaService
from magma.state.keys import make_scoped_device_id
from magma.state.redis_dicts import get_json_redis_dicts, \
get_proto_redis_dicts
from orc8r.protos.state_pb2 import DeleteStatesRequest, StateID
DEFAULT_GRPC_TIMEOUT = 10
class GarbageCollector:
"""
GarbageCollector periodically fetches all state in Redis that is marked as
garbage and deletes that state from the Orchestrator State service. If the
RPC call succeeds, it then deletes the state from Redis
"""
def __init__(self,
service: MagmaService,
grpc_client_manager: GRPCClientManager):
self._service = service
# Redis dicts for each type of state to replicate
self._redis_dicts = []
self._redis_dicts.extend(get_proto_redis_dicts(service.config))
self._redis_dicts.extend(get_json_redis_dicts(service.config))
# _grpc_client_manager to manage grpc client recyclings
self._grpc_client_manager = grpc_client_manager
async def run_garbage_collection(self):
request = await self._collect_states_to_delete()
if request is not None:
await self._send_to_state_service(request)
async def _collect_states_to_delete(self):
states_to_delete = []
for redis_dict in self._redis_dicts:
for key in redis_dict.garbage_keys():
state_scope = redis_dict.state_scope
device_id = make_scoped_device_id(key, state_scope)
sid = StateID(deviceID=device_id, type=redis_dict.redis_type)
states_to_delete.append(sid)
if len(states_to_delete) == 0:
logging.debug("Not garbage collecting state. No state to delete!")
return None
# NetworkID will be filled in by Orchestrator from GW context
return DeleteStatesRequest(networkID="", ids=states_to_delete)
async def _send_to_state_service(self, request: DeleteStatesRequest):
state_client = self._grpc_client_manager.get_client()
try:
await grpc_async_wrapper(
state_client.DeleteStates.future(
request,
DEFAULT_GRPC_TIMEOUT,
))
except grpc.RpcError as err:
logging.error("GRPC call failed for state deletion: %s", err)
else:
for redis_dict in self._redis_dicts:
for key in redis_dict.garbage_keys():
await self._delete_state_from_redis(redis_dict, key)
async def _delete_state_from_redis(self,
redis_dict: RedisFlatDict,
key: str) -> None:
# Ensure that the object isn't updated before deletion
with redis_dict.lock(key):
deleted = redis_dict.delete_garbage(key)
if deleted:
logging.debug("Successfully garbage collected "
"state for key: %s", key)
else:
logging.debug("Successfully garbage collected "
"state in cloud for key %s. "
"Didn't delete locally as the "
"object is no longer garbage", key)
|
#!/usr/bin/env python3
# Import standard modules ...
import glob
# Import special modules ...
try:
import PIL
import PIL.Image
except:
raise Exception("\"PIL\" is not installed; run \"pip install --user Pillow\"") from None
# Import my modules ...
try:
import pyguymer3
import pyguymer3.image
except:
raise Exception("\"pyguymer3\" is not installed; you need to have the Python module from https://github.com/Guymer/PyGuymer3 located somewhere in your $PYTHONPATH") from None
# Configure PIL to open images up to 1 GiP ...
PIL.Image.MAX_IMAGE_PIXELS = 1024 * 1024 * 1024 # [px]
# ******************************************************************************
# Initialize list ...
images = []
# Loop over frames (0,000m to 0,499m) ...
for frame in sorted(glob.glob("createFlood_0[0-4][0-9][0-9]m.png")):
# Open image as RGB (even if it is paletted) ...
image = PIL.Image.open(frame).convert("RGB")
# Append it to the list ...
images.append(image)
# Save 25fps GIF ...
images[0].save("createFlood.gif", save_all = True, append_images = images[1:], duration = 40, loop = 0)
pyguymer3.image.optimize_image("createFlood.gif", strip = True)
# Clean up ...
for image in images:
image.close()
del images
# ******************************************************************************
# Set widths ...
# NOTE: By inspection, the PNG frames are 1320 wide.
widths = [512, 1024] # [px]
# Loop over widths ...
for width in widths:
    # Initialize list ...
images = []
# Loop over frames (0,000m to 0,499m) ...
for frame in sorted(glob.glob("createFlood_0[0-4][0-9][0-9]m.png")):
# Open image as RGB (even if it is paletted) ...
image = PIL.Image.open(frame).convert("RGB")
# Calculate height ...
ratio = float(image.size[0]) / float(image.size[1]) # [px/px]
height = round(float(width) / ratio) # [px]
# Downscale the image and append it to the list ...
images.append(image.resize((width, height), resample = PIL.Image.LANCZOS))
# Save 25fps GIF ...
images[0].save("createFlood{:04d}px.gif".format(width), save_all = True, append_images = images[1:], duration = 40, loop = 0)
pyguymer3.image.optimize_image("createFlood{:04d}px.gif".format(width), strip = True)
# Clean up ...
for image in images:
image.close()
del images
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
import logging
import sys
from typing import Dict
from ml.rl.evaluation.evaluator import Evaluator
from ml.rl.preprocessing.normalization import (
construct_action_scale_tensor,
get_num_output_features,
)
from ml.rl.preprocessing.preprocessor import Preprocessor
from ml.rl.preprocessing.sparse_to_dense import PandasSparseToDenseProcessor
from ml.rl.readers.json_dataset_reader import JSONDatasetReader
from ml.rl.tensorboardX import summary_writer_context
from ml.rl.thrift.core.ttypes import (
ContinuousActionModelParameters,
DDPGModelParameters,
DDPGNetworkParameters,
DDPGTrainingParameters,
NormalizationParameters,
RLParameters,
)
from ml.rl.training.ddpg_trainer import ActorNetModel, CriticNetModel, DDPGTrainer
from ml.rl.training.rl_exporter import ActorExporter
from ml.rl.workflow.base_workflow import BaseWorkflow
from ml.rl.workflow.helpers import (
export_trainer_and_predictor,
minibatch_size_multiplier,
parse_args,
update_model_for_warm_start,
)
from ml.rl.workflow.preprocess_handler import (
ContinuousPreprocessHandler,
PreprocessHandler,
)
from tensorboardX import SummaryWriter
logger = logging.getLogger(__name__)
class ContinuousWorkflow(BaseWorkflow):
def __init__(
self,
model_params: ContinuousActionModelParameters,
preprocess_handler: PreprocessHandler,
state_normalization: Dict[int, NormalizationParameters],
action_normalization: Dict[int, NormalizationParameters],
use_gpu: bool,
use_all_avail_gpus: bool,
):
logger.info("Running continuous workflow with params:")
logger.info(model_params)
        self.model_params = model_params
min_action_range_tensor_serving, max_action_range_tensor_serving = construct_action_scale_tensor(
action_normalization, model_params.action_rescale_map
)
state_dim = get_num_output_features(state_normalization)
action_dim = get_num_output_features(action_normalization)
# Build Actor Network
actor_network = ActorNetModel(
layers=(
[state_dim] + model_params.actor_training.layers[1:-1] + [action_dim]
),
activations=model_params.actor_training.activations,
fl_init=model_params.shared_training.final_layer_init,
state_dim=state_dim,
action_dim=action_dim,
)
# Build Critic Network
critic_network = CriticNetModel(
# Ensure dims match input state and scalar output
layers=[state_dim] + model_params.critic_training.layers[1:-1] + [1],
activations=model_params.critic_training.activations,
fl_init=model_params.shared_training.final_layer_init,
state_dim=state_dim,
action_dim=action_dim,
)
trainer = DDPGTrainer(
actor_network,
critic_network,
model_params,
state_normalization,
action_normalization,
min_action_range_tensor_serving,
max_action_range_tensor_serving,
use_gpu=use_gpu,
use_all_avail_gpus=use_all_avail_gpus,
)
trainer = update_model_for_warm_start(trainer)
assert type(trainer) == DDPGTrainer, "Warm started wrong model type: " + str(
type(trainer)
)
evaluator = Evaluator(
None,
model_params.rl.gamma,
trainer,
metrics_to_score=trainer.metrics_to_score,
)
super().__init__(
preprocess_handler,
trainer,
evaluator,
model_params.shared_training.minibatch_size,
)
def _get_actor_exporter(trainer, state_normalization, action_normalization):
return ActorExporter.from_state_action_normalization(
trainer.actor,
state_normalization=state_normalization,
action_normalization=action_normalization,
)
def main(params):
# Set minibatch size based on # of devices being used to train
params["shared_training"]["minibatch_size"] *= minibatch_size_multiplier(
params["use_gpu"], params["use_all_avail_gpus"]
)
rl_parameters = RLParameters(**params["rl"])
training_parameters = DDPGTrainingParameters(**params["shared_training"])
actor_parameters = DDPGNetworkParameters(**params["actor_training"])
critic_parameters = DDPGNetworkParameters(**params["critic_training"])
model_params = DDPGModelParameters(
rl=rl_parameters,
shared_training=training_parameters,
actor_training=actor_parameters,
critic_training=critic_parameters,
)
state_normalization = BaseWorkflow.read_norm_file(params["state_norm_data_path"])
action_normalization = BaseWorkflow.read_norm_file(params["action_norm_data_path"])
writer = SummaryWriter(log_dir=params["model_output_path"])
logger.info("TensorBoard logging location is: {}".format(writer.log_dir))
preprocess_handler = ContinuousPreprocessHandler(
Preprocessor(state_normalization, False),
Preprocessor(action_normalization, False),
PandasSparseToDenseProcessor(),
)
workflow = ContinuousWorkflow(
model_params,
preprocess_handler,
state_normalization,
action_normalization,
params["use_gpu"],
params["use_all_avail_gpus"],
)
train_dataset = JSONDatasetReader(
params["training_data_path"], batch_size=training_parameters.minibatch_size
)
eval_dataset = JSONDatasetReader(params["eval_data_path"], batch_size=16)
with summary_writer_context(writer):
workflow.train_network(train_dataset, eval_dataset, int(params["epochs"]))
return export_trainer_and_predictor(
workflow.trainer,
params["model_output_path"],
exporter=_get_actor_exporter(
trainer=workflow.trainer,
state_normalization=state_normalization,
action_normalization=action_normalization,
),
) # noqa
if __name__ == "__main__":
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
params = parse_args(sys.argv)
main(params)
|
import numpy as np
from gensim.models import KeyedVectors
from resources import EMBEDDING_PATH
from sklearn.metrics.pairwise import cosine_similarity
def averageEmbedding(model,sentence,lowercase=True):
if lowercase: sentence = sentence.lower()
vecs = []
for w in sentence.split(): # assume it's already tokenized
if w in model:
vecs.append( model[w] )
if vecs==[]: vecs.append(np.zeros((model.vector_size,)))
return np.mean(vecs,axis=0)
if __name__ == "__main__":
model = KeyedVectors.load_word2vec_format(EMBEDDING_PATH)
avg_list = []
for sentence in ['this is a nice apple .','this apple is quite good.']:
avg = averageEmbedding(model,sentence)
avg_list.append(avg.reshape(1,-1))
print(" ".join([str(x) for x in list(avg)]))
print('similarity: {}'.format(cosine_similarity(avg_list[0],avg_list[1])))
|
import hashlib
import json
def get_hash(block):
"""
Creates a SHA-256 hash of a Block
:param block: Block
"""
# We must make sure that the Dictionary is Ordered, or we'll have inconsistent hashes
block_string = json.dumps(block, sort_keys=True).encode()
return hashlib.sha256(block_string).hexdigest()
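# Minimal usage sketch (added for illustration): the block fields below are
# hypothetical -- any JSON-serializable dict works, and sort_keys=True above
# guarantees that equal dicts always hash to the same digest.
if __name__ == "__main__":
    sample_block = {"index": 1, "previous_hash": "0" * 64, "transactions": []}
    print(get_hash(sample_block))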
|
print(1/3) # Division returns float, even dividing two ints.
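# For contrast (added for illustration): floor division keeps the int type.
# print(1 // 3)  # -> 0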
|
# encoding: UTF-8
"""
横盘突破策略
注意事项:作者不对交易盈利做任何保证,策略代码仅供参考
"""
from __future__ import division
import numpy as np
import talib
import time
from cyvn.trader.vtObject import VtBarData
from cyvn.trader.vtConstant import EMPTY_STRING
from cyvn.trader.app.ctaStrategy.ctaTemplate import (CtaTemplate,
BarGenerator,
ArrayManager)
########################################################################
class HorizBreakoutStrategy(CtaTemplate):
"""基于Adxr的交易策略"""
className = 'AdxrStrategy'
author = u'用Python的交易员'
    # Strategy parameters
    trailingPrcnt = 0.8             # trailing stop percentage
    initDays = 30                   # number of days of data used for initialization
    fixedSize = 1                   # size of each trade
    aPeriod = 11                    # lookback window length
    # Strategy variables
    intraTradeHigh = 0              # highest price during the holding period
    intraTradeLow = 0               # lowest price during the holding period
buy_high = 0
sell_low = 0
buy_price = 0
sell_price = 0
targetPos = 0
    buyOrderIDList = []             # order IDs of OCO buy-to-open orders
    shortOrderIDList = []           # order IDs of OCO sell-to-open orders
    orderList = []                  # list of working order IDs
    # Parameter list: holds the names of the parameters
paramList = ['name',
'className',
'author',
'vtSymbol',
'aPeriod']
    # Variable list: holds the names of the variables
varList = ['inited',
'trading',
'pos',
'targetPos',
'buy_high',
'sell_low',
'buy_price',
'sell_price'
]
    # Sync list: holds the names of variables to be saved to the database
syncList = ['pos',
'intraTradeHigh',
'intraTradeLow']
#----------------------------------------------------------------------
def __init__(self, ctaEngine, setting):
"""Constructor"""
super(HorizBreakoutStrategy, self).__init__(ctaEngine, setting)
        self.bg = BarGenerator(self.onBar, 30, self.onMyBar)  # create the 30-minute bar generator
self.am = ArrayManager(size=50)
self.buyOrderIDList = []
self.shortOrderIDList = []
self.orderList = []
#----------------------------------------------------------------------
def onInit(self):
"""初始化策略(必须由用户继承实现)"""
self.writeCtaLog(u'%s策略初始化' %self.name)
# 载入历史数据,并采用回放计算的方式初始化策略数值
initData = self.loadBar(self.initDays)
for bar in initData:
self.onBar(bar)
self.putEvent()
#----------------------------------------------------------------------
def onStart(self):
"""启动策略(必须由用户继承实现)"""
self.writeCtaLog(u'%s策略启动' %self.name)
self.putEvent()
#----------------------------------------------------------------------
def onStop(self):
"""停止策略(必须由用户继承实现)"""
self.writeCtaLog(u'%s策略停止' %self.name)
self.putEvent()
#----------------------------------------------------------------------
def onTick(self, tick):
"""收到行情TICK推送(必须由用户继承实现)"""
self.bg.updateTick(tick)
self.putEvent()
#----------------------------------------------------------------------
def onBar(self, bar):
"""收到Bar推送(必须由用户继承实现)"""
self.bg.updateBar(bar)
# 撤销之前发出的尚未成交的委托(包括限价单和停止单)
for orderID in self.orderList:
self.cancelOrder(orderID)
self.orderList = []
if self.targetPos > 0:
if self.pos == 0:
orderID = self.buy(bar.close + 5, self.fixedSize)
self.orderList.extend(orderID)
if self.pos < 0:
orderID = self.cover(bar.close + 5, abs(self.pos))
self.orderList.extend(orderID)
time.sleep(3)
orderID = self.buy(bar.close + 5, self.fixedSize)
self.orderList.extend(orderID)
if self.targetPos < 0:
if self.pos == 0:
orderID = self.short(bar.close - 5, self.fixedSize)
self.orderList.extend(orderID)
if self.pos > 0:
orderID = self.sell(bar.close - 5, abs(self.pos))
self.orderList.extend(orderID)
time.sleep(3)
orderID = self.short(bar.close - 5, self.fixedSize)
self.orderList.extend(orderID)
if self.targetPos == 0:
if self.pos > 0:
orderID = self.sell(bar.close - 5, abs(self.pos))
self.orderList.extend(orderID)
if self.pos < 0:
orderID = self.cover(bar.close + 5, abs(self.pos))
self.orderList.extend(orderID)
#----------------------------------------------------------------------
def onMyBar(self, bar):
"""收到30分钟K线"""
# 保存K线数据
am = self.am
am.updateBar(bar)
if not am.inited:
return
        # Compute indicator values
h_high = max(am.high[-self.aPeriod:-1])
l_low = min(am.low[-self.aPeriod:-1])
vibrate = h_high - l_low < 0.04*l_low
        # Decide whether to trade
        # Long entry
if vibrate and am.openArray[-1] + am.highArray[-1] + am.lowArray[-1] + am.closeArray[-1] > 4*h_high :
self.buy_price = am.close[-1]
self.buy_high = am.high[-1]
self.targetPos = self.fixedSize
        # Short entry
if vibrate and am.openArray[-1] + am.highArray[-1] + am.lowArray[-1] + am.closeArray[-1] < 4*l_low:
self.sell_price = am.close[-1]
self.sell_low = am.low[-1]
self.targetPos = -self.fixedSize
if self.pos > 0 and am.high[-1] > self.buy_high:
self.buy_high = am.high[-1]
if self.pos < 0 and am.low[-1] < self.sell_low:
self.sell_low = am.low[-1]
        # Close the long position
if self.pos > 0 and ((2*am.close[-1] < self.buy_price + self.buy_high
and self.buy_high > self.buy_price + 40) or am.close[-1] < l_low):
# orderID = self.sell(bar.close - 5, abs(self.pos))
# self.orderList.extend(orderID)
self.targetPos = 0
        # Close the short position
if self.pos < 0 and ((2*am.close[-1] > self.sell_price + self.sell_low
and self.sell_low < self.sell_price - 40) or am.close[-1] >h_high):
# orderID = self.cover(bar.close + 5, abs(self.pos))
# self.orderList.extend(orderID)
self.targetPos = 0
        # Sync data to the database
        self.saveSyncData()
        # Emit a status update event
self.putEvent()
#----------------------------------------------------------------------
def onOrder(self, order):
"""收到委托变化推送(必须由用户继承实现)"""
pass
#----------------------------------------------------------------------
def onTrade(self, trade):
# if self.pos != 0:
        #     # After a long open order is filled, cancel the outstanding short orders
# if self.pos > 0:
# for shortOrderID in self.shortOrderIDList:
# self.cancelOrder(shortOrderID)
        #     # and vice versa
# elif self.pos < 0:
# for buyOrderID in self.buyOrderIDList:
# self.cancelOrder(buyOrderID)
#
        #     # Remove the order IDs
# for orderID in (self.buyOrderIDList + self.shortOrderIDList):
# if orderID in self.orderList:
# self.orderList.remove(orderID)
        # Emit a status update event
#self.putEvent()
pass
#----------------------------------------------------------------------
def onStopOrder(self, so):
"""停止单推送"""
pass |
import re
from typing import Optional, List
from controller.helpers import get_player_colour
from data import enums, params, consts
from data.enums import ResponseFlags as rF
from state.game import GameState
from state.context import context
from lib.amongUsParser.gameEngine import PlayerClass
UNKNOWN = "unknown"
class Interpreter:
def __init__(self, game_state: GameState, player: PlayerClass, message: str):
self.game_state = game_state
self.player = player
self.message = message
self._message_lower = re.sub(r'[^\w\s?]', '', self.message.strip().lower())
def interpret(self) -> Optional[str]:
if not self.game_state.game_started:
return None
me = self.game_state.me
if not me:
print('Player info not loaded - please leave and rejoin the lobby.')
return None
if self.game_state.meeting_reason is False or self.player.playerId == me.playerId:
return None
player_name = self.player.name.decode("utf-8")
player_colour: str = UNKNOWN
if self.player.color is not False:
player_colour = get_player_colour(self.player)
if me.alive and not self.player.alive:
print(player_name, f'({player_colour}): [DEAD CHAT HIDDEN]')
return None
players = {get_player_colour(p): p
for p in self.game_state.get_players(include_me=True)}
aliases = {
# Players
"dark green": "green",
"light green": "lime",
"dark blue": "blue",
"light blue": "cyan",
"purp": "purple",
"orang": "orange",
"yallow": "yellow",
# Locations
"cafeteria": "caf",
"coms": "comms",
"navigation": "nav",
"reac": "reactor",
"medbay": "med bay",
"elec": "electrical",
"elect": "electrical",
# Misspellings
"imposter": "impostor",
}
for k, v in aliases.items():
self._message_lower = re.sub(rf'\b{k}\b', v, self._message_lower)
# Check for locations
curr_map = self.game_state.map
started_by = self.game_state.get_player_colour_from_id(self.game_state.meeting_started_by.playerId) \
if self.game_state.meeting_started_by else None
if curr_map is not None and rF.BODY_NOT_LOCATED in self.game_state.get_response_flags() \
and started_by == player_colour:
for loc in params.location[enums.AUMap.COMMON] + params.location[curr_map]:
if self._find(rf'\b{loc}\b'):
context.player_loc['body'] = loc
context.response_flags_remove(rF.BODY_NOT_LOCATED)
print("Location identified:", loc)
# Check for names
target_colours = []
for colour in [p for p in players if p != player_colour]:
p = players[colour]
if p.name is False or p.color is False:
continue
name = p.name.decode("utf-8").strip()
if name.lower() in ["i", "he", "she", "ok", "impostor", "imposter"] \
or len(name) == 0: # pronoun and unhelpful names
name = None
if self._find(rf'\b{colour}\b') \
or (name is not None and self._find(rf'\b{name.lower()}\b')):
target_colours.append(colour)
if len(target_colours) == 0: # Determine target implicitly
if self._find(r"\b(self( report)?)\b"):
target_colours.append(started_by)
target_is_me = self.game_state.me_colour in target_colours
verb = offset = None
flags = []
if len(target_colours) > 0:
verb, offset = "mentioned", -0.5
for target_colour in target_colours:
if self._find(r"\b(sus|vent(ed)?|faked?|kill(ed)?|body|self report(ed)?|imp(ostor)?)\b") \
or self._find(rf"\b(vote|it'?s?) {target_colour}\b") \
or self._message_lower == target_colour:
verb, offset = "sussed", -1
break
elif self._find(r"\b(safe|good|clear(ed)?)\b") \
or self._find(rf"\b(not|with|me and) {target_colour}\b") \
or self._find(rf"{target_colour} (and i|((had|did|has|do) )?(trash|chute|scan(ned)?|med))\b"):
verb, offset = "vouched for", 1
break
if verb == "sussed" and target_is_me:
if self._find(r"\b(vent(ed)?)\b"):
flags.append(rF.ACCUSED_VENT)
if verb:
if self.player.alive and player_colour != UNKNOWN and len(context.trust_map) != 0:
for target_colour in target_colours:
context.trust_map_score_offset(player_colour, target_colour, offset)
print('>>', player_colour, verb, ', '.join(target_colours), '<<')
if consts.debug_chat and len(flags) > 0:
print('Adding flags:', flags)
for f in flags:
context.response_flags_append(f)
print(player_name, f'({player_colour}):', self.message)
return self.message
def _find(self, pattern: str) -> bool:
return len(re.findall(pattern, self._message_lower)) > 0
def _find_all(self, pattern: str) -> List[str]:
return re.findall(pattern, self._message_lower)
|
# -*- coding: utf-8 -*-
'''
:codeauthor: Pedro Algarvio ([email protected])
:copyright: Copyright 2017 by the SaltStack Team, see AUTHORS for more details.
tests.unit.beacons.test_status
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Status beacon test cases
'''
# Python libs
from __future__ import absolute_import
import sys
# Salt libs
import salt.config
import salt.loader
from salt.beacons import status
import salt.modules.status as status_module
# Salt testing libs
from tests.support.unit import TestCase
from tests.support.mixins import LoaderModuleMockMixin
class StatusBeaconTestCase(TestCase, LoaderModuleMockMixin):
'''
Test case for salt.beacons.status
'''
def setup_loader_modules(self):
opts = salt.config.DEFAULT_MINION_OPTS
module_globals = {
'__opts__': opts,
'__salt__': 'autoload',
'__context__': {},
'__grains__': {'kernel': 'Linux'}
}
return {
status: module_globals,
status_module: module_globals
}
def test_empty_config(self, *args, **kwargs):
config = []
ret = status.validate(config)
self.assertEqual(ret, (True, 'Valid beacon configuration'))
ret = status.beacon(config)
if sys.platform.startswith('win'):
expected = []
else:
expected = sorted(['loadavg', 'meminfo', 'cpustats', 'vmstats', 'time'])
self.assertEqual(sorted(list(ret[0]['data'])), expected)
def test_deprecated_dict_config(self):
config = {'time': ['all']}
ret = status.validate(config)
self.assertEqual(ret, (False, 'Configuration for status beacon must be a list.'))
def test_list_config(self):
config = [{'time': ['all']}]
ret = status.validate(config)
self.assertEqual(ret, (True, 'Valid beacon configuration'))
ret = status.beacon(config)
if sys.platform.startswith('win'):
expected = []
else:
expected = ['time']
self.assertEqual(list(ret[0]['data']), expected)
|
from .attention import Attention
from .ffnn import Ffnn
from .mu_sigma_ffnn import MuSigmaFfnn
from .decoders.pointer_gen_network import PointerGenNetwork
from .out_embds import OutEmbds
|
import os
import gym
from stable_baselines3 import PPO
from stable_baselines3.common.vec_env import DummyVecEnv
from stable_baselines3.common.callbacks import EvalCallback, StopTrainingOnRewardThreshold
from stable_baselines3.common.evaluation import evaluate_policy
# VARIABLES
environment_name = 'CartPole-v0'
log_path = os.path.join('Training', 'Logs')
PPO_PATH = os.path.join('Training', 'Saved Models', 'PPO_MODEL_CartPole')
env = gym.make(environment_name)
env = DummyVecEnv([lambda: env])
# USING GPU, importing algorithm PPO
model = PPO('MlpPolicy', env, verbose=1)
# Delete the model
# del model
# Load it from the folder
model = PPO.load(PPO_PATH, env=env)
# Callbacks - Stop training once it reaches 200 average, verbose is for logging
save_path = os.path.join('Training', 'Saved Models')
stop_callback = StopTrainingOnRewardThreshold(reward_threshold=200, verbose=1)
# Every 10,000 timesteps the callback evaluates the model; once the average
# reward reaches 200 or more it stops the training,
# then saves the best model to the save path
eval_callback = EvalCallback(env,
callback_on_new_best=stop_callback,
eval_freq=10000,
best_model_save_path=save_path,
verbose=1)
# With callbacks
# model = PPO('MlpPolicy', env, verbose=1, tensorboard_log=log_path)
# Changing Policies
net_arch = [dict(pi=[128, 128, 128, 128], vf=[128, 128, 128, 128])]
# model = PPO('MlpPolicy', env, verbose=1, policy_kwargs={'net_arch': net_arch})
# model.learn(total_timesteps=200, callback=eval_callback)
# Training
# model.learn(total_timesteps=50000)
# Training with Callbacks
# model.learn(total_timesteps=20000, callback=eval_callback)
# Evaluate our policy
evaluate_policy(model, env, n_eval_episodes=100, render=True)
# TEST
# episodes = 5
# for episode in range(1, episodes+1):
# obs = env.reset()
# done = False
# score = 0
#
# while not done:
# env.render()
# action, _ = model.predict(obs) # Using Model Here
# obs, reward, done, info = env.step(action)
# score += reward
# print('Episode:{} Score:{}'.format(episode, score))
# env.close()
# Save the model into a folder
# model.save(PPO_PATH)
|
# -*- coding: utf-8 -*-
"""
Created on Sat Jun 29 19:34:04 2019
@author: Administrator
"""
class Solution:
def generatePossibleNextMoves(self, s: str) -> list:
ans = []
tmp = list(s)
for k in range(len(tmp)-1):
if tmp[k] == '+' and tmp[k+1] == '+':
tmp[k] = '-'
tmp[k+1] = '-'
ans.append(''.join(tmp))
tmp = list(s)
return ans
#class Solution(object):
# def generatePossibleNextMoves(self, s):
# """
# :type s: str
# :rtype: List[str]
# """
# res = []
# for i in range(len(s) - 1):
# if s[i:i+2] == "++":
# res.append(s[:i] + "--" + s[i+2:])
# return res
solu = Solution()
s = "++++"
print(solu.generatePossibleNextMoves(s)) |
import numpy as np
import cv2
def predict_puzzle(model, img_path):
image = cv2.imread(img_path, cv2.IMREAD_GRAYSCALE)
crop_size = int((image.shape[0]) / 9)
crops = []
for col in range(9):
for row in range(9):
crop = image[int(col * crop_size): int(col * crop_size + crop_size),
int(row * crop_size): int(row * crop_size + crop_size)]
crops.append(crop)
predictions = []
for crop in crops:
crop_input = np.array(crop) / 255
        # reshape() returns a new array; assign it back and add a batch dim for predict()
        crop_input = crop_input.reshape(1, crop_size, crop_size, 1)
prediction = model.predict(crop_input)
predictions.append(prediction)
print(predictions)
|
from pysyne.draw import Path
from pysyne.visualizer import vis_for
from pysyne.processor import Processor, WINDOW_TYPES, window_type
from pysyne.main import main
|
# "print" test
from wrap import *
def printTest(objet):
for i in range(1,30): print("-", end=' ')
print("\nPrint test of", objet)
for i in range(1,30): print("-", end=' ')
print('')
exec('print(' + objet + ')')
#import apps.qs.cont2;
#domain = apps.qs.cont2.getDomain()
import apps.ale.qsUzi
domain = apps.ale.qsUzi.getDomain(0)
domain.build()
print('\n------------------------------------------\n')
a=Matr2(1,2,3,4)
print(a)
a=Matr3(1,2,3,4)
print(a)
win = VizWin()
print(win)
def fct(x,y,z):
print('plouf')
print("x=", x)
print("y=", y)
print("z=", z)
return x*x+y*y+z*z
pfct = PythonMultiParameterFunction(fct,3)
print(pfct)
keys = KeyList(Key(TX),Key(TY),Key(TZ),Key(TM))
print(keys)
print('\n------------------------------------------\n')
#printTest('domain.findObject(ELEMENTSET_ID)')
#printTest('domain.getGeometry().getPointSet()')
#printTest('domain.findObject(TOPOLOGY_ID).getPointSet()')
#printTest('domain.findObject(NODESET_ID)')
printTest('domain.getGeometry().getCurveSet()')
printTest('domain.getGeometry().getCurveSet()(1)')
printTest('domain.getInteractionSet()')
printTest('domain.getInteractionSet()(99)')
printTest('domain.getLoadingSet()')
printTest('domain.getMaterialSet()')
printTest('domain.getMaterialSet()[1]')
printTest('domain.getFixationSet()')
printTest('domain.getDofSet()')
printTest('domain')
printTest('domain.getMetafor().get(0)')
#printTest('domain.getPartition()')
#printTest('domain.getConnexion12()')
#printTest('domain.getMetafor().get(0).findDBSet(TX|RE)')
#printTest('domain.findDBSet(TX|AB)')
printTest('domain.getMetafor()')
printTest('domain.getMetafor().getAleMethod()')
printTest('domain.getMetafor().getAleMethod().getReZoningStep()')
|
from abc import ABC, abstractmethod
from osp.core.session.registry import Registry
from osp.core.session.result import returns_query_result
class Session(ABC):
"""
Abstract Base Class for all Sessions.
Defines the common standard API and sets the registry.
"""
def __init__(self):
self._registry = Registry()
self.root = None
def __enter__(self):
return self
def __exit__(self, *args):
self.close()
def close(self):
pass
@abstractmethod
def __str__(self):
pass
def _store(self, cuds_object):
"""Store a copy of given cuds_object in the session.
Return the stored object.
:param cuds_object: The cuds_object to store.
:type cuds_object: Cuds
:return: The stored cuds_object.
:rtype: Cuds
"""
assert cuds_object.session == self
self._registry.put(cuds_object)
if self.root is None:
self.root = cuds_object.uid
@returns_query_result
def load(self, *uids):
"""Load the cuds_objects of the given uids.
:param uids: The uids of the cuds_objects to load.
:type uids: UUID
:return: The fetched Cuds objects.
:rtype: Iterator[Cuds]
"""
for uid in uids:
try:
yield self._registry.get(uid)
except KeyError:
yield None
def prune(self, rel=None):
"""Remove all elements not reachable from the sessions root.
Only consider given relationship and its subclasses.
:param rel: Only consider this relationship to calculate reachability.
:type rel: Relationship
"""
deleted = self._registry.prune(self.root, rel=rel)
for d in deleted:
self._notify_delete(d)
def delete_cuds_object(self, cuds_object):
"""Remove a CUDS object. Will (for now) not delete the cuds objects
contained.
Args:
cuds_object (Cuds): The CUDS object to be deleted
"""
from osp.core.namespaces import cuba
if cuds_object.session != self:
cuds_object = next(self.load(cuds_object.uid))
if cuds_object.get(rel=cuba.relationship):
cuds_object.remove(rel=cuba.relationship)
del self._registry[cuds_object.uid]
self._notify_delete(cuds_object)
@abstractmethod
def _notify_delete(self, cuds_object):
"""This method is called if some object from the registry is deleted
by the prune() method.
:param cuds_object: The cuds_object that has been deleted
:type cuds_object: Cuds
"""
pass
@abstractmethod
def _notify_update(self, cuds_object):
"""This method is called if some object has been updated-
:param cuds_object: The cuds_object that has been updated.
:type cuds_object: Cuds
"""
pass
def sync(self):
pass
@abstractmethod
def _notify_read(self, cuds_object):
"""This method is called when the user accesses the attributes or the
relationships of the cuds_object cuds_object.
:param cuds_object: The cuds_object that has been accessed.
:type cuds_object: Cuds
"""
pass
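# ---------------------------------------------------------------------------
# Hedged sketch (added for illustration, not part of osp-core): the minimal
# set of abstract methods a concrete session must provide, based only on the
# ABC above. A real backend would do useful work in the _notify_* hooks.
class _InMemorySession(Session):
    def __str__(self):
        return "InMemorySession"
    def _notify_delete(self, cuds_object):
        pass
    def _notify_update(self, cuds_object):
        pass
    def _notify_read(self, cuds_object):
        pass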
|
import sys
line = sys.stdin.readline()
print(line)
|
from string import ascii_lowercase, digits
ships = [("Carrier", 5),
("Battleship", 4),
("Cruiser", 3),
("Submarine",2),
("Destroyer", 2)]
def parse_ship_location(loc):
loc = loc.lower().strip().replace(" ", "")
row = ascii_lowercase.index(loc[0])
col = digits.index(loc[1])
    if loc[-1] == 'a':      # 'a' = across (horizontal)
        dx, dy = 1, 0
    elif loc[-1] == 'd':    # 'd' = down (vertical)
        dx, dy = 0, 1
    else:
        raise ValueError("Ship orientation must be 'a' (across) or 'd' (down)")
    return col, row, dx, dy
def parse_bomb_location(loc):
loc = loc.lower().strip().replace(" ", "")
row = ascii_lowercase.index(loc[0])
col = digits.index(loc[1])
return col, row
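# Worked examples (added for illustration), matching the parsing above where
# 'a' means across and 'd' means down:
#   parse_ship_location("b3 a") -> (3, 1, 1, 0)   # column 3, row 'b', horizontal
#   parse_bomb_location("d7")   -> (7, 3)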
class Board:
# potshots return one of these
MISS = 0
NEAR = 1
HIT = 2
# the board representation uses one of these
SHIP = "S"
GUESS = "."
NEAR_GUESS = "!"
SUNK = "X"
def __init__(self, width=10, height=10):
assert 0 < width <= 10
assert 0 < height <= 10
self.mx = width
self.my = height
# Coordinate pair -> ship, miss, near-miss.
self.sea = {}
def add_ship(self, x, y, dx, dy, size):
"""Add a ship fo a given length
If it passes off the board, or abuts an already places ship, raise a ValueError
"""
cells = {(x + i * dx, y + i * dy) for i in range(size)}
if any(cx < 0 or cx >= self.mx or cy < 0 or cy >= self.my
for (cx, cy) in cells):
raise ValueError("That ship does not lie on the board")
neighbours = {(cx + dx, cy + dy)
for dx in (-1, 0, 1)
for dy in (-1, 0, 1)
for (cx, cy) in cells}
if any((nx, ny) in self.sea
for (nx, ny) in neighbours):
raise ValueError("That ship abuts another")
for (cx, cy) in cells:
self.add_counter(cx, cy)
def add_counter(self, x, y):
self.sea[x, y] = Board.SHIP
def potshot(self, x, y):
"""Return MISS, NEAR or HIT"""
if x < 0 or self.mx <= x or y < 0 or self.my <= y:
raise ValueError("Off-grid shot")
if self.sea.get((x, y)) == Board.SHIP:
self.sea[x, y] = Board.SUNK
return Board.HIT
elif any(self.sea.get((x + dx, y + dy)) in (Board.SHIP, Board.SUNK)
for dx in (-1, 0, 1)
for dy in (-1, 0, 1)):
self.sea[x, y] = Board.NEAR_GUESS
return Board.NEAR
else:
self.sea[x, y] = Board.GUESS
return Board.MISS
def defeated(self):
return not any(cell == Board.SHIP
for _, cell in self.sea.items())
def display(self):
print('\n'.join(self.lines()))
def lines(self):
d = [" " + digits[:self.mx]]
for y in range(self.my):
line = ""
for x in range(self.mx):
line += self.sea.get((x, y), '~')
d.append(ascii_lowercase[y] + ' ' + line)
return d
def other_lines(self):
d = [" " + digits[:self.mx]]
for y in range(self.my):
line = ""
for x in range(self.mx):
line += self.sea.get((x, y), '~').replace(Board.SHIP, '~')
d.append(ascii_lowercase[y] + ' ' + line)
return d
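# Minimal usage sketch (added for illustration; the coordinates are arbitrary):
if __name__ == "__main__":
    board = Board()
    x, y, dx, dy = parse_ship_location("a1 a")          # Destroyer placed across from a1
    board.add_ship(x, y, dx, dy, 2)
    print(board.potshot(*parse_bomb_location("a1")))    # prints 2 == Board.HIT
    board.display()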
|
# Copyright (c) Microsoft. All rights reserved.
# Licensed under the MIT license. See LICENSE.md file in the project root
# for full license information.
# ==============================================================================
from __future__ import division
import numpy as np
from ..learners import *
from .. import parameter, input_variable
import pytest
LR_SCHEDULE_PARAMS = [
((0.2, UnitType.sample), [0.2]),
((0.2, UnitType.sample), [0.2, 0.2, 0.2, 0.2]),
(([0.2,0.4], UnitType.sample, 5), [0.2]*5+[0.4]*20),
(([(3,0.2),(2,0.4),(1,0.8)], UnitType.sample, 5), [0.2]*15+[0.4]*10+[0.8]*20),
]
MOMENTUM_SCHEDULE_PARAMS = [
((0.2,), [0.2]),
((0.2,), [0.2, 0.2, 0.2, 0.2]),
(([0.2,0.4], 5), [0.2]*5+[0.4]*20),
(([(3,0.2),(2,0.4),(1,0.8)], 5), [0.2]*15+[0.4]*10+[0.8]*20),
]
@pytest.mark.parametrize("params, expectation", LR_SCHEDULE_PARAMS)
def test_learning_rate_schedule(params, expectation):
l = learning_rate_schedule(*params)
assert [l[i] for i in range(len(expectation))] == expectation
def sweep_based_schedule_fails():
with pytest.raises(Exception):
learning_rate_schedule([1], unit=UnitType.sample, epoch_size=0)
def test_momentum_schedule():
m = 2500
ms = momentum_as_time_constant_schedule([m])
assert ms[0] == np.exp(-1.0 / np.asarray(m))
ms = momentum_as_time_constant_schedule(m)
assert ms[0] == np.exp(-1.0 / np.asarray(m))
mlist = [980, 520]
msl = momentum_as_time_constant_schedule(mlist)
expected = np.exp(-1.0 / np.asarray(mlist))
assert all(mi == ei for mi,ei in zip(msl,expected))
@pytest.mark.parametrize("params, expectation", MOMENTUM_SCHEDULE_PARAMS)
def test_momentum_schedule_per_sample(params, expectation):
l = momentum_schedule(*params)
assert [l[i] for i in range(len(expectation))] == expectation
def test_learner_init():
i = input_variable(shape=(1,),
needs_gradient=True,
name='a')
w = parameter(shape=(1,))
res = i * w
learner = sgd(res.parameters, lr=learning_rate_schedule(0.1, UnitType.sample))
assert learner.learning_rate() == 0.1
    learner.reset_learning_rate(learning_rate_schedule([1,2,3], UnitType.minibatch))
assert learner.learning_rate() == 1.0
learner_parameter = learner.parameters
from ..ops.variables import Parameter
param = learner_parameter[0]
assert isinstance(param, Parameter)
unit_gain_value = default_unit_gain_value()
assert unit_gain_value
momentum_time_constant = momentum_as_time_constant_schedule(1100)
lr_per_sample = learning_rate_schedule(0.1, UnitType.sample)
momentum_sgd(res.parameters, lr_per_sample, momentum_time_constant)
momentum_sgd(res.parameters, lr_per_sample, momentum_time_constant, unit_gain_value)
momentum_sgd(res.parameters, lr_per_sample, momentum_time_constant, unit_gain=unit_gain_value)
set_default_unit_gain_value(False)
unit_gain_value = default_unit_gain_value()
assert not unit_gain_value
lr_per_sample = learning_rate_schedule([0.1, 0.2], UnitType.sample)
nesterov(res.parameters, lr=lr_per_sample, momentum=momentum_time_constant)
nesterov(res.parameters, lr_per_sample, momentum_time_constant, unit_gain_value)
nesterov(res.parameters, lr=lr_per_sample, momentum=momentum_time_constant, unit_gain=unit_gain_value)
lr_per_sample = learning_rate_schedule([0.1]*3 +[0.2]*2 +[0.3], UnitType.sample)
adagrad(res.parameters, lr=lr_per_sample, need_ave_multiplier=True)
set_default_unit_gain_value(True)
unit_gain_value = default_unit_gain_value()
assert unit_gain_value
lr_per_sample = learning_rate_schedule([(3,0.1), (2, 0.2), (1, 0.3)], UnitType.sample)
adam_sgd(res.parameters, lr=lr_per_sample, momentum=momentum_time_constant)
adam_sgd(res.parameters, lr_per_sample, momentum_time_constant, unit_gain_value)
adam_sgd(res.parameters, lr=lr_per_sample, momentum=momentum_time_constant, unit_gain=unit_gain_value)
gamma, inc, dec, max, min = [0.1]*5
lr_per_sample = learning_rate_schedule([0.1, 0.2], UnitType.sample, 100)
rmsprop(res.parameters, lr_per_sample, gamma, inc, dec, max, min, True)
def test_learner_update():
i = input_variable(shape=(1,),
needs_gradient=True,
name='a')
w_init = 1
w = parameter(shape=(1,), init=w_init)
res = i * w
learner = sgd(res.parameters, lr=learning_rate_schedule([0.1]*50 + [0.2]*50, UnitType.sample, 1))
assert learner.learning_rate() == 0.1
x = learner.update({w: np.asarray([[2.]], dtype=np.float32)}, 100)
assert learner.learning_rate() == 0.2
assert w.value < w_init
def test_training_parameter_schedule():
training_parameter_schedule(0.01, unit='minibatch')
training_parameter_schedule(0.01, unit='sample')
with pytest.raises(ValueError):
training_parameter_schedule(0.01, unit='not_supported')
with pytest.raises(ValueError):
training_parameter_schedule(0.01, unit=5)
def test_sweep_based_schedule(tmpdir, device_id):
from cntk.io import MinibatchSource, CTFDeserializer, StreamDef, StreamDefs
from .. import cross_entropy_with_softmax, classification_error, plus, reduce_sum
from ..train.trainer import Trainer
input_dim = 69
ctf_data = '''\
0 |S0 3:1 |S1 3:1 |# <s>
0 |S0 4:1 |# A |S1 32:1 |# ~AH
0 |S0 5:1 |# B |S1 36:1 |# ~B
0 |S0 4:1 |# A |S1 31:1 |# ~AE
0 |S0 7:1 |# D |S1 38:1 |# ~D
0 |S0 12:1 |# I |S1 47:1 |# ~IY
0 |S0 1:1 |# </s> |S1 1:1 |# </s>
2 |S0 60:1 |# <s> |S1 3:1 |# <s>
2 |S0 61:1 |# A |S1 32:1 |# ~AH
'''
ctf_file = str(tmpdir/'2seqtest.txt')
with open(ctf_file, 'w') as f:
f.write(ctf_data)
mbs = MinibatchSource(CTFDeserializer(ctf_file, StreamDefs(
features = StreamDef(field='S0', shape=input_dim, is_sparse=True),
labels = StreamDef(field='S1', shape=input_dim, is_sparse=True)
)), randomize=False)
in1 = input_variable(shape=(input_dim,))
labels = input_variable(shape=(input_dim,))
p = parameter(shape=(input_dim,), init=10)
z = plus(in1, reduce_sum(p), name='z')
ce = cross_entropy_with_softmax(z, labels)
errs = classification_error(z, labels)
lr_per_sample = learning_rate_schedule([0.3, 0.2, 0.1, 0.0], UnitType.sample)
learner = sgd(z.parameters, lr_per_sample)
trainer = Trainer(z, (ce, errs), [learner])
input_map = {
in1 : mbs.streams.features,
labels : mbs.streams.labels
}
# fetch minibatch (first sequence)
data = mbs.next_minibatch(1, input_map=input_map)
trainer.train_minibatch(data)
assert learner.learning_rate() == 0.3
# fetch minibatch (second sequence, sweep ends at this point)
data = mbs.next_minibatch(1, input_map=input_map)
trainer.train_minibatch(data)
assert learner.learning_rate() == 0.2
# fetch minibatch (both sequences -- entire sweep in one go)
data = mbs.next_minibatch(9, input_map=input_map)
trainer.train_minibatch(data)
assert learner.learning_rate() == 0.1
# fetch minibatch (multiple sweeps)
data = mbs.next_minibatch(30, input_map=input_map)
trainer.train_minibatch(data, outputs=[z.output])
assert learner.learning_rate() == 0.0
|
# coding=utf-8
# Copyright 2021 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Feature extractor class for LayoutLMv2.
"""
from typing import List, Optional, Union
import numpy as np
from PIL import Image
import requests
import json
import re
from ...feature_extraction_utils import BatchFeature, FeatureExtractionMixin
from ...file_utils import TensorType, is_pytesseract_available, requires_backends
from ...image_utils import ImageFeatureExtractionMixin, is_torch_tensor
from ...utils import logging
# soft dependency
if is_pytesseract_available():
import pytesseract
logger = logging.get_logger(__name__)
ImageInput = Union[
Image.Image, np.ndarray, "torch.Tensor", List[Image.Image], List[np.ndarray], List["torch.Tensor"] # noqa
]
def normalize_box(box, width, height):
return [
int(1000 * (box[0] / width)),
int(1000 * (box[1] / height)),
int(1000 * (box[2] / width)),
int(1000 * (box[3] / height)),
]
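# Worked example (added for illustration): a (left, top, right, bottom) box of
# (50, 100, 150, 200) on a 1000x2000 image maps onto the 0-1000 scale that
# LayoutLMv2 expects:
#   normalize_box([50, 100, 150, 200], width=1000, height=2000) == [50, 50, 150, 100]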
def apply_tesseract(image: Image.Image):
"""Applies Tesseract OCR on a document image, and returns recognized words + normalized bounding boxes."""
# apply OCR
data = pytesseract.image_to_data(image, output_type="dict")
words, left, top, width, height = data["text"], data["left"], data["top"], data["width"], data["height"]
# filter empty words and corresponding coordinates
irrelevant_indices = [idx for idx, word in enumerate(words) if not word.strip()]
words = [word for idx, word in enumerate(words) if idx not in irrelevant_indices]
left = [coord for idx, coord in enumerate(left) if idx not in irrelevant_indices]
top = [coord for idx, coord in enumerate(top) if idx not in irrelevant_indices]
width = [coord for idx, coord in enumerate(width) if idx not in irrelevant_indices]
height = [coord for idx, coord in enumerate(height) if idx not in irrelevant_indices]
# turn coordinates into (left, top, left+width, top+height) format
actual_boxes = []
for x, y, w, h in zip(left, top, width, height):
actual_box = [x, y, x + w, y + h]
actual_boxes.append(actual_box)
image_width, image_height = image.size
# finally, normalize the bounding boxes
normalized_boxes = []
for box in actual_boxes:
normalized_boxes.append(normalize_box(box, image_width, image_height))
assert len(words) == len(normalized_boxes), "Not as many words as there are bounding boxes"
return words, normalized_boxes
def get_ocr(img_name, image_width, image_height):
image_s3 = 's3://modeldocuments/data/images'
## ocr invoke
data = {
"columns": [
"path",
"psm",
"lang",
"ril",
"box"
],
"data": [
[
f"{image_s3}/{img_name}",
6,
"eng",
3,
[
]
]
]
}
headers = {'Content-type': 'application/json', 'Accept': 'text/plain'}
r = requests.post('https://dev-ocr.ice-ai.tech/invocations', data=json.dumps(data), headers=headers)
response = r.json()
words = response[0]['text']
actual_boxes = response[0]['annotion']
words = [re.sub('[^A-Za-z0-9.-:+/]+', '', word) for word in words]
# finally, normalize the bounding boxes
normalized_boxes = []
for box in actual_boxes:
normalized_boxes.append(normalize_box(box, image_width, image_height))
assert len(words) == len(normalized_boxes), "Not as many words as there are bounding boxes"
return words, normalized_boxes
class LayoutLMv2FeatureExtractor(FeatureExtractionMixin, ImageFeatureExtractionMixin):
r"""
Constructs a LayoutLMv2 feature extractor. This can be used to resize document images to the same size, as well as
to apply OCR on them in order to get a list of words and normalized bounding boxes.
This feature extractor inherits from :class:`~transformers.feature_extraction_utils.PreTrainedFeatureExtractor`
which contains most of the main methods. Users should refer to this superclass for more information regarding those
methods.
Args:
do_resize (:obj:`bool`, `optional`, defaults to :obj:`True`):
Whether to resize the input to a certain :obj:`size`.
size (:obj:`int` or :obj:`Tuple(int)`, `optional`, defaults to 224):
Resize the input to the given size. If a tuple is provided, it should be (width, height). If only an
integer is provided, then the input will be resized to (size, size). Only has an effect if :obj:`do_resize`
is set to :obj:`True`.
resample (:obj:`int`, `optional`, defaults to :obj:`PIL.Image.BILINEAR`):
An optional resampling filter. This can be one of :obj:`PIL.Image.NEAREST`, :obj:`PIL.Image.BOX`,
:obj:`PIL.Image.BILINEAR`, :obj:`PIL.Image.HAMMING`, :obj:`PIL.Image.BICUBIC` or :obj:`PIL.Image.LANCZOS`.
Only has an effect if :obj:`do_resize` is set to :obj:`True`.
apply_ocr (:obj:`bool`, `optional`, defaults to :obj:`True`):
Whether to apply the Tesseract OCR engine to get words + normalized bounding boxes.
.. note::
LayoutLMv2FeatureExtractor uses Google's Tesseract OCR engine under the hood.
"""
model_input_names = ["pixel_values"]
def __init__(self, do_resize=True, size=224, resample=Image.BILINEAR, apply_ocr=True, **kwargs):
super().__init__(**kwargs)
self.do_resize = do_resize
self.size = size
self.resample = resample
self.apply_ocr = apply_ocr
if apply_ocr:
requires_backends(self, "pytesseract")
def __call__(
self, images: ImageInput, return_tensors: Optional[Union[str, TensorType]] = None, **kwargs
) -> BatchFeature:
"""
Main method to prepare for the model one or several image(s).
Args:
images (:obj:`PIL.Image.Image`, :obj:`np.ndarray`, :obj:`torch.Tensor`, :obj:`List[PIL.Image.Image]`, :obj:`List[np.ndarray]`, :obj:`List[torch.Tensor]`):
The image or batch of images to be prepared. Each image can be a PIL image, NumPy array or PyTorch
tensor. In case of a NumPy array/PyTorch tensor, each image should be of shape (C, H, W), where C is a
number of channels, H and W are image height and width.
return_tensors (:obj:`str` or :class:`~transformers.file_utils.TensorType`, `optional`, defaults to :obj:`'np'`):
If set, will return tensors of a particular framework. Acceptable values are:
* :obj:`'tf'`: Return TensorFlow :obj:`tf.constant` objects.
* :obj:`'pt'`: Return PyTorch :obj:`torch.Tensor` objects.
* :obj:`'np'`: Return NumPy :obj:`np.ndarray` objects.
* :obj:`'jax'`: Return JAX :obj:`jnp.ndarray` objects.
Returns:
:class:`~transformers.BatchFeature`: A :class:`~transformers.BatchFeature` with the following fields:
- **pixel_values** -- Pixel values to be fed to a model, of shape (batch_size, num_channels, height,
width).
- **words** -- Optional words as identified by Tesseract OCR (only when
:class:`~transformers.LayoutLMv2FeatureExtractor` was initialized with :obj:`apply_ocr` set to ``True``).
- **boxes** -- Optional bounding boxes as identified by Tesseract OCR, normalized based on the image size
(only when :class:`~transformers.LayoutLMv2FeatureExtractor` was initialized with :obj:`apply_ocr` set to
``True``).
Examples::
>>> from transformers import LayoutLMv2FeatureExtractor
>>> from PIL import Image
>>> image = Image.open("name_of_your_document - can be a png file, pdf, etc.").convert("RGB")
>>> # option 1: with apply_ocr=True (default)
>>> feature_extractor = LayoutLMv2FeatureExtractor()
>>> encoding = feature_extractor(image, return_tensors="pt")
>>> print(encoding.keys())
>>> # dict_keys(['pixel_values', 'words', 'boxes'])
>>> # option 2: with apply_ocr=False
>>> feature_extractor = LayoutLMv2FeatureExtractor(apply_ocr=False)
>>> encoding = feature_extractor(image, return_tensors="pt")
>>> print(encoding.keys())
>>> # dict_keys(['pixel_values'])
"""
# Input type checking for clearer error
valid_images = False
# Check that images has a valid type
if isinstance(images, (Image.Image, np.ndarray)) or is_torch_tensor(images):
valid_images = True
elif isinstance(images, (list, tuple)):
if len(images) == 0 or isinstance(images[0], (Image.Image, np.ndarray)) or is_torch_tensor(images[0]):
valid_images = True
if not valid_images:
raise ValueError(
"Images must of type `PIL.Image.Image`, `np.ndarray` or `torch.Tensor` (single example),"
"`List[PIL.Image.Image]`, `List[np.ndarray]` or `List[torch.Tensor]` (batch of examples), "
f"but is of type {type(images)}."
)
is_batched = bool(
isinstance(images, (list, tuple))
and (isinstance(images[0], (Image.Image, np.ndarray)) or is_torch_tensor(images[0]))
)
if not is_batched:
images = [images]
# Tesseract OCR to get words + normalized bounding boxes
if self.apply_ocr:
words_batch = []
boxes_batch = []
for image in images:
words, boxes = apply_tesseract(self.to_pil_image(image))
words_batch.append(words)
boxes_batch.append(boxes)
# transformations (resizing)
if self.do_resize and self.size is not None:
images = [self.resize(image=image, size=self.size, resample=self.resample) for image in images]
images = [self.to_numpy_array(image, rescale=False) for image in images]
# flip color channels from RGB to BGR (as Detectron2 requires this)
images = [image[::-1, :, :] for image in images]
# return as BatchFeature
data = {"pixel_values": images}
encoded_inputs = BatchFeature(data=data, tensor_type=return_tensors)
if self.apply_ocr:
encoded_inputs["words"] = words_batch
encoded_inputs["boxes"] = boxes_batch
return encoded_inputs
class LayoutLMv2FeatureExtractorV2(FeatureExtractionMixin, ImageFeatureExtractionMixin):
r"""
Constructs a LayoutLMv2 feature extractor. This can be used to resize document images to the same size, as well as
to apply OCR on them in order to get a list of words and normalized bounding boxes.
This feature extractor inherits from :class:`~transformers.feature_extraction_utils.PreTrainedFeatureExtractor`
which contains most of the main methods. Users should refer to this superclass for more information regarding those
methods.
Args:
do_resize (:obj:`bool`, `optional`, defaults to :obj:`True`):
Whether to resize the input to a certain :obj:`size`.
size (:obj:`int` or :obj:`Tuple(int)`, `optional`, defaults to 224):
Resize the input to the given size. If a tuple is provided, it should be (width, height). If only an
integer is provided, then the input will be resized to (size, size). Only has an effect if :obj:`do_resize`
is set to :obj:`True`.
resample (:obj:`int`, `optional`, defaults to :obj:`PIL.Image.BILINEAR`):
An optional resampling filter. This can be one of :obj:`PIL.Image.NEAREST`, :obj:`PIL.Image.BOX`,
:obj:`PIL.Image.BILINEAR`, :obj:`PIL.Image.HAMMING`, :obj:`PIL.Image.BICUBIC` or :obj:`PIL.Image.LANCZOS`.
Only has an effect if :obj:`do_resize` is set to :obj:`True`.
apply_ocr (:obj:`bool`, `optional`, defaults to :obj:`True`):
Whether to apply the Tesseract OCR engine to get words + normalized bounding boxes.
.. note::
LayoutLMv2FeatureExtractor uses Google's Tesseract OCR engine under the hood.
"""
model_input_names = ["pixel_values"]
def __init__(self, do_resize=True, size=224, resample=Image.BILINEAR, apply_ocr=True, **kwargs):
super().__init__(**kwargs)
self.do_resize = do_resize
self.size = size
self.resample = resample
self.apply_ocr = apply_ocr
def __call__(
self, images: ImageInput, img_name: str, return_tensors: Optional[Union[str, TensorType]] = None, **kwargs
) -> BatchFeature:
"""
Main method to prepare for the model one or several image(s).
Args:
images (:obj:`PIL.Image.Image`, :obj:`np.ndarray`, :obj:`torch.Tensor`, :obj:`List[PIL.Image.Image]`, :obj:`List[np.ndarray]`, :obj:`List[torch.Tensor]`):
The image or batch of images to be prepared. Each image can be a PIL image, NumPy array or PyTorch
tensor. In case of a NumPy array/PyTorch tensor, each image should be of shape (C, H, W), where C is a
number of channels, H and W are image height and width.
return_tensors (:obj:`str` or :class:`~transformers.file_utils.TensorType`, `optional`, defaults to :obj:`'np'`):
If set, will return tensors of a particular framework. Acceptable values are:
* :obj:`'tf'`: Return TensorFlow :obj:`tf.constant` objects.
* :obj:`'pt'`: Return PyTorch :obj:`torch.Tensor` objects.
* :obj:`'np'`: Return NumPy :obj:`np.ndarray` objects.
* :obj:`'jax'`: Return JAX :obj:`jnp.ndarray` objects.
Returns:
:class:`~transformers.BatchFeature`: A :class:`~transformers.BatchFeature` with the following fields:
- **pixel_values** -- Pixel values to be fed to a model, of shape (batch_size, num_channels, height,
width).
- **words** -- Optional words as identified by Tesseract OCR (only when
:class:`~transformers.LayoutLMv2FeatureExtractor` was initialized with :obj:`apply_ocr` set to ``True``).
- **boxes** -- Optional bounding boxes as identified by Tesseract OCR, normalized based on the image size
(only when :class:`~transformers.LayoutLMv2FeatureExtractor` was initialized with :obj:`apply_ocr` set to
``True``).
Examples::
>>> from transformers import LayoutLMv2FeatureExtractor
>>> from PIL import Image
>>> image = Image.open("name_of_your_document - can be a png file, pdf, etc.").convert("RGB")
>>> # option 1: with apply_ocr=True (default)
>>> feature_extractor = LayoutLMv2FeatureExtractor()
>>> encoding = feature_extractor(image, return_tensors="pt")
>>> print(encoding.keys())
>>> # dict_keys(['pixel_values', 'words', 'boxes'])
>>> # option 2: with apply_ocr=False
>>> feature_extractor = LayoutLMv2FeatureExtractor(apply_ocr=False)
>>> encoding = feature_extractor(image, return_tensors="pt")
>>> print(encoding.keys())
>>> # dict_keys(['pixel_values'])
"""
# Input type checking for clearer error
valid_images = False
# Check that images has a valid type
if isinstance(images, (Image.Image, np.ndarray)) or is_torch_tensor(images):
valid_images = True
elif isinstance(images, (list, tuple)):
if len(images) == 0 or isinstance(images[0], (Image.Image, np.ndarray)) or is_torch_tensor(images[0]):
valid_images = True
if not valid_images:
raise ValueError(
"Images must of type `PIL.Image.Image`, `np.ndarray` or `torch.Tensor` (single example),"
"`List[PIL.Image.Image]`, `List[np.ndarray]` or `List[torch.Tensor]` (batch of examples), "
f"but is of type {type(images)}."
)
is_batched = bool(
isinstance(images, (list, tuple))
and (isinstance(images[0], (Image.Image, np.ndarray)) or is_torch_tensor(images[0]))
)
if not is_batched:
images = [images]
# Tesseract OCR to get words + normalized bounding boxes
if self.apply_ocr:
words_batch = []
boxes_batch = []
for image in images:
#words, boxes = apply_tesseract(self.to_pil_image(image))
w, h = image.size
words, boxes = get_ocr(img_name, w, h)
words_batch.append(words)
boxes_batch.append(boxes)
# transformations (resizing)
if self.do_resize and self.size is not None:
images = [self.resize(image=image, size=self.size, resample=self.resample) for image in images]
images = [self.to_numpy_array(image, rescale=False) for image in images]
# flip color channels from RGB to BGR (as Detectron2 requires this)
images = [image[::-1, :, :] for image in images]
# return as BatchFeature
data = {"pixel_values": images}
encoded_inputs = BatchFeature(data=data, tensor_type=return_tensors)
if self.apply_ocr:
encoded_inputs["words"] = words_batch
encoded_inputs["boxes"] = boxes_batch
return encoded_inputs
|
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn.externals import joblib
from sklearn.neural_network import MLPClassifier
def input_data(age, sex, chest_pain_type, rest_blood_pressure, serum_cholestrol,
high_fasting_blood_sugar, resting_ecg, max_heart_rate, exercise_induced_angina,
st_depression, peak_exercise_st, major_vessels_num, thal):
my_list = [age, sex, chest_pain_type, rest_blood_pressure, serum_cholestrol,
high_fasting_blood_sugar, resting_ecg, max_heart_rate, exercise_induced_angina,
st_depression, peak_exercise_st, major_vessels_num, thal]
test_new = pd.DataFrame(np.array(my_list).reshape(1,13))
test_new.columns = ['age', 'sex', 'chest_pain_type', 'rest_blood_pressure', 'serum_cholestrol',
'high_fasting_blood_sugar', 'resting_ecg', 'max_heart_rate', 'exercise_induced_angina',
'st_depression', 'peak_exercise_st', 'major_vessels_num', 'thal']
test = pd.read_csv('./data/tubes2_HeartDisease_test.csv')
test.columns = ['age', 'sex', 'chest_pain_type', 'rest_blood_pressure', 'serum_cholestrol',
'high_fasting_blood_sugar', 'resting_ecg', 'max_heart_rate', 'exercise_induced_angina',
'st_depression', 'peak_exercise_st', 'major_vessels_num', 'thal']
test = test.append(test_new, ignore_index = True)
test = test.replace('?', np.nan).astype(float)
print(test)
return test
def preprocessed_input(test):
categorical_attributes = [
"sex", "chest_pain_type", "high_fasting_blood_sugar", "resting_ecg",
"exercise_induced_angina", "peak_exercise_st", "major_vessels_num", "thal"]
series_attributes = ["age", "rest_blood_pressure", "serum_cholestrol", "max_heart_rate", "st_depression"]
test[categorical_attributes] = test[categorical_attributes].fillna(test.mode().iloc[0])
test[series_attributes] = test[series_attributes].fillna(test.mean())
test[categorical_attributes] = test[categorical_attributes].astype('category')
test = test.drop('thal', 1)
test = test.drop('major_vessels_num', 1)
test = pd.get_dummies(test, prefix=[
"sex", "chest_pain_type", "high_fasting_blood_sugar", "resting_ecg",
"exercise_induced_angina", "peak_exercise_st"])
for i in range(0,141):
test = test.drop([i])
print(test)
return test
def predict_input(imported_model, test):
test = np.array(test)
predicted_test = imported_model.predict(test)
return predicted_test
def heart_disease_type(age, sex, chest_pain_type, rest_blood_pressure, serum_cholestrol,
high_fasting_blood_sugar, resting_ecg, max_heart_rate, exercise_induced_angina,
st_depression, peak_exercise_st, major_vessels_num, thal):
imported_model = joblib.load('./models/heart_disease.joblib')
test = input_data(age, sex, chest_pain_type, rest_blood_pressure, serum_cholestrol,
high_fasting_blood_sugar, resting_ecg, max_heart_rate, exercise_induced_angina,
st_depression, peak_exercise_st, major_vessels_num, thal)
test = preprocessed_input(test)
result = predict_input(imported_model, test)
print("Result = ", result)
result = int(result[0])
return result
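# Hedged usage sketch (added for illustration): the feature values below are
# hypothetical and only show the expected call shape; the joblib model and the
# test CSV referenced above must exist on disk for the call to run.
# result = heart_disease_type(63, 1, 1, 145, 233, 1, 2, 150, 0, 2.3, 3, 0, 6)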
|
import numpy as np
import torch
EPS = 1e-12
def embedded_dropout(embed, words, dropout=0.1, scale=None):
if dropout:
mask = embed.weight.data.new(embed.weight.size(0), 1).bernoulli_(1 - dropout)
masked_embed_weight = mask * embed.weight / (1 - dropout)
if EPS:
masked_embed_weight.masked_fill_(mask.eq(0), EPS)
else:
masked_embed_weight = embed.weight
if scale:
masked_embed_weight = scale.expand_as(masked_embed_weight) * masked_embed_weight
X = torch.nn.functional.embedding(words, masked_embed_weight,
embed.padding_idx, embed.max_norm, embed.norm_type,
embed.scale_grad_by_freq, embed.sparse
)
return X
if __name__ == '__main__':
V = 50
h = 4
bptt = 10
batch_size = 2
embed = torch.nn.Embedding(V, h)
    # random_integers is deprecated; randint's high is exclusive, so high=V keeps 0..V-1
    words = np.random.randint(low=0, high=V, size=(batch_size, bptt))
words = torch.LongTensor(words)
origX = embed(words)
X = embedded_dropout(embed, words)
print(origX)
print(X)
|
# pylint: disable=protected-access
"""Backend for three terms recursion generator."""
import numpy
def ttr_call(self, order, dist):
"TTR call backend wrapper"
assert order.shape == (len(dist), self.size)
graph = self.graph
self.dist, dist_ = dist, self.dist
graph.add_node(dist, key=order)
if hasattr(dist, "_ttr"):
if dist.advance:
out = dist._ttr(order, self)
else:
out = numpy.empty((2,)+order.shape)
prm = self.dists.build()
prm.update(self.keys.build())
out[0], out[1] = dist._ttr(order, **prm)
else:
raise NotImplementedError(
"No `_ttr` method found for %s" % dist)
graph.add_node(dist, val=out)
self.dist = dist_
return numpy.array(out)
|
#!/usr/bin/env python3
import glob
import subprocess
import sys
import os
os.chdir(os.path.dirname(__file__)); os.chdir('..')
for name in glob.glob('tests/*.nim'):
if 'flycheck_' in name: continue
lines = open(name).read().splitlines()
if not (lines and lines[0].startswith('# TEST.')):
if lines and ('disabled' not in lines[0]):
# not marked as test, at least compile it
subprocess.check_call(['nim', 'c', '--verbosity:0', name])
continue
assert lines[1].startswith('discard """')
lines[1] = lines[1].split('"""', 1)[1]
expected_output = []
for line in lines[1:]:
expected_output.append(line.split('"""', 1)[0])
if '"""' in line:
break
expected_output = '\n'.join(expected_output).encode('utf8')
subprocess.check_call(['nim', 'c', '--verbosity:0', name])
bin_name = name.rsplit('.', 1)[0]
got_output = subprocess.check_output([bin_name], stderr=subprocess.STDOUT).strip()
if got_output != expected_output:
print(name, 'failure')
print('Expected:')
print(expected_output)
print('Got:')
print(got_output)
sys.exit(1)
else:
print(name, 'ok')
|
# Generated by Django 3.1.2 on 2021-02-07 22:33
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('gui', '0007_patientdemographic_mrn'),
]
operations = [
migrations.CreateModel(
name='Timeline',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('PatientID', models.IntegerField(blank=True, null=True)),
('TimelineDTS', models.DateField(blank=True, null=True)),
('TimelineValue', models.TextField(blank=True, null=True)),
('EventType', models.TextField(blank=True, null=True)),
],
),
migrations.AddField(
model_name='note',
name='SpecialtyDSC',
field=models.TextField(blank=True, null=True),
),
]
|
'''test methods for the OrbitalBallRoller class in the rolling ball package
'''
from nose.tools import raises
from RollingBall import rolling_ball
class TestBallRollerInstantiation():
'''test the orbital ball roller object instantiation
'''
def test_ball_roller_object(self):
'''test successful instantiation of an OrbitalBallRoller object
'''
points = [[1, 2], [3, 4], [5, 6], [7, 8], [9, 10], [11, 12], [13, 14]]
rolling_ball.OrbitalBallRoller(points)
@raises(ValueError)
def test_short_input_array(self):
        '''test that instantiation raises a ValueError when too few points are given
'''
points = [[1, 2], [3, 4], [5, 6], [7, 8], [9, 10], [11, 12]]
rolling_ball.OrbitalBallRoller(points)
class TestBallRollerMethods():
'''test basic functionality of ball roller object
'''
def setup(self):
'''setup function to be called before each test
'''
        # load some test data here
points = [[1, 2], [3, 4], [5, 6], [7, 8], [9, 10], [11, 12], [13, 14]]
self.ball_roller = rolling_ball.OrbitalBallRoller(points)
def teardown(self):
'''teardown function to be called after each test
'''
del self.ball_roller
def test_get_alpha_complex(self):
'''test the creation of an alpha complex from the triangulation
'''
self.ball_roller.get_alpha_complex(alpha=.5)
def test_plot_delaunay(self):
'''test the plot routine of the triangulation
'''
self.ball_roller.plot_delaunay()
def test_get_background(self):
'''test the routine that acquires the background density
'''
self.ball_roller.get_background()
    @raises(Exception)
def test_get_background_exception(self):
        '''test that the routine to acquire the background density throws an error
if no alpha_complex has been determined for the object
'''
self.ball_roller.get_background()
def test_locate_depletions(self):
'''test the method that identifies plasma depletions from the
background density
'''
self.ball_roller.locate_depletions()
    @raises(Exception)
def test_locate_depletions_exception(self):
'''test that the locate depletions method produces exception if ...
'''
self.ball_roller.locate_depletions()
|
#!/usr/bin/env python
class Solution:
def replaceBlank(self, str):
print(str)
res = ''
for char in str:
if char == ' ':
res += '%20'
else:
res += char
return res
if __name__ == '__main__':
str = "Hello Python World!"
sol = Solution()
print(sol.replaceBlank(str)) |
import uuid
import pickle as pickle
import os, json
import pika
from .io import *
import logging
from datetime import datetime
class EngineRule:
def __init__(self, type, field_name, global_name=None):
self.Type = type
if self.Type not in ["replace","remove"]:
raise ValueError("%s rule type not supported." % type)
self.FieldName = field_name
if self.Type == "replace" and global_name is None:
raise ValueError("global_name parameter is required for 'replace' type rules")
self.GlobalName = global_name
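# Illustrative rule definitions (field names here are hypothetical, not from the original code):
#   EngineRule("remove", "ssn")                          # drop the 'ssn' field entirely
#   EngineRule("replace", "email", global_name="email")  # substitute a stable generated value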
class SchemaParser:
def __init__(self, json_file):
logging.info("Schema parsing started: %s" % json_file)
try:
self._json_ = json.load(open(json_file))
except:
raise IOError("'%s' schema file invalid." % json_file)
self.Input = self.__parse_input__()
self.Rules = self.__parse_rules__()
self.Output = self.__parse_output__()
self.Storage = self.__parse_storage__()
logging.info("Schema parsing completed")
def __parse_rules__(self):
rules = []
if self._json_["rules"].get("remove"):
for remove_rule in self._json_["rules"].get("remove"):
rules.append(EngineRule("remove", remove_rule.get("fieldName")))
if self._json_["rules"].get("replace"):
for replace_rule in self._json_["rules"].get("replace"):
rules.append(EngineRule("replace", replace_rule.get("fieldName"), replace_rule.get("globalName")))
return rules
def __parse_storage__(self):
storage_types = ["memory"]
type = self._json_["storage"].get("storageType")
properties = self._json_["storage"].get("storageProperties")
if type.lower() not in storage_types:
raise ValueError("storageType: %s is not supported" % type)
elif type.lower() == "memory":
file_name = properties.get("fileName")
restore = properties.get("restore") or True
return MemoryStorage(file_name, restore)
def __parse_input__(self):
input_types = ["file", "rabbitmq"] #all supported input types
input_formats = ["csv", "json"]
type = self._json_["input"].get("inputType")
properties = self._json_["input"].get("inputProperties")
separator = properties.get("separator") or ","
quote = properties.get("quote") or ""
format = properties.get("format")
schema = properties.get("schema")
self.Schema = schema
if format.lower() not in input_formats:
raise ValueError("Input format: %s is not supported" % format)
elif format.lower() == "csv":
format = CSVFormat(sep=separator, quote=quote, schema=schema)
elif format.lower() == "json":
format = JSONFormat()
if type.lower() not in input_types:
raise ValueError("inputType: %s is not supported" % type)
elif type.lower() == "file": #preparing FileReader object
file_name = properties.get("fileName")
return FileReader(file_name, format)
elif type.lower() == "rabbitmq":
connection_string = properties.get("URI")
queue_name = properties.get("QueueName")
return RabbitMQReader(connection_string, queue_name, format)
def __parse_output__(self):
output_types = ["file", "rabbitmq"]
output_formats = ["csv", "json"]
type = self._json_["output"].get("outputType")
properties = self._json_["output"].get("outputProperties")
separator = properties.get("separator") or ","
quote = properties.get("quote") or ""
format = properties.get("format")
if format.lower() not in output_formats:
raise ValueError("Output format: %s is not supported" % format)
elif format.lower() == "csv":
schema = list(self.Input.Format.schema) #making a copy of input schema
for r in [r.FieldName for r in self.Rules if r.Type == "remove"]:
schema.remove(r)
format = CSVFormat(sep=separator, quote=quote, schema=schema)
elif format.lower() == "json":
format = JSONFormat()
if type.lower() not in output_types:
raise ValueError("outputType: %s is not supported" % type)
elif type.lower() == "file":
file_name = properties.get("fileName")
replace = properties.get("replace") or False
return FileWriter(file_name, format, replace)
elif type.lower() == "rabbitmq":
connection_string = properties.get("URI")
queue_name = properties.get("QueueName")
return RabbitMQWriter(connection_string, queue_name, format)
class Engine:
def __init__(self, input, output, storage):
self.__input__ = input
self.__output__ = output
self.__storage__ = storage
self.__rules__ = []
self.ProcessedRows = 0
self.ProcessingTime = 0
def read_rules(self, rules):
"""
Reads list of EngineRule objects. Usually rules are property of SchemaParser.
"""
self.__rules__ = rules
def add_rule(self, rule):
"""
Adds EngineRule object to rules list.
"""
self.__rules__.append(rule)
def apply_rules(self, object):
for r in self.__rules__:
if r.Type == "remove":
object.pop(r.FieldName)
if r.Type == "replace" and object[r.FieldName] is not "":
object[r.FieldName] = self.__storage__.replace(r.GlobalName, object[r.FieldName])
return object
def run(self):
logging.info("Processing engine started")
if self.__input__.Type == "file":
start = datetime.now()
rows = 0
with self.__input__ as f:
for line in f:
o = self.apply_rules(line)
self.__output__.write(o)
rows += 1
if (rows % 100000) == 0: logging.info("Processed %s rows" % rows)
self.__output__.close()
self.__storage__.flush()
self.ProcessedRows = rows
self.ProcessingTime = datetime.now() - start
logging.info("Processing engine finished")
elif self.__input__.Type == "stream":
self.__input__.run(self)
def save(self):
"""
Saves replacement values in permanent storage.
"""
self.__storage__.flush()
class MemoryStorage:
def __init__(self, file, restore=True):
self.__f__ = file
        if restore and os.path.isfile(self.__f__):
with open(self.__f__, "rb") as f:
self.__storage__ = pickle.load(f)
else:
self.__storage__ = {}
def replace(self, name, value):
if name in self.__storage__:
storagens = self.__storage__.get(name) #setting dictionary namespace
else:
storagens = self.__storage__[name] = {} #creating new dictionary namespace
if value in storagens:
return storagens.get(value)
else:
hash = self.generate_hash()
storagens[value] = hash
return hash
def generate_hash(self):
return uuid.uuid4().hex
def dump(self):
"""
Returns internal mapping dictionary as JSON-formatted string.
"""
return json.dumps(self.__storage__)
def flush(self):
"""
Saves current state of storage to disk.
"""
logging.info("Storage flushing started...")
with open(self.__f__, "wb") as f:
pickle.dump(self.__storage__, f)
logging.info("Flushing completed succefully.") |
# TieRopes.py
def tieRopes(K, A):
    # Returns the number of ropes, tied or not,
    # whose total length is greater than or equal to K
numSogas = 0
largo = 0
for i in A:
        largo += i # tie ropes together by adding up their lengths
if largo >= K:
numSogas += 1
largo = 0
return numSogas
print(tieRopes(4, [1, 2, 3, 4, 1, 1, 3])) |
# -*- coding: utf-8 -*-
"""
Convert files with uniform grid to netcdf4
@author: rringuet, 2022
Date: 2020-05-05 00:00
Model: TS18
Bin: Esw 1.1 mV/m, Bang 178 deg., tilt 13.1 deg.
Grid: Uniform (lat_step: 1.00, lon_step: 2.00 [deg])
MLAT [deg] MLT [hr] Pot [kV] Vazm [deg] Vmag [m/s]
---------- ---------- ---------- ---------- ----------
"""
from glob import glob
import numpy as np
from time import perf_counter
from datetime import datetime, timezone
#from astropy.constants import R_earth
from netCDF4 import Dataset
import re
model_varnames={"Pot":['V','kV'],"Vazm":['theta_v','deg'],"Vmag":['v','m/s'],
#remaining variables are time series
"tilt":['theta_Btilt',"deg"], 'Esw':['E_sw','mV/m'],
'Bang':['theta_B','deg'],
#these are the coordinate variables
'MLAT':['MLAT','deg'], 'MLT':['MLT','hr']}
def dts_to_hrs(datetime_string, filedate):
'''Get hours since midnight from datetime string'''
return (datetime.strptime(datetime_string, '%Y-%m-%d %H:%M').replace(tzinfo=timezone.utc)\
-filedate).total_seconds()/3600.
def grid_type(filename):
'''Determine grid type of data file. True if uniform, False if equal-area.'''
read_obj = open(filename, 'r')
line = read_obj.readline().strip() #Date: 2020-05-05 00:00
line = read_obj.readline().strip() #Model: TS18
line = read_obj.readline().strip() #Bin: Esw 1.1 mV/m, Bang 178 deg., tilt 13.1 deg.
line = read_obj.readline().strip() #Grid: Uniform (lat_step: 1.00, lon_step: 2.00 [deg])
read_obj.close()
return 'Uniform' in line
#filename='C:/Users/rringuet/Kamodo_WinDev1/SuperDARN/fullday/model20200505-0000.txt'
def ascii_reader(filename):
'''Loads the data from a superdarn txt file into a nested dict'''
#open file
read_obj = open(filename, 'r')
#extract header
date_string = read_obj.readline().strip() #Date: 2020-05-05 00:00
model_string = read_obj.readline().strip() #Model: TS18
bin_string = read_obj.readline().strip() #Bin: Esw 1.1 mV/m, Bang 178 deg., tilt 13.1 deg.
grid_string = read_obj.readline().strip() #Grid: Uniform (lat_step: 1.00, lon_step: 2.00 [deg])
trash = read_obj.readline().strip() #empty line
variable_keys = read_obj.readline().strip() #MLAT [deg] MLT [hr] Pot [kV] Vazm [deg] Vmag [m/s]
trash = read_obj.readline().strip() #---------- ---------- ---------- ---------- ----------
#extract info from header strings
time_str = date_string[6:].strip() #'2020-05-05 00:00' date_string[2]+' '+date_string[3]
filedate = datetime.strptime(time_str[:10], '%Y-%m-%d').replace(tzinfo=timezone.utc)
hrs = dts_to_hrs(time_str, filedate)
bin_list = bin_string[4:].split(',')
Esw = float(bin_list[0].strip()[3:].strip().split(' ')[0])
Bang = float(bin_list[1].strip()[4:].strip().split(' ')[0])
tilt = float(bin_list[2].strip()[4:].strip().split(' ')[0])
var_list = re.split(' +',variable_keys)
header_keys = ['tilt','Esw','Bang']
variable_keys = [item for item in var_list if '[' not in item]
#create dictionary to store data in
variables = {model_varnames[var][0]: {'units': model_varnames[var][-1],
'data': []} for var in variable_keys+header_keys}
#store time series values
variables[model_varnames['tilt'][0]]['data'] = tilt
variables[model_varnames['Esw'][0]]['data'] = Esw
variables[model_varnames['Bang'][0]]['data'] = Bang
variables['time'] = {'units':'hr', 'data': hrs}
#store array data into dictionary
for line in read_obj:
vals = re.split(' +', line.strip())
for i in range(len(variable_keys)): #skip empty block(s) at the end
variables[model_varnames[variable_keys[i]][0]]['data'].append(vals[i])
#convert to numpy float arrays
for key in variables.keys():
if isinstance(variables[key]['data'],(list)):
variables[key]['data'] = np.array(variables[key]['data'], dtype=float)
#add metadata
    variables['metadata'] = {'grid': grid_string[5:].strip(),
                             'model': model_string[6:].strip(),
                             'filedate': time_str[:10]}
return variables
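# Rough shape of the dict returned by ascii_reader (keys come from model_varnames;
# the sample values below are illustrative only):
#   {'V':           {'units': 'kV',  'data': np.array([...])},   # plus 'theta_v', 'v'
#    'theta_Btilt': {'units': 'deg', 'data': 13.1},              # plus 'E_sw', 'theta_B'
#    'MLAT':        {'units': 'deg', 'data': np.array([...])},   # plus 'MLT'
#    'time':        {'units': 'hr',  'data': 0.0},
#    'metadata':    {'grid': 'Uniform (...)', 'model': 'TS18', 'filedate': '2020-05-05'}}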
def _toCDF(files, file_prefix):
'''Reads in data from all files, writes to a netcdf4 file. Used for uniform grid
data files for faster data access.'''
#get data from first file, set lat/lon arrays
file_data = ascii_reader(files[0])
lat = np.unique(file_data['MLAT']['data'])
if sum(lat>0)==lat.size:
cdf_filename = file_prefix+'_default_N.nc' #northern hemisphere data
lat = np.append(lat, 90.)
else:
cdf_filename = file_prefix+'_default_S.nc' #southern hemisphere data
lat = np.insert(lat, 0, -90.)
lon = np.unique(file_data['MLT']['data'])*15.
#set up net variables dictionary and time coordinate lists
time = [file_data['time']['data']]
var1D_list = ['theta_Btilt', 'E_sw', 'theta_B']
var3D_list = ['V', 'theta_v', 'v']
variables = {var: [np.reshape(file_data[var]['data'], (lat.size-1, lon.size)).T] \
for var in var3D_list} #reshape into lon/lat array
for var in var1D_list: variables[var] = [file_data[var]['data']]
#loop through files and add data to variables dict
for file in files[1:]:
data = ascii_reader(file)
time.append(data['time']['data'])
        for var in var1D_list: variables[var].append(data[var]['data'])
        for var in var3D_list: variables[var].append(np.reshape(data[var]['data'],
                                                     (lat.size-1, lon.size)).T)
for var in var1D_list+var3D_list: variables[var] = np.array(variables[var])
#perform longitude wrapping in coordinate grid
lon_le180 = np.where(lon<=180)[0]
lon_ge180 = np.where(lon>=180)[0] #repeat 180 for -180 values
if not 180. in lon: #add a cushion value for proper interpolation range (-180 to 180)
lon_le180 = np.append(lon_le180, lon_le180.max()+1)
lon_ge180 = np.insert(lon_ge180, 0, lon_ge180.min()-1)
lon_size = len(lon_le180)+len(lon_ge180)
tmp = np.zeros(lon_size)
tmp[:len(lon_ge180)] = lon[lon_ge180]-360.
tmp[len(lon_ge180):] = lon[lon_le180]
lon = tmp
#perform lon and lat wrapping in variable data
for var in var3D_list:
#perform scalar averaging for pole values (latitude wrapping)
data_shape = variables[var].shape
total_shape = (data_shape[0],data_shape[1],data_shape[2]+1)
tmp = np.zeros(total_shape, dtype=float)
if '_N.nc' in cdf_filename: #north pole at end of array
tmp[:,:,:-1] = variables[var] #copy data into grid
top = np.mean(tmp[:,:,-2],axis=1) #same shape as time axis
tmp[:,:,-1] = np.broadcast_to(top, (total_shape[1],total_shape[0])).T
elif '_S.nc' in cdf_filename: #south pole at beginning of array
tmp[:,:,1:] = variables[var] #copy data into grid
top = np.mean(tmp[:,:,1],axis=1) #same shape as time axis
tmp[:,:,0] = np.broadcast_to(top, (total_shape[1],total_shape[0])).T
variables[var] = tmp
#swap longitudes, repeat 180 values for -180 position
data_shape = variables[var].shape
total_shape = (data_shape[0],lon_size,data_shape[2])
tmp = np.zeros(total_shape, dtype=float)
tmp[:,:len(lon_ge180),:] = variables[var][:,lon_ge180,:]
tmp[:,len(lon_ge180):,:] = variables[var][:,lon_le180,:]
variables[var] = tmp
#Data wrangling complete. Start new output file
data_out = Dataset(cdf_filename, 'w', format='NETCDF4')
data_out.file = ''.join([f+',' for f in files]).strip(',')
data_out.model = 'SuperDARN'
data_out.filedate = file_data['metadata']['filedate']
data_out.grid = file_data['metadata']['grid']
data_out.internal_model = file_data['metadata']['model']
#establish coordinates (lon, lat, then time open)
#lon
new_dim = data_out.createDimension('lon', lon.size) #create dimension
new_var = data_out.createVariable('lon', np.float32, 'lon') #create variable
new_var[:] = lon #store data for dimension in variable
new_var.units = 'deg'
#lat
new_dim = data_out.createDimension('lat', lat.size) #create dimension
new_var = data_out.createVariable('lat', np.float32, 'lat') #create variable
new_var[:] = lat #store data for dimension in variable
new_var.units = 'deg'
#time
new_dim = data_out.createDimension('time', len(files)) #create dimension
new_var = data_out.createVariable('time', np.float32, 'time') #create variable
new_var[:] = np.array(time)
new_var.units = 'hr'
#copy over variables to file
for variable_name in variables.keys():
if variable_name in var3D_list:
new_var = data_out.createVariable(variable_name, np.float32, ('time','lon','lat'))
new_data = variables[variable_name]
elif variable_name in var1D_list:
new_var = data_out.createVariable(variable_name, np.float32, ('time'))
new_data = variables[variable_name]
else:
continue
new_var[:] = new_data #store data in variable
units = [value[-1] for key, value in model_varnames.items() if value[0]==variable_name][0]
new_var.units = units
#close file
data_out.close()
return cdf_filename
def _toCDFGroup(files, file_prefix):
'''Reads in data from all files, writes to h5 files. Used for equal-area
data files so that lon grids from different lat vals can be stored in groups.'''
#get data from first file
file_data = ascii_reader(files[0])
lat = np.unique(file_data['MLAT']['data'])
if sum(lat>0)==lat.size:
cdf_filename = file_prefix+'_equalarea_N.nc' #northern hemisphere data
lat = np.append(lat, 90.)
else:
cdf_filename = file_prefix+'_equalarea_S.nc' #southern hemisphere data
lat = np.insert(lat, 0, -90.)
#set up net variables dictionary and time coordinate lists
time = [file_data['time']['data']]
var1D_list = ['theta_Btilt', 'E_sw', 'theta_B']
var3D_list = ['V', 'theta_v', 'v']
variables_1D = {var: [file_data[var]['data']] for var in var1D_list}
variables_3D = {var: {latval: [] for latval in lat} for var in var3D_list}
#store longitude locations and grids for each latitude value
lonval_dict, lonidx_dict = {}, {}
for latval in lat:
idx = np.where(file_data['MLAT']['data']==latval)[0]
lonidx_dict[latval] = idx #store indices
if len(idx)==0: continue #skip latval=90.
lon = file_data['MLT']['data'][idx]*15. #convert from MLT to degrees
if lon.max()>360.: lon[np.argmax(lon)]-=360. #grid at pole has issues
lonval_dict[latval] = np.unique(lon)
if len(lonval_dict[latval])!=len(lon): #last lon value repeated near pole
lonidx_dict[latval] = idx[:-1] #remove repeated value
#Store variable data in nested fashion
for file in files:
data = ascii_reader(file)
for var in var3D_list:
for latval in lat: #1D array of vals for given time and lat
variables_3D[var][latval].append(data[var]['data'][lonidx_dict[latval]])
#perform latitude wrapping first to avoid repeated values in average
for var in var3D_list:
#store which latval key is closest to pole
if '_N.nc' in cdf_filename:
pole_lat = 90.
latval = lat[-2] #north pole at end of array
elif '_S.nc' in cdf_filename:
pole_lat = -90.
latval = lat[1] #south pole at beginning of array
#perform scalar averaging and store
#variables_3D[var][latval] has shape (time,lon), average over lon values
variables_3D[var][pole_lat] = np.mean(np.array(variables_3D[var][latval]),axis=1) #same shape as time
#perform longitude swapping and lon wrapping per lat value
for latval in lat:
if latval!=pole_lat: #if not at the poles
#wrap longitude coordinate values and store
lon = lonval_dict[latval]
lon_le180 = np.where(lon<=180)[0]
lon_ge180 = np.where(lon>=180)[0] #repeat 180 for -180 values
if not 180. in lon: #add a cushion value for proper interpolation range (-180 to 180)
lon_le180 = np.append(lon_le180, lon_le180.max()+1)
lon_ge180 = np.insert(lon_ge180, 0, lon_ge180.min()-1)
lon_size = len(lon_le180)+len(lon_ge180)
tmp = np.zeros(lon_size)
tmp[:len(lon_ge180)] = lon[lon_ge180]-360.
tmp[len(lon_ge180):] = lon[lon_le180]
lonval_dict[latval] = tmp
#swap longitude dimension of variables, each of shape (time,lon)
for var in var3D_list:
variables_3D[var][latval] = np.array(variables_3D[var][latval])
#swap longitudes, repeat 180 values for -180 position
                data_shape = variables_3D[var][latval].shape
                tmp = np.zeros((data_shape[0],lon_size), dtype=float)
                tmp[:,:len(lon_ge180)] = variables_3D[var][latval][:,lon_ge180]
                tmp[:,len(lon_ge180):] = variables_3D[var][latval][:,lon_le180]
                variables_3D[var][latval] = tmp
#Data wrangling complete. Start new output file
data_out = Dataset(cdf_filename, 'w', format='NETCDF4')
data_out.file = ''.join([f+',' for f in files]).strip(',')
data_out.model = 'SuperDARN'
data_out.filedate = file_data['metadata']['filedate']
data_out.grid = file_data['metadata']['grid']
data_out.internal_model = file_data['metadata']['model']
#establish coordinates (lat, then time open)
#lat
new_dim = data_out.createDimension('lat', lat.size) #create dimension
new_var = data_out.createVariable('lat', np.float32, 'lat') #create variable
new_var[:] = lat #store data for dimension in variable
new_var.units = 'deg'
#time
new_dim = data_out.createDimension('time', len(files)) #create dimension
new_var = data_out.createVariable('time', np.float32, 'time') #create variable
new_var[:] = np.array(time)
new_var.units = 'hr'
#CHANGE TO NESTED/GROUP FORMAT!!!!
    #copy over the time-series (1D) variables to the file; the 3D variables above
    #still need to be written in the nested/group format noted in the TODO
    for variable_name in var1D_list:
        new_var = data_out.createVariable(variable_name, np.float32, ('time'))
        new_var[:] = np.array(variables_1D[variable_name]) #store data in variable
        units = [value[-1] for key, value in model_varnames.items() if value[0]==variable_name][0]
        new_var.units = units
#close file
data_out.close()
return cdf_filename
def convert_files(file_prefix):
'''Convert files of given pattern into one netCDF4 or h5 file'''
#convert N and S hemisphere files separately or combine?
print(f'Converted data file not found. Converting files with {file_prefix} prefix.')
ftic = perf_counter()
files = sorted(glob(file_prefix+'*.txt'))
    if grid_type(files[0]): #If grid is uniform, write to netcdf4
print('Uniform grid detected. Converting to a netcdf4 file.')
out_file = _toCDF(files, file_prefix)
else:
print('Equal-area grid detected. Converting to a grouped netcdf4 file.')
out_file = _toCDFGroup(files, file_prefix)
print(out_file)
print(f'{len(files)} files with prefix {file_prefix} now combined into {out_file} '+\
f'in {perf_counter()-ftic:.6f}s.')
return True |
# -*- coding: utf-8 -*-
try:
from urllib import urlopen
except ImportError:
from urllib.request import urlopen
import sys
import zipfile
import tarfile
import io
import os
import distutils.log
import ctypes
if ctypes.sizeof(ctypes.c_void_p) == 8:
PLATFORM = 'x64'
else:
PLATFORM = 'x86'
DEBUG = os.path.splitext(sys.executable)[0].endswith('d')
def get_dep(path, url):
"""download an archive to a specific location"""
try:
distutils.log.info(
"fetching {0} to {1}".format(
url,
path,
)
)
response = urlopen(url)
dst_file = io.BytesIO(response.read())
dst_file.seek(0)
dst = os.path.split(path)[0]
if url.endswith('zip'):
zip_ref = zipfile.ZipFile(dst_file)
zip_ref.extractall(dst)
zip_ref.close()
dst_file.close()
dst = os.path.join(dst, zip_ref.namelist()[0])
if dst != path:
os.rename(dst, path)
return True
else:
tar = tarfile.TarFile.gzopen(os.path.split(path)[1], fileobj=dst_file)
dst = os.path.split(path)[0]
tar.extractall(dst)
dst = os.path.join(dst, tar.getmembers()[0].name)
if dst != path:
os.rename(dst, path)
tar.close()
dst_file.close()
return True
except:
import traceback
distutils.log.error(traceback.format_exc())
return False
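# Illustrative usage (hypothetical path and URL, not from this script):
#   get_dep(os.path.join('deps', 'libfoo'), 'https://example.com/libfoo-1.0.zip')
# downloads the archive, extracts it next to `path`, renames the extracted
# top-level entry to `path`, and returns True on success or False on any failure.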
|
'''FBAS definition helpers'''
def get_hierarchical_base_definition(n_orgs, t_orgs, n_nodes, t_nodes):
'''Get quorum slice definition for n_orgs orgs with n_node nodes each'''
return {
'threshold': t_orgs,
'nodes': set(),
'children_definitions': [
{
'threshold': t_nodes,
'nodes': set(range(n_nodes * n, n_nodes * (n + 1))),
'children_definitions': []
}
for n in range(n_orgs)
]
}
def get_nodes(definition):
'''Get all nodes from a quorum slice definition'''
nodes = definition['nodes']
for children_definition in definition['children_definitions']:
nodes = nodes.union(get_nodes(children_definition))
return nodes
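# Illustrative usage (the numbers are arbitrary, not from the original module):
if __name__ == '__main__':
    # 3 orgs with a 2-of-3 org threshold, each running 3 nodes with a 2-of-3 node threshold
    definition = get_hierarchical_base_definition(3, 2, 3, 2)
    print(sorted(get_nodes(definition)))  # -> [0, 1, 2, 3, 4, 5, 6, 7, 8]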
|
from .dictionary import DictionaryTree
from typing import Dict
__all__ = ('Translator',)
class Translator:
def __init__(self, trees: Dict[str, DictionaryTree], use: str = None):
assert len(trees) > 0
self.trees = trees
self.all_uses = list(trees.keys())
if not use or use not in self.all_uses:
self._use = self.all_uses[0]
else:
self._use = use
@property
def use(self):
return self._use
@use.setter
def use(self, val):
assert val in self.all_uses
self._use = val
def translate(self, sentence: str, use: str = None):
tree = self.trees[use or self.use]
translation = []
idx = 0
while idx < len(sentence):
trans_part = tree.max_match(sentence[idx:])
idx += len(trans_part)
translation.append(trans_part)
return ''.join(translation)
__call__ = translate
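# Illustrative usage (assumes a DictionaryTree named `tree` built elsewhere;
# not part of the original module):
#   translator = Translator({'default': tree})
#   translator.translate('some sentence')   # greedy maximum-match lookup
#   translator('some sentence')             # same call via __call__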
|
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Data provider with an argument to control data augmentation."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import tensorflow.compat.v1 as tf
import tensorflow_datasets as tfds
from low_rank_local_connectivity import utils
def extract_data(data, preprocess_image):
"""Extracts image, label and create a mask."""
image = data["image"]
# Reserve label 0 for background
label = tf.cast(data["label"], dtype=tf.int32)
# Create a mask variable to track the real vs padded data in the last batch.
mask = 1.
image = preprocess_image(image)
return image, label, mask
def construct_iterator(dataset_builder,
split,
preprocess_fn,
batch_size,
is_training):
"""Constructs data iterator.
Args:
dataset_builder: tensorflow_datasets data builder.
split: tensorflow_datasets data split.
preprocess_fn: Function that preprocess each data example.
batch_size: (Integer) Batch size.
is_training: (boolean) Whether training or inference mode.
Returns:
Data iterator.
"""
dataset = dataset_builder.as_dataset(split=split, shuffle_files=True)
dataset = dataset.map(preprocess_fn,
num_parallel_calls=tf.data.experimental.AUTOTUNE)
if is_training:
# 4096 is ~0.625 GB of RAM. Reduce if memory issues encountered.
dataset = dataset.shuffle(buffer_size=4096)
dataset = dataset.repeat(-1 if is_training else 1)
dataset = dataset.batch(batch_size, drop_remainder=is_training)
if not is_training:
# Pad the remainder of the last batch to make batch size fixed.
dataset = utils.pad_to_batch(dataset, batch_size)
dataset = dataset.prefetch(buffer_size=tf.data.experimental.AUTOTUNE)
return tf.compat.v1.data.make_one_shot_iterator(dataset)
class MNISTDataProvider(object):
"""MNIST Data Provider.
Attributes:
images: (4-D tensor) Images of shape (batch, height, width, channels).
labels: (1-D tensor) Data labels of size (batch,).
mask: (1-D boolean tensor) Data mask. Used when data is not repeated to
indicate the fraction of the batch with true data in the final batch.
num_classes: (Integer) Number of classes in the dataset.
num_examples: (Integer) Number of examples in the dataset.
class_names: (List of Strings) MNIST id for class labels.
num_channels: (integer) Number of image color channels.
image_size: (Integer) Size of the image.
iterator: Tensorflow data iterator.
"""
def __init__(self,
subset,
batch_size,
is_training,
data_dir=None):
dataset_builder = tfds.builder("mnist", data_dir=data_dir)
dataset_builder.download_and_prepare(download_dir=data_dir)
self.image_size = 28
if subset == "train":
split = tfds.core.ReadInstruction("train", from_=8, to=100, unit="%")
elif subset == "valid":
split = tfds.core.ReadInstruction("train", from_=0, to=8, unit="%")
elif subset == "test":
split = tfds.Split.TEST
else:
raise ValueError("subset %s is undefined " % subset)
self.num_channels = 1
iterator = construct_iterator(
dataset_builder, split, self._preprocess_fn(), batch_size, is_training)
info = dataset_builder.info
self.iterator = iterator
self.images, self.labels, self.mask = iterator.get_next()
self.num_classes = info.features["label"].num_classes
self.class_names = info.features["label"].names
self.num_examples = info.splits[split].num_examples
def _preprocess_fn(self):
"""Preprocessing function."""
image_size = self.image_size
def preprocess_image(image):
"""Preprocessing."""
# Normalize to 0-1 range.
image = tf.image.convert_image_dtype(image, dtype=tf.float32)
image = 2 * image - 1
return tf.image.resize_image_with_crop_or_pad(
image, image_size, image_size)
return functools.partial(extract_data, preprocess_image=preprocess_image)
class CIFAR10DataProvider(object):
"""CIFAR10 Data Provider.
Attributes:
images: (4-D tensor) Images of shape (batch, height, width, channels).
labels: (1-D tensor) Data labels of size (batch,).
mask: (1-D boolean tensor) Data mask. Used when data is not repeated to
indicate the fraction of the batch with true data in the final batch.
num_classes: (Integer) Number of classes in the dataset.
num_examples: (Integer) Number of examples in the dataset.
class_names: (List of Strings) CIFAR10 id for class labels.
num_channels: (integer) Number of image color channels.
image_size: (Integer) Size of the image.
iterator: Tensorflow data iterator.
"""
def __init__(self,
subset,
batch_size,
is_training,
data_dir=None):
dataset_builder = tfds.builder("cifar10", data_dir=data_dir)
dataset_builder.download_and_prepare(download_dir=data_dir)
self.image_size = 32
if subset == "train":
split = tfds.core.ReadInstruction("train", from_=10, to=100, unit="%")
elif subset == "valid":
split = tfds.core.ReadInstruction("train", from_=0, to=10, unit="%")
elif subset == "test":
split = tfds.Split.TEST
else:
raise ValueError("subset %s is undefined " % subset)
self.num_channels = 3
iterator = construct_iterator(
dataset_builder, split, self._preprocess_fn(), batch_size, is_training)
info = dataset_builder.info
self.iterator = iterator
self.images, self.labels, self.mask = iterator.get_next()
self.num_classes = info.features["label"].num_classes
self.class_names = info.features["label"].names
self.num_examples = info.splits[split].num_examples
def _preprocess_fn(self):
"""Preprocessing function."""
image_size = self.image_size
def preprocess_image(image):
"""Preprocessing."""
# Normalize to 0-1 range.
image = tf.image.convert_image_dtype(image, dtype=tf.float32)
return tf.image.resize_image_with_crop_or_pad(
image, image_size, image_size)
return functools.partial(extract_data, preprocess_image=preprocess_image)
def extract_data_celeba(data, preprocess_image, attribute="Male"):
"""Extracts image, label and create a mask (used by CelebA data provider)."""
image = data["image"]
# Reserve label 0 for background
label = tf.cast(data["attributes"][attribute], dtype=tf.int32)
# Create a mask variable to track the real vs padded data in the last batch.
mask = 1.
image = preprocess_image(image)
return image, label, mask
class CelebADataProvider(object):
"""CelebA Data Provider.
Attributes:
images: (4-D tensor) Images of shape (batch, height, width, channels).
labels: (1-D tensor) Data labels of size (batch,).
mask: (1-D boolean tensor) Data mask. Used when data is not repeated to
indicate the fraction of the batch with true data in the final batch.
num_classes: (integer) Number of classes in the dataset.
num_examples: (integer) Number of examples in the dataset.
num_channels: (integer) Number of image color channels.
image_size: (Integer) Size of the image.
iterator: Tensorflow data iterator.
class_names: (List of strings) Name of classes in the order of the labels.
"""
image_size = 32
def __init__(self,
subset,
batch_size,
is_training,
data_dir=None):
dataset_builder = tfds.builder("celeb_a",
data_dir=data_dir)
dataset_builder.download_and_prepare(download_dir=data_dir)
if subset == "train":
split = tfds.Split.TRAIN
elif subset == "valid":
split = tfds.Split.VALIDATION
elif subset == "test":
split = tfds.Split.TEST
else:
raise ValueError(
"subset %s is undefined for the dataset" % subset)
self.num_channels = 3
iterator = construct_iterator(
dataset_builder, split, self._preprocess_fn(), batch_size, is_training)
info = dataset_builder.info
self.iterator = iterator
self.images, self.labels, self.mask = iterator.get_next()
self.num_classes = 2
self.class_names = ["Female", "Male"]
self.num_examples = info.splits[split].num_examples
def _preprocess_fn(self):
"""Preprocessing."""
crop = True
image_size = self.image_size
def preprocess_image(image):
"""Preprocesses the given image.
Args:
image: Tensor `image` representing a single image example of
arbitrary size.
Returns:
Preprocessed image.
"""
# Normalize to 0-1 range.
image = tf.image.convert_image_dtype(image, dtype=tf.float32)
if crop:
image = tf.image.crop_to_bounding_box(image, 40, 20, 218 - 80, 178 - 40)
return tf.image.resize_bicubic([image], [image_size, image_size])[0]
return functools.partial(extract_data_celeba,
preprocess_image=preprocess_image,
attribute="Male")
def _random_translate(image, new_image_size, noise_fill=True):
"""Randomly translate image and pad with noise."""
image_shape = image.shape.as_list()
image_size = image_shape[0]
mask = tf.ones(image_shape)
mask = tf.image.resize_image_with_crop_or_pad(
mask, 2 * new_image_size - image_size, 2 * new_image_size - image_size)
image = tf.image.resize_image_with_crop_or_pad(
image, 2 * new_image_size - image_size, 2 * new_image_size - image_size)
# Range of bounding boxes is from [0, new_image_size-image_size).
offset_height = tf.random_uniform(
shape=(), minval=0, maxval=new_image_size - image_size, dtype=tf.int32)
offset_width = tf.random_uniform(
shape=(), minval=0, maxval=new_image_size - image_size, dtype=tf.int32)
image = tf.image.crop_to_bounding_box(image, offset_height, offset_width,
new_image_size, new_image_size)
if noise_fill:
mask = tf.image.crop_to_bounding_box(mask, offset_height, offset_width,
new_image_size, new_image_size)
image += tf.random_uniform(
(new_image_size, new_image_size, image_shape[-1]), 0, 1.0) * (1 - mask)
return image
class TranslatedCelebADataProvider(CelebADataProvider):
"""Celeb A Data Provider with images translated randomly.
Attributes:
init_op: Initialization operation for the data provider.
images: (4-D tensor) Images of shape (batch, height, width, channels).
labels: (1-D tensor) Data labels of size (batch,).
mask: (1-D boolean tensor) Data mask. Used when data is not repeated to
indicate the fraction of the batch with true data in the final batch.
num_classes: (integer) Number of classes in the dataset.
num_examples: (integer) Number of examples in the dataset.
num_channels: (integer) Number of image color channels.
use_augmentation: (boolean) Whether to use data augmentation or not.
image_size: (Integer) Size of the image.
iterator: Tensorflow data iterator.
class_names: (List of strings) Name of classes in the order of the labels.
"""
image_size = 48
def _preprocess_fn(self):
"""Preprocessing."""
crop = True
image_size = self.image_size
def preprocess_image(image):
"""Preprocesses the given image.
Args:
image: Tensor `image` representing a single image example of
arbitrary size.
Returns:
Preprocessed image.
"""
# Normalize to 0-1 range.
image = tf.image.convert_image_dtype(image, dtype=tf.float32)
if crop:
image = tf.image.crop_to_bounding_box(image, 40, 20, 218 - 80, 178 - 40)
image = tf.image.resize_bicubic([image], [32, 32])[0]
return _random_translate(image, image_size, noise_fill=True)
return functools.partial(extract_data_celeba,
preprocess_image=preprocess_image,
attribute="Male")
# ===== Function that provides data. ======
_DATASETS = {
"cifar10": CIFAR10DataProvider,
"mnist": MNISTDataProvider,
"celeba32": CelebADataProvider,
"trans_celeba48": TranslatedCelebADataProvider,
}
def get_data_provider(dataset_name):
"""Returns dataset by name."""
return _DATASETS[dataset_name]
|
# Copyright 2021 Zefeng Zhu
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# @Created Date: 2021-12-14 09:46:48 pm
# @Filename: plot.py
# @Email: [email protected]
# @Author: Zefeng Zhu
# @Last Modified: 2021-12-18 11:29:16 am
from ResNetPPI.demo_configs import *
from ResNetPPI.utils import get_bins_tex
from matplotlib.animation import FuncAnimation
def plot2dist6d(pdb_id, pdb_binary_int, binned_dist6d_1, binned_dist6d_12, outdir='./figs'):
    titles = get_bins_tex(0.5, 0, 20, r'$[20,+\infty)$', non_contact_at_first=False)
fig, ax = plt.subplots(figsize=(6, 5))
ax.set_aspect('equal')
ax.set_title(titles[0])
cax = ax.pcolor(binned_dist6d_12[0, :, :], vmin=0, vmax=1, cmap='Blues')
ax.invert_yaxis()
#fig.colorbar(cax)
def animate(i):
ax.set_title(titles[i])
cax.set_array(binned_dist6d_12[i, :, :].flatten())
anim = FuncAnimation(fig, animate, interval=150, frames=binned_dist6d_12.shape[0], repeat=True)
fig.show()
anim.save(f'{outdir}/{pdb_id}.{pdb_binary_int.chain_1.struct_asym_id}.{pdb_binary_int.chain_2.struct_asym_id}_dist6d_maps.gif', writer='pillow')
# ---
titles = get_bins_tex(0.5, 2, 20, non_contact_at_first=False)
fig, ax = plt.subplots(figsize=(6, 5))
ax.set_aspect('equal')
ax.set_title(titles[0])
cax = ax.pcolor(binned_dist6d_1[0, :, :], vmin=0, vmax=1, cmap='Blues')
ax.invert_yaxis()
#fig.colorbar(cax)
def animate(i):
ax.set_title(titles[i])
cax.set_array(binned_dist6d_1[i, :, :].flatten())
anim = FuncAnimation(fig, animate, interval=150, frames=binned_dist6d_1.shape[0], repeat=True)
fig.show()
anim.save(f'{outdir}/{pdb_id}.{pdb_binary_int.chain_1.struct_asym_id}_dist6d_maps.gif', writer='pillow')
|
from stable_baselines3.ppo.policies import CnnPolicy, MlpPolicy
from stable_baselines3.ppo.ppo import PPO
|
import time
from selenium import webdriver
browser = webdriver.Chrome()
browser.get("http://www.seleniumframework.com/python-course/")
browser.implicitly_wait(10)
browser.maximize_window()
# click the subscribe button
subscribe = browser.find_element_by_xpath('//*[@id="text-11"]/div/form/input[3]').click()
# Get the window handles and switch to the launched window
window_handles = browser.window_handles
print(window_handles, '\n')
browser.switch_to.window(window_handles[1])
browser.maximize_window()
time.sleep(5)
# Switch to new window and maximize
email_address = browser.find_element_by_xpath('//*[@id="pageHolder"]/div[1]/form/input[1]')
email_address.send_keys('[email protected]')
subscription_request = browser.find_element_by_xpath('//*[@id="pageHolder"]/div[1]/form/p[2]/input')
subscription_request.click()
browser.close()
time.sleep(3)
browser.quit()
|
from collections import OrderedDict
from django.core.paginator import InvalidPage
from rest_framework import status
from rest_framework.exceptions import NotFound
from rest_framework.pagination import _positive_int
from rest_framework_datatables.pagination import DatatablesPageNumberPagination, DatatablesMixin
from rest_framework.response import Response
class CustomeDatatablesPaginator(DatatablesPageNumberPagination):
def get_page_size(self, request):
if self.page_size_query_param:
try:
page_size = request.query_params[self.page_size_query_param]
if page_size == "-1":
return 100000
return _positive_int(
request.query_params[self.page_size_query_param],
strict=True,
cutoff=self.max_page_size
)
except (KeyError, ValueError):
pass
return self.page_size
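    # Behaviour sketch (not part of the original code): with a request such as
    # ?length=-1 the paginator returns 100000 rows (effectively "all"), while
    # ?length=25 returns 25, capped at max_page_size.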
def get_paginated_response(self, data):
if not self.is_datatable_request:
if 'http_status' in data:
stat = data['http_status']
data.pop('http_status')
else:
stat = status.HTTP_200_OK
response = OrderedDict([
('count', self.page.paginator.count),
('next', self.get_next_link()),
('previous', self.get_previous_link()),
('data', data["data"]),
('msg', data["msg"]),
('status', data["status"]),
('status_code', data["status_code"])
])
if 'summary' in data:
response['summary'] = data['summary']
return Response(response, status=stat)
response=OrderedDict([
('recordsTotal', self.total_count),
('recordsFiltered', self.count),
('data', data["data"]),
('msg', data["msg"]),
('status', data["status"]),
('status_code', data["status_code"])
])
if 'summary' in data:
response['summary'] = data['summary']
return Response(response)
|
import json
import os
from copy import deepcopy
from django.core.files.uploadedfile import UploadedFile
from django.core.management.base import BaseCommand
from couchforms.models import DefaultAuthContext
from corehq.apps.hqadmin.management.commands.export_domain_forms_raw import (
FormMetadata,
)
from corehq.apps.receiverwrapper.auth import AuthContext
from corehq.apps.receiverwrapper.util import submit_form_locally
class Command(BaseCommand):
help = "Sumbit all forms saved by the ``export_domain_forms_raw`` command." \
"Note that if the domain you are importing into is not the same domain you" \
"exported from then there will be some inconsistencies since the new domain" \
"won't have the same user ID's or Application / App Build ID's"
def add_arguments(self, parser):
parser.add_argument('domain')
parser.add_argument('folder_path')
def handle(self, domain, folder_path, **options):
if not os.path.exists(folder_path):
raise Exception('Folder path must be the path to a directory')
for name in os.listdir(folder_path):
form_dir = os.path.join(folder_path, name)
if not os.path.isdir(form_dir):
continue
with open(os.path.join(form_dir, 'metadata.json'), 'r', encoding='utf-8') as meta:
metadata = FormMetadata.wrap(json.load(meta))
form_path = os.path.join(form_dir, 'form.xml')
            if not os.path.isfile(form_path):
self.stderr.write('{} missing'.format(form_path))
continue
attachments_dict = {}
for name in metadata.attachments:
path = os.path.join(form_dir, name)
if os.path.exists(path):
file = open(path, 'rb')
attachments_dict[name] = UploadedFile(file, name)
else:
self.stderr.write('WARN: missing attachment: {}'.format(path))
with open(form_path, 'r', encoding='utf-8') as form:
xml_data = form.read()
auth_type = metadata.auth_context.get('doc_type', None)
if auth_type == 'AuthContext':
auth_context = AuthContext.wrap(deepcopy(metadata.to_json()['auth_context']))
auth_context.domain = domain
else:
auth_context = DefaultAuthContext()
result = submit_form_locally(
xml_data,
domain,
attachments=attachments_dict,
received_on=metadata.received_on,
auth_context=auth_context,
app_id=metadata.app_id,
build_id=metadata.build_id
)
if not result.response.status_code == 201:
self.stderr.write(str(result.response))
|
import json
import fileinput
import os
import struct
import numpy as np
import matplotlib.pyplot as plt
from math import pow
##def read_data(void):
## temp=""
### C:\\Users\LeeBruce\Desktop\\idkp1-10.txt
def delete_black(FileName):
    file1 = open(FileName, 'r', encoding='utf-8') # file to strip blank lines from
    file2 = open("C:\\Users\LeeBruce\Desktop\\idkp1-102.txt", 'w', encoding='utf-8') # output file with blank lines removed
try:
for line in file1.readlines():
if line == '\n':
line = line.strip("\n")
file2.write(line)
finally:
file1.close()
file2.close()
def CuttingFile():
temp=""
f = open("C:\\Users\LeeBruce\Desktop\\idkp1-102.txt", "r", encoding='utf-8')
temp=f.readline()
#print(temp)
lines = f.readlines()
data_1 = open("C:\\Users\LeeBruce\Desktop\\奇数行.txt", 'w', encoding='utf-8')
data_2 = open("C:\\Users\LeeBruce\Desktop\\偶数行.txt", 'w', encoding='utf-8')
    num = 0  # line number minus 1
for line in lines:
        if (num % 2) == 0:  # an even num means this is an odd-numbered line
            print(line.strip(), file=data_1)  # .strip removes the trailing newline
        else:  # an odd num means this is an even-numbered line
print(line.strip(), file=data_2)
num += 1
data_1.close()
data_2.close()
f.close()
f1=open("C:\\Users\LeeBruce\Desktop\\偶数行.txt", 'r', encoding='utf-8')
d=[30]
c=[10149]
block=[]
profit=[408,921,1329,11,998,1009,104,839,943,299,374,673,703,954,1657,425,950,1375,430,541,971,332,483,815,654,706,1360,956,992,1948,
408,921,1329,11,998,1009,104,839,943,299,374,673,703,954,1657,425,950,1375,430,541,971,332,483,815,654,706,1360,956,992,1948,228,
435,663,575,687,1262,470,609,1079,696,907,1603,273,961,1234,281,461,742,54,957,1011,149,258,407,28,90,118,245,949,1194,246,298,544
,205,546,751,33,712,745,335,956,1291,163,918,1081,79,671,750,972,991,1963,217,962,1179,380,846,1226,158,671,829,39,701,740,258,577,835,
5,682,687,300,364,664,105,946,1051,68,675,743,450,465,915,686,697,1383,355,367,722,106,131,237,296,868,1164,621,807,1428,283,428,711
,230,573,803,359,772,1131,270,642,912,134,507,641,21,242,263,236,705,941,469,785,1254,196,349,545,405,985,1390,865,988,1853,355,405,760,460
,939,1399,142,408,550,291,436,727,644,922,1566,432,890,1322,352,885,1237,139,269,408,10,137,147,593,601,1194,724,764,1488,672,900,1572,892,
981,1873,597,641,1238,810,996,1806,459,816,1275,416,872,1288,310,945,1255,283,674,957,180,697,877,112,629,741,559,869,1428,79,802,881,164
,192,356,323,340,663,333,464,797,472,496,968,234,914,1148,285,691,976,401,513,914,599,755,1354,391,928,1319,244,502,746,541,837,1378,208,970,
1178,107,449,556,705,887,1592,468,802,1270,444,683,1127,222,958,1180,18,24,42,153,540,693,54,633,687,853,903,1756,399,452,851,108,161,269,
328,431,759,]
weight=[508,1021,1321,111,1098,1196,204,939,1107,399,474,719,803,1054,1781,525,1050,1362,530,641,903,432,583,894,754,806,1241,1056,1092,1545,
508,1021,1321,111,1098,1196,204,939,1107,399,474,719,803,1054,1781,525,1050,1362,530,641,903,432,583,894,754,806,1241,1056,1092,1545,
328,535,579,675,787,1037,570,709,1171,796,1007,1251,373,1061,1101,381,561,774,154,1057,1198,249,358,446,128,190,288,345,1049,1053,346,
398,622,305,646,930,133,812,892,435,1056,1406,263,1018,1192,179,771,802,1072,1091,1418,317,1062,1092,480,946,1064,258,771,846,139,801,
888,358,677,679,105,782,862,400,464,747,205,1046,1133,168,775,839,550,565,727,786,797,1098,455,467,623,206,231,232,396,968,1064,721,907,
1406,383,528,636,330,673,719,459,872,1316,370,742,846,234,607,737,121,342,372,336,805,1090,569,885,1245,296,449,729,505,1085,1364,965,
1088,1510,455,505,758,560,1039,1363,242,508,642,391,536,855,744,1022,1231,532,990,992,452,985,1021,239,369,450,110,237,264,693,701,1176,
824,864,1288,772,1000,1062,992,1081,1395,697,741,899,910,1096,1919,559,916,1296,516,972,1077,410,1045,1302,383,774,809,280,797,927,212,
729,923,659,969,1065,179,902,1010,264,292,441,423,440,450,433,564,826,572,596,1057,334,1014,1148,385,791,1019,501,613,625,699,855,1289,
491,1028,1381,344,602,609,641,937,1311,308,1070,1215,207,549,592,805,987,1133,568,902,952,544,783,1111,322,1058,1106,118,124,206,253,
640,756,154,733,879,953,1003,1510,499,552,883,208,261,437,428,531,728,]
    lines1 = f1.readlines()
    count = len(lines1)
    print("Number of lines read from the split file:", count)
    for line in lines1:
        block = line.split(",", 1)
        d.append(block[0].split("*")[1])
        c.append(block[1].split(" ")[-1])
f1.close()
count1=len(d)
return d,c,profit,weight
def testbage(list,weight_most,value,weight):
def bag_0_1(weight, value, weight_most): # return max value
num = len(weight)
        weight.insert(0, 0)  # prepend a dummy item 0 so real items start at index 1
        value.insert(0, 0)  # prepend a dummy item 0 so real items start at index 1
        bag = np.zeros((num + 1, weight_most + 1), dtype=np.int32)  # table indices start at zero
for i in range(1, num + 1):
for j in range(1, weight_most + 1):
if weight[i] <= j:
bag[i][j] = max(bag[i - 1][j - weight[i]] + value[i], bag[i - 1][j])
else:
bag[i][j] = bag[i - 1][j]
# print(bag)
return bag[-1, -1]
result = bag_0_1(weight, value, weight_most)
return result
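# Illustrative check of the 0/1-knapsack helper above (numbers are made up):
#   testbage(None, 5, [6, 10, 12], [1, 2, 3]) == 22
# i.e. with capacity 5 the best choice is the two items weighing 2 and 3.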
if __name__ == '__main__':
    x = input("Please enter the file path: ")
l=15
delete_black(x)
a,b,c,d=CuttingFile()
#kio=testbage(a, b ,c, d)
# print(kio)
print("读入的价值列表为:", c)
print("读入的重量列表为:",d )
print("D{0-1} KP数据的最优解为:",l)
#-------------损失函数的收敛曲线图---------------
n=len(d)
x=range(n)
plt.scatter(x,d,color='red',linewidth=3)
plt.title("Scatter plot of profit and weight")
plt.xlabel("weight")
plt.ylabel("profit")
plt.show()
|
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.cm
import matplotlib.colors
# named colors
WHITE = "#ffffff"
BLACK = "#000000"
BLUE = "#1f77b4" # matplotlib C0
ORANGE = "#ff7f0e" # matplotlib C1
GREEN = "#2ca02c" # matplotlib C2
RED = "#d62728" # matplotlib C3
PURPLE = "#9467bd" # matplotlib C4
BROWN = "#8c564b" # matplotlib C5
PINK = "#e377c2" # matplotlib C6
GRAY = "#7f7f7f" # matplotlib C7
YELLOW = "#bcbd22" # matplotlib C8
TEAL = "#17becf" # matplotlib C9
CMAP_TO_OPPOSITE = {}
def register_opposite_color(cmap_name, opposite_color):
CMAP_TO_OPPOSITE[cmap_name] = opposite_color
CMAP_TO_OPPOSITE[plt.get_cmap(cmap_name)] = opposite_color
register_opposite_color("viridis", RED)
register_opposite_color("plasma", GREEN)
register_opposite_color("inferno", GREEN)
register_opposite_color("magma", GREEN)
class RichardsonColormap(matplotlib.colors.Colormap):
"""
A matplotlib Colormap subclass which implements the colormap described in J. L. Richardson, Comput. Phys. Commun. 63, 84 (1991).
This colormap is appropriate for visualizing complex-valued functions in two dimensions.
"""
def __init__(self):
self.name = "richardson"
self.N = 256
def __call__(self, x, alpha=1, bytes=False):
real, imag = np.real(x), np.imag(x)
mag = np.sqrt((real ** 2) + (imag ** 2))
z = (mag ** 2) - 1
zplus = z + 2
eta = np.where(np.greater_equal(z, 0), 1, -1)
common = 0.5 + (eta * (0.5 - (mag / zplus)))
real_term = real / (np.sqrt(6) * zplus)
imag_term = imag / (np.sqrt(2) * zplus)
rgba = np.ones(
np.shape(x) + (4,)
) # create rgba array of shape shape as x, except in last dimension, where rgba values will be stored
rgba[:, 0] = common + (2 * real_term) # red
rgba[:, 1] = common - real_term + imag_term # green
rgba[:, 2] = common - real_term - imag_term # blue
return rgba
# register cmap so that plt.get_cmap('richardson') can find it
matplotlib.cm.register_cmap(name="richardson", cmap=RichardsonColormap())
register_opposite_color("richardson", WHITE)
blue_black_red_cdit = {
"red": ((0.0, 0.0, 0.0), (0.5, 0.0, 0.1), (1.0, 1.0, 1.0)),
"blue": ((0.0, 0.0, 1.0), (0.5, 0.1, 0.0), (1.0, 0.0, 0.0)),
"green": ((0.0, 0.0, 0.0), (1.0, 0.0, 0.0)),
}
blue_black_red_cmap = matplotlib.colors.LinearSegmentedColormap(
"BlueBlackRed", blue_black_red_cdit
)
matplotlib.cm.register_cmap(name="BlueBlackRed", cmap=blue_black_red_cmap)
class RichardsonNormalization(matplotlib.colors.Normalize):
"""A matplotlib Normalize subclass which implements an appropriate normalization for :class:`RichardsonColormap`."""
def __init__(self, equator_magnitude=1):
self.equator_magnitude = np.abs(equator_magnitude)
def __call__(self, x, **kwargs):
return np.ma.masked_invalid(x / self.equator_magnitude, copy=False)
def autoscale(self, *args):
pass
def autoscale_None(self, *args):
pass
class AbsoluteRenormalize(matplotlib.colors.Normalize):
def __init__(self):
super().__init__(vmin=0, vmax=1, clip=False)
def __call__(self, x, **kwargs):
return np.ma.masked_invalid(np.abs(x) / np.nanmax(np.abs(x)), copy=False)
def autoscale(self, *args):
pass
def autoscale_None(self, *args):
pass
|
"""
Helper function for parsing config file
"""
import configparser
import logging
from logging.config import fileConfig
# Setup logging
fileConfig('log_config.ini')
logger = logging.getLogger()
conf_file = '/var/crackq/files/crackq.conf'
def hc_conf():
"""
Parse config file and return dictionary of file
locations
for rules, wordlists, logs etc
:return: dictionary containing conf entries
"""
logger.info("Reading from config file {}".format(conf_file))
config = configparser.ConfigParser()
config.optionxform = str
config.read(conf_file)
conf_dict = {s: dict(config.items(s)) for s in config.sections()}
#logger.debug("Conf Dictionary:\n{}".format(conf_dict))
return conf_dict
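# Illustrative sketch (assumed file layout, not from the original config): given
#   [rules]
#   best64 = /var/crackq/files/rules/best64.rule
# hc_conf() would return {'rules': {'best64': '/var/crackq/files/rules/best64.rule'}},
# with option names preserved case-sensitively because optionxform = str.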
|
#######################################################
# Takes the esigner360 chart and the repository index,
# finds the newest version of each chart in the repository,
# iterates through the dependencies and, if a newer version
# of a dependency exists, updates it
#######################################################
from requests.models import HTTPBasicAuth
import yaml
import os
import requests
# returns an extended semver string so that versions can be compared as plain strings
def semVer2Number(semVer):
split = semVer.split(".")
ret = ""
for ver in split:
ret = ret + "." + ("0000000000000000000000" + ver)[-20:]
return ret
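# Example (illustrative): the zero-padding makes plain string comparison order
# versions correctly, e.g.
#   semVer2Number("1.2.10") > semVer2Number("1.2.9")   # True
#   "1.2.10" > "1.2.9"                                 # False for raw strings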
##########################
SEMVER_MAJOR=0
SEMVER_MINOR=1
SEMVER_PATCH=2
def incSemVer(part,semVer):
semVerNum = list(map(lambda s: int(s), semVer.split(".")))
semVerNum[part]=semVerNum[part]+1
return ".".join(list(map(lambda n:str(n), semVerNum)))
##########################
def maxChartEntry(chartVersion):
return {
"extendedVer": semVer2Number(chartVersion["version"]),
"realVer": chartVersion["version"],
"chart": chartVersion
}
##########################
def getHighestVersionsFormRepo(repoIndex):
maxChartVer = {}
for chartName, chart in repoIndex["entries"].items():
#print(chartName)
for chartVersion in chart:
# initialize at first chart encounter
if not chartName in maxChartVer:
maxChartVer[chartName] = maxChartEntry(chartVersion)
# update if version greater
if semVer2Number(chartVersion["version"]) > maxChartVer[chartName]["extendedVer"]:
maxChartVer[chartName] = maxChartEntry(chartVersion)
return maxChartVer
##########################
def loadRepoIndex(helmRepoUrl, helmRepoUser, helmRepoPwd):
repoIndexRes = requests.get(helmRepoUrl+"/index.yaml",
auth=HTTPBasicAuth(helmRepoUser, helmRepoPwd), verify=False)
repoIndex = yaml.load(repoIndexRes.text, Loader = yaml.FullLoader)
return repoIndex
##########################
def loadChart():
with open("Chart.yaml") as f:
return yaml.load(f, Loader=yaml.FullLoader)
##### END DEFs ########
helmRepoUrl = os.environ["HELM_REPO_URL"]
helmRepoUser = os.environ["HELM_REPO_USER"]
helmRepoPwd = os.environ["HELM_REPO_PWD"]
# load repo index
repoIndex = loadRepoIndex(helmRepoUrl, helmRepoUser, helmRepoPwd)
maxChartVer = getHighestVersionsFormRepo(repoIndex)
chart = loadChart()
updated = False
updatedModules = []
updatedModulesDetail = []
for idx, dep in enumerate(chart["dependencies"]):
depName=dep["name"]
depVer=dep["version"]
maxDepVer = maxChartVer[depName]["chart"]["version"]
if semVer2Number(depVer) < semVer2Number(maxDepVer):
updatedModules.append(depName)
updatedModulesDetail.append(f"upgrading dependency {depName} {depVer} -> {maxDepVer}")
#print(f"upgrading dependency {depName} {depVer} -> {maxDepVer}")
chart["dependencies"][idx]["version"] = maxDepVer
updated = True
if updated:
oldChartVer = chart["version"]
newChartVer = incSemVer(SEMVER_PATCH, chart["version"])
updatedModulesStr = ",".join(updatedModules)
updatedModulesDetailStr = "\n".join(updatedModulesDetail)
print(f"updating chart version {oldChartVer} -> {newChartVer} (updated modules: {updatedModulesStr})\n{updatedModulesDetailStr}")
chart["version"] = newChartVer
with open("Chart.yaml","w") as f:
yaml.dump(chart,f,indent=2)
|
#encoding=utf-8
import datetime, time
import re
def get_year_start_end():
import calendar
day_now = time.localtime()
    day_begin = '%d-01-01' % (day_now.tm_year) # the first day is always the 1st
wday, monthRange = calendar.monthrange(day_now.tm_year, 12)
day_end = '%d-12-%02d' % (day_now.tm_year,monthRange)
return day_begin,day_end
def get_month_start_end():
import calendar
day_now = time.localtime()
    day_begin = '%d-%02d-01' % (day_now.tm_year, day_now.tm_mon) # the month always starts on the 1st
    wday, monthRange = calendar.monthrange(day_now.tm_year, day_now.tm_mon) # get this month's length: first value is the weekday of the 1st (0-6), second is the number of days in the month
day_end = '%d-%02d-%02d' % (day_now.tm_year, day_now.tm_mon, monthRange)
return day_begin,day_end
def get_month_start_end_by_month(sdate):
import calendar
day_now = time.localtime()
day_begin = '%d-%02d-01 00:00:00' % (sdate.year, sdate.month) # the first day of the month is always the 1st
wday, monthRange = calendar.monthrange(sdate.year, sdate.month) # weekday of the 1st (0-6) and the number of days in the month
day_end = '%d-%02d-%02d 23:59:59' % (sdate.year, sdate.month, monthRange)
date_day_begin = datetime.datetime.strptime(day_begin,"%Y-%m-%d %H:%M:%S")
date_day_end = datetime.datetime.strptime(day_end,"%Y-%m-%d %H:%M:%S")
next_day_begin = date_day_end+datetime.timedelta(seconds=120)
return date_day_begin , date_day_end, next_day_begin
def get_week_start_end(d=None):
if not d:
d = datetime.datetime.now()
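# weekday() is 0 for Monday, so the week computed here runs Monday through Sunday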
this_week_start = d - datetime.timedelta(days=d.weekday())
this_week_end = this_week_start + datetime.timedelta(days=6)
return this_week_start.strftime("%Y-%m-%d") + " 00:00:00",this_week_end.strftime("%Y-%m-%d")+ " 23:59:59"
def get_week_start_end_day(d=None):
if not d:
d = datetime.datetime.now()
this_week_start = d - datetime.timedelta(days=d.weekday())
this_week_end = this_week_start + datetime.timedelta(days=6)
return this_week_start.strftime("%m月%d日"),this_week_end.strftime("%m月%d日")
def humanreadable_mseconds(mseconds):
seconds = int(mseconds) // 1000
s = seconds % 60
h = seconds // 3600
if h:
m = seconds // 60 % 60
ret = u"%02d:%02d:%02d" % (h, m, s)
else:
m = seconds // 60
ret = u"%02d:%02d" % (m, s)
return ret
def zero_date():
d = datetime.datetime.today()
return datetime.datetime(d.year, d.month, d.day)
def datetime_to_timestamp(d):
return int(time.mktime(d.timetuple()))
def timestamp_to_datetime(response):
"Converts a unix timestamp to a Python datetime object"
if not response:
return None
try:
response = int(response)
except ValueError:
return None
return datetime.datetime.fromtimestamp(response)
def days_ago(day=30):
return datetime.datetime.now() - datetime.timedelta(day)
def nature_days_ago(day=30):
return zero_date() - datetime.timedelta(day)
def after_days(day=30):
return datetime.datetime.now() + datetime.timedelta(day)
def after_from_days(dd,day=1):
return dd + datetime.timedelta(day)
def nature_after_days(day=30):
return zero_date() + datetime.timedelta(day)
def nature_after_days_end(day=30):
return zero_date() + datetime.timedelta(day) - datetime.timedelta(seconds=60)
def seconds_to_zero():
d = nature_after_days(1)
return int(datetime_to_timestamp(d) - int(time.time()))
def is_weekend(d=None):
if d is None:
d = datetime.datetime.today()
return d.weekday() in (5, 6) # Saturday or Sunday
def minutes_ago(seconds=300):
return datetime.datetime.now() - datetime.timedelta(seconds=seconds)
def after_minutes(seconds=300):
return datetime.datetime.now() + datetime.timedelta(seconds=seconds)
def int_day(d=None):
if d is None:
d = datetime.datetime.today()
return int("%s%d%d" % (d.year,d.month, d.day))
def int_days(d=None):
if d is None:
d = datetime.datetime.today()
return int("%s%02d%02d" % (d.year,d.month, d.day))
def int_month(d=None):
if d is None:
d = datetime.datetime.today()
return int("%s%d" % (d.year, d.month))
def int_week(d=None):
if d is None:
d = datetime.datetime.today()
monday = d.weekday()
d = d - datetime.timedelta(monday)
return int("%s%d%d" % (d.year, d.month, d.day))
def int_weeks(d=None):
if d is None:
d = datetime.datetime.today()
monday = d.weekday()
d = d - datetime.timedelta(monday)
return int("%s%02d%02d" % (d.year, d.month, d.day))
def int_last_weeks(d=None):
if d is None:
d = datetime.datetime.today() - datetime.timedelta(7)
monday = d.weekday()
d = d - datetime.timedelta(monday)
return int("%s%02d%02d" % (d.year, d.month, d.day))
def is_legal_date(d):
timere = r"^(\d{2}|\d{4})-(0[1-9]|1[012])-(0[1-9]|[12][0-9]|3[01])$"
return re.match(timere, d) is not None
def out_week_date(year,day):
fir_day = datetime.datetime(year,1,1)
zone = datetime.timedelta(days=day-1)
return datetime.datetime.strftime(fir_day + zone, "%Y-%m-%d")
|
import warnings
import numpy as np
import pandas as pd
import pymc3 as pm
import arviz as az
import theano.tensor as tt
from gumbi.utils.misc import assert_in, assert_is_subset
from gumbi.utils.gp_utils import get_ℓ_prior
from gumbi.aggregation import DataSet
from gumbi.arrays import *
from gumbi.arrays import ParameterArray as parray
from gumbi.arrays import UncertainParameterArray as uparray
from gumbi.arrays import MVUncertainParameterArray as mvuparray
from ..base import Regressor
__all__ = ['GP']
class GP(Regressor):
r"""Gaussian Process surface learning and prediction.
See Also
--------
:class:`Regressor`
Notes
-----
The GP class is built from a dataframe in the form of a :class:`DataSet` object. The output(s) are
taken from :attr:`DataSet.outputs` and the corresponding column in the tidy data frame taken from
:attr:`DataSet.names_column`. This column will be generically referred to as the `output_column` in this documentation,
but can take any value specified when the :class:`DataSet` is constructed. The model inputs are constructed by
filtering this dataframe, extracting column values, and converting these to numerical input coordinates. The main
entry point will be :meth:`fit`, which parses the dimensions of the model with :meth:`specify_model`,
extracts numerical input coordinates with :meth:`get_structured_data`, compiles the Pymc3 model with
:meth:`build_model`, and finally learns the hyperparameters with :meth:`find_MAP`.
Dimensions fall into several categories:
* Filter dimensions, those with only one level, are used to subset the dataframe but are not included as explicit
inputs to the model. These are not specified explicitly, but rather any continuous or categorical dimension with only
one level is treated as a filter dimension.
* Continuous dimensions are treated as explicit coordinates and given a Radial Basis Function kernel
* Linear dimensions (which must be a subset of `continuous_dims`) have an additional linear kernel.
* Coregion dimensions imply a distinct but correlated output for each level
* If more than one output is specified, the `output_column` is treated as a categorical dim.
A non-additive model has the form:
.. math::
y &\sim \text{Normal} \left( \mu, \sigma \right) \\
\mu &\sim \mathcal{GP} \left( K \right) \\
K &= \left( K^\text{cont}+K^\text{lin} \right) K^\text{coreg}_\text{outputs} \prod_{n} K^\text{coreg}_{n} \\
K^\text{cont} &= \text{RBF} \left( \ell_{i}, \eta \right) \\
K^\text{lin} &= \text{LIN} \left( c_{j}, \tau \right) \\
K^\text{coreg} &= \text{Coreg} \left( \boldsymbol{W}, \kappa \right) \\
\sigma &\sim \text{Exponential} \left( 1 \right) \\
Where :math:`i` denotes a continuous dimension, :math:`j` denotes a linear dimension, and :math:`n` denotes a
categorical dimension (excluding the `output_column`). :math:`K^\text{cont}` and :math:`K^\text{lin}` each consist of a
joint kernel encompassing all continuous and linear dimensions, respectively, whereas :math:`K^\text{coreg}_{n}` is
a distinct kernel for a given categorical dimension.
The additive model has the form:
.. math::
\mu &\sim \mathcal{GP}\left( K^\text{global} \right) + \sum_{n} \mathcal{GP}\left( K_{n} \right) \\
K^\text{global} &= \left( K^\text{cont}+K^\text{lin} \right) K^\text{coreg}_\text{outputs} \\
K_{n} &= \left( K^\text{cont}_{n}+K^\text{lin}_{n} \right) K^\text{coreg}_\text{outputs} K^\text{coreg}_{n} \\
Note that, in the additive model, :math:`K^\text{cont}_{n}` and :math:`K^\text{lin}_{n}` still consist of only
the continuous and linear dimensions, respectively, but have unique priors corresponding to each categorical dimension.
However, there is only one :math:`K^\text{coreg}_\text{outputs}` kernel.
Parameters
----------
dataset : DataSet
Data for fitting.
outputs : str or list of str, default None
Name(s) of output(s) to learn. If ``None``, uses all values from ``outputs`` attribute of *dataset*.
seed : int
Random seed
Examples
--------
A GP object is created from a :class:`DataSet` and can be fit immediately with the default dimension
configuration (regressing `r` with RBF + linear kernels for `X` and `Y`):
>>> import pandas as pd
>>> import gumbi as gmb
>>> df = pd.read_pickle(gmb.data.example_dataset)
>>> outputs=['a', 'b', 'c', 'd', 'e', 'f']
>>> ds = gmb.DataSet(df, outputs=outputs, log_vars=['Y', 'b', 'c', 'd', 'f'], logit_vars=['X', 'e'])
>>> gp = gmb.GP(ds, outputs='d').fit()
Note that last line is equivalent to
>>> gp = gmb.GP(ds, outputs='d')
>>> gp.specify_model()
>>> gp.build_model()
>>> gp.find_MAP()
The model can be specified with various continuous, linear, and categorical dimensions.
`X` and `Y` are always included in both ``continuous_dims`` and ``linear_dims``.
>>> gp.specify_model(continuous_dims='lg10_Z', linear_dims='lg10_Z', categorical_dims='Pair')
>>> gmb.GP(ds).fit(continuous_dims='lg10_Z', linear_dims='lg10_Z', categorical_dims='Pair') # equivalent
After the model is fit, define a grid of points at which to make predictions. The result is a
:class:`ParameterArray`:
>>> gp.prepare_grid()
>>> gp.grid_points
('X', 'Y'): [(0.075 , 10.) (0.08358586, 10.) (0.09217172, 10.) ...
(0.90782828, 800.) (0.91641414, 800.) (0.925 , 800.)]
Make predictions, returning an :class:`UncertainParameterArray`
>>> gp.predict_grid()
>>> gp.predictions
d['μ', 'σ2']: [[(0.70728056, 0.16073197) (0.70728172, 0.16073197)
(0.70728502, 0.16073197) ... (0.70727954, 0.16073197)
(0.7072809 , 0.16073197) (0.70728058, 0.16073197)]
...
[(0.70749247, 0.1607318 ) (0.70773573, 0.16073116)
(0.70806603, 0.16072949) ... (0.70728449, 0.16073197)
(0.70728194, 0.16073197) (0.7072807 , 0.16073197)]]
The `uparray` makes it easy to calculate standard statistics in natural or transformed/standardized space while
maintaining the original shape of the array:
>>> gp.predictions.z.dist.ppf(0.025)
array([[-3.1887916 , -3.18878491, -3.18876601, ..., -3.18879744,
-3.18878966, -3.18879146],
...,
[-3.1875742 , -3.18617286, -3.18426272, ..., -3.18876906,
-3.18878366, -3.18879081]])
Finally, plot the results:
>>> import matplotlib.pyplot as plt
>>>
>>> plt.style.use(str(gmb.style.futura))
>>> x_pa = gp.predictions_X
>>> y_upa = gp.predictions
>>> gmb.ParrayPlotter(x_pa, y_upa).plot()
Plot a slice down the center of the prediction along each axis
>>> x_pa, y_upa = gp.get_conditional_prediction(Y=88)
>>>
>>> ax = gmb.ParrayPlotter(x_pa, y_upa).plot()
>>> ax.set_xticklabels([int(float(txt.get_text())*100) for txt in ax.get_xticklabels()]);
Plot a slice down the center of the prediction along each axis
>>> x_pa, y_upa = gp.get_conditional_prediction(X=0.5)
>>>
>>> ax = gmb.ParrayPlotter(x_pa, y_upa).plot()
>>> ax.set_xticklabels([int(float(txt.get_text())*100) for txt in ax.get_xticklabels()]);
Attributes
----------
dataset : DataSet
Data for fitting.
outputs : list of str
Name(s) of output(s) to learn.
seed : int
Random seed
continuous_dims : list of str
Columns of dataframe used as continuous dimensions
linear_dims : list of str
Subset of continuous dimensions to apply an additional linear kernel.
continuous_levels : dict
Values considered within each continuous column as ``{dim: [level1, level2]}``
continuous_coords : dict
Numerical coordinates of each continuous level within each continuous dimension as ``{dim: {level: coord}}``
categorical_dims : list of str
Columns of dataframe used as categorical dimensions
categorical_levels : dict
Values considered within each categorical column as ``{dim: [level1, level2]}``
categorical_coords : dict
Numerical coordinates of each categorical level within each categorical dimension as ``{dim: {level: coord}}``
additive : bool
Whether to treat categorical dimensions as additive or joint
filter_dims : dict
Dictionary of column-value pairs used to filter dataset before fitting
X : array
A 2D tall array of input coordinates.
y : array
A 1D vector of observations
model : pymc3.model.Model
Compiled pymc3 model
gp_dict : dict
Dictionary of model GP objects. Contains at least 'total'.
"""
def __init__(self, dataset: DataSet, outputs=None, seed=2021):
super(GP, self).__init__(dataset, outputs, seed)
self.model = None
self.gp_dict = None
self.MAP = None
self.trace = None
self.continuous_kernel = 'ExpQuad'
self.heteroskedastic_inputs = False
self.heteroskedastic_outputs = True
self.sparse = False
self.latent = False
self.n_u = 100
self.model_specs = {
'seed': self.seed,
'continuous_kernel': self.continuous_kernel,
'heteroskedastic_inputs': self.heteroskedastic_inputs,
'heteroskedastic_outputs': self.heteroskedastic_outputs,
'sparse': self.sparse,
'n_u': self.n_u,
}
################################################################################
# Model building and fitting
################################################################################
def fit(self, outputs=None, linear_dims=None, continuous_dims=None, continuous_levels=None, continuous_coords=None,
categorical_dims=None, categorical_levels=None, additive=False, seed=None, heteroskedastic_inputs=False,
heteroskedastic_outputs=True, sparse=False, n_u=100, ARD=True, **MAP_kwargs):
"""Fits a GP surface
Parses inputs, compiles a Pymc3 model, then finds the MAP value for the hyperparameters. `{}_dims` arguments
indicate the columns of the dataframe to be included in the model, with `{}_levels` indicating which values of
those columns are to be included (``None`` implies all values).
If ``additive==True``, the model is constructed as the sum of a global GP and a distinct GP for each categorical
dimension. Each of these GPs, including the global GP, consists of an RBF+linear kernel multiplied by a
coregion kernel for the `output_column` if necessary. Although the same continuous kernel structure is used for each
GP in this model, unique priors are assigned to each distinct kernel. However, there is always only one
coregion kernel for the `output_column`. The kernel for each dimension-specific GP is further multiplied by a
coregion kernel that provides an output for each level in that dimension.
See Also
--------
:meth:`build_model`
Parameters
----------
outputs : str or list of str, default None
Name(s) of output(s) to learn. If ``None``, :attr:`outputs` is used.
linear_dims : str or list of str, optional
Subset of continuous dimensions to apply an additional linear kernel. If ``None``, defaults to ``['Y','X']``.
continuous_dims : str or list of str, optional
Columns of dataframe used as continuous dimensions.
continuous_levels : str, list, or dict, optional
Values considered within each continuous column as ``{dim: [level1, level2]}``.
continuous_coords : list or dict, optional
Numerical coordinates of each continuous level within each continuous dimension as ``{dim: {level: coord}}``.
categorical_dims : str or list of str, optional
Columns of dataframe used as categorical dimensions.
categorical_levels : str, list, or dict, optional
Values considered within each categorical column as ``{dim: [level1, level2]}``.
additive : bool, default False
Whether to treat categorical_dims as additive or joint (default).
seed : int, optional.
Random seed for model instantiation. If ``None``, :attr:`seed` is used.
heteroskedastic_inputs: bool, default False
Whether to allow heteroskedasticity along continuous dimensions (input-dependent noise)
heteroskedastic_outputs: bool, default True
Whether to allow heteroskedasticity between multiple outputs (output-dependent noise). `Not yet implemented`
sparse: bool, default False
Whether to use a `sparse approximation`_ to the GP.
n_u: int, default 100
Number of inducing points to use for the sparse approximation, if required.
ARD: bool, default True
Whether to use "Automatic Relevance Determination" in the continuous kernel. If _True_, each continuous
dimension receives its own lengthscale; otherwise a single lengthscale is used for all continuous
dimensions.
**MAP_kwargs
Additional keyword arguments passed to :func:`pm.find_MAP`.
Returns
-------
self : :class:`GP`
"""
self.specify_model(outputs=outputs, linear_dims=linear_dims, continuous_dims=continuous_dims,
continuous_levels=continuous_levels, continuous_coords=continuous_coords,
categorical_dims=categorical_dims, categorical_levels=categorical_levels,
additive=additive)
self.build_model(seed=seed,
heteroskedastic_inputs=heteroskedastic_inputs,
heteroskedastic_outputs=heteroskedastic_outputs,
sparse=sparse, n_u=n_u, ARD=ARD)
self.find_MAP(**MAP_kwargs)
return self
def _make_continuous_cov(self, continuous_cov_func, D_in, idx_s, n_s, ℓ_μ, ℓ_σ, ARD=True, stabilize=True, eps=1e-6):
shape = n_s if ARD else 1
def continuous_cov(suffix):
# ℓ = pm.InverseGamma(f'ℓ_{suffix}', mu=ℓ_μ, sigma=ℓ_σ, shape=shape)
ℓ = pm.Gamma(f'ℓ_{suffix}', alpha=2, beta=1, shape=shape)
η = pm.Gamma(f'η_{suffix}', alpha=2, beta=1)
cov = η ** 2 * continuous_cov_func(input_dim=D_in, active_dims=idx_s, ls=ℓ)
if stabilize:
cov += pm.gp.cov.WhiteNoise(eps)
return cov
return continuous_cov
def _make_linear_cov(self, D_in, idx_l, n_l):
def linear_cov(suffix):
c = pm.Normal(f'c_{suffix}', mu=0, sigma=10, shape=n_l)
τ = pm.HalfNormal(f'τ_{suffix}', sigma=10)
return τ * pm.gp.cov.Linear(input_dim=D_in, c=c, active_dims=idx_l)
return linear_cov
def _make_coreg_cov(self, D_in, seed):
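# Intrinsic coregionalization: pm.gp.cov.Coregion models the covariance between
# levels of a categorical dimension as B = W @ W.T + diag(kappa), here with a
# rank-2 W (shape D_out x 2) and a per-level diagonal term kappa.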
def coreg_cov(suffix, D_out, idx):
testval = np.random.default_rng(seed).standard_normal(size=(D_out, 2))
W = pm.Normal(f"W_{suffix}", mu=0, sd=3, shape=(D_out, 2), testval=testval)
κ = pm.Gamma(f"κ_{suffix}", alpha=1.5, beta=1, shape=(D_out,))
return pm.gp.cov.Coregion(input_dim=D_in, active_dims=[idx], kappa=κ, W=W)
return coreg_cov
# TODO: add full probabilistic model description to docstring
# TODO: allow dimension-specific continuous kernel specification
# TODO: allow single multi-dimensional continuous kernel rather than independent kernels per dimension
def build_model(self, seed=None, continuous_kernel='ExpQuad', heteroskedastic_inputs=False,
heteroskedastic_outputs=True, sparse=False, n_u=100, ARD=True):
r"""Compile a marginalized pymc3 model for the GP.
Each dimension in :attr:`continuous_dims` is combined in an ExpQuad kernel with a principled
:math:`\text{InverseGamma}` prior for each lengthscale (as `suggested by Michael Betancourt`_) and a
:math:`\text{Gamma}\left(2, 1\right)` prior for variance.
.. _suggested by Michael Betancourt: https://betanalpha.github.io/assets/case_studies/gp_part3/part3.html#4_adding_an_informative_prior_for_the_length_scale
.. _pymc3 docs: https://docs.pymc.io/en/v3/api/gp/cov.html
.. _sparse approximation: https://docs.pymc.io/en/v3/pymc-examples/examples/gaussian_processes/GP-SparseApprox.html
Parameters
----------
seed : int, optional.
Random seed. If ``None``, :attr:`seed` is used.
continuous_kernel : {'ExpQuad', 'RatQuad', 'Matern32', 'Matern52', 'Exponential', or 'Cosine'}
Covariance function to use for continuous dimensions. See `pymc3 docs`_ for more details.
heteroskedastic_inputs: bool, default False
Whether to allow heteroskedasticity along continuous dimensions (input-dependent noise).
heteroskedastic_outputs: bool, default True
Whether to allow heteroskedasticity between multiple outputs (output-dependent noise). `Not yet implemented`.
sparse: bool, default False
Whether to use a `sparse approximation`_ to the GP.
n_u: int, default 100
Number of inducing points to use for the sparse approximation, if required.
ARD: bool, default True
Whether to use "Automatic Relevance Determination" in the continuous kernel. If _True_, each continuous
dimension receives its own lengthscale; otherwise a single lengthscale is used for all continuous
dimensions.
Returns
-------
self : :class:`GP`
"""
if heteroskedastic_inputs:
raise NotImplementedError('Heteroskedasticity over inputs is not yet implemented.')
X, y = self.get_shaped_data('mean')
D_in = len(self.dims)
assert X.shape[1] == D_in
seed = self.seed if seed is None else seed
self.seed = seed
self.continuous_kernel = continuous_kernel
self.heteroskedastic_inputs = heteroskedastic_inputs
self.heteroskedastic_outputs = heteroskedastic_outputs
self.sparse = sparse
self.n_u = n_u
self.latent = False
self.model_specs = {
'seed': seed,
'continuous_kernel': continuous_kernel,
'heteroskedastic_inputs': heteroskedastic_inputs,
'heteroskedastic_outputs': heteroskedastic_outputs,
'sparse': sparse,
'n_u': n_u,
}
gp_dict = self._construct_kernels(X, continuous_kernel, seed, sparse, latent=False, ARD=ARD)
with self.model:
# From https://docs.pymc.io/notebooks/GP-Marginal.html
# OR a covariance function for the noise can be given
# noise_l = pm.Gamma("noise_l", alpha=2, beta=2)
# cov_func_noise = pm.gp.cov.Exponential(1, noise_l) + pm.gp.cov.WhiteNoise(sigma=0.1)
# y_ = gp.marginal_likelihood("y", X=X, y=y, noise=cov_func_noise)
# GP is heteroskedastic across outputs by default,
# but homoskedastic across continuous dimensions
σ = pm.Exponential('σ', lam=1)
noise = pm.gp.cov.WhiteNoise(sigma=σ)
if heteroskedastic_inputs:
raise NotImplementedError('Heteroskedasticity over inputs is not yet implemented')
# noise += continuous_cov('noise')
if heteroskedastic_outputs and self.out_col in self.categorical_dims:
D_out = len(self.categorical_levels[self.out_col])
coreg_cov = self._make_coreg_cov(D_in, seed)
idx_p = self._get_dim_indexes()['p']
noise *= coreg_cov('Output_noise', D_out, idx_p)
if sparse:
Xu = pm.gp.util.kmeans_inducing_points(n_u, X)
if heteroskedastic_outputs:
warnings.warn('Heteroskedasticity over outputs is not yet implemented for sparse GP. Reverting to scalar-valued noise.')
_ = gp_dict['total'].marginal_likelihood('ml', X=X, Xu=Xu, y=y, noise=σ)
else:
_ = gp_dict['total'].marginal_likelihood('ml', X=X, y=y, noise=noise)
# self.gp_dict = gp_dict
return self
def _choose_implementation(self, sparse=False, latent=False):
if sparse and latent:
raise NotImplementedError('Sparse Latent GPs are not yet implemented.')
if sparse:
pm_gp = pm.gp.MarginalSparse
gp_kws = {'approx': "FITC"}
elif latent:
pm_gp = pm.gp.Latent
gp_kws = {}
else:
pm_gp = pm.gp.Marginal
gp_kws = {}
def implementation(*args, **kwargs):
return pm_gp(*args, **{**kwargs, **gp_kws}) # Fix once Python >= 3.9
return implementation
def _get_dim_counts(self):
dim_counts = {
'l': len(self.linear_dims),
's': len(self.continuous_dims),
'c': len(self.categorical_dims),
'p': len(self.outputs),
}
return dim_counts
def _get_dim_indexes(self):
dim_indexes = {
'l': [self.dims.index(dim) for dim in self.linear_dims],
's': [self.dims.index(dim) for dim in self.continuous_dims],
'c': [self.dims.index(dim) for dim in self.categorical_dims],
'p': self.dims.index(self.out_col) if self.out_col in self.dims else None,
}
return dim_indexes
def _prepare_lengthscales(self, X):
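# Collects per-dimension lengthscale prior parameters (mu, sigma) from the data.
# Note these are only consumed if the InverseGamma prior in _make_continuous_cov
# is re-enabled; the currently active Gamma(2, 1) prior ignores them.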
X_s = X[:, self._get_dim_indexes()['s']]
ℓ_μ, ℓ_σ = [stat for stat in np.array([get_ℓ_prior(dim) for dim in X_s.T]).T]
return ℓ_μ, ℓ_σ
def _construct_kernels(self, X, continuous_kernel, seed, sparse, latent, ARD=True, stabilize=True, eps=1e-6):
continuous_kernels = ['ExpQuad', 'RatQuad', 'Matern32', 'Matern52', 'Exponential', 'Cosine']
assert_in('Continuous kernel', continuous_kernel, continuous_kernels)
continuous_cov_func = getattr(pm.gp.cov, continuous_kernel)
D_in = len(self.dims)
ns = self._get_dim_counts()
idxs = self._get_dim_indexes()
ℓ_μ, ℓ_σ = self._prepare_lengthscales(X)
continuous_cov = self._make_continuous_cov(continuous_cov_func, D_in, idxs['s'], ns['s'], ℓ_μ, ℓ_σ,
ARD=ARD, stabilize=stabilize, eps=eps)
linear_cov = self._make_linear_cov(D_in, idxs['l'], ns['l'])
coreg_cov = self._make_coreg_cov(D_in, seed)
pm_gp = self._choose_implementation(sparse=sparse, latent=latent)
with pm.Model() as self.model:
# μ = pm.Normal('μ', mu=0, sigma=10)
# β = pm.Normal('β', mu=0, sigma=10, shape=n_l)
# lin_mean = pm.gp.mean.Linear(coeffs=[β[i] if i in idx_l else 0 for i in range(D_in), intercept=μ)
# Define a "global" continuous kernel regardless of additive structure
cov = continuous_cov('total')
if ns['l'] > 0:
cov += linear_cov('total')
# Construct a coregion kernel for each categorical_dims
if ns['c'] > 0 and not self.additive:
for dim, idx in zip(self.categorical_dims, idxs['c']):
if dim == self.out_col:
continue
D_out = len(self.categorical_levels[dim])
cov *= coreg_cov(dim, D_out, idx)
# Coregion kernel for parameters, if necessary
if self.out_col in self.categorical_dims:
D_out = len(self.categorical_levels[self.out_col])
cov_param = coreg_cov(self.out_col, D_out, idxs['p'])
cov *= cov_param
gp_dict = {'total': pm_gp(cov_func=cov)}
# Construct a continuous+coregion kernel for each categorical_dim, then combine them additively
if self.additive:
gp_dict['global'] = gp_dict['total']
for dim, idx in zip(self.categorical_dims, idxs['c']):
if dim == self.out_col:
continue
# Continuous kernel specific to this dimension
cov = continuous_cov(dim)
# TODO: Decide if each additive dimension needs its own linear kernel
if ns['l'] > 0:
cov += linear_cov(dim)
# Coregion kernel specific to this dimension
D_out = len(self.categorical_levels[dim])
cov *= coreg_cov(dim, D_out, idx)
# Coregion kernel for parameters, if necessary
if self.out_col in self.categorical_dims:
cov *= cov_param
# Combine GPs
gp_dict[dim] = pm_gp(cov_func=cov)
gp_dict['total'] += gp_dict[dim]
self.gp_dict = gp_dict
return gp_dict
def build_latent(self, seed=None, continuous_kernel='ExpQuad', prior_name='latent_prior', ARD=True, eps=1e-6):
if self.additive:
raise NotImplementedError('Additive/latent GPs are not yet implemented')
X, y = self.get_shaped_data('mean')
D_in = len(self.dims)
assert X.shape[1] == D_in
seed = self.seed if seed is None else seed
self.seed = seed
self.continuous_kernel = continuous_kernel
self.sparse = False
self.latent = True
gp_dict = self._construct_kernels(X, continuous_kernel, seed, sparse=False, latent=True, ARD=ARD,
stabilize=True, eps=eps)
with self.model:
self.prior = gp_dict['total'].prior(prior_name, X=X)
return self
def find_MAP(self, *args, **kwargs):
"""Finds maximum a posteriori value for hyperparameters in model.
Parameters
----------
*args
Positional arguments passed to :func:`pm.find_MAP`
**kwargs
Keyword arguments passed to :func:`pm.find_MAP`
"""
assert self.model is not None
with self.model:
self.MAP = pm.find_MAP(*args, **kwargs)
return self.MAP
def sample(self, *args, **kwargs):
"""Draws samples from the posterior for the hyperparameters in model.
Parameters
----------
*args
Positional arguments passed to :func:`pm.sample`
**kwargs
Keyword arguments passed to :func:`pm.sample`
"""
defaults = {
'return_inferencedata': True,
'random_seed': self.seed,
}
assert self.model is not None
with self.model:
self.trace = pm.sample(*args, **{**defaults, **kwargs})
return self.trace
def predict(self, points_array, with_noise=True, additive_level='total', **kwargs):
"""Make predictions at supplied points using specified gp
Parameters
----------
output : str
points : ParameterArray
Tall ParameterArray vector of coordinates for prediction, must have one layer per ``self.dims``
with_noise : bool, default True
Whether to incorporate aleatoric uncertainty into prediction error
Returns
-------
prediction : UncertainParameterArray
Predictions as a `uparray`
"""
# TODO: need to supply "given" dict for additive GP sublevel predictions
if additive_level != 'total':
raise NotImplementedError('Prediction for additive sublevels is not yet supported.')
# Prediction means and variance as a numpy vector
predictions = self.gp_dict[additive_level].predict(points_array, point=self.MAP, diag=True,
pred_noise=with_noise, **kwargs)
return predictions
def _recursively_append(self, var_name, suffix='_', increment_var=True):
if var_name in [v.name for v in self.model.vars]:
if increment_var:
var_name += suffix
return self._recursively_append(var_name)
else:
raise ValueError(f'The variable name "{var_name}" already exists in model.')
else:
return var_name
def draw_point_samples(self, points, *args, source=None, output=None, var_name='posterior_samples', additive_level='total', increment_var=True, **kwargs):
"""Draw posterior samples at supplied points
Parameters
----------
points : ParameterArray
1-D ParameterArray vector of coordinates for prediction, must have one layer per ``self.dims``
output : str or list of str, optional
Variable for which to make predictions
source : {None, dict, az.data.inference_data.InferenceData}
GP parameters for which to draw samples. Should be the result of :meth:`find_MAP`, :meth:`sample`, or _None_.
var_name : str, default "posterior_samples"
Name to assign new variable to contain conditional predictions.
additive_level : str, default "total"
Level of additive GP at which to make predictions.
increment_var : bool, default True
Whether to append '_' to the end of _var_name_ if it already exists in model.
Returns
-------
samples : parray
Samples as a 'Parray'
"""
output = self._parse_prediction_output(output)
if len(output) > 1:
raise NotImplementedError('Drawing correlated samples of multiple outputs is not yet implemented.')
points_array, tall_points, param_coords = self._prepare_points_for_prediction(points, output=output)
if source is None:
if self.trace is None and self.MAP is None:
raise ValueError('"Source" of predictions must be supplied if GP object has no trace or MAP stored.')
elif self.trace is not None and self.MAP is not None:
raise ValueError('"Source" of predictions must be supplied if GP object has both trace and MAP stored.')
elif self.MAP is not None:
source = [self.MAP]
elif self.trace is not None:
source = self.trace
var_name = self._recursively_append(var_name, increment_var=increment_var)
with self.model:
_ = self.gp_dict[additive_level].conditional(var_name, points_array)
with self.model:
samples = pm.sample_posterior_predictive(*args, source, var_names=[var_name], **kwargs)
self.predictions = self.parray(**{var_name: samples[var_name]}, stdzd=True)
self.predictions_X = points
return self.predictions
def draw_grid_samples(self, *args, source=None, output=None, categorical_levels=None, var_name='posterior_samples',
additive_level='total', increment_var=True, **kwargs):
"""Draw posterior samples at points defined by :meth:`prepare_grid`.
Parameters
----------
source : {None, dict, az.data.inference_data.InferenceData}
GP parameters for which to draw samples. Should be the result of :meth:`find_MAP`, :meth:`sample`, or _None_.
output : str or list of str, optional
Variable(s) for which to make predictions
categorical_levels : dict, optional
Level for each :attr:`categorical_dims` at which to make prediction
var_name : str, default "posterior_samples"
Name to assign new variable to contain conditional predictions.
additive_level : str, default "total"
Level of additive GP at which to make predictions.
increment_var : bool, default True
Whether to append '_' to the end of _var_name_ if it already exists in model.
Returns
-------
samples : ParameterArray
Samples as a 'Parray' reshaped into a grid with _len(:attr:`continuous_dims`)_ dimensions
"""
if self.grid_points is None:
raise ValueError('Grid must first be specified with `prepare_grid`')
points = self.grid_points
if self.categorical_dims:
points = self.append_categorical_points(points, categorical_levels=categorical_levels)
samples = self.draw_point_samples(*args, points=points, output=output, source=source, var_name=var_name,
additive_level=additive_level, increment_var=increment_var, **kwargs)
self.predictions = samples.reshape(-1, *self.grid_parray.shape)
self.predictions_X = self.predictions_X.reshape(self.grid_parray.shape)
return self.predictions
|
from pyDOE2 import *
import numpy as np
import pandas as pd
import os
from scipy import stats
def ci(exp, metric):
if '1' in exp:
design = ff2n(2)
batch_column = [row[0] for row in design]
epochs_column = [row[1] for row in design]
else:
design = ff2n(4)
cores_column = [row[0] for row in design]
memory_column = [row[1] for row in design]
batch_column = [row[2] for row in design]
epochs_column = [row[3] for row in design]
exp_result = '0.f_anova_' + metric
datafile_result = os.getcwd() + '/compiling_results/results/'+ exp_result +'.txt'
with open(datafile_result, "r") as f:
lines = f.readlines()
last = lines[len(lines)-1]
value = last.split()[2]
if ',' in value:
value = value.replace(',','')
ss_e = float(value)
datafile = os.getcwd() + '/compiling_results/results/'+ exp +'.csv'
df = pd.read_csv(datafile)
res = df.groupby(["cores","memory","batch_size","epochs"]).mean()
if metric == 'response':
measurements = res[metric+'_time'].tolist()
else: measurements = res[metric].tolist()
if '1' not in exp:
q_cores = 1/len(measurements)*sum([i*j for i,j in zip(measurements, cores_column)])
q_memory = 1/len(measurements)*sum([i*j for i,j in zip(measurements, memory_column)])
q_batch = 1/len(measurements)*sum([i*j for i,j in zip(measurements, batch_column)])
q_epochs = 1/len(measurements)*sum([i*j for i,j in zip(measurements, epochs_column)])
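# Confidence intervals for the effects q_i of a 2^k factorial design:
# q_i +/- t_{1-alpha/2; dof} * s_qi, where s_e is derived from the ANOVA sum of
# squared errors (ss_e) read above. The hard-coded 3 below reflects the
# assumption of three replications per configuration.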
alpha = 0.05
dof = 2**np.sqrt(len(measurements)) * 2
s_e = np.sqrt(ss_e/dof)
s_qi = s_e/(2**np.sqrt(len(measurements))*3)
t = stats.t.ppf(1-alpha/2, dof)
if '1' not in exp:
ci_cores = [q_cores - t*s_qi, q_cores + t*s_qi]
ci_memory = [q_memory - t*s_qi, q_memory + t*s_qi]
ci_batch = [q_batch - t*s_qi, q_batch + t*s_qi]
ci_epochs = [q_epochs - t*s_qi, q_epochs + t*s_qi]
ls = ["cores " + str(ci_cores)+ "\n", \
"memory " + str(ci_memory)+ "\n","batch " + str(ci_batch)+ "\n","epochs " + str(ci_epochs)]
else:
ci_batch = [q_batch - t*s_qi, q_batch + t*s_qi]
ci_epochs = [q_epochs - t*s_qi, q_epochs + t*s_qi]
ls = ["batch " + str(ci_batch)+ "\n","epochs " + str(ci_epochs)]
with open("compiling_results/results/" + exp +"_"+ metric +"_ci.txt", "w+") as fl:
fl.writelines(ls)
print("compiling_results/results/" + exp +"_"+ metric +"_ci.txt successfully created")
if __name__ == "__main__":
for metric in ['accuracy', 'response']:
for exp in ['0.f','1.c','1.f','2.c','2.f','3.c','3.f']:
ci(exp,metric)
|
# -*- coding: UTF-8 -*-
"""
literature
"""
__version__ = '4.0'
content = {
'book_headline': [
'<#book_pseudoscience_author#>','<#book_pseudoscience_title#>','<#book_pseudoscientific#>',
'<#book_title_front#>',
],
'book_section': ['Books', 'Books', 'Books', 'Books', 'Books', 'Books',
'Books', 'Books', 'Books', 'Books', 'Books', 'Books',
'New books', 'Literature', 'Authors', 'E-books', 'E-books', 'E-books', 'Published',
'New published','My library','Publishing','From the library',
],
'book_ankeiler':['<#^,book_phylosophy_title#>',
],
'book_pseudosci_adj': [
'eternal ',
'golden ',
'inspiring ',
'inspired ',
'negative ',
'positive ',
'divine ',
'universal ',
'liberal ',
'spirited ',
'early ',
'late ',
'',
'',
'',
'',
'',
'',
'',
],
'book_pseudoscience_author': [
'<#sci_titles_px#> <#names_last_absurdlyBritish#>, <#sci_titles_sx#>',
'<#names_last_absurdlyBritish#>, <#university_dept#>',
'<#name#>',
'<#name#>, <#sci_titles_px#> at <#university_dept#>',
],
'book_pseudoscience_title': [
'"<#book_pseudoscientific#>" by <#book_pseudoscience_author#>',
'"<#book_pseudoscientific#>, <#num_roman#>" by <#book_pseudoscience_author#>',
],
'book_pseudoscientific': [
'<#^,lit_figures#> and <#^,lit_figures#>',
'<#^,lit_figures#> as seen by <#book_pseudosci_adj#><#sci_pseudo#>',
'<#^,sci_pseudo#>, <#^,lit_figures#> and <#^,lit_figures#>',
'<#^,lit_figures#> and <#sci_disciplines#>',
'<#^,lit_figures#> in the history of <#sci_disciplines#>',
'<#^,book_pseudosci_adj#><#sci_pseudo#> and <#sci_disciplines#>',
'<#^,book_pseudosci_adj#><#sci_pseudo#> as <#sci_disciplines#>',
'The role of <#sci_disciplines#> in <#sci_pseudo#>',
'The influence of <#book_pseudosci_adj#><#sci_pseudo#> on <#sci_disciplines#>',
'<#^,book_pseudosci_adj#><#sci_pseudo#> and <#^,lit_figures#>',
'The role of <#sci_pseudo#> in <#sci_pseudo#>',
'<#^,sci_pseudo#> and <#sci_pseudo#> <#book_pseudoscientific_sx#>',
'<#book_pseudoscientific#>',
],
'book_pseudoscientific_sx': [
'(<-randint(1700, 1800)->-<-randint(1801, 1900)->)',
],
'book_title': [
'"<#state_of_mind#> and <#state_of_mind#>" by Jane Austen',
],
'lit_figures': [
'goya',
'rembrandt',
'elgreco',
'turner',
'constable',
'vangogh',
'renoir',
'seurat',
'rubens',
'klimt',
'monet',
'lautrec',
'matisse',
'mondrian',
'ruscha',
'reinhardt',
'malevich',
'dali',
'magritte',
'ensor',
'bach',
'handel',
'brahms',
'beethoven',
'bartok',
'chopin',
'delibes',
'debussy',
'mahler',
'copland',
'marais',
'forqueray',
'lully',
'couperin',
'grieg',
'wagner',
'tarantino',
'truffaut',
'vansant',
'lumiere',
'spielberg',
'cronenberg',
'lucas',
'bunuel',
'bergman',
'bronte',
'austen',
'kafka',
'chekov',
'beckett',
'camus',
'turgenev',
'james',
'hemingway',
'fitzgerald',
'wright',
'ellison',
'fourier',
'euler',
'mandelbrot',
'copernicus',
'galileo',
'einstein',
'tesla',
'whitney',
'edison',
'planck',
'leonardo',
'pythagoras',
'bohr',
'newton',
'archimedes',
'coulomb',
'fahrenheit',
'faraday',
'fermi',
'feynman',
'hawking',
'geiger',
'curie',
'kelvin',
'ohm',
'penzias',
'roentgen',
'volta',
],
'lit_mythology': [
'zeus',
'apollo',
'hermes',
'athena',
'achilles',
'antigone',
'aphrodite',
'anubis',
'arachne',
'asgard',
'atlantis',
'atlas',
'aurora',
'cadmus',
'calliope',
'calypso',
'centaur',
'charon',
'charybdis',
'cronus',
'cupid',
'cyclops',
'daedalus',
'daphne',
'demeter',
'diana',
'dido',
'electra',
'erato',
'europa',
'euridyce',
'euterpe',
'flora',
'ganymede',
'gorgon',
'hades',
'hector',
'hera',
'heracles',
'hermes',
'hyperion',
'icarus',
'janus',
'jocasta',
'leda',
'lucian',
'medea',
'minerva',
'narcissus',
'nestor',
'odin',
'odysseus',
'oedipus',
'orcus',
'osiris',
'paris',
'perseus',
'phaedra',
'poseidon',
'proteus',
'psyche',
'pyramus',
'satyr',
'scylla',
'sisyphus',
'sphinx',
'styx',
'tantalus',
'tereus',
'thalia',
'thor',
'thoth',
'titan',
'triton',
'ulysses',
'urania',
'vulcan',
],
'state_of_mind': [
'Pride',
'Prejudice',
'Stubborness',
'Anger',
'Fury',
'Depression',
'Mild Amusement',
'Consolation',
],
}
|
import imagehash
from PIL import Image
from skimage.measure import compare_ssim
from imagehash import average_hash, phash, dhash, whash
from mir_eval.separation import bss_eval_sources_framewise
from neural_loop_combiner.config import settings
def ssim_similarity(array_1, array_2):
if len(array_1) > len(array_2):
return compare_ssim(array_1[:len(array_2)], array_2)
else:
return compare_ssim(array_1, array_2[:len(array_1)])
def spec_similarity(spec1, spec2, hash_type=settings.HASH_TYPE):
img1, img2 = Image.fromarray(spec1), Image.fromarray(spec2)
if hash_type == 'ahash':
hash1, hash2 = average_hash(img1), average_hash(img2)
elif hash_type == 'phash':
hash1, hash2 = phash(img1), phash(img2)
elif hash_type == 'dhash':
hash1, hash2 = dhash(img1), dhash(img2)
elif hash_type == 'whash':
hash1, hash2 = whash(img1), whash(img2)
else:
raise ValueError('unsupported hash_type: %s' % hash_type)
# subtracting ImageHash objects yields the Hamming distance between the hashes
return hash1 - hash2
def snr_cal(ref_audio, estm_audio):
if len(ref_audio) > len(estm_audio):
ref_audio = ref_audio[:len(estm_audio)]
elif len(ref_audio) < len(estm_audio):
estm_audio = estm_audio[:len(ref_audio)]
return bss_eval_sources_framewise(ref_audio, estm_audio)[0][0][0] |
'''
This is to convert letters to numbers when calculations occur
'''
dictionar_of_letters_to_numbers = {
'a': 1, 'b': 2, 'c': 3, 'd':4, 'e': 5, 'f': 6, 'g':7, 'h': 8
}
'''
This is to save the coordinates of where the user pressed, in order
to promote the pawn to the correct piece
'''
promotion_piece = []
|
import pyaudio
import time
import wave
def start_recording():
chunk = 1024
sample_format = pyaudio.paInt16
channels = 2
fs = 44100
seconds = 3
filename = "./require/roll_call.wav"
p = pyaudio.PyAudio()
print("Recoding : \n")
input("Press ENTER to start 3 second recording\n\n(Please speak closely and loudly!!)\n")
stream = p.open(format=sample_format,
channels=channels,
rate=fs,
frames_per_buffer=chunk,
input=True)
frames = []
for i in range(0, int(fs / chunk * seconds)):
data = stream.read(chunk)
frames.append(data)
stream.stop_stream()
stream.close()
p.terminate()
wf = wave.open(filename, 'wb')
wf.setnchannels(channels)
wf.setsampwidth(p.get_sample_size(sample_format))
wf.setframerate(fs)
wf.writeframes(b''.join(frames))
wf.close()
|
import json
import logging
from six.moves.urllib.parse import urlencode
import django
from django.core.exceptions import MiddlewareNotUsed
from django.http import HttpResponse
from django.shortcuts import redirect
from django.urls import reverse
try:
from django.utils.deprecation import MiddlewareMixin
except: # noqa E722
class MiddlewareMixin(object):
pass
from pwdtk.auth_backends import PwdtkLockedException
from pwdtk.helpers import PwdtkSettings
from pwdtk.models import PwdData
from pwdtk.views import lockout_response
logger = logging.getLogger(__name__)
class PwdtkMiddleware(MiddlewareMixin):
def __init__(self, get_response=None):
if not PwdtkSettings.PWDTK_ENABLED:
logger.debug("PWDTK middleware is disabled")
raise MiddlewareNotUsed("pwdtk is disabled")
if get_response:
super(PwdtkMiddleware, self).__init__(
get_response=get_response)
else:
super(PwdtkMiddleware, self).__init__()
def must_renew_password(self, request):
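# A password change is forced only for authenticated, non-GET requests whose
# user is flagged must_renew and whose path is not already an allowed
# password-change view.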
if request.method == 'GET':
return False
if django.VERSION < (1, 10):
is_authenticated = request.user and request.user.is_authenticated()
else:
is_authenticated = request.user and request.user.is_authenticated
if not is_authenticated:
return False
if not PwdData.get_or_create_for_user(request.user).must_renew:
return False
if request.path == reverse(PwdtkSettings.PWDTK_PASSWD_CHANGE_VIEW):
return False
if request.path in PwdtkSettings.PWDTK_PASSWD_CHANGE_ALLOWED_PATHS:
return False
return True
def process_exception(self, request, exception):
if isinstance(exception, PwdtkLockedException):
context = exception.pwdtk_data.get_lockout_context()
if request.is_ajax():
return HttpResponse(
json.dumps(context),
content_type='application/json',
status=403,
)
if PwdtkSettings.PWDTK_LOCKOUT_VIEW:
return redirect("%s?%s" % (
reverse(PwdtkSettings.PWDTK_LOCKOUT_VIEW),
urlencode(context)
)
)
return lockout_response(request, exception.pwdtk_data)
return None
def process_request(self, request):
if self.must_renew_password(request):
return redirect(reverse(PwdtkSettings.PWDTK_PASSWD_CHANGE_VIEW))
def process_response(self, request, response):
if self.must_renew_password(request):
return redirect(reverse(PwdtkSettings.PWDTK_PASSWD_CHANGE_VIEW))
return response
|
from SpotifyView import SpotifyMainView
import time
import functools
import os
class SpotifyCtrl:
def __init__(self, model, view):
self.get_data = model
self.view = view
self.connect_signals()
def _saveSong(self):
name = self.view.inputFile()
if name:
folder = self.view.chooseFile()
if folder:
self.view.setDisplayText('Saving Songs - Please Wait')
self.get_data.save_playlists_data(folder, name)
self.view.setDisplayText('Songs saved')
else:
self.view.setDisplayText('')
def _setDisplayText(self, text):
self.view.setDisplayText(text)
def delete_songs(self):
if self.view.reconfirm_delete():
self.view.setDisplayText('Deleting Playlists - Please Wait')
self.get_data.delete_all_playlists()
self.view.setDisplayText('Playlists deleted')
def restore_songs(self):
folder = self.view.chooseFile()
if folder:
if self.view.reconfirm():
self.view.setDisplayText('Restoring Songs - Please Wait')
self.get_data.restore_playlists(folder)
# self.get_data.delete_all_playlists()
self.view.setDisplayText('Songs restored')
def connect_signals(self):
self.view.save.clicked.connect(functools.partial(self._saveSong))
self.view.restore.clicked.connect(functools.partial(self.restore_songs))
self.view.delete.clicked.connect(functools.partial(self.delete_songs))
|
from __future__ import print_function, division
import sys
from ...py3k_compat import urlopen, BytesIO, url_content_length
def bytes_to_string(nbytes):
if nbytes < 1024:
return '%ib' % nbytes
nbytes /= 1024.
if nbytes < 1024:
return '%.1fkb' % nbytes
nbytes /= 1024.
if nbytes < 1024:
return '%.2fMb' % nbytes
nbytes /= 1024.
return '%.1fGb' % nbytes
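# Example (illustrative): bytes_to_string(512) -> '512b',
# bytes_to_string(1536) -> '1.5kb', bytes_to_string(3 * 1024 ** 2) -> '3.00Mb'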
def download_with_progress_bar(data_url, return_buffer=False):
"""Download a file, showing progress
Parameters
----------
data_url : string
web address
return_buffer : boolean (optional)
if true, return a BytesIO buffer rather than a string
Returns
-------
s : string
content of the file
"""
num_units = 40
fhandle = urlopen(data_url)
content_length = url_content_length(fhandle)
chunk_size = content_length // num_units
print("Downloading %s" % data_url)
nchunks = 0
buf = BytesIO()
content_length_str = bytes_to_string(content_length)
while True:
next_chunk = fhandle.read(chunk_size)
nchunks += 1
if next_chunk:
buf.write(next_chunk)
s = ('[' + nchunks * '='
+ (num_units - 1 - nchunks) * ' '
+ '] %s / %s \r' % (bytes_to_string(buf.tell()),
content_length_str))
else:
sys.stdout.write('\n')
break
sys.stdout.write(s)
sys.stdout.flush()
buf.seek(0)
if return_buffer:
return buf
else:
return buf.getvalue()
|
"""NLRN model for denoise dataset
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
import models
def update_argparser(parser):
models.update_argparser(parser)
args, _ = parser.parse_known_args()
parser.add_argument(
'--num-steps',
help='Number of steps in recurrent networks',
default=12,
type=int)
parser.add_argument(
'--num-filters',
help='Number of filters in networks',
default=128,
type=int)
parser.add_argument(
'--non-local-field-size',
help='Size of receptive field in non-local blocks',
default=35,
type=int)
parser.add_argument(
'--init-ckpt',
help='Checkpoint path to initialize',
default=None,
type=str,
)
parser.set_defaults(
train_steps=500000,
learning_rate=((100000, 200000, 300000, 400000, 450000),
(1e-3, 5e-4, 2.5e-4, 1.25e-4, 6.25e-5, 3.125e-5)),
save_checkpoints_steps=20000,
save_summary_steps=1000,
)
def model_fn(features, labels, mode, params, config):
predictions = None
loss = None
train_op = None
eval_metric_ops = None
export_outputs = None
scaffold = None
sources = features['source']
net = _nlrn(sources, mode, params)
predictions = tf.clip_by_value(net, 0.0, 1.0)
if mode == tf.estimator.ModeKeys.PREDICT:
export_outputs = {
tf.saved_model.signature_constants.PREDICT_METHOD_NAME:
tf.estimator.export.PredictOutput(predictions)
}
else:
targets = labels['target']
def central_crop(x, size=5):
x_shape = tf.shape(x)
x = tf.slice(
x, [0, x_shape[1] // 2 - size // 2, x_shape[2] // 2 - size // 2, 0],
[-1, size, size, -1])
return x
loss = tf.losses.mean_squared_error(
labels=central_crop(targets), predictions=central_crop(net))
if mode == tf.estimator.ModeKeys.EVAL:
def _ignore_boundary(images):
boundary_size = 16
images = images[:, boundary_size:-boundary_size, boundary_size:
-boundary_size, :]
return images
def _float32_to_uint8(images):
images = images * 255.0
images = tf.round(images)
images = tf.saturate_cast(images, tf.uint8)
return images
psnr = tf.image.psnr(
_float32_to_uint8(_ignore_boundary(targets)),
_float32_to_uint8(_ignore_boundary(predictions)),
max_val=255,
)
eval_metric_ops = {
'PSNR': tf.metrics.mean(psnr),
}
if mode == tf.estimator.ModeKeys.TRAIN:
global_step = tf.train.get_or_create_global_step()
learning_rate = tf.train.piecewise_constant(global_step, params.learning_rate[0], params.learning_rate[1])
opt = tf.train.AdamOptimizer(learning_rate)
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
with tf.control_dependencies(update_ops):
gvs = opt.compute_gradients(loss)
capped_gvs = [(tf.clip_by_norm(grad, 2.5), var) for grad, var in gvs]
train_op = opt.apply_gradients(capped_gvs, global_step=global_step)
stats = tf.profiler.profile()
print("Total parameters:", stats.total_parameters)
if params.init_ckpt:
init_fn = tf.contrib.framework.assign_from_checkpoint_fn(
params.init_ckpt, tf.global_variables(), ignore_missing_vars=True)
scaffold = tf.train.Scaffold(init_fn=init_fn)
return tf.estimator.EstimatorSpec(
mode=mode,
predictions=predictions,
loss=loss,
train_op=train_op,
eval_metric_ops=eval_metric_ops,
export_outputs=export_outputs,
)
def _nlrn(x, mode, params):
training = mode == tf.estimator.ModeKeys.TRAIN
skip = x
x = tf.layers.batch_normalization(x, training=training)
x = tf.layers.conv2d(
x, params.num_filters, 3, padding='same', activation=None, name='conv1')
y = x
with tf.variable_scope("rnn"):
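# The same residual block ('RB1') is applied num_steps times with shared
# weights (reuse=True after the first step), which is what makes the network
# recurrent; x carries the evolving state while y is the fixed input feature
# map fed into every step.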
for i in range(params.num_steps):
if i == 0:
x = _residual_block(
x, y, params.num_filters, training, name='RB1', reuse=False)
else:
x = _residual_block(
x, y, params.num_filters, training, name='RB1', reuse=True)
x = tf.layers.batch_normalization(x, training=training)
x = tf.nn.relu(x)
x = tf.layers.conv2d(
x,
params.num_channels,
3,
padding='same',
activation=None,
name='conv_end')
return x + skip
def _residual_block(x, y, filter_num, training, name, reuse):
x = tf.layers.batch_normalization(x, training=training)
x = tf.nn.relu(x)
x = _non_local_block(x, 64, 128, training, 35, name='non_local', reuse=reuse)
x = tf.layers.batch_normalization(x, training=training)
x = tf.layers.conv2d(
x,
filter_num,
3,
padding='same',
activation=None,
name=name + '_a',
reuse=reuse)
x = tf.layers.batch_normalization(x, training=training)
x = tf.nn.relu(x)
x = tf.layers.conv2d(
x,
filter_num,
3,
padding='same',
activation=None,
name=name + '_b',
reuse=reuse)
x = tf.add(x, y)
return x
def _non_local_block(x,
filter_num,
output_filter_num,
training,
field_size,
name,
reuse=False):
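# Non-local block in the embedded-Gaussian form: attention weights are
# softmax(theta(x) @ phi(x)^T) and the output is attention @ g(x), added back
# to x as a residual. The branch under `if True:` attends over all spatial
# positions; the disabled else branch would restrict attention to a
# field_size x field_size neighbourhood via extract_image_patches.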
x_theta = tf.layers.conv2d(
x,
filter_num,
1,
padding='same',
activation=None,
name=name + '_theta',
reuse=reuse)
x_phi = tf.layers.conv2d(
x,
filter_num,
1,
padding='same',
activation=None,
name=name + '_phi',
reuse=reuse)
x_g = tf.layers.conv2d(
x,
output_filter_num,
1,
padding='same',
activation=None,
name=name + '_g',
reuse=reuse,
kernel_initializer=tf.zeros_initializer())
if True:
x_theta_reshaped = tf.reshape(x_theta, [
tf.shape(x_theta)[0],
tf.shape(x_theta)[1] * tf.shape(x_theta)[2],
tf.shape(x_theta)[3]
])
x_phi_reshaped = tf.reshape(x_phi, [
tf.shape(x_phi)[0],
tf.shape(x_phi)[1] * tf.shape(x_phi)[2],
tf.shape(x_phi)[3]
])
x_phi_permuted = tf.transpose(x_phi_reshaped, perm=[0, 2, 1])
x_mul1 = tf.matmul(x_theta_reshaped, x_phi_permuted)
x_mul1_softmax = tf.nn.softmax(
x_mul1, axis=-1) # normalization for embedded Gaussian
x_g_reshaped = tf.reshape(x_g, [
tf.shape(x_g)[0],
tf.shape(x_g)[1] * tf.shape(x_g)[2],
tf.shape(x_g)[3]
])
x_mul2 = tf.matmul(x_mul1_softmax, x_g_reshaped)
x_mul2_reshaped = tf.reshape(x_mul2, [
tf.shape(x_mul2)[0],
tf.shape(x_phi)[1],
tf.shape(x_phi)[2], output_filter_num
])
else:
x_theta = tf.expand_dims(x_theta, -2)
x_phi_patches = tf.image.extract_image_patches(
x_phi, [1, field_size, field_size, 1], [1, 1, 1, 1], [1, 1, 1, 1],
padding='SAME')
x_phi_patches = tf.reshape(x_phi_patches, [
tf.shape(x_phi)[0],
tf.shape(x_phi)[1],
tf.shape(x_phi)[2],
field_size * field_size,
tf.shape(x_phi)[3],
])
x_mul1 = tf.matmul(x_theta, x_phi_patches, transpose_b=True)
x_mul1_softmax = tf.nn.softmax(x_mul1, axis=-1)
x_g_patches = tf.image.extract_image_patches(
x_g, [1, field_size, field_size, 1], [1, 1, 1, 1], [1, 1, 1, 1],
padding='SAME')
x_g_patches = tf.reshape(x_g_patches, [
tf.shape(x_g)[0],
tf.shape(x_g)[1],
tf.shape(x_g)[2],
field_size * field_size,
tf.shape(x_g)[3],
])
x_mul2 = tf.matmul(x_mul1_softmax, x_g_patches)
x_mul2_reshaped = tf.reshape(
x_mul2,
[tf.shape(x)[0],
tf.shape(x)[1],
tf.shape(x)[2], output_filter_num])
return tf.add(x, x_mul2_reshaped)
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Stuff that should eventually go into a model.cfg file.
"""
import os
# to-do: get these inputs from command line or config file
model_path = '/data02/MyArchive/aisteer_3Dencoders/models/gashydrates_enhancer'
if not os.path.exists(model_path):
os.makedirs(model_path)
############ MODEL PARAMETERS ############
def get_training_params(TRAINING_INPUT_SIZE, N_EPOCHS = None, N_STEPS_PER_EPOCH = None, BATCH_SIZE = None):
training_params = {"sampling_method" : "random", \
"training_input_size" : (64,64,64),\
"batch_size" : 24, \
"n_epochs" : 10,\
"random_rotate" : True, \
"add_noise" : 0.1, \
"max_stride" : 2, \
"mask_ratio" : 0.95, \
"steps_per_epoch" : 100}
if TRAINING_INPUT_SIZE == (64,64,64):
# default
pass
elif TRAINING_INPUT_SIZE == (32,32,32):
training_params["training_input_size"] = TRAINING_INPUT_SIZE
training_params["batch_size"] = 4
training_params["max_stride"] = 2
elif TRAINING_INPUT_SIZE == (256,256,256):
training_params["training_input_size"] = TRAINING_INPUT_SIZE
training_params["batch_size"] = 4
training_params["max_stride"] = 2
elif TRAINING_INPUT_SIZE == (128,128,128):
training_params["training_input_size"] = TRAINING_INPUT_SIZE
training_params["batch_size"] = 8
training_params["max_stride"] = 2
training_params["n_epochs"] = 5
training_params["steps_per_epoch"] = 100
elif TRAINING_INPUT_SIZE == (32,128,128):
training_params["training_input_size"] = TRAINING_INPUT_SIZE
training_params["batch_size"] = 16
training_params["max_stride"] = 2
training_params["n_epochs"] = 5
training_params["steps_per_epoch"] = 100
training_params["random_rotate"] = False
else:
raise ValueError("input size not catalogued yet")
if N_EPOCHS is not None:
training_params["n_epochs"] = N_EPOCHS
if N_STEPS_PER_EPOCH is not None:
training_params["steps_per_epoch"] = N_STEPS_PER_EPOCH
if BATCH_SIZE is not None:
training_params["batch_size"] = BATCH_SIZE
print("\n", "#"*55, "\n")
print("\nTraining parameters\n")
for key, value in training_params.items():
print(key, value)
return training_params
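# Example (illustrative): get_training_params((128, 128, 128), N_EPOCHS=10)
# returns the 128^3 preset (batch_size 8, max_stride 2) with n_epochs
# overridden to 10.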
############ MODEL PARAMETERS ############
def get_model_params(model_tag):
m = {"n_filters" : [16, 32, 64], \
"n_blocks" : 3, \
"activation" : 'lrelu', \
"batch_norm" : True, \
"isconcat" : [True, True, True], \
"pool_size" : [2,2,2]}
# default
model_params = m.copy()
if model_tag == "M_a01":
pass
# a02 - shallow first, deep later. should be faster with high-level context. try with 128
elif model_tag == "M_a02":
model_params["n_filters"] = [16, 64]
model_params["pool_size"] = [ 2, 4]
# a03 - very deep (slow) model with more filters
elif model_tag == "M_a03":
model_params["n_filters"] = [16, 32]
model_params["pool_size"] = [ 2, 2]
# a04 - super shallow model - 1 max pool
elif model_tag == "M_a04":
model_params["n_filters"] = [16]
model_params["pool_size"] = [2]
# a05 - flat CNN - no pooling
elif model_tag == "M_a05":
model_params["n_filters"] = [16]
model_params["pool_size"] = [1]
elif model_tag == "M_a06":
model_params["n_filters"] = [8]
model_params["pool_size"] = [2]
else:
raise ValueError("model_tag not found")
model_params["n_blocks"] = len(model_params["n_filters"])
model_params["isconcat"] = [True]*len(model_params["n_filters"])
# BATCH_NORM_OVERRIDE
# model_params["batch_norm"] = False
print("\n", "#"*55, "\n")
print("\nModel is %s"%model_tag)
for key, value in model_params.items():
print(key, value)
return model_params
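# Example (illustrative): get_model_params("M_a02") yields a two-level model
# with n_filters [16, 64], pool_size [2, 4], n_blocks 2 and isconcat [True, True].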
if __name__ == "__main__":
fe = SparseSegmenter(model_initialization = 'define-new', \
# input_size = , \
descriptor_tag = "M_a01",\
gpu_mem_limit = gpu_mem_limit,\
**model_params)
|
# Miyu (2041007) | Ludibrium Hair Salon (220000004)
# Male: 36000 - 36990 (Eastern Rocker to Big Point)
# Female: 38000 - 38990 (Harmony to Glam Shiny)
from net.swordie.ms.loaders import StringData
options = []
al = chr.getAvatarData().getAvatarLook()
hairColour = al.getHair() % 10
if al.getGender() == 0:
baseID = 36000
else:
baseID = 38000
for i in range(0, 1000, 10):
hair = baseID + i + hairColour
if StringData.getItemStringById(hair) is not None:
options.append(hair)
answer = sm.sendAskAvatar("Choose your new hairstyle!", False, False, options)
if answer < len(options):
sm.changeCharacterLook(options[answer])
|
import pytest
from sqlalchemy import create_engine
from cthaeh.models import Base
from cthaeh.session import Session
@pytest.fixture(scope="session")
def engine():
# PRO-TIP: Set `echo=True` for lots more SQL debug log output.
return create_engine("sqlite:///:memory:", echo=False)
@pytest.fixture(scope="session")
def _schema(engine):
Base.metadata.create_all(engine)
@pytest.fixture(scope="session")
def _Session(engine, _schema):
Session.configure(bind=engine)
return Session
@pytest.fixture
def session(_Session, _schema):
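# Each test runs inside a nested transaction (SAVEPOINT) that is rolled back
# afterwards, so tests cannot pollute the shared in-memory schema; commit is
# stubbed out so code under test cannot end the savepoint early.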
session = Session()
transaction = session.begin_nested()
session.commit = lambda: None
try:
yield session
finally:
transaction.rollback()
session.close()
|
######################################
# this files processes the social distancing data by considering the
# number of devices that are completely at home within a census block group
# https://docs.safegraph.com/docs/social-distancing-metrics
# [email protected]
######################################
import pandas as pd
import os
from tqdm import tqdm
import json
import matplotlib
matplotlib.use('agg')
import matplotlib.pyplot as plt
from datetime import datetime
mobility_data_location = '/home/arrow/safegraph'
cbgs = {
'newyorkcity': ['36047', '36081', '36061', '36005', '36085', '36119', '34003', '34017', '34031', '36079', '36087'],
'boston': ['25021', '25023', '25025', '25009', '25017', '33015', '33017'],
'seattle': ['53033', '53061', '53053', '53035', '53067', '53057', '53029', '53045'],
'miami': ['12086', '12011', '12099'],
'chicago': ['17031', '17037', '17043', '17063', '17091', '17089', '17093', '17111', '17197'],
'dallas': ['48085', '48113', '48121', '48139', '48231', '48257', '48397', '48251', '48367', '48439', '48497'],
'losangeles':['06037', '06059', '06065', '06071', '06111']
}
def process_mobility(years, months):
    # Accumulators keyed by city. Writing through locals() inside a function
    # relies on a CPython implementation detail, so plain dicts are used instead.
    mobility = {city: [] for city in cbgs}
    datex = []
    for yr in years:
        year_dir = os.path.join(mobility_data_location, yr)
        print(year_dir)
        # for mth in tqdm(os.listdir(year_dir)):
        for mth in tqdm(months):
            month_dir = os.path.join(year_dir, mth)
            print(month_dir)
            for d in tqdm(os.listdir(month_dir)):
                # for d in tqdm(['01', '02']):
                datevalue = yr + '-' + mth + '-' + d
                if datevalue not in datex:
                    datex.append(datevalue)
                day_dir = os.path.join(month_dir, d)
                filename = datevalue + '-social-distancing.csv.gz'
                day_file = os.path.join(day_dir, filename)
                df = pd.read_csv(day_file, compression='gzip', header=0, quotechar='"', error_bad_lines=False)
                # Per-day totals: summed "fraction of devices away from home" and
                # number of census block groups seen for each city.
                today = {city: 0.0 for city in cbgs}
                cbg_num = {city: 0 for city in cbgs}
                for i in tqdm(range(df.shape[0])):
                    src = str(df.iloc[i].origin_census_block_group)
                    # Census block group ids are 12 digits; restore a leading zero lost by the cast.
                    if len(src) < 12:
                        src = '0' + src
                    for city in cbgs:
                        # The first five digits are the county FIPS code.
                        if src[0:5] in cbgs[city]:
                            today[city] += float(df.iloc[i].device_count - df.iloc[i].completely_home_device_count) / df.iloc[i].device_count
                            cbg_num[city] += 1
                            break
                for city in cbgs:
                    if today[city] > 0:
                        mobility[city].append(float(today[city] / cbg_num[city]))
                    else:
                        mobility[city].append(float(today[city]))
    now = datetime.now()
    dt_string = now.strftime("%Y-%m-%d-%H-%M-%S")
    for city in cbgs:
        plt.figure()
        print(datex, mobility[city])
        plt.plot(datex, mobility[city], label=city, marker='.')
        plot_location = mobility_data_location + '/mobilityplot/' + city + years[0] + months[0] + '.png'
        plt.legend(loc='best')
        plt.show()
        plt.savefig(plot_location)
        csv_location = mobility_data_location + '/mobilityplot/' + city + years[0] + months[0] + '.csv'
        city_series = {'0_date': datex, city: mobility[city]}
        print(city_series)
        df = pd.DataFrame.from_dict(city_series)
        df.to_csv(csv_location, index=False)
years = ['2021']
months = ['02']
process_mobility(years, months) |
from .annealing import Ketcham1999, Ketcham2007
from .structures import Grain, Sample
from .viewer import Viewer
from .age_calculations import calculate_central_age as central_age
from .age_calculations import calculate_pooled_age as pooled_age
from .age_calculations import calculate_ages as single_grain_ages
from .age_calculations import chi_square as chi2_test
from .thermal_history import ThermalHistory
|
"""Defines Averager class."""
import collections
class Averager(object):
"""Keeps a running average with limited history."""
def __init__(self, max_count):
"""Initialize the averager with maximum number of
(latest) samples to keep."""
self._max_count = max_count if max_count > 1 else 1 # Minimum is 1.
self._data = collections.deque()
def add(self, value):
"""Add a value, and return current average."""
self._data.append(value)
if len(self._data) > self._max_count:
self._data.popleft()
return sum(self._data)/len(self._data)
def __len__(self):
"""Length operator."""
return len(self._data)
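if __name__ == "__main__":
    # Small self-check sketch: with a window of 3, the fourth value pushes out
    # the oldest sample, so the running average becomes (2 + 3 + 4) / 3.
    avg = Averager(3)
    assert avg.add(1) == 1.0
    assert avg.add(2) == 1.5
    assert avg.add(3) == 2.0
    assert avg.add(4) == 3.0
    assert len(avg) == 3
    print("Averager demo passed")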
|
# -*- coding: utf-8 -*-
"""
Created on Tue Sep 25 11:50:05 2018
@author: kennedy
"""
__author__ = "kennedy Czar"
__email__ = "[email protected]"
__version__ = '1.0'
import os
class Data_collector(object):
def __init__(self, path):
'''
:Argument:
:path:
                Enter the working directory or state
                the location where you would like to create the dataset
:example:
create_folder('D:\\YourDirectory')
                Creates the DATASET FOLDER at D:\\MyDirectory\\DATASET
:Complexity for file Creation:
The Function runs in:
Time Complexity: O(N*logN)
Space Complexity: O(1)
'''
self.path = path
try:
if os.path.exists(self.path):
try:
self.FOLDERS = ['\\DATASET',
'\\TICKERS' ,
'\\PREDICTED',
'\\SAMPLE1',
'\\SAMLE2',
'\\SAMPLE2']
FOLDER_COUNT = 0
for folders in self.FOLDERS:
'''If folder is not created or created but deleted..Recreate/Create the folder.
Check for all folders in the FOLDERS list'''
if not os.path.exists(self.path + self.FOLDERS[FOLDER_COUNT]):
os.makedirs(path + self.FOLDERS[FOLDER_COUNT])
print('====== 100% Completed ==== : {}'.format(self.path + self.FOLDERS[FOLDER_COUNT]))
FOLDER_COUNT += 1
elif os.path.exists(self.path + self.FOLDERS[FOLDER_COUNT]):
'''OR check if the file is already existing using a boolean..if true return'''
print('File Already Existing : {}'.format(self.path + self.FOLDERS[FOLDER_COUNT]))
FOLDER_COUNT += 1
except OSError as e:
'''raise OSError('File Already Existing {}'.format(e))'''
print('File Already existing: {}'.format(e))
elif not os.path.exists(self.path):
raise OSError('File path: {} does not exist\n\t\tPlease check the path again'.format(self.path))
else:
print('File Already Existing')
except Exception as e:
raise(e)
finally:
print('Process completed...Exiting')
def STOCK_EXTRACTOR(self):
'''
:Functionality:
Collects stock data using the yahoo API
Collects all excel data and stores in DATASET FOLDER
append .csv to all files downloaded
'''
import fix_yahoo_finance as yahoo
import pandas as pd
from datetime import datetime
'''Set the start date'''
self.START = '2010-01-01'
# self.END = '2018-10-01'
'''Create a list of stocks to download'''
self.TICKERS_ = ['AMXL.MX',
'WALMEX.MX',
'FEMSAUBD.MX',
'ALFAA.MX',
'CEMEXCPO.MX',
'BIMBOA.MX',
'KOFL.MX',
'GMEXICOB.MX',
'CUERVO.MX',
'GFNORTEO.MX',
'SORIANAB.MX',
'ALPEKA.MX',
'GCARSOA1.MX',
'LIVEPOLC-1.MX',
'TLEVISACPO.MX',
'SAN.MX',
'MEXCHEM.MX',
'ELEKTRA.MX',
'CHDRAUIB.MX',
'AC.MX',
'PE&OLES.MX',
'GFINBURO.MX',
'GRUMAB.MX',
'LACOMERUBC.MX',
'LALAB.MX',
'AEROMEX.MX',
'BACHOCOB.MX',
'GSANBORB-1.MX',
'CULTIBAB.MX',
'GNP.MX',
'FRAGUAB.MX',
'ICHB.MX',
'KIMBERA.MX',
'SIMECB.MX',
'ALSEA.MX',
'GPH1.MX',
'VITROA.MX',
'GIGANTE.MX',
'KUOB.MX',
'ANGELD10.MX',
'OHLMEX.MX',
'Q.MX',
'GENTERA.MX',
'DIABLOI10.MX',
'GFAMSAA.MX',
'IDEALB-1.MX',
'HERDEZ.MX',
'VOLARA.MX',
'AZTECACPO.MX',
'MFRISCOA-1.MX',
'PAPPEL.MX',
'RASSINICPO.MX',
'LABB.MX',
'MEGACPO.MX',
'IENOVA.MX',
'AXTELCPO.MX',
'GCC.MX',
'GISSAA.MX',
'CMOCTEZ.MX',
'BAFARB.MX',
'GPROFUT.MX',
'LAMOSA.MX',
'CERAMICB.MX',
'PINFRA.MX',
'AGUA.MX',
'CIEB.MX',
'ARA.MX',
'POCHTECB.MX',
'ASURB.MX',
'FINDEP.MX',
'POSADASA.MX',
'MINSAB.MX',
'GAPB.MX',
'INVEXA.MX',
'CYDSASAA.MX',
'MONEXB.MX',
'COLLADO.MX',
'UNIFINA.MX',
'ACTINVRB.MX',
'ACCELSAB.MX',
'AUTLANB.MX',
'PASAB.MX',
'OMAB.MX',
'GBMO.MX',
'PV.MX',
'CREAL.MX',
'TMMA.MX',
'MAXCOMA.MX',
'VASCONI.MX',
'FIBRAMQ12.MX',
'GMD.MX',
'CMRB.MX',
'BOLSAA.MX',
'VALUEGFO.MX',
'MEDICAB.MX',
'TERRA13.MX',
'DANHOS13.MX',
'FIHO12.MX',
'CIDMEGA.MX',
'HCITY.MX',
'FIBRAPL14.MX',
'SPORTS.MX',
'DINEB.MX',
'CONVERA.MX',
'VESTA.MX',
'RCENTROA.MX',
'FINN13.MX',
'HOTEL.MX',
'FSHOP13.MX',
'TEAKCPO.MX',
'SAREB.MX',
'FMTY14.MX',
'FHIPO14.MX',
'HOMEX.MX',
'GMXT.MX',
'URBI.MX']
'''write the stock data to specific format by
appending the right extension'''
STOCK_TICKER_ = pd.DataFrame(self.TICKERS_)
self.FORMAT = ['.csv', '.xlsx', '.json']
for extension in self.FORMAT:
STOCK_TICKER_.to_csv('TICKERS/STOCK_TICKER{}'.format(extension))
print('======= Begin downloading stock dataset ======')
try:
for self.TICK_SYMBOLS in self.TICKERS_:
'''just in case your connection breaks,
we'd like to save our progress! by appending
downloaded dataset to DATASET FOLDER'''
if not os.path.exists('DATASET/{}.csv'.format(self.TICK_SYMBOLS)):
df = yahoo.download(self.TICK_SYMBOLS, start = self.START, end = datetime.now())
df.reset_index(inplace = True)
df.set_index("Date", inplace = True)
df.to_csv('DATASET/{}.csv'.format(self.TICK_SYMBOLS))
else:
print('File Already existing: {}'.format(self.TICK_SYMBOLS))
except OSError as e:
raise OSError('Something wrong with destination path: {}'.format(e))
finally:
print('Download Completed..Exiting!')
if __name__ == '__main__':
'''Define a path on your drive where this project folder is located'''
path = 'D:\\GIT PROJECT\\ERIC_PROJECT101\\FREELANCE_KENNETH'
Data_collector(path).STOCK_EXTRACTOR()
|
import logging
import yaml
import logging.config
import os
import argparse
with open('config/log_config.yaml', 'r') as stream:
config = yaml.load(stream, Loader=yaml.FullLoader)
# get path to logs file in config and create folder if not already created
log_path = config['handlers']['file']['filename']
log_dir = os.path.dirname(log_path)
if not os.path.exists(log_dir):
os.makedirs(log_dir)
logging.config.dictConfig(config)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Run the app')
parser.add_argument('--action', type=str, help='Action to perform')
parser.add_argument("--db", help="Link to db", nargs="?", default="bolt://db:7687")
parser.add_argument("--link", help="Page to start crawling", default='Philosophy')
args = parser.parse_args()
if args.action == 'webcrawl':
from WikiCrawler.wikiCrawler import Crawler
Crawler(args.db).start(args.link)
elif args.action == 'apicrawl':
from WikiCrawler.apiCrawler import ApiCrawler
ApiCrawler(args.db).start()
|
import warnings
from typing import IO, Tuple, Mapping, List, Dict, TextIO, Union
from importlib import import_module
from click.utils import LazyFile
from yaml import safe_dump, safe_load
from faker.providers import BaseProvider as FakerProvider
from .data_gen_exceptions import DataGenNameError
from .output_streams import (
DebugOutputStream,
OutputStream,
)
from .parse_factory_yaml import parse_factory
from .data_generator_runtime import output_batches, StoppingCriteria, Globals
from . import SnowfakeryPlugin
# This tool is essentially a three stage interpreter.
#
# 1. Yaml parsing into Python data structures.
# 2. Walking the tree, sorting things into groups like macros, file inclusions,
# etc., and doing the file inclusions (parse_factory_yaml.parse_factory)
# 2 a) merge options information from the parse with options from the
# environment
# 3. Generating the objects top to bottom (including evaluating Jinja) in
# data_generator_runtime.output_batches
#
# The function generate at the bottom of this file is the entry point to all
# of it.
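# A minimal usage sketch (illustrative only; the recipe path and the
# "num_records" option are assumptions, not part of this module):
#
#   with open("recipe.yml") as f:
#       summary = generate(f, user_options={"num_records": "10"})
#   dependencies, dom = summary.summarize_for_debugging()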
class ExecutionSummary:
"""Summarize everything that happened during parsing and evaluating."""
def __init__(self, parse_results, runtime_results):
self.tables = parse_results.tables
self.dom = parse_results.templates
self.intertable_dependencies = runtime_results.intertable_dependencies
def summarize_for_debugging(self):
return self.intertable_dependencies, self.dom
def merge_options(option_definitions: List, user_options: Mapping) -> Tuple[Dict, set]:
"""Merge/compare options specified by end-user to those declared in YAML file.
Takes options passed in from the command line or a config file and
compare them to the options declared by the Generator YAML file.
    The options from the Generator YAML should be dictionaries with keys of
    "option" and "default" as described in the user documentation.
The options from the user should be a dictionary of key/value pairs.
The output is a pair, options, extra_options. The options are the values
to be fed into the process after applying defaults.
extra_options are options that the user specified which do not match
anything in the YAML generator file. The caller may want to warn the
user about them or throw an error.
"""
options = {}
for option in option_definitions:
name = option["option"]
if user_options.get(name):
options[name] = user_options.get(name)
elif option.get("default"):
options[name] = option["default"]
else:
raise DataGenNameError(
f"No definition supplied for option {name}", None, None
)
extra_options = set(user_options.keys()) - set(options.keys())
return options, extra_options
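# For example (hypothetical option definitions), the call
#   merge_options([{"option": "count", "default": 5}], {"other": 1})
# would return ({"count": 5}, {"other"}): the default fills the missing
# "count" value and "other" is reported back as an unrecognised option.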
def load_continuation_yaml(continuation_file: TextIO):
"""Load a continuation file from YAML."""
return safe_load(continuation_file)
def save_continuation_yaml(continuation_data: Globals, continuation_file: TextIO):
safe_dump(continuation_data, continuation_file)
def resolve_plugin(plugin: str) -> object:
"Resolve a plugin to a class"
module_name, class_name = plugin.rsplit(".", 1)
module = import_module(module_name)
cls = getattr(module, class_name)
if issubclass(cls, FakerProvider):
return (FakerProvider, cls)
elif issubclass(cls, SnowfakeryPlugin):
return (SnowfakeryPlugin, cls)
else:
raise TypeError(f"{cls} is not a Faker Provider nor Snowfakery Plugin")
def process_plugins(plugins: List[str]) -> Tuple[List[object], Mapping[str, object]]:
"""Resolve a list of names for SnowfakeryPlugins and Faker Providers to objects
The Providers are returned as a list of objects.
The Plugins are a mapping of ClassName:object so they can be namespaced.
"""
plugin_classes = [resolve_plugin(plugin) for plugin in plugins]
faker_providers = [
provider for baseclass, provider in plugin_classes if baseclass == FakerProvider
]
snowfakery_plugins = {
plugin.__name__: plugin
for baseclass, plugin in plugin_classes
if baseclass == SnowfakeryPlugin
}
return (faker_providers, snowfakery_plugins)
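# For example, resolving a Faker provider class such as
# "faker.providers.internet.Provider" would land in the faker_providers list,
# while a SnowfakeryPlugin subclass would be returned in the plugin mapping
# keyed by its class name.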
def generate(
open_yaml_file: IO[str],
user_options: dict = None,
output_stream: OutputStream = None,
stopping_criteria: StoppingCriteria = None,
generate_continuation_file: Union[TextIO, LazyFile] = None,
continuation_file: TextIO = None,
) -> ExecutionSummary:
"""The main entry point to the package for Python applications."""
user_options = user_options or {}
# Where are we going to put the rows?
output_stream = output_stream or DebugOutputStream()
# parse the YAML and any it refers to
parse_result = parse_factory(open_yaml_file)
# figure out how it relates to CLI-supplied generation variables
options, extra_options = merge_options(parse_result.options, user_options)
if extra_options:
warnings.warn(f"Warning: unknown options: {extra_options}")
output_stream.create_or_validate_tables(parse_result.tables)
continuation_data = (
load_continuation_yaml(continuation_file) if continuation_file else None
)
faker_providers, snowfakery_plugins = process_plugins(parse_result.plugins)
# now do the output
runtime_context = output_batches(
output_stream,
parse_result.templates,
options,
stopping_criteria=stopping_criteria,
continuation_data=continuation_data,
tables=parse_result.tables,
snowfakery_plugins=snowfakery_plugins,
faker_providers=faker_providers,
)
if generate_continuation_file:
safe_dump(runtime_context, generate_continuation_file)
return ExecutionSummary(parse_result, runtime_context)
if __name__ == "__main__": # pragma: no cover
from .snowfakery import generate_cli
generate_cli()
|
# coding=utf-8
import codecs
def calculate(x, y, id2word, id2tag, res=None):
    # Avoid a shared mutable default argument: results would otherwise
    # accumulate across calls.
    if res is None:
        res = []
entity=[]
for j in range(len(x)):
if x[j]==0 or y[j]==0:
continue
if id2tag[y[j]][0]=='B':
entity=[id2word[x[j]]+'/'+id2tag[y[j]]]
elif id2tag[y[j]][0]=='M' and len(entity)!=0 and entity[-1].split('/')[1][1:]==id2tag[y[j]][1:]:
entity.append(id2word[x[j]]+'/'+id2tag[y[j]])
elif id2tag[y[j]][0]=='E' and len(entity)!=0 and entity[-1].split('/')[1][1:]==id2tag[y[j]][1:]:
entity.append(id2word[x[j]]+'/'+id2tag[y[j]])
entity.append(str(j))
res.append(entity)
entity=[]
else:
entity=[]
return res
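# Illustrative example (hypothetical ids): if the tags decoded from y are
# ['B-PER', 'E-PER'] for the two words decoded from x, the collected entity is
# ['word1/B-PER', 'word2/E-PER', '1'], where the trailing '1' is the position
# of the final tag in the sequence.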
def calculate3(x, y, id2word, id2tag, res=None):
    '''
    Use this function to write the extracted entities to res.txt for inspection.
    Note: each call appends to the end of the file, so delete res.txt before re-running.
    '''
    if res is None:
        res = []
with codecs.open('./res.txt','a','utf-8') as outp:
entity=[]
for j in range(len(x)): #for every word
if x[j]==0 or y[j]==0:
continue
if id2tag[y[j]][0]=='B':
entity=[id2word[x[j]]+'/'+id2tag[y[j]]]
elif id2tag[y[j]][0]=='M' and len(entity)!=0 and entity[-1].split('/')[1][1:]==id2tag[y[j]][1:]:
entity.append(id2word[x[j]]+'/'+id2tag[y[j]])
elif id2tag[y[j]][0]=='E' and len(entity)!=0 and entity[-1].split('/')[1][1:]==id2tag[y[j]][1:]:
entity.append(id2word[x[j]]+'/'+id2tag[y[j]])
entity.append(str(j))
res.append(entity)
st = ""
for s in entity:
st += s+' '
#print st
outp.write(st+'\n')
entity=[]
else:
entity=[]
return res
|
import json
from app.api.mines.mine.models.mine_disturbance_code import MineDisturbanceCode
def test_get_all_mine_disturbance_types(test_client, db_session, auth_headers):
disturbances = MineDisturbanceCode.query.filter_by(active_ind=True).all()
    disturbance_codes = [c.mine_disturbance_code for c in disturbances]
    descriptions = [c.description for c in disturbances]
get_resp = test_client.get('/mines/disturbance-codes', headers=auth_headers['full_auth_header'])
get_data = json.loads(get_resp.data.decode())
options = get_data['options']
assert get_resp.status_code == 200
assert len(options) == len(disturbances)
assert all(option['mine_disturbance_code'] in disturbance_codes for option in options)
    assert all(option['description'] in descriptions for option in options)
|
import numpy as np
from PIL import Image
import pandas as pd
import cv2
import sys
import os
import json
print(os.path.dirname(os.path.realpath(__file__)))
sys.path.append(os.path.dirname(os.path.realpath(__file__)) )
print(sys.path)
from bentoml import api, artifacts, env, BentoService
from bentoml.artifact import KerasModelArtifact
from bentoml.handlers import ImageHandler
# from metric import *
# from model import *
import metric, utils, model
import deploy_valid_grid
import efficientnet.keras as efn
# from utils import *
# from deploy_valid_path import valid_path
# from numpy.random import seed
# seed(42)
# from tensorflow import set_random_seed
# set_random_seed(42)
# FLAGS = None
# print(FLAGS.valid_post_path)
# valid_path = '../res/segment/test_20/valid/valid_post_grid.csv'
# with open('./deploy_valid_path.txt') as f:
# valid
print(deploy_valid_grid.grid_value)
post_df = pd.DataFrame(deploy_valid_grid.grid_value)
# print(deploy_valid_path.valid_path['valid_post_grid'])
# post_df = pd.read_csv(str(deploy_valid_path.valid_path['valid_post_grid']))
print(post_df)
# sys.exit()
print('Load dict file Post grid')
label_names = post_df['label'].values.tolist()
proba = post_df['proba'].values.tolist()
reduce_size = post_df['reduce_size'].values.tolist()
pad = post_df['pad'].values.tolist()
convex = post_df['convex'].values.tolist()
colors = post_df['colors'].values.tolist()
print(label_names)
@env(pip_dependencies=['keras==2.2.5', 'tensorflow-gpu==1.14.0', 'Pillow', 'numpy','opencv-python', 'efficientnet', 'pandas','cython','git+https://github.com/cocodataset/cocoapi.git#subdirectory=PythonAPI'])
@artifacts([KerasModelArtifact(name = 'segmentation',
custom_objects = {
# 'tf':tf,
'bce_dice_loss' : metric.bce_dice_loss,
'dice_coef' : metric.dice_coef
}
)
]
)
class KerasSegmentationService(BentoService):
# print(deploy_valid_grid.grid_value)
# post_df = pd.DataFrame(deploy_valid_grid.grid_value)
# # print(deploy_valid_path.valid_path['valid_post_grid'])
# # post_df = pd.read_csv(str(deploy_valid_path.valid_path['valid_post_grid']))
# print(post_df)
# # sys.exit()
# print('Load dict file Post grid')
# label_names = post_df['label'].values.tolist()
# proba = post_df['proba'].values.tolist()
# reduce_size = post_df['reduce_size'].values.tolist()
# pad = post_df['pad'].values.tolist()
# convex = post_df['convex'].values.tolist()
# colors = post_df['colors'].values.tolist()
# print(label_names)
@api(ImageHandler, pilmode='RGB')
def predict(self, img):
print(img.shape)
img_width = img.shape[1]
img_height = img.shape[0]
img_channel = img.shape[2]
# (1,256,256,3)
img_resized = cv2.resize(img.astype(np.uint8), (256,256)).astype(np.float)
print('#'*100)
print(img_resized.shape)
# img = Image.fromarray(img).resize((256, 256))
img_resized /= 255.0
print(img_resized.shape)
img_resized = np.expand_dims(img_resized, axis = 0)
# img = np.array(img.getdata()).reshape((1,256,256,3))
# (1,256,256,4) ==> predcited
predicted = self.artifacts.segmentation.predict(img_resized)
predicted_post = utils.predict_resize(predicted,
proba = proba,
pad_size = pad,
reduce_size = reduce_size,
convex = convex,
label_names = label_names)
print(predicted_post.shape)
rle_dict = {}
rle_list = []
for l_idx, label in enumerate(label_names):
l_mask = predicted_post[0][:,:, l_idx].astype(np.uint8)
print(l_mask.shape)
# (2100,1400)
l_mask = cv2.resize(l_mask, (img_height, img_width))
print(l_mask.shape)
label_rle = utils.mask2rle(l_mask)
rle_list.append(label_rle)
# img_masked = rle_mask2img_request(img, rle_list, label_names, colors)
# img_str = img_masked.tobytes()
rle_dict['rle'] = [{'size' : rle['size'], 'counts' : str(rle['counts'])} for rle in rle_list]
# rle_dict['image'] = str(img_str)
rle_json = json.dumps(rle_dict)
# print(rle_list)
return rle_json
# if __name__ == '__main__':
# parser = argparse.ArgumentParser(description = 'deploying model')
# # parser.add_argument('--target', required=True, help = 'train or predict')
# # parser.add_argument('--train_path')
# # parser.add_argument('--test_path')
# # parser.add_argument('--model_path', required=True)
# parser.add_argument('--valid_post_path', required=True)
# # parser.add_argument('--deploy_save_path', required=True)
# # parser.add_argument('--version_number', requried=True, type = int)
# # parser.add_argument('--json_path', required=True)
# # parser.add_argument('--img_path', required=True)
# # parser.add_argument('--model_name',default= local_time)
# # parser.add_argument('--epoch', default = 50, type =int)
# # parser.add_argument('--batch_size', default = 16, type =int)
# # parser.add_argument('--resize_shape', nargs='+', type=int, default = [256,256])
# # parser.add_argument('--augument',default = True)
# FLAGS, unparsed = parser.parse_known_args()
# KerasSegmentationService |
from django.urls import path
from . import views
from django.views.generic.base import TemplateView
urlpatterns = [
path('add_annotation', views.add_annotation),
path('getChatGroupPapers', views.getChatGroupPapers),
path('getChatGroupMembers', views.getChatGroupMembers),
path('createChatGroup', views.createChatGroup),
path('uploadChatGroupPaper', views.uploadChatGroupPaper),
path('getBothStarList', views.getBothStarList),
path('getMyChatGroupList', views.getMyChatGroupList),
path('chatGroupPaper.html', TemplateView.as_view(template_name = 'chatGroupPaper.html')),
path('showpdf.html', TemplateView.as_view(template_name = 'showpdf.html')),
path('memberInGroupPage.html', TemplateView.as_view(template_name = 'memberInGroupPage.html')),
path('singleGroupPage.html', TemplateView.as_view(template_name = 'singleGroupPage.html')),
path('uploadPaperToChatGroup.html', TemplateView.as_view(template_name = 'uploadPaperToChatGroup.html')),
path('getChatGroupName', views.getChatGroupName),
path('myChatGroupList.html', TemplateView.as_view(template_name = 'myChatGroupList.html')),
path('createChatGroup.html', TemplateView.as_view(template_name = 'createChatGroup.html')),
path('annotation-noicon.svg', views.get_icon),
] |
import threading
import requests
import argparse
from time import sleep
"""ArgParse for CLI input"""
parser=argparse.ArgumentParser(description='WebProbe V0.1')
parser.add_argument('-f','--filename',type=str,required=True,help="Specify filename.")
parser.add_argument('-t','--threads',type=int,const=5,nargs='?',help="Specify No.of threads to spawn (default = 5)")
args = parser.parse_args()
"""Supressing warning caused by requests"""
requests.packages.urllib3.disable_warnings()
def do_request(url):
    """ Send a GET request to the URL and
    print the URL to the console if the request succeeds.
    """
if not url: return
try:
response = requests.get(url, verify=False, allow_redirects=False, timeout=1)
print(url) #if response.ok else print(f"response: {response.status_code} url: {url}")
except Exception as e:
pass
def process_file(fname, t):
""" Thread Implementation """
fp = open(fname,'rt')
arr = list(map(lambda a : a.strip(), fp.readlines()))
for each in arr:
req = threading.Thread(target=do_request, args=(each,))
#print(threading.active_count())
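        # Throttle: block until the number of live threads drops below the
        # requested limit before starting the next request thread.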
while threading.active_count() >=t:
sleep(0.1)
# Needs to be changed
req.start()
fp.close()
if __name__=="__main__":
try:
if args.threads == None:
threads_c=5
else:
threads_c=args.threads
#print(15*"="+"\nFile Name : {}\nThread Count : {}\n".format(args.filename,threads_c)+15*"="+"\n")
process_file(args.filename, threads_c)
except Exception as err:
print("\33[031mError !!\33[0m\n \n{}".format(err))
|
from myModules.github.github_api import gitApi
import requests
class Repo:
def __init__(self, user, repo=None):
self.rep = self.getRepos(user, repo)
def getRepos(self, owner, repo):
"""
get Json data
:param owner: the user
:param repo: the repository
"""
return requests.get(gitApi.repository_url + owner + '/' + repo).json()
def getStars(self):
"""
get repo star
:return: repo star
"""
try:
return self.rep['stargazers_count']
except KeyError:
return self.rep
def getAvatar(self):
"""
get repo avatar
:return: repo avatar
"""
try:
return self.rep['owner']['avatar_url']
except KeyError:
return self.rep
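# Usage sketch (illustrative; performs a live GitHub API request):
#   repo = Repo("octocat", "Hello-World")
#   print(repo.getStars())
#   print(repo.getAvatar())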
|
from m5ui import *
import utime as time
import random
clear_bg(0x111111)
rgb = RGB_Bar()
btnA = M5Button(name="ButtonA", text="ButtonA", visibility=False)
btnB = M5Button(name="ButtonB", text="ButtonB", visibility=False)
btnC = M5Button(name="ButtonC", text="ButtonC", visibility=False)
#########################################################
######################## GUI ############################
#########################################################
class GUIObject:
__X = 0
__Y = 0
__Height = 0
__Width = 0
__Scene = 0
__Status = 0
__Time = 0
__NumberValue = 0
__NumberValue_ = __NumberValue
__Name = ""
__Type = ""
__Label = ""
__Label_ = __Label
__TextValue = ""
__TextValue_ = __TextValue
__Focusable = False
__Function = None
@property
def X(self):
return self.__X
@property
def X_(self):
return self.__X_
@X.setter
def X(self, X):
self.__X_ = self.__X
self.__X = X
@property
def Y(self):
return self.__Y
@Y.setter
def Y(self, Y):
self.__Y = Y
@property
def Height(self):
return self.__Height
@Height.setter
def Height(self, Height):
self.__Height = Height
@property
def Width(self):
return self.__Width
@Width.setter
def Width(self, Width):
self.__Width = Width
@property
def Scene(self):
return self.__Scene
@Scene.setter
def Scene(self, Scene):
self.__Scene = Scene
@property
def Status(self):
return self.__Status
@Status.setter
def Status(self, Status):
self.__Status = Status
@property
def Time(self):
return self.__Time
@Time.setter
def Time(self, Time):
self.__Time = Time
@property
def NumberValue(self):
return self.__NumberValue
@property
def NumberValue_(self):
return self.__NumberValue_
@NumberValue.setter
def NumberValue(self, NumberValue):
self.__NumberValue_ = self.__NumberValue
self.__NumberValue = NumberValue
@NumberValue_.setter
def NumberValue_(self, NumberValue_):
self.__NumberValue_ = NumberValue_
@property
def Name(self):
return self.__Name
@Name.setter
def Name(self, Name):
self.__Name = Name
@property
def Type(self):
return self.__Type
@Type.setter
def Type(self, Type):
self.__Type = Type
@property
def Label(self):
return self.__Label
@property
def Label_(self):
return self.__Label_
@Label.setter
def Label(self, Label):
self.__Label_ = self.__Label
self.__Label = Label
@Label_.setter
def Label_(self, Label_):
self.__Label_ = Label_
@property
def TextValue(self):
return self.__TextValue
@property
def TextValue_(self):
return self.__TextValue_
@TextValue.setter
def TextValue(self, TextValue):
self.__TextValue_ = self.__TextValue
self.__TextValue = TextValue
@TextValue_.setter
def TextValue_(self, TextValue_):
self.__TextValue_ = TextValue_
@property
def Focusable(self):
return self.__Focusable
@Focusable.setter
def Focusable(self, Focusable):
self.__Focusable = Focusable
@property
def Function(self):
return self.__Function
@Function.setter
def Function(self, Function):
self.__Function = Function
def live(self):
if self.__Type == "inputbox":
return GUIInputbox(self)
elif self.__Type == "human":
return GUIHuman(self)
elif self.__Type == "button":
return GUIButton(self)
GUIRoll = []
GUIActiveScene = 0
def GUIPString(idSp, sp, srcStr):
dstStr = ''
cnt = 0
for i in range(len(srcStr)):
if srcStr[i] == sp:
cnt += 1
else:
if cnt == idSp:
dstStr += srcStr[i]
elif cnt > idSp:
break
return dstStr
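# e.g. GUIPString(1, '/', 'a/b/c') returns 'b': the function extracts the
# idSp-th (zero-based) segment of srcStr split on the separator sp.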
def GUILive():
if buttonA.wasPressed():
GUITab("<")
if buttonC.wasPressed():
GUITab(">")
if buttonB.wasPressed():
for i in range(0, len(GUIRoll)):
if (GUIRoll[i]).Scene == GUIActiveScene and (GUIRoll[i]).Status == 1:
(GUIRoll[i]).Status = 2
for i in range(0, len(GUIRoll)):
if (GUIRoll[i]).Scene == GUIActiveScene:
GUIRoll[i] = (GUIRoll[i]).live()
def GUIKeyboard(key = 0, caps = 0):
keys = ['~', '1', '2', '3', '4', '5', '6', '7', '8', '9', '0', '-', '=', '!', '@', '#', 'Q', 'W', 'E', 'R', 'T', 'Y', 'U', 'I', 'O', 'P', '[', ']', '{', '}', '$', '%', 'A', 'S', 'D', 'F', 'G', 'H', 'J', 'K', 'L', ':', ';', '"', '\'', '\\', '|', '*', 'Z', 'X', 'C', 'V', 'B', 'N', 'M', ',', '.', '/', '<', '>', '?', '(', ')', '+', '<.', 'X.', 'O.', ' ', '_', 'A.', '<b', '>b']
al = len(keys)
w = 320
pY = 140
sk = 20
rs = int(w / sk)
for i in range(0, al):
r = int(i / rs)
rX = i * sk - r * w
rY = pY + r * sk
tX = rX + 5
tY = rY + 4
k = keys[i]
if i == key:
if k == '<.':
lcd.rect(rX, rY, sk, sk, color=0xffcc00)
lcd.print(k[0], tX, tY, color=0xffcc00)
rgb.set_all(0xffcc00)
elif k == 'X.':
lcd.rect(rX, rY, sk, sk, color=0xff0000)
lcd.print(k[0], tX, tY, color=0xff0000)
rgb.set_all(0xff0000)
elif k == 'O.':
lcd.rect(rX, rY, sk, sk, color=0x00ff00)
lcd.print(k[0], tX, tY, color=0x00ff00)
rgb.set_all(0x00ff00)
elif k == '<b' or k == '>b':
lcd.rect(rX, rY, sk, sk, color=0xffffff)
lcd.print(k[0], tX, tY, color=0xffffff)
elif k == 'A.':
if caps == 1:
lcd.print(k[0].upper(), tX, tY, color=0x00ff00)
else:
lcd.print(k[0].lower(), tX, tY, color=0x999999)
lcd.rect(rX, rY, sk, sk, color=0x00ff00)
else:
lcd.rect(rX, rY, sk, sk, color=0x33ccff)
lcd.print(k[0], tX, tY, color=0x33ccff)
rgb.set_all(0x000000)
else:
lcd.rect(rX, rY, sk, sk, color=0x666666)
if k == 'A.':
if caps == 1:
lcd.print(k[0], tX, tY, color=0x00ff00)
else:
lcd.print(k[0].lower(), tX, tY, color=0x999999)
else:
lcd.print(k[0], tX, tY, color=0x999999)
if buttonB.wasPressed():
wait(0.1)
key = key + 1
if key >= al:
key = 0
if buttonA.wasPressed():
wait(0.1)
key = key + rs
if key >= al:
key = key - (int(key / rs) * rs)
if buttonC.wasPressed():
wait(0.1)
rgb.set_all(0x000000)
k = keys[key]
if len(k) == 1:
return (keys[key].upper() if caps == 1 else keys[key].lower()), key, caps
else:
if k == 'A.':
caps = 1 if caps == 0 else 0
elif k == '<.':
return 'backspace', key, caps
elif k == 'X.':
return 'esc', key, caps
elif k == 'O.':
return 'enter', key, caps
elif k == '<b':
return 'left', key, caps
elif k == '>b':
return 'right', key, caps
return '', key, caps
def GUIAttach(obj):
inFocus = -1
for i in range(0, len(GUIRoll)):
if (GUIRoll[i]).Scene == obj.Scene and (GUIRoll[i]).Focusable:
if (GUIRoll[i]).Status == 1:
inFocus = i
if obj.Status == 1:
(GUIRoll[i]).Status = 0
if inFocus == -1 and obj.Focusable:
obj.Status = 1
GUIRoll.append(obj)
def GUI(Name):
for i in range(0, len(GUIRoll)):
if GUIRoll[i].Name == Name:
return GUIRoll[i]
def GUITab(Dir):
inFocus = -1
for i in range(0, len(GUIRoll)):
if (GUIRoll[i]).Scene == GUIActiveScene:
if (GUIRoll[i]).Status == 1:
inFocus = i
if Dir == ">":
for i in range(inFocus + 1, len(GUIRoll)):
if (GUIRoll[i]).Scene == GUIActiveScene and (GUIRoll[i]).Focusable:
(GUIRoll[inFocus]).Status = 0
(GUIRoll[i]).Status = 1
return
else:
for i in range(inFocus, 0, -1):
if (GUIRoll[i - 1]).Scene == GUIActiveScene and (GUIRoll[i - 1]).Focusable:
(GUIRoll[inFocus]).Status = 0
(GUIRoll[i - 1]).Status = 1
return
def GUIScene(Number):
global GUIActiveScene
GUIActiveScene = Number
lcd.fill(0x000000)
def GUIInputbox(obj):
if obj.Status == 0:
x = obj.X
y = obj.Y
width = obj.Width
color = 0xcccccc
elif obj.Status == 1:
x = obj.X
y = obj.Y
width = obj.Width
color = 0x33ccff
else:
lcd.fill(0x000000)
x = 10
y = 35
width = 300
color = 0x33ccff
lcd.font(lcd.FONT_Ubuntu)
lcd.print(obj.Label, x, y, color)
lcd.font(lcd.FONT_Default)
lcd.line(x, y + 40, x + width, y + 40, color)
cw = 10
ch = 16
s = obj.TextValue
mx = int(width / cw)
st = s[-mx:]
stl = len(st)
xp = len(s) - stl
for i in range(stl):
lcd.print(st[i], x + i * cw, y + 20, 0xffffff)
if obj.Status == 2:
kc = 0
caps = 0
cs = 0
cp = len(s[xp:]) if len(s[xp:]) < mx else mx
while True:
k, kc, caps = GUIKeyboard(kc, caps)
if k != '':
if len(k) == 1:
lcd.line(x + cp * cw, y + 20 - 4, x + cp * cw, y + 20 + ch, 0x000000)
s = s[0:xp+cp] + str(k) + s[xp+cp:]
k = 'right'
if k == 'backspace':
if cp > 0:
s = s[0:xp+cp-1] + s[xp+cp:]
k = 'left'
if k == 'esc':
lcd.clear()
obj.Status = 1
break
if k == 'enter':
lcd.clear()
obj.Status = 1
obj.TextValue = s
if obj.Function != None:
obj.Function(obj)
break
if k == 'left':
lcd.line(x + cp * cw, y + 20 - 4, x + cp * cw, y + 20 + ch, 0x000000)
if cp - 1 > 0:
cp = cp - 1
else:
xp = xp - 1 if xp - 1 >= 0 else xp
if k == 'right':
lcd.line(x + cp * cw, y + 20 - 4, x + cp * cw, y + 20 + ch, 0x000000)
if cp + 1 <= len(s[xp:xp+mx]):
cp = cp + 1
else:
xp = xp + 1 if xp + 1 + cp <= len(s) else xp
for i in range(mx):
lcd.print('W', x + i * cw, y + 20, 0x000000)
st = s[xp:xp+mx]
stl = len(st)
for i in range(stl):
lcd.print(st[i], x + i * cw, y + 20, 0xffffff)
else:
if time.ticks_us() - obj.Time >= 500000:
obj.Time = time.ticks_us()
if cs == 0:
lcd.line(x + cp * cw, y + 20 - 4, x + cp * cw, y + 20 + ch, 0xffffff)
cs = 1
else:
lcd.line(x + cp * cw, y + 20 - 4, x + cp * cw, y + 20 + ch, 0x000000)
cs = 0
return obj
def GUIHuman(obj):
r = 8
tail = r * 2
cw = 12
ch = 14
cells = 5
color = 0xffffff
if obj.Height == 0 or obj.Width == 0:
obj.Height = (r + 1 + tail + r + int(r / 2)) - (r * 3 - cells * ch)
obj.Width = len(obj.Label) * cw if len(obj.Label) * cw > r * 2 else r * 2
x = obj.X + r
y = obj.Y + obj.Height
lcd.circle(x, y, r, color=color)
lcd.circle(int(x - r / 3), int(y - r / 3), int(r / 5), color=0x33ccff)
lcd.circle(int(x + 1 + r / 3), int(y - r / 3), int(r / 5), color=0x33ccff)
lcd.line(x, y + r + 1, x, y + r + 1 + tail, color)
lcd.line(x, int(y + r + 1 + r * 2 / 3), x - r, int(y + r + 1 + r * 2 / 3), color)
lcd.line(x + 1, int(y + r + 1 + r * 2 / 3), x + r, int(y + r + 1 + r * 2 / 3), color)
lcd.line(x, y + r + 1 + tail, x - r, y + r + 1 + tail + r, color)
lcd.line(x, y + r + 1 + tail, x + r, y + r + 1 + tail + r, color)
if obj.Label != obj.Label_:
lcd.print(obj.Label_, int(x - len(obj.Label_) * cw / 3), int(y + r + 1 + tail + r + r / 2), 0x000000)
obj.Label_ = obj.Label
lcd.print(obj.Label, int(x - len(obj.Label) * cw / 3), int(y + r + 1 + tail + r + r / 2), 0xcccccc)
if time.ticks_us() - obj.Time >= 400000:
obj.Time = time.ticks_us()
if obj.NumberValue >= len(obj.TextValue):
obj.NumberValue = 0
for i in range(0, cells):
lcd.print("W", int(x - ch / 3), int(y - r * 3 - i * ch), 0x000000)
if len(obj.TextValue) > 0:
j = obj.NumberValue - i
if j >= 0:
lcd.print(obj.TextValue[j], int(x - ch / 3), int(y - r * 3 - i * ch), random.randint(0x00ffcc, 0x00ffff))
obj.NumberValue = obj.NumberValue + 1
return obj
def GUIButton(obj):
if obj.Status == 1:
lcd.print(obj.Label, obj.X, obj.Y, 0x33ccff)
elif obj.Status == 2:
lcd.print(obj.Label, obj.X, obj.Y, 0xffd700)
if obj.Function != None:
obj.Function(obj)
obj.Status = 1
else:
lcd.print(obj.Label, obj.X, obj.Y, 0xcccccc)
return obj
def GUIRange(obj):
if obj.Status == 1:
pass
elif obj.Status == 2:
pass
else:
lcd.print(obj.Label, obj.X, obj.Y, 0xcccccc)
#########################################################
##################### GUI END ###########################
#########################################################
def myFunc(obj):
speaker.tone(obj.NumberValue, 200)
obj = GUIObject()
obj.Name = "myFirstButton"
obj.Type = "button"
obj.NumberValue = 1000 # 1000 Hz
obj.Label = "Beep 1000 Hz"
obj.X = 10
obj.Y = 19
obj.Scene = 0
obj.Focusable = True
obj.Function = myFunc
GUIAttach(obj)
obj = None
obj = GUIObject()
obj.Name = "myFirstButton"
obj.Type = "button"
obj.NumberValue = 2000 # 2000 Hz
obj.Label = "Beep 2000 Hz"
obj.X = 10
obj.Y = 59
obj.Scene = 0
obj.Focusable = True
obj.Function = myFunc
GUIAttach(obj)
obj = None
lcd.fill(0x000000)
while True:
GUILive()
wait(0.001) |
from typing import List
class TreeNode:
def __init__(self, x):
self.val = x
self.left = None
self.right = None
class BinTree:
def printTree(self,root:TreeNode)->None:
LevelList = [root]
self.printLevel(LevelList)
def printLevel(self,LevelList:List[TreeNode])-> List[TreeNode]:
LevelStr = ""
outList = []
ListEmpty = True
for node in LevelList:
if node is None:
LevelStr += "None "
outList.append(None)
outList.append(None)
else:
LevelStr += (str(node.val) + " ")
outList.append(node.left)
outList.append(node.right)
ListEmpty = False
if not ListEmpty:
print(LevelStr)
self.printLevel(outList)
class Solution:
def isValidBST(self, root: TreeNode) -> bool:
def helper(node, lower = float('-inf'), upper = float('inf')):
if node is None:
print("Node is None") #Diagnostic
return True
nl=node.left.val if node.left else 0 #Diagnostic
nr=node.right.val if node.right else 0 #Diagnostic
val = node.val #Diagnostic
print(val,'\t',nl,'\t',nr,'\t',lower,'\t',upper) #Diagnostic
if not val: val = 0
if val <= lower or val >= upper:
return False
if not helper(node.right, val, upper):
return False
if not helper(node.left, lower, val):
return False
return True
print('val','\t','left','\t','right','\t','lower','\t','upper')
return helper(root)
#Driver code
root = TreeNode(2)
root.left = TreeNode(1)
root.right = TreeNode(4)
root.right.left = TreeNode(3)
root.right.right = TreeNode(6)
'''
root = TreeNode(5)
root.left = TreeNode(1)
root.right = TreeNode(4)
root.right.left = TreeNode(3)
root.right.right = TreeNode(6)
root = TreeNode(2)
root.left = TreeNode(1)
root.right = TreeNode(3)
'''
bst = BinTree()
bst.printTree(root)
sol = Solution()
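# For the driver tree above (2 with children 1 and 4, where 4 has children 3 and 6),
# every node respects its (lower, upper) bounds, so the call below should print True.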
print("Tree is valid?",sol.isValidBST(root)) |
import requests
from bs4 import BeautifulSoup as bs
class SejongAuth:
def __init__(self):
self.TIMEOUT_SEC = 10
def do_sejong(self, id: str, pw: str):
header = {
"User-Agent":"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_5)\
AppleWebKit 537.36 (KHTML, like Gecko) Chrome",
"Accept":"text/html,application/xhtml+xml,application/xml;\
q=0.9,imgwebp,*/*;q=0.8"
}
data = {
'email': id,
'password': pw
}
with requests.Session() as s:
html = s.post(
"https://do.sejong.ac.kr/ko/process/member/login",
headers=header, data=data, timeout=self.TIMEOUT_SEC
).content
html = s.get(
"https://do.sejong.ac.kr/",
timeout=self.TIMEOUT_SEC
).text
soup = bs(html, "html.parser")
soup = soup.select("div.info")
if soup == []:
return {"result": False}
name = soup[0].find("b").get_text().strip()
major = soup[0].find("small").get_text().strip().split(" ")[1]
return {
"result": True,
"name": name,
"id": id,
"major": major
}
def portal_sejong(self, id: str, pw: str):
header = {
"Referer": "https://portal.sejong.ac.kr",
"User-Agent": "Mozilla/5.0 (Windows NT 6.3; Win64; x64; rv:66.0) Gecko/20100101 Firefox/66.0"
}
data = {
"id": id,
"password": pw,
'rtUrl': '',
}
with requests.Session() as s:
s.post(
'https://portal.sejong.ac.kr/jsp/login/login_action.jsp',
headers=header, data=data, timeout=self.TIMEOUT_SEC
)
res = s.get('https://portal.sejong.ac.kr/main.jsp', timeout=self.TIMEOUT_SEC)
soup = bs(res.content, 'html.parser')
name = soup.select_one('div.info0 > div')
if name is None:
return {"result":False}
name = name.get_text().split("(")[0]
return {
"result": True,
"name": name,
"id": id,
}
if __name__ == '__main__':
auth = SejongAuth()
id, pw = "16011089", "!hkw45799"
print(auth.do_sejong(id, pw))
print(auth.portal_sejong(id, pw))
|
import os
import io
import httpretty
class APIMock():
"""
Responses should be a {method: filename} map
"""
def __init__(self, mock_url, mock_dir, responses):
self.mock_url = mock_url
self.responses = responses
self.mock_dir = mock_dir
def request_callback(self, request, uri, headers):
method = request.parsed_body[u'method'][0]
filename = self.responses[method]
with io.open(os.path.join(self.mock_dir, filename), 'r') as f:
contents = f.read()
return (200, headers, contents)
def __enter__(self):
httpretty.enable()
httpretty.register_uri(httpretty.POST, self.mock_url,
body=self.request_callback)
def __exit__(self, type, value, traceback):
httpretty.disable()
httpretty.reset()
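# Usage sketch (names and paths are assumptions for illustration):
#   mock = APIMock("http://api.example.test/rpc", "tests/fixtures",
#                  {"getUser": "get_user_response.json"})
#   with mock:
#       # code under test POSTs a form body containing method=getUser and
#       # receives the contents of get_user_response.json with status 200
#       ...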
|
import random
import math
from typing import NamedTuple
import uuid
from timeit import default_timer as timer
from datetime import datetime
import numba as nb
import numpy as np
import pandas as pd
from .scenario import Scenario
from .evse import EVSEType, EVSEDef, Bank
from .powertrain import Powertrain, PType
from .plug import DCPlug
from .types import vehicle_, evse_, make_evse_banks, make_fleet
from .utils import isin_
from .data import fleet_from_df, banks_from_df, make_mask, write_data, load_data
from .datastorage import Storage, DatasetInfo, StorageInfo
from .masks import MaskRules
from .simulation.loop import sim_loop
def simulate(distance_df: pd.DataFrame, sc: Scenario):
df = distance_df
df_index = df.index
rows = len(df_index)
interval_len = df.index.to_series().diff().min().seconds / 60.0
home_mask = sc.mask_rules.make_mask(df)
fleet_size = len(df.columns)
fleet = sc.fleet_def.make_fleet(fleet_size)
home_banks = Bank.make_banks(sc.home_banks, fleet_size, False)
away_banks = Bank.make_banks(sc.away_banks, fleet_size, True)
away_banks[:]['power'] = away_banks[:]['power_max']
num_banks = home_banks.shape[0]
use_soc_queue = False
    out = sim_loop(
df,
fleet,
home_banks,
away_banks,
        home_mask.values,
interval_len,
sc.home_threshold_min,
sc.away_threshold_min,
sc.idle_load_kw,
use_soc_queue,
sc.soc_buffer
)
return out |
import requests
from bs4 import BeautifulSoup
# function to return index of second longest element
def second_longest(listi):
new_list = listi.copy()
new_list.remove(max(new_list))
return listi.index(max(new_list))
# function to get lyrics
def get_lyrics(song):
url = requests.get('https://www.azlyrics.com/lyrics/' + song + '.html')
soup = BeautifulSoup(url.text, 'html.parser')
# get list of html text
elem = soup.get_text().split('\r')
# get length of elements in elem
length = []
for i in elem:
length.append(len(i.split()))
# lyrics are usually the longest element in the list
lyrics = elem[length.index(max(length))]
# but it could also be the description in the bottom. in that case, lyrics are the second longest.
if 'Submit Corrections' in lyrics:
lyrics = elem[second_longest(length)]
return lyrics
# format of url is: https://www.azlyrics.com/lyrics/taylorswift/me.html
song_list = [] # insert list of songs: song is formatted as artist/title
scraped_lyrics = []
for song in song_list:
scraped_lyrics.append(get_lyrics(song))
|
"""
byceps.blueprints.site.authentication.login.views
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
:Copyright: 2006-2021 Jochen Kupperschmidt
:License: Revised BSD (see `LICENSE` file for details)
"""
from flask import abort, g, request, url_for
from flask_babel import gettext
from .....services.authentication.exceptions import AuthenticationFailed
from .....services.authentication import service as authentication_service
from .....services.authentication.session import service as session_service
from .....services.consent import (
consent_service,
subject_service as consent_subject_service,
)
from .....services.site import service as site_service
from .....services.site.transfer.models import Site
from .....services.verification_token import (
service as verification_token_service,
)
from .....signals import auth as auth_signals
from .....typing import UserID
from .....util.framework.blueprint import create_blueprint
from .....util.framework.flash import flash_notice, flash_success
from .....util.framework.templating import templated
from .....util import user_session
from .....util.views import redirect_to, respond_no_content
from .forms import LoginForm
blueprint = create_blueprint('authentication_login', __name__)
# -------------------------------------------------------------------- #
# log in/out
@blueprint.get('/login')
@templated
def login_form():
"""Show login form."""
if g.user.authenticated:
flash_notice(
gettext(
'You are already logged in as "%(screen_name)s".',
screen_name=g.user.screen_name,
)
)
return redirect_to('dashboard.index')
if not _is_site_login_enabled():
return {
'login_enabled': False,
}
form = LoginForm()
site = _get_site()
return {
'login_enabled': True,
'form': form,
'user_account_creation_enabled': site.user_account_creation_enabled,
}
@blueprint.post('/login')
@respond_no_content
def login():
"""Allow the user to authenticate with e-mail address and password."""
if g.user.authenticated:
return
if not _is_site_login_enabled():
abort(403, 'Log in to this site is generally disabled.')
form = LoginForm(request.form)
screen_name = form.screen_name.data.strip()
password = form.password.data
permanent = form.permanent.data
if not all([screen_name, password]):
abort(403)
try:
user = authentication_service.authenticate(screen_name, password)
except AuthenticationFailed:
abort(403)
if _is_consent_required(user.id):
verification_token = verification_token_service.create_for_consent(
user.id
)
consent_form_url = url_for(
'consent.consent_form', token=verification_token.token
)
return [('Location', consent_form_url)]
# Authorization succeeded.
auth_token, event = session_service.log_in_user(
user.id, request.remote_addr, site_id=g.site_id
)
user_session.start(user.id, auth_token, permanent=permanent)
flash_success(
gettext(
'Successfully logged in as %(screen_name)s.',
screen_name=user.screen_name,
)
)
auth_signals.user_logged_in.send(None, event=event)
return [('Location', url_for('dashboard.index'))]
def _is_consent_required(user_id: UserID) -> bool:
required_subject_ids = (
consent_subject_service.get_subject_ids_required_for_brand(g.brand_id)
)
return not consent_service.has_user_consented_to_all_subjects(
user_id, required_subject_ids
)
@blueprint.post('/logout')
@respond_no_content
def logout():
"""Log out user by deleting the corresponding cookie."""
user_session.end()
flash_success(gettext('Successfully logged out.'))
# helpers
def _is_site_login_enabled() -> bool:
site = _get_site()
return site.login_enabled
def _get_site() -> Site:
return site_service.get_site(g.site_id)
|
from hk_sp import *
from numberGen import generate
class SpData():
def createData(self):
enabled = [1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 0, 1, 0, 0, 0]
hk_packet = bytearray(hk_sp_enabled(enabled))
hk_packet.extend(hk_sp_errors([1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 0, 1, 0, 0, 0]))
hk_packet.extend(hk_sp_temp_current(generate(6, -10, 50)))
hk_packet.extend(hk_sp_mppt_current(generate(6666, 0, 65535), enabled[9]))
hk_packet.extend(hk_sp_coil_current(generate(6666, 0, 65535), enabled[10]))
return hk_packet
|
import pytest
from django.conf import settings
from django_fsm_log.models import StateLog
pytestmark = pytest.mark.ignore_article
def test_log_not_created_if_model_ignored(article):
assert len(StateLog.objects.all()) == 0
article.submit()
article.save()
assert len(StateLog.objects.all()) == 0
def test_log_created_on_transition_when_model_not_ignored(article):
settings.DJANGO_FSM_LOG_IGNORED_MODELS = ["tests.models.SomeOtherModel"]
assert len(StateLog.objects.all()) == 0
article.submit()
article.save()
assert len(StateLog.objects.all()) == 1
|
import os
import argparse
import pandas as pd
import json
from tqdm import tqdm
DATA_DIR = os.path.join(os.environ['data'], 'allstate')
# Collect arguments (if any)
parser = argparse.ArgumentParser()
# Data directory
parser.add_argument('--data_dir', type=str, default=DATA_DIR, help='Path to the csv files.')
args = parser.parse_args()
train_df = pd.read_csv(os.path.join(args.data_dir, 'train.csv'))
test_df = pd.read_csv(os.path.join(args.data_dir, 'test.csv'))
mappings = {}; emb_size = {}
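# Each categorical column is mapped to contiguous 1-based integer ids
# (0 is reserved for levels that appear only in the test set), and the
# number of distinct levels is recorded for sizing embedding layers.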
for col in tqdm([f'cat{i}' for i in range(1, 117)]):
vals = sorted(train_df[col].unique())
emb_size[col] = len(vals)
mapping = dict(zip(vals, range(1,len(vals)+1)))
mappings[col] = mapping
train_df[col] = train_df[col].apply(lambda x: mapping[x])
test_df[col] = test_df[col].apply(lambda x: mapping[x] if x in mapping else 0)
train_df.to_csv(os.path.join(args.data_dir, 'traindata.csv'), index=False)
test_df.to_csv(os.path.join(args.data_dir, 'testdata.csv'), index=False)
with open('data/emb_size.json', 'w') as f:
json.dump(emb_size, f)
with open('data/mappings.json', 'w') as f:
json.dump(mappings, f)
|