# import_data/mysql-connector-python-2.1.6/examples/transaction.py (bopopescu/nutrition)
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# MySQL Connector/Python - MySQL driver written in Python.
# Copyright (c) 2009, 2014, Oracle and/or its affiliates. All rights reserved.
# MySQL Connector/Python is licensed under the terms of the GPLv2
# <http://www.gnu.org/licenses/old-licenses/gpl-2.0.html>, like most
# MySQL Connectors. There are special exceptions to the terms and
# conditions of the GPLv2 as it is applied to this software, see the
# FOSS License Exception
# <http://www.mysql.com/about/legal/licensing/foss-exception.html>.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
"""
Example using MySQL Connector/Python showing:
* dropping and creating a table
* using warnings
* doing a transaction, rolling it back and committing one.
"""
import mysql.connector
def main(config):
output = []
db = mysql.connector.Connect(**config)
cursor = db.cursor()
# Drop table if exists, and create it new
stmt_drop = "DROP TABLE IF EXISTS names"
cursor.execute(stmt_drop)
stmt_create = """
CREATE TABLE names (
id TINYINT UNSIGNED NOT NULL AUTO_INCREMENT,
name VARCHAR(30) DEFAULT '' NOT NULL,
cnt TINYINT UNSIGNED DEFAULT 0,
PRIMARY KEY (id)
) ENGINE=InnoDB"""
cursor.execute(stmt_create)
warnings = cursor.fetchwarnings()
if warnings:
ids = [ i for l,i,m in warnings]
output.append("Oh oh.. we got warnings..")
if 1266 in ids:
output.append("""
Table was created as MYISAM, no transaction support.
Bailing out, no use to continue. Make sure InnoDB is available!
""")
db.close()
return output  # return what was collected so the caller can still print it
# Insert 3 records
output.append("Inserting data")
names = ( ('Geert',), ('Jan',), ('Michel',) )
stmt_insert = "INSERT INTO names (name) VALUES (%s)"
cursor.executemany(stmt_insert, names)
# Roll back!!!!
output.append("Rolling back transaction")
db.rollback()
# There should be no data!
stmt_select = "SELECT id, name FROM names ORDER BY id"
cursor.execute(stmt_select)
rows = None
try:
rows = cursor.fetchall()
except mysql.connector.InterfaceError as e:
raise
if rows == []:
output.append("No data, all is fine.")
else:
output.append("Something is wrong, we have data although we rolled back!")
output.append(rows)
cursor.close()
db.close()
return output
# Do the insert again.
cursor.executemany(stmt_insert, names)
# Data should be already there
cursor.execute(stmt_select)
output.append("Data before commit:")
for row in cursor.fetchall():
output.append("%d | %s" % (row[0], row[1]))
# Do a commit
db.commit()
cursor.execute(stmt_select)
output.append("Data after commit:")
for row in cursor.fetchall():
output.append("%d | %s" % (row[0], row[1]))
# Cleaning up, dropping the table again
cursor.execute(stmt_drop)
cursor.close()
db.close()
return output
if __name__ == '__main__':
config = {
'host': 'localhost',
'port': 3306,
'database': 'test',
'user': 'root',
'password': '',
'charset': 'utf8',
'use_unicode': True,
'get_warnings': True,
}
out = main(config)
print('\n'.join(out))
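# Usage sketch (not part of the original example): main() can be driven with any
# connection settings; the values below are placeholders, not a real server.
#
#   my_config = dict(config, host='db.example.com', user='appuser', password='secret')
#   print('\n'.join(main(my_config)))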
# cpdb/data_importer/copa_crawler/parser.py (invinst/CPDBv2_backend)
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from collections import OrderedDict
from datetime import datetime
import iso8601
import pytz
from data.constants import AttachmentSourceType, MEDIA_TYPE_DOCUMENT
class Field(object):
pass
class Just(Field):
def __init__(self, value):
self.value = value
def parse(self, _):
return self.value
class SimpleField(Field):
def __init__(self, field_name='', ignore_values=None):
self.field_name = field_name
self.ignore_values = ignore_values or []
def parse(self, row):
return row
class CharField(SimpleField):
def parse(self, row):
value = row.get(self.field_name, '')
# We accept the fact that no-value of string is ''. Should reconsider when we work with a complete solution to
# differentiate between no-value and empty value.
if not value:
return ''
return value.strip()
class MediaTypeField(CharField):
def parse(self, row):
value = row.get(self.field_name, '')
if value.lower().startswith('video'):
return 'video'
if value.lower().startswith('audio'):
return 'audio'
return 'document'
class CompositeField(object):
def __init__(self, layout=None):
self.layout = layout or {}
def parse(self, row):
record = {}
for key in self.layout:
record[key] = self.layout[key].parse(row)
# FIXME: still return when we have value int(0)
if any([record[key] for key in record]):
return record
class TagField(CharField):
def _clean(self, title):
return title.lower().replace('-', ' ').replace('\'s', '').replace(u'’s', '')
def _guess_tag(self, title):
KEYWORD_TYPE_MAP = OrderedDict({
'audio': 'Audio',
'video': 'Video',
'arrest report': 'AR',
'officer battery report': 'OBR',
'original case incident report': 'OCIR',
'tactical response report': 'TRR',
'case report': 'CR',
})
cleaned_title = self._clean(title)
for keyword, document_type in KEYWORD_TYPE_MAP.items():
if keyword in cleaned_title:
return document_type
return 'Other'
def parse(self, row):
title = row.get(self.field_name, '')
document_type = self._guess_tag(title)
return document_type
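# Illustrative behaviour (titles invented for the sketch): a title containing
# "Officer Battery Report" maps to 'OBR', while an unrecognised title such as
# "Misc notes" falls through to 'Other'.
#
#   TagField(field_name='title').parse({'title': 'Officer Battery Report of Jan 1'})  # -> 'OBR'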
class PortalAttachmentFileField(object):
def parse(self, record):
schema = CompositeField(layout={
'file_type': MediaTypeField(field_name='type'),
'title': CharField(field_name='title'),
'url': CharField(field_name='link'),
'original_url': CharField(field_name='link'),
'tag': TagField(field_name='title'),
'source_type': Just(AttachmentSourceType.PORTAL_COPA),
'external_last_updated': DateTimeField(field_name='last_updated'),
})
return schema.parse(record)
class SummaryReportsAttachmentFileField(object):
def parse(self, record):
schema = CompositeField(layout={
'file_type': Just(MEDIA_TYPE_DOCUMENT),
'title': Just('COPA Summary Report'),
'url': CharField(field_name='link'),
'original_url': CharField(field_name='link'),
'source_type': Just(AttachmentSourceType.SUMMARY_REPORTS_COPA),
'external_last_updated': DateTimeField(field_name='last_updated'),
})
return schema.parse(record)
class ArraySourceField(object):
def __init__(self, field_name, parser):
self.field_name = field_name
self.parser = parser
def parse(self, row):
values = row.get(self.field_name, [])
return [self.parser.parse(value) for value in values]
class NotSupportedDateFormatException(Exception):
pass
class DateTimeField(SimpleField):
DATE_SUPPORTED_PATTERNS = ['%m-%d-%Y %I:%M %p', '%B %d, %Y']
def parse(self, row):
value = row.get(self.field_name, '')
if not value:
return None
for pattern in self.DATE_SUPPORTED_PATTERNS:
try:
return datetime.strptime(value, pattern).replace(tzinfo=pytz.utc)
except ValueError:
pass
try:
return iso8601.parse_date(value)
except iso8601.ParseError:
pass
raise NotSupportedDateFormatException(value)
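# Usage sketch: how the field classes above compose on a single crawler record.
# The record below is invented for illustration; real records come from the
# COPA crawler and may carry more keys.
if __name__ == '__main__':
    sample_record = {
        'type': 'Audio Recording',
        'title': 'Officer Battery Report of 2017-01-01',
        'link': 'https://example.com/attachments/obr.mp3',
        'last_updated': '01-31-2017 10:15 AM',
    }
    # Yields a dict with file_type, title, url, original_url, tag,
    # source_type and external_last_updated keys.
    print(PortalAttachmentFileField().parse(sample_record))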
# src/yellowdog_client/model/provisioned_worker_pool.py (yellowdog/yellowdog-sdk-python-public)
from dataclasses import dataclass, field
from datetime import datetime
from typing import Optional
from .node_summary import NodeSummary
from .provisioned_worker_pool_properties import ProvisionedWorkerPoolProperties
from .worker_pool import WorkerPool
from .worker_pool_status import WorkerPoolStatus
from .worker_summary import WorkerSummary
@dataclass
class ProvisionedWorkerPool(WorkerPool):
type: str = field(default="co.yellowdog.platform.model.ProvisionedWorkerPool", init=False)
id: Optional[str] = None
name: Optional[str] = None
createdTime: Optional[datetime] = None
status: Optional[WorkerPoolStatus] = None
statusChangedTime: Optional[datetime] = None
expectedNodeCount: int = 0
awaitingNodes: bool = False
workerSummary: Optional[WorkerSummary] = None
nodeSummary: Optional[NodeSummary] = None
properties: Optional[ProvisionedWorkerPoolProperties] = None
computeRequirementId: Optional[str] = None
"""The ID of the compute requirement used to provision the compute resource."""
# objects_API/WholeDNAJ.py (diogo1790team/inphinity_DM)
from marshmallow import Schema, fields, post_load
from rest_client.WholeDNARest import WholeDNAAPI
class WholeDNASchema(Schema):
"""
This class maps the json into the WholeDNA object
..note:: see marshmallow API
"""
id = fields.Int()
id_db_online = fields.Str()
sequence_DNA = fields.Str()
fasta_head = fields.Str()
organism = fields.Int()
@post_load
def make_WholeDNA(self, data):
return WholeDNAJson(**data)
class WholeDNAJson(object):
"""
This class manage the object and is used to map them into json format
"""
def __init__(self, id = None, id_db_online = None, sequence_DNA= None, fasta_head = None, organism= None):
"""
Initialization of the class
:param id: id of the whole DNA entry
:param id_db_online: id of the online database the DNA comes from
:param sequence_DNA: DNA sequence
:param fasta_head: head of the fasta from where the DNA was extracted
:param organism: id of the organism
:type id: int
:type id_db_online: string
:type sequence_DNA: string
:type fasta_head: string
:type organism: int
"""
self.id = id
self.id_db_online = id_db_online
self.sequence_DNA = sequence_DNA
self.fasta_head = fasta_head
self.organism = organism
def __str__(self):
"""
override the Str function
"""
return 'id: {0} fasta head {1}'.format(self.id, self.fasta_head)
def getAllAPI():
"""
get all the WholeDNAs on the database
:return: list of WholeDNAs
:rtype: vector[WholeDNAJson]
"""
list_wholeDNA = WholeDNAAPI().get_all()
schema = WholeDNASchema()
results = schema.load(list_wholeDNA, many=True)
return results[0]
def setWholeDNA(self):
"""
set new wholeDNA
:return: new wholeDNA completed with the id
:rtype: WholeDNAJson
"""
schema = WholeDNASchema(only=['id_db_online','sequence_DNA','fasta_head','organism'])
jsonWholeDNA = schema.dump(self)
resultsCreation = WholeDNAAPI().set_wholeDNA(jsonData = jsonWholeDNA.data)
schema = WholeDNASchema()
results = schema.load(resultsCreation)
return results[0]
def getByOrganismID(organism_id:int):
"""
get the whole_dna of a given organism
:param organism_id: organism ID
:type organism_id: int
:return: the whole_dna of the given organism id
:rtype: WholeDna
"""
resultsJson = WholeDNAAPI().getByOrganismID(organism_id = organism_id)
schema = WholeDNASchema()
results = schema.load(resultsJson, many=False)
return results[0]
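# Round-trip sketch (hedged): the tuple-style results[0] and .data accesses above
# suggest marshmallow 2.x semantics. Under that assumption, and with invented
# field values:
#
#   dna = WholeDNAJson(id_db_online='NC_000001', sequence_DNA='ATGC',
#                      fasta_head='>example', organism=42)
#   payload = WholeDNASchema().dump(dna).data     # object -> plain dict
#   restored = WholeDNASchema().load(payload)[0]  # dict -> WholeDNAJson again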
# lookup.py (lambaradan/PyWORDS)
# Main methods for looking up words from the dictionary
import PYWORDS.definitions as definitions
from PYWORDS.matchfilter import MatchFilter
import re
import os
import bisect
dictline = []
stems1 = []
stems2 = []
stems3 = []
stems4 = []
def load_dictionary():
f = open(os.path.join(os.path.dirname(os.path.abspath(__file__)),'data/DICTLINE.GEN'))
orig_dictline = f.readlines()
f.close()
for l in orig_dictline:
dictline.append( {'stem1':l[0:19].strip(),
'stem2':l[19:38].strip(),
'stem3':l[38:57].strip(),
'stem4':l[57:76].strip(),
'entry':definitions.build_dictline_entry(l[76:].strip())})
# Get sorted stems with original indices
# enumerate provides iterable with (idx,element) tuples
# sorted key uses element (e[1]) as sort parameter
# sorted returns a list of tuples (idx,element), and then all tuples are flipped
# to give (element,idx)
global stems1,stems2,stems3,stems4
stems1 = sorted(enumerate([d['stem1'] for d in dictline],start=0),key=lambda e:e[1])
stems1 = [(s[1],s[0]) for s in stems1] # Flip elements for comparison later
stems2 = sorted(enumerate([d['stem2'] for d in dictline],start=0),key=lambda e:e[1])
stems2 = [(s[1],s[0]) for s in stems2] # Flip elements for comparison later
stems3 = sorted(enumerate([d['stem3'] for d in dictline],start=0),key=lambda e:e[1])
stems3 = [(s[1],s[0]) for s in stems3] # Flip elements for comparison later
stems4 = sorted(enumerate([d['stem4'] for d in dictline],start=0),key=lambda e:e[1])
stems4 = [(s[1],s[0]) for s in stems4] # Flip elements for comparison later
orig_dictline = None # Clean up
# Returns a dictionary of stem : ending pairs by starting with no ending and working backwards
# If skip_zero==True, assume there is an ending and start with 1 letter instead of ending=''
def find_endings(w,skip_zero=False):
endings = {}
if skip_zero:
for i in range(len(w)-1,0,-1):
wsub = w[i:]
if wsub in definitions.endings_list:
endings[w[:i]] = wsub
else:
for i in range(len(w),0,-1):
wsub = w[i:]
if wsub in definitions.endings_list:
endings[w[:i]] = wsub
return endings
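# Illustrative example (actual splits depend on definitions.endings_list):
#
#   find_endings('portam')  # might return {'port': 'am', 'porta': 'm'}
#
# i.e. every split point whose right-hand side is a recognised ending.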
def _simple_match(w):
'''
Core word match method. Tries all stem/ending combinations that are valid and searches
for the stem in the dictionary. Finally, checks that ending is a valid ending given the
dictline entry (declension, conjugation, variant, etc).
Return a list of matched words in the format [stem, ending, dictline entry]
'''
matches = []
endings = find_endings(w)
for stem,e in endings.items():
match_ids = []
idx1_s = bisect.bisect(stems1,(stem,0)) # First entry match
if idx1_s != len(stems1) and stems1[idx1_s][0] == stem: # if actual match
idx1_e = idx1_s # find end index, last element that is a true match
while stems1[idx1_e][0] == stem and idx1_e+1 < len(stems1):
idx1_e += 1
# stems1[idx1_e-1] is now the last matching stem entry
for i in range(idx1_s,idx1_e):
if stems1[i][1] not in match_ids:
match_ids.append(stems1[i][1]) # append original indices
idx2_s = bisect.bisect(stems2,(stem,0)) # First entry match
if idx2_s != len(stems2) and stems2[idx2_s][0] == stem: # if actual match
idx2_e = idx2_s # find end index, last element that is a true match
while stems2[idx2_e][0] == stem and idx2_e+1 < len(stems2):
idx2_e += 1
# stems2[idx2_e-1] is now the last matching stem entry
for i in range(idx2_s,idx2_e):
if stems2[i][1] not in match_ids:
match_ids.append(stems2[i][1]) # append original indices
idx3_s = bisect.bisect(stems3,(stem,0)) # First entry match
if idx3_s != len(stems3) and stems3[idx3_s][0] == stem: # if actual match
idx3_e = idx3_s # find end index, last element that is a true match
while stems3[idx3_e][0] == stem and idx3_e+1 < len(stems3):
idx3_e += 1
# stems3[idx3_e-1] is now the last matching stem entry
for i in range(idx3_s,idx3_e):
if stems3[i][1] not in match_ids:
match_ids.append(stems3[i][1]) # append original indices
idx4_s = bisect.bisect(stems4,(stem,0)) # First entry match
if idx4_s != len(stems4) and stems4[idx4_s][0] == stem: # if actual match
idx4_e = idx4_s # find end index, last element that is a true match
while stems4[idx4_e][0] == stem and idx4_e+1 < len(stems4):
idx4_e += 1
# stems4[idx4_e-1] is now the last matching stem entry
for i in range(idx4_s,idx4_e):
if stems4[i][1] not in match_ids:
match_ids.append(stems4[i][1]) # append original indices
if match_ids:
entries = [dictline[idx] for idx in match_ids]
for entr in entries:
matches.append([stem,e,entr])
matches = [match for match in matches if is_possible_ending(match)]
return matches
def _remove_enclitics(w):
if w[-3:] == 'que':
w = w[:len(w)-3] # Remove the 'que'
elif w[-2:] == 'ne':
w = w[:len(w)-2] # Remove the 'ne'
elif w[-2:] == 've':
w = w[:len(w)-2] # Remove the 've'
return w
def _fix_i_j(w):
'''
Fix 'i' and 'j' problems
Rules:
From: https://latin.stackexchange.com/a/1310/8704
'V' = vowel, 'C' = consonant
i+V => j+V, e.g. iubeo (in most cases)
i+V => i+V
only in some forms of the pronoun is (ii, iis) and the verb ire (iens, ii, ieram);
also in Greek loans (iambus, iaspis, iota, Io, Iones etc.)
In compounds and prefixed verbs
C+i => C+j adiacet
V+i => V+j seiungo
V+i+V => V+i+V only in very few examples
In Greek names: Achaia, Laius, Naiades, Troius, Acheloius, Minoius; Pleiades, etc.;
In some adjectives: -uus, -uis followed by the comparative suffix –ior (strenuior, tenuior)
In some nouns
The assumption is that 'i' is used where 'j' should be, so the i => i cases are covered.
Only the i+V => j+V case, and the compound and prefixed verb i => j cases are needed.
'''
# The dictionary tends to follow the OLD and L+S for replacing 'i' with 'j'. A common
# example is 'iuvo','adiuvo' (to help) which is written 'juvo','adjuvo' in L+S and others.
# The 'j' is the consonant 'i' sound, while 'i' is the vowel 'i' sound. This distinction
# is often made but finding hard and fast rules for it can be challenging.
# Examples:
# adjecto, ajuga, dijudico, cujus, hujus, altijugus,
# baijulus, circumjiceo, objrasco (very rare to have C+j+C),
# objicio, quadrijugus
# deistatus, deitas, dejectus
# The obvious solution for the i/j and u/v problems is to simply convert everything to
# i and u in both the word-to-be-matched and the dictionary. But then the printed
# dictionary entry won't be correct. An ideal solution would treat i and j (and u and v)
# as completely interchangeable, but only for searching purposes.
# Another choice is to have two dictionaries loaded, one with i/j and u/v and the other
# using only i and only u. Search one, use the index to get the other. But this increases
# the memory usage significantly, the dictionary is -big-.
# We'll settle for just trying to fix i's and j's based on conventions.
# First find all 'i' appearances
if w.find('i') == -1:
return w
idxs = [w.find('i')] # Initialize
while w.find('i',idxs[-1]+1) != -1:
idxs.append(w.find('i',idxs[-1]+1))
# Now replace any that are at index 0, or which are next to a vowel
vowels=['a','e','i','o','u']
for i in idxs:
if i == 0:
w = 'j'+w[1:]
elif i < len(w)-2 and i != 0: # If this isn't the first or one of the last two letters
if w[:i] in definitions.prefixes:
w = w[0:i]+'j'+w[i+1:]
elif w[i+1] in vowels and w[i-1] in vowels:
w = w[0:i]+'j'+w[i+1:]
return w
def _fix_u_v(w):
'''
Fix 'u' and 'v' problems
Examples:
-ivi (ending)
-ave (ending)
abavus
alvus, alvi
alveus
circumvincio
divum
vir
vitare
uxorculo
uvidulus
vacca
ubi
ulcus
'''
# It's easier to first convert all 'v's to 'u's, then to find any common cases
# where 'u' could become 'v'
return w
def match_word(w):
'''
Try to match a word using the basic stem/ending search, then fall back on
removing enclitics and normalizing i/j and u/v if the first pass finds nothing.
'''
finished=False
fixed_i_j = False
fixed_u_v = False
removed_encls = False
while not finished:
matches = _simple_match(w)
if len(matches)>0:
finished = True
elif not removed_encls:
w = _remove_enclitics(w)
removed_encls = True
elif not fixed_i_j: # If we haven't tried interchanging i and j yet
w = _fix_i_j(w)
fixed_i_j = True
elif not fixed_u_v:
w = _fix_u_v(w)
fixed_u_v = True
else: # Search failed
finished = True
return matches
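# Sketch of typical use (word chosen arbitrarily): each hit is a
# [stem, ending, dictline_record] triple, so for example
#
#   for stem, ending, rec in match_word('aqua'):
#       print(stem, ending, rec['entry'].pos)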
# TODO
def print_noun_declensions(m):
'''Print the declensions of a noun
m must be in the format [stem,ending,dictline] (same as a match)
'''
dictline = m[2]
entry = dictline['entry']
stem1 = dictline['stem1']
stem2 = dictline['stem2']
def get_dictionary_string(m, full_info=False, header_only=False, markdown_fmt=False):
'''
Convert m into a string in dictionary style
m must be in the format [stem, ending, dictline] (same as a match)
If full_info is True, all available information is given.
If header_only, only the word header is given (no senses)
If markdown_fmt, place headwords in bold (**ex**), part of speech in italics (*ex*)
TODO This whole thing could be rewritten to be less hacky
'''
dictstr = ''
dictline = m[2]
entry = dictline['entry']
# For reference:
#stem1 = dictline['stem1']
#stem2 = dictline['stem2']
#stem3 = dictline['stem2']
#stem4 = dictline['stem2']
if entry.pos == 'N':
infl_filt = MatchFilter(ages=['X'],frequencies=['X','A'],variants=[entry.variant,'0'])
ninfl = definitions.NounInfl(decl=entry.decl,number='S')
matches = [n for n in definitions.inflections[entry.pos] if ninfl.matches(n)]
matches = [ma for ma in matches if infl_filt.check_inflection(ma,'N')]
gender_s = ''
if matches:
end1='' # sg nom
stem1=''
end2='' # sg gen
stem2=''
for ma in matches:
if ma.case == 'NOM' and not stem1:
end1=ma.ending
stem1=ma.stem
elif ma.case == 'GEN' and not stem2:
end2=ma.ending
stem2=ma.stem
if not stem1 and not stem2:
for ma in matches:
if ma.case == 'X':
end1 = ''
stem1 = '1'
# Set gender string
if not header_only:
if entry.gender == 'C':
gender_s = 'masc/fem'
elif entry.gender == 'N':
gender_s = 'neut'
elif entry.gender == 'M':
gender_s = 'masc'
elif entry.gender == 'F':
gender_s = 'fem'
nom_stem = dictline['stem'+stem1]
if stem2:
gen_stem = dictline['stem'+stem2]
if markdown_fmt:
dictstr += '**'
dictstr += nom_stem+end1+', '+gen_stem+end2
if markdown_fmt:
dictstr += '** *'
else:
dictstr += ' '
dictstr += gender_s
if markdown_fmt:
dictstr += '*'
dictstr += ' '
else:
if markdown_fmt:
dictstr += '**'
dictstr += nom_stem+end1
if markdown_fmt:
dictstr += '** *'
else:
dictstr += ' '
dictstr += gender_s
if markdown_fmt:
dictstr += '*'
dictstr += ' '
if full_info:
# add age, area, geography, frequency
dictstr += '('+definitions.dict_frequencies[entry.freq]+', '+\
definitions.ages[entry.age]+'. '
if entry.area != 'X':
dictstr += definitions.areas[entry.area]+'. '
if entry.geog != 'X':
dictstr += definitions.geographies[entry.geog]+'). '
else:
dictstr = dictstr.strip(' .')+'). ' # Avoid awkward spaces
dictstr += 'Source: '+definitions.source_types[entry.src]+'. '
if not header_only:
dictstr += ''.join(entry.senses)
if entry.pos == 'V':
# ex. singular indicative present active 1st person
#V 1 1 PRES ACTIVE IND 1 S 1 1 o X A
# ex. infinitive
#V 1 1 PRES ACTIVE INF 0 X 2 3 are X A
# TODO Fix semi-deponent, defective, and impersonal verbs
if entry.verb_kind == 'DEP':
vinfl = definitions.VerbInfl(conj=entry.conj,tense='PRES',voice='PASSIVE')
elif entry.verb_kind == 'SEMIDEP':
vinfl = definitions.VerbInfl(conj=entry.conj,tense='PRES',voice='ACTIVE')
else:
vinfl = definitions.VerbInfl(conj=entry.conj,tense='PRES',voice='ACTIVE')
if entry.verb_kind == 'IMPERS':
infl_filt = MatchFilter(ages=['X'],frequencies=['X','A'],variants=[entry.variant,'0'],
persons=['0','3'],moods=['IND','INF'])
else:
infl_filt = MatchFilter(ages=['X'],frequencies=['X','A'],variants=[entry.variant,'0'],
persons=['0','1'],moods=['IND','INF'])
matches = [v for v in definitions.inflections[entry.pos] if vinfl.matches(v)]
matches = [ma for ma in matches if infl_filt.check_inflection(ma,'V')]
end1='' # sg ind pres active 1st person
stem1=''
end2='' # pres active infinitive
stem2=''
for ma in matches:
if entry.verb_kind == 'IMPERS':
if ma.person == '3' and ma.mood == 'IND' and not end1:
end1 = ma.ending
stem1=ma.stem
elif ma.mood == 'INF' and not end2:
end2 = ma.ending
stem2 = ma.stem
else:
if ma.person == '1' and ma.mood == 'IND' and not end1:
end1 = ma.ending
stem1=ma.stem
elif ma.mood == 'INF' and not end2:
end2 = ma.ending
stem2 = ma.stem
if stem1 and stem2:
if markdown_fmt:
dictstr += '**'
dictstr += dictline['stem'+stem1]+end1
dictstr += ', '
dictstr += dictline['stem'+stem2]+end2
if dictline['stem3'] != 'zzz':
dictstr += ', '
if entry.verb_kind == 'IMPERS':
dictstr += dictline['stem3']+'it'
else:
dictstr += dictline['stem3']+'i'
if dictline['stem4'] != 'zzz':
dictstr += ', '
if entry.verb_kind == 'IMPERS':
dictstr += dictline['stem4']+'um est'
else:
dictstr += dictline['stem4']+'us'
if entry.verb_kind in ['DEP','SEMIDEP']:
dictstr += ' sum'
if markdown_fmt:
dictstr += '**'
dictstr += ' '
else:
if markdown_fmt:
dictstr += '**'
dictstr += m[0]+m[1]
if markdown_fmt:
dictstr += '**'
dictstr += ' '
if not header_only:
if entry.conj in ['1','2']:
dictstr += '['+entry.conj+'] '
elif entry.conj in ['3','8']:
if entry.variant in ['0','1']:
dictstr += '[3] '
elif entry.variant in ['2','3']:
dictstr += '[irreg] '
elif entry.variant == '4':
dictstr += '[4] '
elif entry.conj == '7':
if entry.variant in ['1','3']:
dictstr += '[3] '
else:
dictstr += '[irreg] '
elif entry.conj in ['5','6']:
dictstr += '[irreg] ' # Irregular
# else Abbreviations, indeclinable, etc can be skipped
if entry.verb_kind == 'TRANS':
if markdown_fmt:
dictstr += '*'
dictstr += 'vt'
if markdown_fmt:
dictstr += '*'
dictstr += ' '
elif entry.verb_kind == 'INTRANS':
if markdown_fmt:
dictstr += '*'
dictstr += 'vi'
if markdown_fmt:
dictstr += '*'
dictstr += ' '
elif entry.verb_kind == 'SEMIDEP':
if markdown_fmt:
dictstr += '*'
dictstr += 'v semidep'
if markdown_fmt:
dictstr += '*'
dictstr += ' '
elif entry.verb_kind == 'DEP':
if markdown_fmt:
dictstr += '*'
dictstr += 'v dep'
if markdown_fmt:
dictstr += '*'
dictstr += ' '
elif entry.verb_kind == 'IMPERS':
if markdown_fmt:
dictstr += '*'
dictstr += 'impers v'
if markdown_fmt:
dictstr += '*'
dictstr += ' '
elif entry.verb_kind == 'PERFDEF':
if markdown_fmt:
dictstr += '*'
dictstr += 'perf def v'
if markdown_fmt:
dictstr += '*'
dictstr += ' '
elif entry.verb_kind == 'GEN':
if markdown_fmt:
dictstr += '*'
dictstr += '(w/ gen)'
if markdown_fmt:
dictstr += '*'
dictstr += ' '
elif entry.verb_kind == 'DAT':
if markdown_fmt:
dictstr += '*'
dictstr += '(w/ dat)'
if markdown_fmt:
dictstr += '*'
dictstr += ' '
elif entry.verb_kind == 'ABL':
if markdown_fmt:
dictstr += '*'
dictstr += '(w/ abl)'
if markdown_fmt:
dictstr += '*'
dictstr += ' '
else:
if markdown_fmt:
dictstr += '*'
dictstr += 'vt'
if markdown_fmt:
dictstr += '*'
dictstr += ' '
if full_info:
# add age, area, geography, frequency
dictstr += '('+definitions.dict_frequencies[entry.freq]+', '+\
definitions.ages[entry.age]+'. '
if entry.area != 'X':
dictstr += definitions.areas[entry.area]+'. '
if entry.geog != 'X':
dictstr += definitions.geographies[entry.geog]+'). '
else:
dictstr = dictstr.strip(' .')+'). ' # Avoid awkward spaces
dictstr += 'Source: '+definitions.source_types[entry.src]+'. '
if not header_only:
dictstr += ''.join(entry.senses)
elif entry.pos == 'ADJ':
ainfl = definitions.AdjectiveInfl(decl=entry.decl,
number='S',case='NOM')
infl_filt = MatchFilter(ages=['X'],frequencies=['X','A'],variants=[entry.variant,'0'])
matches = [a for a in definitions.inflections[entry.pos] if ainfl.matches(a)]
matches = [ma for ma in matches if infl_filt.check_inflection(ma,'ADJ')]
end1='' # sg nom masc
stem1=''
end2='' # sg nom fem
stem2=''
end3='' # sg nom neut
stem3=''
for ma in matches:
if ma.gender == 'M' or ma.gender == 'X' or ma.gender == 'C' and not stem1:
end1 = ma.ending
stem1 = ma.stem
if ma.gender == 'F' or ma.gender == 'C' and not stem2:
end2 = ma.ending
stem2 = ma.stem
elif ma.gender == 'N' and not stem3:
end3 = ma.ending
stem3 = ma.stem
if stem1 and not stem2 and not stem3:
stem2 = stem1
end2 = end1
stem3 = stem1
end3 = end1
# For adjectives it's common for stems to be matching
if stem1 and stem2 and stem3:
stem1 = dictline['stem'+stem1]
stem2 = dictline['stem'+stem2]
stem3 = dictline['stem'+stem3]
if stem1 == stem2 and stem1 == stem3:
if markdown_fmt:
dictstr += '**'
dictstr += stem1+end1+' -'+end2+' -'+end3
if markdown_fmt:
dictstr += '**'
dictstr += ' '
elif stem1 == stem2:
if markdown_fmt:
dictstr += '**'
dictstr += stem1+end1+' -'+end3
if markdown_fmt:
dictstr += '**'
dictstr += ' '
else:
if markdown_fmt:
dictstr += '**'
dictstr += stem1+end1+' '+stem2+end2+' '+stem3+end3
if markdown_fmt:
dictstr += '**'
dictstr += ' '
else:
if markdown_fmt:
dictstr += '**'+m[0]+m[1]+'** '
else:
dictstr += m[0]+m[1]+' '
if not header_only:
if markdown_fmt:
dictstr += '*'
dictstr += 'adj'
if markdown_fmt:
dictstr += '*'
dictstr += ' '
dictstr += ''.join(entry.senses)
elif entry.pos == 'PRON':
infl_filt = MatchFilter(ages=['X'],frequencies=['X','A'],variants=[entry.variant,'0'])
pinfl = definitions.PronounInfl(decl=entry.decl,number='S')
matches = [p for p in definitions.inflections[entry.pos] if pinfl.matches(p)]
matches = [ma for ma in matches if infl_filt.check_inflection(ma,'PRON')]
if matches:
end1='' # sg nom
stem1=''
end2='' # sg gen
stem2=''
for ma in matches:
if ma.case == 'NOM' and not stem1:
end1=ma.ending
stem1=ma.stem
elif ma.case == 'GEN' and not stem2:
end2=ma.ending
stem2=ma.stem
if not stem1 and not stem2:
for ma in matches:
if ma.case == 'X':
end1 = ''
stem1 = '1'
if not stem1:
stem1='1'
nom_stem = dictline['stem'+stem1]
if stem2:
gen_stem = dictline['stem'+stem2]
dictstr = nom_stem+end1+', '+gen_stem+end2+' '
else:
dictstr = nom_stem+end1+' '
if full_info:
# add age, area, geography, frequency
dictstr += '('+definitions.dict_frequencies[entry.freq]+', '+\
definitions.ages[entry.age]+'. '
if entry.area != 'X':
dictstr += definitions.areas[entry.area]+'. '
if entry.geog != 'X':
dictstr += definitions.geographies[entry.geog]+'). '
else:
dictstr = dictstr.strip(' .')+'). ' # Avoid awkward spaces
dictstr += 'Source: '+definitions.source_types[entry.src]+'. '
if not header_only:
dictstr += ''.join(entry.senses)
elif entry.pos == 'CONJ':
if markdown_fmt:
dictstr = '**'+dictline['stem1'] + '** *conj* '
else:
dictstr = dictline['stem1'] + ' conj '
if not header_only:
dictstr += ''.join(entry.senses)
elif entry.pos == 'ADV':
if markdown_fmt:
dictstr = '**'+dictline['stem1']+'** *adv* '
else:
dictstr = dictline['stem1'] + ' adv '
if not header_only:
dictstr += ''.join(entry.senses)
elif entry.pos in ['PREP','PACK']:
if markdown_fmt:
dictstr = '**'+dictline['stem1'] + '** *prep* '
else:
dictstr = dictline['stem1'] + ' prep '
if not header_only:
dictstr += ''.join(entry.senses)
return dictstr.replace(' ',' ').strip(' ')
def is_possible_ending(match):
entry = match[2]['entry']
pos = entry.pos
if pos in ['PREP','PACK','TACKON','SUFFIX','PREFIX','X']:
return True # TODO
infl = None
if pos == 'V':
infl = definitions.build_inflection(part_of_speech=entry.pos,conj=entry.conj)
elif pos in ['N','ADJ','PRON','NUM']:
infl = definitions.build_inflection(part_of_speech=entry.pos,decl=entry.decl)
elif pos in ['ADV','PREP','CONJ','INTERJ']:
if match[1] != '':
return False
else:
return True
possible_endings = definitions.get_possible_endings(infl,entry.pos)
if match[1] in possible_endings:
return True
else:
return False
def get_word_inflections(match,less=False):
'''
Use a match (from match_word) to look up the possible inflections of a word. Returned as list of plain text
strings.
If less is False, information about the word is printed along with the inflection. If
less is True, only the inflection information and the word header are printed.
'''
entry = match[2]['entry']
infl_strings = []
head = get_dictionary_string(match,header_only=True)
pos = entry.pos
if pos in ['PREP','PACK','TACKON','SUFFIX','PREFIX','X']:
return []
infl = None
if pos == 'V':
infl = definitions.build_inflection(part_of_speech=entry.pos,conj=entry.conj,ending=match[1])
elif pos in ['N','ADJ','PRON','NUM']:
infl = definitions.build_inflection(part_of_speech=entry.pos,decl=entry.decl,ending=match[1])
elif pos in ['ADV','PREP','CONJ','INTERJ']:
return []
possible_infls = definitions.get_possible_inflections(infl,pos)
for minfl in possible_infls:
if less:
infl_strings.append(minfl.get_inflection_string(less=less)+'of '+head)
else:
infl_strings.append(minfl.get_inflection_string(less=less)+' '+head)
return infl_strings
def get_vocab_list(text,filt=MatchFilter(),full_info=False,markdown_fmt=False):
'''
Take an arbitrarily long string (newlines and all) and process each word,
then compile dictionary entries.
Return [definitions, missed words]
'''
tlist = re.split('[, \n!.\-:;?=+/\'\"^\\]\\[]',text)
tlist = [t.lower() for t in tlist if t and t.isalpha() and len(t) > 1]
tlist = list(set(tlist))
defns = set()
missed = set()
for w in tlist:
# Patch the 'sum' problem for now...
if w in definitions.irreg_sum:
defns.add('sum, esse, fui, futurus [irreg] to be, exist; (Medieval, in perfect tense) to go')
else:
ms = match_word(w)
if len(ms) == 0:
missed.add(w)
#filt.remove_substantives(ms)
wdefns = []
for m in ms:
if filt.check_dictline_word(m[2]['entry']):
wdefns.append(get_dictionary_string(m,full_info=full_info,markdown_fmt=markdown_fmt))
for wdefn in wdefns:
if wdefn != '':
defns.add(wdefn)
defns_sort = sorted(defns)
missed_sort = sorted(missed)
return (defns_sort,missed_sort)
def find_example_sentences(text,word,word_filt=MatchFilter(),infl_filt=MatchFilter()):
word_matches = match_word(word)
if not word_matches:
print("Word "+word+" doesn't seem to be in the dictionary. Check your spelling and try again.")
return None
alphabet = ['a','b','c','d','e','f','g','h','i','j','k','l','m','n','o','p','q','r','s','t','u','v','w','x','y','z']
word_matches = [match for match in word_matches if word_filt.check_dictline_word(match[2]['entry'])]
if len(word_matches) > 1:
print("Which word did you mean? ")
for i,match in enumerate(word_matches):
print(alphabet[i]+") "+get_dictionary_string(match))
chosen=False
while not chosen:
choice = input('WORD: ')
if choice in alphabet[:len(word_matches)]:
word_match = word_matches[alphabet.index(choice)]
chosen=True
else:
word_match = word_matches[0]
print("\nFinding example sentences of word: ",end='')
print(get_dictionary_string(word_match))
sentences = text.replace('\n',' ').split('.') # Try to roughly split into sentences
matched_sentences = []
for sentence in sentences:
tlist = re.split('[, \n!.\-:;?=+/\'\"^\\]\\[]',sentence)
tlist = [t.lower() for t in tlist if t and t.isalpha() and len(t) > 1]
for w in tlist:
ms = match_word(w)
for m in ms:
if m[2]['entry'] == word_match[2]['entry'] and sentence.strip()+'.' not in matched_sentences:
matched_sentences.append(sentence.strip()+'.')
print("Found %d sentences." % (len(matched_sentences)))
return matched_sentences
def find_filtered_sentences(text,sentence_filt=MatchFilter(),strict=False):
'''
Return a list of sentences for which all words pass through the match filter
If strict is True, all matching inflections for each word must pass through the filter
If False, at least one inflection must pass through
'''
sentences = text.replace('\n',' ').split('.') # Roughly split into sentences
matched_sentences = []
for sentence in sentences:
sentence_OK = True
tlist = re.split('[, \n!.\-:;?=+/\'\"^\\]\\[]',sentence)
tlist = [t.lower() for t in tlist if t and t.isalpha() and len(t) > 1]
for w in tlist:
ms = match_word(w)
for match in ms:
entry = match[2]['entry']
sentence_OK &= sentence_filt.check_dictline_word(entry)
### Checking inflection
pos = entry.pos
infl = None
if pos == 'V':
infl = definitions.build_inflection(part_of_speech=entry.pos,conj=entry.conj,ending=match[1])
elif pos in ['N','ADJ','PRON','NUM']:
infl = definitions.build_inflection(part_of_speech=entry.pos,decl=entry.decl,ending=match[1])
elif pos in ['PREP','PACK','TACKON','SUFFIX','PREFIX','X']:
infl = None
elif pos in ['ADV','PREP','CONJ','INTERJ']:
infl = None
if infl:
possible_infls = definitions.get_possible_inflections(infl,pos)
infls_OK = False
for minfl in possible_infls:
if strict:
infls_OK &= sentence_filt.check_inflection(minfl,pos)
else:
if sentence_filt.check_inflection(minfl,pos):
infls_OK = True
sentence_OK &= infls_OK
if not strict:
break
###
if sentence and sentence_OK:
matched_sentences.append(sentence)
print("Found %d sentences." % (len(matched_sentences)))
return matched_sentences
load_dictionary()
definitions.load_inflections()
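# Minimal demo (hedged): exercises the public helpers defined above when the
# module is run directly (e.g. via `python -m`). The sample words are arbitrary
# and the output depends entirely on the bundled DICTLINE.GEN data.
if __name__ == '__main__':
    for m in match_word('aqua'):
        print(get_dictionary_string(m))
    defns, missed = get_vocab_list('arma virumque cano')
    print('\n'.join(defns))
    print('unmatched:', ', '.join(missed))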
import PYWORDS.definitions as definitions
from PYWORDS.matchfilter import MatchFilter
import re
import os
import bisect
dictline = []
stems1 = []
stems2 = []
stems3 = []
stems4 = []
def load_dictionary():
f = open(os.path.join(os.path.dirname(os.path.abspath(__file__)),'data/DICTLINE.GEN'))
orig_dictline = f.readlines()
f.close()
for l in orig_dictline:
dictline.append( {'stem1':l[0:19].strip(),
'stem2':l[19:38].strip(),
'stem3':l[38:57].strip(),
'stem4':l[57:76].strip(),
'entry':definitions.build_dictline_entry(l[76:].strip())})
# Get sorted stems with original indices
# enumerate provides iterable with (idx,element) tuples
# sorted key uses element (e[1]) as sort parameter
# sorted returns a list of tuples (idx,element), and then all tuples are flipped
# to give (element,idx)
global stems1,stems2,stems3,stems4
stems1 = sorted(enumerate([d['stem1'] for d in dictline],start=0),key=lambda e:e[1])
stems1 = [(s[1],s[0]) for s in stems1] # Flip elements for comparison later
stems2 = sorted(enumerate([d['stem2'] for d in dictline],start=0),key=lambda e:e[1])
stems2 = [(s[1],s[0]) for s in stems2] # Flip elements for comparison later
stems3 = sorted(enumerate([d['stem3'] for d in dictline],start=0),key=lambda e:e[1])
stems3 = [(s[1],s[0]) for s in stems3] # Flip elements for comparison later
stems4 = sorted(enumerate([d['stem4'] for d in dictline],start=0),key=lambda e:e[1])
stems4 = [(s[1],s[0]) for s in stems4] # Flip elements for comparison later
orig_dictline = None # Clean up
# Returns a dictionary of stem : ending pairs by starting with no ending and working backwards
# If skip_zero==True, assume there is an ending and start with 1 letter instead of ending=''
def find_endings(w,skip_zero=False):
endings = {}
if skip_zero:
for i in range(len(w)-1,0,-1):
wsub = w[i:]
if wsub in definitions.endings_list:
endings[w[:i]] = wsub
else:
for i in range(len(w),0,-1):
wsub = w[i:]
if wsub in definitions.endings_list:
endings[w[:i]] = wsub
return endings
def _simple_match(w):
'''
Core word match method. Tries all stem/ending combinations that are valid and searches
for the stem in the dictionary. Finally, checks that ending is a valid ending given the
dictline entry (declension, conjugation, variant, etc).
Return a list of matched words in the format [stem, ending, dictline entry]
'''
matches = []
endings = find_endings(w)
for stem,e in endings.items():
match_ids = []
idx1_s = bisect.bisect(stems1,(stem,0)) # First entry match
if idx1_s != len(stems1) and stems1[idx1_s][0] == stem: # if actual match
idx1_e = idx1_s # find end index, last element that is a true match
while stems1[idx1_e][0] == stem and idx1_e+1 < len(stems1):
idx1_e += 1
# stems1[idx1_e-1] is now the last matching stem entry
for i in range(idx1_s,idx1_e):
if stems1[i][1] not in match_ids:
match_ids.append(stems1[i][1]) # append original indices
idx2_s = bisect.bisect(stems2,(stem,0)) # First entry match
if idx2_s != len(stems2) and stems2[idx2_s][0] == stem: # if actual match
idx2_e = idx2_s # find end index, last element that is a true match
while stems2[idx2_e][0] == stem and idx2_e+1 < len(stems1):
idx2_e += 1
# stems2[idx2_e-1] is now the last matching stem entry
for i in range(idx2_s,idx2_e):
if stems2[i][1] not in match_ids:
match_ids.append(stems2[i][1]) # append original indices
idx3_s = bisect.bisect(stems3,(stem,0)) # First entry match
if idx3_s != len(stems3) and stems3[idx3_s][0] == stem: # if actual match
idx3_e = idx3_s # find end index, last element that is a true match
while stems3[idx3_e][0] == stem and idx3_e+1 < len(stems1):
idx3_e += 1
# stems3[idx3_e-1] is now the last matching stem entry
for i in range(idx3_s,idx3_e):
if stems3[i][1] not in match_ids:
match_ids.append(stems3[i][1]) # append original indices
idx4_s = bisect.bisect(stems4,(stem,0)) # First entry match
if idx4_s != len(stems4) and stems4[idx4_s][0] == stem: # if actual match
idx4_e = idx4_s # find end index, last element that is a true match
while stems4[idx4_e][0] == stem and idx4_e+1 < len(stems1):
idx4_e += 1
# stems1[idx4_e-1] is now the last matching stem entry
for i in range(idx4_s,idx4_e):
if stems4[i][1] not in match_ids:
match_ids.append(stems4[i][1]) # append original indices
if match_ids:
entries = [dictline[idx] for idx in match_ids]
for entr in entries:
matches.append([stem,e,entr])
matches = [match for match in matches if is_possible_ending(match)]
return matches
def _remove_enclitics(w):
if w[-3:] == 'que':
w = w[:len(w)-3] # Remove the 'que'
elif w[-2:] == 'ne':
w = w[:len(w)-2] # Remove the 'ne'
elif w[-2:] == 've':
w = w[:len(w)-2] # Remove the 've'
return w
def _fix_i_j(w):
'''
Fix 'i' and 'j' problems
Rules:
From: https://latin.stackexchange.com/a/1310/8704
'V' = vowel, 'C' = consonant
i+V => j+V, e.g. iubeo (in most cases)
i+V => i+V
only in some forms of the pronoun is (ii, iis) and the verb ire (iens, ii, ieram);
also in Greek loans (iambus, iaspis, iota, Io, Iones etc.)
In compounds and prefixed verbs
C+i => C+j adiacet
V+i => V+j seiungo
V+i+V => V+i+V only in very few examples
In Greek names: Achaia, Laius, Naiades, Troius, Acheloius, Minoius; Pleiades, etc.;
In some adjectives: -uus, -uis followed by the comparative suffix –ior (strenuior, tenuior)
In some nouns
The assumption is that 'i' is used where 'j' should be, so the i => i cases are covered.
Only the i+V => j+V case, and the compound and prefixed verb i => j cases are needed.
'''
# The dictionary tends to follow the OLD and L+S for replacing 'i' with 'j'. A common
# example is 'iuvo','adiuvo' (to help) which is written 'juvo','adjuvo' in L+S and others.
# The 'j' is the consonant 'i' sound, while 'i' is the vowel 'i' sound. This distinction
# is often made but finding hard and fast rules for it can be challenging.
# Examples:
# adjecto, ajuga, dijudico, cujus, hujus, altijugus,
# baijulus, circumjiceo, objrasco (very rare to have C+j+C),
# objicio, quadrijugus
# deistatus, deitas, dejectus
# The obvious solution for the i/j and u/v problems is to simply convert everything to
# i and u in both the word-to-be-matched and the dictionary. But then the printed
# dictionary entry won't be correct. An ideal solution would treat i and j (and u and v)
# as completely interchangeable, but only for searching purposes.
# Another choice is to have two dictionaries loaded, one with i/j and u/v and the other
# using only i and only u. Search one, use the index to get the other. But this increases
# the memory usage significantly, the dictionary is -big-.
# We'll settle for just trying to fix i's and j's based on conventions.
# First find all 'i' appearances
if w.find('i') == -1:
return w
idxs = [w.find('i')] # Initialize
while w.find('i',idxs[-1]+1) != -1:
idxs.append(w.find('i',idxs[-1]+1))
# Now replace any that are at index 0, or which are next to a vowel
vowels=['a','e','i','o','u']
for i in idxs:
if i == 0:
w = 'j'+w[1:]
elif i < len(w)-2 and i != 0: # If this isn't the first or one of the last two letters
if w[:i] in definitions.prefixes:
w = w[0:i]+'j'+w[i+1:]
elif w[i+1] in vowels and w[i-1] in vowels:
w = w[0:i]+'j'+w[i+1:]
return w
def _fix_u_v(w):
'''
Fix 'u' and 'v' problems
Examples:
-ivi (ending)
-ave (ending)
abavus
alvus, alvi
alveus
circumvincio
divum
vir
vitare
uxorculo
uvidulus
vacca
ubi
ulcus
'''
# It's easier to first convert all 'v's to 'u's, then to find any common cases
# where 'u' could become 'v'
return w
def match_word(w):
'''
Try to match a word, with basic tricks. If use_tricks is used, more in depth matching
methods are used.
'''
finished=False
fixed_i_j = False
fixed_u_v = False
removed_encls = False
while not finished:
matches = _simple_match(w)
if len(matches)>0:
finished = True
elif not removed_encls:
w = _remove_enclitics(w)
removed_encls = True
elif not fixed_i_j: # If we haven't tried interchanging i and j yet
w = _fix_i_j(w)
fixed_i_j = True
elif not fixed_u_v:
w = _fix_u_v(w)
fixed_u_v = True
else: # Search failed
finished = True
return matches
# TODO
def print_noun_declensions(m):
'''Print the declensions of a noun
m must be in the format [stem,ending,dictline] (same as a match)
'''
dictline = m[2]
entry = dictline['entry']
stem1 = dictline['stem1']
stem2 = dictline['stem2']
def get_dictionary_string(m, full_info=False, header_only=False, markdown_fmt=False):
'''
Convert m into a string in dictionary style
m must be in the format [stem, ending, dictline] (same as a match)
If full_info is True, all available information is given.
If header_only, only the word header is given (no senses)
If markdown_fmt, place headwords in bold (**ex**), part of speech in italics (*ex*)
TODO This whole thing could be rewritten to be less hacky
'''
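    # Rough shape of the returned header for a noun (illustrative):
    #   '<nom form>, <gen form> <gender> <senses>'
    # e.g. something like 'porta, portae fem gate; entrance;' depending on the entry.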
dictstr = ''
dictline = m[2]
entry = dictline['entry']
# For reference:
#stem1 = dictline['stem1']
#stem2 = dictline['stem2']
    #stem3 = dictline['stem3']
    #stem4 = dictline['stem4']
if entry.pos == 'N':
infl_filt = MatchFilter(ages=['X'],frequencies=['X','A'],variants=[entry.variant,'0'])
ninfl = definitions.NounInfl(decl=entry.decl,number='S')
matches = [n for n in definitions.inflections[entry.pos] if ninfl.matches(n)]
matches = [ma for ma in matches if infl_filt.check_inflection(ma,'N')]
gender_s = ''
if matches:
end1='' # sg nom
stem1=''
end2='' # sg gen
stem2=''
for ma in matches:
if ma.case == 'NOM' and not stem1:
end1=ma.ending
stem1=ma.stem
elif ma.case == 'GEN' and not stem2:
end2=ma.ending
stem2=ma.stem
if not stem1 and not stem2:
for ma in matches:
if ma.case == 'X':
end1 = ''
stem1 = '1'
# Set gender string
if not header_only:
if entry.gender == 'C':
gender_s = 'masc/fem'
elif entry.gender == 'N':
gender_s = 'neut'
elif entry.gender == 'M':
gender_s = 'masc'
elif entry.gender == 'F':
gender_s = 'fem'
nom_stem = dictline['stem'+stem1]
if stem2:
gen_stem = dictline['stem'+stem2]
if markdown_fmt:
dictstr += '**'
dictstr += nom_stem+end1+', '+gen_stem+end2
if markdown_fmt:
dictstr += '** *'
else:
dictstr += ' '
dictstr += gender_s
if markdown_fmt:
dictstr += '*'
dictstr += ' '
else:
if markdown_fmt:
dictstr += '**'
dictstr += nom_stem+end1
if markdown_fmt:
dictstr += '** *'
else:
dictstr += ' '
dictstr += gender_s
if markdown_fmt:
dictstr += '*'
dictstr += ' '
if full_info:
# add age, area, geography, frequency
dictstr += '('+definitions.dict_frequencies[entry.freq]+', '+\
definitions.ages[entry.age]+'. '
if entry.area != 'X':
dictstr += definitions.areas[entry.area]+'. '
if entry.geog != 'X':
dictstr += definitions.geographies[entry.geog]+'). '
else:
dictstr = dictstr.strip(' .')+'). ' # Avoid awkward spaces
dictstr += 'Source: '+definitions.source_types[entry.src]+'. '
if not header_only:
dictstr += ''.join(entry.senses)
if entry.pos == 'V':
# ex. singular indicative present active 1st person
#V 1 1 PRES ACTIVE IND 1 S 1 1 o X A
# ex. infinitive
#V 1 1 PRES ACTIVE INF 0 X 2 3 are X A
# TODO Fix semi-deponent, defective, and impersonal verbs
if entry.verb_kind == 'DEP':
vinfl = definitions.VerbInfl(conj=entry.conj,tense='PRES',voice='PASSIVE')
elif entry.verb_kind == 'SEMIDEP':
vinfl = definitions.VerbInfl(conj=entry.conj,tense='PRES',voice='ACTIVE')
else:
vinfl = definitions.VerbInfl(conj=entry.conj,tense='PRES',voice='ACTIVE')
if entry.verb_kind == 'IMPERS':
infl_filt = MatchFilter(ages=['X'],frequencies=['X','A'],variants=[entry.variant,'0'],
persons=['0','3'],moods=['IND','INF'])
else:
infl_filt = MatchFilter(ages=['X'],frequencies=['X','A'],variants=[entry.variant,'0'],
persons=['0','1'],moods=['IND','INF'])
matches = [v for v in definitions.inflections[entry.pos] if vinfl.matches(v)]
matches = [ma for ma in matches if infl_filt.check_inflection(ma,'V')]
end1='' # sg ind pres active 1st person
stem1=''
end2='' # pres active infinitive
stem2=''
for ma in matches:
if entry.verb_kind == 'IMPERS':
if ma.person == '3' and ma.mood == 'IND' and not end1:
end1 = ma.ending
stem1=ma.stem
elif ma.mood == 'INF' and not end2:
end2 = ma.ending
stem2 = ma.stem
else:
if ma.person == '1' and ma.mood == 'IND' and not end1:
end1 = ma.ending
stem1=ma.stem
elif ma.mood == 'INF' and not end2:
end2 = ma.ending
stem2 = ma.stem
if stem1 and stem2:
if markdown_fmt:
dictstr += '**'
dictstr += dictline['stem'+stem1]+end1
dictstr += ', '
dictstr += dictline['stem'+stem2]+end2
if dictline['stem3'] != 'zzz':
dictstr += ', '
if entry.verb_kind == 'IMPERS':
dictstr += dictline['stem3']+'it'
else:
dictstr += dictline['stem3']+'i'
if dictline['stem4'] != 'zzz':
dictstr += ', '
if entry.verb_kind == 'IMPERS':
dictstr += dictline['stem4']+'um est'
else:
dictstr += dictline['stem4']+'us'
if entry.verb_kind in ['DEP','SEMIDEP']:
dictstr += ' sum'
if markdown_fmt:
dictstr += '**'
dictstr += ' '
else:
if markdown_fmt:
dictstr += '**'
dictstr += m[0]+m[1]
if markdown_fmt:
dictstr += '**'
dictstr += ' '
if not header_only:
if entry.conj in ['1','2']:
dictstr += '['+entry.conj+'] '
elif entry.conj in ['3','8']:
if entry.variant in ['0','1']:
dictstr += '[3] '
elif entry.variant in ['2','3']:
dictstr += '[irreg] '
elif entry.variant == '4':
dictstr += '[4] '
elif entry.conj == '7':
if entry.variant in ['1','3']:
dictstr += '[3] '
else:
dictstr += '[irreg] '
elif entry.conj in ['5','6']:
dictstr += '[irreg] ' # Irregular
# else Abbreviations, indeclinable, etc can be skipped
if entry.verb_kind == 'TRANS':
if markdown_fmt:
dictstr += '*'
dictstr += 'vt'
if markdown_fmt:
dictstr += '*'
dictstr += ' '
elif entry.verb_kind == 'INTRANS':
if markdown_fmt:
dictstr += '*'
dictstr += 'vi'
if markdown_fmt:
dictstr += '*'
dictstr += ' '
elif entry.verb_kind == 'SEMIDEP':
if markdown_fmt:
dictstr += '*'
dictstr += 'v semidep'
if markdown_fmt:
dictstr += '*'
dictstr += ' '
elif entry.verb_kind == 'DEP':
if markdown_fmt:
dictstr += '*'
dictstr += 'v dep'
if markdown_fmt:
dictstr += '*'
dictstr += ' '
elif entry.verb_kind == 'IMPERS':
if markdown_fmt:
dictstr += '*'
dictstr += 'impers v'
if markdown_fmt:
dictstr += '*'
dictstr += ' '
elif entry.verb_kind == 'PERFDEF':
if markdown_fmt:
dictstr += '*'
dictstr += 'perf def v'
if markdown_fmt:
dictstr += '*'
dictstr += ' '
elif entry.verb_kind == 'GEN':
if markdown_fmt:
dictstr += '*'
dictstr += '(w/ gen)'
if markdown_fmt:
dictstr += '*'
dictstr += ' '
elif entry.verb_kind == 'DAT':
if markdown_fmt:
dictstr += '*'
dictstr += '(w/ dat)'
if markdown_fmt:
dictstr += '*'
dictstr += ' '
elif entry.verb_kind == 'ABL':
if markdown_fmt:
dictstr += '*'
dictstr += '(w/ abl)'
if markdown_fmt:
dictstr += '*'
dictstr += ' '
else:
if markdown_fmt:
dictstr += '*'
dictstr += 'vt'
if markdown_fmt:
dictstr += '*'
dictstr += ' '
if full_info:
# add age, area, geography, frequency
dictstr += '('+definitions.dict_frequencies[entry.freq]+', '+\
definitions.ages[entry.age]+'. '
if entry.area != 'X':
dictstr += definitions.areas[entry.area]+'. '
if entry.geog != 'X':
dictstr += definitions.geographies[entry.geog]+'). '
else:
dictstr = dictstr.strip(' .')+'). ' # Avoid awkward spaces
dictstr += 'Source: '+definitions.source_types[entry.src]+'. '
if not header_only:
dictstr += ''.join(entry.senses)
elif entry.pos == 'ADJ':
ainfl = definitions.AdjectiveInfl(decl=entry.decl,
number='S',case='NOM')
infl_filt = MatchFilter(ages=['X'],frequencies=['X','A'],variants=[entry.variant,'0'])
matches = [a for a in definitions.inflections[entry.pos] if ainfl.matches(a)]
matches = [ma for ma in matches if infl_filt.check_inflection(ma,'ADJ')]
end1='' # sg nom masc
stem1=''
end2='' # sg nom fem
stem2=''
end3='' # sg nom neut
stem3=''
for ma in matches:
            if ma.gender in ('M', 'X', 'C') and not stem1:
end1 = ma.ending
stem1 = ma.stem
            if ma.gender in ('F', 'C') and not stem2:
end2 = ma.ending
stem2 = ma.stem
elif ma.gender == 'N' and not stem3:
end3 = ma.ending
stem3 = ma.stem
if stem1 and not stem2 and not stem3:
stem2 = stem1
end2 = end1
stem3 = stem1
end3 = end1
# For adjectives it's common for stems to be matching
if stem1 and stem2 and stem3:
stem1 = dictline['stem'+stem1]
stem2 = dictline['stem'+stem2]
stem3 = dictline['stem'+stem3]
if stem1 == stem2 and stem1 == stem3:
if markdown_fmt:
dictstr += '**'
dictstr += stem1+end1+' -'+end2+' -'+end3
if markdown_fmt:
dictstr += '**'
dictstr += ' '
elif stem1 == stem2:
if markdown_fmt:
dictstr += '**'
dictstr += stem1+end1+' -'+end3
if markdown_fmt:
dictstr += '**'
dictstr += ' '
else:
if markdown_fmt:
dictstr += '**'
dictstr += stem1+end1+' '+stem2+end2+' '+stem3+end3
if markdown_fmt:
dictstr += '**'
dictstr += ' '
else:
if markdown_fmt:
dictstr += '**'+m[0]+m[1]+'** '
else:
dictstr += m[0]+m[1]+' '
if not header_only:
if markdown_fmt:
dictstr += '*'
dictstr += 'adj'
if markdown_fmt:
dictstr += '*'
dictstr += ' '
dictstr += ''.join(entry.senses)
elif entry.pos == 'PRON':
infl_filt = MatchFilter(ages=['X'],frequencies=['X','A'],variants=[entry.variant,'0'])
pinfl = definitions.PronounInfl(decl=entry.decl,number='S')
matches = [p for p in definitions.inflections[entry.pos] if pinfl.matches(p)]
matches = [ma for ma in matches if infl_filt.check_inflection(ma,'PRON')]
if matches:
end1='' # sg nom
stem1=''
end2='' # sg gen
stem2=''
for ma in matches:
if ma.case == 'NOM' and not stem1:
end1=ma.ending
stem1=ma.stem
elif ma.case == 'GEN' and not stem2:
end2=ma.ending
stem2=ma.stem
if not stem1 and not stem2:
for ma in matches:
if ma.case == 'X':
end1 = ''
stem1 = '1'
if not stem1:
stem1='1'
nom_stem = dictline['stem'+stem1]
if stem2:
gen_stem = dictline['stem'+stem2]
dictstr = nom_stem+end1+', '+gen_stem+end2+' '
else:
dictstr = nom_stem+end1+' '
if full_info:
# add age, area, geography, frequency
dictstr += '('+definitions.dict_frequencies[entry.freq]+', '+\
definitions.ages[entry.age]+'. '
if entry.area != 'X':
dictstr += definitions.areas[entry.area]+'. '
if entry.geog != 'X':
dictstr += definitions.geographies[entry.geog]+'). '
else:
dictstr = dictstr.strip(' .')+'). ' # Avoid awkward spaces
dictstr += 'Source: '+definitions.source_types[entry.src]+'. '
if not header_only:
dictstr += ''.join(entry.senses)
elif entry.pos == 'CONJ':
if markdown_fmt:
dictstr = '**'+dictline['stem1'] + '** *conj* '
else:
dictstr = dictline['stem1'] + ' conj '
if not header_only:
dictstr += ''.join(entry.senses)
elif entry.pos == 'ADV':
if markdown_fmt:
dictstr = '**'+dictline['stem1']+'** *adv* '
else:
dictstr = dictline['stem1'] + ' adv '
if not header_only:
dictstr += ''.join(entry.senses)
elif entry.pos in ['PREP','PACK']:
if markdown_fmt:
dictstr = '**'+dictline['stem1'] + '** *prep* '
else:
dictstr = dictline['stem1'] + ' prep '
if not header_only:
dictstr += ''.join(entry.senses)
return dictstr.replace(' ',' ').strip(' ')
def is_possible_ending(match):
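    '''Check whether the ending of a [stem, ending, dictline] match is a valid
    ending for that entry's declension/conjugation. Some parts of speech are
    currently always accepted (see the TODO below).'''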
entry = match[2]['entry']
pos = entry.pos
if pos in ['PREP','PACK','TACKON','SUFFIX','PREFIX','X']:
return True # TODO
infl = None
if pos == 'V':
infl = definitions.build_inflection(part_of_speech=entry.pos,conj=entry.conj)
elif pos in ['N','ADJ','PRON','NUM']:
infl = definitions.build_inflection(part_of_speech=entry.pos,decl=entry.decl)
elif pos in ['ADV','PREP','CONJ','INTERJ']:
if match[1] != '':
return False
else:
return True
possible_endings = definitions.get_possible_endings(infl,entry.pos)
if match[1] in possible_endings:
return True
else:
return False
def get_word_inflections(match,less=False):
'''
Use a match (from match_word) to look up the possible inflections of a word. Returned as list of plain text
strings.
If less is False, information about the word is printed along with the inflection. If
less is True, only the inflection information and the word header are printed.
'''
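    # Illustrative usage: get_word_inflections(match_word('portam')[0]) returns one
    # string per plausible inflection, each ending with the word's dictionary header.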
entry = match[2]['entry']
infl_strings = []
head = get_dictionary_string(match,header_only=True)
pos = entry.pos
if pos in ['PREP','PACK','TACKON','SUFFIX','PREFIX','X']:
return []
infl = None
if pos == 'V':
infl = definitions.build_inflection(part_of_speech=entry.pos,conj=entry.conj,ending=match[1])
elif pos in ['N','ADJ','PRON','NUM']:
infl = definitions.build_inflection(part_of_speech=entry.pos,decl=entry.decl,ending=match[1])
elif pos in ['ADV','PREP','CONJ','INTERJ']:
return []
possible_infls = definitions.get_possible_inflections(infl,pos)
for minfl in possible_infls:
if less:
infl_strings.append(minfl.get_inflection_string(less=less)+'of '+head)
else:
infl_strings.append(minfl.get_inflection_string(less=less)+' '+head)
return infl_strings
def get_vocab_list(text,filt=MatchFilter(),full_info=False,markdown_fmt=False):
'''
Take an arbitrarily long string (newlines and all) and process each word,
then compile dictionary entries.
    Returns a tuple: (sorted list of definitions, sorted list of missed words)
'''
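    # Typical usage (illustrative; 'mytext.txt' is a placeholder file):
    #   defns, missed = get_vocab_list(open('mytext.txt').read())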
tlist = re.split('[, \n!.\-:;?=+/\'\"^\\]\\[]',text)
tlist = [t.lower() for t in tlist if t and t.isalpha() and len(t) > 1]
tlist = list(set(tlist))
defns = set()
missed = set()
for w in tlist:
# Patch the 'sum' problem for now...
if w in definitions.irreg_sum:
defns.add('sum, esse, fui, futurus [irreg] to be, exist; (Medieval, in perfect tense) to go')
else:
ms = match_word(w)
if len(ms) == 0:
missed.add(w)
#filt.remove_substantives(ms)
wdefns = []
for m in ms:
if filt.check_dictline_word(m[2]['entry']):
wdefns.append(get_dictionary_string(m,full_info=full_info,markdown_fmt=markdown_fmt))
for wdefn in wdefns:
if wdefn != '':
defns.add(wdefn)
defns_sort = sorted(defns)
missed_sort = sorted(missed)
return (defns_sort,missed_sort)
def find_example_sentences(text,word,word_filt=MatchFilter(),infl_filt=MatchFilter()):
word_matches = match_word(word)
if not word_matches:
print("Word "+word+" doesn't seem to be in the dictionary. Check your spelling and try again.")
return None
alphabet = ['a','b','c','d','e','f','g','h','i','j','k','l','m','n','o','p','q','r','s','t','u','v','w','x','y','z']
word_matches = [match for match in word_matches if word_filt.check_dictline_word(match[2]['entry'])]
if len(word_matches) > 1:
print("Which word did you mean? ")
for i,match in enumerate(word_matches):
print(alphabet[i]+") "+get_dictionary_string(match))
chosen=False
while not chosen:
choice = input('WORD: ')
if choice in alphabet[:len(word_matches)]:
word_match = word_matches[alphabet.index(choice)]
chosen=True
else:
word_match = word_matches[0]
print("\nFinding example sentences of word: ",end='')
print(get_dictionary_string(word_match))
sentences = text.replace('\n',' ').split('.') # Try to roughly split into sentences
matched_sentences = []
for sentence in sentences:
tlist = re.split('[, \n!.\-:;?=+/\'\"^\\]\\[]',sentence)
tlist = [t.lower() for t in tlist if t and t.isalpha() and len(t) > 1]
for w in tlist:
ms = match_word(w)
for m in ms:
if m[2]['entry'] == word_match[2]['entry'] and sentence.strip()+'.' not in matched_sentences:
matched_sentences.append(sentence.strip()+'.')
print("Found %d sentences." % (len(matched_sentences)))
return matched_sentences
def find_filtered_sentences(text,sentence_filt=MatchFilter(),strict=False):
'''
Return a list of sentences for which all words pass through the match filter
If strict is True, all matching inflections for each word must pass through the filter
If False, at least one inflection must pass through
'''
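    # Illustrative usage: keep sentences whose words all pass an age filter, where
    # at least one possible inflection per word is acceptable:
    #   find_filtered_sentences(text, MatchFilter(ages=['X']), strict=False)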
sentences = text.replace('\n',' ').split('.') # Roughly split into sentences
matched_sentences = []
for sentence in sentences:
sentence_OK = True
tlist = re.split('[, \n!.\-:;?=+/\'\"^\\]\\[]',sentence)
tlist = [t.lower() for t in tlist if t and t.isalpha() and len(t) > 1]
for w in tlist:
ms = match_word(w)
for match in ms:
entry = match[2]['entry']
sentence_OK &= sentence_filt.check_dictline_word(entry)
### Checking inflection
pos = entry.pos
infl = None
if pos == 'V':
infl = definitions.build_inflection(part_of_speech=entry.pos,conj=entry.conj,ending=match[1])
elif pos in ['N','ADJ','PRON','NUM']:
infl = definitions.build_inflection(part_of_speech=entry.pos,decl=entry.decl,ending=match[1])
elif pos in ['PREP','PACK','TACKON','SUFFIX','PREFIX','X']:
infl = None
elif pos in ['ADV','PREP','CONJ','INTERJ']:
infl = None
if infl:
possible_infls = definitions.get_possible_inflections(infl,pos)
                    # Strict mode requires every matching inflection to pass, so start
                    # from True; otherwise a single passing inflection is enough.
                    infls_OK = bool(strict)
for minfl in possible_infls:
if strict:
infls_OK &= sentence_filt.check_inflection(minfl,pos)
else:
if sentence_filt.check_inflection(minfl,pos):
infls_OK = True
sentence_OK &= infls_OK
if not strict:
break
###
if sentence and sentence_OK:
matched_sentences.append(sentence)
print("Found %d sentences." % (len(matched_sentences)))
return matched_sentences
load_dictionary()
definitions.load_inflections()
| en | 0.810259 | # Main methods for looking up words from the dictionary # Get sorted stems with original indices # enumerate provides iterable with (idx,element) tuples # sorted key uses element (e[1]) as sort parameter # sorted returns a list of tuples (idx,element), and then all tuples are flipped # to give (element,idx) # Flip elements for comparison later # Flip elements for comparison later # Flip elements for comparison later # Flip elements for comparison later # Clean up # Returns a dictionary of stem : ending pairs by starting with no ending and working backwards # If skip_zero==True, assume there is an ending and start with 1 letter instead of ending='' Core word match method. Tries all stem/ending combinations that are valid and searches for the stem in the dictionary. Finally, checks that ending is a valid ending given the dictline entry (declension, conjugation, variant, etc). Return a list of matched words in the format [stem, ending, dictline entry] # First entry match # if actual match # find end index, last element that is a true match # stems1[idx1_e-1] is now the last matching stem entry # append original indices # First entry match # if actual match # find end index, last element that is a true match # stems2[idx2_e-1] is now the last matching stem entry # append original indices # First entry match # if actual match # find end index, last element that is a true match # stems3[idx3_e-1] is now the last matching stem entry # append original indices # First entry match # if actual match # find end index, last element that is a true match # stems1[idx4_e-1] is now the last matching stem entry # append original indices # Remove the 'que' # Remove the 'ne' # Remove the 've' Fix 'i' and 'j' problems Rules: From: https://latin.stackexchange.com/a/1310/8704 'V' = vowel, 'C' = consonant i+V => j+V, e.g. iubeo (in most cases) i+V => i+V only in some forms of the pronoun is (ii, iis) and the verb ire (iens, ii, ieram); also in Greek loans (iambus, iaspis, iota, Io, Iones etc.) In compounds and prefixed verbs C+i => C+j adiacet V+i => V+j seiungo V+i+V => V+i+V only in very few examples In Greek names: Achaia, Laius, Naiades, Troius, Acheloius, Minoius; Pleiades, etc.; In some adjectives: -uus, -uis followed by the comparative suffix –ior (strenuior, tenuior) In some nouns The assumption is that 'i' is used where 'j' should be, so the i => i cases are covered. Only the i+V => j+V case, and the compound and prefixed verb i => j cases are needed. # The dictionary tends to follow the OLD and L+S for replacing 'i' with 'j'. A common # example is 'iuvo','adiuvo' (to help) which is written 'juvo','adjuvo' in L+S and others. # The 'j' is the consonant 'i' sound, while 'i' is the vowel 'i' sound. This distinction # is often made but finding hard and fast rules for it can be challenging. # Examples: # adjecto, ajuga, dijudico, cujus, hujus, altijugus, # baijulus, circumjiceo, objrasco (very rare to have C+j+C), # objicio, quadrijugus # deistatus, deitas, dejectus # The obvious solution for the i/j and u/v problems is to simply convert everything to # i and u in both the word-to-be-matched and the dictionary. But then the printed # dictionary entry won't be correct. An ideal solution would treat i and j (and u and v) # as completely interchangeable, but only for searching purposes. # Another choice is to have two dictionaries loaded, one with i/j and u/v and the other # using only i and only u. Search one, use the index to get the other. 
But this increases # the memory usage significantly, the dictionary is -big-. # We'll settle for just trying to fix i's and j's based on conventions. # First find all 'i' appearances # Initialize # Now replace any that are at index 0, or which are next to a vowel # If this isn't the first or one of the last two letters Fix 'u' and 'v' problems Examples: -ivi (ending) -ave (ending) abavus alvus, alvi alveus circumvincio divum vir vitare uxorculo uvidulus vacca ubi ulcus # It's easier to first convert all 'v's to 'u's, then to find any common cases # where 'u' could become 'v' Try to match a word, with basic tricks. If use_tricks is used, more in depth matching methods are used. # If we haven't tried interchanging i and j yet # Search failed # TODO Print the declensions of a noun m must be in the format [stem,ending,dictline] (same as a match) Convert m into a string in dictionary style m must be in the format [stem, ending, dictline] (same as a match) If full_info is True, all available information is given. If header_only, only the word header is given (no senses) If markdown_fmt, place headwords in bold (**ex**), part of speech in italics (*ex*) TODO This whole thing could be rewritten to be less hacky # For reference: #stem1 = dictline['stem1'] #stem2 = dictline['stem2'] #stem3 = dictline['stem2'] #stem4 = dictline['stem2'] # sg nom # sg gen # Set gender string # add age, area, geography, frequency # Avoid awkward spaces # ex. singular indicative present active 1st person #V 1 1 PRES ACTIVE IND 1 S 1 1 o X A # ex. infinitive #V 1 1 PRES ACTIVE INF 0 X 2 3 are X A # TODO Fix semi-deponent, defective, and impersonal verbs # sg ind pres active 1st person # pres active infinitive # Irregular # else Abbreviations, indeclinable, etc can be skipped # add age, area, geography, frequency # Avoid awkward spaces # sg nom masc # sg nom fem # sg nom neut # For adjectives it's common for stems to be matching # sg nom # sg gen # add age, area, geography, frequency # Avoid awkward spaces # TODO Use a match (from match_word) to look up the possible inflections of a word. Returned as list of plain text strings. If less is False, information about the word is printed along with the inflection. If less is True, only the inflection information and the word header are printed. Take an arbitrarily long string (newlines and all) and process each word, then compile dictionary entries. Return [definitions, missed words] # Patch the 'sum' problem for now... #filt.remove_substantives(ms) # Try to roughly split into sentences Return a list of sentences for which all words pass through the match filter If strict is True, all matching inflections for each word must pass through the filter If False, at least one inflection must pass through # Roughly split into sentences ### Checking inflection ### | 3.280006 | 3 |
sharpy/plans/require/required_minerals.py | MadManSC2/sharpy-sc2 | 1 | 6630656 | <gh_stars>1-10
import sc2
from sharpy.plans.require.require_base import RequireBase
class RequiredMinerals(RequireBase):
"""Require that a specific number of minerals are "in the bank"."""
def __init__(self, mineral_requirement: int):
assert mineral_requirement is not None and isinstance(mineral_requirement, int)
super().__init__()
self.mineralRequirement = mineral_requirement
def check(self) -> bool:
if self.ai.minerals > self.mineralRequirement:
return True
return False
| import sc2
from sharpy.plans.require.require_base import RequireBase
class RequiredMinerals(RequireBase):
"""Require that a specific number of minerals are "in the bank"."""
def __init__(self, mineral_requirement: int):
assert mineral_requirement is not None and isinstance(mineral_requirement, int)
super().__init__()
self.mineralRequirement = mineral_requirement
def check(self) -> bool:
if self.ai.minerals > self.mineralRequirement:
return True
return False | en | 0.847403 | Require that a specific number of minerals are "in the bank". | 2.997242 | 3 |
graph4nlp/pytorch/test/kg_completion/src/spodernet/examples/snli.py | stjordanis/graph4nlp | 96 | 6630657 | <gh_stars>10-100
'''This is an example of training a classifier on SNLI'''
from __future__ import print_function
from os.path import join
import nltk
import numpy as np
import os
import urllib
import zipfile
import sys
import json
from spodernet.hooks import AccuracyHook, LossHook, ETAHook
from spodernet.preprocessing.pipeline import Pipeline
from spodernet.preprocessing.processors import AddToVocab, CreateBinsByNestedLength, SaveLengthsToState, ConvertTokenToIdx, StreamToHDF5, Tokenizer, NaiveNCharTokenizer
from spodernet.preprocessing.processors import JsonLoaderProcessors, DictKey2ListMapper, RemoveLineOnJsonValueCondition, ToLower
from spodernet.preprocessing.batching import StreamBatcher
from spodernet.utils.logger import Logger, LogLevel
from spodernet.utils.global_config import Config, Backends
from spodernet.utils.util import get_data_path
from spodernet.frontend import Model, PairedBiDirectionalLSTM, SoftmaxCrossEntropy, Embedding, Trainer
Config.parse_argv(sys.argv)
np.set_printoptions(suppress=True)
def download_snli():
'''Creates data and snli paths and downloads SNLI in the home dir'''
home = os.environ['HOME']
data_dir = join(home, '.data')
snli_dir = join(data_dir, 'snli')
snli_url = 'http://nlp.stanford.edu/projects/snli/snli_1.0.zip'
if not os.path.exists(data_dir):
os.mkdir(data_dir)
if not os.path.exists(snli_dir):
os.mkdir(snli_dir)
if not os.path.exists(join(data_dir, 'snli_1.0.zip')):
print('Downloading SNLI...')
snlidownload = urllib.URLopener()
snlidownload.retrieve(snli_url, join(data_dir, "snli_1.0.zip"))
print('Opening zip file...')
archive = zipfile.ZipFile(join(data_dir, 'snli_1.0.zip'), 'r')
return archive, snli_dir
def snli2json():
'''Preprocesses SNLI data and returns to spoder files'''
files = ['snli_1.0_train.jsonl', 'snli_1.0_dev.jsonl',
'snli_1.0_test.jsonl']
archive, snli_dir = download_snli()
new_files = ['train.data', 'dev.data', 'test.data']
names = ['train', 'dev', 'test']
if not os.path.exists(join(snli_dir, new_files[0])):
for name, new_name in zip(files, new_files):
            print('Writing {0}...'.format(new_name))
            # reuse the archive handle returned by download_snli()
            snli_file = archive.open(join('snli_1.0', name), 'r')
with open(join(snli_dir, new_name), 'w') as datafile:
for line in snli_file:
data = json.loads((line))
if data['gold_label'] == '-':
continue
premise = data['sentence1']
hypothesis = data['sentence2']
target = data['gold_label']
datafile.write(
json.dumps([premise, hypothesis, target]) + '\n')
return [names, [join(snli_dir, new_name) for new_name in new_files]]
def preprocess_SNLI(delete_data=False):
# load data
#names, file_paths = snli2json()
#train_path, dev_path, test_path = file_paths
tokenizer = nltk.tokenize.WordPunctTokenizer()
zip_path = join(get_data_path(), 'snli_1.0.zip', 'snli_1.0')
file_paths = ['snli_1.0_train.jsonl', 'snli_1.0_dev.jsonl', 'snli_1.0_test.jsonl']
not_t = []
t = ['input', 'support', 'target']
# tokenize and convert to hdf5
# 1. Setup pipeline to save lengths and generate vocabulary
p = Pipeline('snli_example', delete_data)
p.add_path(join(zip_path, file_paths[0]))
p.add_line_processor(JsonLoaderProcessors())
p.add_line_processor(RemoveLineOnJsonValueCondition('gold_label', lambda label: label == '-'))
p.add_line_processor(DictKey2ListMapper(['sentence1', 'sentence2', 'gold_label']))
p.add_sent_processor(ToLower())
p.add_sent_processor(Tokenizer(tokenizer.tokenize), t)
#p.add_sent_processor(NaiveNCharTokenizer(3), not_t)
p.add_token_processor(AddToVocab())
p.add_post_processor(SaveLengthsToState())
p.execute()
p.clear_processors()
p.state['vocab'].save_to_disk()
# 2. Process the data further to stream it to hdf5
p.add_sent_processor(ToLower())
p.add_sent_processor(Tokenizer(tokenizer.tokenize), t)
#p.add_sent_processor(NaiveNCharTokenizer(3), not_t)
p.add_post_processor(ConvertTokenToIdx())
p.add_post_processor(CreateBinsByNestedLength('snli_train', min_batch_size=128))
state = p.execute()
# dev and test data
p2 = Pipeline('snli_example')
p2.copy_vocab_from_pipeline(p)
p2.add_path(join(zip_path, file_paths[1]))
p2.add_line_processor(JsonLoaderProcessors())
p2.add_line_processor(RemoveLineOnJsonValueCondition('gold_label', lambda label: label == '-'))
p2.add_line_processor(DictKey2ListMapper(['sentence1', 'sentence2', 'gold_label']))
p2.add_sent_processor(ToLower())
p2.add_sent_processor(Tokenizer(tokenizer.tokenize), t)
#p2.add_sent_processor(NaiveNCharTokenizer(3), not_t)
p2.add_post_processor(SaveLengthsToState())
p2.execute()
p2.clear_processors()
p2.add_sent_processor(ToLower())
p2.add_sent_processor(Tokenizer(tokenizer.tokenize), t)
#p2.add_sent_processor(NaiveNCharTokenizer(3), not_t)
p2.add_post_processor(ConvertTokenToIdx())
p2.add_post_processor(StreamToHDF5('snli_dev'))
p2.execute()
p3 = Pipeline('snli_example')
p3.copy_vocab_from_pipeline(p)
p3.add_path(join(zip_path, file_paths[2]))
p3.add_line_processor(JsonLoaderProcessors())
p3.add_line_processor(RemoveLineOnJsonValueCondition('gold_label', lambda label: label == '-'))
p3.add_line_processor(DictKey2ListMapper(['sentence1', 'sentence2', 'gold_label']))
p3.add_sent_processor(ToLower())
p3.add_sent_processor(Tokenizer(tokenizer.tokenize), t)
#p3.add_sent_processor(NaiveNCharTokenizer(3), not_t)
p3.add_post_processor(SaveLengthsToState())
p3.execute()
p3.clear_processors()
p3.add_sent_processor(ToLower())
p3.add_sent_processor(Tokenizer(tokenizer.tokenize), t)
#p3.add_sent_processor(NaiveNCharTokenizer(3), not_t)
p3.add_post_processor(ConvertTokenToIdx())
p3.add_post_processor(StreamToHDF5('snli_test'))
p3.execute()
def main():
Logger.GLOBAL_LOG_LEVEL = LogLevel.INFO
#Config.backend = Backends.TENSORFLOW
Config.backend = Backends.TORCH
Config.cuda = True
Config.dropout = 0.1
Config.hidden_size = 128
Config.embedding_size = 256
Config.L2 = 0.00003
do_process = False
if do_process:
preprocess_SNLI(delete_data=True)
p = Pipeline('snli_example')
vocab = p.state['vocab']
vocab.load_from_disk()
batch_size = 128
if Config.backend == Backends.TENSORFLOW:
from spodernet.backends.tfbackend import TensorFlowConfig
TensorFlowConfig.init_batch_size(batch_size)
train_batcher = StreamBatcher('snli_example', 'snli_train', batch_size, randomize=True, loader_threads=8)
#train_batcher.subscribe_to_batch_prepared_event(SomeExpensivePreprocessing())
dev_batcher = StreamBatcher('snli_example', 'snli_dev', batch_size)
test_batcher = StreamBatcher('snli_example', 'snli_test', batch_size)
train_batcher.subscribe_to_events(AccuracyHook('Train', print_every_x_batches=1000))
dev_batcher.subscribe_to_events(AccuracyHook('Dev', print_every_x_batches=1000))
eta = ETAHook(print_every_x_batches=1000)
train_batcher.subscribe_to_events(eta)
train_batcher.subscribe_to_start_of_epoch_event(eta)
model = Model()
model.add(Embedding(128, vocab.num_embeddings))
model.add(PairedBiDirectionalLSTM(128, hidden_size=256, variable_length=True, conditional_encoding=False))
model.add(SoftmaxCrossEntropy(input_size=256*4, num_labels=3))
t = Trainer(model)
for i in range(10):
t.train(train_batcher, epochs=1)
t.evaluate(dev_batcher)
if __name__ == '__main__':
main()
| '''This is an example of training a classifier on SNLI'''
from __future__ import print_function
from os.path import join
import nltk
import numpy as np
import os
import urllib
import zipfile
import sys
import json
from spodernet.hooks import AccuracyHook, LossHook, ETAHook
from spodernet.preprocessing.pipeline import Pipeline
from spodernet.preprocessing.processors import AddToVocab, CreateBinsByNestedLength, SaveLengthsToState, ConvertTokenToIdx, StreamToHDF5, Tokenizer, NaiveNCharTokenizer
from spodernet.preprocessing.processors import JsonLoaderProcessors, DictKey2ListMapper, RemoveLineOnJsonValueCondition, ToLower
from spodernet.preprocessing.batching import StreamBatcher
from spodernet.utils.logger import Logger, LogLevel
from spodernet.utils.global_config import Config, Backends
from spodernet.utils.util import get_data_path
from spodernet.frontend import Model, PairedBiDirectionalLSTM, SoftmaxCrossEntropy, Embedding, Trainer
Config.parse_argv(sys.argv)
np.set_printoptions(suppress=True)
def download_snli():
'''Creates data and snli paths and downloads SNLI in the home dir'''
home = os.environ['HOME']
data_dir = join(home, '.data')
snli_dir = join(data_dir, 'snli')
snli_url = 'http://nlp.stanford.edu/projects/snli/snli_1.0.zip'
if not os.path.exists(data_dir):
os.mkdir(data_dir)
if not os.path.exists(snli_dir):
os.mkdir(snli_dir)
if not os.path.exists(join(data_dir, 'snli_1.0.zip')):
print('Downloading SNLI...')
snlidownload = urllib.URLopener()
snlidownload.retrieve(snli_url, join(data_dir, "snli_1.0.zip"))
print('Opening zip file...')
archive = zipfile.ZipFile(join(data_dir, 'snli_1.0.zip'), 'r')
return archive, snli_dir
def snli2json():
'''Preprocesses SNLI data and returns to spoder files'''
files = ['snli_1.0_train.jsonl', 'snli_1.0_dev.jsonl',
'snli_1.0_test.jsonl']
archive, snli_dir = download_snli()
new_files = ['train.data', 'dev.data', 'test.data']
names = ['train', 'dev', 'test']
if not os.path.exists(join(snli_dir, new_files[0])):
for name, new_name in zip(files, new_files):
            print('Writing {0}...'.format(new_name))
            # reuse the archive handle returned by download_snli()
            snli_file = archive.open(join('snli_1.0', name), 'r')
with open(join(snli_dir, new_name), 'w') as datafile:
for line in snli_file:
data = json.loads((line))
if data['gold_label'] == '-':
continue
premise = data['sentence1']
hypothesis = data['sentence2']
target = data['gold_label']
datafile.write(
json.dumps([premise, hypothesis, target]) + '\n')
return [names, [join(snli_dir, new_name) for new_name in new_files]]
def preprocess_SNLI(delete_data=False):
# load data
#names, file_paths = snli2json()
#train_path, dev_path, test_path = file_paths
tokenizer = nltk.tokenize.WordPunctTokenizer()
zip_path = join(get_data_path(), 'snli_1.0.zip', 'snli_1.0')
file_paths = ['snli_1.0_train.jsonl', 'snli_1.0_dev.jsonl', 'snli_1.0_test.jsonl']
not_t = []
t = ['input', 'support', 'target']
# tokenize and convert to hdf5
# 1. Setup pipeline to save lengths and generate vocabulary
p = Pipeline('snli_example', delete_data)
p.add_path(join(zip_path, file_paths[0]))
p.add_line_processor(JsonLoaderProcessors())
p.add_line_processor(RemoveLineOnJsonValueCondition('gold_label', lambda label: label == '-'))
p.add_line_processor(DictKey2ListMapper(['sentence1', 'sentence2', 'gold_label']))
p.add_sent_processor(ToLower())
p.add_sent_processor(Tokenizer(tokenizer.tokenize), t)
#p.add_sent_processor(NaiveNCharTokenizer(3), not_t)
p.add_token_processor(AddToVocab())
p.add_post_processor(SaveLengthsToState())
p.execute()
p.clear_processors()
p.state['vocab'].save_to_disk()
# 2. Process the data further to stream it to hdf5
p.add_sent_processor(ToLower())
p.add_sent_processor(Tokenizer(tokenizer.tokenize), t)
#p.add_sent_processor(NaiveNCharTokenizer(3), not_t)
p.add_post_processor(ConvertTokenToIdx())
p.add_post_processor(CreateBinsByNestedLength('snli_train', min_batch_size=128))
state = p.execute()
# dev and test data
p2 = Pipeline('snli_example')
p2.copy_vocab_from_pipeline(p)
p2.add_path(join(zip_path, file_paths[1]))
p2.add_line_processor(JsonLoaderProcessors())
p2.add_line_processor(RemoveLineOnJsonValueCondition('gold_label', lambda label: label == '-'))
p2.add_line_processor(DictKey2ListMapper(['sentence1', 'sentence2', 'gold_label']))
p2.add_sent_processor(ToLower())
p2.add_sent_processor(Tokenizer(tokenizer.tokenize), t)
#p2.add_sent_processor(NaiveNCharTokenizer(3), not_t)
p2.add_post_processor(SaveLengthsToState())
p2.execute()
p2.clear_processors()
p2.add_sent_processor(ToLower())
p2.add_sent_processor(Tokenizer(tokenizer.tokenize), t)
#p2.add_sent_processor(NaiveNCharTokenizer(3), not_t)
p2.add_post_processor(ConvertTokenToIdx())
p2.add_post_processor(StreamToHDF5('snli_dev'))
p2.execute()
p3 = Pipeline('snli_example')
p3.copy_vocab_from_pipeline(p)
p3.add_path(join(zip_path, file_paths[2]))
p3.add_line_processor(JsonLoaderProcessors())
p3.add_line_processor(RemoveLineOnJsonValueCondition('gold_label', lambda label: label == '-'))
p3.add_line_processor(DictKey2ListMapper(['sentence1', 'sentence2', 'gold_label']))
p3.add_sent_processor(ToLower())
p3.add_sent_processor(Tokenizer(tokenizer.tokenize), t)
#p3.add_sent_processor(NaiveNCharTokenizer(3), not_t)
p3.add_post_processor(SaveLengthsToState())
p3.execute()
p3.clear_processors()
p3.add_sent_processor(ToLower())
p3.add_sent_processor(Tokenizer(tokenizer.tokenize), t)
#p3.add_sent_processor(NaiveNCharTokenizer(3), not_t)
p3.add_post_processor(ConvertTokenToIdx())
p3.add_post_processor(StreamToHDF5('snli_test'))
p3.execute()
def main():
Logger.GLOBAL_LOG_LEVEL = LogLevel.INFO
#Config.backend = Backends.TENSORFLOW
Config.backend = Backends.TORCH
Config.cuda = True
Config.dropout = 0.1
Config.hidden_size = 128
Config.embedding_size = 256
Config.L2 = 0.00003
do_process = False
if do_process:
preprocess_SNLI(delete_data=True)
p = Pipeline('snli_example')
vocab = p.state['vocab']
vocab.load_from_disk()
batch_size = 128
if Config.backend == Backends.TENSORFLOW:
from spodernet.backends.tfbackend import TensorFlowConfig
TensorFlowConfig.init_batch_size(batch_size)
train_batcher = StreamBatcher('snli_example', 'snli_train', batch_size, randomize=True, loader_threads=8)
#train_batcher.subscribe_to_batch_prepared_event(SomeExpensivePreprocessing())
dev_batcher = StreamBatcher('snli_example', 'snli_dev', batch_size)
test_batcher = StreamBatcher('snli_example', 'snli_test', batch_size)
train_batcher.subscribe_to_events(AccuracyHook('Train', print_every_x_batches=1000))
dev_batcher.subscribe_to_events(AccuracyHook('Dev', print_every_x_batches=1000))
eta = ETAHook(print_every_x_batches=1000)
train_batcher.subscribe_to_events(eta)
train_batcher.subscribe_to_start_of_epoch_event(eta)
model = Model()
model.add(Embedding(128, vocab.num_embeddings))
model.add(PairedBiDirectionalLSTM(128, hidden_size=256, variable_length=True, conditional_encoding=False))
model.add(SoftmaxCrossEntropy(input_size=256*4, num_labels=3))
t = Trainer(model)
for i in range(10):
t.train(train_batcher, epochs=1)
t.evaluate(dev_batcher)
if __name__ == '__main__':
main() | en | 0.533307 | This models is an example for training a classifier on SNLI Creates data and snli paths and downloads SNLI in the home dir Preprocesses SNLI data and returns to spoder files # load data #names, file_paths = snli2json() #train_path, dev_path, test_path = file_paths # tokenize and convert to hdf5 # 1. Setup pipeline to save lengths and generate vocabulary #p.add_sent_processor(NaiveNCharTokenizer(3), not_t) # 2. Process the data further to stream it to hdf5 #p.add_sent_processor(NaiveNCharTokenizer(3), not_t) # dev and test data #p2.add_sent_processor(NaiveNCharTokenizer(3), not_t) #p2.add_sent_processor(NaiveNCharTokenizer(3), not_t) #p3.add_sent_processor(NaiveNCharTokenizer(3), not_t) #p3.add_sent_processor(NaiveNCharTokenizer(3), not_t) #Config.backend = Backends.TENSORFLOW #train_batcher.subscribe_to_batch_prepared_event(SomeExpensivePreprocessing()) | 2.347835 | 2 |
SecretColors/cmaps/parent.py | ri0t/SecretColors | 0 | 6630658 | <gh_stars>0
# Copyright (c) SecretBiology 2020.
#
# Library Name: SecretColors
# Author: <NAME>
# Website: https://github.com/secretBiology/SecretColors
#
#
import random
from SecretColors.data.constants import DIV_COLOR_PAIRS
from SecretColors.helpers.logging import Log
from SecretColors.models.palette import Palette
from typing import List
class ColorMapParent:
"""
This is parent class which will be inherited by all ColorMap objects. It
includes all basic methods which will be common to all the ColorMaps.
.. danger::
Do not use this class in your workflow. This class is intended as a
parent class which you can inherit to make new colormaps. For
general purpose use, you should use
:class:`~SecretColors.cmaps.ColorMap` instead.
"""
def __init__(self, matplotlib,
palette: Palette = None,
log: Log = None,
seed=None):
"""
Initializing of any ColorMap.
:param matplotlib: matplotlib object from matplotlib library
:param palette: Palette from which you want colors
:type palette: Palette
:param log: Log class
:type log: Log
:param seed: Seed for random number generation
"""
self._mat = matplotlib
if log is None:
log = Log()
self.log = log
if palette is None:
palette = Palette()
self.log.info(f"ColorMap will use '{palette.name}' palette")
self._palette = palette
self._seed = seed
if seed is not None:
random.seed(seed)
self.log.info(f"Random seed set for : {seed}")
self.no_of_colors = 10
@property
def data(self) -> dict:
"""Returns all available ColorMap data. This is valid ONLY for
special subclass (e.g. BrewerMap). It will return None for 'ColorMap'
class.
:rtype: dict
"""
raise NotImplementedError
@property
def seed(self):
return self._seed
@seed.setter
def seed(self, value):
"""
Seed for random number generator
:param value: Seed value
"""
self._seed = value
random.seed(value)
self.log.info(f"Random seed set for : {value}")
@property
def palette(self) -> Palette:
"""
:return: Returns current palette from which colors are drawn
:rtype: Palette
"""
return self._palette
@palette.setter
def palette(self, palette: Palette):
"""
Set Palette from which colors will be drawn
Note: Do not set this for special subclasses (like BrewerMap)
:param palette: Color Palette
:type palette: Palette
"""
self._palette = palette
self.log.info(f"ColorMap is now using '{palette.name}' palette")
@property
def get_all(self) -> list:
"""Returns list of available special colormaps. This works only with
special subclasses like BrewerMap.
:return: List of colormap names
:rtype: List[str]
"""
if self.data is None:
return []
else:
return list(self.data.keys())
def _get_linear_segment(self, color_list: list):
"""
:param color_list: List of colors
:return: LinearSegmentedColormap
"""
try:
return self._mat.colors.LinearSegmentedColormap.from_list(
"secret_color", color_list)
except AttributeError:
raise Exception("Matplotlib is required to use this function")
def _get_listed_segment(self, color_list: list):
"""
:param color_list: List of colors
:return: ListedColormap
"""
try:
return self._mat.colors.ListedColormap(color_list)
except AttributeError:
raise Exception("Matplotlib is required to use this function")
def _derive_map(self, color_list: list,
is_qualitative=False,
is_reversed=False):
"""
:param color_list: List of colors
:param is_qualitative: If True, makes listed colormap
:param is_reversed: Reverses the order of color in Colormap
:return: Colormap which can be directly used with matplotlib
"""
if is_reversed:
color_list = [x for x in reversed(color_list)]
if is_qualitative:
return self._get_listed_segment(color_list)
else:
return self._get_linear_segment(color_list)
def _get_colors(self, key: str, no_of_colors: int, backup: str,
staring_shade, ending_shade):
if no_of_colors < 2:
self.log.error("Minimum of 2 colors are required for generating "
"Colormap", exception=ValueError)
colors = None
# First check if for given combinations of parameters, colors are
# available
if self.data is not None:
if key in self.data:
if str(no_of_colors) in self.data[key]:
colors = self.data[key][str(no_of_colors)]
self.log.info("Colormap for given combination found")
if colors is None:
self.log.info("Colormap for given combination not found.")
self.log.info("Searching standard colors")
# Just make request for the color so that additional colors will
# be added to palette
self.palette.get(backup)
if (staring_shade is not None or
ending_shade is not None or
colors is None):
self.log.warn("Overriding the available standard Colormaps "
"because starting_shade or ending_shade is provided")
if staring_shade is None:
staring_shade = min(self.palette.colors[
backup].get_all_shades())
if ending_shade is None:
ending_shade = max(
self.palette.colors[backup].get_all_shades())
return self.palette.get(backup, no_of_colors=no_of_colors,
starting_shade=staring_shade,
ending_shade=ending_shade)
return colors
def get_colors(self, name: str, no_of_colors: int) -> list:
"""
        This is an easy way to get the available colors in the current colormap
.. code-block:: python
cm = BrewerMap(matplotlib)
cm.get_colors('Spectral', 9) # Returns 9 'Spectral' colors from BrewerMap colormap
.. warning::
            Be careful when using the `no_of_colors` argument. It actually
            points to the number of colors available in the given colormap.
            For example, the 'Tableau' map from :class:`~SecretColors.cmaps.TableauMap`
            contains two lists of colors, 10 and 20. So you need to enter
            either 10 or 20. Any other number will raise ValueError. You can
            check which options are available via the :attr:`get_all`
            property. More about this can be read in the documentation of the
:func:`~SecretColors.cmaps.parent.ColorMapParent.get` function.
:param name: Name of the special colormap
:type name: str
:param no_of_colors: Number of colors (see warning above)
:type no_of_colors: int
:return: List of colors
:rtype: List[str]
:raises: ValueError (if used on
:class:`~SecretColors.cmaps.ColorMap` or wrong
`no_of_colors` provided)
"""
if self.data is not None:
if name not in self.data.keys():
self.log.error(f"'{name}' is not available in current "
f"colormap. Following are allowed arguments "
f"here: {self.get_all}")
if str(no_of_colors) not in self.data[name].keys():
n = list(self.data[name].keys())
if "type" in n:
n.remove("type")
n = [int(x) for x in n]
self.log.error(f"Currently following number of colors are "
f"allowed for {name}. : {n}")
if str(no_of_colors) in self.data[name]:
return self.data[name][str(no_of_colors)]
else:
raise KeyError(
f"This palette did not have this key '{no_of_colors}'")
return []
def _default(self, name, backup, kwargs):
if "self" in kwargs:
del kwargs['self']
if "starting_shade" not in kwargs:
kwargs["starting_shade"] = None
if "ending_shade" not in kwargs:
kwargs["ending_shade"] = None
no_of_colors = kwargs['no_of_colors'] or self.no_of_colors
bak_name = backup or name
colors = self._get_colors(key=name,
no_of_colors=no_of_colors,
backup=bak_name,
staring_shade=kwargs['starting_shade'],
ending_shade=kwargs['ending_shade'])
return self._derive_map(colors,
is_qualitative=kwargs['is_qualitative'],
is_reversed=kwargs['is_reversed'])
def _special_maps(self, name, backup, kwargs):
if name not in self.data.keys():
self.log.error(f"There is no '{name}' colormap in our "
f"database. Following special colormaps are"
f" available in current class :"
f" {list(self.data.keys())}")
no_of_colors = kwargs['no_of_colors'] or self.no_of_colors
cols = list(self.data[name].keys())
if 'type' in cols:
cols.remove('type')
cols = [int(x) for x in cols]
if no_of_colors not in cols:
self.log.error(f"Sorry, for {name} colormap, 'no_of_colors' "
f"argument can "
f"only take these values: {cols}.")
return self._default(name, backup, kwargs)
def from_list(self, color_list: list, is_qualitative: bool = False,
is_reversed=False):
"""
You can create your own colormap with list of own colors
:param color_list: List of colors
:param is_qualitative: If True, makes listed colormap
:param is_reversed: Reverses the order of color in Colormap
:return: Colormap which can be directly used with matplotlib
"""
return self._derive_map(color_list, is_qualitative, is_reversed)
def get(self, name: str, *, no_of_colors: int = None,
is_qualitative: bool = False, is_reversed=False):
"""
Get arbitrary color map from current ColorMap object
`no_of_colors` is probably the most important parameter in the
colormap classes. In this library each colormap data is structured
in the form of dictionary as shown below::
data = { 'map_name' : {
'10': [c1, c2, ... c10],
'5' : [b1, b2, ... b5],
...
'type': Type of colormap
}
}
In above example, if you want to access list [c1, c2...c10], you can
do following,
>>> YourMap().get('map_name',no_of_colors=10) # Returns [c1, c2 ...c10]
You can check which all colormaps are
available by :attr:`~SecretColors.cmaps.parent.ColorMapParent.get_all` property
:param name: Exact Name of the Colormap
:type name: str
:param no_of_colors: Number of colors. (See discussion above)
:type no_of_colors: int
:param is_qualitative: If True, listed colormap will be returned. (
default: False)
:type is_qualitative: bool
:param is_reversed: If True, colormap will be reversed. (default:
False)
:type is_reversed: bool
:return: Colormap object
:rtype: :class:`matplotlib.colors.ListedColormap` or :class:`matplotlib.colors.LinearSegmentedColormap`
"""
if self.data is None:
self.log.error(f"This method can only be used with special "
f"colormap. If you are using 'ColorMap' class "
f"directly. You can only use standard maps. or "
f"create your own.")
return self._special_maps(name, None, locals())
def greens(self, *, starting_shade: float = None,
ending_shade: float = None,
no_of_colors: int = None,
is_qualitative: bool = False,
is_reversed=False):
return self._default(None, "green", locals())
def reds(self, *, starting_shade: float = None,
ending_shade: float = None,
no_of_colors: int = None,
is_qualitative: bool = False,
is_reversed=False):
return self._default(None, "red", locals())
def oranges(self, *, starting_shade: float = None,
ending_shade: float = None,
no_of_colors: int = None,
is_qualitative: bool = False,
is_reversed=False):
return self._default(None, "orange", locals())
def purples(self, *, starting_shade: float = None,
ending_shade: float = None,
no_of_colors: int = None,
is_qualitative: bool = False,
is_reversed=False):
return self._default(None, "purple", locals())
def grays(self, *, starting_shade: float = None,
ending_shade: float = None,
no_of_colors: int = None,
is_qualitative: bool = False,
is_reversed=False):
return self._default(None, "gray", locals())
def blues(self, *, starting_shade: float = None,
ending_shade: float = None,
no_of_colors: int = None,
is_qualitative: bool = False,
is_reversed=False):
return self._default(None, "blue", locals())
def random_divergent(self, is_qualitative=False, is_reversed=False):
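        """
        Return a divergent colormap built from a randomly chosen divergent
        color set: special subclasses pick a random 'div'-type map from their
        data, while the plain ColorMap falls back to a random pair from
        DIV_COLOR_PAIRS.
        """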
names = []
if self.data is not None:
for k in self.data:
if self.data[k]["type"] == "div":
names.append(k)
if len(names) > 0:
random.shuffle(names)
keys = list(self.data[names[0]].keys())
keys.remove("type")
random.shuffle(keys)
kwargs = locals()
kwargs["no_of_colors"] = int(keys[0])
return self._special_maps(names[0], None, kwargs)
else:
names = [x for x in DIV_COLOR_PAIRS]
random.shuffle(names)
cols = []
for c in names[0]:
for s in c[1]:
cols.append(self.palette.get(c[0], shade=s))
return self.from_list(cols)
class ColorMap(ColorMapParent):
"""
    This is a simple wrapper around
    :class:`~SecretColors.cmaps.parent.ColorMapParent`. This wrapper lets you
    use all methods from its parent class. For general-purpose use,
    you should use this class. If you want more specialized ColorMaps,
    use their respective classes. The following is the simplest use, where you
    want to visualize your data in the typical 'greens' palette
.. code-block:: python
import matplotlib
import matplotlib.pyplot as plt
from SecretColors.cmaps import ColorMap
import numpy as np
cm = ColorMap(matplotlib)
data = np.random.rand(5, 5)
plt.imshow(data, cmap=cm.greens())
plt.colorbar()
plt.show()
    You can easily change between the standard colormaps as follows
.. code-block:: python
cm.reds() # Reds colormap
cm.oranges() # Oranges colormap
cm.blues() # Blues colormap
cm.grays() # Grays colormap
    All standard colormaps accept the following basic options (which should be
    provided as named arguments)
    - :no_of_colors: Number of colors you want in your
      colormap. It usually defines how smooth your color gradient will be
- :starting_shade: What will be the first shade of your colormap
- :ending_shade: What will be the last shade of your colormap
- :is_qualitative: If True,
:class:`matplotlib.colors.ListedColormap` will be used instead
:class:`matplotlib.colors.LinearSegmentedColormap`. Essentially it
will provide discrete colormap instead linear
- :is_reversed: If True, colormap will be reversed
.. code-block:: python
cm.purples(no_of_colors=8)
cm.greens(starting_shade=30, ending_shade=80)
cm.blues(is_qualitative=True)
cm.reds(ending_shade=50, is_reversed=True, no_of_colors=5)
You can mix-and-match every argument. Essentially there are infinite
possibilities.
    If you want even more fine-tuned control over your colormap, you can use
    your own colormaps via the
    :func:`~SecretColors.cmaps.parent.ColorMapParent.from_list` method.
.. code-block:: python
cm = ColorMap(matplotlib)
p = Palette()
my_colors = [p.red(shade=30), p.white(), p.blue(shade=60)]
my_cmap = cm.from_list(my_colors)
plt.imshow(data, cmap=my_cmap)
    We have some built-in color lists for divergent colormaps. You can use
    :func:`~SecretColors.cmaps.parent.ColorMapParent.random_divergent` for
    easy access to them. Read the :class:`~SecretColors.cmaps.parent.ColorMapParent`
    documentation for more details on the helper functions.
    If you like the colors from a specific :class:`~SecretColors.Palette`, you can
    easily switch all colors with a single line
.. code-block:: python
cm = ColorMap(matplotlib)
cm.palette = Palette("material") # Material Palette colors will be used.
cm.palette = Palette("brewer") # ColorBrewer colors will be used.
.. tip::
For "brewer" and "tableau", you should prefer using
:class:`~SecretColors.cmaps.BrewerMap` and
        :class:`~SecretColors.cmaps.TableauMap` instead of just changing the
        palette here, as these classes provide additional methods which are
        only available in those classes.
"""
@property
def data(self) -> dict:
return None
def run():
from SecretColors.data.cmaps.brewer import BREWER_DATA
for b in BREWER_DATA:
print(f"* {b}")
| # Copyright (c) SecretBiology 2020.
#
# Library Name: SecretColors
# Author: <NAME>
# Website: https://github.com/secretBiology/SecretColors
#
#
import random
from SecretColors.data.constants import DIV_COLOR_PAIRS
from SecretColors.helpers.logging import Log
from SecretColors.models.palette import Palette
from typing import List
class ColorMapParent:
"""
    This is the parent class which will be inherited by all ColorMap objects. It
includes all basic methods which will be common to all the ColorMaps.
.. danger::
Do not use this class in your workflow. This class is intended as a
parent class which you can inherit to make new colormaps. For
general purpose use, you should use
:class:`~SecretColors.cmaps.ColorMap` instead.
"""
def __init__(self, matplotlib,
palette: Palette = None,
log: Log = None,
seed=None):
"""
Initializing of any ColorMap.
:param matplotlib: matplotlib object from matplotlib library
:param palette: Palette from which you want colors
:type palette: Palette
:param log: Log class
:type log: Log
:param seed: Seed for random number generation
"""
self._mat = matplotlib
if log is None:
log = Log()
self.log = log
if palette is None:
palette = Palette()
self.log.info(f"ColorMap will use '{palette.name}' palette")
self._palette = palette
self._seed = seed
if seed is not None:
random.seed(seed)
self.log.info(f"Random seed set for : {seed}")
self.no_of_colors = 10
@property
def data(self) -> dict:
"""Returns all available ColorMap data. This is valid ONLY for
special subclass (e.g. BrewerMap). It will return None for 'ColorMap'
class.
:rtype: dict
"""
raise NotImplementedError
@property
def seed(self):
return self._seed
@seed.setter
def seed(self, value):
"""
Seed for random number generator
:param value: Seed value
"""
self._seed = value
random.seed(value)
self.log.info(f"Random seed set for : {value}")
@property
def palette(self) -> Palette:
"""
:return: Returns current palette from which colors are drawn
:rtype: Palette
"""
return self._palette
@palette.setter
def palette(self, palette: Palette):
"""
Set Palette from which colors will be drawn
Note: Do not set this for special subclasses (like BrewerMap)
:param palette: Color Palette
:type palette: Palette
"""
self._palette = palette
self.log.info(f"ColorMap is now using '{palette.name}' palette")
@property
def get_all(self) -> list:
"""Returns list of available special colormaps. This works only with
special subclasses like BrewerMap.
:return: List of colormap names
:rtype: List[str]
"""
if self.data is None:
return []
else:
return list(self.data.keys())
def _get_linear_segment(self, color_list: list):
"""
:param color_list: List of colors
:return: LinearSegmentedColormap
"""
try:
return self._mat.colors.LinearSegmentedColormap.from_list(
"secret_color", color_list)
except AttributeError:
raise Exception("Matplotlib is required to use this function")
def _get_listed_segment(self, color_list: list):
"""
:param color_list: List of colors
:return: ListedColormap
"""
try:
return self._mat.colors.ListedColormap(color_list)
except AttributeError:
raise Exception("Matplotlib is required to use this function")
def _derive_map(self, color_list: list,
is_qualitative=False,
is_reversed=False):
"""
:param color_list: List of colors
:param is_qualitative: If True, makes listed colormap
:param is_reversed: Reverses the order of color in Colormap
:return: Colormap which can be directly used with matplotlib
"""
if is_reversed:
color_list = [x for x in reversed(color_list)]
if is_qualitative:
return self._get_listed_segment(color_list)
else:
return self._get_linear_segment(color_list)
def _get_colors(self, key: str, no_of_colors: int, backup: str,
staring_shade, ending_shade):
if no_of_colors < 2:
self.log.error("Minimum of 2 colors are required for generating "
"Colormap", exception=ValueError)
colors = None
# First check if for given combinations of parameters, colors are
# available
if self.data is not None:
if key in self.data:
if str(no_of_colors) in self.data[key]:
colors = self.data[key][str(no_of_colors)]
self.log.info("Colormap for given combination found")
if colors is None:
self.log.info("Colormap for given combination not found.")
self.log.info("Searching standard colors")
# Just make request for the color so that additional colors will
# be added to palette
self.palette.get(backup)
if (staring_shade is not None or
ending_shade is not None or
colors is None):
self.log.warn("Overriding the available standard Colormaps "
"because starting_shade or ending_shade is provided")
if staring_shade is None:
staring_shade = min(self.palette.colors[
backup].get_all_shades())
if ending_shade is None:
ending_shade = max(
self.palette.colors[backup].get_all_shades())
return self.palette.get(backup, no_of_colors=no_of_colors,
starting_shade=staring_shade,
ending_shade=ending_shade)
return colors
def get_colors(self, name: str, no_of_colors: int) -> list:
"""
        This is an easy way to get the available colors in the current colormap.
.. code-block:: python
cm = BrewerMap(matplotlib)
cm.get_colors('Spectral', 9) # Returns 9 'Spectral' colors from BrewerMap colormap
.. warning::
        Be careful when using the `no_of_colors` argument. It actually
        refers to the number of colors available in the given colormap. For
        example, the 'Tableau' map from :class:`~SecretColors.cmaps.TableauMap`
        contains two lists of colors, with 10 and 20 entries. So you need to
        enter either 10 or 20. Any other number will raise ValueError. You can
        check which options are available via the :attr:`get_all`
        property. More about this can be read in the documentation of the
        :func:`~SecretColors.cmaps.parent.ColorMapParent.get` function.
:param name: Name of the special colormap
:type name: str
:param no_of_colors: Number of colors (see warning above)
:type no_of_colors: int
:return: List of colors
:rtype: List[str]
:raises: ValueError (if used on
:class:`~SecretColors.cmaps.ColorMap` or wrong
`no_of_colors` provided)
"""
if self.data is not None:
if name not in self.data.keys():
self.log.error(f"'{name}' is not available in current "
f"colormap. Following are allowed arguments "
f"here: {self.get_all}")
if str(no_of_colors) not in self.data[name].keys():
n = list(self.data[name].keys())
if "type" in n:
n.remove("type")
n = [int(x) for x in n]
self.log.error(f"Currently following number of colors are "
f"allowed for {name}. : {n}")
if str(no_of_colors) in self.data[name]:
return self.data[name][str(no_of_colors)]
else:
raise KeyError(
f"This palette did not have this key '{no_of_colors}'")
return []
def _default(self, name, backup, kwargs):
if "self" in kwargs:
del kwargs['self']
if "starting_shade" not in kwargs:
kwargs["starting_shade"] = None
if "ending_shade" not in kwargs:
kwargs["ending_shade"] = None
no_of_colors = kwargs['no_of_colors'] or self.no_of_colors
bak_name = backup or name
colors = self._get_colors(key=name,
no_of_colors=no_of_colors,
backup=bak_name,
staring_shade=kwargs['starting_shade'],
ending_shade=kwargs['ending_shade'])
return self._derive_map(colors,
is_qualitative=kwargs['is_qualitative'],
is_reversed=kwargs['is_reversed'])
def _special_maps(self, name, backup, kwargs):
if name not in self.data.keys():
self.log.error(f"There is no '{name}' colormap in our "
f"database. Following special colormaps are"
f" available in current class :"
f" {list(self.data.keys())}")
no_of_colors = kwargs['no_of_colors'] or self.no_of_colors
cols = list(self.data[name].keys())
if 'type' in cols:
cols.remove('type')
cols = [int(x) for x in cols]
if no_of_colors not in cols:
self.log.error(f"Sorry, for {name} colormap, 'no_of_colors' "
f"argument can "
f"only take these values: {cols}.")
return self._default(name, backup, kwargs)
def from_list(self, color_list: list, is_qualitative: bool = False,
is_reversed=False):
"""
        You can create your own colormap from a list of your own colors
:param color_list: List of colors
:param is_qualitative: If True, makes listed colormap
:param is_reversed: Reverses the order of color in Colormap
:return: Colormap which can be directly used with matplotlib
"""
return self._derive_map(color_list, is_qualitative, is_reversed)
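    # A minimal usage sketch (it mirrors the example in the ColorMap docstring
    # below; assumes matplotlib, a Palette instance and some 2-D `data` to plot):
    #
    #   cm = ColorMap(matplotlib)
    #   p = Palette()
    #   my_cmap = cm.from_list([p.red(shade=30), p.white(), p.blue(shade=60)])
    #   plt.imshow(data, cmap=my_cmap)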
def get(self, name: str, *, no_of_colors: int = None,
is_qualitative: bool = False, is_reversed=False):
"""
        Get an arbitrary colormap from the current ColorMap object.
        `no_of_colors` is probably the most important parameter in the
        colormap classes. In this library, each colormap's data is structured
        as a dictionary, as shown below::
data = { 'map_name' : {
'10': [c1, c2, ... c10],
'5' : [b1, b2, ... b5],
...
'type': Type of colormap
}
}
        In the above example, if you want to access the list [c1, c2 ... c10],
        you can do the following,
>>> YourMap().get('map_name',no_of_colors=10) # Returns [c1, c2 ...c10]
        You can check which colormaps are
        available via the :attr:`~SecretColors.cmaps.parent.ColorMapParent.get_all` property
:param name: Exact Name of the Colormap
:type name: str
:param no_of_colors: Number of colors. (See discussion above)
:type no_of_colors: int
:param is_qualitative: If True, listed colormap will be returned. (
default: False)
:type is_qualitative: bool
:param is_reversed: If True, colormap will be reversed. (default:
False)
:type is_reversed: bool
:return: Colormap object
:rtype: :class:`matplotlib.colors.ListedColormap` or :class:`matplotlib.colors.LinearSegmentedColormap`
"""
if self.data is None:
            self.log.error(f"This method can only be used with special "
                           f"colormaps. If you are using the 'ColorMap' class "
                           f"directly, you can only use the standard maps or "
                           f"create your own.")
return self._special_maps(name, None, locals())
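    # A minimal sketch of fetching a named special map with options (BrewerMap
    # shown; 'Spectral' is one of its divergent maps, see get_colors above):
    #
    #   bm = BrewerMap(matplotlib)
    #   cmap = bm.get('Spectral', no_of_colors=9, is_reversed=True)
    #   plt.imshow(data, cmap=cmap)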
def greens(self, *, starting_shade: float = None,
ending_shade: float = None,
no_of_colors: int = None,
is_qualitative: bool = False,
is_reversed=False):
return self._default(None, "green", locals())
def reds(self, *, starting_shade: float = None,
ending_shade: float = None,
no_of_colors: int = None,
is_qualitative: bool = False,
is_reversed=False):
return self._default(None, "red", locals())
def oranges(self, *, starting_shade: float = None,
ending_shade: float = None,
no_of_colors: int = None,
is_qualitative: bool = False,
is_reversed=False):
return self._default(None, "orange", locals())
def purples(self, *, starting_shade: float = None,
ending_shade: float = None,
no_of_colors: int = None,
is_qualitative: bool = False,
is_reversed=False):
return self._default(None, "purple", locals())
def grays(self, *, starting_shade: float = None,
ending_shade: float = None,
no_of_colors: int = None,
is_qualitative: bool = False,
is_reversed=False):
return self._default(None, "gray", locals())
def blues(self, *, starting_shade: float = None,
ending_shade: float = None,
no_of_colors: int = None,
is_qualitative: bool = False,
is_reversed=False):
return self._default(None, "blue", locals())
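    # A short sketch of the standard colormap helpers above; they all accept the
    # same keyword-only options (see the ColorMap class docstring below):
    #
    #   cm = ColorMap(matplotlib)
    #   plt.imshow(data, cmap=cm.greens(starting_shade=30, ending_shade=80))
    #   plt.imshow(data, cmap=cm.reds(no_of_colors=5, is_reversed=True))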
def random_divergent(self, is_qualitative=False, is_reversed=False):
names = []
if self.data is not None:
for k in self.data:
if self.data[k]["type"] == "div":
names.append(k)
if len(names) > 0:
random.shuffle(names)
keys = list(self.data[names[0]].keys())
keys.remove("type")
random.shuffle(keys)
kwargs = locals()
kwargs["no_of_colors"] = int(keys[0])
return self._special_maps(names[0], None, kwargs)
else:
names = [x for x in DIV_COLOR_PAIRS]
random.shuffle(names)
cols = []
for c in names[0]:
for s in c[1]:
cols.append(self.palette.get(c[0], shade=s))
return self.from_list(cols)
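    # A quick sketch of grabbing a random divergent colormap; the result changes
    # between calls unless a seed is set first:
    #
    #   cm = ColorMap(matplotlib)
    #   cm.seed = 42
    #   plt.imshow(data, cmap=cm.random_divergent())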
class ColorMap(ColorMapParent):
"""
    This is a simple wrapper around
    :class:`~SecretColors.cmaps.parent.ColorMapParent`. This wrapper lets you
    use all the methods from its parent class. For general-purpose use,
    you should use this class. If you want more specialized ColorMaps,
    use their respective classes. Following is the simplest use, where you
    want to visualize your data in the typical 'greens' palette
.. code-block:: python
import matplotlib
import matplotlib.pyplot as plt
from SecretColors.cmaps import ColorMap
import numpy as np
cm = ColorMap(matplotlib)
data = np.random.rand(5, 5)
plt.imshow(data, cmap=cm.greens())
plt.colorbar()
plt.show()
    You can easily switch between the standard colormaps as follows
.. code-block:: python
cm.reds() # Reds colormap
cm.oranges() # Oranges colormap
cm.blues() # Blues colormap
cm.grays() # Grays colormap
    All standard colormaps accept the following basic options (which should be
    provided as named arguments)
    - :no_of_colors: Number of colors you want in your
    colormap. It usually defines how smooth your color gradient will be
- :starting_shade: What will be the first shade of your colormap
- :ending_shade: What will be the last shade of your colormap
- :is_qualitative: If True,
    :class:`matplotlib.colors.ListedColormap` will be used instead of
    :class:`matplotlib.colors.LinearSegmentedColormap`. Essentially it
    will provide a discrete colormap instead of a linear one
- :is_reversed: If True, colormap will be reversed
.. code-block:: python
cm.purples(no_of_colors=8)
cm.greens(starting_shade=30, ending_shade=80)
cm.blues(is_qualitative=True)
cm.reds(ending_shade=50, is_reversed=True, no_of_colors=5)
You can mix-and-match every argument. Essentially there are infinite
possibilities.
    If you want even finer control over your colormap, you can use
    your own colormaps via the :func:`~SecretColors.cmaps.parent.ColorMapParent
    .from_list` method.
.. code-block:: python
cm = ColorMap(matplotlib)
p = Palette()
my_colors = [p.red(shade=30), p.white(), p.blue(shade=60)]
my_cmap = cm.from_list(my_colors)
plt.imshow(data, cmap=my_cmap)
    We have some built-in color lists for divergent colormaps. You can use
:func:`~SecretColors.cmaps.parent.ColorMapParent.random_divergent` for
its easy access. Read :class:`~SecretColors.cmaps.parent.ColorMapParent`
documentation for more details on helper functions.
    If you like colors from a specific :class:`~SecretColors.Palette`, you can
    easily switch all colors with a single line
.. code-block:: python
cm = ColorMap(matplotlib)
cm.palette = Palette("material") # Material Palette colors will be used.
cm.palette = Palette("brewer") # ColorBrewer colors will be used.
.. tip::
For "brewer" and "tableau", you should prefer using
:class:`~SecretColors.cmaps.BrewerMap` and
        :class:`~SecretColors.cmaps.TableauMap` instead of just changing the
        palette here, as these classes provide many additional
        methods which are only available in those classes.
"""
@property
def data(self) -> dict:
return None
def run():
from SecretColors.data.cmaps.brewer import BREWER_DATA
for b in BREWER_DATA:
print(f"* {b}") | en | 0.628927 | # Copyright (c) SecretBiology 2020. # # Library Name: SecretColors # Author: <NAME> # Website: https://github.com/secretBiology/SecretColors # # This is parent class which will be inherited by all ColorMap objects. It includes all basic methods which will be common to all the ColorMaps. .. danger:: Do not use this class in your workflow. This class is intended as a parent class which you can inherit to make new colormaps. For general purpose use, you should use :class:`~SecretColors.cmaps.ColorMap` instead. Initializing of any ColorMap. :param matplotlib: matplotlib object from matplotlib library :param palette: Palette from which you want colors :type palette: Palette :param log: Log class :type log: Log :param seed: Seed for random number generation Returns all available ColorMap data. This is valid ONLY for special subclass (e.g. BrewerMap). It will return None for 'ColorMap' class. :rtype: dict Seed for random number generator :param value: Seed value :return: Returns current palette from which colors are drawn :rtype: Palette Set Palette from which colors will be drawn Note: Do not set this for special subclasses (like BrewerMap) :param palette: Color Palette :type palette: Palette Returns list of available special colormaps. This works only with special subclasses like BrewerMap. :return: List of colormap names :rtype: List[str] :param color_list: List of colors :return: LinearSegmentedColormap :param color_list: List of colors :return: ListedColormap :param color_list: List of colors :param is_qualitative: If True, makes listed colormap :param is_reversed: Reverses the order of color in Colormap :return: Colormap which can be directly used with matplotlib # First check if for given combinations of parameters, colors are # available # Just make request for the color so that additional colors will # be added to palette This is easy way to get the available colors in current colormap .. code-block:: python cm = BrewerMap(matplotlib) cm.get_colors('Spectral', 9) # Returns 9 'Spectral' colors from BrewerMap colormap .. warning:: Be careful in using `no_of_colors` argument. It actually points to number of colors available in given colormap. For example, 'Tableau' map from :class:`~SecretColors.cmaps.TableauMap` contains two list of colors, 10 and 20. So you need to enter either 10 or 20. Any other number will raise ValueError. You can check which all options are available by :attr:`get_all` property. More about this can be read in documentation of :func:`~SecretColors.cmaps.parent.ColorMapParent.get` function. :param name: Name of the special colormap :type name: str :param no_of_colors: Number of colors (see warning above) :type no_of_colors: int :return: List of colors :rtype: List[str] :raises: ValueError (if used on :class:`~SecretColors.cmaps.ColorMap` or wrong `no_of_colors` provided) You can create your own colormap with list of own colors :param color_list: List of colors :param is_qualitative: If True, makes listed colormap :param is_reversed: Reverses the order of color in Colormap :return: Colormap which can be directly used with matplotlib Get arbitrary color map from current ColorMap object `no_of_colors` is probably the most important parameter in the colormap classes. In this library each colormap data is structured in the form of dictionary as shown below:: data = { 'map_name' : { '10': [c1, c2, ... c10], '5' : [b1, b2, ... b5], ... 
'type': Type of colormap } } In above example, if you want to access list [c1, c2...c10], you can do following, >>> YourMap().get('map_name',no_of_colors=10) # Returns [c1, c2 ...c10] You can check which all colormaps are available by :attr:`~SecretColors.cmaps.parent.ColorMapParent.get_all` property :param name: Exact Name of the Colormap :type name: str :param no_of_colors: Number of colors. (See discussion above) :type no_of_colors: int :param is_qualitative: If True, listed colormap will be returned. ( default: False) :type is_qualitative: bool :param is_reversed: If True, colormap will be reversed. (default: False) :type is_reversed: bool :return: Colormap object :rtype: :class:`matplotlib.colors.ListedColormap` or :class:`matplotlib.colors.LinearSegmentedColormap` This is simple wrapper around :class:`~SecretColors.cmaps.parent.ColorMapParent`. This wrapper let you utilize all methods from its parent class. For all general purpose use, you should use this class. If you want more specialized ColorMaps, use their respective classes. Following is the simplest use where you want to visualize your data in typical 'greens' palette .. code-block:: python import matplotlib import matplotlib.pyplot as plt from SecretColors.cmaps import ColorMap import numpy as np cm = ColorMap(matplotlib) data = np.random.rand(5, 5) plt.imshow(data, cmap=cm.greens()) plt.colorbar() plt.show() You can easily change standard colormaps like following .. code-block:: python cm.reds() # Reds colormap cm.oranges() # Oranges colormap cm.blues() # Blues colormap cm.grays() # Grays colormap All standard colormaps accepts following basic options (which should be provided as a named arguments) - :no_of_colors: Number of colors you want in your colormap. It usually defines how smaooth your color gradient will be - :starting_shade: What will be the first shade of your colormap - :ending_shade: What will be the last shade of your colormap - :is_qualitative: If True, :class:`matplotlib.colors.ListedColormap` will be used instead :class:`matplotlib.colors.LinearSegmentedColormap`. Essentially it will provide discrete colormap instead linear - :is_reversed: If True, colormap will be reversed .. code-block:: python cm.purples(no_of_colors=8) cm.greens(starting_shade=30, ending_shade=80) cm.blues(is_qualitative=True) cm.reds(ending_shade=50, is_reversed=True, no_of_colors=5) You can mix-and-match every argument. Essentially there are infinite possibilities. If you want even more fine-tune control over your colormap, you can use your own colormaps by :func:`~SecretColors.cmaps.parent.ColorMapParent .from_list` method. .. code-block:: python cm = ColorMap(matplotlib) p = Palette() my_colors = [p.red(shade=30), p.white(), p.blue(shade=60)] my_cmap = cm.from_list(my_colors) plt.imshow(data, cmap=my_cmap) We have some in-build color lists for divergent colormaps. You can use :func:`~SecretColors.cmaps.parent.ColorMapParent.random_divergent` for its easy access. Read :class:`~SecretColors.cmaps.parent.ColorMapParent` documentation for more details on helper functions. If you like colors from specific :class:`~SecretColors.Palette`, you can easily switch all colors with single line .. code-block:: python cm = ColorMap(matplotlib) cm.palette = Palette("material") # Material Palette colors will be used. cm.palette = Palette("brewer") # ColorBrewer colors will be used. .. 
tip:: For "brewer" and "tableau", you should prefer using :class:`~SecretColors.cmaps.BrewerMap` and :class:`~SecretColors.cmaps.TableauMap` intsead just changing palette here. As these classes will provide you much more additional methods which are only available in those classes. | 2.876531 | 3 |
Problems/lcof-004/solve.py | luanshiyinyang/LCNotes | 3 | 6630659 | <gh_stars>1-10
from typing import List


class Solution:
def findNumberIn2DArray(self, matrix: List[List[int]], target: int) -> bool:
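        # Staircase search: start at the bottom-left corner; each step discards
        # either one row (current value too large -> move up) or one column
        # (too small -> move right), so the scan is O(rows + columns).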
if matrix == []:
return False
row, column = len(matrix), len(matrix[0])
i, j = row - 1, 0
        while i >= 0 and j < column:
if matrix[i][j] > target:
i -= 1
elif matrix[i][j] < target:
j += 1
else:
return True
        return False | from typing import List


class Solution:
def findNumberIn2DArray(self, matrix: List[List[int]], target: int) -> bool:
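        # Staircase search: start at the bottom-left corner; each step discards
        # either one row (current value too large -> move up) or one column
        # (too small -> move right), so the scan is O(rows + columns).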
if matrix == []:
return False
row, column = len(matrix), len(matrix[0])
i, j = row - 1, 0
        while i >= 0 and j < column:
if matrix[i][j] > target:
i -= 1
elif matrix[i][j] < target:
j += 1
else:
return True
return False | none | 1 | 3.398166 | 3 |
|
src/imitation/scripts/config/eval_policy.py | Cladett/imitation | 1 | 6630660 | import os
import sacred
from imitation.util import util
eval_policy_ex = sacred.Experiment("eval_policy")
@eval_policy_ex.config
def replay_defaults():
env_name = "CartPole-v1" # environment to evaluate in
eval_n_timesteps = int(1e4) # Min timesteps to evaluate, optional.
eval_n_episodes = None # Num episodes to evaluate, optional.
num_vec = 1 # number of environments in parallel
parallel = False # Use SubprocVecEnv (generally faster if num_vec>1)
max_episode_steps = None # Set to positive int to limit episode horizons
videos = False # save video files
video_kwargs = {} # arguments to VideoWrapper
render = True # render to screen
render_fps = 60 # -1 to render at full speed
log_root = os.path.join("output", "eval_policy") # output directory
policy_type = "ppo" # class to load policy, see imitation.policies.loader
policy_path = (
"tests/data/expert_models/" "cartpole_0/policies/final/"
) # serialized policy
reward_type = None # Optional: override with reward of this type
reward_path = None # Path of serialized reward to load
@eval_policy_ex.config
def logging(log_root, env_name):
log_dir = os.path.join(
log_root, env_name.replace("/", "_"), util.make_unique_timestamp()
)
@eval_policy_ex.named_config
def fast():
eval_n_timesteps = 1
eval_n_episodes = None
max_episode_steps = 1
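# Usage sketch: Sacred experiments like this one are normally driven from the
# command line, where named configs and overrides are selected with `with`; the
# exact entry-point module below is an assumption, not taken from this file.
#
#   python -m imitation.scripts.eval_policy with fast render=False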
| import os
import sacred
from imitation.util import util
eval_policy_ex = sacred.Experiment("eval_policy")
@eval_policy_ex.config
def replay_defaults():
env_name = "CartPole-v1" # environment to evaluate in
eval_n_timesteps = int(1e4) # Min timesteps to evaluate, optional.
eval_n_episodes = None # Num episodes to evaluate, optional.
num_vec = 1 # number of environments in parallel
parallel = False # Use SubprocVecEnv (generally faster if num_vec>1)
max_episode_steps = None # Set to positive int to limit episode horizons
videos = False # save video files
video_kwargs = {} # arguments to VideoWrapper
render = True # render to screen
render_fps = 60 # -1 to render at full speed
log_root = os.path.join("output", "eval_policy") # output directory
policy_type = "ppo" # class to load policy, see imitation.policies.loader
policy_path = (
"tests/data/expert_models/" "cartpole_0/policies/final/"
) # serialized policy
reward_type = None # Optional: override with reward of this type
reward_path = None # Path of serialized reward to load
@eval_policy_ex.config
def logging(log_root, env_name):
log_dir = os.path.join(
log_root, env_name.replace("/", "_"), util.make_unique_timestamp()
)
@eval_policy_ex.named_config
def fast():
eval_n_timesteps = 1
eval_n_episodes = None
max_episode_steps = 1
| en | 0.738775 | # environment to evaluate in # Min timesteps to evaluate, optional. # Num episodes to evaluate, optional. # number of environments in parallel # Use SubprocVecEnv (generally faster if num_vec>1) # Set to positive int to limit episode horizons # save video files # arguments to VideoWrapper # render to screen # -1 to render at full speed # output directory # class to load policy, see imitation.policies.loader # serialized policy # Optional: override with reward of this type # Path of serialized reward to load | 2.227084 | 2 |
utils/functions.py | NIRDERIi/bot | 2 | 6630661 | <gh_stars>1-10
from utils.errors import ProcessError
import discord
from discord.ext import commands
from utils.buttons import Paginator
from bot import CustomContext, Bot
import more_itertools
import re
import difflib
import traceback
from utils.constants import Time, General
import datetime
import contextlib
MYSTB_DOCUMENTS = "https://mystb.in/documents"
MYSTB_FORMAT = "https://mystb.in/{key}"
async def get_group_help(ctx: CustomContext, group: commands.Group):
async def check(interaction: discord.Interaction) -> None:
return interaction.user.id == ctx.author.id
paginator = Paginator(ctx, embeds=[], timeout=20.0, check=check)
iterable = more_itertools.sliced(tuple(group.commands), 5)
commands_lst__tuples = [command_tuple for command_tuple in iterable]
for command_tuple in commands_lst__tuples:
embed = discord.Embed(
title=f"{group.qualified_name} commands group.",
description="Subcommands:\n",
color=discord.Colour.blurple(),
)
for command in command_tuple:
embed.description += (
f"> {command.name} {command.signature.replace('_', ' ')}\n"
)
embed.set_footer(text="run !help {} <subcommand>".format(group.qualified_name))
paginator.add_embed(embed=embed)
await paginator.run()
def get_divmod(seconds: int):
days, hours = divmod(seconds, 86400)
hours, minutes = divmod(hours, 3600)
minutes, seconds = divmod(minutes, 60)
days, hours, minutes, seconds = (
round(days),
round(hours),
round(minutes),
round(seconds),
)
return days, hours, minutes, seconds
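# Worked example (sketch): 90061 seconds -> (1, 1, 1, 1), i.e. 1 day, 1 hour,
# 1 minute and 1 second:
#
#   days, hours, minutes, seconds = get_divmod(90061)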
async def paste(bot: Bot, text: str):
data = bytes(text, encoding="utf-8")
async with bot.session.post(url=MYSTB_DOCUMENTS, data=data) as response:
if response.status != 200:
raise ProcessError(f"Unexpected error with return status {response.status}")
raw_json = await response.json(content_type=None, encoding="utf-8")
key = raw_json.get("key")
full_link = MYSTB_FORMAT.format(key=key)
return full_link
async def error_handler(ctx: CustomContext, error: commands.CommandError):
bot: Bot = ctx.bot
new_error = getattr(error, "original", error)
embed = discord.Embed(
title=re.sub(
"(?<!^)(?=[A-Z])", " ", str(type(new_error).__name__)
).capitalize(),
color=discord.Colour.red(),
)
if isinstance(error, commands.MissingRequiredArgument):
signature = "{}{} {}".format(
ctx.prefix,
ctx.command.qualified_name,
ctx.command.signature.replace("_", " "),
)
embed.description = f"How to use: `{signature}`"
elif isinstance(error, commands.TooManyArguments):
embed.description = str(error.args[0])
elif isinstance(error, commands.MessageNotFound):
embed.description = f"Could not find message by argument: `{error.argument}`"
elif isinstance(error, commands.MemberNotFound):
embed.description = f"Could not find member by argument: `{error.argument}`"
elif isinstance(error, commands.UserNotFound):
embed.description = f"Could not find user by argument: `{error.argument}`"
elif isinstance(error, commands.ChannelNotFound):
embed.description = f"Could not find channel by argument: `{error.argument}`"
elif isinstance(error, commands.ChannelNotReadable):
embed.description = f"I can't read from {error.argument.name}"
elif isinstance(error, commands.RoleNotFound):
embed.description = f"Could not find role by argument: `{error.argument}`"
elif isinstance(error, commands.EmojiNotFound):
embed.description = error.args[0]
elif isinstance(error, commands.ThreadNotFound):
embed.description = f"Could not find thread by argument: `{error.argument}`"
elif isinstance(error, commands.CommandNotFound):
embed.description = f"Command `{ctx.invoked_with}` not found."
options = difflib.get_close_matches(
word=ctx.invoked_with,
possibilities=[command.name for command in bot.commands],
)
if options:
options_str = ", ".join([f"`{option}`" for option in options])
embed.description += f"\n\n**Close matches:** {options_str}"
elif isinstance(error, commands.MissingPermissions):
permissions_missing = ", ".join(
[f"`{permission}`" for permission in error.missing_permissions]
)
embed.description = permissions_missing
elif isinstance(error, commands.BotMissingPermissions):
permissions_missing = ", ".join(
[f"`{permission}`" for permission in error.missing_permissions]
)
embed.description = permissions_missing
elif isinstance(error, commands.DisabledCommand):
embed.description = f"This command seems to be globally disabled."
elif isinstance(error, commands.BadArgument):
embed.description = str(error.args[0])
elif isinstance(error, commands.BadUnionArgument):
embed.description = str(error.args[0])
elif (
isinstance(error, ProcessError)
or isinstance(new_error, ProcessError)
or type(error) is ProcessError
or type(error).__name__ == "ProcessError"
):
embed.description = str(error.args[0])
elif isinstance(error, commands.CheckFailure):
embed.description = "You have no perms to use this command."
elif isinstance(new_error, discord.NotFound):
embed.description = f"{new_error.text}"
elif isinstance(new_error, discord.Forbidden):
embed.description = f"{new_error.text} Status {new_error.status}"
else:
traceback.print_exception(
type(error), error, error.__traceback__
) # So it won't print the error, optional
async with bot.pool.acquire(timeout=Time.db_time) as conn:
bug_id = await conn.fetch(
"""INSERT INTO bugs (guild_id, user_id, short_error, full_traceback, error_time) VALUES($1, $2, $3, $4, $5) RETURNING bug_id""",
ctx.guild.id,
ctx.author.id,
str(error),
"\n".join(
traceback.format_exception(type(error), error, error.__traceback__)
),
datetime.datetime.utcnow(),
)
bug_id = bug_id[0]["bug_id"]
embed.title = re.sub(
"(?<!^)(?=[A-Z])", " ", str(type(error).__name__)
).capitalize()
embed.description = f"Unknown error. Please report it in [support server]({General.support_guild_invite}).\n**Bug id:** {bug_id}"
with contextlib.suppress(
discord.HTTPException, discord.Forbidden, discord.NotFound
):
await ctx.send(embed=embed)
| from utils.errors import ProcessError
import discord
from discord.ext import commands
from utils.buttons import Paginator
from bot import CustomContext, Bot
import more_itertools
import re
import difflib
import traceback
from utils.constants import Time, General
import datetime
import contextlib
MYSTB_DOCUMENTS = "https://mystb.in/documents"
MYSTB_FORMAT = "https://mystb.in/{key}"
async def get_group_help(ctx: CustomContext, group: commands.Group):
async def check(interaction: discord.Interaction) -> None:
return interaction.user.id == ctx.author.id
paginator = Paginator(ctx, embeds=[], timeout=20.0, check=check)
iterable = more_itertools.sliced(tuple(group.commands), 5)
commands_lst__tuples = [command_tuple for command_tuple in iterable]
for command_tuple in commands_lst__tuples:
embed = discord.Embed(
title=f"{group.qualified_name} commands group.",
description="Subcommands:\n",
color=discord.Colour.blurple(),
)
for command in command_tuple:
embed.description += (
f"> {command.name} {command.signature.replace('_', ' ')}\n"
)
embed.set_footer(text="run !help {} <subcommand>".format(group.qualified_name))
paginator.add_embed(embed=embed)
await paginator.run()
def get_divmod(seconds: int):
days, hours = divmod(seconds, 86400)
hours, minutes = divmod(hours, 3600)
minutes, seconds = divmod(minutes, 60)
days, hours, minutes, seconds = (
round(days),
round(hours),
round(minutes),
round(seconds),
)
return days, hours, minutes, seconds
async def paste(bot: Bot, text: str):
data = bytes(text, encoding="utf-8")
async with bot.session.post(url=MYSTB_DOCUMENTS, data=data) as response:
if response.status != 200:
raise ProcessError(f"Unexpected error with return status {response.status}")
raw_json = await response.json(content_type=None, encoding="utf-8")
key = raw_json.get("key")
full_link = MYSTB_FORMAT.format(key=key)
return full_link
async def error_handler(ctx: CustomContext, error: commands.CommandError):
bot: Bot = ctx.bot
new_error = getattr(error, "original", error)
embed = discord.Embed(
title=re.sub(
"(?<!^)(?=[A-Z])", " ", str(type(new_error).__name__)
).capitalize(),
color=discord.Colour.red(),
)
if isinstance(error, commands.MissingRequiredArgument):
signature = "{}{} {}".format(
ctx.prefix,
ctx.command.qualified_name,
ctx.command.signature.replace("_", " "),
)
embed.description = f"How to use: `{signature}`"
elif isinstance(error, commands.TooManyArguments):
embed.description = str(error.args[0])
elif isinstance(error, commands.MessageNotFound):
embed.description = f"Could not find message by argument: `{error.argument}`"
elif isinstance(error, commands.MemberNotFound):
embed.description = f"Could not find member by argument: `{error.argument}`"
elif isinstance(error, commands.UserNotFound):
embed.description = f"Could not find user by argument: `{error.argument}`"
elif isinstance(error, commands.ChannelNotFound):
embed.description = f"Could not find channel by argument: `{error.argument}`"
elif isinstance(error, commands.ChannelNotReadable):
embed.description = f"I can't read from {error.argument.name}"
elif isinstance(error, commands.RoleNotFound):
embed.description = f"Could not find role by argument: `{error.argument}`"
elif isinstance(error, commands.EmojiNotFound):
embed.description = error.args[0]
elif isinstance(error, commands.ThreadNotFound):
embed.description = f"Could not find thread by argument: `{error.argument}`"
elif isinstance(error, commands.CommandNotFound):
embed.description = f"Command `{ctx.invoked_with}` not found."
options = difflib.get_close_matches(
word=ctx.invoked_with,
possibilities=[command.name for command in bot.commands],
)
if options:
options_str = ", ".join([f"`{option}`" for option in options])
embed.description += f"\n\n**Close matches:** {options_str}"
elif isinstance(error, commands.MissingPermissions):
permissions_missing = ", ".join(
[f"`{permission}`" for permission in error.missing_permissions]
)
embed.description = permissions_missing
elif isinstance(error, commands.BotMissingPermissions):
permissions_missing = ", ".join(
[f"`{permission}`" for permission in error.missing_permissions]
)
embed.description = permissions_missing
elif isinstance(error, commands.DisabledCommand):
embed.description = f"This command seems to be globally disabled."
elif isinstance(error, commands.BadArgument):
embed.description = str(error.args[0])
elif isinstance(error, commands.BadUnionArgument):
embed.description = str(error.args[0])
elif (
isinstance(error, ProcessError)
or isinstance(new_error, ProcessError)
or type(error) is ProcessError
or type(error).__name__ == "ProcessError"
):
embed.description = str(error.args[0])
elif isinstance(error, commands.CheckFailure):
embed.description = "You have no perms to use this command."
elif isinstance(new_error, discord.NotFound):
embed.description = f"{new_error.text}"
elif isinstance(new_error, discord.Forbidden):
embed.description = f"{new_error.text} Status {new_error.status}"
else:
traceback.print_exception(
type(error), error, error.__traceback__
) # So it won't print the error, optional
async with bot.pool.acquire(timeout=Time.db_time) as conn:
bug_id = await conn.fetch(
"""INSERT INTO bugs (guild_id, user_id, short_error, full_traceback, error_time) VALUES($1, $2, $3, $4, $5) RETURNING bug_id""",
ctx.guild.id,
ctx.author.id,
str(error),
"\n".join(
traceback.format_exception(type(error), error, error.__traceback__)
),
datetime.datetime.utcnow(),
)
bug_id = bug_id[0]["bug_id"]
embed.title = re.sub(
"(?<!^)(?=[A-Z])", " ", str(type(error).__name__)
).capitalize()
embed.description = f"Unknown error. Please report it in [support server]({General.support_guild_invite}).\n**Bug id:** {bug_id}"
with contextlib.suppress(
discord.HTTPException, discord.Forbidden, discord.NotFound
):
await ctx.send(embed=embed) | en | 0.427471 | # So it won't print the error, optional INSERT INTO bugs (guild_id, user_id, short_error, full_traceback, error_time) VALUES($1, $2, $3, $4, $5) RETURNING bug_id | 2.347189 | 2 |
data/connections.py | Poweedlou/Feather | 4 | 6630662 | <reponame>Poweedlou/Feather
# DB connection
import sqlalchemy as sa
import sqlalchemy.orm as orm
from sqlalchemy.orm import Session
import sqlalchemy.ext.declarative as dec
Base = dec.declarative_base()
__factory = None
def global_init(db_file):
global __factory
if __factory:
return
if not db_file or not db_file.strip():
        raise Exception("You should give the name of the DB.")
conn_str = f'sqlite:///{db_file.strip()}?check_same_thread=False'
print(f"Connecting to DB at {conn_str}")
engine = sa.create_engine(conn_str, echo=False)
__factory = orm.sessionmaker(bind=engine)
Base.metadata.create_all(engine)
def create_session() -> Session:
global __factory
return __factory()
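# Intended usage (sketch; the SQLite filename is an arbitrary example):
#
#   global_init("db/feather.sqlite")
#   session = create_session()
#   # ... query through the session, then session.close()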
| # DB connection
import sqlalchemy as sa
import sqlalchemy.orm as orm
from sqlalchemy.orm import Session
import sqlalchemy.ext.declarative as dec
Base = dec.declarative_base()
__factory = None
def global_init(db_file):
global __factory
if __factory:
return
if not db_file or not db_file.strip():
        raise Exception("You should give the name of the DB.")
conn_str = f'sqlite:///{db_file.strip()}?check_same_thread=False'
print(f"Connecting to DB at {conn_str}")
engine = sa.create_engine(conn_str, echo=False)
__factory = orm.sessionmaker(bind=engine)
Base.metadata.create_all(engine)
def create_session() -> Session:
global __factory
return __factory() | en | 0.864273 | # DB connection | 2.765667 | 3 |
tests/unittests/test_keyboard.py | epth/ahk | 1 | 6630663 | import sys
import os
project_root = os.path.abspath(os.path.join(os.path.dirname(os.path.abspath(__file__)), '../..'))
sys.path.insert(0, project_root)
from ahk import AHK
from unittest import TestCase
from itertools import product
import time, subprocess
from ahk.keys import KEYS, ALT, CTRL
import threading
class TestKeyboard(TestCase):
def setUp(self):
"""
Record all open windows
:return:
"""
self.ahk = AHK()
self.before_windows = self.ahk.windows()
self.p = subprocess.Popen('notepad')
time.sleep(1)
self.notepad = self.ahk.find_window(title=b'Untitled - Notepad')
def tearDown(self):
self.p.terminate()
time.sleep(0.2)
def test_window_send(self):
self.notepad.send('hello world')
time.sleep(1)
self.assertIn(b'hello world', self.notepad.text)
def test_send(self):
self.notepad.activate()
self.ahk.send('hello world')
assert b'hello world' in self.notepad.text
def test_send_key_mult(self):
self.notepad.send(KEYS.TAB * 4)
time.sleep(0.5)
self.assertEqual(self.notepad.text.count(b'\t'), 4, self.notepad.text)
def test_send_input(self):
self.notepad.activate()
self.ahk.send_input('Hello World')
assert b'Hello World' in self.notepad.text
def test_type(self):
self.notepad.activate()
self.ahk.type('Hello, World!')
assert b'Hello, World!' in self.notepad.text
def a_down():
time.sleep(0.5)
ahk = AHK()
ahk.key_down('a')
def release_a():
time.sleep(0.5)
ahk = AHK()
ahk.key_up('a')
def press_a():
time.sleep(0.5)
ahk = AHK()
ahk.key_press('a')
class TestKeys(TestCase):
def setUp(self):
self.ahk = AHK()
self.thread = None
self.hotkey = None
def tearDown(self):
if self.thread is not None:
self.thread.join(timeout=3)
if self.ahk.key_state('a'):
self.ahk.key_up('a')
        if self.ahk.key_state('Control'):  # query the key state instead of pressing it again
self.ahk.key_up('Control')
notepad = self.ahk.find_window(title=b'Untitled - Notepad')
if notepad:
notepad.close()
if self.hotkey and self.hotkey.running:
self.hotkey.stop()
def test_key_wait_pressed(self):
start = time.time()
self.thread = threading.Thread(target=a_down)
self.thread.start()
self.ahk.key_wait('a', timeout=5)
end = time.time()
assert end - start < 5
def test_key_wait_released(self):
start = time.time()
a_down()
self.thread = threading.Thread(target=release_a)
self.thread.start()
self.ahk.key_wait('a', timeout=2)
def test_key_wait_timeout(self):
self.assertRaises(TimeoutError, self.ahk.key_wait, 'f', timeout=1)
def test_key_state_when_not_pressed(self):
self.assertFalse(self.ahk.key_state('a'))
def test_key_state_pressed(self):
self.ahk.key_down('Control')
self.assertTrue(self.ahk.key_state('Control'))
def test_hotkey(self):
self.hotkey = self.ahk.hotkey(hotkey='a', script='Run Notepad')
self.thread = threading.Thread(target=a_down)
self.thread.start()
self.hotkey.start()
time.sleep(1)
self.assertIsNotNone(self.ahk.find_window(title=b'Untitled - Notepad'))
def test_hotkey_stop(self):
self.hotkey = self.ahk.hotkey(hotkey='a', script='Run Notepad')
self.hotkey.start()
assert self.hotkey.running
self.hotkey.stop()
self.ahk.key_press('a')
self.assertIsNone(self.ahk.find_window(title=b'Untitled - Notepad'))
| import sys
import os
project_root = os.path.abspath(os.path.join(os.path.dirname(os.path.abspath(__file__)), '../..'))
sys.path.insert(0, project_root)
from ahk import AHK
from unittest import TestCase
from itertools import product
import time, subprocess
from ahk.keys import KEYS, ALT, CTRL
import threading
class TestKeyboard(TestCase):
def setUp(self):
"""
Record all open windows
:return:
"""
self.ahk = AHK()
self.before_windows = self.ahk.windows()
self.p = subprocess.Popen('notepad')
time.sleep(1)
self.notepad = self.ahk.find_window(title=b'Untitled - Notepad')
def tearDown(self):
self.p.terminate()
time.sleep(0.2)
def test_window_send(self):
self.notepad.send('hello world')
time.sleep(1)
self.assertIn(b'hello world', self.notepad.text)
def test_send(self):
self.notepad.activate()
self.ahk.send('hello world')
assert b'hello world' in self.notepad.text
def test_send_key_mult(self):
self.notepad.send(KEYS.TAB * 4)
time.sleep(0.5)
self.assertEqual(self.notepad.text.count(b'\t'), 4, self.notepad.text)
def test_send_input(self):
self.notepad.activate()
self.ahk.send_input('Hello World')
assert b'Hello World' in self.notepad.text
def test_type(self):
self.notepad.activate()
self.ahk.type('Hello, World!')
assert b'Hello, World!' in self.notepad.text
def a_down():
time.sleep(0.5)
ahk = AHK()
ahk.key_down('a')
def release_a():
time.sleep(0.5)
ahk = AHK()
ahk.key_up('a')
def press_a():
time.sleep(0.5)
ahk = AHK()
ahk.key_press('a')
class TestKeys(TestCase):
def setUp(self):
self.ahk = AHK()
self.thread = None
self.hotkey = None
def tearDown(self):
if self.thread is not None:
self.thread.join(timeout=3)
if self.ahk.key_state('a'):
self.ahk.key_up('a')
        if self.ahk.key_state('Control'):  # query the key state instead of pressing it again
self.ahk.key_up('Control')
notepad = self.ahk.find_window(title=b'Untitled - Notepad')
if notepad:
notepad.close()
if self.hotkey and self.hotkey.running:
self.hotkey.stop()
def test_key_wait_pressed(self):
start = time.time()
self.thread = threading.Thread(target=a_down)
self.thread.start()
self.ahk.key_wait('a', timeout=5)
end = time.time()
assert end - start < 5
def test_key_wait_released(self):
start = time.time()
a_down()
self.thread = threading.Thread(target=release_a)
self.thread.start()
self.ahk.key_wait('a', timeout=2)
def test_key_wait_timeout(self):
self.assertRaises(TimeoutError, self.ahk.key_wait, 'f', timeout=1)
def test_key_state_when_not_pressed(self):
self.assertFalse(self.ahk.key_state('a'))
def test_key_state_pressed(self):
self.ahk.key_down('Control')
self.assertTrue(self.ahk.key_state('Control'))
def test_hotkey(self):
self.hotkey = self.ahk.hotkey(hotkey='a', script='Run Notepad')
self.thread = threading.Thread(target=a_down)
self.thread.start()
self.hotkey.start()
time.sleep(1)
self.assertIsNotNone(self.ahk.find_window(title=b'Untitled - Notepad'))
def test_hotkey_stop(self):
self.hotkey = self.ahk.hotkey(hotkey='a', script='Run Notepad')
self.hotkey.start()
assert self.hotkey.running
self.hotkey.stop()
self.ahk.key_press('a')
self.assertIsNone(self.ahk.find_window(title=b'Untitled - Notepad'))
| en | 0.429365 | Record all open windows :return: | 2.355768 | 2 |
bmds_server/common/management/commands/load_test_db.py | shapiromatron/bmds-server | 1 | 6630664 | from django.conf import settings
from django.contrib.auth import get_user_model
from django.core.management import call_command
from django.core.management.base import BaseCommand, CommandError
class Command(BaseCommand):
help = """Load the test database from a fixture."""
def add_arguments(self, parser):
parser.add_argument(
"--ifempty",
action="store_true",
dest="ifempty",
help="Only flush/load if database is empty",
)
def handle(self, *args, **options):
if "test" not in settings.DATABASES["default"]["NAME"]:
raise CommandError("Must be using a test database to execute.")
call_command("migrate", verbosity=0)
if options["ifempty"] and get_user_model().objects.count() > 0:
message = "Migrations complete; fixtures not loaded (db not empty)"
else:
call_command("flush", verbosity=0, interactive=False)
call_command("loaddata", str(settings.TEST_DB_FIXTURE), verbosity=1)
message = "Migrations complete; fixture loaded"
self.stdout.write(self.style.SUCCESS(message))
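# Usage sketch (assuming the project's usual manage.py entry point):
#
#   python manage.py load_test_db --ifempty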
| from django.conf import settings
from django.contrib.auth import get_user_model
from django.core.management import call_command
from django.core.management.base import BaseCommand, CommandError
class Command(BaseCommand):
help = """Load the test database from a fixture."""
def add_arguments(self, parser):
parser.add_argument(
"--ifempty",
action="store_true",
dest="ifempty",
help="Only flush/load if database is empty",
)
def handle(self, *args, **options):
if "test" not in settings.DATABASES["default"]["NAME"]:
raise CommandError("Must be using a test database to execute.")
call_command("migrate", verbosity=0)
if options["ifempty"] and get_user_model().objects.count() > 0:
message = "Migrations complete; fixtures not loaded (db not empty)"
else:
call_command("flush", verbosity=0, interactive=False)
call_command("loaddata", str(settings.TEST_DB_FIXTURE), verbosity=1)
message = "Migrations complete; fixture loaded"
self.stdout.write(self.style.SUCCESS(message))
| en | 0.781411 | Load the test database from a fixture. | 2.153766 | 2 |
tests/components/utility_meter/test_config_flow.py | mtarjoianu/core | 30,023 | 6630665 | """Test the Utility Meter config flow."""
from unittest.mock import patch
import pytest
from homeassistant import config_entries
from homeassistant.components.utility_meter.const import DOMAIN
from homeassistant.core import HomeAssistant
from homeassistant.data_entry_flow import RESULT_TYPE_CREATE_ENTRY, RESULT_TYPE_FORM
from tests.common import MockConfigEntry
@pytest.mark.parametrize("platform", ("sensor",))
async def test_config_flow(hass: HomeAssistant, platform) -> None:
"""Test the config flow."""
input_sensor_entity_id = "sensor.input"
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
assert result["type"] == RESULT_TYPE_FORM
assert result["errors"] is None
with patch(
"homeassistant.components.utility_meter.async_setup_entry",
return_value=True,
) as mock_setup_entry:
result = await hass.config_entries.flow.async_configure(
result["flow_id"],
{
"cycle": "monthly",
"name": "Electricity meter",
"offset": 0,
"source": input_sensor_entity_id,
"tariffs": [],
},
)
await hass.async_block_till_done()
assert result["type"] == RESULT_TYPE_CREATE_ENTRY
assert result["title"] == "Electricity meter"
assert result["data"] == {}
assert result["options"] == {
"cycle": "monthly",
"delta_values": False,
"name": "Electricity meter",
"net_consumption": False,
"offset": 0,
"source": input_sensor_entity_id,
"tariffs": [],
}
assert len(mock_setup_entry.mock_calls) == 1
config_entry = hass.config_entries.async_entries(DOMAIN)[0]
assert config_entry.data == {}
assert config_entry.options == {
"cycle": "monthly",
"delta_values": False,
"name": "Electricity meter",
"net_consumption": False,
"offset": 0,
"source": input_sensor_entity_id,
"tariffs": [],
}
assert config_entry.title == "Electricity meter"
async def test_tariffs(hass: HomeAssistant) -> None:
"""Test tariffs."""
input_sensor_entity_id = "sensor.input"
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
assert result["type"] == RESULT_TYPE_FORM
assert result["errors"] is None
result = await hass.config_entries.flow.async_configure(
result["flow_id"],
{
"cycle": "monthly",
"name": "Electricity meter",
"offset": 0,
"source": input_sensor_entity_id,
"tariffs": ["cat", "dog", "horse", "cow"],
},
)
await hass.async_block_till_done()
assert result["type"] == RESULT_TYPE_CREATE_ENTRY
assert result["title"] == "Electricity meter"
assert result["data"] == {}
assert result["options"] == {
"cycle": "monthly",
"delta_values": False,
"name": "Electricity meter",
"net_consumption": False,
"offset": 0,
"source": input_sensor_entity_id,
"tariffs": ["cat", "dog", "horse", "cow"],
}
config_entry = hass.config_entries.async_entries(DOMAIN)[0]
assert config_entry.data == {}
assert config_entry.options == {
"cycle": "monthly",
"delta_values": False,
"name": "Electricity meter",
"net_consumption": False,
"offset": 0,
"source": input_sensor_entity_id,
"tariffs": ["cat", "dog", "horse", "cow"],
}
assert config_entry.title == "Electricity meter"
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
assert result["type"] == RESULT_TYPE_FORM
assert result["errors"] is None
result = await hass.config_entries.flow.async_configure(
result["flow_id"],
{
"cycle": "monthly",
"name": "Electricity meter",
"offset": 0,
"source": input_sensor_entity_id,
"tariffs": ["cat", "cat", "cat", "cat"],
},
)
await hass.async_block_till_done()
assert result["type"] == RESULT_TYPE_FORM
assert result["errors"]["base"] == "tariffs_not_unique"
def get_suggested(schema, key):
"""Get suggested value for key in voluptuous schema."""
for k in schema.keys():
if k == key:
if k.description is None or "suggested_value" not in k.description:
return None
return k.description["suggested_value"]
# Wanted key absent from schema
raise Exception
async def test_options(hass: HomeAssistant) -> None:
"""Test reconfiguring."""
input_sensor1_entity_id = "sensor.input1"
input_sensor2_entity_id = "sensor.input2"
# Setup the config entry
config_entry = MockConfigEntry(
data={},
domain=DOMAIN,
options={
"cycle": "monthly",
"delta_values": False,
"name": "Electricity meter",
"net_consumption": False,
"offset": 0,
"source": input_sensor1_entity_id,
"tariffs": "",
},
title="Electricity meter",
)
config_entry.add_to_hass(hass)
assert await hass.config_entries.async_setup(config_entry.entry_id)
await hass.async_block_till_done()
result = await hass.config_entries.options.async_init(config_entry.entry_id)
assert result["type"] == RESULT_TYPE_FORM
assert result["step_id"] == "init"
schema = result["data_schema"].schema
assert get_suggested(schema, "source") == input_sensor1_entity_id
result = await hass.config_entries.options.async_configure(
result["flow_id"],
user_input={"source": input_sensor2_entity_id},
)
assert result["type"] == RESULT_TYPE_CREATE_ENTRY
assert result["data"] == {
"cycle": "monthly",
"delta_values": False,
"name": "Electricity meter",
"net_consumption": False,
"offset": 0,
"source": input_sensor2_entity_id,
"tariffs": "",
}
assert config_entry.data == {}
assert config_entry.options == {
"cycle": "monthly",
"delta_values": False,
"name": "Electricity meter",
"net_consumption": False,
"offset": 0,
"source": input_sensor2_entity_id,
"tariffs": "",
}
assert config_entry.title == "Electricity meter"
# Check config entry is reloaded with new options
await hass.async_block_till_done()
state = hass.states.get("sensor.electricity_meter")
assert state.attributes["source"] == input_sensor2_entity_id
| """Test the Utility Meter config flow."""
from unittest.mock import patch
import pytest
from homeassistant import config_entries
from homeassistant.components.utility_meter.const import DOMAIN
from homeassistant.core import HomeAssistant
from homeassistant.data_entry_flow import RESULT_TYPE_CREATE_ENTRY, RESULT_TYPE_FORM
from tests.common import MockConfigEntry
@pytest.mark.parametrize("platform", ("sensor",))
async def test_config_flow(hass: HomeAssistant, platform) -> None:
"""Test the config flow."""
input_sensor_entity_id = "sensor.input"
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
assert result["type"] == RESULT_TYPE_FORM
assert result["errors"] is None
with patch(
"homeassistant.components.utility_meter.async_setup_entry",
return_value=True,
) as mock_setup_entry:
result = await hass.config_entries.flow.async_configure(
result["flow_id"],
{
"cycle": "monthly",
"name": "Electricity meter",
"offset": 0,
"source": input_sensor_entity_id,
"tariffs": [],
},
)
await hass.async_block_till_done()
assert result["type"] == RESULT_TYPE_CREATE_ENTRY
assert result["title"] == "Electricity meter"
assert result["data"] == {}
assert result["options"] == {
"cycle": "monthly",
"delta_values": False,
"name": "Electricity meter",
"net_consumption": False,
"offset": 0,
"source": input_sensor_entity_id,
"tariffs": [],
}
assert len(mock_setup_entry.mock_calls) == 1
config_entry = hass.config_entries.async_entries(DOMAIN)[0]
assert config_entry.data == {}
assert config_entry.options == {
"cycle": "monthly",
"delta_values": False,
"name": "Electricity meter",
"net_consumption": False,
"offset": 0,
"source": input_sensor_entity_id,
"tariffs": [],
}
assert config_entry.title == "Electricity meter"
async def test_tariffs(hass: HomeAssistant) -> None:
"""Test tariffs."""
input_sensor_entity_id = "sensor.input"
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
assert result["type"] == RESULT_TYPE_FORM
assert result["errors"] is None
result = await hass.config_entries.flow.async_configure(
result["flow_id"],
{
"cycle": "monthly",
"name": "Electricity meter",
"offset": 0,
"source": input_sensor_entity_id,
"tariffs": ["cat", "dog", "horse", "cow"],
},
)
await hass.async_block_till_done()
assert result["type"] == RESULT_TYPE_CREATE_ENTRY
assert result["title"] == "Electricity meter"
assert result["data"] == {}
assert result["options"] == {
"cycle": "monthly",
"delta_values": False,
"name": "Electricity meter",
"net_consumption": False,
"offset": 0,
"source": input_sensor_entity_id,
"tariffs": ["cat", "dog", "horse", "cow"],
}
config_entry = hass.config_entries.async_entries(DOMAIN)[0]
assert config_entry.data == {}
assert config_entry.options == {
"cycle": "monthly",
"delta_values": False,
"name": "Electricity meter",
"net_consumption": False,
"offset": 0,
"source": input_sensor_entity_id,
"tariffs": ["cat", "dog", "horse", "cow"],
}
assert config_entry.title == "Electricity meter"
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
assert result["type"] == RESULT_TYPE_FORM
assert result["errors"] is None
result = await hass.config_entries.flow.async_configure(
result["flow_id"],
{
"cycle": "monthly",
"name": "Electricity meter",
"offset": 0,
"source": input_sensor_entity_id,
"tariffs": ["cat", "cat", "cat", "cat"],
},
)
await hass.async_block_till_done()
assert result["type"] == RESULT_TYPE_FORM
assert result["errors"]["base"] == "tariffs_not_unique"
def get_suggested(schema, key):
"""Get suggested value for key in voluptuous schema."""
for k in schema.keys():
if k == key:
if k.description is None or "suggested_value" not in k.description:
return None
return k.description["suggested_value"]
# Wanted key absent from schema
raise Exception
async def test_options(hass: HomeAssistant) -> None:
"""Test reconfiguring."""
input_sensor1_entity_id = "sensor.input1"
input_sensor2_entity_id = "sensor.input2"
# Setup the config entry
config_entry = MockConfigEntry(
data={},
domain=DOMAIN,
options={
"cycle": "monthly",
"delta_values": False,
"name": "Electricity meter",
"net_consumption": False,
"offset": 0,
"source": input_sensor1_entity_id,
"tariffs": "",
},
title="Electricity meter",
)
config_entry.add_to_hass(hass)
assert await hass.config_entries.async_setup(config_entry.entry_id)
await hass.async_block_till_done()
result = await hass.config_entries.options.async_init(config_entry.entry_id)
assert result["type"] == RESULT_TYPE_FORM
assert result["step_id"] == "init"
schema = result["data_schema"].schema
assert get_suggested(schema, "source") == input_sensor1_entity_id
result = await hass.config_entries.options.async_configure(
result["flow_id"],
user_input={"source": input_sensor2_entity_id},
)
assert result["type"] == RESULT_TYPE_CREATE_ENTRY
assert result["data"] == {
"cycle": "monthly",
"delta_values": False,
"name": "Electricity meter",
"net_consumption": False,
"offset": 0,
"source": input_sensor2_entity_id,
"tariffs": "",
}
assert config_entry.data == {}
assert config_entry.options == {
"cycle": "monthly",
"delta_values": False,
"name": "Electricity meter",
"net_consumption": False,
"offset": 0,
"source": input_sensor2_entity_id,
"tariffs": "",
}
assert config_entry.title == "Electricity meter"
# Check config entry is reloaded with new options
await hass.async_block_till_done()
state = hass.states.get("sensor.electricity_meter")
assert state.attributes["source"] == input_sensor2_entity_id
| en | 0.715094 | Test the Utility Meter config flow. Test the config flow. Test tariffs. Get suggested value for key in voluptuous schema. # Wanted key absent from schema Test reconfiguring. # Setup the config entry # Check config entry is reloaded with new options | 2.382162 | 2 |
aws_config_policies/aws_config_all_resource_types.py | panther-labs/panther-cli | 4 | 6630666 | from panther_base_helpers import deep_get
def policy(resource):
return bool(deep_get(resource, "RecordingGroup", "AllSupported"))
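# Sketch: for a resource like {"RecordingGroup": {"AllSupported": True}} the policy
# returns True; {"RecordingGroup": {"AllSupported": False}} or a missing key
# returns False.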
| from panther_base_helpers import deep_get
def policy(resource):
return bool(deep_get(resource, "RecordingGroup", "AllSupported"))
| none | 1 | 1.6266 | 2 |
|
model/MTGAT.py | wyu-du/MultiTurnDialogZoo | 145 | 6630667 | <filename>model/MTGAT.py
#!/usr/bin/python3
# Author: GMFTBY
# Time: 2019.9.29
'''
When to talk, control the talk timing
'''
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.init as init
from torch_geometric.nn import GCNConv, GATConv, TopKPooling
from torch_geometric.data import Data, DataLoader # create the graph batch dynamically
from torch_geometric.nn import global_mean_pool as gap, global_max_pool as gmp
import numpy as np
import random
import math
from .layers import *
import ipdb
class Utterance_encoder_mt(nn.Module):
'''
Bidirectional GRU
'''
def __init__(self, input_size, embedding_size,
hidden_size, dropout=0.5, n_layer=1, pretrained=False):
super(Utterance_encoder_mt, self).__init__()
self.embedding_size = embedding_size
self.hidden_size = hidden_size
self.input_size = input_size
self.n_layer = n_layer
self.embed = nn.Embedding(input_size, self.embedding_size)
self.gru = nn.GRU(self.embedding_size, self.hidden_size, num_layers=n_layer,
dropout=dropout, bidirectional=True)
# self.hidden_proj = nn.Linear(n_layer * 2 * self.hidden_size, hidden_size)
# self.bn = nn.BatchNorm1d(num_features=hidden_size)
self.init_weight()
def init_weight(self):
init.xavier_normal_(self.gru.weight_hh_l0)
init.xavier_normal_(self.gru.weight_ih_l0)
self.gru.bias_ih_l0.data.fill_(0.0)
self.gru.bias_hh_l0.data.fill_(0.0)
def forward(self, inpt, lengths, hidden=None):
embedded = self.embed(inpt)
if not hidden:
hidden = torch.randn(self.n_layer * 2, len(lengths), self.hidden_size)
if torch.cuda.is_available():
hidden = hidden.cuda()
embedded = nn.utils.rnn.pack_padded_sequence(embedded, lengths, enforce_sorted=False)
_, hidden = self.gru(embedded, hidden)
hidden = hidden.sum(axis=0)
hidden = torch.tanh(hidden)
# hidden = hidden.permute(1, 0, 2)
# hidden = hidden.reshape(hidden.size(0), -1)
# hidden = self.bn(self.hidden_proj(hidden))
# hidden = torch.tanh(hidden)
return hidden # [batch, hidden]
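# Hedged sketch (not part of the original model): drive the utterance encoder with a
# dummy padded batch to check shapes. Tokens are [seq_len, batch] because the GRU is
# not batch_first, and `lengths` holds the true length of each sequence. All sizes
# below are made up for illustration.
def _utterance_encoder_shape_check():
    vocab_size, embed_size, hidden_size = 1000, 32, 64
    encoder = Utterance_encoder_mt(vocab_size, embed_size, hidden_size)
    seq_len, batch_size = 7, 3
    tokens = torch.randint(0, vocab_size, (seq_len, batch_size))
    lengths = [7, 5, 2]
    hidden = encoder(tokens, lengths)
    # summed bidirectional final states, one vector per utterance in the batch
    assert hidden.shape == (batch_size, hidden_size)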
class GATContext(nn.Module):
'''
    GAT context encoder.
    Note that PyG merges all the subgraphs in a batch into one big graph,
    represented as a sparse block-diagonal adjacency matrix.
    Refer: Mini-batches in
    https://pytorch-geometric.readthedocs.io/en/latest/notes/introduction.html
    Our implementation is a three-layer graph attention network with position embeddings.
========== Make sure the inpt_size == output_size ==========
'''
def __init__(self, inpt_size, output_size, user_embed_size,
posemb_size, dropout=0.5, threshold=2, head=5):
# inpt_size: utter_hidden_size + user_embed_size
super(GATContext, self).__init__()
# utter + user_embed + pos_embed
size = inpt_size + user_embed_size + posemb_size
self.threshold = threshold
# GraphConv
self.conv1 = GATConv(size, inpt_size, heads=head,
dropout=dropout)
self.conv2 = GATConv(size, inpt_size, heads=head,
dropout=dropout)
self.conv3 = GATConv(size, inpt_size, heads=head,
dropout=dropout)
self.layer_norm1 = nn.LayerNorm(inpt_size)
self.layer_norm2 = nn.LayerNorm(inpt_size)
self.layer_norm3 = nn.LayerNorm(inpt_size)
self.layer_norm4 = nn.LayerNorm(inpt_size)
self.compress = nn.Linear(head * inpt_size, inpt_size)
# rnn for background
self.rnn = nn.GRU(inpt_size + user_embed_size, inpt_size, bidirectional=True)
self.linear1 = nn.Linear(inpt_size * 2, inpt_size)
self.linear2 = nn.Linear(inpt_size, output_size)
self.drop = nn.Dropout(p=dropout)
# 100 is far bigger than the max turn lengths (cornell and dailydialog datasets)
self.posemb = nn.Embedding(100, posemb_size)
self.init_weight()
def init_weight(self):
init.xavier_normal_(self.rnn.weight_hh_l0)
init.xavier_normal_(self.rnn.weight_ih_l0)
self.rnn.bias_ih_l0.data.fill_(0.0)
self.rnn.bias_hh_l0.data.fill_(0.0)
def create_batch(self, gbatch, utter_hidden):
'''create one graph batch
:param: gbatch [batch_size, ([2, edge_num], [edge_num])]
:param: utter_hidden [turn_len(node), batch, hidden_size]'''
utter_hidden = utter_hidden.permute(1, 0, 2) # [batch, node, hidden_size]
batch_size = len(utter_hidden)
data_list, weights = [], []
for idx, example in enumerate(gbatch):
edge_index, edge_w = example
edge_index = torch.tensor(edge_index, dtype=torch.long)
edge_w = torch.tensor(edge_w, dtype=torch.float)
data_list.append(Data(x=utter_hidden[idx], edge_index=edge_index))
weights.append(edge_w)
        # this special loader yields exactly one batch
loader = DataLoader(data_list, batch_size=batch_size)
batch = list(loader)
assert len(batch) == 1
batch = batch[0] # one big graph (mini-batch in PyG)
weights = torch.cat(weights)
return batch, weights
def forward(self, gbatch, utter_hidden, ub):
# utter_hidden: [turn_len, batch, inpt_size]
# ub: [turn_len, batch, user_embed_size]
# BiRNN First, rnn_x: [turn, batch, 2 * inpt_size]
# rnnh: [2, batch, hidden_size]
rnn_x, rnnh = self.rnn(torch.cat([utter_hidden, ub], dim=-1))
rnn_x = torch.tanh(self.linear1(rnn_x)) # [turn, batch, inpt_size]
turn_size = utter_hidden.size(0)
rnnh = torch.tanh(rnnh.sum(axis=0)) # [batch, hidden]
if turn_size <= self.threshold:
return rnn_x, rnnh # [turn, batch, inpt_size]
batch, weights = self.create_batch(gbatch, rnn_x)
x, edge_index, batch = batch.x, batch.edge_index, batch.batch
# cat pos_embed: [node, posemb_size]
batch_size = torch.max(batch).item() + 1
# pos
pos = []
for i in range(batch_size):
pos.append(torch.arange(turn_size, dtype=torch.long))
pos = torch.cat(pos)
ub = ub.reshape(-1, ub.size(-1))
# load to GPU
if torch.cuda.is_available():
x = x.cuda()
edge_index = edge_index.cuda()
batch = batch.cuda()
weights = weights.cuda()
pos = pos.cuda() # [node]
pos = self.posemb(pos) # [node, pos_emb]
# relu -> tanh
# [node, pos_emb + inpt_size + user_embed_size]
x = torch.cat([x, pos, ub], dim=1)
# x1 = F.relu(self.bn1(self.conv1(x, edge_index, edge_weight=weights)))
x1 = torch.tanh(self.conv1(x, edge_index))
x1 = torch.tanh(self.compress(x1))
x1 = self.layer_norm1(x1)
x1_ = torch.cat([x1, pos, ub], dim=1)
# x2 = F.relu(self.bn2(self.conv2(x1_, edge_index, edge_weight=weights)))
x2 = torch.tanh(self.conv2(x1_, edge_index))
x2 = torch.tanh(self.compress(x2))
x2 = self.layer_norm2(x2)
x2_ = torch.cat([x2, pos, ub], dim=1)
# x3 = F.relu(self.bn3(self.conv3(x2_, edge_index, edge_weight=weights)))
x3 = torch.tanh(self.conv3(x2_, edge_index))
x3 = torch.tanh(self.compress(x3))
x3 = self.layer_norm3(x3)
# residual for overcoming over-smoothing, [nodes, inpt_size]
# residual -> dropout -> layernorm
x = x1 + x2 + x3
x = self.drop(torch.tanh(x))
# [nodes/turn_len, output_size]
# take apart to get the mini-batch
x = torch.stack(x.chunk(batch_size, dim=0)).permute(1, 0, 2) # [turn, batch, inpt_size]
# x = torch.cat([rnn_x, x], dim=2) # [turn, batch, inpt_size * 2]
# change it into rnn_x + x -> layernorm
x = self.layer_norm4(rnn_x + x)
x = torch.tanh(self.linear2(x)) # [turn, batch, output_size]
return x, rnnh
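# Hedged sketch (illustrative only): how PyG's DataLoader merges per-dialogue graphs
# into one block-diagonal "big graph", which is exactly what create_batch() above
# relies on. Node features and chain-shaped edges are made up for the demonstration.
def _pyg_minibatch_demo():
    graphs = []
    for num_nodes in (3, 4):
        x = torch.randn(num_nodes, 8)                      # one feature row per utterance
        src = torch.arange(num_nodes - 1, dtype=torch.long)
        dst = src + 1
        edge_index = torch.stack([src, dst])               # [2, num_edges], chain 0->1->2...
        graphs.append(Data(x=x, edge_index=edge_index))
    big = next(iter(DataLoader(graphs, batch_size=len(graphs))))
    # big.x stacks all nodes (3 + 4 = 7); big.batch maps each node to its source graph
    assert big.x.size(0) == 7
    assert big.batch.tolist() == [0, 0, 0, 1, 1, 1, 1]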
class Decoder_mt(nn.Module):
def __init__(self, output_size, embed_size, hidden_size, user_embed_size=10,
n_layer=2, dropout=0.5, pretrained=None):
super(Decoder_mt, self).__init__()
self.output_size = output_size
self.hidden_size = hidden_size
self.embed_size = embed_size
self.n_layer = n_layer
self.embed = nn.Embedding(self.output_size, self.embed_size)
self.gru = nn.GRU(self.embed_size + self.hidden_size,
self.hidden_size, num_layers=n_layer,
dropout=(0 if n_layer == 1 else dropout))
self.out = nn.Linear(hidden_size, output_size)
self.attn = Attention(hidden_size)
self.init_weight()
def init_weight(self):
init.xavier_normal_(self.gru.weight_hh_l0)
init.xavier_normal_(self.gru.weight_ih_l0)
self.gru.bias_ih_l0.data.fill_(0.0)
self.gru.bias_hh_l0.data.fill_(0.0)
def forward(self, inpt, last_hidden, gcncontext):
# inpt: [batch_size], last_hidden: [2, batch, hidden_size]
# last_hidden from the encoder (2-layer-BiGRU)
# gcncontext: [turn_len, batch, hidden_size], user_de: [batch, 11]
embedded = self.embed(inpt).unsqueeze(0) # [1, batch_size, embed_size]
key = last_hidden.sum(axis=0) # [batch, hidden]
# attention on the gcncontext
attn_weights = self.attn(key, gcncontext)
context = attn_weights.bmm(gcncontext.transpose(0, 1))
context = context.transpose(0, 1) # [1, batch, hidden]
rnn_inpt = torch.cat([embedded, context], 2) # [1, batch, embed_size + hidden]
output, hidden = self.gru(rnn_inpt, last_hidden)
output = output.squeeze(0) # [batch, hidden_size]
# context = context.squeeze(0) # [batch, hidden]
# output = torch.cat([output, context], 1) # [batch, hidden * 2]
output = self.out(output) # [batch, output_size]
output = F.log_softmax(output, dim=1)
# [batch, output_size], [1, batch, hidden_size]
return output, hidden
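# Hedged sketch of a single greedy decoding step, mirroring how MTGAT.forward below
# calls the decoder. It assumes the Attention module imported from .layers behaves as
# it is used there; all sizes are illustrative.
def _decoder_single_step_demo():
    vocab_size, embed_size, hidden_size, batch_size, turns = 1000, 32, 64, 3, 5
    decoder = Decoder_mt(vocab_size, embed_size, hidden_size)   # n_layer defaults to 2
    prev_token = torch.zeros(batch_size, dtype=torch.long)      # e.g. <sos> ids
    last_hidden = torch.zeros(2, batch_size, hidden_size)       # one slot per GRU layer
    context = torch.randn(turns, batch_size, hidden_size)       # GATContext output
    log_probs, new_hidden = decoder(prev_token, last_hidden, context)
    assert log_probs.shape == (batch_size, vocab_size)
    assert new_hidden.shape == (2, batch_size, hidden_size)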
class MTGAT(nn.Module):
'''
When2Talk model
1. utterance encoder
    2. GAT context encoder
3. (optional) RNN Context encoder
4. Attention RNN decoder
'''
def __init__(self, input_size, output_size, embed_size, utter_hidden_size,
context_hidden_size, decoder_hidden_size, position_embed_size,
teach_force=0.5, pad=0, sos=0, dropout=0.5, user_embed_size=10,
utter_n_layer=1, bn=False, context_threshold=2, heads=5):
super(MTGAT, self).__init__()
self.teach_force = teach_force
self.output_size = output_size
self.pad, self.sos = pad, sos
self.utter_encoder = Utterance_encoder_mt(input_size, embed_size,
utter_hidden_size,
dropout=dropout,
n_layer=utter_n_layer)
self.gcncontext = GATContext(utter_hidden_size,
context_hidden_size,
user_embed_size,
position_embed_size,
dropout=dropout,
threshold=context_threshold,
head=heads)
self.decoder = Decoder_mt(output_size, embed_size,
decoder_hidden_size,
n_layer=utter_n_layer,
dropout=dropout)
# hidden project
self.hidden_proj = nn.Linear(context_hidden_size + user_embed_size,
decoder_hidden_size)
self.hidden_drop = nn.Dropout(p=dropout)
# user embedding, 10
self.user_embed = nn.Embedding(2, user_embed_size)
def forward(self, src, tgt, gbatch, subatch, tubatch, lengths):
'''
        :param: src, [turns, lengths, batch]
:param: tgt, [lengths, batch]
:param: gbatch, [batch, ([2, num_edges], [num_edges])]
:param: subatch, [turn, batch]
:param: tubatch, [batch]
:param: lengths, [turns, batch]
'''
turn_size, batch_size, maxlen = len(src), tgt.size(1), tgt.size(0)
outputs = torch.zeros(maxlen, batch_size, self.output_size)
if torch.cuda.is_available():
outputs = outputs.cuda()
subatch = self.user_embed(subatch) # [turn, batch, 10]
tubatch = self.user_embed(tubatch) # [batch, 10]
tubatch = tubatch.unsqueeze(0).repeat(2, 1, 1) # [2, batch, 10]
# utterance encoding
turns = []
for i in range(turn_size):
hidden = self.utter_encoder(src[i], lengths[i])
turns.append(hidden)
turns = torch.stack(turns) # [turn_len, batch, utter_hidden]
# GCN Context encoder
# context_output: [turn, batch, output_size], rnnh: [batch, hidden]
context_output, rnnh = self.gcncontext(gbatch, turns, subatch)
# context_output = context_output.permute(1, 0, 2) # [turn, batch, hidden]
ghidden = context_output[-1] # [batch, decoder_hidden]
hidden = torch.stack([rnnh, ghidden]) # [2, batch, hidden]
hidden = torch.cat([hidden, tubatch], 2) # [2, batch, hidden+user_embed]
hidden = self.hidden_drop(torch.tanh(self.hidden_proj(hidden))) # [2, batch, hidden]
# decoding step
# hidden = hidden.unsqueeze(0) # [1, batch, hidden_size]
output = tgt[0, :]
use_teacher = random.random() < self.teach_force
if use_teacher:
for t in range(1, maxlen):
output, hidden = self.decoder(output, hidden, context_output)
outputs[t] = output
output = tgt[t].clone().detach()
else:
for t in range(1, maxlen):
output, hidden = self.decoder(output, hidden, context_output)
outputs[t] = output
output = torch.max(output, 1)[1]
# de: [batch], outputs: [maxlen, batch, output_size]
return outputs
def predict(self, src, gbatch, subatch, tubatch, maxlen, lengths, loss=False):
        # similar to the forward function
# src: [turn, maxlen, batch_size], lengths: [turn, batch_size]
# subatch: [turn_len, batch], tubatch: [batch]
# output: [maxlen, batch_size]
with torch.no_grad():
turn_size, batch_size = len(src), src[0].size(1)
outputs = torch.zeros(maxlen, batch_size)
floss = torch.zeros(maxlen, batch_size, self.output_size)
if torch.cuda.is_available():
outputs = outputs.cuda()
floss = floss.cuda()
subatch = self.user_embed(subatch) # [turn, batch, 10]
tubatch = self.user_embed(tubatch) # [batch, 10]
tubatch = tubatch.unsqueeze(0).repeat(2, 1, 1) # [2, batch, 10]
# utterance encoding
turns = []
for i in range(turn_size):
hidden = self.utter_encoder(src[i], lengths[i])
turns.append(hidden)
turns = torch.stack(turns) # [turn, batch, hidden]
# GCN Context encoding
# [batch, turn, hidden]
context_output, rnnh = self.gcncontext(gbatch, turns, subatch)
# context_output = context_output.permute(1, 0, 2) # [turn, batch, hidden]
ghidden = context_output[-1] # [batch, decoder_hidden]
hidden = torch.stack([rnnh, ghidden]) # [2, batch, hidden]
hidden = torch.cat([hidden, tubatch], 2) # [batch, hidden+user_embed]
hidden = self.hidden_drop(torch.tanh(self.hidden_proj(hidden))) # [batch, hidden]
# hidden = hidden.unsqueeze(0) # [1, batch, hidden]
output = torch.zeros(batch_size, dtype=torch.long).fill_(self.sos)
if torch.cuda.is_available():
output = output.cuda()
for i in range(1, maxlen):
output, hidden = self.decoder(output, hidden, context_output)
floss[i] = output
output = output.max(1)[1]
outputs[i] = output
# de: [batch], outputs: [maxlen, batch]
if loss:
return outputs, floss
else:
return outputs
if __name__ == "__main__":
pass
| <filename>model/MTGAT.py
#!/usr/bin/python3
# Author: GMFTBY
# Time: 2019.9.29
'''
When to talk: control the timing of system responses in multi-turn dialogue
'''
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.init as init
from torch_geometric.nn import GCNConv, GATConv, TopKPooling
from torch_geometric.data import Data, DataLoader # create the graph batch dynamically
from torch_geometric.nn import global_mean_pool as gap, global_max_pool as gmp
import numpy as np
import random
import math
from .layers import *
import ipdb
class Utterance_encoder_mt(nn.Module):
'''
Bidirectional GRU
'''
def __init__(self, input_size, embedding_size,
hidden_size, dropout=0.5, n_layer=1, pretrained=False):
super(Utterance_encoder_mt, self).__init__()
self.embedding_size = embedding_size
self.hidden_size = hidden_size
self.input_size = input_size
self.n_layer = n_layer
self.embed = nn.Embedding(input_size, self.embedding_size)
self.gru = nn.GRU(self.embedding_size, self.hidden_size, num_layers=n_layer,
dropout=dropout, bidirectional=True)
# self.hidden_proj = nn.Linear(n_layer * 2 * self.hidden_size, hidden_size)
# self.bn = nn.BatchNorm1d(num_features=hidden_size)
self.init_weight()
def init_weight(self):
init.xavier_normal_(self.gru.weight_hh_l0)
init.xavier_normal_(self.gru.weight_ih_l0)
self.gru.bias_ih_l0.data.fill_(0.0)
self.gru.bias_hh_l0.data.fill_(0.0)
def forward(self, inpt, lengths, hidden=None):
embedded = self.embed(inpt)
if not hidden:
hidden = torch.randn(self.n_layer * 2, len(lengths), self.hidden_size)
if torch.cuda.is_available():
hidden = hidden.cuda()
embedded = nn.utils.rnn.pack_padded_sequence(embedded, lengths, enforce_sorted=False)
_, hidden = self.gru(embedded, hidden)
hidden = hidden.sum(axis=0)
hidden = torch.tanh(hidden)
# hidden = hidden.permute(1, 0, 2)
# hidden = hidden.reshape(hidden.size(0), -1)
# hidden = self.bn(self.hidden_proj(hidden))
# hidden = torch.tanh(hidden)
return hidden # [batch, hidden]
class GATContext(nn.Module):
'''
    GAT context encoder.
    Note that PyG merges all the subgraphs in a batch into one big graph,
    represented as a sparse block-diagonal adjacency matrix.
    Refer: Mini-batches in
    https://pytorch-geometric.readthedocs.io/en/latest/notes/introduction.html
    Our implementation is a three-layer graph attention network with position embeddings.
========== Make sure the inpt_size == output_size ==========
'''
def __init__(self, inpt_size, output_size, user_embed_size,
posemb_size, dropout=0.5, threshold=2, head=5):
# inpt_size: utter_hidden_size + user_embed_size
super(GATContext, self).__init__()
# utter + user_embed + pos_embed
size = inpt_size + user_embed_size + posemb_size
self.threshold = threshold
# GraphConv
self.conv1 = GATConv(size, inpt_size, heads=head,
dropout=dropout)
self.conv2 = GATConv(size, inpt_size, heads=head,
dropout=dropout)
self.conv3 = GATConv(size, inpt_size, heads=head,
dropout=dropout)
self.layer_norm1 = nn.LayerNorm(inpt_size)
self.layer_norm2 = nn.LayerNorm(inpt_size)
self.layer_norm3 = nn.LayerNorm(inpt_size)
self.layer_norm4 = nn.LayerNorm(inpt_size)
self.compress = nn.Linear(head * inpt_size, inpt_size)
# rnn for background
self.rnn = nn.GRU(inpt_size + user_embed_size, inpt_size, bidirectional=True)
self.linear1 = nn.Linear(inpt_size * 2, inpt_size)
self.linear2 = nn.Linear(inpt_size, output_size)
self.drop = nn.Dropout(p=dropout)
# 100 is far bigger than the max turn lengths (cornell and dailydialog datasets)
self.posemb = nn.Embedding(100, posemb_size)
self.init_weight()
def init_weight(self):
init.xavier_normal_(self.rnn.weight_hh_l0)
init.xavier_normal_(self.rnn.weight_ih_l0)
self.rnn.bias_ih_l0.data.fill_(0.0)
self.rnn.bias_hh_l0.data.fill_(0.0)
def create_batch(self, gbatch, utter_hidden):
'''create one graph batch
:param: gbatch [batch_size, ([2, edge_num], [edge_num])]
:param: utter_hidden [turn_len(node), batch, hidden_size]'''
utter_hidden = utter_hidden.permute(1, 0, 2) # [batch, node, hidden_size]
batch_size = len(utter_hidden)
data_list, weights = [], []
for idx, example in enumerate(gbatch):
edge_index, edge_w = example
edge_index = torch.tensor(edge_index, dtype=torch.long)
edge_w = torch.tensor(edge_w, dtype=torch.float)
data_list.append(Data(x=utter_hidden[idx], edge_index=edge_index))
weights.append(edge_w)
        # this special loader yields exactly one batch
loader = DataLoader(data_list, batch_size=batch_size)
batch = list(loader)
assert len(batch) == 1
batch = batch[0] # one big graph (mini-batch in PyG)
weights = torch.cat(weights)
return batch, weights
def forward(self, gbatch, utter_hidden, ub):
# utter_hidden: [turn_len, batch, inpt_size]
# ub: [turn_len, batch, user_embed_size]
# BiRNN First, rnn_x: [turn, batch, 2 * inpt_size]
# rnnh: [2, batch, hidden_size]
rnn_x, rnnh = self.rnn(torch.cat([utter_hidden, ub], dim=-1))
rnn_x = torch.tanh(self.linear1(rnn_x)) # [turn, batch, inpt_size]
turn_size = utter_hidden.size(0)
rnnh = torch.tanh(rnnh.sum(axis=0)) # [batch, hidden]
if turn_size <= self.threshold:
return rnn_x, rnnh # [turn, batch, inpt_size]
batch, weights = self.create_batch(gbatch, rnn_x)
x, edge_index, batch = batch.x, batch.edge_index, batch.batch
# cat pos_embed: [node, posemb_size]
batch_size = torch.max(batch).item() + 1
# pos
pos = []
for i in range(batch_size):
pos.append(torch.arange(turn_size, dtype=torch.long))
pos = torch.cat(pos)
ub = ub.reshape(-1, ub.size(-1))
# load to GPU
if torch.cuda.is_available():
x = x.cuda()
edge_index = edge_index.cuda()
batch = batch.cuda()
weights = weights.cuda()
pos = pos.cuda() # [node]
pos = self.posemb(pos) # [node, pos_emb]
# relu -> tanh
# [node, pos_emb + inpt_size + user_embed_size]
x = torch.cat([x, pos, ub], dim=1)
# x1 = F.relu(self.bn1(self.conv1(x, edge_index, edge_weight=weights)))
x1 = torch.tanh(self.conv1(x, edge_index))
x1 = torch.tanh(self.compress(x1))
x1 = self.layer_norm1(x1)
x1_ = torch.cat([x1, pos, ub], dim=1)
# x2 = F.relu(self.bn2(self.conv2(x1_, edge_index, edge_weight=weights)))
x2 = torch.tanh(self.conv2(x1_, edge_index))
x2 = torch.tanh(self.compress(x2))
x2 = self.layer_norm2(x2)
x2_ = torch.cat([x2, pos, ub], dim=1)
# x3 = F.relu(self.bn3(self.conv3(x2_, edge_index, edge_weight=weights)))
x3 = torch.tanh(self.conv3(x2_, edge_index))
x3 = torch.tanh(self.compress(x3))
x3 = self.layer_norm3(x3)
# residual for overcoming over-smoothing, [nodes, inpt_size]
# residual -> dropout -> layernorm
x = x1 + x2 + x3
x = self.drop(torch.tanh(x))
# [nodes/turn_len, output_size]
# take apart to get the mini-batch
x = torch.stack(x.chunk(batch_size, dim=0)).permute(1, 0, 2) # [turn, batch, inpt_size]
# x = torch.cat([rnn_x, x], dim=2) # [turn, batch, inpt_size * 2]
# change it into rnn_x + x -> layernorm
x = self.layer_norm4(rnn_x + x)
x = torch.tanh(self.linear2(x)) # [turn, batch, output_size]
return x, rnnh
class Decoder_mt(nn.Module):
def __init__(self, output_size, embed_size, hidden_size, user_embed_size=10,
n_layer=2, dropout=0.5, pretrained=None):
super(Decoder_mt, self).__init__()
self.output_size = output_size
self.hidden_size = hidden_size
self.embed_size = embed_size
self.n_layer = n_layer
self.embed = nn.Embedding(self.output_size, self.embed_size)
self.gru = nn.GRU(self.embed_size + self.hidden_size,
self.hidden_size, num_layers=n_layer,
dropout=(0 if n_layer == 1 else dropout))
self.out = nn.Linear(hidden_size, output_size)
self.attn = Attention(hidden_size)
self.init_weight()
def init_weight(self):
init.xavier_normal_(self.gru.weight_hh_l0)
init.xavier_normal_(self.gru.weight_ih_l0)
self.gru.bias_ih_l0.data.fill_(0.0)
self.gru.bias_hh_l0.data.fill_(0.0)
def forward(self, inpt, last_hidden, gcncontext):
# inpt: [batch_size], last_hidden: [2, batch, hidden_size]
# last_hidden from the encoder (2-layer-BiGRU)
# gcncontext: [turn_len, batch, hidden_size], user_de: [batch, 11]
embedded = self.embed(inpt).unsqueeze(0) # [1, batch_size, embed_size]
key = last_hidden.sum(axis=0) # [batch, hidden]
# attention on the gcncontext
attn_weights = self.attn(key, gcncontext)
context = attn_weights.bmm(gcncontext.transpose(0, 1))
context = context.transpose(0, 1) # [1, batch, hidden]
rnn_inpt = torch.cat([embedded, context], 2) # [1, batch, embed_size + hidden]
output, hidden = self.gru(rnn_inpt, last_hidden)
output = output.squeeze(0) # [batch, hidden_size]
# context = context.squeeze(0) # [batch, hidden]
# output = torch.cat([output, context], 1) # [batch, hidden * 2]
output = self.out(output) # [batch, output_size]
output = F.log_softmax(output, dim=1)
# [batch, output_size], [1, batch, hidden_size]
return output, hidden
class MTGAT(nn.Module):
'''
When2Talk model
1. utterance encoder
    2. GAT context encoder
3. (optional) RNN Context encoder
4. Attention RNN decoder
'''
def __init__(self, input_size, output_size, embed_size, utter_hidden_size,
context_hidden_size, decoder_hidden_size, position_embed_size,
teach_force=0.5, pad=0, sos=0, dropout=0.5, user_embed_size=10,
utter_n_layer=1, bn=False, context_threshold=2, heads=5):
super(MTGAT, self).__init__()
self.teach_force = teach_force
self.output_size = output_size
self.pad, self.sos = pad, sos
self.utter_encoder = Utterance_encoder_mt(input_size, embed_size,
utter_hidden_size,
dropout=dropout,
n_layer=utter_n_layer)
self.gcncontext = GATContext(utter_hidden_size,
context_hidden_size,
user_embed_size,
position_embed_size,
dropout=dropout,
threshold=context_threshold,
head=heads)
self.decoder = Decoder_mt(output_size, embed_size,
decoder_hidden_size,
n_layer=utter_n_layer,
dropout=dropout)
# hidden project
self.hidden_proj = nn.Linear(context_hidden_size + user_embed_size,
decoder_hidden_size)
self.hidden_drop = nn.Dropout(p=dropout)
# user embedding, 10
self.user_embed = nn.Embedding(2, user_embed_size)
def forward(self, src, tgt, gbatch, subatch, tubatch, lengths):
'''
        :param: src, [turns, lengths, batch]
:param: tgt, [lengths, batch]
:param: gbatch, [batch, ([2, num_edges], [num_edges])]
:param: subatch, [turn, batch]
:param: tubatch, [batch]
:param: lengths, [turns, batch]
'''
turn_size, batch_size, maxlen = len(src), tgt.size(1), tgt.size(0)
outputs = torch.zeros(maxlen, batch_size, self.output_size)
if torch.cuda.is_available():
outputs = outputs.cuda()
subatch = self.user_embed(subatch) # [turn, batch, 10]
tubatch = self.user_embed(tubatch) # [batch, 10]
tubatch = tubatch.unsqueeze(0).repeat(2, 1, 1) # [2, batch, 10]
# utterance encoding
turns = []
for i in range(turn_size):
hidden = self.utter_encoder(src[i], lengths[i])
turns.append(hidden)
turns = torch.stack(turns) # [turn_len, batch, utter_hidden]
# GCN Context encoder
# context_output: [turn, batch, output_size], rnnh: [batch, hidden]
context_output, rnnh = self.gcncontext(gbatch, turns, subatch)
# context_output = context_output.permute(1, 0, 2) # [turn, batch, hidden]
ghidden = context_output[-1] # [batch, decoder_hidden]
hidden = torch.stack([rnnh, ghidden]) # [2, batch, hidden]
hidden = torch.cat([hidden, tubatch], 2) # [2, batch, hidden+user_embed]
hidden = self.hidden_drop(torch.tanh(self.hidden_proj(hidden))) # [2, batch, hidden]
# decoding step
# hidden = hidden.unsqueeze(0) # [1, batch, hidden_size]
output = tgt[0, :]
use_teacher = random.random() < self.teach_force
if use_teacher:
for t in range(1, maxlen):
output, hidden = self.decoder(output, hidden, context_output)
outputs[t] = output
output = tgt[t].clone().detach()
else:
for t in range(1, maxlen):
output, hidden = self.decoder(output, hidden, context_output)
outputs[t] = output
output = torch.max(output, 1)[1]
# de: [batch], outputs: [maxlen, batch, output_size]
return outputs
def predict(self, src, gbatch, subatch, tubatch, maxlen, lengths, loss=False):
        # similar to the forward function
# src: [turn, maxlen, batch_size], lengths: [turn, batch_size]
# subatch: [turn_len, batch], tubatch: [batch]
# output: [maxlen, batch_size]
with torch.no_grad():
turn_size, batch_size = len(src), src[0].size(1)
outputs = torch.zeros(maxlen, batch_size)
floss = torch.zeros(maxlen, batch_size, self.output_size)
if torch.cuda.is_available():
outputs = outputs.cuda()
floss = floss.cuda()
subatch = self.user_embed(subatch) # [turn, batch, 10]
tubatch = self.user_embed(tubatch) # [batch, 10]
tubatch = tubatch.unsqueeze(0).repeat(2, 1, 1) # [2, batch, 10]
# utterance encoding
turns = []
for i in range(turn_size):
hidden = self.utter_encoder(src[i], lengths[i])
turns.append(hidden)
turns = torch.stack(turns) # [turn, batch, hidden]
# GCN Context encoding
# [batch, turn, hidden]
context_output, rnnh = self.gcncontext(gbatch, turns, subatch)
# context_output = context_output.permute(1, 0, 2) # [turn, batch, hidden]
ghidden = context_output[-1] # [batch, decoder_hidden]
hidden = torch.stack([rnnh, ghidden]) # [2, batch, hidden]
hidden = torch.cat([hidden, tubatch], 2) # [batch, hidden+user_embed]
hidden = self.hidden_drop(torch.tanh(self.hidden_proj(hidden))) # [batch, hidden]
# hidden = hidden.unsqueeze(0) # [1, batch, hidden]
output = torch.zeros(batch_size, dtype=torch.long).fill_(self.sos)
if torch.cuda.is_available():
output = output.cuda()
for i in range(1, maxlen):
output, hidden = self.decoder(output, hidden, context_output)
floss[i] = output
output = output.max(1)[1]
outputs[i] = output
# de: [batch], outputs: [maxlen, batch]
if loss:
return outputs, floss
else:
return outputs
if __name__ == "__main__":
pass
| en | 0.581219 | #!/usr/bin/python3 # Author: GMFTBY # Time: 2019.9.29 When to talk, control the talk timing # create the graph batch dynamically Bidirectional GRU # self.hidden_proj = nn.Linear(n_layer * 2 * self.hidden_size, hidden_size) # self.bn = nn.BatchNorm1d(num_features=hidden_size) # hidden = hidden.permute(1, 0, 2) # hidden = hidden.reshape(hidden.size(0), -1) # hidden = self.bn(self.hidden_proj(hidden)) # hidden = torch.tanh(hidden) # [batch, hidden] GCN Context encoder It should be noticed that PyG merges all the subgraph in the batch into a big graph which is a sparse block diagonal adjacency matrices. Refer: Mini-batches in https://pytorch-geometric.readthedocs.io/en/latest/notes/introduction.html Our implementation is the three layers GCN with the position embedding ========== Make sure the inpt_size == output_size ========== # inpt_size: utter_hidden_size + user_embed_size # utter + user_embed + pos_embed # GraphConv # rnn for background # 100 is far bigger than the max turn lengths (cornell and dailydialog datasets) create one graph batch :param: gbatch [batch_size, ([2, edge_num], [edge_num])] :param: utter_hidden [turn_len(node), batch, hidden_size] # [batch, node, hidden_size] # this special loader only have one batch # one big graph (mini-batch in PyG) # utter_hidden: [turn_len, batch, inpt_size] # ub: [turn_len, batch, user_embed_size] # BiRNN First, rnn_x: [turn, batch, 2 * inpt_size] # rnnh: [2, batch, hidden_size] # [turn, batch, inpt_size] # [batch, hidden] # [turn, batch, inpt_size] # cat pos_embed: [node, posemb_size] # pos # load to GPU # [node] # [node, pos_emb] # relu -> tanh # [node, pos_emb + inpt_size + user_embed_size] # x1 = F.relu(self.bn1(self.conv1(x, edge_index, edge_weight=weights))) # x2 = F.relu(self.bn2(self.conv2(x1_, edge_index, edge_weight=weights))) # x3 = F.relu(self.bn3(self.conv3(x2_, edge_index, edge_weight=weights))) # residual for overcoming over-smoothing, [nodes, inpt_size] # residual -> dropout -> layernorm # [nodes/turn_len, output_size] # take apart to get the mini-batch # [turn, batch, inpt_size] # x = torch.cat([rnn_x, x], dim=2) # [turn, batch, inpt_size * 2] # change it into rnn_x + x -> layernorm # [turn, batch, output_size] # inpt: [batch_size], last_hidden: [2, batch, hidden_size] # last_hidden from the encoder (2-layer-BiGRU) # gcncontext: [turn_len, batch, hidden_size], user_de: [batch, 11] # [1, batch_size, embed_size] # [batch, hidden] # attention on the gcncontext # [1, batch, hidden] # [1, batch, embed_size + hidden] # [batch, hidden_size] # context = context.squeeze(0) # [batch, hidden] # output = torch.cat([output, context], 1) # [batch, hidden * 2] # [batch, output_size] # [batch, output_size], [1, batch, hidden_size] When2Talk model 1. utterance encoder 2. GCN context encoder 3. (optional) RNN Context encoder 4. 
Attention RNN decoder # hidden project # user embedding, 10 :param: src, [turns, lengths, bastch] :param: tgt, [lengths, batch] :param: gbatch, [batch, ([2, num_edges], [num_edges])] :param: subatch, [turn, batch] :param: tubatch, [batch] :param: lengths, [turns, batch] # [turn, batch, 10] # [batch, 10] # [2, batch, 10] # utterance encoding # [turn_len, batch, utter_hidden] # GCN Context encoder # context_output: [turn, batch, output_size], rnnh: [batch, hidden] # context_output = context_output.permute(1, 0, 2) # [turn, batch, hidden] # [batch, decoder_hidden] # [2, batch, hidden] # [2, batch, hidden+user_embed] # [2, batch, hidden] # decoding step # hidden = hidden.unsqueeze(0) # [1, batch, hidden_size] # de: [batch], outputs: [maxlen, batch, output_size] # similar with the forward function # src: [turn, maxlen, batch_size], lengths: [turn, batch_size] # subatch: [turn_len, batch], tubatch: [batch] # output: [maxlen, batch_size] # [turn, batch, 10] # [batch, 10] # [2, batch, 10] # utterance encoding # [turn, batch, hidden] # GCN Context encoding # [batch, turn, hidden] # context_output = context_output.permute(1, 0, 2) # [turn, batch, hidden] # [batch, decoder_hidden] # [2, batch, hidden] # [batch, hidden+user_embed] # [batch, hidden] # hidden = hidden.unsqueeze(0) # [1, batch, hidden] # de: [batch], outputs: [maxlen, batch] | 2.269266 | 2 |
modules/util/reporter_service.py | KTH/aspen | 0 | 6630668 | __author__ = '<EMAIL>'
import logging
import traceback
from modules.util import environment, exceptions, data_defs, error_cache
from modules.util import redis, requests, pipeline_data_utils
def handle_recommendation(pipeline_data, application_name, recommendation_text):
logger = logging.getLogger(__name__)
recommendation_url = environment.get_env(environment.SLACK_RECOMMENDATION_POST_URL)
if recommendation_url:
combined_labels = get_combined_service_labels(pipeline_data)
slack_channels = get_slack_channels(combined_labels)
payload = create_recommedation_object(application_name, recommendation_text, slack_channels)
response = call_with_payload(recommendation_url, payload)
if response:
logger.debug('Response was: "%s"', response)
else:
logger.debug('Slack recommendation integration not enabled, skipping report')
def handle_deployment_success(deployment_json):
logger = logging.getLogger(__name__)
deployment_url = environment.get_env(environment.SLACK_DEPLOYMENT_POST_URL)
if deployment_url:
logger.info(f'{deployment_json["cluster"]}/{deployment_json["applicationName"]} - Reporting successful deployment ')
response = call_with_payload(deployment_url, deployment_json)
if response:
logger.debug('Response was: "%s"', response)
else:
logger.debug('Slack integration not enabled, skipping report')
def handle_deployment_error(error: exceptions.DeploymentError):
logger = logging.getLogger(__name__)
add_here_to_msg = False
if not error.reportable:
logger.debug('Error.reportable is set to False: skipping')
return
cached_error = error_cache.has_cached_error(error)
if cached_error:
if not error_cache.should_be_reported_again(cached_error):
# This error has already been reported
logger.debug('Error has already been reported: skipping')
return
else:
# We are re-reporting an error, make sure someone sees it
add_here_to_msg = True
report_error_to_slack(error, add_here_to_msg)
def report_error_to_slack(error, add_here_to_msg):
logger = logging.getLogger(__name__)
logger.debug('Found new reportable error: reporting to Slack')
combined_labels = get_combined_service_labels(error.pipeline_data)
error_url = environment.get_env(environment.SLACK_ERROR_POST_URL)
if error_url:
error_json = create_error_object(error, combined_labels, add_here_to_msg)
logger.debug('Calling "%s" with "%s"', error_url, error_json)
response = call_with_payload(error_url, error_json)
if response:
logger.debug('Response was: "%s"', response)
error_cache.write_to_error_cache(error)
else:
        logger.warning('Found an error to report, but no SLACK_ERROR_POST_URL was set')
def handle_fatal_error(error: exceptions.DeploymentError):
logger = logging.getLogger(__name__)
logger.debug('Found new reportable error: reporting to Slack')
error_url = environment.get_env(environment.SLACK_ERROR_POST_URL)
if error_url:
error_json = create_error_object(error, None, False)
logger.debug('Calling "%s" with "%s"', error_url, error_json)
response = call_with_payload(error_url, error_json)
if response:
logger.debug('Response was: "%s"', response)
else:
        logger.warning('Found an error to report, but no SLACK_ERROR_POST_URL was set')
def call_with_payload(url, payload):
response = requests.send_put(url, json=payload, timeout=5)
return response
def create_recommedation_object(application_name, recommendation_text, slack_channels):
return {
"message": "*{}*: {}".format(application_name, recommendation_text),
"slackChannels": slack_channels
}
def create_error_object(error, combined_labels, add_here_to_msg):
error_json = {
'message': create_error_message(error, add_here_to_msg),
'slackChannels': None,
'stackTrace': None
}
if hasattr(error, 'expected') and error.expected:
error_json['slackChannels'] = get_slack_channels(combined_labels)
else:
error_json['stackTrace'] = traceback.format_exc().rstrip('\n')
return error_json
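# Hedged sketch: an expected-error payload as built above for a hypothetical failed
# deployment. Field values are illustrative; the Slack endpoint contract is assumed.
_EXAMPLE_ERROR_PAYLOAD = {
    "message": "Error deploying *active/my-app* in step _verify image_ \nimage tag not found",
    "slackChannels": "#team-a",
    "stackTrace": None,
}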
def create_error_message(error, add_here_to_msg):
step, application, cluster = '', '', ''
if hasattr(error, 'step_name') and error.step_name:
step = error.step_name
if hasattr(error, 'pipeline_data') and error.pipeline_data:
if data_defs.APPLICATION_NAME in error.pipeline_data:
application = error.pipeline_data[data_defs.APPLICATION_NAME]
if data_defs.APPLICATION_CLUSTER in error.pipeline_data:
cluster = error.pipeline_data[data_defs.APPLICATION_CLUSTER]
return format_error_message(cluster, application, step, error, add_here_to_msg)
def format_error_message(cluster, application, step, error, add_here_to_msg):
    # Strip backticks and quotes from the error message so they cannot break the
    # Slack formatting (the backtick wrapping below is currently disabled)
error_str = str(error).replace('`', "").replace('\'', "").replace('\"', "")
#error_str = f'```{error_str}```'
    at_here = '<!here> ' if add_here_to_msg else ''
return (f'{at_here}Error deploying *{cluster}/{application}* in step _{step}_ '
f'\n{error_str}')
def get_combined_service_labels(pipeline_data):
labels = {}
for _, service in pipeline_data_utils.get_parsed_services(pipeline_data):
if 'labels' in service:
for label in service['labels']:
if isinstance(label, str) and '=' in label:
name, value = label.split('=', 1)
value = value.strip('"')
                    if name not in labels:
labels[name] = {}
if labels[name]:
labels[name] = f'{labels[name]},{value}'
else:
labels[name] = f'{value}'
    # labels = {'label1': 'value1,value2', ...}
return labels
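# Hedged sketch (illustrative, never called): what the merge above produces for two
# services that both carry a se.kth.slackChannels label. The service dicts mimic the
# structure assumed to come back from pipeline_data_utils.get_parsed_services().
def _combined_labels_example():
    services = [
        {"labels": ['se.kth.slackChannels="#team-a"']},
        {"labels": ['se.kth.slackChannels="#team-b"', "se.kth.importance=high"]},
    ]
    labels = {}
    for service in services:
        for label in service.get("labels", []):
            if isinstance(label, str) and "=" in label:
                name, value = label.split("=", 1)
                value = value.strip('"')
                labels[name] = f"{labels[name]},{value}" if labels.get(name) else value
    # -> {'se.kth.slackChannels': '#team-a,#team-b', 'se.kth.importance': 'high'}
    return labels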
def get_slack_channels(combined_labels):
if 'se.kth.slackChannels' in combined_labels:
return combined_labels['se.kth.slackChannels']
return None
| __author__ = '<EMAIL>'
import logging
import traceback
from modules.util import environment, exceptions, data_defs, error_cache
from modules.util import redis, requests, pipeline_data_utils
def handle_recommendation(pipeline_data, application_name, recommendation_text):
logger = logging.getLogger(__name__)
recommendation_url = environment.get_env(environment.SLACK_RECOMMENDATION_POST_URL)
if recommendation_url:
combined_labels = get_combined_service_labels(pipeline_data)
slack_channels = get_slack_channels(combined_labels)
payload = create_recommedation_object(application_name, recommendation_text, slack_channels)
response = call_with_payload(recommendation_url, payload)
if response:
logger.debug('Response was: "%s"', response)
else:
logger.debug('Slack recommendation integration not enabled, skipping report')
def handle_deployment_success(deployment_json):
logger = logging.getLogger(__name__)
deployment_url = environment.get_env(environment.SLACK_DEPLOYMENT_POST_URL)
if deployment_url:
logger.info(f'{deployment_json["cluster"]}/{deployment_json["applicationName"]} - Reporting successful deployment ')
response = call_with_payload(deployment_url, deployment_json)
if response:
logger.debug('Response was: "%s"', response)
else:
logger.debug('Slack integration not enabled, skipping report')
def handle_deployment_error(error: exceptions.DeploymentError):
logger = logging.getLogger(__name__)
add_here_to_msg = False
if not error.reportable:
logger.debug('Error.reportable is set to False: skipping')
return
cached_error = error_cache.has_cached_error(error)
if cached_error:
if not error_cache.should_be_reported_again(cached_error):
# This error has already been reported
logger.debug('Error has already been reported: skipping')
return
else:
# We are re-reporting an error, make sure someone sees it
add_here_to_msg = True
report_error_to_slack(error, add_here_to_msg)
def report_error_to_slack(error, add_here_to_msg):
logger = logging.getLogger(__name__)
logger.debug('Found new reportable error: reporting to Slack')
combined_labels = get_combined_service_labels(error.pipeline_data)
error_url = environment.get_env(environment.SLACK_ERROR_POST_URL)
if error_url:
error_json = create_error_object(error, combined_labels, add_here_to_msg)
logger.debug('Calling "%s" with "%s"', error_url, error_json)
response = call_with_payload(error_url, error_json)
if response:
logger.debug('Response was: "%s"', response)
error_cache.write_to_error_cache(error)
else:
        logger.warning('Found an error to report, but no SLACK_ERROR_POST_URL was set')
def handle_fatal_error(error: exceptions.DeploymentError):
logger = logging.getLogger(__name__)
logger.debug('Found new reportable error: reporting to Slack')
error_url = environment.get_env(environment.SLACK_ERROR_POST_URL)
if error_url:
error_json = create_error_object(error, None, False)
logger.debug('Calling "%s" with "%s"', error_url, error_json)
response = call_with_payload(error_url, error_json)
if response:
logger.debug('Response was: "%s"', response)
else:
        logger.warning('Found an error to report, but no SLACK_ERROR_POST_URL was set')
def call_with_payload(url, payload):
response = requests.send_put(url, json=payload, timeout=5)
return response
def create_recommedation_object(application_name, recommendation_text, slack_channels):
return {
"message": "*{}*: {}".format(application_name, recommendation_text),
"slackChannels": slack_channels
}
def create_error_object(error, combined_labels, add_here_to_msg):
error_json = {
'message': create_error_message(error, add_here_to_msg),
'slackChannels': None,
'stackTrace': None
}
if hasattr(error, 'expected') and error.expected:
error_json['slackChannels'] = get_slack_channels(combined_labels)
else:
error_json['stackTrace'] = traceback.format_exc().rstrip('\n')
return error_json
def create_error_message(error, add_here_to_msg):
step, application, cluster = '', '', ''
if hasattr(error, 'step_name') and error.step_name:
step = error.step_name
if hasattr(error, 'pipeline_data') and error.pipeline_data:
if data_defs.APPLICATION_NAME in error.pipeline_data:
application = error.pipeline_data[data_defs.APPLICATION_NAME]
if data_defs.APPLICATION_CLUSTER in error.pipeline_data:
cluster = error.pipeline_data[data_defs.APPLICATION_CLUSTER]
return format_error_message(cluster, application, step, error, add_here_to_msg)
def format_error_message(cluster, application, step, error, add_here_to_msg):
    # Strip backticks and quotes from the error message so they cannot break the
    # Slack formatting (the backtick wrapping below is currently disabled)
error_str = str(error).replace('`', "").replace('\'', "").replace('\"', "")
#error_str = f'```{error_str}```'
    at_here = '<!here> ' if add_here_to_msg else ''
return (f'{at_here}Error deploying *{cluster}/{application}* in step _{step}_ '
f'\n{error_str}')
def get_combined_service_labels(pipeline_data):
labels = {}
for _, service in pipeline_data_utils.get_parsed_services(pipeline_data):
if 'labels' in service:
for label in service['labels']:
if isinstance(label, str) and '=' in label:
name, value = label.split('=', 1)
value = value.strip('"')
                    if name not in labels:
labels[name] = {}
if labels[name]:
labels[name] = f'{labels[name]},{value}'
else:
labels[name] = f'{value}'
    # labels = {'label1': 'value1,value2', ...}
return labels
def get_slack_channels(combined_labels):
if 'se.kth.slackChannels' in combined_labels:
return combined_labels['se.kth.slackChannels']
return None
| en | 0.842517 | # This error has already been reported # We are re-reporting an error, make sure someone sees it # Only use backticks for error message if the message itself doesn't already # have any in it #error_str = f'```{error_str}```' # labels = {'label1':'value1','value2',...} | 1.949617 | 2 |
backend/czi_hosted/data_common/matrix_loader.py | danmedani/cellxgene | 1 | 6630669 | <reponame>danmedani/cellxgene<filename>backend/czi_hosted/data_common/matrix_loader.py
from enum import Enum
import threading
import time
from backend.common.utils.data_locator import DataLocator
from backend.common.errors import DatasetAccessError
from contextlib import contextmanager
from http import HTTPStatus
from backend.czi_hosted.data_common.rwlock import RWLock
class MatrixDataCacheItem(object):
"""This class provides access and caching for a dataset. The first time a dataset is accessed, it is
opened and cached. Later accesses use the cached version. It may also be deleted by the
MatrixDataCacheManager to make room for another dataset. While a dataset is actively being used
    (during the lifetime of an api request), a reader lock is held. During that time, the dataset cannot
be removed."""
def __init__(self, loader):
self.loader = loader
self.data_adaptor = None
self.data_lock = RWLock()
def acquire_existing(self):
"""If the data_adaptor exists, take a read lock and return it, else return None"""
self.data_lock.r_acquire()
if self.data_adaptor:
return self.data_adaptor
self.data_lock.r_release()
return None
def acquire_and_open(self, app_config, dataset_config=None):
"""returns the data_adaptor if cached. opens the data_adaptor if not.
        In either case, a reader lock is taken. Must call release when
the data_adaptor is no longer needed"""
self.data_lock.r_acquire()
if self.data_adaptor:
return self.data_adaptor
self.data_lock.r_release()
self.data_lock.w_acquire()
# the data may have been loaded while waiting on the lock
if not self.data_adaptor:
try:
self.loader.pre_load_validation()
self.data_adaptor = self.loader.open(app_config, dataset_config)
except Exception as e:
# necessary to hold the reader lock after an exception, since
# the release will occur when the context exits.
self.data_lock.w_demote()
raise DatasetAccessError(str(e))
# demote the write lock to a read lock.
self.data_lock.w_demote()
return self.data_adaptor
def release(self):
"""Release the reader lock"""
self.data_lock.r_release()
def delete(self):
"""Clear resources used by this dataset"""
with self.data_lock.w_locked():
if self.data_adaptor:
self.data_adaptor.cleanup()
self.data_adaptor = None
def attempt_delete(self):
"""Delete, but only if the write lock can be immediately locked. Return True if the delete happened"""
if self.data_lock.w_acquire_non_blocking():
if self.data_adaptor:
try:
self.data_adaptor.cleanup()
self.data_adaptor = None
except Exception:
# catch all exceptions to ensure the lock is released
pass
self.data_lock.w_release()
return True
else:
return False
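# Hedged sketch (illustrative only): the acquire/release pairing MatrixDataCacheItem
# expects from its callers. The loader and config objects are placeholders supplied by
# the caller; nothing here is invoked by the server itself.
def _cache_item_usage_sketch(loader, app_config, dataset_config=None):
    item = MatrixDataCacheItem(loader)
    data_adaptor = item.acquire_and_open(app_config, dataset_config)  # takes a read lock
    try:
        # ... use data_adaptor only inside this block, while the read lock is held ...
        return type(data_adaptor).__name__
    finally:
        item.release()  # always drop the read lock, even on error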
class MatrixDataCacheInfo(object):
def __init__(self, cache_item, timestamp):
# The MatrixDataCacheItem in the cache
self.cache_item = cache_item
# The last time the cache_item was accessed
self.last_access = timestamp
# The number of times the cache_item was accessed (used for testing)
self.num_access = 1
class MatrixDataCacheManager(object):
"""A class to manage the cached datasets. This is intended to be used as a context manager
    for handling api requests. When the context is created, the data_adaptor is either loaded or
    retrieved from a cache. In either case, the reader lock is taken during this time, and released
    when the context ends. This class currently implements a simple least recently used cache,
which can delete a dataset from the cache to make room for a new one.
This is the intended usage pattern:
    m = MatrixDataCacheManager(max_cached=..., timelimit_s=...)
with m.data_adaptor(location, app_config) as data_adaptor:
# use the data_adaptor for some operation
"""
# FIXME: If the number of active datasets exceeds the max_cached, then each request could
    # lead to a dataset being deleted and a new one being opened: the cache will get thrashed.
# In this case, we may need to send back a 503 (Server Unavailable), or some other error message.
    # NOTE: If the actual dataset is changed (e.g. a new set of datafiles replaces an existing set),
    # the cache will not react to this; however, once the cache time limit is reached, the dataset
# will automatically be refreshed.
def __init__(self, max_cached, timelimit_s=None):
# key is tuple(url_dataroot, location), value is a MatrixDataCacheInfo
self.datasets = {}
# lock to protect the datasets
self.lock = threading.Lock()
# The number of datasets to cache. When max_cached is reached, the least recently used
# cache is replaced with the newly requested one.
# TODO: This is very simple. This can be improved by taking into account how much space is actually
# taken by each dataset, instead of arbitrarily picking a max datasets to cache.
self.max_cached = max_cached
# items are automatically removed from the cache once this time limit is reached
self.timelimit_s = timelimit_s
@contextmanager
def data_adaptor(self, url_dataroot, location, app_config):
        # create a loader for this location if one does not already exist
delete_adaptor = None
data_adaptor = None
cache_item = None
key = (url_dataroot, location)
with self.lock:
self.evict_old_datasets()
info = self.datasets.get(key)
if info is not None:
info.last_access = time.time()
info.num_access += 1
self.datasets[key] = info
data_adaptor = info.cache_item.acquire_existing()
cache_item = info.cache_item
if data_adaptor is None:
while True:
if len(self.datasets) < self.max_cached:
break
items = list(self.datasets.items())
items = sorted(items, key=lambda x: x[1].last_access)
# close the least recently used loader
oldest = items[0]
oldest_cache = oldest[1].cache_item
oldest_key = oldest[0]
del self.datasets[oldest_key]
delete_adaptor = oldest_cache
loader = MatrixDataLoader(location, app_config=app_config)
cache_item = MatrixDataCacheItem(loader)
item = MatrixDataCacheInfo(cache_item, time.time())
self.datasets[key] = item
try:
assert cache_item
if delete_adaptor:
delete_adaptor.delete()
if data_adaptor is None:
dataset_config = app_config.get_dataset_config(url_dataroot)
data_adaptor = cache_item.acquire_and_open(app_config, dataset_config)
yield data_adaptor
except DatasetAccessError:
cache_item.release()
with self.lock:
del self.datasets[key]
cache_item.delete()
cache_item = None
raise
finally:
if cache_item:
cache_item.release()
def evict_old_datasets(self):
# must be called with the lock held
if self.timelimit_s is None:
return
now = time.time()
to_del = []
for key, info in self.datasets.items():
if (now - info.last_access) > self.timelimit_s:
                # remove the dataset if it has been in the cache too long
to_del.append((key, info))
for key, info in to_del:
# try and get the write_lock for the dataset.
# if this returns false, it means the dataset is being used, and should
# not be removed.
if info.cache_item.attempt_delete():
del self.datasets[key]
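# Hedged sketch expanding the "intended usage pattern" from the class docstring above.
# The dataroot tag, S3 path and app_config are placeholders; a real caller gets them
# from the server configuration and the incoming request.
def _cache_manager_usage_sketch(app_config):
    manager = MatrixDataCacheManager(max_cached=5, timelimit_s=60 * 60)
    with manager.data_adaptor("d", "s3://my-bucket/dataset.cxg", app_config) as data_adaptor:
        # the reader lock on the dataset is held for the duration of this block
        return type(data_adaptor).__name__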
class MatrixDataType(Enum):
H5AD = "h5ad"
CXG = "cxg"
UNKNOWN = "unknown"
class MatrixDataLoader(object):
def __init__(self, location, matrix_data_type=None, app_config=None):
""" location can be a string or DataLocator """
region_name = None if app_config is None else app_config.server_config.data_locator__s3__region_name
self.location = DataLocator(location, region_name=region_name)
if not self.location.exists():
raise DatasetAccessError("Dataset does not exist.", HTTPStatus.NOT_FOUND)
# matrix_data_type is an enum value of type MatrixDataType
self.matrix_data_type = matrix_data_type
# matrix_type is a DataAdaptor type, which corresponds to the matrix_data_type
self.matrix_type = None
if matrix_data_type is None:
self.matrix_data_type = self.__matrix_data_type()
if not self.__matrix_data_type_allowed(app_config):
raise DatasetAccessError("Dataset does not have an allowed type.")
if self.matrix_data_type == MatrixDataType.H5AD:
from backend.czi_hosted.data_anndata.anndata_adaptor import AnndataAdaptor
self.matrix_type = AnndataAdaptor
elif self.matrix_data_type == MatrixDataType.CXG:
from backend.czi_hosted.data_cxg.cxg_adaptor import CxgAdaptor
self.matrix_type = CxgAdaptor
def __matrix_data_type(self):
if self.location.path.endswith(".h5ad"):
return MatrixDataType.H5AD
elif ".cxg" in self.location.path:
return MatrixDataType.CXG
else:
return MatrixDataType.UNKNOWN
def __matrix_data_type_allowed(self, app_config):
if self.matrix_data_type == MatrixDataType.UNKNOWN:
return False
if not app_config:
return True
if not app_config.is_multi_dataset():
return True
if len(app_config.server_config.multi_dataset__allowed_matrix_types) == 0:
return True
for val in app_config.server_config.multi_dataset__allowed_matrix_types:
try:
if self.matrix_data_type == MatrixDataType(val):
return True
except ValueError:
# Check case where multi_dataset_allowed_matrix_type does not have a
# valid MatrixDataType value. TODO: Add a feature to check
# the AppConfig for errors on startup
return False
return False
def pre_load_validation(self):
if self.matrix_data_type == MatrixDataType.UNKNOWN:
raise DatasetAccessError("Dataset does not have a recognized type: .h5ad or .cxg")
self.matrix_type.pre_load_validation(self.location)
def file_size(self):
return self.matrix_type.file_size(self.location)
def open(self, app_config, dataset_config=None):
# create and return a DataAdaptor object
return self.matrix_type.open(self.location, app_config, dataset_config)
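# Hedged sketch: direct use of MatrixDataLoader outside the cache, e.g. for a one-off
# validation pass. The path is a placeholder; app_config/dataset_config come from the
# server configuration.
def _loader_usage_sketch(app_config, dataset_config=None):
    loader = MatrixDataLoader("/data/example.h5ad", app_config=app_config)
    loader.pre_load_validation()  # raises DatasetAccessError for unrecognized types
    return loader.open(app_config, dataset_config)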
| from enum import Enum
import threading
import time
from backend.common.utils.data_locator import DataLocator
from backend.common.errors import DatasetAccessError
from contextlib import contextmanager
from http import HTTPStatus
from backend.czi_hosted.data_common.rwlock import RWLock
class MatrixDataCacheItem(object):
"""This class provides access and caching for a dataset. The first time a dataset is accessed, it is
opened and cached. Later accesses use the cached version. It may also be deleted by the
MatrixDataCacheManager to make room for another dataset. While a dataset is actively being used
    (during the lifetime of an api request), a reader lock is held. During that time, the dataset cannot
be removed."""
def __init__(self, loader):
self.loader = loader
self.data_adaptor = None
self.data_lock = RWLock()
def acquire_existing(self):
"""If the data_adaptor exists, take a read lock and return it, else return None"""
self.data_lock.r_acquire()
if self.data_adaptor:
return self.data_adaptor
self.data_lock.r_release()
return None
def acquire_and_open(self, app_config, dataset_config=None):
"""returns the data_adaptor if cached. opens the data_adaptor if not.
        In either case, a reader lock is taken. Must call release when
the data_adaptor is no longer needed"""
self.data_lock.r_acquire()
if self.data_adaptor:
return self.data_adaptor
self.data_lock.r_release()
self.data_lock.w_acquire()
# the data may have been loaded while waiting on the lock
if not self.data_adaptor:
try:
self.loader.pre_load_validation()
self.data_adaptor = self.loader.open(app_config, dataset_config)
except Exception as e:
# necessary to hold the reader lock after an exception, since
# the release will occur when the context exits.
self.data_lock.w_demote()
raise DatasetAccessError(str(e))
# demote the write lock to a read lock.
self.data_lock.w_demote()
return self.data_adaptor
def release(self):
"""Release the reader lock"""
self.data_lock.r_release()
def delete(self):
"""Clear resources used by this dataset"""
with self.data_lock.w_locked():
if self.data_adaptor:
self.data_adaptor.cleanup()
self.data_adaptor = None
def attempt_delete(self):
"""Delete, but only if the write lock can be immediately locked. Return True if the delete happened"""
if self.data_lock.w_acquire_non_blocking():
if self.data_adaptor:
try:
self.data_adaptor.cleanup()
self.data_adaptor = None
except Exception:
# catch all exceptions to ensure the lock is released
pass
self.data_lock.w_release()
return True
else:
return False
class MatrixDataCacheInfo(object):
def __init__(self, cache_item, timestamp):
# The MatrixDataCacheItem in the cache
self.cache_item = cache_item
# The last time the cache_item was accessed
self.last_access = timestamp
# The number of times the cache_item was accessed (used for testing)
self.num_access = 1
class MatrixDataCacheManager(object):
"""A class to manage the cached datasets. This is intended to be used as a context manager
    for handling api requests. When the context is created, the data_adaptor is either loaded or
    retrieved from a cache. In either case, the reader lock is taken during this time, and released
    when the context ends. This class currently implements a simple least recently used cache,
which can delete a dataset from the cache to make room for a new one.
This is the intended usage pattern:
    m = MatrixDataCacheManager(max_cached=..., timelimit_s=...)
with m.data_adaptor(location, app_config) as data_adaptor:
# use the data_adaptor for some operation
"""
# FIXME: If the number of active datasets exceeds the max_cached, then each request could
    # lead to a dataset being deleted and a new one being opened: the cache will get thrashed.
# In this case, we may need to send back a 503 (Server Unavailable), or some other error message.
    # NOTE: If the actual dataset is changed (e.g. a new set of datafiles replaces an existing set),
    # the cache will not react to this; however, once the cache time limit is reached, the dataset
# will automatically be refreshed.
def __init__(self, max_cached, timelimit_s=None):
# key is tuple(url_dataroot, location), value is a MatrixDataCacheInfo
self.datasets = {}
# lock to protect the datasets
self.lock = threading.Lock()
# The number of datasets to cache. When max_cached is reached, the least recently used
# cache is replaced with the newly requested one.
# TODO: This is very simple. This can be improved by taking into account how much space is actually
# taken by each dataset, instead of arbitrarily picking a max datasets to cache.
self.max_cached = max_cached
# items are automatically removed from the cache once this time limit is reached
self.timelimit_s = timelimit_s
@contextmanager
def data_adaptor(self, url_dataroot, location, app_config):
        # create a loader for this location if one does not already exist
delete_adaptor = None
data_adaptor = None
cache_item = None
key = (url_dataroot, location)
with self.lock:
self.evict_old_datasets()
info = self.datasets.get(key)
if info is not None:
info.last_access = time.time()
info.num_access += 1
self.datasets[key] = info
data_adaptor = info.cache_item.acquire_existing()
cache_item = info.cache_item
if data_adaptor is None:
while True:
if len(self.datasets) < self.max_cached:
break
items = list(self.datasets.items())
items = sorted(items, key=lambda x: x[1].last_access)
# close the least recently used loader
oldest = items[0]
oldest_cache = oldest[1].cache_item
oldest_key = oldest[0]
del self.datasets[oldest_key]
delete_adaptor = oldest_cache
loader = MatrixDataLoader(location, app_config=app_config)
cache_item = MatrixDataCacheItem(loader)
item = MatrixDataCacheInfo(cache_item, time.time())
self.datasets[key] = item
try:
assert cache_item
if delete_adaptor:
delete_adaptor.delete()
if data_adaptor is None:
dataset_config = app_config.get_dataset_config(url_dataroot)
data_adaptor = cache_item.acquire_and_open(app_config, dataset_config)
yield data_adaptor
except DatasetAccessError:
cache_item.release()
with self.lock:
del self.datasets[key]
cache_item.delete()
cache_item = None
raise
finally:
if cache_item:
cache_item.release()
def evict_old_datasets(self):
# must be called with the lock held
if self.timelimit_s is None:
return
now = time.time()
to_del = []
for key, info in self.datasets.items():
if (now - info.last_access) > self.timelimit_s:
                # remove the dataset if it has been in the cache too long
to_del.append((key, info))
for key, info in to_del:
# try and get the write_lock for the dataset.
# if this returns false, it means the dataset is being used, and should
# not be removed.
if info.cache_item.attempt_delete():
del self.datasets[key]
class MatrixDataType(Enum):
H5AD = "h5ad"
CXG = "cxg"
UNKNOWN = "unknown"
class MatrixDataLoader(object):
def __init__(self, location, matrix_data_type=None, app_config=None):
""" location can be a string or DataLocator """
region_name = None if app_config is None else app_config.server_config.data_locator__s3__region_name
self.location = DataLocator(location, region_name=region_name)
if not self.location.exists():
raise DatasetAccessError("Dataset does not exist.", HTTPStatus.NOT_FOUND)
# matrix_data_type is an enum value of type MatrixDataType
self.matrix_data_type = matrix_data_type
# matrix_type is a DataAdaptor type, which corresponds to the matrix_data_type
self.matrix_type = None
if matrix_data_type is None:
self.matrix_data_type = self.__matrix_data_type()
if not self.__matrix_data_type_allowed(app_config):
raise DatasetAccessError("Dataset does not have an allowed type.")
if self.matrix_data_type == MatrixDataType.H5AD:
from backend.czi_hosted.data_anndata.anndata_adaptor import AnndataAdaptor
self.matrix_type = AnndataAdaptor
elif self.matrix_data_type == MatrixDataType.CXG:
from backend.czi_hosted.data_cxg.cxg_adaptor import CxgAdaptor
self.matrix_type = CxgAdaptor
def __matrix_data_type(self):
if self.location.path.endswith(".h5ad"):
return MatrixDataType.H5AD
elif ".cxg" in self.location.path:
return MatrixDataType.CXG
else:
return MatrixDataType.UNKNOWN
def __matrix_data_type_allowed(self, app_config):
if self.matrix_data_type == MatrixDataType.UNKNOWN:
return False
if not app_config:
return True
if not app_config.is_multi_dataset():
return True
if len(app_config.server_config.multi_dataset__allowed_matrix_types) == 0:
return True
for val in app_config.server_config.multi_dataset__allowed_matrix_types:
try:
if self.matrix_data_type == MatrixDataType(val):
return True
except ValueError:
# Check the case where multi_dataset__allowed_matrix_types does not hold a
# valid MatrixDataType value. TODO: Add a feature to check
# the AppConfig for errors on startup
return False
return False
def pre_load_validation(self):
if self.matrix_data_type == MatrixDataType.UNKNOWN:
raise DatasetAccessError("Dataset does not have a recognized type: .h5ad or .cxg")
self.matrix_type.pre_load_validation(self.location)
def file_size(self):
return self.matrix_type.file_size(self.location)
def open(self, app_config, dataset_config=None):
# create and return a DataAdaptor object
return self.matrix_type.open(self.location, app_config, dataset_config) | en | 0.888017 | This class provides access and caching for a dataset. The first time a dataset is accessed, it is opened and cached. Later accesses use the cached version. It may also be deleted by the MatrixDataCacheManager to make room for another dataset. While a dataset is actively being used (during the lifetime of a api request), a reader lock is locked. During that time, the dataset cannot be removed. If the data_adaptor exists, take a read lock and return it, else return None returns the data_adaptor if cached. opens the data_adaptor if not. In either case, the a reader lock is taken. Must call release when the data_adaptor is no longer needed # the data may have been loaded while waiting on the lock # necessary to hold the reader lock after an exception, since # the release will occur when the context exits. # demote the write lock to a read lock. Release the reader lock Clear resources used by this dataset Delete, but only if the write lock can be immediately locked. Return True if the delete happened # catch all exceptions to ensure the lock is released # The MatrixDataCacheItem in the cache # The last time the cache_item was accessed # The number of times the cache_item was accessed (used for testing) A class to manage the cached datasets. This is intended to be used as a context manager for handling api requests. When the context is created, the data_adator is either loaded or retrieved from a cache. In either case, the reader lock is taken during this time, and release when the context ends. This class currently implements a simple least recently used cache, which can delete a dataset from the cache to make room for a new one. This is the intended usage pattern: m = MatrixDataCacheManager(max_cached=..., timelimmit_s = ...) with m.data_adaptor(location, app_config) as data_adaptor: # use the data_adaptor for some operation # FIXME: If the number of active datasets exceeds the max_cached, then each request could # lead to a dataset being deleted and a new only being opened: the cache will get thrashed. # In this case, we may need to send back a 503 (Server Unavailable), or some other error message. # NOTE: If the actual dataset is changed. E.g. a new set of datafiles replaces an existing set, # then the cache will not react to this, however once the cache time limit is reached, the dataset # will automatically be refreshed. # key is tuple(url_dataroot, location), value is a MatrixDataCacheInfo # lock to protect the datasets # The number of datasets to cache. When max_cached is reached, the least recently used # cache is replaced with the newly requested one. # TODO: This is very simple. This can be improved by taking into account how much space is actually # taken by each dataset, instead of arbitrarily picking a max datasets to cache. # items are automatically removed from the cache once this time limit is reached # create a loader for to this location if it does not already exist # close the least recently used loader # must be called with the lock held # remove the data_cache when if it has been in the cache too long # try and get the write_lock for the dataset. # if this returns false, it means the dataset is being used, and should # not be removed. 
location can be a string or DataLocator # matrix_data_type is an enum value of type MatrixDataType # matrix_type is a DataAdaptor type, which corresponds to the matrix_data_type # Check case where multi_dataset_allowed_matrix_type does not have a # valid MatrixDataType value. TODO: Add a feature to check # the AppConfig for errors on startup # create and return a DataAdaptor object | 2.416244 | 2 |
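The comments in the record above describe the caching policy in plain terms: at most max_cached datasets are kept, the least recently used entry is evicted to make room for a new one, and entries older than timelimit_s are dropped on access. Below is a minimal, standalone sketch of that eviction policy only, written for illustration; it is not the class above and deliberately omits the reader/writer locking and the data adaptors.

# Minimal, standalone sketch (illustration only) of an LRU cache bounded by
# max_cached entries with an optional per-entry time limit.
import time
import threading


class SimpleLruTtlCache:
    def __init__(self, max_cached, timelimit_s=None):
        self.max_cached = max_cached
        self.timelimit_s = timelimit_s
        self.items = {}          # key -> (value, last_access)
        self.lock = threading.Lock()

    def get_or_create(self, key, factory):
        with self.lock:
            now = time.time()
            # drop entries that exceeded the time limit
            if self.timelimit_s is not None:
                expired = [k for k, (_, t) in self.items.items() if now - t > self.timelimit_s]
                for k in expired:
                    del self.items[k]
            if key in self.items:
                value, _ = self.items[key]
                self.items[key] = (value, now)      # refresh last access time
                return value
            # make room by evicting the least recently used entry
            while len(self.items) >= self.max_cached:
                oldest = min(self.items, key=lambda k: self.items[k][1])
                del self.items[oldest]
            value = factory()
            self.items[key] = (value, now)
            return value


# usage: cache = SimpleLruTtlCache(max_cached=5, timelimit_s=3600)
#        adaptor = cache.get_or_create(("e/", "dataset.cxg"), lambda: object())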
bcs-ui/backend/tests/components/test_permissions.py | laodiu/bk-bcs | 599 | 6630670 | # -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making 蓝鲸智云PaaS平台社区版 (BlueKing PaaS Community
Edition) available.
Copyright (C) 2017-2021 THL A29 Limited, a Tencent company. All rights reserved.
Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://opensource.org/licenses/MIT
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
"""
import pytest
from iam import OP
from backend.iam.legacy_perms import ProjectPermission
test_dict_filter_data = [
({'op': OP.IN, 'value': [2, 1], 'field': 'project.id'}, {'project_id_list': [1, 2], 'op': OP.IN}),
({'op': OP.EQ, 'value': 1, 'field': 'project.id'}, {'project_id_list': [1], 'op': OP.IN}),
({'op': OP.ANY, 'value': [], 'field': 'project.id'}, {'project_id_list': [], 'op': OP.ANY}),
(
{
'op': OP.OR,
'content': [
{'op': OP.IN, 'field': 'project.id', 'value': [2, 1, 5]},
{'op': OP.ANY, 'field': 'project.id', 'value': []},
{'op': OP.EQ, 'field': 'project.id', 'value': 3},
{'op': OP.IN, 'field': 'project.id', 'value': [4]},
],
},
{'project_id_list': [], 'op': OP.ANY},
),
(
{
'op': OP.OR,
'content': [
{'op': OP.IN, 'field': 'project.id', 'value': [2, 1, 5]},
{'op': OP.EQ, 'field': 'project.id', 'value': 3},
{'op': OP.IN, 'field': 'fake_project.id', 'value': [4, 6]},
],
},
{'project_id_list': [1, 2, 3, 5], 'op': OP.IN},
),
]
class TestProjectPermission:
@pytest.mark.parametrize('policies, expected_dict_filter', test_dict_filter_data)
def test_make_dict_filter(self, policies, expected_dict_filter):
project_perm = ProjectPermission()
dict_filter = project_perm._make_dict_filter(policies)
assert sorted(dict_filter['project_id_list']) == sorted(expected_dict_filter['project_id_list'])
assert dict_filter['op'] == expected_dict_filter['op']
| # -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making 蓝鲸智云PaaS平台社区版 (BlueKing PaaS Community
Edition) available.
Copyright (C) 2017-2021 THL A29 Limited, a Tencent company. All rights reserved.
Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://opensource.org/licenses/MIT
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
"""
import pytest
from iam import OP
from backend.iam.legacy_perms import ProjectPermission
test_dict_filter_data = [
({'op': OP.IN, 'value': [2, 1], 'field': 'project.id'}, {'project_id_list': [1, 2], 'op': OP.IN}),
({'op': OP.EQ, 'value': 1, 'field': 'project.id'}, {'project_id_list': [1], 'op': OP.IN}),
({'op': OP.ANY, 'value': [], 'field': 'project.id'}, {'project_id_list': [], 'op': OP.ANY}),
(
{
'op': OP.OR,
'content': [
{'op': OP.IN, 'field': 'project.id', 'value': [2, 1, 5]},
{'op': OP.ANY, 'field': 'project.id', 'value': []},
{'op': OP.EQ, 'field': 'project.id', 'value': 3},
{'op': OP.IN, 'field': 'project.id', 'value': [4]},
],
},
{'project_id_list': [], 'op': OP.ANY},
),
(
{
'op': OP.OR,
'content': [
{'op': OP.IN, 'field': 'project.id', 'value': [2, 1, 5]},
{'op': OP.EQ, 'field': 'project.id', 'value': 3},
{'op': OP.IN, 'field': 'fake_project.id', 'value': [4, 6]},
],
},
{'project_id_list': [1, 2, 3, 5], 'op': OP.IN},
),
]
class TestProjectPermission:
@pytest.mark.parametrize('policies, expected_dict_filter', test_dict_filter_data)
def test_make_dict_filter(self, policies, expected_dict_filter):
project_perm = ProjectPermission()
dict_filter = project_perm._make_dict_filter(policies)
assert sorted(dict_filter['project_id_list']) == sorted(expected_dict_filter['project_id_list'])
assert dict_filter['op'] == expected_dict_filter['op']
| en | 0.863828 | # -*- coding: utf-8 -*- Tencent is pleased to support the open source community by making 蓝鲸智云PaaS平台社区版 (BlueKing PaaS Community Edition) available. Copyright (C) 2017-2021 THL A29 Limited, a Tencent company. All rights reserved. Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://opensource.org/licenses/MIT Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. | 1.940662 | 2 |
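The parametrized cases above pin down how the IAM policy expressions are flattened: EQ becomes a one-element IN list, ANY short-circuits to an empty list with OP.ANY, and OR unions the ids of project.id sub-policies while ignoring other resource fields. The sketch below is one conversion that satisfies exactly these cases; the real implementation is ProjectPermission._make_dict_filter in backend.iam.legacy_perms and is not reproduced here.

# Hedged sketch of a policy-to-dict-filter conversion matching the test cases
# above; illustration only, not the backend.iam.legacy_perms implementation.
from iam import OP


def make_dict_filter(policies, field='project.id'):
    op = policies['op']
    if op == OP.ANY:
        return {'project_id_list': [], 'op': OP.ANY}
    if op == OP.EQ:
        return {'project_id_list': [policies['value']], 'op': OP.IN}
    if op == OP.IN:
        return {'project_id_list': list(policies['value']), 'op': OP.IN}
    if op == OP.OR:
        ids = set()
        for sub in policies['content']:
            if sub.get('field') != field:
                continue                      # ignore other resource fields
            if sub['op'] == OP.ANY:
                return {'project_id_list': [], 'op': OP.ANY}
            ids.update(make_dict_filter(sub, field)['project_id_list'])
        return {'project_id_list': sorted(ids), 'op': OP.IN}
    return {'project_id_list': [], 'op': OP.IN}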
kms.py | alvarodelvalle/devops-scripts | 0 | 6630671 | import csv
import re
from botocore.exceptions import ClientError
import boto
def get_keys_from_file(file):
"""
Reads a file and creates a list of dictionaries.
:param file: Filename relative to project root.
:return: lkeys - a list of dictionaries
[{
Key: '00484545-2000-4111-9000-611111111111',
Region: 'us-east-1'
},
{
Key: '00484545-2000-4111-9000-622222222222',
Region: 'us-east-1'
}]
"""
lkeys = []
regex = '[0-9A-Za-z]{8}-[0-9A-Za-z]{4}-4[0-9A-Za-z]{3}-[89ABab][0-9A-Za-z]{3}-[0-9A-Za-z]{12}'
with open(file, newline='') as csvfile:
reader = csv.reader(csvfile, delimiter=' ', quotechar='|')
for row in reader:
if len(row) > 3 and re.search(regex, row[2]):
lkeys.append({'Key': row[2], 'Region': row[0]})
return lkeys
class Kms:
def __init__(self):
self.client = boto.Conn('kms', 'client').connection()
def bulk_rotate_keys(self, keys):
"""
Takes a list of dictionaries and updates keys to rotate every year
:param keys: A list of KMS keys with respective region
:return: None
"""
for k in keys:
region = k.get('Region').replace(':', '')
if self.client.meta.region_name != region:
self.client = boto.Conn('kms', 'client', region).connection()
try:
r = self.client.enable_key_rotation(
KeyId=k.get('Key')
)
print(f'Response: {r}')
except ClientError as e:
print(e)
def main():
cmk_keys = get_keys_from_file('tractmanager-mt2-prod.csv')
k = Kms()
try:
k.bulk_rotate_keys(cmk_keys)
except SystemExit as e:
print(e)
if __name__ == "__main__":
main()
| import csv
import re
from botocore.exceptions import ClientError
import boto
def get_keys_from_file(file):
"""
Reads a file and creates a list of dictionaries.
:param file: Filename relative to project root.
:return: lkeys - a list of dictionaries
[{
Key: '00484545-2000-4111-9000-611111111111',
Region: 'us-east-1'
},
{
Key: '00484545-2000-4111-9000-622222222222',
Region: 'us-east-1'
}]
"""
lkeys = []
regex = '[0-9A-Za-z]{8}-[0-9A-Za-z]{4}-4[0-9A-Za-z]{3}-[89ABab][0-9A-Za-z]{3}-[0-9A-Za-z]{12}'
with open(file, newline='') as csvfile:
reader = csv.reader(csvfile, delimiter=' ', quotechar='|')
for row in reader:
if len(row) > 3 and re.search(regex, row[2]):
lkeys.append({'Key': row[2], 'Region': row[0]})
return lkeys
class Kms:
def __init__(self):
self.client = boto.Conn('kms', 'client').connection()
def bulk_rotate_keys(self, keys):
"""
Takes a list of dictionaries and updates keys to rotate every year
:param keys: A list of KMS keys with respective region
:return: None
"""
for k in keys:
region = k.get('Region').replace(':', '')
if self.client.meta.region_name != region:
self.client = boto.Conn('kms', 'client', region).connection()
try:
r = self.client.enable_key_rotation(
KeyId=k.get('Key')
)
print(f'Response: {r}')
except ClientError as e:
print(e)
def main():
cmk_keys = get_keys_from_file('tractmanager-mt2-prod.csv')
k = Kms()
try:
k.bulk_rotate_keys(cmk_keys)
except SystemExit as e:
print(e)
if __name__ == "__main__":
main()
| en | 0.613134 | Reads a file and creates a list of dictionaries. :param file: Filename relative to project root. :return: lkeys - a list of dictionaries [{ Key: '00484545-2000-4111-9000-611111111111', Region: 'us-east-1' }, { Key: '00484545-2000-4111-9000-622222222222', Region: 'us-east-1' }] Takes a list dictionary and updates keys to rotate every year :param keys: A list of KMS keys with respective region :return: None | 3.029784 | 3 |
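As a quick illustration of the row filter used in get_keys_from_file in the record above: a row is kept when it has more than three space-separated fields and its third field matches the version-4 UUID pattern, and the first field is recorded as the region. The sample row below is invented for illustration; the real CSV layout is not shown in that file.

# Illustration of the row filter in get_keys_from_file (sample row is made up).
import re

UUID4_RE = '[0-9A-Za-z]{8}-[0-9A-Za-z]{4}-4[0-9A-Za-z]{3}-[89ABab][0-9A-Za-z]{3}-[0-9A-Za-z]{12}'

sample_row = ['us-east-1:', 'cmk', '00484545-2000-4111-9000-611111111111', 'Enabled']
if len(sample_row) > 3 and re.search(UUID4_RE, sample_row[2]):
    entry = {'Key': sample_row[2], 'Region': sample_row[0]}
    print(entry)   # {'Key': '00484545-2000-4111-9000-611111111111', 'Region': 'us-east-1:'}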
cvxpy/reductions/solvers/constant_solver.py | rpradal/cvxpy | 0 | 6630672 | <reponame>rpradal/cvxpy<gh_stars>0
from cvxpy.reductions.solution import Solution
from cvxpy.reductions.solvers.solver import Solver
import cvxpy.settings as s
class ConstantSolver(Solver):
"""TODO(akshayka): Documentation."""
# Solver capabilities
MIP_CAPABLE = True
def accepts(self, problem) -> bool:
return len(problem.variables()) == 0
def apply(self, problem):
return problem, []
def invert(self, solution, inverse_data):
return solution
def name(self) -> str:
return "CONSTANT_SOLVER"
def import_solver(self) -> None:
return
def is_installed(self) -> bool:
return True
def solve_via_data(self, data, warm_start, verbose, solver_opts, solver_cache=None):
return self.solve(data, warm_start, verbose, solver_opts)
def solve(self, problem, warm_start, verbose, solver_opts):
if all(c.value for c in problem.constraints):
return Solution(s.OPTIMAL, problem.objective.value, {}, {}, {})
else:
return Solution(s.INFEASIBLE, None, {}, {}, {})
| from cvxpy.reductions.solution import Solution
from cvxpy.reductions.solvers.solver import Solver
import cvxpy.settings as s
class ConstantSolver(Solver):
"""TODO(akshayka): Documentation."""
# Solver capabilities
MIP_CAPABLE = True
def accepts(self, problem) -> bool:
return len(problem.variables()) == 0
def apply(self, problem):
return problem, []
def invert(self, solution, inverse_data):
return solution
def name(self) -> str:
return "CONSTANT_SOLVER"
def import_solver(self) -> None:
return
def is_installed(self) -> bool:
return True
def solve_via_data(self, data, warm_start, verbose, solver_opts, solver_cache=None):
return self.solve(data, warm_start, verbose, solver_opts)
def solve(self, problem, warm_start, verbose, solver_opts):
if all(c.value for c in problem.constraints):
return Solution(s.OPTIMAL, problem.objective.value, {}, {}, {})
else:
return Solution(s.INFEASIBLE, None, {}, {}, {}) | en | 0.581284 | TODO(akshayka): Documentation. # Solver capabilities | 2.406295 | 2 |
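To make the ConstantSolver semantics above concrete, here is a rough, self-contained illustration using stand-in problem and constraint objects rather than real cvxpy expressions: the solver accepts only problems with zero variables, and solve() reports OPTIMAL exactly when every constraint's value is truthy.

# Stand-in objects (not real cvxpy classes) used only to illustrate the
# accepts()/solve() behaviour of ConstantSolver shown above.
class StubObjective:
    def __init__(self, value):
        self.value = value


class StubConstraint:
    def __init__(self, value):
        self.value = value


class StubProblem:
    def __init__(self, objective_value, constraint_values):
        self.objective = StubObjective(objective_value)
        self.constraints = [StubConstraint(v) for v in constraint_values]

    def variables(self):
        return []


feasible = StubProblem(3.0, [True, True])
infeasible = StubProblem(3.0, [True, False])
# ConstantSolver().accepts(feasible) -> True (no variables)
# ConstantSolver().solve(feasible, None, False, {}).status   -> s.OPTIMAL
# ConstantSolver().solve(infeasible, None, False, {}).status -> s.INFEASIBLE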
oppgavesett_1/1_2/python/main.py | Secretmud/School_work | 0 | 6630673 | from math import pi
print("Enter the radius of the circle")
radius = float(input())
circumference = 2*pi*radius
print("Circumference", circumference)
| from math import pi
print("Enter the radius of the circle")
radius = float(input())
circumference = 2*pi*radius
print("Circumference", circumference)
| none | 1 | 4.274862 | 4 |
|
whoville/cloudbreak/apis/v1recipes_api.py | mikchaos/whoville | 0 | 6630674 | <gh_stars>0
# coding: utf-8
"""
Cloudbreak API
Cloudbreak is a powerful left surf that breaks over a coral reef, a mile southwest of the island of Tavarua, Fiji. Cloudbreak is a cloud agnostic Hadoop as a Service API. It abstracts the provisioning and eases the management and monitoring of on-demand clusters. SequenceIQ's Cloudbreak is a RESTful application development platform with the goal of helping developers to build solutions for deploying Hadoop YARN clusters in different environments. Once it is deployed in your favourite servlet container it exposes a REST API allowing you to spin up Hadoop clusters of arbitrary sizes on different cloud providers. Provisioning Hadoop has never been easier. Cloudbreak is built on the foundation of cloud providers API (Amazon AWS, Microsoft Azure, Google Cloud Platform, Openstack), Apache Ambari, Docker lightweight containers, Swarm and Consul. For further product documentation follow the link: <a href=\"http://hortonworks.com/apache/cloudbreak/\">http://hortonworks.com/apache/cloudbreak/</a>
OpenAPI spec version: 2.7.1
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import sys
import os
import re
# python 2 and python 3 compatibility library
from six import iteritems
from ..configuration import Configuration
from ..api_client import ApiClient
class V1recipesApi(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
Ref: https://github.com/swagger-api/swagger-codegen
"""
def __init__(self, api_client=None):
config = Configuration()
if api_client:
self.api_client = api_client
else:
if not config.api_client:
config.api_client = ApiClient()
self.api_client = config.api_client
def delete_private_recipe(self, name, **kwargs):
"""
delete private recipe by name
Recipes are basically script extensions to a cluster that run on a set of nodes before or after the Ambari cluster installation.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.delete_private_recipe(name, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str name: (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.delete_private_recipe_with_http_info(name, **kwargs)
else:
(data) = self.delete_private_recipe_with_http_info(name, **kwargs)
return data
def delete_private_recipe_with_http_info(self, name, **kwargs):
"""
delete private recipe by name
Recipes are basically script extensions to a cluster that run on a set of nodes before or after the Ambari cluster installation.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.delete_private_recipe_with_http_info(name, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str name: (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['name']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method delete_private_recipe" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'name' is set
if ('name' not in params) or (params['name'] is None):
raise ValueError("Missing the required parameter `name` when calling `delete_private_recipe`")
collection_formats = {}
path_params = {}
if 'name' in params:
path_params['name'] = params['name']
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = ['tokenAuth']
return self.api_client.call_api('/v1/recipes/user/{name}', 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None,
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
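# Usage sketch (not part of the generated module): assuming an authenticated
# ApiClient instance is available as `client`, the method above can be called
# either synchronously or asynchronously via the `callback` keyword, e.g.
#
#   api = V1recipesApi(api_client=client)
#   api.delete_private_recipe("my-recipe")                       # blocking call
#   thread = api.delete_private_recipe(
#       "my-recipe", callback=lambda response: print(response))  # async call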
def delete_public_recipe(self, name, **kwargs):
"""
delete public (owned) or private recipe by name
Recipes are basically script extensions to a cluster that run on a set of nodes before or after the Ambari cluster installation.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.delete_public_recipe(name, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str name: (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.delete_public_recipe_with_http_info(name, **kwargs)
else:
(data) = self.delete_public_recipe_with_http_info(name, **kwargs)
return data
def delete_public_recipe_with_http_info(self, name, **kwargs):
"""
delete public (owned) or private recipe by name
Recipes are basically script extensions to a cluster that run on a set of nodes before or after the Ambari cluster installation.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.delete_public_recipe_with_http_info(name, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str name: (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['name']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method delete_public_recipe" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'name' is set
if ('name' not in params) or (params['name'] is None):
raise ValueError("Missing the required parameter `name` when calling `delete_public_recipe`")
collection_formats = {}
path_params = {}
if 'name' in params:
path_params['name'] = params['name']
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = ['tokenAuth']
return self.api_client.call_api('/v1/recipes/account/{name}', 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None,
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def delete_recipe(self, id, **kwargs):
"""
delete recipe by id
Recipes are basically script extensions to a cluster that run on a set of nodes before or after the Ambari cluster installation.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.delete_recipe(id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param int id: (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.delete_recipe_with_http_info(id, **kwargs)
else:
(data) = self.delete_recipe_with_http_info(id, **kwargs)
return data
def delete_recipe_with_http_info(self, id, **kwargs):
"""
delete recipe by id
Recipes are basically script extensions to a cluster that run on a set of nodes before or after the Ambari cluster installation.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.delete_recipe_with_http_info(id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param int id: (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['id']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method delete_recipe" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'id' is set
if ('id' not in params) or (params['id'] is None):
raise ValueError("Missing the required parameter `id` when calling `delete_recipe`")
collection_formats = {}
path_params = {}
if 'id' in params:
path_params['id'] = params['id']
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = ['tokenAuth']
return self.api_client.call_api('/v1/recipes/{id}', 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None,
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_private_recipe(self, name, **kwargs):
"""
retrieve a private recipe by name
Recipes are basically script extensions to a cluster that run on a set of nodes before or after the Ambari cluster installation.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_private_recipe(name, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str name: (required)
:return: RecipeResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.get_private_recipe_with_http_info(name, **kwargs)
else:
(data) = self.get_private_recipe_with_http_info(name, **kwargs)
return data
def get_private_recipe_with_http_info(self, name, **kwargs):
"""
retrieve a private recipe by name
Recipes are basically script extensions to a cluster that run on a set of nodes before or after the Ambari cluster installation.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_private_recipe_with_http_info(name, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str name: (required)
:return: RecipeResponse
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['name']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_private_recipe" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'name' is set
if ('name' not in params) or (params['name'] is None):
raise ValueError("Missing the required parameter `name` when calling `get_private_recipe`")
collection_formats = {}
path_params = {}
if 'name' in params:
path_params['name'] = params['name']
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = ['tokenAuth']
return self.api_client.call_api('/v1/recipes/user/{name}', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='RecipeResponse',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_privates_recipe(self, **kwargs):
"""
retrieve private recipes
Recipes are basically script extensions to a cluster that run on a set of nodes before or after the Ambari cluster installation.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_privates_recipe(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:return: list[RecipeResponse]
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.get_privates_recipe_with_http_info(**kwargs)
else:
(data) = self.get_privates_recipe_with_http_info(**kwargs)
return data
def get_privates_recipe_with_http_info(self, **kwargs):
"""
retrieve private recipes
Recipes are basically script extensions to a cluster that run on a set of nodes before or after the Ambari cluster installation.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_privates_recipe_with_http_info(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:return: list[RecipeResponse]
If the method is called asynchronously,
returns the request thread.
"""
all_params = []
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_privates_recipe" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = ['tokenAuth']
return self.api_client.call_api('/v1/recipes/user', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='list[RecipeResponse]',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_public_recipe(self, name, **kwargs):
"""
retrieve a public or private (owned) recipe by name
Recipes are basically script extensions to a cluster that run on a set of nodes before or after the Ambari cluster installation.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_public_recipe(name, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str name: (required)
:return: RecipeResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.get_public_recipe_with_http_info(name, **kwargs)
else:
(data) = self.get_public_recipe_with_http_info(name, **kwargs)
return data
def get_public_recipe_with_http_info(self, name, **kwargs):
"""
retrieve a public or private (owned) recipe by name
Recipes are basically script extensions to a cluster that run on a set of nodes before or after the Ambari cluster installation.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_public_recipe_with_http_info(name, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str name: (required)
:return: RecipeResponse
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['name']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_public_recipe" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'name' is set
if ('name' not in params) or (params['name'] is None):
raise ValueError("Missing the required parameter `name` when calling `get_public_recipe`")
collection_formats = {}
path_params = {}
if 'name' in params:
path_params['name'] = params['name']
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = ['tokenAuth']
return self.api_client.call_api('/v1/recipes/account/{name}', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='RecipeResponse',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_publics_recipe(self, **kwargs):
"""
retrieve public and private (owned) recipes
Recipes are basically script extensions to a cluster that run on a set of nodes before or after the Ambari cluster installation.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_publics_recipe(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:return: list[RecipeResponse]
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.get_publics_recipe_with_http_info(**kwargs)
else:
(data) = self.get_publics_recipe_with_http_info(**kwargs)
return data
def get_publics_recipe_with_http_info(self, **kwargs):
"""
retrieve public and private (owned) recipes
Recipes are basically script extensions to a cluster that run on a set of nodes before or after the Ambari cluster installation.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_publics_recipe_with_http_info(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:return: list[RecipeResponse]
If the method is called asynchronously,
returns the request thread.
"""
all_params = []
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_publics_recipe" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = ['tokenAuth']
return self.api_client.call_api('/v1/recipes/account', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='list[RecipeResponse]',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
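# Usage sketch (not part of the generated module): the list endpoints return a
# list of RecipeResponse models; the fields of RecipeResponse are defined
# elsewhere in the generated client, so only the call itself is shown here.
#
#   api = V1recipesApi(api_client=client)
#   for recipe in api.get_publics_recipe():
#       print(recipe)   # each item is a RecipeResponse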
def get_recipe(self, id, **kwargs):
"""
retrieve recipe by id
Recipes are basically script extensions to a cluster that run on a set of nodes before or after the Ambari cluster installation.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_recipe(id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param int id: (required)
:return: RecipeResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.get_recipe_with_http_info(id, **kwargs)
else:
(data) = self.get_recipe_with_http_info(id, **kwargs)
return data
def get_recipe_with_http_info(self, id, **kwargs):
"""
retrieve recipe by id
Recipes are basically script extensions to a cluster that run on a set of nodes before or after the Ambari cluster installation.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_recipe_with_http_info(id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param int id: (required)
:return: RecipeResponse
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['id']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_recipe" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'id' is set
if ('id' not in params) or (params['id'] is None):
raise ValueError("Missing the required parameter `id` when calling `get_recipe`")
collection_formats = {}
path_params = {}
if 'id' in params:
path_params['id'] = params['id']
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = ['tokenAuth']
return self.api_client.call_api('/v1/recipes/{id}', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='RecipeResponse',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_recipe_request_from_name(self, name, **kwargs):
"""
retrieve recipe request by recipe name
Recipes are basically script extensions to a cluster that run on a set of nodes before or after the Ambari cluster installation.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_recipe_request_from_name(name, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str name: (required)
:return: RecipeRequest
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.get_recipe_request_from_name_with_http_info(name, **kwargs)
else:
(data) = self.get_recipe_request_from_name_with_http_info(name, **kwargs)
return data
def get_recipe_request_from_name_with_http_info(self, name, **kwargs):
"""
retrieve recipe request by recipe name
Recipes are basically script extensions to a cluster that run on a set of nodes before or after the Ambari cluster installation.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_recipe_request_from_name_with_http_info(name, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str name: (required)
:return: RecipeRequest
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['name']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_recipe_request_from_name" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'name' is set
if ('name' not in params) or (params['name'] is None):
raise ValueError("Missing the required parameter `name` when calling `get_recipe_request_from_name`")
collection_formats = {}
path_params = {}
if 'name' in params:
path_params['name'] = params['name']
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = ['tokenAuth']
return self.api_client.call_api('/v1/recipes/{name}/request', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='RecipeRequest',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def post_private_recipe(self, **kwargs):
"""
create recipe as private resource
Recipes are basically script extensions to a cluster that run on a set of nodes before or after the Ambari cluster installation.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.post_private_recipe(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param RecipeRequest body:
:return: RecipeResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.post_private_recipe_with_http_info(**kwargs)
else:
(data) = self.post_private_recipe_with_http_info(**kwargs)
return data
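# Usage sketch (not part of the generated module): the create endpoints take a
# RecipeRequest model as the `body` keyword; the RecipeRequest fields are
# defined elsewhere in the generated client and are not shown in this file.
#
#   recipe_request = RecipeRequest(...)   # populate per the RecipeRequest model
#   created = api.post_private_recipe(body=recipe_request)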
def post_private_recipe_with_http_info(self, **kwargs):
"""
create recipe as private resource
Recipes are basically script extensions to a cluster that run on a set of nodes before or after the Ambari cluster installation.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.post_private_recipe_with_http_info(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param RecipeRequest body:
:return: RecipeResponse
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['body']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method post_private_recipe" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = ['tokenAuth']
return self.api_client.call_api('/v1/recipes/user', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='RecipeResponse',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def post_public_recipe(self, **kwargs):
"""
create recipe as public resource
Recipes are basically script extensions to a cluster that run on a set of nodes before or after the Ambari cluster installation.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.post_public_recipe(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param RecipeRequest body:
:return: RecipeResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.post_public_recipe_with_http_info(**kwargs)
else:
(data) = self.post_public_recipe_with_http_info(**kwargs)
return data
def post_public_recipe_with_http_info(self, **kwargs):
"""
create recipe as public resource
Recipes are basically script extensions to a cluster that run on a set of nodes before or after the Ambari cluster installation.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.post_public_recipe_with_http_info(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param RecipeRequest body:
:return: RecipeResponse
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['body']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method post_public_recipe" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = ['tokenAuth']
return self.api_client.call_api('/v1/recipes/account', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='RecipeResponse',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
| # coding: utf-8
"""
Cloudbreak API
Cloudbreak is a powerful left surf that breaks over a coral reef, a mile southwest of the island of Tavarua, Fiji. Cloudbreak is a cloud agnostic Hadoop as a Service API. It abstracts the provisioning and eases the management and monitoring of on-demand clusters. SequenceIQ's Cloudbreak is a RESTful application development platform with the goal of helping developers to build solutions for deploying Hadoop YARN clusters in different environments. Once it is deployed in your favourite servlet container it exposes a REST API allowing you to spin up Hadoop clusters of arbitrary sizes on different cloud providers. Provisioning Hadoop has never been easier. Cloudbreak is built on the foundation of cloud providers API (Amazon AWS, Microsoft Azure, Google Cloud Platform, Openstack), Apache Ambari, Docker lightweight containers, Swarm and Consul. For further product documentation follow the link: <a href=\"http://hortonworks.com/apache/cloudbreak/\">http://hortonworks.com/apache/cloudbreak/</a>
OpenAPI spec version: 2.7.1
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import sys
import os
import re
# python 2 and python 3 compatibility library
from six import iteritems
from ..configuration import Configuration
from ..api_client import ApiClient
class V1recipesApi(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
Ref: https://github.com/swagger-api/swagger-codegen
"""
def __init__(self, api_client=None):
config = Configuration()
if api_client:
self.api_client = api_client
else:
if not config.api_client:
config.api_client = ApiClient()
self.api_client = config.api_client
def delete_private_recipe(self, name, **kwargs):
"""
delete private recipe by name
Recipes are basically script extensions to a cluster that run on a set of nodes before or after the Ambari cluster installation.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.delete_private_recipe(name, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str name: (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.delete_private_recipe_with_http_info(name, **kwargs)
else:
(data) = self.delete_private_recipe_with_http_info(name, **kwargs)
return data
def delete_private_recipe_with_http_info(self, name, **kwargs):
"""
delete private recipe by name
Recipes are basically script extensions to a cluster that run on a set of nodes before or after the Ambari cluster installation.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.delete_private_recipe_with_http_info(name, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str name: (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['name']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method delete_private_recipe" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'name' is set
if ('name' not in params) or (params['name'] is None):
raise ValueError("Missing the required parameter `name` when calling `delete_private_recipe`")
collection_formats = {}
path_params = {}
if 'name' in params:
path_params['name'] = params['name']
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = ['tokenAuth']
return self.api_client.call_api('/v1/recipes/user/{name}', 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None,
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def delete_public_recipe(self, name, **kwargs):
"""
delete public (owned) or private recipe by name
Recipes are basically script extensions to a cluster that run on a set of nodes before or after the Ambari cluster installation.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.delete_public_recipe(name, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str name: (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.delete_public_recipe_with_http_info(name, **kwargs)
else:
(data) = self.delete_public_recipe_with_http_info(name, **kwargs)
return data
def delete_public_recipe_with_http_info(self, name, **kwargs):
"""
delete public (owned) or private recipe by name
Recipes are basically script extensions to a cluster that run on a set of nodes before or after the Ambari cluster installation.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.delete_public_recipe_with_http_info(name, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str name: (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['name']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method delete_public_recipe" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'name' is set
if ('name' not in params) or (params['name'] is None):
raise ValueError("Missing the required parameter `name` when calling `delete_public_recipe`")
collection_formats = {}
path_params = {}
if 'name' in params:
path_params['name'] = params['name']
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = ['tokenAuth']
return self.api_client.call_api('/v1/recipes/account/{name}', 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None,
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def delete_recipe(self, id, **kwargs):
"""
delete recipe by id
Recipes are basically script extensions to a cluster that run on a set of nodes before or after the Ambari cluster installation.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.delete_recipe(id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param int id: (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.delete_recipe_with_http_info(id, **kwargs)
else:
(data) = self.delete_recipe_with_http_info(id, **kwargs)
return data
def delete_recipe_with_http_info(self, id, **kwargs):
"""
delete recipe by id
Recipes are basically script extensions to a cluster that run on a set of nodes before or after the Ambari cluster installation.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.delete_recipe_with_http_info(id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param int id: (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['id']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method delete_recipe" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'id' is set
if ('id' not in params) or (params['id'] is None):
raise ValueError("Missing the required parameter `id` when calling `delete_recipe`")
collection_formats = {}
path_params = {}
if 'id' in params:
path_params['id'] = params['id']
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = ['tokenAuth']
return self.api_client.call_api('/v1/recipes/{id}', 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None,
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_private_recipe(self, name, **kwargs):
"""
retrieve a private recipe by name
Recipes are basically script extensions to a cluster that run on a set of nodes before or after the Ambari cluster installation.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_private_recipe(name, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str name: (required)
:return: RecipeResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.get_private_recipe_with_http_info(name, **kwargs)
else:
(data) = self.get_private_recipe_with_http_info(name, **kwargs)
return data
def get_private_recipe_with_http_info(self, name, **kwargs):
"""
retrieve a private recipe by name
Recipes are basically script extensions to a cluster that run on a set of nodes before or after the Ambari cluster installation.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_private_recipe_with_http_info(name, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str name: (required)
:return: RecipeResponse
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['name']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_private_recipe" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'name' is set
if ('name' not in params) or (params['name'] is None):
raise ValueError("Missing the required parameter `name` when calling `get_private_recipe`")
collection_formats = {}
path_params = {}
if 'name' in params:
path_params['name'] = params['name']
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = ['tokenAuth']
return self.api_client.call_api('/v1/recipes/user/{name}', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='RecipeResponse',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_privates_recipe(self, **kwargs):
"""
retrieve private recipes
Recipes are basically script extensions to a cluster that run on a set of nodes before or after the Ambari cluster installation.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_privates_recipe(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:return: list[RecipeResponse]
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.get_privates_recipe_with_http_info(**kwargs)
else:
(data) = self.get_privates_recipe_with_http_info(**kwargs)
return data
def get_privates_recipe_with_http_info(self, **kwargs):
"""
retrieve private recipes
Recipes are basically script extensions to a cluster that run on a set of nodes before or after the Ambari cluster installation.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_privates_recipe_with_http_info(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:return: list[RecipeResponse]
If the method is called asynchronously,
returns the request thread.
"""
all_params = []
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_privates_recipe" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = ['tokenAuth']
return self.api_client.call_api('/v1/recipes/user', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='list[RecipeResponse]',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_public_recipe(self, name, **kwargs):
"""
retrieve a public or private (owned) recipe by name
Recipes are basically script extensions to a cluster that run on a set of nodes before or after the Ambari cluster installation.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_public_recipe(name, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str name: (required)
:return: RecipeResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.get_public_recipe_with_http_info(name, **kwargs)
else:
(data) = self.get_public_recipe_with_http_info(name, **kwargs)
return data
def get_public_recipe_with_http_info(self, name, **kwargs):
"""
retrieve a public or private (owned) recipe by name
Recipes are basically script extensions to a cluster that run on a set of nodes before or after the Ambari cluster installation.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_public_recipe_with_http_info(name, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str name: (required)
:return: RecipeResponse
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['name']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_public_recipe" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'name' is set
if ('name' not in params) or (params['name'] is None):
raise ValueError("Missing the required parameter `name` when calling `get_public_recipe`")
collection_formats = {}
path_params = {}
if 'name' in params:
path_params['name'] = params['name']
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = ['tokenAuth']
return self.api_client.call_api('/v1/recipes/account/{name}', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='RecipeResponse',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_publics_recipe(self, **kwargs):
"""
retrieve public and private (owned) recipes
Recipes are basically script extensions to a cluster that run on a set of nodes before or after the Ambari cluster installation.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_publics_recipe(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:return: list[RecipeResponse]
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.get_publics_recipe_with_http_info(**kwargs)
else:
(data) = self.get_publics_recipe_with_http_info(**kwargs)
return data
def get_publics_recipe_with_http_info(self, **kwargs):
"""
retrieve public and private (owned) recipes
Recipes are basically script extensions to a cluster that run on a set of nodes before or after the Ambari cluster installation.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_publics_recipe_with_http_info(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:return: list[RecipeResponse]
If the method is called asynchronously,
returns the request thread.
"""
all_params = []
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_publics_recipe" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = ['tokenAuth']
return self.api_client.call_api('/v1/recipes/account', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='list[RecipeResponse]',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_recipe(self, id, **kwargs):
"""
retrieve recipe by id
Recipes are basically script extensions to a cluster that run on a set of nodes before or after the Ambari cluster installation.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_recipe(id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param int id: (required)
:return: RecipeResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.get_recipe_with_http_info(id, **kwargs)
else:
(data) = self.get_recipe_with_http_info(id, **kwargs)
return data
def get_recipe_with_http_info(self, id, **kwargs):
"""
retrieve recipe by id
Recipes are basically script extensions to a cluster that run on a set of nodes before or after the Ambari cluster installation.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_recipe_with_http_info(id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param int id: (required)
:return: RecipeResponse
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['id']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_recipe" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'id' is set
if ('id' not in params) or (params['id'] is None):
raise ValueError("Missing the required parameter `id` when calling `get_recipe`")
collection_formats = {}
path_params = {}
if 'id' in params:
path_params['id'] = params['id']
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = ['tokenAuth']
return self.api_client.call_api('/v1/recipes/{id}', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='RecipeResponse',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_recipe_request_from_name(self, name, **kwargs):
"""
retrieve recipe request by recipe name
Recipes are basically script extensions to a cluster that run on a set of nodes before or after the Ambari cluster installation.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_recipe_request_from_name(name, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str name: (required)
:return: RecipeRequest
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.get_recipe_request_from_name_with_http_info(name, **kwargs)
else:
(data) = self.get_recipe_request_from_name_with_http_info(name, **kwargs)
return data
def get_recipe_request_from_name_with_http_info(self, name, **kwargs):
"""
retrieve recipe request by recipe name
Recipes are basically script extensions to a cluster that run on a set of nodes before or after the Ambari cluster installation.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_recipe_request_from_name_with_http_info(name, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str name: (required)
:return: RecipeRequest
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['name']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_recipe_request_from_name" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'name' is set
if ('name' not in params) or (params['name'] is None):
raise ValueError("Missing the required parameter `name` when calling `get_recipe_request_from_name`")
collection_formats = {}
path_params = {}
if 'name' in params:
path_params['name'] = params['name']
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = ['tokenAuth']
return self.api_client.call_api('/v1/recipes/{name}/request', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='RecipeRequest',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def post_private_recipe(self, **kwargs):
"""
create recipe as private resource
Recipes are basically script extensions to a cluster that run on a set of nodes before or after the Ambari cluster installation.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.post_private_recipe(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param RecipeRequest body:
:return: RecipeResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.post_private_recipe_with_http_info(**kwargs)
else:
(data) = self.post_private_recipe_with_http_info(**kwargs)
return data
def post_private_recipe_with_http_info(self, **kwargs):
"""
create recipe as private resource
Recipes are basically script extensions to a cluster that run on a set of nodes before or after the Ambari cluster installation.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.post_private_recipe_with_http_info(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param RecipeRequest body:
:return: RecipeResponse
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['body']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method post_private_recipe" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = ['tokenAuth']
return self.api_client.call_api('/v1/recipes/user', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='RecipeResponse',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def post_public_recipe(self, **kwargs):
"""
create recipe as public resource
Recipes are basically script extensions to a cluster that run on a set of nodes before or after the Ambari cluster installation.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.post_public_recipe(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param RecipeRequest body:
:return: RecipeResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.post_public_recipe_with_http_info(**kwargs)
else:
(data) = self.post_public_recipe_with_http_info(**kwargs)
return data
def post_public_recipe_with_http_info(self, **kwargs):
"""
create recipe as public resource
Recipes are basically script extensions to a cluster that run on a set of nodes before or after the Ambari cluster installation.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.post_public_recipe_with_http_info(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param RecipeRequest body:
:return: RecipeResponse
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['body']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method post_public_recipe" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = ['tokenAuth']
return self.api_client.call_api('/v1/recipes/account', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='RecipeResponse',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats) | en | 0.758339 | # coding: utf-8 Cloudbreak API Cloudbreak is a powerful left surf that breaks over a coral reef, a mile off southwest the island of Tavarua, Fiji. Cloudbreak is a cloud agnostic Hadoop as a Service API. Abstracts the provisioning and ease management and monitoring of on-demand clusters. SequenceIQ's Cloudbreak is a RESTful application development platform with the goal of helping developers to build solutions for deploying Hadoop YARN clusters in different environments. Once it is deployed in your favourite servlet container it exposes a REST API allowing to span up Hadoop clusters of arbitary sizes and cloud providers. Provisioning Hadoop has never been easier. Cloudbreak is built on the foundation of cloud providers API (Amazon AWS, Microsoft Azure, Google Cloud Platform, Openstack), Apache Ambari, Docker lightweight containers, Swarm and Consul. For further product documentation follow the link: <a href=\"http://hortonworks.com/apache/cloudbreak/\">http://hortonworks.com/apache/cloudbreak/</a> OpenAPI spec version: 2.7.1 Generated by: https://github.com/swagger-api/swagger-codegen.git # python 2 and python 3 compatibility library NOTE: This class is auto generated by the swagger code generator program. Do not edit the class manually. Ref: https://github.com/swagger-api/swagger-codegen delete private recipe by name Recipes are basically script extensions to a cluster that run on a set of nodes before or after the Ambari cluster installation. This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please define a `callback` function to be invoked when receiving the response. >>> def callback_function(response): >>> pprint(response) >>> >>> thread = api.delete_private_recipe(name, callback=callback_function) :param callback function: The callback function for asynchronous request. (optional) :param str name: (required) :return: None If the method is called asynchronously, returns the request thread. delete private recipe by name Recipes are basically script extensions to a cluster that run on a set of nodes before or after the Ambari cluster installation. This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please define a `callback` function to be invoked when receiving the response. >>> def callback_function(response): >>> pprint(response) >>> >>> thread = api.delete_private_recipe_with_http_info(name, callback=callback_function) :param callback function: The callback function for asynchronous request. (optional) :param str name: (required) :return: None If the method is called asynchronously, returns the request thread. # verify the required parameter 'name' is set # HTTP header `Accept` # HTTP header `Content-Type` # Authentication setting delete public (owned) or private recipe by name Recipes are basically script extensions to a cluster that run on a set of nodes before or after the Ambari cluster installation. This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please define a `callback` function to be invoked when receiving the response. >>> def callback_function(response): >>> pprint(response) >>> >>> thread = api.delete_public_recipe(name, callback=callback_function) :param callback function: The callback function for asynchronous request. (optional) :param str name: (required) :return: None If the method is called asynchronously, returns the request thread. 
delete public (owned) or private recipe by name Recipes are basically script extensions to a cluster that run on a set of nodes before or after the Ambari cluster installation. This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please define a `callback` function to be invoked when receiving the response. >>> def callback_function(response): >>> pprint(response) >>> >>> thread = api.delete_public_recipe_with_http_info(name, callback=callback_function) :param callback function: The callback function for asynchronous request. (optional) :param str name: (required) :return: None If the method is called asynchronously, returns the request thread. # verify the required parameter 'name' is set # HTTP header `Accept` # HTTP header `Content-Type` # Authentication setting delete recipe by id Recipes are basically script extensions to a cluster that run on a set of nodes before or after the Ambari cluster installation. This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please define a `callback` function to be invoked when receiving the response. >>> def callback_function(response): >>> pprint(response) >>> >>> thread = api.delete_recipe(id, callback=callback_function) :param callback function: The callback function for asynchronous request. (optional) :param int id: (required) :return: None If the method is called asynchronously, returns the request thread. delete recipe by id Recipes are basically script extensions to a cluster that run on a set of nodes before or after the Ambari cluster installation. This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please define a `callback` function to be invoked when receiving the response. >>> def callback_function(response): >>> pprint(response) >>> >>> thread = api.delete_recipe_with_http_info(id, callback=callback_function) :param callback function: The callback function for asynchronous request. (optional) :param int id: (required) :return: None If the method is called asynchronously, returns the request thread. # verify the required parameter 'id' is set # HTTP header `Accept` # HTTP header `Content-Type` # Authentication setting retrieve a private recipe by name Recipes are basically script extensions to a cluster that run on a set of nodes before or after the Ambari cluster installation. This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please define a `callback` function to be invoked when receiving the response. >>> def callback_function(response): >>> pprint(response) >>> >>> thread = api.get_private_recipe(name, callback=callback_function) :param callback function: The callback function for asynchronous request. (optional) :param str name: (required) :return: RecipeResponse If the method is called asynchronously, returns the request thread. retrieve a private recipe by name Recipes are basically script extensions to a cluster that run on a set of nodes before or after the Ambari cluster installation. This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please define a `callback` function to be invoked when receiving the response. >>> def callback_function(response): >>> pprint(response) >>> >>> thread = api.get_private_recipe_with_http_info(name, callback=callback_function) :param callback function: The callback function for asynchronous request. 
(optional) :param str name: (required) :return: RecipeResponse If the method is called asynchronously, returns the request thread. # verify the required parameter 'name' is set # HTTP header `Accept` # HTTP header `Content-Type` # Authentication setting retrieve private recipes Recipes are basically script extensions to a cluster that run on a set of nodes before or after the Ambari cluster installation. This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please define a `callback` function to be invoked when receiving the response. >>> def callback_function(response): >>> pprint(response) >>> >>> thread = api.get_privates_recipe(callback=callback_function) :param callback function: The callback function for asynchronous request. (optional) :return: list[RecipeResponse] If the method is called asynchronously, returns the request thread. retrieve private recipes Recipes are basically script extensions to a cluster that run on a set of nodes before or after the Ambari cluster installation. This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please define a `callback` function to be invoked when receiving the response. >>> def callback_function(response): >>> pprint(response) >>> >>> thread = api.get_privates_recipe_with_http_info(callback=callback_function) :param callback function: The callback function for asynchronous request. (optional) :return: list[RecipeResponse] If the method is called asynchronously, returns the request thread. # HTTP header `Accept` # HTTP header `Content-Type` # Authentication setting retrieve a public or private (owned) recipe by name Recipes are basically script extensions to a cluster that run on a set of nodes before or after the Ambari cluster installation. This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please define a `callback` function to be invoked when receiving the response. >>> def callback_function(response): >>> pprint(response) >>> >>> thread = api.get_public_recipe(name, callback=callback_function) :param callback function: The callback function for asynchronous request. (optional) :param str name: (required) :return: RecipeResponse If the method is called asynchronously, returns the request thread. retrieve a public or private (owned) recipe by name Recipes are basically script extensions to a cluster that run on a set of nodes before or after the Ambari cluster installation. This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please define a `callback` function to be invoked when receiving the response. >>> def callback_function(response): >>> pprint(response) >>> >>> thread = api.get_public_recipe_with_http_info(name, callback=callback_function) :param callback function: The callback function for asynchronous request. (optional) :param str name: (required) :return: RecipeResponse If the method is called asynchronously, returns the request thread. # verify the required parameter 'name' is set # HTTP header `Accept` # HTTP header `Content-Type` # Authentication setting retrieve public and private (owned) recipes Recipes are basically script extensions to a cluster that run on a set of nodes before or after the Ambari cluster installation. This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please define a `callback` function to be invoked when receiving the response. 
>>> def callback_function(response): >>> pprint(response) >>> >>> thread = api.get_publics_recipe(callback=callback_function) :param callback function: The callback function for asynchronous request. (optional) :return: list[RecipeResponse] If the method is called asynchronously, returns the request thread. retrieve public and private (owned) recipes Recipes are basically script extensions to a cluster that run on a set of nodes before or after the Ambari cluster installation. This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please define a `callback` function to be invoked when receiving the response. >>> def callback_function(response): >>> pprint(response) >>> >>> thread = api.get_publics_recipe_with_http_info(callback=callback_function) :param callback function: The callback function for asynchronous request. (optional) :return: list[RecipeResponse] If the method is called asynchronously, returns the request thread. # HTTP header `Accept` # HTTP header `Content-Type` # Authentication setting retrieve recipe by id Recipes are basically script extensions to a cluster that run on a set of nodes before or after the Ambari cluster installation. This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please define a `callback` function to be invoked when receiving the response. >>> def callback_function(response): >>> pprint(response) >>> >>> thread = api.get_recipe(id, callback=callback_function) :param callback function: The callback function for asynchronous request. (optional) :param int id: (required) :return: RecipeResponse If the method is called asynchronously, returns the request thread. retrieve recipe by id Recipes are basically script extensions to a cluster that run on a set of nodes before or after the Ambari cluster installation. This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please define a `callback` function to be invoked when receiving the response. >>> def callback_function(response): >>> pprint(response) >>> >>> thread = api.get_recipe_with_http_info(id, callback=callback_function) :param callback function: The callback function for asynchronous request. (optional) :param int id: (required) :return: RecipeResponse If the method is called asynchronously, returns the request thread. # verify the required parameter 'id' is set # HTTP header `Accept` # HTTP header `Content-Type` # Authentication setting retrieve recipe request by recipe name Recipes are basically script extensions to a cluster that run on a set of nodes before or after the Ambari cluster installation. This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please define a `callback` function to be invoked when receiving the response. >>> def callback_function(response): >>> pprint(response) >>> >>> thread = api.get_recipe_request_from_name(name, callback=callback_function) :param callback function: The callback function for asynchronous request. (optional) :param str name: (required) :return: RecipeRequest If the method is called asynchronously, returns the request thread. retrieve recipe request by recipe name Recipes are basically script extensions to a cluster that run on a set of nodes before or after the Ambari cluster installation. This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please define a `callback` function to be invoked when receiving the response. 
>>> def callback_function(response): >>> pprint(response) >>> >>> thread = api.get_recipe_request_from_name_with_http_info(name, callback=callback_function) :param callback function: The callback function for asynchronous request. (optional) :param str name: (required) :return: RecipeRequest If the method is called asynchronously, returns the request thread. # verify the required parameter 'name' is set # HTTP header `Accept` # HTTP header `Content-Type` # Authentication setting create recipe as private resource Recipes are basically script extensions to a cluster that run on a set of nodes before or after the Ambari cluster installation. This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please define a `callback` function to be invoked when receiving the response. >>> def callback_function(response): >>> pprint(response) >>> >>> thread = api.post_private_recipe(callback=callback_function) :param callback function: The callback function for asynchronous request. (optional) :param RecipeRequest body: :return: RecipeResponse If the method is called asynchronously, returns the request thread. create recipe as private resource Recipes are basically script extensions to a cluster that run on a set of nodes before or after the Ambari cluster installation. This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please define a `callback` function to be invoked when receiving the response. >>> def callback_function(response): >>> pprint(response) >>> >>> thread = api.post_private_recipe_with_http_info(callback=callback_function) :param callback function: The callback function for asynchronous request. (optional) :param RecipeRequest body: :return: RecipeResponse If the method is called asynchronously, returns the request thread. # HTTP header `Accept` # HTTP header `Content-Type` # Authentication setting create recipe as public resource Recipes are basically script extensions to a cluster that run on a set of nodes before or after the Ambari cluster installation. This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please define a `callback` function to be invoked when receiving the response. >>> def callback_function(response): >>> pprint(response) >>> >>> thread = api.post_public_recipe(callback=callback_function) :param callback function: The callback function for asynchronous request. (optional) :param RecipeRequest body: :return: RecipeResponse If the method is called asynchronously, returns the request thread. create recipe as public resource Recipes are basically script extensions to a cluster that run on a set of nodes before or after the Ambari cluster installation. This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please define a `callback` function to be invoked when receiving the response. >>> def callback_function(response): >>> pprint(response) >>> >>> thread = api.post_public_recipe_with_http_info(callback=callback_function) :param callback function: The callback function for asynchronous request. (optional) :param RecipeRequest body: :return: RecipeResponse If the method is called asynchronously, returns the request thread. # HTTP header `Accept` # HTTP header `Content-Type` # Authentication setting | 1.569552 | 2 |
sysDump.py | fallen-geko/System-Info-Dump | 0 | 6630675 | <gh_stars>0
# Written by <NAME>
# References: got some help from:
# - https://www.thepythoncode.com/article/get-hardware-system-information-python
# - https://www.programcreek.com/python/example/53873/psutil.boot_time
# - https://docs.python.org/3/library/time.html
import psutil
import platform
import csv
import os
import time
dtime = lambda s: time.strftime("%a, %d %b %Y %H:%M:%S", s)
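# dtime turns a time.struct_time into a readable stamp, e.g. "Mon, 01 Jan 2024 12:00:00".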
def getSize(bytes, suffix="B"):
factor = 1024
for unit in ["", "K", "M", "G", "T", "P"]:
if bytes < factor:
return f"{bytes:.2f}{unit}{suffix}"
bytes /= factor
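# Example: getSize(1536) -> "1.50KB"; the value keeps being divided by 1024 until it drops below the factor.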
def getSysInfo():
uname = platform.uname()
stab = {}
stab["system"]=uname.system
stab["release"]=uname.release
stab["version"]=uname.version
stab["nodeName"]=uname.node
stab["machine"]=uname.machine
stab["processor"]=uname.processor
return stab
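# getSysInfo() returns a flat dict such as {"system": "Linux", "release": "5.15.0", ...} (values vary per host).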
def getDiskInfo():
dinf = {"partitions":{}}
#Partitions
partitions = psutil.disk_partitions()
for partition in partitions:
dname = partition.device
dinf["partitions"][dname]={}
dinf["partitions"][dname]["mountPoint"]=partition.mountpoint
dinf["partitions"][dname]["fileSystemType"]=partition.fstype
try:
partition_usage = psutil.disk_usage(partition.mountpoint)
except PermissionError:
            # Partition can't be accessed
dinf["partitions"][dname]["totalSize"]="Unknown"
dinf["partitions"][dname]["used"]="Unknown"
dinf["partitions"][dname]["free"]="Unknown"
dinf["partitions"][dname]["percentageUsed"]="Unknown"
else:
dinf["partitions"][dname]["totalSize"]=getSize(partition_usage.total)
dinf["partitions"][dname]["used"]=getSize(partition_usage.used)
dinf["partitions"][dname]["free"]=getSize(partition_usage.free)
dinf["partitions"][dname]["percentageUsed"]=partition_usage.percent
# IO stats since boot
disk_io = psutil.disk_io_counters()
dinf["totalIORead"]=getSize(disk_io.read_bytes)
dinf["totalIOWrite"]=getSize(disk_io.write_bytes)
return dinf
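# getDiskInfo() nests per-partition usage under "partitions" (keyed by device name) and adds the
# totals read/written since boot, e.g. dinf["partitions"]["/dev/sda1"]["free"] (device name is an example).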
def getNetworkInfo():
# Get all network interfaces
netty={"interfaces":{}}
if_addrs = psutil.net_if_addrs()
    for interface_name, interface_addresses in if_addrs.items():
        # Initialise once per interface so IPv4 and MAC entries can coexist
        netty["interfaces"][interface_name]={}
        for address in interface_addresses:
sf=str(address.family)
if sf.find('AddressFamily.AF_INET')!=-1:
netty["interfaces"][interface_name]["IPAddress"]=address.address
netty["interfaces"][interface_name]["netMask"]=address.netmask
netty["interfaces"][interface_name]["broadcastIP"]=address.broadcast
elif sf.find('AddressFamily.AF_PACKET')!=-1:
netty["interfaces"][interface_name]["MACAddress"]=address.address
netty["interfaces"][interface_name]["netMask"]=address.netmask
netty["interfaces"][interface_name]["broadcastMac"]=address.broadcast
# IO stats since boot
net_io = psutil.net_io_counters()
netty["totalBytesSent"]=getSize(net_io.bytes_sent)
netty["totalBytesRecieved"]=getSize(net_io.bytes_recv)
return netty
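# getNetworkInfo() keys interface details by name; an entry may hold IPv4 (AF_INET) fields,
# MAC (AF_PACKET) fields, or both, alongside the total bytes sent/received since boot.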
bootTime=psutil.boot_time()
otime=time.localtime(bootTime)
tta=dtime(otime)
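# tta is the boot time rendered with dtime(), e.g. "Mon, 01 Jan 2024 08:30:00".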
def dumpToCSV(filename,dsys=True,ddisk=True,dnet=True):
sys = getSysInfo()
disk=getDiskInfo()
nets = getNetworkInfo()
try:
with open(filename+'.csv', 'w', newline="") as csvfile:
filewriter = csv.writer(csvfile, quoting=csv.QUOTE_ALL)
if dsys == True:
filewriter.writerow(["SYSTEM INFORMATION"])
filewriter.writerow(['System', sys["system"]])
filewriter.writerow(['Release', sys["release"]])
filewriter.writerow(['Version', sys["version"]])
filewriter.writerow(['Node name', sys["nodeName"]])
filewriter.writerow(['Machine', sys["machine"]])
filewriter.writerow(['Processor', sys["processor"]])
filewriter.writerow(['Boot Time', tta])
filewriter.writerow([''])
if ddisk == True:
filewriter.writerow(["DISK INFORMATION"])
filewriter.writerow(['Read operations since boot', disk["totalIORead"]])
filewriter.writerow(['Write operations since boot', disk["totalIOWrite"]])
filewriter.writerow(["Partitions"])
filewriter.writerow(["Partition","Mount Point","File System Type","Total Size","Used","Free","Percentage Used"])
dpart=disk["partitions"]
for p in dpart:
dp=dpart[p]
tt=["","","",""]
try:
tt[0] = dp["totalSize"]
tt[1] = dp["used"]
tt[2] = dp["free"]
tt[3] = dp["percentageUsed"]
except KeyError:
tt=["Unavailable","Unavailable","Unavailable","Unavailable"]
filewriter.writerow([p,dp["mountPoint"],dp["fileSystemType"],tt[0],tt[1],tt[2],tt[3]])
filewriter.writerow([''])
if dnet == True:
filewriter.writerow(["NETWORK INFORMATION"])
filewriter.writerow(['Total bytes sent since boot', nets["totalBytesSent"]])
                filewriter.writerow(['Total bytes received since boot', nets["totalBytesRecieved"]])
filewriter.writerow(["Interfaces"])
filewriter.writerow(["Interface","IP/MAC Address","Net Mask","Broadcast IP/MAC"])
nints = nets["interfaces"]
for i in nints:
iin = nints[i]
si = ["NULL","NULL","NULL"]
for a in iin:
if a == "IPAddress" or a == "MACAddress":
si[0]=iin[a]
elif a == 'netMask':
si[1]=iin[a]
elif a == "broadcastIP" or a == "broadcastMac":
si[2]=iin[a]
filewriter.writerow([i,si[0],si[1],si[2]])
except PermissionError:
print("Writing to .csv failed!")
print("Please try again using a different filename.")
return False
else:
wd = os.getcwd()
print("Writing to .csv successful!")
print(filename+".csv saved in "+wd)
return True
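# Hedged usage sketch (not in the original script); the filenames are arbitrary examples:
#   dumpToCSV("system_report")                        # full dump -> system_report.csv in the CWD
#   dumpToCSV("net_only", dsys=False, ddisk=False)    # network section only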
| # Written by <NAME>
# References: got some help from:
# - https://www.thepythoncode.com/article/get-hardware-system-information-python
# - https://www.programcreek.com/python/example/53873/psutil.boot_time
# - https://docs.python.org/3/library/time.html
import psutil
import platform
import csv
import os
import time
dtime = lambda s: time.strftime("%a, %d %b %Y %H:%M:%S", s)
def getSize(bytes, suffix="B"):
factor = 1024
for unit in ["", "K", "M", "G", "T", "P"]:
if bytes < factor:
return f"{bytes:.2f}{unit}{suffix}"
bytes /= factor
def getSysInfo():
uname = platform.uname()
stab = {}
stab["system"]=uname.system
stab["release"]=uname.release
stab["version"]=uname.version
stab["nodeName"]=uname.node
stab["machine"]=uname.machine
stab["processor"]=uname.processor
return stab
def getDiskInfo():
dinf = {"partitions":{}}
#Partitions
partitions = psutil.disk_partitions()
for partition in partitions:
dname = partition.device
dinf["partitions"][dname]={}
dinf["partitions"][dname]["mountPoint"]=partition.mountpoint
dinf["partitions"][dname]["fileSystemType"]=partition.fstype
try:
partition_usage = psutil.disk_usage(partition.mountpoint)
except PermissionError:
            # Partition can't be accessed
dinf["partitions"][dname]["totalSize"]="Unknown"
dinf["partitions"][dname]["used"]="Unknown"
dinf["partitions"][dname]["free"]="Unknown"
dinf["partitions"][dname]["percentageUsed"]="Unknown"
else:
dinf["partitions"][dname]["totalSize"]=getSize(partition_usage.total)
dinf["partitions"][dname]["used"]=getSize(partition_usage.used)
dinf["partitions"][dname]["free"]=getSize(partition_usage.free)
dinf["partitions"][dname]["percentageUsed"]=partition_usage.percent
# IO stats since boot
disk_io = psutil.disk_io_counters()
dinf["totalIORead"]=getSize(disk_io.read_bytes)
dinf["totalIOWrite"]=getSize(disk_io.write_bytes)
return dinf
def getNetworkInfo():
# Get all network interfaces
netty={"interfaces":{}}
if_addrs = psutil.net_if_addrs()
    for interface_name, interface_addresses in if_addrs.items():
        # Initialise once per interface so IPv4 and MAC entries can coexist
        netty["interfaces"][interface_name]={}
        for address in interface_addresses:
sf=str(address.family)
if sf.find('AddressFamily.AF_INET')!=-1:
netty["interfaces"][interface_name]["IPAddress"]=address.address
netty["interfaces"][interface_name]["netMask"]=address.netmask
netty["interfaces"][interface_name]["broadcastIP"]=address.broadcast
elif sf.find('AddressFamily.AF_PACKET')!=-1:
netty["interfaces"][interface_name]["MACAddress"]=address.address
netty["interfaces"][interface_name]["netMask"]=address.netmask
netty["interfaces"][interface_name]["broadcastMac"]=address.broadcast
# IO stats since boot
net_io = psutil.net_io_counters()
netty["totalBytesSent"]=getSize(net_io.bytes_sent)
netty["totalBytesRecieved"]=getSize(net_io.bytes_recv)
return netty
bootTime=psutil.boot_time()
otime=time.localtime(bootTime)
tta=dtime(otime)
def dumpToCSV(filename,dsys=True,ddisk=True,dnet=True):
sys = getSysInfo()
disk=getDiskInfo()
nets = getNetworkInfo()
try:
with open(filename+'.csv', 'w', newline="") as csvfile:
filewriter = csv.writer(csvfile, quoting=csv.QUOTE_ALL)
if dsys == True:
filewriter.writerow(["SYSTEM INFORMATION"])
filewriter.writerow(['System', sys["system"]])
filewriter.writerow(['Release', sys["release"]])
filewriter.writerow(['Version', sys["version"]])
filewriter.writerow(['Node name', sys["nodeName"]])
filewriter.writerow(['Machine', sys["machine"]])
filewriter.writerow(['Processor', sys["processor"]])
filewriter.writerow(['Boot Time', tta])
filewriter.writerow([''])
if ddisk == True:
filewriter.writerow(["DISK INFORMATION"])
filewriter.writerow(['Total bytes read since boot', disk["totalIORead"]])
filewriter.writerow(['Total bytes written since boot', disk["totalIOWrite"]])
filewriter.writerow(["Partitions"])
filewriter.writerow(["Partition","Mount Point","File System Type","Total Size","Used","Free","Percentage Used"])
dpart=disk["partitions"]
for p in dpart:
dp=dpart[p]
tt=["","","",""]
try:
tt[0] = dp["totalSize"]
tt[1] = dp["used"]
tt[2] = dp["free"]
tt[3] = dp["percentageUsed"]
except KeyError:
tt=["Unavailable","Unavailable","Unavailable","Unavailable"]
filewriter.writerow([p,dp["mountPoint"],dp["fileSystemType"],tt[0],tt[1],tt[2],tt[3]])
filewriter.writerow(['Node name', sys["nodeName"]])
filewriter.writerow(['Machine', sys["machine"]])
filewriter.writerow(['Processor', sys["processor"]])
filewriter.writerow([''])
if dnet == True:
filewriter.writerow(["NETWORK INFORMATION"])
filewriter.writerow(['Total bytes sent since boot', nets["totalBytesSent"]])
filewriter.writerow(['Total bytes received since boot', nets["totalBytesRecieved"]])
filewriter.writerow(["Interfaces"])
filewriter.writerow(["Interface","IP/MAC Address","Net Mask","Broadcast IP/MAC"])
nints = nets["interfaces"]
for i in nints:
iin = nints[i]
si = ["NULL","NULL","NULL"]
for a in iin:
if a == "IPAddress" or a == "MACAddress":
si[0]=iin[a]
elif a == 'netMask':
si[1]=iin[a]
elif a == "broadcastIP" or a == "broadcastMac":
si[2]=iin[a]
filewriter.writerow([i,si[0],si[1],si[2]])
except PermissionError:
print("Writing to .csv failed!")
print("Please try again using a different filename.")
return False
else:
wd = os.getcwd()
print("Writing to .csv successful!")
print(filename+".csv saved in "+wd)
return True | en | 0.724501 | # Written by <NAME> # References: got some help from: # - https://www.thepythoncode.com/article/get-hardware-system-information-python # - https://www.programcreek.com/python/example/53873/psutil.boot_time # - https://docs.python.org/3/library/time.html #Partitions # Partition cant be accessed # IO stats since boot # Get all network interfaces # IO stats since boot | 2.490442 | 2 |
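A minimal usage sketch for the reporting script above, assuming its functions are in scope exactly as written (the script's module name is not shown in this excerpt):

# Hedged usage sketch: dump every section to system_report.csv in the working directory.
ok = dumpToCSV("system_report", dsys=True, ddisk=True, dnet=True)
if not ok:
    print("Dump failed; try a different filename or location.")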
tests/test_main.py | K0lb3/binaryreader | 1 | 6630676 | from struct import unpack_from, Struct, unpack, pack
from binaryreader import BinaryReader
TESTS = [
("Bool", "?", 1),
("Int8", "b", -8),
("UInt8", "B", 8),
("Int16", "h", -16),
("UInt16", "H", 16),
("Int32", "i", -32),
("UInt32", "I", 32),
("Int64", "q", -64),
("UInt64", "Q", 64),
("Half", "e", 2.0),
("Float", "f", 4.0),
("Double", "d", 8.0),
]
# generate the tests
for name, fmt, value in TESTS:
if 1:
exec(
f"""
def test_{name}():
print("Test {name}")
for endian in ["<", ">"]:
data = Struct(endian + "{fmt}").pack({value})
br = BinaryReader(data, endian == "<")
br_value = br.read{name}()
print({value}, br_value)
assert(br_value == {value})
return br
"""
)
if 1:
exec(
f"""
def test_{name}Array():
print("Test {name}Array")
if isinstance({value}, (int, float)):
array = [({value}**i)%127 for i in range(10)]
elif isinstance({value}, (str, bytes)):
array = [{value}*i for i in range(10)]
for endian in ["<", ">"]:
data = Struct(endian + "i").pack(10)
data += Struct(endian + "{fmt}"*10).pack(*array)
br = BinaryReader(data, endian == "<")
br_array = br.read{name}Array()
print(array)
print(br_array)
assert(all(x == y for x,y in zip(br_array, array)))
return br
"""
)
def test_stringC():
print("Test stringC")
value = "StringC"
data = value.encode("utf-8") + b"\x00"
br_value = BinaryReader(data).readStringC()
print(value, br_value)
assert br_value == value
def test_string():
print("Test string")
value = "StringLengthDelimited"
data = Struct("<i").pack(len(value)) + value.encode("utf-8")
br_value = BinaryReader(data, True).readString()
print(value, br_value)
assert br_value == value
data = value.encode("utf-8")
br_value = BinaryReader(data, True).readString(len(data))
print(value, br_value)
assert br_value == value
def test_string_aligned():
print("Test string aligned")
value = "StringAlligned"
data = value.encode("utf-8")
data = Struct("<i").pack(len(value)) + data
# align data to a multiple of 4 and add 1 to check if the alignment is correct
data += b"\x00" * (4 - (len(data) % 4))
data += b"\x01"
br = BinaryReader(data, True)
br_value = br.readStringAligned()
print(value, br_value)
assert br_value == value
assert br.readUInt8() == 1
def test_lsb():
value = b"lsb"
data = bytearray(24)
for j, x in enumerate(value):
for i in range(8):
data[j * 8 + i] = (x & (1 << i)) >> i
br = BinaryReader(data, False)
br_value = br.readLSB(len(value) * 8)
assert br_value == value
def run_tests():
print("Running tests")
for key, value in list(globals().items()):
if key.startswith("test_"):
br = value()
print()
if __name__ == "__main__":
run_tests()
| from struct import unpack_from, Struct, unpack, pack
from binaryreader import BinaryReader
TESTS = [
("Bool", "?", 1),
("Int8", "b", -8),
("UInt8", "B", 8),
("Int16", "h", -16),
("UInt16", "H", 16),
("Int32", "i", -32),
("UInt32", "I", 32),
("Int64", "q", -64),
("UInt64", "Q", 64),
("Half", "e", 2.0),
("Float", "f", 4.0),
("Double", "d", 8.0),
]
# generate the tests
for name, fmt, value in TESTS:
if 1:
exec(
f"""
def test_{name}():
print("Test {name}")
for endian in ["<", ">"]:
data = Struct(endian + "{fmt}").pack({value})
br = BinaryReader(data, endian == "<")
br_value = br.read{name}()
print({value}, br_value)
assert(br_value == {value})
return br
"""
)
if 1:
exec(
f"""
def test_{name}Array():
print("Test {name}Array")
if isinstance({value}, (int, float)):
array = [({value}**i)%127 for i in range(10)]
elif isinstance({value}, (str, bytes)):
array = [{value}*i for i in range(10)]
for endian in ["<", ">"]:
data = Struct(endian + "i").pack(10)
data += Struct(endian + "{fmt}"*10).pack(*array)
br = BinaryReader(data, endian == "<")
br_array = br.read{name}Array()
print(array)
print(br_array)
assert(all(x == y for x,y in zip(br_array, array)))
return br
"""
)
def test_stringC():
print("Test stringC")
value = "StringC"
data = value.encode("utf-8") + b"\x00"
br_value = BinaryReader(data).readStringC()
print(value, br_value)
assert br_value == value
def test_string():
print("Test string")
value = "StringLengthDelimited"
data = Struct("<i").pack(len(value)) + value.encode("utf-8")
br_value = BinaryReader(data, True).readString()
print(value, br_value)
assert br_value == value
data = value.encode("utf-8")
br_value = BinaryReader(data, True).readString(len(data))
print(value, br_value)
assert br_value == value
def test_string_aligned():
print("Test string aligned")
value = "StringAlligned"
data = value.encode("utf-8")
data = Struct("<i").pack(len(value)) + data
# align data to a multiple of 4 and add 1 to check if the alignment is correct
data += b"\x00" * (4 - (len(data) % 4))
data += b"\x01"
br = BinaryReader(data, True)
br_value = br.readStringAligned()
print(value, br_value)
assert br_value == value
assert br.readUInt8() == 1
def test_lsb():
value = b"lsb"
data = bytearray(24)
for j, x in enumerate(value):
for i in range(8):
data[j * 8 + i] = (x & (1 << i)) >> i
br = BinaryReader(data, False)
br_value = br.readLSB(len(value) * 8)
assert br_value == value
def run_tests():
print("Running tests")
for key, value in list(globals().items()):
if key.startswith("test_"):
br = value()
print()
if __name__ == "__main__":
run_tests()
| en | 0.409385 | # generate the tests def test_{name}(): print("Test {name}") for endian in ["<", ">"]: data = Struct(endian + "{fmt}").pack({value}) br = BinaryReader(data, endian == "<") br_value = br.read{name}() print({value}, br_value) assert(br_value == {value}) return br def test_{name}Array(): print("Test {name}Array") if isinstance({value}, (int, float)): array = [({value}**i)%127 for i in range(10)] elif isinstance({value}, (str, bytes)): array = [{value}*i for i in range(10)] for endian in ["<", ">"]: data = Struct(endian + "i").pack(10) data += Struct(endian + "{fmt}"*10).pack(*array) br = BinaryReader(data, endian == "<") br_array = br.read{name}Array() print(array) print(br_array) assert(all(x == y for x,y in zip(br_array, array))) return br # align data to multiple of 4 and add 1 to check if the allignment is correct | 2.961644 | 3 |
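For readers skimming the exec-generated tests above, a hand-written sketch of the same round-trip, using only BinaryReader calls that the tests themselves exercise (the int32 length-prefix layout for arrays is the tests' own convention):

from struct import Struct
from binaryreader import BinaryReader

# One scalar plus one count-prefixed array, packed little-endian.
data = Struct("<h").pack(-16) + Struct("<i").pack(3) + Struct("<hhh").pack(1, 2, 3)
br = BinaryReader(data, True)  # True selects little-endian, as in the tests above
assert br.readInt16() == -16
assert list(br.readInt16Array()) == [1, 2, 3]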
corehq/form_processor/serializers.py | dborowiecki/commcare-hq | 0 | 6630677 | <gh_stars>0
from django.utils.functional import lazy
from jsonfield import JSONField
from rest_framework import serializers
from corehq.apps.commtrack.models import StockState
from corehq.blobs.models import BlobMeta
from corehq.form_processor.models import (
CommCareCaseIndexSQL, CommCareCaseSQL, CaseTransaction,
XFormInstanceSQL, XFormOperationSQL,
LedgerValue, CaseAttachmentSQL)
class DeletableModelSerializer(serializers.ModelSerializer):
"""
A ModelSerializer that takes an additional `fields` argument that
controls which fields should be displayed.
"""
def __init__(self, instance=None, *args, **kwargs):
super(DeletableModelSerializer, self).__init__(instance=instance, *args, **kwargs)
if instance is not None and not instance.is_deleted:
self.fields.pop('deletion_id')
self.fields.pop('deleted_on')
class XFormOperationSQLSerializer(serializers.ModelSerializer):
user = serializers.CharField(source="user_id")
class Meta(object):
model = XFormOperationSQL
exclude = ('id', 'form', 'user_id')
class XFormAttachmentSQLSerializer(serializers.ModelSerializer):
id = serializers.CharField(source="key")
class Meta(object):
model = BlobMeta
fields = ('id', 'content_type', 'content_length')
def _serialize_form_attachments(form):
return form.serialized_attachments
def _serialize_form_history(form):
return list(XFormOperationSQLSerializer(form.history, many=True).data)
lazy_serialize_form_attachments = lazy(_serialize_form_attachments, dict)
lazy_serialize_form_history = lazy(_serialize_form_history, dict)
class XFormInstanceSQLSerializer(DeletableModelSerializer):
_id = serializers.CharField(source='form_id')
doc_type = serializers.CharField()
form = serializers.JSONField(source='form_data')
auth_context = serializers.DictField()
openrosa_headers = serializers.DictField()
class Meta(object):
model = XFormInstanceSQL
exclude = ('id', 'form_id', 'time_end', 'time_start', 'commcare_version', 'app_version')
class XFormStateField(serializers.ChoiceField):
def __init__(self, **kwargs):
super(XFormStateField, self).__init__(XFormInstanceSQL.STATES, **kwargs)
def get_attribute(self, obj):
choice = super(serializers.ChoiceField, self).get_attribute(obj)
readable_state = []
for state, state_slug in self.choices.items():
if choice & state:
readable_state.append(state_slug)
return ' / '.join(readable_state)
class JsonFieldSerializerMixin(object):
serializer_field_mapping = {}
serializer_field_mapping.update(DeletableModelSerializer.serializer_field_mapping)
serializer_field_mapping[JSONField] = serializers.JSONField
class XFormInstanceSQLRawDocSerializer(JsonFieldSerializerMixin, DeletableModelSerializer):
state = XFormStateField()
history = XFormOperationSQLSerializer(many=True, read_only=True)
form = serializers.JSONField(source='form_data')
external_blobs = serializers.JSONField(source='serialized_attachments')
class Meta(object):
model = XFormInstanceSQL
fields = '__all__'
class CommCareCaseIndexSQLSerializer(serializers.ModelSerializer):
case_id = serializers.CharField()
relationship = serializers.CharField()
class Meta(object):
model = CommCareCaseIndexSQL
fields = ('case_id', 'identifier', 'referenced_id', 'referenced_type', 'relationship')
class CaseTransactionActionSerializer(serializers.ModelSerializer):
xform_id = serializers.CharField(source='form_id')
date = serializers.DateTimeField(source='client_date')
class Meta(object):
model = CaseTransaction
fields = ('xform_id', 'server_date', 'date', 'sync_log_id')
class CaseTransactionActionRawDocSerializer(JsonFieldSerializerMixin, CaseTransactionActionSerializer):
type = serializers.CharField(source='readable_type')
class Meta(object):
model = CaseTransaction
fields = ('form_id', 'server_date', 'date', 'sync_log_id', 'type', 'details')
class CommCareCaseSQLRawDocSerializer(JsonFieldSerializerMixin, DeletableModelSerializer):
indices = CommCareCaseIndexSQLSerializer(many=True, read_only=True)
transactions = CaseTransactionActionRawDocSerializer(
many=True, read_only=True, source='non_revoked_transactions')
class Meta(object):
model = CommCareCaseSQL
fields = '__all__'
class CaseAttachmentSQLSerializer(serializers.ModelSerializer):
class Meta(object):
model = CaseAttachmentSQL
fields = (
'content_type',
'content_length',
'name',
)
def _serialize_case_indices(case):
return list(CommCareCaseIndexSQLSerializer(case.indices, many=True).data)
def _serialize_case_transactions(case):
return list(CaseTransactionActionSerializer(case.non_revoked_transactions, many=True).data)
def _serialize_case_xform_ids(case):
return list(case.xform_ids)
def _serialize_case_attachments(case):
return dict(case.serialized_attachments)
lazy_serialize_case_indices = lazy(_serialize_case_indices, list)
lazy_serialize_case_transactions = lazy(_serialize_case_transactions, list)
lazy_serialize_case_xform_ids = lazy(_serialize_case_xform_ids, list)
lazy_serialize_case_attachments = lazy(_serialize_case_attachments, dict)
class CommCareCaseSQLSerializer(DeletableModelSerializer):
_id = serializers.CharField(source='case_id')
doc_type = serializers.CharField()
user_id = serializers.CharField(source='modified_by')
case_json = serializers.JSONField()
class Meta(object):
model = CommCareCaseSQL
exclude = ('id', 'case_json',)
class CommCareCaseSQLAPISerializer(serializers.ModelSerializer):
"""This serializer is for presenting a case in json for APIs to access"""
user_id = serializers.CharField(source='modified_by')
date_closed = serializers.DateTimeField(source='closed_on')
date_modified = serializers.DateTimeField(source='modified_on')
properties = serializers.JSONField(source='get_properties_in_api_format')
server_date_modified = serializers.DateTimeField(source='server_modified_on')
indices = serializers.JSONField(source='get_index_map')
attachments = serializers.JSONField(source='get_attachment_map')
reverse_indices = serializers.JSONField(source='get_reverse_index_map')
def __init__(self, *args, **kwargs):
lite = kwargs.pop('lite', False)
if lite:
self.fields.pop('reverse_indices')
super(CommCareCaseSQLAPISerializer, self).__init__(*args, **kwargs)
class Meta(object):
model = CommCareCaseSQL
fields = (
'domain',
'case_id',
'user_id',
'closed',
'xform_ids',
'date_closed',
'date_modified',
'server_date_modified',
'properties',
'indices',
'reverse_indices',
'attachments',
)
class LedgerValueSerializer(serializers.ModelSerializer):
_id = serializers.CharField(source='ledger_id')
location_id = serializers.CharField()
case_id = serializers.CharField()
def __init__(self, *args, **kwargs):
include_location_id = kwargs.pop('include_location_id', False)
if not include_location_id:
self.fields.pop('location_id')
super(LedgerValueSerializer, self).__init__(*args, **kwargs)
class Meta(object):
model = LedgerValue
exclude = ('id', 'case')
class StockStateSerializer(serializers.ModelSerializer):
_id = serializers.CharField(source='id')
entry_id = serializers.CharField(source='product_id')
location_id = serializers.CharField(source='sql_location.location_id')
balance = serializers.IntegerField(source='stock_on_hand')
last_modified = serializers.DateTimeField(source='last_modified_date')
domain = serializers.CharField()
class Meta(object):
model = StockState
exclude = (
'id',
'product_id',
'stock_on_hand',
'last_modified_date',
'sql_product',
'sql_location',
)
| from django.utils.functional import lazy
from jsonfield import JSONField
from rest_framework import serializers
from corehq.apps.commtrack.models import StockState
from corehq.blobs.models import BlobMeta
from corehq.form_processor.models import (
CommCareCaseIndexSQL, CommCareCaseSQL, CaseTransaction,
XFormInstanceSQL, XFormOperationSQL,
LedgerValue, CaseAttachmentSQL)
class DeletableModelSerializer(serializers.ModelSerializer):
"""
A ModelSerializer that takes an additional `fields` argument that
controls which fields should be displayed.
"""
def __init__(self, instance=None, *args, **kwargs):
super(DeletableModelSerializer, self).__init__(instance=instance, *args, **kwargs)
if instance is not None and not instance.is_deleted:
self.fields.pop('deletion_id')
self.fields.pop('deleted_on')
class XFormOperationSQLSerializer(serializers.ModelSerializer):
user = serializers.CharField(source="user_id")
class Meta(object):
model = XFormOperationSQL
exclude = ('id', 'form', 'user_id')
class XFormAttachmentSQLSerializer(serializers.ModelSerializer):
id = serializers.CharField(source="key")
class Meta(object):
model = BlobMeta
fields = ('id', 'content_type', 'content_length')
def _serialize_form_attachments(form):
return form.serialized_attachments
def _serialize_form_history(form):
return list(XFormOperationSQLSerializer(form.history, many=True).data)
lazy_serialize_form_attachments = lazy(_serialize_form_attachments, dict)
lazy_serialize_form_history = lazy(_serialize_form_history, dict)
class XFormInstanceSQLSerializer(DeletableModelSerializer):
_id = serializers.CharField(source='form_id')
doc_type = serializers.CharField()
form = serializers.JSONField(source='form_data')
auth_context = serializers.DictField()
openrosa_headers = serializers.DictField()
class Meta(object):
model = XFormInstanceSQL
exclude = ('id', 'form_id', 'time_end', 'time_start', 'commcare_version', 'app_version')
class XFormStateField(serializers.ChoiceField):
def __init__(self, **kwargs):
super(XFormStateField, self).__init__(XFormInstanceSQL.STATES, **kwargs)
def get_attribute(self, obj):
choice = super(serializers.ChoiceField, self).get_attribute(obj)
readable_state = []
for state, state_slug in self.choices.items():
if choice & state:
readable_state.append(state_slug)
return ' / '.join(readable_state)
class JsonFieldSerializerMixin(object):
serializer_field_mapping = {}
serializer_field_mapping.update(DeletableModelSerializer.serializer_field_mapping)
serializer_field_mapping[JSONField] = serializers.JSONField
class XFormInstanceSQLRawDocSerializer(JsonFieldSerializerMixin, DeletableModelSerializer):
state = XFormStateField()
history = XFormOperationSQLSerializer(many=True, read_only=True)
form = serializers.JSONField(source='form_data')
external_blobs = serializers.JSONField(source='serialized_attachments')
class Meta(object):
model = XFormInstanceSQL
fields = '__all__'
class CommCareCaseIndexSQLSerializer(serializers.ModelSerializer):
case_id = serializers.CharField()
relationship = serializers.CharField()
class Meta(object):
model = CommCareCaseIndexSQL
fields = ('case_id', 'identifier', 'referenced_id', 'referenced_type', 'relationship')
class CaseTransactionActionSerializer(serializers.ModelSerializer):
xform_id = serializers.CharField(source='form_id')
date = serializers.DateTimeField(source='client_date')
class Meta(object):
model = CaseTransaction
fields = ('xform_id', 'server_date', 'date', 'sync_log_id')
class CaseTransactionActionRawDocSerializer(JsonFieldSerializerMixin, CaseTransactionActionSerializer):
type = serializers.CharField(source='readable_type')
class Meta(object):
model = CaseTransaction
fields = ('form_id', 'server_date', 'date', 'sync_log_id', 'type', 'details')
class CommCareCaseSQLRawDocSerializer(JsonFieldSerializerMixin, DeletableModelSerializer):
indices = CommCareCaseIndexSQLSerializer(many=True, read_only=True)
transactions = CaseTransactionActionRawDocSerializer(
many=True, read_only=True, source='non_revoked_transactions')
class Meta(object):
model = CommCareCaseSQL
fields = '__all__'
class CaseAttachmentSQLSerializer(serializers.ModelSerializer):
class Meta(object):
model = CaseAttachmentSQL
fields = (
'content_type',
'content_length',
'name',
)
def _serialize_case_indices(case):
return list(CommCareCaseIndexSQLSerializer(case.indices, many=True).data)
def _serialize_case_transactions(case):
return list(CaseTransactionActionSerializer(case.non_revoked_transactions, many=True).data)
def _serialize_case_xform_ids(case):
return list(case.xform_ids)
def _serialize_case_attachments(case):
return dict(case.serialized_attachments)
lazy_serialize_case_indices = lazy(_serialize_case_indices, list)
lazy_serialize_case_transactions = lazy(_serialize_case_transactions, list)
lazy_serialize_case_xform_ids = lazy(_serialize_case_xform_ids, list)
lazy_serialize_case_attachments = lazy(_serialize_case_attachments, dict)
class CommCareCaseSQLSerializer(DeletableModelSerializer):
_id = serializers.CharField(source='case_id')
doc_type = serializers.CharField()
user_id = serializers.CharField(source='modified_by')
case_json = serializers.JSONField()
class Meta(object):
model = CommCareCaseSQL
exclude = ('id', 'case_json',)
class CommCareCaseSQLAPISerializer(serializers.ModelSerializer):
"""This serializer is for presenting a case in json for APIs to access"""
user_id = serializers.CharField(source='modified_by')
date_closed = serializers.DateTimeField(source='closed_on')
date_modified = serializers.DateTimeField(source='modified_on')
properties = serializers.JSONField(source='get_properties_in_api_format')
server_date_modified = serializers.DateTimeField(source='server_modified_on')
indices = serializers.JSONField(source='get_index_map')
attachments = serializers.JSONField(source='get_attachment_map')
reverse_indices = serializers.JSONField(source='get_reverse_index_map')
def __init__(self, *args, **kwargs):
lite = kwargs.pop('lite', False)
if lite:
self.fields.pop('reverse_indices')
super(CommCareCaseSQLAPISerializer, self).__init__(*args, **kwargs)
class Meta(object):
model = CommCareCaseSQL
fields = (
'domain',
'case_id',
'user_id',
'closed',
'xform_ids',
'date_closed',
'date_modified',
'server_date_modified',
'properties',
'indices',
'reverse_indices',
'attachments',
)
class LedgerValueSerializer(serializers.ModelSerializer):
_id = serializers.CharField(source='ledger_id')
location_id = serializers.CharField()
case_id = serializers.CharField()
def __init__(self, *args, **kwargs):
include_location_id = kwargs.pop('include_location_id', False)
if not include_location_id:
self.fields.pop('location_id')
super(LedgerValueSerializer, self).__init__(*args, **kwargs)
class Meta(object):
model = LedgerValue
exclude = ('id', 'case')
class StockStateSerializer(serializers.ModelSerializer):
_id = serializers.CharField(source='id')
entry_id = serializers.CharField(source='product_id')
location_id = serializers.CharField(source='sql_location.location_id')
balance = serializers.IntegerField(source='stock_on_hand')
last_modified = serializers.DateTimeField(source='last_modified_date')
domain = serializers.CharField()
class Meta(object):
model = StockState
exclude = (
'id',
'product_id',
'stock_on_hand',
'last_modified_date',
'sql_product',
'sql_location',
) | en | 0.893353 | A ModelSerializer that takes an additional `fields` argument that controls which fields should be displayed. This serializer is for presenting a case in json for APIs to access | 2.107344 | 2 |
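A sketch of how the API-facing serializer above is typically consumed; `case` is assumed to be an existing CommCareCaseSQL instance obtained elsewhere:

from corehq.form_processor.serializers import CommCareCaseSQLAPISerializer

def case_to_api_json(case, lite=False):
    # lite=True drops reverse_indices, per the __init__ override above.
    return CommCareCaseSQLAPISerializer(case, lite=lite).data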
programas/ola_mundo.py | ismaeldamiao/Apostila_de_IFC | 0 | 6630678 | <filename>programas/ola_mundo.py<gh_stars>0
print("<NAME>")
| <filename>programas/ola_mundo.py<gh_stars>0
print("<NAME>")
| none | 1 | 1.108982 | 1 |
|
CodingTest_Study1/week09/ex2740.py | FridayAlgorithm/taesong_study | 0 | 6630679 | # BOJ 2740: Matrix Multiplication
N, M = map(int, input().split()) # dimensions of matrix A: N x M
A = []
for i in range(N):
A.append(list(map(int, input().split())))
M, K = map(int, input().split()) # dimensions of matrix B: M x K
B = []
for i in range(M):
B.append(list(map(int, input().split())))
C = [[0] * K for _ in range(N)]
for i in range(N):
for j in range(K):
for k in range(M):
C[i][j] += A[i][k] * B[k][j]
print(C[i][j], end=' ')
print()
| # BOJ 2740: Matrix Multiplication
N, M = map(int, input().split()) # dimensions of matrix A: N x M
A = []
for i in range(N):
A.append(list(map(int, input().split())))
M, K = map(int, input().split()) # dimensions of matrix B: M x K
B = []
for i in range(M):
B.append(list(map(int, input().split())))
C = [[0] * K for _ in range(N)]
for i in range(N):
for j in range(K):
for k in range(M):
C[i][j] += A[i][k] * B[k][j]
print(C[i][j], end=' ')
print()
| ko | 0.997607 | # BOJ 2740: Matrix Multiplication # dimensions of matrix A: N x M # dimensions of matrix B: M x K | 2.936219 | 3 |
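A standalone worked example of the triple loop above (not part of the judge submission):

# C[i][j] accumulates A[i][k] * B[k][j]; checked here with 2x2 matrices.
A = [[1, 2], [3, 4]]
B = [[5, 6], [7, 8]]
C = [[sum(A[i][k] * B[k][j] for k in range(2)) for j in range(2)] for i in range(2)]
assert C == [[19, 22], [43, 50]]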
payment_ui/custom_extensions/jinja_markdown_filter/main.py | LandRegistry/digital-street-payment-ui | 1 | 6630680 | import misaka
from jinja2 import Markup
from payment_ui.custom_extensions.jinja_markdown_filter.gov_renderer import GovRenderer
class JinjaMarkdownFilter(object):
"""Markdown filter for Jinja templates"""
render_markdown = misaka.Markdown(GovRenderer(), extensions=('autolink',))
def __init__(self, app=None):
self.app = app
if app is not None:
self.init_app(app)
def init_app(self, app):
app.jinja_env.filters['markdown'] = self.markdown_filter(app)
def markdown_filter(self, app):
def render(value):
return Markup(self.render_markdown(value))
return render
| import misaka
from jinja2 import Markup
from payment_ui.custom_extensions.jinja_markdown_filter.gov_renderer import GovRenderer
class JinjaMarkdownFilter(object):
"""Markdown filter for Jinja templates"""
render_markdown = misaka.Markdown(GovRenderer(), extensions=('autolink',))
def __init__(self, app=None):
self.app = app
if app is not None:
self.init_app(app)
def init_app(self, app):
app.jinja_env.filters['markdown'] = self.markdown_filter(app)
def markdown_filter(self, app):
def render(value):
return Markup(self.render_markdown(value))
return render
| en | 0.413395 | Markdown filter for Jinja templates | 2.31875 | 2 |
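A sketch of wiring the extension above into a Flask app; the sample template string is arbitrary:

from flask import Flask, render_template_string
from payment_ui.custom_extensions.jinja_markdown_filter.main import JinjaMarkdownFilter

app = Flask(__name__)
JinjaMarkdownFilter(app)  # init_app() registers the "markdown" Jinja filter

with app.app_context():
    html = render_template_string("{{ '## Payment due' | markdown }}")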
economic/journals.py | reiem/python-economic-rest | 0 | 6630681 | <reponame>reiem/python-economic-rest<filename>economic/journals.py<gh_stars>0
from economic.journal_entries import JournalEntry
from economic.query import QueryMixin
from economic.serializer import EconomicSerializer
class JournalSerializer(EconomicSerializer):
id_property_name = 'journal_number'
class Journal(JournalSerializer, QueryMixin):
base_url = "https://restapi.e-conomic.com/journals-experimental/"
def get_journal_entries(self, limit=None):
# self.entries is the URL for this Journal's entries
# we have to remove the query parameters from the URL first, since they are added again by _query
return JournalEntry._query(self.auth, self.entries.split('?')[0], limit=limit) | from economic.journal_entries import JournalEntry
from economic.query import QueryMixin
from economic.serializer import EconomicSerializer
class JournalSerializer(EconomicSerializer):
id_property_name = 'journal_number'
class Journal(JournalSerializer, QueryMixin):
base_url = "https://restapi.e-conomic.com/journals-experimental/"
def get_journal_entries(self, limit=None):
# self.entries is the URL for this Journal's entries
# we have to remove the query parameters from the URL first, since they are added again by _query
return JournalEntry._query(self.auth, self.entries.split('?')[0], limit=limit) | en | 0.913027 | # self.entries is the URL for this Journals's entries # we have to remove the query parameters from the URL first, since they are added again by _query | 2.399312 | 2 |
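To illustrate the comment in get_journal_entries() above, a small sketch with a made-up entries URL, showing the query string being stripped before _query() re-adds its own parameters:

entries_url = "https://restapi.e-conomic.com/journals-experimental/1/entries?skippages=0&pagesize=20"
base_url = entries_url.split('?')[0]
assert base_url == "https://restapi.e-conomic.com/journals-experimental/1/entries"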
reverses/deoat.py | CortanaOS/tools | 0 | 6630682 | <gh_stars>0
#!/usr/bin/python
# Copyright 2015 Coron
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Convert the OAT format on ART to DEX format on DALVIKVM.
Usage: deoat.py [OPTIONS] <otapackage.zip> [<otapackage.deoat.zip>]
OPTIONS:
--app, -a: only de-oat the apk in system.
--framework, -f: only de-oat the jar in system.
"""
__author__ = '<EMAIL>'
import os
import commands
import re
import shutil
from common import Utils, Log
# Global
TAG="reverse-deoat"
OPTIONS = None
class OatZip:
""" Model of OAT ZIP file
"""
OAT2DEX = os.path.join(os.path.dirname(__file__), "de-oat", "oat2dex.sh")
def __init__(self, unzipRoot):
self.mRoot = unzipRoot
self.mFrwDir = os.path.join(self.mRoot, "system/framework")
self.mAppDir = os.path.join(self.mRoot, "system/app")
self.mPrivAppDir = os.path.join(self.mRoot, "system/priv-app")
# boot.oat
self.mBootOAT = self.findBootOAT()
if self.mBootOAT != None:
self.mBootOATDir = os.path.dirname(self.mBootOAT)
self.mBootClassFolder = os.path.join(self.mBootOATDir, "dex")
def findBootOAT(self):
""" Find the absolute path of boot.oat
In Android 5.0+, all the jars of BOOTCLASSPATH are packaged into boot.oat
"""
bootOATPath = os.path.join(self.mFrwDir, "arm/boot.oat")
if os.path.exists(bootOATPath):
return bootOATPath
bootOATPath = os.path.join(self.mFrwDir, "x86/boot.oat")
if os.path.exists(bootOATPath):
return bootOATPath
bootOATPath = None
cmd = "find %s -name boot.oat" % (commands.mkarg(self.mFrwDir))
(sts, text) = commands.getstatusoutput(cmd)
try:
if sts == 0:
text = text.split("\n")[0]
if len(text) > 0:
return text
except:
bootOATPath = None
return bootOATPath
def deoat(self):
""" De-oat the OTA package.
"""
if self.mBootOAT == None:
Log.i(TAG, "deoat(): boot.oat not found in %s, nothing need deoat" % self.mRoot)
return self
if os.path.exists(self.mBootClassFolder):
Log.d(TAG, "Delete the already exists %s" %self.mBootClassFolder)
shutil.rmtree(self.mBootClassFolder)
# Phase 1: de-oat boot.oat
OatZip.deoatBootOAT(self.mBootOAT)
# Phase 2: de-oat all the other oat files, of which suffix is odex.
# [Android 5.0]: All the oat jars are located in the same folder with boot.oat
OatZip.deoatFrw(self.mBootOATDir)
# Phase 3: de-oat app
OatZip.deoatApp(self.mFrwDir, self.mBootClassFolder)
OatZip.deoatApp(self.mAppDir, self.mBootClassFolder)
OatZip.deoatApp(self.mPrivAppDir, self.mBootClassFolder)
return self
def rebuild(self):
""" Rebuild the deoated zip
"""
if self.mBootOAT == None:
Log.i(TAG, "rebuild(): boot.oat not found, nothing need rebuild")
return
OatZip.repackageFrw(self.mFrwDir, self.mBootClassFolder)
OatZip.repackageApp(self.mFrwDir)
OatZip.repackageApp(self.mAppDir)
OatZip.repackageApp(self.mPrivAppDir)
# Remove the whole OAT directory
if os.path.exists(self.mBootOATDir):
shutil.rmtree(self.mBootOATDir)
@staticmethod
def deoatBootOAT(bootOAT):
""" De-oat boot.oat
"""
Log.i(TAG, "De-oat %s" % bootOAT)
Utils.runWithOutput([OatZip.OAT2DEX, "boot", bootOAT])
@staticmethod
def deoatFrw(oatJarDir):
""" De-oat framework
"""
if not OPTIONS.formatFrw: return
Log.i(TAG, "De-oat files of oat-format in %s" % oatJarDir)
for item in os.listdir(oatJarDir):
if item.endswith(".odex"):
# COMMANDS: oat2dex boot <jar-of-oat-format>
oatJar = os.path.join(oatJarDir, item)
Utils.runWithOutput([OatZip.OAT2DEX, "boot", oatJar])
@staticmethod
def deoatApp(oatApkDir, bootClassFolder):
""" De-oat app
"""
if OPTIONS.formatApp == False: return
Log.i(TAG, "De-oat files of oat-format in %s, with BOOTCLASSFOLDER=%s" %(oatApkDir, bootClassFolder))
for (dirpath, dirnames, filenames) in os.walk(oatApkDir):
dirnames = dirnames # no use, to avoid warning
for filename in filenames:
if filename.endswith(".odex"):
# no need to de-oat if original apk does not exist
apkFile = filename[0:-5] + ".apk"
apkPath = os.path.dirname(dirpath)
if not os.path.exists(os.path.join(apkPath, apkFile)):
continue
oatApk = os.path.join(dirpath, filename)
deoatApk = oatApk[0:-5] + ".dex"
if os.path.exists(deoatApk):
Log.d(TAG, "Delete the already exists %s" % deoatApk)
os.remove(deoatApk)
Utils.runWithOutput([OatZip.OAT2DEX, oatApk, bootClassFolder])
@staticmethod
def repackageFrw(frwDir, bootClassFolder):
""" Repackage the classes.dex into jar of frwDir.
"""
if OPTIONS.formatFrw == False : return
# Keep the old directory, we will change back after some operations.
oldDir = os.path.abspath(os.curdir)
# Some dexFiles are parted, such as framework-classes2.dex
regex = re.compile("(.*)-(classes\d?).dex")
Log.i(TAG, "Repackage JARs of %s" %(frwDir))
os.chdir(frwDir)
for dexFile in os.listdir(bootClassFolder):
if dexFile.endswith(".dex"):
jarFile = dexFile[0:-4] + ".jar"
dexName = "classes.dex"
if not os.path.exists(jarFile):
# Match out the jar file with regex
matcher = regex.match(dexFile)
if matcher != None:
jarFile = matcher.group(1) + ".jar"
dexName = matcher.group(2) + ".dex"
Log.d(TAG, "Repackage %s" %(jarFile))
# Put the dex and framework's jar in the same folder, and jar into the jarFile
shutil.move(os.path.join(bootClassFolder, dexFile), os.path.join(frwDir, dexName))
Utils.runWithOutput(["jar", "uf", jarFile, dexName])
if os.path.exists(dexName):
os.remove(dexName)
os.chdir(oldDir)
@staticmethod
def repackageApp(appDir):
""" Repackage the classes.dex into apk of appDir
"""
if OPTIONS.formatApp == False: return
# Keep the old directory, we will change back after some operations.
oldDir = os.path.abspath(os.curdir)
Log.i(TAG, "Repackage APKs of %s" %(appDir))
for (dirpath, dirnames, filenames) in os.walk(appDir):
dirnames = dirnames # no use, to avoid warning
for dexFile in filenames:
if dexFile.endswith(".dex"):
apkFile = dexFile[0:-4] + ".apk"
apkPath = os.path.dirname(dirpath)
if not os.path.exists(os.path.join(apkPath, apkFile)):
Log.d(TAG, "No apk matched with %s, Ignore" %dexFile)
continue
dexName = "classes.dex"
Log.d(TAG, "Repackage %s" %(apkPath))
# Put the dex and apk in the same folder, and jar into the apk
shutil.move(os.path.join(dirpath, dexFile), os.path.join(apkPath, dexName))
os.chdir(apkPath)
Utils.runWithOutput(["jar", "uf", apkFile, dexName])
if os.path.exists(dexName):
os.remove(dexName)
shutil.rmtree(dirpath)
os.chdir(oldDir)
def debug():
Log.DEBUG = True
root = "root directory the unziped files"
#root = "/w/code/smali-5.0/devices/sony/out/tmp"
OatZip(root).deoat()
if __name__ == "__main__":
debug()
| #!/usr/bin/python
# Copyright 2015 Coron
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Convert the OAT format on ART to DEX format on DALVIKVM.
Usage: deoat.py [OPTIONS] <otapackage.zip> [<otapackage.deoat.zip>]
OPTIONS:
--app, -a: only de-oat the apk in system.
--framework, -f: only de-oat the jar in system.
"""
__author__ = '<EMAIL>'
import os
import commands
import re
import shutil
from common import Utils, Log
# Global
TAG="reverse-deoat"
OPTIONS = None
class OatZip:
""" Model of OAT ZIP file
"""
OAT2DEX = os.path.join(os.path.dirname(__file__), "de-oat", "oat2dex.sh")
def __init__(self, unzipRoot):
self.mRoot = unzipRoot
self.mFrwDir = os.path.join(self.mRoot, "system/framework")
self.mAppDir = os.path.join(self.mRoot, "system/app")
self.mPrivAppDir = os.path.join(self.mRoot, "system/priv-app")
# boot.oat
self.mBootOAT = self.findBootOAT()
if self.mBootOAT != None:
self.mBootOATDir = os.path.dirname(self.mBootOAT)
self.mBootClassFolder = os.path.join(self.mBootOATDir, "dex")
def findBootOAT(self):
""" Find the absolute path of boot.oat
In Android 5.0+, all the jars of BOOTCLASSPATH are packaged into boot.oat
"""
bootOATPath = os.path.join(self.mFrwDir, "arm/boot.oat")
if os.path.exists(bootOATPath):
return bootOATPath
bootOATPath = os.path.join(self.mFrwDir, "x86/boot.oat")
if os.path.exists(bootOATPath):
return bootOATPath
bootOATPath = None
cmd = "find %s -name boot.oat" % (commands.mkarg(self.mFrwDir))
(sts, text) = commands.getstatusoutput(cmd)
try:
if sts == 0:
text = text.split("\n")[0]
if len(text) > 0:
return text
except:
bootOATPath = None
return bootOATPath
def deoat(self):
""" De-oat the OTA package.
"""
if self.mBootOAT == None:
Log.i(TAG, "deoat(): boot.oat not found in %s, nothing need deoat" % self.mRoot)
return self
if os.path.exists(self.mBootClassFolder):
Log.d(TAG, "Delete the already exists %s" %self.mBootClassFolder)
shutil.rmtree(self.mBootClassFolder)
# Phase 1: de-oat boot.oat
OatZip.deoatBootOAT(self.mBootOAT)
# Phase 2: de-oat all the other oat files, of which suffix is odex.
# [Android 5.0]: All the oat jars are located in the same folder with boot.oat
OatZip.deoatFrw(self.mBootOATDir)
# Phase 3: de-oat app
OatZip.deoatApp(self.mFrwDir, self.mBootClassFolder)
OatZip.deoatApp(self.mAppDir, self.mBootClassFolder)
OatZip.deoatApp(self.mPrivAppDir, self.mBootClassFolder)
return self
def rebuild(self):
""" Rebuild the deoated zip
"""
if self.mBootOAT == None:
Log.i(TAG, "rebuild(): boot.oat not found, nothing need rebuild")
return
OatZip.repackageFrw(self.mFrwDir, self.mBootClassFolder)
OatZip.repackageApp(self.mFrwDir)
OatZip.repackageApp(self.mAppDir)
OatZip.repackageApp(self.mPrivAppDir)
# Remove the whole OAT directory
if os.path.exists(self.mBootOATDir):
shutil.rmtree(self.mBootOATDir)
@staticmethod
def deoatBootOAT(bootOAT):
""" De-oat boot.oat
"""
Log.i(TAG, "De-oat %s" % bootOAT)
Utils.runWithOutput([OatZip.OAT2DEX, "boot", bootOAT])
@staticmethod
def deoatFrw(oatJarDir):
""" De-oat framework
"""
if not OPTIONS.formatFrw: return
Log.i(TAG, "De-oat files of oat-format in %s" % oatJarDir)
for item in os.listdir(oatJarDir):
if item.endswith(".odex"):
# COMMANDS: oat2dex boot <jar-of-oat-format>
oatJar = os.path.join(oatJarDir, item)
Utils.runWithOutput([OatZip.OAT2DEX, "boot", oatJar])
@staticmethod
def deoatApp(oatApkDir, bootClassFolder):
""" De-oat app
"""
if OPTIONS.formatApp == False: return
Log.i(TAG, "De-oat files of oat-format in %s, with BOOTCLASSFOLDER=%s" %(oatApkDir, bootClassFolder))
for (dirpath, dirnames, filenames) in os.walk(oatApkDir):
dirnames = dirnames # no use, to avoid warning
for filename in filenames:
if filename.endswith(".odex"):
# no need to de-oat if original apk does not exist
apkFile = filename[0:-5] + ".apk"
apkPath = os.path.dirname(dirpath)
if not os.path.exists(os.path.join(apkPath, apkFile)):
continue
oatApk = os.path.join(dirpath, filename)
deoatApk = oatApk[0:-5] + ".dex"
if os.path.exists(deoatApk):
Log.d(TAG, "Delete the already exists %s" % deoatApk)
os.remove(deoatApk)
Utils.runWithOutput([OatZip.OAT2DEX, oatApk, bootClassFolder])
@staticmethod
def repackageFrw(frwDir, bootClassFolder):
""" Repackage the classes.dex into jar of frwDir.
"""
if OPTIONS.formatFrw == False : return
# Keep the old directory, we will change back after some operations.
oldDir = os.path.abspath(os.curdir)
# Some dexFiles are parted, such as framework-classes2.dex
regex = re.compile("(.*)-(classes\d?).dex")
Log.i(TAG, "Repackage JARs of %s" %(frwDir))
os.chdir(frwDir)
for dexFile in os.listdir(bootClassFolder):
if dexFile.endswith(".dex"):
jarFile = dexFile[0:-4] + ".jar"
dexName = "classes.dex"
if not os.path.exists(jarFile):
# Match out the jar file with regex
matcher = regex.match(dexFile)
if matcher != None:
jarFile = matcher.group(1) + ".jar"
dexName = matcher.group(2) + ".dex"
Log.d(TAG, "Repackage %s" %(jarFile))
# Put the dex and framework's jar in the same folder, and jar into the jarFile
shutil.move(os.path.join(bootClassFolder, dexFile), os.path.join(frwDir, dexName))
Utils.runWithOutput(["jar", "uf", jarFile, dexName])
if os.path.exists(dexName):
os.remove(dexName)
os.chdir(oldDir)
@staticmethod
def repackageApp(appDir):
""" Repackage the classes.dex into apk of appDir
"""
if OPTIONS.formatApp == False: return
# Keep the old directory, we will change back after some operations.
oldDir = os.path.abspath(os.curdir)
Log.i(TAG, "Repackage APKs of %s" %(appDir))
for (dirpath, dirnames, filenames) in os.walk(appDir):
dirnames = dirnames # no use, to avoid warning
for dexFile in filenames:
if dexFile.endswith(".dex"):
apkFile = dexFile[0:-4] + ".apk"
apkPath = os.path.dirname(dirpath)
if not os.path.exists(os.path.join(apkPath, apkFile)):
Log.d(TAG, "No apk matched with %s, Ignore" %dexFile)
continue
dexName = "classes.dex"
Log.d(TAG, "Repackage %s" %(apkPath))
# Put the dex and apk in the same folder, and jar into the apk
shutil.move(os.path.join(dirpath, dexFile), os.path.join(apkPath, dexName))
os.chdir(apkPath)
Utils.runWithOutput(["jar", "uf", apkFile, dexName])
if os.path.exists(dexName):
os.remove(dexName)
shutil.rmtree(dirpath)
os.chdir(oldDir)
def debug():
Log.DEBUG = True
root = "root directory the unziped files"
#root = "/w/code/smali-5.0/devices/sony/out/tmp"
OatZip(root).deoat()
if __name__ == "__main__":
debug() | en | 0.809192 | #!/usr/bin/python # Copyright 2015 Coron # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # Convert the OAT format on ART to DEX format on DALVIKVM. Usage: deoat.py [OPTIONS] <otapackage.zip> [<otapackage.deoat.zip>] OPTIONS: --app, -a: only de-oat the apk in system. --framework, -f: only de-oat the jar in system. # Global Model of OAT ZIP file # boot.oat Find the absolute path of boot.oat In Android 5.0+, all the jars of BOOTCLASSPATH are packaged into boot.oat De-oat the OTA package. # Phase 1: de-oat boot.oat # Phase 2: de-oat all the other oat files, of which suffix is odex. # [Android 5.0]: All the oat jars are located in the same folder with boot.oat # Phase 3: de-oat app Rebuild the deoated zip # Remove the whole OAT directory De-oat boot.oat De-oat framework # COMMANDS: oat2dex boot <jar-of-oat-format> De-oat app # no use, to avoid warning # no need to de-oat if original apk does not exist Repackage the classes.dex into jar of frwDir. # Keep the old directory, we will change back after some operations. # Some dexFiles are parted, such as framework-classes2.dex # Match out the jar file with regex # Put the dex and framework's jar in the same folder, and jar into the jarFile Repackage the classes.dex into apk of appDir # Keep the old directory, we will change back after some operations. # no use, to avoid warning # Put the dex and apk in the same folder, and jar into the apk #root = "/w/code/smali-5.0/devices/sony/out/tmp" | 2.037552 | 2 |
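A sketch of driving OatZip outside of debug(); OPTIONS is normally filled in by a command-line front end that is not part of this file, so it is stubbed here, and the import name is an assumption:

import deoat  # assumption: the file above is importable under this name

class _Options(object):  # stand-in for the real option parser's result
    formatFrw = True
    formatApp = True

deoat.OPTIONS = _Options()
deoat.OatZip("/tmp/ota_unzipped").deoat().rebuild()  # path is a placeholder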
defects_dlmbl/utilities.py | bbarad/defects_DLMBL | 0 | 6630683 | <gh_stars>0
import mrcfile
import zarr
import numpy as np
def mrc_to_zarr(input_mrc_list, output_zarr, input_label_list=None, labels_upside_down = True, flipy=None):
"""
Convert a group of mrc files (data and labels) to a zarr file.
Assume file names are of the form: '<identifier>_<datatype>.mrc'
input_mrc_list - list (or singular string) of mrc files to convert
input_label_list - list (or singular string) of mrc files to convert. Labels can be for a subset of data files.
"""
if flipy is None:
flipy=[]
root = zarr.open_group(output_zarr, mode="w")
root.create_group("data")
root.create_group("labels")
if type(input_mrc_list) is str:
input_mrc_list = [input_mrc_list]
if type(input_label_list) is str:
input_label_list = [input_label_list]
for index,input_mrc in enumerate(input_mrc_list):
print(input_mrc)
with mrcfile.open(input_mrc, mode="r", permissive=True) as mrc:
data = mrc.data.astype(np.float32)
if index in flipy:
data = np.flip(data, axis=1)
print(f"Flipping y axis for {index},{input_mrc}")
data = np.flip(data, axis=0)
dataname = input_mrc.split("/")[-1].split("_")[0]
root["data"].create_dataset(dataname, data=data, chunks=(1,)+(data.shape[1:]))
if type(input_label_list) is list:
for input_label in input_label_list:
with mrcfile.open(input_label, mode="r", permissive=True) as mrc:
dataname = input_label.split("/")[-1].split("_")[0]
labels = mrc.data
if labels_upside_down:
labels = np.flip(labels, axis=0)
root["labels"].create_dataset(dataname, data=labels, chunks=(1,)+(labels.shape[1:]))
print(root.tree())
return root
| import mrcfile
import zarr
import numpy as np
def mrc_to_zarr(input_mrc_list, output_zarr, input_label_list=None, labels_upside_down = True, flipy=None):
"""
Convert a group of mrc files (data and labels) to a zarr file.
Assume file names are of the form: '<identifier>_<datatype>.mrc'
input_mrc_list - list (or singular string) of mrc files to convert
input_label_list - list (or singular string) of mrc files to convert. Labels can be for a subset of data files.
"""
if flipy is None:
flipy=[]
root = zarr.open_group(output_zarr, mode="w")
root.create_group("data")
root.create_group("labels")
if type(input_mrc_list) is str:
input_mrc_list = [input_mrc_list]
if type(input_label_list) is str:
input_label_list = [input_label_list]
for index,input_mrc in enumerate(input_mrc_list):
print(input_mrc)
with mrcfile.open(input_mrc, mode="r", permissive=True) as mrc:
data = mrc.data.astype(np.float32)
if index in flipy:
data = np.flip(data, axis=1)
print(f"Flipping y axis for {index},{input_mrc}")
data = np.flip(data, axis=0)
dataname = input_mrc.split("/")[-1].split("_")[0]
root["data"].create_dataset(dataname, data=data, chunks=(1,)+(data.shape[1:]))
if type(input_label_list) is list:
for input_label in input_label_list:
with mrcfile.open(input_label, mode="r", permissive=True) as mrc:
dataname = input_label.split("/")[-1].split("_")[0]
labels = mrc.data
if labels_upside_down:
labels = np.flip(labels, axis=0)
root["labels"].create_dataset(dataname, data=labels, chunks=(1,)+(labels.shape[1:]))
print(root.tree())
return root | en | 0.557534 | Convert a group of mrc files (data and labels) to a zarr file. Assume file names are of the form: '<identifier>_<datatype>.mrc' input_mrc_list - list (or singular string) of mrc files to convert input_label_list - list (or singular string) of mrc files to convert. Labels can be for a subset of data files. | 3.091756 | 3 |
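A sketch of calling the converter above; the paths are invented but follow the '<identifier>_<datatype>.mrc' naming convention the docstring assumes:

from defects_dlmbl.utilities import mrc_to_zarr

root = mrc_to_zarr(
    input_mrc_list=["tomo01_data.mrc", "tomo02_data.mrc"],
    output_zarr="defects.zarr",
    input_label_list=["tomo01_labels.mrc"],  # labels may cover only a subset of the data
    flipy=[1],                               # flip the y axis of the second volume only
)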
tests/ut/cpp/python_input/gtest_input/pre_activate/remove_internal_output_test.py | chncwang/mindspore | 0 | 6630684 | <reponame>chncwang/mindspore
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
from mindspore.ops import Primitive
from mindspore.ops import operations as P
from mindspore.ops import _constants as Constants
tuple_getitem = Primitive(Constants.kTupleGetItem)
add = P.Add()
max_pool = P.MaxPoolWithArgmax(pad_mode="same", kernel_size=3, strides=2)
make_tuple = Primitive('make_tuple')
trans_data = Primitive("TransData")
class FnDict:
def __init__(self):
self.fnDict = {}
def __call__(self, fn):
self.fnDict[fn.__name__] = fn
def __getitem__(self, name):
return self.fnDict[name]
def test_remove_internal_output_trans_op_for_single_output(tag):
fns = FnDict()
@fns
def before(x, y):
res = add(x, y)
return res
@fns
def after_insert_trans_op(x, y):
output = add(x, y)
res = trans_data(output)
return make_tuple(res)
@fns
def after_remove_internal_output_trans_op(x, y):
res = add(x, y)
return make_tuple(res)
return fns[tag]
def test_remove_internal_output_trans_op_for_multiple_output(tag):
fns = FnDict()
@fns
def before(x):
max_pool_res = max_pool(x)
res = make_tuple(tuple_getitem(max_pool_res, 0), tuple_getitem(max_pool_res, 1))
return res
@fns
def after_insert_trans_op(x):
output = max_pool(x)
trans_data0 = trans_data(tuple_getitem(output, 0))
trans_data1 = trans_data(tuple_getitem(output, 1))
new_make_tuple = make_tuple(trans_data0, trans_data1)
res = make_tuple(tuple_getitem(new_make_tuple, 0), tuple_getitem(new_make_tuple, 1))
return make_tuple(res)
@fns
def after_remove_internal_output_trans_op(x):
output = max_pool(x)
new_make_tuple = make_tuple(tuple_getitem(output, 0), tuple_getitem(output, 1))
res = make_tuple(tuple_getitem(new_make_tuple, 0), tuple_getitem(new_make_tuple, 1))
return make_tuple(res)
return fns[tag]
| # Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
from mindspore.ops import Primitive
from mindspore.ops import operations as P
from mindspore.ops import _constants as Constants
tuple_getitem = Primitive(Constants.kTupleGetItem)
add = P.Add()
max_pool = P.MaxPoolWithArgmax(pad_mode="same", kernel_size=3, strides=2)
make_tuple = Primitive('make_tuple')
trans_data = Primitive("TransData")
class FnDict:
def __init__(self):
self.fnDict = {}
def __call__(self, fn):
self.fnDict[fn.__name__] = fn
def __getitem__(self, name):
return self.fnDict[name]
def test_remove_internal_output_trans_op_for_single_output(tag):
fns = FnDict()
@fns
def before(x, y):
res = add(x, y)
return res
@fns
def after_insert_trans_op(x, y):
output = add(x, y)
res = trans_data(output)
return make_tuple(res)
@fns
def after_remove_internal_output_trans_op(x, y):
res = add(x, y)
return make_tuple(res)
return fns[tag]
def test_remove_internal_output_trans_op_for_multiple_output(tag):
fns = FnDict()
@fns
def before(x):
max_pool_res = max_pool(x)
res = make_tuple(tuple_getitem(max_pool_res, 0), tuple_getitem(max_pool_res, 1))
return res
@fns
def after_insert_trans_op(x):
output = max_pool(x)
trans_data0 = trans_data(tuple_getitem(output, 0))
trans_data1 = trans_data(tuple_getitem(output, 1))
new_make_tuple = make_tuple(trans_data0, trans_data1)
res = make_tuple(tuple_getitem(new_make_tuple, 0), tuple_getitem(new_make_tuple, 1))
return make_tuple(res)
@fns
def after_remove_internal_output_trans_op(x):
output = max_pool(x)
new_make_tuple = make_tuple(tuple_getitem(output, 0), tuple_getitem(output, 1))
res = make_tuple(tuple_getitem(new_make_tuple, 0), tuple_getitem(new_make_tuple, 1))
return make_tuple(res)
return fns[tag] | en | 0.808111 | # Copyright 2020 Huawei Technologies Co., Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================ | 2.177313 | 2 |
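The FnDict helper above is just a tag-to-graph-builder lookup; a sketch of how a tagged builder is fetched (normally driven from the C++ gtest side):

before_graph = test_remove_internal_output_trans_op_for_single_output('before')
after_graph = test_remove_internal_output_trans_op_for_single_output(
    'after_remove_internal_output_trans_op')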
migrations/versions/69a322b64d30_add_scraper_table.py | pm5/ArticleParser | 1 | 6630685 | <gh_stars>1-10
"""add scraper table
Revision ID: <KEY>
Revises: <PASSWORD>
Create Date: 2020-03-27 09:52:22.509131
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = "<KEY>"
down_revision = "afdac32d3727"
branch_labels = None
depends_on = None
def upgrade():
op.create_table(
"scraper",
sa.Column("scraper_id", sa.Integer, primary_key=True, autoincrement=True),
sa.Column("scraper_name", sa.String(255), nullable=False, unique=True),
sa.Column("db_url_var", sa.String(255), nullable=False),
sa.Column("site_table_name", sa.String(255), nullable=False),
sa.Column("article_table_name", sa.String(255), nullable=False),
sa.Column("snapshot_table_name", sa.String(255), nullable=False),
)
op.execute(
"INSERT INTO scraper (scraper_id, scraper_name, db_url_var, site_table_name, article_table_name, snapshot_table_name) VALUES (1, 'ZeroScraper', 'SCRAPER_DB_URL', 'Site', 'Article', 'ArticleSnapshot')"
)
def downgrade():
op.drop_table("scraper")
| """add scraper table
Revision ID: <KEY>
Revises: <PASSWORD>
Create Date: 2020-03-27 09:52:22.509131
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = "<KEY>"
down_revision = "afdac32d3727"
branch_labels = None
depends_on = None
def upgrade():
op.create_table(
"scraper",
sa.Column("scraper_id", sa.Integer, primary_key=True, autoincrement=True),
sa.Column("scraper_name", sa.String(255), nullable=False, unique=True),
sa.Column("db_url_var", sa.String(255), nullable=False),
sa.Column("site_table_name", sa.String(255), nullable=False),
sa.Column("article_table_name", sa.String(255), nullable=False),
sa.Column("snapshot_table_name", sa.String(255), nullable=False),
)
op.execute(
"INSERT INTO scraper (scraper_id, scraper_name, db_url_var, site_table_name, article_table_name, snapshot_table_name) VALUES (1, 'ZeroScraper', 'SCRAPER_DB_URL', 'Site', 'Article', 'ArticleSnapshot')"
)
def downgrade():
op.drop_table("scraper") | en | 0.396528 | add scraper table Revision ID: <KEY> Revises: <PASSWORD> Create Date: 2020-03-27 09:52:22.509131 # revision identifiers, used by Alembic. | 1.836747 | 2 |
test/test_helper_classes.py | mikisama/python-can-isotp | 0 | 6630686 | <gh_stars>0
import unittest
import isotp
import time
from . import unittest_logging
from isotp.protocol import RateLimiter
Message = isotp.CanMessage
# Make sure that our Timer class used for timeouts is working OK
class testTimer(unittest.TestCase):
def test_timer(self):
timeout = 0.2
t = isotp.TransportLayer.Timer(timeout=timeout)
self.assertFalse(t.is_timed_out())
self.assertEqual(t.elapsed(), 0)
t.start()
self.assertFalse(t.is_timed_out())
time.sleep(timeout+0.01)
self.assertTrue(t.elapsed() > timeout)
self.assertTrue(t.is_timed_out())
t.stop()
self.assertFalse(t.is_timed_out())
self.assertEqual(t.elapsed(), 0)
t.start()
self.assertFalse(t.is_timed_out())
# Here we check that we properly decode each type of frame
class TestPDUDecoding(unittest.TestCase):
def make_pdu(self, data, start_of_data=0):
return isotp.protocol.PDU(Message(data=bytearray(data)),start_of_data=start_of_data)
def make_payload(self, size, start_val=0):
return [int(x%0x100) for x in range(start_val, start_val+size)]
def test_decode_single_frame_no_escape_sequence(self):
# Empty data
with self.assertRaises(ValueError):
self.make_pdu([])
# Single Frame, incomplete escape sequence
with self.assertRaises(ValueError):
self.make_pdu([0])
prefix =[0x55, 0xAA]
# Missing 1 byte of data for single frame without escape sequence
for length in range(1, 0xF):
with self.assertRaises(ValueError):
data = [length&0xF] + self.make_payload(length-1)
self.make_pdu(data)
for length in range(1, 0xF):
with self.assertRaises(ValueError):
data = prefix + [length&0xF] + self.make_payload(length-len(prefix)-1)
self.make_pdu(data, start_of_data=len(prefix)) # With prefix
# Valid single frames without escape sequence
for length in range(1, 0xF):
payload = self.make_payload(length)
data= [length&0xF] + payload
pdu = self.make_pdu(data)
self.assertEqual(pdu.type, isotp.protocol.PDU.Type.SINGLE_FRAME)
self.assertEqual(pdu.data, bytearray(payload))
self.assertEqual(pdu.length, len(pdu.data))
self.assertEqual(pdu.length, length)
self.assertFalse(pdu.escape_sequence)
self.assertEqual(pdu.can_dl, len(data))
self.assertEqual(pdu.rx_dl, max(8,pdu.can_dl))
for length in range(1, 0xF):
payload = self.make_payload(length)
data = prefix+[length&0xF] + payload
pdu = self.make_pdu(data, start_of_data=len(prefix)) # With prefix
self.assertEqual(pdu.type, isotp.protocol.PDU.Type.SINGLE_FRAME)
self.assertEqual(pdu.data, bytearray(payload))
self.assertEqual(pdu.length, len(pdu.data))
self.assertEqual(pdu.length, length)
self.assertFalse(pdu.escape_sequence)
self.assertEqual(pdu.can_dl, len(data))
self.assertEqual(pdu.rx_dl, max(8,pdu.can_dl))
# Valid single frames without escape sequence and extra bytes that are ignored
for length in range(1, 0xF):
payload = self.make_payload(length)
data = [length&0xF] + payload + [0xAA]*10
pdu = self.make_pdu(data)
self.assertEqual(pdu.type, isotp.protocol.PDU.Type.SINGLE_FRAME)
self.assertEqual(pdu.data, bytearray(payload))
self.assertEqual(pdu.length, len(pdu.data))
self.assertEqual(pdu.length, length)
self.assertFalse(pdu.escape_sequence)
self.assertEqual(pdu.can_dl, len(data))
self.assertEqual(pdu.rx_dl, max(8,pdu.can_dl))
for length in range(1, 0xF):
payload = self.make_payload(length)
data = prefix+[length&0xF] + payload+ [0xAA]*10
pdu = self.make_pdu(data, start_of_data=len(prefix)) # With prefix
self.assertEqual(pdu.type, isotp.protocol.PDU.Type.SINGLE_FRAME)
self.assertEqual(pdu.data, bytearray(payload))
self.assertEqual(pdu.length, len(pdu.data))
self.assertEqual(pdu.length, length)
self.assertFalse(pdu.escape_sequence)
self.assertEqual(pdu.can_dl, len(data))
self.assertEqual(pdu.rx_dl, max(8,pdu.can_dl))
def test_decode_single_frame_escape_sequence(self):
# Single Frame, length=0. Invalid
with self.assertRaises(ValueError):
self.make_pdu([0,0])
# Single Frame, length=0. Invalid even with data.
with self.assertRaises(ValueError):
self.make_pdu([0,0,0xAA])
prefix =[0x55, 0xAA]
# Missing 1 byte of data for single frame with escape sequence
for length in range(1, 0xFF): # Up to 255 bytes. More than CAN can give, but that's ok.
with self.assertRaises(ValueError):
data = [0, length] + self.make_payload(length-1)
self.make_pdu(data)
for length in range(1, 0xFF): # Up to 255 bytes. More than CAN can give, but that's ok.
with self.assertRaises(ValueError):
data = prefix + [0, length] + self.make_payload(length-1)
self.make_pdu(data, start_of_data=len(prefix)) # With prefix
        # Valid single frames with escape sequence
for length in range(1, 0xFF):
payload = self.make_payload(length)
data = [0, length] + payload
pdu = self.make_pdu(data)
self.assertEqual(pdu.type, isotp.protocol.PDU.Type.SINGLE_FRAME)
self.assertEqual(pdu.data, bytearray(payload))
self.assertEqual(pdu.length, len(pdu.data))
self.assertEqual(pdu.length, length)
self.assertEqual(pdu.can_dl, len(data))
self.assertEqual(pdu.rx_dl, max(8,pdu.can_dl))
for length in range(1, 0xFF):
payload = self.make_payload(length)
data = prefix + [0, length] + payload
pdu = self.make_pdu(data, start_of_data=len(prefix)) # With prefix
self.assertEqual(pdu.type, isotp.protocol.PDU.Type.SINGLE_FRAME)
self.assertEqual(pdu.data, bytearray(payload))
self.assertEqual(pdu.length, len(pdu.data))
self.assertEqual(pdu.length, length)
self.assertTrue(pdu.escape_sequence)
self.assertEqual(pdu.can_dl, len(data))
self.assertEqual(pdu.rx_dl, max(8,pdu.can_dl))
        # Valid single frames with escape sequence and extra bytes that are ignored
for length in range(1, 0xFF):
payload = self.make_payload(length)
data = [0, length] + payload + [0xAA]*10
pdu = self.make_pdu(data)
self.assertEqual(pdu.type, isotp.protocol.PDU.Type.SINGLE_FRAME)
self.assertEqual(pdu.data, bytearray(payload))
self.assertEqual(pdu.length, len(pdu.data))
self.assertEqual(pdu.length, length)
self.assertTrue(pdu.escape_sequence)
self.assertEqual(pdu.can_dl, len(data))
self.assertEqual(pdu.rx_dl, max(8,pdu.can_dl))
for length in range(1, 0xFF):
payload = self.make_payload(length)
data = prefix + [0, length] + payload + [0xAA]*10
pdu = self.make_pdu(data, start_of_data=len(prefix)) # With prefix
self.assertEqual(pdu.type, isotp.protocol.PDU.Type.SINGLE_FRAME)
self.assertEqual(pdu.data, bytearray(payload))
self.assertEqual(pdu.length, len(pdu.data))
self.assertEqual(pdu.length, length)
self.assertTrue(pdu.escape_sequence)
self.assertEqual(pdu.can_dl, len(data))
self.assertEqual(pdu.rx_dl, max(8,pdu.can_dl))
def test_decode_first_frame_no_escape_sequence(self):
with self.assertRaises(ValueError): # Empty payload
self.make_pdu([])
with self.assertRaises(ValueError): # Incomplete length
self.make_pdu([0x10])
with self.assertRaises(ValueError): # Incomplete length
self.make_pdu([0x1F])
pdu = self.make_pdu([0x10, 0x02])
self.assertEqual(pdu.type, isotp.protocol.PDU.Type.FIRST_FRAME)
self.assertEqual(pdu.data, bytearray())
self.assertEqual(pdu.length, 2)
pdu = self.make_pdu([0x10, 0x02, 0x11])
self.assertEqual(pdu.type, isotp.protocol.PDU.Type.FIRST_FRAME)
self.assertEqual(pdu.data, bytearray([0x11]))
self.assertEqual(pdu.length, 2)
prefix =[0x55, 0xAA]
# Data fits in single First pdu. Shouldn't happen, but acceptable.
for length in range(1, 0x1FF):
payload = self.make_payload(length)
data = [0x10 | (length >> 8)&0xF, length&0xFF] + payload
pdu = self.make_pdu(data)
self.assertEqual(pdu.type, isotp.protocol.PDU.Type.FIRST_FRAME)
self.assertEqual(pdu.data, bytearray(payload))
self.assertEqual(pdu.length, length)
for length in range(1, 0x1FF):
payload = self.make_payload(length)
data = prefix + [0x10 | (length >> 8)&0xF, length&0xFF] + payload
pdu = self.make_pdu(data, start_of_data=len(prefix)) # With prefix
self.assertEqual(pdu.type, isotp.protocol.PDU.Type.FIRST_FRAME)
self.assertEqual(pdu.data, bytearray(payload))
self.assertEqual(pdu.length, length)
        # Data doesn't fit in the first pdu. Normal use case.
for length in range(10, 0x1FF):
payload = self.make_payload(length)
data = [0x10 | (length >> 8)&0xF, length&0xFF] + payload[:5]
pdu = self.make_pdu(data)
self.assertEqual(pdu.type, isotp.protocol.PDU.Type.FIRST_FRAME)
self.assertEqual(pdu.data, bytearray(payload[:5]))
self.assertEqual(pdu.length, length)
for length in range(10, 0x1FF):
payload = self.make_payload(length)
data = prefix + [0x10 | (length >> 8)&0xF, length&0xFF] + payload[:5]
pdu = self.make_pdu(data, start_of_data=len(prefix)) # With prefix
self.assertEqual(pdu.type, isotp.protocol.PDU.Type.FIRST_FRAME)
self.assertEqual(pdu.data, bytearray(payload[:5]))
self.assertEqual(pdu.length, length)
# Data fits in single First Frame + padding. Shouldn't happen, but acceptable.
padding = [0xAA] * 10
for length in range(1, 0x1FF):
payload = self.make_payload(length)
data = [0x10 | (length >> 8)&0xF, length&0xFF] + payload + padding
pdu = self.make_pdu(data)
self.assertEqual(pdu.type, isotp.protocol.PDU.Type.FIRST_FRAME)
self.assertEqual(pdu.data, bytearray(payload))
self.assertEqual(pdu.length, length)
for length in range(1, 0x1FF):
payload = self.make_payload(length)
data = prefix + [0x10 | (length >> 8)&0xF, length&0xFF] + payload + padding
pdu = self.make_pdu(data, start_of_data=len(prefix)) # With prefix
self.assertEqual(pdu.type, isotp.protocol.PDU.Type.FIRST_FRAME)
self.assertEqual(pdu.data, bytearray(payload))
self.assertEqual(pdu.length, length)
def test_decode_first_frame_with_escape_sequence(self):
with self.assertRaises(ValueError): # Incomplete length
self.make_pdu([0x10, 0x00])
with self.assertRaises(ValueError): # Incomplete length
self.make_pdu([0x10, 0x00, 0xAA])
with self.assertRaises(ValueError): # Incomplete length
self.make_pdu([0x10, 0x00, 0xAA, 0xBB])
with self.assertRaises(ValueError): # Incomplete length
self.make_pdu([0x10, 0x00, 0xAA, 0xBB, 0xCC])
with self.assertRaises(ValueError): # Incomplete length
self.make_pdu([0x10, 0x10, 0x00, 0xAA, 0xBB, 0xCC], start_of_data=1)
# No data in first pdu. Uncommon but possible.
pdu = self.make_pdu([0x10, 0x00, 0xAA, 0xBB, 0xCC, 0xDD])
self.assertEqual(pdu.type, isotp.protocol.PDU.Type.FIRST_FRAME)
self.assertEqual(pdu.data, bytearray())
self.assertEqual(pdu.length, 0xAABBCCDD)
# No data in first pdu. Uncommon but possible.
pdu = self.make_pdu([0xAA, 0xAA, 0x10, 0x00, 0xAA, 0xBB, 0xCC, 0xDD], start_of_data=2) # With prefix
self.assertEqual(pdu.type, isotp.protocol.PDU.Type.FIRST_FRAME)
self.assertEqual(pdu.data, bytearray())
self.assertEqual(pdu.length, 0xAABBCCDD)
pdu = self.make_pdu([0x10, 0x00, 0xAA, 0xBB, 0xCC, 0xDD, 0x11, 0x22])
self.assertEqual(pdu.type, isotp.protocol.PDU.Type.FIRST_FRAME)
self.assertEqual(pdu.data, bytearray([0x11, 0x22]))
self.assertEqual(pdu.length, 0xAABBCCDD)
pdu = self.make_pdu([0xAA, 0x10, 0x00, 0xAA, 0xBB, 0xCC, 0xDD, 0x11, 0x22, 0x33, 0x44], start_of_data=1)
self.assertEqual(pdu.type, isotp.protocol.PDU.Type.FIRST_FRAME)
self.assertEqual(pdu.data, bytearray([0x11, 0x22, 0x33, 0x44]))
self.assertEqual(pdu.length, 0xAABBCCDD)
prefix =[0x55, 0xAA]
# Data fits in single First pdu. Shouldn't happen, but acceptable.
for length in range(1, 0x1FF):
len_data = [(length >> 24) & 0xFF, (length >> 16) & 0xFF, (length >> 8) & 0xFF, (length >> 0) & 0xFF]
payload = self.make_payload(length)
data = [0x10, 0x00] + len_data + payload
pdu = self.make_pdu(data)
self.assertEqual(pdu.type, isotp.protocol.PDU.Type.FIRST_FRAME)
self.assertEqual(pdu.data, bytearray(payload))
self.assertEqual(pdu.length, length)
for length in range(1, 0x1FF):
len_data = [(length >> 24) & 0xFF, (length >> 16) & 0xFF, (length >> 8) & 0xFF, (length >> 0) & 0xFF]
payload = self.make_payload(length)
data = prefix + [0x10, 0x00] + len_data + payload
pdu = self.make_pdu(data, start_of_data=len(prefix)) # With prefix
self.assertEqual(pdu.type, isotp.protocol.PDU.Type.FIRST_FRAME)
self.assertEqual(pdu.data, bytearray(payload))
self.assertEqual(pdu.length, length)
        # Data doesn't fit in the first pdu. Normal use case.
for length in range(10, 0x1FF):
payload = self.make_payload(length)
len_data = [(length >> 24) & 0xFF, (length >> 16) & 0xFF, (length >> 8) & 0xFF, (length >> 0) & 0xFF]
data = [0x10, 0x00] + len_data + payload[:5]
pdu = self.make_pdu(data)
self.assertEqual(pdu.type, isotp.protocol.PDU.Type.FIRST_FRAME)
self.assertEqual(pdu.data, bytearray(payload[:5]))
self.assertEqual(pdu.length, length)
for length in range(10, 0x1FF):
payload = self.make_payload(length)
len_data = [(length >> 24) & 0xFF, (length >> 16) & 0xFF, (length >> 8) & 0xFF, (length >> 0) & 0xFF]
data = prefix + [0x10, 0x00] + len_data + payload[:5]
pdu = self.make_pdu(data, start_of_data=len(prefix)) # With prefix
self.assertEqual(pdu.type, isotp.protocol.PDU.Type.FIRST_FRAME)
self.assertEqual(pdu.data, bytearray(payload[:5]))
self.assertEqual(pdu.length, length)
# Data fits in single First Frame + padding. Shouldn't happen, but acceptable.
padding = [0xAA] * 10
for length in range(1, 0x1FF):
payload = self.make_payload(length)
len_data = [(length >> 24) & 0xFF, (length >> 16) & 0xFF, (length >> 8) & 0xFF, (length >> 0) & 0xFF]
data = [0x10, 0x00] + len_data + payload + padding
pdu = self.make_pdu(data)
self.assertEqual(pdu.type, isotp.protocol.PDU.Type.FIRST_FRAME)
self.assertEqual(pdu.data, bytearray(payload))
self.assertEqual(pdu.length, length)
for length in range(1, 0x1FF):
payload = self.make_payload(length)
len_data = [(length >> 24) & 0xFF, (length >> 16) & 0xFF, (length >> 8) & 0xFF, (length >> 0) & 0xFF]
data = prefix + [0x10, 0x00] + len_data + payload + padding
pdu = self.make_pdu(data, start_of_data=len(prefix))
self.assertEqual(pdu.type, isotp.protocol.PDU.Type.FIRST_FRAME)
self.assertEqual(pdu.data, bytearray(payload))
self.assertEqual(pdu.length, length)
def test_decode_consecutive_frame(self):
with self.assertRaises(ValueError): # Empty payload
self.make_pdu([])
pdu = self.make_pdu([0x20])
self.assertEqual(pdu.type, isotp.protocol.PDU.Type.CONSECUTIVE_FRAME)
self.assertEqual(pdu.data, bytearray([]))
self.assertEqual(pdu.seqnum, 0)
pdu = self.make_pdu([0x20, 0x11])
self.assertEqual(pdu.type, isotp.protocol.PDU.Type.CONSECUTIVE_FRAME)
self.assertEqual(pdu.data, bytearray([0x11]))
self.assertEqual(pdu.seqnum, 0)
pdu = self.make_pdu([0x2A, 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77])
self.assertEqual(pdu.type, isotp.protocol.PDU.Type.CONSECUTIVE_FRAME)
self.assertEqual(pdu.data, bytearray([0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77]))
self.assertEqual(pdu.seqnum, 0xA)
def test_decode_flow_control(self):
with self.assertRaises(ValueError): # Empty payload
self.make_pdu([])
with self.assertRaises(ValueError): # incomplete
self.make_pdu([0x30])
with self.assertRaises(ValueError): # incomplete
self.make_pdu([0x30, 0x00])
pdu = self.make_pdu([0x30, 0x00, 0x00])
self.assertEqual(pdu.type, isotp.protocol.PDU.Type.FLOW_CONTROL)
self.assertEqual(pdu.flow_status, isotp.protocol.PDU.FlowStatus.ContinueToSend)
self.assertEqual(pdu.blocksize, 0)
self.assertEqual(pdu.stmin, 0)
self.assertEqual(pdu.stmin_sec, 0)
pdu = self.make_pdu([0x31, 0x00, 0x00])
self.assertEqual(pdu.type, isotp.protocol.PDU.Type.FLOW_CONTROL)
self.assertEqual(pdu.flow_status, isotp.protocol.PDU.FlowStatus.Wait)
self.assertEqual(pdu.blocksize, 0)
self.assertEqual(pdu.stmin, 0)
self.assertEqual(pdu.stmin_sec, 0)
pdu = self.make_pdu([0x32, 0x00, 0x00])
self.assertEqual(pdu.type, isotp.protocol.PDU.Type.FLOW_CONTROL)
self.assertEqual(pdu.flow_status, isotp.protocol.PDU.FlowStatus.Overflow)
self.assertEqual(pdu.blocksize, 0)
self.assertEqual(pdu.stmin, 0)
self.assertEqual(pdu.stmin_sec, 0)
pdu = self.make_pdu([0xFF, 0xFF, 0x32, 0x01, 0x01], start_of_data=2)
self.assertEqual(pdu.type, isotp.protocol.PDU.Type.FLOW_CONTROL)
self.assertEqual(pdu.flow_status, isotp.protocol.PDU.FlowStatus.Overflow)
self.assertEqual(pdu.blocksize, 1)
self.assertEqual(pdu.stmin, 1)
self.assertEqual(pdu.stmin_sec, 1/1000)
for i in range(3, 0xF): # Reserved Flow status
with self.assertRaises(ValueError):
pdu = self.make_pdu([0x30 + i, 0x00, 0x00])
pdu = self.make_pdu([0x30, 0xFF, 0x00])
self.assertEqual(pdu.type, isotp.protocol.PDU.Type.FLOW_CONTROL)
self.assertEqual(pdu.flow_status, isotp.protocol.PDU.FlowStatus.ContinueToSend)
self.assertEqual(pdu.blocksize, 0xFF)
self.assertEqual(pdu.stmin, 0)
self.assertEqual(pdu.stmin_sec, 0)
for i in range(0,0x7F): # Millisecs
pdu = self.make_pdu([0x30, 0x00, i])
self.assertEqual(pdu.type, isotp.protocol.PDU.Type.FLOW_CONTROL)
self.assertEqual(pdu.flow_status, isotp.protocol.PDU.FlowStatus.ContinueToSend)
self.assertEqual(pdu.blocksize, 0)
self.assertEqual(pdu.stmin, i)
self.assertEqual(pdu.stmin_sec, i/1000)
for i in range(0xF1, 0xF9): # Microsecs
pdu = self.make_pdu([0x30, 0x00, i])
self.assertEqual(pdu.type, isotp.protocol.PDU.Type.FLOW_CONTROL)
self.assertEqual(pdu.flow_status, isotp.protocol.PDU.FlowStatus.ContinueToSend)
self.assertEqual(pdu.blocksize, 0)
self.assertEqual(pdu.stmin, i)
self.assertEqual(pdu.stmin_sec, (i - 0xF0)/10000)
for i in range(0x80, 0xF1): # Reserved StMin
with self.assertRaises(ValueError):
pdu = self.make_pdu([0x30, 0x00, i])
for i in range(0xFA, 0x100): # Reserved StMin
with self.assertRaises(ValueError):
pdu = self.make_pdu([0x30, 0x00, i])
class TestRateLimiter(unittest.TestCase):
def test_rate_limit_behavior(self):
rate_limiter = RateLimiter(mean_bitrate = 1024, window_size_sec = 1)
rate_limiter.enable()
rate_limiter.update()
self.assertEqual(rate_limiter.allowed_bytes(), 128) # 1024/8 = 128
rate_limiter.inform_byte_sent(96)
self.assertEqual(rate_limiter.allowed_bytes(), 32)
rate_limiter.update()
time.sleep(0.4)
rate_limiter.update()
self.assertEqual(rate_limiter.allowed_bytes(), 32)
time.sleep(0.7)
rate_limiter.update()
self.assertEqual(rate_limiter.allowed_bytes(), 128)
rate_limiter.inform_byte_sent(128*2)
self.assertEqual(rate_limiter.allowed_bytes(), 0)
time.sleep(0.4)
rate_limiter.update()
self.assertEqual(rate_limiter.allowed_bytes(), 0)
time.sleep(0.4)
rate_limiter.update()
self.assertEqual(rate_limiter.allowed_bytes(), 0)
time.sleep(0.4)
rate_limiter.update()
self.assertEqual(rate_limiter.allowed_bytes(), 128)
def test_rate_limit_measurement(self):
bitrate = 10000
window_size_sec = 1
rate_limiter = RateLimiter(mean_bitrate = bitrate, window_size_sec = window_size_sec)
runtime = 5
tstart = time.time()
time_axis = []
data_axis = []
        max_write_chunk = bitrate/8/10  # cap each write at a tenth of a second's worth of bytes
while time.time() - tstart < runtime:
rate_limiter.update()
bcount = rate_limiter.allowed_bytes()
bcount = min(max_write_chunk, bcount)
rate_limiter.inform_byte_sent(bcount)
time_axis.append(time.time())
data_axis.append(bcount)
time.sleep(0.001)
dt = time_axis[-1] - time_axis[0]
total = 0
for x in data_axis:
total += x
measured_bitrate = total/dt*8
self.assertGreater(measured_bitrate, bitrate * 0.85)
self.assertLess(measured_bitrate, bitrate * 1.15)
        # Now make sure that the buffer wasn't overloaded
        buffer_estimation = []
        buffer_peak = 0
for i in range(len(data_axis)):
if len(buffer_estimation) == 0:
buffer_estimation.append(data_axis[i])
else:
dt = time_axis[i] - time_axis[i-1]
buffer_estimation.append(buffer_estimation[-1]+data_axis[i] - bitrate/8*dt)
buffer_peak = max(buffer_peak, buffer_estimation[-1])
self.assertLess(buffer_peak, bitrate/window_size_sec)
| import unittest
import isotp
import time
from . import unittest_logging
from isotp.protocol import RateLimiter
Message = isotp.CanMessage
# Make sure that our Timer class used for timeouts is working OK
class testTimer(unittest.TestCase):
def test_timer(self):
timeout = 0.2
t = isotp.TransportLayer.Timer(timeout=timeout)
self.assertFalse(t.is_timed_out())
self.assertEqual(t.elapsed(), 0)
t.start()
self.assertFalse(t.is_timed_out())
time.sleep(timeout+0.01)
self.assertTrue(t.elapsed() > timeout)
        self.assertTrue(t.is_timed_out())
t.stop()
self.assertFalse(t.is_timed_out())
self.assertEqual(t.elapsed(), 0)
t.start()
self.assertFalse(t.is_timed_out())
# Here we check that we decode each type of frame properly
class TestPDUDecoding(unittest.TestCase):
def make_pdu(self, data, start_of_data=0):
return isotp.protocol.PDU(Message(data=bytearray(data)),start_of_data=start_of_data)
def make_payload(self, size, start_val=0):
return [int(x%0x100) for x in range(start_val, start_val+size)]
def test_decode_single_frame_no_escape_sequence(self):
# Empty data
with self.assertRaises(ValueError):
self.make_pdu([])
        # Single Frame, incomplete escape sequence
with self.assertRaises(ValueError):
self.make_pdu([0])
prefix =[0x55, 0xAA]
# Missing 1 byte of data for single frame without escape sequence
for length in range(1, 0xF):
with self.assertRaises(ValueError):
data = [length&0xF] + self.make_payload(length-1)
self.make_pdu(data)
for length in range(1, 0xF):
with self.assertRaises(ValueError):
data = prefix + [length&0xF] + self.make_payload(length-len(prefix)-1)
self.make_pdu(data, start_of_data=len(prefix)) # With prefix
# Valid single frames without escape sequence
for length in range(1, 0xF):
payload = self.make_payload(length)
data= [length&0xF] + payload
pdu = self.make_pdu(data)
self.assertEqual(pdu.type, isotp.protocol.PDU.Type.SINGLE_FRAME)
self.assertEqual(pdu.data, bytearray(payload))
self.assertEqual(pdu.length, len(pdu.data))
self.assertEqual(pdu.length, length)
self.assertFalse(pdu.escape_sequence)
self.assertEqual(pdu.can_dl, len(data))
self.assertEqual(pdu.rx_dl, max(8,pdu.can_dl))
for length in range(1, 0xF):
payload = self.make_payload(length)
data = prefix+[length&0xF] + payload
pdu = self.make_pdu(data, start_of_data=len(prefix)) # With prefix
self.assertEqual(pdu.type, isotp.protocol.PDU.Type.SINGLE_FRAME)
self.assertEqual(pdu.data, bytearray(payload))
self.assertEqual(pdu.length, len(pdu.data))
self.assertEqual(pdu.length, length)
self.assertFalse(pdu.escape_sequence)
self.assertEqual(pdu.can_dl, len(data))
self.assertEqual(pdu.rx_dl, max(8,pdu.can_dl))
# Valid single frames without escape sequence and extra bytes that are ignored
for length in range(1, 0xF):
payload = self.make_payload(length)
data = [length&0xF] + payload + [0xAA]*10
pdu = self.make_pdu(data)
self.assertEqual(pdu.type, isotp.protocol.PDU.Type.SINGLE_FRAME)
self.assertEqual(pdu.data, bytearray(payload))
self.assertEqual(pdu.length, len(pdu.data))
self.assertEqual(pdu.length, length)
self.assertFalse(pdu.escape_sequence)
self.assertEqual(pdu.can_dl, len(data))
self.assertEqual(pdu.rx_dl, max(8,pdu.can_dl))
for length in range(1, 0xF):
payload = self.make_payload(length)
data = prefix+[length&0xF] + payload+ [0xAA]*10
pdu = self.make_pdu(data, start_of_data=len(prefix)) # With prefix
self.assertEqual(pdu.type, isotp.protocol.PDU.Type.SINGLE_FRAME)
self.assertEqual(pdu.data, bytearray(payload))
self.assertEqual(pdu.length, len(pdu.data))
self.assertEqual(pdu.length, length)
self.assertFalse(pdu.escape_sequence)
self.assertEqual(pdu.can_dl, len(data))
self.assertEqual(pdu.rx_dl, max(8,pdu.can_dl))
def test_decode_single_frame_escape_sequence(self):
# Single Frame, length=0. Invalid
with self.assertRaises(ValueError):
self.make_pdu([0,0])
# Single Frame, length=0. Invalid even with data.
with self.assertRaises(ValueError):
self.make_pdu([0,0,0xAA])
prefix =[0x55, 0xAA]
# Missing 1 byte of data for single frame with escape sequence
for length in range(1, 0xFF): # Up to 255 bytes. More than CAN can give, but that's ok.
with self.assertRaises(ValueError):
data = [0, length] + self.make_payload(length-1)
self.make_pdu(data)
for length in range(1, 0xFF): # Up to 255 bytes. More than CAN can give, but that's ok.
with self.assertRaises(ValueError):
data = prefix + [0, length] + self.make_payload(length-1)
self.make_pdu(data, start_of_data=len(prefix)) # With prefix
        # Valid single frames with escape sequence
for length in range(1, 0xFF):
payload = self.make_payload(length)
data = [0, length] + payload
pdu = self.make_pdu(data)
self.assertEqual(pdu.type, isotp.protocol.PDU.Type.SINGLE_FRAME)
self.assertEqual(pdu.data, bytearray(payload))
self.assertEqual(pdu.length, len(pdu.data))
self.assertEqual(pdu.length, length)
self.assertEqual(pdu.can_dl, len(data))
self.assertEqual(pdu.rx_dl, max(8,pdu.can_dl))
for length in range(1, 0xFF):
payload = self.make_payload(length)
data = prefix + [0, length] + payload
pdu = self.make_pdu(data, start_of_data=len(prefix)) # With prefix
self.assertEqual(pdu.type, isotp.protocol.PDU.Type.SINGLE_FRAME)
self.assertEqual(pdu.data, bytearray(payload))
self.assertEqual(pdu.length, len(pdu.data))
self.assertEqual(pdu.length, length)
self.assertTrue(pdu.escape_sequence)
self.assertEqual(pdu.can_dl, len(data))
self.assertEqual(pdu.rx_dl, max(8,pdu.can_dl))
        # Valid single frames with escape sequence and extra bytes that are ignored
for length in range(1, 0xFF):
payload = self.make_payload(length)
data = [0, length] + payload + [0xAA]*10
pdu = self.make_pdu(data)
self.assertEqual(pdu.type, isotp.protocol.PDU.Type.SINGLE_FRAME)
self.assertEqual(pdu.data, bytearray(payload))
self.assertEqual(pdu.length, len(pdu.data))
self.assertEqual(pdu.length, length)
self.assertTrue(pdu.escape_sequence)
self.assertEqual(pdu.can_dl, len(data))
self.assertEqual(pdu.rx_dl, max(8,pdu.can_dl))
for length in range(1, 0xFF):
payload = self.make_payload(length)
data = prefix + [0, length] + payload + [0xAA]*10
pdu = self.make_pdu(data, start_of_data=len(prefix)) # With prefix
self.assertEqual(pdu.type, isotp.protocol.PDU.Type.SINGLE_FRAME)
self.assertEqual(pdu.data, bytearray(payload))
self.assertEqual(pdu.length, len(pdu.data))
self.assertEqual(pdu.length, length)
self.assertTrue(pdu.escape_sequence)
self.assertEqual(pdu.can_dl, len(data))
self.assertEqual(pdu.rx_dl, max(8,pdu.can_dl))
def test_decode_first_frame_no_escape_sequence(self):
with self.assertRaises(ValueError): # Empty payload
self.make_pdu([])
with self.assertRaises(ValueError): # Incomplete length
self.make_pdu([0x10])
with self.assertRaises(ValueError): # Incomplete length
self.make_pdu([0x1F])
pdu = self.make_pdu([0x10, 0x02])
self.assertEqual(pdu.type, isotp.protocol.PDU.Type.FIRST_FRAME)
self.assertEqual(pdu.data, bytearray())
self.assertEqual(pdu.length, 2)
pdu = self.make_pdu([0x10, 0x02, 0x11])
self.assertEqual(pdu.type, isotp.protocol.PDU.Type.FIRST_FRAME)
self.assertEqual(pdu.data, bytearray([0x11]))
self.assertEqual(pdu.length, 2)
prefix =[0x55, 0xAA]
# Data fits in single First pdu. Shouldn't happen, but acceptable.
for length in range(1, 0x1FF):
payload = self.make_payload(length)
data = [0x10 | (length >> 8)&0xF, length&0xFF] + payload
pdu = self.make_pdu(data)
self.assertEqual(pdu.type, isotp.protocol.PDU.Type.FIRST_FRAME)
self.assertEqual(pdu.data, bytearray(payload))
self.assertEqual(pdu.length, length)
for length in range(1, 0x1FF):
payload = self.make_payload(length)
data = prefix + [0x10 | (length >> 8)&0xF, length&0xFF] + payload
pdu = self.make_pdu(data, start_of_data=len(prefix)) # With prefix
self.assertEqual(pdu.type, isotp.protocol.PDU.Type.FIRST_FRAME)
self.assertEqual(pdu.data, bytearray(payload))
self.assertEqual(pdu.length, length)
        # Data doesn't fit in the first pdu. Normal use case.
for length in range(10, 0x1FF):
payload = self.make_payload(length)
data = [0x10 | (length >> 8)&0xF, length&0xFF] + payload[:5]
pdu = self.make_pdu(data)
self.assertEqual(pdu.type, isotp.protocol.PDU.Type.FIRST_FRAME)
self.assertEqual(pdu.data, bytearray(payload[:5]))
self.assertEqual(pdu.length, length)
for length in range(10, 0x1FF):
payload = self.make_payload(length)
data = prefix + [0x10 | (length >> 8)&0xF, length&0xFF] + payload[:5]
pdu = self.make_pdu(data, start_of_data=len(prefix)) # With prefix
self.assertEqual(pdu.type, isotp.protocol.PDU.Type.FIRST_FRAME)
self.assertEqual(pdu.data, bytearray(payload[:5]))
self.assertEqual(pdu.length, length)
# Data fits in single First Frame + padding. Shouldn't happen, but acceptable.
padding = [0xAA] * 10
for length in range(1, 0x1FF):
payload = self.make_payload(length)
data = [0x10 | (length >> 8)&0xF, length&0xFF] + payload + padding
pdu = self.make_pdu(data)
self.assertEqual(pdu.type, isotp.protocol.PDU.Type.FIRST_FRAME)
self.assertEqual(pdu.data, bytearray(payload))
self.assertEqual(pdu.length, length)
for length in range(1, 0x1FF):
payload = self.make_payload(length)
data = prefix + [0x10 | (length >> 8)&0xF, length&0xFF] + payload + padding
pdu = self.make_pdu(data, start_of_data=len(prefix)) # With prefix
self.assertEqual(pdu.type, isotp.protocol.PDU.Type.FIRST_FRAME)
self.assertEqual(pdu.data, bytearray(payload))
self.assertEqual(pdu.length, length)
def test_decode_first_frame_with_escape_sequence(self):
with self.assertRaises(ValueError): # Incomplete length
self.make_pdu([0x10, 0x00])
with self.assertRaises(ValueError): # Incomplete length
self.make_pdu([0x10, 0x00, 0xAA])
with self.assertRaises(ValueError): # Incomplete length
self.make_pdu([0x10, 0x00, 0xAA, 0xBB])
with self.assertRaises(ValueError): # Incomplete length
self.make_pdu([0x10, 0x00, 0xAA, 0xBB, 0xCC])
with self.assertRaises(ValueError): # Incomplete length
self.make_pdu([0x10, 0x10, 0x00, 0xAA, 0xBB, 0xCC], start_of_data=1)
# No data in first pdu. Uncommon but possible.
pdu = self.make_pdu([0x10, 0x00, 0xAA, 0xBB, 0xCC, 0xDD])
self.assertEqual(pdu.type, isotp.protocol.PDU.Type.FIRST_FRAME)
self.assertEqual(pdu.data, bytearray())
self.assertEqual(pdu.length, 0xAABBCCDD)
# No data in first pdu. Uncommon but possible.
pdu = self.make_pdu([0xAA, 0xAA, 0x10, 0x00, 0xAA, 0xBB, 0xCC, 0xDD], start_of_data=2) # With prefix
self.assertEqual(pdu.type, isotp.protocol.PDU.Type.FIRST_FRAME)
self.assertEqual(pdu.data, bytearray())
self.assertEqual(pdu.length, 0xAABBCCDD)
pdu = self.make_pdu([0x10, 0x00, 0xAA, 0xBB, 0xCC, 0xDD, 0x11, 0x22])
self.assertEqual(pdu.type, isotp.protocol.PDU.Type.FIRST_FRAME)
self.assertEqual(pdu.data, bytearray([0x11, 0x22]))
self.assertEqual(pdu.length, 0xAABBCCDD)
pdu = self.make_pdu([0xAA, 0x10, 0x00, 0xAA, 0xBB, 0xCC, 0xDD, 0x11, 0x22, 0x33, 0x44], start_of_data=1)
self.assertEqual(pdu.type, isotp.protocol.PDU.Type.FIRST_FRAME)
self.assertEqual(pdu.data, bytearray([0x11, 0x22, 0x33, 0x44]))
self.assertEqual(pdu.length, 0xAABBCCDD)
prefix =[0x55, 0xAA]
# Data fits in single First pdu. Shouldn't happen, but acceptable.
for length in range(1, 0x1FF):
len_data = [(length >> 24) & 0xFF, (length >> 16) & 0xFF, (length >> 8) & 0xFF, (length >> 0) & 0xFF]
payload = self.make_payload(length)
data = [0x10, 0x00] + len_data + payload
pdu = self.make_pdu(data)
self.assertEqual(pdu.type, isotp.protocol.PDU.Type.FIRST_FRAME)
self.assertEqual(pdu.data, bytearray(payload))
self.assertEqual(pdu.length, length)
for length in range(1, 0x1FF):
len_data = [(length >> 24) & 0xFF, (length >> 16) & 0xFF, (length >> 8) & 0xFF, (length >> 0) & 0xFF]
payload = self.make_payload(length)
data = prefix + [0x10, 0x00] + len_data + payload
pdu = self.make_pdu(data, start_of_data=len(prefix)) # With prefix
self.assertEqual(pdu.type, isotp.protocol.PDU.Type.FIRST_FRAME)
self.assertEqual(pdu.data, bytearray(payload))
self.assertEqual(pdu.length, length)
        # Data doesn't fit in the first pdu. Normal use case.
for length in range(10, 0x1FF):
payload = self.make_payload(length)
len_data = [(length >> 24) & 0xFF, (length >> 16) & 0xFF, (length >> 8) & 0xFF, (length >> 0) & 0xFF]
data = [0x10, 0x00] + len_data + payload[:5]
pdu = self.make_pdu(data)
self.assertEqual(pdu.type, isotp.protocol.PDU.Type.FIRST_FRAME)
self.assertEqual(pdu.data, bytearray(payload[:5]))
self.assertEqual(pdu.length, length)
for length in range(10, 0x1FF):
payload = self.make_payload(length)
len_data = [(length >> 24) & 0xFF, (length >> 16) & 0xFF, (length >> 8) & 0xFF, (length >> 0) & 0xFF]
data = prefix + [0x10, 0x00] + len_data + payload[:5]
pdu = self.make_pdu(data, start_of_data=len(prefix)) # With prefix
self.assertEqual(pdu.type, isotp.protocol.PDU.Type.FIRST_FRAME)
self.assertEqual(pdu.data, bytearray(payload[:5]))
self.assertEqual(pdu.length, length)
# Data fits in single First Frame + padding. Shouldn't happen, but acceptable.
padding = [0xAA] * 10
for length in range(1, 0x1FF):
payload = self.make_payload(length)
len_data = [(length >> 24) & 0xFF, (length >> 16) & 0xFF, (length >> 8) & 0xFF, (length >> 0) & 0xFF]
data = [0x10, 0x00] + len_data + payload + padding
pdu = self.make_pdu(data)
self.assertEqual(pdu.type, isotp.protocol.PDU.Type.FIRST_FRAME)
self.assertEqual(pdu.data, bytearray(payload))
self.assertEqual(pdu.length, length)
for length in range(1, 0x1FF):
payload = self.make_payload(length)
len_data = [(length >> 24) & 0xFF, (length >> 16) & 0xFF, (length >> 8) & 0xFF, (length >> 0) & 0xFF]
data = prefix + [0x10, 0x00] + len_data + payload + padding
pdu = self.make_pdu(data, start_of_data=len(prefix))
self.assertEqual(pdu.type, isotp.protocol.PDU.Type.FIRST_FRAME)
self.assertEqual(pdu.data, bytearray(payload))
self.assertEqual(pdu.length, length)
def test_decode_consecutive_frame(self):
with self.assertRaises(ValueError): # Empty payload
self.make_pdu([])
pdu = self.make_pdu([0x20])
self.assertEqual(pdu.type, isotp.protocol.PDU.Type.CONSECUTIVE_FRAME)
self.assertEqual(pdu.data, bytearray([]))
self.assertEqual(pdu.seqnum, 0)
pdu = self.make_pdu([0x20, 0x11])
self.assertEqual(pdu.type, isotp.protocol.PDU.Type.CONSECUTIVE_FRAME)
self.assertEqual(pdu.data, bytearray([0x11]))
self.assertEqual(pdu.seqnum, 0)
pdu = self.make_pdu([0x2A, 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77])
self.assertEqual(pdu.type, isotp.protocol.PDU.Type.CONSECUTIVE_FRAME)
self.assertEqual(pdu.data, bytearray([0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77]))
self.assertEqual(pdu.seqnum, 0xA)
def test_decode_flow_control(self):
with self.assertRaises(ValueError): # Empty payload
self.make_pdu([])
with self.assertRaises(ValueError): # incomplete
self.make_pdu([0x30])
with self.assertRaises(ValueError): # incomplete
self.make_pdu([0x30, 0x00])
pdu = self.make_pdu([0x30, 0x00, 0x00])
self.assertEqual(pdu.type, isotp.protocol.PDU.Type.FLOW_CONTROL)
self.assertEqual(pdu.flow_status, isotp.protocol.PDU.FlowStatus.ContinueToSend)
self.assertEqual(pdu.blocksize, 0)
self.assertEqual(pdu.stmin, 0)
self.assertEqual(pdu.stmin_sec, 0)
pdu = self.make_pdu([0x31, 0x00, 0x00])
self.assertEqual(pdu.type, isotp.protocol.PDU.Type.FLOW_CONTROL)
self.assertEqual(pdu.flow_status, isotp.protocol.PDU.FlowStatus.Wait)
self.assertEqual(pdu.blocksize, 0)
self.assertEqual(pdu.stmin, 0)
self.assertEqual(pdu.stmin_sec, 0)
pdu = self.make_pdu([0x32, 0x00, 0x00])
self.assertEqual(pdu.type, isotp.protocol.PDU.Type.FLOW_CONTROL)
self.assertEqual(pdu.flow_status, isotp.protocol.PDU.FlowStatus.Overflow)
self.assertEqual(pdu.blocksize, 0)
self.assertEqual(pdu.stmin, 0)
self.assertEqual(pdu.stmin_sec, 0)
pdu = self.make_pdu([0xFF, 0xFF, 0x32, 0x01, 0x01], start_of_data=2)
self.assertEqual(pdu.type, isotp.protocol.PDU.Type.FLOW_CONTROL)
self.assertEqual(pdu.flow_status, isotp.protocol.PDU.FlowStatus.Overflow)
self.assertEqual(pdu.blocksize, 1)
self.assertEqual(pdu.stmin, 1)
self.assertEqual(pdu.stmin_sec, 1/1000)
for i in range(3, 0xF): # Reserved Flow status
with self.assertRaises(ValueError):
pdu = self.make_pdu([0x30 + i, 0x00, 0x00])
pdu = self.make_pdu([0x30, 0xFF, 0x00])
self.assertEqual(pdu.type, isotp.protocol.PDU.Type.FLOW_CONTROL)
self.assertEqual(pdu.flow_status, isotp.protocol.PDU.FlowStatus.ContinueToSend)
self.assertEqual(pdu.blocksize, 0xFF)
self.assertEqual(pdu.stmin, 0)
self.assertEqual(pdu.stmin_sec, 0)
for i in range(0,0x7F): # Millisecs
pdu = self.make_pdu([0x30, 0x00, i])
self.assertEqual(pdu.type, isotp.protocol.PDU.Type.FLOW_CONTROL)
self.assertEqual(pdu.flow_status, isotp.protocol.PDU.FlowStatus.ContinueToSend)
self.assertEqual(pdu.blocksize, 0)
self.assertEqual(pdu.stmin, i)
self.assertEqual(pdu.stmin_sec, i/1000)
for i in range(0xF1, 0xF9): # Microsecs
pdu = self.make_pdu([0x30, 0x00, i])
self.assertEqual(pdu.type, isotp.protocol.PDU.Type.FLOW_CONTROL)
self.assertEqual(pdu.flow_status, isotp.protocol.PDU.FlowStatus.ContinueToSend)
self.assertEqual(pdu.blocksize, 0)
self.assertEqual(pdu.stmin, i)
self.assertEqual(pdu.stmin_sec, (i - 0xF0)/10000)
for i in range(0x80, 0xF1): # Reserved StMin
with self.assertRaises(ValueError):
pdu = self.make_pdu([0x30, 0x00, i])
for i in range(0xFA, 0x100): # Reserved StMin
with self.assertRaises(ValueError):
pdu = self.make_pdu([0x30, 0x00, i])
class TestRateLimiter(unittest.TestCase):
def test_rate_limit_behavior(self):
rate_limiter = RateLimiter(mean_bitrate = 1024, window_size_sec = 1)
rate_limiter.enable()
rate_limiter.update()
self.assertEqual(rate_limiter.allowed_bytes(), 128) # 1024/8 = 128
rate_limiter.inform_byte_sent(96)
self.assertEqual(rate_limiter.allowed_bytes(), 32)
rate_limiter.update()
time.sleep(0.4)
rate_limiter.update()
self.assertEqual(rate_limiter.allowed_bytes(), 32)
time.sleep(0.7)
rate_limiter.update()
self.assertEqual(rate_limiter.allowed_bytes(), 128)
rate_limiter.inform_byte_sent(128*2)
self.assertEqual(rate_limiter.allowed_bytes(), 0)
time.sleep(0.4)
rate_limiter.update()
self.assertEqual(rate_limiter.allowed_bytes(), 0)
time.sleep(0.4)
rate_limiter.update()
self.assertEqual(rate_limiter.allowed_bytes(), 0)
time.sleep(0.4)
rate_limiter.update()
self.assertEqual(rate_limiter.allowed_bytes(), 128)
def test_rate_limit_measurement(self):
bitrate = 10000
window_size_sec = 1
rate_limiter = RateLimiter(mean_bitrate = bitrate, window_size_sec = window_size_sec)
runtime = 5
tstart = time.time()
time_axis = []
data_axis = []
        max_write_chunk = bitrate/8/10  # cap each write at a tenth of a second's worth of bytes
while time.time() - tstart < runtime:
rate_limiter.update()
bcount = rate_limiter.allowed_bytes()
bcount = min(max_write_chunk, bcount)
rate_limiter.inform_byte_sent(bcount)
time_axis.append(time.time())
data_axis.append(bcount)
time.sleep(0.001)
dt = time_axis[-1] - time_axis[0]
total = 0
for x in data_axis:
total += x
measured_bitrate = total/dt*8
self.assertGreater(measured_bitrate, bitrate * 0.85)
self.assertLess(measured_bitrate, bitrate * 1.15)
        # Now make sure that the buffer wasn't overloaded
        buffer_estimation = []
        buffer_peak = 0
for i in range(len(data_axis)):
if len(buffer_estimation) == 0:
buffer_estimation.append(data_axis[i])
else:
dt = time_axis[i] - time_axis[i-1]
buffer_estimation.append(buffer_estimation[-1]+data_axis[i] - bitrate/8*dt)
buffer_peak = max(buffer_peak, buffer_estimation[-1])
self.assertLess(buffer_peak, bitrate/window_size_sec) | en | 0.79699 | # Make sure that our Timer class used for timeouts is working OK # Here we check that we decode properly ecah type of frame # Empty data # Single Frame, imcomplete escape sequence # Missing 1 byte of data for single frame without escape sequence # With prefix # Valid single frames without escape sequence # With prefix # Valid single frames without escape sequence and extra bytes that are ignored # With prefix # Single Frame, length=0. Invalid # Single Frame, length=0. Invalid even with data. # Missing 1 byte of data for single frame with escape sequence # Up to 255 bytes. More than CAN can give, but that's ok. # Up to 255 bytes. More than CAN can give, but that's ok. # With prefix # Valid single frames without escape sequence # With prefix # Valid single frames without escape sequence and extra bytes that are ignored # With prefix # Empty payload # Incomplete length # Incomplete length # Data fits in single First pdu. Shouldn't happen, but acceptable. # With prefix # Data doesn't fits in first pdu. Normal use case. # With prefix # Data fits in single First Frame + padding. Shouldn't happen, but acceptable. # With prefix # Incomplete length # Incomplete length # Incomplete length # Incomplete length # Incomplete length # No data in first pdu. Uncommon but possible. # No data in first pdu. Uncommon but possible. # With prefix # Data fits in single First pdu. Shouldn't happen, but acceptable. # With prefix # Data doesn't fits in first pdu. Normal use case. # With prefix # Data fits in single First Frame + padding. Shouldn't happen, but acceptable. # Empty payload # Empty payload # incomplete # incomplete # Reserved Flow status # Millisecs # Microsecs # Reserved StMin # Reserved StMin # 1024/8 = 128 # # Now make sure that the buffer wan'T overloaded | 2.619311 | 3 |
Code/multilabelloss.py | Szhgege/DACPGTN | 0 | 6630687 | import torch
def multilabel_categorical_crossentropy(y_true, y_pred):
    y_pred = (1 - 2 * y_true) * y_pred  # negate the logits of the positive (y_true == 1) labels
    y_pred_neg = y_pred - y_true * 1e12  # mask positives out of the negative-class term
    y_pred_pos = y_pred - (1 - y_true) * 1e12  # mask negatives out of the positive-class term
    zeros = torch.zeros_like(y_pred[..., :1])  # an extra zero logit acts as the decision threshold
y_pred_neg = torch.cat([y_pred_neg, zeros], dim=-1)
y_pred_pos = torch.cat([y_pred_pos, zeros], dim=-1)
neg_loss = torch.logsumexp(y_pred_neg, dim=-1)
pos_loss = torch.logsumexp(y_pred_pos, dim=-1)
return neg_loss + pos_loss
# Retrieved from https://github.com/bojone/bert4keras. For more detailed instructions please visit the https://spaces.ac.cn/archives/7359.
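# Minimal usage sketch (illustrative assumption, not part of the original file):
# y_true is a multi-hot {0, 1} tensor and y_pred holds raw logits with no sigmoid applied.
if __name__ == "__main__":
    _y_true = torch.tensor([[1.0, 0.0, 1.0, 0.0]])
    _y_pred = torch.randn(1, 4)  # raw logits, one score per label
    print(multilabel_categorical_crossentropy(_y_true, _y_pred).mean().item())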
| import torch
def multilabel_categorical_crossentropy(y_true, y_pred):
y_pred = (1 - 2 * y_true) * y_pred
y_pred_neg = y_pred - y_true * 1e12
y_pred_pos = y_pred - (1 - y_true) * 1e12
zeros = torch.zeros_like(y_pred[..., :1])
y_pred_neg = torch.cat([y_pred_neg, zeros], dim=-1)
y_pred_pos = torch.cat([y_pred_pos, zeros], dim=-1)
neg_loss = torch.logsumexp(y_pred_neg, dim=-1)
pos_loss = torch.logsumexp(y_pred_pos, dim=-1)
return neg_loss + pos_loss
# Retrieved from https://github.com/bojone/bert4keras. For more detailed instructions please visit the https://spaces.ac.cn/archives/7359.
| en | 0.694652 | # Retrieved from https://github.com/bojone/bert4keras. For more detailed instructions please visit the https://spaces.ac.cn/archives/7359. | 2.580602 | 3 |
codewof/programming/content/en/print-bigger-number/solution.py | uccser-admin/programming-practice-prototype | 3 | 6630688 | def print_bigger_number(first, second):
if int(first) > int(second):
print(first)
else:
print(second)
| def print_bigger_number(first, second):
if int(first) > int(second):
print(first)
else:
print(second)
| none | 1 | 3.461053 | 3 |
|
feature_examples/tensorflow2/embeddings/imdb_single_ipu.py | MMKrell/gc_tutorials | 0 | 6630689 | <filename>feature_examples/tensorflow2/embeddings/imdb_single_ipu.py
# Copyright (c) 2020 Graphcore Ltd. All rights reserved.
import tensorflow as tf
from tensorflow.python import ipu
from ipu_tensorflow_addons.keras.layers import Embedding, LSTM
from tensorflow.keras.layers import Dense
from tensorflow.keras.layers import Input
from tensorflow.keras.datasets import imdb
from tensorflow.keras.preprocessing import sequence
from tensorflow.keras.optimizers import Adam
if tf.__version__[0] != '2':
raise ImportError("TensorFlow 2 is required for this example")
max_features = 20000
minibatch_size = 32
# Define the dataset.
def get_dataset():
(x_train, y_train), (_, _) = imdb.load_data(num_words=max_features)
x_train = sequence.pad_sequences(x_train, maxlen=80)
ds = tf.data.Dataset.from_tensor_slices((x_train, y_train))
ds = ds.repeat()
ds = ds.map(lambda x, y: (x, tf.cast(y, tf.int32)))
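    # drop_remainder=True keeps every batch the same shape, which suits the IPU's ahead-of-time (XLA) compilation.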
ds = ds.batch(minibatch_size, drop_remainder=True)
return ds
# Define the model.
def get_model():
    input_layer = Input(shape=(80,), dtype=tf.int32, batch_size=minibatch_size)
x = Embedding(max_features, 128)(input_layer)
x = LSTM(128, dropout=0.2)(x)
x = Dense(16, activation='relu')(x)
x = Dense(1, activation='sigmoid')(x)
return tf.keras.Model(input_layer, x)
def main():
# Configure IPUs.
cfg = ipu.config.IPUConfig()
cfg.auto_select_ipus = 1
cfg.configure_ipu_system()
# Set up IPU strategy.
strategy = ipu.ipu_strategy.IPUStrategy()
with strategy.scope():
model = get_model()
model.compile(steps_per_execution=384, loss='binary_crossentropy', optimizer=Adam(0.005))
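        # steps_per_epoch (768) is a whole multiple of steps_per_execution (384), so each epoch maps onto complete on-device execution loops.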
model.fit(get_dataset(), steps_per_epoch=768, epochs=3)
if __name__ == '__main__':
main()
| <filename>feature_examples/tensorflow2/embeddings/imdb_single_ipu.py
# Copyright (c) 2020 Graphcore Ltd. All rights reserved.
import tensorflow as tf
from tensorflow.python import ipu
from ipu_tensorflow_addons.keras.layers import Embedding, LSTM
from tensorflow.keras.layers import Dense
from tensorflow.keras.layers import Input
from tensorflow.keras.datasets import imdb
from tensorflow.keras.preprocessing import sequence
from tensorflow.keras.optimizers import Adam
if tf.__version__[0] != '2':
raise ImportError("TensorFlow 2 is required for this example")
max_features = 20000
minibatch_size = 32
# Define the dataset.
def get_dataset():
(x_train, y_train), (_, _) = imdb.load_data(num_words=max_features)
x_train = sequence.pad_sequences(x_train, maxlen=80)
ds = tf.data.Dataset.from_tensor_slices((x_train, y_train))
ds = ds.repeat()
ds = ds.map(lambda x, y: (x, tf.cast(y, tf.int32)))
ds = ds.batch(minibatch_size, drop_remainder=True)
return ds
# Define the model.
def get_model():
    input_layer = Input(shape=(80,), dtype=tf.int32, batch_size=minibatch_size)
x = Embedding(max_features, 128)(input_layer)
x = LSTM(128, dropout=0.2)(x)
x = Dense(16, activation='relu')(x)
x = Dense(1, activation='sigmoid')(x)
return tf.keras.Model(input_layer, x)
def main():
# Configure IPUs.
cfg = ipu.config.IPUConfig()
cfg.auto_select_ipus = 1
cfg.configure_ipu_system()
# Set up IPU strategy.
strategy = ipu.ipu_strategy.IPUStrategy()
with strategy.scope():
model = get_model()
model.compile(steps_per_execution=384, loss='binary_crossentropy', optimizer=Adam(0.005))
model.fit(get_dataset(), steps_per_epoch=768, epochs=3)
if __name__ == '__main__':
main()
| en | 0.665743 | # Copyright (c) 2020 Graphcore Ltd. All rights reserved. # Define the dataset. # Define the model. # Configure IPUs. # Set up IPU strategy. | 2.603758 | 3 |
pymatgen/io/tests/test_feffio_set.py | rousseab/pymatgen | 1 | 6630690 | <gh_stars>1-10
# coding: utf-8
from __future__ import unicode_literals
import unittest
import os
from pymatgen.io.feff.sets import FeffInputSet
from pymatgen.io.feff import FeffPot
from pymatgen.io.cif import CifParser
test_dir = os.path.join(os.path.dirname(__file__), "..", "..", "..",
'test_files')
cif_file = 'CoO19128.cif'
central_atom = 'O'
cif_path = os.path.join(test_dir, cif_file)
r = CifParser(cif_path)
structure = r.get_structures()[0]
x = FeffInputSet("MaterialsProject")
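# Module-level fixtures: the CIF file is parsed once and the resulting structure and
# FeffInputSet are shared by all of the test methods below.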
class FeffInputSetTest(unittest.TestCase):
header_string = """* This FEFF.inp file generated by pymatgen
TITLE comment: From cif file
TITLE Source: CoO19128.cif
TITLE Structure Summary: Co2 O2
TITLE Reduced formula: CoO
TITLE space group: (P6_3mc), space number: (186)
TITLE abc: 3.297078 3.297078 5.254213
TITLE angles: 90.000000 90.000000 120.000000
TITLE sites: 4
* 1 Co 0.666667 0.333333 0.003676
* 2 Co 0.333334 0.666666 0.503676
* 3 O 0.333334 0.666666 0.121324
* 4 O 0.666667 0.333333 0.621325"""
def test_get_header(self):
comment = 'From cif file'
header = str(FeffInputSet.get_header(x, structure, 'CoO19128.cif',
comment))
ref = FeffInputSetTest.header_string.splitlines()
last4 = [" ".join(l.split()[2:]) for l in ref[-4:]]
for i, l in enumerate(header.splitlines()):
if i < 9:
self.assertEqual(l, ref[i])
else:
s = " ".join(l.split()[2:])
self.assertIn(s, last4)
def test_getfefftags(self):
tags = FeffInputSet.get_feff_tags(x, "XANES").as_dict()
self.assertEqual(tags['COREHOLE'], "FSR",
"Failed to generate PARAMETERS string")
def test_get_feffPot(self):
POT = str(FeffInputSet.get_feff_pot(x, structure, central_atom))
d, dr = FeffPot.pot_dict_from_string(POT)
self.assertEqual(d['Co'], 1, "Wrong symbols read in for FeffPot")
def test_get_feff_atoms(self):
ATOMS = str(FeffInputSet.get_feff_atoms(x, structure, central_atom))
self.assertEqual(ATOMS.splitlines()[3].split()[4], central_atom,
"failed to create ATOMS string")
def test_to_and_from_dict(self):
d = x.as_dict(structure, 'XANES', 'cif', 'O', 'test')
f = d['feff.inp']
f2 = x.from_dict(d)
self.assertEqual(f, f2, "FeffinputSet to and from dict do not match")
if __name__ == '__main__':
unittest.main()
| # coding: utf-8
from __future__ import unicode_literals
import unittest
import os
from pymatgen.io.feff.sets import FeffInputSet
from pymatgen.io.feff import FeffPot
from pymatgen.io.cif import CifParser
test_dir = os.path.join(os.path.dirname(__file__), "..", "..", "..",
'test_files')
cif_file = 'CoO19128.cif'
central_atom = 'O'
cif_path = os.path.join(test_dir, cif_file)
r = CifParser(cif_path)
structure = r.get_structures()[0]
x = FeffInputSet("MaterialsProject")
class FeffInputSetTest(unittest.TestCase):
header_string = """* This FEFF.inp file generated by pymatgen
TITLE comment: From cif file
TITLE Source: CoO19128.cif
TITLE Structure Summary: Co2 O2
TITLE Reduced formula: CoO
TITLE space group: (P6_3mc), space number: (186)
TITLE abc: 3.297078 3.297078 5.254213
TITLE angles: 90.000000 90.000000 120.000000
TITLE sites: 4
* 1 Co 0.666667 0.333333 0.003676
* 2 Co 0.333334 0.666666 0.503676
* 3 O 0.333334 0.666666 0.121324
* 4 O 0.666667 0.333333 0.621325"""
def test_get_header(self):
comment = 'From cif file'
header = str(FeffInputSet.get_header(x, structure, 'CoO19128.cif',
comment))
ref = FeffInputSetTest.header_string.splitlines()
last4 = [" ".join(l.split()[2:]) for l in ref[-4:]]
for i, l in enumerate(header.splitlines()):
if i < 9:
self.assertEqual(l, ref[i])
else:
s = " ".join(l.split()[2:])
self.assertIn(s, last4)
def test_getfefftags(self):
tags = FeffInputSet.get_feff_tags(x, "XANES").as_dict()
self.assertEqual(tags['COREHOLE'], "FSR",
"Failed to generate PARAMETERS string")
def test_get_feffPot(self):
POT = str(FeffInputSet.get_feff_pot(x, structure, central_atom))
d, dr = FeffPot.pot_dict_from_string(POT)
self.assertEqual(d['Co'], 1, "Wrong symbols read in for FeffPot")
def test_get_feff_atoms(self):
ATOMS = str(FeffInputSet.get_feff_atoms(x, structure, central_atom))
self.assertEqual(ATOMS.splitlines()[3].split()[4], central_atom,
"failed to create ATOMS string")
def test_to_and_from_dict(self):
d = x.as_dict(structure, 'XANES', 'cif', 'O', 'test')
f = d['feff.inp']
f2 = x.from_dict(d)
self.assertEqual(f, f2, "FeffinputSet to and from dict do not match")
if __name__ == '__main__':
unittest.main() | en | 0.244808 | # coding: utf-8 * This FEFF.inp file generated by pymatgen TITLE comment: From cif file TITLE Source: CoO19128.cif TITLE Structure Summary: Co2 O2 TITLE Reduced formula: CoO TITLE space group: (P6_3mc), space number: (186) TITLE abc: 3.297078 3.297078 5.254213 TITLE angles: 90.000000 90.000000 120.000000 TITLE sites: 4 * 1 Co 0.666667 0.333333 0.003676 * 2 Co 0.333334 0.666666 0.503676 * 3 O 0.333334 0.666666 0.121324 * 4 O 0.666667 0.333333 0.621325 | 2.362792 | 2 |
setup.py | vinhluan/oracle-json-field | 2 | 6630691 | from distutils.core import Command
from setuptools import setup
class TestCommand(Command):
user_options = []
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
from django.conf import settings
import os
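        # Oracle connection details come from environment variables, falling back to local test defaults.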
oracle_json_host = os.environ.get('ORACLE_JSON_HOST', 'localhost')
oracle_json_port = os.environ.get('ORACLE_JSON_PORT', '1521')
oracle_json_sid = os.environ.get('ORACLE_JSON_SID', 'orcl')
oracle_json_user = os.environ.get('ORACLE_JSON_USER', 'test_user')
oracle_json_pass = os.environ.get('ORACLE_JSON_PASS', 'test_pass')
settings.configure(
DATABASES={"default": {
"ENGINE": "django.db.backends.oracle",
"OPTIONS": {
"threaded": True
},
"HOST": oracle_json_host,
"PORT": oracle_json_port,
"NAME": oracle_json_sid,
"USER": oracle_json_user,
"PASSWORD": <PASSWORD>
}},
INSTALLED_APPS=('oracle_json_field', 'django.contrib.contenttypes')
)
from django.core.management import call_command
import django
django.setup()
call_command('test', 'oracle_json_field')
setup(
name='oracle-json-field',
version=__import__('oracle_json_field').__version__,
packages=['oracle_json_field'],
license='MIT',
include_package_data=True,
author='<NAME>',
author_email='<EMAIL>',
description='A JSON field for Oracle backends.',
long_description=open("README.md").read(),
install_requires=['Django >= 2.0.0'],
tests_require=['Django >= 2.0.0'],
cmdclass={'test': TestCommand},
classifiers=[
'Environment :: Web Environment',
'Intended Audience :: Developers',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Framework :: Django',
],
)
| from distutils.core import Command
from setuptools import setup
class TestCommand(Command):
user_options = []
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
from django.conf import settings
import os
oracle_json_host = os.environ.get('ORACLE_JSON_HOST', 'localhost')
oracle_json_port = os.environ.get('ORACLE_JSON_PORT', '1521')
oracle_json_sid = os.environ.get('ORACLE_JSON_SID', 'orcl')
oracle_json_user = os.environ.get('ORACLE_JSON_USER', 'test_user')
oracle_json_pass = os.environ.get('ORACLE_JSON_PASS', 'test_pass')
settings.configure(
DATABASES={"default": {
"ENGINE": "django.db.backends.oracle",
"OPTIONS": {
"threaded": True
},
"HOST": oracle_json_host,
"PORT": oracle_json_port,
"NAME": oracle_json_sid,
"USER": oracle_json_user,
"PASSWORD": <PASSWORD>
}},
INSTALLED_APPS=('oracle_json_field', 'django.contrib.contenttypes')
)
from django.core.management import call_command
import django
django.setup()
call_command('test', 'oracle_json_field')
setup(
name='oracle-json-field',
version=__import__('oracle_json_field').__version__,
packages=['oracle_json_field'],
license='MIT',
include_package_data=True,
author='<NAME>',
author_email='<EMAIL>',
description='A JSON field for Oracle backends.',
long_description=open("README.md").read(),
install_requires=['Django >= 2.0.0'],
tests_require=['Django >= 2.0.0'],
cmdclass={'test': TestCommand},
classifiers=[
'Environment :: Web Environment',
'Intended Audience :: Developers',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Framework :: Django',
],
)
| none | 1 | 1.908816 | 2 |
|
experiments/gan/munit/train_layout_by_pixels.py | w121211/CoordConv | 0 | 6630692 | # %%writefile /content/CoordConv/experiments/gan/munit/train_layout.py
import argparse
import os
import numpy as np
import math
import sys
import torchvision.transforms as transforms
from torchvision.utils import save_image
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data import DataLoader
from torchvision import datasets
from torch.autograd import Variable
from models import Discriminator, compute_gradient_penalty, CoordConvPainter, FCN
from datasets import generate_real_samples, ImageDataset
from strokes import sampler, draw_rect
parser = argparse.ArgumentParser()
parser.add_argument(
"--n_epochs", type=int, default=200, help="number of epochs of training"
)
parser.add_argument("--batch_size", type=int, default=64, help="size of the batches")
parser.add_argument("--lr", type=float, default=0.0002, help="adam: learning rate")
parser.add_argument(
"--b1",
type=float,
default=0.5,
help="adam: decay of first order momentum of gradient",
)
parser.add_argument(
"--b2",
type=float,
default=0.999,
help="adam: decay of first order momentum of gradient",
)
parser.add_argument(
"--n_cpu",
type=int,
default=8,
help="number of cpu threads to use during batch generation",
)
parser.add_argument(
"--latent_dim", type=int, default=10, help="dimensionality of the latent space"
)
parser.add_argument(
"--img_size", type=int, default=28, help="size of each image dimension"
)
parser.add_argument("--channels", type=int, default=1, help="number of image channels")
parser.add_argument(
"--n_critic",
type=int,
default=50,
help="number of training steps for discriminator per iter",
)
parser.add_argument(
"--clip_value",
type=float,
default=0.01,
help="lower and upper clip value for disc. weights",
)
parser.add_argument(
"--sample_interval", type=int, default=400, help="interval betwen image samples"
)
parser.add_argument("--data_path", type=str, default="data/layout")
parser.add_argument("--model_path", type=str, default="saved_models/95000.pt")
opt = parser.parse_args()
print(opt)
# -------------------------------
# Define models
# -------------------------------
class Paste2d(nn.Module):
def __init__(self, im_size):
super(Paste2d, self).__init__()
self.model = nn.Sequential(nn.Linear(1, 2), nn.Sigmoid())
self.criterion = torch.nn.MSELoss()
self.im_size = im_size
def loss(self, y, x1, x2, gt):
loss_restore = self.criterion(y, gt)
loss_coord = torch.mean(F.relu(-(x2 - x1 - 1.0)))
loss = loss_restore + loss_coord
return loss
def forward(self, x):
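        # x holds normalized box coordinates (x0, y0, x1, y1) in [0, 1]; two relu6
        # ramps per axis build a soft, differentiable rectangle mask of size
        # im_size x im_size, so gradients can flow back to the coordinates.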
l = self.im_size
N = x.shape[0]
x0 = x[:, 0].view(-1, 1) * l - 1.0
y0 = x[:, 1].view(-1, 1) * l - 1.0
x1 = x[:, 2].view(-1, 1) * l + 1.0
y1 = x[:, 3].view(-1, 1) * l + 1.0
coord = torch.arange(l).expand(N, -1).float()
if cuda:
coord = coord.cuda()
        _x0 = F.relu6((coord - x0) * 6.0)
        _x1 = F.relu6((x1 - coord) * 6.0)
        x_mask = (_x0 * _x1) / 36  # each relu6 ramp peaks at 6, so the product peaks at 36; divide to normalize to [0, 1]
        x_mask = x_mask.view(N, 1, l)
        _y0 = F.relu6((coord - y0) * 6.0)
        _y1 = F.relu6((y1 - coord) * 6.0)
        y_mask = (_y0 * _y1) / 36  # same normalization for the y-axis ramps
        y_mask = y_mask.view(N, l, 1)  # align to y-axis
        mask = torch.ones(N, l, l)
if cuda:
mask = mask.cuda()
mask = mask * x_mask * y_mask
return mask.view(-1, 1, l, l)
class LayoutGenerator(nn.Module):
def __init__(self, in_dim=10):
super(LayoutGenerator, self).__init__()
def block(in_feat, out_feat, normalize=True):
layers = [nn.Linear(in_feat, out_feat)]
if normalize:
layers.append(nn.BatchNorm1d(out_feat, 0.8))
layers.append(nn.LeakyReLU(0.2, inplace=True))
return layers
self.model = nn.Sequential(
# *block(in_dim, 128, normalize=False),
nn.Linear(in_dim, 64),
*block(64, 64, normalize=False),
*block(64, 64, normalize=False),
*block(64, 4, normalize=False),
nn.Linear(4, 4),
nn.Sigmoid(),
)
def loss_coord(self, coord):
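        # Hinge-style penalty that pushes the predicted box towards at least one
        # pixel of width (x1 > x0 + 1) and height (y1 > y0 + 1).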
x0 = coord[:, 0] * opt.img_size
        y0 = coord[:, 1] * opt.img_size
        x1 = coord[:, 2] * opt.img_size
        y1 = coord[:, 3] * opt.img_size
loss = torch.mean(F.relu(-(x1 - x0 - 1.0))) + torch.mean(F.relu(-(y1 - y0 - 1.0)))
return loss
def forward(self, z):
coord = self.model(z)
# print(coord[0])
return painter(coord), coord
# -------------------------------
# Dataset sampling
# -------------------------------
cuda = True if torch.cuda.is_available() else False
Tensor = torch.cuda.FloatTensor if cuda else torch.FloatTensor
os.makedirs("images", exist_ok=True)
os.makedirs(opt.data_path, exist_ok=True)
# os.makedirs(opt.model_path, exist_ok=True)
img_shape = (opt.channels, opt.img_size, opt.img_size)
# painter = Generator(in_dim=4)
# painter.load_state_dict(torch.load(opt.model_path, map_location="cpu"))
painter = Paste2d(opt.img_size)
if cuda:
painter.cuda()
painter.eval()
for param in painter.parameters():
param.requires_grad = False # freeze weight
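# The frozen painter acts as a fixed, differentiable renderer; only LayoutGenerator
# and the Discriminator are updated during training below.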
def sample():
transform = transforms.ToPILImage()
box_w, box_h = 20, 20
w, h = opt.img_size, opt.img_size
xs = [0, (w - box_w) / 2, w - box_w]
ys = [0, (h - box_h) / 2, h - box_h]
i = 0
for x in xs:
for y in ys:
print(x, y)
x0, y0 = x / w, y / h
x1, y1 = (x + box_w) / w, (y + box_h) / h
            box = torch.tensor([[x0, y0, x1, y1]]).float()
            if cuda:
                box = box.cuda()  # keep the input on the same device as the frozen painter
            y_img = painter(box)
            im = transform(y_img[0].cpu())
            im.save("%s/%d.png" % (opt.data_path, i), "PNG")
i += 1
def sample_center():
transform = transforms.ToPILImage()
box_range = [5, 30]
w, h = opt.img_size, opt.img_size
for i, box_w in enumerate(range(*box_range)):
x0, y0 = (w - box_w) / 2, (h - box_w) / 2
x1, y1 = x0 + box_w, y0 + box_w
x = torch.tensor([[x0 / w, y0 / h, x1 / w, y1 / h]]).float()
if cuda:
x = x.cuda()
y = painter(x)
im = transform(y[0].cpu())
im.save("%s/%d.png" % (opt.data_path, i), "PNG")
sample_center()
dataloader = torch.utils.data.DataLoader(
ImageDataset(
opt.data_path,
transforms_=[
# transforms.Resize(opt.img_size),
transforms.ToTensor(),
# transforms.Normalize([0.5], [0.5]),
],
has_x=False,
),
batch_size=opt.batch_size,
shuffle=True,
)
# -------------------------------
# Training GAN
# -------------------------------
def train_wgan():
lambda_gp = 10
generator = LayoutGenerator(opt.latent_dim)
discriminator = Discriminator()
loss_restore = torch.nn.MSELoss()
if cuda:
generator.cuda()
discriminator.cuda()
optimizer_G = torch.optim.Adam(
generator.parameters(), lr=opt.lr, betas=(opt.b1, opt.b2)
)
optimizer_D = torch.optim.Adam(
discriminator.parameters(), lr=opt.lr, betas=(opt.b1, opt.b2)
)
batches_done = 0
for epoch in range(opt.n_epochs):
# for i, (imgs, xs) in enumerate(dataloader):
for i, (imgs, _) in enumerate(dataloader):
real_imgs = Variable(imgs.type(Tensor))
# Train Discriminator
optimizer_D.zero_grad()
z = Variable(
Tensor(np.random.normal(0, 1, (imgs.shape[0], opt.latent_dim)))
)
# z = Variable(Tensor(xs))
# z = xs
# if cuda:
# z = z.cuda()
# imgs = imgs.cuda()
fake_imgs, coords = generator(z)
real_validity = discriminator(real_imgs)
fake_validity = discriminator(fake_imgs)
gradient_penalty = compute_gradient_penalty(
discriminator, real_imgs.data, fake_imgs.data
)
d_loss = (
-torch.mean(real_validity)
+ torch.mean(fake_validity)
+ lambda_gp * gradient_penalty
)
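            # Added note: the critic objective above is the WGAN-GP loss,
            #   L_D = E[D(fake)] - E[D(real)] + lambda_gp * GP,
            # where GP is the gradient penalty returned by the imported
            # compute_gradient_penalty (presumably (||grad_x D(x_hat)||_2 - 1)^2 on
            # interpolates of real and fake samples). The critic is updated on every
            # batch; the generator below is only stepped every opt.n_critic batches.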
d_loss.backward()
optimizer_D.step()
# Train Generator
optimizer_G.zero_grad()
if i % opt.n_critic == 0:
fake_imgs, coords = generator(z)
fake_validity = discriminator(fake_imgs)
g_loss = -torch.mean(fake_validity) + generator.loss_coord(coords)
g_loss.backward()
optimizer_G.step()
if batches_done % opt.sample_interval == 0:
print(
"[Epoch %d/%d] [Batch %d/%d] [D loss: %f] [G loss: %f]"
% (
epoch,
opt.n_epochs,
i,
len(dataloader),
d_loss.item(),
g_loss.item(),
)
)
save_image(
fake_imgs.data[:25],
"images/%d.png" % batches_done,
nrow=5,
normalize=True,
)
# torch.save(generator.state_dict(), opt.save_path)
batches_done += opt.n_critic
train_wgan()
| # %%writefile /content/CoordConv/experiments/gan/munit/train_layout.py
import argparse
import os
import numpy as np
import math
import sys
import torchvision.transforms as transforms
from torchvision.utils import save_image
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data import DataLoader
from torchvision import datasets
from torch.autograd import Variable
from models import Discriminator, compute_gradient_penalty, CoordConvPainter, FCN
from datasets import generate_real_samples, ImageDataset
from strokes import sampler, draw_rect
parser = argparse.ArgumentParser()
parser.add_argument(
"--n_epochs", type=int, default=200, help="number of epochs of training"
)
parser.add_argument("--batch_size", type=int, default=64, help="size of the batches")
parser.add_argument("--lr", type=float, default=0.0002, help="adam: learning rate")
parser.add_argument(
"--b1",
type=float,
default=0.5,
help="adam: decay of first order momentum of gradient",
)
parser.add_argument(
"--b2",
type=float,
default=0.999,
help="adam: decay of first order momentum of gradient",
)
parser.add_argument(
"--n_cpu",
type=int,
default=8,
help="number of cpu threads to use during batch generation",
)
parser.add_argument(
"--latent_dim", type=int, default=10, help="dimensionality of the latent space"
)
parser.add_argument(
"--img_size", type=int, default=28, help="size of each image dimension"
)
parser.add_argument("--channels", type=int, default=1, help="number of image channels")
parser.add_argument(
"--n_critic",
type=int,
default=50,
help="number of training steps for discriminator per iter",
)
parser.add_argument(
"--clip_value",
type=float,
default=0.01,
help="lower and upper clip value for disc. weights",
)
parser.add_argument(
"--sample_interval", type=int, default=400, help="interval betwen image samples"
)
parser.add_argument("--data_path", type=str, default="data/layout")
parser.add_argument("--model_path", type=str, default="saved_models/95000.pt")
opt = parser.parse_args()
print(opt)
# -------------------------------
# Define models
# -------------------------------
class Paste2d(nn.Module):
def __init__(self, im_size):
super(Paste2d, self).__init__()
self.model = nn.Sequential(nn.Linear(1, 2), nn.Sigmoid())
self.criterion = torch.nn.MSELoss()
self.im_size = im_size
def loss(self, y, x1, x2, gt):
loss_restore = self.criterion(y, gt)
loss_coord = torch.mean(F.relu(-(x2 - x1 - 1.0)))
loss = loss_restore + loss_coord
return loss
def forward(self, x):
l = self.im_size
N = x.shape[0]
x0 = x[:, 0].view(-1, 1) * l - 1.0
y0 = x[:, 1].view(-1, 1) * l - 1.0
x1 = x[:, 2].view(-1, 1) * l + 1.0
y1 = x[:, 3].view(-1, 1) * l + 1.0
coord = torch.arange(l).expand(N, -1).float()
if cuda:
coord = coord.cuda()
_x0 = F.relu6((coord - x0) * 6.0)
_x1 = F.relu6((x1 - coord) * 6.0)
x_mask = (_x0 * _x1) / 36 # normalize again after relu6 (multiply by 6.)
x_mask = x_mask.view(N, 1, l)
_y0 = F.relu6((coord - y0) * 6.0)
_y1 = F.relu6((y1 - coord) * 6.0)
y_mask = (_y0 * _y1) / 36 # normalize again after relu6 (multiply by 6.)
y_mask = y_mask.view(N, l, 1) # align to y-axis
mask = torch.ones(N,l,l)
if cuda:
mask = mask.cuda()
mask = mask * x_mask * y_mask
return mask.view(-1, 1, l, l)
class LayoutGenerator(nn.Module):
def __init__(self, in_dim=10):
super(LayoutGenerator, self).__init__()
def block(in_feat, out_feat, normalize=True):
layers = [nn.Linear(in_feat, out_feat)]
if normalize:
layers.append(nn.BatchNorm1d(out_feat, 0.8))
layers.append(nn.LeakyReLU(0.2, inplace=True))
return layers
self.model = nn.Sequential(
# *block(in_dim, 128, normalize=False),
nn.Linear(in_dim, 64),
*block(64, 64, normalize=False),
*block(64, 64, normalize=False),
*block(64, 4, normalize=False),
nn.Linear(4, 4),
nn.Sigmoid(),
)
def loss_coord(self, coord):
x0 = coord[:, 0] * opt.img_size
        y0 = coord[:, 1] * opt.img_size
        x1 = coord[:, 2] * opt.img_size
        y1 = coord[:, 3] * opt.img_size
loss = torch.mean(F.relu(-(x1 - x0 - 1.0))) + torch.mean(F.relu(-(y1 - y0 - 1.0)))
return loss
def forward(self, z):
coord = self.model(z)
# print(coord[0])
return painter(coord), coord
# -------------------------------
# Dataset sampling
# -------------------------------
cuda = True if torch.cuda.is_available() else False
Tensor = torch.cuda.FloatTensor if cuda else torch.FloatTensor
os.makedirs("images", exist_ok=True)
os.makedirs(opt.data_path, exist_ok=True)
# os.makedirs(opt.model_path, exist_ok=True)
img_shape = (opt.channels, opt.img_size, opt.img_size)
# painter = Generator(in_dim=4)
# painter.load_state_dict(torch.load(opt.model_path, map_location="cpu"))
painter = Paste2d(opt.img_size)
if cuda:
painter.cuda()
painter.eval()
for param in painter.parameters():
param.requires_grad = False # freeze weight
def sample():
transform = transforms.ToPILImage()
box_w, box_h = 20, 20
w, h = opt.img_size, opt.img_size
xs = [0, (w - box_w) / 2, w - box_w]
ys = [0, (h - box_h) / 2, h - box_h]
i = 0
for x in xs:
for y in ys:
print(x, y)
x0, y0 = x / w, y / h
x1, y1 = (x + box_w) / w, (y + box_h) / h
y = painter(torch.tensor([[x0, y0, x1, y1]]).float())
im = transform(y[0])
im.save("%s/%d.png" % (opt.data_path, i), "PNG")
i += 1
def sample_center():
transform = transforms.ToPILImage()
box_range = [5, 30]
w, h = opt.img_size, opt.img_size
for i, box_w in enumerate(range(*box_range)):
x0, y0 = (w - box_w) / 2, (h - box_w) / 2
x1, y1 = x0 + box_w, y0 + box_w
x = torch.tensor([[x0 / w, y0 / h, x1 / w, y1 / h]]).float()
if cuda:
x = x.cuda()
y = painter(x)
im = transform(y[0].cpu())
im.save("%s/%d.png" % (opt.data_path, i), "PNG")
sample_center()
dataloader = torch.utils.data.DataLoader(
ImageDataset(
opt.data_path,
transforms_=[
# transforms.Resize(opt.img_size),
transforms.ToTensor(),
# transforms.Normalize([0.5], [0.5]),
],
has_x=False,
),
batch_size=opt.batch_size,
shuffle=True,
)
# -------------------------------
# Training GAN
# -------------------------------
def train_wgan():
lambda_gp = 10
generator = LayoutGenerator(opt.latent_dim)
discriminator = Discriminator()
loss_restore = torch.nn.MSELoss()
if cuda:
generator.cuda()
discriminator.cuda()
optimizer_G = torch.optim.Adam(
generator.parameters(), lr=opt.lr, betas=(opt.b1, opt.b2)
)
optimizer_D = torch.optim.Adam(
discriminator.parameters(), lr=opt.lr, betas=(opt.b1, opt.b2)
)
batches_done = 0
for epoch in range(opt.n_epochs):
# for i, (imgs, xs) in enumerate(dataloader):
for i, (imgs, _) in enumerate(dataloader):
real_imgs = Variable(imgs.type(Tensor))
# Train Discriminator
optimizer_D.zero_grad()
z = Variable(
Tensor(np.random.normal(0, 1, (imgs.shape[0], opt.latent_dim)))
)
# z = Variable(Tensor(xs))
# z = xs
# if cuda:
# z = z.cuda()
# imgs = imgs.cuda()
fake_imgs, coords = generator(z)
real_validity = discriminator(real_imgs)
fake_validity = discriminator(fake_imgs)
gradient_penalty = compute_gradient_penalty(
discriminator, real_imgs.data, fake_imgs.data
)
d_loss = (
-torch.mean(real_validity)
+ torch.mean(fake_validity)
+ lambda_gp * gradient_penalty
)
d_loss.backward()
optimizer_D.step()
# Train Generator
optimizer_G.zero_grad()
if i % opt.n_critic == 0:
fake_imgs, coords = generator(z)
fake_validity = discriminator(fake_imgs)
g_loss = -torch.mean(fake_validity) + generator.loss_coord(coords)
g_loss.backward()
optimizer_G.step()
if batches_done % opt.sample_interval == 0:
print(
"[Epoch %d/%d] [Batch %d/%d] [D loss: %f] [G loss: %f]"
% (
epoch,
opt.n_epochs,
i,
len(dataloader),
d_loss.item(),
g_loss.item(),
)
)
save_image(
fake_imgs.data[:25],
"images/%d.png" % batches_done,
nrow=5,
normalize=True,
)
# torch.save(generator.state_dict(), opt.save_path)
batches_done += opt.n_critic
train_wgan()
| en | 0.449915 | # %%writefile /content/CoordConv/experiments/gan/munit/train_layout.py # ------------------------------- # Define models # ------------------------------- # normalize again after relu6 (multiply by 6.) # normalize again after relu6 (multiply by 6.) # align to y-axis # *block(in_dim, 128, normalize=False), # print(coord[0]) # ------------------------------- # Dataset sampling # ------------------------------- # os.makedirs(opt.model_path, exist_ok=True) # painter = Generator(in_dim=4) # painter.load_state_dict(torch.load(opt.model_path, map_location="cpu")) # freeze weight # transforms.Resize(opt.img_size), # transforms.Normalize([0.5], [0.5]), # ------------------------------- # Training GAN # ------------------------------- # for i, (imgs, xs) in enumerate(dataloader): # Train Discriminator # z = Variable(Tensor(xs)) # z = xs # if cuda: # z = z.cuda() # imgs = imgs.cuda() # Train Generator # torch.save(generator.state_dict(), opt.save_path) | 2.093113 | 2 |
wavenet_vocoder/builder.py | entn-at/clari_wavenet_vocoder | 54 | 6630693 | # coding: utf-8
from __future__ import with_statement, print_function, absolute_import
def wavenet(out_channels=256,
layers=20,
stacks=2,
residual_channels=512,
gate_channels=512,
skip_out_channels=512,
cin_channels=-1,
gin_channels=-1,
weight_normalization=True,
dropout=1 - 0.95,
kernel_size=3,
n_speakers=None,
upsample_conditional_features=False,
upsample_scales=[16, 16],
freq_axis_kernel_size=3,
scalar_input=False,
use_speaker_embedding=True,
output_type="Gaussian"
):
from wavenet_vocoder import WaveNet
model = WaveNet(out_channels=out_channels, layers=layers, stacks=stacks,
residual_channels=residual_channels,
gate_channels=gate_channels,
skip_out_channels=skip_out_channels,
kernel_size=kernel_size, dropout=dropout,
weight_normalization=weight_normalization,
cin_channels=cin_channels, gin_channels=gin_channels,
n_speakers=n_speakers,
upsample_conditional_features=upsample_conditional_features,
upsample_scales=upsample_scales,
freq_axis_kernel_size=freq_axis_kernel_size,
scalar_input=scalar_input,
use_speaker_embedding=use_speaker_embedding,
output_type=output_type
)
return model
def student_wavenet(out_channels=2,
layers=20,
stacks=2,
residual_channels=64,
iaf_layer_sizes=[10, 10, 10, 30],
gate_channels=64,
kernel_size=3, dropout=1 - 0.95,
cin_channels=-1, gin_channels=-1, n_speakers=None,
weight_normalization=True,
upsample_conditional_features=False,
upsample_scales=None,
freq_axis_kernel_size=3,
scalar_input=False,
use_speaker_embedding=True
):
from wavenet_vocoder import StudentWaveNet
model = StudentWaveNet(out_channels=out_channels,
layers=layers, stacks=stacks,
residual_channels=residual_channels,
iaf_layer_sizes=iaf_layer_sizes, gate_channels=gate_channels, kernel_size=kernel_size,
dropout=dropout,
cin_channels=cin_channels, gin_channels=gin_channels,
n_speakers=n_speakers,
upsample_conditional_features=upsample_conditional_features,
upsample_scales=upsample_scales,
freq_axis_kernel_size=freq_axis_kernel_size,
scalar_input=scalar_input,
use_speaker_embedding=use_speaker_embedding,
)
return model
def clari_wavenet(out_channels=2,
layers=20,
stacks=2,
residual_channels=64,
iaf_layer_sizes=[10, 10, 10, 30],
gate_channels=64,
kernel_size=3,
dropout=1 - 0.95,
cin_channels=-1, gin_channels=-1, n_speakers=None,
weight_normalization=True,
upsample_conditional_features=False,
upsample_scales=None,
freq_axis_kernel_size=3,
scalar_input=False,
use_speaker_embedding=True,
skip_channels=128,
use_skip=True,
iaf_shift=False
):
from wavenet_vocoder import ClariWaveNet
model = ClariWaveNet(out_channels=out_channels,
layers=layers, stacks=stacks,
residual_channels=residual_channels,
iaf_layer_sizes=iaf_layer_sizes, gate_channels=gate_channels, kernel_size=kernel_size,
dropout=dropout,
cin_channels=cin_channels, gin_channels=gin_channels,
n_speakers=n_speakers,
upsample_conditional_features=upsample_conditional_features,
upsample_scales=upsample_scales,
freq_axis_kernel_size=freq_axis_kernel_size,
scalar_input=scalar_input,
use_speaker_embedding=use_speaker_embedding,
skip_out_channels=skip_channels,
use_skip=use_skip,
iaf_shift=iaf_shift
)
return model
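if __name__ == "__main__":
    # Smoke-test / usage sketch added for illustration only: the hyper-parameter
    # values below are arbitrary placeholders (not the repository's presets), and
    # this assumes the wavenet_vocoder package itself is importable.
    demo_model = wavenet(out_channels=30, layers=4, stacks=2,
                         residual_channels=32, gate_channels=32, skip_out_channels=32,
                         cin_channels=-1, gin_channels=-1)
    print(demo_model)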
| # coding: utf-8
from __future__ import with_statement, print_function, absolute_import
def wavenet(out_channels=256,
layers=20,
stacks=2,
residual_channels=512,
gate_channels=512,
skip_out_channels=512,
cin_channels=-1,
gin_channels=-1,
weight_normalization=True,
dropout=1 - 0.95,
kernel_size=3,
n_speakers=None,
upsample_conditional_features=False,
upsample_scales=[16, 16],
freq_axis_kernel_size=3,
scalar_input=False,
use_speaker_embedding=True,
output_type="Gaussian"
):
from wavenet_vocoder import WaveNet
model = WaveNet(out_channels=out_channels, layers=layers, stacks=stacks,
residual_channels=residual_channels,
gate_channels=gate_channels,
skip_out_channels=skip_out_channels,
kernel_size=kernel_size, dropout=dropout,
weight_normalization=weight_normalization,
cin_channels=cin_channels, gin_channels=gin_channels,
n_speakers=n_speakers,
upsample_conditional_features=upsample_conditional_features,
upsample_scales=upsample_scales,
freq_axis_kernel_size=freq_axis_kernel_size,
scalar_input=scalar_input,
use_speaker_embedding=use_speaker_embedding,
output_type=output_type
)
return model
def student_wavenet(out_channels=2,
layers=20,
stacks=2,
residual_channels=64,
iaf_layer_sizes=[10, 10, 10, 30],
gate_channels=64,
kernel_size=3, dropout=1 - 0.95,
cin_channels=-1, gin_channels=-1, n_speakers=None,
weight_normalization=True,
upsample_conditional_features=False,
upsample_scales=None,
freq_axis_kernel_size=3,
scalar_input=False,
use_speaker_embedding=True
):
from wavenet_vocoder import StudentWaveNet
model = StudentWaveNet(out_channels=out_channels,
layers=layers, stacks=stacks,
residual_channels=residual_channels,
iaf_layer_sizes=iaf_layer_sizes, gate_channels=gate_channels, kernel_size=kernel_size,
dropout=dropout,
cin_channels=cin_channels, gin_channels=gin_channels,
n_speakers=n_speakers,
upsample_conditional_features=upsample_conditional_features,
upsample_scales=upsample_scales,
freq_axis_kernel_size=freq_axis_kernel_size,
scalar_input=scalar_input,
use_speaker_embedding=use_speaker_embedding,
)
return model
def clari_wavenet(out_channels=2,
layers=20,
stacks=2,
residual_channels=64,
iaf_layer_sizes=[10, 10, 10, 30],
gate_channels=64,
kernel_size=3,
dropout=1 - 0.95,
cin_channels=-1, gin_channels=-1, n_speakers=None,
weight_normalization=True,
upsample_conditional_features=False,
upsample_scales=None,
freq_axis_kernel_size=3,
scalar_input=False,
use_speaker_embedding=True,
skip_channels=128,
use_skip=True,
iaf_shift=False
):
from wavenet_vocoder import ClariWaveNet
model = ClariWaveNet(out_channels=out_channels,
layers=layers, stacks=stacks,
residual_channels=residual_channels,
iaf_layer_sizes=iaf_layer_sizes, gate_channels=gate_channels, kernel_size=kernel_size,
dropout=dropout,
cin_channels=cin_channels, gin_channels=gin_channels,
n_speakers=n_speakers,
upsample_conditional_features=upsample_conditional_features,
upsample_scales=upsample_scales,
freq_axis_kernel_size=freq_axis_kernel_size,
scalar_input=scalar_input,
use_speaker_embedding=use_speaker_embedding,
skip_out_channels=skip_channels,
use_skip=use_skip,
iaf_shift=iaf_shift
)
return model
| en | 0.833554 | # coding: utf-8 | 2.345563 | 2 |
dataloaders/__init__.py | XuPenglei/DSRL-old | 0 | 6630694 | from dataloaders.datasets import cityscapes, coco, combine_dbs, pascal, sbd, SimulateDataset
from torch.utils.data import DataLoader
def make_data_loader(args, **kwargs):
if args.dataset == 'pascal':
train_set = pascal.VOCSegmentation(args, split='train')
val_set = pascal.VOCSegmentation(args, split='val')
if args.use_sbd:
sbd_train = sbd.SBDSegmentation(args, split=['train', 'val'])
train_set = combine_dbs.CombineDBs([train_set, sbd_train], excluded=[val_set])
num_class = train_set.NUM_CLASSES
train_loader = DataLoader(train_set, batch_size=args.batch_size, shuffle=True, **kwargs)
val_loader = DataLoader(val_set, batch_size=args.batch_size, shuffle=False, **kwargs)
test_loader = None
return train_loader, val_loader, test_loader, num_class
elif args.dataset == 'cityscapes':
train_set = cityscapes.CityscapesSegmentation(args, split='train')
val_set = cityscapes.CityscapesSegmentation(args, split='val')
test_set = cityscapes.CityscapesSegmentation(args, split='test')
num_class = train_set.NUM_CLASSES
train_loader = DataLoader(train_set, batch_size=args.batch_size, shuffle=True, **kwargs)
val_loader = DataLoader(val_set, batch_size=args.batch_size, shuffle=False, **kwargs)
test_loader = DataLoader(test_set, batch_size=args.batch_size, shuffle=False, **kwargs)
return train_loader, val_loader, test_loader, num_class
elif args.dataset == 'coco':
train_set = coco.COCOSegmentation(args, split='train')
val_set = coco.COCOSegmentation(args, split='val')
num_class = train_set.NUM_CLASSES
train_loader = DataLoader(train_set, batch_size=args.batch_size, shuffle=True, **kwargs)
val_loader = DataLoader(val_set, batch_size=args.batch_size, shuffle=False, **kwargs)
test_loader = None
return train_loader, val_loader, test_loader, num_class
elif args.dataset.lower() == 'rs_ma':
train_set = SimulateDataset.SimulateRemoteSensing(
# X_dir=r'F:\Data\Dream-B\train\image',
# Xlr_dir=r'F:\Data\Dream-B\train\imageLR',
# Y_dir=r'F:\Data\Dream-B\train\label',
X_dir=r'F:\Data\各数据集小测试\Mass\sat',
Xlr_dir=r'F:\Data\各数据集小测试\Mass\satLR',
Y_dir=r'F:\Data\各数据集小测试\Mass\map',
patch_size=512,
SR=args.SR,
to_train=True
)
val_set = SimulateDataset.SimulateRemoteSensing(
# X_dir=r'F:\Data\Dream-B\train\image',
# Xlr_dir=r'F:\Data\Dream-B\train\imageLR',
# Y_dir=r'F:\Data\Dream-B\train\label',
X_dir=r'F:\Data\各数据集小测试\Mass\sat',
Xlr_dir=r'F:\Data\各数据集小测试\Mass\satLR',
Y_dir=r'F:\Data\各数据集小测试\Mass\map',
patch_size=512,
SR=args.SR,
to_train=False
)
num_class = train_set.NUM_CLASSES
train_loader = DataLoader(train_set, batch_size=args.batch_size, shuffle=True, **kwargs)
val_loader = DataLoader(val_set, batch_size=args.batch_size, shuffle=False, **kwargs)
test_loader = None
return train_loader, val_loader, test_loader, num_class
elif args.dataset.lower() == 'rs_dreamb':
train_set = SimulateDataset.SimulateRemoteSensing(
X_dir=r'/home/tang/桌面/XPL/data/data/train/image',
Xlr_dir=r'/home/tang/桌面/XPL/data/data/trainLR/image',
Y_dir=r'/home/tang/桌面/XPL/data/data/train/label',
patch_size=512,
SR=args.SR,
to_train=True
)
val_set = SimulateDataset.SimulateRemoteSensing(
X_dir=r'/home/tang/桌面/XPL/data/data/valid/image',
Xlr_dir=r'/home/tang/桌面/XPL/data/data/validLR/image',
Y_dir=r'/home/tang/桌面/XPL/data/data/valid/label',
patch_size=512,
SR=args.SR,
to_train=False
)
num_class = train_set.NUM_CLASSES
train_loader = DataLoader(train_set, batch_size=args.batch_size, shuffle=True, **kwargs)
val_loader = DataLoader(val_set, batch_size=args.batch_size, shuffle=False, **kwargs)
test_loader = None
return train_loader, val_loader, test_loader, num_class
else:
raise NotImplementedError
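if __name__ == "__main__":
    # Usage sketch added for illustration only. make_data_loader expects an
    # argparse-style namespace; the attribute values below are placeholders, and the
    # dataset classes may require further attributes (paths, crop sizes) not shown.
    from argparse import Namespace
    demo_args = Namespace(dataset='cityscapes', batch_size=4, use_sbd=False, SR=4)
    train_loader, val_loader, test_loader, num_class = make_data_loader(
        demo_args, num_workers=2, pin_memory=False)
    print(num_class)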
| from dataloaders.datasets import cityscapes, coco, combine_dbs, pascal, sbd, SimulateDataset
from torch.utils.data import DataLoader
def make_data_loader(args, **kwargs):
if args.dataset == 'pascal':
train_set = pascal.VOCSegmentation(args, split='train')
val_set = pascal.VOCSegmentation(args, split='val')
if args.use_sbd:
sbd_train = sbd.SBDSegmentation(args, split=['train', 'val'])
train_set = combine_dbs.CombineDBs([train_set, sbd_train], excluded=[val_set])
num_class = train_set.NUM_CLASSES
train_loader = DataLoader(train_set, batch_size=args.batch_size, shuffle=True, **kwargs)
val_loader = DataLoader(val_set, batch_size=args.batch_size, shuffle=False, **kwargs)
test_loader = None
return train_loader, val_loader, test_loader, num_class
elif args.dataset == 'cityscapes':
train_set = cityscapes.CityscapesSegmentation(args, split='train')
val_set = cityscapes.CityscapesSegmentation(args, split='val')
test_set = cityscapes.CityscapesSegmentation(args, split='test')
num_class = train_set.NUM_CLASSES
train_loader = DataLoader(train_set, batch_size=args.batch_size, shuffle=True, **kwargs)
val_loader = DataLoader(val_set, batch_size=args.batch_size, shuffle=False, **kwargs)
test_loader = DataLoader(test_set, batch_size=args.batch_size, shuffle=False, **kwargs)
return train_loader, val_loader, test_loader, num_class
elif args.dataset == 'coco':
train_set = coco.COCOSegmentation(args, split='train')
val_set = coco.COCOSegmentation(args, split='val')
num_class = train_set.NUM_CLASSES
train_loader = DataLoader(train_set, batch_size=args.batch_size, shuffle=True, **kwargs)
val_loader = DataLoader(val_set, batch_size=args.batch_size, shuffle=False, **kwargs)
test_loader = None
return train_loader, val_loader, test_loader, num_class
elif args.dataset.lower() == 'rs_ma':
train_set = SimulateDataset.SimulateRemoteSensing(
# X_dir=r'F:\Data\Dream-B\train\image',
# Xlr_dir=r'F:\Data\Dream-B\train\imageLR',
# Y_dir=r'F:\Data\Dream-B\train\label',
X_dir=r'F:\Data\各数据集小测试\Mass\sat',
Xlr_dir=r'F:\Data\各数据集小测试\Mass\satLR',
Y_dir=r'F:\Data\各数据集小测试\Mass\map',
patch_size=512,
SR=args.SR,
to_train=True
)
val_set = SimulateDataset.SimulateRemoteSensing(
# X_dir=r'F:\Data\Dream-B\train\image',
# Xlr_dir=r'F:\Data\Dream-B\train\imageLR',
# Y_dir=r'F:\Data\Dream-B\train\label',
X_dir=r'F:\Data\各数据集小测试\Mass\sat',
Xlr_dir=r'F:\Data\各数据集小测试\Mass\satLR',
Y_dir=r'F:\Data\各数据集小测试\Mass\map',
patch_size=512,
SR=args.SR,
to_train=False
)
num_class = train_set.NUM_CLASSES
train_loader = DataLoader(train_set, batch_size=args.batch_size, shuffle=True, **kwargs)
val_loader = DataLoader(val_set, batch_size=args.batch_size, shuffle=False, **kwargs)
test_loader = None
return train_loader, val_loader, test_loader, num_class
elif args.dataset.lower() == 'rs_dreamb':
train_set = SimulateDataset.SimulateRemoteSensing(
X_dir=r'/home/tang/桌面/XPL/data/data/train/image',
Xlr_dir=r'/home/tang/桌面/XPL/data/data/trainLR/image',
Y_dir=r'/home/tang/桌面/XPL/data/data/train/label',
patch_size=512,
SR=args.SR,
to_train=True
)
val_set = SimulateDataset.SimulateRemoteSensing(
X_dir=r'/home/tang/桌面/XPL/data/data/valid/image',
Xlr_dir=r'/home/tang/桌面/XPL/data/data/validLR/image',
Y_dir=r'/home/tang/桌面/XPL/data/data/valid/label',
patch_size=512,
SR=args.SR,
to_train=False
)
num_class = train_set.NUM_CLASSES
train_loader = DataLoader(train_set, batch_size=args.batch_size, shuffle=True, **kwargs)
val_loader = DataLoader(val_set, batch_size=args.batch_size, shuffle=False, **kwargs)
test_loader = None
return train_loader, val_loader, test_loader, num_class
else:
raise NotImplementedError
| el | 0.098435 | # X_dir=r'F:\Data\Dream-B\train\image', # Xlr_dir=r'F:\Data\Dream-B\train\imageLR', # Y_dir=r'F:\Data\Dream-B\train\label', # X_dir=r'F:\Data\Dream-B\train\image', # Xlr_dir=r'F:\Data\Dream-B\train\imageLR', # Y_dir=r'F:\Data\Dream-B\train\label', | 2.556337 | 3 |
models/model_util.py | jaswanthbjk/3D-Object-Detection | 0 | 6630695 | import numpy as np
import tensorflow as tf
import os
import sys
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
sys.path.append(BASE_DIR)
import tf_util
# Assumed import (not in the original file): FPointNet_loss below calls K.clip but K
# is never imported here; the Keras backend is the usual source of that helper.
from tensorflow.keras import backend as K
# -----------------
# Global Constants
# -----------------
NUM_HEADING_BIN = 12
NUM_SIZE_CLUSTER = 8 # one cluster for each type
NUM_OBJECT_POINT = 512
# g_type2class = {'car': 0, 'Van': 1, 'Truck': 2, 'pedestrian': 3,
# 'Person_sitting': 4, 'bicycle': 5, 'Tram': 6, 'Misc': 7}
# g_class2type = {g_type2class[t]: t for t in g_type2class}
# g_type2onehotclass = {'car': 0, 'pedestrian': 1, 'bicycle': 2}
# g_type_mean_size = {'car': np.array([4.682, 1.898, 1.668]),
# 'Van': np.array([5.06763659, 1.9007158, 2.20532825]),
# 'Truck': np.array([10.13586957, 2.58549199, 3.2520595]),
# 'pedestrian': np.array([0.787, 0.768, 1.79]),
# 'Person_sitting': np.array([0.80057803, 0.5983815, 1.27450867]),
# 'bicycle': np.array([1.775, 0.654, 1.276]),
# 'Tram': np.array([16.17150617, 2.53246914, 3.53079012]),
# 'Misc': np.array([3.64300781, 1.54298177, 1.92320313])}
# g_mean_size_arr = np.zeros((NUM_SIZE_CLUSTER, 3)) # size clustrs
# for i in range(NUM_SIZE_CLUSTER):
# g_mean_size_arr[i, :] = g_type_mean_size[g_class2type[i]]
g_type2class = {'Car': 0, 'Van': 1, 'Truck': 2, 'Pedestrian': 3,
'Person_sitting': 4, 'Cyclist': 5, 'Tram': 6, 'Misc': 7}
g_class2type = {g_type2class[t]: t for t in g_type2class}
g_type2onehotclass = {'Car': 0, 'Pedestrian': 1, 'Cyclist': 2}
g_type_mean_size = {'Car': np.array([3.88311640418, 1.62856739989, 1.52563191462]),
'Van': np.array([5.06763659, 1.9007158, 2.20532825]),
'Truck': np.array([10.13586957, 2.58549199, 3.2520595]),
'Pedestrian': np.array([0.84422524, 0.66068622, 1.76255119]),
'Person_sitting': np.array([0.80057803, 0.5983815, 1.27450867]),
'Cyclist': np.array([1.76282397, 0.59706367, 1.73698127]),
'Tram': np.array([16.17150617, 2.53246914, 3.53079012]),
'Misc': np.array([3.64300781, 1.54298177, 1.92320313])}
g_mean_size_arr = np.zeros((NUM_SIZE_CLUSTER, 3))  # mean size per cluster
for i in range(NUM_SIZE_CLUSTER):
g_mean_size_arr[i, :] = g_type_mean_size[g_class2type[i]]
# -----------------
# TF Functions Helpers
# -----------------
def tf_gather_object_pc(point_cloud, mask, npoints=512):
""" Gather object point clouds according to predicted masks.
Input:
point_cloud: TF tensor in shape (B,N,C)
mask: TF tensor in shape (B,N) of 0 (not pick) or 1 (pick)
npoints: int scalar, maximum number of points to keep (default: 512)
Output:
object_pc: TF tensor in shape (B,npoint,C)
indices: TF int tensor in shape (B,npoint,2)
"""
def mask_to_indices(mask):
indices = np.zeros((mask.shape[0], npoints, 2), dtype=np.int32)
for i in range(mask.shape[0]):
pos_indices = np.where(mask[i, :] > 0.5)[0]
# skip cases when pos_indices is empty
if len(pos_indices) > 0:
if len(pos_indices) > npoints:
choice = np.random.choice(len(pos_indices),
npoints, replace=False)
else:
choice = np.random.choice(len(pos_indices),
npoints - len(pos_indices), replace=True)
choice = np.concatenate((np.arange(len(pos_indices)), choice))
np.random.shuffle(choice)
indices[i, :, 1] = pos_indices[choice]
indices[i, :, 0] = i
return indices
indices = tf.py_func(mask_to_indices, [mask], tf.int32)
object_pc = tf.gather_nd(point_cloud, indices)
return object_pc, indices
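# Added note (not in the original file): mask_to_indices always returns exactly
# `npoints` indices per example -- foreground points are subsampled without
# replacement when there are too many and padded by resampling with replacement
# when there are too few; examples whose mask is entirely empty keep all-zero
# indices, so they gather copies of point 0 of batch element 0.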
def parse_data(raw_record):
example = parse_frustum_point_record(raw_record)
return example['frustum_point_cloud'], \
tf.cast(example['one_hot_vec'], tf.float32), \
tf.cast(example['seg_label'], tf.int32), \
example['box3d_center'], \
tf.cast(example['angle_class'], tf.int32), \
example['angle_residual'], \
tf.cast(example['size_class'], tf.int32), \
example['size_residual']
def parse_frustum_point_record(tfexample_message: str):
    # NOTE (assumed fix): the original read `len(g_type_object_of_interest)`, a name
    # that is not defined anywhere in this file; the three one-hot classes above
    # (Car / Pedestrian / Cyclist) appear to be what was intended.
    NUM_CLASS = len(g_type2onehotclass)
NUM_POINT = 1024
NUM_CHANNELS_OF_PC = 3
keys_to_features = {
"size_class": tf.FixedLenFeature((), tf.int64, tf.zeros((), tf.int64)),
"size_residual": tf.FixedLenFeature((3,), tf.float32, tf.zeros((3,), tf.float32)),
"seg_label": tf.FixedLenFeature((NUM_POINT,), tf.int64, tf.zeros((NUM_POINT,), tf.int64)),
"frustum_point_cloud": tf.FixedLenFeature((NUM_POINT, NUM_CHANNELS_OF_PC), tf.float32),
"rot_angle": tf.FixedLenFeature((), tf.float32, tf.zeros((), tf.float32)),
"angle_class": tf.FixedLenFeature((), tf.int64, tf.zeros((), tf.int64)),
"angle_residual": tf.FixedLenFeature((), tf.float32, tf.zeros((), tf.float32)),
"one_hot_vec": tf.FixedLenFeature((NUM_CLASS,), tf.int64),
"box3d_center": tf.FixedLenFeature((3,), tf.float32, tf.zeros((3,), tf.float32)),
}
parsed_example = tf.io.parse_single_example(tfexample_message, keys_to_features)
return parsed_example
def parse_test_data(raw_record):
example = parse_frustum_point_test_record(raw_record)
print(example)
return example['frustum_point_cloud'], tf.cast(example['one_hot_vec'], tf.float32), \
tf.cast(example['rot_angle'], tf.float32), tf.cast(example['prob'], tf.float32), \
example['type_name'], example['sample_token'], example['box_2d']
def parse_frustum_point_test_record(tfexample_message: str):
NUM_CLASS = 3
NUM_POINT = 1024
NUM_CHANNELS_OF_PC = 3
keys_to_features = {
"frustum_point_cloud": tf.FixedLenFeature((NUM_POINT, NUM_CHANNELS_OF_PC), tf.float32),
"rot_angle": tf.FixedLenFeature((), tf.float32, tf.zeros((), tf.float32)),
"one_hot_vec": tf.FixedLenFeature((NUM_CLASS,), tf.int64),
"prob": tf.FixedLenFeature((), tf.float32, tf.zeros((), tf.float32)),
"type_name": tf.FixedLenFeature((), tf.int64),
"sample_token": tf.FixedLenFeature((), tf.int64),
"box_2d": tf.FixedLenFeature((4,), tf.float32)
}
parsed_example = tf.io.parse_single_example(tfexample_message, keys_to_features)
return parsed_example
def get_box3d_corners_helper(centers, headings, sizes):
""" TF layer. Input: (N,3), (N,), (N,3), Output: (N,8,3) """
# print '-----', centers
N = centers.get_shape()[0].value
l = tf.slice(sizes, [0, 0], [-1, 1]) # (N,1)
w = tf.slice(sizes, [0, 1], [-1, 1]) # (N,1)
h = tf.slice(sizes, [0, 2], [-1, 1]) # (N,1)
# print l,w,h
x_corners = tf.concat([l / 2, l / 2, -l / 2, -l / 2, l / 2, l / 2, -l / 2, -l / 2], axis=1) # (N,8)
y_corners = tf.concat([h / 2, h / 2, h / 2, h / 2, -h / 2, -h / 2, -h / 2, -h / 2], axis=1) # (N,8)
z_corners = tf.concat([w / 2, -w / 2, -w / 2, w / 2, w / 2, -w / 2, -w / 2, w / 2], axis=1) # (N,8)
corners = tf.concat([tf.expand_dims(x_corners, 1), tf.expand_dims(y_corners, 1), tf.expand_dims(z_corners, 1)],
axis=1) # (N,3,8)
# print x_corners, y_corners, z_corners
c = tf.cos(headings)
s = tf.sin(headings)
ones = tf.ones([N], dtype=tf.float32)
zeros = tf.zeros([N], dtype=tf.float32)
row1 = tf.stack([c, zeros, s], axis=1) # (N,3)
row2 = tf.stack([zeros, ones, zeros], axis=1)
row3 = tf.stack([-s, zeros, c], axis=1)
R = tf.concat([tf.expand_dims(row1, 1), tf.expand_dims(row2, 1), tf.expand_dims(row3, 1)], axis=1) # (N,3,3)
# print row1, row2, row3, R, N
corners_3d = tf.matmul(R, corners) # (N,3,8)
corners_3d += tf.tile(tf.expand_dims(centers, 2), [1, 1, 8]) # (N,3,8)
corners_3d = tf.transpose(corners_3d, perm=[0, 2, 1]) # (N,8,3)
return corners_3d
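# Added note (not in the original file): rows 1-3 above form the heading rotation
# about the camera Y (up) axis,
#     R(h) = [[ cos h, 0, sin h],
#             [     0, 1,     0],
#             [-sin h, 0, cos h]]
# Corners are laid out in the box frame as (+-l/2, +-h/2, +-w/2), rotated by R(h),
# then shifted by the box center. Tiny NumPy cross-check (hypothetical helper):
def _np_first_corner(center, heading, size):
    """Return the (+l/2, +h/2, +w/2) corner of one box using plain NumPy."""
    l, w, h = size
    c, s = np.cos(heading), np.sin(heading)
    R = np.array([[c, 0.0, s], [0.0, 1.0, 0.0], [-s, 0.0, c]])
    return R @ np.array([l / 2.0, h / 2.0, w / 2.0]) + np.asarray(center, dtype=np.float64)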
def get_box3d_corners(center, heading_residuals, size_residuals):
""" TF layer.
Inputs:
center: (B,3)
heading_residuals: (B,NH)
size_residuals: (B,NS,3)
Outputs:
box3d_corners: (B,NH,NS,8,3) tensor
"""
batch_size = center.get_shape()[0].value
heading_bin_centers = tf.constant(np.arange(0, 2 * np.pi, 2 * np.pi / NUM_HEADING_BIN), dtype=tf.float32) # (NH,)
headings = heading_residuals + tf.expand_dims(heading_bin_centers, 0) # (B,NH)
mean_sizes = tf.expand_dims(tf.constant(g_mean_size_arr, dtype=tf.float32), 0) + size_residuals # (B,NS,1)
sizes = mean_sizes + size_residuals # (B,NS,3)
sizes = tf.tile(tf.expand_dims(sizes, 1), [1, NUM_HEADING_BIN, 1, 1]) # (B,NH,NS,3)
headings = tf.tile(tf.expand_dims(headings, -1), [1, 1, NUM_SIZE_CLUSTER]) # (B,NH,NS)
centers = tf.tile(tf.expand_dims(tf.expand_dims(center, 1), 1),
[1, NUM_HEADING_BIN, NUM_SIZE_CLUSTER, 1]) # (B,NH,NS,3)
N = batch_size * NUM_HEADING_BIN * NUM_SIZE_CLUSTER
corners_3d = get_box3d_corners_helper(tf.reshape(centers, [N, 3]), tf.reshape(headings, [N]),
tf.reshape(sizes, [N, 3]))
return tf.reshape(corners_3d, [batch_size, NUM_HEADING_BIN, NUM_SIZE_CLUSTER, 8, 3])
def huber_loss(error, delta):
abs_error = tf.abs(error)
quadratic = tf.minimum(abs_error, delta)
linear = (abs_error - quadratic)
losses = 0.5 * quadratic ** 2 + delta * linear
return tf.reduce_mean(losses)
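# Added note (not in the original file): huber_loss above is the standard Huber
# penalty with threshold `delta`,
#     loss(e) = 0.5 * e^2                     if |e| <= delta
#             = delta * (|e| - 0.5 * delta)   otherwise,
# i.e. quadratic near zero and linear in the tails, which keeps the box-regression
# gradients bounded for outlier residuals.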
def parse_output_to_tensors(output, end_points):
''' Parse batch output to separate tensors (added to end_points)
Input:
output: TF tensor in shape (B,3+2*NUM_HEADING_BIN+4*NUM_SIZE_CLUSTER)
end_points: dict
Output:
end_points: dict (updated)
'''
batch_size = output.get_shape()[0].value
center = tf.slice(output, [0, 0], [-1, 3])
end_points['center_boxnet'] = center
heading_scores = tf.slice(output, [0, 3], [-1, NUM_HEADING_BIN])
heading_residuals_normalized = tf.slice(output, [0, 3 + NUM_HEADING_BIN],
[-1, NUM_HEADING_BIN])
end_points['heading_scores'] = heading_scores # BxNUM_HEADING_BIN
end_points['heading_residuals_normalized'] = \
heading_residuals_normalized # BxNUM_HEADING_BIN (-1 to 1)
end_points['heading_residuals'] = \
heading_residuals_normalized * (np.pi / NUM_HEADING_BIN) # BxNUM_HEADING_BIN
size_scores = tf.slice(output, [0, 3 + NUM_HEADING_BIN * 2],
[-1, NUM_SIZE_CLUSTER]) # BxNUM_SIZE_CLUSTER
size_residuals_normalized = tf.slice(output,
[0, 3 + NUM_HEADING_BIN * 2 + NUM_SIZE_CLUSTER], [-1, NUM_SIZE_CLUSTER * 3])
size_residuals_normalized = tf.reshape(size_residuals_normalized,
[batch_size, NUM_SIZE_CLUSTER, 3]) # BxNUM_SIZE_CLUSTERx3
end_points['size_scores'] = size_scores
end_points['size_residuals_normalized'] = size_residuals_normalized
end_points['size_residuals'] = size_residuals_normalized * \
tf.expand_dims(tf.constant(g_mean_size_arr, dtype=tf.float32), 0)
return end_points
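# Added note (not in the original file): with NUM_HEADING_BIN = 12 and
# NUM_SIZE_CLUSTER = 8 the box-net output parsed above is laid out as
#     3 (center) + 12 (heading scores) + 12 (heading residuals)
#   + 8 (size scores) + 24 (size residuals, 3 per cluster) = 59 channels,
# matching the documented shape (B, 3 + 2*NUM_HEADING_BIN + 4*NUM_SIZE_CLUSTER).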
# --------------------------------------
# Shared subgraphs for v1 and v2 models
# --------------------------------------
def placeholder_inputs(batch_size, num_point):
''' Get useful placeholder tensors.
Input:
batch_size: scalar int
num_point: scalar int
Output:
TF placeholders for inputs and ground truths
'''
pointclouds_pl = tf.placeholder(tf.float32,
shape=(batch_size, num_point, 4))
one_hot_vec_pl = tf.placeholder(tf.float32, shape=(batch_size, 3))
# labels_pl is for segmentation label
labels_pl = tf.placeholder(tf.int32, shape=(batch_size, num_point))
centers_pl = tf.placeholder(tf.float32, shape=(batch_size, 3))
heading_class_label_pl = tf.placeholder(tf.int32, shape=(batch_size,))
heading_residual_label_pl = tf.placeholder(tf.float32, shape=(batch_size,))
size_class_label_pl = tf.placeholder(tf.int32, shape=(batch_size,))
size_residual_label_pl = tf.placeholder(tf.float32, shape=(batch_size, 3))
return pointclouds_pl, one_hot_vec_pl, labels_pl, centers_pl, \
heading_class_label_pl, heading_residual_label_pl, \
size_class_label_pl, size_residual_label_pl
def point_cloud_masking(point_cloud, logits, end_points, xyz_only=True):
''' Select point cloud with predicted 3D mask,
translate coordinates to the masked points centroid.
Input:
point_cloud: TF tensor in shape (B,N,C)
logits: TF tensor in shape (B,N,2)
end_points: dict
xyz_only: boolean, if True only return XYZ channels
Output:
object_point_cloud: TF tensor in shape (B,M,3)
for simplicity we only keep XYZ here
M = NUM_OBJECT_POINT as a hyper-parameter
mask_xyz_mean: TF tensor in shape (B,3)
'''
batch_size = point_cloud.get_shape()[0].value
num_point = point_cloud.get_shape()[1].value
mask = tf.slice(logits, [0, 0, 0], [-1, -1, 1]) < \
tf.slice(logits, [0, 0, 1], [-1, -1, 1])
mask = tf.to_float(mask) # BxNx1
mask_count = tf.tile(tf.reduce_sum(mask, axis=1, keep_dims=True),
[1, 1, 3]) # Bx1x3
point_cloud_xyz = tf.slice(point_cloud, [0, 0, 0], [-1, -1, 3]) # BxNx3
mask_xyz_mean = tf.reduce_sum(tf.tile(mask, [1, 1, 3]) * point_cloud_xyz,
axis=1, keep_dims=True) # Bx1x3
mask = tf.squeeze(mask, axis=[2]) # BxN
end_points['mask'] = mask
mask_xyz_mean = mask_xyz_mean / tf.maximum(mask_count, 1) # Bx1x3
# Translate to masked points' centroid
point_cloud_xyz_stage1 = point_cloud_xyz - \
tf.tile(mask_xyz_mean, [1, num_point, 1])
if xyz_only:
point_cloud_stage1 = point_cloud_xyz_stage1
else:
point_cloud_features = tf.slice(point_cloud, [0, 0, 3], [-1, -1, -1])
point_cloud_stage1 = tf.concat([point_cloud_xyz_stage1, point_cloud_features], axis=-1)
num_channels = point_cloud_stage1.get_shape()[2].value
object_point_cloud, _ = tf_gather_object_pc(point_cloud_stage1,
mask, NUM_OBJECT_POINT)
object_point_cloud.set_shape([batch_size, NUM_OBJECT_POINT, num_channels])
return object_point_cloud, tf.squeeze(mask_xyz_mean, axis=1), end_points
def get_center_regression_net(object_point_cloud, one_hot_vec,
is_training, bn_decay, end_points):
''' Regression network for center delta. a.k.a. T-Net.
Input:
object_point_cloud: TF tensor in shape (B,M,C)
point clouds in 3D mask coordinate
one_hot_vec: TF tensor in shape (B,3)
length-3 vectors indicating predicted object type
Output:
predicted_center: TF tensor in shape (B,3)
'''
num_point = object_point_cloud.get_shape()[1].value
net = tf.expand_dims(object_point_cloud, 2)
net = tf_util.conv2d(net, 128, [1, 1],
padding='VALID', stride=[1, 1],
bn=True, is_training=is_training,
scope='conv-reg1-stage1', bn_decay=bn_decay)
net = tf_util.conv2d(net, 128, [1, 1],
padding='VALID', stride=[1, 1],
bn=True, is_training=is_training,
scope='conv-reg2-stage1', bn_decay=bn_decay)
net = tf_util.conv2d(net, 256, [1, 1],
padding='VALID', stride=[1, 1],
bn=True, is_training=is_training,
scope='conv-reg3-stage1', bn_decay=bn_decay)
net = tf_util.max_pool2d(net, [num_point, 1],
padding='VALID', scope='maxpool-stage1')
net = tf.squeeze(net, axis=[1, 2])
net = tf.concat([net, one_hot_vec], axis=1)
net = tf_util.fully_connected(net, 256, scope='fc1-stage1', bn=True,
is_training=is_training, bn_decay=bn_decay)
net = tf_util.fully_connected(net, 128, scope='fc2-stage1', bn=True,
is_training=is_training, bn_decay=bn_decay)
predicted_center = tf_util.fully_connected(net, 3, activation_fn=None,
scope='fc3-stage1')
return predicted_center, end_points
def get_loss(mask_label, center_label, heading_class_label, heading_residual_label, size_class_label,
size_residual_label, end_points, corner_loss_weight=10.0, box_loss_weight=1.0):
""" Loss functions for 3D object detection.
Input:
mask_label: TF int32 tensor in shape (B,N)
center_label: TF tensor in shape (B,3)
heading_class_label: TF int32 tensor in shape (B,)
heading_residual_label: TF tensor in shape (B,)
size_class_label: TF tensor int32 in shape (B,)
size_residual_label: TF tensor tensor in shape (B,)
end_points: dict, outputs from our model
corner_loss_weight: float scalar
box_loss_weight: float scalar
Output:
total_loss: TF scalar tensor
the total_loss is also added to the losses collection
"""
# 3D Segmentation loss
mask_loss = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(logits=end_points['mask_logits'],
labels=mask_label))
tf.summary.scalar('3d mask loss', mask_loss)
# Center regression losses
center_dist = tf.norm(center_label - end_points['center'], axis=-1)
center_loss = huber_loss(center_dist, delta=2.0)
tf.summary.scalar('center loss', center_loss)
stage1_center_dist = tf.norm(center_label - \
end_points['stage1_center'], axis=-1)
stage1_center_loss = huber_loss(stage1_center_dist, delta=1.0)
tf.summary.scalar('stage1 center loss', stage1_center_loss)
# Heading loss
heading_class_loss = tf.reduce_mean(
tf.nn.sparse_softmax_cross_entropy_with_logits(logits=end_points['heading_scores'], labels=heading_class_label))
tf.summary.scalar('heading class loss', heading_class_loss)
hcls_onehot = tf.one_hot(heading_class_label,
depth=NUM_HEADING_BIN,
on_value=1, off_value=0, axis=-1) # BxNUM_HEADING_BIN
heading_residual_normalized_label = \
heading_residual_label / (np.pi / NUM_HEADING_BIN)
heading_residual_normalized_loss = huber_loss(tf.reduce_sum(
end_points['heading_residuals_normalized'] * tf.to_float(hcls_onehot), axis=1) - \
heading_residual_normalized_label, delta=1.0)
tf.summary.scalar('heading residual normalized loss',
heading_residual_normalized_loss)
# Size loss
size_class_loss = tf.reduce_mean(
tf.nn.sparse_softmax_cross_entropy_with_logits(
logits=end_points['size_scores'], labels=size_class_label))
tf.summary.scalar('size class loss', size_class_loss)
scls_onehot = tf.one_hot(size_class_label,
depth=NUM_SIZE_CLUSTER,
on_value=1, off_value=0, axis=-1) # BxNUM_SIZE_CLUSTER
scls_onehot_tiled = tf.tile(tf.expand_dims(
tf.to_float(scls_onehot), -1), [1, 1, 3]) # BxNUM_SIZE_CLUSTERx3
predicted_size_residual_normalized = tf.reduce_sum(
end_points['size_residuals_normalized'] * scls_onehot_tiled, axis=[1]) # Bx3
mean_size_arr_expand = tf.expand_dims(
tf.constant(g_mean_size_arr, dtype=tf.float32), 0) # 1xNUM_SIZE_CLUSTERx3
mean_size_label = tf.reduce_sum(
scls_onehot_tiled * mean_size_arr_expand, axis=[1]) # Bx3
size_residual_label_normalized = size_residual_label / mean_size_label
size_normalized_dist = tf.norm(
size_residual_label_normalized - predicted_size_residual_normalized,
axis=-1)
size_residual_normalized_loss = huber_loss(size_normalized_dist, delta=1.0)
tf.summary.scalar('size residual normalized loss',
size_residual_normalized_loss)
# Corner loss
# We select the predicted corners corresponding to the
# GT heading bin and size cluster.
corners_3d = get_box3d_corners(end_points['center'],
end_points['heading_residuals'],
end_points['size_residuals']) # (B,NH,NS,8,3)
gt_mask = tf.tile(tf.expand_dims(hcls_onehot, 2), [1, 1, NUM_SIZE_CLUSTER]) * \
tf.tile(tf.expand_dims(scls_onehot, 1), [1, NUM_HEADING_BIN, 1]) # (B,NH,NS)
corners_3d_pred = tf.reduce_sum(
tf.to_float(tf.expand_dims(tf.expand_dims(gt_mask, -1), -1)) * corners_3d,
axis=[1, 2]) # (B,8,3)
heading_bin_centers = tf.constant(
np.arange(0, 2 * np.pi, 2 * np.pi / NUM_HEADING_BIN), dtype=tf.float32) # (NH,)
heading_label = tf.expand_dims(heading_residual_label, 1) + \
tf.expand_dims(heading_bin_centers, 0) # (B,NH)
heading_label = tf.reduce_sum(tf.to_float(hcls_onehot) * heading_label, 1)
mean_sizes = tf.expand_dims(
tf.constant(g_mean_size_arr, dtype=tf.float32), 0) # (1,NS,3)
size_label = mean_sizes + \
tf.expand_dims(size_residual_label, 1) # (1,NS,3) + (B,1,3) = (B,NS,3)
size_label = tf.reduce_sum(
tf.expand_dims(tf.to_float(scls_onehot), -1) * size_label, axis=[1]) # (B,3)
corners_3d_gt = get_box3d_corners_helper(
center_label, heading_label, size_label) # (B,8,3)
corners_3d_gt_flip = get_box3d_corners_helper(
center_label, heading_label + np.pi, size_label) # (B,8,3)
corners_dist = tf.minimum(tf.norm(corners_3d_pred - corners_3d_gt, axis=-1),
tf.norm(corners_3d_pred - corners_3d_gt_flip, axis=-1))
corners_loss = huber_loss(corners_dist, delta=1.0)
tf.summary.scalar('corners loss', corners_loss)
# Weighted sum of all losses
total_loss = mask_loss + box_loss_weight * (center_loss +
heading_class_loss + size_class_loss +
heading_residual_normalized_loss * 20 +
size_residual_normalized_loss * 20 +
stage1_center_loss +
corner_loss_weight * corners_loss)
tf.add_to_collection('losses', total_loss)
return total_loss
def FPointNet_loss(args, corner_loss_weight=10.0, box_loss_weight=1.0, mask_weight=1.0):
""" Loss functions for 3D object detection.
Input:
mask_label: TF int32 tensor in shape (B,N)
center_label: TF tensor in shape (B,3)
heading_class_label: TF int32 tensor in shape (B,)
heading_residual_label: TF tensor in shape (B,)
size_class_label: TF tensor int32 in shape (B,)
size_residual_label: TF tensor tensor in shape (B,)
end_points: dict, outputs from our model
corner_loss_weight: float scalar
box_loss_weight: float scalar
Output:
total_loss: TF scalar tensor
the total_loss is also added to the losses collection
"""
mask_label, center_label, heading_class_label, heading_residual_label, size_class_label, size_residual_label, \
end_points = args[0], args[1], args[2], args[3], args[4], args[5], args[6]
# 3D Segmentation loss
mask_loss = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(logits=end_points['mask_logits'],
labels=tf.cast(mask_label, tf.int64)))
tf.summary.scalar('3d mask loss', mask_loss)
# Center regression losses
center_dist = tf.norm(center_label - end_points['center'], axis=-1)
center_loss = huber_loss(center_dist, delta=2.0)
tf.summary.scalar('center loss', center_loss)
stage1_center_dist = tf.norm(center_label - end_points['stage1_center'], axis=-1)
stage1_center_loss = huber_loss(stage1_center_dist, delta=1.0)
tf.summary.scalar('stage1 center loss', stage1_center_loss)
# Heading loss
heading_class_loss = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(
logits=end_points['heading_scores'], labels=tf.cast(heading_class_label, tf.int64)))
tf.summary.scalar('heading class loss', heading_class_loss)
hcls_onehot = tf.one_hot(tf.cast(heading_class_label, tf.int64), depth=NUM_HEADING_BIN, on_value=1, off_value=0,
axis=-1) # BxNUM_HEADING_BIN
heading_residual_normalized_label = heading_residual_label / (np.pi / NUM_HEADING_BIN)
heading_residual_normalized_loss = huber_loss(tf.reduce_sum(
end_points['heading_residuals_normalized'] * tf.to_float(hcls_onehot), axis=1) - \
heading_residual_normalized_label, delta=1.0)
tf.summary.scalar('heading residual normalized loss',
heading_residual_normalized_loss)
# Size loss
size_class_loss = tf.reduce_mean(
tf.nn.sparse_softmax_cross_entropy_with_logits(
logits=end_points['size_scores'], labels=tf.cast(size_class_label, tf.int64)))
tf.summary.scalar('size class loss', size_class_loss)
scls_onehot = tf.one_hot(tf.cast(size_class_label, tf.int64),
depth=NUM_SIZE_CLUSTER,
on_value=1, off_value=0, axis=-1) # BxNUM_SIZE_CLUSTER
scls_onehot_tiled = tf.tile(tf.expand_dims(tf.to_float(scls_onehot), -1), [1, 1, 3]) # BxNUM_SIZE_CLUSTERx3
predicted_size_residual_normalized = tf.reduce_sum(end_points['size_residuals_normalized'] *
scls_onehot_tiled, axis=[1]) # Bx3
mean_size_arr_expand = tf.expand_dims(tf.constant(g_mean_size_arr, dtype=tf.float32), 0) # 1xNUM_SIZE_CLUSTERx3
mean_size_label = tf.reduce_sum(scls_onehot_tiled * mean_size_arr_expand, axis=[1]) # Bx3
size_residual_label_normalized = size_residual_label / mean_size_label
size_normalized_dist = tf.norm(size_residual_label_normalized - predicted_size_residual_normalized, axis=-1)
size_residual_normalized_loss = huber_loss(size_normalized_dist, delta=1.0)
tf.summary.scalar('size residual normalized loss',
size_residual_normalized_loss)
# Corner loss
# We select the predicted corners corresponding to the
# GT heading bin and size cluster.
corners_3d = get_box3d_corners(end_points['center'],
end_points['heading_residuals'],
end_points['size_residuals']) # (B,NH,NS,8,3)
gt_mask = tf.tile(tf.expand_dims(hcls_onehot, 2), [1, 1, NUM_SIZE_CLUSTER]) * \
tf.tile(tf.expand_dims(scls_onehot, 1), [1, NUM_HEADING_BIN, 1]) # (B,NH,NS)
corners_3d_pred = tf.reduce_sum(tf.to_float(tf.expand_dims(tf.expand_dims(gt_mask, -1), -1)) * corners_3d,
axis=[1, 2]) # (B,8,3)
heading_bin_centers = tf.constant(np.arange(0, 2 * np.pi, 2 * np.pi / NUM_HEADING_BIN), dtype=tf.float32) # (NH,)
heading_label = tf.expand_dims(heading_residual_label, 1) + tf.expand_dims(heading_bin_centers, 0) # (B,NH)
heading_label = tf.reduce_sum(tf.to_float(hcls_onehot) * heading_label, 1)
mean_sizes = tf.expand_dims(tf.constant(g_mean_size_arr, dtype=tf.float32), 0) # (1,NS,3)
size_label = mean_sizes + tf.expand_dims(size_residual_label, 1) # (1,NS,3) + (B,1,3) = (B,NS,3)
size_label = tf.reduce_sum(tf.expand_dims(tf.to_float(scls_onehot), -1) * size_label, axis=[1]) # (B,3)
corners_3d_gt = get_box3d_corners_helper(center_label, heading_label, size_label) # (B,8,3)
corners_3d_gt_flip = get_box3d_corners_helper(center_label, heading_label + np.pi, size_label) # (B,8,3)
corners_dist = tf.minimum(tf.norm(corners_3d_pred - corners_3d_gt, axis=-1),
tf.norm(corners_3d_pred - corners_3d_gt_flip, axis=-1))
corners_loss = huber_loss(corners_dist, delta=1.0)
tf.summary.scalar('corners loss', corners_loss)
center_loss = K.clip(center_loss, min_value=0, max_value=100)
heading_class_loss = K.clip(heading_class_loss, min_value=0, max_value=100)
size_class_loss = K.clip(size_class_loss, min_value=0, max_value=100)
heading_residual_normalized_loss = K.clip(heading_residual_normalized_loss, min_value=0, max_value=100)
size_residual_normalized_loss = K.clip(size_residual_normalized_loss, min_value=0, max_value=100)
stage1_center_loss = K.clip(stage1_center_loss, min_value=0, max_value=100)
corners_loss = K.clip(corners_loss, min_value=0, max_value=100)
total_loss = mask_loss * mask_weight + box_loss_weight * (center_loss + heading_class_loss + size_class_loss +
heading_residual_normalized_loss * 20 +
size_residual_normalized_loss * 20 +
stage1_center_loss +
corner_loss_weight * corners_loss)
tf.add_to_collection('losses', total_loss)
print(total_loss)
return total_loss
| import numpy as np
import tensorflow as tf
import os
import sys
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
sys.path.append(BASE_DIR)
import tf_util
# -----------------
# Global Constants
# -----------------
NUM_HEADING_BIN = 12
NUM_SIZE_CLUSTER = 8 # one cluster for each type
NUM_OBJECT_POINT = 512
# g_type2class = {'car': 0, 'Van': 1, 'Truck': 2, 'pedestrian': 3,
# 'Person_sitting': 4, 'bicycle': 5, 'Tram': 6, 'Misc': 7}
# g_class2type = {g_type2class[t]: t for t in g_type2class}
# g_type2onehotclass = {'car': 0, 'pedestrian': 1, 'bicycle': 2}
# g_type_mean_size = {'car': np.array([4.682, 1.898, 1.668]),
# 'Van': np.array([5.06763659, 1.9007158, 2.20532825]),
# 'Truck': np.array([10.13586957, 2.58549199, 3.2520595]),
# 'pedestrian': np.array([0.787, 0.768, 1.79]),
# 'Person_sitting': np.array([0.80057803, 0.5983815, 1.27450867]),
# 'bicycle': np.array([1.775, 0.654, 1.276]),
# 'Tram': np.array([16.17150617, 2.53246914, 3.53079012]),
# 'Misc': np.array([3.64300781, 1.54298177, 1.92320313])}
# g_mean_size_arr = np.zeros((NUM_SIZE_CLUSTER, 3)) # size clustrs
# for i in range(NUM_SIZE_CLUSTER):
# g_mean_size_arr[i, :] = g_type_mean_size[g_class2type[i]]
g_type2class = {'Car': 0, 'Van': 1, 'Truck': 2, 'Pedestrian': 3,
'Person_sitting': 4, 'Cyclist': 5, 'Tram': 6, 'Misc': 7}
g_class2type = {g_type2class[t]: t for t in g_type2class}
g_type2onehotclass = {'Car': 0, 'Pedestrian': 1, 'Cyclist': 2}
g_type_mean_size = {'Car': np.array([3.88311640418, 1.62856739989, 1.52563191462]),
'Van': np.array([5.06763659, 1.9007158, 2.20532825]),
'Truck': np.array([10.13586957, 2.58549199, 3.2520595]),
'Pedestrian': np.array([0.84422524, 0.66068622, 1.76255119]),
'Person_sitting': np.array([0.80057803, 0.5983815, 1.27450867]),
'Cyclist': np.array([1.76282397, 0.59706367, 1.73698127]),
'Tram': np.array([16.17150617, 2.53246914, 3.53079012]),
'Misc': np.array([3.64300781, 1.54298177, 1.92320313])}
g_mean_size_arr = np.zeros((NUM_SIZE_CLUSTER, 3))  # mean size per cluster
for i in range(NUM_SIZE_CLUSTER):
g_mean_size_arr[i, :] = g_type_mean_size[g_class2type[i]]
# -----------------
# TF Functions Helpers
# -----------------
def tf_gather_object_pc(point_cloud, mask, npoints=512):
""" Gather object point clouds according to predicted masks.
Input:
point_cloud: TF tensor in shape (B,N,C)
mask: TF tensor in shape (B,N) of 0 (not pick) or 1 (pick)
npoints: int scalar, maximum number of points to keep (default: 512)
Output:
object_pc: TF tensor in shape (B,npoint,C)
indices: TF int tensor in shape (B,npoint,2)
"""
def mask_to_indices(mask):
indices = np.zeros((mask.shape[0], npoints, 2), dtype=np.int32)
for i in range(mask.shape[0]):
pos_indices = np.where(mask[i, :] > 0.5)[0]
# skip cases when pos_indices is empty
if len(pos_indices) > 0:
if len(pos_indices) > npoints:
choice = np.random.choice(len(pos_indices),
npoints, replace=False)
else:
choice = np.random.choice(len(pos_indices),
npoints - len(pos_indices), replace=True)
choice = np.concatenate((np.arange(len(pos_indices)), choice))
np.random.shuffle(choice)
indices[i, :, 1] = pos_indices[choice]
indices[i, :, 0] = i
return indices
indices = tf.py_func(mask_to_indices, [mask], tf.int32)
object_pc = tf.gather_nd(point_cloud, indices)
return object_pc, indices
def parse_data(raw_record):
example = parse_frustum_point_record(raw_record)
return example['frustum_point_cloud'], \
tf.cast(example['one_hot_vec'], tf.float32), \
tf.cast(example['seg_label'], tf.int32), \
example['box3d_center'], \
tf.cast(example['angle_class'], tf.int32), \
example['angle_residual'], \
tf.cast(example['size_class'], tf.int32), \
example['size_residual']
def parse_frustum_point_record(tfexample_message: str):
NUM_CLASS = len(g_type_object_of_interest)
NUM_POINT = 1024
NUM_CHANNELS_OF_PC = 3
keys_to_features = {
"size_class": tf.FixedLenFeature((), tf.int64, tf.zeros((), tf.int64)),
"size_residual": tf.FixedLenFeature((3,), tf.float32, tf.zeros((3,), tf.float32)),
"seg_label": tf.FixedLenFeature((NUM_POINT,), tf.int64, tf.zeros((NUM_POINT,), tf.int64)),
"frustum_point_cloud": tf.FixedLenFeature((NUM_POINT, NUM_CHANNELS_OF_PC), tf.float32),
"rot_angle": tf.FixedLenFeature((), tf.float32, tf.zeros((), tf.float32)),
"angle_class": tf.FixedLenFeature((), tf.int64, tf.zeros((), tf.int64)),
"angle_residual": tf.FixedLenFeature((), tf.float32, tf.zeros((), tf.float32)),
"one_hot_vec": tf.FixedLenFeature((NUM_CLASS,), tf.int64),
"box3d_center": tf.FixedLenFeature((3,), tf.float32, tf.zeros((3,), tf.float32)),
}
parsed_example = tf.io.parse_single_example(tfexample_message, keys_to_features)
return parsed_example
def parse_test_data(raw_record):
example = parse_frustum_point_test_record(raw_record)
print(example)
return example['frustum_point_cloud'], tf.cast(example['one_hot_vec'], tf.float32), \
tf.cast(example['rot_angle'], tf.float32), tf.cast(example['prob'], tf.float32), \
example['type_name'], example['sample_token'], example['box_2d']
def parse_frustum_point_test_record(tfexample_message: str):
NUM_CLASS = 3
NUM_POINT = 1024
NUM_CHANNELS_OF_PC = 3
keys_to_features = {
"frustum_point_cloud": tf.FixedLenFeature((NUM_POINT, NUM_CHANNELS_OF_PC), tf.float32),
"rot_angle": tf.FixedLenFeature((), tf.float32, tf.zeros((), tf.float32)),
"one_hot_vec": tf.FixedLenFeature((NUM_CLASS,), tf.int64),
"prob": tf.FixedLenFeature((), tf.float32, tf.zeros((), tf.float32)),
"type_name": tf.FixedLenFeature((), tf.int64),
"sample_token": tf.FixedLenFeature((), tf.int64),
"box_2d": tf.FixedLenFeature((4,), tf.float32)
}
parsed_example = tf.io.parse_single_example(tfexample_message, keys_to_features)
return parsed_example
def get_box3d_corners_helper(centers, headings, sizes):
""" TF layer. Input: (N,3), (N,), (N,3), Output: (N,8,3) """
# print '-----', centers
N = centers.get_shape()[0].value
l = tf.slice(sizes, [0, 0], [-1, 1]) # (N,1)
w = tf.slice(sizes, [0, 1], [-1, 1]) # (N,1)
h = tf.slice(sizes, [0, 2], [-1, 1]) # (N,1)
# print l,w,h
x_corners = tf.concat([l / 2, l / 2, -l / 2, -l / 2, l / 2, l / 2, -l / 2, -l / 2], axis=1) # (N,8)
y_corners = tf.concat([h / 2, h / 2, h / 2, h / 2, -h / 2, -h / 2, -h / 2, -h / 2], axis=1) # (N,8)
z_corners = tf.concat([w / 2, -w / 2, -w / 2, w / 2, w / 2, -w / 2, -w / 2, w / 2], axis=1) # (N,8)
corners = tf.concat([tf.expand_dims(x_corners, 1), tf.expand_dims(y_corners, 1), tf.expand_dims(z_corners, 1)],
axis=1) # (N,3,8)
# print x_corners, y_corners, z_corners
c = tf.cos(headings)
s = tf.sin(headings)
ones = tf.ones([N], dtype=tf.float32)
zeros = tf.zeros([N], dtype=tf.float32)
row1 = tf.stack([c, zeros, s], axis=1) # (N,3)
row2 = tf.stack([zeros, ones, zeros], axis=1)
row3 = tf.stack([-s, zeros, c], axis=1)
R = tf.concat([tf.expand_dims(row1, 1), tf.expand_dims(row2, 1), tf.expand_dims(row3, 1)], axis=1) # (N,3,3)
# print row1, row2, row3, R, N
corners_3d = tf.matmul(R, corners) # (N,3,8)
corners_3d += tf.tile(tf.expand_dims(centers, 2), [1, 1, 8]) # (N,3,8)
corners_3d = tf.transpose(corners_3d, perm=[0, 2, 1]) # (N,8,3)
return corners_3d
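# --- Editor's sketch (not part of the original file): NumPy reference for a single box,
# mirroring the corner ordering and Y-axis rotation used in get_box3d_corners_helper;
# handy for unit-testing the TF version.
def _example_box3d_corners_numpy(center, heading, size):
    l, w, h = size
    x_corners = np.array([l, l, -l, -l, l, l, -l, -l]) / 2.0
    y_corners = np.array([h, h, h, h, -h, -h, -h, -h]) / 2.0
    z_corners = np.array([w, -w, -w, w, w, -w, -w, w]) / 2.0
    c, s = np.cos(heading), np.sin(heading)
    R = np.array([[c, 0.0, s], [0.0, 1.0, 0.0], [-s, 0.0, c]])
    corners = R.dot(np.vstack([x_corners, y_corners, z_corners]))
    corners += np.reshape(center, (3, 1))
    return corners.T  # (8,3), same layout as the TF helper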
def get_box3d_corners(center, heading_residuals, size_residuals):
""" TF layer.
Inputs:
center: (B,3)
heading_residuals: (B,NH)
size_residuals: (B,NS,3)
Outputs:
box3d_corners: (B,NH,NS,8,3) tensor
"""
batch_size = center.get_shape()[0].value
heading_bin_centers = tf.constant(np.arange(0, 2 * np.pi, 2 * np.pi / NUM_HEADING_BIN), dtype=tf.float32) # (NH,)
headings = heading_residuals + tf.expand_dims(heading_bin_centers, 0) # (B,NH)
    mean_sizes = tf.expand_dims(tf.constant(g_mean_size_arr, dtype=tf.float32), 0)  # (1,NS,3)
sizes = mean_sizes + size_residuals # (B,NS,3)
sizes = tf.tile(tf.expand_dims(sizes, 1), [1, NUM_HEADING_BIN, 1, 1]) # (B,NH,NS,3)
headings = tf.tile(tf.expand_dims(headings, -1), [1, 1, NUM_SIZE_CLUSTER]) # (B,NH,NS)
centers = tf.tile(tf.expand_dims(tf.expand_dims(center, 1), 1),
[1, NUM_HEADING_BIN, NUM_SIZE_CLUSTER, 1]) # (B,NH,NS,3)
N = batch_size * NUM_HEADING_BIN * NUM_SIZE_CLUSTER
corners_3d = get_box3d_corners_helper(tf.reshape(centers, [N, 3]), tf.reshape(headings, [N]),
tf.reshape(sizes, [N, 3]))
return tf.reshape(corners_3d, [batch_size, NUM_HEADING_BIN, NUM_SIZE_CLUSTER, 8, 3])
def huber_loss(error, delta):
abs_error = tf.abs(error)
quadratic = tf.minimum(abs_error, delta)
linear = (abs_error - quadratic)
losses = 0.5 * quadratic ** 2 + delta * linear
return tf.reduce_mean(losses)
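# --- Editor's note (not part of the original file): huber_loss above computes
# 0.5*e^2 for |e| <= delta and delta*(|e| - 0.5*delta) otherwise, averaged over the batch.
# NumPy sketch for quick sanity checks:
def _example_huber_numpy(error, delta):
    abs_error = np.abs(error)
    quadratic = np.minimum(abs_error, delta)
    linear = abs_error - quadratic
    return np.mean(0.5 * quadratic ** 2 + delta * linear)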
def parse_output_to_tensors(output, end_points):
''' Parse batch output to separate tensors (added to end_points)
Input:
output: TF tensor in shape (B,3+2*NUM_HEADING_BIN+4*NUM_SIZE_CLUSTER)
end_points: dict
Output:
end_points: dict (updated)
'''
batch_size = output.get_shape()[0].value
center = tf.slice(output, [0, 0], [-1, 3])
end_points['center_boxnet'] = center
heading_scores = tf.slice(output, [0, 3], [-1, NUM_HEADING_BIN])
heading_residuals_normalized = tf.slice(output, [0, 3 + NUM_HEADING_BIN],
[-1, NUM_HEADING_BIN])
end_points['heading_scores'] = heading_scores # BxNUM_HEADING_BIN
end_points['heading_residuals_normalized'] = \
heading_residuals_normalized # BxNUM_HEADING_BIN (-1 to 1)
end_points['heading_residuals'] = \
heading_residuals_normalized * (np.pi / NUM_HEADING_BIN) # BxNUM_HEADING_BIN
size_scores = tf.slice(output, [0, 3 + NUM_HEADING_BIN * 2],
[-1, NUM_SIZE_CLUSTER]) # BxNUM_SIZE_CLUSTER
size_residuals_normalized = tf.slice(output,
[0, 3 + NUM_HEADING_BIN * 2 + NUM_SIZE_CLUSTER], [-1, NUM_SIZE_CLUSTER * 3])
size_residuals_normalized = tf.reshape(size_residuals_normalized,
[batch_size, NUM_SIZE_CLUSTER, 3]) # BxNUM_SIZE_CLUSTERx3
end_points['size_scores'] = size_scores
end_points['size_residuals_normalized'] = size_residuals_normalized
end_points['size_residuals'] = size_residuals_normalized * \
tf.expand_dims(tf.constant(g_mean_size_arr, dtype=tf.float32), 0)
return end_points
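# --- Editor's note (not part of the original file): the box-estimation output vector that
# parse_output_to_tensors slices is laid out as
#   [0:3]                    center
#   [3:3+NH]                 heading scores
#   [3+NH:3+2*NH]            normalized heading residuals
#   [3+2*NH:3+2*NH+NS]       size scores
#   [3+2*NH+NS:3+2*NH+4*NS]  normalized size residuals (NS x 3, flattened)
# i.e. 3 + 2*NUM_HEADING_BIN + 4*NUM_SIZE_CLUSTER values per example.
def _example_output_dim(num_heading_bin=NUM_HEADING_BIN, num_size_cluster=NUM_SIZE_CLUSTER):
    return 3 + 2 * num_heading_bin + 4 * num_size_cluster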
# --------------------------------------
# Shared subgraphs for v1 and v2 models
# --------------------------------------
def placeholder_inputs(batch_size, num_point):
''' Get useful placeholder tensors.
Input:
batch_size: scalar int
num_point: scalar int
Output:
TF placeholders for inputs and ground truths
'''
pointclouds_pl = tf.placeholder(tf.float32,
shape=(batch_size, num_point, 4))
one_hot_vec_pl = tf.placeholder(tf.float32, shape=(batch_size, 3))
# labels_pl is for segmentation label
labels_pl = tf.placeholder(tf.int32, shape=(batch_size, num_point))
centers_pl = tf.placeholder(tf.float32, shape=(batch_size, 3))
heading_class_label_pl = tf.placeholder(tf.int32, shape=(batch_size,))
heading_residual_label_pl = tf.placeholder(tf.float32, shape=(batch_size,))
size_class_label_pl = tf.placeholder(tf.int32, shape=(batch_size,))
size_residual_label_pl = tf.placeholder(tf.float32, shape=(batch_size, 3))
return pointclouds_pl, one_hot_vec_pl, labels_pl, centers_pl, \
heading_class_label_pl, heading_residual_label_pl, \
size_class_label_pl, size_residual_label_pl
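# --- Editor's sketch (not part of the original file): TF 1.x feed_dict usage for the
# placeholders above. The batch size of 32, 1024 points and the random arrays are stand-ins
# for real data loaded elsewhere.
def _example_feed_placeholders(sess, pointclouds_pl, one_hot_vec_pl, fetches):
    feed_dict = {
        pointclouds_pl: np.random.rand(32, 1024, 4).astype(np.float32),
        one_hot_vec_pl: np.eye(3, dtype=np.float32)[np.random.randint(0, 3, 32)],
    }
    return sess.run(fetches, feed_dict=feed_dict)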
def point_cloud_masking(point_cloud, logits, end_points, xyz_only=True):
''' Select point cloud with predicted 3D mask,
    translate coordinates to the masked points' centroid.
Input:
point_cloud: TF tensor in shape (B,N,C)
logits: TF tensor in shape (B,N,2)
end_points: dict
xyz_only: boolean, if True only return XYZ channels
Output:
object_point_cloud: TF tensor in shape (B,M,3)
for simplicity we only keep XYZ here
M = NUM_OBJECT_POINT as a hyper-parameter
mask_xyz_mean: TF tensor in shape (B,3)
'''
batch_size = point_cloud.get_shape()[0].value
num_point = point_cloud.get_shape()[1].value
mask = tf.slice(logits, [0, 0, 0], [-1, -1, 1]) < \
tf.slice(logits, [0, 0, 1], [-1, -1, 1])
mask = tf.to_float(mask) # BxNx1
mask_count = tf.tile(tf.reduce_sum(mask, axis=1, keep_dims=True),
[1, 1, 3]) # Bx1x3
point_cloud_xyz = tf.slice(point_cloud, [0, 0, 0], [-1, -1, 3]) # BxNx3
mask_xyz_mean = tf.reduce_sum(tf.tile(mask, [1, 1, 3]) * point_cloud_xyz,
axis=1, keep_dims=True) # Bx1x3
mask = tf.squeeze(mask, axis=[2]) # BxN
end_points['mask'] = mask
mask_xyz_mean = mask_xyz_mean / tf.maximum(mask_count, 1) # Bx1x3
# Translate to masked points' centroid
point_cloud_xyz_stage1 = point_cloud_xyz - \
tf.tile(mask_xyz_mean, [1, num_point, 1])
if xyz_only:
point_cloud_stage1 = point_cloud_xyz_stage1
else:
point_cloud_features = tf.slice(point_cloud, [0, 0, 3], [-1, -1, -1])
point_cloud_stage1 = tf.concat([point_cloud_xyz_stage1, point_cloud_features], axis=-1)
num_channels = point_cloud_stage1.get_shape()[2].value
object_point_cloud, _ = tf_gather_object_pc(point_cloud_stage1,
mask, NUM_OBJECT_POINT)
object_point_cloud.set_shape([batch_size, NUM_OBJECT_POINT, num_channels])
return object_point_cloud, tf.squeeze(mask_xyz_mean, axis=1), end_points
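# --- Editor's sketch (not part of the original file): per-example NumPy equivalent of the
# masking step above: points whose foreground logit wins are selected and re-centered on
# their centroid (without the fixed-size resampling done by tf_gather_object_pc).
def _example_mask_and_center_numpy(points_xyz, seg_logits):
    mask = seg_logits[:, 1] > seg_logits[:, 0]   # (N,) boolean foreground mask
    selected = points_xyz[mask]                  # (M,3)
    centroid = selected.mean(axis=0) if selected.size else np.zeros(3)
    return selected - centroid, centroid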
def get_center_regression_net(object_point_cloud, one_hot_vec,
is_training, bn_decay, end_points):
''' Regression network for center delta. a.k.a. T-Net.
Input:
object_point_cloud: TF tensor in shape (B,M,C)
point clouds in 3D mask coordinate
one_hot_vec: TF tensor in shape (B,3)
length-3 vectors indicating predicted object type
Output:
predicted_center: TF tensor in shape (B,3)
'''
num_point = object_point_cloud.get_shape()[1].value
net = tf.expand_dims(object_point_cloud, 2)
net = tf_util.conv2d(net, 128, [1, 1],
padding='VALID', stride=[1, 1],
bn=True, is_training=is_training,
scope='conv-reg1-stage1', bn_decay=bn_decay)
net = tf_util.conv2d(net, 128, [1, 1],
padding='VALID', stride=[1, 1],
bn=True, is_training=is_training,
scope='conv-reg2-stage1', bn_decay=bn_decay)
net = tf_util.conv2d(net, 256, [1, 1],
padding='VALID', stride=[1, 1],
bn=True, is_training=is_training,
scope='conv-reg3-stage1', bn_decay=bn_decay)
net = tf_util.max_pool2d(net, [num_point, 1],
padding='VALID', scope='maxpool-stage1')
net = tf.squeeze(net, axis=[1, 2])
net = tf.concat([net, one_hot_vec], axis=1)
net = tf_util.fully_connected(net, 256, scope='fc1-stage1', bn=True,
is_training=is_training, bn_decay=bn_decay)
net = tf_util.fully_connected(net, 128, scope='fc2-stage1', bn=True,
is_training=is_training, bn_decay=bn_decay)
predicted_center = tf_util.fully_connected(net, 3, activation_fn=None,
scope='fc3-stage1')
return predicted_center, end_points
def get_loss(mask_label, center_label, heading_class_label, heading_residual_label, size_class_label,
size_residual_label, end_points, corner_loss_weight=10.0, box_loss_weight=1.0):
""" Loss functions for 3D object detection.
Input:
mask_label: TF int32 tensor in shape (B,N)
center_label: TF tensor in shape (B,3)
heading_class_label: TF int32 tensor in shape (B,)
heading_residual_label: TF tensor in shape (B,)
        size_class_label: TF int32 tensor in shape (B,)
        size_residual_label: TF tensor in shape (B,3)
end_points: dict, outputs from our model
corner_loss_weight: float scalar
box_loss_weight: float scalar
Output:
total_loss: TF scalar tensor
the total_loss is also added to the losses collection
"""
# 3D Segmentation loss
mask_loss = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(logits=end_points['mask_logits'],
labels=mask_label))
tf.summary.scalar('3d mask loss', mask_loss)
# Center regression losses
center_dist = tf.norm(center_label - end_points['center'], axis=-1)
center_loss = huber_loss(center_dist, delta=2.0)
tf.summary.scalar('center loss', center_loss)
stage1_center_dist = tf.norm(center_label - \
end_points['stage1_center'], axis=-1)
stage1_center_loss = huber_loss(stage1_center_dist, delta=1.0)
tf.summary.scalar('stage1 center loss', stage1_center_loss)
# Heading loss
heading_class_loss = tf.reduce_mean(
tf.nn.sparse_softmax_cross_entropy_with_logits(logits=end_points['heading_scores'], labels=heading_class_label))
tf.summary.scalar('heading class loss', heading_class_loss)
hcls_onehot = tf.one_hot(heading_class_label,
depth=NUM_HEADING_BIN,
on_value=1, off_value=0, axis=-1) # BxNUM_HEADING_BIN
heading_residual_normalized_label = \
heading_residual_label / (np.pi / NUM_HEADING_BIN)
heading_residual_normalized_loss = huber_loss(tf.reduce_sum(
end_points['heading_residuals_normalized'] * tf.to_float(hcls_onehot), axis=1) - \
heading_residual_normalized_label, delta=1.0)
tf.summary.scalar('heading residual normalized loss',
heading_residual_normalized_loss)
# Size loss
size_class_loss = tf.reduce_mean(
tf.nn.sparse_softmax_cross_entropy_with_logits(
logits=end_points['size_scores'], labels=size_class_label))
tf.summary.scalar('size class loss', size_class_loss)
scls_onehot = tf.one_hot(size_class_label,
depth=NUM_SIZE_CLUSTER,
on_value=1, off_value=0, axis=-1) # BxNUM_SIZE_CLUSTER
scls_onehot_tiled = tf.tile(tf.expand_dims(
tf.to_float(scls_onehot), -1), [1, 1, 3]) # BxNUM_SIZE_CLUSTERx3
predicted_size_residual_normalized = tf.reduce_sum(
end_points['size_residuals_normalized'] * scls_onehot_tiled, axis=[1]) # Bx3
mean_size_arr_expand = tf.expand_dims(
tf.constant(g_mean_size_arr, dtype=tf.float32), 0) # 1xNUM_SIZE_CLUSTERx3
mean_size_label = tf.reduce_sum(
scls_onehot_tiled * mean_size_arr_expand, axis=[1]) # Bx3
size_residual_label_normalized = size_residual_label / mean_size_label
size_normalized_dist = tf.norm(
size_residual_label_normalized - predicted_size_residual_normalized,
axis=-1)
size_residual_normalized_loss = huber_loss(size_normalized_dist, delta=1.0)
tf.summary.scalar('size residual normalized loss',
size_residual_normalized_loss)
# Corner loss
# We select the predicted corners corresponding to the
# GT heading bin and size cluster.
corners_3d = get_box3d_corners(end_points['center'],
end_points['heading_residuals'],
end_points['size_residuals']) # (B,NH,NS,8,3)
gt_mask = tf.tile(tf.expand_dims(hcls_onehot, 2), [1, 1, NUM_SIZE_CLUSTER]) * \
tf.tile(tf.expand_dims(scls_onehot, 1), [1, NUM_HEADING_BIN, 1]) # (B,NH,NS)
corners_3d_pred = tf.reduce_sum(
tf.to_float(tf.expand_dims(tf.expand_dims(gt_mask, -1), -1)) * corners_3d,
axis=[1, 2]) # (B,8,3)
heading_bin_centers = tf.constant(
np.arange(0, 2 * np.pi, 2 * np.pi / NUM_HEADING_BIN), dtype=tf.float32) # (NH,)
heading_label = tf.expand_dims(heading_residual_label, 1) + \
tf.expand_dims(heading_bin_centers, 0) # (B,NH)
heading_label = tf.reduce_sum(tf.to_float(hcls_onehot) * heading_label, 1)
mean_sizes = tf.expand_dims(
tf.constant(g_mean_size_arr, dtype=tf.float32), 0) # (1,NS,3)
size_label = mean_sizes + \
tf.expand_dims(size_residual_label, 1) # (1,NS,3) + (B,1,3) = (B,NS,3)
size_label = tf.reduce_sum(
tf.expand_dims(tf.to_float(scls_onehot), -1) * size_label, axis=[1]) # (B,3)
corners_3d_gt = get_box3d_corners_helper(
center_label, heading_label, size_label) # (B,8,3)
corners_3d_gt_flip = get_box3d_corners_helper(
center_label, heading_label + np.pi, size_label) # (B,8,3)
corners_dist = tf.minimum(tf.norm(corners_3d_pred - corners_3d_gt, axis=-1),
tf.norm(corners_3d_pred - corners_3d_gt_flip, axis=-1))
corners_loss = huber_loss(corners_dist, delta=1.0)
tf.summary.scalar('corners loss', corners_loss)
# Weighted sum of all losses
total_loss = mask_loss + box_loss_weight * (center_loss +
heading_class_loss + size_class_loss +
heading_residual_normalized_loss * 20 +
size_residual_normalized_loss * 20 +
stage1_center_loss +
corner_loss_weight * corners_loss)
tf.add_to_collection('losses', total_loss)
return total_loss
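# --- Editor's sketch (not part of the original file): wiring the loss into a TF 1.x training
# op; the optimizer choice and learning rate are illustrative assumptions only.
def _example_train_op(total_loss, learning_rate=1e-3):
    optimizer = tf.train.AdamOptimizer(learning_rate)
    return optimizer.minimize(total_loss)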
def FPointNet_loss(args, corner_loss_weight=10.0, box_loss_weight=1.0, mask_weight=1.0):
""" Loss functions for 3D object detection.
Input:
mask_label: TF int32 tensor in shape (B,N)
center_label: TF tensor in shape (B,3)
heading_class_label: TF int32 tensor in shape (B,)
heading_residual_label: TF tensor in shape (B,)
        size_class_label: TF int32 tensor in shape (B,)
        size_residual_label: TF tensor in shape (B,3)
end_points: dict, outputs from our model
corner_loss_weight: float scalar
box_loss_weight: float scalar
Output:
total_loss: TF scalar tensor
the total_loss is also added to the losses collection
"""
mask_label, center_label, heading_class_label, heading_residual_label, size_class_label, size_residual_label, \
end_points = args[0], args[1], args[2], args[3], args[4], args[5], args[6]
# 3D Segmentation loss
mask_loss = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(logits=end_points['mask_logits'],
labels=tf.cast(mask_label, tf.int64)))
tf.summary.scalar('3d mask loss', mask_loss)
# Center regression losses
center_dist = tf.norm(center_label - end_points['center'], axis=-1)
center_loss = huber_loss(center_dist, delta=2.0)
tf.summary.scalar('center loss', center_loss)
stage1_center_dist = tf.norm(center_label - end_points['stage1_center'], axis=-1)
stage1_center_loss = huber_loss(stage1_center_dist, delta=1.0)
tf.summary.scalar('stage1 center loss', stage1_center_loss)
# Heading loss
heading_class_loss = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(
logits=end_points['heading_scores'], labels=tf.cast(heading_class_label, tf.int64)))
tf.summary.scalar('heading class loss', heading_class_loss)
hcls_onehot = tf.one_hot(tf.cast(heading_class_label, tf.int64), depth=NUM_HEADING_BIN, on_value=1, off_value=0,
axis=-1) # BxNUM_HEADING_BIN
heading_residual_normalized_label = heading_residual_label / (np.pi / NUM_HEADING_BIN)
heading_residual_normalized_loss = huber_loss(tf.reduce_sum(
end_points['heading_residuals_normalized'] * tf.to_float(hcls_onehot), axis=1) - \
heading_residual_normalized_label, delta=1.0)
tf.summary.scalar('heading residual normalized loss',
heading_residual_normalized_loss)
# Size loss
size_class_loss = tf.reduce_mean(
tf.nn.sparse_softmax_cross_entropy_with_logits(
logits=end_points['size_scores'], labels=tf.cast(size_class_label, tf.int64)))
tf.summary.scalar('size class loss', size_class_loss)
scls_onehot = tf.one_hot(tf.cast(size_class_label, tf.int64),
depth=NUM_SIZE_CLUSTER,
on_value=1, off_value=0, axis=-1) # BxNUM_SIZE_CLUSTER
scls_onehot_tiled = tf.tile(tf.expand_dims(tf.to_float(scls_onehot), -1), [1, 1, 3]) # BxNUM_SIZE_CLUSTERx3
predicted_size_residual_normalized = tf.reduce_sum(end_points['size_residuals_normalized'] *
scls_onehot_tiled, axis=[1]) # Bx3
mean_size_arr_expand = tf.expand_dims(tf.constant(g_mean_size_arr, dtype=tf.float32), 0) # 1xNUM_SIZE_CLUSTERx3
mean_size_label = tf.reduce_sum(scls_onehot_tiled * mean_size_arr_expand, axis=[1]) # Bx3
size_residual_label_normalized = size_residual_label / mean_size_label
size_normalized_dist = tf.norm(size_residual_label_normalized - predicted_size_residual_normalized, axis=-1)
size_residual_normalized_loss = huber_loss(size_normalized_dist, delta=1.0)
tf.summary.scalar('size residual normalized loss',
size_residual_normalized_loss)
# Corner loss
# We select the predicted corners corresponding to the
# GT heading bin and size cluster.
corners_3d = get_box3d_corners(end_points['center'],
end_points['heading_residuals'],
end_points['size_residuals']) # (B,NH,NS,8,3)
gt_mask = tf.tile(tf.expand_dims(hcls_onehot, 2), [1, 1, NUM_SIZE_CLUSTER]) * \
tf.tile(tf.expand_dims(scls_onehot, 1), [1, NUM_HEADING_BIN, 1]) # (B,NH,NS)
corners_3d_pred = tf.reduce_sum(tf.to_float(tf.expand_dims(tf.expand_dims(gt_mask, -1), -1)) * corners_3d,
axis=[1, 2]) # (B,8,3)
heading_bin_centers = tf.constant(np.arange(0, 2 * np.pi, 2 * np.pi / NUM_HEADING_BIN), dtype=tf.float32) # (NH,)
heading_label = tf.expand_dims(heading_residual_label, 1) + tf.expand_dims(heading_bin_centers, 0) # (B,NH)
heading_label = tf.reduce_sum(tf.to_float(hcls_onehot) * heading_label, 1)
mean_sizes = tf.expand_dims(tf.constant(g_mean_size_arr, dtype=tf.float32), 0) # (1,NS,3)
size_label = mean_sizes + tf.expand_dims(size_residual_label, 1) # (1,NS,3) + (B,1,3) = (B,NS,3)
size_label = tf.reduce_sum(tf.expand_dims(tf.to_float(scls_onehot), -1) * size_label, axis=[1]) # (B,3)
corners_3d_gt = get_box3d_corners_helper(center_label, heading_label, size_label) # (B,8,3)
corners_3d_gt_flip = get_box3d_corners_helper(center_label, heading_label + np.pi, size_label) # (B,8,3)
corners_dist = tf.minimum(tf.norm(corners_3d_pred - corners_3d_gt, axis=-1),
tf.norm(corners_3d_pred - corners_3d_gt_flip, axis=-1))
corners_loss = huber_loss(corners_dist, delta=1.0)
tf.summary.scalar('corners loss', corners_loss)
center_loss = K.clip(center_loss, min_value=0, max_value=100)
heading_class_loss = K.clip(heading_class_loss, min_value=0, max_value=100)
size_class_loss = K.clip(size_class_loss, min_value=0, max_value=100)
heading_residual_normalized_loss = K.clip(heading_residual_normalized_loss, min_value=0, max_value=100)
size_residual_normalized_loss = K.clip(size_residual_normalized_loss, min_value=0, max_value=100)
stage1_center_loss = K.clip(stage1_center_loss, min_value=0, max_value=100)
corners_loss = K.clip(corners_loss, min_value=0, max_value=100)
total_loss = mask_loss * mask_weight + box_loss_weight * (center_loss + heading_class_loss + size_class_loss +
heading_residual_normalized_loss * 20 +
size_residual_normalized_loss * 20 +
stage1_center_loss +
corner_loss_weight * corners_loss)
tf.add_to_collection('losses', total_loss)
print(total_loss)
return total_loss
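# --- Editor's note (not part of the original file): FPointNet_loss unpacks `args`
# positionally, so callers must pass the labels and end_points in exactly this order
# (the variable names below are illustrative):
#   total_loss = FPointNet_loss([mask_label, center_label, heading_class_label,
#                                heading_residual_label, size_class_label,
#                                size_residual_label, end_points],
#                               corner_loss_weight=10.0, box_loss_weight=1.0)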
test/test_sqlquery.py | KonstantinKlepikov/SimpleSQLite | 1 | 6630696 | # encoding: utf-8
"""
.. codeauthor:: <NAME> <<EMAIL>>
"""
from __future__ import unicode_literals
import pytest
from simplesqlite.query import And, Or, Where
from simplesqlite.sqlquery import SqlQuery
nan = float("nan")
inf = float("inf")
class Test_SqlQuery_make_update(object):
@pytest.mark.parametrize(
["table", "set_query", "where", "expected"],
[
["A", "B=1", None, "UPDATE A SET B=1"],
["A", "B=1", Where("C", 1, ">").to_query(), "UPDATE A SET B=1 WHERE C > 1"],
["A", "B=1", Where("C", 1, ">"), "UPDATE A SET B=1 WHERE C > 1"],
[
"A",
"B=1",
And([Where("C", 1, ">"), Where("D", 10)]),
"UPDATE A SET B=1 WHERE C > 1 AND D = 10",
],
[
"A",
"B=1",
Or([Where("C", 1, ">"), Where("D", 10)]),
"UPDATE A SET B=1 WHERE C > 1 OR D = 10",
],
],
)
def test_normal(self, table, set_query, where, expected):
assert SqlQuery.make_update(table, set_query, where) == expected
@pytest.mark.parametrize(
["table", "set_query", "where", "expected"],
[
[None, "B=1", None, ValueError],
["", "B=1", None, ValueError],
["A", None, None, ValueError],
["A", "", None, ValueError],
],
)
def test_exception(self, table, set_query, where, expected):
with pytest.raises(expected):
SqlQuery.make_update(table, set_query, where)
class Test_SqlQuery_make_where_in(object):
@pytest.mark.parametrize(
["key", "value", "expected"], [["key", ["attr_a", "attr_b"], "key IN ('attr_a', 'attr_b')"]]
)
def test_normal(self, key, value, expected):
assert SqlQuery.make_where_in(key, value) == expected
@pytest.mark.parametrize(
["key", "value", "expected"],
[
["key", None, TypeError],
["key", 1, TypeError],
[None, ["attr_a", "attr_b"], TypeError],
[None, None, TypeError],
],
)
def test_exception(self, key, value, expected):
with pytest.raises(expected):
SqlQuery.make_where_in(key, value)
class Test_SqlQuery_make_where_not_in(object):
@pytest.mark.parametrize(
["key", "value", "expected"],
[["key", ["attr_a", "attr_b"], "key NOT IN ('attr_a', 'attr_b')"]],
)
def test_normal(self, key, value, expected):
assert SqlQuery.make_where_not_in(key, value) == expected
@pytest.mark.parametrize(
["key", "value", "expected"],
[
["key", None, TypeError],
["key", 1, TypeError],
[None, ["attr_a", "attr_b"], TypeError],
[None, None, TypeError],
],
)
def test_exception(self, key, value, expected):
with pytest.raises(expected):
SqlQuery.make_where_not_in(key, value)
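# --- Editor's note (not part of the original test module): the parameterized tests above can
# be run directly with pytest, e.g. via a standard __main__ hook:
if __name__ == "__main__":
    import sys
    sys.exit(pytest.main([__file__, "-v"]))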
ldaSegmentation/modules/reviewModules.py | Rahul-Khanna/reviews_research | 0 | 6630697 | <reponame>Rahul-Khanna/reviews_research<gh_stars>0
# Module file used for Review Segmentation
# Author : <NAME>
import re
from gensim import corpora, models, similarities, matutils
import json
# The review object used by all scripts not in graphApproach
class Review():
def __init__(self,id,review,realTags,sentences=None,features=None,predictedTags=None, predictedLDASentenceTags=None,k=None):
self.id=id
self.review=review
self.realTags=realTags
if not sentences:
self.sentences=[]
else:
self.sentences=sentences
if not features:
self.features=[]
else:
self.features=features
if not predictedTags:
self.predictedTags=[]
else:
self.predictedTags=predictedTags
if not predictedLDASentenceTags:
self.predictedLDASentenceTags=[]
else:
self.predictedLDASentenceTags=predictedLDASentenceTags
if not k:
self.k=-1
else:
self.k=k
def __repr__(self):
output={
"id": self.id,
"review": str(self.review),
"realTags": self.realTags,
"sentences": self.sentences,
"features": self.features,
"predictedTags":self.predictedTags,
"predictedLDASentenceTags":self.predictedLDASentenceTags,
"k":self.k
}
return str(output)
def __str__(self):
output={
"id": self.id,
"review": str(self.review),
"realTags": self.realTags,
"sentences": self.sentences,
"features": self.features,
"predictedTags":self.predictedTags,
"predictedLDASentenceTags":self.predictedLDASentenceTags,
"k":self.k
}
return str(output)
# MyCorpus object used for loading the LDA model
# Written by <NAME>
class MyCorpus(object):
def __init__(self, fname, stopf = None, V = None):
self.fname = fname;
self.file = open(fname,"r");
stoplist = [];
if stopf:
with open(stopf,"r") as f:
stoplist = map(lambda x: x.strip().lower(),f.readlines());
self.dictionary = self.make_dict(stoplist, V);
    def reset(self):  # rewind the underlying file handle (the commented-out calls below use self.reset())
self.file.seek(0);
def proc(self,line):
return filter(lambda x: len(x) > 4, map(lambda x: x.strip(), re.sub(r'[0-9]+|\W',' ',line.strip().lower()).split()));
def make_dict(self, stoplist = [], V = None):
#self.reset();
#for line in self.read_file():
#print(self.proc(line))
dictionary = corpora.Dictionary(self.proc(line) for line in self.read_file());
stop_ids = [dictionary.token2id[sw] for sw in stoplist if sw in dictionary.token2id];
dictionary.filter_tokens(stop_ids);
dictionary.filter_extremes(5, .55);
return dictionary;
def read_file(self):
with open(self.fname,"r") as f:
for line in f:
t = json.loads(line)["reviewText"]
if (len(t.strip())> 10):
yield t.strip();
def __iter__(self):
#self.reset();
for line in self.read_file():
bow = self.dictionary.doc2bow(self.proc(line));
yield bow;
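# --- Editor's sketch (not part of the original module): building the corpus and training an
# LDA model with gensim. The file paths and topic count are placeholders; assumes the reviews
# file is line-delimited JSON with a "reviewText" field, as read_file() expects.
def _example_train_lda(reviews_json_path, stopword_path=None, num_topics=20):
    corpus = MyCorpus(reviews_json_path, stopf=stopword_path)
    lda = models.LdaModel(corpus=corpus, id2word=corpus.dictionary, num_topics=num_topics)
    return lda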
pytpp/attributes/opentrust_pki_ca.py | Venafi/pytpp | 4 | 6630698 | <filename>pytpp/attributes/opentrust_pki_ca.py
from pytpp.attributes._helper import IterableMeta, Attribute
from pytpp.attributes.http_ca_base import HTTPCABaseAttributes
class OpenTrustPKICAAttributes(HTTPCABaseAttributes, metaclass=IterableMeta):
__config_class__ = "OpenTrust PKI CA"
connector_type = Attribute('Connector Type')
fields = Attribute('Fields')
retrieval_period = Attribute('Retrieval Period')
web_service_url = Attribute('Web Service URL')
arcgis/utils_excel.py | hvostsobaki/arcgis | 0 | 6630699 | <gh_stars>0
import arcpy
import openpyxl
# Get the list of column names, with optional alphabetical sorting
def get_columns_list(file, sheet, sort=False):
wb = openpyxl.load_workbook(file)
tab = wb[sheet]
columns_list = [
tab.cell(row=1, column=i).value for i in range(1, tab.max_column+1)
]
if sort:
columns_list.sort()
return columns_list
else:
return columns_list
# Read data from the given Excel sheet
# and build from it a dictionary of the form {id: {id: ..., field1: ...}}.
def get_data(file, sheet, columns_list):
data_dict = {}
wb = openpyxl.load_workbook(file)
tab = wb[sheet]
rows_number = tab.max_row
columns_number = tab.max_column
for row in tab.iter_rows(
min_row=2, min_col=1,
max_row=rows_number, max_col=columns_number,
values_only=True
):
d_item = dict(zip(columns_list, list(row[0:columns_number])))
d_sum = {row[0]: d_item}
data_dict.update(d_sum)
arcpy.AddMessage(
'Downloaded {0} rows from excel table'.format(len(data_dict.keys()))
)
    return data_dict
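# --- Editor's sketch (not part of the original module): typical call sequence; the workbook
# path and sheet name are placeholders. get_data() zips row values against the column list in
# sheet order, so pass the unsorted column list along. arcpy.AddMessage assumes the code runs
# inside an ArcGIS/ArcPy session.
def _example_usage(xlsx_path="data.xlsx", sheet_name="Sheet1"):
    columns = get_columns_list(xlsx_path, sheet_name)
    records = get_data(xlsx_path, sheet_name, columns)
    return columns, records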
media_organiser.py | arkalon76/Icecream | 1 | 6630700 | from pymediainfo import MediaInfo
import sys, os, pymongo, hashlib, configparser, xxhash, locale, argparse, json, logging
from guessit import guessit
from imdbpie import Imdb
imdb = Imdb(anonymize=True) # to proxy requests
REBUILD_SIDECAR = False
# Setting default hasher - can be changed with command line
# Let's configure the locale
locale.setlocale(locale.LC_ALL, 'en_US') # We use this for number formating while we count blocks
logging.basicConfig(level=logging.INFO, format='%(asctime)s %(message)s')
class FileManagement():
def validate_sidecar_file(sidecar_file):
try:
fact_size = sidecar_file['quick_facts']['file_size']
fact_name = sidecar_file['quick_facts']['file_name']
fact_last_known = sidecar_file['quick_facts']['last_known_location']
return True
except KeyError: # We couldn't find the keys we need. Let's rebuild it
print("--> There seems to be some issue with the sidecar file. Let me fix that for you.")
return False
        # Ok, so we got the keys; now let's make sure they all have valid values
        # attached to them
def hashfile(fullpath_to_mediafile):
""" Hashes any given file using xxhash (https://cyan4973.github.io/xxHash/)
Args:
fullpath: The full path, including file name to the file to be hashed
Returns:
A String hash value
"""
# Setting the block size
hasher = xxhash.xxh64() #Set the hasher
BLOCKSIZE = 65536
size = os.path.getsize(fullpath_to_mediafile)
blocks = int(size / BLOCKSIZE)
with open(fullpath_to_mediafile, 'rb') as afile:
buf = afile.read(BLOCKSIZE) #Read one block
while len(buf) > 0: #
hasher.update(buf)
buf = afile.read(BLOCKSIZE)
if (blocks % 1000) == 0: # Only print every 1000 blocks so not to spam the terminal
print("Blocks to go:", locale.format("%d", blocks, grouping=True), end="\r", flush=True)
blocks -= 1
return hasher.hexdigest()
def find_imdb_ID_from_title(filename):
# First, let's extract the name of the movie and it's year
nameDict = guessit(filename)
try:
title = nameDict['title']
year = nameDict['year']
except KeyError:
print('This file "' + filename + '" seems oddly named. Please follow [title] [year] format')
return None
imdbResult = imdb.search_for_title(title)
for movie in imdbResult:
if title == movie['title'] and str(year) == movie['year']:
print('Match found')
return movie['imdb_id']
return None
def scanMovies(fd):
""" Goes through the directory structure seaching for specific files
matching the extention mentioned in the list
"""
for dir_path,subpaths,files in os.walk(fd):
for file in files:
extension = os.path.splitext(file)[1].lower()
if extension in ['.mkv', '.mp4', '.avi', '.mov']:
fullpath = os.path.abspath(dir_path) + "/" + file
# Get the media info. This an take a while
scanMediaInfo(dir_path, fullpath, file)
elif extension in ['.ts', '.m2ts']:
fullpath = os.path.abspath(dir_path) + "/" + file
filesize = os.path.getsize(fullpath)
if filesize > 20000000000:
convert_to_mkv(dir_path, fullpath, file)
def convert_to_mkv(path, fullpath, filename):
    print('Video conversion is not yet done. In progress since 17 May 2017')
# Let's establish what we are working with first. Is bluray structure intact or just a odd format.
base_path = os.path.basename(path)
if base_path == 'STREAM': # Bluray structure is intact it seems [Basefolder]/BDMV/STREAM/mediafile.m2ts
        print('Bluray rip conversion')
    else:
        print('Assuming we are in a ripped directory')
def scanMediaInfo(path, fullpath, filename):
""" Parses the media info of the file. If, new, we will hash it and add it to the library.
We use the MKV FileUID as our guide if it's new or now. Hashing is just to slow for a quick check.
Args: path: The URI of the file
fullpath: The URI + Filename
filename: the file name of the media we try to scan
"""
filelen = len(filename)
print('=======================' + "=" * filelen)
print('Scanning Media info on', filename)
print('=======================' + "=" * filelen)
# Getting the media info
# Let's just have a quick check if we seen this file before
filesize = os.path.getsize(fullpath)
result = is_this_file_known(filename=filename, path=path, filesize=filesize)
if result == False or REBUILD_SIDECAR ==True: #We couldn't find the sidecar file. Doing a full update
media_info = MediaInfo.parse(fullpath)
# We need to add some metadata here so we can do some quick lookups
media_json = json.loads(media_info.to_json(), parse_int=str)
if 'unique_id' in media_json['tracks'][0]:
media_xxhash = media_json['tracks'][0]['unique_id']
else:
media_xxhash = FileManagement.hashfile(fullpath)
imdb_id = find_imdb_ID_from_title(filename)
media_json['quick_facts'] = {'file_size':filesize,
'file_hash': media_xxhash,
'file_name': filename,
'last_known_location' : fullpath,
'imdb_id': imdb_id}
# Save it to a file next to the media file for later use
sidecar_file = open(path + '/' + filename + '_sidcar.json', 'w')
sidecar_file.write(json.dumps(media_json))
insertMediaFile(media_json)
else: #Hey, we know this one, no need to do anything about this.
# Save it to a file next to the media file for later use
print('Seems like we have scanned this before.\n--> If you want to scan it again, remove the _sidecar file next to the original file')
print('You can find it here:\n')
print(path + filename + '_sidcar.json')
print('\n')
print('--> Will still try to add it to the DB just in case we deleted it at some point.')
sidecar_file = open(path + '/' + filename + '_sidcar.json', 'r')
insertMediaFile(json.load(sidecar_file))
"""
So sorry for the deep iffing here. Will fix it after lunch... :D
"""
def is_this_file_known(filename, filesize, path):
sidecar_uri = path + '/' + filename + '_sidcar.json' #Path to the sidecar file
if os.path.isfile(sidecar_uri):
sidecar_file = json.load(open(sidecar_uri, 'r')) # We found it, lets look inside
try:
fact_size = sidecar_file['quick_facts']['file_size']
fact_name = sidecar_file['quick_facts']['file_name']
fact_last_known = sidecar_file['quick_facts']['last_known_location']
except KeyError: # We couldn't find the keys we need. Let's rebuild it
print("--> There seems to be some issue with the sidecar file. Let me fix that for you.")
return False
if fact_size != filesize: # We check filesize first since that would qualify for a full rescan no matter what the name is of the file
print("--> The filesize doesn't match the sidecar file info. We should scan again.. \n----")
return False #Sidecar file exist but the basic info is not matching
elif fact_name != filename: #Ok, so the name doesn't match but the size does. Maybe we renamed both mediafile and the sidecar. Let's verify this.
print("--> The filename doesn't match the sidecar file info. Let's check the hash. Please wait... \n----")
file_hash = FileManagement.hashfile(path + "/" + filename)
fact_hash = sidecar_file['quick_facts']['file_hash']
print(file_hash + " + " + fact_hash)
if fact_hash == file_hash:
print("--> Seems like the file is the same but renamed. Let me update that for you!")
sidecar_file['quick_facts']['file_name'] = filename
f = open(sidecar_uri, 'w')
f.write(json.dumps(sidecar_file));
return True
else:
print("--> The xxhash doesn't match. Something has changed so let's re-scan it all")
return False #Sidecar file exist but the basic info is not matching
elif fact_last_known != os.path.abspath(path + '/' + filename):
            print('--> The location seems to have changed. Rebuilding the file. I know it is a pain, I will make this faster later')
return False
else: # Everything is good. Lets just skip this file.
return True
else:
print("--> Can't find the sidecar file. Assuming this is a new file, or renamed\n----")
return False #Can't even find the sidecar file
def insertMediaFile(file_data):
""" Inserts a record in the MongoDB
"""
# client = pymongo.MongoClient('mongodb://arkalon:[email protected]:27017,cluster0-shard-00-01-if3vm.mongodb.net:27017,cluster0-shard-00-02-if3vm.mongodb.net:27017/icecream?ssl=true&replicaSet=Cluster0-shard-0&authSource=admin')
# db = client[db_name]
client = pymongo.MongoClient(db_url,db_port)
db = client[db_name]
db.authenticate(db_username,db_password)
# First, make sure there is no duplicate
result = db.Movies.find({'quick_facts.file_hash' : file_data['quick_facts']['file_hash']})
if result.count() != 0:
print('--> Hey! We already have this bad boy in the database. Will not add it twice.')
print('\n\n')
else:
db.Movies.insert_one(file_data)
print('--> File has been added to the DB and a sidcar file to the filesystem.')
print('\n\n')
def configure_application():
# Let's configure stuff
config = configparser.RawConfigParser()
#First, let's make sure we have a config file. If not, create a template and quit
is_configured = os.path.isfile('media_organiser.cfg')
if is_configured:
config.read('media_organiser.cfg')
# Configure mLab Database
global db_name
global db_port
global db_url
global db_username
global db_password
db_name = config.get('mLab','db_name')
db_port = config.getint('mLab','db_port')
db_url = config.get('mLab','db_url')
db_username = config.get('mLab','username')
db_password = config.get('mLab','password')
elif os.path.isfile('media_organiser_template.cfg'):
sys.exit('--> Did you forget to rename the template file to "media_organiser.cfg"?')
else:
f = open('media_organiser_template.cfg', mode='w')
f.write("[mLab]\ndb_url = \ndb_port = \nusername = \npassword = \ndb_name = ")
sys.exit("--> App has no config file. Creating a template and quitting")
if __name__ == "__main__":
configure_application()
# Setup the Argument Parser
parser = argparse.ArgumentParser(description='Documentation of all media files as you have. Will get some media details and hash them.')
parser.add_argument('media', help='Where your mediafiles are')
parser.add_argument('-c', '--config', help='Location of the config file. Default: Same directory as main file [media_organiser.cfg]')
parser.add_argument('-m', '--remux', help='[Not working yet!!] If selected, we will remux non-mkv to mkv format.')
parser.add_argument('-r', '--rebuild', action="store_true" ,help='Rebuild ALL sidecar files')
args = parser.parse_args()
REBUILD_SIDECAR = args.rebuild
if REBUILD_SIDECAR:
response = input('Are you sure you want to rebuild ALL sidecar files? (y/n) --> ')
if response.lower() == 'y':
scanMovies(args.media)
else:
print('Oh, did you forget to remove the "-r" flag?')
else:
scanMovies(args.media)
print('================================')
print(' Scan finished. ')
print('================================')
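# --- Editor's note (not part of the original script): configure_application() expects a
# "media_organiser.cfg" file next to this script with an [mLab] section, mirroring the
# template the script writes out. All values below are placeholders:
#
# [mLab]
# db_url = cluster0.example.net
# db_port = 27017
# username = your_user
# password = your_password
# db_name = icecream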
scripts/gen-docs.py | ThreeSixtyGiving/prototype-tools | 0 | 6630701 | <reponame>ThreeSixtyGiving/prototype-tools<filename>scripts/gen-docs.py
# This script generates an intermediate representation of the data model ready for translation into CSV
import json
import operator # Used in sorting
from sets import Set
from genmodel import generateModel, getName
# Change final parameter to False / True depending on whether you want roll-ups or not.
# Note to self: Use python gen-docs.py > ../website/standard/_includes/buildingblocks.html with rollups false for keeping documentation updated.
model = generateModel("http://joinedupdata.org/ontologies/philanthropy/Grant",1,{},False)
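# 'model' maps each building-block name to its column definitions plus a
# '_meta' entry (description, types and related objects) that the loops
# below use to render the documentation tables.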
print "<ul>"
for table in sorted(model):
print "<li><a href='#"+table+"'>"+table +"</a></li>"
print "</ul>"
print "<p>Details on each of these building blocks can be found below.</p>"
for table in sorted(model):
print "<h4 class='activity' id='" + table + "'><span class='glyphicon glyphicon-th'></span> "+table+"</h4>"
print "<p>"+model[table]["_meta"]['description']+"</p>"
print "<p><strong>Types:</strong> "+ ", ".join(model[table]["_meta"]['types']) + "</p>"
print """
<div class="panel panel-primary">
<div class="panel-heading">
<h4 class="panel-title">
<a data-toggle="collapse" data-target="#%s">
Data properties
</a>
</h4>
</div>
<div id="%s" class="panel-collapse collapse out">
<div class="panel-body">
<table class="table">
<thead>
<tr>
<th>ID</th>
<th>Title (en)</th>
<th>Description</th>
<th>Values</th>
</tr>
</thead><tbody>
""" % ("table-"+table,"table-"+table)
c = 0
cols = []
#Dictionary sorting work-around
for col in model[table]:
if(not(col == '_meta')):
cols.append((col,model[table][col]["weight"]))
cols = sorted(cols,key=lambda x: x[1])
for col in cols:
print "<tr class='rowtype-"+str(model[table][col[0]]['values']).lower()+"'>"
print "<td>" + model[table][col[0]]['name'] + "</td>"
print "<td>" + model[table][col[0]]['title'] + "</td>"
try:
print "<td>" + model[table][col[0]]['description'] + "</td>"
except:
print "<td> No description </td>"
try:
print "<td>" + model[table][col[0]]['values'] + "</td>"
except:
print "<td> No values specified </td>"
print "</tr>"
c = c + 1
print """</tbody></table></div>
</div>
</div>"""
## Put together details of all the relationships
print """
<div class="panel panel-info">
<div class="panel-heading">
<h4 class="panel-title">
<a data-toggle="collapse" data-target="#%s">
Relationship properties
</a>
</h4>
</div>
<div id="%s" class="panel-collapse collapse out">
<div class="panel-body">
<table class="table">
<thead>
<tr>
<th>Relationship</th>
<th>Title</th>
<th>Description</th>
<th>Related to</th>
</tr>
</thead>
<tbody>
""" % ("related-"+table,"related-"+table)
#Dictionary sorting work-around
rcols = []
for col in model[table]['_meta']['related']:
rcols.append((col,model[table]['_meta']['related'][col]["topObject"]))
rcols = sorted(rcols,key=lambda x: x[1])
for related in rcols:
relatedItem = model[table]['_meta']['related'][related[0]]
print "<tr>"
print "<td>" + relatedItem['relationshipName'] + "</td>"
print "<td>" + relatedItem['title'] + "</td>"
print "<td>" + relatedItem['description'] + "</td>"
print "<td> <a href='#" + relatedItem['topObject'] + "'>" + relatedItem['objectName'] + " (" + relatedItem['topObject'] +")</a></td>"
print "</tr>"
print """</tbody></table></div>
</div>
</div>""" | # This script generates an intermediate representation of the data model ready for translation into CSV
import json
import operator # Used in sorting
from sets import Set
from genmodel import generateModel, getName
# Change final parameter to False / True depending on whether you want roll-ups or not.
# Note to self: Use python gen-docs.py > ../website/standard/_includes/buildingblocks.html with rollups false for keeping documentation updated.
model = generateModel("http://joinedupdata.org/ontologies/philanthropy/Grant",1,{},False)
print "<ul>"
for table in sorted(model):
print "<li><a href='#"+table+"'>"+table +"</a></li>"
print "</ul>"
print "<p>Details on each of these building blocks can be found below.</p>"
for table in sorted(model):
print "<h4 class='activity' id='" + table + "'><span class='glyphicon glyphicon-th'></span> "+table+"</h4>"
print "<p>"+model[table]["_meta"]['description']+"</p>"
print "<p><strong>Types:</strong> "+ ", ".join(model[table]["_meta"]['types']) + "</p>"
print """
<div class="panel panel-primary">
<div class="panel-heading">
<h4 class="panel-title">
<a data-toggle="collapse" data-target="#%s">
Data properties
</a>
</h4>
</div>
<div id="%s" class="panel-collapse collapse out">
<div class="panel-body">
<table class="table">
<thead>
<tr>
<th>ID</th>
<th>Title (en)</th>
<th>Description</th>
<th>Values</th>
</tr>
</thead><tbody>
""" % ("table-"+table,"table-"+table)
c = 0
cols = []
#Dictionary sorting work-around
for col in model[table]:
if(not(col == '_meta')):
cols.append((col,model[table][col]["weight"]))
cols = sorted(cols,key=lambda x: x[1])
for col in cols:
print "<tr class='rowtype-"+str(model[table][col[0]]['values']).lower()+"'>"
print "<td>" + model[table][col[0]]['name'] + "</td>"
print "<td>" + model[table][col[0]]['title'] + "</td>"
try:
print "<td>" + model[table][col[0]]['description'] + "</td>"
except:
print "<td> No description </td>"
try:
print "<td>" + model[table][col[0]]['values'] + "</td>"
except:
print "<td> No values specified </td>"
print "</tr>"
c = c + 1
print """</tbody></table></div>
</div>
</div>"""
## Put together details of all the relationships
print """
<div class="panel panel-info">
<div class="panel-heading">
<h4 class="panel-title">
<a data-toggle="collapse" data-target="#%s">
Relationship properties
</a>
</h4>
</div>
<div id="%s" class="panel-collapse collapse out">
<div class="panel-body">
<table class="table">
<thead>
<tr>
<th>Relationship</th>
<th>Title</th>
<th>Description</th>
<th>Related to</th>
</tr>
</thead>
<tbody>
""" % ("related-"+table,"related-"+table)
#Dictionary sorting work-around
rcols = []
for col in model[table]['_meta']['related']:
rcols.append((col,model[table]['_meta']['related'][col]["topObject"]))
rcols = sorted(rcols,key=lambda x: x[1])
for related in rcols:
relatedItem = model[table]['_meta']['related'][related[0]]
print "<tr>"
print "<td>" + relatedItem['relationshipName'] + "</td>"
print "<td>" + relatedItem['title'] + "</td>"
print "<td>" + relatedItem['description'] + "</td>"
print "<td> <a href='#" + relatedItem['topObject'] + "'>" + relatedItem['objectName'] + " (" + relatedItem['topObject'] +")</a></td>"
print "</tr>"
print """</tbody></table></div>
</div>
</div>""" | en | 0.458978 | # This script generates an intermediate representation of the data model ready for translation into CSV # Used in sorting # Change final parameter to False / True depending on whether you want roll-ups or not. # Note to self: Use python gen-docs.py > ../website/standard/_includes/buildingblocks.html with rollups false for keeping documentation updated. <div class="panel panel-primary"> <div class="panel-heading"> <h4 class="panel-title"> <a data-toggle="collapse" data-target="#%s"> Data properties </a> </h4> </div> <div id="%s" class="panel-collapse collapse out"> <div class="panel-body"> <table class="table"> <thead> <tr> <th>ID</th> <th>Title (en)</th> <th>Description</th> <th>Values</th> </tr> </thead><tbody> #Dictionary sorting work-around </tbody></table></div> </div> </div> ## Put together details of all the relationships <div class="panel panel-info"> <div class="panel-heading"> <h4 class="panel-title"> <a data-toggle="collapse" data-target="#%s"> Relationship properties </a> </h4> </div> <div id="%s" class="panel-collapse collapse out"> <div class="panel-body"> <table class="table"> <thead> <tr> <th>Relationship</th> <th>Title</th> <th>Description</th> <th>Related to</th> </tr> </thead> <tbody> #Dictionary sorting work-around </tbody></table></div> </div> </div> | 2.76027 | 3 |
Dart2/JsonHelper.py | mcskik/Python | 0 | 6630702 | import io
import json
import sys
import Constants
def printFormattedJson(json_container, sort=True, indents=Constants.indentSize):
if type(json_container) is str:
print(json.dumps(json.loads(json_container), sort_keys=sort, indent=indents))
else:
print(json.dumps(json_container, sort_keys=sort, indent=indents))
return None
def printFormattedJsonToString(json_container, sort=True, indents=Constants.indentSize):
# Redirect sys.stdout to an in memory buffer.
keep_stdout = sys.stdout
sys.stdout = io.StringIO()
if type(json_container) is str:
print(json.dumps(json.loads(json_container), sort_keys=sort, indent=indents))
else:
print(json.dumps(json_container, sort_keys=sort, indent=indents))
# Capture print output.
output = sys.stdout.getvalue()
# Restore original sys.stdout.
sys.stdout = keep_stdout
return output
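# Example usage (illustrative):
#   formatted = printFormattedJsonToString('{"b": 2, "a": 1}')
#   print(formatted)  # keys sorted and indented per Constants.indentSize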
| import io
import json
import sys
import Constants
def printFormattedJson(json_container, sort=True, indents=Constants.indentSize):
if type(json_container) is str:
print(json.dumps(json.loads(json_container), sort_keys=sort, indent=indents))
else:
print(json.dumps(json_container, sort_keys=sort, indent=indents))
return None
def printFormattedJsonToString(json_container, sort=True, indents=Constants.indentSize):
# Redirect sys.stdout to an in memory buffer.
keep_stdout = sys.stdout
sys.stdout = io.StringIO()
if type(json_container) is str:
print(json.dumps(json.loads(json_container), sort_keys=sort, indent=indents))
else:
print(json.dumps(json_container, sort_keys=sort, indent=indents))
# Capture print output.
output = sys.stdout.getvalue()
# Restore original sys.stdout.
sys.stdout = keep_stdout
return output
| en | 0.614671 | # Redirect sys.stdout to an in memory buffer. # Capture print output. # Restore original sys.stdout. | 2.899127 | 3 |
sarif/operations/ls_op.py | microsoft/sarif-tools | 8 | 6630703 | <reponame>microsoft/sarif-tools
"""
Code for `sarif ls` command.
"""
from typing import List
from sarif import loader
def print_ls(files_or_dirs: List[str], output):
"""
Print a SARIF file listing for each of the input files or directories.
"""
dir_result = []
for path in files_or_dirs:
dir_result.append(f"{path}:")
sarif_files = loader.load_sarif_files(path)
if sarif_files:
sarif_file_names = [f.get_file_name() for f in sarif_files]
for file_name in sorted(sarif_file_names):
dir_result.append(f" {file_name}")
else:
dir_result.append(" (None)")
if output:
print("Writing file listing to", output)
with open(output, "w", encoding="utf-8") as file_out:
file_out.writelines(d + "\n" for d in dir_result)
else:
for directory in dir_result:
print(directory)
print()
| """
Code for `sarif ls` command.
"""
from typing import List
from sarif import loader
def print_ls(files_or_dirs: List[str], output):
"""
Print a SARIF file listing for each of the input files or directories.
"""
dir_result = []
for path in files_or_dirs:
dir_result.append(f"{path}:")
sarif_files = loader.load_sarif_files(path)
if sarif_files:
sarif_file_names = [f.get_file_name() for f in sarif_files]
for file_name in sorted(sarif_file_names):
dir_result.append(f" {file_name}")
else:
dir_result.append(" (None)")
if output:
print("Writing file listing to", output)
with open(output, "w", encoding="utf-8") as file_out:
file_out.writelines(d + "\n" for d in dir_result)
else:
for directory in dir_result:
print(directory)
print() | en | 0.74615 | Code for `sarif ls` command. Print a SARIF file listing for each of the input files or directories. | 3.83169 | 4 |
sky/engine/build/scripts/make_media_features.py | domenic/mojo | 5,964 | 6630704 | #!/usr/bin/env python
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import media_feature_symbol
import in_generator
import template_expander
import name_utilities
import sys
class MakeMediaFeaturesWriter(in_generator.Writer):
defaults = {
'Conditional': None, # FIXME: Add support for Conditional.
'RuntimeEnabled': None,
'ImplementedAs': None,
}
filters = {
'symbol': media_feature_symbol.getMediaFeatureSymbolWithSuffix(''),
'to_macro_style': name_utilities.to_macro_style,
}
default_parameters = {
'namespace': '',
'export': '',
}
def __init__(self, in_file_path):
super(MakeMediaFeaturesWriter, self).__init__(in_file_path)
self._outputs = {
('MediaFeatures.h'): self.generate_header,
}
self._template_context = {
'namespace': '',
'export': '',
'entries': self.in_file.name_dictionaries,
}
@template_expander.use_jinja('MediaFeatures.h.tmpl', filters=filters)
def generate_header(self):
return self._template_context
if __name__ == '__main__':
in_generator.Maker(MakeMediaFeaturesWriter).main(sys.argv)
| #!/usr/bin/env python
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import media_feature_symbol
import in_generator
import template_expander
import name_utilities
import sys
class MakeMediaFeaturesWriter(in_generator.Writer):
defaults = {
'Conditional': None, # FIXME: Add support for Conditional.
'RuntimeEnabled': None,
'ImplementedAs': None,
}
filters = {
'symbol': media_feature_symbol.getMediaFeatureSymbolWithSuffix(''),
'to_macro_style': name_utilities.to_macro_style,
}
default_parameters = {
'namespace': '',
'export': '',
}
def __init__(self, in_file_path):
super(MakeMediaFeaturesWriter, self).__init__(in_file_path)
self._outputs = {
('MediaFeatures.h'): self.generate_header,
}
self._template_context = {
'namespace': '',
'export': '',
'entries': self.in_file.name_dictionaries,
}
@template_expander.use_jinja('MediaFeatures.h.tmpl', filters=filters)
def generate_header(self):
return self._template_context
if __name__ == '__main__':
in_generator.Maker(MakeMediaFeaturesWriter).main(sys.argv)
| en | 0.855495 | #!/usr/bin/env python # Copyright 2014 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. # FIXME: Add support for Conditional. | 1.912353 | 2 |
config/score_game.py | rafiberlin/sose21-pm-language-and-vision-g1 | 0 | 6630705 | import os
from config.util import get_config, read_game_logs, output_game_metrics
import argparse
# Execute as score_game.py --file rafi_10_games_04_jun_21_attention_caption_lxmert_vqa.txt --dir ../data/game_logs
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Calculate the game statistics given a game log file')
parser.add_argument("--file", help="the file to process", required=True)
parser.add_argument("--dir",
help="The directory where to find the file. If not given, it will use the configured "
"path under 'game_logs_dir' in the config file",
default=None)
args = parser.parse_args()
file_name = args.file
game_logs_dir = args.dir
if game_logs_dir is None:
game_logs_dir = get_config()["game_logs_dir"]
log_path = os.path.join(game_logs_dir, file_name)
log = read_game_logs(log_path)
output_game_metrics(log)
| import os
from config.util import get_config, read_game_logs, output_game_metrics
import argparse
# Execute as score_game.py --file rafi_10_games_04_jun_21_attention_caption_lxmert_vqa.txt --dir ../data/game_logs
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Calculate the game statistics given a game log file')
parser.add_argument("--file", help="the file to process", required=True)
parser.add_argument("--dir",
help="The directory where to find the file. If not given, it will use the configured "
"path under 'game_logs_dir' in the config file",
default=None)
args = parser.parse_args()
file_name = args.file
game_logs_dir = args.dir
if game_logs_dir is None:
game_logs_dir = get_config()["game_logs_dir"]
log_path = os.path.join(game_logs_dir, file_name)
log = read_game_logs(log_path)
output_game_metrics(log)
| en | 0.483212 | # Execute as score_game.py --file rafi_10_games_04_jun_21_attention_caption_lxmert_vqa.txt --dir ../data/game_logs | 2.508628 | 3 |
moran_process.py | pikawika/VUB-CGT-assignment-1 | 0 | 6630706 | #Name: <NAME>
#StudentID: 568702
#Affiliation: VUB - Master Computer Science: AI
import random
import numpy as np
from IPython.display import display
def moran_step(current_state, beta, mu, Z, A):
"""
This function returns the next state of the population where
* current_state, is the current state of the population
* beta is the intensity of selection
* mu is the mutation probability
* Z is the population size
* A is the matrix that contains the payoffs of each strategy against each other.
"""
def select_random_player_from_population():
return random.randint(0, Z-1)
def get_player_payoff(player, opponent):
return A[player,opponent]
def get_random_strategy():
return random.randint(0, 1)
def should_do_random_value():
return random.random() <= mu
def fermi_prob(fitness_difference):
return np.clip(1. / (1. + np.exp(beta * fitness_difference, dtype=np.float64)), 0., 1.)
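    # Note: fermi_prob gives p = 1 / (1 + exp(beta * (f_first - f_second))),
    # i.e. the probability that the first selected player is the one updated below.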
#Select 2 random players
selected=[select_random_player_from_population(), select_random_player_from_population()]
#Init fitness var
fitness = np.zeros(2)
#Calculate avg fitness for both players
for i, player in enumerate(selected):
for j in range(Z):
if j == player: continue #Skip playing against himself
players_payoff = get_player_payoff(current_state[player],current_state[j])
fitness[i] += players_payoff
fitness[i] = fitness[i] / (Z-1)
fitness_difference = fitness[0] - fitness[1]
#Assign mutation with fermi prob or adopt random strategy with mu probability
if random.random() < fermi_prob(fitness_difference):
if should_do_random_value():
current_state[selected[0]] = get_random_strategy()
else:
current_state[selected[0]] = current_state[selected[1]]
else:
if should_do_random_value():
current_state[selected[0]] = get_random_strategy()
else:
current_state[selected[1]] = current_state[selected[0]]
#Return new state
return current_state
def estimate_stationary_distribution(nb_runs, transitory, nb_generations, beta, mu, Z, A) :
"""
This function returns the stationary distribution of the population as a vector of floats
containing the fraction of time the population spends in each possible state where
* nb_runs, is number of independent realizations of the Monte Carlo simulation
* transitory is the transitory period
* nb_generations is the number of generations to run the moran process
* beta is the intensity of selection, mu is the mutation probability
* Z is the population size
* A is the matrix that contains the payoffs of each strategy against each other.
"""
#make an array to count how many times a certain state occurs (O and Z possible so Z+1)
state_count = np.zeros(Z+1)
#Repeat the simulation nb_runs times
for nb_run_index in range(nb_runs):
#Generate a random population state
current_state = np.random.randint(2, size=Z)
#Run the Moran process for a transitory period.
#No logging required
for transitory_loop_index in range(transitory):
moran_step(current_state, beta, mu, Z, A)
#Run the process for nb_generations and count how often
#the population passes through each possible state.
for nb_generation_index in range(nb_generations):
###get new current state
current_state = moran_step(current_state, beta, mu, Z, A)
###count amount of hawk players and save an extra instance of this state
state = (current_state == 0).sum()
state_count[state] += 1
#avg state_count
state_count = state_count/nb_runs
#scale state_count
state_count = state_count/nb_generations
return state_count
#Main loop
"""
we assume that 𝛽=10, 𝜇=10^−3, 𝑍=50, 𝑉=2, 𝐷=3 and 𝑇 = 1.
We assume that transitory = 10^3, nb_generations = 10^5 and the nb_runs = 10.
This main loop executes my code and produces the appropriate results for these assumptions.
It is possible to execute it as: python moran_process.py.
"""
#Setting the parameters for ease of use
beta = 10
mu = 0.001
Z = 50
V = 2
D = 3
T = 1
transitory = 1000
nb_generations = 100000
nb_runs = 10
# Payoff matrix
A = np.array([
[ (V-D)/2, V],
[ 0 , (V/2) - T],
])
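# Strategy index 0 is Hawk and index 1 is Dove, so A[i, j] is the payoff a
# player using strategy i receives against an opponent playing strategy j.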
#get results
result = estimate_stationary_distribution(nb_runs, transitory, nb_generations, beta, mu, Z, A)
display(result) | #Name: <NAME>
#StudentID: 568702
#Affiliation: VUB - Master Computer Science: AI
import random
import numpy as np
from IPython.display import display
def moran_step(current_state, beta, mu, Z, A):
"""
This function returns the next state of the population where
* current_state, is the current state of the population
* beta is the intensity of selection
* mu is the mutation probability
* Z is the population size
* A is the matrix that contains the payoffs of each strategy against each other.
"""
def select_random_player_from_population():
return random.randint(0, Z-1)
def get_player_payoff(player, opponent):
return A[player,opponent]
def get_random_strategy():
return random.randint(0, 1)
def should_do_random_value():
return random.random() <= mu
def fermi_prob(fitness_difference):
return np.clip(1. / (1. + np.exp(beta * fitness_difference, dtype=np.float64)), 0., 1.)
#Select 2 random players
selected=[select_random_player_from_population(), select_random_player_from_population()]
#Init fitness var
fitness = np.zeros(2)
#Calculate avg fitness for both players
for i, player in enumerate(selected):
for j in range(Z):
if j == player: continue #Skip playing against himself
players_payoff = get_player_payoff(current_state[player],current_state[j])
fitness[i] += players_payoff
fitness[i] = fitness[i] / (Z-1)
fitness_difference = fitness[0] - fitness[1]
#Assign mutation with fermi prob or adopt random strategy with mu probability
if random.random() < fermi_prob(fitness_difference):
if should_do_random_value():
current_state[selected[0]] = get_random_strategy()
else:
current_state[selected[0]] = current_state[selected[1]]
else:
if should_do_random_value():
current_state[selected[0]] = get_random_strategy()
else:
current_state[selected[1]] = current_state[selected[0]]
#Return new state
return current_state
def estimate_stationary_distribution(nb_runs, transitory, nb_generations, beta, mu, Z, A) :
"""
This function returns the stationary distribution of the population as a vector of floats
containing the fraction of time the population spends in each possible state where
* nb_runs, is number of independent realizations of the Monte Carlo simulation
* transitory is the transitory period
* nb_generations is the number of generations to run the moran process
* beta is the intensity of selection, mu is the mutation probability
* Z is the population size
* A is the matrix that contains the payoffs of each strategy against each other.
"""
#make an array to count how many times a certain state occurs (O and Z possible so Z+1)
state_count = np.zeros(Z+1)
#Repeat the simulation nb_runs times
for nb_run_index in range(nb_runs):
#Generate a random population state
current_state = np.random.randint(2, size=Z)
#Run the Moran process for a transitory period.
#No logging required
for transitory_loop_index in range(transitory):
moran_step(current_state, beta, mu, Z, A)
#Run the process for nb_generations and count how often
#the population passes through each possible state.
for nb_generation_index in range(nb_generations):
###get new current state
current_state = moran_step(current_state, beta, mu, Z, A)
###count amount of hawk players and save an extra instance of this state
state = (current_state == 0).sum()
state_count[state] += 1
#avg state_count
state_count = state_count/nb_runs
#scale state_count
state_count = state_count/nb_generations
return state_count
#Main loop
"""
we assume that 𝛽=10, 𝜇=10^−3, 𝑍=50, 𝑉=2, 𝐷=3 and 𝑇 = 1.
We assume that transitory = 10^3, nb_generations = 10^5 and the nb_runs = 10.
This main loop executes my code and produces the appropriate results for these assumptions.
It is possible to execute it as: python moran_process.py.
"""
#Setting the parameters for ease of use
beta = 10
mu = 0.001
Z = 50
V = 2
D = 3
T = 1
transitory = 1000
nb_generations = 100000
nb_runs = 10
# Payoff matrix
A = np.array([
[ (V-D)/2, V],
[ 0 , (V/2) - T],
])
#get results
result = estimate_stationary_distribution(nb_runs, transitory, nb_generations, beta, mu, Z, A)
display(result) | en | 0.881154 | #Name: <NAME> #StudentID: 568702 #Affiliation: VUB - Master Computer Science: AI This function returns the next state of the population where * current_state, is the current state of the population * beta is the intensity of selection * mu is the mutation probability * Z is the population size * A is the matrix that contains the payoffs of each strategy against each other. #Select 2 random players #Init fitness var #Calculate avg fitness for both players #Skip playing against himself #Assign mutation with fermi prob or adopt random strategy with mu probability #Return new state This function returns the stationary distribution of the population as a vector of floats containing the fraction of time the population spends in each possible state where * nb_runs, is number of independent realizations of the Monte Carlo simulation * transitory is the transitory period * nb_generations is the number of generations to run the moran process * beta is the intensity of selection, mu is the mutation probability * Z is the population size * A is the matrix that contains the payoffs of each strategy against each other. #make an array to count how many times a certain state occurs (O and Z possible so Z+1) #Repeat the simulation nb_runs times #Generate a random population state #Run the Moran process for a transitory period. #No logging required #Run the process for nb_generations and count how often #the population passes through each possible state. ###get new current state ###count amount of hawk players and save an extra instance of this state #avg state_count #scale state_count #Main loop we assume that 𝛽=10, 𝜇=10^−3, 𝑍=50, 𝑉=2, 𝐷=3 and 𝑇 = 1. We assume that transitory = 10^3, nb_generations = 10^5 and the nb_runs = 10. This main loop executes my code and produces the appropiate results for these assumptions. It is possible to execute it as: python moran_process.py. #Setting the parameters for ease of use # Payoff matrix #get results | 3.024446 | 3 |
app/models.py | richardpanda/todo-api | 0 | 6630707 | import jwt
from app import db
from bcrypt import checkpw, gensalt, hashpw
from flask import current_app
from sqlalchemy.orm import relationship
class Todo(db.Model):
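    """A single to-do item; each row belongs to one User via user_id."""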
id = db.Column(db.Integer, primary_key=True)
text = db.Column(db.String(100))
is_completed = db.Column(db.Boolean, default=False)
user_id = db.Column(db.Integer, db.ForeignKey('user.id'))
def __repr__(self):
return f'<Todo id={self.id} text={self.text} is_completed={self.is_completed} user_id={self.user_id}>'
def as_dict(self):
return {'id': self.id, 'text': self.text, 'is_completed': self.is_completed}
class User(db.Model):
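    """Application user whose password is persisted only as a bcrypt hash."""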
id = db.Column(db.Integer, primary_key=True)
username = db.Column(db.String(25), unique=True)
hash = db.Column(db.String(60))
todos = relationship('Todo', backref='user')
def __init__(self, username, password):
self.username = username
self.password = password
self.hash = hashpw(password.encode(), gensalt()).decode()
def __repr__(self):
return f'<User id={self.id} username={self.username} hash={self.hash}>'
def check_password(self, password):
return checkpw(password.encode(), self.hash.encode())
def generate_jwt(self):
return jwt.encode({'id': self.id}, current_app.config['JWT_SECRET'], algorithm='HS256').decode()
| import jwt
from app import db
from bcrypt import checkpw, gensalt, hashpw
from flask import current_app
from sqlalchemy.orm import relationship
class Todo(db.Model):
id = db.Column(db.Integer, primary_key=True)
text = db.Column(db.String(100))
is_completed = db.Column(db.Boolean, default=False)
user_id = db.Column(db.Integer, db.ForeignKey('user.id'))
def __repr__(self):
return f'<Todo id={self.id} text={self.text} is_completed={self.is_completed} user_id={self.user_id}>'
def as_dict(self):
return {'id': self.id, 'text': self.text, 'is_completed': self.is_completed}
class User(db.Model):
id = db.Column(db.Integer, primary_key=True)
username = db.Column(db.String(25), unique=True)
hash = db.Column(db.String(60))
todos = relationship('Todo', backref='user')
def __init__(self, username, password):
self.username = username
self.password = password
self.hash = hashpw(password.encode(), gensalt()).decode()
def __repr__(self):
return f'<User id={self.id} username={self.username} hash={self.hash}>'
def check_password(self, password):
return checkpw(password.encode(), self.hash.encode())
def generate_jwt(self):
return jwt.encode({'id': self.id}, current_app.config['JWT_SECRET'], algorithm='HS256').decode()
| none | 1 | 2.554485 | 3 |
|
Lib/site-packages/mock/tests/__init__.py | inging44/python3 | 16,989 | 6630708 | # Copyright (C) 2007-2012 <NAME> & the mock team
# E-mail: fuzzyman AT voidspace DOT org DOT uk
# http://www.voidspace.org.uk/python/mock/
| # Copyright (C) 2007-2012 <NAME> & the mock team
# E-mail: fuzzyman AT voidspace DOT org DOT uk
# http://www.voidspace.org.uk/python/mock/
| en | 0.605774 | # Copyright (C) 2007-2012 <NAME> & the mock team # E-mail: fuzzyman AT voidspace DOT org DOT uk # http://www.voidspace.org.uk/python/mock/ | 1.006905 | 1 |
modules/vcd_vapp_vm_disk.py | okassov/ansible-role-vmware-vcloud | 0 | 6630709 | <gh_stars>0
# Copyright © 2018 VMware, Inc. All Rights Reserved.
# SPDX-License-Identifier: BSD-2-Clause OR GPL-3.0-only
# !/usr/bin/python
ANSIBLE_METADATA = {
'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'
}
DOCUMENTATION = '''
---
module: vcd_vapp_vm_disk
short_description: Ansible Module to manage disks in vApp VMs in vCloud Director.
version_added: "2.4"
description:
- "Ansible Module to manage (create/update/delete) disks in vApp VMs."
options:
user:
description:
- vCloud Director user name
required: false
password:
description:
- vCloud Director user password
required: false
host:
description:
- vCloud Director host address
required: false
org:
description:
- Organization name on vCloud Director to access
required: false
api_version:
description:
- Pyvcloud API version
required: false
verify_ssl_certs:
description:
- whether to use secure connection to vCloud Director host
required: false
org_name:
description:
- target org name
- required for service providers to create resources in other orgs
- default value is module level / environment level org
required: false
disks:
description:
- List of Disk with its size, and attached controller
required: false
vm_name:
description:
- VM name
required: true
vapp:
description:
- vApp name
required: true
vdc:
description:
- VDC name
required: true
state:
description:
- state of disk ('present'/'absent'/'update').
- One from state or operation has to be provided.
required: false
operation:
description:
- operation on Disk ('read').
- One from state or operation has to be provided.
required: false
author:
- <EMAIL>
'''
EXAMPLES = '''
- name: Test with a message
vcd_vapp_vm_disk:
user: terraform
password: <PASSWORD>
host: csa.sandbox.org
org: Terraform
api_version: 30
verify_ssl_certs: False
vm: "vm1"
vapp = "vapp1"
vdc = "vdc1"
disks:
- size: 3
controller: lsilogic
name: Hard disk 1
state = "present"
'''
RETURN = '''
msg: success/failure message corresponding to disk state
changed: true if resource has been changed else false
'''
import math
from pyvcloud.vcd.vm import VM
from pyvcloud.vcd.org import Org
from pyvcloud.vcd.vdc import VDC
from pyvcloud.vcd.vapp import VApp
from pyvcloud.vcd.client import NSMAP
from pyvcloud.vcd.client import EntityType
from ansible.module_utils.vcd import VcdAnsibleModule
from pyvcloud.vcd.exceptions import EntityNotFoundException
VAPP_VM_DISK_STATES = ['present', 'absent', 'update']
VAPP_VM_DISK_OPERATIONS = ['read']
def vapp_vm_disk_argument_spec():
return dict(
vm_name=dict(type='str', required=True),
vapp=dict(type='str', required=True),
vdc=dict(type='str', required=True),
disks=dict(type='list', required=False),
org_name=dict(type='str', required=False, default=None),
state=dict(choices=VAPP_VM_DISK_STATES, required=False),
operation=dict(choices=VAPP_VM_DISK_OPERATIONS, required=False),
)
class VappVMDisk(VcdAnsibleModule):
def __init__(self, **kwargs):
super(VappVMDisk, self).__init__(**kwargs)
self.org = self.get_org()
vapp_resource = self.get_resource()
self.vapp = VApp(self.client, resource=vapp_resource)
def manage_states(self):
state = self.params.get('state')
if state == "present":
return self.add_disk()
if state == "update":
return self.update_disk()
if state == "absent":
return self.delete_disk()
def manage_operations(self):
operation = self.params.get('operation')
if operation == "read":
return self.read_disks()
def get_org(self):
org_name = self.params.get('org_name')
org_resource = self.client.get_org()
if org_name:
org_resource = self.client.get_org_by_name(org_name)
return Org(self.client, resource=org_resource)
def get_resource(self):
vapp = self.params.get('vapp')
vdc = self.params.get('vdc')
vdc_resource = VDC(self.client, resource=self.org.get_vdc(vdc))
vapp_resource_href = vdc_resource.get_resource_href(
name=vapp, entity_type=EntityType.VAPP)
vapp_resource = self.client.get_resource(vapp_resource_href)
return vapp_resource
def get_vm(self):
vapp_vm_resource = self.vapp.get_vm(self.params.get('vm_name'))
return VM(self.client, resource=vapp_vm_resource)
def get_formatted_disk_size(self, disk_size):
'''
Convert disk byte size into GB or MB
MB = 1024 * 1024 ( 2 ** 20 )
GB = 1024 * 1024 * 1024 ( 2 ** 30 )
Note - only MB and GB are supported from vCD
'''
log_value = int(math.floor(math.log(disk_size, 1024)))
pow_value = math.pow(1024, log_value)
size_metric = ' MB' if log_value == 2 else ' GB'
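        # e.g. 524288000 bytes -> '500.0 MB', 17179869184 bytes -> '16.0 GB'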
return str(round(disk_size / pow_value, 1)) + size_metric
def add_disk(self):
disks = self.params.get('disks')
vm_name = self.params.get('vm_name')
response = dict()
response['msg'] = list()
response['changed'] = False
available_disks = self.read_disks().get("disks").keys()
warnings = list()
for disk in disks:
disk_size = int(disk.get("size"))
disk_controller = disk.get("controller")
disk_name = disk.get("name")
'''
            here the condition covers both situations:
            1. if a disk name is given, the disk's availability is checked
            first before adding it.
            2. if no disk name is given, a new disk is added anyway.
'''
if disk_name not in available_disks:
add_disk_task = self.vapp.add_disk_to_vm(
vm_name, disk_size, disk_controller)
self.execute_task(add_disk_task)
msg = "A disk with size {0} and controller {1} has been added to VM {2}"
msg = msg.format(disk_size, disk_controller, vm_name)
response['changed'] = True
response['msg'].append(msg)
else:
warnings.append(disk_name)
if warnings:
warnings = ','.join(warnings)
msg = "Hard disk(s) with name '{0}' are already present"
response["warnings"] = msg.format(warnings)
return response
def read_disks(self):
vm = self.get_vm()
response = dict()
response['changed'] = False
response['disks'] = dict()
disks = self.client.get_resource(
vm.resource.get('href') + '/virtualHardwareSection/disks')
for disk in disks.Item:
if disk['{' + NSMAP['rasd'] + '}Description'] == "Hard disk":
disk_name = str(disk['{' + NSMAP['rasd'] + '}ElementName'])
disk_instance = int(disk['{' + NSMAP['rasd'] + '}InstanceID'])
disk_size = int(disk['{' + NSMAP['rasd'] + '}VirtualQuantity'])
disk_hostresource = disk['{' + NSMAP['rasd'] + '}HostResource']
disk_capacity = int(disk_hostresource.get(
'{' + NSMAP['vcloud'] + '}capacity'))
response['disks'][disk_name] = {
'InstanceID': disk_instance,
'VirtualQuantity': self.get_formatted_disk_size(disk_size),
'HostResource': str(round(disk_capacity / 1024, 1)) + ' GB'
}
return response
def update_disk(self):
disks = self.params.get('disks')
response = dict()
response['changed'] = False
response['msg'] = list()
vm = self.get_vm()
vm_disks = self.client.get_resource(
vm.resource.get('href') + '/virtualHardwareSection/disks')
disk_names = [disk.get("name") for disk in disks]
disk_sizes = [disk.get("size", None) for disk in disks]
disk_sizes = list(filter(lambda size: size is not None, disk_sizes))
assert len(disk_sizes) == len(disk_names)
for index, disk_name in enumerate(disk_names):
for vm_disk_index, disk in enumerate(vm_disks.Item):
disk_size = int(disk_sizes[index])
if disk['{' + NSMAP['rasd'] + '}ElementName'] == disk_name:
disk[
'{' + NSMAP['rasd'] + '}VirtualQuantity'] = disk_size
disk[
'{' + NSMAP['rasd'] + '}HostResource'].set(
'{' + NSMAP['vcloud'] + '}capacity', str(disk_size))
vm_disks.Item[vm_disk_index] = disk
update_disk_task = self.client.put_resource(
vm.resource.get('href') + '/virtualHardwareSection/disks',
vm_disks, EntityType.RASD_ITEMS_LIST.value)
self.execute_task(update_disk_task)
msg = 'Vapp VM disk with name {0} has been updated.'
response['msg'].append(msg.format(disk_name))
response['changed'] = True
return response
def delete_disk(self):
vm = self.get_vm()
disks = self.params.get('disks')
disks_to_remove = [disk.get("name") for disk in disks]
response = dict()
response['changed'] = False
disks = self.client.get_resource(vm.resource.get(
'href') + '/virtualHardwareSection/disks')
for disk in disks.Item:
if disk['{' + NSMAP['rasd'] + '}ElementName'] in disks_to_remove:
disks.remove(disk)
disks_to_remove.remove(
disk['{' + NSMAP['rasd'] + '}ElementName'])
if len(disks_to_remove) > 0:
error = 'VM disk(s) with name {0} was not found.'
error = error.format(','.join(disks_to_remove))
raise EntityNotFoundException(error)
remove_disk_task = self.client.put_resource(
vm.resource.get('href') + '/virtualHardwareSection/disks',
disks, EntityType.RASD_ITEMS_LIST.value)
self.execute_task(remove_disk_task)
        response['msg'] = 'VM disk(s) have been deleted.'
response['changed'] = True
return response
def main():
argument_spec = vapp_vm_disk_argument_spec()
response = dict(
msg=dict(type='str')
)
module = VappVMDisk(argument_spec=argument_spec, supports_check_mode=True)
try:
if module.check_mode:
response = dict()
response['changed'] = False
response['msg'] = "skipped, running in check mode"
response['skipped'] = True
elif module.params.get('state'):
response = module.manage_states()
elif module.params.get('operation'):
response = module.manage_operations()
else:
raise Exception('Please provide state/operation for resource')
except Exception as error:
response['msg'] = error
response['changed'] = False
module.fail_json(**response)
else:
module.exit_json(**response)
if __name__ == '__main__':
main()
| # Copyright © 2018 VMware, Inc. All Rights Reserved.
# SPDX-License-Identifier: BSD-2-Clause OR GPL-3.0-only
# !/usr/bin/python
ANSIBLE_METADATA = {
'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'
}
DOCUMENTATION = '''
---
module: vcd_vapp_vm_disk
short_description: Ansible Module to manage disks in vApp VMs in vCloud Director.
version_added: "2.4"
description:
- "Ansible Module to manage (create/update/delete) disks in vApp VMs."
options:
user:
description:
- vCloud Director user name
required: false
password:
description:
- vCloud Director user password
required: false
host:
description:
- vCloud Director host address
required: false
org:
description:
- Organization name on vCloud Director to access
required: false
api_version:
description:
- Pyvcloud API version
required: false
verify_ssl_certs:
description:
- whether to use secure connection to vCloud Director host
required: false
org_name:
description:
- target org name
- required for service providers to create resources in other orgs
- default value is module level / environment level org
required: false
disks:
description:
- List of Disk with its size, and attached controller
required: false
vm_name:
description:
- VM name
required: true
vapp:
description:
- vApp name
required: true
vdc:
description:
- VDC name
required: true
state:
description:
- state of disk ('present'/'absent'/'update').
- One from state or operation has to be provided.
required: false
operation:
description:
- operation on Disk ('read').
- One from state or operation has to be provided.
required: false
author:
- <EMAIL>
'''
EXAMPLES = '''
- name: Test with a message
vcd_vapp_vm_disk:
user: terraform
password: <PASSWORD>
host: csa.sandbox.org
org: Terraform
api_version: 30
verify_ssl_certs: False
vm: "vm1"
vapp = "vapp1"
vdc = "vdc1"
disks:
- size: 3
controller: lsilogic
name: Hard disk 1
state = "present"
'''
RETURN = '''
msg: success/failure message corresponding to disk state
changed: true if resource has been changed else false
'''
import math
from pyvcloud.vcd.vm import VM
from pyvcloud.vcd.org import Org
from pyvcloud.vcd.vdc import VDC
from pyvcloud.vcd.vapp import VApp
from pyvcloud.vcd.client import NSMAP
from pyvcloud.vcd.client import EntityType
from ansible.module_utils.vcd import VcdAnsibleModule
from pyvcloud.vcd.exceptions import EntityNotFoundException
VAPP_VM_DISK_STATES = ['present', 'absent', 'update']
VAPP_VM_DISK_OPERATIONS = ['read']
def vapp_vm_disk_argument_spec():
return dict(
vm_name=dict(type='str', required=True),
vapp=dict(type='str', required=True),
vdc=dict(type='str', required=True),
disks=dict(type='list', required=False),
org_name=dict(type='str', required=False, default=None),
state=dict(choices=VAPP_VM_DISK_STATES, required=False),
operation=dict(choices=VAPP_VM_DISK_OPERATIONS, required=False),
)
class VappVMDisk(VcdAnsibleModule):
def __init__(self, **kwargs):
super(VappVMDisk, self).__init__(**kwargs)
self.org = self.get_org()
vapp_resource = self.get_resource()
self.vapp = VApp(self.client, resource=vapp_resource)
def manage_states(self):
state = self.params.get('state')
if state == "present":
return self.add_disk()
if state == "update":
return self.update_disk()
if state == "absent":
return self.delete_disk()
def manage_operations(self):
operation = self.params.get('operation')
if operation == "read":
return self.read_disks()
def get_org(self):
org_name = self.params.get('org_name')
org_resource = self.client.get_org()
if org_name:
org_resource = self.client.get_org_by_name(org_name)
return Org(self.client, resource=org_resource)
def get_resource(self):
vapp = self.params.get('vapp')
vdc = self.params.get('vdc')
vdc_resource = VDC(self.client, resource=self.org.get_vdc(vdc))
vapp_resource_href = vdc_resource.get_resource_href(
name=vapp, entity_type=EntityType.VAPP)
vapp_resource = self.client.get_resource(vapp_resource_href)
return vapp_resource
def get_vm(self):
vapp_vm_resource = self.vapp.get_vm(self.params.get('vm_name'))
return VM(self.client, resource=vapp_vm_resource)
def get_formatted_disk_size(self, disk_size):
'''
Convert disk byte size into GB or MB
MB = 1024 * 1024 ( 2 ** 20 )
GB = 1024 * 1024 * 1024 ( 2 ** 30 )
Note - only MB and GB are supported from vCD
'''
log_value = int(math.floor(math.log(disk_size, 1024)))
pow_value = math.pow(1024, log_value)
size_metric = ' MB' if log_value == 2 else ' GB'
return str(round(disk_size / pow_value, 1)) + size_metric
def add_disk(self):
disks = self.params.get('disks')
vm_name = self.params.get('vm_name')
response = dict()
response['msg'] = list()
response['changed'] = False
available_disks = self.read_disks().get("disks").keys()
warnings = list()
for disk in disks:
disk_size = int(disk.get("size"))
disk_controller = disk.get("controller")
disk_name = disk.get("name")
'''
            here the condition covers both situations:
            1. if a disk name is given, the disk's availability is checked
            first before adding it.
            2. if no disk name is given, a new disk is added anyway.
'''
if disk_name not in available_disks:
add_disk_task = self.vapp.add_disk_to_vm(
vm_name, disk_size, disk_controller)
self.execute_task(add_disk_task)
msg = "A disk with size {0} and controller {1} has been added to VM {2}"
msg = msg.format(disk_size, disk_controller, vm_name)
response['changed'] = True
response['msg'].append(msg)
else:
warnings.append(disk_name)
if warnings:
warnings = ','.join(warnings)
msg = "Hard disk(s) with name '{0}' are already present"
response["warnings"] = msg.format(warnings)
return response
def read_disks(self):
vm = self.get_vm()
response = dict()
response['changed'] = False
response['disks'] = dict()
disks = self.client.get_resource(
vm.resource.get('href') + '/virtualHardwareSection/disks')
for disk in disks.Item:
if disk['{' + NSMAP['rasd'] + '}Description'] == "Hard disk":
disk_name = str(disk['{' + NSMAP['rasd'] + '}ElementName'])
disk_instance = int(disk['{' + NSMAP['rasd'] + '}InstanceID'])
disk_size = int(disk['{' + NSMAP['rasd'] + '}VirtualQuantity'])
disk_hostresource = disk['{' + NSMAP['rasd'] + '}HostResource']
disk_capacity = int(disk_hostresource.get(
'{' + NSMAP['vcloud'] + '}capacity'))
response['disks'][disk_name] = {
'InstanceID': disk_instance,
'VirtualQuantity': self.get_formatted_disk_size(disk_size),
'HostResource': str(round(disk_capacity / 1024, 1)) + ' GB'
}
return response
def update_disk(self):
disks = self.params.get('disks')
response = dict()
response['changed'] = False
response['msg'] = list()
vm = self.get_vm()
vm_disks = self.client.get_resource(
vm.resource.get('href') + '/virtualHardwareSection/disks')
disk_names = [disk.get("name") for disk in disks]
disk_sizes = [disk.get("size", None) for disk in disks]
disk_sizes = list(filter(lambda size: size is not None, disk_sizes))
assert len(disk_sizes) == len(disk_names)
for index, disk_name in enumerate(disk_names):
for vm_disk_index, disk in enumerate(vm_disks.Item):
disk_size = int(disk_sizes[index])
if disk['{' + NSMAP['rasd'] + '}ElementName'] == disk_name:
disk[
'{' + NSMAP['rasd'] + '}VirtualQuantity'] = disk_size
disk[
'{' + NSMAP['rasd'] + '}HostResource'].set(
'{' + NSMAP['vcloud'] + '}capacity', str(disk_size))
vm_disks.Item[vm_disk_index] = disk
update_disk_task = self.client.put_resource(
vm.resource.get('href') + '/virtualHardwareSection/disks',
vm_disks, EntityType.RASD_ITEMS_LIST.value)
self.execute_task(update_disk_task)
msg = 'Vapp VM disk with name {0} has been updated.'
response['msg'].append(msg.format(disk_name))
response['changed'] = True
return response
def delete_disk(self):
vm = self.get_vm()
disks = self.params.get('disks')
disks_to_remove = [disk.get("name") for disk in disks]
response = dict()
response['changed'] = False
disks = self.client.get_resource(vm.resource.get(
'href') + '/virtualHardwareSection/disks')
for disk in disks.Item:
if disk['{' + NSMAP['rasd'] + '}ElementName'] in disks_to_remove:
disks.remove(disk)
disks_to_remove.remove(
disk['{' + NSMAP['rasd'] + '}ElementName'])
if len(disks_to_remove) > 0:
error = 'VM disk(s) with name {0} was not found.'
error = error.format(','.join(disks_to_remove))
raise EntityNotFoundException(error)
remove_disk_task = self.client.put_resource(
vm.resource.get('href') + '/virtualHardwareSection/disks',
disks, EntityType.RASD_ITEMS_LIST.value)
self.execute_task(remove_disk_task)
        response['msg'] = 'VM disk(s) have been deleted.'
response['changed'] = True
return response
def main():
argument_spec = vapp_vm_disk_argument_spec()
response = dict(
msg=dict(type='str')
)
module = VappVMDisk(argument_spec=argument_spec, supports_check_mode=True)
try:
if module.check_mode:
response = dict()
response['changed'] = False
response['msg'] = "skipped, running in check mode"
response['skipped'] = True
elif module.params.get('state'):
response = module.manage_states()
elif module.params.get('operation'):
response = module.manage_operations()
else:
raise Exception('Please provide state/operation for resource')
except Exception as error:
response['msg'] = error
response['changed'] = False
module.fail_json(**response)
else:
module.exit_json(**response)
if __name__ == '__main__':
main() | en | 0.762501 | # Copyright © 2018 VMware, Inc. All Rights Reserved. # SPDX-License-Identifier: BSD-2-Clause OR GPL-3.0-only # !/usr/bin/python --- module: vcd_vapp_vm_disk short_description: Ansible Module to manage disks in vApp VMs in vCloud Director. version_added: "2.4" description: - "Ansible Module to manage (create/update/delete) disks in vApp VMs." options: user: description: - vCloud Director user name required: false password: description: - vCloud Director user password required: false host: description: - vCloud Director host address required: false org: description: - Organization name on vCloud Director to access required: false api_version: description: - Pyvcloud API version required: false verify_ssl_certs: description: - whether to use secure connection to vCloud Director host required: false org_name: description: - target org name - required for service providers to create resources in other orgs - default value is module level / environment level org required: false disks: description: - List of Disk with its size, and attached controller required: false vm_name: description: - VM name required: true vapp: description: - vApp name required: true vdc: description: - VDC name required: true state: description: - state of disk ('present'/'absent'/'update'). - One from state or operation has to be provided. required: false operation: description: - operation on Disk ('read'). - One from state or operation has to be provided. required: false author: - <EMAIL> - name: Test with a message vcd_vapp_vm_disk: user: terraform password: <PASSWORD> host: csa.sandbox.org org: Terraform api_version: 30 verify_ssl_certs: False vm: "vm1" vapp = "vapp1" vdc = "vdc1" disks: - size: 3 controller: lsilogic name: Hard disk 1 state = "present" msg: success/failure message corresponding to disk state changed: true if resource has been changed else false Convert disk byte size into GB or MB MB = 1024 * 1024 ( 2 ** 20 ) GB = 1024 * 1024 * 1024 ( 2 ** 30 ) Note - only MB and GB are supported from vCD here the condition covers both the situtation 1. if someone has given the disk name then first it will check for disk availability first before adding it. 2. if someone has ignored giving the disk name then it will add a new disk any way. | 1.733947 | 2 |
jieba/analyse/analyzer.py | Jaybeka/jieba | 1 | 6630710 | <reponame>Jaybeka/jieba
#encoding=utf-8
from whoosh.analysis import RegexAnalyzer,LowercaseFilter,StopFilter,StemFilter
from whoosh.analysis import Tokenizer,Token
from whoosh.lang.porter import stem
import jieba
import re
STOP_WORDS = frozenset(('a', 'an', 'and', 'are', 'as', 'at', 'be', 'by', 'can',
'for', 'from', 'have', 'if', 'in', 'is', 'it', 'may',
'not', 'of', 'on', 'or', 'tbd', 'that', 'the', 'this',
'to', 'us', 'we', 'when', 'will', 'with', 'yet',
'you', 'your',u'的',u'了',u'和',u'什么'))
accepted_chars = re.compile(ur"[\u4E00-\u9FA5]+")
class ChineseTokenizer(Tokenizer):
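    # Segments text with jieba's search-mode tokenizer and yields whoosh Token
    # objects; non-CJK fragments of length 1 are skipped.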
def __call__(self,text,**kargs):
words = jieba.tokenize(text,mode="search")
token = Token()
for (w,start_pos,stop_pos) in words:
if not accepted_chars.match(w):
if len(w)>1:
pass
else:
continue
token.original = token.text = w
token.pos = start_pos
token.startchar = start_pos
token.endchar = stop_pos
yield token
def ChineseAnalyzer(stoplist=STOP_WORDS,minsize=1,stemfn=stem,cachesize=50000):
return ChineseTokenizer() | LowercaseFilter() | StopFilter(stoplist=stoplist,minsize=minsize)\
|StemFilter(stemfn=stemfn, ignore=None,cachesize=cachesize)
| #encoding=utf-8
from whoosh.analysis import RegexAnalyzer,LowercaseFilter,StopFilter,StemFilter
from whoosh.analysis import Tokenizer,Token
from whoosh.lang.porter import stem
import jieba
import re
STOP_WORDS = frozenset(('a', 'an', 'and', 'are', 'as', 'at', 'be', 'by', 'can',
'for', 'from', 'have', 'if', 'in', 'is', 'it', 'may',
'not', 'of', 'on', 'or', 'tbd', 'that', 'the', 'this',
'to', 'us', 'we', 'when', 'will', 'with', 'yet',
'you', 'your',u'的',u'了',u'和',u'什么'))
accepted_chars = re.compile(ur"[\u4E00-\u9FA5]+")
class ChineseTokenizer(Tokenizer):
def __call__(self,text,**kargs):
words = jieba.tokenize(text,mode="search")
token = Token()
for (w,start_pos,stop_pos) in words:
if not accepted_chars.match(w):
if len(w)>1:
pass
else:
continue
token.original = token.text = w
token.pos = start_pos
token.startchar = start_pos
token.endchar = stop_pos
yield token
def ChineseAnalyzer(stoplist=STOP_WORDS,minsize=1,stemfn=stem,cachesize=50000):
return ChineseTokenizer() | LowercaseFilter() | StopFilter(stoplist=stoplist,minsize=minsize)\
|StemFilter(stemfn=stemfn, ignore=None,cachesize=cachesize) | en | 0.595601 | #encoding=utf-8 | 2.477079 | 2 |
171/__init__.py | sc4599/LeetCode | 0 | 6630711 | __author__ = 'songchao'
| __author__ = 'songchao'
| none | 1 | 0.907934 | 1 |
|
pyscf/cc/__init__.py | nmardirossian/pyscf | 1 | 6630712 | <filename>pyscf/cc/__init__.py
'''
Coupled Cluster
===============
Simple usage::
>>> from pyscf import gto, scf, cc
>>> mol = gto.M(atom='H 0 0 0; H 0 0 1')
>>> mf = scf.RHF(mol).run()
>>> cc.CCSD(mf).run()
:func:`cc.CCSD` returns an instance of the CCSD class. The following parameters
control the CCSD calculation.
verbose : int
Print level. Default value equals to :class:`Mole.verbose`
max_memory : float or int
Allowed memory in MB. Default value equals to :class:`Mole.max_memory`
conv_tol : float
converge threshold. Default is 1e-7.
conv_tol_normt : float
converge threshold for norm(t1,t2). Default is 1e-5.
max_cycle : int
max number of iterations. Default is 50.
diis_space : int
DIIS space size. Default is 6.
diis_start_cycle : int
The step to start DIIS. Default is 0.
direct : bool
AO-direct CCSD. Default is False.
frozen : int or list
If integer is given, the inner-most orbitals are frozen from CC
amplitudes. Given the orbital indices (0-based) in a list, both
occupied and virtual orbitals can be frozen in CC calculation.
Saved results
converged : bool
CCSD converged or not
e_tot : float
Total CCSD energy (HF + correlation)
t1, t2 :
t1[i,a], t2[i,j,a,b] (i,j in occ, a,b in virt)
l1, l2 :
Lambda amplitudes l1[i,a], l2[i,j,a,b] (i,j in occ, a,b in virt)
'''
from pyscf.cc import ccsd
from pyscf.cc import ccsd_lambda
from pyscf.cc import ccsd_rdm
from pyscf.cc import addons
def CCSD(mf, frozen=0, mo_coeff=None, mo_occ=None):
__doc__ = ccsd.CCSD.__doc__
import sys
from pyscf import scf
from pyscf.cc import dfccsd
if isinstance(mf, scf.uhf.UHF) or mf.mol.spin != 0:
return UCCSD(mf, frozen, mo_coeff, mo_occ)
if 'dft' in str(mf.__module__):
sys.stderr.write('CCSD Warning: The first argument mf is a DFT object. '
'CCSD calculation should be used with HF object')
if (isinstance(mf, scf.newton_ah._CIAH_SCF) or
not isinstance(mf, scf.hf.RHF)):
mf = scf.addons.convert_to_rhf(mf)
if hasattr(mf, 'with_df') and mf.with_df:
return dfccsd.RCCSD(mf, frozen, mo_coeff, mo_occ)
else:
return ccsd.CCSD(mf, frozen, mo_coeff, mo_occ)
def RCCSD(mf, frozen=0, mo_coeff=None, mo_occ=None):
from pyscf import lib
from pyscf import scf
from pyscf.cc import rccsd
from pyscf.cc import dfccsd
if isinstance(mf, scf.uhf.UHF):
raise RuntimeError('RCCSD cannot be used with UHF method.')
elif isinstance(mf, scf.rohf.ROHF):
lib.logger.warn(mf, 'RCCSD method does not support ROHF method. ROHF object '
'is converted to UHF object and UCCSD method is called.')
return UCCSD(mf, frozen, mo_coeff, mo_occ)
if (isinstance(mf, scf.newton_ah._CIAH_SCF) or
not isinstance(mf, scf.hf.RHF)):
mf = scf.addons.convert_to_rhf(mf)
if hasattr(mf, 'with_df') and mf.with_df:
return dfccsd.RCCSD(mf, frozen, mo_coeff, mo_occ)
else:
return rccsd.RCCSD(mf, frozen, mo_coeff, mo_occ)
def UCCSD(mf, frozen=0, mo_coeff=None, mo_occ=None):
import sys
from pyscf import scf
from pyscf.cc import uccsd
if 'dft' in str(mf.__module__):
sys.stderr.write('CCSD Warning: The first argument mf is a DFT object. '
'CCSD calculation should be used with HF object')
if (isinstance(mf, scf.newton_ah._CIAH_SCF) or
not isinstance(mf, scf.uhf.UHF)):
mf = scf.addons.convert_to_uhf(mf)
if hasattr(mf, 'with_df') and mf.with_df:
raise NotImplementedError('DF-UCCSD')
else:
return uccsd.UCCSD(mf, frozen, mo_coeff, mo_occ)
| <filename>pyscf/cc/__init__.py
'''
Coupled Cluster
===============
Simple usage::
>>> from pyscf import gto, scf, cc
>>> mol = gto.M(atom='H 0 0 0; H 0 0 1')
>>> mf = scf.RHF(mol).run()
>>> cc.CCSD(mf).run()
:func:`cc.CCSD` returns an instance of the CCSD class. The following parameters
control the CCSD calculation.
verbose : int
Print level. Default value equals to :class:`Mole.verbose`
max_memory : float or int
Allowed memory in MB. Default value equals to :class:`Mole.max_memory`
conv_tol : float
converge threshold. Default is 1e-7.
conv_tol_normt : float
converge threshold for norm(t1,t2). Default is 1e-5.
max_cycle : int
max number of iterations. Default is 50.
diis_space : int
DIIS space size. Default is 6.
diis_start_cycle : int
The step to start DIIS. Default is 0.
direct : bool
AO-direct CCSD. Default is False.
frozen : int or list
If integer is given, the inner-most orbitals are frozen from CC
amplitudes. Given the orbital indices (0-based) in a list, both
occupied and virtual orbitals can be frozen in CC calculation.
Saved results
converged : bool
CCSD converged or not
e_tot : float
Total CCSD energy (HF + correlation)
t1, t2 :
t1[i,a], t2[i,j,a,b] (i,j in occ, a,b in virt)
l1, l2 :
Lambda amplitudes l1[i,a], l2[i,j,a,b] (i,j in occ, a,b in virt)
'''
from pyscf.cc import ccsd
from pyscf.cc import ccsd_lambda
from pyscf.cc import ccsd_rdm
from pyscf.cc import addons
def CCSD(mf, frozen=0, mo_coeff=None, mo_occ=None):
__doc__ = ccsd.CCSD.__doc__
import sys
from pyscf import scf
from pyscf.cc import dfccsd
if isinstance(mf, scf.uhf.UHF) or mf.mol.spin != 0:
return UCCSD(mf, frozen, mo_coeff, mo_occ)
if 'dft' in str(mf.__module__):
sys.stderr.write('CCSD Warning: The first argument mf is a DFT object. '
'CCSD calculation should be used with HF object')
if (isinstance(mf, scf.newton_ah._CIAH_SCF) or
not isinstance(mf, scf.hf.RHF)):
mf = scf.addons.convert_to_rhf(mf)
if hasattr(mf, 'with_df') and mf.with_df:
return dfccsd.RCCSD(mf, frozen, mo_coeff, mo_occ)
else:
return ccsd.CCSD(mf, frozen, mo_coeff, mo_occ)
def RCCSD(mf, frozen=0, mo_coeff=None, mo_occ=None):
from pyscf import lib
from pyscf import scf
from pyscf.cc import rccsd
from pyscf.cc import dfccsd
if isinstance(mf, scf.uhf.UHF):
raise RuntimeError('RCCSD cannot be used with UHF method.')
elif isinstance(mf, scf.rohf.ROHF):
lib.logger.warn(mf, 'RCCSD method does not support ROHF method. ROHF object '
'is converted to UHF object and UCCSD method is called.')
return UCCSD(mf, frozen, mo_coeff, mo_occ)
if (isinstance(mf, scf.newton_ah._CIAH_SCF) or
not isinstance(mf, scf.hf.RHF)):
mf = scf.addons.convert_to_rhf(mf)
if hasattr(mf, 'with_df') and mf.with_df:
return dfccsd.RCCSD(mf, frozen, mo_coeff, mo_occ)
else:
return rccsd.RCCSD(mf, frozen, mo_coeff, mo_occ)
def UCCSD(mf, frozen=0, mo_coeff=None, mo_occ=None):
import sys
from pyscf import scf
from pyscf.cc import uccsd
if 'dft' in str(mf.__module__):
sys.stderr.write('CCSD Warning: The first argument mf is a DFT object. '
'CCSD calculation should be used with HF object')
if (isinstance(mf, scf.newton_ah._CIAH_SCF) or
not isinstance(mf, scf.uhf.UHF)):
mf = scf.addons.convert_to_uhf(mf)
if hasattr(mf, 'with_df') and mf.with_df:
raise NotImplementedError('DF-UCCSD')
else:
return uccsd.UCCSD(mf, frozen, mo_coeff, mo_occ)
| en | 0.591835 | Coupled Cluster =============== Simple usage:: >>> from pyscf import gto, scf, cc >>> mol = gto.M(atom='H 0 0 0; H 0 0 1') >>> mf = scf.RHF(mol).run() >>> cc.CCSD(mf).run() :func:`cc.CCSD` returns an instance of CCSD class. Followings are parameters to control CCSD calculation. verbose : int Print level. Default value equals to :class:`Mole.verbose` max_memory : float or int Allowed memory in MB. Default value equals to :class:`Mole.max_memory` conv_tol : float converge threshold. Default is 1e-7. conv_tol_normt : float converge threshold for norm(t1,t2). Default is 1e-5. max_cycle : int max number of iterations. Default is 50. diis_space : int DIIS space size. Default is 6. diis_start_cycle : int The step to start DIIS. Default is 0. direct : bool AO-direct CCSD. Default is False. frozen : int or list If integer is given, the inner-most orbitals are frozen from CC amplitudes. Given the orbital indices (0-based) in a list, both occupied and virtual orbitals can be frozen in CC calculation. Saved results converged : bool CCSD converged or not e_tot : float Total CCSD energy (HF + correlation) t1, t2 : t1[i,a], t2[i,j,a,b] (i,j in occ, a,b in virt) l1, l2 : Lambda amplitudes l1[i,a], l2[i,j,a,b] (i,j in occ, a,b in virt) | 2.471021 | 2 |
Connect to LAN GUIautomate.py | RAVURISREESAIHARIKRISHNA/Python-2.7.12-3.5.2- | 0 | 6630713 | <gh_stars>0
import pyautogui,time
pyautogui.typewrite(["win"])
time.sleep(0.25)
pyautogui.click(1173,735)
time.sleep(1)
pyautogui.click(1187,502)
time.sleep(1)
#pyautogui.keyDown("win")
#pyautogui.typewrite("up")
#pyautogui.keyUp("win")
#time.sleep(0)
pyautogui.click(464,151)
time.sleep(0.2)
pyautogui.moveTo(476,199)
| import pyautogui,time
pyautogui.typewrite(["win"])
time.sleep(0.25)
pyautogui.click(1173,735)
time.sleep(1)
pyautogui.click(1187,502)
time.sleep(1)
#pyautogui.keyDown("win")
#pyautogui.typewrite("up")
#pyautogui.keyUp("win")
#time.sleep(0)
pyautogui.click(464,151)
time.sleep(0.2)
pyautogui.moveTo(476,199) | en | 0.174228 | #pyautogui.keyDown("win") #pyautogui.typewrite("up") #pyautogui.keyUp("win") #time.sleep(0) | 2.435736 | 2 |
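The script above clicks hard-coded screen coordinates; a hedged sketch of a less brittle variant locates the target from a reference screenshot instead. The PNG path is an assumption, and depending on the pyautogui version a missing image yields None or raises an exception.
import pyautogui, time

pyautogui.press("win")
time.sleep(0.5)
# locateCenterOnScreen needs a saved screenshot of the icon to click
pos = pyautogui.locateCenterOnScreen("network_icon.png")  # hypothetical image file
if pos is not None:
    pyautogui.click(pos.x, pos.y)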
gvm-insert.py | gubertoli/practicalvm | 12 | 6630714 | <reponame>gubertoli/practicalvm
#!/usr/bin/env python3
# Iterate through an openvas output XML file
# and insert relevant information into a Mongo database
# OID is considered the authoritative identifiers of individual
# vulnerabilities, and if a vulnerability or host-vuln mapping
# already exists in Mongo, data provided here will
# be ignored.
#
# v0.3
# <NAME>
from xml.etree.cElementTree import iterparse
from pymongo import MongoClient
import datetime, sys
# globals
# Mongo connection parameters
client = MongoClient('mongodb://localhost:27017')
db = client['vulnmgt']
# host - OIDs map
oidList = {}
# print usage and exit
def usage():
print ('''
Usage: $ openvas-insert.py <infile>
''')
def main():
if (len(sys.argv) < 2): # no files
usage()
exit(0)
# find and open the output XML file
infile = open(sys.argv[1], 'r')
# Start parsing the XML tree.
for event, elem in iterparse(infile):
# Now do this for each 'result' block in the output file
if elem.tag == "result":
result = {}
# this won't go in the result document, but will be used
# to find the result document we're inserting into or creating
#
# some 'result' blocks are nested, and we want to ignore those!
# we can tell if it's one of those because it's missing stuff like
# the 'host' block
if (elem.find("host") == None):
continue
ipaddr = elem.find("host").text
(port, proto) = elem.find("port").text.split('/')
result['port'] = port
result['proto'] = proto
nvtblock = elem.find("nvt") # a bunch of stuff is in here
# this will be reused later
oid = nvtblock.get("oid")
result['oid'] = oid
result['name'] = nvtblock.find("name").text
result['family'] = nvtblock.find("family").text
# if it is cvss 0, ignore it.
cvss = float(nvtblock.find("cvss_base").text)
if (cvss == 0):
continue
result['cvss'] = cvss
# these fields might contain one or more comma-separated values.
result['cve'] = nvtblock.find("cve").text.split(", ")
result['bid'] = nvtblock.find("bid").text.split(", ")
result['xref'] = nvtblock.find("xref").text.split(", ")
# the issue is we don't know quite what will be in here for
# any given vulnerability. So we'll just put them all in the
# database under the names that OpenVAS gives them
tags = nvtblock.find("tags").text.split("|")
for item in tags:
(tagname, tagvalue) = item.split("=", 1)
result[tagname] = tagvalue
result['threat'] = elem.find("threat").text
result['updated'] = datetime.datetime.utcnow()
elem.clear()
# first, does this vulnerability exist yet in our
# database? if so, insert it. if not, skip.
# since not all of them have a CVE or a BID, we will use
# oid (an OpenVAS identifier) as canonical.
if db.vulnerabilities.count({'oid': oid}) == 0:
db.vulnerabilities.insert(result)
# Here we're adding the OID to the dictionary of host-oid lists
# specified above. At the end of this main loop we'll go through
# each key (aka each IP) and add its list of OIDs to the Mongo
# document.
# Initialize the dictionary key if it's not yet there
if ipaddr not in oidList.keys():
oidList[ipaddr] = []
oidList[ipaddr].append({'proto': proto, 'port': port, 'oid': oid})
# Now, we'll add the OID information to each host. This will provide
# the link between hosts and vulnerabilities. If the host doesn't
# exist in our database, that is a shortcoming of our scanning
# methodology so we need to create a bare-bones record with the
# information we've collected here.
for ipaddress in oidList.keys():
if db.hosts.count({'ip': ipaddress}) == 0:
db.hosts.insert({'ip': ipaddress,
'mac': { 'addr': "", 'vendor': "Unknown" },
'ports': [],
'hostnames': [],
'os': [],
'updated': datetime.datetime.utcnow(),
'oids': oidList[ipaddress]})
else:
db.hosts.update_one({'ip': ipaddress},
{'$set': { 'updated': datetime.datetime.utcnow(),
'oids': oidList[ipaddress]}})
infile.close() # we're done
main()
| #!/usr/bin/env python3
# Iterate through an openvas output XML file
# and insert relevant information into a Mongo database
# OID is considered the authoritative identifiers of individual
# vulnerabilities, and if a vulnerability or host-vuln mapping
# already exists in Mongo, data provided here will
# be ignored.
#
# v0.3
# <NAME>
from xml.etree.cElementTree import iterparse
from pymongo import MongoClient
import datetime, sys
# globals
# Mongo connection parameters
client = MongoClient('mongodb://localhost:27017')
db = client['vulnmgt']
# host - OIDs map
oidList = {}
# print usage and exit
def usage():
print ('''
Usage: $ openvas-insert.py <infile>
''')
def main():
if (len(sys.argv) < 2): # no files
usage()
exit(0)
# find and open the output XML file
infile = open(sys.argv[1], 'r')
# Start parsing the XML tree.
for event, elem in iterparse(infile):
# Now do this for each 'result' block in the output file
if elem.tag == "result":
result = {}
# this won't go in the result document, but will be used
# to find the result document we're inserting into or creating
#
# some 'result' blocks are nested, and we want to ignore those!
# we can tell if it's one of those because it's missing stuff like
# the 'host' block
if (elem.find("host") == None):
continue
ipaddr = elem.find("host").text
(port, proto) = elem.find("port").text.split('/')
result['port'] = port
result['proto'] = proto
nvtblock = elem.find("nvt") # a bunch of stuff is in here
# this will be reused later
oid = nvtblock.get("oid")
result['oid'] = oid
result['name'] = nvtblock.find("name").text
result['family'] = nvtblock.find("family").text
# if it is cvss 0, ignore it.
cvss = float(nvtblock.find("cvss_base").text)
if (cvss == 0):
continue
result['cvss'] = cvss
# these fields might contain one or more comma-separated values.
result['cve'] = nvtblock.find("cve").text.split(", ")
result['bid'] = nvtblock.find("bid").text.split(", ")
result['xref'] = nvtblock.find("xref").text.split(", ")
# the issue is we don't know quite what will be in here for
# any given vulnerability. So we'll just put them all in the
# database under the names that OpenVAS gives them
tags = nvtblock.find("tags").text.split("|")
for item in tags:
(tagname, tagvalue) = item.split("=", 1)
result[tagname] = tagvalue
result['threat'] = elem.find("threat").text
result['updated'] = datetime.datetime.utcnow()
elem.clear()
# first, does this vulnerability exist yet in our
# database? if so, insert it. if not, skip.
# since not all of them have a CVE or a BID, we will use
# oid (an OpenVAS identifier) as canonical.
if db.vulnerabilities.count({'oid': oid}) == 0:
db.vulnerabilities.insert(result)
# Here we're adding the OID to the dictionary of host-oid lists
# specified above. At the end of this main loop we'll go through
# each key (aka each IP) and add its list of OIDs to the Mongo
# document.
# Initialize the dictionary key if it's not yet there
if ipaddr not in oidList.keys():
oidList[ipaddr] = []
oidList[ipaddr].append({'proto': proto, 'port': port, 'oid': oid})
# Now, we'll add the OID information to each host. This will provide
# the link between hosts and vulnerabilities. If the host doesn't
# exist in our database, that is a shortcoming of our scanning
# methodology so we need to create a bare-bones record with the
# information we've collected here.
for ipaddress in oidList.keys():
if db.hosts.count({'ip': ipaddress}) == 0:
db.hosts.insert({'ip': ipaddress,
'mac': { 'addr': "", 'vendor': "Unknown" },
'ports': [],
'hostnames': [],
'os': [],
'updated': datetime.datetime.utcnow(),
'oids': oidList[ipaddress]})
else:
db.hosts.update_one({'ip': ipaddress},
{'$set': { 'updated': datetime.datetime.utcnow(),
'oids': oidList[ipaddress]}})
infile.close() # we're done
main() | en | 0.887051 | #!/usr/bin/env python3 # Iterate through an openvas output XML file # and insert relevant information into a Mongo database # OID is considered the authoritative identifiers of individual # vulnerabilities, and if a vulnerability or host-vuln mapping # already exists in Mongo, data provided here will # be ignored. # # v0.3 # <NAME> # globals # Mongo connection parameters # host - OIDs map # print usage and exit Usage: $ openvas-insert.py <infile> # no files # find and open the output XML file # Start parsing the XML tree. # Now do this for each 'result' block in the output file # this won't go in the result document, but will be used # to find the result document we're inserting into or creating # # some 'result' blocks are nested, and we want to ignore those! # we can tell if it's one of those because it's missing stuff like # the 'host' block # a bunch of stuff is in here # this will be reused later # if it is cvss 0, ignore it. # these fields might contain one or more comma-separated values. # the issue is we don't know quite what will be in here for # any given vulnerability. So we'll just put them all in the # database under the names that OpenVAS gives them # first, does this vulnerability exist yet in our # database? if so, insert it. if not, skip. # since not all of them have a CVE or a BID, we will use # oid (an OpenVAS identifier) as canonical. # Here we're adding the OID to the dictionary of host-oid lists # specified above. At the end of this main loop we'll go through # each key (aka each IP) and add its list of OIDs to the Mongo # document. # Initialize the dictionary key if it's not yet there # Now, we'll add the OID information to each host. This will provide # the link between hosts and vulnerabilities. If the host doesn't # exist in our database, that is a shortcoming of our scanning # methodology so we need to create a bare-bones record with the # information we've collected here. # we're done | 2.762698 | 3 |
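A hedged sketch of reading back the collections that gvm-insert.py above populates, assuming the same local MongoDB instance and database name; the OID value is a placeholder.
from pymongo import MongoClient

db = MongoClient('mongodb://localhost:27017')['vulnmgt']
oid = '1.3.6.1.4.1.25623.1.0.100315'  # placeholder OpenVAS OID
vuln = db.vulnerabilities.find_one({'oid': oid})
# dot notation reaches into the embedded oids array written by the script above
for host in db.hosts.find({'oids.oid': oid}, {'ip': 1}):
    print(host['ip'], vuln['name'] if vuln else 'unknown')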
Unmaintained/PythonScraping/allarticles_pool.py | IQSS/workshops | 30 | 6630715 | from lxml import etree
from multiprocessing import Pool
import shutil
import csv
import urllib
import re
def scrapeSingle(num):
f = open('/tmp/parsed'+str(num)+'.csv','w')
entries = ["Day","Month","Year","Title","Remote","Local"]
c = csv.DictWriter(f,entries)
baseurl = "http://www.egyptindependent.com/subchannel/News%20features?page="
destpath = "/tmp/"
urllib.urlretrieve (baseurl+str(num),destpath+"page"+str(num)+".html")
print "Retrieved page: " + str(num)
fname = '/tmp/page'+str(num)+'.html'
fp = open(fname, 'rb')
parser = etree.HTMLParser()
tree = etree.parse(fp, parser)
dateelems = tree.xpath('.//div[@class="views-field-field-published-date-value"]/span[@class="field-content"]/span[@class="date-display-single"]')
linkelems = tree.xpath('.//div[@class="panel-pane pane-views pane-subchannel-news subchannel-pane"]//div[@class="views-field-title"]/span[@class="field-content"]/a')
for (d,l) in zip(dateelems,linkelems):
entry = dict()
myDate = d.text.split()
urlname = l.get('href')
nodenum = re.search("\d+",urlname).group()
dest = destpath+nodenum+".html"
urllib.urlretrieve (urlname,dest)
entry["Day"] = myDate[0]
entry["Month"] = myDate[1]
entry["Year"] = myDate[2]
entry["Local"] = dest
entry["Remote"] = urlname
entry["Title"] = l.text.encode("utf-8")
c.writerow(entry)
print entry
fp.close()
if __name__ == '__main__':
basepath = '/tmp/out'
print "foo"
nfiles = 1219
pool = Pool(processes=40) # start 40 worker processes
pool.map(scrapeSingle,range(1,nfiles+1))
destination = open('/tmp/files.csv','wb')
basepath = '/tmp/parsed'
print "Merging records into single csv file..."
for i in range(1,nfiles):
shutil.copyfileobj(open(basepath+str(i)+'.csv','rb'), destination)
destination.close()
print "...complete!" | from lxml import etree
from multiprocessing import Pool
import shutil
import csv
import urllib
import re
def scrapeSingle(num):
f = open('/tmp/parsed'+str(num)+'.csv','w')
entries = ["Day","Month","Year","Title","Remote","Local"]
c = csv.DictWriter(f,entries)
baseurl = "http://www.egyptindependent.com/subchannel/News%20features?page="
destpath = "/tmp/"
urllib.urlretrieve (baseurl+str(num),destpath+"page"+str(num)+".html")
print "Retrieved page: " + str(num)
fname = '/tmp/page'+str(num)+'.html'
fp = open(fname, 'rb')
parser = etree.HTMLParser()
tree = etree.parse(fp, parser)
dateelems = tree.xpath('.//div[@class="views-field-field-published-date-value"]/span[@class="field-content"]/span[@class="date-display-single"]')
linkelems = tree.xpath('.//div[@class="panel-pane pane-views pane-subchannel-news subchannel-pane"]//div[@class="views-field-title"]/span[@class="field-content"]/a')
for (d,l) in zip(dateelems,linkelems):
entry = dict()
myDate = d.text.split()
urlname = l.get('href')
nodenum = re.search("\d+",urlname).group()
dest = destpath+nodenum+".html"
urllib.urlretrieve (urlname,dest)
entry["Day"] = myDate[0]
entry["Month"] = myDate[1]
entry["Year"] = myDate[2]
entry["Local"] = dest
entry["Remote"] = urlname
entry["Title"] = l.text.encode("utf-8")
c.writerow(entry)
print entry
fp.close()
if __name__ == '__main__':
basepath = '/tmp/out'
print "foo"
nfiles = 1219
pool = Pool(processes=40) # start 40 worker processes
pool.map(scrapeSingle,range(1,nfiles+1))
destination = open('/tmp/files.csv','wb')
basepath = '/tmp/parsed'
print "Merging records into single csv file..."
for i in range(1,nfiles):
shutil.copyfileobj(open(basepath+str(i)+'.csv','rb'), destination)
destination.close()
print "...complete!" | en | 0.834889 | # start 40 worker processes | 2.803636 | 3 |
pyroms_toolbox/pyroms_toolbox/BGrid_SODA/BGrid_SODA.py | bilgetutak/pyroms | 75 | 6630716 | <gh_stars>10-100
import numpy as np
from mpl_toolkits.basemap import pyproj
from datetime import datetime
try:
import netCDF4 as netCDF
except:
import netCDF3 as netCDF
import pyroms
class BGrid_SODA(object):
"""
BGrid object for SODA
"""
def __init__(self, lon_t, lat_t, lon_uv, lat_uv, mask_t, mask_uv, depth, depth_bnds, h, name, xrange, yrange):
self.name = name
self.xrange = xrange
self.yrange = yrange
self.h = h[yrange[0]:yrange[1]+1, xrange[0]:xrange[1]+1]
lon_t, lat_t = np.meshgrid(lon_t, lat_t)
lon_uv, lat_uv = np.meshgrid(lon_uv, lat_uv)
self.lon_t = lon_t[yrange[0]:yrange[1]+1, xrange[0]:xrange[1]+1]
self.lat_t = lat_t[yrange[0]:yrange[1]+1, xrange[0]:xrange[1]+1]
self.lon_uv = lon_uv[yrange[0]:yrange[1]+1, xrange[0]:xrange[1]+1]
self.lat_uv = lat_uv[yrange[0]:yrange[1]+1, xrange[0]:xrange[1]+1]
self.lon_t_vert = 0.5 * (lon_t[yrange[0]-1:yrange[1]+1, xrange[0]-1:xrange[1]+1] + \
lon_t[yrange[0]:yrange[1]+2, xrange[0]:xrange[1]+2])
self.lat_t_vert = 0.5 * (lat_t[yrange[0]-1:yrange[1]+1, xrange[0]-1:xrange[1]+1] + \
lat_t[yrange[0]:yrange[1]+2, xrange[0]:xrange[1]+2])
self.lon_uv_vert = 0.5 * (lon_uv[yrange[0]-1:yrange[1]+1, xrange[0]-1:xrange[1]+1] + \
lon_uv[yrange[0]:yrange[1]+2, xrange[0]:xrange[1]+2])
self.lat_uv_vert = 0.5 * (lat_uv[yrange[0]-1:yrange[1]+1, xrange[0]-1:xrange[1]+1] + \
lat_uv[yrange[0]:yrange[1]+2, xrange[0]:xrange[1]+2])
self.mask_t = mask_t[:, yrange[0]:yrange[1]+1, xrange[0]:xrange[1]+1]
self.mask_uv = mask_uv[:, yrange[0]:yrange[1]+1, xrange[0]:xrange[1]+1]
self.z_t = np.tile(depth,(self.mask_t.shape[2],self.mask_t.shape[1],1)).T
self.z_t_bnds = np.tile(depth_bnds,(self.mask_t.shape[2],self.mask_t.shape[1],1)).T
| import numpy as np
from mpl_toolkits.basemap import pyproj
from datetime import datetime
try:
import netCDF4 as netCDF
except:
import netCDF3 as netCDF
import pyroms
class BGrid_SODA(object):
"""
BGrid object for SODA
"""
def __init__(self, lon_t, lat_t, lon_uv, lat_uv, mask_t, mask_uv, depth, depth_bnds, h, name, xrange, yrange):
self.name = name
self.xrange = xrange
self.yrange = yrange
self.h = h[yrange[0]:yrange[1]+1, xrange[0]:xrange[1]+1]
lon_t, lat_t = np.meshgrid(lon_t, lat_t)
lon_uv, lat_uv = np.meshgrid(lon_uv, lat_uv)
self.lon_t = lon_t[yrange[0]:yrange[1]+1, xrange[0]:xrange[1]+1]
self.lat_t = lat_t[yrange[0]:yrange[1]+1, xrange[0]:xrange[1]+1]
self.lon_uv = lon_uv[yrange[0]:yrange[1]+1, xrange[0]:xrange[1]+1]
self.lat_uv = lat_uv[yrange[0]:yrange[1]+1, xrange[0]:xrange[1]+1]
self.lon_t_vert = 0.5 * (lon_t[yrange[0]-1:yrange[1]+1, xrange[0]-1:xrange[1]+1] + \
lon_t[yrange[0]:yrange[1]+2, xrange[0]:xrange[1]+2])
self.lat_t_vert = 0.5 * (lat_t[yrange[0]-1:yrange[1]+1, xrange[0]-1:xrange[1]+1] + \
lat_t[yrange[0]:yrange[1]+2, xrange[0]:xrange[1]+2])
self.lon_uv_vert = 0.5 * (lon_uv[yrange[0]-1:yrange[1]+1, xrange[0]-1:xrange[1]+1] + \
lon_uv[yrange[0]:yrange[1]+2, xrange[0]:xrange[1]+2])
self.lat_uv_vert = 0.5 * (lat_uv[yrange[0]-1:yrange[1]+1, xrange[0]-1:xrange[1]+1] + \
lat_uv[yrange[0]:yrange[1]+2, xrange[0]:xrange[1]+2])
self.mask_t = mask_t[:, yrange[0]:yrange[1]+1, xrange[0]:xrange[1]+1]
self.mask_uv = mask_uv[:, yrange[0]:yrange[1]+1, xrange[0]:xrange[1]+1]
self.z_t = np.tile(depth,(self.mask_t.shape[2],self.mask_t.shape[1],1)).T
self.z_t_bnds = np.tile(depth_bnds,(self.mask_t.shape[2],self.mask_t.shape[1],1)).T | en | 0.703169 | BGrid object for SODA | 2.126463 | 2 |
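A small NumPy illustration of the vertex construction idea used above: vertex coordinates are approximated by averaging a cell centre with its diagonally shifted neighbour, mirroring the 0.5 * (A[...] + A[...]) slicing pattern in the class. Toy values only.
import numpy as np

lon = np.arange(5.0)
lat = np.arange(4.0)
lon2d, lat2d = np.meshgrid(lon, lat)
# average each centre with its diagonal neighbour to get an approximate vertex grid
lon_vert = 0.5 * (lon2d[:-1, :-1] + lon2d[1:, 1:])
print(lon_vert.shape)  # one fewer point in each direction than the centres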
hummingbot/strategy/volume_generation/start.py | MahaTrade/hummingbot | 0 | 6630717 | <reponame>MahaTrade/hummingbot
from hummingbot.strategy.market_trading_pair_tuple import MarketTradingPairTuple
from hummingbot.strategy.volume_generation import VolumeGeneration
from hummingbot.strategy.volume_generation.volume_generation_config_map import volume_generation_config_map as c_map
def start(self):
connector = c_map.get("connector").value.lower()
market = c_map.get("market").value
# price = c_map.get("price").value
self._initialize_markets([(connector, [market])])
base, quote = market.split("-")
market_info = MarketTradingPairTuple(self.markets[connector], market, base, quote)
self.market_trading_pair_tuples = [market_info]
self.strategy = VolumeGeneration(market_info)
| from hummingbot.strategy.market_trading_pair_tuple import MarketTradingPairTuple
from hummingbot.strategy.volume_generation import VolumeGeneration
from hummingbot.strategy.volume_generation.volume_generation_config_map import volume_generation_config_map as c_map
def start(self):
connector = c_map.get("connector").value.lower()
market = c_map.get("market").value
# price = c_map.get("price").value
self._initialize_markets([(connector, [market])])
base, quote = market.split("-")
market_info = MarketTradingPairTuple(self.markets[connector], market, base, quote)
self.market_trading_pair_tuples = [market_info]
self.strategy = VolumeGeneration(market_info) | en | 0.154413 | # price = c_map.get("price").value | 2.321201 | 2 |
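A trivial standalone illustration of the pair parsing in start() above; the symbol is illustrative.
market = "ETH-USDT"
base, quote = market.split("-")
print(base, quote)  # ETH USDT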
textvis/settings.py | scclab/textvisdrg-prototype | 0 | 6630718 | """
Django settings for textvis project.
For more information on this file, see
https://docs.djangoproject.com/en/1.7/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.7/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
from os import environ
from path import path
import dj_database_url
BASE_DIR = path(__file__).abspath().realpath().dirname().parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.7/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '<KEY>'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = environ.get('DEBUG', 'False') in ('True', '1')
TEMPLATE_DEBUG = DEBUG
ALLOWED_HOSTS = ['127.0.0.1']
SITE_ID = 1
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.sites',
'debug_toolbar',
'django.contrib.humanize',
'bootstrap3',
'jsonview',
'twitter_stream',
'textvis.textprizm',
'textvis.topics',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'textvis.urls'
WSGI_APPLICATION = 'textvis.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.7/ref/settings/#databases
DATABASES = {
'default': dj_database_url.config(default='sqlite:///%s' % (BASE_DIR / 'development.sqlite'))
}
if DATABASES['default']['ENGINE'] == 'django.db.backends.mysql':
# enable utf8mb4 on mysql
DATABASES['default']['OPTIONS'] = {
'charset': 'utf8mb4',
'init_command': 'SET storage_engine=INNODB',
}
# Internationalization
# https://docs.djangoproject.com/en/1.7/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.7/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = (
BASE_DIR / 'textvis' / 'static',
)
TEMPLATE_DIRS = (
BASE_DIR / 'textvis' / 'templates',
)
TWITTER_STREAM_TWEET_MODEL = 'twitter_stream.Tweet'
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'formatters': {
'simple': {
'format': '%(asctime)s : %(levelname)s : %(message)s'
},
},
'handlers': {
'console': {
'level': 'DEBUG',
'class': 'logging.StreamHandler',
'formatter': 'simple'
},
},
'loggers': {
'django.request': {
'handlers': ['console'],
'level': 'DEBUG' if DEBUG else 'INFO',
'propagate': True,
},
'django.db': {
'handlers': ['console'],
'level': 'WARN',
'propagate': True,
}
},
} | """
Django settings for textvis project.
For more information on this file, see
https://docs.djangoproject.com/en/1.7/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.7/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
from os import environ
from path import path
import dj_database_url
BASE_DIR = path(__file__).abspath().realpath().dirname().parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.7/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '<KEY>'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = environ.get('DEBUG', 'False') in ('True', '1')
TEMPLATE_DEBUG = DEBUG
ALLOWED_HOSTS = ['127.0.0.1']
SITE_ID = 1
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.sites',
'debug_toolbar',
'django.contrib.humanize',
'bootstrap3',
'jsonview',
'twitter_stream',
'textvis.textprizm',
'textvis.topics',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'textvis.urls'
WSGI_APPLICATION = 'textvis.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.7/ref/settings/#databases
DATABASES = {
'default': dj_database_url.config(default='sqlite:///%s' % (BASE_DIR / 'development.sqlite'))
}
if DATABASES['default']['ENGINE'] == 'django.db.backends.mysql':
# enable utf8mb4 on mysql
DATABASES['default']['OPTIONS'] = {
'charset': 'utf8mb4',
'init_command': 'SET storage_engine=INNODB',
}
# Internationalization
# https://docs.djangoproject.com/en/1.7/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.7/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = (
BASE_DIR / 'textvis' / 'static',
)
TEMPLATE_DIRS = (
BASE_DIR / 'textvis' / 'templates',
)
TWITTER_STREAM_TWEET_MODEL = 'twitter_stream.Tweet'
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'formatters': {
'simple': {
'format': '%(asctime)s : %(levelname)s : %(message)s'
},
},
'handlers': {
'console': {
'level': 'DEBUG',
'class': 'logging.StreamHandler',
'formatter': 'simple'
},
},
'loggers': {
'django.request': {
'handlers': ['console'],
'level': 'DEBUG' if DEBUG else 'INFO',
'propagate': True,
},
'django.db': {
'handlers': ['console'],
'level': 'WARN',
'propagate': True,
}
},
} | en | 0.611948 | Django settings for textvis project. For more information on this file, see https://docs.djangoproject.com/en/1.7/topics/settings/ For the full list of settings and their values, see https://docs.djangoproject.com/en/1.7/ref/settings/ # Build paths inside the project like this: os.path.join(BASE_DIR, ...) # Quick-start development settings - unsuitable for production # See https://docs.djangoproject.com/en/1.7/howto/deployment/checklist/ # SECURITY WARNING: keep the secret key used in production secret! # SECURITY WARNING: don't run with debug turned on in production! # Application definition # Database # https://docs.djangoproject.com/en/1.7/ref/settings/#databases # enable utf8mb4 on mysql # Internationalization # https://docs.djangoproject.com/en/1.7/topics/i18n/ # Static files (CSS, JavaScript, Images) # https://docs.djangoproject.com/en/1.7/howto/static-files/ | 1.851688 | 2 |
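A hedged sketch of what dj_database_url resolves a connection URL into, assuming the dj-database-url package is installed; the credentials and database name are illustrative.
import dj_database_url

cfg = dj_database_url.parse('mysql://user:secret@localhost:3306/textvis')
print(cfg['ENGINE'], cfg['NAME'])  # django.db.backends.mysql textvis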
drivers/firmata-dbg.py | prozum/embug | 0 | 6630719 | from PyQt4 import QtCore
dPins = range(14)
aPins = range(18,24)
A0,A1,A2,A3,A4,A5 = aPins
HIGH,OUT = (1,1)
LOW,IN = (0,0)
class Driver():
def __init__(self,sim):
self.sim = sim
def pinMode(self,pin,mode):
self.sim.emit(QtCore.SIGNAL("sPinMode"),pin,mode)
def digitalRead(self,pin):
if pin in dPins:
if not self.sim.pinModes[pin]:
return self.sim.pinValues[pin]
else:
self.error("Pin {0} is not IN!".format(pin))
else:
self.error("Pin {0} is not digital!".format(pin))
def digitalWrite(self,pin,state):
if pin in dPins:
if self.sim.pinModes[pin]:
self.sim.emit(QtCore.SIGNAL("sDWrite"),pin,state)
else:
self.error("Pin {0} is not OUT!".format(pin))
else:
self.error("Pin {0} is not digital!".format(pin))
def analogRead(self,pin):
if pin in aPins:
if not self.sim.pinModes[pin]:
return self.sim.pinValues[pin]
else:
self.error("Pin {0} is not IN!".format(pin))
else:
self.error("Pin {0} is not analog!".format(pin))
def analogWrite(self,pin,state):
if pin in aPins:
if self.sim.pinModes[pin]:
self.sim.emit(QtCore.SIGNAL("sAWrite"),pin,state)
else:
self.error("Pin {0} is not OUT!".format(pin))
else:
self.error("Pin {0} is not analog!".format(pin))
def serialPrint(self,msg):
self.sim.emit(QtCore.SIGNAL("sEcho"),str(msg),False)
def serialPrintln(self,msg):
self.sim.emit(QtCore.SIGNAL("sEcho"),str(msg),True)
def error(self,msg):
self.sim.emit(QtCore.SIGNAL("sEcho"),"ERROR: "+str(msg),True,"red")
| from PyQt4 import QtCore
dPins = range(14)
aPins = range(18,24)
A0,A1,A2,A3,A4,A5 = aPins
HIGH,OUT = (1,1)
LOW,IN = (0,0)
class Driver():
def __init__(self,sim):
self.sim = sim
def pinMode(self,pin,mode):
self.sim.emit(QtCore.SIGNAL("sPinMode"),pin,mode)
def digitalRead(self,pin):
if pin in dPins:
if not self.sim.pinModes[pin]:
return self.sim.pinValues[pin]
else:
self.error("Pin {0} is not IN!".format(pin))
else:
self.error("Pin {0} is not digital!".format(pin))
def digitalWrite(self,pin,state):
if pin in dPins:
if self.sim.pinModes[pin]:
self.sim.emit(QtCore.SIGNAL("sDWrite"),pin,state)
else:
self.error("Pin {0} is not OUT!".format(pin))
else:
self.error("Pin {0} is not digital!".format(pin))
def analogRead(self,pin):
if pin in aPins:
if not self.sim.pinModes[pin]:
return self.sim.pinValues[pin]
else:
self.error("Pin {0} is not IN!".format(pin))
else:
self.error("Pin {0} is not analog!".format(pin))
def analogWrite(self,pin,state):
if pin in aPins:
if self.sim.pinModes[pin]:
self.sim.emit(QtCore.SIGNAL("sAWrite"),pin,state)
else:
self.error("Pin {0} is not OUT!".format(pin))
else:
self.error("Pin {0} is not analog!".format(pin))
def serialPrint(self,msg):
self.sim.emit(QtCore.SIGNAL("sEcho"),str(msg),False)
def serialPrintln(self,msg):
self.sim.emit(QtCore.SIGNAL("sEcho"),str(msg),True)
def error(self,msg):
self.sim.emit(QtCore.SIGNAL("sEcho"),"ERROR: "+str(msg),True,"red")
| none | 1 | 2.788062 | 3 |
|
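A hedged sketch that exercises the Driver above without a real GUI by passing a minimal stand-in for the simulator object; FakeSim is hypothetical, and PyQt4 still has to be importable because the Driver builds QtCore.SIGNAL objects.
class FakeSim(object):
    def __init__(self):
        self.pinModes = [0] * 24   # 0 = IN, matching the constants above
        self.pinValues = [0] * 24
    def emit(self, *args):
        print("emit:", args[1:])   # drop the SIGNAL object for readability

sim = FakeSim()
drv = Driver(sim)                  # Driver class defined above
sim.pinModes[13] = 1               # mark pin 13 as OUT
drv.digitalWrite(13, 1)
print(drv.digitalRead(2))          # pin 2 is IN, so its stored value is returned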
CryptoReturns/forms.py | wjone005/Crypto_Returns | 0 | 6630720 | <filename>CryptoReturns/forms.py
from flask_wtf import FlaskForm
from wtforms import StringField, SubmitField, DecimalField, validators
class Crypto_Form(FlaskForm):
name = StringField("Enter Coin Name", [validators.InputRequired(message="Please enter the Crypto Currency name.")])
intial_investment = DecimalField("Enter Initial Investment", [validators.InputRequired(message="Please enter a number")])
coin_price = DecimalField("Enter Coin Price", [validators.InputRequired(message="Please enter a number")])
submit = SubmitField("Submit") | <filename>CryptoReturns/forms.py
from flask_wtf import FlaskForm
from wtforms import StringField, SubmitField, DecimalField, validators
class Crypto_Form(FlaskForm):
name = StringField("Enter Coin Name", [validators.InputRequired(message="Please enter the Crypto Currency name.")])
intial_investment = DecimalField("Enter Initial Investment", [validators.InputRequired(message="Please enter a number")])
coin_price = DecimalField("Enter Coin Price", [validators.InputRequired(message="Please enter a number")])
submit = SubmitField("Submit") | none | 1 | 2.951551 | 3 |
|
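A hedged sketch of wiring the form above into a Flask view, assuming Crypto_Form from the module above is in scope; the route, template string, and secret key are illustrative, and flask_wtf needs a secret key for CSRF protection.
from flask import Flask, render_template_string

app = Flask(__name__)
app.config['SECRET_KEY'] = 'dev-only'

@app.route('/', methods=['GET', 'POST'])
def index():
    form = Crypto_Form()  # form class defined above
    if form.validate_on_submit():
        coins = form.intial_investment.data / form.coin_price.data
        return 'Coins purchased: {0}'.format(coins)
    return render_template_string(
        '<form method="post">{{ form.hidden_tag() }}{{ form.name() }} '
        '{{ form.intial_investment() }} {{ form.coin_price() }} {{ form.submit() }}</form>',
        form=form)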
tests/links_tests/update_tests/test_megnet_update.py | pfnet/chainerchem | 184 | 6630721 | from chainer import cuda
import numpy
import pytest
from chainer_chemistry.links.update.megnet_update import MEGNetUpdate
# node_size_list means the first moleculae has six nodes,
# and the seconde molecule has four nodes
node_size_list = [6, 4]
# edge_size_list means the first moleculae has eight edges,
# and the seconde molecule has four edges
edge_size_list = [8, 4]
node_feature_dim = 5
edge_feature_dim = 10
global_feature_dim = 2
out_dim = 32
batch_size = 2
@pytest.fixture
def update():
return MEGNetUpdate()
@pytest.fixture
def data():
if len(node_size_list) != batch_size or len(edge_size_list) != batch_size:
raise ValueError("Invalid fixture for MEGNet")
numpy.random.seed(0)
total_node_size = sum(node_size_list)
total_edge_size = sum(edge_size_list)
atom_feat = numpy.random.rand(total_node_size,
node_feature_dim).astype(numpy.float32)
pair_feat = numpy.random.rand(total_edge_size,
edge_feature_dim).astype(numpy.float32)
global_feat = numpy.random.rand(batch_size,
global_feature_dim).astype(numpy.float32)
# atom idx
atom_idx = numpy.hstack([[i] * node_size_list[i]
for i in range(batch_size)]).astype(numpy.int32)
# pair idx
pair_idx = numpy.hstack([[i] * edge_size_list[i]
for i in range(batch_size)]).astype(numpy.int32)
# create start and end idx
edge_idx = []
acc_node_size = [sum(node_size_list[:i+1]) for i in range(batch_size)]
low = numpy.roll(acc_node_size + [0], 1)[0:batch_size+1]
high = numpy.array(acc_node_size)
for i in range(batch_size):
idx = [numpy.random.choice(numpy.arange(low[i], high[i]), 2,
replace=False)
for _ in range(edge_size_list[i])]
edge_idx.extend(idx)
start_idx = numpy.array(edge_idx, dtype=numpy.int32)[:, 0]
end_idx = numpy.array(edge_idx, dtype=numpy.int32)[:, 1]
y_grad_atom = numpy.random.uniform(
-1, 1, (batch_size, out_dim)).astype(numpy.float32)
y_grad_pair = numpy.random.uniform(
-1, 1, (batch_size, out_dim)).astype(numpy.float32)
y_grad_global = numpy.random.uniform(
-1, 1, (batch_size, out_dim)).astype(numpy.float32)
return atom_feat, pair_feat, global_feat, \
atom_idx, pair_idx, start_idx, end_idx, \
y_grad_atom, y_grad_pair, y_grad_global
def check_forward(update, data):
y_actual = [cuda.to_cpu(d.data) for d in update(*data)]
atom_feat, pair_feat, global_feat = y_actual
assert atom_feat.shape == (sum(node_size_list), out_dim)
assert pair_feat.shape == (sum(edge_size_list), out_dim)
assert global_feat.shape == (batch_size, out_dim)
def test_forward_cpu(update, data):
atom_feat, pair_feat, global_feat, \
atom_idx, pair_idx, start_idx, end_idx = data[:-3]
check_forward(update, (atom_feat, pair_feat, global_feat, atom_idx,
pair_idx, start_idx, end_idx))
@pytest.mark.gpu
def test_forward_gpu(update, data):
input_data = [cuda.to_gpu(d) for d in data[:-3]]
update.to_gpu()
check_forward(update, tuple(input_data))
# def test_backward_cpu(update, data):
# input_data, y_grad = data[0:-3], data[-3:]
# gradient_check.check_backward(update, tuple(input_data), tuple(y_grad),
# atol=5e-1, rtol=1e-1)
# @pytest.mark.gpu
# def test_backward_gpu(update, data):
# data = [cuda.to_gpu(d) for d in data]
# input_data, y_grad = data[0:-3], data[-3:]
# update.to_gpu()
# gradient_check.check_backward(update, tuple(input_data), tuple(y_grad),
# atol=5e-1, rtol=1e-1)
if __name__ == '__main__':
pytest.main([__file__, '-v', '-s'])
| from chainer import cuda
import numpy
import pytest
from chainer_chemistry.links.update.megnet_update import MEGNetUpdate
# node_size_list means the first moleculae has six nodes,
# and the seconde molecule has four nodes
node_size_list = [6, 4]
# edge_size_list means the first moleculae has eight edges,
# and the seconde molecule has four edges
edge_size_list = [8, 4]
node_feature_dim = 5
edge_feature_dim = 10
global_feature_dim = 2
out_dim = 32
batch_size = 2
@pytest.fixture
def update():
return MEGNetUpdate()
@pytest.fixture
def data():
if len(node_size_list) != batch_size or len(edge_size_list) != batch_size:
raise ValueError("Invalid fixture for MEGNet")
numpy.random.seed(0)
total_node_size = sum(node_size_list)
total_edge_size = sum(edge_size_list)
atom_feat = numpy.random.rand(total_node_size,
node_feature_dim).astype(numpy.float32)
pair_feat = numpy.random.rand(total_edge_size,
edge_feature_dim).astype(numpy.float32)
global_feat = numpy.random.rand(batch_size,
global_feature_dim).astype(numpy.float32)
# atom idx
atom_idx = numpy.hstack([[i] * node_size_list[i]
for i in range(batch_size)]).astype(numpy.int32)
# pair idx
pair_idx = numpy.hstack([[i] * edge_size_list[i]
for i in range(batch_size)]).astype(numpy.int32)
# create start and end idx
edge_idx = []
acc_node_size = [sum(node_size_list[:i+1]) for i in range(batch_size)]
low = numpy.roll(acc_node_size + [0], 1)[0:batch_size+1]
high = numpy.array(acc_node_size)
for i in range(batch_size):
idx = [numpy.random.choice(numpy.arange(low[i], high[i]), 2,
replace=False)
for _ in range(edge_size_list[i])]
edge_idx.extend(idx)
start_idx = numpy.array(edge_idx, dtype=numpy.int32)[:, 0]
end_idx = numpy.array(edge_idx, dtype=numpy.int32)[:, 1]
y_grad_atom = numpy.random.uniform(
-1, 1, (batch_size, out_dim)).astype(numpy.float32)
y_grad_pair = numpy.random.uniform(
-1, 1, (batch_size, out_dim)).astype(numpy.float32)
y_grad_global = numpy.random.uniform(
-1, 1, (batch_size, out_dim)).astype(numpy.float32)
return atom_feat, pair_feat, global_feat, \
atom_idx, pair_idx, start_idx, end_idx, \
y_grad_atom, y_grad_pair, y_grad_global
def check_forward(update, data):
y_actual = [cuda.to_cpu(d.data) for d in update(*data)]
atom_feat, pair_feat, global_feat = y_actual
assert atom_feat.shape == (sum(node_size_list), out_dim)
assert pair_feat.shape == (sum(edge_size_list), out_dim)
assert global_feat.shape == (batch_size, out_dim)
def test_forward_cpu(update, data):
atom_feat, pair_feat, global_feat, \
atom_idx, pair_idx, start_idx, end_idx = data[:-3]
check_forward(update, (atom_feat, pair_feat, global_feat, atom_idx,
pair_idx, start_idx, end_idx))
@pytest.mark.gpu
def test_forward_gpu(update, data):
input_data = [cuda.to_gpu(d) for d in data[:-3]]
update.to_gpu()
check_forward(update, tuple(input_data))
# def test_backward_cpu(update, data):
# input_data, y_grad = data[0:-3], data[-3:]
# gradient_check.check_backward(update, tuple(input_data), tuple(y_grad),
# atol=5e-1, rtol=1e-1)
# @pytest.mark.gpu
# def test_backward_gpu(update, data):
# data = [cuda.to_gpu(d) for d in data]
# input_data, y_grad = data[0:-3], data[-3:]
# update.to_gpu()
# gradient_check.check_backward(update, tuple(input_data), tuple(y_grad),
# atol=5e-1, rtol=1e-1)
if __name__ == '__main__':
pytest.main([__file__, '-v', '-s'])
| en | 0.375429 | # node_size_list means the first moleculae has six nodes, # and the seconde molecule has four nodes # edge_size_list means the first moleculae has eight edges, # and the seconde molecule has four edges # atom idx # pair idx # create start and end idx # def test_backward_cpu(update, data): # input_data, y_grad = data[0:-3], data[-3:] # gradient_check.check_backward(update, tuple(input_data), tuple(y_grad), # atol=5e-1, rtol=1e-1) # @pytest.mark.gpu # def test_backward_gpu(update, data): # data = [cuda.to_gpu(d) for d in data] # input_data, y_grad = data[0:-3], data[-3:] # update.to_gpu() # gradient_check.check_backward(update, tuple(input_data), tuple(y_grad), # atol=5e-1, rtol=1e-1) | 1.949879 | 2 |
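A small standalone illustration of the batch-index construction used in the fixture above: every node gets the id of the graph it belongs to, repeated node_size times.
import numpy

node_size_list = [6, 4]
atom_idx = numpy.hstack([[i] * n for i, n in enumerate(node_size_list)]).astype(numpy.int32)
print(atom_idx)  # [0 0 0 0 0 0 1 1 1 1]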
balance.py | SamarthPardhi/eth-vanity-address | 0 | 6630722 | import requests
import json
import sys
API_TOKEN = ""
def ether(add):
url = "https://api.etherscan.io/api?module=account&action=balance&address=" + add + "&tag=latest&apikey=" + API_TOKEN
res = requests.get(url)
my_json_string = res.text
to_python = json.loads(my_json_string)
return to_python['result']
def checkBal(filename):
file = open(filename, 'r')
for line in file:
add = line[68:110:].strip()
try:
print(add, ether(add))
except:
return ["Unexpected Error Ocurred"]
file.close()
return None
checkBal(sys.argv[1]) | import requests
import json
import sys
API_TOKEN = ""
def ether(add):
url = "https://api.etherscan.io/api?module=account&action=balance&address=" + add + "&tag=latest&apikey=" + API_TOKEN
res = requests.get(url)
my_json_string = res.text
to_python = json.loads(my_json_string)
return to_python['result']
def checkBal(filename):
file = open(filename, 'r')
for line in file:
add = line[68:110:].strip()
try:
print(add, ether(add))
except:
return ["Unexpected Error Ocurred"]
file.close()
return None
checkBal(sys.argv[1]) | none | 1 | 2.975659 | 3 |
|
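A hedged follow-up to balance.py above: the Etherscan balance endpoint reports the value in wei, so a human-readable figure needs dividing by 10**18. The raw value here is illustrative.
from decimal import Decimal

raw_wei = '1000000000000000000'  # what ether() would return for exactly 1 ETH
print(Decimal(raw_wei) / Decimal(10) ** 18)  # 1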
core/src/main/python/wlsdeploy/testing/stages/system_test_it.py | mwooten/weblogic-deploy-tooling-ct | 0 | 6630723 | """
Copyright (c) 2017, 2018, Oracle and/or its affiliates. All rights reserved.
The Universal Permissive License (UPL), Version 1.0
"""
import unittest
from java.util import HashMap
from wlsdeploy.testing import testing_helper
from wlsdeploy.testing.common import system_test_support, testing_helper
from wlsdeploy.testing.common.system_test_support import SystemTestSupport
from wlsdeploy.testing.define.test_def_settings import TestDefSettings
from wlsdeploy.testing.define.test_def_stage import TestDefStage
from wlsdeploy.testing.exception import exception_helper
from wlsdeploy.util import dictionary_utils
class SystemTestIT(unittest.TestCase):
"""
"""
_class_name = 'SystemTestIT'
_DEFAULT_WAIT_TIME_SECS = 3
A2C_DEV_TESTING_MODE_ENVVAR = 'A2C_DEV_TESTING_MODE'
BUILD_DIR_ENVVAR = 'BUILD_DIR'
SOURCE_DOMAIN_NAME_ENVVAR = 'SOURCE_DOMAIN_NAME'
SUPPORTED_VERSIONS_ENVVAR = 'SUPPORTED_VERSIONS'
TARGET_DOMAIN_NAME_ENVVAR = 'TARGET_DOMAIN_NAME'
TEST_DEF_FILE_ENVVAR = 'TEST_DEF_FILE'
USER_TESTS_TO_RUN_ENVVAR = 'USER_TESTS_TO_RUN'
WAIT_BETWEEN_PHASES_SECS_ENVVAR = 'WAIT_BETWEEN_PHASES_SECS'
def __init__(self, step_name, stage, test_def, logger):
self._step_name = step_name
self._stage = stage
self._test_def = test_def
self._logger = logger
self._env = HashMap()
step_names_file_name = stage.get_field_value(TestDefStage.STEP_NAMES_FILE)
self._step_names_map = SystemTestIT.StepNames(step_names_file_name, logger)
test_name = self._step_names_map.get_step_name(step_name).get_test_name()
unittest.TestCase.__init__(self, test_name)
def __getattr__(self, test_method_name):
return getattr(self, 'proxy_method')
def proxy_method(self):
_method_name = 'proxy_method'
self._logger.entering(class_name=self._class_name, method_name=_method_name)
wdtct_home = self._test_def.get_env_var_value('A2C_HOME_ENVVAR')
if wdtct_home is not None:
self._logger.info('wdtct_home={0}', wdtct_home,
class_name=self._class_name, method_name=_method_name)
# self._step_name is actually a key into the step_names map. Use it
# to get the step name object
step_name = self._step_names_map.get_step_name(self._step_name)
source_settings_id = step_name.get_source_image_id()
target_settings_id = step_name.get_target_image_id()
self.run_test(step_name.get_test_name(), self.__get_test_number(self._step_name),
source_settings_id, target_settings_id)
self._logger.exiting(class_name=self._class_name, method_name=_method_name)
return
def setUp(self):
# userTestsToRun = getAndValidateUserTestsToRun(getAndValidateSupportedVersions());
#
# a2cHome = getAndValidateAppToCloudHome();
# testAutomationHome = getAndValidateTestAutomationHome();
# testSupportHome = getAndValidateTestSupportHome();
# this.logDir = getLogDirectory();
# this.loggingPropertiesFile = getLoggingPropertiesFile();
# this.loggingSupportJar = getCanonicalFile(MessageFormat.format(TEST_LOGGING_JAR_LOCATION_TEMPLATE,
# testAutomationHome.getPath()));
# systemTestStdoutLogPolicy = getAndValidateStdoutLogPolicy();
#
# domainParentDir = getAndValidateDomainParentDir();
# sourceDomainName = getSourceDomainName();
# targetDomainName = getTargetDomainName();
# outputDir = getAndValidateOutputDirectory();
# testFile = getAndValidateTestFile();
#
# waitSeconds = validateAndGetIntegerArg(WAIT_BETWEEN_PHASES_SECS_ENVVAR, true);
self.populate_env_map(self._test_def)
def run_test(self, test_name, test_number, source_settings_id, target_settings_id):
_method_name = 'run_test'
self._logger.entering(test_name, test_number, class_name=self._class_name, method_name=_method_name)
self._logger.finer('source_settings_id={0}, target_settings_id={1}',
source_settings_id, target_settings_id,
class_name=self._class_name, method_name=_method_name)
self._logger.info('WLSDPLY-09864',
self._step_name, self._stage.get_name(),
class_name=self._class_name, method_name=_method_name)
source_settings = self._test_def.get_settings(source_settings_id)
target_settings = self._test_def.get_settings(target_settings_id)
self.assertEquals(source_settings.get_domain_type(),
target_settings.get_domain_type())
self._logger.exiting(class_name=self._class_name, method_name=_method_name)
return
def populate_env_map(self, test_def):
stdout_log_policy = test_def.get_env_var_value('STDOUT_LOG_POLICY_ENVVAR')
if stdout_log_policy is not None:
self._env.put(SystemTestSupport.STDOUT_LOG_POLICY_ENVVAR, stdout_log_policy)
log_dir = test_def.get_env_var_value('LOG_DIR_ENVVAR')
if log_dir is not None:
self._env.put(SystemTestSupport.LOG_DIR_ENVVAR, log_dir)
log_properties = test_def.get_env_var_value('LOG_PROPERTIES_ENVVAR')
if log_properties is not None:
self._env.put(SystemTestSupport.LOG_PROPERTIES_ENVVAR, log_properties)
a2c_home = test_def.get_env_var_value('A2C_HOME_ENVVAR')
if a2c_home is not None:
self._env.put(SystemTestSupport.A2C_HOME_ENVVAR, a2c_home)
a2c_log_config = test_def.get_env_var_value('A2C_LOG_CONFIG_ENVVAR')
if a2c_log_config is not None:
self._env.put(SystemTestSupport.A2C_LOG_CONFIG_ENVVAR, a2c_log_config)
a2c_post_classpath = test_def.get_env_var_value('A2C_POST_CLASSPATH_ENVVAR')
if a2c_post_classpath is not None:
self._env.put(SystemTestSupport.A2C_POST_CLASSPATH_ENVVAR, a2c_post_classpath)
java_home = test_def.get_env_var_value('JAVA_HOME_ENVVAR')
if java_home is not None:
self._env.put(SystemTestSupport.JAVA_HOME_ENVVAR, java_home)
test_automation_home = test_def.get_env_var_value('TEST_AUTOMATION_HOME_ENVVAR')
if test_automation_home is not None:
self._env.put(SystemTestSupport.TEST_AUTOMATION_HOME_ENVVAR, test_automation_home)
test_support_home = test_def.get_env_var_value('TEST_SUPPORT_HOME_ENVVAR')
if test_support_home is not None:
self._env.put(SystemTestSupport.TEST_SUPPORT_HOME_ENVVAR, test_support_home)
output_dir = test_def.get_env_var_value('OUTPUT_DIR_ENVVAR')
if output_dir is not None:
self._env.put(SystemTestSupport.OUTPUT_DIR_ENVVAR, output_dir)
java7_home = test_def.get_env_var_value('JAVA7_HOME_ENVVAR')
if java7_home is not None:
self._env.put(SystemTestSupport.JAVA7_HOME_ENVVAR, java7_home)
java8_home = test_def.get_env_var_value('JAVA8_HOME_ENVVAR')
if java8_home is not None:
self._env.put(SystemTestSupport.JAVA8_HOME_ENVVAR, java8_home)
java9_home = test_def.get_env_var_value('JAVA9_HOME_ENVVAR')
if java9_home is not None:
self._env.put(SystemTestSupport.JAVA9_HOME_ENVVAR, java9_home)
domain_parent_dir = test_def.get_env_var_value('DOMAIN_PARENT_DIR_ENVVAR')
if domain_parent_dir is not None:
self._env.put(SystemTestSupport.DOMAIN_PARENT_DIR_ENVVAR, domain_parent_dir)
annotated_prov = test_def.get_env_var_value('ANNOTATED_PROV')
if annotated_prov is not None:
self._env.put(SystemTestSupport.ANNOTATED_PROV, annotated_prov)
####################################################################################
#
# Private methods, private inner classes and static methods only, beyond here please
#
####################################################################################
def __get_build_dir(self):
return self._test_def.get_env_var_value('BUILD_DIR_ENVVAR')
def __get_supported_versions(self):
return self._test_def.get_env_var_value('SUPPORTED_VERSIONS_ENVVAR')
def __get_user_tests_to_run(self):
return self._test_def.get_env_var_value('USER_TESTS_TO_RUN_ENVVAR')
def __get_test_file(self):
test_file = self._test_def.get_env_var_value('TEST_DEF_FILE_ENVVAR')
return system_test_support.get_canonical_file(test_file)
def __get_domain_parent_dir(self):
domain_parent_dir = self._test_def.get_env_var_value('DOMAIN_PARENT_DIR_ENVVAR')
return system_test_support.get_canonical_file(domain_parent_dir)
def __get_source_domain_name(self):
return self._test_def.get_env_var_value('SOURCE_DOMAIN_NAME_ENVVAR')
def __get_target_domain_name(self):
return self._test_def.get_env_var_value('TARGET_DOMAIN_NAME_ENVVAR')
def __validate_and_get_integer_arg(self, env_var_alias, must_be_non_negative):
result = SystemTestIT._DEFAULT_WAIT_TIME_SECS
env_var_value = self._test_def.get_env_var_value(env_var_alias)
if env_var_value is not None:
result = int(env_var_value)
if must_be_non_negative and result < 0:
result = SystemTestIT._DEFAULT_WAIT_TIME_SECS
return result
def __get_test_number(self, step_name):
return self._step_names_map.get_step_name_index(step_name)
class StepNames(object):
"""
"""
_class_name = 'StepNames'
def __init__(self, step_names_file_name, logger):
_method_name = '__init__'
self._logger = logger
self._logger.finer('step_names_file_name={0}',
step_names_file_name,
class_name=self._class_name, method_name=_method_name)
file_dict = testing_helper.translate_file(step_names_file_name, self._logger)
if TestDefStage.STEP_NAMES not in file_dict:
ex = exception_helper.create_system_test_exception('WLSDPLY-09888',
step_names_file_name)
self._logger.throwing(ex, class_name=self._class_name, method_name=_method_name)
raise ex
self._step_names_dict = file_dict[TestDefStage.STEP_NAMES]
def get_step_name(self, step_name):
_method_name = 'get_step_name'
step_name_dict = dictionary_utils.get_dictionary_element(self._step_names_dict, step_name)
if not step_name_dict:
ex = exception_helper.create_system_test_exception('WLSDPLY-09889', step_name)
self._logger.throwing(ex, class_name=self._class_name, method_name=_method_name)
raise ex
return SystemTestIT.StepName(step_name, step_name_dict, self._logger)
def get_step_name_index(self, step_name):
retval = 0
map_keys = self._step_names_dict.keys()
for i in range(0, len(map_keys)):
if map_keys[i] == step_name:
retval = i
break
return retval
class StepName(object):
"""
"""
_class_name = 'StepName'
TEST_NAME = 'test_name'
SOURCE_IMAGE_ID = 'source_image_id'
TARGET_IMAGE_ID = 'target_image_id'
def __init__(self, step_name, step_name_dict, logger):
self._step_name = step_name
self._step_name_dict = step_name_dict
self._logger = logger
def get_test_name(self):
return dictionary_utils.get_element(self._step_name_dict, SystemTestIT.StepName.TEST_NAME)
def get_source_image_id(self):
return dictionary_utils.get_element(self._step_name_dict, SystemTestIT.StepName.SOURCE_IMAGE_ID)
def get_target_image_id(self):
return dictionary_utils.get_element(self._step_name_dict, SystemTestIT.StepName.TARGET_IMAGE_ID)
def _populate_script_args(settings):
args = list()
args.append('-%s' % TestDefSettings.ORACLE_HOME)
args.append('%s' % settings.get_oracle_home())
args.append('-%s' % TestDefSettings.ORACLE_HOME)
args.append('%s' % settings.get_oracle_home())
args.append('-%s' % TestDefSettings.DOMAIN_TYPE)
args.append('%s' % settings.get_domain_type())
args.append('-%s' % TestDefSettings.ARCHIVE_FILE)
args.append('%s' % settings.get_archive_file_name())
if settings.is_field_set(TestDefSettings.DOMAIN_HOME):
args.append('-%s' % TestDefSettings.DOMAIN_HOME)
args.append('%s' % settings.get_domain_home())
if settings.is_field_set(TestDefSettings.MODEL_FILE):
args.append('-%s' % TestDefSettings.MODEL_FILE)
args.append('%s' % settings.get_model_file_name())
if settings.is_field_set(TestDefSettings.WLST_PATH):
args.append('-%s' % TestDefSettings.WLST_PATH)
args.append('%s' % settings.get_wlst_path())
return args
| """
Copyright (c) 2017, 2018, Oracle and/or its affiliates. All rights reserved.
The Universal Permissive License (UPL), Version 1.0
"""
import unittest
from java.util import HashMap
from wlsdeploy.testing import testing_helper
from wlsdeploy.testing.common import system_test_support, testing_helper
from wlsdeploy.testing.common.system_test_support import SystemTestSupport
from wlsdeploy.testing.define.test_def_settings import TestDefSettings
from wlsdeploy.testing.define.test_def_stage import TestDefStage
from wlsdeploy.testing.exception import exception_helper
from wlsdeploy.util import dictionary_utils
class SystemTestIT(unittest.TestCase):
"""
"""
_class_name = 'SystemTestIT'
_DEFAULT_WAIT_TIME_SECS = 3
A2C_DEV_TESTING_MODE_ENVVAR = 'A2C_DEV_TESTING_MODE'
BUILD_DIR_ENVVAR = 'BUILD_DIR'
SOURCE_DOMAIN_NAME_ENVVAR = 'SOURCE_DOMAIN_NAME'
SUPPORTED_VERSIONS_ENVVAR = 'SUPPORTED_VERSIONS'
TARGET_DOMAIN_NAME_ENVVAR = 'TARGET_DOMAIN_NAME'
TEST_DEF_FILE_ENVVAR = 'TEST_DEF_FILE'
USER_TESTS_TO_RUN_ENVVAR = 'USER_TESTS_TO_RUN'
WAIT_BETWEEN_PHASES_SECS_ENVVAR = 'WAIT_BETWEEN_PHASES_SECS'
def __init__(self, step_name, stage, test_def, logger):
self._step_name = step_name
self._stage = stage
self._test_def = test_def
self._logger = logger
self._env = HashMap()
step_names_file_name = stage.get_field_value(TestDefStage.STEP_NAMES_FILE)
self._step_names_map = SystemTestIT.StepNames(step_names_file_name, logger)
test_name = self._step_names_map.get_step_name(step_name).get_test_name()
unittest.TestCase.__init__(self, test_name)
def __getattr__(self, test_method_name):
return getattr(self, 'proxy_method')
def proxy_method(self):
_method_name = 'proxy_method'
self._logger.entering(class_name=self._class_name, method_name=_method_name)
wdtct_home = self._test_def.get_env_var_value('A2C_HOME_ENVVAR')
if wdtct_home is not None:
self._logger.info('wdtct_home={0}', wdtct_home,
class_name=self._class_name, method_name=_method_name)
# self._step_name is actually a key into the step_names map. Use it
# to get the step name object
step_name = self._step_names_map.get_step_name(self._step_name)
source_settings_id = step_name.get_source_image_id()
target_settings_id = step_name.get_target_image_id()
self.run_test(step_name.get_test_name(), self.__get_test_number(self._step_name),
source_settings_id, target_settings_id)
self._logger.exiting(class_name=self._class_name, method_name=_method_name)
return
def setUp(self):
# userTestsToRun = getAndValidateUserTestsToRun(getAndValidateSupportedVersions());
#
# a2cHome = getAndValidateAppToCloudHome();
# testAutomationHome = getAndValidateTestAutomationHome();
# testSupportHome = getAndValidateTestSupportHome();
# this.logDir = getLogDirectory();
# this.loggingPropertiesFile = getLoggingPropertiesFile();
# this.loggingSupportJar = getCanonicalFile(MessageFormat.format(TEST_LOGGING_JAR_LOCATION_TEMPLATE,
# testAutomationHome.getPath()));
# systemTestStdoutLogPolicy = getAndValidateStdoutLogPolicy();
#
# domainParentDir = getAndValidateDomainParentDir();
# sourceDomainName = getSourceDomainName();
# targetDomainName = getTargetDomainName();
# outputDir = getAndValidateOutputDirectory();
# testFile = getAndValidateTestFile();
#
# waitSeconds = validateAndGetIntegerArg(WAIT_BETWEEN_PHASES_SECS_ENVVAR, true);
self.populate_env_map(self._test_def)
def run_test(self, test_name, test_number, source_settings_id, target_settings_id):
_method_name = 'run_test'
self._logger.entering(test_name, test_number, class_name=self._class_name, method_name=_method_name)
self._logger.finer('source_settings_id={0}, target_settings_id={1}',
source_settings_id, target_settings_id,
class_name=self._class_name, method_name=_method_name)
self._logger.info('WLSDPLY-09864',
self._step_name, self._stage.get_name(),
class_name=self._class_name, method_name=_method_name)
source_settings = self._test_def.get_settings(source_settings_id)
target_settings = self._test_def.get_settings(target_settings_id)
self.assertEquals(source_settings.get_domain_type(),
target_settings.get_domain_type())
self._logger.exiting(class_name=self._class_name, method_name=_method_name)
return
def populate_env_map(self, test_def):
stdout_log_policy = test_def.get_env_var_value('STDOUT_LOG_POLICY_ENVVAR')
if stdout_log_policy is not None:
self._env.put(SystemTestSupport.STDOUT_LOG_POLICY_ENVVAR, stdout_log_policy)
log_dir = test_def.get_env_var_value('LOG_DIR_ENVVAR')
if log_dir is not None:
self._env.put(SystemTestSupport.LOG_DIR_ENVVAR, log_dir)
log_properties = test_def.get_env_var_value('LOG_PROPERTIES_ENVVAR')
if log_properties is not None:
self._env.put(SystemTestSupport.LOG_PROPERTIES_ENVVAR, log_properties)
a2c_home = test_def.get_env_var_value('A2C_HOME_ENVVAR')
if a2c_home is not None:
self._env.put(SystemTestSupport.A2C_HOME_ENVVAR, a2c_home)
a2c_log_config = test_def.get_env_var_value('A2C_LOG_CONFIG_ENVVAR')
if a2c_log_config is not None:
self._env.put(SystemTestSupport.A2C_LOG_CONFIG_ENVVAR, a2c_log_config)
a2c_post_classpath = test_def.get_env_var_value('A2C_POST_CLASSPATH_ENVVAR')
if a2c_post_classpath is not None:
self._env.put(SystemTestSupport.A2C_POST_CLASSPATH_ENVVAR, a2c_post_classpath)
java_home = test_def.get_env_var_value('JAVA_HOME_ENVVAR')
if java_home is not None:
self._env.put(SystemTestSupport.JAVA_HOME_ENVVAR, java_home)
test_automation_home = test_def.get_env_var_value('TEST_AUTOMATION_HOME_ENVVAR')
if test_automation_home is not None:
self._env.put(SystemTestSupport.TEST_AUTOMATION_HOME_ENVVAR, test_automation_home)
test_support_home = test_def.get_env_var_value('TEST_SUPPORT_HOME_ENVVAR')
if test_support_home is not None:
self._env.put(SystemTestSupport.TEST_SUPPORT_HOME_ENVVAR, test_support_home)
output_dir = test_def.get_env_var_value('OUTPUT_DIR_ENVVAR')
if output_dir is not None:
self._env.put(SystemTestSupport.OUTPUT_DIR_ENVVAR, output_dir)
java7_home = test_def.get_env_var_value('JAVA7_HOME_ENVVAR')
if java7_home is not None:
self._env.put(SystemTestSupport.JAVA7_HOME_ENVVAR, java7_home)
java8_home = test_def.get_env_var_value('JAVA8_HOME_ENVVAR')
if java8_home is not None:
self._env.put(SystemTestSupport.JAVA8_HOME_ENVVAR, java8_home)
java9_home = test_def.get_env_var_value('JAVA9_HOME_ENVVAR')
if java9_home is not None:
self._env.put(SystemTestSupport.JAVA9_HOME_ENVVAR, java9_home)
domain_parent_dir = test_def.get_env_var_value('DOMAIN_PARENT_DIR_ENVVAR')
if domain_parent_dir is not None:
self._env.put(SystemTestSupport.DOMAIN_PARENT_DIR_ENVVAR, domain_parent_dir)
annotated_prov = test_def.get_env_var_value('ANNOTATED_PROV')
if annotated_prov is not None:
self._env.put(SystemTestSupport.ANNOTATED_PROV, annotated_prov)
####################################################################################
#
# Private methods, private inner classes and static methods only, beyond here please
#
####################################################################################
def __get_build_dir(self):
return self._test_def.get_env_var_value('BUILD_DIR_ENVVAR')
def __get_supported_versions(self):
return self._test_def.get_env_var_value('SUPPORTED_VERSIONS_ENVVAR')
def __get_user_tests_to_run(self):
return self._test_def.get_env_var_value('USER_TESTS_TO_RUN_ENVVAR')
def __get_test_file(self):
test_file = self._test_def.get_env_var_value('TEST_DEF_FILE_ENVVAR')
return system_test_support.get_canonical_file(test_file)
def __get_domain_parent_dir(self):
domain_parent_dir = self._test_def.get_env_var_value('DOMAIN_PARENT_DIR_ENVVAR')
return system_test_support.get_canonical_file(domain_parent_dir)
def __get_source_domain_name(self):
return self._test_def.get_env_var_value('SOURCE_DOMAIN_NAME_ENVVAR')
def __get_target_domain_name(self):
return self._test_def.get_env_var_value('TARGET_DOMAIN_NAME_ENVVAR')
def __validate_and_get_integer_arg(self, env_var_alias, must_be_non_negative):
result = SystemTestIT._DEFAULT_WAIT_TIME_SECS
env_var_value = self._test_def.get_env_var_value(env_var_alias)
if env_var_value is not None:
result = int(env_var_value)
if must_be_non_negative and result < 0:
result = SystemTestIT._DEFAULT_WAIT_TIME_SECS
return result
def __get_test_number(self, step_name):
return self._step_names_map.get_step_name_index(step_name)
class StepNames(object):
"""
"""
_class_name = 'StepNames'
def __init__(self, step_names_file_name, logger):
_method_name = '__init__'
self._logger = logger
self._logger.finer('step_names_file_name={0}',
step_names_file_name,
class_name=self._class_name, method_name=_method_name)
file_dict = testing_helper.translate_file(step_names_file_name, self._logger)
if TestDefStage.STEP_NAMES not in file_dict:
ex = exception_helper.create_system_test_exception('WLSDPLY-09888',
step_names_file_name)
self._logger.throwing(ex, class_name=self._class_name, method_name=_method_name)
raise ex
self._step_names_dict = file_dict[TestDefStage.STEP_NAMES]
def get_step_name(self, step_name):
_method_name = 'get_step_name'
step_name_dict = dictionary_utils.get_dictionary_element(self._step_names_dict, step_name)
if not step_name_dict:
ex = exception_helper.create_system_test_exception('WLSDPLY-09889', step_name)
self._logger.throwing(ex, class_name=self._class_name, method_name=_method_name)
raise ex
return SystemTestIT.StepName(step_name, step_name_dict, self._logger)
def get_step_name_index(self, step_name):
retval = 0
map_keys = self._step_names_dict.keys()
for i in range(0, len(map_keys)):
if map_keys[i] == step_name:
retval = i
break
return retval
class StepName(object):
"""
"""
_class_name = 'StepName'
TEST_NAME = 'test_name'
SOURCE_IMAGE_ID = 'source_image_id'
TARGET_IMAGE_ID = 'target_image_id'
def __init__(self, step_name, step_name_dict, logger):
self._step_name = step_name
self._step_name_dict = step_name_dict
self._logger = logger
def get_test_name(self):
return dictionary_utils.get_element(self._step_name_dict, SystemTestIT.StepName.TEST_NAME)
def get_source_image_id(self):
return dictionary_utils.get_element(self._step_name_dict, SystemTestIT.StepName.SOURCE_IMAGE_ID)
def get_target_image_id(self):
return dictionary_utils.get_element(self._step_name_dict, SystemTestIT.StepName.TARGET_IMAGE_ID)
def _populate_script_args(settings):
args = list()
    args.append('-%s' % TestDefSettings.ORACLE_HOME)
    args.append('%s' % settings.get_oracle_home())
args.append('-%s' % TestDefSettings.DOMAIN_TYPE)
args.append('%s' % settings.get_domain_type())
args.append('-%s' % TestDefSettings.ARCHIVE_FILE)
args.append('%s' % settings.get_archive_file_name())
if settings.is_field_set(TestDefSettings.DOMAIN_HOME):
args.append('-%s' % TestDefSettings.DOMAIN_HOME)
args.append('%s' % settings.get_domain_home())
if settings.is_field_set(TestDefSettings.MODEL_FILE):
args.append('-%s' % TestDefSettings.MODEL_FILE)
args.append('%s' % settings.get_model_file_name())
if settings.is_field_set(TestDefSettings.WLST_PATH):
args.append('-%s' % TestDefSettings.WLST_PATH)
args.append('%s' % settings.get_wlst_path())
return args
| en | 0.364509 | Copyright (c) 2017, 2018, Oracle and/or its affiliates. All rights reserved. The Universal Permissive License (UPL), Version 1.0 # self._step_name is actually a key into the step_names map. Use it # to get the step name object # userTestsToRun = getAndValidateUserTestsToRun(getAndValidateSupportedVersions()); # # a2cHome = getAndValidateAppToCloudHome(); # testAutomationHome = getAndValidateTestAutomationHome(); # testSupportHome = getAndValidateTestSupportHome(); # this.logDir = getLogDirectory(); # this.loggingPropertiesFile = getLoggingPropertiesFile(); # this.loggingSupportJar = getCanonicalFile(MessageFormat.format(TEST_LOGGING_JAR_LOCATION_TEMPLATE, # testAutomationHome.getPath())); # systemTestStdoutLogPolicy = getAndValidateStdoutLogPolicy(); # # domainParentDir = getAndValidateDomainParentDir(); # sourceDomainName = getSourceDomainName(); # targetDomainName = getTargetDomainName(); # outputDir = getAndValidateOutputDirectory(); # testFile = getAndValidateTestFile(); # # waitSeconds = validateAndGetIntegerArg(WAIT_BETWEEN_PHASES_SECS_ENVVAR, true); #################################################################################### # # Private methods, private inner classes and static methods only, beyond here please # #################################################################################### | 2.05869 | 2 |
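Illustrative sketch (not part of the row above): the StepNames class in this file resolves a step name to its test method and to its position in the step-names file. The stand-in dictionary below mimics what testing_helper.translate_file() presumably returns; the sample step names and image ids are made-up values for illustration only.
from collections import OrderedDict

# Hypothetical translated step-names file (ordering preserved, since the index lookup relies on it)
step_names = OrderedDict([
    ('step-1', {'test_name': 'testDiscoverDomain',
                'source_image_id': 'image-12.2.1.3',
                'target_image_id': 'image-14.1.1.0'}),
    ('step-2', {'test_name': 'testCreateDomain',
                'source_image_id': 'image-12.2.1.3',
                'target_image_id': 'image-12.2.1.3'}),
])

def get_step_name_index(name):
    # Mirrors StepNames.get_step_name_index(): position of the key, 0 if it is not present
    for i, key in enumerate(step_names.keys()):
        if key == name:
            return i
    return 0

entry = step_names['step-2']
print(entry['test_name'], get_step_name_index('step-2'))   # testCreateDomain 1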
Abstraction/GraphWrapper.py | xiaoningdu/deepstellar | 10 | 6630724 | <gh_stars>1-10
import numpy as np
from Abstraction.DTMCGraph import DTMCGraph
import json
class GraphWrapper:
def __init__(self, stateAbst, fake_initial=-1):
self.graph = DTMCGraph(fake_initial)
self.stateAbst = stateAbst
def build_model(self, label_dir=None):
"""
Build model for a specific configuration
:label_dir: file of the label profiling, currently not used.
"""
pca_fit = self.stateAbst.get_pca_trans_data()
if label_dir:
with open(label_dir) as f:
translation_all = json.load(f)
else:
translation_all = None
if translation_all: # if with labels
for i in range(len(pca_fit)):
seq = pca_fit[i]
trans = translation_all[i]
assert len(seq) == len(trans)
self.build_step(seq, trans)
else: # if without labels
for i in range(len(pca_fit)):
seq = pca_fit[i]
self.build_step(seq, None)
# break
# del pca_fit
# del translation_all
# self.graph.draw_graph("0", "DTMC")
# g_warp.graph.transitions = None
self.extend_to_k_step() # extend the graph to the steps
self.graph.init_k_step_idx(self.stateAbst.n_step)
# g_warp.visit_graph('', [0]*500, 'k-step')
# g_warp.visit_graph(pca_fit[0], [0]*2000, 'transition')
# os.makedirs(save2folder, exist_ok=True)
def build_step(self, seq, labels=None):
"""
Add a sequence of state vectors to the graph, the vectors are usually transformed by PCA model
:param seq: the sequence of state vectors
:param labels: labels for the transitions, currently not used
"""
transition_seq_name = self.stateAbst.data_transform(seq) # make abstraction without PCA transformation
if labels is None:
labels = ['-']*len(seq)
self.graph.add_ordered_transitions(transition_seq_name, labels)
del transition_seq_name
def extend_to_k_step(self):
"""
Extend the graph to k step states
"""
if self.stateAbst.n_step <= 0:
return
moves = enumerate_manhattan(self.stateAbst.dimension, self.stateAbst.n_step)
step_out_dic = {}
for state_name, _ in self.graph.states.items():
if state_name != -1:
decoded_vec = self.stateAbst.coder.decode(state_name)
for move in moves:
step_out = list(np.array(decoded_vec)+np.array(move))
step_out = self.stateAbst.coder.encode(step_out)
step = abs_sum(move)
if step_out in step_out_dic:
if step_out_dic[step_out] > step:
step_out_dic[step_out] = step
else:
step_out_dic[step_out] = step
step_out_seq = []
step_seq = []
for step_out, step in step_out_dic.items():
step_out_seq.append(step_out)
step_seq.append(step)
self.graph.add_other_states(step_out_seq, step_seq)
def visit_graph(self, state_seq, cnt_states, mode, return_set=False):
"""
Update the coverage for a specific sequence
:param state_seq: the state vector sequence
:param cnt_states: current coverage
:param mode: which coverage criteria
:param return_set: whether to return the set of covered state/transition id
:return: the cnt_states will be updated
"""
transition_seq_name = self.stateAbst.data_transform(state_seq, pca_transform=True)
if mode == 'state':
self.graph.to_cover_major_states(transition_seq_name, cnt_states, return_set=return_set)
elif mode == 'k-step':
self.graph.to_cover_k_step(transition_seq_name, cnt_states, return_set=return_set)
elif mode == 'transition':
self.graph.to_cover_transitions(transition_seq_name, cnt_states, return_set=return_set)
def enumerate_manhattan(dim, k):
"""
:param dim: dimension of the space
:param k: max step-out
:return: the set of all possible moves with in k steps
"""
vec = [0] * dim
covered_list = []
queue = [vec]
while queue:
cur_vec = queue.pop(0)
if cur_vec not in covered_list:
covered_list.append(cur_vec)
for i in range(len(cur_vec)):
tmp = cur_vec.copy()
tmp[i] += 1
if abs_sum(tmp) <= k:
queue.append(tmp)
tmp = cur_vec.copy()
tmp[i] -= 1
if abs_sum(tmp) <= k:
queue.append(tmp)
covered_list.remove(vec)
return covered_list
def abs_sum(vec):
return sum([abs(i) for i in vec])
| import numpy as np
from Abstraction.DTMCGraph import DTMCGraph
import json
class GraphWrapper:
def __init__(self, stateAbst, fake_initial=-1):
self.graph = DTMCGraph(fake_initial)
self.stateAbst = stateAbst
def build_model(self, label_dir=None):
"""
Build model for a specific configuration
:label_dir: file of the label profiling, currently not used.
"""
pca_fit = self.stateAbst.get_pca_trans_data()
if label_dir:
with open(label_dir) as f:
translation_all = json.load(f)
else:
translation_all = None
if translation_all: # if with labels
for i in range(len(pca_fit)):
seq = pca_fit[i]
trans = translation_all[i]
assert len(seq) == len(trans)
self.build_step(seq, trans)
else: # if without labels
for i in range(len(pca_fit)):
seq = pca_fit[i]
self.build_step(seq, None)
# break
# del pca_fit
# del translation_all
# self.graph.draw_graph("0", "DTMC")
# g_warp.graph.transitions = None
self.extend_to_k_step() # extend the graph to the steps
self.graph.init_k_step_idx(self.stateAbst.n_step)
# g_warp.visit_graph('', [0]*500, 'k-step')
# g_warp.visit_graph(pca_fit[0], [0]*2000, 'transition')
# os.makedirs(save2folder, exist_ok=True)
def build_step(self, seq, labels=None):
"""
Add a sequence of state vectors to the graph, the vectors are usually transformed by PCA model
:param seq: the sequence of state vectors
:param labels: labels for the transitions, currently not used
"""
transition_seq_name = self.stateAbst.data_transform(seq) # make abstraction without PCA transformation
if labels is None:
labels = ['-']*len(seq)
self.graph.add_ordered_transitions(transition_seq_name, labels)
del transition_seq_name
def extend_to_k_step(self):
"""
Extend the graph to k step states
"""
if self.stateAbst.n_step <= 0:
return
moves = enumerate_manhattan(self.stateAbst.dimension, self.stateAbst.n_step)
step_out_dic = {}
for state_name, _ in self.graph.states.items():
if state_name != -1:
decoded_vec = self.stateAbst.coder.decode(state_name)
for move in moves:
step_out = list(np.array(decoded_vec)+np.array(move))
step_out = self.stateAbst.coder.encode(step_out)
step = abs_sum(move)
if step_out in step_out_dic:
if step_out_dic[step_out] > step:
step_out_dic[step_out] = step
else:
step_out_dic[step_out] = step
step_out_seq = []
step_seq = []
for step_out, step in step_out_dic.items():
step_out_seq.append(step_out)
step_seq.append(step)
self.graph.add_other_states(step_out_seq, step_seq)
def visit_graph(self, state_seq, cnt_states, mode, return_set=False):
"""
Update the coverage for a specific sequence
:param state_seq: the state vector sequence
:param cnt_states: current coverage
:param mode: which coverage criteria
:param return_set: whether to return the set of covered state/transition id
:return: the cnt_states will be updated
"""
transition_seq_name = self.stateAbst.data_transform(state_seq, pca_transform=True)
if mode == 'state':
self.graph.to_cover_major_states(transition_seq_name, cnt_states, return_set=return_set)
elif mode == 'k-step':
self.graph.to_cover_k_step(transition_seq_name, cnt_states, return_set=return_set)
elif mode == 'transition':
self.graph.to_cover_transitions(transition_seq_name, cnt_states, return_set=return_set)
def enumerate_manhattan(dim, k):
"""
:param dim: dimension of the space
:param k: max step-out
:return: the set of all possible moves with in k steps
"""
vec = [0] * dim
covered_list = []
queue = [vec]
while queue:
cur_vec = queue.pop(0)
if cur_vec not in covered_list:
covered_list.append(cur_vec)
for i in range(len(cur_vec)):
tmp = cur_vec.copy()
tmp[i] += 1
if abs_sum(tmp) <= k:
queue.append(tmp)
tmp = cur_vec.copy()
tmp[i] -= 1
if abs_sum(tmp) <= k:
queue.append(tmp)
covered_list.remove(vec)
return covered_list
def abs_sum(vec):
return sum([abs(i) for i in vec]) | en | 0.718613 | Build model for a specific configuration :label_dir: file of the label profiling, currently not used. # if with labels # if without labels # break # del pca_fit # del translation_all # self.graph.draw_graph("0", "DTMC") # g_warp.graph.transitions = None # extend the graph to the steps # g_warp.visit_graph('', [0]*500, 'k-step') # g_warp.visit_graph(pca_fit[0], [0]*2000, 'transition') # os.makedirs(save2folder, exist_ok=True) Add a sequence of state vectors to the graph, the vectors are usually transformed by PCA model :param seq: the sequence of state vectors :param labels: labels for the transitions, currently not used # make abstraction without PCA transformation Extend the graph to k step states Update the coverage for a specific sequence :param state_seq: the state vector sequence :param cnt_states: current coverage :param mode: which coverage criteria :param return_set: whether to return the set of covered state/transition id :return: the cnt_states will be updated :param dim: dimension of the space :param k: max step-out :return: the set of all possible moves with in k steps | 2.422877 | 2 |
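Illustrative sketch (not part of the row above): the two module-level helpers at the end of GraphWrapper.py are pure functions, so their behaviour is easy to check in isolation. The re-statement below is functionally equivalent to the originals and shows the expected neighbourhood sizes.
def abs_sum(vec):
    return sum(abs(i) for i in vec)

def enumerate_manhattan(dim, k):
    # Breadth-first enumeration of every lattice move whose Manhattan norm is <= k,
    # excluding the all-zero move itself.
    origin = [0] * dim
    covered, queue = [], [origin]
    while queue:
        cur = queue.pop(0)
        if cur in covered:
            continue
        covered.append(cur)
        for i in range(dim):
            for step in (1, -1):
                nxt = list(cur)
                nxt[i] += step
                if abs_sum(nxt) <= k:
                    queue.append(nxt)
    covered.remove(origin)
    return covered

print(len(enumerate_manhattan(2, 1)))   # 4: the moves (+1,0), (-1,0), (0,+1), (0,-1)
print(len(enumerate_manhattan(2, 2)))   # 12: all points with 0 < |x| + |y| <= 2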
gamestonk_terminal/stocks/options/yfinance_view.py | Flodur871/GamestonkTerminal | 1 | 6630725 | <gh_stars>1-10
"""Yfinance options view"""
__docformat__ = "numpy"
import os
from bisect import bisect_left
from typing import List, Dict, Any
from datetime import datetime, date, timedelta
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
import numpy as np
import pandas as pd
import seaborn as sns
import yfinance as yf
from tabulate import tabulate
import gamestonk_terminal.config_plot as cfp
import gamestonk_terminal.feature_flags as gtff
from gamestonk_terminal.helper_funcs import export_data, plot_autoscale
from gamestonk_terminal.stocks.options import op_helpers, yfinance_model
from gamestonk_terminal.stocks.options.yfinance_model import (
generate_data,
get_option_chain,
get_price,
)
from gamestonk_terminal.helper_funcs import get_rf
def plot_oi(
ticker: str,
expiry: str,
min_sp: float,
max_sp: float,
calls_only: bool,
puts_only: bool,
export: str,
):
"""Plot open interest
Parameters
----------
ticker: str
Ticker
expiry: str
Expiry date for options
min_sp: float
Min strike to consider
max_sp: float
Max strike to consider
calls_only: bool
Show calls only
puts_only: bool
Show puts only
export: str
Format to export file
"""
options = yfinance_model.get_option_chain(ticker, expiry)
export_data(
export,
os.path.dirname(os.path.abspath(__file__)),
"oi_yf",
options,
)
calls = options.calls
puts = options.puts
current_price = float(yf.Ticker(ticker).info["regularMarketPrice"])
if min_sp == -1:
min_strike = 0.75 * current_price
else:
min_strike = min_sp
if max_sp == -1:
max_strike = 1.25 * current_price
else:
max_strike = max_sp
if calls_only and puts_only:
print("Both flags selected, please select one", "\n")
return
call_oi = calls.set_index("strike")["openInterest"] / 1000
put_oi = puts.set_index("strike")["openInterest"] / 1000
df_opt = pd.merge(call_oi, put_oi, left_index=True, right_index=True)
df_opt = df_opt.rename(
columns={"openInterest_x": "OI_call", "openInterest_y": "OI_put"}
)
max_pain = op_helpers.calculate_max_pain(df_opt)
plt.style.use("classic")
fig, ax = plt.subplots(figsize=plot_autoscale(), dpi=cfp.PLOT_DPI)
if not calls_only:
put_oi.plot(
x="strike",
y="openInterest",
label="Puts",
ax=ax,
marker="o",
ls="-",
c="r",
)
if not puts_only:
call_oi.plot(
x="strike",
y="openInterest",
label="Calls",
ax=ax,
marker="o",
ls="-",
c="g",
)
ax.axvline(
current_price, lw=2, c="k", ls="--", label="Current Price", alpha=0.7
)
ax.axvline(max_pain, lw=3, c="k", label=f"Max Pain: {max_pain}", alpha=0.7)
ax.grid("on")
ax.set_xlabel("Strike Price")
ax.set_ylabel("Open Interest (1k) ")
ax.set_xlim(min_strike, max_strike)
if gtff.USE_ION:
plt.ion()
ax.set_title(f"Open Interest for {ticker.upper()} expiring {expiry}")
plt.legend(loc=0)
fig.tight_layout(pad=1)
plt.show()
plt.style.use("default")
print("")
def plot_vol(
ticker: str,
expiry: str,
min_sp: float,
max_sp: float,
calls_only: bool,
puts_only: bool,
export: str,
):
"""Plot volume
Parameters
----------
ticker: str
Ticker
expiry: str
Expiry date for options
min_sp: float
Min strike to consider
max_sp: float
Max strike to consider
calls_only: bool
Show calls only
puts_only: bool
Show puts only
export: str
Format to export file
"""
options = yfinance_model.get_option_chain(ticker, expiry)
export_data(
export,
os.path.dirname(os.path.abspath(__file__)),
"vol_yf",
options,
)
calls = options.calls
puts = options.puts
current_price = float(yf.Ticker(ticker).info["regularMarketPrice"])
if min_sp == -1:
min_strike = 0.75 * current_price
else:
min_strike = min_sp
if max_sp == -1:
max_strike = 1.25 * current_price
else:
max_strike = max_sp
if calls_only and puts_only:
print("Both flags selected, please select one", "\n")
return
call_v = calls.set_index("strike")["volume"] / 1000
put_v = puts.set_index("strike")["volume"] / 1000
plt.style.use("classic")
fig, ax = plt.subplots(figsize=plot_autoscale(), dpi=cfp.PLOT_DPI)
if not calls_only:
put_v.plot(
x="strike",
y="volume",
label="Puts",
ax=ax,
marker="o",
ls="-",
c="r",
)
if not puts_only:
call_v.plot(
x="strike",
y="volume",
label="Calls",
ax=ax,
marker="o",
ls="-",
c="g",
)
ax.axvline(current_price, lw=2, c="k", ls="--", label="Current Price", alpha=0.7)
ax.grid("on")
ax.set_xlabel("Strike Price")
ax.set_ylabel("Volume (1k) ")
ax.set_xlim(min_strike, max_strike)
if gtff.USE_ION:
plt.ion()
ax.set_title(f"Volume for {ticker.upper()} expiring {expiry}")
plt.legend(loc=0)
fig.tight_layout(pad=1)
plt.show()
plt.style.use("default")
print("")
def plot_volume_open_interest(
ticker: str,
expiry: str,
min_sp: float,
max_sp: float,
min_vol: float,
export: str,
):
"""Plot volume and open interest
Parameters
----------
ticker: str
Stock ticker
expiry: str
Option expiration
min_sp: float
Min strike price
max_sp: float
Max strike price
min_vol: float
Min volume to consider
export: str
Format for exporting data
"""
options = yfinance_model.get_option_chain(ticker, expiry)
export_data(
export,
os.path.dirname(os.path.abspath(__file__)),
"voi_yf",
options,
)
calls = options.calls
puts = options.puts
current_price = float(yf.Ticker(ticker).info["regularMarketPrice"])
# Process Calls Data
df_calls = calls.pivot_table(
index="strike", values=["volume", "openInterest"], aggfunc="sum"
).reindex()
df_calls["strike"] = df_calls.index
df_calls["type"] = "calls"
df_calls["openInterest"] = df_calls["openInterest"]
df_calls["volume"] = df_calls["volume"]
df_calls["oi+v"] = df_calls["openInterest"] + df_calls["volume"]
df_calls["spot"] = round(current_price, 2)
df_puts = puts.pivot_table(
index="strike", values=["volume", "openInterest"], aggfunc="sum"
).reindex()
df_puts["strike"] = df_puts.index
df_puts["type"] = "puts"
df_puts["openInterest"] = df_puts["openInterest"]
df_puts["volume"] = -df_puts["volume"]
df_puts["openInterest"] = -df_puts["openInterest"]
df_puts["oi+v"] = df_puts["openInterest"] + df_puts["volume"]
df_puts["spot"] = round(current_price, 2)
call_oi = calls.set_index("strike")["openInterest"] / 1000
put_oi = puts.set_index("strike")["openInterest"] / 1000
df_opt = pd.merge(call_oi, put_oi, left_index=True, right_index=True)
df_opt = df_opt.rename(
columns={"openInterest_x": "OI_call", "openInterest_y": "OI_put"}
)
max_pain = op_helpers.calculate_max_pain(df_opt)
if min_vol == -1 and min_sp == -1 and max_sp == -1:
# If no argument provided, we use the percentile 50 to get 50% of upper volume data
volume_percentile_threshold = 50
min_vol_calls = np.percentile(df_calls["oi+v"], volume_percentile_threshold)
min_vol_puts = np.percentile(df_puts["oi+v"], volume_percentile_threshold)
df_calls = df_calls[df_calls["oi+v"] > min_vol_calls]
df_puts = df_puts[df_puts["oi+v"] < min_vol_puts]
else:
if min_vol > -1:
df_calls = df_calls[df_calls["oi+v"] > min_vol]
df_puts = df_puts[df_puts["oi+v"] < -min_vol]
if min_sp > -1:
df_calls = df_calls[df_calls["strike"] > min_sp]
df_puts = df_puts[df_puts["strike"] > min_sp]
if max_sp > -1:
df_calls = df_calls[df_calls["strike"] < max_sp]
df_puts = df_puts[df_puts["strike"] < max_sp]
if df_calls.empty and df_puts.empty:
print(
"The filtering applied is too strong, there is no data available for such conditions.\n"
)
return
# Initialize the matplotlib figure
_, ax = plt.subplots(figsize=plot_autoscale(), dpi=cfp.PLOT_DPI)
# make x axis symmetric
axis_origin = max(abs(max(df_puts["oi+v"])), abs(max(df_calls["oi+v"])))
ax.set_xlim(-axis_origin, +axis_origin)
sns.set_style(style="darkgrid")
g = sns.barplot(
x="oi+v",
y="strike",
data=df_calls,
label="Calls: Open Interest",
color="lightgreen",
orient="h",
)
g = sns.barplot(
x="volume",
y="strike",
data=df_calls,
label="Calls: Volume",
color="green",
orient="h",
)
g = sns.barplot(
x="oi+v",
y="strike",
data=df_puts,
label="Puts: Open Interest",
color="pink",
orient="h",
)
g = sns.barplot(
x="volume",
y="strike",
data=df_puts,
label="Puts: Volume",
color="red",
orient="h",
)
# draw spot line
s = [float(strike.get_text()) for strike in ax.get_yticklabels()]
spot_index = bisect_left(s, current_price) # find where the spot is on the graph
spot_line = ax.axhline(spot_index, ls="--", color="dodgerblue", alpha=0.3)
# draw max pain line
max_pain_index = bisect_left(s, max_pain)
max_pain_line = ax.axhline(max_pain_index, ls="-", color="black", alpha=0.3)
max_pain_line.set_linewidth(3)
# format ticklabels without - for puts
g.set_xticks(g.get_xticks())
xlabels = [f"{x:,.0f}".replace("-", "") for x in g.get_xticks()]
g.set_xticklabels(xlabels)
plt.title(
f"{ticker} volumes for {expiry} (open interest displayed only during market hours)"
)
ax.invert_yaxis()
_ = ax.legend()
handles, _ = ax.get_legend_handles_labels()
handles.append(spot_line)
handles.append(max_pain_line)
# create legend labels + add to graph
labels = [
"Calls open interest",
"Calls volume ",
"Puts open interest",
"Puts volume",
"Current stock price",
f"Max pain = {max_pain}",
]
plt.legend(handles=handles[:], labels=labels)
sns.despine(left=True, bottom=True)
if gtff.USE_ION:
plt.ion()
plt.show()
plt.style.use("default")
print("")
def plot_plot(
ticker: str, expiration: str, put: bool, x: str, y: str, custom: str
) -> None:
"""Generate a graph custom graph based on user input"""
convert = {
"ltd": "lastTradeDate",
"s": "strike",
"lp": "lastPrice",
"b": "bid",
"a": "ask",
"c": "change",
"pc": "percentChange",
"v": "volume",
"oi": "openInterest",
"iv": "impliedVolatility",
}
x = convert[x]
y = convert[y]
varis = op_helpers.opt_chain_cols
chain = yfinance_model.get_option_chain(ticker, expiration)
values = chain.puts if put else chain.calls
_, ax = plt.subplots()
if custom == "smile":
x = "strike"
y = "impliedVolatility"
x_data = values[x]
y_data = values[y]
ax.plot(x_data, y_data, "--bo")
word = "puts" if put else "calls"
ax.set_title(
f"{varis[y]['label']} vs. {varis[x]['label']} for {ticker} {word} on {expiration}"
)
ax.set_ylabel(varis[y]["label"])
ax.set_xlabel(varis[x]["label"])
if varis[x]["format"] == "date":
plt.gca().xaxis.set_major_formatter(mdates.DateFormatter("%Y/%m/%d"))
plt.gca().xaxis.set_major_locator(mdates.DayLocator(interval=1))
plt.gcf().autofmt_xdate()
elif varis[x]["format"]:
ax.xaxis.set_major_formatter(varis[x]["format"])
if varis[y]["format"] == "date":
plt.gca().yaxis.set_major_formatter(mdates.DateFormatter("%Y/%m/%d"))
plt.gca().yaxis.set_major_locator(mdates.DayLocator(interval=1))
elif varis[y]["format"]:
ax.yaxis.set_major_formatter(varis[y]["format"])
plt.show()
def plot_payoff(
current_price: float,
options: List[Dict[Any, Any]],
underlying: int,
ticker: str,
expiration: str,
) -> None:
"""Generate a graph showing the option payoff diagram"""
x, yb, ya = generate_data(current_price, options, underlying)
_, ax = plt.subplots()
if ya:
ax.plot(x, yb, label="Payoff Before Premium")
ax.plot(x, ya, label="Payoff After Premium")
else:
ax.plot(x, yb, label="Payoff")
ax.set_title(f"Option Payoff Diagram for {ticker} on {expiration}")
ax.set_ylabel("Profit")
ax.set_xlabel("Underlying Asset Price at Expiration")
ax.xaxis.set_major_formatter("${x:.2f}")
ax.yaxis.set_major_formatter("${x:.2f}")
plt.legend()
plt.show()
print("")
def show_parity(
ticker: str, exp: str, put: bool, ask: bool, mini: float, maxi: float
) -> None:
"""Prints options and whether they are under or over priced [Source: Yahoo Finance]
Parameters
----------
ticker : str
Ticker to get expirations for
exp : str
Expiration to use for options
put : bool
Whether to use puts or calls
ask : bool
Whether to use ask or lastPrice
mini : float
Minimum strike price to show
maxi : float
Maximum strike price to show
"""
r_date = datetime.strptime(exp, "%Y-%m-%d").date()
delta = (r_date - date.today()).days
rate = ((1 + get_rf()) ** (delta / 365)) - 1
stock = get_price(ticker)
div_info = yfinance_model.get_dividend(ticker)
div_dts = div_info.index.values.tolist()
if div_dts:
last_div = pd.to_datetime(div_dts[-1])
if len(div_dts) > 3:
avg_div = np.mean(div_info.to_list()[-4:])
else:
avg_div = np.mean(div_info.to_list())
next_div = last_div + timedelta(days=91)
dividends = []
while next_div < datetime.strptime(exp, "%Y-%m-%d"):
day_dif = (next_div - datetime.now()).days
dividends.append((avg_div, day_dif))
next_div += timedelta(days=91)
div_pvs = [x[0] / ((1 + get_rf()) ** (x[1] / 365)) for x in dividends]
pv_dividend = sum(div_pvs)
else:
pv_dividend = 0
chain = get_option_chain(ticker, exp)
name = "ask" if ask else "lastPrice"
o_type = "put" if put else "call"
calls = chain.calls[["strike", name]].copy()
calls = calls.rename(columns={name: "callPrice"})
puts = chain.puts[["strike", name]].copy()
puts = puts.rename(columns={name: "putPrice"})
opts = pd.merge(calls, puts, on="strike")
opts = opts.dropna()
opts = opts.loc[opts["callPrice"] * opts["putPrice"] != 0]
opts["callParity"] = (
opts["putPrice"] + stock - (opts["strike"] / (1 + rate)) - pv_dividend
)
opts["putParity"] = (
(opts["strike"] / (1 + rate)) + opts["callPrice"] - stock + pv_dividend
)
diff = o_type + " Difference"
opts[diff] = opts[o_type + "Price"] - opts[o_type + "Parity"]
opts["distance"] = abs(stock - opts["strike"])
filtered = opts.copy()
if mini is None:
mini = filtered.strike.quantile(0.25)
if maxi is None:
maxi = filtered.strike.quantile(0.75)
filtered = filtered.loc[filtered["strike"] >= mini]
filtered = filtered.loc[filtered["strike"] <= maxi]
show = filtered[["strike", diff]].copy()
print("Warning: Low volume options may be difficult to trade.\n")
if ask:
print("Warning: Options with no current ask price not shown.\n")
if gtff.USE_TABULATE_DF:
print(
tabulate(
show,
headers=[x.title() for x in show.columns],
tablefmt="fancy_grid",
showindex=False,
floatfmt=".2f",
)
)
else:
print(show.to_string(index=False))
print("")
def risk_neutral_vals(
ticker: str,
exp: str,
put: bool,
df: pd.DataFrame,
mini: float,
maxi: float,
risk: float,
) -> None:
"""Prints current options prices and risk neutral values [Source: Yahoo Finance]
Parameters
----------
ticker : str
Ticker to get expirations for
exp : str
Expiration to use for options
put : bool
Whether to use puts or calls
df : pd.DataFrame
Estimates for stocks prices and probabilities
mini : float
Minimum strike price to show
maxi : float
Maximum strike price to show
risk : float
The risk-free rate for the asset
"""
if put:
chain = get_option_chain(ticker, exp).puts
else:
chain = get_option_chain(ticker, exp).calls
r_date = datetime.strptime(exp, "%Y-%m-%d").date()
delta = (r_date - date.today()).days
vals = []
if risk is None:
risk = get_rf()
for _, row in chain.iterrows():
vals.append(
[
row["strike"],
row["lastPrice"],
op_helpers.rn_payoff(row["strike"], df, put, delta, risk),
]
)
new_df = pd.DataFrame(vals, columns=["Strike", "Last Price", "Value"], dtype=float)
new_df["Difference"] = new_df["Last Price"] - new_df["Value"]
if mini is None:
mini = new_df.Strike.quantile(0.25)
if maxi is None:
maxi = new_df.Strike.quantile(0.75)
new_df = new_df[new_df["Strike"] >= mini]
new_df = new_df[new_df["Strike"] <= maxi]
if gtff.USE_TABULATE_DF:
print(
tabulate(
new_df,
headers=[x.title() for x in new_df.columns],
tablefmt="fancy_grid",
showindex=False,
floatfmt=".2f",
)
)
else:
print(new_df.to_string(index=False))
print("")
| """Yfinance options view"""
__docformat__ = "numpy"
import os
from bisect import bisect_left
from typing import List, Dict, Any
from datetime import datetime, date, timedelta
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
import numpy as np
import pandas as pd
import seaborn as sns
import yfinance as yf
from tabulate import tabulate
import gamestonk_terminal.config_plot as cfp
import gamestonk_terminal.feature_flags as gtff
from gamestonk_terminal.helper_funcs import export_data, plot_autoscale
from gamestonk_terminal.stocks.options import op_helpers, yfinance_model
from gamestonk_terminal.stocks.options.yfinance_model import (
generate_data,
get_option_chain,
get_price,
)
from gamestonk_terminal.helper_funcs import get_rf
def plot_oi(
ticker: str,
expiry: str,
min_sp: float,
max_sp: float,
calls_only: bool,
puts_only: bool,
export: str,
):
"""Plot open interest
Parameters
----------
ticker: str
Ticker
expiry: str
Expiry date for options
min_sp: float
Min strike to consider
max_sp: float
Max strike to consider
calls_only: bool
Show calls only
puts_only: bool
Show puts only
export: str
Format to export file
"""
options = yfinance_model.get_option_chain(ticker, expiry)
export_data(
export,
os.path.dirname(os.path.abspath(__file__)),
"oi_yf",
options,
)
calls = options.calls
puts = options.puts
current_price = float(yf.Ticker(ticker).info["regularMarketPrice"])
if min_sp == -1:
min_strike = 0.75 * current_price
else:
min_strike = min_sp
if max_sp == -1:
max_strike = 1.25 * current_price
else:
max_strike = max_sp
if calls_only and puts_only:
print("Both flags selected, please select one", "\n")
return
call_oi = calls.set_index("strike")["openInterest"] / 1000
put_oi = puts.set_index("strike")["openInterest"] / 1000
df_opt = pd.merge(call_oi, put_oi, left_index=True, right_index=True)
df_opt = df_opt.rename(
columns={"openInterest_x": "OI_call", "openInterest_y": "OI_put"}
)
max_pain = op_helpers.calculate_max_pain(df_opt)
plt.style.use("classic")
fig, ax = plt.subplots(figsize=plot_autoscale(), dpi=cfp.PLOT_DPI)
if not calls_only:
put_oi.plot(
x="strike",
y="openInterest",
label="Puts",
ax=ax,
marker="o",
ls="-",
c="r",
)
if not puts_only:
call_oi.plot(
x="strike",
y="openInterest",
label="Calls",
ax=ax,
marker="o",
ls="-",
c="g",
)
ax.axvline(
current_price, lw=2, c="k", ls="--", label="Current Price", alpha=0.7
)
ax.axvline(max_pain, lw=3, c="k", label=f"Max Pain: {max_pain}", alpha=0.7)
ax.grid("on")
ax.set_xlabel("Strike Price")
ax.set_ylabel("Open Interest (1k) ")
ax.set_xlim(min_strike, max_strike)
if gtff.USE_ION:
plt.ion()
ax.set_title(f"Open Interest for {ticker.upper()} expiring {expiry}")
plt.legend(loc=0)
fig.tight_layout(pad=1)
plt.show()
plt.style.use("default")
print("")
def plot_vol(
ticker: str,
expiry: str,
min_sp: float,
max_sp: float,
calls_only: bool,
puts_only: bool,
export: str,
):
"""Plot volume
Parameters
----------
ticker: str
Ticker
expiry: str
Expiry date for options
min_sp: float
Min strike to consider
max_sp: float
Max strike to consider
calls_only: bool
Show calls only
puts_only: bool
Show puts only
export: str
Format to export file
"""
options = yfinance_model.get_option_chain(ticker, expiry)
export_data(
export,
os.path.dirname(os.path.abspath(__file__)),
"vol_yf",
options,
)
calls = options.calls
puts = options.puts
current_price = float(yf.Ticker(ticker).info["regularMarketPrice"])
if min_sp == -1:
min_strike = 0.75 * current_price
else:
min_strike = min_sp
if max_sp == -1:
max_strike = 1.25 * current_price
else:
max_strike = max_sp
if calls_only and puts_only:
print("Both flags selected, please select one", "\n")
return
call_v = calls.set_index("strike")["volume"] / 1000
put_v = puts.set_index("strike")["volume"] / 1000
plt.style.use("classic")
fig, ax = plt.subplots(figsize=plot_autoscale(), dpi=cfp.PLOT_DPI)
if not calls_only:
put_v.plot(
x="strike",
y="volume",
label="Puts",
ax=ax,
marker="o",
ls="-",
c="r",
)
if not puts_only:
call_v.plot(
x="strike",
y="volume",
label="Calls",
ax=ax,
marker="o",
ls="-",
c="g",
)
ax.axvline(current_price, lw=2, c="k", ls="--", label="Current Price", alpha=0.7)
ax.grid("on")
ax.set_xlabel("Strike Price")
ax.set_ylabel("Volume (1k) ")
ax.set_xlim(min_strike, max_strike)
if gtff.USE_ION:
plt.ion()
ax.set_title(f"Volume for {ticker.upper()} expiring {expiry}")
plt.legend(loc=0)
fig.tight_layout(pad=1)
plt.show()
plt.style.use("default")
print("")
def plot_volume_open_interest(
ticker: str,
expiry: str,
min_sp: float,
max_sp: float,
min_vol: float,
export: str,
):
"""Plot volume and open interest
Parameters
----------
ticker: str
Stock ticker
expiry: str
Option expiration
min_sp: float
Min strike price
max_sp: float
Max strike price
min_vol: float
Min volume to consider
export: str
Format for exporting data
"""
options = yfinance_model.get_option_chain(ticker, expiry)
export_data(
export,
os.path.dirname(os.path.abspath(__file__)),
"voi_yf",
options,
)
calls = options.calls
puts = options.puts
current_price = float(yf.Ticker(ticker).info["regularMarketPrice"])
# Process Calls Data
df_calls = calls.pivot_table(
index="strike", values=["volume", "openInterest"], aggfunc="sum"
).reindex()
df_calls["strike"] = df_calls.index
df_calls["type"] = "calls"
df_calls["openInterest"] = df_calls["openInterest"]
df_calls["volume"] = df_calls["volume"]
df_calls["oi+v"] = df_calls["openInterest"] + df_calls["volume"]
df_calls["spot"] = round(current_price, 2)
df_puts = puts.pivot_table(
index="strike", values=["volume", "openInterest"], aggfunc="sum"
).reindex()
df_puts["strike"] = df_puts.index
df_puts["type"] = "puts"
df_puts["openInterest"] = df_puts["openInterest"]
df_puts["volume"] = -df_puts["volume"]
df_puts["openInterest"] = -df_puts["openInterest"]
df_puts["oi+v"] = df_puts["openInterest"] + df_puts["volume"]
df_puts["spot"] = round(current_price, 2)
call_oi = calls.set_index("strike")["openInterest"] / 1000
put_oi = puts.set_index("strike")["openInterest"] / 1000
df_opt = pd.merge(call_oi, put_oi, left_index=True, right_index=True)
df_opt = df_opt.rename(
columns={"openInterest_x": "OI_call", "openInterest_y": "OI_put"}
)
max_pain = op_helpers.calculate_max_pain(df_opt)
if min_vol == -1 and min_sp == -1 and max_sp == -1:
# If no argument provided, we use the percentile 50 to get 50% of upper volume data
volume_percentile_threshold = 50
min_vol_calls = np.percentile(df_calls["oi+v"], volume_percentile_threshold)
min_vol_puts = np.percentile(df_puts["oi+v"], volume_percentile_threshold)
df_calls = df_calls[df_calls["oi+v"] > min_vol_calls]
df_puts = df_puts[df_puts["oi+v"] < min_vol_puts]
else:
if min_vol > -1:
df_calls = df_calls[df_calls["oi+v"] > min_vol]
df_puts = df_puts[df_puts["oi+v"] < -min_vol]
if min_sp > -1:
df_calls = df_calls[df_calls["strike"] > min_sp]
df_puts = df_puts[df_puts["strike"] > min_sp]
if max_sp > -1:
df_calls = df_calls[df_calls["strike"] < max_sp]
df_puts = df_puts[df_puts["strike"] < max_sp]
if df_calls.empty and df_puts.empty:
print(
"The filtering applied is too strong, there is no data available for such conditions.\n"
)
return
# Initialize the matplotlib figure
_, ax = plt.subplots(figsize=plot_autoscale(), dpi=cfp.PLOT_DPI)
# make x axis symmetric
axis_origin = max(abs(max(df_puts["oi+v"])), abs(max(df_calls["oi+v"])))
ax.set_xlim(-axis_origin, +axis_origin)
sns.set_style(style="darkgrid")
g = sns.barplot(
x="oi+v",
y="strike",
data=df_calls,
label="Calls: Open Interest",
color="lightgreen",
orient="h",
)
g = sns.barplot(
x="volume",
y="strike",
data=df_calls,
label="Calls: Volume",
color="green",
orient="h",
)
g = sns.barplot(
x="oi+v",
y="strike",
data=df_puts,
label="Puts: Open Interest",
color="pink",
orient="h",
)
g = sns.barplot(
x="volume",
y="strike",
data=df_puts,
label="Puts: Volume",
color="red",
orient="h",
)
# draw spot line
s = [float(strike.get_text()) for strike in ax.get_yticklabels()]
spot_index = bisect_left(s, current_price) # find where the spot is on the graph
spot_line = ax.axhline(spot_index, ls="--", color="dodgerblue", alpha=0.3)
# draw max pain line
max_pain_index = bisect_left(s, max_pain)
max_pain_line = ax.axhline(max_pain_index, ls="-", color="black", alpha=0.3)
max_pain_line.set_linewidth(3)
# format ticklabels without - for puts
g.set_xticks(g.get_xticks())
xlabels = [f"{x:,.0f}".replace("-", "") for x in g.get_xticks()]
g.set_xticklabels(xlabels)
plt.title(
f"{ticker} volumes for {expiry} (open interest displayed only during market hours)"
)
ax.invert_yaxis()
_ = ax.legend()
handles, _ = ax.get_legend_handles_labels()
handles.append(spot_line)
handles.append(max_pain_line)
# create legend labels + add to graph
labels = [
"Calls open interest",
"Calls volume ",
"Puts open interest",
"Puts volume",
"Current stock price",
f"Max pain = {max_pain}",
]
plt.legend(handles=handles[:], labels=labels)
sns.despine(left=True, bottom=True)
if gtff.USE_ION:
plt.ion()
plt.show()
plt.style.use("default")
print("")
def plot_plot(
ticker: str, expiration: str, put: bool, x: str, y: str, custom: str
) -> None:
"""Generate a graph custom graph based on user input"""
convert = {
"ltd": "lastTradeDate",
"s": "strike",
"lp": "lastPrice",
"b": "bid",
"a": "ask",
"c": "change",
"pc": "percentChange",
"v": "volume",
"oi": "openInterest",
"iv": "impliedVolatility",
}
x = convert[x]
y = convert[y]
varis = op_helpers.opt_chain_cols
chain = yfinance_model.get_option_chain(ticker, expiration)
values = chain.puts if put else chain.calls
_, ax = plt.subplots()
if custom == "smile":
x = "strike"
y = "impliedVolatility"
x_data = values[x]
y_data = values[y]
ax.plot(x_data, y_data, "--bo")
word = "puts" if put else "calls"
ax.set_title(
f"{varis[y]['label']} vs. {varis[x]['label']} for {ticker} {word} on {expiration}"
)
ax.set_ylabel(varis[y]["label"])
ax.set_xlabel(varis[x]["label"])
if varis[x]["format"] == "date":
plt.gca().xaxis.set_major_formatter(mdates.DateFormatter("%Y/%m/%d"))
plt.gca().xaxis.set_major_locator(mdates.DayLocator(interval=1))
plt.gcf().autofmt_xdate()
elif varis[x]["format"]:
ax.xaxis.set_major_formatter(varis[x]["format"])
if varis[y]["format"] == "date":
plt.gca().yaxis.set_major_formatter(mdates.DateFormatter("%Y/%m/%d"))
plt.gca().yaxis.set_major_locator(mdates.DayLocator(interval=1))
elif varis[y]["format"]:
ax.yaxis.set_major_formatter(varis[y]["format"])
plt.show()
def plot_payoff(
current_price: float,
options: List[Dict[Any, Any]],
underlying: int,
ticker: str,
expiration: str,
) -> None:
"""Generate a graph showing the option payoff diagram"""
x, yb, ya = generate_data(current_price, options, underlying)
_, ax = plt.subplots()
if ya:
ax.plot(x, yb, label="Payoff Before Premium")
ax.plot(x, ya, label="Payoff After Premium")
else:
ax.plot(x, yb, label="Payoff")
ax.set_title(f"Option Payoff Diagram for {ticker} on {expiration}")
ax.set_ylabel("Profit")
ax.set_xlabel("Underlying Asset Price at Expiration")
ax.xaxis.set_major_formatter("${x:.2f}")
ax.yaxis.set_major_formatter("${x:.2f}")
plt.legend()
plt.show()
print("")
def show_parity(
ticker: str, exp: str, put: bool, ask: bool, mini: float, maxi: float
) -> None:
"""Prints options and whether they are under or over priced [Source: Yahoo Finance]
Parameters
----------
ticker : str
Ticker to get expirations for
exp : str
Expiration to use for options
put : bool
Whether to use puts or calls
ask : bool
Whether to use ask or lastPrice
mini : float
Minimum strike price to show
maxi : float
Maximum strike price to show
"""
r_date = datetime.strptime(exp, "%Y-%m-%d").date()
delta = (r_date - date.today()).days
rate = ((1 + get_rf()) ** (delta / 365)) - 1
stock = get_price(ticker)
div_info = yfinance_model.get_dividend(ticker)
div_dts = div_info.index.values.tolist()
if div_dts:
last_div = pd.to_datetime(div_dts[-1])
if len(div_dts) > 3:
avg_div = np.mean(div_info.to_list()[-4:])
else:
avg_div = np.mean(div_info.to_list())
next_div = last_div + timedelta(days=91)
dividends = []
while next_div < datetime.strptime(exp, "%Y-%m-%d"):
day_dif = (next_div - datetime.now()).days
dividends.append((avg_div, day_dif))
next_div += timedelta(days=91)
div_pvs = [x[0] / ((1 + get_rf()) ** (x[1] / 365)) for x in dividends]
pv_dividend = sum(div_pvs)
else:
pv_dividend = 0
chain = get_option_chain(ticker, exp)
name = "ask" if ask else "lastPrice"
o_type = "put" if put else "call"
calls = chain.calls[["strike", name]].copy()
calls = calls.rename(columns={name: "callPrice"})
puts = chain.puts[["strike", name]].copy()
puts = puts.rename(columns={name: "putPrice"})
opts = pd.merge(calls, puts, on="strike")
opts = opts.dropna()
opts = opts.loc[opts["callPrice"] * opts["putPrice"] != 0]
opts["callParity"] = (
opts["putPrice"] + stock - (opts["strike"] / (1 + rate)) - pv_dividend
)
opts["putParity"] = (
(opts["strike"] / (1 + rate)) + opts["callPrice"] - stock + pv_dividend
)
diff = o_type + " Difference"
opts[diff] = opts[o_type + "Price"] - opts[o_type + "Parity"]
opts["distance"] = abs(stock - opts["strike"])
filtered = opts.copy()
if mini is None:
mini = filtered.strike.quantile(0.25)
if maxi is None:
maxi = filtered.strike.quantile(0.75)
filtered = filtered.loc[filtered["strike"] >= mini]
filtered = filtered.loc[filtered["strike"] <= maxi]
show = filtered[["strike", diff]].copy()
print("Warning: Low volume options may be difficult to trade.\n")
if ask:
print("Warning: Options with no current ask price not shown.\n")
if gtff.USE_TABULATE_DF:
print(
tabulate(
show,
headers=[x.title() for x in show.columns],
tablefmt="fancy_grid",
showindex=False,
floatfmt=".2f",
)
)
else:
print(show.to_string(index=False))
print("")
def risk_neutral_vals(
ticker: str,
exp: str,
put: bool,
df: pd.DataFrame,
mini: float,
maxi: float,
risk: float,
) -> None:
"""Prints current options prices and risk neutral values [Source: Yahoo Finance]
Parameters
----------
ticker : str
Ticker to get expirations for
exp : str
Expiration to use for options
put : bool
Whether to use puts or calls
df : pd.DataFrame
Estimates for stocks prices and probabilities
mini : float
Minimum strike price to show
maxi : float
Maximum strike price to show
risk : float
The risk-free rate for the asset
"""
if put:
chain = get_option_chain(ticker, exp).puts
else:
chain = get_option_chain(ticker, exp).calls
r_date = datetime.strptime(exp, "%Y-%m-%d").date()
delta = (r_date - date.today()).days
vals = []
if risk is None:
risk = get_rf()
for _, row in chain.iterrows():
vals.append(
[
row["strike"],
row["lastPrice"],
op_helpers.rn_payoff(row["strike"], df, put, delta, risk),
]
)
new_df = pd.DataFrame(vals, columns=["Strike", "Last Price", "Value"], dtype=float)
new_df["Difference"] = new_df["Last Price"] - new_df["Value"]
if mini is None:
mini = new_df.Strike.quantile(0.25)
if maxi is None:
maxi = new_df.Strike.quantile(0.75)
new_df = new_df[new_df["Strike"] >= mini]
new_df = new_df[new_df["Strike"] <= maxi]
if gtff.USE_TABULATE_DF:
print(
tabulate(
new_df,
headers=[x.title() for x in new_df.columns],
tablefmt="fancy_grid",
showindex=False,
floatfmt=".2f",
)
)
else:
print(new_df.to_string(index=False))
print("") | en | 0.590784 | Yfinance options view Plot open interest Parameters ---------- ticker: str Ticker expiry: str Expiry date for options min_sp: float Min strike to consider max_sp: float Max strike to consider calls_only: bool Show calls only puts_only: bool Show puts only export: str Format to export file Plot volume Parameters ---------- ticker: str Ticker expiry: str Expiry date for options min_sp: float Min strike to consider max_sp: float Max strike to consider calls_only: bool Show calls only puts_only: bool Show puts only export: str Format to export file Plot volume and open interest Parameters ---------- ticker: str Stock ticker expiry: str Option expiration min_sp: float Min strike price max_sp: float Max strike price min_vol: float Min volume to consider export: str Format for exporting data # Process Calls Data # If no argument provided, we use the percentile 50 to get 50% of upper volume data # Initialize the matplotlib figure # make x axis symmetric # draw spot line # find where the spot is on the graph # draw max pain line # format ticklabels without - for puts # create legend labels + add to graph Generate a graph custom graph based on user input Generate a graph showing the option payoff diagram Prints options and whether they are under or over priced [Source: Yahoo Finance] Parameters ---------- ticker : str Ticker to get expirations for exp : str Expiration to use for options put : bool Whether to use puts or calls ask : bool Whether to use ask or lastPrice mini : float Minimum strike price to show maxi : float Maximum strike price to show Prints current options prices and risk neutral values [Source: Yahoo Finance] Parameters ---------- ticker : str Ticker to get expirations for exp : str Expiration to use for options put : bool Whether to use puts or calls df : pd.DataFrame Estimates for stocks prices and probabilities mini : float Minimum strike price to show maxi : float Maximum strike price to show risk : float The risk-free rate for the asset | 2.660551 | 3 |
examples/videostore/videostore/model.py | arjones6/elixir | 1 | 6630726 | <filename>examples/videostore/videostore/model.py
from turbogears.database import metadata, session
from elixir import Unicode, DateTime, String, Integer
from elixir import Entity, Field, using_options
from elixir import OneToMany, ManyToOne, ManyToMany
from elixir import setup_all
from datetime import datetime
#
# application model
#
class Director(Entity):
name = Field(Unicode(60))
movies = OneToMany('Movie', inverse='director')
using_options(tablename='directors')
class Movie(Entity):
title = Field(Unicode(60))
description = Field(Unicode(512))
releasedate = Field(DateTime)
director = ManyToOne('Director', inverse='movies')
actors = ManyToMany('Actor', inverse='movies', tablename='movie_casting')
using_options(tablename='movies')
class Actor(Entity):
name = Field(Unicode(60))
movies = ManyToMany('Movie', inverse='actors', tablename='movie_casting')
using_options(tablename='actors')
#
# identity model
#
class Visit(Entity):
visit_key = Field(String(40), primary_key=True)
created = Field(DateTime, required=True, default=datetime.now)
expiry = Field(DateTime)
using_options(tablename='visit')
@classmethod
def lookup_visit(cls, visit_key):
return Visit.get(visit_key)
class VisitIdentity(Entity):
visit_key = Field(String(40), primary_key=True)
user = ManyToOne('User', colname='user_id', use_alter=True)
using_options(tablename='visit_identity')
class Group(Entity):
group_id = Field(Integer, primary_key=True)
group_name = Field(Unicode(16), unique=True)
display_name = Field(Unicode(255))
created = Field(DateTime, default=datetime.now)
users = ManyToMany('User', inverse='groups')
permissions = ManyToMany('Permission', inverse='groups')
using_options(tablename='tg_group')
class User(Entity):
user_id = Field(Integer, primary_key=True)
user_name = Field(Unicode(16), unique=True)
email_address = Field(Unicode(255), unique=True)
display_name = Field(Unicode(255))
password = Field(Unicode(40))
created = Field(DateTime, default=datetime.now)
groups = ManyToMany('Group', inverse='users')
using_options(tablename='tg_user')
@property
def permissions(self):
perms = set()
for g in self.groups:
perms = perms | set(g.permissions)
return perms
class Permission(Entity):
permission_id = Field(Integer, primary_key=True)
permission_name = Field(Unicode(16), unique=True)
description = Field(Unicode(255))
groups = ManyToMany('Group', inverse='permissions')
using_options(tablename='permission')
# create the table and mapper instances for the above entities
setup_all()
| <filename>examples/videostore/videostore/model.py
from turbogears.database import metadata, session
from elixir import Unicode, DateTime, String, Integer
from elixir import Entity, Field, using_options
from elixir import OneToMany, ManyToOne, ManyToMany
from elixir import setup_all
from datetime import datetime
#
# application model
#
class Director(Entity):
name = Field(Unicode(60))
movies = OneToMany('Movie', inverse='director')
using_options(tablename='directors')
class Movie(Entity):
title = Field(Unicode(60))
description = Field(Unicode(512))
releasedate = Field(DateTime)
director = ManyToOne('Director', inverse='movies')
actors = ManyToMany('Actor', inverse='movies', tablename='movie_casting')
using_options(tablename='movies')
class Actor(Entity):
name = Field(Unicode(60))
movies = ManyToMany('Movie', inverse='actors', tablename='movie_casting')
using_options(tablename='actors')
#
# identity model
#
class Visit(Entity):
visit_key = Field(String(40), primary_key=True)
created = Field(DateTime, required=True, default=datetime.now)
expiry = Field(DateTime)
using_options(tablename='visit')
@classmethod
def lookup_visit(cls, visit_key):
return Visit.get(visit_key)
class VisitIdentity(Entity):
visit_key = Field(String(40), primary_key=True)
user = ManyToOne('User', colname='user_id', use_alter=True)
using_options(tablename='visit_identity')
class Group(Entity):
group_id = Field(Integer, primary_key=True)
group_name = Field(Unicode(16), unique=True)
display_name = Field(Unicode(255))
created = Field(DateTime, default=datetime.now)
users = ManyToMany('User', inverse='groups')
permissions = ManyToMany('Permission', inverse='groups')
using_options(tablename='tg_group')
class User(Entity):
user_id = Field(Integer, primary_key=True)
user_name = Field(Unicode(16), unique=True)
email_address = Field(Unicode(255), unique=True)
display_name = Field(Unicode(255))
password = Field(Unicode(40))
created = Field(DateTime, default=datetime.now)
groups = ManyToMany('Group', inverse='users')
using_options(tablename='tg_user')
@property
def permissions(self):
perms = set()
for g in self.groups:
perms = perms | set(g.permissions)
return perms
class Permission(Entity):
permission_id = Field(Integer, primary_key=True)
permission_name = Field(Unicode(16), unique=True)
description = Field(Unicode(255))
groups = ManyToMany('Group', inverse='permissions')
using_options(tablename='permission')
# create the table and mapper instances for the above entities
setup_all()
| en | 0.749525 | # # application model # # # identity model # # create the table and mapper instances for the above entities | 2.39802 | 2 |
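The User.permissions property in the Elixir model above simply unions the permission sets of every group the user belongs to. A framework-free sketch of that aggregation (plain Python classes standing in for the Elixir entities, with made-up permission names) behaves the same way:

class Group:
    def __init__(self, permissions):
        self.permissions = set(permissions)

class User:
    def __init__(self, groups):
        self.groups = groups

    @property
    def permissions(self):
        # union of every group's permission set, as in the model above
        perms = set()
        for g in self.groups:
            perms = perms | set(g.permissions)
        return perms

admins = Group({'read', 'write', 'delete'})
editors = Group({'read', 'write'})
print(User([admins, editors]).permissions)  # {'read', 'write', 'delete'} (set order may vary)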
bayescache/optimizers/rmsprop.py | jacobhinkle/bayescache | 0 | 6630727 | import torch.optim
from bayescache.api import OptimizerFactory, Model
class RMSpropFactory(OptimizerFactory):
""" RMSprop optimizer factory """
def __init__(self, lr=1e-2, alpha=0.99, eps=1e-8, weight_decay=0, momentum=0, centered=False):
self.lr = lr
self.alpha = alpha
self.eps = eps
self.weight_decay = weight_decay
self.momentum = momentum
self.centered = centered
def instantiate(self, model: Model) -> torch.optim.RMSprop:
return torch.optim.RMSprop(
filter(lambda p: p.requires_grad, model.parameters()),
lr=self.lr, alpha=self.alpha, eps=self.eps,
weight_decay=self.weight_decay, momentum=self.momentum, centered=self.centered
)
def create(lr, alpha, momentum=0, weight_decay=0, epsilon=1e-8):
""" Vel factory function """
return RMSpropFactory(lr=lr, alpha=alpha, momentum=momentum, weight_decay=weight_decay, eps=float(epsilon))
| import torch.optim
from bayescache.api import OptimizerFactory, Model
class RMSpropFactory(OptimizerFactory):
""" RMSprop optimizer factory """
def __init__(self, lr=1e-2, alpha=0.99, eps=1e-8, weight_decay=0, momentum=0, centered=False):
self.lr = lr
self.alpha = alpha
self.eps = eps
self.weight_decay = weight_decay
self.momentum = momentum
self.centered = centered
def instantiate(self, model: Model) -> torch.optim.RMSprop:
return torch.optim.RMSprop(
filter(lambda p: p.requires_grad, model.parameters()),
lr=self.lr, alpha=self.alpha, eps=self.eps,
weight_decay=self.weight_decay, momentum=self.momentum, centered=self.centered
)
def create(lr, alpha, momentum=0, weight_decay=0, epsilon=1e-8):
""" Vel factory function """
return RMSpropFactory(lr=lr, alpha=alpha, momentum=momentum, weight_decay=weight_decay, eps=float(epsilon))
| en | 0.453904 | RMSprop optimizer factory Vel factory function | 2.601026 | 3 |
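RMSpropFactory.instantiate() above just forwards the stored hyper-parameters to torch.optim.RMSprop while skipping frozen parameters. A stand-alone sketch of that construction, assuming only that PyTorch is installed (the layer and hyper-parameter values are arbitrary, not bayescache defaults):

import torch
import torch.nn as nn

model = nn.Linear(4, 2)
model.bias.requires_grad = False  # frozen parameters are dropped by the filter below

optimizer = torch.optim.RMSprop(
    filter(lambda p: p.requires_grad, model.parameters()),
    lr=1e-3, alpha=0.9, eps=1e-8, weight_decay=0.0, momentum=0.9, centered=False,
)
print(len(optimizer.param_groups[0]['params']))  # 1 -- only the trainable weight remains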
user/tests.py | simonprast/wopi-engine | 0 | 6630728 | from django.test import override_settings
from rest_framework.test import APIRequestFactory, APITestCase, force_authenticate
from .api.dev.api_views import UserCreateOrLogin, UserDetail
from .models import User
# Test Registration Email
# This is the default email backend
@override_settings(EMAIL_BACKEND='django.core.mail.backends.smtp.EmailBackend')
class TestUserRegistration(APITestCase):
"""
This function shall send a registration confirmation to a real email address.
"""
def test_user_registration(self):
# Ask the tester if he wants to perform this test
print('Do you want to test the user registration emails? (y/N)')
answer = input()
if answer != 'y':
return
# Create a user through the API
factory = APIRequestFactory()
view = UserCreateOrLogin.as_view()
request = factory.post(
'/api/dev/users/createuser/',
{
'first_name': 'John',
'last_name': 'Doe',
'email': '<EMAIL>',
'password': '<PASSWORD>;'
}
)
response = view(request)
print(response.data)
print('Did you - a user - receive an email for welcoming you to the platform? (y/N)')
confirmation = input()
self.assertEqual(confirmation.lower(), 'y',
'Tester did state that no email arrived.')
print('Did you - an advisor - receive an email regarding a new registration on the platform? (y/N)')
confirmation = input()
self.assertEqual(confirmation.lower(), 'y',
'Tester did state that no email arrived.')
@override_settings(EMAIL_BACKEND='django.core.mail.backends.smtp.EmailBackend')
class TestUserAdvisorAssigned(APITestCase):
"""
This test shall send a notification about an advisor being assigned to a user to both the user and the advisor.
"""
def test_user_advisor_assigned(self):
# Ask the tester if he wants to perform this test
print('Do you want to test the advisor assignation emails? (y/N)')
answer = input()
if answer != 'y':
return
# Step 1 - Create an advisor's account
advisor_account = User.objects.create_user(
first_name='Werner',
last_name='Betreuer',
email='<EMAIL>',
password='<PASSWORD>;'
)
advisor_account.utype = 7
advisor_account.save()
# Step 2 - Create a user's account
user_account = User.objects.create_user(
first_name='Simon',
last_name='User',
email='<EMAIL>',
password='<PASSWORD>;'
)
# Step 3 - Assign the advisor to the user
factory = APIRequestFactory()
view = UserDetail.as_view()
request = factory.put(
'/api/dev/users/' + str(user_account.id),
{
'advisor': advisor_account.id
}
)
force_authenticate(request, user=advisor_account)
response = view(request, pk=user_account.id)
print(response.data)
| from django.test import override_settings
from rest_framework.test import APIRequestFactory, APITestCase, force_authenticate
from .api.dev.api_views import UserCreateOrLogin, UserDetail
from .models import User
# Test Registration Email
# This is the default email backend
@override_settings(EMAIL_BACKEND='django.core.mail.backends.smtp.EmailBackend')
class TestUserRegistration(APITestCase):
"""
This function shall send a registration confirmation to a real email address.
"""
def test_user_registration(self):
# Ask the tester if he wants to perform this test
print('Do you want to test the user registration emails? (y/N)')
answer = input()
if answer != 'y':
return
# Create a user through the API
factory = APIRequestFactory()
view = UserCreateOrLogin.as_view()
request = factory.post(
'/api/dev/users/createuser/',
{
'first_name': 'John',
'last_name': 'Doe',
'email': '<EMAIL>',
'password': '<PASSWORD>;'
}
)
response = view(request)
print(response.data)
print('Did you - a user - receive an email for welcoming you to the platform? (y/N)')
confirmation = input()
self.assertEqual(confirmation.lower(), 'y',
'Tester did state that no email arrived.')
print('Did you - an advisor - receive an email regarding a new registration on the platform? (y/N)')
confirmation = input()
self.assertEqual(confirmation.lower(), 'y',
'Tester did state that no email arrived.')
@override_settings(EMAIL_BACKEND='django.core.mail.backends.smtp.EmailBackend')
class TestUserAdvisorAssigned(APITestCase):
"""
This test shall send a notification about an advisor being assigned to a user to both the user and the advisor.
"""
def test_user_advisor_assigned(self):
# Ask the tester if he wants to perform this test
print('Do you want to test the advisor assignation emails? (y/N)')
answer = input()
if answer != 'y':
return
# Step 1 - Create an advisor's account
advisor_account = User.objects.create_user(
first_name='Werner',
last_name='Betreuer',
email='<EMAIL>',
password='<PASSWORD>;'
)
advisor_account.utype = 7
advisor_account.save()
# Step 2 - Create a user's account
user_account = User.objects.create_user(
first_name='Simon',
last_name='User',
email='<EMAIL>',
password='<PASSWORD>;'
)
# Step 3 - Assign the advisor to the user
factory = APIRequestFactory()
view = UserDetail.as_view()
request = factory.put(
'/api/dev/users/' + str(user_account.id),
{
'advisor': advisor_account.id
}
)
force_authenticate(request, user=advisor_account)
response = view(request, pk=user_account.id)
print(response.data)
| en | 0.900399 | # Test Registration Email # This is the default email backend This function shall send a registration confirmation to a real email address. # Ask the tester if he wants to perform this test # Create a user through the API This test shall send a notification about an advisor being assigned to a user to both the user and the advisor. # Ask the tester if he wants to perform this test # Step 1 - Create an advisor's account # Step 2 - Create a user's account # Step 3 - Assign the advisor to the user | 2.400868 | 2 |
dependency_support/com_icarus_iverilog/build-plugins.bzl | RobSpringer/bazel_rules_hdl | 0 | 6630729 | # Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""BUILD helpers for using iverilog.
"""
load("@rules_cc//cc:defs.bzl", "cc_binary")
def iverilog_compile(srcs, flags = "", name = ""):
"""Compiles the first .v files given in srcs into a .vvp file.
Passes the flags to iverilog.
"""
vvp_file = srcs[0] + "vp" # Changes .v to .vvp
native.genrule(
name = "gen_" + vvp_file,
srcs = srcs,
outs = [vvp_file],
cmd = (
"$(location @com_icarus_iverilog//:iverilog) " +
flags + " " +
"-o $@ " +
"$(location " + srcs[0] + ")"
),
tools = ["@com_icarus_iverilog//:iverilog"],
)
# Creates a dummy test which will force the .vvp file production.
native.sh_test(
name = "dummy_iverilog_compile_test_" + name + "_" + vvp_file,
srcs = ["@rules_hdl//dependency_support/com_icarus_iverilog:dummy.sh"],
data = [vvp_file],
)
def vpi_binary(name, srcs, **kwargs):
"""Creates a .vpi file with the given name from the given sources.
All the extra arguments are passed directly to cc_binary.
"""
so_name = name + ".so"
cc_binary(
name = so_name,
srcs = srcs,
linkshared = 1,
**kwargs
)
native.genrule(
name = "gen_" + name,
srcs = [so_name],
outs = [name],
cmd = "cp $< $@",
output_to_bindir = 1,
executable = 1,
)
| # Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""BUILD helpers for using iverilog.
"""
load("@rules_cc//cc:defs.bzl", "cc_binary")
def iverilog_compile(srcs, flags = "", name = ""):
"""Compiles the first .v files given in srcs into a .vvp file.
Passes the flags to iverilog.
"""
vvp_file = srcs[0] + "vp" # Changes .v to .vvp
native.genrule(
name = "gen_" + vvp_file,
srcs = srcs,
outs = [vvp_file],
cmd = (
"$(location @com_icarus_iverilog//:iverilog) " +
flags + " " +
"-o $@ " +
"$(location " + srcs[0] + ")"
),
tools = ["@com_icarus_iverilog//:iverilog"],
)
# Creates a dummy test which will force the .vvp file production.
native.sh_test(
name = "dummy_iverilog_compile_test_" + name + "_" + vvp_file,
srcs = ["@rules_hdl//dependency_support/com_icarus_iverilog:dummy.sh"],
data = [vvp_file],
)
def vpi_binary(name, srcs, **kwargs):
"""Creates a .vpi file with the given name from the given sources.
All the extra arguments are passed directly to cc_binary.
"""
so_name = name + ".so"
cc_binary(
name = so_name,
srcs = srcs,
linkshared = 1,
**kwargs
)
native.genrule(
name = "gen_" + name,
srcs = [so_name],
outs = [name],
cmd = "cp $< $@",
output_to_bindir = 1,
executable = 1,
)
| en | 0.812884 | # Copyright 2020 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. BUILD helpers for using iverilog. Compiles the first .v files given in srcs into a .vvp file. Passes the flags to iverilog. # Changes .v to .vvp # Creates a dummy test which will force the .vvp file production. Creates a .vpi file with the given name from the given sources. All the extra arguments are passed directly to cc_binary. | 1.608612 | 2 |
tests/test_patterns.py | oiwn/py-crawling-goodies | 3 | 6630730 | """Test patterns"""
from pcg.patterns.singleton import Singleton
def test_singleton():
"""testing singleton pattern"""
class Obj(metaclass=Singleton): # pylint: disable=R0903
"""Test Obj class"""
class AnotherObj: # pylint: disable=R0903
"""Another obj"""
first_instance = Obj()
second_instance = Obj()
another_instance = AnotherObj()
assert first_instance == second_instance
assert first_instance != another_instance
| """Test patterns"""
from pcg.patterns.singleton import Singleton
def test_singleton():
"""testing singleton pattern"""
class Obj(metaclass=Singleton): # pylint: disable=R0903
"""Test Obj class"""
class AnotherObj: # pylint: disable=R0903
"""Another obj"""
first_instance = Obj()
second_instance = Obj()
another_instance = AnotherObj()
assert first_instance == second_instance
assert first_instance != another_instance
| en | 0.436453 | Test patterns testing singleton pattern # pylint: disable=R0903 Test Obj class # pylint: disable=R0903 Another obj | 2.712903 | 3 |
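The test above exercises pcg.patterns.singleton.Singleton without showing its implementation. A common way to write such a metaclass (an illustrative sketch, not necessarily pcg's exact code) caches one instance per class inside __call__:

class Singleton(type):
    """Metaclass that returns the same cached instance for every instantiation."""
    _instances = {}

    def __call__(cls, *args, **kwargs):
        if cls not in cls._instances:
            cls._instances[cls] = super().__call__(*args, **kwargs)
        return cls._instances[cls]

class Config(metaclass=Singleton):
    pass

assert Config() is Config()  # both calls return the single cached instance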
st2common/tests/unit/test_crypto_utils.py | shusugmt/st2 | 1 | 6630731 | <filename>st2common/tests/unit/test_crypto_utils.py
# Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the 'License'); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
import os
import six
import json
import binascii
import unittest2
from unittest2 import TestCase
from six.moves import range
from cryptography.exceptions import InvalidSignature
from st2common.util.crypto import KEYCZAR_HEADER_SIZE
from st2common.util.crypto import AESKey
from st2common.util.crypto import read_crypto_key
from st2common.util.crypto import symmetric_encrypt
from st2common.util.crypto import symmetric_decrypt
from st2common.util.crypto import keyczar_symmetric_decrypt
from st2common.util.crypto import keyczar_symmetric_encrypt
from st2common.util.crypto import cryptography_symmetric_encrypt
from st2common.util.crypto import cryptography_symmetric_decrypt
from st2tests.fixturesloader import get_fixtures_base_path
__all__ = [
'CryptoUtilsTestCase',
'CryptoUtilsKeyczarCompatibilityTestCase'
]
KEY_FIXTURES_PATH = os.path.join(get_fixtures_base_path(), 'keyczar_keys/')
class CryptoUtilsTestCase(TestCase):
@classmethod
def setUpClass(cls):
super(CryptoUtilsTestCase, cls).setUpClass()
CryptoUtilsTestCase.test_crypto_key = AESKey.generate()
def test_symmetric_encrypt_decrypt(self):
original = 'secret'
crypto = symmetric_encrypt(CryptoUtilsTestCase.test_crypto_key, original)
plain = symmetric_decrypt(CryptoUtilsTestCase.test_crypto_key, crypto)
self.assertEqual(plain, original)
def test_encrypt_output_is_diff_due_to_diff_IV(self):
original = 'Kami is a little boy.'
cryptos = set()
for _ in range(0, 10000):
crypto = symmetric_encrypt(CryptoUtilsTestCase.test_crypto_key, original)
self.assertTrue(crypto not in cryptos)
cryptos.add(crypto)
def test_decrypt_ciphertext_is_too_short(self):
aes_key = AESKey.generate()
plaintext = 'hello world ponies 1'
encrypted = cryptography_symmetric_encrypt(aes_key, plaintext)
# Verify original non manipulated value can be decrypted
decrypted = cryptography_symmetric_decrypt(aes_key, encrypted)
self.assertEqual(decrypted, plaintext)
        # Corrupt / shorten the encrypted data
encrypted_malformed = binascii.unhexlify(encrypted)
header = encrypted_malformed[:KEYCZAR_HEADER_SIZE]
encrypted_malformed = encrypted_malformed[KEYCZAR_HEADER_SIZE:]
# Remove 40 bytes from ciphertext bytes
encrypted_malformed = encrypted_malformed[40:]
# Add back header
encrypted_malformed = header + encrypted_malformed
encrypted_malformed = binascii.hexlify(encrypted_malformed)
        # Verify corrupted value results in an exception
expected_msg = 'Invalid or malformed ciphertext'
self.assertRaisesRegexp(ValueError, expected_msg, cryptography_symmetric_decrypt,
aes_key, encrypted_malformed)
def test_exception_is_thrown_on_invalid_hmac_signature(self):
aes_key = AESKey.generate()
plaintext = 'hello world ponies 2'
encrypted = cryptography_symmetric_encrypt(aes_key, plaintext)
# Verify original non manipulated value can be decrypted
decrypted = cryptography_symmetric_decrypt(aes_key, encrypted)
self.assertEqual(decrypted, plaintext)
# Corrupt the HMAC signature (last part is the HMAC signature)
encrypted_malformed = binascii.unhexlify(encrypted)
encrypted_malformed = encrypted_malformed[:-3]
encrypted_malformed += b'abc'
encrypted_malformed = binascii.hexlify(encrypted_malformed)
        # Verify corrupted value results in an exception
expected_msg = 'Signature did not match digest'
self.assertRaisesRegexp(InvalidSignature, expected_msg, cryptography_symmetric_decrypt,
aes_key, encrypted_malformed)
class CryptoUtilsKeyczarCompatibilityTestCase(TestCase):
"""
Tests which verify that new cryptography based symmetric_encrypt and symmetric_decrypt are
fully compatible with keyczar output format and also return keyczar based format.
"""
def test_aes_key_class(self):
# 1. Unsupported mode
expected_msg = 'Unsupported mode: EBC'
self.assertRaisesRegexp(ValueError, expected_msg, AESKey, aes_key_string='a',
hmac_key_string='b', hmac_key_size=128, mode='EBC')
# 2. AES key is too small
expected_msg = 'Unsafe key size: 64'
self.assertRaisesRegexp(ValueError, expected_msg, AESKey, aes_key_string='a',
hmac_key_string='b', hmac_key_size=128, mode='CBC', size=64)
def test_loading_keys_from_keyczar_formatted_key_files(self):
key_path = os.path.join(KEY_FIXTURES_PATH, 'one.json')
aes_key = read_crypto_key(key_path=key_path)
self.assertEqual(aes_key.hmac_key_string, '<KEY>')
self.assertEqual(aes_key.hmac_key_size, 256)
self.assertEqual(aes_key.aes_key_string, '<KEY>')
self.assertEqual(aes_key.mode, 'CBC')
self.assertEqual(aes_key.size, 256)
key_path = os.path.join(KEY_FIXTURES_PATH, 'two.json')
aes_key = read_crypto_key(key_path=key_path)
self.assertEqual(aes_key.hmac_key_string, '92ok9S5extxphADmUhObPSD5wugey8eTffoJ2CEg_2s')
self.assertEqual(aes_key.hmac_key_size, 256)
self.assertEqual(aes_key.aes_key_string, '<KEY>')
self.assertEqual(aes_key.mode, 'CBC')
self.assertEqual(aes_key.size, 256)
key_path = os.path.join(KEY_FIXTURES_PATH, 'five.json')
aes_key = read_crypto_key(key_path=key_path)
self.assertEqual(aes_key.hmac_key_string, '<KEY>')
self.assertEqual(aes_key.hmac_key_size, 256)
self.assertEqual(aes_key.aes_key_string, '<KEY>')
self.assertEqual(aes_key.mode, 'CBC')
self.assertEqual(aes_key.size, 128)
def test_key_generation_file_format_is_fully_keyczar_compatible(self):
# Verify that the code can read and correctly parse keyczar formatted key files
aes_key = AESKey.generate()
key_json = aes_key.to_json()
json_parsed = json.loads(key_json)
expected = {
'hmacKey': {
'hmacKeyString': aes_key.hmac_key_string,
'size': aes_key.hmac_key_size
},
'aesKeyString': aes_key.aes_key_string,
'mode': aes_key.mode,
'size': aes_key.size
}
self.assertEqual(json_parsed, expected)
def test_symmetric_encrypt_decrypt_cryptography(self):
key = AESKey.generate()
plaintexts = [
'a b c',
'ab',
'hello foo',
'hell',
            'bar5',
            'hello hello bar bar hello',
'a',
'',
'c'
]
for plaintext in plaintexts:
encrypted = cryptography_symmetric_encrypt(key, plaintext)
decrypted = cryptography_symmetric_decrypt(key, encrypted)
self.assertEqual(decrypted, plaintext)
@unittest2.skipIf(six.PY3, 'keyczar doesn\'t work under Python 3')
def test_symmetric_encrypt_decrypt_roundtrips_1(self):
encrypt_keys = [
AESKey.generate(),
AESKey.generate(),
AESKey.generate(),
AESKey.generate()
]
# Verify all keys are unique
aes_key_strings = set()
hmac_key_strings = set()
for key in encrypt_keys:
aes_key_strings.add(key.aes_key_string)
hmac_key_strings.add(key.hmac_key_string)
self.assertEqual(len(aes_key_strings), 4)
self.assertEqual(len(hmac_key_strings), 4)
plaintext = 'hello world test dummy 8 9 5 1 bar2'
# Verify that round trips work and that cryptography based primitives are fully compatible
# with keyczar format
count = 0
for key in encrypt_keys:
data_enc_keyczar = keyczar_symmetric_encrypt(key, plaintext)
data_enc_cryptography = cryptography_symmetric_encrypt(key, plaintext)
self.assertNotEqual(data_enc_keyczar, data_enc_cryptography)
data_dec_keyczar_keyczar = keyczar_symmetric_decrypt(key, data_enc_keyczar)
data_dec_keyczar_cryptography = keyczar_symmetric_decrypt(key, data_enc_cryptography)
self.assertEqual(data_dec_keyczar_keyczar, plaintext)
self.assertEqual(data_dec_keyczar_cryptography, plaintext)
data_dec_cryptography_cryptography = cryptography_symmetric_decrypt(key,
data_enc_cryptography)
data_dec_cryptography_keyczar = cryptography_symmetric_decrypt(key, data_enc_keyczar)
self.assertEqual(data_dec_cryptography_cryptography, plaintext)
self.assertEqual(data_dec_cryptography_keyczar, plaintext)
count += 1
self.assertEqual(count, 4)
| <filename>st2common/tests/unit/test_crypto_utils.py
# Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the 'License'); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
import os
import six
import json
import binascii
import unittest2
from unittest2 import TestCase
from six.moves import range
from cryptography.exceptions import InvalidSignature
from st2common.util.crypto import KEYCZAR_HEADER_SIZE
from st2common.util.crypto import AESKey
from st2common.util.crypto import read_crypto_key
from st2common.util.crypto import symmetric_encrypt
from st2common.util.crypto import symmetric_decrypt
from st2common.util.crypto import keyczar_symmetric_decrypt
from st2common.util.crypto import keyczar_symmetric_encrypt
from st2common.util.crypto import cryptography_symmetric_encrypt
from st2common.util.crypto import cryptography_symmetric_decrypt
from st2tests.fixturesloader import get_fixtures_base_path
__all__ = [
'CryptoUtilsTestCase',
'CryptoUtilsKeyczarCompatibilityTestCase'
]
KEY_FIXTURES_PATH = os.path.join(get_fixtures_base_path(), 'keyczar_keys/')
class CryptoUtilsTestCase(TestCase):
@classmethod
def setUpClass(cls):
super(CryptoUtilsTestCase, cls).setUpClass()
CryptoUtilsTestCase.test_crypto_key = AESKey.generate()
def test_symmetric_encrypt_decrypt(self):
original = 'secret'
crypto = symmetric_encrypt(CryptoUtilsTestCase.test_crypto_key, original)
plain = symmetric_decrypt(CryptoUtilsTestCase.test_crypto_key, crypto)
self.assertEqual(plain, original)
def test_encrypt_output_is_diff_due_to_diff_IV(self):
original = 'Kami is a little boy.'
cryptos = set()
for _ in range(0, 10000):
crypto = symmetric_encrypt(CryptoUtilsTestCase.test_crypto_key, original)
self.assertTrue(crypto not in cryptos)
cryptos.add(crypto)
def test_decrypt_ciphertext_is_too_short(self):
aes_key = AESKey.generate()
plaintext = 'hello world ponies 1'
encrypted = cryptography_symmetric_encrypt(aes_key, plaintext)
# Verify original non manipulated value can be decrypted
decrypted = cryptography_symmetric_decrypt(aes_key, encrypted)
self.assertEqual(decrypted, plaintext)
        # Corrupt / shorten the encrypted data
encrypted_malformed = binascii.unhexlify(encrypted)
header = encrypted_malformed[:KEYCZAR_HEADER_SIZE]
encrypted_malformed = encrypted_malformed[KEYCZAR_HEADER_SIZE:]
# Remove 40 bytes from ciphertext bytes
encrypted_malformed = encrypted_malformed[40:]
# Add back header
encrypted_malformed = header + encrypted_malformed
encrypted_malformed = binascii.hexlify(encrypted_malformed)
        # Verify corrupted value results in an exception
expected_msg = 'Invalid or malformed ciphertext'
self.assertRaisesRegexp(ValueError, expected_msg, cryptography_symmetric_decrypt,
aes_key, encrypted_malformed)
def test_exception_is_thrown_on_invalid_hmac_signature(self):
aes_key = AESKey.generate()
plaintext = 'hello world ponies 2'
encrypted = cryptography_symmetric_encrypt(aes_key, plaintext)
# Verify original non manipulated value can be decrypted
decrypted = cryptography_symmetric_decrypt(aes_key, encrypted)
self.assertEqual(decrypted, plaintext)
# Corrupt the HMAC signature (last part is the HMAC signature)
encrypted_malformed = binascii.unhexlify(encrypted)
encrypted_malformed = encrypted_malformed[:-3]
encrypted_malformed += b'abc'
encrypted_malformed = binascii.hexlify(encrypted_malformed)
        # Verify corrupted value results in an exception
expected_msg = 'Signature did not match digest'
self.assertRaisesRegexp(InvalidSignature, expected_msg, cryptography_symmetric_decrypt,
aes_key, encrypted_malformed)
class CryptoUtilsKeyczarCompatibilityTestCase(TestCase):
"""
Tests which verify that new cryptography based symmetric_encrypt and symmetric_decrypt are
fully compatible with keyczar output format and also return keyczar based format.
"""
def test_aes_key_class(self):
# 1. Unsupported mode
expected_msg = 'Unsupported mode: EBC'
self.assertRaisesRegexp(ValueError, expected_msg, AESKey, aes_key_string='a',
hmac_key_string='b', hmac_key_size=128, mode='EBC')
# 2. AES key is too small
expected_msg = 'Unsafe key size: 64'
self.assertRaisesRegexp(ValueError, expected_msg, AESKey, aes_key_string='a',
hmac_key_string='b', hmac_key_size=128, mode='CBC', size=64)
def test_loading_keys_from_keyczar_formatted_key_files(self):
key_path = os.path.join(KEY_FIXTURES_PATH, 'one.json')
aes_key = read_crypto_key(key_path=key_path)
self.assertEqual(aes_key.hmac_key_string, '<KEY>')
self.assertEqual(aes_key.hmac_key_size, 256)
self.assertEqual(aes_key.aes_key_string, '<KEY>')
self.assertEqual(aes_key.mode, 'CBC')
self.assertEqual(aes_key.size, 256)
key_path = os.path.join(KEY_FIXTURES_PATH, 'two.json')
aes_key = read_crypto_key(key_path=key_path)
self.assertEqual(aes_key.hmac_key_string, '92ok9S5extxphADmUhObPSD5wugey8eTffoJ2CEg_2s')
self.assertEqual(aes_key.hmac_key_size, 256)
self.assertEqual(aes_key.aes_key_string, '<KEY>')
self.assertEqual(aes_key.mode, 'CBC')
self.assertEqual(aes_key.size, 256)
key_path = os.path.join(KEY_FIXTURES_PATH, 'five.json')
aes_key = read_crypto_key(key_path=key_path)
self.assertEqual(aes_key.hmac_key_string, '<KEY>')
self.assertEqual(aes_key.hmac_key_size, 256)
self.assertEqual(aes_key.aes_key_string, '<KEY>')
self.assertEqual(aes_key.mode, 'CBC')
self.assertEqual(aes_key.size, 128)
def test_key_generation_file_format_is_fully_keyczar_compatible(self):
# Verify that the code can read and correctly parse keyczar formatted key files
aes_key = AESKey.generate()
key_json = aes_key.to_json()
json_parsed = json.loads(key_json)
expected = {
'hmacKey': {
'hmacKeyString': aes_key.hmac_key_string,
'size': aes_key.hmac_key_size
},
'aesKeyString': aes_key.aes_key_string,
'mode': aes_key.mode,
'size': aes_key.size
}
self.assertEqual(json_parsed, expected)
def test_symmetric_encrypt_decrypt_cryptography(self):
key = AESKey.generate()
plaintexts = [
'a b c',
'ab',
'hello foo',
'hell',
            'bar5',
            'hello hello bar bar hello',
'a',
'',
'c'
]
for plaintext in plaintexts:
encrypted = cryptography_symmetric_encrypt(key, plaintext)
decrypted = cryptography_symmetric_decrypt(key, encrypted)
self.assertEqual(decrypted, plaintext)
@unittest2.skipIf(six.PY3, 'keyczar doesn\'t work under Python 3')
def test_symmetric_encrypt_decrypt_roundtrips_1(self):
encrypt_keys = [
AESKey.generate(),
AESKey.generate(),
AESKey.generate(),
AESKey.generate()
]
# Verify all keys are unique
aes_key_strings = set()
hmac_key_strings = set()
for key in encrypt_keys:
aes_key_strings.add(key.aes_key_string)
hmac_key_strings.add(key.hmac_key_string)
self.assertEqual(len(aes_key_strings), 4)
self.assertEqual(len(hmac_key_strings), 4)
plaintext = 'hello world test dummy 8 9 5 1 bar2'
# Verify that round trips work and that cryptography based primitives are fully compatible
# with keyczar format
count = 0
for key in encrypt_keys:
data_enc_keyczar = keyczar_symmetric_encrypt(key, plaintext)
data_enc_cryptography = cryptography_symmetric_encrypt(key, plaintext)
self.assertNotEqual(data_enc_keyczar, data_enc_cryptography)
data_dec_keyczar_keyczar = keyczar_symmetric_decrypt(key, data_enc_keyczar)
data_dec_keyczar_cryptography = keyczar_symmetric_decrypt(key, data_enc_cryptography)
self.assertEqual(data_dec_keyczar_keyczar, plaintext)
self.assertEqual(data_dec_keyczar_cryptography, plaintext)
data_dec_cryptography_cryptography = cryptography_symmetric_decrypt(key,
data_enc_cryptography)
data_dec_cryptography_keyczar = cryptography_symmetric_decrypt(key, data_enc_keyczar)
self.assertEqual(data_dec_cryptography_cryptography, plaintext)
self.assertEqual(data_dec_cryptography_keyczar, plaintext)
count += 1
self.assertEqual(count, 4)
| en | 0.860506 | # Licensed to the StackStorm, Inc ('StackStorm') under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the 'License'); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an 'AS IS' BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # Verify original non manipulated value can be decrypted # Corrupt / shortern the encrypted data # Remove 40 bytes from ciphertext bytes # Add back header # Verify corrupted value results in an excpetion # Verify original non manipulated value can be decrypted # Corrupt the HMAC signature (last part is the HMAC signature) # Verify corrupted value results in an excpetion Tests which verify that new cryptography based symmetric_encrypt and symmetric_decrypt are fully compatible with keyczar output format and also return keyczar based format. # 1. Unsupported mode # 2. AES key is too small # Verify that the code can read and correctly parse keyczar formatted key files # Verify all keys are unique # Verify that round trips work and that cryptography based primitives are fully compatible # with keyczar format | 1.734424 | 2 |
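The tests above pin st2's AES-plus-HMAC output to a keyczar-compatible wire format. For the general encrypt/decrypt-and-authenticate round trip, the cryptography package's Fernet recipe (AES-CBC with an HMAC tag) is a much simpler analog; this sketch is illustration only and does not produce the keyczar-compatible format the tests check:

from cryptography.fernet import Fernet, InvalidToken

key = Fernet.generate_key()
f = Fernet(key)
token = f.encrypt(b'secret')
assert f.decrypt(token) == b'secret'  # round trip, as in test_symmetric_encrypt_decrypt

tampered = bytearray(token)
tampered[-1] ^= 0x01  # corrupt the token, as the HMAC-signature test above does
try:
    f.decrypt(bytes(tampered))
except InvalidToken:
    print('tampering detected')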
synth.py | nielm/nodejs-spanner | 0 | 6630732 | import synthtool as s
import synthtool.gcp as gcp
import synthtool.languages.node as node
import logging
from pathlib import Path
logging.basicConfig(level=logging.DEBUG)
AUTOSYNTH_MULTIPLE_COMMITS = True
gapic = gcp.GAPICBazel()
spanner = gapic.node_library('spanner', 'v1', proto_path='google/spanner/v1')
spanner_admin_database = gapic.node_library('admin-database', 'v1', proto_path='google/spanner/admin/database/v1')
spanner_admin_instance = gapic.node_library('admin-instance', 'v1', proto_path='google/spanner/admin/instance/v1')
# nodejs-spanner is composed of 3 APIs: SpannerClient, SpannerAdminDatabase and
# SpannerAdminInstance, all 3 are exported in src/v1/index.js
# Excluding auto-generated system test since Spanner has its own packing test
excludes=["src/index.ts", "src/v1/index.ts", "README.md", "package.json",
"system-test/*", "system-test/fixtures/sample/*", "system-test/fixtures/sample/src/*",
"tsconfig.json"]
s.copy(spanner, excludes=excludes)
s.copy(spanner_admin_database, excludes=excludes+["webpack.config.js", ".jsdoc.js"])
s.copy(spanner_admin_instance, excludes=excludes+["webpack.config.js", ".jsdoc.js"])
common_templates = gcp.CommonTemplates()
templates = common_templates.node_library(source_location='build/src')
s.copy(templates)
node.postprocess_gapic_library()
| import synthtool as s
import synthtool.gcp as gcp
import synthtool.languages.node as node
import logging
from pathlib import Path
logging.basicConfig(level=logging.DEBUG)
AUTOSYNTH_MULTIPLE_COMMITS = True
gapic = gcp.GAPICBazel()
spanner = gapic.node_library('spanner', 'v1', proto_path='google/spanner/v1')
spanner_admin_database = gapic.node_library('admin-database', 'v1', proto_path='google/spanner/admin/database/v1')
spanner_admin_instance = gapic.node_library('admin-instance', 'v1', proto_path='google/spanner/admin/instance/v1')
# nodejs-spanner is composed of 3 APIs: SpannerClient, SpannerAdminDatabase and
# SpannerAdminInstance, all 3 are exported in src/v1/index.js
# Excluding auto-generated system test since Spanner has its own packing test
excludes=["src/index.ts", "src/v1/index.ts", "README.md", "package.json",
"system-test/*", "system-test/fixtures/sample/*", "system-test/fixtures/sample/src/*",
"tsconfig.json"]
s.copy(spanner, excludes=excludes)
s.copy(spanner_admin_database, excludes=excludes+["webpack.config.js", ".jsdoc.js"])
s.copy(spanner_admin_instance, excludes=excludes+["webpack.config.js", ".jsdoc.js"])
common_templates = gcp.CommonTemplates()
templates = common_templates.node_library(source_location='build/src')
s.copy(templates)
node.postprocess_gapic_library()
| en | 0.862649 | # nodejs-spanner is composed of 3 APIs: SpannerClient, SpannerAdminDatabase and # SpannerAdminInstance, all 3 are exported in src/v1/index.js # Excluding auto-generated system test since Spanner has its own packing test | 1.87906 | 2 |
torchvision/models/quantization/googlenet.py | SliMM/vision | 2 | 6630733 | import warnings
import torch
import torch.nn as nn
from torch.nn import functional as F
from torchvision.models.utils import load_state_dict_from_url
from torchvision.models.googlenet import (
GoogLeNetOutputs, BasicConv2d, Inception, InceptionAux, GoogLeNet, model_urls)
from .utils import _replace_relu, quantize_model
__all__ = ['QuantizableGoogLeNet', 'googlenet']
quant_model_urls = {
# fp32 GoogLeNet ported from TensorFlow, with weights quantized in PyTorch
'googlenet_fbgemm': 'https://download.pytorch.org/models/quantized/googlenet_fbgemm-c00238cf.pth',
}
def googlenet(pretrained=False, progress=True, quantize=False, **kwargs):
r"""GoogLeNet (Inception v1) model architecture from
`"Going Deeper with Convolutions" <http://arxiv.org/abs/1409.4842>`_.
Note that quantize = True returns a quantized model with 8 bit
weights. Quantized models only support inference and run on CPUs.
GPU inference is not yet supported
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
aux_logits (bool): If True, adds two auxiliary branches that can improve training.
Default: *False* when pretrained is True otherwise *True*
transform_input (bool): If True, preprocesses the input according to the method with which it
was trained on ImageNet. Default: *False*
"""
if pretrained:
if 'transform_input' not in kwargs:
kwargs['transform_input'] = True
if 'aux_logits' not in kwargs:
kwargs['aux_logits'] = False
if kwargs['aux_logits']:
warnings.warn('auxiliary heads in the pretrained googlenet model are NOT pretrained, '
'so make sure to train them')
original_aux_logits = kwargs['aux_logits']
kwargs['aux_logits'] = True
kwargs['init_weights'] = False
model = QuantizableGoogLeNet(**kwargs)
_replace_relu(model)
if quantize:
# TODO use pretrained as a string to specify the backend
backend = 'fbgemm'
quantize_model(model, backend)
else:
assert pretrained in [True, False]
if pretrained:
if quantize:
model_url = quant_model_urls['googlenet' + '_' + backend]
else:
model_url = model_urls['googlenet']
state_dict = load_state_dict_from_url(model_url,
progress=progress)
model.load_state_dict(state_dict)
if not original_aux_logits:
model.aux_logits = False
model.aux1 = None
model.aux2 = None
return model
class QuantizableBasicConv2d(BasicConv2d):
def __init__(self, *args, **kwargs):
super(QuantizableBasicConv2d, self).__init__(*args, **kwargs)
self.relu = nn.ReLU()
def forward(self, x):
x = self.conv(x)
x = self.bn(x)
x = self.relu(x)
return x
def fuse_model(self):
torch.quantization.fuse_modules(self, ["conv", "bn", "relu"], inplace=True)
class QuantizableInception(Inception):
def __init__(self, *args, **kwargs):
super(QuantizableInception, self).__init__(
conv_block=QuantizableBasicConv2d, *args, **kwargs)
self.cat = nn.quantized.FloatFunctional()
def forward(self, x):
outputs = self._forward(x)
return self.cat.cat(outputs, 1)
class QuantizableInceptionAux(InceptionAux):
def __init__(self, *args, **kwargs):
super(QuantizableInceptionAux, self).__init__(
conv_block=QuantizableBasicConv2d, *args, **kwargs)
self.relu = nn.ReLU()
self.dropout = nn.Dropout(0.7)
def forward(self, x):
# aux1: N x 512 x 14 x 14, aux2: N x 528 x 14 x 14
x = F.adaptive_avg_pool2d(x, (4, 4))
# aux1: N x 512 x 4 x 4, aux2: N x 528 x 4 x 4
x = self.conv(x)
# N x 128 x 4 x 4
x = torch.flatten(x, 1)
# N x 2048
x = self.relu(self.fc1(x))
# N x 1024
x = self.dropout(x)
# N x 1024
x = self.fc2(x)
# N x 1000 (num_classes)
return x
class QuantizableGoogLeNet(GoogLeNet):
def __init__(self, *args, **kwargs):
super(QuantizableGoogLeNet, self).__init__(
blocks=[QuantizableBasicConv2d, QuantizableInception, QuantizableInceptionAux],
*args,
**kwargs
)
self.quant = torch.quantization.QuantStub()
self.dequant = torch.quantization.DeQuantStub()
def forward(self, x):
x = self._transform_input(x)
x = self.quant(x)
x, aux1, aux2 = self._forward(x)
x = self.dequant(x)
aux_defined = self.training and self.aux_logits
if torch.jit.is_scripting():
if not aux_defined:
warnings.warn("Scripted QuantizableGoogleNet always returns GoogleNetOutputs Tuple")
return GoogLeNetOutputs(x, aux2, aux1)
else:
return self.eager_outputs(x, aux2, aux1)
def fuse_model(self):
r"""Fuse conv/bn/relu modules in googlenet model
Fuse conv+bn+relu/ conv+relu/conv+bn modules to prepare for quantization.
Model is modified in place. Note that this operation does not change numerics
and the model after modification is in floating point
"""
for m in self.modules():
if type(m) == QuantizableBasicConv2d:
m.fuse_model()
| import warnings
import torch
import torch.nn as nn
from torch.nn import functional as F
from torchvision.models.utils import load_state_dict_from_url
from torchvision.models.googlenet import (
GoogLeNetOutputs, BasicConv2d, Inception, InceptionAux, GoogLeNet, model_urls)
from .utils import _replace_relu, quantize_model
__all__ = ['QuantizableGoogLeNet', 'googlenet']
quant_model_urls = {
# fp32 GoogLeNet ported from TensorFlow, with weights quantized in PyTorch
'googlenet_fbgemm': 'https://download.pytorch.org/models/quantized/googlenet_fbgemm-c00238cf.pth',
}
def googlenet(pretrained=False, progress=True, quantize=False, **kwargs):
r"""GoogLeNet (Inception v1) model architecture from
`"Going Deeper with Convolutions" <http://arxiv.org/abs/1409.4842>`_.
Note that quantize = True returns a quantized model with 8 bit
weights. Quantized models only support inference and run on CPUs.
GPU inference is not yet supported
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
aux_logits (bool): If True, adds two auxiliary branches that can improve training.
Default: *False* when pretrained is True otherwise *True*
transform_input (bool): If True, preprocesses the input according to the method with which it
was trained on ImageNet. Default: *False*
"""
if pretrained:
if 'transform_input' not in kwargs:
kwargs['transform_input'] = True
if 'aux_logits' not in kwargs:
kwargs['aux_logits'] = False
if kwargs['aux_logits']:
warnings.warn('auxiliary heads in the pretrained googlenet model are NOT pretrained, '
'so make sure to train them')
original_aux_logits = kwargs['aux_logits']
kwargs['aux_logits'] = True
kwargs['init_weights'] = False
model = QuantizableGoogLeNet(**kwargs)
_replace_relu(model)
if quantize:
# TODO use pretrained as a string to specify the backend
backend = 'fbgemm'
quantize_model(model, backend)
else:
assert pretrained in [True, False]
if pretrained:
if quantize:
model_url = quant_model_urls['googlenet' + '_' + backend]
else:
model_url = model_urls['googlenet']
state_dict = load_state_dict_from_url(model_url,
progress=progress)
model.load_state_dict(state_dict)
if not original_aux_logits:
model.aux_logits = False
model.aux1 = None
model.aux2 = None
return model
class QuantizableBasicConv2d(BasicConv2d):
def __init__(self, *args, **kwargs):
super(QuantizableBasicConv2d, self).__init__(*args, **kwargs)
self.relu = nn.ReLU()
def forward(self, x):
x = self.conv(x)
x = self.bn(x)
x = self.relu(x)
return x
def fuse_model(self):
torch.quantization.fuse_modules(self, ["conv", "bn", "relu"], inplace=True)
class QuantizableInception(Inception):
def __init__(self, *args, **kwargs):
super(QuantizableInception, self).__init__(
conv_block=QuantizableBasicConv2d, *args, **kwargs)
self.cat = nn.quantized.FloatFunctional()
def forward(self, x):
outputs = self._forward(x)
return self.cat.cat(outputs, 1)
class QuantizableInceptionAux(InceptionAux):
def __init__(self, *args, **kwargs):
super(QuantizableInceptionAux, self).__init__(
conv_block=QuantizableBasicConv2d, *args, **kwargs)
self.relu = nn.ReLU()
self.dropout = nn.Dropout(0.7)
def forward(self, x):
# aux1: N x 512 x 14 x 14, aux2: N x 528 x 14 x 14
x = F.adaptive_avg_pool2d(x, (4, 4))
# aux1: N x 512 x 4 x 4, aux2: N x 528 x 4 x 4
x = self.conv(x)
# N x 128 x 4 x 4
x = torch.flatten(x, 1)
# N x 2048
x = self.relu(self.fc1(x))
# N x 1024
x = self.dropout(x)
# N x 1024
x = self.fc2(x)
# N x 1000 (num_classes)
return x
class QuantizableGoogLeNet(GoogLeNet):
def __init__(self, *args, **kwargs):
super(QuantizableGoogLeNet, self).__init__(
blocks=[QuantizableBasicConv2d, QuantizableInception, QuantizableInceptionAux],
*args,
**kwargs
)
self.quant = torch.quantization.QuantStub()
self.dequant = torch.quantization.DeQuantStub()
def forward(self, x):
x = self._transform_input(x)
x = self.quant(x)
x, aux1, aux2 = self._forward(x)
x = self.dequant(x)
aux_defined = self.training and self.aux_logits
if torch.jit.is_scripting():
if not aux_defined:
warnings.warn("Scripted QuantizableGoogleNet always returns GoogleNetOutputs Tuple")
return GoogLeNetOutputs(x, aux2, aux1)
else:
return self.eager_outputs(x, aux2, aux1)
def fuse_model(self):
r"""Fuse conv/bn/relu modules in googlenet model
Fuse conv+bn+relu/ conv+relu/conv+bn modules to prepare for quantization.
Model is modified in place. Note that this operation does not change numerics
and the model after modification is in floating point
"""
for m in self.modules():
if type(m) == QuantizableBasicConv2d:
m.fuse_model()
| en | 0.785788 | # fp32 GoogLeNet ported from TensorFlow, with weights quantized in PyTorch GoogLeNet (Inception v1) model architecture from `"Going Deeper with Convolutions" <http://arxiv.org/abs/1409.4842>`_. Note that quantize = True returns a quantized model with 8 bit weights. Quantized models only support inference and run on CPUs. GPU inference is not yet supported Args: pretrained (bool): If True, returns a model pre-trained on ImageNet progress (bool): If True, displays a progress bar of the download to stderr aux_logits (bool): If True, adds two auxiliary branches that can improve training. Default: *False* when pretrained is True otherwise *True* transform_input (bool): If True, preprocesses the input according to the method with which it was trained on ImageNet. Default: *False* # TODO use pretrained as a string to specify the backend # aux1: N x 512 x 14 x 14, aux2: N x 528 x 14 x 14 # aux1: N x 512 x 4 x 4, aux2: N x 528 x 4 x 4 # N x 128 x 4 x 4 # N x 2048 # N x 1024 # N x 1024 # N x 1000 (num_classes) Fuse conv/bn/relu modules in googlenet model Fuse conv+bn+relu/ conv+relu/conv+bn modules to prepare for quantization. Model is modified in place. Note that this operation does not change numerics and the model after modification is in floating point | 2.451438 | 2 |
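A typical eager-mode post-training quantization flow for the model above is: build the float network, fuse Conv+BN+ReLU, attach a qconfig, calibrate, then convert. This is a hedged sketch that assumes torchvision is installed and an x86 CPU with the fbgemm backend is available; weights are left at their default initialisation, so only the output shape is meaningful:

import torch
from torchvision.models.quantization import googlenet

model = googlenet(pretrained=False, aux_logits=False, init_weights=False, quantize=False)
model.eval()
model.fuse_model()  # fold Conv+BN+ReLU, as defined in QuantizableBasicConv2d above
model.qconfig = torch.quantization.get_default_qconfig('fbgemm')
torch.quantization.prepare(model, inplace=True)
model(torch.randn(1, 3, 224, 224))  # one calibration pass with dummy data
torch.quantization.convert(model, inplace=True)
print(model(torch.randn(1, 3, 224, 224)).shape)  # torch.Size([1, 1000])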
cbng_trainer/cli.py | cluebotng/trainer | 0 | 6630734 | #!/usr/bin/env python3
'''
MIT License
Copyright (c) 2021 <NAME>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
'''
import asyncio
import logging
import sys
import tempfile
from pathlib import PosixPath
import click
from cbng_trainer.common.docker import (build_docker_image,
start_container,
stop_container,
run_container)
from cbng_trainer.comparator.comparator import compare_samples
from cbng_trainer.comparator.results import generate_summary
from cbng_trainer.trainer.reviewed import dump_reviewed_edits
logger = logging.getLogger(__name__)
@click.group()
@click.option('--debug', is_flag=True, help='Enable debug logging')
def cli(debug):
logging.basicConfig(level=(logging.DEBUG if debug else logging.INFO),
stream=sys.stderr)
@cli.command()
@click.option('--output', help='Target file',
default='edits.xml', required=True)
def download_edits(output):
loop = asyncio.get_event_loop()
loop.run_until_complete(dump_reviewed_edits(PosixPath(output)))
@cli.command()
@click.option('--input', help='Edits file', required=True,
default='edits.xml', type=click.Path(True))
@click.option('--output', help='Target directory',
required=True, type=click.Path(True))
@click.option('--release-tag', help='Git release tag',
required=True, default='v1.0.2')
def build_database(input, output, release_tag):
output = PosixPath(output)
core_image = build_docker_image(output, release_tag)
stdout = run_container(core_image,
[(PosixPath(input).absolute().as_posix(),
'/edits.xml'),
(PosixPath(output).absolute().as_posix(),
'/opt/cbng-core/data/')],
['/opt/cbng-core/cluebotng', '-c', 'conf',
'-m', 'bayes_train', '-f', '/edits.xml'])
logger.info(f'Finished bayes_train: {stdout.decode("utf-8")}')
stdout = run_container(core_image,
[(PosixPath(input).absolute().as_posix(), '/edits.xml'),
(PosixPath(output).absolute().as_posix(), '/opt/cbng-core/data/')],
['/opt/cbng-core/create_bayes_db', 'data/bayes.db',
'data/main_bayes_train.dat'])
logger.info(f'Finished create_bayes_db bayes.db: {stdout.decode("utf-8")}')
stdout = run_container(core_image,
[(PosixPath(input).absolute().as_posix(), '/edits.xml'),
(PosixPath(output).absolute().as_posix(), '/opt/cbng-core/data/')],
['/opt/cbng-core/create_bayes_db', 'data/two_bayes.db',
'data/two_bayes_train.dat'])
logger.info(f'Finished create_bayes_db two_bayes.db: {stdout.decode("utf-8")}')
stdout = run_container(core_image,
[(PosixPath(input).absolute().as_posix(), '/edits.xml'),
(PosixPath(output).absolute().as_posix(), '/opt/cbng-core/data/')],
['/opt/cbng-core/cluebotng', '-c', 'conf',
'-m', 'ann_train', '-f', '/edits.xml'])
logger.info(f'Finished ann_train: {stdout.decode("utf-8")}')
stdout = run_container(core_image,
[(PosixPath(input).absolute().as_posix(), '/edits.xml'),
(PosixPath(output).absolute().as_posix(), '/opt/cbng-core/data/')],
['/opt/cbng-core/create_ann', 'data/main_ann.fann',
'data/main_ann_train.dat', '150', '0.25', '162'])
logger.info(f'Finished create_ann main_ann.fann: {stdout.decode("utf-8")}')
@cli.command()
@click.option('--target', help='Target binaries path', required=True, type=click.Path(True))
@click.option('--output', help='Output path', required=False, type=click.Path(True))
@click.option('--release-tag', help='Git release tag', required=True, default='v1.0.2')
def compare_database(target, output, release_tag):
with tempfile.TemporaryDirectory() as tmp_dir:
base_image = build_docker_image(PosixPath(tmp_dir), release_tag)
target_image = build_docker_image(PosixPath(target), release_tag, True)
base_container = start_container(base_image, 3501)
target_container = start_container(target_image, 3502)
try:
loop = asyncio.get_event_loop()
results = loop.run_until_complete(compare_samples(3501, 3502))
except Exception as e:
raise e
else:
if output:
target = PosixPath(output) / 'comparator.md'
click.echo(f'Dumping results to {target}')
with target.open('w') as fh:
fh.write(generate_summary(results))
else:
click.echo('Dumping results to stdout....')
for result in results:
print(result)
finally:
stop_container(base_container)
stop_container(target_container)
if __name__ == '__main__':
cli()
| #!/usr/bin/env python3
'''
MIT License
Copyright (c) 2021 <NAME>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
'''
import asyncio
import logging
import sys
import tempfile
from pathlib import PosixPath
import click
from cbng_trainer.common.docker import (build_docker_image,
start_container,
stop_container,
run_container)
from cbng_trainer.comparator.comparator import compare_samples
from cbng_trainer.comparator.results import generate_summary
from cbng_trainer.trainer.reviewed import dump_reviewed_edits
logger = logging.getLogger(__name__)
@click.group()
@click.option('--debug', is_flag=True, help='Enable debug logging')
def cli(debug):
logging.basicConfig(level=(logging.DEBUG if debug else logging.INFO),
stream=sys.stderr)
@cli.command()
@click.option('--output', help='Target file',
default='edits.xml', required=True)
def download_edits(output):
loop = asyncio.get_event_loop()
loop.run_until_complete(dump_reviewed_edits(PosixPath(output)))
@cli.command()
@click.option('--input', help='Edits file', required=True,
default='edits.xml', type=click.Path(True))
@click.option('--output', help='Target directory',
required=True, type=click.Path(True))
@click.option('--release-tag', help='Git release tag',
required=True, default='v1.0.2')
def build_database(input, output, release_tag):
output = PosixPath(output)
core_image = build_docker_image(output, release_tag)
stdout = run_container(core_image,
[(PosixPath(input).absolute().as_posix(),
'/edits.xml'),
(PosixPath(output).absolute().as_posix(),
'/opt/cbng-core/data/')],
['/opt/cbng-core/cluebotng', '-c', 'conf',
'-m', 'bayes_train', '-f', '/edits.xml'])
logger.info(f'Finished bayes_train: {stdout.decode("utf-8")}')
stdout = run_container(core_image,
[(PosixPath(input).absolute().as_posix(), '/edits.xml'),
(PosixPath(output).absolute().as_posix(), '/opt/cbng-core/data/')],
['/opt/cbng-core/create_bayes_db', 'data/bayes.db',
'data/main_bayes_train.dat'])
logger.info(f'Finished create_bayes_db bayes.db: {stdout.decode("utf-8")}')
stdout = run_container(core_image,
[(PosixPath(input).absolute().as_posix(), '/edits.xml'),
(PosixPath(output).absolute().as_posix(), '/opt/cbng-core/data/')],
['/opt/cbng-core/create_bayes_db', 'data/two_bayes.db',
'data/two_bayes_train.dat'])
logger.info(f'Finished create_bayes_db two_bayes.db: {stdout.decode("utf-8")}')
stdout = run_container(core_image,
[(PosixPath(input).absolute().as_posix(), '/edits.xml'),
(PosixPath(output).absolute().as_posix(), '/opt/cbng-core/data/')],
['/opt/cbng-core/cluebotng', '-c', 'conf',
'-m', 'ann_train', '-f', '/edits.xml'])
logger.info(f'Finished ann_train: {stdout.decode("utf-8")}')
stdout = run_container(core_image,
[(PosixPath(input).absolute().as_posix(), '/edits.xml'),
(PosixPath(output).absolute().as_posix(), '/opt/cbng-core/data/')],
['/opt/cbng-core/create_ann', 'data/main_ann.fann',
'data/main_ann_train.dat', '150', '0.25', '162'])
logger.info(f'Finished create_ann main_ann.fann: {stdout.decode("utf-8")}')
@cli.command()
@click.option('--target', help='Target binaries path', required=True, type=click.Path(True))
@click.option('--output', help='Output path', required=False, type=click.Path(True))
@click.option('--release-tag', help='Git release tag', required=True, default='v1.0.2')
def compare_database(target, output, release_tag):
with tempfile.TemporaryDirectory() as tmp_dir:
base_image = build_docker_image(PosixPath(tmp_dir), release_tag)
target_image = build_docker_image(PosixPath(target), release_tag, True)
base_container = start_container(base_image, 3501)
target_container = start_container(target_image, 3502)
try:
loop = asyncio.get_event_loop()
results = loop.run_until_complete(compare_samples(3501, 3502))
except Exception as e:
raise e
else:
if output:
target = PosixPath(output) / 'comparator.md'
click.echo(f'Dumping results to {target}')
with target.open('w') as fh:
fh.write(generate_summary(results))
else:
click.echo('Dumping results to stdout....')
for result in results:
print(result)
finally:
stop_container(base_container)
stop_container(target_container)
if __name__ == '__main__':
cli()
| en | 0.754698 | #!/usr/bin/env python3 MIT License Copyright (c) 2021 <NAME> Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. | 1.575282 | 2 |
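Click groups like the one above can be smoke-tested without touching Docker or the network by invoking them through click's CliRunner. This assumes the cbng_trainer package is importable and only exercises --help:

from click.testing import CliRunner
from cbng_trainer.cli import cli

runner = CliRunner()
result = runner.invoke(cli, ['--help'])
assert result.exit_code == 0  # the group resolves and prints its usage text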
ajenga/plugin/__init__.py | project-ajenga/Core | 0 | 6630735 | from .service import Privilege
from .service import Service
from .service import remove_service
from .service import set_current_plugin
from .plugin import Plugin
from .plugin import get_current_plugin
from .plugin import get_loaded_plugins
from .plugin import get_plugin
from .plugin import load_plugin
from .plugin import reload_plugin
from .plugin import unload_plugin
from .res import DirectoryType
from .res import ensure_file_path
from .res import get_plugin_dir
| from .service import Privilege
from .service import Service
from .service import remove_service
from .service import set_current_plugin
from .plugin import Plugin
from .plugin import get_current_plugin
from .plugin import get_loaded_plugins
from .plugin import get_plugin
from .plugin import load_plugin
from .plugin import reload_plugin
from .plugin import unload_plugin
from .res import DirectoryType
from .res import ensure_file_path
from .res import get_plugin_dir
| none | 1 | 1.141136 | 1 |
|
lobster/audio.py | noahfx/lobster | 6 | 6630736 | import subprocess
from pydub import AudioSegment
from .filemanager import get_workingdir, get_album_dir
class StreamSegment(object):
def __init__(self, name, position, initial_time=None, end_time=None):
self.name = name
self.position = position
self.initial_time = initial_time
self.end_time = end_time
self.orig_tmp_file = None
def __str__(self):
return "name: {} \n poisition: {} \n initial time: {} \n".format(
self.name,
str(self.position + 1),
str(self.initial_time)
)
def time_to_mil(time):
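    """Convert a "mm:ss" (or "hh:mm:ss") timestamp string to milliseconds, e.g. "4:06" -> 246000."""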
splitted_time = time.split(":")
if len(splitted_time) == 2:
return int(splitted_time[0])*60000 + int(splitted_time[1])*1000
elif len(splitted_time) == 3:
        # hours -> milliseconds uses 3,600,000 (60 * 60 * 1000)
        return int(splitted_time[0])*3600000 + int(splitted_time[1])*60000 \
               + int(splitted_time[2])*1000
def build_time_range(stream_len, streamSegments):
"""
Sets initial time/end time for segments
"""
t_separator = 500 #separator between track, milseconds
sortedStreamSegments = sorted(streamSegments, key=lambda ss: ss.position)
for idx, stream_segment in enumerate(sortedStreamSegments):
stream_segment.initial_time = time_to_mil(stream_segment.initial_time)
if stream_segment.position == 0:
stream_segment.initial_time = 0
if stream_segment.end_time is None:
nxt_stream_init_t = sortedStreamSegments[idx + 1].initial_time
stream_segment.end_time = time_to_mil(nxt_stream_init_t) - t_separator
elif stream_segment.position < len(streamSegments) - 1:
if stream_segment.end_time is None:
nxt_stream_init_t = sortedStreamSegments[idx + 1].initial_time
stream_segment.end_time = time_to_mil(nxt_stream_init_t) - t_separator
else:
if stream_segment.end_time is None:
stream_segment.end_time = stream_len - t_separator
return sortedStreamSegments
def split_audio(src_file, dest_path, audio_segments, audio_format='webm'):
"""
Splits audio file given the time range in audio segments
"""
audio_orig = AudioSegment.from_file(src_file, 'webm')
print('Preparing audio, this may take a while... go for coffee')
s_audio_segments = build_time_range(len(audio_orig), audio_segments)
for as_ in audio_segments:
tmp_seg = audio_orig[int(as_.initial_time):int(as_.end_time)]
file_name = as_.name.replace(' ', '') + '.' + audio_format
as_.orig_tmp_file = '/'.join([dest_path, file_name])
tmp_seg.export(as_.orig_tmp_file, format=audio_format)
return audio_segments
def export(src_file, dest_file, track_metadata=None, format='mp3'):
"""
Exports webm file to mp3 or ogg format file, replaces pydub export
due to issues with ffmpeg
"""
print('Exporting audio to {}'.format(dest_file))
ffmpeg_cmd = ['ffmpeg', '-i', src_file, '-vn', '-ab', '320k', '-ar',
'44100']
cmd_last = ['-y', dest_file ]
if track_metadata is not None:
ffmpeg_cmd = ffmpeg_cmd + track_metadata + cmd_last
else:
ffmpeg_cmd = ffmpeg_cmd + cmd_last
p = subprocess.Popen(ffmpeg_cmd, stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
out, err = p.communicate()
if p.returncode != 0:
print(err)
raise Exception("Could not encode file")
return dest_file
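# For reference, the command assembled in export() is equivalent to running, e.g.:
#   ffmpeg -i <src_file> -vn -ab 320k -ar 44100 [-metadata key=value ...] -y <dest_file>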
def create_tracks(source_media, dest_dir, audio_segments, artist, album,
source_type, format='mp3'):
src_format = format
if source_type == 'youtube':
src_format = 'webm'
dir_name = '_'.join([artist.replace(' ', '_'), album.replace(' ', '_')])
dest_dir = '/'.join([dest_dir, dir_name])
print(dest_dir)
print(format)
    dest_file = lambda name: '/'.join([dest_dir, name.replace(' ', '_') + '.' + format])
splitted_segments = split_audio(source_media, get_workingdir(),
audio_segments, audio_format=src_format)
album_dir = get_album_dir(dest_dir)
print("Created {}".format(album_dir))
for as_ in splitted_segments:
track_meta = _create_track_metadata(album, artist,
as_.name, as_.position + 1)
export(as_.orig_tmp_file, dest_file(as_.name),
track_metadata=track_meta, format=format)
def _create_track_metadata(album, artist, name, track_number):
return ['-metadata', '='.join(['title', name]),
'-metadata', '='.join(['album_artist', artist]),
'-metadata', '='.join(['artist', artist]),
'-metadata', '='.join(['album', album]),
'-metadata', '='.join(['track', str(track_number)])]
| import subprocess
from pydub import AudioSegment
from .filemanager import get_workingdir, get_album_dir
class StreamSegment(object):
def __init__(self, name, position, initial_time=None, end_time=None):
self.name = name
self.position = position
self.initial_time = initial_time
self.end_time = end_time
self.orig_tmp_file = None
def __str__(self):
return "name: {} \n poisition: {} \n initial time: {} \n".format(
self.name,
str(self.position + 1),
str(self.initial_time)
)
def time_to_mil(time):
splitted_time = time.split(":")
if len(splitted_time) == 2:
return int(splitted_time[0])*60000 + int(splitted_time[1])*1000
elif len(splitted_time) == 3:
        # hours -> milliseconds uses 3,600,000 (60 * 60 * 1000)
        return int(splitted_time[0])*3600000 + int(splitted_time[1])*60000 \
               + int(splitted_time[2])*1000
def build_time_range(stream_len, streamSegments):
"""
Sets initial time/end time for segments
"""
t_separator = 500 #separator between track, milseconds
sortedStreamSegments = sorted(streamSegments, key=lambda ss: ss.position)
for idx, stream_segment in enumerate(sortedStreamSegments):
stream_segment.initial_time = time_to_mil(stream_segment.initial_time)
if stream_segment.position == 0:
stream_segment.initial_time = 0
if stream_segment.end_time is None:
nxt_stream_init_t = sortedStreamSegments[idx + 1].initial_time
stream_segment.end_time = time_to_mil(nxt_stream_init_t) - t_separator
elif stream_segment.position < len(streamSegments) - 1:
if stream_segment.end_time is None:
nxt_stream_init_t = sortedStreamSegments[idx + 1].initial_time
stream_segment.end_time = time_to_mil(nxt_stream_init_t) - t_separator
else:
if stream_segment.end_time is None:
stream_segment.end_time = stream_len - t_separator
return sortedStreamSegments
def split_audio(src_file, dest_path, audio_segments, audio_format='webm'):
"""
Splits audio file given the time range in audio segments
"""
audio_orig = AudioSegment.from_file(src_file, 'webm')
print('Preparing audio, this may take a while... go for coffee')
s_audio_segments = build_time_range(len(audio_orig), audio_segments)
for as_ in audio_segments:
tmp_seg = audio_orig[int(as_.initial_time):int(as_.end_time)]
file_name = as_.name.replace(' ', '') + '.' + audio_format
as_.orig_tmp_file = '/'.join([dest_path, file_name])
tmp_seg.export(as_.orig_tmp_file, format=audio_format)
return audio_segments
def export(src_file, dest_file, track_metadata=None, format='mp3'):
"""
Exports webm file to mp3 or ogg format file, replaces pydub export
due to issues with ffmpeg
"""
print('Exporting audio to {}'.format(dest_file))
ffmpeg_cmd = ['ffmpeg', '-i', src_file, '-vn', '-ab', '320k', '-ar',
'44100']
cmd_last = ['-y', dest_file ]
if track_metadata is not None:
ffmpeg_cmd = ffmpeg_cmd + track_metadata + cmd_last
else:
ffmpeg_cmd = ffmpeg_cmd + cmd_last
p = subprocess.Popen(ffmpeg_cmd, stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
out, err = p.communicate()
if p.returncode != 0:
print(err)
raise Exception("Could not encode file")
return dest_file
def create_tracks(source_media, dest_dir, audio_segments, artist, album,
source_type, format='mp3'):
src_format = format
if source_type == 'youtube':
src_format = 'webm'
dir_name = '_'.join([artist.replace(' ', '_'), album.replace(' ', '_')])
dest_dir = '/'.join([dest_dir, dir_name])
print(dest_dir)
print(format)
    dest_file = lambda name: '/'.join([dest_dir, name.replace(' ', '_') + '.' + format])
splitted_segments = split_audio(source_media, get_workingdir(),
audio_segments, audio_format=src_format)
album_dir = get_album_dir(dest_dir)
print("Created {}".format(album_dir))
for as_ in splitted_segments:
track_meta = _create_track_metadata(album, artist,
as_.name, as_.position + 1)
export(as_.orig_tmp_file, dest_file(as_.name),
track_metadata=track_meta, format=format)
def _create_track_metadata(album, artist, name, track_number):
return ['-metadata', '='.join(['title', name]),
'-metadata', '='.join(['album_artist', artist]),
'-metadata', '='.join(['artist', artist]),
'-metadata', '='.join(['album', album]),
'-metadata', '='.join(['track', str(track_number)])]
| en | 0.818715 | Sets initial time/end time for segments #separator between track, milseconds Splits audio file given the time range in audio segments Exports webm file to mp3 or ogg format file, replaces pydub export due to issues with ffmpeg | 2.91631 | 3 |
src/izi/apps/shipping/migrations/0001_initial.py | izi-core/izi-core | 0 | 6630737 | <filename>src/izi/apps/shipping/migrations/0001_initial.py
# Generated by Django 2.1.1 on 2018-10-01 04:05
from decimal import Decimal
import django.core.validators
from django.db import migrations, models
import django.db.models.deletion
import izi.models.fields.autoslugfield
class Migration(migrations.Migration):
initial = True
dependencies = [
('address', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='OrderAndItemCharges',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('code', izi.models.fields.autoslugfield.AutoSlugField(blank=True, editable=False, max_length=128, populate_from='name', unique=True, verbose_name='Slug')),
('name', models.CharField(max_length=128, unique=True, verbose_name='Name')),
('description', models.TextField(blank=True, verbose_name='Description')),
('price_per_order', models.DecimalField(decimal_places=2, default=Decimal('0.00'), max_digits=12, verbose_name='Price per order')),
('price_per_item', models.DecimalField(decimal_places=2, default=Decimal('0.00'), max_digits=12, verbose_name='Price per item')),
('free_shipping_threshold', models.DecimalField(blank=True, decimal_places=2, max_digits=12, null=True, verbose_name='Free Shipping')),
('countries', models.ManyToManyField(blank=True, to='address.Country', verbose_name='Countries')),
],
options={
'verbose_name': 'Order and Item Charge',
'verbose_name_plural': 'Order and Item Charges',
'ordering': ['name'],
'abstract': False,
},
),
migrations.CreateModel(
name='WeightBand',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('upper_limit', models.DecimalField(decimal_places=3, help_text='Enter upper limit of this weight band in kg. The lower limit will be determined by the other weight bands.', max_digits=12, validators=[django.core.validators.MinValueValidator(Decimal('0.00'))], verbose_name='Upper Limit')),
('charge', models.DecimalField(decimal_places=2, max_digits=12, validators=[django.core.validators.MinValueValidator(Decimal('0.00'))], verbose_name='Charge')),
],
options={
'verbose_name': 'Weight Band',
'verbose_name_plural': 'Weight Bands',
'ordering': ['method', 'upper_limit'],
'abstract': False,
},
),
migrations.CreateModel(
name='WeightBased',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('code', izi.models.fields.autoslugfield.AutoSlugField(blank=True, editable=False, max_length=128, populate_from='name', unique=True, verbose_name='Slug')),
('name', models.CharField(max_length=128, unique=True, verbose_name='Name')),
('description', models.TextField(blank=True, verbose_name='Description')),
('default_weight', models.DecimalField(decimal_places=3, default=Decimal('0.000'), help_text='Default product weight in kg when no weight attribute is defined', max_digits=12, validators=[django.core.validators.MinValueValidator(Decimal('0.00'))], verbose_name='Default Weight')),
('countries', models.ManyToManyField(blank=True, to='address.Country', verbose_name='Countries')),
],
options={
'verbose_name': 'Weight-based Shipping Method',
'verbose_name_plural': 'Weight-based Shipping Methods',
'ordering': ['name'],
'abstract': False,
},
),
migrations.AddField(
model_name='weightband',
name='method',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='bands', to='shipping.WeightBased', verbose_name='Method'),
),
]
| <filename>src/izi/apps/shipping/migrations/0001_initial.py
# Generated by Django 2.1.1 on 2018-10-01 04:05
from decimal import Decimal
import django.core.validators
from django.db import migrations, models
import django.db.models.deletion
import izi.models.fields.autoslugfield
class Migration(migrations.Migration):
initial = True
dependencies = [
('address', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='OrderAndItemCharges',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('code', izi.models.fields.autoslugfield.AutoSlugField(blank=True, editable=False, max_length=128, populate_from='name', unique=True, verbose_name='Slug')),
('name', models.CharField(max_length=128, unique=True, verbose_name='Name')),
('description', models.TextField(blank=True, verbose_name='Description')),
('price_per_order', models.DecimalField(decimal_places=2, default=Decimal('0.00'), max_digits=12, verbose_name='Price per order')),
('price_per_item', models.DecimalField(decimal_places=2, default=Decimal('0.00'), max_digits=12, verbose_name='Price per item')),
('free_shipping_threshold', models.DecimalField(blank=True, decimal_places=2, max_digits=12, null=True, verbose_name='Free Shipping')),
('countries', models.ManyToManyField(blank=True, to='address.Country', verbose_name='Countries')),
],
options={
'verbose_name': 'Order and Item Charge',
'verbose_name_plural': 'Order and Item Charges',
'ordering': ['name'],
'abstract': False,
},
),
migrations.CreateModel(
name='WeightBand',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('upper_limit', models.DecimalField(decimal_places=3, help_text='Enter upper limit of this weight band in kg. The lower limit will be determined by the other weight bands.', max_digits=12, validators=[django.core.validators.MinValueValidator(Decimal('0.00'))], verbose_name='Upper Limit')),
('charge', models.DecimalField(decimal_places=2, max_digits=12, validators=[django.core.validators.MinValueValidator(Decimal('0.00'))], verbose_name='Charge')),
],
options={
'verbose_name': 'Weight Band',
'verbose_name_plural': 'Weight Bands',
'ordering': ['method', 'upper_limit'],
'abstract': False,
},
),
migrations.CreateModel(
name='WeightBased',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('code', izi.models.fields.autoslugfield.AutoSlugField(blank=True, editable=False, max_length=128, populate_from='name', unique=True, verbose_name='Slug')),
('name', models.CharField(max_length=128, unique=True, verbose_name='Name')),
('description', models.TextField(blank=True, verbose_name='Description')),
('default_weight', models.DecimalField(decimal_places=3, default=Decimal('0.000'), help_text='Default product weight in kg when no weight attribute is defined', max_digits=12, validators=[django.core.validators.MinValueValidator(Decimal('0.00'))], verbose_name='Default Weight')),
('countries', models.ManyToManyField(blank=True, to='address.Country', verbose_name='Countries')),
],
options={
'verbose_name': 'Weight-based Shipping Method',
'verbose_name_plural': 'Weight-based Shipping Methods',
'ordering': ['name'],
'abstract': False,
},
),
migrations.AddField(
model_name='weightband',
name='method',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='bands', to='shipping.WeightBased', verbose_name='Method'),
),
]
| en | 0.77519 | # Generated by Django 2.1.1 on 2018-10-01 04:05 | 1.840187 | 2 |
inverness/model_meta.py | mobarski/inverness | 0 | 6630738 | from tqdm import tqdm
try:
from .util_time import timed
from .sorbet import sorbet
except (ModuleNotFoundError,ImportError):
from util_time import timed
from sorbet import sorbet
class Meta():
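    """Mixin that builds and loads per-document metadata; the host class is expected
    to provide ``self.path``, ``self.doc_iter()`` and ``self.get_meta(id, doc)``."""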
@timed
def init_meta(self, storage='disk'):
self.meta = sorbet(self.path+'meta', kind=storage).new()
get_meta = self.get_meta
documents = self.doc_iter()
documents = tqdm(documents, desc='meta')
for id,doc in enumerate(documents):
m = get_meta(id,doc)
self.meta.append(m)
self.meta.save()
def load_meta(self, storage='disk'):
self.meta = sorbet(self.path+'meta', kind=storage).load()
| from tqdm import tqdm
try:
from .util_time import timed
from .sorbet import sorbet
except (ModuleNotFoundError,ImportError):
from util_time import timed
from sorbet import sorbet
class Meta():
@timed
def init_meta(self, storage='disk'):
self.meta = sorbet(self.path+'meta', kind=storage).new()
get_meta = self.get_meta
documents = self.doc_iter()
documents = tqdm(documents, desc='meta')
for id,doc in enumerate(documents):
m = get_meta(id,doc)
self.meta.append(m)
self.meta.save()
def load_meta(self, storage='disk'):
self.meta = sorbet(self.path+'meta', kind=storage).load()
| none | 1 | 2.304651 | 2 |
|
djmod/modlearn/web/migrations/0012_auto_20170321_1736.py | hosseinmh/Django_learning | 0 | 6630739 | # -*- coding: utf-8 -*-
# Generated by Django 1.10.6 on 2017-03-21 17:36
from __future__ import unicode_literals
from django.db import migrations, models
import web.validators
class Migration(migrations.Migration):
dependencies = [
('web', '0011_auto_20170321_1525'),
]
operations = [
migrations.AddField(
model_name='post',
name='slug',
field=models.SlugField(blank=True, null=True),
),
migrations.AlterField(
model_name='post',
name='author_email',
field=models.EmailField(blank=True, max_length=240, null=True, validators=[web.validators.auther_email]),
),
]
| # -*- coding: utf-8 -*-
# Generated by Django 1.10.6 on 2017-03-21 17:36
from __future__ import unicode_literals
from django.db import migrations, models
import web.validators
class Migration(migrations.Migration):
dependencies = [
('web', '0011_auto_20170321_1525'),
]
operations = [
migrations.AddField(
model_name='post',
name='slug',
field=models.SlugField(blank=True, null=True),
),
migrations.AlterField(
model_name='post',
name='author_email',
field=models.EmailField(blank=True, max_length=240, null=True, validators=[web.validators.auther_email]),
),
]
| en | 0.780702 | # -*- coding: utf-8 -*- # Generated by Django 1.10.6 on 2017-03-21 17:36 | 1.689176 | 2 |
users/migrations/0001_initial.py | intelligems/stolos | 5 | 6630740 | # -*- coding: utf-8 -*-
# Generated by Django 1.9.7 on 2016-09-06 14:10
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import uuid
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='SSHPublicKey',
fields=[
('uuid', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)),
('public_key', models.TextField(unique=True)),
('md5', models.CharField(editable=False, max_length=256, unique=True)),
('sha256', models.CharField(editable=False, max_length=256, unique=True)),
('sha512', models.CharField(editable=False, max_length=256, unique=True)),
('name', models.CharField(max_length=20)),
('owner', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
options={
'permissions': (('view_sshpublickey', 'Can view public key'),),
},
),
migrations.AlterUniqueTogether(
name='sshpublickey',
unique_together=set([('name', 'owner')]),
),
]
| # -*- coding: utf-8 -*-
# Generated by Django 1.9.7 on 2016-09-06 14:10
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import uuid
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='SSHPublicKey',
fields=[
('uuid', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)),
('public_key', models.TextField(unique=True)),
('md5', models.CharField(editable=False, max_length=256, unique=True)),
('sha256', models.CharField(editable=False, max_length=256, unique=True)),
('sha512', models.CharField(editable=False, max_length=256, unique=True)),
('name', models.CharField(max_length=20)),
('owner', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
options={
'permissions': (('view_sshpublickey', 'Can view public key'),),
},
),
migrations.AlterUniqueTogether(
name='sshpublickey',
unique_together=set([('name', 'owner')]),
),
]
| en | 0.807838 | # -*- coding: utf-8 -*- # Generated by Django 1.9.7 on 2016-09-06 14:10 | 1.802693 | 2 |
tensorflow/basic-rl/tutorial10/gps/gui/config.py | gopala-kr/ds-notebooks | 1 | 6630741 | <filename>tensorflow/basic-rl/tutorial10/gps/gui/config.py<gh_stars>1-10
""" Default configuration and hyperparameter values for GUI objects. """
import itertools
#from gps.proto.gps_pb2 import TRIAL_ARM, AUXILIARY_ARM
from gps_pb2 import TRIAL_ARM, AUXILIARY_ARM
from gps.gui.ps3_config import PS3_BUTTON, INVERTED_PS3_BUTTON
# Mappings from actions to their corresponding keyboard bindings.
# WARNING: keybindings must be unique
keyboard_bindings = {
# Target Setup.
'ptn': 'left', # previous target number
'ntn': 'right', # next target number
'pat': 'down', # previous actuator type
'nat': 'up', # next actuator type
'sip': 'i', # set initial position
'stp': 't', # set target position
'sii': 'z', # set initial image
'sti': 'x', # set target image
'mti': 'm', # move to initial
'mtt': 'n', # move to target
'rc': 'c', # relax controller
'mm': 'q', # mannequin mode
# GPS Training.
'stop' : 's', # stop
'reset': 'r', # reset
'go' : 'g', # go
'fail' : 'f', # fail
# Image Visualizer
'oii' : 'o', # overlay initial image
'oti' : 'p', # overlay target image
}
inverted_keyboard_bindings = {value: key
for key, value in keyboard_bindings.items()}
# for key, value in keyboard_bindings.iteritems()}
# Mappings from actions to their corresponding PS3 controller bindings.
ps3_bindings = {
# Target Setup
'ptn': (PS3_BUTTON['rear_right_1'], PS3_BUTTON['cross_left']),
'ntn': (PS3_BUTTON['rear_right_1'], PS3_BUTTON['cross_right']),
'pat': (PS3_BUTTON['rear_right_1'], PS3_BUTTON['cross_down']),
'nat': (PS3_BUTTON['rear_right_1'], PS3_BUTTON['cross_up']),
'sip': (PS3_BUTTON['rear_right_1'], PS3_BUTTON['action_square']),
'stp': (PS3_BUTTON['rear_right_1'], PS3_BUTTON['action_circle']),
'sii': (PS3_BUTTON['rear_right_1'], PS3_BUTTON['action_cross']),
'sti': (PS3_BUTTON['rear_right_1'], PS3_BUTTON['action_triangle']),
'mti': (PS3_BUTTON['rear_right_2'], PS3_BUTTON['cross_left']),
'mtt': (PS3_BUTTON['rear_right_2'], PS3_BUTTON['cross_right']),
'rc' : (PS3_BUTTON['rear_right_2'], PS3_BUTTON['cross_down']),
'mm' : (PS3_BUTTON['rear_right_2'], PS3_BUTTON['cross_up']),
# GPS Training
'stop' : (PS3_BUTTON['rear_right_2'], PS3_BUTTON['action_square']),
'reset': (PS3_BUTTON['rear_right_2'], PS3_BUTTON['action_triangle']),
'go' : (PS3_BUTTON['rear_right_2'], PS3_BUTTON['action_circle']),
'fail' : (PS3_BUTTON['rear_right_2'], PS3_BUTTON['action_cross']),
# Image Visualizer
'oii' : (PS3_BUTTON['cross_up'] ,),
'oti' : (PS3_BUTTON['cross_down'] ,),
}
#inverted_ps3_bindings = {value: key for key, value in ps3_bindings.iteritems()}
inverted_ps3_bindings = {value: key for key, value in ps3_bindings.items()}
permuted_inverted_ps3_bindings = {}
#for key, value in list(inverted_ps3_bindings.iteritems()):
for key, value in list(inverted_ps3_bindings.items()):
for permuted_key in itertools.permutations(key, len(key)):
permuted_inverted_ps3_bindings[permuted_key] = value
config = {
# Keyboard shortcuts bindings
'keyboard_bindings': keyboard_bindings,
'inverted_keyboard_bindings': inverted_keyboard_bindings,
# PS3 controller bindings
'ps3_topic': 'joy',
'ps3_process_rate': 20, # Only process 1/20 of PS3 messages.
'ps3_button': PS3_BUTTON,
'inverted_ps3_button': INVERTED_PS3_BUTTON,
'ps3_bindings': ps3_bindings,
'inverted_ps3_bindings': inverted_ps3_bindings,
'permuted_inverted_ps3_bindings': permuted_inverted_ps3_bindings,
# Images
'image_on': True,
'image_topic': '/camera/rgb/image_color',
'image_size': (240, 240),
'image_overlay_actuator': 'trial_arm',
'image_overlay_alpha': 0.3,
# Both GUIs
'figsize': (12, 12),
# Target Setup
'num_targets': 10,
'actuator_types': [TRIAL_ARM, AUXILIARY_ARM],
'actuator_names': ['trial_arm', 'auxiliary_arm'],
'target_output_fontsize': 10,
# GPS Training
'initial_mode': 'run',
'algthm_output_fontsize': 10,
'algthm_output_max_display_size': 15,
}
def generate_experiment_info(config):
"""
    Generate experiment info, to be displayed by the GPS Training GUI.
Assumes config is the config created in hyperparams.py
"""
common = config['common']
algorithm = config['algorithm']
if type(algorithm['cost']) == list:
algorithm_cost_type = algorithm['cost'][0]['type'].__name__
if (algorithm_cost_type) == 'CostSum':
algorithm_cost_type += '(%s)' % ', '.join(
map(lambda cost: cost['type'].__name__,
algorithm['cost'][0]['costs']))
else:
algorithm_cost_type = algorithm['cost']['type'].__name__
if (algorithm_cost_type) == 'CostSum':
algorithm_cost_type += '(%s)' % ', '.join(
map(lambda cost: cost['type'].__name__,
algorithm['cost']['costs']))
if 'dynamics' in algorithm:
alg_dyn = str(algorithm['dynamics']['type'].__name__)
else:
alg_dyn = 'None'
return (
'exp_name: ' + str(common['experiment_name']) + '\n' +
'alg_type: ' + str(algorithm['type'].__name__) + '\n' +
'alg_dyn: ' + alg_dyn + '\n' +
'alg_cost: ' + str(algorithm_cost_type) + '\n' +
'iterations: ' + str(config['iterations']) + '\n' +
'conditions: ' + str(algorithm['conditions']) + '\n' +
'samples: ' + str(config['num_samples']) + '\n'
)
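# The returned summary is a plain newline-separated block, for example (illustrative values only):
#   exp_name: box2d_arm_example
#   alg_type: AlgorithmTrajOpt
#   alg_dyn: DynamicsLRPrior
#   alg_cost: CostSum(CostState, CostAction)
#   iterations: 10
#   conditions: 4
#   samples: 5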
| <filename>tensorflow/basic-rl/tutorial10/gps/gui/config.py<gh_stars>1-10
""" Default configuration and hyperparameter values for GUI objects. """
import itertools
#from gps.proto.gps_pb2 import TRIAL_ARM, AUXILIARY_ARM
from gps_pb2 import TRIAL_ARM, AUXILIARY_ARM
from gps.gui.ps3_config import PS3_BUTTON, INVERTED_PS3_BUTTON
# Mappings from actions to their corresponding keyboard bindings.
# WARNING: keybindings must be unique
keyboard_bindings = {
# Target Setup.
'ptn': 'left', # previous target number
'ntn': 'right', # next target number
'pat': 'down', # previous actuator type
'nat': 'up', # next actuator type
'sip': 'i', # set initial position
'stp': 't', # set target position
'sii': 'z', # set initial image
'sti': 'x', # set target image
'mti': 'm', # move to initial
'mtt': 'n', # move to target
'rc': 'c', # relax controller
'mm': 'q', # mannequin mode
# GPS Training.
'stop' : 's', # stop
'reset': 'r', # reset
'go' : 'g', # go
'fail' : 'f', # fail
# Image Visualizer
'oii' : 'o', # overlay initial image
'oti' : 'p', # overlay target image
}
inverted_keyboard_bindings = {value: key
for key, value in keyboard_bindings.items()}
# for key, value in keyboard_bindings.iteritems()}
# Mappings from actions to their corresponding PS3 controller bindings.
ps3_bindings = {
# Target Setup
'ptn': (PS3_BUTTON['rear_right_1'], PS3_BUTTON['cross_left']),
'ntn': (PS3_BUTTON['rear_right_1'], PS3_BUTTON['cross_right']),
'pat': (PS3_BUTTON['rear_right_1'], PS3_BUTTON['cross_down']),
'nat': (PS3_BUTTON['rear_right_1'], PS3_BUTTON['cross_up']),
'sip': (PS3_BUTTON['rear_right_1'], PS3_BUTTON['action_square']),
'stp': (PS3_BUTTON['rear_right_1'], PS3_BUTTON['action_circle']),
'sii': (PS3_BUTTON['rear_right_1'], PS3_BUTTON['action_cross']),
'sti': (PS3_BUTTON['rear_right_1'], PS3_BUTTON['action_triangle']),
'mti': (PS3_BUTTON['rear_right_2'], PS3_BUTTON['cross_left']),
'mtt': (PS3_BUTTON['rear_right_2'], PS3_BUTTON['cross_right']),
'rc' : (PS3_BUTTON['rear_right_2'], PS3_BUTTON['cross_down']),
'mm' : (PS3_BUTTON['rear_right_2'], PS3_BUTTON['cross_up']),
# GPS Training
'stop' : (PS3_BUTTON['rear_right_2'], PS3_BUTTON['action_square']),
'reset': (PS3_BUTTON['rear_right_2'], PS3_BUTTON['action_triangle']),
'go' : (PS3_BUTTON['rear_right_2'], PS3_BUTTON['action_circle']),
'fail' : (PS3_BUTTON['rear_right_2'], PS3_BUTTON['action_cross']),
# Image Visualizer
'oii' : (PS3_BUTTON['cross_up'] ,),
'oti' : (PS3_BUTTON['cross_down'] ,),
}
#inverted_ps3_bindings = {value: key for key, value in ps3_bindings.iteritems()}
inverted_ps3_bindings = {value: key for key, value in ps3_bindings.items()}
permuted_inverted_ps3_bindings = {}
#for key, value in list(inverted_ps3_bindings.iteritems()):
for key, value in list(inverted_ps3_bindings.items()):
for permuted_key in itertools.permutations(key, len(key)):
permuted_inverted_ps3_bindings[permuted_key] = value
config = {
# Keyboard shortcuts bindings
'keyboard_bindings': keyboard_bindings,
'inverted_keyboard_bindings': inverted_keyboard_bindings,
# PS3 controller bindings
'ps3_topic': 'joy',
'ps3_process_rate': 20, # Only process 1/20 of PS3 messages.
'ps3_button': PS3_BUTTON,
'inverted_ps3_button': INVERTED_PS3_BUTTON,
'ps3_bindings': ps3_bindings,
'inverted_ps3_bindings': inverted_ps3_bindings,
'permuted_inverted_ps3_bindings': permuted_inverted_ps3_bindings,
# Images
'image_on': True,
'image_topic': '/camera/rgb/image_color',
'image_size': (240, 240),
'image_overlay_actuator': 'trial_arm',
'image_overlay_alpha': 0.3,
# Both GUIs
'figsize': (12, 12),
# Target Setup
'num_targets': 10,
'actuator_types': [TRIAL_ARM, AUXILIARY_ARM],
'actuator_names': ['trial_arm', 'auxiliary_arm'],
'target_output_fontsize': 10,
# GPS Training
'initial_mode': 'run',
'algthm_output_fontsize': 10,
'algthm_output_max_display_size': 15,
}
def generate_experiment_info(config):
"""
    Generate experiment info, to be displayed by the GPS Training GUI.
Assumes config is the config created in hyperparams.py
"""
common = config['common']
algorithm = config['algorithm']
if type(algorithm['cost']) == list:
algorithm_cost_type = algorithm['cost'][0]['type'].__name__
if (algorithm_cost_type) == 'CostSum':
algorithm_cost_type += '(%s)' % ', '.join(
map(lambda cost: cost['type'].__name__,
algorithm['cost'][0]['costs']))
else:
algorithm_cost_type = algorithm['cost']['type'].__name__
if (algorithm_cost_type) == 'CostSum':
algorithm_cost_type += '(%s)' % ', '.join(
map(lambda cost: cost['type'].__name__,
algorithm['cost']['costs']))
if 'dynamics' in algorithm:
alg_dyn = str(algorithm['dynamics']['type'].__name__)
else:
alg_dyn = 'None'
return (
'exp_name: ' + str(common['experiment_name']) + '\n' +
'alg_type: ' + str(algorithm['type'].__name__) + '\n' +
'alg_dyn: ' + alg_dyn + '\n' +
'alg_cost: ' + str(algorithm_cost_type) + '\n' +
'iterations: ' + str(config['iterations']) + '\n' +
'conditions: ' + str(algorithm['conditions']) + '\n' +
'samples: ' + str(config['num_samples']) + '\n'
)
| en | 0.669409 | Default configuration and hyperparameter values for GUI objects. #from gps.proto.gps_pb2 import TRIAL_ARM, AUXILIARY_ARM # Mappings from actions to their corresponding keyboard bindings. # WARNING: keybindings must be unique # Target Setup. # previous target number # next target number # previous actuator type # next actuator type # set initial position # set target position # set initial image # set target image # move to initial # move to target # relax controller # mannequin mode # GPS Training. # stop # reset # go # fail # Image Visualizer # overlay initial image # overlay target image # for key, value in keyboard_bindings.iteritems()} # Mappings from actions to their corresponding PS3 controller bindings. # Target Setup # GPS Training # Image Visualizer #inverted_ps3_bindings = {value: key for key, value in ps3_bindings.iteritems()} #for key, value in list(inverted_ps3_bindings.iteritems()): # Keyboard shortcuts bindings # PS3 controller bindings # Only process 1/20 of PS3 messages. # Images # Both GUIs # Target Setup # GPS Training Generate experiment info, to be displayed by GPS Trainig GUI. Assumes config is the config created in hyperparams.py | 2.362337 | 2 |
main.py | AI-secure/VeriGauge | 54 | 6630742 | <reponame>AI-secure/VeriGauge<gh_stars>10-100
import os
os.environ["CUDA_VISIBLE_DEVICES"] = '1'
import sys
import datasets
import model
APPROACH_LIST = ['PGD', 'IBP', 'FastLin', 'MILP', 'PercySDP', 'ZicoDual', 'CROWN', 'CROWN-IBP', 'LPAll', 'Diffai', 'RecurJac', 'FastLip']
dataset = 'mnist'
# source = 'test'
# selector = 'small.3'
source = 'fastlin'
selector = '2.20.reg'
# source = 'cnn_cert'
# selector = '3layer_fc_20'
skip = 500
norm = '2'
radii = 0.1
def pr(rad):
if dataset != 'mnist':
return f'{rad*255:.3}/255'
else:
return f'{rad:.3}'
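# e.g. pr(0.1) -> '0.1' when dataset == 'mnist', and '25.5/255' (0.1 * 255) for the other datasets.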
if __name__ == '__main__':
ds = datasets.get_dataset(dataset, 'test')
print(dataset)
m = model.load_model(source, dataset, selector)
print(m)
from adaptor.basic_adaptor import PGDAdaptor, CWAdaptor
from adaptor.basic_adaptor import CleanAdaptor, FastLinIBPAdaptor, MILPAdaptor, PercySDPAdaptor, FazlybSDPAdaptor
from adaptor.lpdual_adaptor import ZicoDualAdaptor
from adaptor.crown_adaptor import FullCrownAdaptor, CrownIBPAdaptor
from adaptor.crown_adaptor import IBPAdaptor
from adaptor.recurjac_adaptor import FastLipAdaptor, RecurJacAdaptor, SpectralAdaptor
from adaptor.recurjac_adaptor import FastLinAdaptor
from adaptor.cnncert_adaptor import CNNCertAdaptor, FastLinSparseAdaptor, LPAllAdaptor
from adaptor.eran_adaptor import AI2Adaptor, DeepPolyAdaptor, RefineZonoAdaptor, KReluAdaptor
cln = CleanAdaptor(dataset, m)
pgd = PGDAdaptor(dataset, m)
cw = CWAdaptor(dataset, m)
# ibp = IBPAdaptor(dataset, m)
# fastlinibp = FastLinIBPAdaptor(dataset, m)
# milp = MILPAdaptor(dataset, m)
sdp = PercySDPAdaptor(dataset, m)
fazsdp = FazlybSDPAdaptor(dataset, m)
lpdual = ZicoDualAdaptor(dataset, m)
# fullcrown = FullCrownAdaptor(dataset, m)
# crownibp = CrownIBPAdaptor(dataset, m)
# fastlip = FastLipAdaptor(dataset, m)
# recurjac = RecurJacAdaptor(dataset, m)
# spectral = SpectralAdaptor(dataset, m)
# fastlin = FastLinAdaptor(dataset, m)
# cnncert = CNNCertAdaptor(dataset, m)
# fastlinsparse = FastLinSparseAdaptor(dataset, m)
# lpall = LPAllAdaptor(dataset, m)
ai2 = AI2Adaptor(dataset, m)
# deeppoly= DeepPolyAdaptor(dataset, m)
# refinezono = RefineZonoAdaptor(dataset, m)
# krelu = KReluAdaptorAdaptor(dataset, m)
for i in range(0, len(ds), skip):
X, y = ds[i]
cln_v = cln.verify(X, y, norm, 0.0)
# pgd_v = pgd.verify(X, y, norm, radii)
# pgd_radius = pgd.calc_radius(X, y, norm)
cw_v = cw.verify(X, y, norm, radii)
cw_radius = cw.calc_radius(X, y, norm)
# ibp_v = ibp.verify(X, y, norm, radii)
# ibp_radius = ibp.calc_radius(X, y, norm)
# fastlinibp_v = fastlinibp.verify(X, y, norm, radii)
# fastlinibp_radius = fastlinibp.calc_radius(X, y, norm)
# milp_v = milp.verify(X, y, norm, radii)
# milp_radius = milp.calc_radius(X, y, norm, eps=1e-2)
# sdp_v = sdp.verify(X, y, norm, radii)
# sdp_radius = sdp.calc_radius(X, y, norm)
# faz_v = fazsdp.verify(X, y, norm, radii)
# faz_radius = fazsdp.calc_radius(X, y, norm)
lpdual_v = lpdual.verify(X, y, norm, radii)
lpdual_radius = lpdual.calc_radius(X, y, norm)
# fullcrown_v = fullcrown.verify(X, y, norm, radii)
# fullcrown_radius = fullcrown.calc_radius(X, y, norm)
# crownibp_v = crownibp.verify(X, y, norm, radii)
# crownibp_radius = crownibp.calc_radius(X, y, norm)
# fastlip_v = fastlip.verify(X, y, norm, radii)
# fastlip_radius = fastlip.calc_radius(X, y, norm)
# recurjac_v = recurjac.verify(X, y, norm, radii)
# recurjac_radius = recurjac.calc_radius(X, y, norm)
# spectral_v = spectral.verify(X, y, norm, radii)
# spectral_radius = spectral.calc_radius(X, y, norm)
# fastlin_v = fastlin.verify(X, y, norm, radii)
# fastlin_radius = fastlin.calc_radius(X, y, norm)
# cnncert_v = cnncert.verify(X, y, norm, radii)
# cnncert_radius = cnncert.calc_radius(X, y, norm)
# fstlinsparse_v = fastlinsparse.verify(X, y, norm, radii)
# fstlinsparse_radius = fastlinsparse.calc_radius(X, y, norm)
# lpall_v = lpall.verify(X, y, norm, radii)
# lpall_radius = lpall.calc_radius(X, y, norm)
# ai2_v = ai2.verify(X, y, norm, radii)
# ai2_radius = ai2.calc_radius(X, y, norm)
# deeppoly_v = deeppoly.verify(X, y, norm, radii)
# deeppoly_radius = deeppoly.calc_radius(X, y, norm)
# refinezono_v = refinezono.verify(X, y, norm, radii)
# refinezono_radius = refinezono.calc_radius(X, y, norm)
# krelu_v = krelu.verify(X, y, norm, radii)
# krelu_radius = krelu.calc_radius(X, y, norm)
print(i, 'clean', cln_v,
# 'pgd', pgd_v,
# 'pgd_r', pr(pgd_radius),
'cw', cw_v,
'cw_r', pr(cw_radius),
# 'ibp', ibp_v,
# 'ibp_r', pr(ibp_radius),
# 'fastlinibp', fastlinibp_v,
# 'fastlinibp_r', pr(fastlinibp_radius),
# 'milp', milp_v,
# 'milp_r', pr(milp_radius),
# 'sdp', sdp_v,
# 'sdp_r', pr(sdp_radius),
# 'faz', faz_v,
# 'faz_r', pr(faz_radius),
'lpdual', lpdual_v,
'lpdual_r', pr(lpdual_radius),
# 'crown', fullcrown_v,
# 'crown_r', pr(fullcrown_radius),
# 'crownibp', crownibp_v,
# 'crownibp_r', pr(crownibp_radius),
# 'fastlip', fastlip_v,
# 'fastlip_r', pr(fastlip_radius),
# 'recurjac', recurjac_v,
# 'recurjac_r', pr(recurjac_radius),
# 'spectral', spectral_v,
# 'spectral_r', pr(spectral_radius),
# 'fastlin', fastlin_v,
# 'fastlin_r', pr(fastlin_radius),
# 'cnncert', cnncert_v,
# 'cnncert_r', pr(cnncert_radius),
# 'fstlinsparse', fstlinsparse_v,
# 'fstlinsparse_r', pr(fstlinsparse_radius),
# 'lpall', lpall_v,
# 'lpall_r', pr(lpall_radius),
# 'ai2', ai2_v,
# 'ai2_r', pr(ai2_radius),
# 'deeppoly', deeppoly_v,
# 'deeppoly_r', pr(deeppoly_radius),
# 'refinezono', refinezono_v,
# 'refinezono_r', pr(refinezono_radius),
# 'krelu', krelu_v,
# 'krelu_r', pr(krelu_radius),
file=sys.stderr)
# assert cln_v or not pgd_v
| import os
os.environ["CUDA_VISIBLE_DEVICES"] = '1'
import sys
import datasets
import model
APPROACH_LIST = ['PGD', 'IBP', 'FastLin', 'MILP', 'PercySDP', 'ZicoDual', 'CROWN', 'CROWN-IBP', 'LPAll', 'Diffai', 'RecurJac', 'FastLip']
dataset = 'mnist'
# source = 'test'
# selector = 'small.3'
source = 'fastlin'
selector = '2.20.reg'
# source = 'cnn_cert'
# selector = '3layer_fc_20'
skip = 500
norm = '2'
radii = 0.1
def pr(rad):
if dataset != 'mnist':
return f'{rad*255:.3}/255'
else:
return f'{rad:.3}'
if __name__ == '__main__':
ds = datasets.get_dataset(dataset, 'test')
print(dataset)
m = model.load_model(source, dataset, selector)
print(m)
from adaptor.basic_adaptor import PGDAdaptor, CWAdaptor
from adaptor.basic_adaptor import CleanAdaptor, FastLinIBPAdaptor, MILPAdaptor, PercySDPAdaptor, FazlybSDPAdaptor
from adaptor.lpdual_adaptor import ZicoDualAdaptor
from adaptor.crown_adaptor import FullCrownAdaptor, CrownIBPAdaptor
from adaptor.crown_adaptor import IBPAdaptor
from adaptor.recurjac_adaptor import FastLipAdaptor, RecurJacAdaptor, SpectralAdaptor
from adaptor.recurjac_adaptor import FastLinAdaptor
from adaptor.cnncert_adaptor import CNNCertAdaptor, FastLinSparseAdaptor, LPAllAdaptor
from adaptor.eran_adaptor import AI2Adaptor, DeepPolyAdaptor, RefineZonoAdaptor, KReluAdaptor
cln = CleanAdaptor(dataset, m)
pgd = PGDAdaptor(dataset, m)
cw = CWAdaptor(dataset, m)
# ibp = IBPAdaptor(dataset, m)
# fastlinibp = FastLinIBPAdaptor(dataset, m)
# milp = MILPAdaptor(dataset, m)
sdp = PercySDPAdaptor(dataset, m)
fazsdp = FazlybSDPAdaptor(dataset, m)
lpdual = ZicoDualAdaptor(dataset, m)
# fullcrown = FullCrownAdaptor(dataset, m)
# crownibp = CrownIBPAdaptor(dataset, m)
# fastlip = FastLipAdaptor(dataset, m)
# recurjac = RecurJacAdaptor(dataset, m)
# spectral = SpectralAdaptor(dataset, m)
# fastlin = FastLinAdaptor(dataset, m)
# cnncert = CNNCertAdaptor(dataset, m)
# fastlinsparse = FastLinSparseAdaptor(dataset, m)
# lpall = LPAllAdaptor(dataset, m)
ai2 = AI2Adaptor(dataset, m)
# deeppoly= DeepPolyAdaptor(dataset, m)
# refinezono = RefineZonoAdaptor(dataset, m)
# krelu = KReluAdaptorAdaptor(dataset, m)
for i in range(0, len(ds), skip):
X, y = ds[i]
cln_v = cln.verify(X, y, norm, 0.0)
# pgd_v = pgd.verify(X, y, norm, radii)
# pgd_radius = pgd.calc_radius(X, y, norm)
cw_v = cw.verify(X, y, norm, radii)
cw_radius = cw.calc_radius(X, y, norm)
# ibp_v = ibp.verify(X, y, norm, radii)
# ibp_radius = ibp.calc_radius(X, y, norm)
# fastlinibp_v = fastlinibp.verify(X, y, norm, radii)
# fastlinibp_radius = fastlinibp.calc_radius(X, y, norm)
# milp_v = milp.verify(X, y, norm, radii)
# milp_radius = milp.calc_radius(X, y, norm, eps=1e-2)
# sdp_v = sdp.verify(X, y, norm, radii)
# sdp_radius = sdp.calc_radius(X, y, norm)
# faz_v = fazsdp.verify(X, y, norm, radii)
# faz_radius = fazsdp.calc_radius(X, y, norm)
lpdual_v = lpdual.verify(X, y, norm, radii)
lpdual_radius = lpdual.calc_radius(X, y, norm)
# fullcrown_v = fullcrown.verify(X, y, norm, radii)
# fullcrown_radius = fullcrown.calc_radius(X, y, norm)
# crownibp_v = crownibp.verify(X, y, norm, radii)
# crownibp_radius = crownibp.calc_radius(X, y, norm)
# fastlip_v = fastlip.verify(X, y, norm, radii)
# fastlip_radius = fastlip.calc_radius(X, y, norm)
# recurjac_v = recurjac.verify(X, y, norm, radii)
# recurjac_radius = recurjac.calc_radius(X, y, norm)
# spectral_v = spectral.verify(X, y, norm, radii)
# spectral_radius = spectral.calc_radius(X, y, norm)
# fastlin_v = fastlin.verify(X, y, norm, radii)
# fastlin_radius = fastlin.calc_radius(X, y, norm)
# cnncert_v = cnncert.verify(X, y, norm, radii)
# cnncert_radius = cnncert.calc_radius(X, y, norm)
# fstlinsparse_v = fastlinsparse.verify(X, y, norm, radii)
# fstlinsparse_radius = fastlinsparse.calc_radius(X, y, norm)
# lpall_v = lpall.verify(X, y, norm, radii)
# lpall_radius = lpall.calc_radius(X, y, norm)
# ai2_v = ai2.verify(X, y, norm, radii)
# ai2_radius = ai2.calc_radius(X, y, norm)
# deeppoly_v = deeppoly.verify(X, y, norm, radii)
# deeppoly_radius = deeppoly.calc_radius(X, y, norm)
# refinezono_v = refinezono.verify(X, y, norm, radii)
# refinezono_radius = refinezono.calc_radius(X, y, norm)
# krelu_v = krelu.verify(X, y, norm, radii)
# krelu_radius = krelu.calc_radius(X, y, norm)
print(i, 'clean', cln_v,
# 'pgd', pgd_v,
# 'pgd_r', pr(pgd_radius),
'cw', cw_v,
'cw_r', pr(cw_radius),
# 'ibp', ibp_v,
# 'ibp_r', pr(ibp_radius),
# 'fastlinibp', fastlinibp_v,
# 'fastlinibp_r', pr(fastlinibp_radius),
# 'milp', milp_v,
# 'milp_r', pr(milp_radius),
# 'sdp', sdp_v,
# 'sdp_r', pr(sdp_radius),
# 'faz', faz_v,
# 'faz_r', pr(faz_radius),
'lpdual', lpdual_v,
'lpdual_r', pr(lpdual_radius),
# 'crown', fullcrown_v,
# 'crown_r', pr(fullcrown_radius),
# 'crownibp', crownibp_v,
# 'crownibp_r', pr(crownibp_radius),
# 'fastlip', fastlip_v,
# 'fastlip_r', pr(fastlip_radius),
# 'recurjac', recurjac_v,
# 'recurjac_r', pr(recurjac_radius),
# 'spectral', spectral_v,
# 'spectral_r', pr(spectral_radius),
# 'fastlin', fastlin_v,
# 'fastlin_r', pr(fastlin_radius),
# 'cnncert', cnncert_v,
# 'cnncert_r', pr(cnncert_radius),
# 'fstlinsparse', fstlinsparse_v,
# 'fstlinsparse_r', pr(fstlinsparse_radius),
# 'lpall', lpall_v,
# 'lpall_r', pr(lpall_radius),
# 'ai2', ai2_v,
# 'ai2_r', pr(ai2_radius),
# 'deeppoly', deeppoly_v,
# 'deeppoly_r', pr(deeppoly_radius),
# 'refinezono', refinezono_v,
# 'refinezono_r', pr(refinezono_radius),
# 'krelu', krelu_v,
# 'krelu_r', pr(krelu_radius),
file=sys.stderr)
# assert cln_v or not pgd_v | en | 0.190975 | # source = 'test' # selector = 'small.3' # source = 'cnn_cert' # selector = '3layer_fc_20' # ibp = IBPAdaptor(dataset, m) # fastlinibp = FastLinIBPAdaptor(dataset, m) # milp = MILPAdaptor(dataset, m) # fullcrown = FullCrownAdaptor(dataset, m) # crownibp = CrownIBPAdaptor(dataset, m) # fastlip = FastLipAdaptor(dataset, m) # recurjac = RecurJacAdaptor(dataset, m) # spectral = SpectralAdaptor(dataset, m) # fastlin = FastLinAdaptor(dataset, m) # cnncert = CNNCertAdaptor(dataset, m) # fastlinsparse = FastLinSparseAdaptor(dataset, m) # lpall = LPAllAdaptor(dataset, m) # deeppoly= DeepPolyAdaptor(dataset, m) # refinezono = RefineZonoAdaptor(dataset, m) # krelu = KReluAdaptorAdaptor(dataset, m) # pgd_v = pgd.verify(X, y, norm, radii) # pgd_radius = pgd.calc_radius(X, y, norm) # ibp_v = ibp.verify(X, y, norm, radii) # ibp_radius = ibp.calc_radius(X, y, norm) # fastlinibp_v = fastlinibp.verify(X, y, norm, radii) # fastlinibp_radius = fastlinibp.calc_radius(X, y, norm) # milp_v = milp.verify(X, y, norm, radii) # milp_radius = milp.calc_radius(X, y, norm, eps=1e-2) # sdp_v = sdp.verify(X, y, norm, radii) # sdp_radius = sdp.calc_radius(X, y, norm) # faz_v = fazsdp.verify(X, y, norm, radii) # faz_radius = fazsdp.calc_radius(X, y, norm) # fullcrown_v = fullcrown.verify(X, y, norm, radii) # fullcrown_radius = fullcrown.calc_radius(X, y, norm) # crownibp_v = crownibp.verify(X, y, norm, radii) # crownibp_radius = crownibp.calc_radius(X, y, norm) # fastlip_v = fastlip.verify(X, y, norm, radii) # fastlip_radius = fastlip.calc_radius(X, y, norm) # recurjac_v = recurjac.verify(X, y, norm, radii) # recurjac_radius = recurjac.calc_radius(X, y, norm) # spectral_v = spectral.verify(X, y, norm, radii) # spectral_radius = spectral.calc_radius(X, y, norm) # fastlin_v = fastlin.verify(X, y, norm, radii) # fastlin_radius = fastlin.calc_radius(X, y, norm) # cnncert_v = cnncert.verify(X, y, norm, radii) # cnncert_radius = cnncert.calc_radius(X, y, norm) # fstlinsparse_v = fastlinsparse.verify(X, y, norm, radii) # fstlinsparse_radius = fastlinsparse.calc_radius(X, y, norm) # lpall_v = lpall.verify(X, y, norm, radii) # lpall_radius = lpall.calc_radius(X, y, norm) # ai2_v = ai2.verify(X, y, norm, radii) # ai2_radius = ai2.calc_radius(X, y, norm) # deeppoly_v = deeppoly.verify(X, y, norm, radii) # deeppoly_radius = deeppoly.calc_radius(X, y, norm) # refinezono_v = refinezono.verify(X, y, norm, radii) # refinezono_radius = refinezono.calc_radius(X, y, norm) # krelu_v = krelu.verify(X, y, norm, radii) # krelu_radius = krelu.calc_radius(X, y, norm) # 'pgd', pgd_v, # 'pgd_r', pr(pgd_radius), # 'ibp', ibp_v, # 'ibp_r', pr(ibp_radius), # 'fastlinibp', fastlinibp_v, # 'fastlinibp_r', pr(fastlinibp_radius), # 'milp', milp_v, # 'milp_r', pr(milp_radius), # 'sdp', sdp_v, # 'sdp_r', pr(sdp_radius), # 'faz', faz_v, # 'faz_r', pr(faz_radius), # 'crown', fullcrown_v, # 'crown_r', pr(fullcrown_radius), # 'crownibp', crownibp_v, # 'crownibp_r', pr(crownibp_radius), # 'fastlip', fastlip_v, # 'fastlip_r', pr(fastlip_radius), # 'recurjac', recurjac_v, # 'recurjac_r', pr(recurjac_radius), # 'spectral', spectral_v, # 'spectral_r', pr(spectral_radius), # 'fastlin', fastlin_v, # 'fastlin_r', pr(fastlin_radius), # 'cnncert', cnncert_v, # 'cnncert_r', pr(cnncert_radius), # 'fstlinsparse', fstlinsparse_v, # 'fstlinsparse_r', pr(fstlinsparse_radius), # 'lpall', lpall_v, # 'lpall_r', pr(lpall_radius), # 'ai2', ai2_v, # 'ai2_r', pr(ai2_radius), # 'deeppoly', deeppoly_v, # 'deeppoly_r', pr(deeppoly_radius), # 
'refinezono', refinezono_v, # 'refinezono_r', pr(refinezono_radius), # 'krelu', krelu_v, # 'krelu_r', pr(krelu_radius), # assert cln_v or not pgd_v | 1.961073 | 2 |
solis_service/persistence/__init__.py | anszom/solis-service | 11 | 6630743 | <filename>solis_service/persistence/__init__.py<gh_stars>10-100
from contextlib import contextmanager
from .influxdb_persistence_client import InfluxDbPersistenceClient
@contextmanager
def persistence_client(config):
client = None
try:
persistence_type = config["service"]["persistence"]
if persistence_type == "influxdb":
client = InfluxDbPersistenceClient(**config["influxdb"])
yield client
else:
raise ValueError(f"persistence type: {persistence_type}")
finally:
if client is not None:
client.close()
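# Usage sketch (illustrative only; the exact write API is defined by InfluxDbPersistenceClient):
#   config = {"service": {"persistence": "influxdb"}, "influxdb": {...}}
#   with persistence_client(config) as client:
#       ...  # hand decoded inverter measurements to the client here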
| <filename>solis_service/persistence/__init__.py<gh_stars>10-100
from contextlib import contextmanager
from .influxdb_persistence_client import InfluxDbPersistenceClient
@contextmanager
def persistence_client(config):
client = None
try:
persistence_type = config["service"]["persistence"]
if persistence_type == "influxdb":
client = InfluxDbPersistenceClient(**config["influxdb"])
yield client
else:
raise ValueError(f"persistence type: {persistence_type}")
finally:
if client is not None:
client.close()
| none | 1 | 2.127302 | 2 |
|
src/the_tale/the_tale/linguistics/conf.py | devapromix/the-tale | 1 | 6630744 |
import smart_imports
smart_imports.all()
settings = dext_app_settings.app_settings('LINGUISTICS_SETTINGS',
WORDS_ON_PAGE=25,
TEMPLATES_ON_PAGE=25,
MODERATOR_GROUP_NAME='linguistics moderators group',
EDITOR_GROUP_NAME='linguistics editors group',
FORUM_CATEGORY_ID=61,
REMOVED_TEMPLATE_TIMEOUT=30, # days
MAX_RENDER_TEXT_RETRIES=3,
EXAMPLES_URL=django_reverse_lazy('forum:threads:show', args=[3917]),
RULES_URL=django_reverse_lazy('forum:threads:show', args=[3868]),
LINGUISTICS_MANAGER_UPDATE_DELAY=datetime.timedelta(minutes=1))
|
import smart_imports
smart_imports.all()
settings = dext_app_settings.app_settings('LINGUISTICS_SETTINGS',
WORDS_ON_PAGE=25,
TEMPLATES_ON_PAGE=25,
MODERATOR_GROUP_NAME='linguistics moderators group',
EDITOR_GROUP_NAME='linguistics editors group',
FORUM_CATEGORY_ID=61,
REMOVED_TEMPLATE_TIMEOUT=30, # days
MAX_RENDER_TEXT_RETRIES=3,
EXAMPLES_URL=django_reverse_lazy('forum:threads:show', args=[3917]),
RULES_URL=django_reverse_lazy('forum:threads:show', args=[3868]),
LINGUISTICS_MANAGER_UPDATE_DELAY=datetime.timedelta(minutes=1))
| none | 1 | 1.745282 | 2 |
|
utils/code_generator/time_measurement/phase_1/generate_job_files.py | zehor-l/tiramisu | 23 | 6630745 | <filename>utils/code_generator/time_measurement/phase_1/generate_job_files.py
"""
Generate the job files needed by the sbatch command.
Here's an example of a job file :
#!/bin/bash
#SBATCH --job-name=comp2
#SBATCH --output=log/log_comp_2_6842_10263
#SBATCH -N 1
#SBATCH --exclusive
#SBATCH -p lanka-v3
srun python3 compile_tiramisu_code.py 6842 10263 2
"""
import pickle
from pathlib import Path
# Number of nodes in the cluster
# Each node will do the job on a portion of the programs
nb_nodes = 19
# Path to the list of programs
progs_list_path = Path("progs_list.pickle")
# Path where to store the job files
dst_path = Path("job_files")
# Path to the script that will be distributed
script_path = Path("compile_tiramisu_code.py")
# Path to where to store the logs of the jobs
log_path = Path("log/")
# Content of the job files
job_file_content = "\
#!/bin/bash\n\
#SBATCH --job-name=comp{2}\n\
#SBATCH --output=%s/log_comp_{2}_{0}_{1}\n\
#SBATCH -N 1\n\
#SBATCH --exclusive\n\
#SBATCH -p lanka-v3\n\
srun python3 %s {0} {1} {2}" % (str(log_path), str(script_path)) # This replaces the %s
with open(progs_list_path, "rb") as f:
progs_list = pickle.load(f)
nb_progs = len(progs_list)
progs_per_node = nb_progs // nb_nodes
for i in range(nb_nodes):
# Each node will process the programs in the range progs_list[start, end)
start = i * progs_per_node
if i < nb_nodes - 1:
end = (i + 1) * progs_per_node
else:
end = nb_progs
with open(dst_path / ("compile_job_%s_%s.batch" % (start, end)), "w") as f:
f.write(job_file_content.format(start, end, i))
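# Each generated file is then submitted to the cluster with sbatch, e.g.:
#   sbatch job_files/compile_job_<start>_<end>.batch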
| <filename>utils/code_generator/time_measurement/phase_1/generate_job_files.py
"""
Generate the job files needed by the sbatch command.
Here's an example of a job file :
#!/bin/bash
#SBATCH --job-name=comp2
#SBATCH --output=log/log_comp_2_6842_10263
#SBATCH -N 1
#SBATCH --exclusive
#SBATCH -p lanka-v3
srun python3 compile_tiramisu_code.py 6842 10263 2
"""
import pickle
from pathlib import Path
# Number of nodes in the cluster
# Each node will do the job on a portion of the programs
nb_nodes = 19
# Path to the list of programs
progs_list_path = Path("progs_list.pickle")
# Path where to store the job files
dst_path = Path("job_files")
# Path to the script that will be distributed
script_path = Path("compile_tiramisu_code.py")
# Path to where to store the logs of the jobs
log_path = Path("log/")
# Content of the job files
job_file_content = "\
#!/bin/bash\n\
#SBATCH --job-name=comp{2}\n\
#SBATCH --output=%s/log_comp_{2}_{0}_{1}\n\
#SBATCH -N 1\n\
#SBATCH --exclusive\n\
#SBATCH -p lanka-v3\n\
srun python3 %s {0} {1} {2}" % (str(log_path), str(script_path)) # This replaces the %s
with open(progs_list_path, "rb") as f:
progs_list = pickle.load(f)
nb_progs = len(progs_list)
progs_per_node = nb_progs // nb_nodes
for i in range(nb_nodes):
# Each node will process the programs in the range progs_list[start, end)
start = i * progs_per_node
if i < nb_nodes - 1:
end = (i + 1) * progs_per_node
else:
end = nb_progs
with open(dst_path / ("compile_job_%s_%s.batch" % (start, end)), "w") as f:
f.write(job_file_content.format(start, end, i))
| en | 0.59706 | Generate the job files needed by the sbatch command. Here's an example of a job file : #!/bin/bash #SBATCH --job-name=comp2 #SBATCH --output=log/log_comp_2_6842_10263 #SBATCH -N 1 #SBATCH --exclusive #SBATCH -p lanka-v3 srun python3 compile_tiramisu_code.py 6842 10263 2 # Number of nodes in the cluster # Each node will do the job on a portion of the programs # Path to the list of programs # Path where to store the job files # Path to the script that will be distributed # Path to where to store the logs of the jobs # Content of the job files #!/bin/bash\n\ #SBATCH --job-name=comp{2}\n\ #SBATCH --output=%s/log_comp_{2}_{0}_{1}\n\ #SBATCH -N 1\n\ #SBATCH --exclusive\n\ #SBATCH -p lanka-v3\n\ # This replaces the %s # Each node will process the programs in the range progs_list[start, end) | 2.595151 | 3 |
script/bootstrap.py | renhongl/electron | 5 | 6630746 | <reponame>renhongl/electron
#!/usr/bin/env python
import argparse
import os
import subprocess
import sys
from lib.config import LIBCHROMIUMCONTENT_COMMIT, BASE_URL, PLATFORM, \
enable_verbose_mode, is_verbose_mode, get_target_arch
from lib.util import execute_stdout, get_atom_shell_version, scoped_cwd
SOURCE_ROOT = os.path.abspath(os.path.dirname(os.path.dirname(__file__)))
VENDOR_DIR = os.path.join(SOURCE_ROOT, 'vendor')
PYTHON_26_URL = 'https://chromium.googlesource.com/chromium/deps/python_26'
if os.environ.has_key('CI'):
NPM = os.path.join(SOURCE_ROOT, 'node_modules', '.bin', 'npm')
else:
NPM = 'npm'
if sys.platform in ['win32', 'cygwin']:
NPM += '.cmd'
def main():
os.chdir(SOURCE_ROOT)
args = parse_args()
if not args.yes and PLATFORM != 'win32':
check_root()
if args.verbose:
enable_verbose_mode()
if sys.platform == 'cygwin':
update_win32_python()
if PLATFORM != 'win32':
update_clang()
update_submodules()
setup_python_libs()
update_node_modules('.')
bootstrap_brightray(args.dev, args.url, args.target_arch,
args.libcc_source_path, args.libcc_shared_library_path,
args.libcc_static_library_path)
if PLATFORM == 'linux':
download_sysroot(args.target_arch)
create_chrome_version_h()
touch_config_gypi()
run_update()
update_electron_modules('spec', args.target_arch)
def parse_args():
parser = argparse.ArgumentParser(description='Bootstrap this project')
parser.add_argument('-u', '--url',
help='The base URL from which to download '
'libchromiumcontent (i.e., the URL you passed to '
'libchromiumcontent\'s script/upload script',
default=BASE_URL,
required=False)
parser.add_argument('-v', '--verbose',
action='store_true',
help='Prints the output of the subprocesses')
parser.add_argument('-d', '--dev', action='store_true',
help='Do not download static_library build')
parser.add_argument('-y', '--yes', '--assume-yes',
action='store_true',
help='Run non-interactively by assuming "yes" to all ' \
'prompts.')
parser.add_argument('--target_arch', default=get_target_arch(),
help='Manually specify the arch to build for')
parser.add_argument('--libcc_source_path', required=False,
help='The source path of libchromiumcontent. ' \
'NOTE: All options of libchromiumcontent are ' \
'required OR let electron choose it')
parser.add_argument('--libcc_shared_library_path', required=False,
help='The shared library path of libchromiumcontent.')
parser.add_argument('--libcc_static_library_path', required=False,
help='The static library path of libchromiumcontent.')
return parser.parse_args()
def check_root():
if os.geteuid() == 0:
print "We suggest not running this as root, unless you're really sure."
choice = raw_input("Do you want to continue? [y/N]: ")
if choice not in ('y', 'Y'):
sys.exit(0)
def update_submodules():
execute_stdout(['git', 'submodule', 'sync'])
execute_stdout(['git', 'submodule', 'update', '--init', '--recursive'])
def setup_python_libs():
for lib in ('requests', 'boto'):
with scoped_cwd(os.path.join(VENDOR_DIR, lib)):
execute_stdout([sys.executable, 'setup.py', 'build'])
def bootstrap_brightray(is_dev, url, target_arch, libcc_source_path,
libcc_shared_library_path,
libcc_static_library_path):
bootstrap = os.path.join(VENDOR_DIR, 'brightray', 'script', 'bootstrap')
args = [
'--commit', LIBCHROMIUMCONTENT_COMMIT,
'--target_arch', target_arch,
url
]
if is_dev:
args = ['--dev'] + args
if (libcc_source_path != None and
libcc_shared_library_path != None and
libcc_static_library_path != None):
args += ['--libcc_source_path', libcc_source_path,
'--libcc_shared_library_path', libcc_shared_library_path,
'--libcc_static_library_path', libcc_static_library_path]
execute_stdout([sys.executable, bootstrap] + args)
def update_node_modules(dirname, env=None):
if env is None:
env = os.environ
if PLATFORM == 'linux':
# Use prebuilt clang for building native modules.
llvm_dir = os.path.join(SOURCE_ROOT, 'vendor', 'llvm-build',
'Release+Asserts', 'bin')
env['CC'] = os.path.join(llvm_dir, 'clang')
env['CXX'] = os.path.join(llvm_dir, 'clang++')
env['npm_config_clang'] = '1'
with scoped_cwd(dirname):
args = [NPM, 'install']
if is_verbose_mode():
args += ['--verbose']
# Ignore npm install errors when running in CI.
if os.environ.has_key('CI'):
try:
execute_stdout(args, env)
except subprocess.CalledProcessError:
pass
else:
execute_stdout(args, env)
def update_electron_modules(dirname, target_arch):
env = os.environ.copy()
env['npm_config_arch'] = target_arch
env['npm_config_target'] = get_atom_shell_version()
env['npm_config_disturl'] = 'https://atom.io/download/atom-shell'
update_node_modules(dirname, env)
def update_win32_python():
with scoped_cwd(VENDOR_DIR):
if not os.path.exists('python_26'):
execute_stdout(['git', 'clone', PYTHON_26_URL])
def update_clang():
execute_stdout([os.path.join(SOURCE_ROOT, 'script', 'update-clang.sh')])
def download_sysroot(target_arch):
if target_arch == 'ia32':
target_arch = 'i386'
if target_arch == 'x64':
target_arch = 'amd64'
execute_stdout([sys.executable,
os.path.join(SOURCE_ROOT, 'script', 'install-sysroot.py'),
'--arch', target_arch])
def create_chrome_version_h():
version_file = os.path.join(SOURCE_ROOT, 'vendor', 'brightray', 'vendor',
'libchromiumcontent', 'VERSION')
target_file = os.path.join(SOURCE_ROOT, 'atom', 'common', 'chrome_version.h')
template_file = os.path.join(SOURCE_ROOT, 'script', 'chrome_version.h.in')
with open(version_file, 'r') as f:
version = f.read()
with open(template_file, 'r') as f:
template = f.read()
content = template.replace('{PLACEHOLDER}', version.strip())
# We update the file only if the content has changed (ignoring line ending
# differences).
should_write = True
if os.path.isfile(target_file):
with open(target_file, 'r') as f:
should_write = f.read().replace('r', '') != content.replace('r', '')
if should_write:
with open(target_file, 'w') as f:
f.write(content)
def touch_config_gypi():
config_gypi = os.path.join(SOURCE_ROOT, 'vendor', 'node', 'config.gypi')
with open(config_gypi, 'w+') as f:
content = "\n{'variables':{}}"
if f.read() != content:
f.write(content)
def run_update():
update = os.path.join(SOURCE_ROOT, 'script', 'update.py')
execute_stdout([sys.executable, update])
if __name__ == '__main__':
sys.exit(main())
| #!/usr/bin/env python
import argparse
import os
import subprocess
import sys
from lib.config import LIBCHROMIUMCONTENT_COMMIT, BASE_URL, PLATFORM, \
enable_verbose_mode, is_verbose_mode, get_target_arch
from lib.util import execute_stdout, get_atom_shell_version, scoped_cwd
SOURCE_ROOT = os.path.abspath(os.path.dirname(os.path.dirname(__file__)))
VENDOR_DIR = os.path.join(SOURCE_ROOT, 'vendor')
PYTHON_26_URL = 'https://chromium.googlesource.com/chromium/deps/python_26'
if os.environ.has_key('CI'):
NPM = os.path.join(SOURCE_ROOT, 'node_modules', '.bin', 'npm')
else:
NPM = 'npm'
if sys.platform in ['win32', 'cygwin']:
NPM += '.cmd'
def main():
os.chdir(SOURCE_ROOT)
args = parse_args()
if not args.yes and PLATFORM != 'win32':
check_root()
if args.verbose:
enable_verbose_mode()
if sys.platform == 'cygwin':
update_win32_python()
if PLATFORM != 'win32':
update_clang()
update_submodules()
setup_python_libs()
update_node_modules('.')
bootstrap_brightray(args.dev, args.url, args.target_arch,
args.libcc_source_path, args.libcc_shared_library_path,
args.libcc_static_library_path)
if PLATFORM == 'linux':
download_sysroot(args.target_arch)
create_chrome_version_h()
touch_config_gypi()
run_update()
update_electron_modules('spec', args.target_arch)
def parse_args():
parser = argparse.ArgumentParser(description='Bootstrap this project')
parser.add_argument('-u', '--url',
help='The base URL from which to download '
'libchromiumcontent (i.e., the URL you passed to '
'libchromiumcontent\'s script/upload script',
default=BASE_URL,
required=False)
parser.add_argument('-v', '--verbose',
action='store_true',
help='Prints the output of the subprocesses')
parser.add_argument('-d', '--dev', action='store_true',
help='Do not download static_library build')
parser.add_argument('-y', '--yes', '--assume-yes',
action='store_true',
help='Run non-interactively by assuming "yes" to all ' \
'prompts.')
parser.add_argument('--target_arch', default=get_target_arch(),
help='Manually specify the arch to build for')
parser.add_argument('--libcc_source_path', required=False,
help='The source path of libchromiumcontent. ' \
'NOTE: All options of libchromiumcontent are ' \
'required OR let electron choose it')
parser.add_argument('--libcc_shared_library_path', required=False,
help='The shared library path of libchromiumcontent.')
parser.add_argument('--libcc_static_library_path', required=False,
help='The static library path of libchromiumcontent.')
return parser.parse_args()
def check_root():
if os.geteuid() == 0:
print "We suggest not running this as root, unless you're really sure."
choice = raw_input("Do you want to continue? [y/N]: ")
if choice not in ('y', 'Y'):
sys.exit(0)
def update_submodules():
execute_stdout(['git', 'submodule', 'sync'])
execute_stdout(['git', 'submodule', 'update', '--init', '--recursive'])
def setup_python_libs():
for lib in ('requests', 'boto'):
with scoped_cwd(os.path.join(VENDOR_DIR, lib)):
execute_stdout([sys.executable, 'setup.py', 'build'])
def bootstrap_brightray(is_dev, url, target_arch, libcc_source_path,
libcc_shared_library_path,
libcc_static_library_path):
bootstrap = os.path.join(VENDOR_DIR, 'brightray', 'script', 'bootstrap')
args = [
'--commit', LIBCHROMIUMCONTENT_COMMIT,
'--target_arch', target_arch,
url
]
if is_dev:
args = ['--dev'] + args
if (libcc_source_path != None and
libcc_shared_library_path != None and
libcc_static_library_path != None):
args += ['--libcc_source_path', libcc_source_path,
'--libcc_shared_library_path', libcc_shared_library_path,
'--libcc_static_library_path', libcc_static_library_path]
execute_stdout([sys.executable, bootstrap] + args)
def update_node_modules(dirname, env=None):
if env is None:
env = os.environ
if PLATFORM == 'linux':
# Use prebuilt clang for building native modules.
llvm_dir = os.path.join(SOURCE_ROOT, 'vendor', 'llvm-build',
'Release+Asserts', 'bin')
env['CC'] = os.path.join(llvm_dir, 'clang')
env['CXX'] = os.path.join(llvm_dir, 'clang++')
env['npm_config_clang'] = '1'
with scoped_cwd(dirname):
args = [NPM, 'install']
if is_verbose_mode():
args += ['--verbose']
# Ignore npm install errors when running in CI.
if os.environ.has_key('CI'):
try:
execute_stdout(args, env)
except subprocess.CalledProcessError:
pass
else:
execute_stdout(args, env)
def update_electron_modules(dirname, target_arch):
env = os.environ.copy()
env['npm_config_arch'] = target_arch
env['npm_config_target'] = get_atom_shell_version()
env['npm_config_disturl'] = 'https://atom.io/download/atom-shell'
update_node_modules(dirname, env)
def update_win32_python():
with scoped_cwd(VENDOR_DIR):
if not os.path.exists('python_26'):
execute_stdout(['git', 'clone', PYTHON_26_URL])
def update_clang():
execute_stdout([os.path.join(SOURCE_ROOT, 'script', 'update-clang.sh')])
def download_sysroot(target_arch):
if target_arch == 'ia32':
target_arch = 'i386'
if target_arch == 'x64':
target_arch = 'amd64'
execute_stdout([sys.executable,
os.path.join(SOURCE_ROOT, 'script', 'install-sysroot.py'),
'--arch', target_arch])
def create_chrome_version_h():
version_file = os.path.join(SOURCE_ROOT, 'vendor', 'brightray', 'vendor',
'libchromiumcontent', 'VERSION')
target_file = os.path.join(SOURCE_ROOT, 'atom', 'common', 'chrome_version.h')
template_file = os.path.join(SOURCE_ROOT, 'script', 'chrome_version.h.in')
with open(version_file, 'r') as f:
version = f.read()
with open(template_file, 'r') as f:
template = f.read()
content = template.replace('{PLACEHOLDER}', version.strip())
# We update the file only if the content has changed (ignoring line ending
# differences).
should_write = True
if os.path.isfile(target_file):
with open(target_file, 'r') as f:
should_write = f.read().replace('r', '') != content.replace('r', '')
if should_write:
with open(target_file, 'w') as f:
f.write(content)
def touch_config_gypi():
config_gypi = os.path.join(SOURCE_ROOT, 'vendor', 'node', 'config.gypi')
with open(config_gypi, 'w+') as f:
content = "\n{'variables':{}}"
if f.read() != content:
f.write(content)
def run_update():
update = os.path.join(SOURCE_ROOT, 'script', 'update.py')
execute_stdout([sys.executable, update])
if __name__ == '__main__':
sys.exit(main()) | en | 0.728248 | #!/usr/bin/env python # Use prebuilt clang for building native modules. # Ignore npm install errors when running in CI. # We update the file only if the content has changed (ignoring line ending # differences). | 1.984026 | 2 |
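A reusable pattern in the bootstrap script above is create_chrome_version_h's habit of rendering a template and rewriting the target file only when the rendered text actually changed, which avoids invalidating incremental builds. A standalone Python 3 sketch of that pattern; the function and argument names are illustrative, not taken from the Electron sources.

import os

def render_if_changed(template_path, target_path, placeholder, value):
    # Fill the template, then rewrite the target only when the result differs.
    with open(template_path) as f:
        content = f.read().replace(placeholder, value.strip())
    if os.path.isfile(target_path):
        with open(target_path) as f:
            if f.read() == content:
                return False          # already up to date, leave the file untouched
    with open(target_path, "w") as f:
        f.write(content)
    return True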
tests/__init__.py | ericmjl/seqlike | 186 | 6630747 | import os
import pathlib
test_path = pathlib.Path(os.path.abspath(os.path.dirname(__file__)))
| import os
import pathlib
test_path = pathlib.Path(os.path.abspath(os.path.dirname(__file__)))
| none | 1 | 1.809192 | 2 |
|
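The seqlike tests/__init__.py above only exposes test_path. A typical use, assumed for illustration rather than taken from the repository, is resolving fixture files relative to the test directory:

from tests import test_path            # import path assumed from the file location above

fixture_dir = test_path / "data"        # "data" and the file name below are illustrative
example_file = fixture_dir / "example.fasta"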
tests/ut/python/adv_robustness/attacks/test_cw.py | hboshnak/mindarmour | 0 | 6630748 | # Copyright 2019 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
CW-Attack test.
"""
import gc
import numpy as np
import pytest
import mindspore.ops.operations as M
from mindspore.nn import Cell
from mindspore import context
from mindarmour.adv_robustness.attacks import CarliniWagnerL2Attack
# for user
class Net(Cell):
"""
Construct the network of target model.
Examples:
>>> net = Net()
"""
def __init__(self):
"""
Introduce the layers used for network construction.
"""
super(Net, self).__init__()
self._softmax = M.Softmax()
def construct(self, inputs):
"""
Construct network.
Args:
inputs (Tensor): Input data.
"""
out = self._softmax(inputs)
return out
@pytest.mark.level0
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_card
@pytest.mark.component_mindarmour
def test_cw_attack_ascend():
"""
Feature: CW-Attack test for ascend
Description: Given multiple images, we want to make sure the adversarial examples
generated are different from the images
Expectation: input_np != ms_adv_x
"""
context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")
net = Net()
input_np = np.array([[0.1, 0.2, 0.7, 0.5, 0.4]]).astype(np.float32)
label_np = np.array([3]).astype(np.int64)
num_classes = input_np.shape[1]
attack = CarliniWagnerL2Attack(net, num_classes, targeted=False)
adv_data = attack.generate(input_np, label_np)
assert np.any(input_np != adv_data)
del input_np, label_np, adv_data
gc.collect()
@pytest.mark.level0
@pytest.mark.platform_x86_cpu
@pytest.mark.env_card
@pytest.mark.component_mindarmour
def test_cw_attack_cpu():
"""
Feature: CW-Attack test for cpu
Description: Given multiple images, we want to make sure the adversarial examples
generated are different from the images
Expectation: input_np != ms_adv_x
"""
context.set_context(mode=context.GRAPH_MODE, device_target="CPU")
net = Net()
input_np = np.array([[0.1, 0.2, 0.7, 0.5, 0.4]]).astype(np.float32)
label_np = np.array([3]).astype(np.int64)
num_classes = input_np.shape[1]
attack = CarliniWagnerL2Attack(net, num_classes, targeted=False)
adv_data = attack.generate(input_np, label_np)
assert np.any(input_np != adv_data)
del input_np, label_np, adv_data
gc.collect()
@pytest.mark.level0
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_card
@pytest.mark.component_mindarmour
def test_cw_attack_targeted_ascend():
"""
Feature: CW-Attack-Targeted test for ascend
Description: Given multiple images, we want to make sure the adversarial examples
generated are different from the images
Expectation: input_np != ms_adv_x
"""
context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")
net = Net()
input_np = np.array([[0.1, 0.2, 0.7, 0.5, 0.4]]).astype(np.float32)
target_np = np.array([1]).astype(np.int64)
num_classes = input_np.shape[1]
attack = CarliniWagnerL2Attack(net, num_classes, targeted=True)
adv_data = attack.generate(input_np, target_np)
assert np.any(input_np != adv_data)
del input_np, target_np, adv_data
gc.collect()
@pytest.mark.level0
@pytest.mark.platform_x86_cpu
@pytest.mark.env_card
@pytest.mark.component_mindarmour
def test_cw_attack_targeted_cpu():
"""
Feature: CW-Attack-Targeted test for cpu
Description: Given multiple images, we want to make sure the adversarial examples
generated are different from the images
Expectation: input_np != ms_adv_x
"""
context.set_context(mode=context.GRAPH_MODE, device_target="CPU")
net = Net()
input_np = np.array([[0.1, 0.2, 0.7, 0.5, 0.4]]).astype(np.float32)
target_np = np.array([1]).astype(np.int64)
num_classes = input_np.shape[1]
attack = CarliniWagnerL2Attack(net, num_classes, targeted=True)
adv_data = attack.generate(input_np, target_np)
assert np.any(input_np != adv_data)
del input_np, target_np, adv_data
gc.collect()
| # Copyright 2019 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
CW-Attack test.
"""
import gc
import numpy as np
import pytest
import mindspore.ops.operations as M
from mindspore.nn import Cell
from mindspore import context
from mindarmour.adv_robustness.attacks import CarliniWagnerL2Attack
# for user
class Net(Cell):
"""
Construct the network of target model.
Examples:
>>> net = Net()
"""
def __init__(self):
"""
Introduce the layers used for network construction.
"""
super(Net, self).__init__()
self._softmax = M.Softmax()
def construct(self, inputs):
"""
Construct network.
Args:
inputs (Tensor): Input data.
"""
out = self._softmax(inputs)
return out
@pytest.mark.level0
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_card
@pytest.mark.component_mindarmour
def test_cw_attack_ascend():
"""
Feature: CW-Attack test for ascend
Description: Given multiple images, we want to make sure the adversarial examples
generated are different from the images
Expectation: input_np != ms_adv_x
"""
context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")
net = Net()
input_np = np.array([[0.1, 0.2, 0.7, 0.5, 0.4]]).astype(np.float32)
label_np = np.array([3]).astype(np.int64)
num_classes = input_np.shape[1]
attack = CarliniWagnerL2Attack(net, num_classes, targeted=False)
adv_data = attack.generate(input_np, label_np)
assert np.any(input_np != adv_data)
del input_np, label_np, adv_data
gc.collect()
@pytest.mark.level0
@pytest.mark.platform_x86_cpu
@pytest.mark.env_card
@pytest.mark.component_mindarmour
def test_cw_attack_cpu():
"""
Feature: CW-Attack test for cpu
Description: Given multiple images, we want to make sure the adversarial examples
generated are different from the images
Expectation: input_np != ms_adv_x
"""
context.set_context(mode=context.GRAPH_MODE, device_target="CPU")
net = Net()
input_np = np.array([[0.1, 0.2, 0.7, 0.5, 0.4]]).astype(np.float32)
label_np = np.array([3]).astype(np.int64)
num_classes = input_np.shape[1]
attack = CarliniWagnerL2Attack(net, num_classes, targeted=False)
adv_data = attack.generate(input_np, label_np)
assert np.any(input_np != adv_data)
del input_np, label_np, adv_data
gc.collect()
@pytest.mark.level0
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_card
@pytest.mark.component_mindarmour
def test_cw_attack_targeted_ascend():
"""
Feature: CW-Attack-Targeted test for ascend
Description: Given multiple images, we want to make sure the adversarial examples
generated are different from the images
Expectation: input_np != ms_adv_x
"""
context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")
net = Net()
input_np = np.array([[0.1, 0.2, 0.7, 0.5, 0.4]]).astype(np.float32)
target_np = np.array([1]).astype(np.int64)
num_classes = input_np.shape[1]
attack = CarliniWagnerL2Attack(net, num_classes, targeted=True)
adv_data = attack.generate(input_np, target_np)
assert np.any(input_np != adv_data)
del input_np, target_np, adv_data
gc.collect()
@pytest.mark.level0
@pytest.mark.platform_x86_cpu
@pytest.mark.env_card
@pytest.mark.component_mindarmour
def test_cw_attack_targeted_cpu():
"""
Feature: CW-Attack-Targeted test for cpu
Description: Given multiple images, we want to make sure the adversarial examples
generated are different from the images
Expectation: input_np != ms_adv_x
"""
context.set_context(mode=context.GRAPH_MODE, device_target="CPU")
net = Net()
input_np = np.array([[0.1, 0.2, 0.7, 0.5, 0.4]]).astype(np.float32)
target_np = np.array([1]).astype(np.int64)
num_classes = input_np.shape[1]
attack = CarliniWagnerL2Attack(net, num_classes, targeted=True)
adv_data = attack.generate(input_np, target_np)
assert np.any(input_np != adv_data)
del input_np, target_np, adv_data
gc.collect()
| en | 0.821862 | # Copyright 2019 Huawei Technologies Co., Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. CW-Attack test. # for user Construct the network of target model. Examples: >>> net = Net() Introduce the layers used for network construction. Construct network. Args: inputs (Tensor): Input data. Feature: CW-Attack test for ascend Description: Given multiple images, we want to make sure the adversarial examples generated are different from the images Expectation: input_np != ms_adv_x Feature: CW-Attack test for cpu Description: Given multiple images, we want to make sure the adversarial examples generated are different from the images Expectation: input_np != ms_adv_x Feature: CW-Attack-Targeted test for ascend Description: Given multiple images, we want to make sure the adversarial examples generated are different from the images Expectation: input_np != ms_adv_x Feature: CW-Attack-Targeted test for cpu Description: Given multiple images, we want to make sure the adversarial examples generated are different from the images Expectation: input_np != ms_adv_x | 2.068128 | 2 |
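A usage sketch distilled from the tests above, driving the same CarliniWagnerL2Attack API outside pytest. The toy softmax Net class defined in the record is assumed to be in scope, and the CPU graph-mode setting and input values are illustrative.

import numpy as np
from mindspore import context
from mindarmour.adv_robustness.attacks import CarliniWagnerL2Attack

context.set_context(mode=context.GRAPH_MODE, device_target="CPU")
net = Net()                                            # toy softmax model from the record above
inputs = np.array([[0.1, 0.2, 0.7, 0.5, 0.4]], dtype=np.float32)
labels = np.array([3], dtype=np.int64)
attack = CarliniWagnerL2Attack(net, inputs.shape[1], targeted=False)
adv_inputs = attack.generate(inputs, labels)           # adversarial examples, same shape as inputs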
workflows/wf_quasiparticle.py | abelcarreras/aiida_extensions | 0 | 6630749 | <filename>workflows/wf_quasiparticle.py
from aiida.orm import Code, DataFactory, WorkflowFactory
from aiida.orm.workflow import Workflow
#from aiida.workflows.wf_phonon import WorkflowPhonon
from aiida.orm import load_node, load_workflow
from aiida.orm.calculation.inline import make_inline
WorkflowPhonon = WorkflowFactory('wf_phonon')
StructureData = DataFactory('structure')
ParameterData = DataFactory('parameter')
ArrayData = DataFactory('array')
import numpy as np
@make_inline
def generate_supercell_inline(**kwargs):
import itertools
structure = kwargs.pop('structure')
supercell = kwargs.pop('supercell').dict.supercell
symbols = [site.kind_name for site in structure.sites]
positions=np.array([site.position for site in structure.sites])
position_super_cell = []
for k in range(positions.shape[0]):
for r in itertools.product(*[range(i) for i in supercell[::-1]]):
position_super_cell.append(positions[k,:] + np.dot(np.array(r[::-1]), structure.cell))
position_super_cell = np.array(position_super_cell)
symbol_super_cell = []
for j in range(positions.shape[0]):
symbol_super_cell += [symbols[j]] * np.prod(supercell)
supercell = StructureData(cell=np.dot(structure.cell, np.diag(supercell)))
for i, position in enumerate(position_super_cell):
supercell.append_atom(position=position.tolist(),
symbols=symbol_super_cell[i])
return {"supercell": supercell}
class Wf_quasiparticleWorkflow(Workflow):
def __init__(self, **kwargs):
super(Wf_quasiparticleWorkflow, self).__init__(**kwargs)
if 'use_optimized_structure_for_md' in kwargs:
self._use_optimized_structure_for_md = kwargs['use_optimized_structure_for_md']
else:
self._use_optimized_structure_for_md = True # By default optimized structure is used
if 'optimize' in kwargs:
self._optimize = kwargs['optimize']
else:
self._optimize = True # By default optimization is done
def generate_md_lammps(self, structure, parameters):
codename = parameters['code']
code = Code.get_from_string(codename)
calc = code.new_calc(max_wallclock_seconds=3600,
resources=parameters['resources'])
calc.label = "md lammps calculation"
calc.description = "A much longer description"
calc.use_code(code)
calc.use_structure(structure)
calc.use_parameters(ParameterData(dict=parameters['parameters']))
calc.use_potential(ParameterData(dict=parameters['potential']))
calc.store_all()
return calc
def generate_calculation_dynaphopy(self, structure, force_constants, parameters, trajectory):
codename = parameters['code']
code = Code.get_from_string(codename)
calc = code.new_calc(max_wallclock_seconds=3600,
resources=parameters['resources'])
calc.use_code(code)
calc.use_structure(structure)
calc.use_parameters(ParameterData(dict=parameters['parameters']))
calc.use_force_constants(force_constants)
calc.use_trajectory(trajectory)
calc.store_all()
return calc
# Calculates the reference crystal structure (optimize it if requested)
@Workflow.step
def start(self):
self.append_to_report('Starting workflow_workflow')
self.append_to_report('Phonon calculation of base structure')
wf_parameters = self.get_parameters()
wf = WorkflowPhonon(params=wf_parameters, optimize=self._optimize)
wf.store()
# wf = load_workflow(127)
self.attach_workflow(wf)
wf.start()
self.next(self.md_lammps)
# Generate the volume expanded cells
@Workflow.step
def md_lammps(self):
wf_parameters = self.get_parameters()
if self._use_optimized_structure_for_md:
structure = self.get_step('start').get_sub_workflows()[0].get_result('final_structure')
else:
structure = wf_parameters['structure']
inline_params = {'structure': structure,
'supercell': ParameterData(dict=wf_parameters['input_md'])}
supercell = generate_supercell_inline(**inline_params)[1]['supercell']
calc = self.generate_md_lammps(supercell, wf_parameters['input_md'])
# self.append_to_report('created MD calculation with PK={}'.format(calc.pk))
self.attach_calculation(calc)
self.next(self.dynaphopy)
# Collects the forces and prepares force constants
@Workflow.step
def dynaphopy(self):
wf_parameters = self.get_parameters()
harmonic_force_constants = self.get_step('start').get_sub_workflows()[0].get_result('force_constants')
structure = self.get_step('start').get_sub_workflows()[0].get_result('final_structure')
self.add_result('force_constants', harmonic_force_constants)
md_calc = self.get_step_calculations(self.md_lammps)[0]
dyna_calc = self.generate_calculation_dynaphopy(structure,
harmonic_force_constants,
wf_parameters['dynaphopy_input'],
md_calc.out.trajectory_data)
self.attach_calculation(dyna_calc)
self.next(self.collect)
# Collects the forces and prepares force constants
@Workflow.step
def collect(self):
# Get the thermal properties at 0 K from phonopy calculation
self.add_result('h_thermal_properties', self.get_step('start').get_sub_workflows()[0].get_result('thermal_properties'))
# Get the thermal properties at finite temperature from dynaphopy calculation
calc = self.get_step_calculations(self.dynaphopy)[0]
self.add_result('thermal_properties', calc.out.thermal_properties)
# Pass the final properties from phonon workflow
self.add_result('quasiparticle_data', calc.out.quasiparticle_data)
self.add_result('r_force_constants', calc.out.array_data)
optimization_data = self.get_step('start').get_sub_workflows()[0].get_result('optimized_structure_data')
final_structure = self.get_step('start').get_sub_workflows()[0].get_result('final_structure')
self.add_result('optimized_structure_data', optimization_data)
self.add_result('final_structure', final_structure)
self.next(self.exit) | <filename>workflows/wf_quasiparticle.py
from aiida.orm import Code, DataFactory, WorkflowFactory
from aiida.orm.workflow import Workflow
#from aiida.workflows.wf_phonon import WorkflowPhonon
from aiida.orm import load_node, load_workflow
from aiida.orm.calculation.inline import make_inline
WorkflowPhonon = WorkflowFactory('wf_phonon')
StructureData = DataFactory('structure')
ParameterData = DataFactory('parameter')
ArrayData = DataFactory('array')
import numpy as np
@make_inline
def generate_supercell_inline(**kwargs):
import itertools
structure = kwargs.pop('structure')
supercell = kwargs.pop('supercell').dict.supercell
symbols = [site.kind_name for site in structure.sites]
positions=np.array([site.position for site in structure.sites])
position_super_cell = []
for k in range(positions.shape[0]):
for r in itertools.product(*[range(i) for i in supercell[::-1]]):
position_super_cell.append(positions[k,:] + np.dot(np.array(r[::-1]), structure.cell))
position_super_cell = np.array(position_super_cell)
symbol_super_cell = []
for j in range(positions.shape[0]):
symbol_super_cell += [symbols[j]] * np.prod(supercell)
supercell = StructureData(cell=np.dot(structure.cell, np.diag(supercell)))
for i, position in enumerate(position_super_cell):
supercell.append_atom(position=position.tolist(),
symbols=symbol_super_cell[i])
return {"supercell": supercell}
class Wf_quasiparticleWorkflow(Workflow):
def __init__(self, **kwargs):
super(Wf_quasiparticleWorkflow, self).__init__(**kwargs)
if 'use_optimized_structure_for_md' in kwargs:
self._use_optimized_structure_for_md = kwargs['use_optimized_structure_for_md']
else:
self._use_optimized_structure_for_md = True # By default optimized structure is used
if 'optimize' in kwargs:
self._optimize = kwargs['optimize']
else:
self._optimize = True # By default optimization is done
def generate_md_lammps(self, structure, parameters):
codename = parameters['code']
code = Code.get_from_string(codename)
calc = code.new_calc(max_wallclock_seconds=3600,
resources=parameters['resources'])
calc.label = "md lammps calculation"
calc.description = "A much longer description"
calc.use_code(code)
calc.use_structure(structure)
calc.use_parameters(ParameterData(dict=parameters['parameters']))
calc.use_potential(ParameterData(dict=parameters['potential']))
calc.store_all()
return calc
def generate_calculation_dynaphopy(self, structure, force_constants, parameters, trajectory):
codename = parameters['code']
code = Code.get_from_string(codename)
calc = code.new_calc(max_wallclock_seconds=3600,
resources=parameters['resources'])
calc.use_code(code)
calc.use_structure(structure)
calc.use_parameters(ParameterData(dict=parameters['parameters']))
calc.use_force_constants(force_constants)
calc.use_trajectory(trajectory)
calc.store_all()
return calc
# Calculates the reference crystal structure (optimize it if requested)
@Workflow.step
def start(self):
self.append_to_report('Starting workflow_workflow')
self.append_to_report('Phonon calculation of base structure')
wf_parameters = self.get_parameters()
wf = WorkflowPhonon(params=wf_parameters, optimize=self._optimize)
wf.store()
# wf = load_workflow(127)
self.attach_workflow(wf)
wf.start()
self.next(self.md_lammps)
# Generate the volume expanded cells
@Workflow.step
def md_lammps(self):
wf_parameters = self.get_parameters()
if self._use_optimized_structure_for_md:
structure = self.get_step('start').get_sub_workflows()[0].get_result('final_structure')
else:
structure = wf_parameters['structure']
inline_params = {'structure': structure,
'supercell': ParameterData(dict=wf_parameters['input_md'])}
supercell = generate_supercell_inline(**inline_params)[1]['supercell']
calc = self.generate_md_lammps(supercell, wf_parameters['input_md'])
# self.append_to_report('created MD calculation with PK={}'.format(calc.pk))
self.attach_calculation(calc)
self.next(self.dynaphopy)
# Collects the forces and prepares force constants
@Workflow.step
def dynaphopy(self):
wf_parameters = self.get_parameters()
harmonic_force_constants = self.get_step('start').get_sub_workflows()[0].get_result('force_constants')
structure = self.get_step('start').get_sub_workflows()[0].get_result('final_structure')
self.add_result('force_constants', harmonic_force_constants)
md_calc = self.get_step_calculations(self.md_lammps)[0]
dyna_calc = self.generate_calculation_dynaphopy(structure,
harmonic_force_constants,
wf_parameters['dynaphopy_input'],
md_calc.out.trajectory_data)
self.attach_calculation(dyna_calc)
self.next(self.collect)
# Collects the forces and prepares force constants
@Workflow.step
def collect(self):
# Get the thermal properties at 0 K from phonopy calculation
self.add_result('h_thermal_properties', self.get_step('start').get_sub_workflows()[0].get_result('thermal_properties'))
# Get the thermal properties at finite temperature from dynaphopy calculation
calc = self.get_step_calculations(self.dynaphopy)[0]
self.add_result('thermal_properties', calc.out.thermal_properties)
# Pass the final properties from phonon workflow
self.add_result('quasiparticle_data', calc.out.quasiparticle_data)
self.add_result('r_force_constants', calc.out.array_data)
optimization_data = self.get_step('start').get_sub_workflows()[0].get_result('optimized_structure_data')
final_structure = self.get_step('start').get_sub_workflows()[0].get_result('final_structure')
self.add_result('optimized_structure_data', optimization_data)
self.add_result('final_structure', final_structure)
self.next(self.exit) | en | 0.672477 | #from aiida.workflows.wf_phonon import WorkflowPhonon # By default optimized structure is used # By default optimization is done # Calculates the reference crystal structure (optimize it if requested) # wf = load_workflow(127) # Generate the volume expanded cells # self.append_to_report('created MD calculation with PK={}'.format(calc.pk)) # Collects the forces and prepares force constants # Collects the forces and prepares force constants # Get the thermal properties at 0 K from phonopy calculation # Get the thermal properties at finite temperature from dynaphopy calculation # Pass the final properties from phonon workflow | 2.111434 | 2 |
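generate_supercell_inline above replicates every site of a structure over an n1 x n2 x n3 repetition of the cell. The same geometry can be sketched with plain numpy, without AiiDA data types; this assumes the cell holds the three lattice vectors as rows and the positions are Cartesian.

import itertools
import numpy as np

def expand_supercell(cell, positions, symbols, repeat):
    # Replicate each site by every integer combination of cell-vector translations.
    new_positions, new_symbols = [], []
    for pos, sym in zip(positions, symbols):
        for shift in itertools.product(*(range(n) for n in repeat)):
            new_positions.append(np.asarray(pos) + np.dot(shift, cell))
            new_symbols.append(sym)
    super_cell = np.dot(np.diag(repeat), cell)   # each lattice vector scaled by its repetition
    return super_cell, np.array(new_positions), new_symbols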
zeitcoinforth.py | mmgrant73/zeitcoin | 1 | 6630750 | #!/usr/local/bin/python
import sys, re, hashlib
import M2Crypto
from M2Crypto import Rand, RSA, BIO, EVP
class zeitforth:
ds = [] # The data stack
cStack = [] # The control struct stack
heap = [0]*20 # The data heap
heapNext = 0 # Next avail slot in heap
words = [] # The input stream of tokens
PUBKEY = 'zeitcoin-public.pem'
#============================== Lexical Parsing
def tokenizeWords(self,s) : # clip comments, split to list of words
self.words += re.sub("#.*\n","\n",s+"\n").lower().split() # Use "#" for comment to end of line
return
#================================= Runtime operation
def stripstr(self,str1):
fletter = str1[0]
lletter = str1[-1]
if (fletter=='"' and lletter=='"'):
str1 = str1[1:-1]
return str1
def execute (self,code) :
p = 0
while p < len(code) :
func = code[p]
p += 1
newP = func(self,code,p)
if newP != None : p = newP
def rAdd (self,cod,p) :
try:
b=self.ds.pop()
a=self.ds.pop()
resulta=isinstance( a, ( int, long ) )
resultb=isinstance( b, ( int, long ) )
if (resulta and resultb):
self.ds.append(a+b)
else:
print "[error] - Can not use a string with this command"
self.ds.append(0)
except:
print "[error] - Runtime error the shack is empty"
return
def rMul (self,cod,p) :
b=self.ds.pop()
a=self.ds.pop()
resulta=isinstance( a, ( int, long ) )
resultb=isinstance( b, ( int, long ) )
if (resulta and resultb):
self.ds.append(a*b)
else:
print "[error] - Can not use a string with this command"
self.ds.append(0)
return
def rSub (self,cod,p) :
b=self.ds.pop()
a=self.ds.pop()
resulta=isinstance( a, ( int, long ) )
resultb=isinstance( b, ( int, long ) )
if (resulta and resultb):
self.ds.append(a-b)
else:
print "[error] - Can not use a string with this command"
self.ds.append(0)
return
def rDiv (self,cod,p) :
b=self.ds.pop()
a=self.ds.pop()
resulta=isinstance( a, ( int, long ) )
resultb=isinstance( b, ( int, long ) )
if (resulta and resultb):
self.ds.append(a/b)
else:
print "[error] - Can not use a string with this command"
self.ds.append(0)
return
def rEq (self,cod,p) :
try:
b=self.ds.pop()
a=self.ds.pop()
resulta=isinstance( a, ( int, long ) )
resultb=isinstance( b, ( int, long ) )
if (resulta and resultb):
self.ds.append(int(a==b))
elif ((not(resulta)) and (not(resultb))):
if (str(a)==str(b)):
self.ds.append(1)
else:
self.ds.append(0)
else:
print "[error] - The data being compared must be of the same type"
self.ds.append(0)
except:
print "[error] - Runtime error the shack is empty"
return
def rGt (self,cod,p) :
b=self.ds.pop()
a=self.ds.pop()
resulta=isinstance( a, ( int, long ) )
resultb=isinstance( b, ( int, long ) )
if (resulta and resultb):
self.ds.append(int(a>b))
else:
print "[error] - Can not use a string with this command"
self.ds.append(0)
return
def rLt (self,cod,p) :
b=self.ds.pop()
a=self.ds.pop()
resulta=isinstance( a, ( int, long ) )
resultb=isinstance( b, ( int, long ) )
if (resulta and resultb):
self.ds.append(int(a<b))
else:
print "[error] - Can not use a string with this command"
self.ds.append(0)
return
def rSwap(self,cod,p) :
a=self.ds.pop()
b=self.ds.pop()
self.ds.append(a)
self.ds.append(b)
return
def rDup (self,cod,p) :
self.ds.append(self.ds[-1])
return
def rDrop(self,cod,p) :
self.ds.pop()
return
def rOver(self,cod,p) :
self.ds.append(self.ds[-2])
return
def rDump(self,cod,p) :
print "ds = ", self.ds
return
def rDot (self,cod,p) :
print self.ds.pop()
return
def rJmp (self,cod,p) :
return cod[p]
def rJnz (self,cod,p) :
return (cod[p],p+1)[self.ds.pop()]
def rJz (self,cod,p) :
return (p+1,cod[p])[self.ds.pop()==0]
def rRun (dummy,self,cod,p) :
try:
self.execute(self.rDict[cod[p]])
except :
print "[error] - Invalid command run-time error"
return p+1
def rPush(dummy,self,cod,p) :
#print "dummy-",dummy
#print "self-",self
#print "cod-",cod
#print "p-",p
self.ds.append(cod[p])
return p+1
def rCreate (pcode,p) :
global lastCreate
lastCreate = label = getWord() # match next word (input) to next heap address
self.rDict[label] = [rPush, self.heapNext] # when created word is run, pushes its address
def rDoes (self,cod,p) :
self.rDict[lastCreate] += cod[p:] # rest of words belong to created words runtime
return len(cod) # jump p over these
def rAllot (self,cod,p) :
self.heapNext += self.ds.pop() # reserve n words for last create
return
def rAt (self,cod,p) :
self.ds.append(self.heap[self.ds.pop()]) # get heap @ address
return
def rBang(self,cod,p) :
a=self.ds.pop()
self.heap[a] = self.ds.pop() # set heap @ address
return
def rComa(self,cod,p) : # push tos into heap
self.heap[self.heapNext]=self.ds.pop()
self.heapNext += 1
return
#================================= Zeitcoin Extension
def rHelp(self,cod,p):
print "=====================================Commands======================================="
print "and - logic and operation (ie - true and true = true)"
print "or - logic or operation (ie - true and false = true)"
print "max - Given two integers it will show the highest number"
print "min - Given two integers it will show the lowest number"
print "less - If the first number is less than the second than 1 is return else it returns 0"
print "greater - If the first number is greater than the second than 1 else 0"
print "ripemd160 - Returns the ripemd160 hash"
print "hash160 - Returns the hash160 hash"
print "hash256 - Returns the hash256 hash"
print "sha1 - Returns the sha1 hash"
print "sha256 - Returns the sha256 hash"
print "verify - If one is pop from the stack than true is return else false"
print "nip - Removes the second from the top of the stack"
print "true - pops a 1 onto the top of the stack"
print "false - pops a 0 onto the top of the stack"
print "pick - ppo an element in the stack and put it on the top of the stack"
print "roll - Duplicate an element in the stack and put it on the stack"
print "num - Returns the length of the first element of the stack"
print "len - Returns the number of elements on the stack"
print "check - check the first element of the stack is signed correctly"
print "swap - Swap the postion of the first two elements on the stack"
print "dup - Duplicates the top element and push it onto the stack"
print "drop - delete the first element of the stack"
print "dump - dumps the stack to the screen"
print "over - Duplicate the second element of the stack and push it onto the stack"
print " + - Adds the first two elements of the stack together"
print " - - Subtract the first two elements of the stack"
print " * - Muliple the first two elements of the stack"
print " / - Divide the first two elements of the stack"
print " > - Returns true if the first number is greater than the second else false"
print " < - Returns true if the first number is less than the second else false"
print " = - If the first two elements are equal returns true else false"
print " . - Tells that it is the end of the command-line"
print "===================================================================================="
return
def rAnd(self,cod,p):
b=self.ds.pop()
a=self.ds.pop()
self.ds.append(a and b)
return
def rOr(self,cod,p):
b=self.ds.pop()
a=self.ds.pop()
self.ds.append(a or b)
return
def rMin(self,cod,p):
b=self.ds.pop()
a=self.ds.pop()
resulta=isinstance( a, ( int, long ) )
resultb=isinstance( b, ( int, long ) )
if (resulta and resultb):
if (a>b):
self.ds.append(b)
else:
self.ds.append(a)
else:
print "[error] - Can not use a string with this command"
self.ds.append(0)
return
def rMax(self,cod,p):
b=self.ds.pop()
a=self.ds.pop()
resulta=isinstance( a, ( int, long ) )
resultb=isinstance( b, ( int, long ) )
if (resulta and resultb):
if (a<b):
self.ds.append(b)
else:
self.ds.append(a)
else:
print "[error] - Can not use a string with this command"
self.ds.append(0)
return
def rLess(self,cod,p):
b=self.ds.pop()
a=self.ds.pop()
resulta=isinstance( a, ( int, long ) )
resultb=isinstance( b, ( int, long ) )
if (resulta and resultb):
if (a<b):
self.ds.append(1)
else:
self.ds.append(0)
else:
print "[error] - Can not use a string with this command"
self.ds.append(0)
return
def rGreater(self,cod,p):
b=self.ds.pop()
a=self.ds.pop()
resulta=isinstance( a, ( int, long ) )
resultb=isinstance( b, ( int, long ) )
if (resulta and resultb):
if (a>b):
self.ds.append(1)
else:
self.ds.append(0)
else:
print "[error] - Can not use a string with this command"
self.ds.append(0)
return
def rSha224(self,cod,p):
a=str(self.ds.pop())
a=self.stripstr(a)
hashvalue=hashlib.sha224(a).hexdigest()
self.ds.append(hashvalue)
return
def rSha1(self,cod,p):
a=str(self.ds.pop())
a=self.stripstr(a)
hashvalue=hashlib.sha1(a).hexdigest()
self.ds.append(hashvalue)
return
def rSha256(self,cod,p):
a=str(self.ds.pop())
a=self.stripstr(a)
hashvalue=hashlib.sha256(a).hexdigest()
self.ds.append(hashvalue)
return
def rMd5(self,cod,p):
a=str(self.ds.pop())
a=self.stripstr(a)
hashvalue=hashlib.md5(a).hexdigest()
self.ds.append(hashvalue)
return
def rSha512(self,cod,p):
a=str(self.ds.pop())
a=self.stripstr(a)
hashvalue=hashlib.sha512(a).hexdigest()
self.ds.append(hashvalue)
return
def rVerify(self,cod,p):
flag=1
a=self.ds.pop()
if (a!=1):
self.ds.append(0)
flag=0
return flag
def rTrue(self,cod,p):
self.ds.append(1)
return
def rFalse(self,cod,p):
self.ds.append(0)
return
def rNip(self,cod,p):
a=self.ds.pop()
b=self.ds.pop()
self.ds.append(a)
return
def rNum(self,cod,p):
l=len(self.ds)+1
self.ds.append(l)
return
def rPick(self,cod,p):
l=len(self.ds)
a=self.ds.pop()
result=isinstance( a, ( int, long ) )
if (result==False):
self.ds.append(0)
else:
a=int(a)-1
if (a>l):
self.ds.append(0)
else:
b=self.ds[a]
self.ds.append(b)
return
def rRoll(self,cod,p):
l=len(self.ds)
a=self.ds.pop()
result=isinstance( a, ( int, long ) )
if (result==False):
self.ds.append(0)
else:
a=int(a)-1
if (a>l):
self.ds.append(0)
else:
b=self.ds.pop(a)
self.ds.append(b)
return
def rLen(self,cod,p):
a=str(self.ds.pop())
l=len(a)
fletter = a[0]
lletter = a[-1]
if (fletter=='"' and lletter=='"'):
l=l-2
self.ds.append(l)
return
def rCheck(self,cod,p):
b=self.ds.pop() # signature
a=self.ds.pop() # message
PubKey = M2Crypto.RSA.load_pub_key(self.PUBKEY)
VerifyEVP = M2Crypto.EVP.PKey()
VerifyEVP.assign_rsa (PubKey)
VerifyEVP.verify_init ()
VerifyEVP.verify_update (a)
if VerifyEVP.verify_final (b) == 1:
print "The string was successfully verified."
verify = True
self.ds.append(1)
else:
print "The string was NOT verified!"
verify = False
self.ds.append(0)
return verify
rDict = {
'+' : rAdd, '-' : rSub, '/' : rDiv, '*' : rMul, 'over': rOver,
'dup': rDup, 'swap': rSwap, '.': rDot, 'dump' : rDump, 'drop': rDrop,
'=' : rEq, '>' : rGt, '<': rLt,
',' : rComa,'@' : rAt, '!' : rBang,'allot': rAllot,
'and': rAnd, 'or' : rOr, 'min': rMin, 'max' : rMax, 'len' :rLen,
'create': rCreate, 'does>': rDoes, 'check' : rCheck, 'num' : rNum,
'roll' : rRoll, 'pick' : rPick, 'nip' : rNip, 'true' : rTrue,
'false' : rFalse, 'verify' : rVerify, 'md5' : rMd5,
'sha512' : rSha512, 'sha1' : rSha1, 'sha256' : rSha256,
'sha224' : rSha224, 'less' : rLess, 'greater' : rGreater, 'help' : rHelp,
}
#================================= Compile time
def compile(self,word) :
pcode = [];
#word = getWord(prompt) # get next word
if word == None : return None
cAct = self.cDict.get(word) # Is there a compile time action ?
rAct = self.rDict.get(word) # Is there a runtime action ?
if cAct : cAct(pcode) # run at compile time
elif rAct :
if type(rAct) == type([]) :
pcode.append(self.rRun) # Compiled word.
pcode.append(word) # for now do dynamic lookup
else : pcode.append(rAct) # push builtin for runtime
else :
# Number to be pushed onto ds at runtime
pcode.append(self.rPush)
try : pcode.append(int(word))
except :
try: pcode.append(float(word))
except :
fletter = word[0]
lletter = word[-1]
if (fletter=='"' and lletter=='"'):
pcode.append(str(word))
else:
pcode[-1] = self.rRun # Change rPush to rRun
pcode.append(word) # Assume word will be defined
return pcode
def fatal (mesg) : raise mesg
def cColon (pcode) :
if self.cStack : fatal(": inside Control stack: %s" % self.cStack)
label = getWord()
self.cStack.append(("COLON",label)) # flag for following ";"
def cSemi (pcode) :
if not self.cStack : fatal("No : for ; to match")
code,label = self.cStack.pop()
if code != "COLON" : fatal(": not balanced with ;")
self.rDict[label] = pcode[:] # Save word definition in rDict
while pcode : pcode.pop()
def cBegin (pcode) :
self.cStack.append(("BEGIN",len(pcode))) # flag for following UNTIL
def cUntil (pcode) :
if not self.cStack : fatal("No BEGIN for UNTIL to match")
code,slot = self.cStack.pop()
if code != "BEGIN" : fatal("UNTIL preceded by %s (not BEGIN)" % code)
pcode.append(rJz)
pcode.append(slot)
def cIf (pcode) :
pcode.append(rJz)
self.cStack.append(("IF",len(pcode))) # flag for following Then or Else
pcode.append(0) # slot to be filled in
def cElse (pcode) :
if not self.cStack : fatal("No IF for ELSE to match")
code,slot = self.cStack.pop()
if code != "IF" : fatal("ELSE preceded by %s (not IF)" % code)
pcode.append(rJmp)
self.cStack.append(("ELSE",len(pcode))) # flag for following THEN
pcode.append(0) # slot to be filled in
pcode[slot] = len(pcode) # close JZ for IF
def cThen (pcode) :
if not self.cStack : fatal("No IF or ELSE for THEN to match")
code,slot = self.cStack.pop()
if code not in ("IF","ELSE") : fatal("THEN preceded by %s (not IF or ELSE)" % code)
pcode[slot] = len(pcode) # close JZ for IF or JMP for ELSE
cDict = {
':' : cColon, ';' : cSemi, 'if': cIf, 'else': cElse, 'then': cThen,
'begin': cBegin, 'until': cUntil,
}
def run(self,script) :
self.tokenizeWords(script)
for word in self.words:
pcode = self.compile(word) # compile/run from user
if pcode == None : print; return
self.execute(pcode)
result = self.ds.pop()
print "result-",result
return result
def main():
zf=zeitforth()
script = raw_input("Enter the forth code to be run\n")
zf.run(script)
if __name__ == "__main__" : main()
| #!/usr/local/bin/python
import sys, re, hashlib
import M2Crypto
from M2Crypto import Rand, RSA, BIO, EVP
class zeitforth:
ds = [] # The data stack
cStack = [] # The control struct stack
heap = [0]*20 # The data heap
heapNext = 0 # Next avail slot in heap
words = [] # The input stream of tokens
PUBKEY = 'zeitcoin-public.pem'
#============================== Lexical Parsing
def tokenizeWords(self,s) : # clip comments, split to list of words
self.words += re.sub("#.*\n","\n",s+"\n").lower().split() # Use "#" for comment to end of line
return
#================================= Runtime operation
def stripstr(self,str1):
fletter = str1[0]
lletter = str1[-1]
if (fletter=='"' and lletter=='"'):
str1 = str1[1:-1]
return str1
def execute (self,code) :
p = 0
while p < len(code) :
func = code[p]
p += 1
newP = func(self,code,p)
if newP != None : p = newP
def rAdd (self,cod,p) :
try:
b=self.ds.pop()
a=self.ds.pop()
resulta=isinstance( a, ( int, long ) )
resultb=isinstance( b, ( int, long ) )
if (resulta and resultb):
self.ds.append(a+b)
else:
print "[error] - Can not use a string with this command"
self.ds.append(0)
except:
print "[error] - Runtime error the shack is empty"
return
def rMul (self,cod,p) :
b=self.ds.pop()
a=self.ds.pop()
resulta=isinstance( a, ( int, long ) )
resultb=isinstance( b, ( int, long ) )
if (resulta and resultb):
self.ds.append(a*b)
else:
print "[error] - Can not use a string with this command"
self.ds.append(0)
return
def rSub (self,cod,p) :
b=self.ds.pop()
a=self.ds.pop()
resulta=isinstance( a, ( int, long ) )
resultb=isinstance( b, ( int, long ) )
if (resulta and resultb):
self.ds.append(a-b)
else:
print "[error] - Can not use a string with this command"
self.ds.append(0)
return
def rDiv (self,cod,p) :
b=self.ds.pop()
a=self.ds.pop()
resulta=isinstance( a, ( int, long ) )
resultb=isinstance( b, ( int, long ) )
if (resulta and resultb):
self.ds.append(a/b)
else:
print "[error] - Can not use a string with this command"
self.ds.append(0)
return
def rEq (self,cod,p) :
try:
b=self.ds.pop()
a=self.ds.pop()
resulta=isinstance( a, ( int, long ) )
resultb=isinstance( b, ( int, long ) )
if (resulta and resultb):
self.ds.append(int(a==b))
elif ((not(resulta)) and (not(resultb))):
if (str(a)==str(b)):
self.ds.append(1)
else:
self.ds.append(0)
else:
print "[error] - The data being compared must be of the same type"
self.ds.append(0)
except:
print "[error] - Runtime error the shack is empty"
return
def rGt (self,cod,p) :
b=self.ds.pop()
a=self.ds.pop()
resulta=isinstance( a, ( int, long ) )
resultb=isinstance( b, ( int, long ) )
if (resulta and resultb):
self.ds.append(int(a>b))
else:
print "[error] - Can not use a string with this command"
self.ds.append(0)
return
def rLt (self,cod,p) :
b=self.ds.pop()
a=self.ds.pop()
resulta=isinstance( a, ( int, long ) )
resultb=isinstance( b, ( int, long ) )
if (resulta and resultb):
self.ds.append(int(a<b))
else:
print "[error] - Can not use a string with this command"
self.ds.append(0)
return
def rSwap(self,cod,p) :
a=self.ds.pop()
b=self.ds.pop()
self.ds.append(a)
self.ds.append(b)
return
def rDup (self,cod,p) :
self.ds.append(self.ds[-1])
return
def rDrop(self,cod,p) :
self.ds.pop()
return
def rOver(self,cod,p) :
self.ds.append(self.ds[-2])
return
def rDump(self,cod,p) :
print "ds = ", self.ds
return
def rDot (self,cod,p) :
print self.ds.pop()
return
def rJmp (self,cod,p) :
return cod[p]
def rJnz (self,cod,p) :
return (cod[p],p+1)[self.ds.pop()]
def rJz (self,cod,p) :
return (p+1,cod[p])[self.ds.pop()==0]
def rRun (dummy,self,cod,p) :
try:
self.execute(self.rDict[cod[p]])
except :
print "[error] - Invalid command run-time error"
return p+1
def rPush(dummy,self,cod,p) :
#print "dummy-",dummy
#print "self-",self
#print "cod-",cod
#print "p-",p
self.ds.append(cod[p])
return p+1
def rCreate (pcode,p) :
global lastCreate
lastCreate = label = getWord() # match next word (input) to next heap address
self.rDict[label] = [rPush, self.heapNext] # when created word is run, pushes its address
def rDoes (self,cod,p) :
self.rDict[lastCreate] += cod[p:] # rest of words belong to created words runtime
return len(cod) # jump p over these
def rAllot (self,cod,p) :
self.heapNext += self.ds.pop() # reserve n words for last create
return
def rAt (self,cod,p) :
self.ds.append(self.heap[self.ds.pop()]) # get heap @ address
return
def rBang(self,cod,p) :
a=self.ds.pop()
self.heap[a] = self.ds.pop() # set heap @ address
return
def rComa(self,cod,p) : # push tos into heap
self.heap[self.heapNext]=self.ds.pop()
self.heapNext += 1
return
#================================= Zeitcoin Extension
def rHelp(self,cod,p):
print "=====================================Commands======================================="
print "and - logic and operation (ie - true and true = true)"
print "or - logic or operation (ie - true and false = true)"
print "max - Given two integers it will show the highest number"
print "min - Given two integers it will show the lowest number"
print "less - If the first number is less than the second than 1 is return else it returns 0"
print "greater - If the first number is greater than the second than 1 else 0"
print "ripemd160 - Returns the ripemd160 hash"
print "hash160 - Returns the hash160 hash"
print "hash256 - Returns the hash256 hash"
print "sha1 - Returns the sha1 hash"
print "sha256 - Returns the sha256 hash"
print "verify - If one is pop from the stack than true is return else false"
print "nip - Removes the second from the top of the stack"
print "true - pops a 1 onto the top of the stack"
print "false - pops a 0 onto the top of the stack"
print "pick - ppo an element in the stack and put it on the top of the stack"
print "roll - Duplicate an element in the stack and put it on the stack"
print "num - Returns the length of the first element of the stack"
print "len - Returns the number of elements on the stack"
print "check - check the first element of the stack is signed correctly"
print "swap - Swap the postion of the first two elements on the stack"
print "dup - Duplicates the top element and push it onto the stack"
print "drop - delete the first element of the stack"
print "dump - dumps the stack to the screen"
print "over - Duplicate the second element of the stack and push it onto the stack"
print " + - Adds the first two elements of the stack together"
print " - - Subtract the first two elements of the stack"
print " * - Muliple the first two elements of the stack"
print " / - Divide the first two elements of the stack"
print " > - Returns true if the first number is greater than the second else false"
print " < - Returns true if the first number is less than the second else false"
print " = - If the first two elements are equal returns true else false"
print " . - Tells that it is the end of the command-line"
print "===================================================================================="
return
def rAnd(self,cod,p):
b=self.ds.pop()
a=self.ds.pop()
self.ds.append(a and b)
return
def rOr(self,cod,p):
b=self.ds.pop()
a=self.ds.pop()
self.ds.append(a or b)
return
def rMin(self,cod,p):
b=self.ds.pop()
a=self.ds.pop()
resulta=isinstance( a, ( int, long ) )
resultb=isinstance( b, ( int, long ) )
if (resulta and resultb):
if (a>b):
self.ds.append(b)
else:
self.ds.append(a)
else:
print "[error] - Can not use a string with this command"
self.ds.append(0)
return
def rMax(self,cod,p):
b=self.ds.pop()
a=self.ds.pop()
resulta=isinstance( a, ( int, long ) )
resultb=isinstance( b, ( int, long ) )
if (resulta and resultb):
if (a<b):
self.ds.append(b)
else:
self.ds.append(a)
else:
print "[error] - Can not use a string with this command"
self.ds.append(0)
return
def rLess(self,cod,p):
b=self.ds.pop()
a=self.ds.pop()
resulta=isinstance( a, ( int, long ) )
resultb=isinstance( b, ( int, long ) )
if (resulta and resultb):
if (a<b):
self.ds.append(1)
else:
self.ds.append(0)
else:
print "[error] - Can not use a string with this command"
self.ds.append(0)
return
def rGreater(self,cod,p):
b=self.ds.pop()
a=self.ds.pop()
resulta=isinstance( a, ( int, long ) )
resultb=isinstance( b, ( int, long ) )
if (resulta and resultb):
if (a>b):
self.ds.append(1)
else:
self.ds.append(0)
else:
print "[error] - Can not use a string with this command"
self.ds.append(0)
return
def rSha224(self,cod,p):
a=str(self.ds.pop())
a=self.stripstr(a)
hashvalue=hashlib.sha224(a).hexdigest()
self.ds.append(hashvalue)
return
def rSha1(self,cod,p):
a=str(self.ds.pop())
a=self.stripstr(a)
hashvalue=hashlib.sha1(a).hexdigest()
self.ds.append(hashvalue)
return
def rSha256(self,cod,p):
a=str(self.ds.pop())
a=self.stripstr(a)
hashvalue=hashlib.sha256(a).hexdigest()
self.ds.append(hashvalue)
return
def rMd5(self,cod,p):
a=str(self.ds.pop())
a=self.stripstr(a)
hashvalue=hashlib.md5(a).hexdigest()
self.ds.append(hashvalue)
return
def rSha512(self,cod,p):
a=str(self.ds.pop())
a=self.stripstr(a)
hashvalue=hashlib.sha512(a).hexdigest()
self.ds.append(hashvalue)
return
def rVerify(self,cod,p):
flag=1
a=self.ds.pop()
if (a!=1):
self.ds.append(0)
flag=0
return flag
def rTrue(self,cod,p):
self.ds.append(1)
return
def rFalse(self,cod,p):
self.ds.append(0)
return
def rNip(self,cod,p):
a=self.ds.pop()
b=self.ds.pop()
self.ds.append(a)
return
def rNum(self,cod,p):
l=len(self.ds)+1
self.ds.append(l)
return
def rPick(self,cod,p):
l=len(self.ds)
a=self.ds.pop()
result=isinstance( a, ( int, long ) )
if (result==False):
self.ds.append(0)
else:
a=int(a)-1
if (a>l):
self.ds.append(0)
else:
b=self.ds[a]
self.ds.append(b)
return
def rRoll(self,cod,p):
l=len(self.ds)
a=self.ds.pop()
result=isinstance( a, ( int, long ) )
if (result==False):
self.ds.append(0)
else:
a=int(a)-1
if (a>l):
self.ds.append(0)
else:
b=self.ds.pop(a)
self.ds.append(b)
return
def rLen(self,cod,p):
a=str(self.ds.pop())
l=len(a)
fletter = a[0]
lletter = a[-1]
if (fletter=='"' and lletter=='"'):
l=l-2
self.ds.append(l)
return
def rCheck(self,cod,p):
b=self.ds.pop() # signature
a=self.ds.pop() # message
PubKey = M2Crypto.RSA.load_pub_key(self.PUBKEY)
VerifyEVP = M2Crypto.EVP.PKey()
VerifyEVP.assign_rsa (PubKey)
VerifyEVP.verify_init ()
VerifyEVP.verify_update (a)
if VerifyEVP.verify_final (b) == 1:
print "The string was successfully verified."
verify = True
self.ds.append(1)
else:
print "The string was NOT verified!"
verify = False
self.ds.append(0)
return verify
rDict = {
'+' : rAdd, '-' : rSub, '/' : rDiv, '*' : rMul, 'over': rOver,
'dup': rDup, 'swap': rSwap, '.': rDot, 'dump' : rDump, 'drop': rDrop,
'=' : rEq, '>' : rGt, '<': rLt,
',' : rComa,'@' : rAt, '!' : rBang,'allot': rAllot,
'and': rAnd, 'or' : rOr, 'min': rMin, 'max' : rMax, 'len' :rLen,
'create': rCreate, 'does>': rDoes, 'check' : rCheck, 'num' : rNum,
'roll' : rRoll, 'pick' : rPick, 'nip' : rNip, 'true' : rTrue,
'false' : rFalse, 'verify' : rVerify, 'md5' : rMd5,
'sha512' : rSha512, 'sha1' : rSha1, 'sha256' : rSha256,
'sha224' : rSha224, 'less' : rLess, 'greater' : rGreater, 'help' : rHelp,
}
#================================= Compile time
def compile(self,word) :
pcode = [];
#word = getWord(prompt) # get next word
if word == None : return None
cAct = self.cDict.get(word) # Is there a compile time action ?
rAct = self.rDict.get(word) # Is there a runtime action ?
if cAct : cAct(pcode) # run at compile time
elif rAct :
if type(rAct) == type([]) :
pcode.append(self.rRun) # Compiled word.
pcode.append(word) # for now do dynamic lookup
else : pcode.append(rAct) # push builtin for runtime
else :
# Number to be pushed onto ds at runtime
pcode.append(self.rPush)
            try : pcode.append(int(word))
            except ValueError :
                try: pcode.append(float(word))
                except ValueError :
                    fletter = word[0]
                    lletter = word[-1]
                    if (fletter=='"' and lletter=='"'):
                        pcode.append(str(word))
                    else:
                        pcode[-1] = self.rRun # Change rPush to rRun
                        pcode.append(word)    # Assume word will be defined at runtime
return pcode
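    # Hedged illustration of what compile() returns (assumes execute() can run
    # both bound primitives such as self.rPush and the plain functions stored
    # in rDict):
    #     compile("42")      -> [rPush, 42]          number literal
    #     compile('"hi"')    -> [rPush, '"hi"']      quoted string literal
    #     compile("dup")     -> [rDup]               builtin word
    #     compile("square")  -> [rRun, "square"]     user word, looked up at runtime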
    def fatal (self,mesg) : raise Exception(mesg)
    def cColon (self,pcode) :
        if self.cStack : self.fatal(": inside Control stack: %s" % self.cStack)
        label = self.getWord()              # next token names the word (getWord assumed defined earlier)
        self.cStack.append(("COLON",label)) # flag for following ";"
    def cSemi (self,pcode) :
        if not self.cStack : self.fatal("No : for ; to match")
        code,label = self.cStack.pop()
        if code != "COLON" : self.fatal(": not balanced with ;")
        self.rDict[label] = pcode[:]        # Save word definition in rDict
        while pcode : pcode.pop()
    def cBegin (self,pcode) :
        self.cStack.append(("BEGIN",len(pcode)))  # flag for following UNTIL
    def cUntil (self,pcode) :
        if not self.cStack : self.fatal("No BEGIN for UNTIL to match")
        code,slot = self.cStack.pop()
        if code != "BEGIN" : self.fatal("UNTIL preceded by %s (not BEGIN)" % code)
        pcode.append(self.rJz)              # rJz/rJmp assumed to be methods, like self.rRun/self.rPush above
        pcode.append(slot)                  # jump back to BEGIN while the flag is false
    def cIf (self,pcode) :
        pcode.append(self.rJz)
        self.cStack.append(("IF",len(pcode)))     # flag for following THEN or ELSE
        pcode.append(0)                           # slot to be filled in
    def cElse (self,pcode) :
        if not self.cStack : self.fatal("No IF for ELSE to match")
        code,slot = self.cStack.pop()
        if code != "IF" : self.fatal("ELSE preceded by %s (not IF)" % code)
        pcode.append(self.rJmp)
        self.cStack.append(("ELSE",len(pcode)))   # flag for following THEN
        pcode.append(0)                           # slot to be filled in
        pcode[slot] = len(pcode)                  # close JZ for IF
    def cThen (self,pcode) :
        if not self.cStack : self.fatal("No IF or ELSE for THEN to match")
        code,slot = self.cStack.pop()
        if code not in ("IF","ELSE") : self.fatal("THEN preceded by %s (not IF or ELSE)" % code)
        pcode[slot] = len(pcode)                  # close JZ for IF or JMP for ELSE
cDict = {
':' : cColon, ';' : cSemi, 'if': cIf, 'else': cElse, 'then': cThen,
'begin': cBegin, 'until': cUntil,
}
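    # Hedged illustration (assumes rJz/rJmp act as conditional/unconditional
    # jumps inside execute()): in a colon definition such as
    #     : sign 0 < if -1 else 1 then ;
    # cIf emits [rJz, <slot>], cElse emits [rJmp, <slot>] and back-patches the
    # IF slot, and cThen back-patches whichever slot is still open.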
    def run(self,script) :
        self.tokenizeWords(script)
        for word in self.words:
            pcode = self.compile(word)    # compile/run from user
            if pcode is None : print; return
            self.execute(pcode)
        if not self.ds:                   # nothing left on the data stack
            return None
        result = self.ds.pop()
        print "result-",result
        return result
def main():
zf=zeitforth()
script = raw_input("Enter the forth code to be run\n")
zf.run(script)
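# Hedged sketch (not part of the original source): a tiny non-interactive demo.
# It assumes the zeitforth class above works as run() expects, i.e. execute()
# can evaluate the compiled pcode produced for each word.
def demo():
    zf = zeitforth()
    zf.run('3 4 + dup * .')     # arithmetic: prints 49
    zf.run('2 7 greater .')     # comparison word defined above: prints 0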
if __name__ == "__main__" : main()
| en | 0.763161 | #!/usr/local/bin/python # The data stack # The control struct stack # The data heap # Next avail slot in heap # The input stream of tokens #============================== Lexical Parsing # clip comments, split to list of words # Use "#" for comment to end of line #================================= Runtime operation #print "dummy-",dummy #print "self-",self #print "cod-",cod #print "p-",p # match next word (input) to next heap address # when created word is run, pushes its address # rest of words belong to created words runtime # jump p over these # reserve n words for last create # get heap @ address # set heap @ address # push tos into heap #================================= Zeitcoin Extention # signature # message #================================= Compile time #word = getWord(prompt) # get next word # Is there a compile time action ? # Is there a runtime action ? # run at compile time # Compiled word. # for now do dynamic lookup # push builtin for runtime # Number to be pushed onto ds at runtime # Change rPush to rRun # Assume word will be defined # flag for following ";" # Save word definition in rDict # flag for following UNTIL # flag for following Then or Else # slot to be filled in # flag for following THEN # slot to be filled in # close JZ for IF # close JZ for IF or JMP for ELSE # compile/run from user | 3.087684 | 3 |