ext (stringclasses, 9 values) | sha (stringlengths, 40) | content (stringlengths, 3 to 1.04M)
---|---|---
py
|
1a5942820aabd7ce5466c04d71623eb692b25c4d
|
"""Settings from flow_manager NApp."""
# Polling frequency
STATS_INTERVAL = 30
FLOWS_DICT_MAX_SIZE = 10000
# Time (in seconds) to wait before retrieving the box from storehouse
BOX_RESTORE_TIMER = 0.1
ENABLE_CONSISTENCY_CHECK = True
# List of flows ignored by the consistency check
# To filter by a single cookie or `table_id` value, use [value]
# To filter by a cookie or `table_id` range, use [(value1, value2)]
CONSISTENCY_COOKIE_IGNORED_RANGE = []
CONSISTENCY_TABLE_ID_IGNORED_RANGE = []
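# A minimal sketch of how the ignore lists above could be populated, following the
# formats described in the comments (these values are illustrative, not defaults):
# CONSISTENCY_COOKIE_IGNORED_RANGE = [0x64, (0x100, 0x1FF)]
# CONSISTENCY_TABLE_ID_IGNORED_RANGE = [(1, 3)]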
|
py
|
1a59448ef560e1988298554087b62d80e8a55e6f
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
__all__ = ['ApiIssueComment']
class ApiIssueComment(pulumi.CustomResource):
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
api_id: Optional[pulumi.Input[str]] = None,
comment_id: Optional[pulumi.Input[str]] = None,
created_date: Optional[pulumi.Input[str]] = None,
issue_id: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
service_name: Optional[pulumi.Input[str]] = None,
text: Optional[pulumi.Input[str]] = None,
user_id: Optional[pulumi.Input[str]] = None,
__props__=None,
__name__=None,
__opts__=None):
"""
Issue Comment Contract details.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] api_id: API identifier. Must be unique in the current API Management service instance.
:param pulumi.Input[str] comment_id: Comment identifier within an Issue. Must be unique in the current Issue.
:param pulumi.Input[str] created_date: Date and time when the comment was created.
:param pulumi.Input[str] issue_id: Issue identifier. Must be unique in the current API Management service instance.
:param pulumi.Input[str] resource_group_name: The name of the resource group.
:param pulumi.Input[str] service_name: The name of the API Management service.
:param pulumi.Input[str] text: Comment text.
:param pulumi.Input[str] user_id: A resource identifier for the user who left the comment.
"""
if __name__ is not None:
warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
resource_name = __name__
if __opts__ is not None:
warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
opts = __opts__
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = dict()
if api_id is None and not opts.urn:
raise TypeError("Missing required property 'api_id'")
__props__['api_id'] = api_id
__props__['comment_id'] = comment_id
__props__['created_date'] = created_date
if issue_id is None and not opts.urn:
raise TypeError("Missing required property 'issue_id'")
__props__['issue_id'] = issue_id
if resource_group_name is None and not opts.urn:
raise TypeError("Missing required property 'resource_group_name'")
__props__['resource_group_name'] = resource_group_name
if service_name is None and not opts.urn:
raise TypeError("Missing required property 'service_name'")
__props__['service_name'] = service_name
if text is None and not opts.urn:
raise TypeError("Missing required property 'text'")
__props__['text'] = text
if user_id is None and not opts.urn:
raise TypeError("Missing required property 'user_id'")
__props__['user_id'] = user_id
__props__['name'] = None
__props__['type'] = None
alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:apimanagement/v20191201:ApiIssueComment"), pulumi.Alias(type_="azure-native:apimanagement:ApiIssueComment"), pulumi.Alias(type_="azure-nextgen:apimanagement:ApiIssueComment"), pulumi.Alias(type_="azure-native:apimanagement/latest:ApiIssueComment"), pulumi.Alias(type_="azure-nextgen:apimanagement/latest:ApiIssueComment"), pulumi.Alias(type_="azure-native:apimanagement/v20170301:ApiIssueComment"), pulumi.Alias(type_="azure-nextgen:apimanagement/v20170301:ApiIssueComment"), pulumi.Alias(type_="azure-native:apimanagement/v20180101:ApiIssueComment"), pulumi.Alias(type_="azure-nextgen:apimanagement/v20180101:ApiIssueComment"), pulumi.Alias(type_="azure-native:apimanagement/v20180601preview:ApiIssueComment"), pulumi.Alias(type_="azure-nextgen:apimanagement/v20180601preview:ApiIssueComment"), pulumi.Alias(type_="azure-native:apimanagement/v20190101:ApiIssueComment"), pulumi.Alias(type_="azure-nextgen:apimanagement/v20190101:ApiIssueComment"), pulumi.Alias(type_="azure-native:apimanagement/v20191201preview:ApiIssueComment"), pulumi.Alias(type_="azure-nextgen:apimanagement/v20191201preview:ApiIssueComment"), pulumi.Alias(type_="azure-native:apimanagement/v20200601preview:ApiIssueComment"), pulumi.Alias(type_="azure-nextgen:apimanagement/v20200601preview:ApiIssueComment"), pulumi.Alias(type_="azure-native:apimanagement/v20201201:ApiIssueComment"), pulumi.Alias(type_="azure-nextgen:apimanagement/v20201201:ApiIssueComment")])
opts = pulumi.ResourceOptions.merge(opts, alias_opts)
super(ApiIssueComment, __self__).__init__(
'azure-native:apimanagement/v20191201:ApiIssueComment',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'ApiIssueComment':
"""
Get an existing ApiIssueComment resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = dict()
__props__["created_date"] = None
__props__["name"] = None
__props__["text"] = None
__props__["type"] = None
__props__["user_id"] = None
return ApiIssueComment(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="createdDate")
def created_date(self) -> pulumi.Output[Optional[str]]:
"""
Date and time when the comment was created.
"""
return pulumi.get(self, "created_date")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
Resource name.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def text(self) -> pulumi.Output[str]:
"""
Comment text.
"""
return pulumi.get(self, "text")
@property
@pulumi.getter
def type(self) -> pulumi.Output[str]:
"""
Resource type for API Management resource.
"""
return pulumi.get(self, "type")
@property
@pulumi.getter(name="userId")
def user_id(self) -> pulumi.Output[str]:
"""
A resource identifier for the user who left the comment.
"""
return pulumi.get(self, "user_id")
def translate_output_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
def translate_input_property(self, prop):
return _tables.SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
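# A minimal usage sketch (not part of the generated SDK), assuming this class is
# imported inside a configured Pulumi program targeting Azure Native; all resource
# identifiers below are hypothetical placeholders.
#
# comment = ApiIssueComment(
#     "exampleComment",
#     resource_group_name="example-rg",
#     service_name="example-apim",
#     api_id="example-api",
#     issue_id="example-issue",
#     comment_id="example-comment",
#     text="Issue comment.",
#     user_id="example-user-id",
# )
# pulumi.export("commentName", comment.name)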
|
py
|
1a59452cca91d121105cf685c0055f279ca67694
|
"""
The white-space property controls how whitespace inside an element is rendered.
"""
from ..defaults import BREAKPOINTS, UP, DOWN, FULL, ONLY
from ...core import CssModule
vals = [
('n', 'normal'),
('nw', 'nowrap'),
('p', 'pre'),
]
mdl = CssModule(
'White space',
[UP],
dynamic={'.ws': ['white-space']},
values=vals,
docstring=__doc__
)
|
py
|
1a59458384e6699172f2cd7ef1ac6672ae4afcaf
|
#!/usr/bin/env python
import os
import sys
import glob
import yaml
import time
import ConfigParser
import subprocess
import stat
import logbook
from optparse import OptionParser
from hashlib import md5
from bcbio.utils import safe_makedir
from bcbio.pipeline.config_loader import load_config
DEFAULT_DB = os.path.join("~","log","miseq_transferred.db")
DEFAULT_LOGFILE = os.path.join("~","log","miseq_deliveries.log")
DEFAULT_SS_NAME = "SampleSheet.csv"
DEFAULT_FQ_LOCATION = os.path.join("Data","Intensities","BaseCalls")
DEFAULT_PROJECT_ROOT = os.path.join("/proj")
DEFAULT_UPPNEXID_FIELD = "Description"
DEFAULT_SMTP_HOST = "smtp.uu.se"
DEFAULT_SMTP_PORT = 25
DEFAULT_RECIPIENT = "[email protected]"
LOG_NAME = "Miseq Delivery"
logger2 = logbook.Logger(LOG_NAME)
def main(input_path, transferred_db, run_folder, uppnexid, samplesheet, logfile, email_notification, config_file, force, dryrun):
config = {}
if config_file is not None:
config = load_config(config_file)
if logfile is None:
logfile = config.get("logfile",os.path.normpath(os.path.expanduser(DEFAULT_LOGFILE)))
email_handler = None
# Don't write dry runs to log
if dryrun:
handler = logbook.StreamHandler(sys.stdout)
else:
if not os.path.exists(logfile):
safe_makedir(os.path.dirname(logfile))
open(logfile,"w").close()
handler = logbook.FileHandler(logfile)
if email_notification is None:
email_notification = config.get("email_recipient",DEFAULT_RECIPIENT)
recipients = email_notification.split(",")
if len(recipients) > 0:
email_handler = logbook.MailHandler("[email protected]", recipients,
server_addr=[config.get("smtp_host",DEFAULT_SMTP_HOST),config.get("smtp_port",DEFAULT_SMTP_PORT)],
format_string=u'''Subject: [MiSeq delivery] {record.extra[run]}\n\n {record.message}''')
with handler.applicationbound():
if dryrun:
logger2.info("This is just a dry-run. Nothing will be delivered and no directories will be created/changed")
# If no run folder was specified, try with the folders in the input_path
if run_folder is None:
pat = "*_M*_AMS*"
folders = [os.path.relpath(os.path.normpath(file),input_path) for file in glob.glob(os.path.join(input_path,pat))]
else:
run_folder = os.path.basename(os.path.normpath(run_folder))
assert os.path.exists(os.path.join(input_path,run_folder)), "The specified run folder %s does not seem to exist in the %s folder" % (run_folder, input_path)
folders = [run_folder]
logger2.info("Will process %s folders: %s" % (len(folders),folders))
# Parse the supplied db of transferred flowcells, or a db in the default location if present
if transferred_db is None:
transferred_db = os.path.normpath(config.get("transfer_db",os.path.expanduser(DEFAULT_DB)))
assert os.path.exists(transferred_db), "Could not locate transferred_db (expected %s)" % transferred_db
logger2.info("Transferred db is %s" % transferred_db)
# Process each run folder
for folder in folders:
try:
# Skip this folder if it has already been processed
logger2.info("Processing %s" % folder)
if _is_processed(folder,transferred_db) and not force:
logger2.info("%s has already been processed, skipping" % folder)
continue
# Locate the samplesheet and parse the uppnex id if necessary
if uppnexid is None:
local_samplesheet = samplesheet
if local_samplesheet is None: local_samplesheet = os.path.join(input_path,folder,config.get("samplesheet_name",DEFAULT_SS_NAME))
assert os.path.exists(local_samplesheet), "Could not find expected sample sheet %s" % local_samplesheet
local_uppnexid = _fetch_uppnexid(local_samplesheet, config.get("uppnexid_field",DEFAULT_UPPNEXID_FIELD))
assert local_uppnexid is not None and len(local_uppnexid) > 0, "Could not parse Uppnex ID for project from samplesheet %s" % local_samplesheet
else:
local_uppnexid = uppnexid
logger2.info("Will deliver to inbox of project %s" % local_uppnexid)
# Locate the fastq-files to be delivered
pat = os.path.join(input_path,folder,config.get("fastq_path",DEFAULT_FQ_LOCATION),"*.fastq.gz")
fq_files = glob.glob(pat)
# Also search directly in the folder
pat = os.path.join(input_path,folder,"*.fastq.gz")
fq_files.extend(glob.glob(pat))
assert len(fq_files) > 0, "Could not locate fastq files for folder %s using pattern %s" % (folder,pat)
logger2.info("Found %s fastq files to deliver: %s" % (len(fq_files),fq_files))
if dryrun:
logger2.info("Remember that this is a dry-run. Nothing will be delivered and no directories will be created/changed")
# Create the destination directory if required
dest_dir = os.path.normpath(os.path.join(config.get("project_root",DEFAULT_PROJECT_ROOT),local_uppnexid,"INBOX",folder,"fastq"))
_update_processed(folder,transferred_db,dryrun)
assert _create_destination(dest_dir, dryrun), "Could not create destination %s" % dest_dir
assert _deliver_files(fq_files,dest_dir, dryrun), "Could not transfer files to destination %s" % dest_dir
assert _verify_files(fq_files,dest_dir,dryrun), "Integrity of files in destination directory %s could not be verified. Please investigate" % dest_dir
assert _set_permissions(dest_dir, dryrun), "Could not change permissions on destination %s" % dest_dir
if email_handler is not None:
with email_handler.applicationbound():
with logbook.Processor(lambda record: record.extra.__setitem__('run', folder)):
logger2.info("The MiSeq sequence data for run %s was successfully delivered to the inbox of Uppnex project %s (%s)" % (folder,local_uppnexid,dest_dir))
except AssertionError as e:
logger2.error("Could not deliver data from folder %s. Reason: %s. Please fix problems and retry." % (folder,e))
logger2.info("Rolling back changes to %s" % transferred_db)
_update_processed(folder,transferred_db,dryrun,True)
def _update_processed(folder, transferred_db, dryrun, rollback=False):
rows = _get_processed(transferred_db)
present = False
for row in rows:
if row[0] == folder:
if rollback:
logger2.info("Removing entry for %s from %s" % (folder,transferred_db))
rows.remove(row)
else:
logger2.info("Updating entry for %s in %s" % (folder,transferred_db))
row[1] = time.strftime("%x-%X")
present = True
break
if not present and not rollback:
logger2.info("Adding entry for %s to %s" % (folder,transferred_db))
rows.append([folder,time.strftime("%x-%X")])
_put_processed(transferred_db, rows, dryrun)
def _get_processed(transferred_db, folder=None):
rows = []
with open(transferred_db,"r") as fh:
for row in fh:
data = row.split()
if len(data) > 0 and (folder is None or data[0] == folder):
rows.append(data)
return rows
def _put_processed(transferred_db, rows, dryrun):
if dryrun: return
with open(transferred_db,"w") as fh:
for row in rows:
fh.write("%s\n" % " ".join(row))
def _is_processed(folder,transferred_db):
rows = _get_processed(transferred_db,folder)
return len(rows) > 0
def _fetch_uppnexid(samplesheet, uppnexid_field):
uppnexid = None
logger2.info("Parsing UppnexId from %s" % samplesheet)
with open(samplesheet,"r") as fh:
for line in fh:
if not line.startswith("[Data]"): continue
header = fh.next().split(',')
index = header.index(uppnexid_field)
for line in fh:
values = line.split(',')
if len(values) != len(header):
return uppnexid
if values[index] is None or len(values[index]) == 0:
continue
local_uppnexid = values[index]
if uppnexid is not None and local_uppnexid != uppnexid:
logger2.error("Found multiple UppnexIds (%s,%s) in %s" % (uppnexid,local_uppnexid,samplesheet))
return None
uppnexid = local_uppnexid
return uppnexid
def _set_permissions(destination, dryrun):
try:
logger2.info("Setting permissions on %s" % destination)
for root, dirs, files in os.walk(destination):
if not dryrun: os.chmod(root, stat.S_IRWXU | stat.S_IRWXG)
for file in files:
if not dryrun: os.chmod(os.path.join(root,file), stat.S_IRUSR | stat.S_IWUSR | stat.S_IRGRP | stat.S_IWGRP)
except Exception as e:
logger2.error("Encountered exception when setting permissions for %s: %s" % (destination,e))
return False
return True
def _create_destination(destination, dryrun):
try:
logger2.info("Creating destination directory %s" % destination)
if not dryrun: os.makedirs(destination,0770)
except OSError as e:
logger2.info("Could not create destination directory %s, probably because it already exists" % destination)
pass
return dryrun or os.path.exists(destination)
def _deliver_files(files,destination, dryrun):
try:
cl = ["rsync",
"-cra"]
cl.extend(files)
cl.append(destination)
cl = [str(i) for i in cl]
logger2.info("Will deliver using command: %s" % cl)
if not dryrun: subprocess.check_call(cl)
except Exception as e:
logger2.error("Failed when trying to deliver to %s: %s" % (destination,e))
return False
return True
def _verify_files(source_files, destination, dryrun):
try:
for source_file in source_files:
filename = os.path.basename(source_file)
dest_file = os.path.join(destination,filename)
logger2.info("Verifying integrity of %s using md5" % dest_file)
if not dryrun and not os.path.exists(dest_file):
logger2.error("The file %s does not exist in destination directory %s" % (filename,destination))
return False
source_md5 = _file_md5(source_file)
if not dryrun: dest_md5 = _file_md5(dest_file)
if not dryrun and source_md5.hexdigest() != dest_md5.hexdigest():
logger2.error("The md5 sums of %s is differs between source and destination" % filename)
return False
except Exception as e:
logger2.error("Encountered exception when verifying file integrity: %s" % e)
return False
return True
def _file_md5(file):
block_size = 2**20
hash = md5()
with open(file,"rb") as f:
while True:
data = f.read(block_size)
if not data:
break
hash.update(data)
return hash
if __name__ == "__main__":
parser = OptionParser()
parser.add_option("-r", "--run-folder", dest="run_folder", default=None)
parser.add_option("-d", "--transferred-db", dest="transferred_db", default=None)
parser.add_option("-u", "--uppnexid", dest="uppnexid", default=None)
parser.add_option("-s", "--samplesheet", dest="samplesheet", default=None)
parser.add_option("-l", "--log-file", dest="logfile", default=None)
parser.add_option("-e", "--email-notification", dest="email_notification", default=None)
parser.add_option("-c", "--config-file", dest="config_file", default=None)
parser.add_option("-f", "--force", dest="force", action="store_true", default=False)
parser.add_option("-n", "--dry-run", dest="dryrun", action="store_true", default=False)
options, args = parser.parse_args()
input_path = None
if len(args) == 1:
input_path = args[0]
else:
print __doc__
sys.exit()
main(os.path.normpath(input_path),
options.transferred_db, options.run_folder,
options.uppnexid, options.samplesheet,
options.logfile, options.email_notification,
options.config_file, options.force,
options.dryrun)
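# Example invocation (hypothetical paths, shown as a comment; -n makes it a dry run
# so nothing is transferred), assuming the script is saved as deliver_miseq.py:
#
#   python deliver_miseq.py -n -c ~/config/post_process.yaml /path/to/miseq/runfolders
#
# The single positional argument is the input path that contains the run folders.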
|
py
|
1a5945a216eda9f9b99ec1efcfe84cf608c1ba6c
|
#!/usr/bin/env python3
# -*- coding: UTF-8 -*-
# -----------------------------------------------------------------------------
#
# P A G E B O T
#
# Copyright (c) 2016+ Buro Petr van Blokland + Claudia Mens
# www.pagebot.io
# Licensed under MIT conditions
#
# Supporting DrawBot, www.drawbot.com
# Supporting Flat, xxyxyz.org/flat
# -----------------------------------------------------------------------------
#
# variablecube2.py
#
from copy import copy
from pagebot.elements.element import Element
from pagebot.style import makeStyle
from pagebot.toolbox.units import pointOffset
from pagebot.toolbox.color import blackColor
class VariableCube2(Element):
"""
>>> from pagebot.fonttoolbox.objects.font import findFont
>>> from pagebot.document import Document
>>> vfFont = findFont('RobotoDelta_v2-VF')
>>> doc = Document(w=500, h=500, autoPages=1)
>>> page = doc[1]
>>> page.padding = 40
>>> vc = VariableCube2(vfFont, parent=page, x=40, y=40, w=page.pw)
"""
# Initialize the default behavior tags as different from Element.
def __init__(self, font, point=None, parent=None, style=None,
name=None, captionStyle=None, caption=None,
location=None, dimensions=None,
clipRect=None, mask=None, imo=None, **kwargs):
Element.__init__(self, point=point, parent=parent, style=style,
name=name, **kwargs)
self.vfFont = font
self.style = makeStyle(style, **kwargs) # Combine self.style from
# Try to figure out the requested dimensions of the element display per axis.
if dimensions is None:
dimensions = dict(wght=5, wdth=5, opsz=5)
self.dimensions = dimensions
# Each element should check at this point if the minimum set of style values
# are set and if their values are valid.
assert self.w is not None and self.h is not None # Make sure that these are defined.
# Make sure that this is a formatted string. Otherwise create it with the current style.
# Note that there is a potential clash in the double usage of fill and stroke.
# FIXME: Review this: the 's' variable below is undefined.
#self.glyphNames = s or 'e'
self.glyphNames = 'e'
# Store the external location, to allow other axis values to be set.
if location is None:
location = {}
self.location = copy(location)
def draw(self, view, origin):
c = self.doc.context
p = pointOffset(self.origin, origin)
p = self._applyScale(view, p)
px, py, _ = self._applyAlignment(p) # Ignore z-axis for now.
fillColor = self.style.get('fill')
if fillColor is not None:
c.fill(fillColor)
c.stroke((0.8, 0.8, 0.8), 0.5)
c.rect(px, py, self.w, self.h)
if len(self.dimensions) == 1:
raise ValueError('Not supporting 1 axis now')
if len(self.dimensions) > 2:
raise ValueError('Not supporting >2 axis now')
axisNames = sorted(self.dimensions.keys())
axisX = axisNames[0]
sizeX = self.dimensions[axisX]
axisY = axisNames[1]
sizeY = self.dimensions[axisY]
stepX = self.w / (sizeX+1)
stepY = self.h / (sizeY+1)
"""Add more parametric layout behavior here."""
RANGE = 1000
for indexX in range(sizeX+1):
for indexY in range(sizeY+1):
ox = 30
oy = 25
ppx = ox + px + indexX * stepX
ppy = oy + py + indexY * stepY
self.location[axisX] = indexX * RANGE / sizeX
self.location[axisY] = indexY * RANGE / sizeY
glyphPathScale = self.fontSize/self.font.info.unitsPerEm
c.drawGlyphPath(c, self.vfFont.ttFont, self.glyphNames[0],
ppx, ppy, self.location, s=glyphPathScale,
fillColor=(0, 0, 0))
bs = c.newString('%s %d\n%s %d' % (axisX,
indexX * RANGE / sizeX,
axisY,
indexY * RANGE / sizeY),
fontSize=6,
fill=blackColor)
w, h = bs.textSize()
c.text(bs, ppx - stepX/4, ppy - 16)
# Bit of a hack: we need the width of the glyph here.
bs = c.newString('Other axes: %s' % self.location,
fontSize=6, fill=blackColor)
w, h = bs.textSize()
c.text(bs, px, py - 16)
if __name__ == '__main__':
import doctest
import sys
sys.exit(doctest.testmod()[0])
|
py
|
1a5945f5354f49ab8a1fd7fa46916194198b021b
|
# -*- coding: utf-8 -*-
from pydrake.multibody.parsing import (
Parser,
PackageMap,
LoadModelDirectives,
ProcessModelDirectives,
ModelInstanceInfo,
AddFrame,
GetScopedFrameByName,
GetScopedFrameName,
)
import os
import unittest
from pydrake.common import FindResourceOrThrow
from pydrake.multibody.tree import (
ModelInstanceIndex,
)
from pydrake.multibody.plant import (
MultibodyPlant,
)
class TestParsing(unittest.TestCase):
def test_package_map(self):
dut = PackageMap()
tmpdir = os.environ.get('TEST_TMPDIR')
model = FindResourceOrThrow(
"drake/examples/atlas/urdf/atlas_minimal_contact.urdf")
# Simple coverage test for Add, Contains, size, GetPath, AddPackageXml.
dut.Add(package_name="root", package_path=tmpdir)
self.assertEqual(dut.size(), 1)
self.assertTrue(dut.Contains(package_name="root"))
self.assertEqual(dut.GetPath(package_name="root"), tmpdir)
dut.AddPackageXml(filename=FindResourceOrThrow(
"drake/multibody/parsing/test/box_package/package.xml"))
# Simple coverage test for Drake paths.
dut.PopulateUpstreamToDrake(model_file=model)
self.assertGreater(dut.size(), 1)
# Simple coverage test for folder and environment.
dut.PopulateFromEnvironment(environment_variable='TEST_TMPDIR')
dut.PopulateFromFolder(path=tmpdir)
def test_parser_file(self):
"""Calls every combination of arguments for the Parser methods which
use a file_name (not contents) and inspects their return type.
"""
sdf_file = FindResourceOrThrow(
"drake/multibody/benchmarks/acrobot/acrobot.sdf")
urdf_file = FindResourceOrThrow(
"drake/multibody/benchmarks/acrobot/acrobot.urdf")
for dut, file_name, model_name, result_dim in (
(Parser.AddModelFromFile, sdf_file, None, int),
(Parser.AddModelFromFile, sdf_file, "", int),
(Parser.AddModelFromFile, sdf_file, "a", int),
(Parser.AddModelFromFile, urdf_file, None, int),
(Parser.AddModelFromFile, urdf_file, "", int),
(Parser.AddModelFromFile, urdf_file, "a", int),
(Parser.AddAllModelsFromFile, sdf_file, None, list),
(Parser.AddAllModelsFromFile, urdf_file, None, list),
):
plant = MultibodyPlant(time_step=0.01)
parser = Parser(plant=plant)
if model_name is None:
result = dut(parser, file_name=file_name)
else:
result = dut(parser, file_name=file_name,
model_name=model_name)
if result_dim is int:
self.assertIsInstance(result, ModelInstanceIndex)
else:
assert result_dim is list
self.assertIsInstance(result, list)
self.assertIsInstance(result[0], ModelInstanceIndex)
def test_parser_string(self):
"""Checks parsing from a string (not file_name)."""
sdf_file = FindResourceOrThrow(
"drake/multibody/benchmarks/acrobot/acrobot.sdf")
with open(sdf_file, "r") as f:
sdf_contents = f.read()
plant = MultibodyPlant(time_step=0.01)
parser = Parser(plant=plant)
result = parser.AddModelFromString(
file_contents=sdf_contents, file_type="sdf")
self.assertIsInstance(result, ModelInstanceIndex)
def test_model_directives(self):
model_dir = os.path.dirname(FindResourceOrThrow(
"drake/multibody/parsing/test/"
"process_model_directives_test/package.xml"))
plant = MultibodyPlant(time_step=0.01)
parser = Parser(plant=plant)
parser.package_map().PopulateFromFolder(model_dir)
directives_file = model_dir + "/add_scoped_top.yaml"
directives = LoadModelDirectives(directives_file)
added_models = ProcessModelDirectives(
directives=directives, plant=plant, parser=parser)
# Check for an instance.
model_names = [model.model_name for model in added_models]
self.assertIn("extra_model", model_names)
plant.GetModelInstanceByName("extra_model")
# Test that other bound symbols exist.
ModelInstanceInfo.model_name
ModelInstanceInfo.model_path
ModelInstanceInfo.parent_frame_name
ModelInstanceInfo.child_frame_name
ModelInstanceInfo.X_PC
ModelInstanceInfo.model_instance
AddFrame.name
AddFrame.X_PF
frame = GetScopedFrameByName(plant, "world")
self.assertIsNotNone(GetScopedFrameName(plant, frame))
def test_model_directives_doc(self):
"""Check that the warning note in the docstring was added."""
self.assertIn("Note:\n", ProcessModelDirectives.__doc__)
|
py
|
1a594668e0ff7fc15170d96e1a7d2155af981067
|
import signal
import socket
import subprocess
import time
from eth_utils import (
to_text,
)
import requests
def wait_for_socket(ipc_path, timeout=60):
start = time.time()
while time.time() < start + timeout:
try:
sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
sock.connect(ipc_path)
sock.settimeout(timeout)
except (FileNotFoundError, socket.error):
time.sleep(0.01)
else:
break
def wait_for_http(endpoint_uri, timeout=60):
start = time.time()
while time.time() < start + timeout:
try:
requests.get(endpoint_uri)
except requests.ConnectionError:
time.sleep(0.01)
else:
break
def get_process(command_list, terminates=False):
proc = subprocess.Popen(
command_list,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
)
if terminates:
wait_for_popen(proc, 30)
try:
yield proc
finally:
kill_proc_gracefully(proc)
output, errors = proc.communicate()
print(
"Parity Process Exited:\n"
"stdout:{0}\n\n"
"stderr:{1}\n\n".format(
to_text(output),
to_text(errors),
)
)
def wait_for_popen(proc, timeout):
start = time.time()
while time.time() < start + timeout:
if proc.poll() is None:
time.sleep(0.01)
else:
break
def kill_proc_gracefully(proc):
if proc.poll() is None:
proc.send_signal(signal.SIGINT)
wait_for_popen(proc, 13)
if proc.poll() is None:
proc.terminate()
wait_for_popen(proc, 5)
if proc.poll() is None:
proc.kill()
wait_for_popen(proc, 2)
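# A minimal usage sketch: get_process is a plain generator, so one way to use it is
# to wrap it with contextlib.contextmanager so the subprocess is killed and its
# output printed when the block exits. The command list below is a hypothetical
# placeholder.
#
# from contextlib import contextmanager
#
# managed_process = contextmanager(get_process)
# with managed_process(["parity", "--version"], terminates=True) as proc:
#     pass  # interact with proc here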
|
py
|
1a5946cfaf5ae64249bd5822ebd69b4fa630f6ff
|
import math
import statistics as stats
import numpy as np
from Modulos.Utils import Truncate
def FuncionAcumuladaExponencial(x, valor_lambda):
'''Returns the value of the cumulative distribution function of the exponential distribution evaluated at x
Parameters: x: value at which to evaluate the function
valor_lambda: lambda value computed for the series'''
return 1-math.exp(-valor_lambda*x)
def FuncionDensidadNormal(x, media, desviacion_estandar):
'''Returns the value of the probability density function of the normal distribution evaluated at x
Parameters: x: value at which to evaluate the function
media: mean computed for the series
desviacion_estandar: standard deviation computed for the series'''
return (math.exp(-0.5*((x-media)/desviacion_estandar)**2))/(desviacion_estandar*math.sqrt(2*math.pi))
def FuncionAcumuladaUniforme(a, b, x):
'''Returns the value of the cumulative distribution function of the uniform distribution evaluated at x
Parameters: x: value at which to evaluate the function
a: lower bound of the interval
b: upper bound of the interval'''
return (x-a)/(b-a)
def ProbabilidadAcumuladaExponencial(desde, hasta, valor_lambda):
'''Returns the cumulative probability of the exponential distribution over the interval
Parameters: desde: start value of the interval
hasta: end value of the interval
valor_lambda: lambda value computed for the series'''
return FuncionAcumuladaExponencial(hasta, valor_lambda) - FuncionAcumuladaExponencial(desde, valor_lambda)
def ProbabilidadAcumuladaUniforme(desde, hasta, a, b):
'''Returns the cumulative probability of the uniform distribution over the interval
Parameters: desde: start value of the interval
hasta: end value of the interval
a: lower bound of the interval
b: upper bound of the interval'''
return FuncionAcumuladaUniforme(a, b, hasta) - FuncionAcumuladaUniforme(a, b, desde)
def FrecuenciasEsperadas(tamaño_muestra, intervalos, tipoDistribucion, media, desviacion_estandar, a, b):
'''Computes the expected frequency for each interval according to the chosen distribution type
Parameters: tamaño_muestra: integer, number of elements in the series
intervalos: Dict<str, extremes>, dictionary keyed by the string representation of each interval
tipoDistribucion: integer, distribution chosen as the null hypothesis (0=uniform, 1=exponential, 2=normal)
media: mean computed for the series
desviacion_estandar: standard deviation computed for the series
a: minimum value of the series
b: maximum value of the series
Return: list of expected frequencies'''
frec_esp_arr = []
if tipoDistribucion == 1:
valor_lambda = Truncate(1/media, 7)
for i in intervalos:
intervalo = intervalos[i]
desde, hasta = intervalo[0], intervalo[1]
if tipoDistribucion == 0:
prob = ProbabilidadAcumuladaUniforme(desde, hasta, a, b)
frec_esp = round(prob*tamaño_muestra)
elif tipoDistribucion == 1:
prob = ProbabilidadAcumuladaExponencial(desde, hasta, valor_lambda)
frec_esp = Truncate(prob*tamaño_muestra, 4)
elif tipoDistribucion == 2:
marca_clase = (desde+hasta)/2
prob = FuncionDensidadNormal(marca_clase, media, desviacion_estandar) * (hasta-desde)
frec_esp = Truncate(prob*tamaño_muestra, 4)
frec_esp_arr.append(frec_esp)
return frec_esp_arr
def testFrecuenciasEsperadasExponencial():
arr = [0.10, 0.25, 1.53, 2.83, 3.50, 4.14, 5.65, 6.96, 7.19, 8.25,1.20, 5.24, 4.75, 3.96, 2.21, 3.15, 2.53, 1.16, 0.32, 0.90, 0.87, 1.34, 1.87, 2.91, 0.71, 1.69, 0.69, 0.55, 0.43, 0.26]
intervalos = {'0 - 1': [0, 1], '1 - 2': [1, 2], '2 - 3': [2, 3], '3 - 4': [3, 4], '4 - 5': [4, 5], '5 - 6': [5, 6], '6 - 7': [6, 7], '7 - 8': [7, 8], '8 - 9': [8, 9], '9 - 10': [9, 10]}
tipoDistribucion = 1
tamaño_muestra = 30
a, b = min(arr), max(arr)
media = stats.mean(arr)
print('Media:', media)
desviacion_estandar = np.std(arr, ddof=1)
frec_esp = FrecuenciasEsperadas(tamaño_muestra, intervalos, tipoDistribucion, media, desviacion_estandar, a, b)
print(frec_esp)
def testFrecuenciasEsperadasNormal():
arr = [1.56, 2.21, 3.15, 4.61, 4.18, 5.20, 6.94, 7.71, 5.15, 6.76, 7.28, 4.23, 3.21, 2.75, 4.69, 5.86, 6.25, 4.27, 4.91, 4.78, 2.46, 3.97, 5.71, 6.19, 4.20, 3.48, 5.83, 6.36, 5.90, 5.43]
intervalos = {'0 - 1': [0, 1], '1 - 2': [1, 2], '2 - 3': [2, 3], '3 - 4': [3, 4], '4 - 5': [4, 5], '5 - 6': [5, 6], '6 - 7': [6, 7], '7 - 8': [7, 8], '8 - 9': [8, 9], '9 - 10': [9, 10]}
media = stats.mean(arr)
print('Media:', media)
desviacion_estandar = np.std(arr, ddof=1)
print('Desv estandar: ', desviacion_estandar)
tipoDistribucion = 2
tamaño_muestra = 30
a, b = min(arr), max(arr)
frec_esp = FrecuenciasEsperadas(tamaño_muestra, intervalos, tipoDistribucion, media, desviacion_estandar, a, b)
print(frec_esp)
def testFrecuenciasEsperadasUniforme():
arr = [0.15, 0.22, 0.41, 0.65, 0.84, 0.81, 0.62, 0.45, 0.32, 0.07, 0.11, 0.29, 0.58, 0.73, 0.93, 0.97, 0.79, 0.55, 0.35, 0.09, 0.99, 0.51, 0.35, 0.02, 0.19, 0.24, 0.98, 0.10, 0.31, 0.17]
intervalos = {'0.0 - 0.2': [0.0, 0.2], '0.2 - 0.4': [0.2, 0.4], '0.4 - 0.6': [0.4, 0.6], '0.6 - 0.8': [0.6, 0.8], '0.8 - 1.0': [0.8, 1.0]}
media = stats.mean(arr)
desviacion_estandar = np.std(arr, ddof=1)
tipoDistribucion = 0
tamaño_muestra = 30
a, b = min(arr), max(arr)
print(a,b)
frec_esp = FrecuenciasEsperadas(tamaño_muestra, intervalos, tipoDistribucion, media, desviacion_estandar, a, b)
print(frec_esp)
|
py
|
1a5946da31b5ba95584d7d6e2132c62f32cb7260
|
#pragma repy
"""
Files should not be hashable...
"""
if callfunc == "initialize":
myfileobj = file('junk_test.out','w')
try:
mydict = {}
try:
mydict[myfileobj] = 7
except AttributeError:
# I should get an exception here...
pass
else:
print 'files are hashable!'
finally:
myfileobj.close()
removefile('junk_test.out')
|
py
|
1a59471271d98ef6e5015fba843f145b2ac4893b
|
# From the OpenCV library import imread function only.
from cv2 import imread
# Reading the image using imread() function
image = imread("../0_assets/road.jpg")
# Extracting RGB values.
# Here we have randomly chosen a pixel.
# The [100, 100] represents the pixel position in X and Y.
# Note that the position has to be in bounds with respect to the image size.
(B, G, R) = image[100, 100]
# Display the pixel values
print("R = {}, G = {}, B = {}".format(R, G, B))
# We can also extract a single color channel of a pixel.
# The channel index is the third index into the image array.
# OpenCV loads images in BGR order, so index 0 is the Blue channel.
B = image[100, 100, 0]
print("B = {}".format(B))
|
py
|
1a59477b39e10e1a0a4e33a5c9745ef9b0cb9cdd
|
import os
import numpy as np
from scipy import fftpack
import matplotlib.pyplot as plt
from astropy.io import fits
from astropy.wcs import WCS
from astropy.stats import SigmaClip
from astropy.stats import gaussian_fwhm_to_sigma
from astropy.convolution import Gaussian2DKernel
from astropy.coordinates import SkyCoord
from astropy.wcs.utils import skycoord_to_pixel
from astropy.wcs.utils import proj_plane_pixel_scales
from astropy import units as u
from photutils import source_properties
from photutils import detect_sources
from photutils import Background2D, MedianBackground
import huntsman_dust.util_plot as util_plot
def image_load(image_path):
"""Returns image, header and wcs objects.
Args:
image_path(str, required): Image path to particular FITs. File
Returns:
image(array): This is the image data
header(table): This is the header object
wcs: World Coordinate System object
"""
hdulist = fits.open(image_path)
image = hdulist[0].data
header = hdulist[0].header
wcs = WCS(header)
return image, header, wcs
def background_2D(image,
sigma,
iters,
box_size,
filter_size,
plt_grid):
"""2D background estimation.
This function creates a 2D background estimate by dividing the image into
a grid, defined by box_size.
Args:
image(array, required): This is the image data
sigma(float, required): Sigma level
iters(int, required): Number of iterations
box_size(int, required): Defines the box dimensions, in pixels
filter_size(int, required): Defines the filter reach in pixels
plt_grid(boolean): Overplot grid on image
Returns:
bkg(array): 2D background level
bkgrms(array): RMS background
"""
sigma_clip = SigmaClip(sigma=sigma,
iters=iters)
mask = (image == 0)
bkg_estimator = MedianBackground()
bkg = Background2D(image,
box_size=box_size,
filter_size=filter_size,
sigma_clip=sigma_clip,
bkg_estimator=bkg_estimator,
mask=mask,
edge_method=u'pad')
# print('Background Median: ' + str(bkg.background_median))
# print('Background RMS median: ' + str(bkg.background_rms_median))
if plt_grid is True:
plt.imshow(bkg.background,
origin='lower',
cmap='Greys')
bkg.plot_meshes(outlines=True,
color='#1f77b4')
bkgrms = bkg.background_rms
return bkg, bkgrms
def find_objects(image,
threshold,
FWHM,
npixels):
"""Find sources in image by a segmentation process.
This function detects sources a given sigma above a threshold,
only if it has more that npixels that are interconnected.
Args:
image(array, required): This is the image data
threshold(array, required): This is the threshold above which
detection occurs
FWHM(int, required): Full Width Half Maximum of 2D circular
gaussian kernel used to filter the
image prior to thresholding. Input is
in terms of pixels.
npixels(int, required): The minimum number of pixels to define
a source
Returns:
segm: The segmentation image
"""
sigma = FWHM * gaussian_fwhm_to_sigma
kernel = Gaussian2DKernel(sigma,
x_size=3,
y_size=3)
kernel.normalize()
segm = detect_sources(image,
threshold,
npixels=npixels,
filter_kernel=kernel)
return segm
def ds9_region(image_path,
image,
segm,
wcs,
ds9_region):
""""Creates ds9 region file.
This function creates a ds9 region file to display the sources
detected by the segmentation function. This file is written to
the same directory the fits files are in.
Args:
image_path(str, required): Image path to particular FITs. File
image(array, required): This is the image data
segm: The segmentation image
wcs: World Coordinate System object
ds9_region(boolean, opt): If true, creates region file
"""
if ds9_region is True:
data_path = os.path.splitext(image_path)
region_path = str(data_path[0]) + '_ds9region'
scale = proj_plane_pixel_scales(wcs)
image_scale = scale[0]
reg = source_properties(image, segm, wcs=wcs)
with open(region_path+'.reg', 'w') as f:
f.write('# Region file format: DS9 version 7.6\n\n')
f.write('global color=#ff7733\n')
f.write('global width=2\n')
f.write('fk5\n\n')
for i in range(0, len(reg.id)):
x = reg[i].sky_centroid_icrs.ra.to(u.deg)
y = reg[i].sky_centroid_icrs.dec
r = image_scale*reg[i].equivalent_radius
f.write('circle('+str(x.value)+','+str(y.value)+',' +
str(r.value)+')'+' # Source Number:' +
str(reg[i].id)+'\n')
def mask_galaxy(image,
wcs,
Ra,
Dec,
name,
radius):
"""Masks galaxy at Ra, Dec within a radius given in arcminutes
Creates a circular mask centered at a given Ra, Dec. The radius
is given in arcmins. The wcs object is used to convert these inputs
to pixel locations. A pixel scale is also determined. If the object
name is supplied, SESAME is used to find the object center. If no active
internet connection is available, center location must be manually
entered, in degrees. If no center coordinates are supplied, (0, 0)
is the default center.
Args:
image(array, required): Image data
wcs: World Coordinate System object
name(str, optional): Name of galaxy or object
Ra(str): Right Ascention
Dec(str): Declination
Radius(float, required): Radius to be masked, in arcminutes
Returns:
masked_img(array): Image which has been masked
mask(boolean array): Mask of the given object"""
# Radius must be given in arcminutes
# Dimensions of the image
dim = (image.shape)
y, x = dim[0], dim[1]
# Finds the center of an object by inputting its name into SESAME
# This requires an active internet connection
# a, b are the coordinates of the center given in pixels
try:
center = SkyCoord.from_name(name)
except Exception:
print("No active internet connection. Manually enter Ra, Dec.")
Ra = Ra
Dec = Dec
center = SkyCoord(Ra, Dec, unit="deg")
c_pix = skycoord_to_pixel(center, wcs)
a, b = c_pix[0], c_pix[1]
print(center)
radius = radius*u.arcmin
# Finds pixel scale using the WCS object. The default units can be found by
# unit = header['CUNIT1'], they are degrees by convention
# degrees are converted to arcmins and the radius is computed in pixels
scale = proj_plane_pixel_scales(wcs)
pix_scale = scale[0]*u.deg.to(u.arcmin)
print('Image Scale: ' + str(pix_scale)+' arcmin/pix')
rad_pix = (radius/pix_scale).value
# Indexes each pixel and checks if its is >= radius from center
Y, X = np.ogrid[:y, :x]
dist_from_center = np.sqrt((X - a)**2 + (Y - b)**2)
mask = dist_from_center <= rad_pix
return mask
def plt_fits(image,
wcs,
figure,
title,
cmap,
norm):
"""Plots FITs images with axis given in Ra, Dec.
Args:
image(array): Image data
wcs: World Coordinate System object
figure(optional): Figure Number
title(str, optional): Title of the figure
cmap(str, optional): Color map
norm: Image normalization
"""
util_plot.util_plot()
fig = plt.figure(num=figure)
ax = fig.add_subplot(1, 1, 1, projection=wcs)
ax.imshow(image, origin='lower', cmap=cmap, norm=norm)
ax.coords[0].set_axislabel('RA')
ax.coords[1].set_axislabel('DEC')
ax.set_title(title)
def plt_image(image,
figure,
title,
xlabel,
ylabel,
cmap,
norm):
"""Plots FITs images with axis given in Ra, Dec.
Args:
image(array): Image data
wcs: World Coordinte System object
figure(optional): Figure Number
title(str, optional): Title of the figure
cmap(str, optiona): Color map
norm: Image normalizatuion
"""
util_plot.util_plot()
plt.figure(num=figure)
plt.imshow(image, origin='lower', cmap=cmap, norm=norm)
plt.xlabel(xlabel)
plt.ylabel(ylabel)
plt.title(title)
def fits_write(image, header, img_path, name=None):
"""Writes an 2D data array to a fits file.
Writes a 2D array to a fits file in the same directory as the original
image. It appends the image header to this new fits file.
Args:
image(array): The image data to be written to a fits file
header(hdu.header): The header information to be appended
img_path(str): Path to source file
name(str): Name of new fits file. Ex: mask.fits
"""
hdu = fits.PrimaryHDU()
hdu.data = image.astype(float)
hdu.header = header
data_path, file = os.path.split(img_path)
file_path = os.path.join(data_path, name + "."+'fits')
hdu.writeto(file_path, overwrite=True)
def azimuthalAverage(image, center=None):
"""
Calculate the azimuthally averaged radial profile.
image - The 2D image
center - The [x,y] pixel coordinates used as the center. The default is
None, which then uses the center of the image (including
fractional pixels).
Contributed by Jessica R. Lu
"""
# Calculate the indices from the image
y, x = np.indices(image.shape)
if not center:
center = np.array([(y.max()-y.min())/2.0, (x.max()-x.min())/2.0])
r = np.hypot(x - center[1], y - center[0])
# Get sorted radii
ind = np.argsort(r.flat)
r_sorted = r.flat[ind]
i_sorted = image.flat[ind]
# Get the integer part of the radii (bin size = 1)
r_int = r_sorted.astype(int)
# Find all pixels that fall within each radial bin.
deltar = r_int[1:] - r_int[:-1] # Assumes all radii represented
rind = np.where(deltar)[0] # location of changed radius
nr = rind[1:] - rind[:-1] # number of radius bin
# Cumulative sum to figure out sums for each radius bin
csim = np.cumsum(i_sorted, dtype=float)
tbin = csim[rind[1:]] - csim[rind[:-1]]
radial_prof = tbin / nr
return radial_prof
def p_spec(image):
"""Performs 2D FFT on image and averages radially."""
image = image.astype(float)
psd2D = np.abs(fftpack.fftshift(fftpack.fft2(image)))**2
psd1D = azimuthalAverage(psd2D)
return psd1D
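# A minimal usage sketch chaining the helpers above (the FITS path is a hypothetical
# placeholder and the sigma/box/FWHM/npixels values are illustrative only):
#
# image, header, wcs = image_load("example.fits")
# bkg, bkgrms = background_2D(image, sigma=3.0, iters=10, box_size=50,
#                             filter_size=3, plt_grid=False)
# segm = find_objects(image - bkg.background, threshold=3.0 * bkgrms,
#                     FWHM=3, npixels=5)
# psd1D = p_spec(image - bkg.background)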
|
py
|
1a5947e15e046e7b8bb6854cbd831a61d361d5e7
|
from .bbox_2d import BBox2
from .line_2d import Segment2, Line2, Ray2
|
py
|
1a5949096ba27aaa9ff5df50a68abe98ceaf860c
|
import typing
from abc import ABC, abstractmethod
from .utils import image as image_utils
class CameraBase(ABC):
@abstractmethod
def read(self) -> typing.Tuple[bool, typing.Any]:
...
@abstractmethod
def release(self) -> None:
...
class CvCamera(CameraBase):
def __init__(self, width: int, cam_id=0):
try:
import cv2
except ModuleNotFoundError:
raise ModuleNotFoundError(
"OpenCV could not be found. Please see instructions on how to configure your system."
)
self.__camera = cv2.VideoCapture(cam_id)
self.__width = width
def read(self):
ok, frame = self.__camera.read()
if not ok:
return ok, None
return ok, image_utils.resize(frame, self.__width)
def release(self):
self.__camera.release()
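# A minimal usage sketch (assumes a webcam at cam_id 0 and a working OpenCV install;
# the width value is illustrative):
#
# camera = CvCamera(width=640)
# ok, frame = camera.read()
# if ok:
#     print(frame.shape)
# camera.release()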
|
py
|
1a59498d33a33e427139b55c51dfc539b1452257
|
# SPDX-License-Identifier: Apache-2.0
#
# The OpenSearch Contributors require contributions made to
# this file be licensed under the Apache-2.0 license or a
# compatible open source license.
import unittest
from unittest.mock import MagicMock, patch
from git.git_repository import GitRepository
from manifests_workflow.component_opensearch_dashboards_min import ComponentOpenSearchDashboardsMin
from system.config_file import ConfigFile
class TestComponentOpenSearchDashboardsMin(unittest.TestCase):
@patch("subprocess.check_output")
def test_branches(self, mock: MagicMock) -> None:
mock.return_value = "\n".join(["main", "1.x", "1.21", "20.1", "something", "else"]).encode()
self.assertEqual(ComponentOpenSearchDashboardsMin.branches(), ["main", "1.x", "1.21", "20.1"])
mock.assert_called_with(
"git ls-remote https://github.com/opensearch-project/OpenSearch-Dashboards.git refs/heads/* | cut -f2 | cut -d/ -f3",
shell=True,
)
@patch("os.makedirs")
@patch.object(GitRepository, "__checkout__")
def test_checkout(self, *mocks: MagicMock) -> None:
component = ComponentOpenSearchDashboardsMin.checkout("path")
self.assertEqual(component.name, "OpenSearch-Dashboards")
self.assertFalse(component.snapshot)
@patch.object(ConfigFile, "from_file")
def test_version(self, mock_config: MagicMock) -> None:
mock_config.return_value = ConfigFile('{"version":"2.1"}')
component = ComponentOpenSearchDashboardsMin(MagicMock(working_directory="path"))
self.assertEqual(component.version, "2.1")
@patch.object(ConfigFile, "from_file")
def test_properties(self, mock_config: MagicMock) -> None:
mock_config.return_value = ConfigFile('{"version":"2.1"}')
component = ComponentOpenSearchDashboardsMin(MagicMock(working_directory="path"))
self.assertEqual(component.properties.get_value("version"), "2.1")
@patch.object(ConfigFile, "from_file")
def test_to_dict(self, mock_config: MagicMock) -> None:
mock_config.return_value = ConfigFile('{"version":"2.1"}')
repo = MagicMock(ref="ref", url="repo")
component = ComponentOpenSearchDashboardsMin(repo)
self.assertEqual(
component.to_dict(),
{"name": "OpenSearch-Dashboards", "ref": "ref", "repository": "repo"},
)
|
py
|
1a5949d081d97a1ba2aea8ea36b17d1c9f9dc2c0
|
from myClasses import *
import openpyxl
from openpyxl import load_workbook
import mdToArray
import os
import shutil
from openpyxl.styles import Font
class ArrayToExcel:
def __init__(self):
self.book = None
self.books=[]
self.templateBookPath=None
self.templateSheetName = None
self.startRow = None
self.startCol = None
self.baseFont=None
# self.templateWb=None
return
def reset(self):
self.book=None
self.books=[]
def setBook(self, book: Book):
self.book = book
def addBook(self,book:Book):
self.books.append(book)
def readTemplate(self, path, templateSheetName: str, startRow: int, startCol: int):
# self.wb = load_workbook(filename=path, keep_vba=True)
self.templateBookPath=path
self.templateSheetName = templateSheetName
self.startCol = startCol
self.startRow = startRow
# can't copy a sheet from book A to book B.
# self.templateWb=load_workbook(filename=self.templateBookPath, keep_vba=True,read_only=False)
def generateBook(self,outputPath:str,font:str,size):
outputDir=outputPath[0:outputPath.rfind("\\")]
if not os.path.exists(outputPath):
if not os.path.exists(outputDir):
os.makedirs(outputDir)
shutil.copy(self.templateBookPath, outputPath)
# wb=load_workbook(filename=self.templateBookPath, keep_vba=True,read_only=False)
wb=load_workbook(outputPath,keep_vba=True,read_only=False)
for sheet in self.book.sheets: # type:Sheet
print(sheet.sheetName)
print(sheet.data)
#delete before created sheet
if sheet.sheetName in wb.sheetnames:
std=wb.get_sheet_by_name(sheet.sheetName)
wb.remove_sheet(std)
ws=None
if self.templateSheetName in wb.sheetnames:
ws = wb.copy_worksheet(
wb.get_sheet_by_name(self.templateSheetName))
else:
ws=wb.create_sheet()
ws.title = sheet.sheetName
# rootFont=ws.cell(self.startRow+1, self.startCol+1).font
# self.baseFont=Font(name=rootFont.name,sz=rootFont.sz)
self.baseFont=Font(name=font,size=size)
for r, row in enumerate(sheet.data):
for c, column in enumerate(row):
if column != "":
self.__setVal(ws, r+1, c+1, column)
std=wb.get_sheet_by_name('template')
wb.remove_sheet(std)
wb.save(outputPath)
wb.close()
return
def generateBooks(self,outputPath:str,font:str,size):
outputDir=outputPath[0:outputPath.rfind("\\")]
if not os.path.exists(outputPath):
if not os.path.exists(outputDir):
os.makedirs(outputDir)
shutil.copy(self.templateBookPath, outputPath)
# wb=load_workbook(filename=self.templateBookPath, keep_vba=True,read_only=False)
wb=load_workbook(outputPath,keep_vba=True,read_only=False)
for book in self.books:
for sheet in book.sheets: # type:Sheet
print(sheet.sheetName)
print(sheet.data)
#delete before created sheet
if sheet.sheetName in wb.sheetnames:
std=wb.get_sheet_by_name(sheet.sheetName)
wb.remove_sheet(std)
ws=None
if self.templateSheetName in wb.sheetnames:
ws = wb.copy_worksheet(
wb.get_sheet_by_name(self.templateSheetName))
else:
ws=wb.create_sheet()
ws.title = sheet.sheetName
# rootFont=ws.cell(self.startRow+1, self.startCol+1).font
# self.baseFont=Font(name=rootFont.name,sz=rootFont.sz)
self.baseFont=Font(name=font,size=size)
for r, row in enumerate(sheet.data):
for c, column in enumerate(row):
if column != "":
self.__setVal(ws, r+1, c+1, column)
wb.save(outputPath)
wb.close()
return
def __setVal(self, ws: openpyxl.worksheet, row, col, val):
cell=ws.cell(row=row+self.startRow, column=col+self.startCol)
cell.font=self.baseFont
cell.value=val
return
if __name__ == "__main__":
# mte = mdToArray.MdToArray()
# mte.read("mdDocs/sample.md")
# mte.compile()
import pickle
# with open("book.pickle","wb")as f:
# pickle.dump(mte.book, f)
with open("book.pickle", "rb")as f:
book = pickle.load(f)
ate = ArrayToExcel()
ate.setBook(book=book)
ate.readTemplate("format.xlsm", "template", 3, 3)
ate.generateBook("output.xlsm", "Meiryo", 11)  # output path, font name and size are illustrative placeholders
|
py
|
1a594c1e6780dbde94d7a0c753562aab36ee3926
|
#!/usr/bin/env python
# encoding: utf-8
import logging
from modules.payload_builder import PayloadBuilder
import shutil
import os
from modules.obfuscate_names import ObfuscateNames
from modules.obfuscate_form import ObfuscateForm
from modules.obfuscate_strings import ObfuscateStrings
from modules.uac_bypass import UACBypass
class VBAGenerator(PayloadBuilder):
""" Module used to generate VBA file from working dir content"""
def transformAndObfuscate(self):
"""
Call this method to apply transformation and obfuscation on the content of temp directory
This method does obfuscation for all VBA and VBA like types
"""
# Enable UAC bypass
if self.mpSession.uacBypass:
uacBypasser = UACBypass(self.mpSession)
uacBypasser.run()
# Macro obfuscation
if self.mpSession.obfuscateNames:
obfuscator = ObfuscateNames(self.mpSession)
obfuscator.run()
# Mask strings
if self.mpSession.obfuscateStrings:
obfuscator = ObfuscateStrings(self.mpSession)
obfuscator.run()
# Macro obfuscation
if self.mpSession.obfuscateForm:
obfuscator = ObfuscateForm(self.mpSession)
obfuscator.run()
def check(self):
return True
def printFile(self):
""" Display generated code on stdout """
logging.info(" [+] Generated VB code:\n")
if len(self.getVBAFiles())==1:
vbaFile = self.getMainVBAFile()
with open(vbaFile,'r') as f:
print(f.read())
else:
logging.info(" [!] More then one VB file generated")
for vbaFile in self.getVBAFiles():
with open(vbaFile,'r') as f:
print(" ======================= %s ======================== " % vbaFile)
print(f.read())
def generate(self):
if len(self.getVBAFiles())>0:
logging.info(" [+] Analyzing generated VBA files...")
if len(self.getVBAFiles())==1:
shutil.copy2(self.getMainVBAFile(), self.outputFilePath)
logging.info(" [-] Generated VBA file: %s" % self.outputFilePath)
else:
logging.info(" [!] More then one VBA file generated, files will be copied in same dir as %s" % self.outputFilePath)
for vbaFile in self.getVBAFiles():
if vbaFile != self.getMainVBAFile():
shutil.copy2(vbaFile, os.path.join(os.path.dirname(self.outputFilePath),os.path.basename(vbaFile)))
logging.info(" [-] Generated VBA file: %s" % os.path.join(os.path.dirname(self.outputFilePath),os.path.basename(vbaFile)))
else:
shutil.copy2(self.getMainVBAFile(), self.outputFilePath)
logging.info(" [-] Generated VBA file: %s" % self.outputFilePath)
def getAutoOpenVbaFunction(self):
return "AutoOpen"
def resetVBAEntryPoint(self):
"""
If the macro has an auto-open-like mechanism, this replaces its entry point with the generator's auto-open function.
E.g. for Excel it will replace "Sub AutoOpen ()" with "Sub Workbook_Open ()"
"""
mainFile = self.getMainVBAFile()
if mainFile != "" and self.startFunction is not None:
if self.startFunction != self.getAutoOpenVbaFunction():
logging.info(" [-] Changing auto open function from %s to %s..." % (self.startFunction, self.getAutoOpenVbaFunction()))
#1 Replace line in VBA
f = open(mainFile)
content = f.readlines()
f.close()
for n,line in enumerate(content):
if line.find(" " + self.startFunction) != -1:
#logging.info(" -> %s becomes %s" %(content[n], self.getAutoOpenVbaSignature()))
content[n] = self.getAutoOpenVbaSignature() + "\n"
f = open(mainFile, 'w')
f.writelines(content)
f.close()
# 2 Change current module start function
self._startFunction = self.getAutoOpenVbaFunction()
|
py
|
1a594c7c796daa5d05b3114ca225e59b0456297a
|
import salabim as sim
left = -1
right = +1
def sidename(side):
return "l" if side == left else "r"
def shortname(ship):
s = ""
for c in ship.name():
if c != ".":
s = s + c
return s
def shipcolor(side):
if side == left:
return "blue"
else:
return "red"
def ship_polygon(ship):
return (ship.side * (ship.length - 2), 0, ship.side * 3, 0, ship.side * 2, 3, ship.side * (ship.length - 2), 3)
def lock_water_rectangle(t):
if lock.mode() == "Switch":
y = sim.interpolate(t, lock.mode_time(), lock.scheduled_time(), ylevel[lock.side], ylevel[-lock.side])
else:
y = ylevel[lock.side]
return (xdoor[left], -waterdepth, xdoor[right], y)
def lock_door_left_rectangle(t):
if lock.mode() == "Switch" or lock.side == right:
y = ylevel[right] + 2
else:
y = ylevel[left] - waterdepth
return (xdoor[left] - 1, -waterdepth, xdoor[left] + 1, y)
def lock_door_right_rectangle(t):
if lock.mode() == "Switch" or lock.side == left:
y = ylevel[right] + 2
else:
y = ylevel[right] - waterdepth
return (xdoor[right] - 1, -waterdepth, xdoor[right] + 1, y)
def animation_pre_tick(self, t):
if lock.mode() == "Switch":
y = sim.interpolate(t, lock.mode_time(), lock.scheduled_time(), ylevel[lock.side], ylevel[-lock.side])
else:
y = ylevel[lock.side]
lockqueue.animate(x=xdoor[-lock.side], y=y, direction="w" if lock.side == left else "e")
def do_animation():
global ylevel, xdoor, waterdepth
lockheight = 5
waterdepth = 2
ylevel = {left: 0, right: lockheight}
xdoor = {left: -0.5 * locklength, right: 0.5 * locklength}
xbound = {left: -1.2 * locklength, right: 1.2 * locklength}
sim.Environment.animation_pre_tick = animation_pre_tick
env.animation_parameters(
x0=xbound[left], y0=-waterdepth, x1=xbound[right], modelname="Lock", speed=8, background_color="20%gray"
)
for side in [left, right]:
wait[side].animate(x=xdoor[side], y=10 + ylevel[side], direction="n")
sim.Animate(rectangle0=(xbound[left], ylevel[left] - waterdepth, xdoor[left], ylevel[left]), fillcolor0="aqua")
sim.Animate(rectangle0=(xdoor[right], ylevel[right] - waterdepth, xbound[right], ylevel[right]), fillcolor0="aqua")
a = sim.Animate(rectangle0=(0, 0, 0, 0), fillcolor0="aqua")
a.rectangle = lock_water_rectangle
a = sim.Animate(rectangle0=(0, 0, 0, 0))
a.rectangle = lock_door_left_rectangle
a = sim.Animate(rectangle0=(0, 0, 0, 0))
a.rectangle = lock_door_right_rectangle
a = sim.Animate(text="", x0=10, y0=650, screen_coordinates=True, fontsize0=15, font="narrow", anchor="w")
a.text = lambda t: "mean waiting left : {:5.1f} (n={})".format(
wait[left].length_of_stay.mean(), wait[left].length_of_stay.number_of_entries()
)
a = sim.Animate(text="", x0=10, y0=630, screen_coordinates=True, fontsize0=15, font="narrow", anchor="w")
a.text = lambda t: "mean waiting right: {:5.1f} (n={})".format(
wait[right].length_of_stay.mean(), wait[right].length_of_stay.number_of_entries()
)
a = sim.Animate(text="xx=12.34", x0=10, y0=610, screen_coordinates=True, fontsize0=15, font="narrow", anchor="w")
a.text = lambda t: " nr waiting left : {:3d}".format(wait[left].length())
a = sim.Animate(text="xx=12.34", x0=10, y0=590, screen_coordinates=True, fontsize0=15, font="narrow", anchor="w")
a.text = lambda t: " nr waiting right: {:3d}".format(wait[right].length())
sim.AnimateSlider(
x=520,
y=0,
width=100,
height=20,
vmin=16,
vmax=60,
resolution=4,
v=iat,
label="iat",
action=set_iat,
xy_anchor="nw",
)
sim.AnimateSlider(
x=660,
y=0,
width=100,
height=20,
vmin=10,
vmax=60,
resolution=5,
v=meanlength,
label="mean length",
action=set_meanlength,
xy_anchor="nw",
)
def set_iat(val):
global iat
iat = float(val)
def set_meanlength(val):
global meanlength
meanlength = float(val)
class Shipgenerator(sim.Component):
def process(self):
while True:
yield self.hold(sim.Exponential(iat).sample())
ship = Ship(name=sidename(self.side) + "ship.")
ship.side = self.side
ship.length = meanlength * sim.Uniform(2.0 / 3, 4.0 / 3).sample()
if lock.mode() == "Idle":
lock.activate()
class Ship(sim.Component):
def animation_objects(self, q):
size_x = self.length
size_y = 5
if self.side == left:
anchor = "se"
else:
anchor = "sw"
an1 = sim.Animate(polygon0=ship_polygon(self), fillcolor0=shipcolor(self.side), anchor=anchor, linewidth0=0)
an2 = sim.Animate(
text=shortname(self), textcolor0="white", anchor=anchor, fontsize0=2.4, offsetx0=self.side * 5, offsety0=0.7
)
return (size_x, size_y, an1, an2)
def process(self):
self.enter(wait[self.side])
if lock.ispassive():
lock.activate()
yield self.request((lockmeters[self.side], self.length), key_in[self.side])
self.leave(wait[self.side])
self.enter(lockqueue)
yield self.hold(intime)
self.release(key_in[self.side])
yield self.request(key_out)
self.leave(lockqueue)
yield self.hold(outtime)
self.release(key_out)
class Lock(sim.Component):
def process(self):
yield self.request(key_in[left])
yield self.request(key_in[right])
yield self.request(key_out)
while True:
if len(key_in[self.side].requesters()) == 0:
if len(key_in[-self.side].requesters()) == 0:
yield self.passivate()
self.release(key_in[self.side])
yield self.request((key_in[self.side], 1, 1000))
lockmeters[self.side].release()
yield self.hold(switchtime, mode="Switch")
self.side = -self.side
self.release(key_out)
yield self.request((key_out, 1, 1000), mode=None)
env = sim.Environment()
locklength = 60
switchtime = 10
intime = 2
outtime = 2
meanlength = 30
iat = 30
lockmeters = {}
key_in = {}
wait = {}
lockqueue = sim.Queue("lockqueue")
key_out = sim.Resource(name=" key_out")
for side in (left, right):
wait[side] = sim.Queue(name=sidename(side) + "Wait")
lockmeters[side] = sim.Resource(capacity=locklength, name=sidename(side) + " lock meters", anonymous=True)
key_in[side] = sim.Resource(name=sidename(side) + " key in")
shipgenerator = Shipgenerator(name=sidename(side) + "Shipgenerator")
shipgenerator.side = side
lock = Lock(name="lock")
lock.side = left
do_animation()
env.run()
|
py
|
1a594cc9d666b9f108a821dfd8078eebb21bc23d
|
#!/usr/bin/env python3
from math import exp, pi
import os
import random
import torch
import unittest
import gpytorch
from gpytorch.kernels import RBFKernel, ScaleKernel
from gpytorch.likelihoods import GaussianLikelihood
from gpytorch.means import ConstantMean
from gpytorch.priors import SmoothedBoxPrior
from gpytorch.distributions import MultivariateNormal
from torch.utils.data import TensorDataset, DataLoader
# Simple training data: let's try to learn a sine function,
# but with KISS-GP let's use 100 training examples.
def make_data():
train_x = torch.linspace(0, 1, 1000)
train_y = torch.sin(train_x * (4 * pi)) + torch.randn(train_x.size()) * 0.2
test_x = torch.linspace(0.02, 1, 51)
test_y = torch.sin(test_x * (4 * pi))
return train_x, train_y, test_x, test_y
class GPRegressionModel(gpytorch.models.ApproximateGP):
def __init__(self, grid_size=20, grid_bounds=[(-0.1, 1.1)]):
variational_distribution = gpytorch.variational.CholeskyVariationalDistribution(
num_inducing_points=int(pow(grid_size, len(grid_bounds)))
)
variational_strategy = gpytorch.variational.GridInterpolationVariationalStrategy(
self, grid_size=grid_size, grid_bounds=grid_bounds, variational_distribution=variational_distribution
)
super(GPRegressionModel, self).__init__(variational_strategy)
self.mean_module = ConstantMean(prior=SmoothedBoxPrior(-10, 10))
self.covar_module = ScaleKernel(RBFKernel(lengthscale_prior=SmoothedBoxPrior(exp(-3), exp(6), sigma=0.1)))
def forward(self, x):
mean_x = self.mean_module(x)
covar_x = self.covar_module(x)
return MultivariateNormal(mean_x, covar_x)
class TestKISSGPVariationalRegression(unittest.TestCase):
def setUp(self):
if os.getenv("UNLOCK_SEED") is None or os.getenv("UNLOCK_SEED").lower() == "false":
self.rng_state = torch.get_rng_state()
torch.manual_seed(0)
if torch.cuda.is_available():
torch.cuda.manual_seed_all(0)
random.seed(0)
def tearDown(self):
if hasattr(self, "rng_state"):
torch.set_rng_state(self.rng_state)
def test_kissgp_gp_mean_abs_error(self):
train_x, train_y, test_x, test_y = make_data()
train_dataset = TensorDataset(train_x, train_y)
train_loader = DataLoader(train_dataset, shuffle=True, batch_size=64)
model = GPRegressionModel()
likelihood = GaussianLikelihood()
        # Our optimizer: Adam over both the model and likelihood parameters
        optimizer = torch.optim.Adam([{"params": model.parameters()}, {"params": likelihood.parameters()}], lr=0.01)
        # Our loss object: the variational ELBO
        mll = gpytorch.mlls.VariationalELBO(likelihood, model, num_data=train_y.size(0))
# The training loop
def train(n_epochs=15):
# We use a Learning rate scheduler from PyTorch to lower the learning rate during optimization
# We're going to drop the learning rate by 1/10 after 3/4 of training
# This helps the model converge to a minimum
            # MultiStepLR milestones must be integers for the scheduled drop to trigger
            scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer, milestones=[int(0.75 * n_epochs)], gamma=0.1)
            for _ in range(n_epochs):
                for x_batch, y_batch in train_loader:
                    x_batch = x_batch.float()
                    y_batch = y_batch.float()
                    optimizer.zero_grad()
                    output = model(x_batch)
                    loss = -mll(output, y_batch)
                    loss.backward()
                    optimizer.step()
                # Step the scheduler once per epoch, after the optimizer updates
                scheduler.step()
train()
for _, param in model.named_parameters():
self.assertTrue(param.grad is not None)
self.assertGreater(param.grad.norm().item(), 0)
for param in likelihood.parameters():
self.assertTrue(param.grad is not None)
self.assertGreater(param.grad.norm().item(), 0)
# Test the model
model.eval()
likelihood.eval()
test_preds = likelihood(model(test_x)).mean
mean_abs_error = torch.mean(torch.abs(test_y - test_preds))
self.assertLess(mean_abs_error.squeeze().item(), 0.1)
if __name__ == "__main__":
unittest.main()
|
py
|
1a594d533accba65c438a9ccb1f73f28ee836a51
|
class RandomKitchenSinks():
def __init__(self, gamma, n_components, random_state=None):
""" Parameters:
gamma: float
Parameter of the rbf kernel to be approximated exp(-gamma * x^2)
n_components: int
Number of components (output dimensionality) used to approximate the kernel
"""
        self.gamma = gamma
        self.n_components = n_components
        self.random_state = random_state
        # Set in fit(); transform() checks this to detect an unfitted preprocessor
        self.preprocessor = None
def fit(self, X, Y=None):
import sklearn.kernel_approximation
self.n_components = int(self.n_components)
self.gamma = float(self.gamma)
        self.preprocessor = sklearn.kernel_approximation.RBFSampler(
            gamma=self.gamma, n_components=self.n_components, random_state=self.random_state)
self.preprocessor.fit(X)
return self
def transform(self, X):
if self.preprocessor is None:
raise NotImplementedError()
return self.preprocessor.transform(X)
def get_model(name, config, random_state):
list_param = {"random_state": random_state}
for k in config:
if k.startswith("feature_preprocessor:kitchen_sinks:"):
param_name = k.split(":")[2]
list_param[param_name] = config[k]
model = RandomKitchenSinks(**list_param)
return (name, model)
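# Hedged usage sketch (not part of the original module): the config keys below follow the
# "feature_preprocessor:kitchen_sinks:<param>" prefix parsed by get_model, and the feature
# matrix is random demo data, so all names and sizes here are illustrative only.
if __name__ == "__main__":
    import numpy as np
    demo_config = {
        "feature_preprocessor:kitchen_sinks:gamma": 1.0,
        "feature_preprocessor:kitchen_sinks:n_components": 50,
    }
    name, model = get_model("kitchen_sinks", demo_config, random_state=0)
    X_demo = np.random.rand(10, 4)
    print(model.fit(X_demo).transform(X_demo).shape)  # expected: (10, 50)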
|
py
|
1a594d6cbfa01a1395903baf93e4d4c1f4ce885e
|
import os
import git
|
py
|
1a59509dd051ca31123ab3d03b70d49131f117ff
|
from django.db import models
from django.contrib.auth.models import User
from django.conf import settings
from . import util
class LinkManager(models.Manager):
    def by_uuid(self, uuid):
        # Link is defined below in this module; django.db.models has no Link attribute
        return self.filter(pk=Link.decodificar_uuid(uuid)).get()
def meus_links(self, usuario):
return self.filter(usuario=usuario)
class Link(models.Model):
handler = util.UUIDCurto()
url = models.URLField()
criacao = models.DateTimeField(auto_now_add=True)
usuario = models.ForeignKey(User, blank=True, null=True, on_delete=models.SET_NULL)
objects = LinkManager()
@property
def uuid(self):
return self.handler.codificar(self.pk)
    @classmethod
    def decodificar_uuid(cls, uuid):
        return cls.handler.decodificar(uuid)
@property
def url_curta(self):
return settings.ENCURTADOR_SITE_BASE_URL + self.uuid
def __unicode__(self):
return self.url
class Meta:
ordering = ['-criacao']
get_latest_by = 'criacao'
|
py
|
1a5951344a7afef0d9a82aece59f4778c741261d
|
# -*- coding: utf-8 -*- #
# Copyright 2021 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Command group for Dataplex Content Resource."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from googlecloudsdk.calliope import base
@base.Hidden
@base.ReleaseTracks(base.ReleaseTrack.ALPHA)
class Content(base.Group):
"""Manage Dataplex resources."""
category = base.DATA_ANALYTICS_CATEGORY
|
py
|
1a5951ef3a7c1505c6fc5a807b97698ced30282f
|
from conans import ConanFile, CMake, tools
import os
class OpenEXRConan(ConanFile):
name = "openexr"
version = "2.4.0"
description = "OpenEXR is a high dynamic-range (HDR) image file format developed by Industrial Light & " \
"Magic for use in computer imaging applications."
topics = ("conan", "openexr", "hdr", "image", "picture")
license = "BSD-3-Clause"
homepage = "https://github.com/openexr/openexr"
url = "https://github.com/conan-io/conan-center-index"
settings = "os", "compiler", "build_type", "arch"
options = {"shared": [True, False], "fPIC": [True, False]}
default_options = {"shared": False, "fPIC": True}
generators = "cmake", "cmake_find_package"
exports_sources = "CMakeLists.txt"
_source_subfolder = "source_subfolder"
def config_options(self):
if self.settings.os == "Windows":
self.options.remove("fPIC")
def requirements(self):
self.requires("zlib/1.2.11")
def source(self):
tools.get(**self.conan_data["sources"][self.version])
os.rename("openexr-{}".format(self.version), self._source_subfolder)
def _configure_cmake(self):
cmake = CMake(self)
cmake.definitions["PYILMBASE_ENABLE"] = False
cmake.definitions["OPENEXR_VIEWERS_ENABLE"] = False
cmake.definitions["OPENEXR_BUILD_BOTH_STATIC_SHARED"] = False
cmake.definitions["OPENEXR_BUILD_UTILS"] = False
cmake.definitions["BUILD_TESTING"] = False
cmake.configure()
return cmake
def _patch_files(self):
for lib in ("OpenEXR", "IlmBase"):
if self.settings.os == "Windows":
tools.replace_in_file(os.path.join(self._source_subfolder, lib, "config", "LibraryDefine.cmake"),
"${CMAKE_COMMAND} -E chdir ${CMAKE_INSTALL_FULL_LIBDIR}",
"${CMAKE_COMMAND} -E chdir ${CMAKE_INSTALL_FULL_BINDIR}")
if self.settings.build_type == "Debug":
tools.replace_in_file(os.path.join(self._source_subfolder, lib, "config", "LibraryDefine.cmake"),
"set(verlibname ${CMAKE_SHARED_LIBRARY_PREFIX}${libname}${@LIB@_LIB_SUFFIX}${CMAKE_SHARED_LIBRARY_SUFFIX})".replace("@LIB@", lib.upper()),
"set(verlibname ${CMAKE_SHARED_LIBRARY_PREFIX}${libname}${@LIB@_LIB_SUFFIX}_d${CMAKE_SHARED_LIBRARY_SUFFIX})".replace("@LIB@", lib.upper()))
tools.replace_in_file(os.path.join(self._source_subfolder, lib, "config", "LibraryDefine.cmake"),
"set(baselibname ${CMAKE_SHARED_LIBRARY_PREFIX}${libname}${CMAKE_SHARED_LIBRARY_SUFFIX})",
"set(baselibname ${CMAKE_SHARED_LIBRARY_PREFIX}${libname}_d${CMAKE_SHARED_LIBRARY_SUFFIX})")
def build(self):
self._patch_files()
cmake = self._configure_cmake()
cmake.build()
def package(self):
self.copy("LICENSE.md", src=self._source_subfolder, dst="licenses")
cmake = self._configure_cmake()
cmake.install()
tools.rmdir(os.path.join(self.package_folder, "share"))
tools.rmdir(os.path.join(self.package_folder, "lib", "pkgconfig"))
tools.rmdir(os.path.join(self.package_folder, "lib", "cmake"))
def package_info(self):
self.cpp_info.names["cmake_find_package"] = "OpenEXR"
self.cpp_info.names["cmake_find_package_multi"] = "OpenEXR"
parsed_version = self.version.split(".")
lib_suffix = "-{}_{}".format(parsed_version[0], parsed_version[1])
if self.settings.build_type == "Debug":
lib_suffix += "_d"
self.cpp_info.libs = ["IlmImf{}".format(lib_suffix),
"IlmImfUtil{}".format(lib_suffix),
"IlmThread{}".format(lib_suffix),
"Iex{}".format(lib_suffix),
"IexMath{}".format(lib_suffix),
"Imath{}".format(lib_suffix),
"Half{}".format(lib_suffix)]
self.cpp_info.includedirs = [os.path.join("include", "OpenEXR"), "include"]
if self.options.shared and self.settings.os == "Windows":
self.cpp_info.defines.append("OPENEXR_DLL")
if self.settings.os == "Linux":
self.cpp_info.system_libs.append("pthread")
|
py
|
1a59521ca65632d7af482c5fb08fe7172a5f8629
|
########################################
# Automatically generated, do not edit.
########################################
from pyvisdk.thirdparty import Enum
CustomizationNetBIOSMode = Enum(
'disableNetBIOS',
'enableNetBIOS',
'enableNetBIOSViaDhcp',
)
|
py
|
1a5952574188034408177c76411236718b0f9ece
|
"""SCons.Tool.386asm
Tool specification for the 386ASM assembler for the Phar Lap ETS embedded
operating system.
There normally shouldn't be any need to import this module directly.
It will usually be imported through the generic SCons.Tool.Tool()
selection method.
"""
#
# __COPYRIGHT__
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "__FILE__ __REVISION__ __DATE__ __DEVELOPER__"
from SCons.Tool.PharLapCommon import addPharLapPaths
import SCons.Util
as_module = __import__('as', globals(), locals(), [])
def generate(env):
"""Add Builders and construction variables for ar to an Environment."""
as_module.generate(env)
env['AS'] = '386asm'
env['ASFLAGS'] = SCons.Util.CLVar('')
env['ASPPFLAGS'] = '$ASFLAGS'
env['ASCOM'] = '$AS $ASFLAGS $SOURCES -o $TARGET'
env['ASPPCOM'] = '$CC $ASPPFLAGS $CPPFLAGS $_CPPDEFFLAGS $_CPPINCFLAGS $SOURCES -o $TARGET'
addPharLapPaths(env)
def exists(env):
return env.Detect('386asm')
|
py
|
1a5952e579c4415d63698a822231044e16a20cf3
|
import offline
import gen
import matplotlib.pyplot as plt
import networkx as nx
import sys
def DrawDenseGraph(graph):
"""
BRIEF When the graph is dense, circular is the way to go
"""
nx_graph = NetworkXGraph(graph)
nx.draw_circular(nx_graph)
plt.show()
complement = nx.complement(nx_graph)
nx.draw_circular(complement)
plt.show()
print('{0:<10} = {1}'.format('e(G)' , nx_graph.size()))
print('{0:<10} = {1}'.format('e(~G)', complement.size()))
sys.stdout.flush()
def DrawSparseGraph(graph):
"""
BRIEF Use spring for drawing a sparse graph
"""
nx_graph = NetworkXGraph(graph)
nx.draw_spring(nx_graph)
plt.show()
def NetworkXGraph(graph):
"""
BRIEF We'll always use this code to create a NetworkX graph
"""
nx_graph = nx.Graph()
for name in graph.nodes:
nx_graph.add_node(name)
for edge in graph.edges:
nx_graph.add_edge(*edge)
return nx_graph
if __name__ == '__main__':
"""
BRIEF Main execution - draw the superfoods graph
"""
graph = offline.Graph(gen.Read(gen.SUPERFOOD_FILE))
graph.SetEdges(offline.Euclidean, .5)
DrawDenseGraph(graph)
graph.SetEdges(offline.Euclidean, .3)
DrawSparseGraph(graph)
|
py
|
1a59535054eaa109f4f4e960849afe3f8defa169
|
import json
import uuid
import logging
import copy
from installed_clients.DataFileUtilClient import DataFileUtil
from installed_clients.KBaseReportClient import KBaseReport
class ImportEscherMapUtil:
@staticmethod
def validate_eschermap_params(params, expected, opt_param=set()):
"""
Validates that required parameters are present.
Warns if unexpected parameters appear
"""
expected = set(expected)
opt_param = set(opt_param)
pkeys = set(params)
if expected - pkeys:
raise ValueError("Required keys {} not in supplied parameters"
.format(", ".join(expected - pkeys)))
defined_param = expected | opt_param
for param in params:
if param not in defined_param:
logging.warning("Unexpected parameter {} supplied".format(param))
def _save_escher_map(self, escher_data, workspace_id, escher_map_name):
"""
save KBaseFBA.EscherMap to workspace
"""
logging.info('start saving KBaseFBA.EscherMap')
if not isinstance(workspace_id, int):
logging.warning('Invalid workspace ID: {}'.format(workspace_id))
try:
workspace_id = self.dfu.ws_name_to_id(workspace_id)
except Exception:
raise ValueError('Cannot convert {} to valid workspace id'.format(workspace_id))
info = self.dfu.save_objects({'id': workspace_id,
'objects': [{'type': 'KBaseFBA.EscherMap',
'data': escher_data,
'name': escher_map_name}]})[0]
return "%s/%s/%s" % (info[6], info[0], info[4])
def _refactor_escher_data(self, escher_data):
"""
refactor escher data to better fit KBaseFBA.EscherMap object
"""
logging.info('start refactoring escher data')
refactored_escher_data = copy.deepcopy(escher_data)
if refactored_escher_data == escher_data:
logging.warning('No changes in escher data')
return refactored_escher_data
def __init__(self, config):
self.callback_url = config['SDK_CALLBACK_URL']
self.token = config['KB_AUTH_TOKEN']
self.dfu = DataFileUtil(self.callback_url)
logging.basicConfig(format='%(created)s %(levelname)s: %(message)s',
level=logging.INFO)
def import_eschermap_from_staging(self, params):
"""
import_attribute_mapping_from_staging: import a JSON file as KBaseFBA.EscherMap
required params:
staging_file_subdir_path - subdirectory file path
e.g.
for file: /data/bulk/user_name/file_name
staging_file_subdir_path is file_name
for file: /data/bulk/user_name/subdir_1/subdir_2/file_name
staging_file_subdir_path is subdir_1/subdir_2/file_name
escher_map_name: output KBaseFBA.EscherMap object name
workspace_id: workspace ID
return:
obj_ref: return object reference
"""
self.validate_eschermap_params(params, ['staging_file_subdir_path', 'escher_map_name',
'workspace_id'])
download_staging_file_params = {
'staging_file_subdir_path': params.get('staging_file_subdir_path')
}
scratch_file_path = self.dfu.download_staging_file(
download_staging_file_params).get('copy_file_path')
try:
with open(scratch_file_path) as f:
escher_data = json.load(f)
except Exception:
raise ValueError('Failed to parse JSON file.')
escher_data = self._refactor_escher_data(escher_data)
obj_ref = self._save_escher_map(escher_data,
params['workspace_id'],
params['escher_map_name'])
returnVal = {'obj_ref': obj_ref}
return returnVal
def generate_report(self, obj_ref, params):
"""
generate_report: generate summary report
obj_ref: generated workspace object references.
"""
logging.info('start generating report')
upload_message = 'Import Finished\n'
get_objects_params = {'object_refs': [obj_ref],
'ignore_errors': False}
object_data = self.dfu.get_objects(get_objects_params)
upload_message += "Imported Escher Map Name: "
upload_message += str(object_data.get('data')[0].get('info')[1]) + '\n'
upload_message += 'Imported File: {}\n'.format(params['staging_file_subdir_path'])
report_params = {'message': upload_message,
'objects_created': [{'ref': obj_ref,
'description': 'Imported Escher Map'}],
'workspace_id': params['workspace_id'],
'report_object_name': 'kb_upload_methods_report_' + str(uuid.uuid4())}
kbase_report_client = KBaseReport(self.callback_url, token=self.token)
output = kbase_report_client.create_extended_report(report_params)
report_output = {'report_name': output['name'], 'report_ref': output['ref']}
return report_output
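# Hedged usage sketch (illustrative only): the config values, workspace id and staging
# path below are placeholders, and a live KBase callback service is required to run this.
#
#   util = ImportEscherMapUtil({'SDK_CALLBACK_URL': 'http://localhost:5000',
#                               'KB_AUTH_TOKEN': 'xxxx'})
#   result = util.import_eschermap_from_staging({
#       'staging_file_subdir_path': 'maps/central_metabolism.json',
#       'escher_map_name': 'central_metabolism_map',
#       'workspace_id': 12345,
#   })
#   report = util.generate_report(result['obj_ref'],
#                                 {'staging_file_subdir_path': 'maps/central_metabolism.json',
#                                  'workspace_id': 12345})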
|
py
|
1a59536c39ab2ef3862b64c88c14f56fc44585dd
|
# coding: utf-8
"""
OpenPerf API
REST API interface for OpenPerf # noqa: E501
OpenAPI spec version: 1
Contact: [email protected]
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class TrafficProtocolFieldModifier(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'list': 'list[int]',
'sequence': 'TrafficProtocolFieldModifierSequence'
}
attribute_map = {
'list': 'list',
'sequence': 'sequence'
}
def __init__(self, list=None, sequence=None): # noqa: E501
"""TrafficProtocolFieldModifier - a model defined in Swagger""" # noqa: E501
self._list = None
self._sequence = None
self.discriminator = None
if list is not None:
self.list = list
if sequence is not None:
self.sequence = sequence
@property
def list(self):
"""Gets the list of this TrafficProtocolFieldModifier. # noqa: E501
List of modifier values. Context determines what values are valid. # noqa: E501
:return: The list of this TrafficProtocolFieldModifier. # noqa: E501
:rtype: list[int]
"""
return self._list
@list.setter
def list(self, list):
"""Sets the list of this TrafficProtocolFieldModifier.
List of modifier values. Context determines what values are valid. # noqa: E501
:param list: The list of this TrafficProtocolFieldModifier. # noqa: E501
:type: list[int]
"""
self._list = list
@property
def sequence(self):
"""Gets the sequence of this TrafficProtocolFieldModifier. # noqa: E501
:return: The sequence of this TrafficProtocolFieldModifier. # noqa: E501
:rtype: TrafficProtocolFieldModifierSequence
"""
return self._sequence
@sequence.setter
def sequence(self, sequence):
"""Sets the sequence of this TrafficProtocolFieldModifier.
:param sequence: The sequence of this TrafficProtocolFieldModifier. # noqa: E501
:type: TrafficProtocolFieldModifierSequence
"""
self._sequence = sequence
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(TrafficProtocolFieldModifier, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, TrafficProtocolFieldModifier):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
|
py
|
1a5953838c7470f681219beb5f49921607f588f1
|
from datetime import datetime, timedelta, timezone
import unittest
import paniot
from . import mixin
class IotApiTest(mixin.AioMixin, unittest.IsolatedAsyncioTestCase):
async def test_01(self):
resp = await self.api.device(pagelength=0)
self.assertEqual(resp.status, 400)
async def test_02(self):
with self.assertRaises(paniot.ArgsError) as e:
resp = await self.api.device_details()
self.assertEqual(str(e.exception),
'deviceid or ip required')
async def test_03(self):
with self.assertRaises(paniot.ArgsError) as e:
resp = await self.api.device_details(
ip='x',
deviceid='x')
self.assertEqual(str(e.exception),
'deviceid and ip cannot be used at the same time')
async def test_04(self):
resp = await self.api.device_details(ip='x')
self.assertEqual(resp.status, 404)
async def test_05(self):
resp = await self.api.device_details(deviceid='x')
self.assertEqual(resp.status, 404)
async def test_06(self):
resp = await self.api.device(pagelength=1)
self.assertEqual(resp.status, 200)
x = await resp.json()
self.assertEqual(x['total'], 1)
self.assertEqual(len(x['devices']), 1)
key = 'number_of_caution_alerts'
self.assertNotIn(key, x['devices'][0],
'%s key requires detail' % key)
deviceid = x['devices'][0]['deviceid']
ip = x['devices'][0]['ip_address']
resp = await self.api.device_details(deviceid=deviceid)
self.assertEqual(resp.status, 200)
x = await resp.json()
self.assertEqual(x['deviceid'], deviceid)
resp = await self.api.device_details(ip=ip)
self.assertEqual(resp.status, 200)
x = await resp.json()
self.assertEqual(x['devices'][0]['ip_address'], ip)
async def test_07(self):
resp = await self.api.device(detail=True)
self.assertEqual(resp.status, 200)
x = await resp.json()
self.assertEqual(x['total'], len(x['devices']))
key = 'number_of_caution_alerts'
self.assertIn(key, x['devices'][0],
'%s key missing for detail' % key)
async def test_08(self):
d = datetime.now(tz=timezone.utc) + timedelta(seconds=10)
stime = d.strftime('%Y-%m-%dT%H:%M:%SZ')
resp = await self.api.device(stime=stime)
self.assertEqual(resp.status, 200)
x = await resp.json()
t = await resp.text()
msg = 'devices in future stime %s: ' % stime
msg += t
self.assertEqual(x['total'], 0, msg)
self.assertEqual(len(x['devices']), 0, msg)
async def test_09(self):
total = 0
async for ok, x in self.api.devices_all():
self.assertTrue(ok)
total += 1
if total > 1050:
break
|
py
|
1a59543477a58006bfa5e12eafdbdb460aba62a7
|
from __future__ import unicode_literals
from utils import CanadianScraper, CanadianPerson as Person
import re
import os
import subprocess
from pupa.scrape import Organization
from six.moves.urllib.request import urlopen
COUNCIL_PAGE = 'http://www.community.gov.yk.ca/pdf/loc_govdir.pdf'
class YukonMunicipalitiesPersonScraper(CanadianScraper):
def scrape(self):
        response = urlopen(COUNCIL_PAGE).read()
        # urlopen().read() and check_output() return bytes; write binary and decode so this also runs on Python 3
        pdf = open('/tmp/yt.pdf', 'wb')
        pdf.write(response)
        pdf.close()
        data = subprocess.check_output(['pdftotext', '-layout', '/tmp/yt.pdf', '-']).decode('utf-8')
        data = re.split(r'\n\s*\n', data)
for municipality in data:
if 'Councillors' not in municipality:
continue
lines = municipality.split('\n')
if 'Page' in lines[0]:
lines.pop(0)
if not lines[0].strip():
lines.pop(0)
col1end = re.search(r'\s{2,}(\w)', lines[0].strip()).end()
col2end = re.search(r':\s{2,}(\w)', lines[0].strip()).end()
if 'Council' in lines[1]:
address = lines[2][:col1end - 1].strip() + ' ' + lines[3][:col1end - 1].strip()
district = lines[0][:col1end - 1].strip() + ' ' + lines[1][:col1end - 1].strip()
else:
address = lines[1][:col1end - 1].strip() + ' ' + lines[2][:col1end - 1].strip()
district = lines[0][:col1end - 1].strip()
organization = Organization(name=district + ' Council', classification='legislature', jurisdiction_id=self.jurisdiction.jurisdiction_id)
organization.add_source(COUNCIL_PAGE)
yield organization
phone = re.findall(r'(?<=Phone: )\(?(\d{3}[\)-] ?\d{3}-\d{4})', municipality)[0].replace(') ', '-')
email = re.findall(r'(?<=E-mail:) (\S*)', municipality)[0]
fax = None
if 'Fax' in municipality:
fax = re.findall(r'(?<=Fax: )\(?(\d{3}[\)-] ?\d{3}-\d{4})', municipality)[0].replace(') ', '-')
website = None
if 'Website' in municipality:
website = re.findall(r'((http:\/\/|www.)(\S*))', municipality)[0][0]
councillor_or_mayor = False
for line in lines:
if 'Mayor:' in line:
councillor_or_mayor = True
role = 'Mayor'
continue
if 'Councillors' in line:
councillor_or_mayor = True
role = 'Councillor'
continue
if councillor_or_mayor:
councillor = line[col1end - 1:col2end - 1].strip()
if not councillor:
continue
p = Person(primary_org='legislature', name=councillor, district=district)
p.add_source(COUNCIL_PAGE)
membership = p.add_membership(organization, role=role, district=district)
membership.add_contact_detail('address', address, 'legislature')
membership.add_contact_detail('voice', phone, 'legislature')
membership.add_contact_detail('email', email)
if fax:
membership.add_contact_detail('fax', fax, 'legislature')
if website:
p.add_link(website)
yield p
os.system('rm /tmp/yt.pdf')
|
py
|
1a59565557ff172c1854ec5cda050ff65025b5ba
|
import tensorflow as tf
tf.compat.v1.disable_v2_behavior()
from src.datasets.newsgroups import NewsGroups
from src.models.cnn1 import getCNN1
from src.models.predict import predict_mcdropout
def build():
# config
RANDOM_STATE = 1
VOCAB_SIZE = 20000
MAX_SEQUENCE_LENGTH = 500
NUM_SPLITS = 5
# get data
newsGroups = NewsGroups()
X_train_set, y_train_set, X_test_set, y_test_set, X_val, y_val = newsGroups.getRankedDataSplits(
vocab_size=VOCAB_SIZE,
max_sequence_length=MAX_SEQUENCE_LENGTH,
n_splits=NUM_SPLITS,
test_size=4500,
random_state=RANDOM_STATE
)
    # load trained models
models_n = []
for i in range(NUM_SPLITS):
model = tf.keras.models.load_model(f'models/newsGroups/CNN1_BL_{i}')
models_n.append(model)
# predict
dfs = [predict_mcdropout(models_n[i], X_val, y_val) for i in range(NUM_SPLITS)]
    # save df
    name = 'CNN1_MCD'
    for i, df in enumerate(dfs):
        df.to_pickle(f"pickle/newsGroups/{name}_{i}.pkl")
|
py
|
1a59568042c273927b91ac4b3ac9e27574d653ed
|
import re
from dndme.commands import Command
from dndme.gametime import Date
class AdjustDate(Command):
keywords = ['date']
help_text = """{keyword}
{divider}
Summary: Query, set, or adjust the in-game date using the calendar
specified at startup.
Usage:
{keyword}
{keyword} <day> <month> [<year>]
{keyword} [+|-]<days>
Examples:
{keyword}
{keyword} 20 July
{keyword} 20 July 1969
{keyword} +7
{keyword} -10
"""
def get_suggestions(self, words):
calendar = self.game.calendar
if len(words) == 3:
return [month['name']
for month in calendar.cal_data['months'].values()]
def do_command(self, *args):
calendar = self.game.calendar
data = ' '.join(args)
if not data:
print(f"The date is {calendar}")
return
        m_adjustment = re.match(r'([+-]\d+)', data)
if m_adjustment:
days = int(m_adjustment.groups()[0])
calendar.adjust_date(days)
print(f"The date is now {calendar}")
self.game.changed = True
return
        m_set = re.match(r'(\d+) (\w+) *(\d*)', data)
if m_set:
day, month, year = m_set.groups()
day = int(day)
year = int(year) if year else calendar.date.year
calendar.set_date(Date(day, month, year))
print(f"The date is now {calendar}")
self.game.changed = True
return
print(f"Invalid date: {data}")
|
py
|
1a5956cc2508dabd6ecb0427bcda5006716e10b6
|
import copy
from itertools import zip_longest
from typing import Any, Callable, Dict, List, Optional
from pytorch_lightning import LightningModule, Trainer
from pytorch_lightning.callbacks import Callback
from pytorch_lightning.utilities import rank_zero_info
class PrintTableMetricsCallback(Callback):
"""
Prints a table with the metrics in columns on every epoch end
Example::
from pl_bolts.callbacks import PrintTableMetricsCallback
callback = PrintTableMetricsCallback()
Pass into trainer like so:
.. code-block:: python
trainer = pl.Trainer(callbacks=[callback])
trainer.fit(...)
# ------------------------------
# at the end of every epoch it will print
# ------------------------------
# loss│train_loss│val_loss│epoch
# ──────────────────────────────
# 2.2541470527648926│2.2541470527648926│2.2158432006835938│0
"""
def __init__(self) -> None:
self.metrics: List = []
def on_epoch_end(self, trainer: Trainer, pl_module: LightningModule) -> None:
metrics_dict = copy.copy(trainer.callback_metrics)
self.metrics.append(metrics_dict)
rank_zero_info(dicts_to_table(self.metrics))
def dicts_to_table(
dicts: List[Dict],
keys: Optional[List[str]] = None,
pads: Optional[List[str]] = None,
fcodes: Optional[List[str]] = None,
convert_headers: Optional[Dict[str, Callable]] = None,
header_names: Optional[List[str]] = None,
skip_none_lines: bool = False,
replace_values: Optional[Dict[str, Any]] = None
) -> str:
"""
Generate ascii table from dictionary
Taken from (https://stackoverflow.com/questions/40056747/print-a-list-of-dictionaries-in-table-form)
Args:
dicts: input dictionary list; empty lists make keys OR header_names mandatory
keys: order list of keys to generate columns for; no key/dict-key should
suffix with '____' else adjust code-suffix
pads: indicate padding direction and size, eg <10 to right pad alias left-align
        fcodes: formatting codes for respective column type, eg .3f
convert_headers: apply converters(dict) on column keys k, eg timestamps
header_names: supply for custom column headers instead of keys
skip_none_lines: skip line if contains None
replace_values: specify per column keys k a map from seen value to new value;
new value must comply with the columns fcode; CAUTION: modifies input (due speed)
Example:
>>> a = {'a': 1, 'b': 2}
>>> b = {'a': 3, 'b': 4}
>>> print(dicts_to_table([a, b]))
a│b
───
1│2
3│4
"""
# optional arg prelude
if keys is None:
if len(dicts) > 0:
keys = dicts[0].keys() # type: ignore[assignment]
elif header_names is not None:
keys = header_names
else:
raise ValueError('keys or header_names mandatory on empty input list')
if pads is None:
pads = [''] * len(keys) # type: ignore[arg-type]
elif len(pads) != len(keys): # type: ignore[arg-type]
raise ValueError(f'bad pad length {len(pads)}, expected: {len(keys)}') # type: ignore[arg-type]
if fcodes is None:
fcodes = [''] * len(keys) # type: ignore[arg-type]
    elif len(fcodes) != len(keys):  # type: ignore[arg-type]
raise ValueError(f'bad fcodes length {len(fcodes)}, expected: {len(keys)}') # type: ignore[arg-type]
if convert_headers is None:
convert_headers = {}
if header_names is None:
header_names = keys
if replace_values is None:
replace_values = {}
# build header
headline = '│'.join(f"{v:{pad}}" for v, pad in zip_longest(header_names, pads)) # type: ignore[arg-type]
underline = '─' * len(headline)
# suffix special keys to apply converters to later on
marked_keys = [h + '____' if h in convert_headers else h for h in keys] # type: ignore[union-attr]
marked_values = {}
s = '│'.join(f"{{{h}:{pad}{fcode}}}" for h, pad, fcode in zip_longest(marked_keys, pads, fcodes))
lines = [headline, underline]
for d in dicts:
none_keys = [k for k, v in d.items() if v is None]
if skip_none_lines and none_keys:
continue
elif replace_values:
for k in d.keys():
if k in replace_values and d[k] in replace_values[k]:
d[k] = replace_values[k][d[k]]
if d[k] is None:
raise ValueError(f"bad or no mapping for key '{k}' is None. Use skip or change replace mapping.")
elif none_keys:
raise ValueError(f'keys {none_keys} are None in {d}. Do skip or use replace mapping.')
for h in convert_headers:
if h in keys: # type: ignore[operator]
converter = convert_headers[h]
marked_values[h + '____'] = converter(d)
line = s.format(**d, **marked_values)
lines.append(line)
return '\n'.join(lines)
|
py
|
1a59578da09bc4df60befc2f2eb4c55ec33c6fa0
|
#!/usr/bin/python
import sys
import rpc
from muduo.protorpc2 import rpc2_pb2
import nqueens_pb2
channel = rpc.SyncRpcChannel(('localhost', 9352))
server = nqueens_pb2.NQueensService_Stub(channel)
if len(sys.argv) < 3:
print "Usage: nqueens_cli.py nqueens first_row [second_row]"
else:
request = nqueens_pb2.SubProblemRequest()
request.nqueens = int(sys.argv[1])
request.first_row = int(sys.argv[2])
if len(sys.argv) >= 4:
request.second_row = int(sys.argv[3])
print server.Solve(None, request)
|
py
|
1a5958d0d1ecc330f0373d40bc693095922ce56f
|
# Prepares spreadsheet summarizing email schedulings.
#
# summarize_all_time() prepares two tabs:
# "Summary All-Time" tab: Aggregates counts by day and by 'summary_all_time_group_by_fields', a comma-separated list of payload fields. For example,
# 'event_state, event_url, event_title, event_type, event_start_timestamp_local'
#
# "Summary By Week" tab: Total scheduling count by week
#
# forecast() prepares one tab, all schedulings on date 'ds', one row per
# recipient on date 'ds', up to 'limit' recipients.
#
# Other inputs:
# 'mailing_name', e.g. 'event_invite'
# 'output_sheet', e.g. 'https://docs.google.com/spreadsheets/d/1KcZIW6piCZ60GR68KTN_UJB5wpfIh8Idc2b2E-7enFs'
import asyncio
import datetime
import pandas as pd
class BsdTriggeredEmailForecast:
FROM_SCHEDULINGS_JOIN_PAYLOADS = """
FROM "{schema}"."triggered_email_schedulings"
JOIN "{schema}"."triggered_email_payloads_{mailing_name}"
ON "triggered_email_payloads_{mailing_name}".ds = "triggered_email_schedulings".ds
AND (("triggered_email_payloads_{mailing_name}".cons_id IS NULL AND "triggered_email_schedulings".cons_id IS NULL)
OR "triggered_email_payloads_{mailing_name}".cons_id = "triggered_email_schedulings".cons_id)
AND "triggered_email_payloads_{mailing_name}".email = "triggered_email_schedulings".email
AND (("triggered_email_payloads_{mailing_name}".secondary_id IS NULL AND "triggered_email_schedulings".secondary_id IS NULL)
OR "triggered_email_payloads_{mailing_name}".secondary_id = "triggered_email_schedulings".secondary_id)
"""
GET_SCHEDULINGS_SQL = """
SELECT
"triggered_email_schedulings".email
, {output_fields}
FROM_AND_JOIN_GOES_HERE
WHERE "triggered_email_schedulings".ds = '{ds}'
AND "triggered_email_schedulings".mailing_name = '{mailing_name}'
ORDER BY "triggered_email_schedulings".email
, "triggered_email_schedulings".secondary_id
, "triggered_email_schedulings".scheduled_at
LIMIT {limit}
;
""".replace(
"FROM_AND_JOIN_GOES_HERE", FROM_SCHEDULINGS_JOIN_PAYLOADS
)
GET_SUMMARY_ALL_TIME_SQL = """
SELECT
"triggered_email_schedulings".ds
, {summary_all_time_group_by_fields}
, COUNT(*) AS cons_count
FROM_AND_JOIN_GOES_HERE
WHERE "triggered_email_schedulings".mailing_name = '{mailing_name}'
GROUP BY "triggered_email_schedulings".ds, {summary_all_time_group_by_fields}
ORDER BY 1 DESC, cons_count DESC
;
""".replace(
"FROM_AND_JOIN_GOES_HERE", FROM_SCHEDULINGS_JOIN_PAYLOADS
)
GET_SUMMARY_BY_WEEK_SQL = """
SELECT
DATE_TRUNC('w', "triggered_email_schedulings".ds) AS week_begin
, COUNT(*) AS cons_count
FROM_AND_JOIN_GOES_HERE
WHERE "triggered_email_schedulings".mailing_name = '{mailing_name}'
GROUP BY DATE_TRUNC('w', "triggered_email_schedulings".ds)
ORDER BY 1 DESC
;
""".replace(
"FROM_AND_JOIN_GOES_HERE", FROM_SCHEDULINGS_JOIN_PAYLOADS
)
TAB_NAME_SUMMARY_ALL_TIME = "Summary All-Time"
TAB_NAME_SUMMARY_BY_WEEK = "Summary By Week"
def __init__(self, civis, schema, caliban):
self.civis = civis
self.schema = schema
self.caliban = caliban
def forecast(self, ds, mailing_name, output_sheet, output_fields, tab_name, limit):
schedulings = self.get_schedulings(ds, mailing_name, output_fields, limit)
if schedulings.empty:
return
self.caliban.export_to_worksheets(output_sheet, tab_name, schedulings)
def summarize_all_time(
self, mailing_name, output_sheet, summary_all_time_group_by_fields
):
if not summary_all_time_group_by_fields:
return
summary = self.get_summary_all_time(
mailing_name, summary_all_time_group_by_fields
)
self.caliban.export_to_worksheets(
output_sheet, self.TAB_NAME_SUMMARY_ALL_TIME, summary
)
summary_by_week = self.get_summary_by_week(mailing_name)
self.caliban.export_to_worksheets(
output_sheet, self.TAB_NAME_SUMMARY_BY_WEEK, summary_by_week
)
def get_schedulings(self, ds, mailing_name, output_fields, limit):
query = self.GET_SCHEDULINGS_SQL.format(
ds=ds,
mailing_name=mailing_name,
schema=self.schema,
output_fields=output_fields,
limit=limit,
)
print("get_schedulings query:")
print(query)
df = asyncio.run(self.civis.read_civis_sql(query))
print(f"Got {len(df)} schedulings for {mailing_name} on {ds}:")
print(df)
return df
def get_summary_all_time(self, mailing_name, summary_all_time_group_by_fields):
query = self.GET_SUMMARY_ALL_TIME_SQL.format(
mailing_name=mailing_name,
schema=self.schema,
summary_all_time_group_by_fields=summary_all_time_group_by_fields,
)
print("get_summary_all_time query:")
print(query)
df = asyncio.run(self.civis.read_civis_sql(query))
print(f"Got summary all-time for {mailing_name}:")
print(df)
return df
def get_summary_by_week(self, mailing_name):
query = self.GET_SUMMARY_BY_WEEK_SQL.format(
mailing_name=mailing_name, schema=self.schema
)
print("get_summary_by_week query:")
print(query)
df = asyncio.run(self.civis.read_civis_sql(query))
print(f"Got summary by week for {mailing_name}:")
print(df)
return df
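# Hedged usage sketch (illustrative only): the civis/caliban clients, schema name and
# sheet URL are placeholders supplied by the surrounding pipeline, not by this module.
#
#   forecaster = BsdTriggeredEmailForecast(civis, "analytics", caliban)
#   forecaster.summarize_all_time(
#       mailing_name="event_invite",
#       output_sheet="https://docs.google.com/spreadsheets/d/...",
#       summary_all_time_group_by_fields="event_state, event_type",
#   )
#   forecaster.forecast(
#       ds=datetime.date.today().isoformat(),
#       mailing_name="event_invite",
#       output_sheet="https://docs.google.com/spreadsheets/d/...",
#       output_fields='"triggered_email_schedulings".scheduled_at',
#       tab_name="Forecast",
#       limit=1000,
#   )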
|
py
|
1a595a0f8eca595b5f995f26b35fb3ab3f9dd2f9
|
"""
Authors: Pratik Bhatu.
Copyright:
Copyright (c) 2021 Microsoft Research
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import argparse
from argparse import RawTextHelpFormatter
import os
import os.path
import sys
import json
from RandomForests.convert_pickle_to_graphviz import convert_pickle_to_graphviz
from RandomForests.parse_graphviz_to_ezpc_input import parse_graphviz_to_ezpc_input
from RandomForests.patch_ezpc_code_params import patch_ezpc_code_params
def parse_args():
parser = argparse.ArgumentParser(formatter_class=RawTextHelpFormatter)
parser.add_argument(
"--task",
required=False,
type=str,
choices=["cla", "reg"],
help="""Choose cla for classificatin.
Choose reg for regression.
""",
)
parser.add_argument(
"--no_features",
required=False,
type=int,
help="Number of features in the dataset.",
)
parser.add_argument(
"--model_type",
required=False,
type=str,
choices=["tree", "forest"],
help="""Choose tree for decision tree.
Choose forest for random forest.
""",
)
parser.add_argument(
"--pickle",
required=False,
type=str,
help="Path to the pickle file",
)
parser.add_argument(
"--scale",
required=False,
type=int,
default=10,
help="Scaling factor for float -> fixedpt.",
)
parser.add_argument(
"--bitlen",
required=False,
type=int,
default=64,
choices=[32, 64],
help="Bit length to compile for.",
)
parser.add_argument(
"--role",
required=True,
type=str,
choices=["server", "client"],
default="server",
help="Pickle file owner is server, data owner is client",
)
parser.add_argument(
"--config",
required=False,
type=str,
help="Path to the client config file",
)
args = parser.parse_args()
return args
if __name__ == "__main__":
args = parse_args()
if args.role == "server":
if args.pickle is None:
print("Path to pickle file not specified. See --help for options")
if args.model_type is None:
print("Model type not specified. See --help for options")
if args.no_features is None:
print("Number of features not specified. See --help for options.")
if args.task is None:
print("Task is not specified. See --help for options.")
if None in [args.pickle, args.model_type, args.no_features, args.task]:
sys.exit()
else:
if args.config is None:
print(
"Path to the client config file not specified. See --help for options"
)
sys.exit()
# args.task, args.model_type, args.no_features, args.pickle, args.scale, args.bitlen, args.config
if args.role == "server":
if not os.path.isfile(args.pickle):
sys.exit("Pickle file (" + args.pickle + ") specified does not exist")
pickle_dir = os.path.dirname(os.path.abspath(args.pickle))
build_dir = os.path.join(pickle_dir, "ezpc_build_dir")
os.system("rm -rf {build_dir}".format(build_dir=build_dir))
os.mkdir(build_dir)
# Dumps tree0, tree1, ..treeN.txt
no_of_estim = convert_pickle_to_graphviz(
args.pickle, args.task, args.model_type, build_dir
)
max_tree_depth = -1
for i in range(0, no_of_estim):
tree_file_path = os.path.join(build_dir, "tree" + str(i) + ".txt")
max_depth = parse_graphviz_to_ezpc_input(
tree_file_path, args.task, args.scale
)
max_tree_depth = max(max_tree_depth, max_depth)
print("Parsed all trees in Random Forest")
no_features = args.no_features
scale = args.scale
bitlen = args.bitlen
client_json = {
"no_of_trees": no_of_estim,
"depth": max_tree_depth,
"no_of_features": no_features,
"scale": scale,
"bitlen": bitlen,
}
json_path = os.path.join(build_dir, "client.json")
with open(json_path, "w") as f:
json.dump(client_json, f)
else:
if not os.path.isfile(args.config):
sys.exit("Config file (" + args.config + ") specified does not exist")
with open(args.config) as f:
client_json = json.load(f)
no_of_estim = client_json["no_of_trees"]
max_tree_depth = client_json["depth"]
no_features = client_json["no_of_features"]
scale = client_json["scale"]
bitlen = client_json["bitlen"]
config_dir = os.path.dirname(os.path.abspath(args.config))
build_dir = os.path.join(config_dir, "ezpc_build_dir")
os.system("rm -rf {build_dir}".format(build_dir=build_dir))
os.mkdir(build_dir)
ezpc_file_name = "random_forest.ezpc"
output_path = os.path.join(build_dir, ezpc_file_name)
patch_ezpc_code_params(no_of_estim, max_tree_depth, no_features, scale, output_path)
athos_dir = os.path.dirname(os.path.abspath(__file__))
ezpc_dir = os.path.join(athos_dir, "../EzPC/EzPC/")
os.system('cp "{ezpc}" "{ezpc_dir}"'.format(ezpc=output_path, ezpc_dir=ezpc_dir))
os.chdir(ezpc_dir)
ezpc_args = ""
ezpc_args = "--bitlen {bl} --codegen {target} ".format(bl=bitlen, target="ABY")
output_name = "random_forest0.cpp"
os.system(
'eval `opam config env`; ./ezpc.sh "{}" '.format(ezpc_file_name) + ezpc_args
)
os.system("./compile_aby.sh {}".format(output_name))
output_binary_path = os.path.join(build_dir, "random_forest")
os.system(
'mv "{bin}" "{op_bin}"'.format(bin="random_forest0", op_bin=output_binary_path)
)
print("\n\n")
print("Compiled binary: " + output_binary_path)
if args.role == "server":
model_weights = "weight_sf_" + str(scale) + ".inp"
weights_path = os.path.join(build_dir, model_weights)
print("Model weights dumped in " + weights_path)
print("Send client.json to the client machine. Path: ", json_path)
print("\n\n")
|
py
|
1a595a25bb4c92a41d2344444840d303bab334c6
|
"""
Module containing all the utilities to compute and integrate the
recipe water footprint into the recommender system.
"""
import pandas as pd
from configuration import load_configuration
class WaterFootprintUtils:
"""
Class that represent utilities for the water footprint
reduction. This class provides a method for computing
the user score based on his reviews and orders.
It also provides a method for reducing the given
recommendations for the user.
"""
def __init__(self):
config = load_configuration()
self.orders = pd.read_pickle(config["path_orders"])
self.recipes = pd.read_pickle(config["path_recipes"])
self.user_scores = pd.read_pickle(config["path_user_scores"])
self.classes = ["A", "B", "C", "D", "E"]
def __get_recipe_class_to_recommend(self, user_score):
"""
Get the recipe categories to recommend based on the user score.
The categories are lower or equal than the user score.
:param user_score: the score of the user.
:return: a list containing the categories of the recipe to
recommend.
"""
return self.classes[:self.classes.index(user_score)+1]
def __get_recipe_class(self, recipe_id):
"""
Get the category of the recipe from its id.
:param recipe_id: the id of the recipe.
:return: the category of the recipe if exists.
"""
category = self.recipes.query(f"id == {recipe_id}")["category"].tolist()
return category[0] if category else None
def __get_user_score(self, user_id):
"""
Get the score of the user based on his reviews.
User orders are summed and weighted based on their
categories. Then based on the result the user score
is found.
:param user_id: the id of the user.
:return: the user score.
"""
score = self.user_scores.query(f"user_id == {user_id}")["score"].tolist()
return score[0] if score else None
def __get_recipe_category(self, recipe_id):
"""
Return the category of the recipe row from the
dataframe based on the recipe id.
:param recipe_id: the id of the recipe.
:return: the category of the recipe at the provided id.
"""
recipe = self.recipes.query(f"id == {recipe_id}")["category"].tolist()
return recipe[0] if recipe else "F"
def get_recommendations_correct(self, recommendations, user_id, algo_type):
"""
Get the correct recipe recommendations from a list of
recommendations ids based on the user score and the
type of the algorithm.
:param recommendations: a list containing all the recommended recipes.
:param user_id: the id of the user.
:param algo_type: the type of the algorithm
(Content Based or Collaborative Filtering)
:return: a list containing all the recipes filtered by
water footprint.
"""
user_score = self.__get_user_score(user_id)
class_to_rec = self.__get_recipe_class_to_recommend(user_score)
return (
[
rec
for rec in recommendations
if self.recipes["category"][rec] in class_to_rec
]
if algo_type == "cb"
else [
recipe_id
for recipe_id in recommendations
# if self.recipes.query(f"id == {recipe_id}")["category"].tolist()[0] in class_to_rec
if self.__get_recipe_category(recipe_id) in class_to_rec
]
)
if __name__ == "__main__":
wf = WaterFootprintUtils()
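    # Hedged usage sketch: the recipe ids and user id below are placeholders, and the
    # pickled dataframes referenced by the configuration must exist for this to run.
    # recommendations = wf.get_recommendations_correct([101, 102, 103], user_id=1, algo_type="cb")
    # print(recommendations)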
|
py
|
1a595ad918d2d446187d9f8ae72d260a1ff7e8af
|
#!/usr/bin/env python
"""
CREATED AT: 2021/8/18
Des:
https://leetcode.com/problems/decode-ways/
https://leetcode.com/explore/challenge/card/august-leetcoding-challenge-2021/615/week-3-august-15th-august-21st/3902/
GITHUB: https://github.com/Jiezhi/myleetcode
"""
from functools import lru_cache
from tool import print_results
class Solution:
@print_results
def numDecodings(self, s: str) -> int:
"""
269 / 269 test cases passed.
Status: Accepted
Runtime: 36 ms
Memory Usage: 14.6 MB
:param s:
:return:
"""
@lru_cache(None)
def dp(i):
# reach the end, the solution is ok
if i == len(s):
return 1
# encounter a zero at the head, the solution is invalid.
if s[i] == '0':
return 0
ans = dp(i + 1)
if i + 1 < len(s) and int(s[i:i + 2]) < 27:
ans += dp(i + 2)
return ans
return dp(0)
def test():
assert Solution().numDecodings(s="12") == 2
assert Solution().numDecodings(s="11106") == 2
assert Solution().numDecodings(s="226") == 3
assert Solution().numDecodings(s="0") == 0
assert Solution().numDecodings(s="06") == 0
if __name__ == '__main__':
test()
|
py
|
1a595b22db22b2626977d4e985ac1c866602daab
|
import time
import json
import logging
import threading
from queue import Queue
from accounts import data_get_all, check_session, login
from connections import get, post
from utils import load_account, create_path, intervals
logging.basicConfig(
format='[%(asctime)s][%(levelname)s]: %(message)s',
level=logging.DEBUG, datefmt='%d/%b/%Y:%H:%M:%S'
)
# Set third-party loggers to logging.INFO so the output is not flooded with logging.DEBUG messages
for logs in logging.Logger.manager.loggerDict:
logging.getLogger(logs).setLevel(logging.INFO)
class Stacher:
def __init__(self, email, password, save_path=None, exclude=[]):
self.email = email
self.password = password
self.path = save_path
self.exclude = []
for gameworld in exclude:
self.exclude.append(gameworld.lower())
self.account = self.check_account()
self.start()
def start(self):
avatar_pool = {}
while True:
logging.info('check avatar.')
lobby_details = data_get_all(self.account)
avatars = [avatar for caches in lobby_details['cache']
if 'Collection:Avatar:' in caches['name']
for avatar in caches['data']['cache']
]
for avatar in avatars:
if avatar['data']['consumersId'] not in avatar_pool:
if avatar['data']['worldName'].lower() not in self.exclude:
av = self.account.build_avatar(
avatar['data']['worldName'],
avatar['data']['consumersId'],
self.get_ranking,
self.path
)
avatar_pool[avatar['data']['consumersId']] = av
# starting avatar
for gi in avatar_pool:
try:
avatar_pool[gi].start()
except Exception as e:
logging.debug(f'{e}')
continue
# sleeping
interval = intervals(10)
logging.info(f'Stacher sleeping:{interval//60}:{interval%60}')
time.sleep(interval)
def check_account(self):
try:
account = load_account()
if self.test_login(account):
account = login(self.email, self.password)
logging.info(f'Welcome!!! {account.details["avatarName"]}')
else:
logging.info(f'Welcome back!! {account.details["avatarName"]}')
except FileNotFoundError:
account = login(self.email, self.password)
logging.info(f'Welcome!!! {account.details["avatarName"]}')
finally:
return account
@staticmethod
def test_login(account):
return 'error' in check_session(account, state='lobby')
@staticmethod
def stacher_thread(task, ranking_type,
ranking_subtype, avatar, url):
while True:
start, end, results = task.get()
if start is None:
break
try:
data = {
'controller': 'ranking',
'action': 'getRanking',
'params': {
'start': start,
'end': end,
'rankingType': ranking_type,
'rankingSubtype': ranking_subtype
},
'session': avatar.session_gameworld
}
r = post(url+f'c=ranking&a=getRanking&t{(time.time()*1000):.0f}',
headers=avatar.headers_gameworld,
json=data,
cookies=avatar.cookies_gameworld,
timeout=60
)
results.extend(r.json()['response']['results'])
except Exception as e:
logging.debug(f'{e}')
finally:
task.task_done()
@staticmethod
def get_ranking(avatar, ranking_type,
ranking_subtype, table_name):
# get total player
url = avatar.lobby_api
data = {
'controller': 'cache',
'action': 'get',
'params': {
'names': [f'GameWorld:{avatar.gameworld_id}']
},
'session': avatar.session_lobby
}
r = post(url,
headers=avatar.headers_lobby,
json=data,
cookies=avatar.cookies_lobby,
timeout=60
)
total_player = int(r.json()['cache'][0]['data']['playersRegistered'])
# prepare thread
url = avatar.gameworld_api
start, end = 0, 9
results = []
threads = []
task = Queue()
for _ in range(2):
worker = threading.Thread(target=Stacher.stacher_thread,
args=(task, ranking_type,
ranking_subtype, avatar, url
)
)
worker.start()
threads.append(worker)
# dispatch thread
for _ in range((total_player//10)+1):
task.put((start, end, results))
time.sleep(0.1)
start, end = start+10, end+10
# threading done
task.join()
for _ in range(2):
task.put((None, None, None))
for t in threads:
t.join()
# save results
path = create_path(avatar.gameworld.lower(),
avatar.gameworld_id,
avatar.path
)
try:
            with open(path, 'r') as cache_file:
                cache = json.load(cache_file)
try:
cache[table_name]
except KeyError:
cache[table_name] = {}
except FileNotFoundError:
cache = {}
cache[table_name] = {}
result = (line for line in results)
data = (
{
x['playerId']: {
'name': x['name'],
'data': [{
'timestamp': time.time(),
'points': x['points']
}]
}
} for x in result
)
for x in data:
for pid in x:
if pid in cache[table_name]:
cache[table_name][pid]['data'].extend(x[pid]['data'])
else:
cache[table_name][pid] = x[pid]
with open(path, 'w') as f:
f.write(json.dumps(cache, indent=4))
logging.info(f'{table_name} on {avatar.gameworld} done.')
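# Hedged usage sketch (credentials, save path and gameworld names are placeholders):
#
#   if __name__ == '__main__':
#       Stacher('[email protected]', 'secret', save_path='data/', exclude=['SomeGameworld'])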
|
py
|
1a595b6226a9d4a7e1dd50102172c729ed28e5d1
|
#!/usr/bin/env python3
import copy
import unittest
from psyml.models import Parameter
MINIMAL_PARAM = {
"name": "some-name",
"description": "some desc",
"type": "String",
"value": "some-value",
}
class TestParameter(unittest.TestCase):
def setUp(self):
"""Monkey patch encrypt/decrypt methods to avoid KMS usage in tests."""
de = lambda _, value: value.split("-")[1]
en = lambda name, value: f"{name}^{value}"
import psyml.models
psyml.models.encrypt_with_psyml = en
psyml.models.decrypt_with_psyml = de
def test_minimal(self):
param = Parameter(MINIMAL_PARAM)
self.assertEqual(str(param), "<Parameter: some-name>")
self.assertEqual(param.name, "some-name")
self.assertEqual(param.description, "some desc")
self.assertEqual(param.type_, "String")
self.assertEqual(param.value, "some-value")
def test_invalid_type(self):
with self.assertRaises(AssertionError) as err:
psyml = Parameter(["a", "b"])
self.assertEqual(err.exception.args[0], "Invalid type for parameters")
def test_invalid_field(self):
param = copy.deepcopy(MINIMAL_PARAM)
del param["description"]
with self.assertRaises(AssertionError) as err:
Parameter(param)
self.assertEqual(
err.exception.args[0], "Invalid/missing parameter field"
)
param["description"] = "some desc"
parameter = Parameter(param)
self.assertEqual(parameter.description, "some desc")
param["extra"] = "foo"
with self.assertRaises(AssertionError) as err:
Parameter(param)
self.assertEqual(
err.exception.args[0], "Invalid/missing parameter field"
)
    def test_invalid_field_type(self):
param = copy.deepcopy(MINIMAL_PARAM)
param["description"] = 3
with self.assertRaises(AssertionError) as err:
Parameter(param)
self.assertEqual(err.exception.args[0], "Invalid parameter type")
param["description"] = "test"
param["type"] = "invalid-type"
with self.assertRaises(AssertionError) as err:
Parameter(param)
self.assertEqual(err.exception.args[0], "Invalid type in parameter")
def test_value_type_conversion(self):
param = copy.deepcopy(MINIMAL_PARAM)
param["value"] = 3
parameter = Parameter(param)
self.assertEqual(parameter.value, "3")
def test_decrypted_value(self):
param = copy.deepcopy(MINIMAL_PARAM)
parameter = Parameter(param)
self.assertEqual(parameter.decrypted_value, "some-value")
param = copy.deepcopy(MINIMAL_PARAM)
param["value"] = 42
parameter = Parameter(param)
self.assertEqual(parameter.decrypted_value, "42")
param = copy.deepcopy(MINIMAL_PARAM)
param["type"] = "SecureString"
parameter = Parameter(param)
self.assertEqual(parameter.decrypted_value, "some-value")
param = copy.deepcopy(MINIMAL_PARAM)
param["type"] = "securestring"
parameter = Parameter(param)
self.assertEqual(parameter.decrypted_value, "value")
def test_encrypted(self):
param = copy.deepcopy(MINIMAL_PARAM)
parameter = Parameter(param)
self.assertEqual(
parameter.encrypted,
{
"name": "some-name",
"description": "some desc",
"type": "string",
"value": "some-value",
},
)
param = copy.deepcopy(MINIMAL_PARAM)
param["type"] = "securestring"
parameter = Parameter(param)
self.assertEqual(
parameter.encrypted,
{
"name": "some-name",
"description": "some desc",
"type": "securestring",
"value": "some-value",
},
)
param = copy.deepcopy(MINIMAL_PARAM)
param["type"] = "SecureString"
parameter = Parameter(param)
self.assertEqual(
parameter.encrypted,
{
"name": "some-name",
"description": "some desc",
"type": "securestring",
"value": "some-name^some-value",
},
)
def test_decrypted(self):
param = copy.deepcopy(MINIMAL_PARAM)
parameter = Parameter(param)
self.assertEqual(
parameter.decrypted,
{
"name": "some-name",
"description": "some desc",
"type": "String",
"value": "some-value",
},
)
param = copy.deepcopy(MINIMAL_PARAM)
param["type"] = "securestring"
parameter = Parameter(param)
self.assertEqual(
parameter.decrypted,
{
"name": "some-name",
"description": "some desc",
"type": "SecureString",
"value": "value",
},
)
param = copy.deepcopy(MINIMAL_PARAM)
param["type"] = "SecureString"
parameter = Parameter(param)
self.assertEqual(
parameter.decrypted,
{
"name": "some-name",
"description": "some desc",
"type": "SecureString",
"value": "some-value",
},
)
def test_re_encrypted(self):
param = copy.deepcopy(MINIMAL_PARAM)
parameter = Parameter(param)
self.assertEqual(
parameter.re_encrypted,
{
"name": "some-name",
"description": "some desc",
"type": "string",
"value": "some-value",
},
)
param = copy.deepcopy(MINIMAL_PARAM)
param["type"] = "securestring"
parameter = Parameter(param)
self.assertEqual(
parameter.re_encrypted,
{
"name": "some-name",
"description": "some desc",
"type": "securestring",
"value": "some-name^value",
},
)
param = copy.deepcopy(MINIMAL_PARAM)
param["type"] = "SecureString"
parameter = Parameter(param)
self.assertEqual(
parameter.re_encrypted,
{
"name": "some-name",
"description": "some desc",
"type": "securestring",
"value": "some-name^some-value",
},
)
def test_export(self):
param = copy.deepcopy(MINIMAL_PARAM)
parameter = Parameter(param)
self.assertEqual(parameter.export, "export SOME_NAME=some-value")
param = copy.deepcopy(MINIMAL_PARAM)
param["value"] = "test'value"
parameter = Parameter(param)
self.assertEqual(
parameter.export, """export SOME_NAME='test'"'"'value'"""
)
|
py
|
1a595c68eb7b6e08515b0a7d2492d8513193e26c
|
from django.test import TestCase
from django.contrib.auth import get_user_model
class ModelTests(TestCase):
def test_create_user_with_email_successful(self):
"""Test creating a new user with an email is successful"""
email = '[email protected]'
password = 'Testpass123'
user = get_user_model().objects.create_user(
email=email,
password=password
)
self.assertEqual(user.email, email)
self.assertTrue(user.check_password(password))
def test_new_user_email_normalized(self):
"""Test the email for a new user is normalized"""
email = '[email protected]'
user = get_user_model().objects.create_user(email,'test123')
self.assertEqual(user.email,email.lower())
def test_new_user_invalid_email(self):
"""Test creating user with no email raises error"""
with self.assertRaises(ValueError):
get_user_model().objects.create_user(None,'test123')
def test_create_new_superuser(self):
"""Test creating a new superuser"""
user = get_user_model().objects.create_superuser(
'[email protected]',
'test123'
)
self.assertTrue(user.is_superuser)
self.assertTrue(user.is_staff)
|
py
|
1a595cf637ae7e17269271e2f7459670080b2a35
|
"""ANTLR3 exception hierarchy"""
# begin[licence]
#
# [The "BSD licence"]
# Copyright (c) 2005-2008 Terence Parr
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. The name of the author may not be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
# OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
# NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# end[licence]
from .constants import INVALID_TOKEN_TYPE
class BacktrackingFailed(Exception):
"""@brief Raised to signal failed backtrack attempt"""
pass
class RecognitionException(Exception):
"""@brief The root of the ANTLR exception hierarchy.
To avoid English-only error messages and to generally make things
as flexible as possible, these exceptions are not created with strings,
but rather the information necessary to generate an error. Then
the various reporting methods in Parser and Lexer can be overridden
to generate a localized error message. For example, MismatchedToken
exceptions are built with the expected token type.
So, don't expect getMessage() to return anything.
Note that as of Java 1.4, you can access the stack trace, which means
that you can compute the complete trace of rules from the start symbol.
This gives you considerable context information with which to generate
useful error messages.
ANTLR generates code that throws exceptions upon recognition error and
also generates code to catch these exceptions in each rule. If you
want to quit upon first error, you can turn off the automatic error
handling mechanism using rulecatch action, but you still need to
override methods mismatch and recoverFromMismatchSet.
In general, the recognition exceptions can track where in a grammar a
problem occurred and/or what was the expected input. While the parser
knows its state (such as current input symbol and line info) that
state can change before the exception is reported so current token index
is computed and stored at exception time. From this info, you can
perhaps print an entire line of input not just a single token, for example.
Better to just say the recognizer had a problem and then let the parser
figure out a fancy report.
"""
def __init__(self, input=None):
Exception.__init__(self)
# What input stream did the error occur in?
self.input = None
# What is index of token/char were we looking at when the error
# occurred?
self.index = None
# The current Token when an error occurred. Since not all streams
# can retrieve the ith Token, we have to track the Token object.
# For parsers. Even when it's a tree parser, token might be set.
self.token = None
# If this is a tree parser exception, node is set to the node with
# the problem.
self.node = None
# The current char when an error occurred. For lexers.
self.c = None
# Track the line at which the error occurred in case this is
# generated from a lexer. We need to track this since the
# unexpected char doesn't carry the line info.
self.line = None
self.charPositionInLine = None
        # If you are parsing a tree node stream, you will encounter some
# imaginary nodes w/o line/col info. We now search backwards looking
# for most recent token with line/col info, but notify getErrorHeader()
# that info is approximate.
self.approximateLineInfo = False
if input is not None:
self.input = input
self.index = input.index()
# late import to avoid cyclic dependencies
from .streams import TokenStream, CharStream
from .tree import TreeNodeStream
if isinstance(self.input, TokenStream):
self.token = self.input.LT(1)
self.line = self.token.line
self.charPositionInLine = self.token.charPositionInLine
if isinstance(self.input, TreeNodeStream):
self.extractInformationFromTreeNodeStream(self.input)
else:
if isinstance(self.input, CharStream):
self.c = self.input.LT(1)
self.line = self.input.line
self.charPositionInLine = self.input.charPositionInLine
else:
self.c = self.input.LA(1)
def extractInformationFromTreeNodeStream(self, nodes):
from antlr3.tree import Tree, CommonTree
from antlr3.tokens import CommonToken
self.node = nodes.LT(1)
adaptor = nodes.adaptor
payload = adaptor.getToken(self.node)
if payload is not None:
self.token = payload
if payload.line <= 0:
# imaginary node; no line/pos info; scan backwards
i = -1
priorNode = nodes.LT(i)
while priorNode is not None:
priorPayload = adaptor.getToken(priorNode)
if priorPayload is not None and priorPayload.line > 0:
# we found the most recent real line / pos info
self.line = priorPayload.line
self.charPositionInLine = priorPayload.charPositionInLine
self.approximateLineInfo = True
break
i -= 1
priorNode = nodes.LT(i)
else: # node created from real token
self.line = payload.line
self.charPositionInLine = payload.charPositionInLine
elif isinstance(self.node, Tree):
self.line = self.node.line
self.charPositionInLine = self.node.charPositionInLine
if isinstance(self.node, CommonTree):
self.token = self.node.token
else:
type = adaptor.getType(self.node)
text = adaptor.getText(self.node)
self.token = CommonToken(type=type, text=text)
def getUnexpectedType(self):
"""Return the token type or char of the unexpected input element"""
from antlr3.streams import TokenStream
from antlr3.tree import TreeNodeStream
if isinstance(self.input, TokenStream):
return self.token.type
elif isinstance(self.input, TreeNodeStream):
adaptor = self.input.treeAdaptor
return adaptor.getType(self.node)
else:
return self.c
unexpectedType = property(getUnexpectedType)
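# Hypothetical usage sketch (the parser object and rule name below are
# placeholders for a recognizer generated by ANTLR3, not part of this module):
#
#     try:
#         parser.someRule()
#     except RecognitionException as exc:
#         print(exc.line, exc.charPositionInLine, exc.unexpectedType)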
class MismatchedTokenException(RecognitionException):
"""@brief A mismatched char or Token or tree node."""
def __init__(self, expecting, input):
RecognitionException.__init__(self, input)
self.expecting = expecting
def __str__(self):
#return "MismatchedTokenException("+self.expecting+")"
return "MismatchedTokenException(%r!=%r)" % (
self.getUnexpectedType(), self.expecting
)
__repr__ = __str__
class UnwantedTokenException(MismatchedTokenException):
"""An extra token while parsing a TokenStream"""
def getUnexpectedToken(self):
return self.token
def __str__(self):
exp = ", expected %s" % self.expecting
if self.expecting == INVALID_TOKEN_TYPE:
exp = ""
if self.token is None:
return "UnwantedTokenException(found=%s%s)" % (None, exp)
return "UnwantedTokenException(found=%s%s)" % (self.token.text, exp)
__repr__ = __str__
class MissingTokenException(MismatchedTokenException):
"""
We were expecting a token but it's not found. The current token
is actually what we wanted next.
"""
def __init__(self, expecting, input, inserted):
MismatchedTokenException.__init__(self, expecting, input)
self.inserted = inserted
def getMissingType(self):
return self.expecting
def __str__(self):
if self.inserted is not None and self.token is not None:
return "MissingTokenException(inserted %r at %r)" % (
self.inserted, self.token.text)
if self.token is not None:
return "MissingTokenException(at %r)" % self.token.text
return "MissingTokenException"
__repr__ = __str__
class MismatchedRangeException(RecognitionException):
"""@brief The next token does not match a range of expected types."""
def __init__(self, a, b, input):
RecognitionException.__init__(self, input)
self.a = a
self.b = b
def __str__(self):
return "MismatchedRangeException(%r not in [%r..%r])" % (
self.getUnexpectedType(), self.a, self.b
)
__repr__ = __str__
class MismatchedSetException(RecognitionException):
"""@brief The next token does not match a set of expected types."""
def __init__(self, expecting, input):
RecognitionException.__init__(self, input)
self.expecting = expecting
def __str__(self):
return "MismatchedSetException(%r not in %r)" % (
self.getUnexpectedType(), self.expecting
)
__repr__ = __str__
class MismatchedNotSetException(MismatchedSetException):
"""@brief Used for remote debugger deserialization"""
def __str__(self):
return "MismatchedNotSetException(%r!=%r)" % (
self.getUnexpectedType(), self.expecting
)
__repr__ = __str__
class NoViableAltException(RecognitionException):
"""@brief Unable to decide which alternative to choose."""
def __init__(
self, grammarDecisionDescription, decisionNumber, stateNumber, input
):
RecognitionException.__init__(self, input)
self.grammarDecisionDescription = grammarDecisionDescription
self.decisionNumber = decisionNumber
self.stateNumber = stateNumber
def __str__(self):
return "NoViableAltException(%r!=[%r])" % (
self.unexpectedType, self.grammarDecisionDescription
)
__repr__ = __str__
class EarlyExitException(RecognitionException):
"""@brief The recognizer did not match anything for a (..)+ loop."""
def __init__(self, decisionNumber, input):
RecognitionException.__init__(self, input)
self.decisionNumber = decisionNumber
class FailedPredicateException(RecognitionException):
"""@brief A semantic predicate failed during validation.
Validation of predicates
occurs when normally parsing the alternative just like matching a token.
Disambiguating predicate evaluation occurs when we hoist a predicate into
a prediction decision.
"""
def __init__(self, input, ruleName, predicateText):
RecognitionException.__init__(self, input)
self.ruleName = ruleName
self.predicateText = predicateText
def __str__(self):
return "FailedPredicateException(" + self.ruleName + ",{" + self.predicateText + "}?)"
__repr__ = __str__
class MismatchedTreeNodeException(RecognitionException):
"""@brief The next tree mode does not match the expected type."""
def __init__(self, expecting, input):
RecognitionException.__init__(self, input)
self.expecting = expecting
def __str__(self):
return "MismatchedTreeNodeException(%r!=%r)" % (
self.getUnexpectedType(), self.expecting
)
__repr__ = __str__
|
py
|
1a595dc0a50f30e2206fff8df1e49946824bc4bb
|
import argparse
import tskit
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description=(
'Quick hack: add an extra mutations column calculated by re-laying'
' mutations using parsimony. The modified'
'csv is output to stdout, so do e.g. `python add_RF.py file.csv > new.csv`'
)
)
parser.add_argument("csv_file")
parser.add_argument("-c", "--column_containing_paths", default=-1,
help=(
"The column in the CSV containing the paths (can be negative, for pos from "
"end). The value in this column is the path to the .trees file"
)
)
args = parser.parse_args()
with open(args.csv_file, "rt") as f:
new_fields = ["", ""]
for line_num, line in enumerate(f):
fields = line.strip().split(",")
try:
ts = tskit.load(fields[args.column_containing_paths])
tables = ts.dump_tables()
tables.mutations.clear()
parsimony_muts = 0
tree_iter = ts.trees()
tree = next(tree_iter)
anc_states = []
for v in ts.variants():
while v.site.position >= tree.interval.right:
tree = next(tree_iter)
anc_state, muts = tree.map_mutations(v.genotypes, v.alleles)
anc_states.append(anc_state)
for m in muts:
tables.mutations.append(
m.replace(parent=tskit.NULL, site=v.site.id))
parsimony_muts += len(muts)
tables.compute_mutation_parents()
tables.sites.packset_ancestral_state(anc_states)
ts = tables.tree_sequence()
new_fields[0] = str(parsimony_muts)
new_fields[1] = str(ts.nbytes)
except FileNotFoundError:
new_fields = ["", ""] if line_num>0 else ["parsimony_muts", "parsimony_nbytes"]
# Add elements before the c'th one
for f in new_fields:
fields.insert(args.column_containing_paths, f)
print(",".join(fields))
|
py
|
1a595ef22bb9b6be7b51fe4012a261836ab1e1e6
|
import torch
import numpy as np
if __name__ == '__main__':
if torch.cuda.is_available():
print("Yes")
np_vec = np.array([2, 3, 4])
ten = torch.from_numpy(np_vec)
print(ten.cuda())
print(torch.cuda.device_count())
print(torch.cuda.get_device_name())
print("test")
|
py
|
1a596038891d225ea616c9d1d1ae5429cb119cde
|
"""Common verify functions for rsvp"""
# Python
import re
import logging
# Genie
from genie.utils import Dq
from genie.utils.timeout import Timeout
from genie.metaparser.util.exceptions import SchemaEmptyParserError
log = logging.getLogger(__name__)
def verify_lsp_neighbor(
device,
ipv4_address,
expected_status="Up",
max_time=60,
check_interval=10,
lsp_state_flag=False
):
""" Verify lsp state is up for neighbor
Args:
device ('obj'): device to use
ipv4_address ('str'): IPv4 address to check neighbor node
expected_status ('str'): Expected neighbor lsp status
max_time ('int'): Maximum time to keep checking
check_interval ('int'): How often to check
lsp_state_flag ('bool'): Flag for verifying Lsp state
Returns:
True/False
Raises:
N/A
"""
# Parse IPv4 address
ipv4_address = ipv4_address.split("/")[0]
timeout = Timeout(max_time, check_interval)
while timeout.iterate():
try:
output = device.parse("show rsvp neighbor detail")
except SchemaEmptyParserError:
log.info('Parser is empty')
timeout.sleep()
continue
# Example RSVP Neighbor Detail Dictionary
# {
# "rsvp-neighbor-information": {
# "rsvp-neighbor-count": str,
# "rsvp-neighbor": [
# {
# "rsvp-neighbor-address": str,
# "rsvp-neighbor-status": str,
# ...
# }
# ]
# }
# }
# Get RSVP neighbor list
for neighbor in output.q.get_values("rsvp-neighbor"):
if neighbor.get("rsvp-neighbor-address") == ipv4_address:
# Case when user wants to check the Lsp status of neighbor
if (lsp_state_flag and
neighbor.get("rsvp-neighbor-status") == expected_status):
return True
break
timeout.sleep()
return False
def verify_rsvp_neighbor(device, expected_ipaddress, max_time=30, check_interval=10):
"""
Verify there is a neighbor
Args:
device (`obj`): Device object
expected_ipaddress (`str`): The IP address that is expected in the output
max_time (`int`): Max time, default: 30
check_interval (`int`): Check interval, default: 10
Returns:
result (`bool`): Verified result
"""
# {'rsvp-neighbor-information':
# { 'rsvp-neighbor-count': '4',
# 'rsvp-neighbor': [
# {'rsvp-neighbor-address': '59.128.3.252',
# 'neighbor-idle': '39:15',
# 'neighbor-up-count': '0',
# 'neighbor-down-count': '0',
# 'last-changed-time': '39:15',
# 'hello-interval': '9',
# 'hellos-sent': '262',
# 'hellos-received': '0',
# 'messages-received': '0'},
timeout = Timeout(max_time, check_interval)
while timeout.iterate():
try:
out = device.parse("show rsvp neighbor")
except SchemaEmptyParserError:
timeout.sleep()
continue
if expected_ipaddress in out.q.get_values("rsvp-neighbor-address"):
return True
timeout.sleep()
return False
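# Hypothetical usage sketch for the verify helpers above (assumes a pyATS/Genie
# testbed file and a reachable device alias; names and addresses are placeholders):
#
#     from genie.testbed import load
#     testbed = load('testbed.yaml')
#     device = testbed.devices['PE1']
#     device.connect()
#     assert verify_rsvp_neighbor(device, expected_ipaddress='10.0.0.1')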
def verify_rsvp_session_state(device, expected_state, session_name=None,
session_type="Transit", max_time=60, check_interval=10):
""" Verify RSVP session state
Args:
device (obj): device object
expected_state (str): Expected state
session_name (str, optional): Session name. Defaults to None.
session_type (str): Session type. Defaults to "Transit"
max_time (int, optional): Maximum timeout time. Defaults to 60.
check_interval (int, optional): Check interval. Defaults to 10.
"""
#'rsvp-session-information': {
# 'rsvp-session-data': [{
# 'session-type': 'Transit',
# 'count': '30',
# 'rsvp-session': [{
# 'destination-address': '10.49.194.125',
# 'source-address': '10.49.194.127',
# 'lsp-state': 'Up',
# 'route-count': '0',
# 'rsb-count': '1',
# 'resv-style': 'FF',
# 'label-in': '46',
# 'label-out': '44',
# 'name': 'test_lsp_01'
# },
timeout = Timeout(max_time, check_interval)
while timeout.iterate():
try:
out = device.parse('show rsvp session')
except SchemaEmptyParserError:
timeout.sleep()
continue
for session in out.q.get_values('rsvp-session-data'):
if session.get('session-type') == session_type:
session_data = Dq(session).get_values('rsvp-session')
for data in session_data:
if session_name and session_name != data.get('name'):
continue
if data.get('lsp-state').lower() != expected_state.lower():
continue
return True
timeout.sleep()
return False
|
py
|
1a5960fec38fcaf4733d372d77761c72c65e8280
|
from .domain import *
|
py
|
1a596185b25a58b191ce640edd6d9bc22076b4a1
|
# References:
# https://developers.google.com/gmail/api/quickstart/python
# https://developers.google.com/gmail/api/guides/sending
# https://www.thepythoncode.com/article/use-gmail-api-in-python
from __future__ import print_function
import os.path
from googleapiclient.discovery import build
from google_auth_oauthlib.flow import InstalledAppFlow
from google.auth.transport.requests import Request
from google.oauth2.credentials import Credentials
from email.mime.text import MIMEText
import base64
# If modifying these scopes, delete the file token.json.
SCOPES = ['https://www.googleapis.com/auth/gmail.send']
def create_message(sender, to, subject, message_text):
    message = MIMEText(message_text)
message['to'] = to
message['from'] = sender
message['subject'] = subject
return {'raw': base64.urlsafe_b64encode(message.as_bytes()).decode()}
class Gmail():
def __init__(self):
creds = None
if os.path.exists('token.json'):
creds = Credentials.from_authorized_user_file('token.json', SCOPES)
if not creds or not creds.valid:
if creds and creds.expired and creds.refresh_token:
creds.refresh(Request())
else:
flow = InstalledAppFlow.from_client_secrets_file(
'credentials.json', SCOPES)
creds = flow.run_local_server(port=0)
# Save the credentials for the next run
with open('token.json', 'w') as token:
token.write(creds.to_json())
self.service = build('gmail', 'v1', credentials=creds)
def send_message(self, message):
try:
message = (self.service.users().messages().send(userId='me', body=message).execute())
print('Message Id: {}'.format(message['id']))
return message
except Exception as e:
print(e)
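# Minimal usage sketch (assumes a valid 'credentials.json' is present in the
# working directory; the addresses and subject below are placeholders):
if __name__ == '__main__':
    gmail = Gmail()
    msg = create_message('sender@example.com', 'recipient@example.com',
                         'Test subject', 'Hello from the Gmail API')
    gmail.send_message(msg)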
|
py
|
1a59623b902fc93d08ba55ebae1eb9ff09bc6783
|
from django.conf.urls import url
from . import views
from django.conf import settings
from django.conf.urls.static import static
urlpatterns=[
url(r'^$',views.welcome,name='home-page'),
url(r'^search/',views.search_results, name='search_results'),
url(r'^category/(\w+)', views.get_category,name='get_category'),
url(r'^location/(\w+)', views.get_location,name='get_location'),
]
if settings.DEBUG:
urlpatterns+= static(settings.MEDIA_URL, document_root = settings.MEDIA_ROOT)
|
py
|
1a5963bc57c0d7950a7ad8ddd0ef4b6c67c91e4b
|
from scipy import signal
import matplotlib.pyplot as plt
import numpy as np
import pyqtgraph
import PyQt5.QtWidgets as qtg
import matplotlib
from matplotlib.figure import Figure
from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg, FigureCanvas
from matplotlib.backends.backend_qt5agg import NavigationToolbar2QT as NavigationToolbar
matplotlib.use('Qt5Agg')
class MatplotlibWidget(qtg.QWidget):
"""
Implements a Matplotlib figure inside a QWidget.
Use getFigure() and redraw() to interact with matplotlib.
Example::
mw = MatplotlibWidget()
subplot = mw.getFigure().add_subplot(111)
subplot.plot(x,y)
mw.draw()
"""
def __init__(self, size=(5.0, 4.0), dpi=100):
qtg.QWidget.__init__(self)
self.fig = Figure(size, dpi=dpi)
self.canvas = FigureCanvas(self.fig)
self.canvas.setParent(self)
self.toolbar = NavigationToolbar(self.canvas, self)
self.vbox = qtg.QVBoxLayout()
self.vbox.addWidget(self.toolbar)
self.vbox.addWidget(self.canvas)
self.setLayout(self.vbox)
def getFigure(self):
return self.fig
def draw(self):
self.canvas.draw()
# Create the data
fs = 10e3
N = 1e5
amp = 2 * np.sqrt(2)
noise_power = 0.01 * fs / 2
time = np.arange(N) / float(fs)
mod = 500*np.cos(2*np.pi*0.25*time)
carrier = amp * np.sin(2*np.pi*3e3*time + mod)
noise = np.random.normal(scale=np.sqrt(noise_power), size=time.shape)
noise *= np.exp(-time/5)
x = carrier + noise
f, t, Sxx = signal.spectrogram(x, fs)
# Interpret image data as row-major instead of col-major
pyqtgraph.setConfigOptions(imageAxisOrder='row-major')
pyqtgraph.mkQApp()
win = pyqtgraph.GraphicsLayoutWidget()
# A plot area (ViewBox + axes) for displaying the image
p1 = win.addPlot()
# Item for displaying image data
img = pyqtgraph.ImageItem()
p1.addItem(img)
# Add a histogram with which to control the gradient of the image
hist = pyqtgraph.HistogramLUTItem()
# Link the histogram to the image
hist.setImageItem(img)
# If you don't add the histogram to the window, it stays invisible, but I find it useful.
win.addItem(hist)
# Show the window
win.show()
# Fit the min and max levels of the histogram to the data available
hist.setLevels(np.min(Sxx), np.max(Sxx))
# This gradient is roughly comparable to the gradient used by Matplotlib
# You can adjust it and then save it using hist.gradient.saveState()
hist.gradient.restoreState(
{'mode': 'rgb',
'ticks': [(0.5, (0, 182, 188, 255)),
(1.0, (246, 111, 0, 255)),
(0.0, (75, 0, 113, 255))]})
# Sxx contains the amplitude for each pixel
img.setImage(Sxx)
# Scale the X and Y Axis to time and frequency (standard is pixels)
img.scale(t[-1]/np.size(Sxx, axis=1),
f[-1]/np.size(Sxx, axis=0))
# Limit panning/zooming to the spectrogram
p1.setLimits(xMin=0, xMax=t[-1], yMin=0, yMax=f[-1])
# Add labels to the axis
p1.setLabel('bottom', "Time", units='s')
# If you include the units, Pyqtgraph automatically scales the axis and adjusts the SI prefix (in this case kHz)
p1.setLabel('left', "Frequency", units='Hz')
# Plotting with Matplotlib in comparison
plt.pcolormesh(t, f, Sxx)
plt.ylabel('Frequency [Hz]')
plt.xlabel('Time [sec]')
plt.colorbar()
plt.show()
pyqtgraph.Qt.QtGui.QApplication.instance().exec_()
|
py
|
1a5964220db9962b84ef08c0e2202fda258f5520
|
from .dataset import DataSet, DataSetMode, RawDataSet
from calamari_ocr.ocr.data_processing import DataPreprocessor
from calamari_ocr.ocr.text_processing import TextProcessor
from calamari_ocr.ocr.augmentation import DataAugmenter
from typing import Generator, Tuple, List, Any
import numpy as np
import multiprocessing
from collections import namedtuple
import queue
from calamari_ocr.utils.multiprocessing import tqdm_wrapper
from abc import ABC, abstractmethod
import logging
from .queue_helper import MaxElementsQueuer
from ..augmentation.dataaugmentationparams import DataAugmentationAmount, DataAugmentationAmountReference
logger = logging.getLogger(__name__)
class OrderedQueueTask:
def __init__(self, input_queue, output_queue, context=multiprocessing.get_context()):
self.input_queue = input_queue
self.output_queue = output_queue
self.context = context
self.p = self.context.Process(daemon=True, target=self.run)
def start(self):
self.p.start()
def stop(self):
self.p.terminate()
def join(self):
self.p.join()
def run(self) -> None:
data = []
current_idx = 0
while True:
while True:
try:
data.append(self.input_queue.get(timeout=0.1))
except queue.Empty:
continue
except KeyboardInterrupt:
return
break
data.sort(key=lambda data: data[0])
while len(data) > 0 and data[0][0] <= current_idx:
try:
self.output_queue.put(data[0], timeout=0.1)
self.output_queue.task_done()
del data[0]
current_idx += 1
except queue.Full:
continue
except KeyboardInterrupt:
return
DataProcessingTaskData = namedtuple("DataProcessingTaskData", [
"skip_invalid_gt",
"data_aug_params",
"text_processor",
"data_processor",
"data_augmenter",
"generate_only_non_augmented",
])
class DataProcessingTask:
def __init__(self, params, input_queue: multiprocessing.JoinableQueue, output_queue: multiprocessing.JoinableQueue, context=multiprocessing.get_context()):
self.params = params
self.input_queue = input_queue
self.output_queue = output_queue
self.p = context.Process(daemon=True, target=self.run)
def start(self):
self.p.start()
def stop(self):
self.p.terminate()
def join(self):
self.p.join()
def run(self) -> None:
while True:
try:
data = self.input_queue.get(timeout=0.1)
except queue.Empty:
continue
except KeyboardInterrupt:
# allow keyboard interrupt
return
out = self.apply_single(*data)
if out:
while True:
try:
self.output_queue.put(out, timeout=0.1)
break
except queue.Full:
continue
except KeyboardInterrupt:
return
self.output_queue.task_done()
def apply_single(self, idx, sample_id, line, text):
#if not dataset.is_sample_valid(sample, line, text):
# if not skip_invalid_gt:
# print("ERROR: invalid sample {}".format(sample))
# return None
if self.params.data_processor and line is not None:
line, params = self.params.data_processor.apply([line], 1, False)[0]
else:
params = None
if self.params.text_processor and text is not None:
text = self.params.text_processor.apply([text], 1, False)[0]
# data augmentation
if not self.params.data_aug_params.no_augs() \
and line is not None \
and not self.params.generate_only_non_augmented.value \
and self.params.data_augmenter \
and np.random.rand() <= self.params.data_aug_params.to_rel():
line, text = self.params.data_augmenter.augment_single(line, text)
return idx, sample_id, line, text, params
class InputDataset(ABC):
def __init__(self,
mode: DataSetMode,
):
self.mode = mode
self._generate_only_non_augmented = multiprocessing.Value('b', False)
self.initialized = False
def __enter__(self):
if self.initialized:
raise AssertionError("Input dataset already initialized.")
logger.debug("InputDataset {} entered".format(self))
self.initialized = True
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.initialized = False
logger.debug("InputDataset {} exited".format(self))
def check_initialized(self):
if not self.initialized:
raise AssertionError("InputDataset is not initialised. Call 'with InputDataset() as input_dataset:'. "
"After the scope is closed the threads will be closed, too, for cleaning up.")
@abstractmethod
def __len__(self):
return 0
@abstractmethod
def epoch_size(self):
return len(self)
@property
def generate_only_non_augmented(self):
return self._generate_only_non_augmented.value
@generate_only_non_augmented.setter
def generate_only_non_augmented(self, value):
self._generate_only_non_augmented.value = value
@abstractmethod
def text_generator(self) -> Generator[str, None, None]:
self.check_initialized()
@abstractmethod
def generator(self, epochs=1, text_only=False) -> Generator[Tuple[np.array, List[str], Any], None, None]:
self.check_initialized()
class RawInputDataset(InputDataset):
def __init__(self,
mode: DataSetMode,
raw_datas, raw_texts, raw_params,
):
super().__init__(mode)
self.preloaded_datas, self.preloaded_texts, self.preloaded_params = raw_datas, raw_texts, raw_params
def __len__(self):
if self._generate_only_non_augmented.value:
return len(self.preloaded_params)
return len(self.preloaded_datas)
def epoch_size(self):
return len(self)
def text_generator(self) -> Generator[str, None, None]:
self.check_initialized()
for text in self.preloaded_texts:
yield text
def generator(self, epochs=1, text_only=False) -> Generator[Tuple[np.array, List[str], Any], None, None]:
self.check_initialized()
for epoch in range(epochs):
if self.mode == DataSetMode.TRAIN:
# only train here, pred and eval are covered by else block
                # train mode won't generate parameters
if self._generate_only_non_augmented.value:
                    # preloaded datas are ordered: first the original data, then the augmented data;
                    # preloaded params store the 'length' of the non-augmented data,
                    # thus only the original data is yielded here
for data, text, params in zip(self.preloaded_datas, self.preloaded_texts, self.preloaded_params):
yield data, text, None
else:
# yield all data, however no params
for data, text in zip(self.preloaded_datas, self.preloaded_texts):
yield data, text, None
else:
# all other modes generate everything we got, but does not support data augmentation
for data, text, params in zip(self.preloaded_datas, self.preloaded_texts, self.preloaded_params):
yield data, text, params
class StreamingInputDataset(InputDataset):
def __init__(self,
dataset: DataSet,
data_preprocessor: DataPreprocessor,
text_preprocessor: TextProcessor,
data_augmenter: DataAugmenter = None,
data_augmentation_factor: float = 0,
skip_invalid_gt=True,
processes=4):
super().__init__(dataset.mode)
self.dataset = dataset
self.data_processor = data_preprocessor
self.text_processor = text_preprocessor
self.skip_invalid_gt = skip_invalid_gt
self.data_augmenter = data_augmenter
self.data_augmentation_params = DataAugmentationAmount.from_factor(data_augmentation_factor)
self.mp_context = multiprocessing.get_context('spawn')
self.processes = max(1, processes)
if data_augmenter and dataset.mode != DataSetMode.TRAIN and dataset.mode != DataSetMode.PRED_AND_EVAL:
            # PRED_AND_EVAL is allowed here because it also supports augmentation
raise Exception('Data augmentation is only supported for training, but got {} dataset instead'.format(dataset.mode))
if not self.data_augmentation_params.no_augs() and self.data_augmenter is None:
            raise Exception('Requested data augmentation, but no data augmenter provided. Use e.g. SimpleDataAugmenter')
self.data_input_queue = None
self.unordered_output_queue = None
self.data_processing_tasks = []
self.data_generator = None
self.ordered_output_queue = None
self.data_ordering = None
def __enter__(self):
super().__enter__()
# create all tasks and queues
self.max_queuer = MaxElementsQueuer(self.processes * 4, ctx=self.mp_context)
self.data_input_queue = self.max_queuer.input_queue
self.ordered_output_queue = self.max_queuer.output_queue
self.unordered_output_queue = self.mp_context.JoinableQueue()
self.data_processing_tasks = [
DataProcessingTask(
DataProcessingTaskData(
self.skip_invalid_gt,
self.data_augmentation_params,
self.text_processor,
self.data_processor,
self.data_augmenter,
self._generate_only_non_augmented,
),
self.data_input_queue,
self.unordered_output_queue,
) for _ in range(self.processes)
]
self.data_generator = self.dataset.create_generator(self.mp_context, self.data_input_queue)
self.data_generator.start()
self.data_ordering = OrderedQueueTask(self.unordered_output_queue, self.ordered_output_queue, self.mp_context)
self.data_ordering.start()
for p in self.data_processing_tasks:
p.start()
return self
def __exit__(self, exc_type, exc_val, exc_tb):
# stop all tasks
self.data_generator.stop()
for p in self.data_processing_tasks:
p.stop()
self.data_ordering.stop()
self.data_input_queue = None
self.unordered_output_queue = None
self.data_processing_tasks = []
self.data_generator = None
self.ordered_output_queue = None
self.data_ordering = None
super().__exit__(exc_type, exc_val, exc_tb)
def __len__(self):
return len(self.dataset.samples())
def epoch_size(self):
if self._generate_only_non_augmented.value:
return len(self)
return self.data_augmentation_params.epoch_size(len(self))
def to_raw_input_dataset(self, processes=1, progress_bar=False, text_only=False) -> RawInputDataset:
print("Preloading dataset type {} with size {}".format(self.dataset.mode, len(self)))
prev = self._generate_only_non_augmented.value
self._generate_only_non_augmented.value = True
datas, texts, params = zip(*list(tqdm_wrapper(self.generator(epochs=1, text_only=text_only),
desc="Preloading data", total=len(self.dataset),
progress_bar=progress_bar)))
preloaded_datas, preloaded_texts, preloaded_params = datas, texts, params
self._generate_only_non_augmented.value = prev
if not self.data_augmentation_params.no_augs() and (self.dataset.mode == DataSetMode.TRAIN or self.dataset.mode == DataSetMode.PRED_AND_EVAL):
abs_n_augs = self.data_augmentation_params.to_abs()
preloaded_datas, preloaded_texts \
= self.data_augmenter.augment_datas(list(datas), list(texts), n_augmentations=abs_n_augs,
processes=processes, progress_bar=progress_bar)
return RawInputDataset(self.mode, preloaded_datas, preloaded_texts, preloaded_params)
def text_generator(self) -> Generator[str, None, None]:
self.check_initialized()
for _, text, _ in self.generator(epochs=1, text_only=True):
if self.text_processor:
text = self.text_processor.apply([text], 1, False)[0]
yield text
def generator(self, epochs=1, text_only=False) -> Generator[Tuple[np.array, List[str], Any], None, None]:
self.check_initialized()
self.data_generator.request(epochs, text_only)
for epoch in range(epochs):
for iter in range(len(self.dataset)):
while True:
try:
global_id, id, line, text, params = self.ordered_output_queue.get(timeout=0.1)
yield line, text, params
except queue.Empty:
# test data ordering thread was canceled
if not self.data_ordering.p.is_alive() and self.ordered_output_queue.empty():
return
continue
except KeyboardInterrupt:
return
break
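# Hypothetical usage sketch (the dataset and processor objects are placeholders;
# the context manager is required so the worker processes are cleaned up):
#
#     with StreamingInputDataset(dataset, data_preprocessor, text_preprocessor) as input_dataset:
#         for line, text, params in input_dataset.generator(epochs=1):
#             ...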
|
py
|
1a59649052de350571e3acd0e57cd4c51fec2a97
|
from . import errors
class MessageException(Exception):
def __init__(self, message: int):
if errors.ErrorCodeEnum.has_value(message):
self.message = errors.ErrorCodeEnum(message).value[1]
elif errors.MaaPErrorCodeEnum.has_value(message):
self.message = errors.MaaPErrorCodeEnum(message).value[1]
elif errors.RcsBizCenterErrorCodeEnum.has_value(message):
self.message = errors.RcsBizCenterErrorCodeEnum(message).value[1]
elif errors.KTErrorCodeEnum.has_value(message):
self.message = errors.KTErrorCodeEnum(message).value[1]
else:
self.message = "unknown"
def __str__(self):
return self.message
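# Hypothetical usage sketch (the numeric code below is a placeholder, not a
# known member of the errors enums):
#
#     try:
#         raise MessageException(99999)
#     except MessageException as exc:
#         print(exc)  # prints the mapped description, or "unknown"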
|
py
|
1a5964a76a3b900e196866a2810330917c72a6af
|
import os
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
SECRET_KEY = 'ss68t83q^4xx2d^3!!4%ccnh1xylz^kwu3&q!ev77+kb_%b@@t'
DEBUG = True
ALLOWED_HOSTS = []
AUTH_PASSWORD_VALIDATORS = []
INSTALLED_APPS = [
'tests.testapp',
'boogie.apps.users',
'boogie.apps.fragments',
'boogie',
'rest_framework',
'django.contrib.admin',
'django.contrib.sessions',
'django.contrib.contenttypes',
'django.contrib.auth',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
]
ROOT_URLCONF = 'tests.testproject.urls'
WSGI_APPLICATION = 'tests.testproject.wsgi.application'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(os.path.basename(BASE_DIR), 'db.sqlite3'),
}
}
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = False
USE_L10N = False
USE_TZ = False
STATIC_URL = '/static/'
|
py
|
1a5966884c0cc25f1011a7471acab690413bd82b
|
import os
import time
import argparse
import math
from numpy import finfo
import pdb
import torch
from distributed import apply_gradient_allreduce
import torch.distributed as dist
from torch.utils.data.distributed import DistributedSampler
from torch.utils.data import DataLoader
from model import Tacotron2
from data_utils import TextMelLoader, TextMelCollate
from loss_function import Tacotron2Loss
from logger import Tacotron2Logger
from hparams import create_hparams
from text import text_to_sequence, sequence_to_text
from tqdm import tqdm
def reduce_tensor(tensor, n_gpus):
rt = tensor.clone()
dist.all_reduce(rt, op=dist.reduce_op.SUM)
rt /= n_gpus
return rt
def init_distributed(hparams, n_gpus, rank, group_name):
assert torch.cuda.is_available(), "Distributed mode requires CUDA."
print("Initializing Distributed")
# Set cuda device so everything is done on the right GPU.
torch.cuda.set_device(rank % torch.cuda.device_count())
# Initialize distributed communication
dist.init_process_group(
backend=hparams.dist_backend, init_method=hparams.dist_url,
world_size=n_gpus, rank=rank, group_name=group_name)
print("Done initializing distributed")
def prepare_dataloaders(hparams):
# Get data, data loaders and collate function ready
trainset = TextMelLoader(hparams.training_files, hparams)
valset = TextMelLoader(hparams.validation_files, hparams)
collate_fn = TextMelCollate(hparams.n_frames_per_step)
if hparams.distributed_run:
train_sampler = DistributedSampler(trainset)
shuffle = False
else:
train_sampler = None
shuffle = True
train_loader = DataLoader(trainset, num_workers=1, shuffle=shuffle,
sampler=train_sampler,
batch_size=hparams.batch_size, pin_memory=False,
drop_last=True, collate_fn=collate_fn)
return train_loader, valset, collate_fn
def prepare_directories_and_logger(output_directory, log_directory, rank):
if rank == 0:
if not os.path.isdir(output_directory):
os.makedirs(output_directory)
os.chmod(output_directory, 0o775)
logger = Tacotron2Logger(os.path.join(output_directory, log_directory))
else:
logger = None
return logger
def load_model(hparams):
model = Tacotron2(hparams).cuda()
if hparams.fp16_run:
model.decoder.attention_layer.score_mask_value = finfo('float16').min
if hparams.distributed_run:
model = apply_gradient_allreduce(model)
return model
def warm_start_model(checkpoint_path, model, ignore_layers):
assert os.path.isfile(checkpoint_path)
print("Warm starting model from checkpoint '{}'".format(checkpoint_path))
checkpoint_dict = torch.load(checkpoint_path, map_location='cpu')
model_dict = checkpoint_dict['state_dict']
if len(ignore_layers) > 0:
model_dict = {k: v for k, v in model_dict.items()
if k not in ignore_layers}
dummy_dict = model.state_dict()
dummy_dict.update(model_dict)
model_dict = dummy_dict
model.load_state_dict(model_dict)
return model
def load_checkpoint(checkpoint_path, model, optimizer):
assert os.path.isfile(checkpoint_path)
print("Loading checkpoint '{}'".format(checkpoint_path))
checkpoint_dict = torch.load(checkpoint_path, map_location='cpu')
model.load_state_dict(checkpoint_dict['state_dict'])
optimizer.load_state_dict(checkpoint_dict['optimizer'])
learning_rate = checkpoint_dict['learning_rate']
iteration = checkpoint_dict['iteration']
print("Loaded checkpoint '{}' from iteration {}" .format(
checkpoint_path, iteration))
return model, optimizer, learning_rate, iteration
def save_checkpoint(model, optimizer, learning_rate, iteration, filepath):
print("Saving model and optimizer state at iteration {} to {}".format(
iteration, filepath))
torch.save({'iteration': iteration,
'state_dict': model.state_dict(),
'optimizer': optimizer.state_dict(),
'learning_rate': learning_rate}, filepath)
def validate(model, criterion, valset, iteration, batch_size, n_gpus,
collate_fn, logger, distributed_run, rank):
"""Handles all the validation scoring and printing"""
model.eval()
with torch.no_grad():
val_sampler = DistributedSampler(valset) if distributed_run else None
val_loader = DataLoader(valset, sampler=val_sampler, num_workers=1,
shuffle=False, batch_size=batch_size,
pin_memory=False, collate_fn=collate_fn)
val_loss = 0.0
for i, batch in enumerate(val_loader):
x, y = model.parse_batch(batch)
y_pred = model(x)
loss = criterion(y_pred, y)
if distributed_run:
reduced_val_loss = reduce_tensor(loss.data, n_gpus).item()
else:
reduced_val_loss = loss.item()
val_loss += reduced_val_loss
val_loss = val_loss / (i + 1)
model.train()
if rank == 0:
print("Validation loss {}: {:9f} ".format(iteration, val_loss))
logger.log_validation(val_loss, model, y, y_pred, iteration)
def train(output_directory, log_directory, checkpoint_path, warm_start, n_gpus,
rank, group_name, hparams):
"""Training and validation logging results to tensorboard and stdout
Params
------
output_directory (string): directory to save checkpoints
    log_directory (string): directory to save tensorboard logs
    checkpoint_path (string): checkpoint path
n_gpus (int): number of gpus
rank (int): rank of current gpu
hparams (object): comma separated list of "name=value" pairs.
"""
if hparams.distributed_run:
init_distributed(hparams, n_gpus, rank, group_name)
torch.manual_seed(hparams.seed)
torch.cuda.manual_seed(hparams.seed)
model = load_model(hparams)
learning_rate = hparams.learning_rate
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate,
weight_decay=hparams.weight_decay)
if hparams.fp16_run:
from apex import amp
model, optimizer = amp.initialize(
model, optimizer, opt_level='O2')
if hparams.distributed_run:
model = apply_gradient_allreduce(model)
criterion = Tacotron2Loss()
logger = prepare_directories_and_logger(
output_directory, log_directory, rank)
train_loader, valset, collate_fn = prepare_dataloaders(hparams)
# Load checkpoint if one exists
iteration = 0
epoch_offset = 0
if checkpoint_path is not None:
if warm_start:
model = warm_start_model(
checkpoint_path, model, hparams.ignore_layers)
else:
model, optimizer, _learning_rate, iteration = load_checkpoint(
checkpoint_path, model, optimizer)
if hparams.use_saved_learning_rate:
learning_rate = _learning_rate
iteration += 1 # next iteration is iteration + 1
epoch_offset = max(0, int(iteration / len(train_loader)))
model.train()
is_overflow = False
    # ================ MAIN TRAINING LOOP! ===================
for epoch in range(epoch_offset, hparams.epochs):
print("Epoch: {}".format(epoch), flush=True)
for i, batch in enumerate(train_loader):
start = time.perf_counter()
for param_group in optimizer.param_groups:
param_group['lr'] = learning_rate
model.zero_grad()
#itext_padded, iinput_lengths, imel_padded, igate_padded, ioutput_lengths = batch
#pdb.set_trace()
#print(sequence_to_text(itext_padded[0].tolist()))
#print('.')
x, y = model.parse_batch(batch)
y_pred = model(x)
loss = criterion(y_pred, y)
if hparams.distributed_run:
reduced_loss = reduce_tensor(loss.data, n_gpus).item()
else:
reduced_loss = loss.item()
if hparams.fp16_run:
with amp.scale_loss(loss, optimizer) as scaled_loss:
scaled_loss.backward()
else:
loss.backward()
if hparams.fp16_run:
grad_norm = torch.nn.utils.clip_grad_norm_(
amp.master_params(optimizer), hparams.grad_clip_thresh)
is_overflow = math.isnan(grad_norm)
else:
grad_norm = torch.nn.utils.clip_grad_norm_(
model.parameters(), hparams.grad_clip_thresh)
optimizer.step()
if not is_overflow and rank == 0:
duration = time.perf_counter() - start
print("Train loss {} {:.6f} Grad Norm {:.6f} {:.2f}s/it".format(
iteration, reduced_loss, grad_norm, duration), flush=True)
logger.log_training(
reduced_loss, grad_norm, learning_rate, duration, iteration)
if not is_overflow and (iteration % hparams.iters_per_checkpoint == 0):
validate(model, criterion, valset, iteration,
hparams.batch_size, n_gpus, collate_fn, logger,
hparams.distributed_run, rank)
if rank == 0:
checkpoint_path = os.path.join(
output_directory, "checkpoint_{}".format(iteration))
save_checkpoint(model, optimizer, learning_rate, iteration,
checkpoint_path)
iteration += 1
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('-o', '--output_directory', type=str,
help='directory to save checkpoints')
parser.add_argument('-l', '--log_directory', type=str,
help='directory to save tensorboard logs')
parser.add_argument('-c', '--checkpoint_path', type=str, default=None,
required=False, help='checkpoint path')
parser.add_argument('--warm_start', action='store_true',
help='load model weights only, ignore specified layers')
parser.add_argument('--n_gpus', type=int, default=1,
required=False, help='number of gpus')
parser.add_argument('--rank', type=int, default=0,
required=False, help='rank of current gpu')
parser.add_argument('--group_name', type=str, default='group_name',
required=False, help='Distributed group name')
parser.add_argument('--hparams', type=str,
required=False, help='comma separated name=value pairs')
args = parser.parse_args()
hparams = create_hparams(args.hparams)
torch.backends.cudnn.enabled = hparams.cudnn_enabled
torch.backends.cudnn.benchmark = hparams.cudnn_benchmark
print("FP16 Run:", hparams.fp16_run)
print("Dynamic Loss Scaling:", hparams.dynamic_loss_scaling)
print("Distributed Run:", hparams.distributed_run)
print("cuDNN Enabled:", hparams.cudnn_enabled)
print("cuDNN Benchmark:", hparams.cudnn_benchmark)
train(args.output_directory, args.log_directory, args.checkpoint_path,
args.warm_start, args.n_gpus, args.rank, args.group_name, hparams)
|
py
|
1a596867abce942ac24f9978e437e5aa7f870d9f
|
# -*- coding: utf-8 -*-
###########################################################################
# Copyright (c), The AiiDA team. All rights reserved. #
# This file is part of the AiiDA code. #
# #
# The code is hosted on GitHub at https://github.com/aiidateam/aiida-core #
# For further information on the license, see the LICENSE.txt file #
# For further information please visit http://www.aiida.net #
###########################################################################
from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
import six
from aiida.tools.dbimporters.baseclasses import (DbImporter, DbSearchResults,
UpfEntry)
class NnincDbImporter(DbImporter):
"""
Database importer for NNIN/C Pseudopotential Virtual Vault.
"""
def _str_clause(self, key, alias, values):
"""
Returns part of HTTP GET query for querying string fields.
"""
if not isinstance(values, six.string_types):
raise ValueError("incorrect value for keyword '{}' -- only "
'strings and integers are accepted'.format(alias))
return '{}={}'.format(key, values)
_keywords = {'xc_approximation': ['frmxcprox', _str_clause],
'xc_type': ['frmxctype', _str_clause],
'pseudopotential_class': ['frmspclass', _str_clause],
'element': ['element', None]}
def __init__(self, **kwargs):
self._query_url = 'http://nninc.cnf.cornell.edu/dd_search.php'
self.setup_db(**kwargs)
def query_get(self, **kwargs):
"""
Forms a HTTP GET query for querying the NNIN/C Pseudopotential
Virtual Vault.
:return: a string with HTTP GET statement.
"""
get_parts = []
for key in self._keywords.keys():
if key in kwargs.keys():
values = kwargs.pop(key)
if self._keywords[key][1] is not None:
get_parts.append(
self._keywords[key][1](self,
self._keywords[key][0],
key,
values))
if kwargs.keys():
raise NotImplementedError("search keyword(s) '"
"', '".join(kwargs.keys()) + \
"' is(are) not implemented for NNIN/C")
return self._query_url + '?' + '&'.join(get_parts)
def query(self, **kwargs):
"""
Performs a query on the NNIN/C Pseudopotential Virtual Vault using
``keyword = value`` pairs, specified in ``kwargs``.
:return: an instance of
:py:class:`aiida.tools.dbimporters.plugins.nninc.NnincSearchResults`.
"""
from six.moves import urllib
import re
query = self.query_get(**kwargs)
response = urllib.request.urlopen(query).read()
        results = re.findall(r"psp_files/([^']+)\.UPF", response)
elements = kwargs.get('element', None)
if elements and not isinstance(elements, list):
elements = [elements]
if elements:
results_now = set()
for psp in results:
for element in elements:
if psp.startswith('{}.'.format(element)):
results_now = results_now | set([psp])
results = list(results_now)
return NnincSearchResults([{'id': x} for x in results])
def setup_db(self, query_url=None, **kwargs):
"""
Changes the database connection details.
"""
if query_url:
self._query_url = query_url
if kwargs.keys():
raise NotImplementedError( \
"unknown database connection parameter(s): '" + \
"', '".join(kwargs.keys()) + \
"', available parameters: 'query_url'")
def get_supported_keywords(self):
"""
Returns the list of all supported query keywords.
:return: list of strings
"""
return self._keywords.keys()
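# Hypothetical usage sketch (the keyword name follows the _keywords mapping
# above; the element value is a placeholder and network access to
# nninc.cnf.cornell.edu is assumed):
#
#     importer = NnincDbImporter()
#     results = importer.query(element='Si')
#     print(len(results))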
class NnincSearchResults(DbSearchResults):
"""
Results of the search, performed on NNIN/C Pseudopotential Virtual
Vault.
"""
_base_url = 'http://nninc.cnf.cornell.edu/psp_files/'
def __init__(self, results):
super(NnincSearchResults, self).__init__(results)
self._return_class = NnincEntry
def __len__(self):
return len(self._results)
def _get_source_dict(self, result_dict):
"""
Returns a dictionary, which is passed as kwargs to the created
DbEntry instance, describing the source of the entry.
:param result_dict: dictionary, describing an entry in the results.
"""
return {'id': result_dict['id']}
def _get_url(self, result_dict):
"""
Returns an URL of an entry CIF file.
:param result_dict: dictionary, describing an entry in the results.
"""
return self._base_url + result_dict['id'] + '.UPF'
class NnincEntry(UpfEntry):
"""
Represents an entry from NNIN/C Pseudopotential Virtual Vault.
"""
def __init__(self, uri, **kwargs):
"""
Creates an instance of
:py:class:`aiida.tools.dbimporters.plugins.nninc.NnincEntry`, related
to the supplied URI.
"""
super(NnincEntry, self).__init__(db_name='NNIN/C Pseudopotential Virtual Vault',
db_uri='http://nninc.cnf.cornell.edu',
uri=uri,
**kwargs)
|
bzl
|
1a59688a13aa3cca1298facd5d95493c9db04168
|
# Copyright 2020 The Bazel Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""List the distribution dependencies we need to build Bazel."""
DIST_DEPS = {
########################################
#
# Runtime language dependencies
#
########################################
"platforms": {
"archive": "platforms-0.0.4.tar.gz",
"sha256": "079945598e4b6cc075846f7fd6a9d0857c33a7afc0de868c2ccb96405225135d",
"urls": [
"https://mirror.bazel.build/github.com/bazelbuild/platforms/releases/download/0.0.4/platforms-0.0.4.tar.gz",
"https://github.com/bazelbuild/platforms/releases/download/0.0.4/platforms-0.0.4.tar.gz",
],
"used_in": [
"additional_distfiles",
"test_WORKSPACE_files",
],
},
"bazelci_rules": {
"archive": "bazelci_rules-1.0.0.tar.gz",
"sha256": "eca21884e6f66a88c358e580fd67a6b148d30ab57b1680f62a96c00f9bc6a07e",
"strip_prefix": "bazelci_rules-1.0.0",
"urls": [
"https://github.com/bazelbuild/continuous-integration/releases/download/rules-1.0.0/bazelci_rules-1.0.0.tar.gz",
],
"used_in": [
"additional_distfiles",
],
},
# Keep in sync with src/main/java/com/google/devtools/build/lib/bazel/rules/cpp/cc_configure.WORKSPACE.
# Keep in sync with src/main/java/com/google/devtools/build/lib/bazel/rules/java/jdk.WORKSPACE.
# Note: This is not in sync with src/test/java/com/google/devtools/build/lib/blackbox/framework/BlackBoxTestEnvironment.java.
# Perhaps it should be.
"rules_cc": {
"archive": "b1c40e1de81913a3c40e5948f78719c28152486d.zip",
"sha256": "d0c573b94a6ef20ef6ff20154a23d0efcb409fb0e1ff0979cec318dfe42f0cdd",
"strip_prefix": "rules_cc-b1c40e1de81913a3c40e5948f78719c28152486d",
"urls": [
"https://mirror.bazel.build/github.com/bazelbuild/rules_cc/archive/b1c40e1de81913a3c40e5948f78719c28152486d.zip",
"https://github.com/bazelbuild/rules_cc/archive/b1c40e1de81913a3c40e5948f78719c28152486d.zip",
],
"used_in": [
"additional_distfiles",
"test_WORKSPACE_files",
],
},
"rules_java": {
"archive": "7cf3cefd652008d0a64a419c34c13bdca6c8f178.zip",
"sha256": "bc81f1ba47ef5cc68ad32225c3d0e70b8c6f6077663835438da8d5733f917598",
"strip_prefix": "rules_java-7cf3cefd652008d0a64a419c34c13bdca6c8f178",
"urls": [
"https://mirror.bazel.build/github.com/bazelbuild/rules_java/archive/7cf3cefd652008d0a64a419c34c13bdca6c8f178.zip",
"https://github.com/bazelbuild/rules_java/archive/7cf3cefd652008d0a64a419c34c13bdca6c8f178.zip",
],
"used_in": [
"additional_distfiles",
"test_WORKSPACE_files",
],
},
# TODO(aiuto): Update src/test/java/com/google/devtools/build/lib/blackbox/framework/BlackBoxTestEnvironment.java to use
# this dynamically.
"rules_proto": {
"archive": "7e4afce6fe62dbff0a4a03450143146f9f2d7488.tar.gz",
"sha256": "8e7d59a5b12b233be5652e3d29f42fba01c7cbab09f6b3a8d0a57ed6d1e9a0da",
"strip_prefix": "rules_proto-7e4afce6fe62dbff0a4a03450143146f9f2d7488",
"urls": [
"https://mirror.bazel.build/github.com/bazelbuild/rules_proto/archive/7e4afce6fe62dbff0a4a03450143146f9f2d7488.tar.gz",
"https://github.com/bazelbuild/rules_proto/archive/7e4afce6fe62dbff0a4a03450143146f9f2d7488.tar.gz",
],
"used_in": [
"additional_distfiles",
"test_WORKSPACE_files",
],
},
#################################################
#
# Dependencies which are part of the Bazel binary
#
#################################################
"com_google_protobuf": {
"archive": "v3.13.0.tar.gz",
"sha256": "9b4ee22c250fe31b16f1a24d61467e40780a3fbb9b91c3b65be2a376ed913a1a",
"strip_prefix": "protobuf-3.13.0",
"urls": [
"https://mirror.bazel.build/github.com/protocolbuffers/protobuf/archive/v3.13.0.tar.gz",
"https://github.com/protocolbuffers/protobuf/archive/v3.13.0.tar.gz",
],
"patch_args": ["-p1"],
"patches": ["//third_party/protobuf:3.13.0.patch"],
"used_in": [
"additional_distfiles",
"test_WORKSPACE_files",
],
},
"protocolbuffers": {
"archive": "382d5afc60e05470c23e8de19b19fc5ad231e732.tar.gz",
"sha256": "7992217989f3156f8109931c1fc6db3434b7414957cb82371552377beaeb9d6c",
"urls": [
"https://mirror.bazel.build/github.com/protocolbuffers/upb/archive/382d5afc60e05470c23e8de19b19fc5ad231e732.tar.gz",
"https://github.com/protocolbuffers/upb/archive/382d5afc60e05470c23e8de19b19fc5ad231e732.tar.gz",
],
"used_in": [
"additional_distfiles",
"test_WORKSPACE_files",
],
},
"com_github_grpc_grpc": {
"archive": "v1.33.1.tar.gz",
"sha256": "58eaee5c0f1bd0b92ebe1fa0606ec8f14798500620e7444726afcaf65041cb63",
"strip_prefix": "grpc-1.33.1",
"urls": [
"https://mirror.bazel.build/github.com/grpc/grpc/archive/v1.33.1.tar.gz",
"https://github.com/grpc/grpc/archive/v1.33.1.tar.gz",
],
"patch_args": ["-p1"],
"patches": [
"//third_party/grpc:grpc_1.33.1.patch",
],
"used_in": [
"additional_distfiles",
"test_WORKSPACE_files",
],
},
"c-ares": {
"archive": "e982924acee7f7313b4baa4ee5ec000c5e373c30.tar.gz",
"sha256": "e8c2751ddc70fed9dc6f999acd92e232d5846f009ee1674f8aee81f19b2b915a",
"urls": [
"https://mirror.bazel.build/github.com/c-ares/c-ares/archive/e982924acee7f7313b4baa4ee5ec000c5e373c30.tar.gz",
"https://github.com/c-ares/c-ares/archive/e982924acee7f7313b4baa4ee5ec000c5e373c30.tar.gz",
],
"used_in": [
"additional_distfiles",
"test_WORKSPACE_files",
],
},
"re2": {
"archive": "aecba11114cf1fac5497aeb844b6966106de3eb6.tar.gz",
"sha256": "9f385e146410a8150b6f4cb1a57eab7ec806ced48d427554b1e754877ff26c3e",
"urls": [
"https://mirror.bazel.build/github.com/google/re2/archive/aecba11114cf1fac5497aeb844b6966106de3eb6.tar.gz",
"https://github.com/google/re2/archive/aecba11114cf1fac5497aeb844b6966106de3eb6.tar.gz",
],
"used_in": [
"additional_distfiles",
"test_WORKSPACE_files",
],
},
"abseil-cpp": {
"archive": "df3ea785d8c30a9503321a3d35ee7d35808f190d.tar.gz",
"sha256": "f368a8476f4e2e0eccf8a7318b98dafbe30b2600f4e3cf52636e5eb145aba06a",
"urls": [
"https://mirror.bazel.build/github.com/abseil/abseil-cpp/archive/df3ea785d8c30a9503321a3d35ee7d35808f190d.tar.gz",
"https://github.com/abseil/abseil-cpp/archive/df3ea785d8c30a9503321a3d35ee7d35808f190d.tar.gz",
],
"used_in": [
"additional_distfiles",
"test_WORKSPACE_files",
],
},
###################################################
#
# Build time dependencies for testing and packaging
#
###################################################
"bazel_skylib": {
"archive": "bazel-skylib-1.0.3.tar.gz",
"sha256": "1c531376ac7e5a180e0237938a2536de0c54d93f5c278634818e0efc952dd56c",
"urls": [
"https://mirror.bazel.build/github.com/bazelbuild/bazel-skylib/releases/download/1.0.3/bazel-skylib-1.0.3.tar.gz",
"https://github.com/bazelbuild/bazel-skylib/releases/download/1.0.3/bazel-skylib-1.0.3.tar.gz",
],
"used_in": [
"additional_distfiles",
"test_WORKSPACE_files",
],
},
"io_bazel_skydoc": {
"archive": "1ef781ced3b1443dca3ed05dec1989eca1a4e1cd.tar.gz",
"sha256": "5a725b777976b77aa122b707d1b6f0f39b6020f66cd427bb111a585599c857b1",
"urls": [
"https://mirror.bazel.build/github.com/bazelbuild/stardoc/archive/1ef781ced3b1443dca3ed05dec1989eca1a4e1cd.tar.gz",
"https://github.com/bazelbuild/stardoc/archive/1ef781ced3b1443dca3ed05dec1989eca1a4e1cd.tar.gz",
],
"used_in": [
"additional_distfiles",
],
"strip_prefix": "stardoc-1ef781ced3b1443dca3ed05dec1989eca1a4e1cd",
},
"rules_pkg": {
"archive": "rules_pkg-0.4.0.tar.gz",
"sha256": "038f1caa773a7e35b3663865ffb003169c6a71dc995e39bf4815792f385d837d",
"urls": [
"https://mirror.bazel.build/github.com/bazelbuild/rules_pkg/releases/download/0.4.0/rules_pkg-0.4.0.tar.gz",
"https://github.com/bazelbuild/rules_pkg/releases/download/0.4.0/rules_pkg-0.4.0.tar.gz",
],
"used_in": [
"additional_distfiles",
],
},
# for Stardoc
"io_bazel_rules_sass": {
"archive": "1.25.0.zip",
"sha256": "c78be58f5e0a29a04686b628cf54faaee0094322ae0ac99da5a8a8afca59a647",
"strip_prefix": "rules_sass-1.25.0",
"urls": [
"https://mirror.bazel.build/github.com/bazelbuild/rules_sass/archive/1.25.0.zip",
"https://github.com/bazelbuild/rules_sass/archive/1.25.0.zip",
],
"used_in": [
"additional_distfiles",
],
},
# for Stardoc
"build_bazel_rules_nodejs": {
"archive": "rules_nodejs-2.2.2.tar.gz",
"sha256": "f2194102720e662dbf193546585d705e645314319554c6ce7e47d8b59f459e9c",
"urls": [
"https://mirror.bazel.build/github.com/bazelbuild/rules_nodejs/releases/download/2.2.2/rules_nodejs-2.2.2.tar.gz",
"https://github.com/bazelbuild/rules_nodejs/releases/download/2.2.2/rules_nodejs-2.2.2.tar.gz",
],
"used_in": [
"additional_distfiles",
],
},
"desugar_jdk_libs": {
# Commit 5847d6a06302136d95a14b4cbd4b55a9c9f1436e of 2021-03-10
"archive": "5847d6a06302136d95a14b4cbd4b55a9c9f1436e.zip",
"sha256": "299452e6f4a4981b2e6d22357f7332713382a63e4c137f5fd6b89579f6d610cb",
"strip_prefix": "desugar_jdk_libs-5847d6a06302136d95a14b4cbd4b55a9c9f1436e",
"urls": [
"https://mirror.bazel.build/github.com/google/desugar_jdk_libs/archive/5847d6a06302136d95a14b4cbd4b55a9c9f1436e.zip",
"https://github.com/google/desugar_jdk_libs/archive/5847d6a06302136d95a14b4cbd4b55a9c9f1436e.zip",
],
"used_in": [
"additional_distfiles",
],
},
"remote_coverage_tools": {
"archive": "coverage_output_generator-v2.5.zip",
"sha256": "cd14f1cb4559e4723e63b7e7b06d09fcc3bd7ba58d03f354cdff1439bd936a7d",
"urls": [
"https://mirror.bazel.build/bazel_coverage_output_generator/releases/coverage_output_generator-v2.5.zip",
],
"used_in": [
"additional_distfiles",
"test_WORKSPACE_files",
],
},
"remote_java_tools": {
"aliases": [
"remote_java_tools_test",
"remote_java_tools_for_testing",
],
"archive": "java_tools-v11.5.zip",
"sha256": "b763ee80e5754e593fd6d5be6d7343f905bc8b73d661d36d842b024ca11b6793",
"urls": [
"https://mirror.bazel.build/bazel_java_tools/releases/java/v11.5/java_tools-v11.5.zip",
"https://github.com/bazelbuild/java_tools/releases/download/java_v11.5/java_tools-v11.5.zip",
],
"used_in": [
"additional_distfiles",
"test_WORKSPACE_files",
],
},
"remote_java_tools_linux": {
"aliases": [
"remote_java_tools_test_linux",
"remote_java_tools_linux_for_testing",
],
"archive": "java_tools_linux-v11.5.zip",
"sha256": "ae1eca4546eac6487c6e565f9b409536609c273207220c51e5c94f2a058a5a56",
"urls": [
"https://mirror.bazel.build/bazel_java_tools/releases/java/v11.5/java_tools_linux-v11.5.zip",
"https://github.com/bazelbuild/java_tools/releases/download/java_v11.5/java_tools_linux-v11.5.zip",
],
"used_in": [
"additional_distfiles",
"test_WORKSPACE_files",
],
},
"remote_java_tools_windows": {
"aliases": [
"remote_java_tools_test_windows",
"remote_java_tools_windows_for_testing",
],
"archive": "java_tools_windows-v11.5.zip",
"sha256": "36766802f7ec684cecb1a14c122428de6be9784e88419e2ab5912ad4b59a8c7d",
"urls": [
"https://mirror.bazel.build/bazel_java_tools/releases/java/v11.5/java_tools_windows-v11.5.zip",
"https://github.com/bazelbuild/java_tools/releases/download/java_v11.5/java_tools_windows-v11.5.zip",
],
"used_in": [
"additional_distfiles",
"test_WORKSPACE_files",
],
},
"remote_java_tools_darwin": {
"aliases": [
"remote_java_tools_test_darwin",
"remote_java_tools_darwin_for_testing",
],
"archive": "java_tools_darwin-v11.5.zip",
"sha256": "792bc1352e736073b152528175ed424687f86a9f6f5f461f07d8b26806762738",
"urls": [
"https://mirror.bazel.build/bazel_java_tools/releases/java/v11.5/java_tools_darwin-v11.5.zip",
"https://github.com/bazelbuild/java_tools/releases/download/java_v11.5/java_tools_darwin-v11.5.zip",
],
"used_in": [
"additional_distfiles",
"test_WORKSPACE_files",
],
},
}
# Add aliased names
DEPS_BY_NAME = {}
def _create_index():
for repo_name in DIST_DEPS:
repo = DIST_DEPS[repo_name]
DEPS_BY_NAME[repo_name] = repo
aliases = repo.get("aliases")
if aliases:
for alias in aliases:
DEPS_BY_NAME[alias] = repo
_create_index()
def _gen_workspace_stanza_impl(ctx):
if ctx.attr.template and (ctx.attr.preamble or ctx.attr.postamble):
fail("Can not use template with either preamble or postamble")
if ctx.attr.use_maybe:
repo_clause = """
maybe(
http_archive,
name = "{repo}",
sha256 = "{sha256}",
strip_prefix = {strip_prefix},
urls = {urls},
)
"""
else:
repo_clause = """
http_archive(
name = "{repo}",
sha256 = "{sha256}",
strip_prefix = {strip_prefix},
urls = {urls},
)
"""
repo_stanzas = {}
for repo in ctx.attr.repos:
info = DEPS_BY_NAME[repo]
strip_prefix = info.get("strip_prefix")
if strip_prefix:
strip_prefix = "\"%s\"" % strip_prefix
else:
strip_prefix = "None"
repo_stanzas["{%s}" % repo] = repo_clause.format(
repo = repo,
archive = info["archive"],
sha256 = str(info["sha256"]),
strip_prefix = strip_prefix,
urls = info["urls"],
)
if ctx.attr.template:
ctx.actions.expand_template(
output = ctx.outputs.out,
template = ctx.file.template,
substitutions = repo_stanzas,
)
else:
content = "\n".join([p.strip() for p in ctx.attr.preamble.strip().split("\n")])
content += "\n"
content += "".join(repo_stanzas.values())
content += "\n"
content += "\n".join([p.strip() for p in ctx.attr.postamble.strip().split("\n")])
content += "\n"
ctx.actions.write(ctx.outputs.out, content)
return [DefaultInfo(files = depset([ctx.outputs.out]))]
gen_workspace_stanza = rule(
attrs = {
"repos": attr.string_list(doc = "Set of repos to include."),
"out": attr.output(mandatory = True),
"preamble": attr.string(doc = "Preamble."),
"postamble": attr.string(doc = "Set of rules to follow repos."),
"template": attr.label(
doc = "Template WORKSPACE file. May not be used with preamble or postamble. " +
"Repo stanzas can be included using the syntax '{repo name}'.",
allow_single_file = True,
mandatory = False,
),
"use_maybe": attr.bool(doc = "Use maybe() invocation instead of http_archive."),
},
doc = "Use specifications from DIST_DEPS to generate WORKSPACE http_archive stanzas or to " +
"drop them into a template.",
implementation = _gen_workspace_stanza_impl,
)
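# Hedged usage sketch (the target name, output file and load label are
# illustrative, not taken from the Bazel source tree): a BUILD target that
# renders WORKSPACE stanzas for two of the repos declared above.
#
# load(":distdeps.bzl", "gen_workspace_stanza")
#
# gen_workspace_stanza(
#     name = "rules_cc_stanza",
#     out = "rules_cc.WORKSPACE",
#     repos = [
#         "rules_cc",
#         "bazel_skylib",
#     ],
#     preamble = "load('@bazel_tools//tools/build_defs/repo:http.bzl', 'http_archive')",
# )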
|
py
|
1a5968e8253e1d799021644b15b7607e9f87aad3
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.1 on 2017-07-01 20:56
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('dgcrm', '0013_event_status'),
]
operations = [
migrations.RemoveField(
model_name='clientservicelinker',
name='client',
),
migrations.RemoveField(
model_name='clientservicelinker',
name='service',
),
migrations.RemoveField(
model_name='client',
name='services',
),
migrations.RemoveField(
model_name='feadback',
name='client',
),
migrations.RemoveField(
model_name='result',
name='client',
),
migrations.RemoveField(
model_name='result',
name='service',
),
migrations.AlterField(
model_name='event',
name='status',
field=models.CharField(choices=[('in_progress', 'ожидается'), ('successful', 'сделано'), ('failed', 'отменился'), ('contact', 'связаться')], default='in_progress', max_length=20),
),
migrations.DeleteModel(
name='ClientServiceLinker',
),
]
|
py
|
1a5969f901836810a76bc59ea6e6a1362de4d044
|
"""
-------------------------------------------------------
helper
a couple of helper functions
-------------------------------------------------------
Author: Dallas Fraser
ID: 110242560
Email: [email protected]
Version: 2014-09-10
-------------------------------------------------------
"""
import networkx as nx
def make_co_cricket():
'''
make_co_cricket
assembles a co-cricket
Parameters:
None
Returns:
g: the co-cricket (Graph)
'''
g = make_diamond()
g.add_node(4)
return g
def make_kite():
'''
make_kite
assembles a kite (co-chair)
Parameters:
None
Returns:
kite: the kite (Graph)
'''
kite = make_diamond()
kite.add_node(4)
kite.add_edge(2, 4)
return kite
def make_claw():
'''
make_claw
assembles a claw
Parameters:
None
Returns:
claw: the claw (Graph)
'''
claw = nx.Graph()
for x in range(0, 4):
# add four vertices
claw.add_node(x)
hub = 0 #0-vertex is the hub of claw
for x in range(1, 4):
claw.add_edge(hub, x)
return claw
def make_co_claw():
'''
make_co_claw
assembles a co-claw
Parameters:
None
Returns:
co_claw: the co_claw (Graph)
'''
return nx.complement(make_claw())
def make_cycle(n):
'''
make_cycle
assembles a cycle with n vertices
Parameters:
n: the number of vertices in cycle (int)
Returns:
cycle: the cycle (Graph)
'''
cycle = nx.Graph()
for vertex in range(0,n):
# add all the vertices
cycle.add_node(vertex)
for vertex in range(0,n):
# add all the edges
cycle.add_edge(vertex, (vertex+1) % n)
cycle.add_edge(vertex, (vertex-1) % n)
return cycle
def make_co_cycle(n):
'''
a function the creates an complement of a cycle of size n
Parameters:
n: the size of the anti cycle
Returns:
co_cycle: a networkx graph (networkx)
'''
return nx.complement(make_cycle(n))
def make_wheel(n):
'''
make_wheel
assembles a wheel with n vertices
Parameters:
n: the number of vertices in the wheel (int)
Returns:
wheel: the wheel (networkx)
'''
wheel = make_cycle(n-1)
wheel.add_node(n-1)
for edge in range(0,n-1):
wheel.add_edge(edge,n-1)
return wheel
def join(G, H):
'''
join
a function which (complete) joins one graph G to graph H
Parameters:
G: Graph with at least one vertice (Graph)
H: Graph with at least one vertice (Graph)
Returns:
F: The join of G and H (Graph)
'''
# copy all of G's nodes and edges into F
F = nx.Graph()
F.add_nodes_from(G.nodes())
F.add_edges_from(G.edges())
shift = G.number_of_nodes()
# add all nodes of H, shifted past G's labels
for vertex in H.nodes():
F.add_node(vertex + shift)
# add all of H's edges, with the same shift
for e1, e2 in H.edges():
F.add_edge(e1 + shift, e2 + shift)
# join the two sets of nodes
for v1 in G.nodes():
for v2 in H.nodes():
F.add_edge(v1,v2+shift)
return F
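# Hedged usage sketch (values chosen only for illustration): joining a 5-cycle
# with a single vertex yields the 6-vertex wheel, the same graph that
# make_wheel(6) builds directly.
#
#   c5 = make_cycle(5)
#   k1 = nx.Graph()
#   k1.add_node(0)
#   w6 = join(c5, k1)  # the lone vertex (relabelled to 5) becomes the hub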
def make_diamond():
'''
make_diamond
assembles a diamond
Parameters:
None
Returns:
diamond: the diamond graph (networkx)
'''
diamond = nx.Graph()
for x in range(0, 4):
# add four vertices
diamond.add_node(x)
diamond.add_edge(0, 1)
diamond.add_edge(0, 2)
diamond.add_edge(0, 3)
diamond.add_edge(1, 2)
diamond.add_edge(1, 3)
return diamond
def make_co_diamond():
'''
make_co_diamond
assembles a co-diamond
Parameters:
None
Returns:
co_diamond: the co-diamond graph (networkx)
'''
return nx.complement(make_diamond())
def make_cok4():
'''
make_coK4
assembles a co-K4
Parameters:
None
Returns:
g: the co-K4 graph (networkx)
'''
g = nx.Graph()
g.add_node(0)
g.add_node(1)
g.add_node(2)
g.add_node(3)
return g
def text_to_networkx(lines):
'''
text_to_networkx
a function that takes the lines from a text file and puts into a format for
networkx graph
Parameters:
lines: a list of lines from the text file (list)
Returns:
graph: a networkx graph
'''
# try:
graph = nx.Graph()
index = 0
nodes = []
for line in lines:
# add all the nodes
entries = line.split(":")
if len(entries) == 2:
try:
node = int(entries[0])
except:
node = None
if node is None:
node = index
graph.add_node(node)
nodes.append(node)
index += 1
index = 0
for line in lines:
# add all the edges
entries = line.split(":")
if (len(entries) > 1):
entries[1] = entries[1].replace(" ", "")
edges = entries[1].split(",")
for edge in edges:
if edge != '':
graph.add_edge(nodes[index], int(edge))
index += 1
return graph
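# Hedged example of the expected input format (the lines below are illustrative,
# not copied from the test files): each line reads "<node>:<comma-separated
# neighbours>", so a claw could be encoded as
#   0:1,2,3
#   1:0
#   2:0
#   3:0
# and text_to_networkx(["0:1,2,3", "1:0", "2:0", "3:0"]) rebuilds the claw.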
def networkx_to_text(G):
'''
a function that converts a graph G to text
Parameters:
G: the graph (networkx)
Returns:
text: the graph text (string)
'''
text = ""
for node in G.nodes():
text += str(node) + ":"
n = []
for neighbor in G.neighbors(node):
n.append(str(neighbor))
text += ",".join(n)
text += "\n"
return text
def make_clique(n):
'''
makes a clique of size n
Parameters:
n: the size of the clique (int)
Returns:
clique: the graph (networkx)
'''
clique = nx.Graph()
for v in range(0, n):
clique.add_node(v)
end = len(clique.nodes())
for target in clique.nodes():
for source in range(target+1, end):
clique.add_edge(target, source)
return clique
def make_2K2():
'''
a function which assembles a 2K2
Parameters:
None
Returns:
g: 2K2 graph (network)
'''
return nx.complement(make_cycle(4))
def make_co_twin_c5():
'''
a function to assemble a co-Twin-C5
Parameters:
None
Returns:
g: the graph g (networkx)
'''
g = make_cycle(5)
g.add_node(5)
g.add_edge(5, 0)
g.add_edge(5, 2)
g.add_edge(5, 1)
return g
def make_co_twin_house():
'''
a function to assemble a co-Twin-House
Parameters:
None
Returns:
g: the graph g (networkx)
'''
g = make_diamond()
g.add_node(4)
g.add_node(5)
g.add_edge(2, 4)
g.add_edge(3, 5)
return g
def make_co_p2_p3():
'''
a function to assemble a co p2-p3 graph
Parameters:
None
Returns:
g: the graph g (networkx)
'''
g = make_diamond()
g.add_node(4)
g.add_edge(2, 4)
g.add_edge(3, 4)
return g
def make_co_A():
'''
a function to assemble a co-A graph
Parameters:
None
Returns:
g: the graph g (networkx)
'''
g = nx.Graph()
for i in range(0, 6):
g.add_node(i)
g.add_edge(0, 1)
g.add_edge(0, 3)
g.add_edge(0, 4)
g.add_edge(1, 2)
g.add_edge(1, 4)
g.add_edge(1, 5)
g.add_edge(2, 5)
g.add_edge(3, 4)
g.add_edge(4, 5)
return g
def make_co_R():
'''
a method to assemble a co-R graph
Parameters:
None
Returns:
g: the graph g (networkx)
'''
g = make_diamond()
g.add_node(4)
g.add_node(5)
g.add_edge(0, 4)
g.add_edge(1, 4)
g.add_edge(2, 4)
g.add_edge(3, 5)
return g
def make_bridge():
'''
a method to assemble a bridge graph
Parameters:
None
Returns:
g: the graph g (networkx)
'''
g = make_co_R()
g.add_edge(0, 5)
g.add_edge(1, 5)
return g
def forbidden_line_subgraphs():
'''
a method to assemble all 9 of the forbidden subgraphs
of line graphs
Parameters:
None
Returns:
graphs: a list of graphs (networkx)
'''
graphs = []
graphs.append(make_claw()) # claw
graphs.append(make_wheel(6)) # W5
graphs.append(make_bridge()) # Bridge
graphs.append(make_co_R()) # Co-R
graphs.append(make_co_A()) # Co-A
graphs.append(make_co_p2_p3())
graphs.append(make_co_twin_house())
graphs.append(make_co_twin_c5())
k5_e = make_clique(5)
k5_e.remove_edge(3, 4)
graphs.append(k5_e)
return graphs
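# Hedged usage sketch: a typical use of this list is Beineke's characterisation
# of line graphs, rejecting a candidate graph G if it contains any of the nine
# graphs as an induced subgraph. The induced-subgraph test itself is not part of
# this module, so the loop below is only indicative.
#
#   for h in forbidden_line_subgraphs():
#       ...  # reject G if it has an induced subgraph isomorphic to h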
import unittest
import os
class tester(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def testMakeClique(self):
edges = [(0, 1), (0, 2), (1, 2)]
nodes = [0, 1, 2]
clique = make_clique(3)
self.assertEqual(edges, clique.edges(), 'Make Clique: failed on edges')
self.assertEqual(nodes, clique.nodes(), 'Make Clique: failed on nodes')
edges = [(0, 1), (0, 2), (0, 3), (1, 2), (1, 3), (2, 3)]
nodes = [0, 1, 2, 3]
clique = make_clique(4)
self.assertEqual(edges, clique.edges(), 'Make Clique: failed on edges')
self.assertEqual(nodes, clique.nodes(), 'Make Clique: failed on nodes')
def testMakeDiamond(self):
g = make_diamond()
edges = [(0, 1), (0, 2), (0, 3), (1, 2), (1, 3)]
vertices = [0, 1, 2, 3]
self.assertEqual(edges, g.edges(), "Make Diamond: failed on edges")
self.assertEqual(vertices, g.nodes(),
"Make Diamond: failed on vertices")
def testMakeCoDiamond(self):
g = make_co_diamond()
edges = [(2, 3)]
vertices = [0, 1, 2, 3]
self.assertEqual(edges, g.edges(),
"Make Co-Diamond: failed on edges")
self.assertEqual(vertices, g.nodes(),
"Make Co-Diamond: failed on vertices")
def testMakeClaw(self):
g = make_claw()
edges = [(0, 1), (0, 2), (0, 3)]
vertices =[0, 1, 2, 3]
self.assertEqual(edges, g.edges(), "Make Claw: failed on edges")
self.assertEqual(vertices, g.nodes(), "Make Claw: failed on vertices")
def testMakeCoClaw(self):
g = make_co_claw()
edges = [(1, 2), (1, 3), (2, 3)]
vertices =[0, 1, 2, 3]
self.assertEqual(edges, g.edges(), "Make Co-Claw: failed on edges")
self.assertEqual(vertices, g.nodes(),
"Make Co-Claw: failed on vertices")
def testMakeCycle(self):
g = make_cycle(3)
edges = [(0,1), (0,2), (1,2)]
vertices = [0, 1, 2]
self.assertEqual(edges, g.edges(), "Make Cycle: failed on edges")
self.assertEqual(vertices, g.nodes(), "Make Cycle: failed on vertices")
def testJoin(self):
# wheel test
g = make_cycle(5)
h = nx.Graph()
h.add_node(0)
f = join(g, h)
expect = nx.wheel_graph(6) # expect a wheel
self.assertEqual(expect.nodes(), f.nodes(),
" Join: nodes failed on wheel test")
self.assertEqual(nx.is_isomorphic(f, expect), True,
" Join: edges failed on wheel test")
# join of two triangles = K6
g = nx.complete_graph(3)
h = nx.complete_graph(3)
f = join(g, h)
expect = nx.complete_graph(6)
self.assertEqual(expect.nodes(), f.nodes(),
"Join: nodes failed for K6 test")
self.assertEqual(nx.is_isomorphic(f, expect), True,
" Join: edges failed on wheel K6 test")
def testWheel(self):
# w5
w = make_wheel(5)
g = make_cycle(4)
g.add_node(4)
g.add_edge(0,4)
g.add_edge(1,4)
g.add_edge(2,4)
g.add_edge(3,4)
self.assertEqual(w.edges(), g.edges(), "Make wheel: Failed for W5 test")
def testTextToNetworkx(self):
directory = os.getcwd()
while "inducer" in directory:
directory = os.path.dirname(directory)
claw = make_claw()
c7 = make_cycle(7)
co_claw = make_co_claw()
tests = {'test1.txt': claw, 'test2.txt': c7, 'test3.txt': co_claw}
for f, expect in tests.items():
filepath = os.path.join(directory, "tests", f)
with open(filepath) as fh:
content = fh.read()
lines = content.replace("\r", "")
lines = lines.split("\n")
result = text_to_networkx(lines)
self.assertEqual(expect.nodes(), result.nodes(),
"Text to Networkx Failed Nodes: %s" % f)
self.assertEqual(expect.edges(), result.edges(),
"Text to Networkx Failed Edges: %s" % f)
def testNetworkxToText(self):
g = make_claw()
text = networkx_to_text(g)
self.assertEqual("0:1,2,3\n1:0\n2:0\n3:0\n", text)
g = make_diamond()
text = networkx_to_text(g)
self.assertEqual("0:1,2,3\n1:0,2,3\n2:0,1\n3:0,1\n", text)
def testMakeCoK4(self):
cok4 = make_cok4()
self.assertEqual(cok4.nodes(), [0, 1, 2, 3])
self.assertEqual(cok4.edges(), [])
def testMake2K2(self):
g = make_2K2()
expect = [0, 1, 2, 3]
self.assertEqual(g.nodes(), expect)
expect = [(0, 2), (1, 3)]
self.assertEqual(g.edges(), expect)
def testMakeCoTwinC5(self):
result = make_co_twin_c5()
self.assertEqual(len(result.nodes()), 6)
expect = [(0, 1), (0, 4), (0, 5), (1, 2), (1, 5),
(2, 3), (2, 5), (3, 4)]
self.assertEqual(expect, result.edges())
def testMakeCoTwinHouse(self):
result = make_co_twin_house()
self.assertEqual(len(result.nodes()), 6)
expect = [(0, 1), (0, 2), (0, 3), (1, 2), (1, 3), (2, 4), (3, 5)]
self.assertEqual(expect, result.edges())
def testMakeCoP2P3(self):
result = make_co_p2_p3()
self.assertEqual(len(result.nodes()), 5)
expect = [(0, 1), (0, 2), (0, 3), (1, 2), (1, 3), (2, 4), (3, 4)]
self.assertEqual(expect, result.edges())
def testMakeCoA(self):
result = make_co_A()
self.assertEqual(len(result.nodes()), 6)
expect = [(0, 1), (0, 3), (0, 4), (1, 2), (1, 4),
(1, 5), (2, 5), (3, 4), (4, 5)]
self.assertEqual(result.edges(), expect)
def testMakeCoR(self):
result = make_co_R()
self.assertEqual(len(result.nodes()), 6)
expect = [(0, 1), (0, 2), (0, 3), (0, 4), (1, 2),
(1, 3), (1, 4), (2, 4), (3, 5)]
self.assertEqual(result.edges(), expect)
def testMakeBridge(self):
result = make_bridge()
self.assertEqual(len(result.nodes()), 6)
expect = [(0, 1), (0, 2), (0, 3), (0, 4),(0, 5), (1, 2),
(1, 3), (1, 4),(1, 5), (2, 4), (3, 5)]
self.assertEqual(result.edges(), expect)
def testForbiddenLineSubgraphs(self):
result = forbidden_line_subgraphs()
self.assertEqual(len(result), 9)
|
py
|
1a596a1b0b341684d12c5370418a7d3f97b5923f
|
"""----------------------------------------------------------------------------"""
""" Copyright (c) FIRST 2017. All Rights Reserved. """
""" Open Source Software - may be modified and shared by FRC teams. The code """
""" must be accompanied by the FIRST BSD license file in the root directory of """
""" the project. """
"""----------------------------------------------------------------------------"""
#
# These tests are adapted from ntcore's test suite
#
import pytest
from threading import Condition
from ntcore.constants import NT_NOTIFY_LOCAL, NT_NOTIFY_NEW
from ntcore.value import Value
class SC(object):
def __init__(self):
self.events = []
self.event_cond = Condition()
def __call__(self, event):
with self.event_cond:
self.events.append(event)
self.event_cond.notify()
def wait(self, count):
with self.event_cond:
result = self.event_cond.wait_for(lambda: len(self.events) == count, 2)
assert result, "expected %s events, got %s" % (count, len(self.events))
return self.events[:]
@pytest.fixture
def server_cb():
return SC()
def test_EntryNewLocal(nt_live, server_cb):
nt_server, nt_client = nt_live
nt_server_api = nt_server._api
nt_server_api.addEntryListenerById(
nt_server_api.getEntryId("/foo"), server_cb, NT_NOTIFY_NEW | NT_NOTIFY_LOCAL
)
# Trigger an event
nt_server_api.setEntryValueById(
nt_server_api.getEntryId("/foo/bar"), Value.makeDouble(2.0)
)
nt_server_api.setEntryValueById(
nt_server_api.getEntryId("/foo"), Value.makeDouble(1.0)
)
assert nt_server_api.waitForEntryListenerQueue(1.0)
# Check the event
events = server_cb.wait(1)
# assert events[0].listener == handle
assert events[0].local_id == nt_server_api.getEntryId("/foo")
assert events[0].name == "/foo"
assert events[0].value == Value.makeDouble(1.0)
assert events[0].flags == NT_NOTIFY_NEW | NT_NOTIFY_LOCAL
def test_EntryNewRemote(nt_live, server_cb):
nt_server, nt_client = nt_live
nt_server_api = nt_server._api
nt_client_api = nt_client._api
nt_server_api.addEntryListenerById(
nt_server_api.getEntryId("/foo"), server_cb, NT_NOTIFY_NEW
)
# Trigger an event
nt_client_api.setEntryValueById(
nt_client_api.getEntryId("/foo/bar"), Value.makeDouble(2.0)
)
nt_client_api.setEntryValueById(
nt_client_api.getEntryId("/foo"), Value.makeDouble(1.0)
)
nt_client_api.flush()
assert nt_server_api.waitForEntryListenerQueue(1.0)
# Check the event
events = server_cb.wait(1)
# assert events[0].listener == handle
assert events[0].local_id == nt_server_api.getEntryId("/foo")
assert events[0].name == "/foo"
assert events[0].value == Value.makeDouble(1.0)
assert events[0].flags == NT_NOTIFY_NEW
def test_PrefixNewLocal(nt_live, server_cb):
nt_server, nt_client = nt_live
nt_server_api = nt_server._api
nt_server_api.addEntryListener("/foo", server_cb, NT_NOTIFY_NEW | NT_NOTIFY_LOCAL)
# Trigger an event
nt_server_api.setEntryValueById(
nt_server_api.getEntryId("/foo/bar"), Value.makeDouble(1.0)
)
nt_server_api.setEntryValueById(
nt_server_api.getEntryId("/baz"), Value.makeDouble(1.0)
)
assert nt_server_api.waitForEntryListenerQueue(1.0)
events = server_cb.wait(1)
# assert events[0].listener == handle
assert events[0].local_id == nt_server_api.getEntryId("/foo/bar")
assert events[0].name == "/foo/bar"
assert events[0].value == Value.makeDouble(1.0)
assert events[0].flags == NT_NOTIFY_NEW | NT_NOTIFY_LOCAL
def test_PrefixNewRemote(nt_live, server_cb):
nt_server, nt_client = nt_live
nt_server_api = nt_server._api
nt_client_api = nt_client._api
nt_server_api.addEntryListener("/foo", server_cb, NT_NOTIFY_NEW | NT_NOTIFY_LOCAL)
# Trigger an event
nt_client_api.setEntryValueById(
nt_client_api.getEntryId("/foo/bar"), Value.makeDouble(1.0)
)
nt_client_api.setEntryValueById(
nt_client_api.getEntryId("/baz"), Value.makeDouble(1.0)
)
nt_client_api.flush()
assert nt_server_api.waitForEntryListenerQueue(1.0)
# Check the event
events = server_cb.wait(1)
# assert events[0].listener == handle
assert events[0].local_id == nt_server_api.getEntryId("/foo/bar")
assert events[0].name == "/foo/bar"
assert events[0].value == Value.makeDouble(1.0)
assert events[0].flags == NT_NOTIFY_NEW
|
py
|
1a596a25f175bbe59abadedcd71ad23300718ac1
|
# -*- coding: utf-8 -*-
# PLEASE DO NOT EDIT THIS FILE, IT IS GENERATED AND WILL BE OVERWRITTEN:
# https://github.com/ccxt/ccxt/blob/master/CONTRIBUTING.md#how-to-contribute-code
from ccxt.base.exchange import Exchange
import hashlib
from ccxt.base.errors import ExchangeError
from ccxt.base.errors import AuthenticationError
from ccxt.base.errors import NotSupported
class xbtce(Exchange):
def describe(self):
return self.deep_extend(super(xbtce, self).describe(), {
'id': 'xbtce',
'name': 'xBTCe',
'countries': ['RU'],
'rateLimit': 2000, # responses are cached every 2 seconds
'version': 'v1',
'has': {
'cancelOrder': True,
'CORS': False,
'createMarketOrder': False,
'createOrder': True,
'fetchBalance': True,
'fetchMarkets': True,
'fetchOHLCV': False,
'fetchOrderBook': True,
'fetchTicker': True,
'fetchTickers': True,
'fetchTrades': True,
},
'urls': {
'referral': 'https://xbtce.com/?agent=XX97BTCXXXG687021000B',
'logo': 'https://user-images.githubusercontent.com/1294454/28059414-e235970c-662c-11e7-8c3a-08e31f78684b.jpg',
'api': 'https://cryptottlivewebapi.xbtce.net:8443/api',
'www': 'https://www.xbtce.com',
'doc': [
'https://www.xbtce.com/tradeapi',
'https://support.xbtce.info/Knowledgebase/Article/View/52/25/xbtce-exchange-api',
],
},
'requiredCredentials': {
'apiKey': True,
'secret': True,
'uid': True,
},
'api': {
'public': {
'get': [
'currency',
'currency/{filter}',
'level2',
'level2/{filter}',
'quotehistory/{symbol}/{periodicity}/bars/ask',
'quotehistory/{symbol}/{periodicity}/bars/bid',
'quotehistory/{symbol}/level2',
'quotehistory/{symbol}/ticks',
'symbol',
'symbol/{filter}',
'tick',
'tick/{filter}',
'ticker',
'ticker/{filter}',
'tradesession',
],
},
'private': {
'get': [
'tradeserverinfo',
'tradesession',
'currency',
'currency/{filter}',
'level2',
'level2/{filter}',
'symbol',
'symbol/{filter}',
'tick',
'tick/{filter}',
'account',
'asset',
'asset/{id}',
'position',
'position/{id}',
'trade',
'trade/{id}',
'quotehistory/{symbol}/{periodicity}/bars/ask',
'quotehistory/{symbol}/{periodicity}/bars/ask/info',
'quotehistory/{symbol}/{periodicity}/bars/bid',
'quotehistory/{symbol}/{periodicity}/bars/bid/info',
'quotehistory/{symbol}/level2',
'quotehistory/{symbol}/level2/info',
'quotehistory/{symbol}/periodicities',
'quotehistory/{symbol}/ticks',
'quotehistory/{symbol}/ticks/info',
'quotehistory/cache/{symbol}/{periodicity}/bars/ask',
'quotehistory/cache/{symbol}/{periodicity}/bars/bid',
'quotehistory/cache/{symbol}/level2',
'quotehistory/cache/{symbol}/ticks',
'quotehistory/symbols',
'quotehistory/version',
],
'post': [
'trade',
'tradehistory',
],
'put': [
'trade',
],
'delete': [
'trade',
],
},
},
'commonCurrencies': {
'DSH': 'DASH',
},
})
def fetch_markets(self, params={}):
response = self.privateGetSymbol(params)
result = []
for i in range(0, len(response)):
market = response[i]
id = self.safe_string(market, 'Symbol')
baseId = self.safe_string(market, 'MarginCurrency')
quoteId = self.safe_string(market, 'ProfitCurrency')
base = self.safe_currency_code(baseId)
quote = self.safe_currency_code(quoteId)
symbol = base + '/' + quote
symbol = symbol if market['IsTradeAllowed'] else id
result.append({
'id': id,
'symbol': symbol,
'base': base,
'quote': quote,
'baseId': baseId,
'quoteId': quoteId,
'info': market,
'active': None,
'precision': self.precision,
'limits': self.limits,
})
return result
def fetch_balance(self, params={}):
self.load_markets()
balances = self.privateGetAsset(params)
result = {
'info': balances,
'timestamp': None,
'datetime': None,
}
for i in range(0, len(balances)):
balance = balances[i]
currencyId = self.safe_string(balance, 'Currency')
code = self.safe_currency_code(currencyId)
account = self.account()
account['free'] = self.safe_string(balance, 'FreeAmount')
account['used'] = self.safe_string(balance, 'LockedAmount')
account['total'] = self.safe_string(balance, 'Amount')
result[code] = account
return self.parse_balance(result, False)
def fetch_order_book(self, symbol, limit=None, params={}):
self.load_markets()
market = self.market(symbol)
request = {
'filter': market['id'],
}
response = self.privateGetLevel2Filter(self.extend(request, params))
orderbook = response[0]
timestamp = self.safe_integer(orderbook, 'Timestamp')
return self.parse_order_book(orderbook, symbol, timestamp, 'Bids', 'Asks', 'Price', 'Volume')
def parse_ticker(self, ticker, market=None):
timestamp = 0
last = None
if 'LastBuyTimestamp' in ticker:
if timestamp < ticker['LastBuyTimestamp']:
timestamp = ticker['LastBuyTimestamp']
last = ticker['LastBuyPrice']
if 'LastSellTimestamp' in ticker:
if timestamp < ticker['LastSellTimestamp']:
timestamp = ticker['LastSellTimestamp']
last = ticker['LastSellPrice']
if not timestamp:
timestamp = self.milliseconds()
symbol = None
if market:
symbol = market['symbol']
return {
'symbol': symbol,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'high': ticker['DailyBestBuyPrice'],
'low': ticker['DailyBestSellPrice'],
'bid': ticker['BestBid'],
'bidVolume': None,
'ask': ticker['BestAsk'],
'askVolume': None,
'vwap': None,
'open': None,
'close': last,
'last': last,
'previousClose': None,
'change': None,
'percentage': None,
'average': None,
'baseVolume': ticker['DailyTradedTotalVolume'],
'quoteVolume': None,
'info': ticker,
}
def fetch_tickers(self, symbols=None, params={}):
self.load_markets()
response = self.publicGetTicker(params)
tickers = self.index_by(response, 'Symbol')
ids = list(tickers.keys())
result = {}
for i in range(0, len(ids)):
id = ids[i]
market = None
symbol = None
if id in self.markets_by_id:
market = self.markets_by_id[id]
symbol = market['symbol']
else:
baseId = id[0:3]
quoteId = id[3:6]
base = self.safe_currency_code(baseId)
quote = self.safe_currency_code(quoteId)
symbol = base + '/' + quote
ticker = tickers[id]
result[symbol] = self.parse_ticker(ticker, market)
return self.filter_by_array(result, 'symbol', symbols)
def fetch_ticker(self, symbol, params={}):
self.load_markets()
market = self.market(symbol)
request = {
'filter': market['id'],
}
response = self.publicGetTickerFilter(self.extend(request, params))
length = len(response)
if length < 1:
raise ExchangeError(self.id + ' fetchTicker returned empty response, xBTCe public API error')
tickers = self.index_by(response, 'Symbol')
ticker = tickers[market['id']]
return self.parse_ticker(ticker, market)
def fetch_trades(self, symbol, since=None, limit=None, params={}):
self.load_markets()
# no method for trades?
return self.privateGetTrade(params)
def parse_ohlcv(self, ohlcv, market=None):
return [
self.safe_integer(ohlcv, 'Timestamp'),
self.safe_number(ohlcv, 'Open'),
self.safe_number(ohlcv, 'High'),
self.safe_number(ohlcv, 'Low'),
self.safe_number(ohlcv, 'Close'),
self.safe_number(ohlcv, 'Volume'),
]
def fetch_ohlcv(self, symbol, timeframe='1m', since=None, limit=None, params={}):
# minutes = int(timeframe / 60) # 1 minute by default
# periodicity = str(minutes)
# self.load_markets()
# market = self.market(symbol)
# if since is None:
# since = self.seconds() - 86400 * 7  # last 7 days by default
# if limit is None:
# limit = 1000 # default
# response = self.privateGetQuotehistorySymbolPeriodicityBarsBid(self.extend({
# 'symbol': market['id'],
# 'periodicity': periodicity,
# 'timestamp': since,
# 'count': limit,
# }, params))
# return self.parse_ohlcvs(response['Bars'], market, timeframe, since, limit)
raise NotSupported(self.id + ' fetchOHLCV is disabled by the exchange')
def create_order(self, symbol, type, side, amount, price=None, params={}):
self.load_markets()
if type == 'market':
raise ExchangeError(self.id + ' allows limit orders only')
request = {
'pair': self.market_id(symbol),
'type': side,
'amount': amount,
'rate': price,
}
response = self.privatePostTrade(self.extend(request, params))
return {
'info': response,
'id': str(response['Id']),
}
def cancel_order(self, id, symbol=None, params={}):
request = {
'Type': 'Cancel',
'Id': id,
}
return self.privateDeleteTrade(self.extend(request, params))
def nonce(self):
return self.milliseconds()
def sign(self, path, api='public', method='GET', params={}, headers=None, body=None):
if not self.apiKey:
raise AuthenticationError(self.id + ' requires apiKey for all requests, their public API is always busy')
if not self.uid:
raise AuthenticationError(self.id + ' requires uid property for authentication and trading, their public API is always busy')
url = self.urls['api'] + '/' + self.version
if api == 'public':
url += '/' + api
url += '/' + self.implode_params(path, params)
query = self.omit(params, self.extract_params(path))
if api == 'public':
if query:
url += '?' + self.urlencode(query)
else:
self.check_required_credentials()
headers = {'Accept-Encoding': 'gzip, deflate'}
nonce = str(self.nonce())
if method == 'POST':
if query:
headers['Content-Type'] = 'application/json'
body = self.json(query)
else:
url += '?' + self.urlencode(query)
auth = nonce + self.uid + self.apiKey + method + url
if body:
auth += body
signature = self.hmac(self.encode(auth), self.encode(self.secret), hashlib.sha256, 'base64')
credentials = self.uid + ':' + self.apiKey + ':' + nonce + ':' + signature
headers['Authorization'] = 'HMAC ' + credentials
return {'url': url, 'method': method, 'body': body, 'headers': headers}
|
py
|
1a596a3c61a27c6e28bbee5c55ebb728bd1680f1
|
from behave import when, given, then
import marathon
import time
def wait_for_marathon(context):
for _ in xrange(30):
try:
context.client.ping()
except marathon.exceptions.MarathonError:
time.sleep(1)
else:
return
def delete_existing_apps(context):
for app in context.client.list_apps():
context.client.delete_app(app.id, force=True)
# app deletes seem to be asynchronous, creating an app with the same name
# as a previous app will fail unless the deploy for deleting it has
# finished.
time.sleep(0.5)
while context.client.list_deployments():
print "There are still marathon deployments in progress. sleeping."
time.sleep(0.5)
@given('a running marathon instance')
def running_marathon_instance(context):
context.client = marathon.MarathonClient('http://marathon:8080/')
wait_for_marathon(context)
delete_existing_apps(context)
@given('a marathon app for marathon to start')
def marathon_app_for_marathon_to_start(context):
context.client.create_app(
app_id='app-id',
app=marathon.MarathonApp(
cmd="/bin/sleep 300",
container={
'docker': {
'image': 'busybox',
'network': 'BRIDGE',
},
'type': 'DOCKER',
},
# This constraint will prevent more than one instance from
# starting, ensuring the marathon deploy is still running when we
# cause a failover.
constraints=[["hostname", "UNIQUE"]],
instances=2,
),
)
@when('we wait for one of the instances to start')
def wait_for_one_instance_to_start(context):
for _ in xrange(60):
app = context.client.get_app('app-id', embed_tasks=True)
if app.tasks_running >= 1:
context.task_ids_before_failover = set([t.id for t in app.tasks])
return
time.sleep(1)
raise Exception("Instance did not start before timeout. Tasks: %r" % app.tasks)
@when('we cause a leadership failover')
def cause_a_leadership_failover(context):
context.client.delete_leader()
@then('marathon should not kill anything')
def marathon_should_not_kill_anything(context):
app = context.client.get_app('app-id', embed_tasks=True)
# Check for a little while, in case the effect is delayed.
for _ in xrange(10):
task_ids = set([t.id for t in app.tasks])
assert context.task_ids_before_failover == task_ids, (context.task_ids_before_failover, task_ids)
time.sleep(1)
|
py
|
1a596b7ef08dfcfc8da0e19ef3c3e100c64ff20b
|
#!/opt/local/bin/python2.7
# -*- coding: utf-8 -*-
__author__ = 'naras_mg'
import re
for file in ['playground_straight.csv','playground_5x5.csv' ]:
# for file in ['playground_5x5.csv' ]:
print '\n--------------------\nprocessing: ', file
ckrFile = open(file)
ckr = ckrFile.readlines()
state = 0 # will denote where in the encoding instructions file we are reading lines
bandha = []
chakra = []
for lin in ckr:
if lin == ",,,,,,\n": continue # blank line, just skip processing
line = lin[:-1].split(',')
# print line
if re.search('^Text to Coded',line[0]) :
state = 1
elif re.search('^Is it possible to Code Entire Text',line[0]):
state = 4
elif re.search('^Text Matrix',line[0]):
state = 4
elif re.search('^Describe Your Coding Matrix ',line[0]):
state = 2
elif re.search('^Valaya',line[0]):
state = 3
else: pass # state from prev. read line continues
# print state
if state == 1: plainText = line[1] # plain text is here
elif state == 2: # coding matrix is here
for i in range(5): bandha.append(line[2 + i])
elif state == 3: # valaya (chakra here)
for i in range(5): chakra.append(line[2 + i])
bandha = [int(c) for c in bandha if c!='']
chakra = [int(c) for c in chakra if c!='']
print 'plain text: ', plainText
print 'bandha: ', bandha
print 'chakra: ', chakra
cipherText = [chr(ord('a') + i - 1) for i in chakra]
print 'cipher: ', ' '.join(cipherText)
decrypted = (chr(ord('a') + chakra[i - 1] - 1) for i in bandha)
print 'decrypted:',' '.join(decrypted)
try:
cipherText = [chr(i) for i in chakra]
print 'cipher: ', ' '.join(cipherText)
# sortedBandha = sorted(bandha)
# for item in sortedBandha: print item, bandha.index(item)
decrypted = (chr(chakra[bandha.index(item)]) for item in sorted(bandha))
print 'decrypted:', ' '.join(decrypted)
except:
pass
|
py
|
1a596bb83a4cf3c619b47ef7b1310380176e2806
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012, Intel, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Unit Tests for cinder.volume.rpcapi
"""
from oslo.config import cfg
from cinder import context
from cinder import db
from cinder.openstack.common import jsonutils
from cinder.openstack.common import rpc
from cinder import test
from cinder.volume import rpcapi as volume_rpcapi
CONF = cfg.CONF
class VolumeRpcAPITestCase(test.TestCase):
def setUp(self):
super(VolumeRpcAPITestCase, self).setUp()
self.context = context.get_admin_context()
vol = {}
vol['host'] = 'fake_host'
vol['availability_zone'] = CONF.storage_availability_zone
vol['status'] = "available"
vol['attach_status'] = "detached"
vol['metadata'] = {"test_key": "test_val"}
volume = db.volume_create(self.context, vol)
snpshot = {
'volume_id': 'fake_id',
'status': "creating",
'progress': '0%',
'volume_size': 0,
'display_name': 'fake_name',
'display_description': 'fake_description'}
snapshot = db.snapshot_create(self.context, snpshot)
self.fake_volume = jsonutils.to_primitive(volume)
self.fake_volume_metadata = volume["volume_metadata"]
self.fake_snapshot = jsonutils.to_primitive(snapshot)
def test_serialized_volume_has_id(self):
self.assertIn('id', self.fake_volume)
def _test_volume_api(self, method, rpc_method, **kwargs):
ctxt = context.RequestContext('fake_user', 'fake_project')
if 'rpcapi_class' in kwargs:
rpcapi_class = kwargs['rpcapi_class']
del kwargs['rpcapi_class']
else:
rpcapi_class = volume_rpcapi.VolumeAPI
rpcapi = rpcapi_class()
expected_retval = 'foo' if method == 'call' else None
expected_version = kwargs.pop('version', rpcapi.BASE_RPC_API_VERSION)
if 'request_spec' in kwargs:
spec = jsonutils.to_primitive(kwargs['request_spec'])
kwargs['request_spec'] = spec
expected_msg = rpcapi.make_msg(method, **kwargs)
if 'volume' in expected_msg['args']:
volume = expected_msg['args']['volume']
del expected_msg['args']['volume']
expected_msg['args']['volume_id'] = volume['id']
if 'snapshot' in expected_msg['args']:
snapshot = expected_msg['args']['snapshot']
del expected_msg['args']['snapshot']
expected_msg['args']['snapshot_id'] = snapshot['id']
if 'host' in expected_msg['args']:
del expected_msg['args']['host']
if 'dest_host' in expected_msg['args']:
dest_host = expected_msg['args']['dest_host']
dest_host_dict = {'host': dest_host.host,
'capabilities': dest_host.capabilities}
del expected_msg['args']['dest_host']
expected_msg['args']['host'] = dest_host_dict
if 'new_volume' in expected_msg['args']:
volume = expected_msg['args']['new_volume']
del expected_msg['args']['new_volume']
expected_msg['args']['new_volume_id'] = volume['id']
expected_msg['version'] = expected_version
if 'host' in kwargs:
host = kwargs['host']
else:
host = kwargs['volume']['host']
expected_topic = '%s:%s' % (CONF.volume_topic, host)
self.fake_args = None
self.fake_kwargs = None
def _fake_rpc_method(*args, **kwargs):
self.fake_args = args
self.fake_kwargs = kwargs
if expected_retval:
return expected_retval
self.stubs.Set(rpc, rpc_method, _fake_rpc_method)
retval = getattr(rpcapi, method)(ctxt, **kwargs)
self.assertEqual(retval, expected_retval)
expected_args = [ctxt, expected_topic, expected_msg]
for arg, expected_arg in zip(self.fake_args, expected_args):
self.assertEqual(arg, expected_arg)
def test_create_volume(self):
self._test_volume_api('create_volume',
rpc_method='cast',
volume=self.fake_volume,
host='fake_host1',
request_spec='fake_request_spec',
filter_properties='fake_properties',
allow_reschedule=True,
snapshot_id='fake_snapshot_id',
image_id='fake_image_id',
source_volid='fake_src_id',
version='1.4')
def test_create_volume_serialization(self):
request_spec = {"metadata": self.fake_volume_metadata}
self._test_volume_api('create_volume',
rpc_method='cast',
volume=self.fake_volume,
host='fake_host1',
request_spec=request_spec,
filter_properties='fake_properties',
allow_reschedule=True,
snapshot_id='fake_snapshot_id',
image_id='fake_image_id',
source_volid='fake_src_id',
version='1.4')
def test_delete_volume(self):
self._test_volume_api('delete_volume',
rpc_method='cast',
volume=self.fake_volume)
def test_create_snapshot(self):
self._test_volume_api('create_snapshot',
rpc_method='cast',
volume=self.fake_volume,
snapshot=self.fake_snapshot)
def test_delete_snapshot(self):
self._test_volume_api('delete_snapshot',
rpc_method='cast',
snapshot=self.fake_snapshot,
host='fake_host')
def test_attach_volume_to_instance(self):
self._test_volume_api('attach_volume',
rpc_method='call',
volume=self.fake_volume,
instance_uuid='fake_uuid',
host_name=None,
mountpoint='fake_mountpoint',
mode='ro',
version='1.11')
def test_attach_volume_to_host(self):
self._test_volume_api('attach_volume',
rpc_method='call',
volume=self.fake_volume,
instance_uuid=None,
host_name='fake_host',
mountpoint='fake_mountpoint',
mode='rw',
version='1.11')
def test_detach_volume(self):
self._test_volume_api('detach_volume',
rpc_method='call',
volume=self.fake_volume)
def test_copy_volume_to_image(self):
self._test_volume_api('copy_volume_to_image',
rpc_method='cast',
volume=self.fake_volume,
image_meta={'id': 'fake_image_id',
'container_format': 'fake_type',
'disk_format': 'fake_type'},
version='1.3')
def test_initialize_connection(self):
self._test_volume_api('initialize_connection',
rpc_method='call',
volume=self.fake_volume,
connector='fake_connector')
def test_terminate_connection(self):
self._test_volume_api('terminate_connection',
rpc_method='call',
volume=self.fake_volume,
connector='fake_connector',
force=False)
def test_accept_transfer(self):
self._test_volume_api('accept_transfer',
rpc_method='cast',
volume=self.fake_volume,
new_user='e5565fd0-06c8-11e3-'
'8ffd-0800200c9b77',
new_project='e4465fd0-06c8-11e3'
'-8ffd-0800200c9a66',
version='1.9')
def test_extend_volume(self):
self._test_volume_api('extend_volume',
rpc_method='cast',
volume=self.fake_volume,
new_size=1,
version='1.6')
def test_migrate_volume(self):
class FakeHost(object):
def __init__(self):
self.host = 'host'
self.capabilities = {}
dest_host = FakeHost()
self._test_volume_api('migrate_volume',
rpc_method='cast',
volume=self.fake_volume,
dest_host=dest_host,
force_host_copy=True,
version='1.8')
def test_migrate_volume_completion(self):
self._test_volume_api('migrate_volume_completion',
rpc_method='call',
volume=self.fake_volume,
new_volume=self.fake_volume,
error=False,
version='1.10')
|
py
|
1a596e44bb8fa59cd58c48eaa3058a3b6cd3c26b
|
#!/usr/bin/python
import sys
import os
import getpass
from os.path import isfile, join, dirname
from PyQt4 import QtGui, QtCore
import queuetable
import jobview
import jobedit
import queues as qeditor
from batchd.client import Client, InsufficientRightsException
APPDIR = dirname(sys.argv[0])
def labelled(label, constructor, parent=None):
result = QtGui.QWidget(parent)
layout = QtGui.QHBoxLayout()
result.setLayout(layout)
lbl = QtGui.QLabel(label)
layout.addWidget(lbl)
widget = constructor(result)
layout.addWidget(widget)
return result, widget
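# Usage note: labelled() hands back both the wrapper (for layout) and the inner
# widget (to keep a handle on the control), as in the call made further down in
# this file:
#   wrapper, self.type_popup = labelled("Job type:", QtGui.QComboBox, self)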
def get_icon(name):
path = join(APPDIR, "icons", name)
return QtGui.QIcon(path)
class LoginBox(QtGui.QDialog):
def __init__(self, url, cfg, parent=None):
QtGui.QDialog.__init__(self, parent)
self.url = url
self.client = None
self.config = cfg
form = QtGui.QFormLayout()
vbox = QtGui.QVBoxLayout()
self.setLayout(vbox)
self.login = QtGui.QLineEdit(self)
if 'username' in cfg:
username = cfg['username']
else:
username = getpass.getuser()
self.login.setText(username)
self.password = QtGui.QLineEdit(self)
self.password.setEchoMode(QtGui.QLineEdit.Password)
if 'password' in cfg:
self.password.setText(cfg['password'])
form.addRow("User name:", self.login)
form.addRow("Password:", self.password)
vbox.addLayout(form)
bbox = QtGui.QDialogButtonBox(self)
ok = QtGui.QPushButton('Ok')
ok.clicked.connect(self.on_ok)
cancel = QtGui.QPushButton('Cancel')
cancel.clicked.connect(self.on_cancel)
bbox.addButton(ok, QtGui.QDialogButtonBox.AcceptRole)
bbox.addButton(cancel, QtGui.QDialogButtonBox.RejectRole)
vbox.addWidget(bbox)
self.setAttribute(QtCore.Qt.WA_DeleteOnClose)
def on_ok(self):
try:
client = Client.from_config(self.config)
client.username = self.login.text()
client.password = self.password.text()
client.get_queues()
self.client = client
self.accept()
except InsufficientRightsException as e:
print e
def on_cancel(self):
self.client = None
self.reject()
class GUI(QtGui.QMainWindow):
def __init__(self, client):
QtGui.QMainWindow.__init__(self)
self.url = client.manager_url
self.client = client
central_widget = QtGui.QWidget(self)
self.layout = QtGui.QVBoxLayout()
central_widget.setLayout(self.layout)
self.setCentralWidget(central_widget)
wrapper = QtGui.QWidget(self)
hbox = QtGui.QHBoxLayout()
wrapper.setLayout(hbox)
lbl = QtGui.QLabel("Queue:", wrapper)
hbox.addWidget(lbl)
self.queue_popup = QtGui.QComboBox(wrapper)
hbox.addWidget(self.queue_popup, stretch=1)
self._fill_queues()
self.queue_popup.currentIndexChanged.connect(self._on_select_queue)
self.layout.addWidget(wrapper)
queue_buttons = QtGui.QToolBar(self)
queue_buttons.addAction(get_icon("list-add.svg"), "New queue", self._on_add_queue)
self.enable_queue = QtGui.QAction(get_icon("checkbox.svg"), "Enable", self)
self.enable_queue.setCheckable(True)
self.enable_queue.toggled.connect(self._on_queue_toggle)
queue_buttons.addAction(self.enable_queue)
hbox.addWidget(queue_buttons)
self.queue_info = QtGui.QLabel(self)
self.layout.addWidget(self.queue_info)
buttons = QtGui.QToolBar(self)
buttons.addAction(get_icon("quickview.svg"), "View", self._on_view)
buttons.addAction(get_icon("edit-delete.svg"), "Delete", self._on_delete)
self.layout.addWidget(buttons)
self.qtable = queuetable.Table(parent=self)
self.layout.addWidget(self.qtable)
wrapper, self.type_popup = labelled("Job type:", QtGui.QComboBox, self)
self.types = types = self.client.get_job_types()
self.type_by_name = {}
for t in types:
name = t['name']
title = t.get('title', name)
if not title:
title = name
item = QtGui.QStandardItem(name)
item.setData(title, QtCore.Qt.DisplayRole)
self.type_popup.model().appendRow(item)
self.type_by_name[name] = t
self.type_popup.currentIndexChanged.connect(self._on_select_type)
self.layout.addWidget(wrapper)
ok = QtGui.QPushButton(get_icon("list-add.svg"), "Add", self)
ok.clicked.connect(self._on_ok)
self.layout.addWidget(ok)
self.param_widgets = {}
self.form = None
self._on_select_type(0)
self._on_select_queue(0)
timer = QtCore.QTimer(self)
timer.timeout.connect(self._on_timer)
timer.start(5*1000)
def _fill_queues(self):
self.queue_popup.clear()
self.queues = queues = self.client.get_queues()
for q in queues:
enabled = "*" if q['enabled'] else " "
title = "[{0}] {1}".format(enabled, q['title'])
self.queue_popup.addItem(title, q['name'])
def _on_view(self):
job = self.qtable.currentJob()
jobtype = self.type_by_name[job['type']]
dlg = jobview.JobView(job, jobtype, parent=self)
dlg.exec_()
def _on_queue_toggle(self):
enabled = self.enable_queue.isChecked()
print enabled
def _on_delete(self):
buttons = QtGui.QMessageBox.Yes | QtGui.QMessageBox.No
job = self.qtable.currentJob()
job_id = job['id']
ok = QtGui.QMessageBox.question(self, "Delete?",
"Are you really sure you want to delete job #{}?".format(job_id),
buttons)
if ok == QtGui.QMessageBox.Yes:
print "Deleting!"
self.client.delete_job(job_id)
self._refresh_queue()
else:
print "do not delete"
def _on_select_type(self, idx):
jobtype = self.types[idx]
self.param_widgets = {}
form = jobedit.create_form(jobtype['params'], self.param_widgets, self)
if self.form:
self.form.hide()
self.layout.removeWidget(self.form)
del self.form
self.form = form
self.layout.insertWidget(5, form)
self.form.show()
def _on_add_queue(self):
dlg = qeditor.QueueEditor(self)
dlg.exec_()
self._fill_queues()
def _on_select_queue(self, idx):
self._refresh_queue(idx)
def _on_timer(self):
self._refresh_queue()
def _refresh_queue(self, idx=None):
if idx is None:
idx = self.queue_popup.currentIndex()
if len(self.queues) == 0:
print("No queues.")
return
queue = self.queues[idx]
schedule = queue['schedule_name']
host = queue['host_name']
if not host:
host = "*"
stats = self.client.get_queue_stats(queue['name'])
new = stats.get('new', 0)
processing = stats.get('processing', 0)
done = stats.get('done', 0)
failed = stats.get('failed', 0)
info = "Schedule: {}\nHost: {}\nNew/Processing/Done: {} / {} / {}\nFailed: {}".format(schedule, host, new, processing, done, failed)
self.queue_info.setText(info)
self.enable_queue.setChecked(queue['enabled'])
jobs = self.client.get_jobs(queue['name'])
self.qtable.setJobs(jobs)
def _on_ok(self):
queue_idx = self.queue_popup.currentIndex()
queue_name = self.queues[queue_idx]['name']
#typename = unicode( self.type_popup.currentText() )
jobtype = self.types[self.type_popup.currentIndex()]
typename = jobtype['name']
params = {}
for name, widget in self.param_widgets.iteritems():
params[name] = unicode(widget.text())
self.client.do_enqueue(queue_name, typename, params)
self._refresh_queue()
if __name__ == "__main__":
app = QtGui.QApplication(sys.argv)
cfg = Client.load_config()
client = Client.from_config(cfg)
auth_ok = False
if client.need_password:
login_box = LoginBox(client.manager_url, cfg)
if login_box.exec_():
client = login_box.client
auth_ok = True
else:
auth_ok = True
if auth_ok:
gui = GUI(client)
gui.show()
sys.exit(app.exec_())
|
py
|
1a596ed0bdb15b4c7537948dda0f2fe7b51d6e31
|
import math
class Neuron:
def __init__(self, initial_weights=None):
# avoid a shared mutable default argument
self.weights = initial_weights if initial_weights is not None else []
def predict(self, features):
"""
:param features: a list of numeric values (must be the same length
as self.weights)
:return: the sigmoid of the weighted sum of the features
"""
multiplications = [weight*feature for weight, feature in zip(self.weights, features)]
return self.sigmoid(sum(multiplications))
def sigmoid(self, x):
return 1 / (1 + math.exp(-x))
def delta(self, deltas=None):
pass
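# Minimal usage sketch (weights and inputs are arbitrary illustration values):
#   n = Neuron(initial_weights=[0.5, -0.25, 0.1])
#   y = n.predict([1.0, 2.0, 3.0])
#   # weighted sum = 0.5*1.0 - 0.25*2.0 + 0.1*3.0 = 0.3, so y = sigmoid(0.3) ≈ 0.574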
|
py
|
1a596f8382aa816d8742c1b8eb70d58735a43d12
|
from django.db import models
from django.urls import reverse
# Create your models here.
class Friend(models.Model):
name = models.CharField(max_length=100)
lives_in = models.CharField(max_length=100, blank=True, null=True)
email = models.EmailField(blank=True, null=True)
def get_absolute_url(self):
return reverse("friends:friend-detail", kwargs={"id": self.id})
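# Hedged illustration: assuming a URLconf entry along the lines of
#   path('<int:id>/', FriendDetailView.as_view(), name='friend-detail')
# registered under the "friends" namespace, a Friend with id=3 would resolve to
# something like '/friends/3/'. The actual pattern lives in urls.py, not shown here.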
|
py
|
1a597130a224032ddd0caffad3f9054221faf134
|
from cdci_data_analysis.ddosa.osa_catalog import OsaIsgriCatalog
def test_from_list():
osa_catalog=OsaIsgriCatalog.build_from_dict_list([dict(ra=0,dec=0,name="SOURCE_NAME")])
osa_catalog.write("osa_cat_write_test.fits",format="fits")
osa_catalog_read=OsaIsgriCatalog.from_fits_file("osa_cat_write_test.fits")
assert osa_catalog.name == osa_catalog_read.name
|
py
|
1a5971c69fb5dc4095e6403726564f425cd99cce
|
from django.views.generic import TemplateView, ListView, CreateView, DetailView
from django.views.generic.detail import BaseDetailView
from models import Company, Venue, Booking, Location, Event, Menu, Product, Category, OpeningTime
from forms import VenueForm
from simple_rest.auth.decorators import login_required, admin_required
from django.http import HttpResponse
from django.core import serializers
#from json import dumps
# import json
# from simple_rest.response import RESTfulResponse
from django.contrib import messages
from django import http
from django.utils import simplejson as json
from datetime import datetime
from django.core import serializers
class JSONResponseMixin(object):
def render_to_response(self, context):
"Returns a JSON response containing 'context' as payload"
return self.get_json_response(self.convert_context_to_json(context))
def get_json_response(self, content, **httpresponse_kwargs):
"Construct an `HttpResponse` object."
return http.HttpResponse(content,
content_type='application/json',
**httpresponse_kwargs)
def convert_context_to_json(self, context):
"Convert the context dictionary into a JSON object"
# Note: This is *EXTREMELY* naive; in reality, you'll need
# to do much more complex handling to ensure that arbitrary
# objects -- such as Django model instances or querysets
# -- can be serialized as JSON.
return json.dumps(self.json_data(context))
def json_data(self, content):
return {}
class JSONDetailView(JSONResponseMixin, TemplateView):
def render_to_response(self, context):
return JSONResponseMixin.render_to_response(self, context)
class IndexView(TemplateView):
template_name ='reserved/index.html'
class ContactView(TemplateView):
template_name = 'reserved/contact.html'
def open_times(self):
op = OpeningTime.objects.filter(active=True)
return op
def address(self):
l = Location.objects.get(location_id='default')
return l
base_keys = ['id', 'name', 'active', 'icon_name']
product_keys = base_keys + ['categories', 'price']
category_keys = base_keys + ['parent']
def short_object(object, keys, *args, **kw):
'''
    Return a plain dict holding only the requested keys of `object`.
    short_object(o, ['key', ...])
    short_object(o, ['key', ...], 'extra_key', 'extra_key', ..., key=decoder, ...)
'''
o = {}
def key(object, k):
decoder = kw.get(k)
d = getattr(object, k)
if decoder is not None and d is not None:
d = decoder(d, object)
return d
ks = [x for x in args] + keys
for k in ks:
o[k] = key(object, k)
return o
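# Usage sketch (hypothetical Product instance): short_object(product, base_keys, 'price', **decoders)
# returns a plain dict such as {'id': 1, 'name': 'Espresso', 'active': True,
# 'icon_name': 'cup', 'price': 2.5}, applying a decoder wherever one is registered for that key.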
def short_models(filtered, keys):
o =[]
for m in filtered:
p = short_object(m, keys, **decoders)
o.append(p)
return o
def categories_decoder(object, parent):
res = []
for cat in object.all():
c= short_object(cat, base_keys, **decoders)
res.append(c)
return res
def category_decoder(object, parent):
o = {}
for k in category_keys:
o[k] = getattr(object, k)
return o
decoders = {
'categories': categories_decoder,
'category': category_decoder,
'parent': category_decoder,
}
def menu_json():
menus = Menu.objects.filter()
products = Product.objects.filter()
categories = Category.objects.filter()
o = {}
o['menu'] = short_models(menus, base_keys)
o['products'] = short_models(products, product_keys)
o['categories'] = short_models(categories, category_keys)
return o
class MenuJson(JSONDetailView):
model = Menu
def json_data(self, context):
return menu_json()
class MenuData(TemplateView):
template_name = 'reserved/menu_data.js'
content_type = 'application/javascript'
def json(self, **kw):
return json.dumps( menu_json() )
class MenuList(ListView):
model = Menu
class CompanyList(ListView):
model = Company
class VenueCreate(CreateView):
model = Venue
form_class = VenueForm
def form_valid(self, form):
# This method is called when valid form data has been POSTed.
# It should return an HttpResponse.
# form.send_email()
venue = form.save()
self.form_id = venue.id
ll = form.cleaned_data.get('latlng', None)
full_address = form.cleaned_data.get('full_address', None)
# import pdb; pdb.set_trace()
if ll and full_address:
lat, lng = ll.split(',')
location, created = Location.objects.get_or_create(address=full_address)
if created is True:
location.name=venue.name
location.latitude=lat
location.longitude=lng
location.save()
venue.address = location
venue.owner = self.request.user
venue.save()
messages.success(self.request, 'New venue \'%s\' created' % (venue))
return super(VenueCreate, self).form_valid(form)
def get_success_url(self):
return '/venues/'
        # return '/venues/created/%s' % self.form_id  # unreachable after the return above; kept for reference
class VenueList(ListView):
model = Venue
def get_context_data(self, **kwargs):
kwargs['venues_owned'] = self.model.objects.filter(owner=self.request.user)
kwargs['venues_other'] = self.model.objects.filter(contact__email=self.request.user.email)
return kwargs
class VenueCalendarView(TemplateView):
template_name = 'reserved/venue_calendar.html'
class EventList(ListView):
model = Event
class BookingList(ListView):
model = Booking
def get_queryset(self):
return self.model.objects.all()
class BookingCreate(CreateView):
model = Booking
success_url = '/bookings/'
class EventCreate(CreateView):
model = Event
success_url = '/events/'
class BookingDetail(DetailView):
slug_field = 'name'
model = Booking
|
py
|
1a5971ca1e6ebcba37e1e1da48d2c8527ce64ce3
|
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Dataset utils for the Learned Interpreters framework."""
import collections
import dataclasses
from typing import Any, Optional
from absl import logging
import jax
import jax.numpy as jnp
import six
import tensorflow as tf
import tensorflow_datasets as tfds
import tree
from ipagnn.datasets import datasets # pylint: disable=unused-import
@dataclasses.dataclass
class DatasetInfo:
dataset: Any = None
generator: Any = None
environment: Any = None
info: Optional[Any] = None # info_lib.LearnedInterpretersDatasetInfo
set_task: Any = None # Callable[[TaskFn, int], Any] = None
def _default_padding_value(dtype):
"""Gets the default value for the given dtype for padding.
Args:
dtype: A tensorflow dtype.
Returns:
A default (zero) value for the given type.
"""
if dtype == tf.string:
return ' '
elif dtype == tf.int64:
return tf.constant(0, dtype=tf.int64)
elif dtype == tf.int32:
return tf.constant(0, dtype=tf.int32)
elif dtype == tf.float32:
return tf.constant(0.0, dtype=tf.float32)
elif dtype == tf.float64:
return tf.constant(0.0, dtype=tf.float64)
elif dtype == tf.bool:
return tf.constant(False, dtype=tf.bool)
else:
raise ValueError('Unexpected type.', dtype)
def verify_reasonable_dataset(dataset_name, info, config):
"""Verifies that the dataset configs are at least reasonable.
For example, if the max_length is set too low such that every example would be
filtered out, we catch that here.
This lets us fail fast if we accidentally put in configs that will lead to all
examples being filtered out, rather than silently succeeding but never making
progress.
Args:
dataset_name: The name of the dataset being loaded.
info: The dataset info object.
config: The config for the model.
"""
if dataset_name.startswith('control_flow_programs'):
# TODO(dbieber): Move this logic into the dataset definition.
length = info.program_generator_config.length
tokens_per_statement = info.program_encoder.tokens_per_statement
assert (
not config.dataset.max_length
or config.dataset.max_length >= tokens_per_statement * length)
def cannot_set_task(**kwargs):
"""Use this as the set_task fn when no curriculum is permitted."""
del kwargs # Unused.
raise ValueError('The task cannot be changed. This is probably because the '
'data is being loaded from disk, rather than generated '
'at training time.')
def get_split(config):
"""Select the default split according to the config.
Args:
config: (ml_collections.ConfigDict) The experimental config.
Returns:
The TFDS split for the experimental setup indicated by the config.
"""
splits = {
'train': 'train[:70%]',
'valid': 'train[70%:90%]',
'test': 'train[90%:]',
}
if config.dataset.split == 'default':
split_name = 'valid' if config.runner.mode.startswith('eval') else 'train'
split = splits[split_name]
elif config.dataset.split in splits:
split = splits[config.dataset.split]
else:
raise ValueError('Unexpected split.')
return split
def get_dataset(data_dir, config, dataset_name=None):
"""The training dataset for the code model for fault localization.
Args:
data_dir: The data directory to use with tfds.load.
config: The config for the model.
dataset_name: If set, use this dataset name in place of the one from the
config.
Returns:
train_dataset: The tf.data.Dataset with batched examples.
info: The DatasetInfo object containing the feature connectors and other
info about the dataset.
"""
dataset_name = dataset_name or config.dataset.name
split = get_split(config)
version = (
None if config.dataset.version == 'default' else config.dataset.version)
# If in interact mode, use an interactive dataset.
if config.runner.mode == 'interact':
dbuilder = tfds.builder(
dataset_name, data_dir=data_dir, version=version)
unused_split_generators = dbuilder._split_generators(dl_manager=None) # pylint: disable=protected-access
info = dbuilder.info
info._builder.set_representation(config.dataset.representation) # pylint: disable=protected-access
assert config.dataset.batch_size == 1
dataset = make_interactive_dataset(info, config)
if config.dataset.batch:
dataset = apply_batching(dataset, info, config)
set_task = cannot_set_task
return DatasetInfo(
dataset=dataset,
info=info,
set_task=set_task
)
# Load the dataset.
if config.dataset.in_memory:
dbuilder = tfds.builder(
dataset_name, data_dir=data_dir, version=version)
unused_split_generators = dbuilder._split_generators(dl_manager=None) # pylint: disable=protected-access
dataset, set_task = dbuilder.as_in_memory_dataset(split='all')
info = dbuilder.info
else:
name = dataset_name
if version is not None:
name = f'{name}:{version}'
dataset, info = tfds.load(
name=name, split=split,
data_dir=data_dir,
# batch_size=config.dataset.batch_size,
with_info=True)
set_task = cannot_set_task
info._builder.set_representation(config.dataset.representation) # pylint: disable=protected-access
verify_reasonable_dataset(dataset_name, info, config)
dataset = dataset.repeat()
dataset = apply_filtering(dataset, info, config)
if config.dataset.batch:
dataset = apply_batching(dataset, info, config)
return DatasetInfo(
dataset=dataset,
info=info,
set_task=set_task,
)
def apply_filtering(dataset, info, config):
del info # Unused.
# TODO(dbieber): Reinstate filtering, but refactor it.
# if config.dataset.max_length:
# dataset = dataset.filter(
# lambda x: x[info._builder.key('length')] <= config.dataset.max_length) # pylint: disable=protected-access
if config.dataset.max_examples:
dataset = dataset.take(config.dataset.max_examples)
return dataset
def apply_sharding(generator, stack_fn, shape_fn):
"""Shards a dataset with a device dimension.
Args:
generator: Yields pytrees of numpy arrays.
stack_fn: Applied to each example before stacking.
shape_fn: Applied to each example to determine which examples to group.
Examples with the same shape are grouped.
Returns:
A new generator where each leaf now has a leading device axis.
"""
def generator_fn():
used_shapes = set()
examples_by_shapes = collections.defaultdict(list)
for example in generator:
shapes = shape_fn(example)
if shapes not in used_shapes and shapes not in examples_by_shapes:
logging.info('New shape started: %s', shapes)
examples_by_shapes[shapes].append(example)
if len(examples_by_shapes[shapes]) == jax.local_device_count():
stacked_examples = tree.map_structure(
lambda *x: jnp.stack(x, axis=0),
*[stack_fn(example) for example in examples_by_shapes[shapes]]
)
yield stacked_examples, examples_by_shapes[shapes]
examples_by_shapes[shapes] = []
if shapes not in used_shapes:
logging.info('New shape finished: %s', shapes)
used_shapes.add(shapes)
return generator_fn()
def apply_batching(dataset, info, config):
"""Applies standard batching to the dataset."""
del info # Unused.
padded_shapes = tree.map_structure(
lambda items: [None] * len(items),
tf.compat.v1.data.get_output_shapes(dataset))
padding_values = tree.map_structure(
_default_padding_value,
tf.compat.v1.data.get_output_types(dataset))
dataset = dataset.padded_batch(
config.dataset.batch_size, padded_shapes, padding_values,
drop_remainder=True)
return dataset
def dataset_from_generator(generator_fn, info, config):
"""Creates a dataset from a given generator fn."""
del config # Unused.
dtype = info.features.dtype
shape = info.features.shape
dataset = tf.data.Dataset.from_generator(generator_fn, dtype, shape)
return dataset
def _example_from_string(code, info):
example_dict = info._builder.generate_example_from_string(code) # pylint: disable=protected-access
encoded_example = info.features.encode_example(example_dict)
decoded_example = info.features.decode_example(encoded_example)
return decoded_example
def make_interactive_dataset(info, config):
"""Makes a dataset from interactively provided examples."""
logging.info('Generating dataset interactively. batch_size=%d',
config.dataset.batch_size)
def generator_fn():
while True:
example_str = six.moves.input()
if not example_str:
break
try:
yield _example_from_string(example_str, info)
except Exception as e: # pylint: disable=broad-except
logging.info('Encountered error in _example_from_string: %s', e)
return dataset_from_generator(generator_fn, info, config)
|
py
|
1a59762e72da140aeb1dc291608f4ff7152cd969
|
from .models import ec2_backends
from ..core.models import base_decorator, deprecated_base_decorator
ec2_backend = ec2_backends["us-east-1"]
mock_ec2 = base_decorator(ec2_backends)
mock_ec2_deprecated = deprecated_base_decorator(ec2_backends)
|
py
|
1a59764f6c3dfe11414adc8137c965ae1206e914
|
from django.shortcuts import render
from django.core.paginator import Paginator
from .models import Question
from stack_overclone.error_views import page_not_found_view
def index_view(request):
query_set = Question.objects.get_new()
page = paginate(query_set, request)
data = {
'questions': page.object_list,
'page': page
}
return render(request, 'index.html', context=data)
def hot_questions_view(request):
query_set = Question.objects.get_top_rated()
page = paginate(query_set, request)
data = {
'questions': page.object_list,
'page': page
}
return render(request, 'hot_questions.html', context=data)
def questions_by_tag_view(request, tag):
query_set = Question.objects.get_by_tag(tag)
if query_set.count() == 0:
return page_not_found_view(request, 'No such tag')
page = paginate(query_set, request)
data = {
'tag': tag,
'questions': page.object_list,
'page': page
}
return render(request, 'questions_by_tag.html', context=data)
def question_and_answers_view(request, question_id):
try:
question = Question.objects.get(id=question_id)
except Exception:
return page_not_found_view(request, 'No such question')
query_set = question.get_answers()
page = paginate(query_set, request)
data = {
'question': question,
'answers': page.object_list,
'page': page
}
return render(request, 'question_and_answers.html', context=data)
def ask_question_view(request):
return render(request, 'ask_question.html')
def paginate(query_set, request, per_page=20):
paginator = Paginator(query_set, per_page)
page_number = request.GET.get('page')
page = paginator.get_page(page_number)
return page
from django import template
register = template.Library()
@register.filter(name='add')
def add(value, arg):
return int(value) + int(arg)
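# Template usage sketch (hypothetical variable): after {% load %}-ing this library,
# {{ page.number|add:"1" }} renders page.number + 1, mirroring Django's built-in `add` filter.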
|
py
|
1a5976a5953b0d74175416fe19eb67cf23516607
|
import random
import math
lower = int(input("Enter lower bound: - "))
upper = int(input("Enter upper bound: - "))
x = random.randint(lower, upper)
allowedGuess = round(math.log(upper - lower + 1, 2))
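# Binary search needs about log2(range size) guesses; e.g. for bounds 1..100
# the range has 100 values and round(log2(100)) = 7 allowed guesses.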
print("\n\tYou only have ", allowedGuess, " chances to guess the integer!\n")
guessCount = 0
while guessCount < allowedGuess:
guessCount += 1
    guess = int(input("Guess a number:- "))
    if x == guess:
        if guessCount == 1:
print("Congratulations you guessed the number in 1 try")
else:
print("Congratulations you guessed the number in ", guessCount, " tries")
break
elif x > guess:
print("You guessed too small!")
elif x < guess:
print("You guessed too high!")
if guessCount >= allowedGuess:
print("\nThe number is %d" % x)
print("\tBetter luck next time!")
|
py
|
1a59770f392180aa2b9a468493cec38a16435429
|
#!/usr/bin/python3
import getopt
import os
import sys
import json
usage = """USAGE: $ python3 save_codes.py https://atcoder.jp/contests/abc244/tasks/abc244_a bin/out/v220314.py
Options:
-h --help print this help and exit
"""
opt_list = {"help": "h"}
json_path = "./bin/codes.json"
def kill(s, status):
if s != "":
print("[\033[31m!\033[m]", s, file=sys.stderr)
print(usage, end="", file=sys.stderr)
sys.exit(status)
try:
opts, args = getopt.getopt(sys.argv[1:], "".join(opt_list.values()), list(opt_list.keys()))
except getopt.GetoptError as e:
kill(e, 2)
for o, v in opts:
for opt_long, opt_short in opt_list.items():
# Shorten
if opt_long in o:
o = "-" + opt_short
break
print(f"[\033[34m#\033[m] o: {o}, v: {v}", file=sys.stderr)
if o == "-h":
kill("", 0)
if len(args) != 2:
kill(f"len(args): {len(args)}", 2)
url, src_path = args
if not url:
kill("url is empty", 2)
if not src_path:
kill("src_path is empty", 2)
print("[\033[34m#\033[m] url:", url, file=sys.stderr)
src_suffix = src_path.split(".")[-1]
f = open(src_path, "r")
lines = f.read().splitlines()
while lines[-1] == "":
lines.pop()
if not os.path.isfile(json_path):
# Create json file
with open(json_path, "w") as f:
json.dump({}, f)
# Input
with open(json_path, "r") as f:
codes_json = json.load(f)
# {
# "https://atcoder.jp/contests/abc244/tasks/abc244_a": {
# "bin/out/v220314.py": [
# "n = int(input())",
# "s = input()",
# "print(s[-1])",
# ]
# }
# }
# Add
di = codes_json.get(url, {})
di[src_path] = lines
codes_json[url] = di
# Output
# Options: ensure_ascii=False, indent=2, sort_keys=True
with open(json_path, "w") as f:
json.dump(codes_json, f, ensure_ascii=False, indent=2)
print(f"[\033[32m+\033[m] {src_path} -> {json_path}", file=sys.stderr)
|
py
|
1a5977a19376fd1ea8e324b7f86623fde47530ae
|
"""
@brief test log(time=3s)
"""
import sys
import os
import unittest
import warnings
import pandas
from pyquickhelper.loghelper import fLOG
from pyquickhelper.pycode import get_temp_folder
try:
import src
except ImportError:
path = os.path.normpath(
os.path.abspath(
os.path.join(
os.path.split(__file__)[0],
"..",
"..")))
if path not in sys.path:
sys.path.append(path)
import src
from src.ensae_teaching_cs.data import google_trends, twitter_zip
class TestDataWeb(unittest.TestCase):
def test_google_trends_macron(self):
fLOG(
__file__,
self._testMethodName,
OutputPrint=__name__ == "__main__")
temp = get_temp_folder(__file__, "temp_google_trends_macron")
text = google_trends(local=True, filename=False)
assert text is not None
name = google_trends(local=True, filename=True)
assert name.endswith("macron.csv")
try:
text2 = google_trends(
local=False, cache_folder=temp, filename=False)
except ConnectionResetError as e:
warnings.warn("Cannot check remote marathon.txt.\n" + str(e))
return
assert text2 is not None
self.assertEqual(len(text), len(text2))
self.maxDiff = None
self.assertEqual(text, text2)
def test_twitter_zip(self):
fLOG(
__file__,
self._testMethodName,
OutputPrint=__name__ == "__main__")
temp = get_temp_folder(__file__, "temp_twitter_zip")
try:
twitter_zip(local=True, filename=False,
unzip=False, cache_folder=temp)
assert False
except ValueError:
pass
name = twitter_zip(local=True, filename=True, as_df=False, unzip=False)
assert name.endswith("tweets_macron_sijetaispresident_201609.zip")
try:
text2 = twitter_zip(
local=False, cache_folder=temp, filename=False, unzip=True, as_df=True)
except ConnectionResetError as e:
warnings.warn("Cannot check remote.\n" + str(e))
return
assert isinstance(text2, pandas.DataFrame)
fLOG(text2.columns)
if __name__ == "__main__":
unittest.main()
|
py
|
1a59784a96a8f9ab0c1cd63efc5118caf2f5c63a
|
# -*- coding: utf-8 -*-
"""Test database functionality."""
import os
import shutil
import sqlite3
import tempfile
import unittest
from contextlib import closing
from datetime import datetime
from dateutil.tz import tzutc
from mock import patch
from sqlalchemy.exc import NoSuchTableError
from sqlalchemy.types import (
INTEGER,
TEXT,
)
from pic2map.db import (
Database,
LocationDB,
transform_metadata_to_row,
)
class DatabaseTest(unittest.TestCase):
"""Database wrapper test cases."""
def test_get_table_metadata(self):
"""Table metadata can be retrieved using index notation."""
with tempfile.NamedTemporaryFile() as db_file:
with closing(sqlite3.connect(db_file.name)) as connection:
with closing(connection.cursor()) as cursor:
cursor.execute(
'CREATE TABLE messages (id INTEGER, message TEXT)')
database = Database(db_file.name)
table = database['messages']
schema = {column.name: type(column.type)
for column in table.columns}
self.assertDictEqual(
schema,
{'id': INTEGER, 'message': TEXT})
def test_get_unknown_table_metadata(self):
"""NoSuchTableError raised when table name is not found."""
with tempfile.NamedTemporaryFile() as db_file:
with closing(sqlite3.connect(db_file.name)) as connection:
with closing(connection.cursor()) as cursor:
cursor.execute(
'CREATE TABLE messages (id INTEGER, message TEXT)')
database = Database(db_file.name)
with self.assertRaises(NoSuchTableError):
database['unknown']
def test_type_error_on_wrong_table_name(self):
"""TypeError raised when table name is not a string."""
with tempfile.NamedTemporaryFile() as db_file:
with closing(sqlite3.connect(db_file.name)) as connection:
with closing(connection.cursor()) as cursor:
cursor.execute(
'CREATE TABLE messages (id INTEGER, message TEXT)')
database = Database(db_file.name)
with self.assertRaises(TypeError):
database[0]
def test_context_manager(self):
"""Connection is opened/closed when used as a context manager."""
database = Database(':memory:')
# Connection is None when database object is created
self.assertIsNone(database.connection)
with database:
# Connection is not closed inside the context
self.assertFalse(database.connection.closed)
# Connection is closed outside the context
self.assertTrue(database.connection.closed)
class LocationDBTest(unittest.TestCase):
"""Location database tests."""
def setUp(self):
"""Create temporary directory."""
self.directory = tempfile.mkdtemp()
self.base_directory_patcher = patch('pic2map.db.BaseDirectory')
base_directory = self.base_directory_patcher.start()
base_directory.save_data_path.return_value = self.directory
def tearDown(self):
"""Remove temporary directory."""
self.base_directory_patcher.stop()
shutil.rmtree(self.directory)
def test_database_exists(self):
"""Database not create if exists."""
filename = os.path.join(self.directory, 'location.db')
with closing(sqlite3.connect(filename)) as connection:
with closing(connection.cursor()) as cursor:
cursor.execute(
'CREATE TABLE location (column_1 TEXT, column_2 TEXT)')
location_db = LocationDB()
self.assertListEqual(
location_db.location_table.columns.keys(),
['column_1', 'column_2'],
)
def test_create_database(self):
"""Create database file."""
LocationDB()
filename = os.path.join(self.directory, 'location.db')
self.assertTrue(os.path.isfile(filename))
def test_insert(self):
"""Insert records in database."""
rows = [
{
'filename': 'a.jpg',
'latitude': 1.2,
'longitude': 2.1,
'datetime': datetime(2015, 1, 1, 12, 34, 56)
},
{
'filename': 'b.jpg',
'latitude': 3.4,
'longitude': 4.3,
'datetime': datetime(2015, 1, 1, 12, 34, 56)
},
]
with LocationDB() as location_db:
location_db.insert(rows)
filename = os.path.join(self.directory, 'location.db')
with closing(sqlite3.connect(filename)) as connection:
with closing(connection.cursor()) as cursor:
result = cursor.execute('SELECT COUNT(*) FROM location')
self.assertListEqual(result.fetchall(), [(2,)])
def test_select_all(self):
"""Select all rows from location table."""
filename = os.path.join(self.directory, 'location.db')
with closing(sqlite3.connect(filename)) as connection:
with closing(connection.cursor()) as cursor:
cursor.execute(
'CREATE TABLE location (name TEXT)')
cursor.execute(
'INSERT INTO location VALUES ("Hello world!")')
connection.commit()
with LocationDB() as location_db:
result = location_db.select_all()
rows = result.fetchall()
self.assertEqual(len(rows), 1)
row = rows[0]
self.assertSequenceEqual(row, (u'Hello world!',))
def test_remove(self):
"""Delete rows for files under a given directory."""
file_count = 10
filename = os.path.join(self.directory, 'location.db')
with closing(sqlite3.connect(filename)) as connection:
with closing(connection.cursor()) as cursor:
cursor.execute(
'CREATE TABLE location (filename TEXT)')
for directory in ['a', 'b']:
for index in range(file_count):
cursor.execute(
'INSERT INTO location VALUES ("{}/{}.jpg")'
.format(directory, index))
connection.commit()
with LocationDB() as location_db:
result = location_db.delete('a')
self.assertEqual(result.rowcount, file_count)
def test_count(self):
"""Count rows in database."""
file_count = 10
filename = os.path.join(self.directory, 'location.db')
with closing(sqlite3.connect(filename)) as connection:
with closing(connection.cursor()) as cursor:
cursor.execute(
'CREATE TABLE location (filename TEXT)')
for index in range(file_count):
cursor.execute(
'INSERT INTO location VALUES ("{}.jpg")'.format(index))
connection.commit()
with LocationDB() as location_db:
result = location_db.count()
self.assertEqual(result, file_count)
class TransformMetadataToRowTest(unittest.TestCase):
"""EXIF metadata to database row transformation tests."""
def test_transform_metadata(self):
"""Transform metadata to row."""
metadata = {
'SourceFile': 'a.jpg',
'EXIF:GPSLatitude': 1.2,
'EXIF:GPSLatitudeRef': 'N',
'EXIF:GPSLongitude': 2.1,
'EXIF:GPSLongitudeRef': 'E',
'EXIF:GPSDateStamp': '2015:01:01',
'EXIF:GPSTimeStamp': '12:34:56',
}
expected_row = {
'filename': 'a.jpg',
'latitude': 1.2,
'longitude': 2.1,
'datetime': datetime(2015, 1, 1, 12, 34, 56, tzinfo=tzutc()),
}
row = transform_metadata_to_row(metadata)
self.assertEqual(row, expected_row)
def test_transform_metadata_negative(self):
"""Transform metadata with negative latitude/longitude to row."""
metadata = {
'SourceFile': 'a.jpg',
'EXIF:GPSLatitude': 1.2,
'EXIF:GPSLatitudeRef': 'S',
'EXIF:GPSLongitude': 2.1,
'EXIF:GPSLongitudeRef': 'W',
'EXIF:GPSDateStamp': '2015:01:01',
'EXIF:GPSTimeStamp': '12:34:56',
}
expected_row = {
'filename': 'a.jpg',
'latitude': -1.2,
'longitude': -2.1,
'datetime': datetime(2015, 1, 1, 12, 34, 56, tzinfo=tzutc()),
}
row = transform_metadata_to_row(metadata)
self.assertEqual(row, expected_row)
def test_transform_metadata_no_datetime(self):
"""Transform metadata to row."""
metadata = {
'SourceFile': 'a.jpg',
'EXIF:GPSLatitude': 1.2,
'EXIF:GPSLatitudeRef': 'N',
'EXIF:GPSLongitude': 2.1,
'EXIF:GPSLongitudeRef': 'E',
}
expected_row = {
'filename': 'a.jpg',
'latitude': 1.2,
'longitude': 2.1,
'datetime': None,
}
row = transform_metadata_to_row(metadata)
self.assertEqual(row, expected_row)
|
py
|
1a5979452117e2a41f03e34fbddd51712caad5c5
|
import _plotly_utils.basevalidators
class FillcolorValidator(_plotly_utils.basevalidators.ColorValidator):
def __init__(
self, plotly_name='fillcolor', parent_name='scatterpolargl', **kwargs
):
super(FillcolorValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop('edit_type', 'calc'),
role=kwargs.pop('role', 'style'),
**kwargs
)
|
py
|
1a597a9a4442edb45f186fce2383ddfab1d8ae5e
|
# 000403_01_09_vid03_ex02_DualDigit.py
# Check that the entered number is a two-digit number
a = int(input())
print(a >= 10 and a <= 99)
# combined comparison variant:
print(10 <= a < 100)
b = int(input())
print(10 <= b <= 99)
# Algorithm:
# 1. what is a two-digit number? It has exactly two significant digits, so it lies between 10 and 99 (inclusive)
# 2. the bounds are known, so we only need to build a True/False check against them
|
py
|
1a597cfe75b4fe59449c9a64cd1e5ce21baeb0a3
|
# -*- coding: utf-8 -*-
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
import os
import sys
import urllib.request
import urllib.parse
from flask import Flask, request, abort
from linebot import (
LineBotApi, WebhookHandler
)
from linebot.exceptions import (
InvalidSignatureError, LineBotApiError
)
from linebot.models import (
CarouselColumn, CarouselTemplate, FollowEvent,
LocationMessage, MessageEvent, TemplateSendMessage,
TextMessage, TextSendMessage, UnfollowEvent, URITemplateAction
)
# TODO: add a menu button for sending location information
# TODO: use the web server to reference static files with relative paths
# get api_key, channel_secret and channel_access_token from environment variable
GNAVI_API_KEY = os.getenv('GNAVI_API_KEY')
CHANNEL_SECRET = os.getenv('LINE_CHANNEL_SECRET')
CHANNEL_ACCESS_TOKEN = os.getenv('LINE_CHANNEL_ACCESS_TOKEN')
BOT_SERVER_URL = os.getenv('BOT_SERVER_URL')
os.environ['http_proxy'] = os.getenv('FIXIE_URL')
os.environ['https_proxy'] = os.getenv('FIXIE_URL')
if GNAVI_API_KEY is None:
print('Specify GNAVI_API_KEY as environment variable.')
sys.exit(1)
if CHANNEL_SECRET is None:
print('Specify LINE_CHANNEL_SECRET as environment variable.')
sys.exit(1)
if CHANNEL_ACCESS_TOKEN is None:
print('Specify LINE_CHANNEL_ACCESS_TOKEN as environment variable.')
sys.exit(1)
if BOT_SERVER_URL is None:
print('Specify BOT_SERVER_URL as environment variable.')
sys.exit(1)
if os.getenv('FIXIE_URL') is None:
print('Specify FIXIE_URL as environment variable.')
sys.exit(1)
# instantiation
# TODO: move instance creation out of module scope and into a factory method
# TODO: decide whether to create one globally shared api_caller or create them individually
app = Flask(__name__)
line_bot_api = LineBotApi(CHANNEL_ACCESS_TOKEN)
handler = WebhookHandler(CHANNEL_SECRET)
RESTSEARCH_URL = "https://api.gnavi.co.jp/RestSearchAPI/v3/"
DEF_ERR_MESSAGE = """
申し訳ありません、データを取得できませんでした。
少し時間を空けて、もう一度試してみてください。
"""
NO_HIT_ERR_MESSAGE = "お近くにぐるなびに登録されている喫茶店はないようです" + chr(0x100017)
LINK_TEXT = "ぐるなびで見る"
FOLLOWED_RESPONSE = "フォローありがとうございます。位置情報を送っていただくことで、お近くの喫茶店をお伝えします" + chr(0x100059)
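# English glosses of the user-facing messages above (the strings themselves stay in
# Japanese because they are shown to Japanese LINE users):
# DEF_ERR_MESSAGE: "Sorry, the data could not be retrieved. Please wait a moment and try again."
# NO_HIT_ERR_MESSAGE: "There seem to be no cafes registered on Gurunavi near you."
# LINK_TEXT: "View on Gurunavi"
# FOLLOWED_RESPONSE: "Thanks for following. Send your location and I'll tell you about cafes nearby."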
def call_restsearch(latitude, longitude):
query = {
"keyid": GNAVI_API_KEY,
"latitude": latitude,
"longitude": longitude,
        # TODO: generate category_s dynamically
        "category_s": "RSFST18008,RSFST18009,RSFST18010,RSFST18011,RSFST18012"
        # TODO: support changing hit_per_page and offset (e.g., make them configurable, or fall back to the browser when there are too many hits)
        # TODO: make the search range selectable through a user action
# "range": search_range
}
params = urllib.parse.urlencode(query, safe=",")
response = urllib.request.urlopen(RESTSEARCH_URL + "?" + params).read()
result = json.loads(response)
if "error" in result:
if "message" in result:
raise Exception("{}".format(result["message"]))
else:
raise Exception(DEF_ERR_MESSAGE)
total_hit_count = result.get("total_hit_count", 0)
if total_hit_count < 1:
raise Exception(NO_HIT_ERR_MESSAGE)
return result
@app.route("/callback", methods=['POST'])
def callback():
signature = request.headers['X-Line-Signature']
body = request.get_data(as_text=True)
app.logger.info("Request body: " + body)
# handle webhook body
try:
handler.handle(body, signature)
except InvalidSignatureError:
abort(400)
except LineBotApiError as e:
app.logger.exception(f'LineBotApiError: {e.status_code} {e.message}', e)
raise e
return 'OK'
@handler.add(MessageEvent, message=TextMessage)
def handle_text_message(event):
line_bot_api.reply_message(
event.reply_token,
TextSendMessage(text=event.message.text)
)
# TODO: proper exception handling
@handler.add(MessageEvent, message=LocationMessage)
def handle_location_message(event):
user_lat = event.message.latitude
user_longit = event.message.longitude
cafe_search_result = call_restsearch(user_lat, user_longit)
print("cafe_search_result is: {}".format(cafe_search_result))
response_json_list = []
# process result
for (count, rest) in enumerate(cafe_search_result.get("rest")):
        # TODO: narrow the displayed results by holiday and opentime
access = rest.get("access", {})
access_walk = "徒歩 {}分".format(access.get("walk", ""))
holiday = "定休日: {}".format(rest.get("holiday", ""))
image_url = rest.get("image_url", {})
image1 = image_url.get("shop_image1", "thumbnail_template.jpg")
if image1 == "":
image1 = BOT_SERVER_URL + "/static/thumbnail_template.jpg"
name = rest.get("name", "")
opentime = "営業時間: {}".format(rest.get("opentime", ""))
# pr = rest.get("pr", "")
# pr_short = pr.get("pr_short", "")
url = rest.get("url", "")
result_text = opentime + "\n" + holiday + "\n" + access_walk + "\n"
if len(result_text) > 60:
result_text = result_text[:56] + "..."
result_dict = {
"thumbnail_image_url": image1,
"title": name,
# "text": pr_short + "\n" + opentime + "\n" + holiday + "\n"
# + access_walk + "\n",
"text": result_text,
"actions": {
"label": "ぐるなびで見る",
"uri": url
}
}
response_json_list.append(result_dict)
print("response_json_list is: {}".format(response_json_list))
columns = [
CarouselColumn(
thumbnail_image_url=column["thumbnail_image_url"],
title=column["title"],
text=column["text"],
actions=[
URITemplateAction(
label=column["actions"]["label"],
uri=column["actions"]["uri"],
)
]
)
for column in response_json_list
]
    # TODO: implement a link to Google Maps
messages = TemplateSendMessage(
alt_text="喫茶店の情報をお伝えしました",
template=CarouselTemplate(columns=columns),
)
print("messages is: {}".format(messages))
line_bot_api.reply_message(
event.reply_token,
messages=messages
)
@handler.add(FollowEvent)
def handle_follow(event):
line_bot_api.reply_message(
event.reply_token, TextSendMessage(text=FOLLOWED_RESPONSE)
)
@handler.add(UnfollowEvent)
def handle_unfollow():
app.logger.info("Got Unfollow event")
if __name__ == "__main__":
# arg_parser = ArgumentParser(
# usage='Usage: python ' + __file__ + ' [--port <port>] [--help]'
# )
# arg_parser.add_argument('-p', '--port', type=int, default=8000, help='port')
# arg_parser.add_argument('-d', '--debug', default=False, help='debug')
# options = arg_parser.parse_args()
#
# app.run(debug=options.debug, port=options.port)
port = int(os.getenv("PORT", 5000))
app.run(host="0.0.0.0", port=port)
|
py
|
1a597dde761907e27e99ccad53a5ecc33e57f8a6
|
from .droppedKey import DroppedKey
from .items import *
class Seashell(DroppedKey):
# Thanks to patches, a seashell is just a dropped key as far as the randomizer is concerned.
def configure(self, options):
if not options.seashells:
self.OPTIONS = [SEASHELL]
class SeashellMansion(DroppedKey):
MULTIWORLD = False
|
py
|
1a597e1388b3b100698eeda1d948366bf3547080
|
import os, time, json, sys
#import from non-standard path module, do not remove.
csfp = os.path.abspath(os.path.join(os.path.dirname(__file__), 'experiment_replication'))
if csfp not in sys.path:
sys.path.insert(0, csfp)
import torch
from multitasking_transformers.heads import SubwordClassificationHead
from multitasking_transformers.multitaskers.util import get_model_path
from transformers import BertConfig, BertForTokenClassification, BertModel
from tokenizers import BertWordPieceTokenizer, Encoding
from pprint import pprint
from experiment_replication.raw_datasets.language import get_language
text = """Admission Date: [**2109-7-21**] Discharge Date: [**2109-8-13**]
Date of Birth: [**2053-6-5**] Sex: F
Service: [**Doctor Last Name 1181**] MEDICINE
HISTORY OF PRESENT ILLNESS: This is a 56-year-old white
female with a history of right frontal craniotomy on [**2109-7-1**], for a dysembryoplastic angioneural epithelial lesion
with features of an oligodendroglioma who was started on
Dilantin postoperatively for seizure prophylaxis and was
subsequently developed eye discharge and was seen by an
optometrist who treated it with sulfate ophthalmic drops.
The patient then developed oral sores and rash in the chest
the night before admission which rapidly spread to the face,
trunk, and upper extremities within the last 24 hours. The
patient was unable to eat secondary to mouth pain. She had
fevers, weakness, and diarrhea. There were no genital
the morning of [**7-20**].
PAST MEDICAL HISTORY: 1. Hypercholesterolemia. 2. Benign
right frontal cystic tumor status post right frontal
craniotomy on [**2109-7-1**].
"""
batch_size = 25
#Defines the maximum number of subwords per sequence during chunking.
#Smaller values result in faster per instance computations, larger values are faster for longer chunks of text
max_sequence_length = 512
def visualize(data_generator):
from spacy import displacy
from spacy.gold import biluo_tags_from_offsets
from spacy.tokens import Span
language = get_language()
ner = language.create_pipe("ner")
# language.add_pipe(ner, last=True)
docs = []
print(data_generator)
for text, annotation in data_generator:
doc = language(text)
for label in annotation['entity_labels']:
ner.add_label(label)
spans = []
for key in annotation['entities']:
for start, stop, label in annotation['entities'][key]:
span = doc.char_span(start, stop, label=label)
if span is None:
continue
spans.append(span)
doc.ents = spans
docs.append(doc)
displacy.serve(docs, style="ent")
device='cpu'
clinical_ner_tasks = ['i2b2_2010','n2c2_2018', 'i2b2_2012', 'i2b2_2014', 'quaero_2014']
model_path = get_model_path('mt_clinical_bert_8_tasks')
tokenizer = BertWordPieceTokenizer(os.path.join(model_path, 'vocab.txt'), lowercase=True, add_special_tokens=False)
#initialize finetuned stacked transformer
bert = BertModel.from_pretrained(model_path)
bert.eval()
heads = {}
#initialize pre-trained heads
for task in clinical_ner_tasks:
config = json.load(open(os.path.join(model_path, f"SubwordClassificationHead_{task}.json"), 'rb'))
heads[task] = SubwordClassificationHead(task, labels=config['labels'],
hidden_size=config['hidden_size'],
hidden_dropout_prob=config['hidden_dropout_prob'])
heads[task].from_pretrained(model_path)
encoding = tokenizer.encode(text)
def prepare_encoding(encoding: Encoding):
"""
    Given an arbitrarily long text (>512 subwords), chunks it into the BERT context window.
    :param encoding: the tokenizer Encoding of the full text
    :return: the (input_ids, attention_mask, token_type_ids) batches, the per-chunk
        character offsets, and the number of tokens in each chunk
"""
def chunk_encoding(tensor : torch.Tensor):
chunks = tensor.split(max_sequence_length)
batch = torch.zeros(size=(len(chunks), max_sequence_length), dtype=torch.long)
#we don't include special tokens during prediction (empirically, doesn't look like it hurts!)
for index, chunk in enumerate(chunks):
batch[index][0:len(chunk)] = torch.clone(chunk)
# batch[index][0] = tokenizer.cls_token
# batch[index][chunk.shape[0] + 1] = tokenizer.sep_token
return batch, [len(chunk) for chunk in chunks]
input_ids, num_tokens_in_instance = chunk_encoding(torch.tensor(encoding.ids, dtype=torch.long))
attention_mask, _ = chunk_encoding(torch.tensor(encoding.attention_mask, dtype=torch.long))
token_type_ids, _ = chunk_encoding(torch.tensor(encoding.type_ids, dtype=torch.long))
return (input_ids, attention_mask, token_type_ids),\
[encoding.offsets[i:i+max_sequence_length] for i in range(0, len(encoding.offsets) ,max_sequence_length)],\
num_tokens_in_instance
(input_ids, attention_mask, token_type_ids), offsets, num_tokens_in_instance = prepare_encoding(encoding)
token_representations = bert(input_ids=input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids)[0]
head_annotations = []
#Get head predictions
for task, head in heads.items():
print(f"Predicting head: {head}")
batch_subword_scores = head(token_representations)[0]
batch_predicted_labels = batch_subword_scores.max(2)[1].tolist()
# print(len(batch_predicted_labels))
spans = []
for idx, (predicted_labels, sequence_offsets) in enumerate(zip(batch_predicted_labels, offsets)):
#print(predicted_labels)
#merge multiple spans together into final annotation.
predicted_labels = list(map(lambda x : x[2:] if '-' in x else x.replace('BERT_TOKEN', 'O'),
[head.config['labels'][label_key] for label_key in predicted_labels]))
sequence_offsets = sequence_offsets
predicted_labels = predicted_labels
# print(sequence_offsets)
# print(predicted_labels)
# print(f"Num tokens in instance: {num_tokens_in_instance[idx]}")
i = 0
prev_label = 'O'
#Group together tokens tagged with entities (post-processing heuristic)
while i < num_tokens_in_instance[idx]:
if predicted_labels[i] == 'O':
i += 1
continue
label_start = i
while i+1 != num_tokens_in_instance[idx] and predicted_labels[i] == predicted_labels[i+1]:
i+=1
label_end = i
spans.append((sequence_offsets[label_start:label_end+1][0][0],
sequence_offsets[label_start:label_end+1][-1][1],
predicted_labels[i]))
i+=1
# print(task)
# print(spans)
annotation = {'entities':{f"T{i}": [span] for i, span in enumerate(spans)},
'entity_labels': list(map(lambda x : x[2:] if '-' in x else x, head.config['labels']))}
head_annotations.append( tuple((str(encoding.original_str), annotation)))
visualize(head_annotations)
|
py
|
1a597f27744835d82d3906c8173c8980e7967697
|
###hi
|
py
|
1a597f450419477d8d015c641c83637c022452e4
|
# Copyright 2017 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from neutron_lib import constants as lib_const
from neutron_lib import context
from neutron_lib import rpc as n_rpc
from neutron_lib.services.qos import constants as qos_consts
from oslo_utils import uuidutils
from neutron.agent.l3 import agent as l3_agent
from neutron.agent.l3.extensions.qos import fip as fip_qos
from neutron.agent.l3 import l3_agent_extension_api as l3_ext_api
from neutron.agent.l3 import router_info as l3router
from neutron.api.rpc.callbacks.consumer import registry
from neutron.api.rpc.callbacks import resources
from neutron.api.rpc.handlers import resources_rpc
from neutron.objects.qos import policy
from neutron.objects.qos import rule
from neutron.tests import base
from neutron.tests.unit.agent.l3 import test_agent
_uuid = uuidutils.generate_uuid
TEST_QOS_FIP = "3.3.3.3"
TEST_FIP = "1.1.1.1"
TEST_FIP2 = "2.2.2.2"
HOSTNAME = 'myhost'
class QosExtensionBaseTestCase(test_agent.BasicRouterOperationsFramework):
def setUp(self):
super(QosExtensionBaseTestCase, self).setUp()
self.fip_qos_ext = fip_qos.FipQosAgentExtension()
self.context = context.get_admin_context()
self.connection = mock.Mock()
self.policy = policy.QosPolicy(context=None,
name='test1', id=_uuid())
self.ingress_rule = (
rule.QosBandwidthLimitRule(context=None, id=_uuid(),
qos_policy_id=self.policy.id,
max_kbps=1111,
max_burst_kbps=2222,
direction=lib_const.INGRESS_DIRECTION))
self.egress_rule = (
rule.QosBandwidthLimitRule(context=None, id=_uuid(),
qos_policy_id=self.policy.id,
max_kbps=3333,
max_burst_kbps=4444,
direction=lib_const.EGRESS_DIRECTION))
self.policy.rules = [self.ingress_rule, self.egress_rule]
self.new_ingress_rule = (
rule.QosBandwidthLimitRule(context=None, id=_uuid(),
qos_policy_id=self.policy.id,
max_kbps=5555,
max_burst_kbps=6666,
direction=lib_const.INGRESS_DIRECTION))
self.ingress_rule_only_has_max_kbps = (
rule.QosBandwidthLimitRule(context=None, id=_uuid(),
qos_policy_id=self.policy.id,
max_kbps=5555,
max_burst_kbps=0,
direction=lib_const.INGRESS_DIRECTION))
self.policy2 = policy.QosPolicy(context=None,
name='test2', id=_uuid())
self.policy2.rules = [self.ingress_rule]
self.policy3 = policy.QosPolicy(context=None,
name='test3', id=_uuid())
self.policy3.rules = [self.egress_rule]
self.policy4 = policy.QosPolicy(context=None,
name='test4', id=_uuid())
self.dscp = rule.QosDscpMarkingRule(context=None, id=_uuid(),
qos_policy_id=self.policy4.id,
dscp_mark=32)
self.dscp.obj_reset_changes()
self.policy4.rules = [self.dscp]
self.qos_policies = {self.policy.id: self.policy,
self.policy2.id: self.policy2,
self.policy3.id: self.policy3,
self.policy4.id: self.policy4}
self.agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
self.ex_gw_port = {'id': _uuid()}
self.fip = {'id': _uuid(),
'floating_ip_address': TEST_QOS_FIP,
'fixed_ip_address': '192.168.0.1',
'floating_network_id': _uuid(),
'port_id': _uuid(),
'host': HOSTNAME,
'qos_policy_id': self.policy.id}
self.router_id = _uuid()
self.router = {'id': self.router_id,
'gw_port': self.ex_gw_port,
'ha': False,
'distributed': False,
lib_const.FLOATINGIP_KEY: [self.fip]}
self.router_info = l3router.RouterInfo(self.agent, self.router_id,
self.router, **self.ri_kwargs)
self.router_info.ex_gw_port = self.ex_gw_port
self.agent.router_info[self.router_id] = self.router_info
def _mock_get_router_info(router_id):
return self.router_info
self.get_router_info = mock.patch(
'neutron.agent.l3.l3_agent_extension_api.'
'L3AgentExtensionAPI.get_router_info').start()
self.get_router_info.side_effect = _mock_get_router_info
self.agent_api = l3_ext_api.L3AgentExtensionAPI(None, None)
self.fip_qos_ext.consume_api(self.agent_api)
class FipQosExtensionInitializeTestCase(QosExtensionBaseTestCase):
@mock.patch.object(registry, 'register')
@mock.patch.object(resources_rpc, 'ResourcesPushRpcCallback')
def test_initialize_subscribed_to_rpc(self, rpc_mock, subscribe_mock):
with mock.patch.object(n_rpc, 'Connection',
return_value=self.connection) as create_connection:
self.fip_qos_ext.initialize(
self.connection, lib_const.L3_AGENT_MODE)
create_connection.assert_has_calls([mock.call()])
self.connection.create_consumer.assert_has_calls(
[mock.call(
resources_rpc.resource_type_versioned_topic(
resources.QOS_POLICY),
[rpc_mock()],
fanout=True)]
)
subscribe_mock.assert_called_with(mock.ANY, resources.QOS_POLICY)
class FipQosExtensionTestCase(QosExtensionBaseTestCase):
def setUp(self):
super(FipQosExtensionTestCase, self).setUp()
self.fip_qos_ext.initialize(
self.connection, lib_const.L3_AGENT_MODE)
self._set_pull_mock()
def _set_pull_mock(self):
def _pull_mock(context, resource_type, resource_id):
return self.qos_policies[resource_id]
self.pull = mock.patch(
'neutron.api.rpc.handlers.resources_rpc.'
'ResourcesPullRpcApi.pull').start()
self.pull.side_effect = _pull_mock
def _test_new_fip_add(self, func):
tc_wrapper = mock.Mock()
with mock.patch.object(self.fip_qos_ext, '_get_tc_wrapper',
return_value=tc_wrapper):
func(self.context, self.router)
tc_wrapper.set_ip_rate_limit.assert_has_calls(
[mock.call(lib_const.INGRESS_DIRECTION,
TEST_QOS_FIP, 1111, 2222),
mock.call(lib_const.EGRESS_DIRECTION,
TEST_QOS_FIP, 3333, 4444)],
any_order=True)
def test_add_router(self):
self._test_new_fip_add(self.fip_qos_ext.add_router)
def test_update_router(self):
self._test_new_fip_add(self.fip_qos_ext.update_router)
def test_update_router_fip_policy_changed(self):
tc_wrapper = mock.Mock()
with mock.patch.object(self.fip_qos_ext, '_get_tc_wrapper',
return_value=tc_wrapper):
self.fip_qos_ext.update_router(self.context, self.router)
tc_wrapper.set_ip_rate_limit.assert_has_calls(
[mock.call(lib_const.INGRESS_DIRECTION,
TEST_QOS_FIP, 1111, 2222),
mock.call(lib_const.EGRESS_DIRECTION,
TEST_QOS_FIP, 3333, 4444)],
any_order=True)
# the policy of floating IP has been changed to
# which only has one egress rule
self.fip[qos_consts.QOS_POLICY_ID] = self.policy3.id
self.fip_qos_ext.update_router(self.context, self.router)
tc_wrapper.clear_ip_rate_limit.assert_has_calls(
[mock.call(lib_const.INGRESS_DIRECTION,
TEST_QOS_FIP)])
def test_update_router_fip_policy_changed_to_none(self):
tc_wrapper = mock.Mock()
with mock.patch.object(self.fip_qos_ext, '_get_tc_wrapper',
return_value=tc_wrapper):
self.fip_qos_ext.update_router(self.context, self.router)
tc_wrapper.set_ip_rate_limit.assert_has_calls(
[mock.call(lib_const.INGRESS_DIRECTION,
TEST_QOS_FIP, 1111, 2222),
mock.call(lib_const.EGRESS_DIRECTION,
TEST_QOS_FIP, 3333, 4444)],
any_order=True)
# floating IP remove the qos_policy bonding
self.fip[qos_consts.QOS_POLICY_ID] = None
self.fip_qos_ext.update_router(self.context, self.router)
tc_wrapper.clear_ip_rate_limit.assert_has_calls(
[mock.call(lib_const.INGRESS_DIRECTION,
TEST_QOS_FIP),
mock.call(lib_const.EGRESS_DIRECTION,
TEST_QOS_FIP)],
any_order=True)
def test__process_update_policy(self):
tc_wrapper = mock.Mock()
with mock.patch.object(self.fip_qos_ext, '_get_tc_wrapper',
return_value=tc_wrapper):
self.fip_qos_ext.update_router(self.context, self.router)
tc_wrapper.set_ip_rate_limit.assert_has_calls(
[mock.call(lib_const.INGRESS_DIRECTION,
TEST_QOS_FIP, 1111, 2222),
mock.call(lib_const.EGRESS_DIRECTION,
TEST_QOS_FIP, 3333, 4444)],
any_order=True)
# the rules of floating IP policy has been changed
self.fip_qos_ext._policy_rules_modified = mock.Mock(
return_value=True)
self.policy.rules = [self.new_ingress_rule, self.egress_rule]
self.fip_qos_ext._process_update_policy(self.policy)
tc_wrapper.set_ip_rate_limit.assert_has_calls(
[mock.call(lib_const.INGRESS_DIRECTION,
TEST_QOS_FIP, 5555, 6666)])
def _test_qos_policy_scenarios(self, fip_removed=True,
qos_rules_removed=False):
tc_wrapper = mock.Mock()
with mock.patch.object(self.fip_qos_ext, '_get_tc_wrapper',
return_value=tc_wrapper):
self.fip_qos_ext.update_router(self.context, self.router)
tc_wrapper.set_ip_rate_limit.assert_has_calls(
[mock.call(lib_const.INGRESS_DIRECTION,
TEST_QOS_FIP, 1111, 2222),
mock.call(lib_const.EGRESS_DIRECTION,
TEST_QOS_FIP, 3333, 4444)],
any_order=True)
if fip_removed:
# floating IP dissociated, then it does not belong to
# this router
self.router[lib_const.FLOATINGIP_KEY] = []
if qos_rules_removed:
self.policy.rules = []
self.fip_qos_ext.update_router(self.context, self.router)
tc_wrapper.clear_ip_rate_limit.assert_has_calls(
[mock.call(lib_const.INGRESS_DIRECTION,
TEST_QOS_FIP),
mock.call(lib_const.EGRESS_DIRECTION,
TEST_QOS_FIP)],
any_order=True)
def test_delete_router(self):
tc_wrapper = mock.Mock()
with mock.patch.object(self.fip_qos_ext, '_get_tc_wrapper',
return_value=tc_wrapper):
self.fip_qos_ext.update_router(self.context, self.router)
tc_wrapper.set_ip_rate_limit.assert_has_calls(
[mock.call(lib_const.INGRESS_DIRECTION,
TEST_QOS_FIP, 1111, 2222),
mock.call(lib_const.EGRESS_DIRECTION,
TEST_QOS_FIP, 3333, 4444)],
any_order=True)
self.fip_qos_ext.delete_router(self.context, self.router)
self.assertIsNone(
self.fip_qos_ext.fip_qos_map.router_floating_ips.get(
self.router_id))
self.assertIsNone(
self.fip_qos_ext.fip_qos_map.ingress_ratelimits.get(
TEST_QOS_FIP))
self.assertIsNone(
self.fip_qos_ext.fip_qos_map.egress_ratelimits.get(
TEST_QOS_FIP))
self.assertIsNone(
self.fip_qos_ext.fip_qos_map.get_resource_policy(
TEST_QOS_FIP))
def test_update_router_fip_removed(self):
self._test_qos_policy_scenarios()
def test_fip_qos_changed_to_none(self):
self._test_qos_policy_scenarios(qos_rules_removed=True)
def _test_only_one_direction_rule(self, func, policy, direction):
tc_wrapper = mock.Mock()
with mock.patch.object(
self.fip_qos_ext.resource_rpc, 'pull',
return_value=policy):
with mock.patch.object(self.fip_qos_ext, '_get_tc_wrapper',
return_value=tc_wrapper):
func(self.context, self.router)
if direction == lib_const.INGRESS_DIRECTION:
calls = [mock.call(lib_const.INGRESS_DIRECTION,
TEST_QOS_FIP, 1111, 2222)]
else:
calls = [mock.call(lib_const.EGRESS_DIRECTION,
TEST_QOS_FIP, 3333, 4444)]
tc_wrapper.set_ip_rate_limit.assert_has_calls(calls)
def test_add_router_only_ingress(self):
self._test_only_one_direction_rule(self.fip_qos_ext.add_router,
self.policy2,
lib_const.INGRESS_DIRECTION)
def test_add_router_only_egress(self):
self._test_only_one_direction_rule(self.fip_qos_ext.add_router,
self.policy3,
lib_const.EGRESS_DIRECTION)
def test_update_router_only_ingress(self):
self._test_only_one_direction_rule(self.fip_qos_ext.update_router,
self.policy2,
lib_const.INGRESS_DIRECTION)
def test_update_router_only_egress(self):
self._test_only_one_direction_rule(self.fip_qos_ext.update_router,
self.policy3,
lib_const.EGRESS_DIRECTION)
def test_rule_only_has_max_kbps(self):
tc_wrapper = mock.Mock()
with mock.patch.object(self.fip_qos_ext, '_get_tc_wrapper',
return_value=tc_wrapper):
self.fip_qos_ext.update_router(self.context, self.router)
tc_wrapper.set_ip_rate_limit.assert_has_calls(
[mock.call(lib_const.INGRESS_DIRECTION,
TEST_QOS_FIP, 1111, 2222),
mock.call(lib_const.EGRESS_DIRECTION,
TEST_QOS_FIP, 3333, 4444)],
any_order=True)
# policy ingress rule changed to only has one max_kbps value
self.policy.rules = [self.ingress_rule_only_has_max_kbps,
self.egress_rule]
self.fip_qos_ext.update_router(self.context, self.router)
tc_wrapper.set_ip_rate_limit.assert_has_calls(
[mock.call(lib_const.INGRESS_DIRECTION,
TEST_QOS_FIP, 5555, 0)])
def test_qos_policy_has_no_bandwidth_limit_rule(self):
tc_wrapper = mock.Mock()
with mock.patch.object(self.fip_qos_ext, '_get_tc_wrapper',
return_value=tc_wrapper):
self.fip['qos_policy_id'] = self.policy4.id
self.fip_qos_ext.add_router(self.context, self.router)
tc_wrapper.set_ip_rate_limit.assert_not_called()
def _test_process_ip_rates(self, with_cache):
rates = {'egress': {'rate': 333, 'burst': 444},
'ingress': {'rate': 111, 'burst': 222}}
fip = '123.123.123.123'
device = mock.Mock()
tc_wrapper = mock.Mock()
with mock.patch.object(
self.fip_qos_ext, '_get_tc_wrapper',
return_value=tc_wrapper) as get_tc_wrapper:
with mock.patch.object(
self.fip_qos_ext, 'process_ip_rate_limit') as process_ip:
self.fip_qos_ext.process_ip_rates(
fip, device, rates, with_cache=with_cache)
if with_cache:
self.assertEqual(2, process_ip.call_count)
else:
self.assertEqual(2, get_tc_wrapper.call_count)
self.assertEqual(
2, tc_wrapper.set_ip_rate_limit.call_count)
def test_process_ip_rates_with_cache(self):
self._test_process_ip_rates(with_cache=True)
def test_process_ip_rates_without_cache(self):
self._test_process_ip_rates(with_cache=False)
class RouterFipRateLimitMapsTestCase(base.BaseTestCase):
def setUp(self):
super(RouterFipRateLimitMapsTestCase, self).setUp()
self.policy_map = fip_qos.RouterFipRateLimitMaps()
def test_find_fip_router_id(self):
router_id = _uuid()
self.policy_map.router_floating_ips[router_id] = set([TEST_FIP,
TEST_FIP2])
self.assertIsNone(self.policy_map.find_fip_router_id("8.8.8.8"))
self.assertEqual(router_id,
self.policy_map.find_fip_router_id(TEST_FIP))
def test_get_router_floating_ips(self):
router_id = _uuid()
test_ips = [TEST_FIP, TEST_FIP2]
self.policy_map.router_floating_ips[router_id] = set([TEST_FIP,
TEST_FIP2])
get_ips = self.policy_map.get_router_floating_ips(router_id)
self.assertEqual(len(test_ips), len(get_ips))
def test_remove_fip_ratelimit_cache(self):
fip = "1.1.1.1"
self.policy_map.set_fip_ratelimit_cache(
"ingress", fip, 100, 200)
self.policy_map.set_fip_ratelimit_cache(
"egress", fip, 100, 200)
self.policy_map.remove_fip_ratelimit_cache("ingress", fip)
self.assertIsNone(self.policy_map.ingress_ratelimits.get(fip))
self.policy_map.remove_fip_ratelimit_cache("egress", fip)
self.assertIsNone(self.policy_map.egress_ratelimits.get(fip))
def test_set_fip_ratelimit_cache(self):
fip = "1.1.1.1"
self.policy_map.set_fip_ratelimit_cache(
"ingress", fip, 100, 200)
self.policy_map.set_fip_ratelimit_cache(
"egress", fip, 300, 400)
in_rate, in_burst = self.policy_map.get_fip_ratelimit_cache(
"ingress", fip)
self.assertEqual(100, in_rate)
self.assertEqual(200, in_burst)
e_rate, e_burst = self.policy_map.get_fip_ratelimit_cache(
"egress", fip)
self.assertEqual(300, e_rate)
self.assertEqual(400, e_burst)
|
py
|
1a59804c45abaa187a595255878ad9355f83d9d2
|
import math
from itertools import combinations_with_replacement
from operator import attrgetter
from typing import Dict, List, Type
from locust import User
def weight_users(user_classes: List[Type[User]], user_count: int) -> Dict[str, int]:
"""
Compute the desired state of users using the weight of each user class.
:param user_classes: the list of user class
:param user_count: total number of users
:return: the set of users to run
"""
assert user_count >= 0
if len(user_classes) == 0:
return {}
user_classes = sorted(user_classes, key=attrgetter("__name__"))
user_classes_count = {user_class.__name__: 0 for user_class in user_classes}
# If the number of users is less than the number of user classes, at most one user of each user class
# is chosen. User classes with higher weight are chosen first.
if user_count <= len(user_classes):
user_classes_count.update(
{
user_class.__name__: 1
for user_class in sorted(user_classes, key=attrgetter("weight"), reverse=True)[:user_count]
}
)
return user_classes_count
# If the number of users is greater than or equal to the number of user classes, at least one user of each
    # user class will be chosen. The greater the number of users, the better the actual distribution
# of users will match the desired one (as dictated by the weight attributes).
weights = list(map(attrgetter("weight"), user_classes))
relative_weights = [weight / sum(weights) for weight in weights]
user_classes_count = {
user_class.__name__: round(relative_weight * user_count) or 1
for user_class, relative_weight in zip(user_classes, relative_weights)
}
if sum(user_classes_count.values()) == user_count:
return user_classes_count
else:
user_classes_count = _find_ideal_users_to_add_or_remove(
user_classes, user_count - sum(user_classes_count.values()), user_classes_count
)
assert sum(user_classes_count.values()) == user_count
return user_classes_count
def _find_ideal_users_to_add_or_remove(
user_classes: List[Type[User]], user_count_to_add_or_remove: int, user_classes_count: Dict[str, int]
) -> Dict[str, int]:
sign = -1 if user_count_to_add_or_remove < 0 else 1
user_count_to_add_or_remove = abs(user_count_to_add_or_remove)
assert user_count_to_add_or_remove <= len(user_classes), user_count_to_add_or_remove
# Formula for combination with replacement
# (https://www.tutorialspoint.com/statistics/combination_with_replacement.htm)
number_of_combinations = math.factorial(len(user_classes) + user_count_to_add_or_remove - 1) / (
math.factorial(user_count_to_add_or_remove) * math.factorial(len(user_classes) - 1)
)
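    # Worked example (illustrative, not from the original code): with 3 user classes
    # and 2 users to add, the count is (3 + 2 - 1)! / (2! * (3 - 1)!) = 24 / 4 = 6.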
# If the number of combinations with replacement is above this threshold, we simply add/remove
    # users for the first "user_count_to_add_or_remove" users, because computing the best
    # distribution would be too expensive.
max_number_of_combinations_threshold = 1000
if number_of_combinations <= max_number_of_combinations_threshold:
user_classes_count_candidates: Dict[float, Dict[str, int]] = {}
for user_classes_combination in combinations_with_replacement(user_classes, user_count_to_add_or_remove):
# Copy in order to not mutate `user_classes_count` for the parent scope
user_classes_count_candidate = user_classes_count.copy()
for user_class in user_classes_combination:
user_classes_count_candidate[user_class.__name__] += sign
distance = distance_from_desired_distribution(user_classes, user_classes_count_candidate)
if distance not in user_classes_count_candidates:
user_classes_count_candidates[distance] = user_classes_count_candidate.copy()
return user_classes_count_candidates[min(user_classes_count_candidates.keys())]
else:
# Copy in order to not mutate `user_classes_count` for the parent scope
user_classes_count_candidate = user_classes_count.copy()
for user_class in user_classes[:user_count_to_add_or_remove]:
user_classes_count_candidate[user_class.__name__] += sign
return user_classes_count_candidate
def distance_from_desired_distribution(user_classes: List[Type[User]], user_classes_count: Dict[str, int]) -> float:
actual_ratio_of_user_class = {
user_class: user_class_count / sum(user_classes_count.values())
for user_class, user_class_count in user_classes_count.items()
}
expected_ratio_of_user_class = {
user_class.__name__: user_class.weight / sum(map(attrgetter("weight"), user_classes))
for user_class in user_classes
}
differences = [
actual_ratio_of_user_class[user_class] - expected_ratio
for user_class, expected_ratio in expected_ratio_of_user_class.items()
]
return math.sqrt(math.fsum(map(lambda x: x ** 2, differences)))
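# --- Illustrative usage sketch (added; not part of the original module) ---
# A minimal, self-contained demonstration of weight_users(). The two User
# subclasses below are made-up examples; with a 3:1 weight ratio and 8 users
# the computed distribution is 6 and 2.
if __name__ == "__main__":
    class HeavyUser(User):
        weight = 3

    class LightUser(User):
        weight = 1

    # Expected result: {'HeavyUser': 6, 'LightUser': 2}
    print(weight_users([HeavyUser, LightUser], 8))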
|
py
|
1a5982f41a4ad89d301a00409e725beb8fa4bb5b
|
import cv2
import numpy as np
import os
import re
import argparse
def list_files(path):
    pattern = re.compile(r'(-?\d+),(-?\d+)\.png')
res = list()
rg = list() #[xmin ymin xmax ymax]
for (dirpath, dirnames, filenames) in os.walk(path):
for filename in filenames:
m = pattern.match(filename)
if m is not None:
x = int(m.group(1))
y = int(m.group(2))
p = os.path.join(dirpath, filename)
res.append((x,y,p))
if len(rg) == 0:
rg.append(x)
rg.append(y)
rg.append(x)
rg.append(y)
else:
if rg[0] > x:
rg[0] = x
if rg[1] > y:
rg[1] = y
if rg[2] < x:
rg[2] = x
if rg[3] < y:
rg[3] = y
rg = (rg[0], rg[1], rg[2] + 1, rg[3] + 1)
return (res, rg)
def merge(res, rg):
st = np.array((256, 256), dtype=np.int32)
rg = np.array(rg, dtype=np.int32)
sz = (rg[2:4] - rg[0:2]) * st
img = np.zeros((sz[1], sz[0], 4), dtype=np.uint8)
st = np.array((st[0], st[1], st[0], st[1]), dtype=np.int32)
sz = np.array((rg[0], rg[1], rg[0], rg[1]), dtype=np.int32)
for (x, z, path) in res:
if x < rg[0] or z < rg[1] or x >= rg[2] or z >= rg[3]:
continue
tg = np.array((x, z, x + 1, z + 1), dtype=np.int32)
tg = (tg - sz) * st
part = cv2.imread(path, flags=cv2.IMREAD_UNCHANGED)
if part is None:
continue
img[tg[1]:tg[3],tg[0]:tg[2],:] = part[:,:,:]
return img
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('input_dir', type=str)
parser.add_argument('-o', '--output_file', type=str)
    parser.add_argument('-r', '--range', type=str)  # "xmin ymin xmax ymax" or "max"
args = parser.parse_args()
(res, rg) = list_files(args.input_dir)
    if args.range and args.range != 'max':
sp = args.range.split(' ')
p1 = sp[0:2]
xmin = int(p1[0])
ymin = int(p1[1])
p2 = sp[2:4]
xmax = int(p2[0]) + 1
ymax = int(p2[1]) + 1
rg = (xmin, ymin, xmax, ymax)
h = merge(res, rg)
cv2.imwrite(args.output_file, h)
pass
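# --- Usage sketch (assumed; not part of the original script) ---
# The script stitches 256x256 tiles named "<x>,<y>.png" from a directory into a
# single image. The file name "merge_tiles.py" below is hypothetical:
#   python merge_tiles.py ./tiles -o merged.png -r "0 0 3 3"
#   python merge_tiles.py ./tiles -o merged.png -r max   # use the full tile extent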
|
py
|
1a59849f367c469ad1d0c9dcc65c81c49765d715
|
"""
<Program Name>
download.py
<Started>
February 21, 2012. Based on previous version by Geremy Condra.
<Author>
Konstantin Andrianov
Vladimir Diaz <[email protected]>
<Copyright>
See LICENSE for licensing information.
<Purpose>
Download metadata and target files and check their validity. The hash and
  length of a downloaded file have to match the hash and length supplied by the
metadata of that file. The downloaded file is technically a file-like object
  that will automatically destroy itself once closed. Note that the file-like
object, 'tuf.util.TempFile', is returned by the '_download_file()' function.
"""
# Help with Python 3 compatibility, where the print statement is a function, an
# implicit relative import is invalid, and the '/' operator performs true
# division. Example: print 'hello world' raises a 'SyntaxError' exception.
from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
import os
import socket
import logging
import timeit
import ssl
import tuf
import tuf.conf
import tuf.hash
import tuf.util
import tuf.formats
import tuf._vendor.six as six
# 'ssl.match_hostname' was added in Python 3.2. The vendored version is needed
# for Python 2.6 and 2.7.
try:
from ssl import match_hostname, CertificateError
except ImportError:
from tuf._vendor.ssl_match_hostname import match_hostname, CertificateError
# See 'log.py' to learn how logging is handled in TUF.
logger = logging.getLogger('tuf.download')
def safe_download(url, required_length):
"""
<Purpose>
Given the 'url' and 'required_length' of the desired file, open a connection
to 'url', download it, and return the contents of the file. Also ensure
the length of the downloaded file matches 'required_length' exactly.
tuf.download.unsafe_download() may be called if an upper download limit is
preferred.
'tuf.util.TempFile', the file-like object returned, is used instead of
regular tempfile object because of additional functionality provided, such
as handling compressed metadata and automatically closing files after
moving to final destination.
<Arguments>
url:
A URL string that represents the location of the file.
required_length:
An integer value representing the length of the file. This is an exact
limit.
<Side Effects>
A 'tuf.util.TempFile' object is created on disk to store the contents of
'url'.
<Exceptions>
tuf.DownloadLengthMismatchError, if there was a mismatch of observed vs
expected lengths while downloading the file.
tuf.FormatError, if any of the arguments are improperly formatted.
Any other unforeseen runtime exception.
<Returns>
A 'tuf.util.TempFile' file-like object that points to the contents of 'url'.
"""
return _download_file(url, required_length, STRICT_REQUIRED_LENGTH=True)
def unsafe_download(url, required_length):
"""
<Purpose>
Given the 'url' and 'required_length' of the desired file, open a connection
to 'url', download it, and return the contents of the file. Also ensure
the length of the downloaded file is up to 'required_length', and no larger.
tuf.download.safe_download() may be called if an exact download limit is
preferred.
'tuf.util.TempFile', the file-like object returned, is used instead of
regular tempfile object because of additional functionality provided, such
as handling compressed metadata and automatically closing files after
moving to final destination.
<Arguments>
url:
A URL string that represents the location of the file.
required_length:
An integer value representing the length of the file. This is an upper
limit.
<Side Effects>
A 'tuf.util.TempFile' object is created on disk to store the contents of
'url'.
<Exceptions>
tuf.DownloadLengthMismatchError, if there was a mismatch of observed vs
expected lengths while downloading the file.
tuf.FormatError, if any of the arguments are improperly formatted.
Any other unforeseen runtime exception.
<Returns>
A 'tuf.util.TempFile' file-like object that points to the contents of 'url'.
"""
return _download_file(url, required_length, STRICT_REQUIRED_LENGTH=False)
def _download_file(url, required_length, STRICT_REQUIRED_LENGTH=True):
"""
<Purpose>
Given the url, hashes and length of the desired file, this function
opens a connection to 'url' and downloads the file while ensuring its
length and hashes match 'required_hashes' and 'required_length'.
tuf.util.TempFile is used instead of regular tempfile object because of
additional functionality provided by 'tuf.util.TempFile'.
<Arguments>
url:
A URL string that represents the location of the file.
required_length:
An integer value representing the length of the file.
STRICT_REQUIRED_LENGTH:
A Boolean indicator used to signal whether we should perform strict
checking of required_length. True by default. We explicitly set this to
False when we know that we want to turn this off for downloading the
timestamp metadata, which has no signed required_length.
<Side Effects>
A 'tuf.util.TempFile' object is created on disk to store the contents of
'url'.
<Exceptions>
tuf.DownloadLengthMismatchError, if there was a mismatch of observed vs
expected lengths while downloading the file.
tuf.FormatError, if any of the arguments are improperly formatted.
Any other unforeseen runtime exception.
<Returns>
A 'tuf.util.TempFile' file-like object that points to the contents of 'url'.
"""
# Do all of the arguments have the appropriate format?
# Raise 'tuf.FormatError' if there is a mismatch.
tuf.formats.URL_SCHEMA.check_match(url)
tuf.formats.LENGTH_SCHEMA.check_match(required_length)
# 'url.replace()' is for compatibility with Windows-based systems because
# they might put back-slashes in place of forward-slashes. This converts it
# to the common format.
url = url.replace('\\', '/')
logger.info('Downloading: '+str(url))
# This is the temporary file that we will return to contain the contents of
# the downloaded file.
temp_file = tuf.util.TempFile()
try:
# Open the connection to the remote file.
connection = _open_connection(url)
# We ask the server about how big it thinks this file should be.
reported_length = _get_content_length(connection)
# Then, we check whether the required length matches the reported length.
_check_content_length(reported_length, required_length,
STRICT_REQUIRED_LENGTH)
# Download the contents of the URL, up to the required length, to a
# temporary file, and get the total number of downloaded bytes.
total_downloaded = _download_fixed_amount_of_data(connection, temp_file,
required_length)
# Does the total number of downloaded bytes match the required length?
_check_downloaded_length(total_downloaded, required_length,
STRICT_REQUIRED_LENGTH=STRICT_REQUIRED_LENGTH)
except:
# Close 'temp_file'. Any written data is lost.
temp_file.close_temp_file()
logger.exception('Could not download URL: '+str(url))
raise
else:
return temp_file
def _download_fixed_amount_of_data(connection, temp_file, required_length):
"""
<Purpose>
This is a helper function, where the download really happens. While-block
reads data from connection a fixed chunk of data at a time, or less, until
'required_length' is reached.
<Arguments>
connection:
The object that the _open_connection returns for communicating with the
server about the contents of a URL.
temp_file:
A temporary file where the contents at the URL specified by the
'connection' object will be stored.
required_length:
The number of bytes that we must download for the file. This is almost
always specified by the TUF metadata for the data file in question
(except in the case of timestamp metadata, in which case we would fix a
reasonable upper bound).
<Side Effects>
Data from the server will be written to 'temp_file'.
<Exceptions>
Runtime or network exceptions will be raised without question.
<Returns>
total_downloaded:
The total number of bytes downloaded for the desired file.
"""
# Tolerate servers with a slow start by ignoring their delivery speed for
# 'tuf.conf.SLOW_START_GRACE_PERIOD' seconds. Set 'seconds_spent_receiving'
# to negative SLOW_START_GRACE_PERIOD seconds, and begin checking the average
# download speed once it is positive.
grace_period = -tuf.conf.SLOW_START_GRACE_PERIOD
# Keep track of total bytes downloaded.
number_of_bytes_received = 0
start_time = timeit.default_timer()
try:
while True:
# We download a fixed chunk of data in every round. This is so that we
# can defend against slow retrieval attacks. Furthermore, we do not wish
# to download an extremely large file in one shot.
data = b''
read_amount = min(tuf.conf.CHUNK_SIZE,
required_length - number_of_bytes_received)
#logger.debug('Reading next chunk...')
try:
data = connection.read(read_amount)
# Python 3.2 returns 'IOError' if the remote file object has timed out.
except (socket.error, IOError):
pass
number_of_bytes_received = number_of_bytes_received + len(data)
# Data successfully read from the connection. Store it.
temp_file.write(data)
if number_of_bytes_received == required_length:
break
stop_time = timeit.default_timer()
seconds_spent_receiving = stop_time - start_time
if (seconds_spent_receiving + grace_period) < 0:
#logger.debug('Ignoring average download speed for another: '+\
#str(-seconds_spent_receiving) + ' seconds')
continue
# Measure the average download speed.
average_download_speed = number_of_bytes_received / seconds_spent_receiving
# If the average download speed is below a certain threshold, we flag
# this as a possible slow-retrieval attack.
if average_download_speed < tuf.conf.MIN_AVERAGE_DOWNLOAD_SPEED:
break
else:
logger.debug('Good average download speed: '+\
str(average_download_speed) + ' bytes per second')
# We might have no more data to read. Check number of bytes downloaded.
if not data:
message = 'Downloaded '+str(number_of_bytes_received)+'/'+ \
str(required_length)+' bytes.'
logger.debug(message)
# Finally, we signal that the download is complete.
break
except:
raise
else:
# This else block returns and skips closing the connection in the finally
# block, so close the connection here.
connection.close()
return number_of_bytes_received
finally:
# Whatever happens, make sure that we always close the connection.
connection.close()
def _get_request(url):
"""
  Wraps the URL to retrieve to protect against "creative"
interpretation of the RFC: http://bugs.python.org/issue8732
https://github.com/pypa/pip/blob/d0fa66ecc03ab20b7411b35f7c7b423f31f77761/pip/download.py#L147
"""
return six.moves.urllib.request.Request(url, headers={'Accept-encoding': 'identity'})
def _get_opener(scheme=None):
"""
Build a urllib2 opener based on whether the user now wants SSL.
https://github.com/pypa/pip/blob/d0fa66ecc03ab20b7411b35f7c7b423f31f77761/pip/download.py#L178
"""
if scheme == "https":
assert os.path.isfile(tuf.conf.ssl_certificates)
# If we are going over https, use an opener which will provide SSL
# certificate verification.
https_handler = VerifiedHTTPSHandler()
opener = six.moves.urllib.request.build_opener(https_handler)
# strip out HTTPHandler to prevent MITM spoof
for handler in opener.handlers:
if isinstance(handler, six.moves.urllib.request.HTTPHandler):
opener.handlers.remove(handler)
else:
# Otherwise, use the default opener.
opener = six.moves.urllib.request.build_opener()
return opener
def _open_connection(url):
"""
<Purpose>
Helper function that opens a connection to the url. urllib2 supports http,
ftp, and file. In python (2.6+) where the ssl module is available, urllib2
also supports https.
TODO: Determine whether this follows http redirects and decide if we like
that. For example, would we not want to allow redirection from ssl to
non-ssl urls?
<Arguments>
url:
URL string (e.g., 'http://...' or 'ftp://...' or 'file://...')
<Exceptions>
None.
<Side Effects>
Opens a connection to a remote server.
<Returns>
File-like object.
"""
  # urllib2.Request produces a Request object that allows for finer control of
  # the requesting process. The Request object allows adding headers or data to
  # the HTTP request. For instance, the add_header(key, val) request method can
  # be used to change/spoof the 'User-Agent' from the default Python-urllib/x.y
  # to 'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1)'. This can be useful
  # if servers do not recognize connections that originate from
  # Python-urllib/x.y.
parsed_url = six.moves.urllib.parse.urlparse(url)
opener = _get_opener(scheme=parsed_url.scheme)
request = _get_request(url)
return opener.open(request, timeout = tuf.conf.SOCKET_TIMEOUT)
def _get_content_length(connection):
"""
<Purpose>
A helper function that gets the purported file length from server.
<Arguments>
connection:
The object that the _open_connection function returns for communicating
with the server about the contents of a URL.
<Side Effects>
No known side effects.
<Exceptions>
Runtime exceptions will be suppressed but logged.
<Returns>
reported_length:
The total number of bytes reported by server. If the process fails, we
return None; otherwise we would return a nonnegative integer.
"""
try:
# What is the length of this document according to the HTTP spec?
reported_length = connection.info().get('Content-Length')
# Try casting it as a decimal number.
reported_length = int(reported_length, 10)
# Make sure that it is a nonnegative integer.
assert reported_length > -1
except:
message = \
'Could not get content length about ' + str(connection) + ' from server.'
logger.exception(message)
reported_length = None
finally:
return reported_length
def _check_content_length(reported_length, required_length, strict_length=True):
"""
<Purpose>
A helper function that checks whether the length reported by server is
equal to the length we expected.
<Arguments>
reported_length:
The total number of bytes reported by the server.
required_length:
The total number of bytes obtained from (possibly default) metadata.
strict_length:
Boolean that indicates whether the required length of the file is an
exact match, or an upper limit (e.g., downloading a Timestamp file).
<Side Effects>
No known side effects.
<Exceptions>
No known exceptions.
<Returns>
None.
"""
logger.debug('The server reported a length of '+repr(reported_length)+' bytes.')
comparison_result = None
try:
if reported_length < required_length:
comparison_result = 'less than'
elif reported_length > required_length:
comparison_result = 'greater than'
else:
comparison_result = 'equal to'
except:
logger.exception('Could not check reported and required lengths.')
if strict_length:
message = 'The reported length is '+comparison_result+' the required '+\
'length of '+repr(required_length)+' bytes.'
logger.debug(message)
else:
message = 'The reported length is '+comparison_result+' the upper limit '+\
'of '+repr(required_length)+' bytes.'
logger.debug(message)
def _check_downloaded_length(total_downloaded, required_length,
STRICT_REQUIRED_LENGTH=True):
"""
<Purpose>
A helper function which checks whether the total number of downloaded bytes
matches our expectation.
<Arguments>
total_downloaded:
The total number of bytes supposedly downloaded for the file in question.
required_length:
The total number of bytes expected of the file as seen from its metadata.
The Timestamp role is always downloaded without a known file length, and
the Root role when the client cannot download any of the required
top-level roles. In both cases, 'required_length' is actually an upper
limit on the length of the downloaded file.
STRICT_REQUIRED_LENGTH:
A Boolean indicator used to signal whether we should perform strict
checking of required_length. True by default. We explicitly set this to
False when we know that we want to turn this off for downloading the
timestamp metadata, which has no signed required_length.
<Side Effects>
None.
<Exceptions>
tuf.DownloadLengthMismatchError, if STRICT_REQUIRED_LENGTH is True and
total_downloaded is not equal required_length.
<Returns>
None.
"""
if total_downloaded == required_length:
logger.info('Downloaded '+str(total_downloaded)+' bytes out of the '+\
'expected '+str(required_length)+ ' bytes.')
else:
difference_in_bytes = abs(total_downloaded - required_length)
# What we downloaded is not equal to the required length, but did we ask
# for strict checking of required length?
if STRICT_REQUIRED_LENGTH:
message = 'Downloaded '+str(total_downloaded)+' bytes, but expected '+\
str(required_length)+' bytes. There is a difference of '+\
str(difference_in_bytes)+' bytes.'
# This must be due to a programming error, and must never happen!
logger.error(message)
raise tuf.DownloadLengthMismatchError(required_length, total_downloaded)
else:
message = 'Downloaded '+str(total_downloaded)+' bytes out of an upper '+\
'limit of '+str(required_length)+' bytes.'
# We specifically disabled strict checking of required length, but we
# will log a warning anyway. This is useful when we wish to download the
# Timestamp or Root metadata, for which we have no signed metadata; so,
# we must guess a reasonable required_length for it.
logger.info(message)
class VerifiedHTTPSConnection(six.moves.http_client.HTTPSConnection):
"""
A connection that wraps connections with ssl certificate verification.
https://github.com/pypa/pip/blob/d0fa66ecc03ab20b7411b35f7c7b423f31f77761/pip/download.py#L72
"""
def connect(self):
self.connection_kwargs = {}
# for > py2.5
if hasattr(self, 'timeout'):
self.connection_kwargs.update(timeout = self.timeout)
# for >= py2.7
if hasattr(self, 'source_address'):
self.connection_kwargs.update(source_address = self.source_address)
sock = socket.create_connection((self.host, self.port), **self.connection_kwargs)
# for >= py2.7
if getattr(self, '_tunnel_host', None):
self.sock = sock
self._tunnel()
# set location of certificate authorities
assert os.path.isfile( tuf.conf.ssl_certificates )
cert_path = tuf.conf.ssl_certificates
# TODO: Disallow SSLv2.
# http://docs.python.org/dev/library/ssl.html#protocol-versions
# TODO: Select the right ciphers.
# http://docs.python.org/dev/library/ssl.html#cipher-selection
self.sock = ssl.wrap_socket(sock, self.key_file, self.cert_file,
cert_reqs=ssl.CERT_REQUIRED,
ca_certs=cert_path)
match_hostname(self.sock.getpeercert(), self.host)
class VerifiedHTTPSHandler(six.moves.urllib.request.HTTPSHandler):
"""
A HTTPSHandler that uses our own VerifiedHTTPSConnection.
https://github.com/pypa/pip/blob/d0fa66ecc03ab20b7411b35f7c7b423f31f77761/pip/download.py#L109
"""
def __init__(self, connection_class = VerifiedHTTPSConnection):
self.specialized_conn_class = connection_class
six.moves.urllib.request.HTTPSHandler.__init__(self)
def https_open(self, req):
return self.do_open(self.specialized_conn_class, req)
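# --- Illustrative usage sketch (added; not part of the original module) ---
# Both public helpers return a 'tuf.util.TempFile' that the caller must close.
# The URL and length below are placeholders, and tuf.conf is assumed to be
# configured (e.g., 'ssl_certificates' for https URLs):
#
#   temp_file = safe_download('http://example.com/metadata/root.json', 1024)
#   data = temp_file.read()
#   temp_file.close_temp_file()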
|
py
|
1a5985c6fb10437f7450e3f60a7d9bc21e897554
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
"""
This file should only contain constants used for the EKS tests.
"""
import re
from enum import Enum
from typing import Dict, List, Pattern, Tuple
DEFAULT_CONN_ID: str = "aws_default"
DEFAULT_NAMESPACE: str = "default_namespace"
FROZEN_TIME: str = "2013-11-27T01:42:00Z"
# Fargate docs say there is a limit of five labels per Selector.
MAX_FARGATE_LABELS: int = 5
PACKAGE_NOT_PRESENT_MSG: str = "mock_eks package not present"
PARTITION: str = "aws"
NON_EXISTING_CLUSTER_NAME: str = "non_existing_cluster"
NON_EXISTING_FARGATE_PROFILE_NAME: str = "non_existing_fargate_profile"
NON_EXISTING_NODEGROUP_NAME: str = "non_existing_nodegroup"
REGION: str = "us-east-1"
SUBNET_IDS: List[str] = ["subnet-12345ab", "subnet-67890cd"]
TASK_ID: str = "test-eks-operator"
AMI_TYPE: Tuple[str, str] = ("amiType", "AL2_x86_64")
CLIENT_REQUEST_TOKEN: Tuple[str, str] = ("clientRequestToken", "test_request_token")
DISK_SIZE: Tuple[str, int] = ("diskSize", 30)
ENCRYPTION_CONFIG: Tuple[str, List] = (
"encryptionConfig",
[{"resources": ["secrets"], "provider": {"keyArn": "arn:of:the:key"}}],
)
INSTANCE_TYPES: Tuple[str, List] = ("instanceTypes", ["t3.medium"])
KUBERNETES_NETWORK_CONFIG: Tuple[str, Dict] = (
"kubernetesNetworkConfig",
{"serviceIpv4Cidr": "172.20.0.0/16"},
)
LABELS: Tuple[str, Dict] = ("labels", {"purpose": "example"})
LAUNCH_TEMPLATE: Tuple[str, Dict] = ("launchTemplate", {"name": "myTemplate", "version": "2", "id": "123456"})
LOGGING: Tuple[str, Dict] = ("logging", {"clusterLogging": [{"types": ["api"], "enabled": True}]})
NODEROLE_ARN: Tuple[str, str] = ("nodeRole", "arn:aws:iam::123456789012:role/role_name")
POD_EXECUTION_ROLE_ARN: Tuple[str, str] = ("podExecutionRoleArn", "arn:aws:iam::123456789012:role/role_name")
REMOTE_ACCESS: Tuple[str, Dict] = ("remoteAccess", {"ec2SshKey": "eksKeypair"})
RESOURCES_VPC_CONFIG: Tuple[str, Dict] = (
"resourcesVpcConfig",
{
"subnetIds": SUBNET_IDS,
"endpointPublicAccess": True,
"endpointPrivateAccess": False,
},
)
ROLE_ARN: Tuple[str, str] = ("roleArn", "arn:aws:iam::123456789012:role/role_name")
SCALING_CONFIG: Tuple[str, Dict] = ("scalingConfig", {"minSize": 2, "maxSize": 3, "desiredSize": 2})
SELECTORS: Tuple[str, List] = ("selectors", [{"namespace": "profile-namespace"}])
STATUS: Tuple[str, str] = ("status", "ACTIVE")
SUBNETS: Tuple[str, List] = ("subnets", SUBNET_IDS)
TAGS: Tuple[str, Dict] = ("tags", {"hello": "world"})
VERSION: Tuple[str, str] = ("version", "1")
class ResponseAttributes:
"""Key names for the dictionaries returned by API calls."""
    CLUSTER: str = "cluster"
    CLUSTERS: str = "clusters"
    FARGATE_PROFILE_NAMES: str = "fargateProfileNames"
    FARGATE_PROFILE: str = "fargateProfile"
    NEXT_TOKEN: str = "nextToken"
    NODEGROUP: str = "nodegroup"
    NODEGROUPS: str = "nodegroups"
class ErrorAttributes:
"""Key names for the dictionaries representing error messages."""
    CODE: str = "Code"
    ERROR: str = "Error"
    MESSAGE: str = "Message"
class ClusterInputs:
"""All possible inputs for creating an EKS Cluster."""
REQUIRED: List[Tuple] = [ROLE_ARN, RESOURCES_VPC_CONFIG]
OPTIONAL: List[Tuple] = [
CLIENT_REQUEST_TOKEN,
ENCRYPTION_CONFIG,
LOGGING,
KUBERNETES_NETWORK_CONFIG,
TAGS,
VERSION,
]
class FargateProfileInputs:
    """All possible inputs for creating an EKS Fargate profile."""
    REQUIRED: List[Tuple] = [POD_EXECUTION_ROLE_ARN, SELECTORS]
OPTIONAL: List[Tuple] = [SUBNETS, TAGS]
class NodegroupInputs:
"""All possible inputs for creating an EKS Managed Nodegroup."""
REQUIRED: List[Tuple] = [NODEROLE_ARN, SUBNETS]
OPTIONAL: List[Tuple] = [
AMI_TYPE,
DISK_SIZE,
INSTANCE_TYPES,
LABELS,
REMOTE_ACCESS,
SCALING_CONFIG,
TAGS,
]
class PossibleTestResults(Enum):
"""Possible test results."""
SUCCESS: str = "SUCCESS"
FAILURE: str = "FAILURE"
class ClusterAttributes:
"""Key names for the dictionaries representing EKS Clusters."""
    ARN: str = "arn"
    CLUSTER_NAME: str = "clusterName"
    CREATED_AT: str = "createdAt"
    ENDPOINT: str = "endpoint"
    IDENTITY: str = "identity"
    ISSUER: str = "issuer"
    NAME: str = "name"
    OIDC: str = "oidc"
class FargateProfileAttributes:
    """Key names for the dictionaries representing EKS Fargate profiles."""
    ARN: str = "fargateProfileArn"
    CREATED_AT: str = "createdAt"
    FARGATE_PROFILE_NAME: str = "fargateProfileName"
    LABELS: str = "labels"
    NAMESPACE: str = "namespace"
    SELECTORS: str = "selectors"
class NodegroupAttributes:
"""Key names for the dictionaries representing EKS Managed Nodegroups."""
    ARN: str = "nodegroupArn"
    AUTOSCALING_GROUPS: str = "autoScalingGroups"
    CREATED_AT: str = "createdAt"
    MODIFIED_AT: str = "modifiedAt"
    NAME: str = "name"
    NODEGROUP_NAME: str = "nodegroupName"
    REMOTE_ACCESS_SG: str = "remoteAccessSecurityGroup"
    RESOURCES: str = "resources"
    TAGS: str = "tags"
class BatchCountSize:
"""Sizes of test data batches to generate."""
SINGLE: int = 1
SMALL: int = 10
MEDIUM: int = 20
LARGE: int = 200
class PageCount:
"""Page lengths to use when testing pagination."""
SMALL: int = 3
LARGE: int = 10
FARGATE_PROFILE_UUID_PATTERN: str = (
"(?P<fargate_uuid>[-0-9a-z]{8}-[-0-9a-z]{4}-[-0-9a-z]{4}-[-0-9a-z]{4}-[-0-9a-z]{12})"
)
NODEGROUP_UUID_PATTERN: str = (
"(?P<nodegroup_uuid>[-0-9a-z]{8}-[-0-9a-z]{4}-[-0-9a-z]{4}-[-0-9a-z]{4}-[-0-9a-z]{12})"
)
class RegExTemplates:
"""The compiled RegEx patterns used in testing."""
CLUSTER_ARN: Pattern = re.compile(
"arn:"
+ "(?P<partition>.+):"
+ "eks:"
+ "(?P<region>[-0-9a-zA-Z]+):"
+ "(?P<account_id>[0-9]{12}):"
+ "cluster/"
+ "(?P<cluster_name>.+)"
)
FARGATE_PROFILE_ARN: Pattern = re.compile(
"arn:"
+ "(?P<partition>.+):"
+ "eks:"
+ "(?P<region>[-0-9a-zA-Z]+):"
+ "(?P<account_id>[0-9]{12}):"
+ "fargateprofile/"
+ "(?P<cluster_name>.+)/"
+ "(?P<fargate_name>.+)/"
+ FARGATE_PROFILE_UUID_PATTERN
)
NODEGROUP_ARN: Pattern = re.compile(
"arn:"
+ "(?P<partition>.+):"
+ "eks:"
+ "(?P<region>[-0-9a-zA-Z]+):"
+ "(?P<account_id>[0-9]{12}):"
+ "nodegroup/"
+ "(?P<cluster_name>.+)/"
+ "(?P<nodegroup_name>.+)/"
+ NODEGROUP_UUID_PATTERN
)
NODEGROUP_ASG_NAME_PATTERN: Pattern = re.compile("eks-" + NODEGROUP_UUID_PATTERN)
NODEGROUP_SECURITY_GROUP_NAME_PATTERN: Pattern = re.compile("sg-" + "([-0-9a-z]{17})")
class MethodNames:
"""The names of methods, used when a test is expected to throw an exception."""
CREATE_CLUSTER: str = "CreateCluster"
CREATE_NODEGROUP: str = "CreateNodegroup"
DELETE_CLUSTER: str = "DeleteCluster"
DELETE_NODEGROUP: str = "DeleteNodegroup"
DESCRIBE_CLUSTER: str = "DescribeCluster"
DESCRIBE_NODEGROUP: str = "DescribeNodegroup"
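# --- Illustrative sketch (added; not part of the original constants module) ---
# Demonstrates how the compiled patterns above can extract fields from an ARN.
# The ARN value is a made-up example.
if __name__ == "__main__":
    _sample_arn = "arn:aws:eks:us-east-1:123456789012:cluster/my-cluster"
    _match = RegExTemplates.CLUSTER_ARN.match(_sample_arn)
    if _match:
        print(_match.group("cluster_name"))  # -> my-cluster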
|
py
|
1a5985d1b1e6fa0a44fbceeb49ccc6601d590025
|
from core.advbase import *
from slot.a import *
from slot.d import *
def module():
return Lily
class Lily(Adv):
a1 = ('a',0.15,'hp100')
a3 = ('prep','100%')
conf = {}
conf['slots.a'] = CC()+Seaside_Princess()
conf['slots.d'] = Leviathan()
conf['acl'] = """
`dragon
`s2
`s1, seq=5 and cancel
`s3
"""
coab = ['Blade', 'Dagger', 'Tiki']
if __name__ == '__main__':
    import sys
    from core.simulate import test_with_argv
    test_with_argv(None, *sys.argv)
|
py
|
1a59863059927f80c4c405845a39e675c89c57c0
|
from cline import CommandLineArguments, Task
from examples.example02.arguments import NumberArgs
class SubtractTask(Task[NumberArgs]):
@classmethod
def make_args(cls, args: CommandLineArguments) -> NumberArgs:
"""
Makes and returns strongly-typed arguments for this task based on the
parsed command line arguments `args`.
Arguments:
args: Parsed command line arguments
Raises:
CannotMakeArguments: If the given arguments are not relevant to this
task
Returns:
Task arguments
"""
# Asserts that the "sub" flag is present and truthy.
args.assert_true("sub")
# If "a" or "b" aren't set or aren't integers then "get_integer" will
# raise `CannotMakeArguments`:
return NumberArgs(
a=args.get_integer("a"),
b=args.get_integer("b"),
)
def invoke(self) -> int:
"""
Invokes the task.
Reads arguments from `self.args`. Writes output to `self.out`.
Returns the shell exit code.
"""
# Since the arguments are strongly-typed, we don't need to worry about
# parsing integers and handing failures therein:
result = self.args.a - self.args.b
self.out.write(f"{result}\n")
return 0
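# --- Usage sketch (assumed; not part of the original example) ---
# When this task is registered with a cline application, a command line such as
#   example02 --sub --a 7 --b 3
# would satisfy make_args() (a truthy "sub" flag plus integer "a" and "b"), and
# invoke() would write "4". The exact flag spelling depends on how the host
# application declares its arguments, so treat this as illustrative only.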
|
py
|
1a5987b2a21d7a3d15f18abe02ad1280821593cd
|
from __future__ import print_function, division
from sympy.core import S, C
from sympy.core.compatibility import u
from sympy.core.exprtools import factor_terms
from sympy.core.function import (Function, Derivative, ArgumentIndexError,
AppliedUndef)
from sympy.functions.elementary.miscellaneous import sqrt
from sympy.functions.elementary.piecewise import Piecewise
from sympy.core.expr import Expr
from sympy.core import Add, Mul
from sympy.core.relational import Eq
from sympy.functions.elementary.exponential import exp
from sympy.functions.elementary.trigonometric import atan2
###############################################################################
######################### REAL and IMAGINARY PARTS ############################
###############################################################################
class re(Function):
"""Returns real part of expression. This function performs only
elementary analysis and so it will fail to decompose properly
more complicated expressions. If completely simplified result
is needed then use Basic.as_real_imag() or perform complex
expansion on instance of this function.
>>> from sympy import re, im, I, E
>>> from sympy.abc import x, y
>>> re(2*E)
2*E
>>> re(2*I + 17)
17
>>> re(2*I)
0
>>> re(im(x) + x*I + 2)
2
See Also
========
im
"""
is_real = True
    unbranched = True  # implicitly works on the projection to C
@classmethod
def eval(cls, arg):
if arg is S.NaN:
return S.NaN
elif arg.is_real:
return arg
elif arg.is_imaginary or (S.ImaginaryUnit*arg).is_real:
return S.Zero
elif arg.is_Function and arg.func is conjugate:
return re(arg.args[0])
else:
included, reverted, excluded = [], [], []
args = Add.make_args(arg)
for term in args:
coeff = term.as_coefficient(S.ImaginaryUnit)
if coeff is not None:
if not coeff.is_real:
reverted.append(coeff)
elif not term.has(S.ImaginaryUnit) and term.is_real:
excluded.append(term)
else:
# Try to do some advanced expansion. If
# impossible, don't try to do re(arg) again
# (because this is what we are trying to do now).
real_imag = term.as_real_imag(ignore=arg)
if real_imag:
excluded.append(real_imag[0])
else:
included.append(term)
if len(args) != len(included):
a, b, c = map(lambda xs: Add(*xs),
[included, reverted, excluded])
return cls(a) - im(b) + c
def as_real_imag(self, deep=True, **hints):
"""
Returns the real number with a zero complex part.
"""
return (self, S.Zero)
def _eval_derivative(self, x):
if x.is_real or self.args[0].is_real:
return re(Derivative(self.args[0], x, evaluate=True))
if x.is_imaginary or self.args[0].is_imaginary:
return -S.ImaginaryUnit \
* im(Derivative(self.args[0], x, evaluate=True))
def _eval_rewrite_as_im(self, arg):
return self.args[0] - im(self.args[0])
def _eval_is_algebraic(self):
return self.args[0].is_algebraic
def _sage_(self):
import sage.all as sage
return sage.real_part(self.args[0]._sage_())
class im(Function):
"""
Returns imaginary part of expression. This function performs only
elementary analysis and so it will fail to decompose properly more
complicated expressions. If completely simplified result is needed then
use Basic.as_real_imag() or perform complex expansion on instance of
this function.
Examples
========
>>> from sympy import re, im, E, I
>>> from sympy.abc import x, y
>>> im(2*E)
0
>>> re(2*I + 17)
17
>>> im(x*I)
re(x)
>>> im(re(x) + y)
im(y)
See Also
========
re
"""
is_real = True
    unbranched = True  # implicitly works on the projection to C
@classmethod
def eval(cls, arg):
if arg is S.NaN:
return S.NaN
elif arg.is_real:
return S.Zero
elif arg.is_imaginary or (S.ImaginaryUnit*arg).is_real:
return -S.ImaginaryUnit * arg
elif arg.is_Function and arg.func is conjugate:
return -im(arg.args[0])
else:
included, reverted, excluded = [], [], []
args = Add.make_args(arg)
for term in args:
coeff = term.as_coefficient(S.ImaginaryUnit)
if coeff is not None:
if not coeff.is_real:
reverted.append(coeff)
else:
excluded.append(coeff)
elif term.has(S.ImaginaryUnit) or not term.is_real:
# Try to do some advanced expansion. If
# impossible, don't try to do im(arg) again
# (because this is what we are trying to do now).
real_imag = term.as_real_imag(ignore=arg)
if real_imag:
excluded.append(real_imag[1])
else:
included.append(term)
if len(args) != len(included):
a, b, c = map(lambda xs: Add(*xs),
[included, reverted, excluded])
return cls(a) + re(b) + c
def as_real_imag(self, deep=True, **hints):
"""
Return the imaginary part with a zero real part.
Examples
========
>>> from sympy.functions import im
>>> from sympy import I
>>> im(2 + 3*I).as_real_imag()
(3, 0)
"""
return (self, S.Zero)
def _eval_derivative(self, x):
if x.is_real or self.args[0].is_real:
return im(Derivative(self.args[0], x, evaluate=True))
if x.is_imaginary or self.args[0].is_imaginary:
return -S.ImaginaryUnit \
* re(Derivative(self.args[0], x, evaluate=True))
def _sage_(self):
import sage.all as sage
return sage.imag_part(self.args[0]._sage_())
def _eval_rewrite_as_re(self, arg):
return self.args[0] - re(self.args[0])
def _eval_is_algebraic(self):
return self.args[0].is_algebraic
###############################################################################
############### SIGN, ABSOLUTE VALUE, ARGUMENT and CONJUGATION ################
###############################################################################
class sign(Function):
"""
Returns the complex sign of an expression:
    If the expression is real the sign will be:
* 1 if expression is positive
* 0 if expression is equal to zero
* -1 if expression is negative
    If the expression is imaginary the sign will be:
* I if im(expression) is positive
* -I if im(expression) is negative
Otherwise an unevaluated expression will be returned. When evaluated, the
result (in general) will be ``cos(arg(expr)) + I*sin(arg(expr))``.
Examples
========
>>> from sympy.functions import sign
>>> from sympy.core.numbers import I
>>> sign(-1)
-1
>>> sign(0)
0
>>> sign(-3*I)
-I
>>> sign(1 + I)
sign(1 + I)
>>> _.evalf()
0.707106781186548 + 0.707106781186548*I
See Also
========
Abs, conjugate
"""
is_finite = True
is_complex = True
def doit(self):
if self.args[0].is_nonzero:
return self.args[0] / Abs(self.args[0])
return self
@classmethod
def eval(cls, arg):
# handle what we can
if arg.is_Mul:
c, args = arg.as_coeff_mul()
unk = []
s = sign(c)
for a in args:
if a.is_negative:
s = -s
elif a.is_positive:
pass
else:
ai = im(a)
if a.is_imaginary and ai.is_comparable: # i.e. a = I*real
s *= S.ImaginaryUnit
if ai.is_negative:
# can't use sign(ai) here since ai might not be
# a Number
s = -s
else:
unk.append(a)
if c is S.One and len(unk) == len(args):
return None
return s * cls(arg._new_rawargs(*unk))
if arg is S.NaN:
return S.NaN
if arg.is_zero: # it may be an Expr that is zero
return S.Zero
if arg.is_positive:
return S.One
if arg.is_negative:
return S.NegativeOne
if arg.is_Function:
if arg.func is sign:
return arg
if arg.is_imaginary:
if arg.is_Pow and arg.exp is S.Half:
# we catch this because non-trivial sqrt args are not expanded
# e.g. sqrt(1-sqrt(2)) --x--> to I*sqrt(sqrt(2) - 1)
return S.ImaginaryUnit
arg2 = -S.ImaginaryUnit * arg
if arg2.is_positive:
return S.ImaginaryUnit
if arg2.is_negative:
return -S.ImaginaryUnit
def _eval_Abs(self):
if self.args[0].is_nonzero:
return S.One
def _eval_conjugate(self):
return sign(conjugate(self.args[0]))
def _eval_derivative(self, x):
if self.args[0].is_real:
from sympy.functions.special.delta_functions import DiracDelta
return 2 * Derivative(self.args[0], x, evaluate=True) \
* DiracDelta(self.args[0])
elif self.args[0].is_imaginary:
from sympy.functions.special.delta_functions import DiracDelta
return 2 * Derivative(self.args[0], x, evaluate=True) \
* DiracDelta(-S.ImaginaryUnit * self.args[0])
def _eval_is_nonnegative(self):
if self.args[0].is_nonnegative:
return True
def _eval_is_nonpositive(self):
if self.args[0].is_nonpositive:
return True
def _eval_is_imaginary(self):
return self.args[0].is_imaginary
def _eval_is_integer(self):
return self.args[0].is_real
def _eval_is_zero(self):
return self.args[0].is_zero
def _eval_power(self, other):
if (
self.args[0].is_real and
self.args[0].is_nonzero and
other.is_integer and
other.is_even
):
return S.One
def _sage_(self):
import sage.all as sage
return sage.sgn(self.args[0]._sage_())
def _eval_rewrite_as_Piecewise(self, arg):
if arg.is_real:
return Piecewise((1, arg > 0), (-1, arg < 0), (0, True))
def _eval_rewrite_as_Heaviside(self, arg):
if arg.is_real:
return C.Heaviside(arg)*2-1
def _eval_simplify(self, ratio, measure):
return self.func(self.args[0].factor())
class Abs(Function):
"""
Return the absolute value of the argument.
This is an extension of the built-in function abs() to accept symbolic
values. If you pass a SymPy expression to the built-in abs(), it will
pass it automatically to Abs().
Examples
========
>>> from sympy import Abs, Symbol, S
>>> Abs(-1)
1
>>> x = Symbol('x', real=True)
>>> Abs(-x)
Abs(x)
>>> Abs(x**2)
x**2
>>> abs(-x) # The Python built-in
Abs(x)
Note that the Python built-in will return either an Expr or int depending on
the argument::
>>> type(abs(-1))
<... 'int'>
>>> type(abs(S.NegativeOne))
<class 'sympy.core.numbers.One'>
Abs will always return a sympy object.
See Also
========
sign, conjugate
"""
is_real = True
is_negative = False
unbranched = True
def fdiff(self, argindex=1):
"""
Get the first derivative of the argument to Abs().
Examples
========
>>> from sympy.abc import x
>>> from sympy.functions import Abs
>>> Abs(-x).fdiff()
sign(x)
"""
if argindex == 1:
return sign(self.args[0])
else:
raise ArgumentIndexError(self, argindex)
@classmethod
def eval(cls, arg):
from sympy.simplify.simplify import signsimp
if hasattr(arg, '_eval_Abs'):
obj = arg._eval_Abs()
if obj is not None:
return obj
if not isinstance(arg, Expr):
raise TypeError("Bad argument type for Abs(): %s" % type(arg))
# handle what we can
arg = signsimp(arg, evaluate=False)
if arg.is_Mul:
known = []
unk = []
for t in arg.args:
tnew = cls(t)
if tnew.func is cls:
unk.append(tnew.args[0])
else:
known.append(tnew)
known = Mul(*known)
unk = cls(Mul(*unk), evaluate=False) if unk else S.One
return known*unk
if arg is S.NaN:
return S.NaN
if arg.is_Pow:
base, exponent = arg.as_base_exp()
if base.is_real:
if exponent.is_integer:
if exponent.is_even:
return arg
if base is S.NegativeOne:
return S.One
if base.func is cls and exponent is S.NegativeOne:
return arg
return Abs(base)**exponent
if base.is_positive == True:
return base**re(exponent)
return (-base)**re(exponent)*exp(-S.Pi*im(exponent))
if isinstance(arg, exp):
return exp(re(arg.args[0]))
if arg.is_zero: # it may be an Expr that is zero
return S.Zero
if arg.is_nonnegative:
return arg
if arg.is_nonpositive:
return -arg
if arg.is_imaginary:
arg2 = -S.ImaginaryUnit * arg
if arg2.is_nonnegative:
return arg2
if arg.is_Add:
if arg.has(S.Infinity, S.NegativeInfinity):
if any(a.is_infinite for a in arg.as_real_imag()):
return S.Infinity
if arg.is_real is None and arg.is_imaginary is None:
if all(a.is_real or a.is_imaginary or (S.ImaginaryUnit*a).is_real for a in arg.args):
from sympy import expand_mul
return sqrt(expand_mul(arg*arg.conjugate()))
if arg.is_real is False and arg.is_imaginary is False:
from sympy import expand_mul
return sqrt(expand_mul(arg*arg.conjugate()))
def _eval_is_integer(self):
if self.args[0].is_real:
return self.args[0].is_integer
def _eval_is_nonzero(self):
return self._args[0].is_nonzero
def _eval_is_positive(self):
return self.is_nonzero
def _eval_is_rational(self):
if self.args[0].is_real:
return self.args[0].is_rational
def _eval_is_even(self):
if self.args[0].is_real:
return self.args[0].is_even
def _eval_is_odd(self):
if self.args[0].is_real:
return self.args[0].is_odd
def _eval_is_algebraic(self):
return self.args[0].is_algebraic
def _eval_power(self, exponent):
if self.args[0].is_real and exponent.is_integer:
if exponent.is_even:
return self.args[0]**exponent
elif exponent is not S.NegativeOne and exponent.is_Integer:
return self.args[0]**(exponent - 1)*self
return
def _eval_nseries(self, x, n, logx):
direction = self.args[0].leadterm(x)[0]
s = self.args[0]._eval_nseries(x, n=n, logx=logx)
when = Eq(direction, 0)
return Piecewise(
((s.subs(direction, 0)), when),
(sign(direction)*s, True),
)
def _sage_(self):
import sage.all as sage
return sage.abs_symbolic(self.args[0]._sage_())
def _eval_derivative(self, x):
if self.args[0].is_real or self.args[0].is_imaginary:
return Derivative(self.args[0], x, evaluate=True) \
* sign(conjugate(self.args[0]))
return (re(self.args[0]) * Derivative(re(self.args[0]), x,
evaluate=True) + im(self.args[0]) * Derivative(im(self.args[0]),
x, evaluate=True)) / Abs(self.args[0])
def _eval_rewrite_as_Heaviside(self, arg):
# Note this only holds for real arg (since Heaviside is not defined
# for complex arguments).
if arg.is_real:
return arg*(C.Heaviside(arg) - C.Heaviside(-arg))
def _eval_rewrite_as_Piecewise(self, arg):
if arg.is_real:
return Piecewise((arg, arg >= 0), (-arg, True))
def _eval_rewrite_as_sign(self, arg):
return arg/C.sign(arg)
class arg(Function):
"""Returns the argument (in radians) of a complex number"""
is_real = True
is_finite = True
@classmethod
def eval(cls, arg):
if not arg.is_Atom:
c, arg_ = factor_terms(arg).as_coeff_Mul()
if arg_.is_Mul:
arg_ = Mul(*[a if (sign(a) not in (-1, 1)) else
sign(a) for a in arg_.args])
arg_ = sign(c)*arg_
else:
arg_ = arg
x, y = re(arg_), im(arg_)
rv = atan2(y, x)
if rv.is_number and not rv.atoms(AppliedUndef):
return rv
if arg_ != arg:
return cls(arg_, evaluate=False)
def _eval_derivative(self, t):
x, y = re(self.args[0]), im(self.args[0])
return (x * Derivative(y, t, evaluate=True) - y *
Derivative(x, t, evaluate=True)) / (x**2 + y**2)
def _eval_rewrite_as_atan2(self, arg):
x, y = re(self.args[0]), im(self.args[0])
return atan2(y, x)
class conjugate(Function):
"""
Changes the sign of the imaginary part of a complex number.
Examples
========
>>> from sympy import conjugate, I
>>> conjugate(1 + I)
1 - I
See Also
========
sign, Abs
"""
@classmethod
def eval(cls, arg):
obj = arg._eval_conjugate()
if obj is not None:
return obj
def _eval_Abs(self):
return Abs(self.args[0], evaluate=True)
def _eval_adjoint(self):
return transpose(self.args[0])
def _eval_conjugate(self):
return self.args[0]
def _eval_derivative(self, x):
if x.is_real:
return conjugate(Derivative(self.args[0], x, evaluate=True))
elif x.is_imaginary:
return -conjugate(Derivative(self.args[0], x, evaluate=True))
def _eval_transpose(self):
return adjoint(self.args[0])
def _eval_is_algebraic(self):
return self.args[0].is_algebraic
class transpose(Function):
"""
Linear map transposition.
"""
@classmethod
def eval(cls, arg):
obj = arg._eval_transpose()
if obj is not None:
return obj
def _eval_adjoint(self):
return conjugate(self.args[0])
def _eval_conjugate(self):
return adjoint(self.args[0])
def _eval_transpose(self):
return self.args[0]
class adjoint(Function):
"""
Conjugate transpose or Hermite conjugation.
"""
@classmethod
def eval(cls, arg):
obj = arg._eval_adjoint()
if obj is not None:
return obj
obj = arg._eval_transpose()
if obj is not None:
return conjugate(obj)
def _eval_adjoint(self):
return self.args[0]
def _eval_conjugate(self):
return transpose(self.args[0])
def _eval_transpose(self):
return conjugate(self.args[0])
def _latex(self, printer, exp=None, *args):
arg = printer._print(self.args[0])
tex = r'%s^{\dag}' % arg
if exp:
tex = r'\left(%s\right)^{%s}' % (tex, printer._print(exp))
return tex
def _pretty(self, printer, *args):
from sympy.printing.pretty.stringpict import prettyForm
pform = printer._print(self.args[0], *args)
if printer._use_unicode:
pform = pform**prettyForm(u('\N{DAGGER}'))
else:
pform = pform**prettyForm('+')
return pform
###############################################################################
############### HANDLING OF POLAR NUMBERS #####################################
###############################################################################
class polar_lift(Function):
"""
Lift argument to the Riemann surface of the logarithm, using the
standard branch.
>>> from sympy import Symbol, polar_lift, I
>>> p = Symbol('p', polar=True)
>>> x = Symbol('x')
>>> polar_lift(4)
4*exp_polar(0)
>>> polar_lift(-4)
4*exp_polar(I*pi)
>>> polar_lift(-I)
exp_polar(-I*pi/2)
>>> polar_lift(I + 2)
polar_lift(2 + I)
>>> polar_lift(4*x)
4*polar_lift(x)
>>> polar_lift(4*p)
4*p
See Also
========
sympy.functions.elementary.exponential.exp_polar
periodic_argument
"""
is_polar = True
is_comparable = False # Cannot be evalf'd.
@classmethod
def eval(cls, arg):
from sympy import exp_polar, pi, I, arg as argument
if arg.is_number:
ar = argument(arg)
# In general we want to affirm that something is known,
# e.g. `not ar.has(argument) and not ar.has(atan)`
# but for now we will just be more restrictive and
# see that it has evaluated to one of the known values.
if ar in (0, pi/2, -pi/2, pi):
return exp_polar(I*ar)*abs(arg)
if arg.is_Mul:
args = arg.args
else:
args = [arg]
included = []
excluded = []
positive = []
for arg in args:
if arg.is_polar:
included += [arg]
elif arg.is_positive:
positive += [arg]
else:
excluded += [arg]
if len(excluded) < len(args):
if excluded:
return Mul(*(included + positive))*polar_lift(Mul(*excluded))
elif included:
return Mul(*(included + positive))
else:
return Mul(*positive)*exp_polar(0)
def _eval_evalf(self, prec):
""" Careful! any evalf of polar numbers is flaky """
return self.args[0]._eval_evalf(prec)
def _eval_Abs(self):
return Abs(self.args[0], evaluate=True)
class periodic_argument(Function):
"""
Represent the argument on a quotient of the Riemann surface of the
logarithm. That is, given a period P, always return a value in
(-P/2, P/2], by using exp(P*I) == 1.
>>> from sympy import exp, exp_polar, periodic_argument, unbranched_argument
>>> from sympy import I, pi
>>> unbranched_argument(exp(5*I*pi))
pi
>>> unbranched_argument(exp_polar(5*I*pi))
5*pi
>>> periodic_argument(exp_polar(5*I*pi), 2*pi)
pi
>>> periodic_argument(exp_polar(5*I*pi), 3*pi)
-pi
>>> periodic_argument(exp_polar(5*I*pi), pi)
0
See Also
========
sympy.functions.elementary.exponential.exp_polar
polar_lift : Lift argument to the Riemann surface of the logarithm
principal_branch
"""
@classmethod
def _getunbranched(cls, ar):
from sympy import exp_polar, log, polar_lift
if ar.is_Mul:
args = ar.args
else:
args = [ar]
unbranched = 0
for a in args:
if not a.is_polar:
unbranched += arg(a)
elif a.func is exp_polar:
unbranched += a.exp.as_real_imag()[1]
elif a.is_Pow:
re, im = a.exp.as_real_imag()
unbranched += re*unbranched_argument(
a.base) + im*log(abs(a.base))
elif a.func is polar_lift:
unbranched += arg(a.args[0])
else:
return None
return unbranched
@classmethod
def eval(cls, ar, period):
# Our strategy is to evaluate the argument on the Riemann surface of the
# logarithm, and then reduce.
# NOTE evidently this means it is a rather bad idea to use this with
# period != 2*pi and non-polar numbers.
from sympy import ceiling, oo, atan2, atan, polar_lift, pi, Mul
if not period.is_positive:
return None
if period == oo and isinstance(ar, principal_branch):
return periodic_argument(*ar.args)
if ar.func is polar_lift and period >= 2*pi:
return periodic_argument(ar.args[0], period)
if ar.is_Mul:
newargs = [x for x in ar.args if not x.is_positive]
if len(newargs) != len(ar.args):
return periodic_argument(Mul(*newargs), period)
unbranched = cls._getunbranched(ar)
if unbranched is None:
return None
if unbranched.has(periodic_argument, atan2, arg, atan):
return None
if period == oo:
return unbranched
if period != oo:
n = ceiling(unbranched/period - S(1)/2)*period
if not n.has(ceiling):
return unbranched - n
def _eval_evalf(self, prec):
from sympy import ceiling, oo
z, period = self.args
if period == oo:
unbranched = periodic_argument._getunbranched(z)
if unbranched is None:
return self
return unbranched._eval_evalf(prec)
ub = periodic_argument(z, oo)._eval_evalf(prec)
return (ub - ceiling(ub/period - S(1)/2)*period)._eval_evalf(prec)
def unbranched_argument(arg):
from sympy import oo
return periodic_argument(arg, oo)
class principal_branch(Function):
"""
Represent a polar number reduced to its principal branch on a quotient
of the Riemann surface of the logarithm.
This is a function of two arguments. The first argument is a polar
    number `z`, and the second one a positive real number or infinity, `p`.
The result is "z mod exp_polar(I*p)".
>>> from sympy import exp_polar, principal_branch, oo, I, pi
>>> from sympy.abc import z
>>> principal_branch(z, oo)
z
>>> principal_branch(exp_polar(2*pi*I)*3, 2*pi)
3*exp_polar(0)
>>> principal_branch(exp_polar(2*pi*I)*3*z, 2*pi)
3*principal_branch(z, 2*pi)
See Also
========
sympy.functions.elementary.exponential.exp_polar
polar_lift : Lift argument to the Riemann surface of the logarithm
periodic_argument
"""
is_polar = True
is_comparable = False # cannot always be evalf'd
@classmethod
def eval(self, x, period):
from sympy import oo, exp_polar, I, Mul, polar_lift, Symbol
if isinstance(x, polar_lift):
return principal_branch(x.args[0], period)
if period == oo:
return x
ub = periodic_argument(x, oo)
barg = periodic_argument(x, period)
if ub != barg and not ub.has(periodic_argument) \
and not barg.has(periodic_argument):
pl = polar_lift(x)
def mr(expr):
if not isinstance(expr, Symbol):
return polar_lift(expr)
return expr
pl = pl.replace(polar_lift, mr)
if not pl.has(polar_lift):
res = exp_polar(I*(barg - ub))*pl
if not res.is_polar and not res.has(exp_polar):
res *= exp_polar(0)
return res
if not x.free_symbols:
c, m = x, ()
else:
c, m = x.as_coeff_mul(*x.free_symbols)
others = []
for y in m:
if y.is_positive:
c *= y
else:
others += [y]
m = tuple(others)
arg = periodic_argument(c, period)
if arg.has(periodic_argument):
return None
if arg.is_number and (unbranched_argument(c) != arg or
(arg == 0 and m != () and c != 1)):
if arg == 0:
return abs(c)*principal_branch(Mul(*m), period)
return principal_branch(exp_polar(I*arg)*Mul(*m), period)*abs(c)
if arg.is_number and ((abs(arg) < period/2) == True or arg == period/2) \
and m == ():
return exp_polar(arg*I)*abs(c)
def _eval_evalf(self, prec):
from sympy import exp, pi, I
z, period = self.args
p = periodic_argument(z, period)._eval_evalf(prec)
if abs(p) > pi or p == -pi:
return self # Cannot evalf for this argument.
return (abs(z)*exp(I*p))._eval_evalf(prec)
# /cyclic/
from sympy.core import basic as _
_.abs_ = Abs
del _
|
py
|
1a59888ca8e2cc12e589ffd1591e5d370a1ee765
|
from report_errores import *
reporte_sintactico = ""
reporte_lexico = ""
reporte_bnf = []
entradaa = ""
# -----------------------------------------------------------------------------
# Grammar for Project Phase 1 - Compilers 2
# -----------------------------------------------------------------------------
from ply import lex
import ply.yacc as yacc
entradaa = ""
reservadas = {
'create' : 'CREATE',
'table':'TABLE',
'tables':'TABLES',
'inherits': 'INHERITS',
'integer': 'INTEGER',
'boolean': 'BOOLEAN',
'show': 'SHOW',
'databases': 'DATABASES',
'default': 'DEFAULT',
# CREATE DATABASE
'database': 'DATABASE',
'if' : 'IF',
'replace' : 'REPLACE',
'exists' : 'EXISTS',
'or': 'OR',
'owner': 'OWNER',
'not' : 'NOT',
'mode' : 'MODE',
'select': 'SELECT',
'insert': 'INSERT',
'update': 'UPDATE',
'delete': 'DELETE',
'count': 'COUNT',
'from': 'FROM',
'into': 'INTO',
'values': 'VALUES',
'sum' : 'SUM',
'set': 'SET',
'inner': 'INNER',
'join': 'JOIN',
'on': 'ON',
'case': 'CASE',
'when': 'WHEN',
'then': 'THEN',
'end': 'END',
'and': 'AND',
'or': 'OR',
'else': 'ELSE',
'where': 'WHERE',
'as': 'AS',
'create': 'CREATE',
'table': 'TABLE',
'inherits': 'INHERITS',
'alter': 'ALTER',
'database': 'DATABASE',
'rename': 'RENAME',
'owner': 'OWNER',
'drop': 'DROP',
'currUser' : 'CURRENT_USER',
'sessUser' : 'SESSION_USER',
'add' : 'ADD',
'check' : 'CHECK',
'constraint': 'CONSTRAINT',
'column' : 'COLUMN',
'unique' : 'UNIQUE',
'references' : 'REFERENCES',
'type' : 'TYPE',
'not' : 'NOT',
'like' : 'LIKE',
'null' : 'NULL',
# ---- DATA TYPES AND SPECIFICATIONS--------
'text': 'TEXT',
'float': 'FLOAT',
'integer': 'INTEGER',
'char': 'CHAR',
'varchar' : 'VARCHAR',
'smallint':'SMALLINT',
'bigint' : 'BIGINT',
'decimal' : 'DECIMAL',
'numeric' : 'NUMERIC',
'real' : 'REAL',
'double' : 'DOUBLE',
'precision' : 'PRECISION',
'character' : 'CHARACTER',
'varying' : 'VARYING',
'timestamp' : 'TIMESTAMP',
'date' : 'DATE',
'time' : 'TIME',
'interval' : 'INTERVAL',
'extract' : 'EXTRACT',
'year' : 'YEAR',
'month' : 'MONTH',
'day' : 'DAY',
'hour' : 'HOUR',
'minute' : 'MINUTE',
'second' : 'SECOND',
'now' : 'NOW',
'date_part' : 'DATE_PART',
'current_date': 'CURRENT_DATE',
'current_time' : 'CURRENT_TIME',
'to' : 'TO',
'enum' : 'ENUM',
'money' : 'MONEY',
# ---- DELETE --------
'only' : 'ONLY',
'in' : 'IN',
'returning' : 'RETURNING',
'using' : 'USING',
'exists' : 'EXISTS',
# ---- USE DATABASE --------
'use' : 'USE',
#----- SELECT-----------
'distinct' : 'DISTINCT',
'group' : 'GROUP',
'by' : 'BY',
'order' : 'ORDER',
'asc' : 'ASC',
'desc' : 'DESC',
'primary' : 'PRIMARY',
'key' : 'KEY',
'foreign' : 'FOREIGN',
'avg' : 'AVG',
'min' : 'MIN',
'max' : 'MAX',
'between' : 'BETWEEN',
'having' : 'HAVING',
#----- TRIGONOMETRIC FUNCTIONS -----------
'acos' : 'ACOS',
'acosd' : 'ACOSD',
'asin' : 'ASIN',
'asind' : 'ASIND',
'atan' : 'ATAN',
'atand' : 'ATAND',
'atan2' : 'ATAN2',
'atan2d' : 'ATAN2D',
'cos' : 'COS',
'cosd' : 'COSD',
'cot' : 'COT',
'cotd' : 'COTD',
'sin' : 'SIN',
'sind' : 'SIND',
'tan' : 'TAN',
'tand' : 'TAND',
'sinh' : 'SINH',
'cosh' : 'COSH',
'tanh' : 'TANH',
'asinh' : 'ASINH',
'acosh' : 'ACOSH',
'atanh' : 'ATANH',
#----- MATHEMATICAL FUNCTIONS -----------
'abs' : 'ABS',
'cbrt' : 'CBRT',
'ceil' : 'CEIL',
'ceiling' : 'CEILING',
'degrees' : 'DEGREES',
'div' : 'DIV',
'exp' : 'EXP',
'factorial' : 'FACTORIAL',
'floor' : 'FLOOR',
'gcd' : 'GCD',
'lcm' : 'LCM',
'ln' : 'LN',
'log' : 'LOG',
'log10' : 'LOG10',
'min_scale' : 'MIN_SCALE',
'mod' : 'MOD',
'pi' : 'PI',
'power' : 'POWER',
'radians' : 'RADIANS',
'round' : 'ROUND',
'scale' : 'SCALE',
'sign' : 'SIGN',
'sqrt' : 'SQRT',
'trim_scale' : 'TRIM_SCALE',
'truc' : 'TRUC',
'width_bucket' : 'WIDTH_BUCKET',
'random' : 'RANDOM',
'setseed' : 'SETSEED',
#----- DATATYPES -----------
'symmetric' : 'SYMMETRIC',
'isnull' : 'ISNULL',
'true': 'TRUE',
'notnull' : 'NOTNULL',
'is' : 'IS',
'false' : 'FALSE',
'unknown' : 'UNKNOWN',
#----- BINARY STRING FUNCTIONS -----------
'length' : 'LENGTH',
'substring' : 'SUBSTRING',
'trim' : 'TRIM',
'get_byte' : 'GET_BYTE',
'md5' : 'MD5',
'set_byte' : 'SET_BYTE',
'sha256' : 'SHA256',
'substr' : 'SUBSTR',
'convert' : 'CONVERT',
'encode' : 'ENCODE',
'decode' : 'DECODE',
#----- COMBINING QUERIES -----------
'union' : 'UNION',
'intersect' : 'INTERSECT',
'except' : 'EXCEPT',
'all' : 'ALL',
#----- LIMIT AND OFFSET -----------
'limit' : 'LIMIT',
'offset' : 'OFFSET',
'some' : 'SOME',
'any' : 'ANY',
##----- COMBINING QUERIES -----------
# 'left' : 'LEFT',
# 'right' : 'RIGHT',
# 'full' : 'FULL',
# 'natural' : 'NATURAL',
# 'outer' : 'OUTER',
'bytea' : 'BYTEA',
'trunc' : 'TRUNC',
'greatest' : 'GREATEST',
'least' : 'LEAST',
# ----- INDEX ADDITIONS -----------------
'index' : 'INDEX',
'hash' : 'HASH',
'nulls' : 'NULLS',
'first' : 'FIRST',
'last' : 'LAST',
'lower' : 'LOWER',
'include' : 'INCLUDE',
'collate' : 'COLLATE',
##--------------- PART OF THE SECOND PHASE --------
'function' : 'FUNCTION',
'returns' : 'RETURNS',
'declare' : 'DECLARE',
'begin' : 'BEGIN',
'raise' : 'RAISE',
'notice' : 'NOTICE',
'return' : 'RETURN',
'record' : 'RECORD',
'constant' : 'CONSTANT',
'alias' : 'ALIAS',
'for' : 'FOR',
'real' : 'REAL',
#------------- Added by Dulce :D ---------------
'if' : 'IF',
'prepare' : 'PREPARE',
'perform' : 'PERFORM',
# ANCHOR ----------- NEW ----------------
'exception' : 'EXCEPTION',
'next' : 'NEXT',
'query' : 'QUERY',
'execute' : 'EXECUTE',
'call' : 'CALL',
'loop' : 'LOOP',
'exit' : 'EXIT',
'text_pattern_ops' : 'TEXT_PATTERN_OPS',
'varchar_pattern_ops' : 'VARCHAR_PATTERN_OPS',
'bpchar_pattern_ops' : 'BPCHAR_PATTERN_OPS'
}
tokens = [
'PTCOMA',
'ASTERISCO',
'COMA',
'PAR_A',
'PAR_C',
'FLOTANTE',
'ESCAPE',
'HEX',
'BASE64',
'ENTERO',
'CADENA',
'ID',
'PUNTO',
'MENIGQUE',
'NOIG',
'MAYIGQUE',
'MAYMAY',
'MENMEN',
'AMPERMEN',
'AMPERMAY',
'MENMENOR',
'AMPMENOR',
'ORAMPMAY',
'ORMAYMAY',
'ARROBAMAY',
'MENARROBA',
'CEJILLAIGUAL',
'AMPERSON_D',
'MENPOT',
'MAYPOT',
'MENQUE',
'MAYQUE',
'DOBLEIG',
'NOIGUAL',
'IGUAL',
'SUMA',
'RESTA',
'DIVISION',
'MODULO',
'Y',
'S_OR',
'HASHTAG',
'CEJILLA',
'D_DOSPTS',
'D_OR',
'DOSPUNTOS'
] + list(reservadas.values())
#tokens
t_D_DOSPTS = r'::'
t_PTCOMA = r';'
t_COMA = r','
t_MENIGQUE = r'<='
t_MAYIGQUE = r'>='
t_MAYMAY = r'>>'
t_MENMEN = r'<<'
t_NOIG = r'<>'
t_NOIGUAL = r'!='
t_DOBLEIG = r'=='
# ANCHOR
t_AMPERMEN = r'&<'
t_AMPERMAY = r'&>'
t_MENMENOR = r'<<\|'
t_AMPMENOR = r'&<\|'
t_ORAMPMAY = r'\|&>'
t_ORMAYMAY = r'\|>>'
t_ARROBAMAY = r'@>'
t_MENARROBA = r'<@'
t_CEJILLAIGUAL = r'~='
t_AMPERSON_D = r'&&'
t_MENPOT = r'<\^'
t_MAYPOT = r'>\^'
t_DOSPUNTOS = r'\:'
t_SUMA = r'\+'
t_RESTA = r'\-'
t_DIVISION = r'/'
t_ASTERISCO = r'\*'
t_MODULO = r'\%'
t_PAR_A = r'\('
t_PAR_C = r'\)'
t_PUNTO = r'\.'
t_MENQUE = r'\<'
t_MAYQUE = r'\>'
t_IGUAL = r'\='
t_D_OR = r'\|\|'
t_Y = r'\&'
t_S_OR = r'\|'
t_HASHTAG = r'\#'
t_CEJILLA = r'\~'
def t_FLOTANTE(t):
r'\d+\.\d+'
try:
t.value = float(t.value)
except ValueError:
print("Float value too large %d", t.value)
t.value = 0
return t
def t_ENTERO(t):
r'\d+'
try:
t.value = int(t.value)
except ValueError:
print("Integer value too large %d", t.value)
t.value = 0
return t
def t_ID(t):
r'[a-zA-Z_][a-zA-Z_0-9]*'
t.type = reservadas.get(t.value.lower(),'ID') # Check for reserved words
return t
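# Note (added): reservadas.get(t.value.lower(), 'ID') is what turns keywords into their own
# token types; e.g. the lexeme "Select" becomes a SELECT token, while an unknown name such as
# "clientes" (hypothetical) falls through to the generic ID token.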
def t_ESCAPE(t):
r'\'(?i)escape\'' #ignore case
t.value = t.value[1:-1] # strip the quotes
return t
def t_BASE64(t):
r'\'(?i)base64\''
t.value = t.value[1:-1] # strip the quotes
return t
def t_HEX(t):
r'\'(?i)hex\''
t.value = t.value[1:-1] # strip the quotes
return t
def t_CADENA(t):
r'\'.*?\''
t.value = t.value[1:-1] # strip the quotes
return t
# Multi-line comment /* .. */
def t_COMENTARIO_MULTILINEA(t):
r'/\*(.|\n)*?\*/'
t.lexer.lineno += t.value.count('\n')
# Single-line comment -- ...
def t_COMENTARIO_SIMPLE(t):
r'--.*\n'
t.lexer.lineno += 1
# Ignored characters
t_ignore = " \t"
def t_newline(t):
r'\n+'
t.lexer.lineno += t.value.count("\n")
def t_error(t):
#print("Illegal character '%s'" % t.value[0], t.lineno, t.lexpos)
errorLexico = Error(str(t.value[0]),int(t.lineno),int(t.lexpos), "Error Lexico")
listaErrores.append(errorLexico)
t.lexer.skip(1)
# TOKENIZE
# Build the lexical analyzer
import ply.lex as lex
lexer = lex.lex()
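# Illustrative sketch (added, not part of the original project): a tiny helper that feeds a
# sample statement to the lexer and collects the token types it emits. The SQL text below is
# hypothetical and only shows how reserved words, identifiers and symbols are classified.
def _demo_tokenizar(texto="SELECT nombre FROM clientes WHERE id = 1;"):
    lexer.input(texto)
    return [tok.type for tok in lexer]
# Calling _demo_tokenizar() would return something like:
# ['SELECT', 'ID', 'FROM', 'ID', 'WHERE', 'ID', 'IGUAL', 'ENTERO', 'PTCOMA']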
from instrucciones import *
from expresiones import *
# Operator associativity and precedence
precedence = (
('left','MAYQUE','MENQUE','MAYIGQUE','MENIGQUE'),
('left','IGUAL','NOIG','NOIGUAL'),
('left','AND','OR'),
('left','SUMA','RESTA'),
('left','ASTERISCO','DIVISION'),
('nonassoc', 'IS'),
('right','UMINUS'),
)
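# Note (added): with this table an expression such as  a + b * c  groups as  a + (b * c)
# because ASTERISCO binds tighter than SUMA, and  a - b - c  associates to the left as
# (a - b) - c; UMINUS gives unary minus the highest precedence via %prec.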
# Grammar definition
from instrucciones import *
from expresiones import *
def p_init(t) :
'init : instrucciones'
reporte_bnf.append("<init> ::= <instrucciones>")
# print(reporte_bnf)
get_array(reporte_bnf)
t[0] = t[1]
def p_instrucciones_lista(t) :
'instrucciones : instrucciones instruccion'
reporte_bnf.append("<instrucciones> ::= <instrucciones><instruccion>")
t[1].append(t[2])
t[0] = t[1]
def p_instrucciones_instruccion(t) :
'instrucciones : instruccion '
reporte_bnf.append("<instrucciones> ::= <instruccion>")
t[0] = [t[1]]
#?######################################################
# TODO INSTRUCTIONS
#?######################################################
def p_instruccion(t) :
'instruccion : createDB_insrt'
reporte_bnf.append("<instruccion> ::= <createDB_insrt>")
t[0] = t[1]
def p_instruccion1(t) :
'instruccion : create_Table_isnrt'
reporte_bnf.append("<instruccion> ::= <create_Table_isnrt>")
t[0] = t[1]
def p_instruccion2(t) :
'instruccion : show_databases_instr'
reporte_bnf.append("<instruccion> ::= <show_databases_instr>")
t[0] = t[1]
def p_instruccion3(t) :
'instruccion : show_tables_instr'
reporte_bnf.append("<instruccion> ::= <show_tables_instr>")
t[0] = t[1]
def p_instruccion4(t) :
'instruccion : drop_database_instr'
reporte_bnf.append("<instruccion> ::= <drop_database_instr>")
t[0] = t[1]
def p_instruccion5(t) :
'instruccion : use_database_instr'
reporte_bnf.append("<instruccion> ::= <use_database_instr>")
t[0] = t[1]
def p_instruccion6(t) :
'instruccion : alterDB_insrt'
reporte_bnf.append("<instruccion> ::= <alterDB_insrt>")
t[0] = t[1]
def p_instruccion7(t) :
'instruccion : drop_insrt'
reporte_bnf.append("<instruccion> ::= <drop_insrt>")
t[0] = t[1]
def p_instruccion8(t) :
'instruccion : alterTable_insrt'
reporte_bnf.append("<instruccion> ::= <alterTable_insrt>")
t[0] = t[1]
def p_instruccion9(t) :
'instruccion : insert_insrt'
reporte_bnf.append("<instruccion> ::= <insert_insrt>")
t[0] = t[1]
def p_instruccion10(t) :
'instruccion : TIPO_ENUM_INSRT'
reporte_bnf.append("<instruccion> ::= <TIPO_ENUM_INSRT>")
t[0] = t[1]
def p_instruccion11(t) :
'instruccion : delete_insrt'
reporte_bnf.append("<instruccion> ::= <delete_insrt>")
t[0] = t[1]
def p_instruccion_f_select(t):
'instruccion : select_insrt PTCOMA'
reporte_bnf.append("<instruccion> ::= <select_insrt> PTCOMA")
t[0] = t[1]
def p_instruccion_f_select_union(t):
'instruccion : select_uniones PTCOMA'
reporte_bnf.append("<instruccion> ::= <select_uniones> PTOCOMA")
t[0] = Select_Uniones(t[1][0],t[1][1])
def p_instruccion_f_select_update(t):
'instruccion : update_insrt'
reporte_bnf.append("<instruccion> ::= <update_insrt>")
t[0] = t[1]
'''def p_instruccion_error(t) :
'instruccion : createDB_insrt error'
reporte_bnf.append("<instruccion> ::= <createDB_insrt><error>")
def p_instruccion_error1(t) :
'instruccion : create_Table_isnrt error '
reporte_bnf.append("<instruccion> ::= <create_Table_isnrt><error>")
def p_instruccion_error2(t) :
'instruccion : show_databases_instr error'
reporte_bnf.append("<instruccion> ::= <show_databases_instr><error>")
def p_instruccion_error3(t) :
'instruccion : show_tables_instr error'
reporte_bnf.append("<instruccion> ::= <show_tables_instr><error>")
def p_instruccion_error4(t) :
'instruccion : drop_database_instr error'
reporte_bnf.append("<instruccion> ::= <drop_database_instr><error>")
def p_instruccion_error5(t) :
'instruccion : use_database_instr error'
reporte_bnf.append("<instruccion> ::= <use_database_instr><error>")
def p_instruccion_error6(t) :
'instruccion : alterDB_insrt error'
reporte_bnf.append("<instruccion> ::= <alterDB_insrt><error>")
def p_instruccion_error7(t) :
'instruccion : update_insrt error'
reporte_bnf.append("<instruccion> ::= <update_insrt><error>")
def p_instruccion_error8(t) :
'instruccion : drop_insrt error'
reporte_bnf.append("<instruccion> ::= <drop_insrt><error>")
def p_instruccion_error9(t) :
'instruccion : alterTable_insrt error'
reporte_bnf.append("<instruccion> ::= <alterTable_insrt><error>")
def p_instruccion_error10(t) :
'instruccion : insert_insrt error'
reporte_bnf.append("<instruccion> ::= <insert_insrt><error>")
def p_instruccion_error11(t) :
'instruccion : TIPO_ENUM_INSRT error'
reporte_bnf.append("<instruccion> ::= <TIPO_ENUM_INSRT><error>")
def p_instruccion_error12(t) :
'instruccion : delete_insrt error'
reporte_bnf.append("<instruccion> ::= <delete_insrt><error>")
'''
#?######################################################
# TODO DELETE STATEMENT GRAMMAR
#?######################################################
def p_delete_insrt_delete(t):
' delete_insrt : DELETE FROM ID PTCOMA'
reporte_bnf.append("<delete_insrt> ::= DELETE FROM ID PTCOMA")
t[0] = Definicion_delete(t[3], TIPO_DELETE.DELETE_NORMAL, None, None, None)
def p_delete_insrt(t):
' delete_insrt : DELETE FROM ONLY ID PTCOMA'
reporte_bnf.append("<delete_insrt> ::= DELETE FROM ONLY ID PTCOMA")
t[0] = Definicion_delete(t[4], TIPO_DELETE.DELETE_NORMAL, None, None, None)
def p_delete_insert2(t):
' delete_insrt : DELETE FROM ONLY ID RETURNING returning_exp PTCOMA'
reporte_bnf.append("<delete_insrt> ::= DELETE FROM ONLY ID RETURNING <returning_exp> PTCOMA")
t[0] = Definicion_delete(t[4], TIPO_DELETE.DELETE_RETURNING , None, None,t[6])
def p_delete_insrt3(t):
' delete_insrt : DELETE FROM ID WHERE EXISTS expresion_logica PTCOMA '
reporte_bnf.append("<delete_insrt> ::= DELETE FROM ID WHERE EXISTS <expresion_logica> PTCOMA")
t[0] = Definicion_delete(t[3], TIPO_DELETE.DELETE_EXIST ,t[6],None,None)
def p_delete_insrt4(t):
' delete_insrt : DELETE FROM ID WHERE EXISTS expresion_logica RETURNING returning_exp PTCOMA '
reporte_bnf.append("<delete_insrt> ::= DELETE FROM ID WHERE EXISTS <expresion_logica> RETURNING <returning_exp> PTCOMA")
t[0] = Definicion_delete(t[3], TIPO_DELETE.DELETE_EXIST_RETURNING, t[6], None, t[8])
def p_delete_insrt5(t):
' delete_insrt : DELETE FROM ID WHERE expresion_logica PTCOMA '
reporte_bnf.append("<delete_insrt> ::= DELETE FROM ID WHERE <expresion_logica> PTCOMA")
t[0] = Definicion_delete(t[3], TIPO_DELETE.DELETE_EXIST,t[5],None,None)
def p_delete_insrt6(t):
' delete_insrt : DELETE FROM ID WHERE expresion_logica RETURNING returning_exp PTCOMA'
reporte_bnf.append("<delete_insrt> ::= DELETE FROM ID WHERE <expresion_logica> RETURNING <returning_exp> PTCOMA")
t[0] = Definicion_delete(t[3], TIPO_DELETE.DELETE_EXIST_RETURNING, t[5], None, t[7])
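# Note (added): a statement such as
#   DELETE FROM clientes WHERE edad > 30 RETURNING nombre, edad;
# (hypothetical table/columns) is matched by p_delete_insrt6 above: t[5] carries the parsed
# <expresion_logica> and t[7] the RETURNING column list handed to Definicion_delete.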
def p_delete_insrt7(t):
' delete_insrt : DELETE FROM ID RETURNING returning_exp PTCOMA '
reporte_bnf.append("<delete_insrt> ::= DELETE FROM ID RETURNING <returning_exp> PTCOMA")
t[0] = Definicion_delete(t[3], TIPO_DELETE.DELETE_RETURNING, None, None, t[5])
def p_delete_insrt8(t):
' delete_insrt : DELETE FROM ID USING ID WHERE EXISTS expresion_logica PTCOMA '
reporte_bnf.append("<delete_insrt> ::= DELETE FROM ID USING ID WHERE EXISTS <expresion_logica> PTCOMA")
t[0] = Definicion_delete(t[3], TIPO_DELETE.DELETE_USING, t[8],t[5],None)
def p_delete_insrt9(t):
' delete_insrt : DELETE FROM ID USING ID WHERE EXISTS expresion_logica RETURNING returning_exp PTCOMA '
reporte_bnf.append("<delete_insrt> ::= DELETE FROM ID USING ID WHERE EXISTS <expresion_logica> RETURNING <returning_exp> PTCOMA")
t[0] = Definicion_delete(t[3], TIPO_DELETE.DELETE_USING_returnin,t[8],t[5],t[10])
def p_delete_insrt10(t):
' delete_insrt : DELETE FROM ID USING ID WHERE expresion_logica PTCOMA '
reporte_bnf.append("<delete_insrt> ::= DELETE FROM ID USING ID WHERE <expresion_logica> PTCOMA")
t[0] = Definicion_delete(t[3], TIPO_DELETE.DELETE_USING, t[7],t[5],None )
def p_returning_exp(t):
' returning_exp : ASTERISCO'
reporte_bnf.append("<returning_exp> ::= ASTERISCO")
t[0] = t[1]
def p_returning_exp1(t):
' returning_exp : campos_c'
reporte_bnf.append("<returning_exp> ::= <campos_c>")
t[0] = t[1]
#?######################################################
# TODO ENUM STATEMENT GRAMMAR
#?######################################################
def p_Create_Type_Enum(t):
' TIPO_ENUM_INSRT : CREATE TYPE ID AS ENUM PAR_A lista_datos_enum PAR_C PTCOMA'
reporte_bnf.append("<TIPO_ENUM_INSRT> ::= CREATE TYPE ID AS ENUM PAR_A <lista_datos_enum> PAR_C PTCOMA")
t[0] = Create_type(ExpresionIdentificador(TIPO_VALOR.IDENTIFICADOR,t[3]),t[7])
def p_parametros_lista_datos_(t):
' lista_datos_enum : lista_datos_enum COMA CADENA'
reporte_bnf.append("<lista_datos_enum> ::= <lista_datos_enum> COMA CADENA")
t[1].append(ExpresionComillaSimple(TIPO_VALOR.NUMERO,t[3]))
t[0] = t[1]
def p_expresion_lista_(t):
' lista_datos_enum : CADENA '
reporte_bnf.append("<lista_datos_enum> ::= CADENA")
t[0] = [ExpresionComillaSimple(TIPO_VALOR.NUMERO,t[1])]
#?######################################################
# TODO INSERT STATEMENT GRAMMAR
#?######################################################
def p_insert_insrt(t):
' insert_insrt : INSERT INTO ID PAR_A lista_parametros_lista PAR_C VALUES PAR_A lista_datos PAR_C PTCOMA '
reporte_bnf.append("<insert_insrt> ::= INSERT INTO ID PAR_A <lista_parametros_lista> PAR_C VALUES PAR_A <lista_datos> PAR_C PTCOMA")
t[0] = Definicion_Insert(t[3], TIPO_INSERT.CON_PARAMETROS ,t[5], t[9])
def p_opcion_lista_parametros_(t):
' insert_insrt : INSERT INTO ID PAR_A PAR_C VALUES PAR_A lista_datos PAR_C PTCOMA '
reporte_bnf.append("<insert_insrt> ::= INSERT INTO ID PAR_A PAR_C VALUES PAR_A <lista_datos> PAR_C PTCOMA")
t[0] = Definicion_Insert(t[3], TIPO_INSERT.SIN_PARAMETROS ,None, t[8])
def p_opcion_lista_parametros_vacios(t):
' insert_insrt : INSERT INTO ID VALUES PAR_A lista_datos PAR_C PTCOMA '
reporte_bnf.append("<insert_insrt> ::= INSERT INTO ID VALUES PAR_A <lista_datos> PAR_C PTCOMA")
t[0] = Definicion_Insert(t[3], TIPO_INSERT.SIN_PARAMETROS ,None, t[6])
#?######################################################
# TODO INSERT LIST GRAMMAR
#?######################################################
def p_lista_parametros_lista(t):
' lista_parametros_lista : lista_parametros_lista COMA ID'
reporte_bnf.append("<lista_parametros_lista> ::= <lista_parametros_lista> COMA ID")
t[1].append(ExpresionIdentificador(TIPO_VALOR.IDENTIFICADOR,t[3]))
t[0] = t[1]
def p_lista_parametros(t):
' lista_parametros_lista : ID'
reporte_bnf.append("<lista_parametros_lista> ::= ID")
t[0] = [ExpresionIdentificador(TIPO_VALOR.IDENTIFICADOR,t[1])]
def p_parametros_lista_datos(t):
' lista_datos : lista_datos COMA exclusiva_insert'
reporte_bnf.append("<lista_datos> ::= <lista_datos> COMA <expresion>")
t[1].append(t[3])
t[0] = t[1]
def p_parametros_exclusiva(t):
' lista_datos : exclusiva_insert'
reporte_bnf.append("<lista_datos> ::= <exclusiva_insert>")
t[0] = [t[1]]
def p_expresion_lista(t):
' exclusiva_insert : expresion'
reporte_bnf.append("<exclusiva_insert> ::= <expresion>")
t[0] = t[1]
def p_expresiones_excluva(t):
''' exclusiva_insert : SUBSTRING PAR_A string_type COMA expresion COMA expresion PAR_C
| MD5 PAR_A string_type PAR_C
| TRIM PAR_A string_type PAR_C
| SUBSTR PAR_A string_type COMA expresion COMA expresion PAR_C
| NOW PAR_A PAR_C'''
if t[1].upper() == 'SUBSTRING' : t[0] = Funcion_Exclusivas_insert(INSERT_EXCLUSIVA.SUBSTRING,t[3],t[5],t[7])
elif t[1].upper() == 'MD5' : t[0] = Funcion_Exclusivas_insert(INSERT_EXCLUSIVA.MD5,t[3],None,None)
elif t[1].upper() == 'TRIM' : t[0] = Funcion_Exclusivas_insert(INSERT_EXCLUSIVA.TRIM,t[3],None,None)
elif t[1].upper() == 'SUBSTR' : t[0] = Funcion_Exclusivas_insert(INSERT_EXCLUSIVA.SUBSTRING,t[3],t[5],t[7])
elif t[1].upper() == 'NOW' : t[0] = Funcion_Exclusivas_insert(INSERT_EXCLUSIVA.NOW,None,None,None)
#?######################################################
# TODO ALTER TABLE GRAMMAR
#?######################################################
def p_Table_alter(t):
'Table_alter : ALTER COLUMN ID TYPE TIPO_DATO'
reporte_bnf.append("<Table_alter> ::= ALTER COLUMN ID TYPE <TIPO_DATO>")
if t[5][0] == 'VARCHAR':
t[0] = Crear_tipodato(ExpresionIdentificador(TIPO_VALOR.IDENTIFICADOR,t[3]),ExpresionIdentificador(TIPO_VALOR.IDENTIFICADOR,t[5][0]),t[5][1],None)
elif t[5][0] == 'DECIMAL':
t[0] = Crear_tipodato(ExpresionIdentificador(TIPO_VALOR.IDENTIFICADOR,t[3]),ExpresionIdentificador(TIPO_VALOR.IDENTIFICADOR,t[5][0]),t[5][1],t[5][2])
elif t[5][0] == 'NUMERIC':
t[0] = Crear_tipodato(ExpresionIdentificador(TIPO_VALOR.IDENTIFICADOR,t[3]),ExpresionIdentificador(TIPO_VALOR.IDENTIFICADOR,t[5][0]),t[5][1],t[5][2])
elif t[5][0] == 'VARYING':
t[0] = Crear_tipodato(ExpresionIdentificador(TIPO_VALOR.IDENTIFICADOR,t[3]),ExpresionIdentificador(TIPO_VALOR.IDENTIFICADOR,t[5][0]),t[5][1],None)
elif t[5][0] == 'CHAR':
t[0] = Crear_tipodato(ExpresionIdentificador(TIPO_VALOR.IDENTIFICADOR,t[3]),ExpresionIdentificador(TIPO_VALOR.IDENTIFICADOR,t[5][0]),t[5][1],None)
elif t[5][0] == 'CHARACTER':
t[0] = Crear_tipodato(ExpresionIdentificador(TIPO_VALOR.IDENTIFICADOR,t[3]),ExpresionIdentificador(TIPO_VALOR.IDENTIFICADOR,t[5][0]),t[5][1],None)
elif t[5][0] == 'INTERVAL' and t[5][1] == 'TO':
t[0] = Crear_tipodato(ExpresionIdentificador(TIPO_VALOR.IDENTIFICADOR,t[3]),ExpresionIdentificador(TIPO_VALOR.IDENTIFICADOR,t[5][0]),t[5][2],t[5][3])
else:
t[0] = Crear_tipodato(ExpresionIdentificador(TIPO_VALOR.IDENTIFICADOR,t[3]),t[5][0],None,None)
def p_alterTable3(t):
'alterTable_insrt : ALTER TABLE ID DROP CONSTRAINT campos_c PTCOMA'
reporte_bnf.append("<alterTable_insrt> ::= ALTER TABLE ID DROP CONSTRAINT <campos_c> PTCOMA")
t[0] = Crear_altertable(TIPO_ALTER_TABLE.DROP_CONSTRAINT,t[3],None,None,None,t[6],None)
def p_alterTable_Drop(t):
'alterTable_insrt : ALTER TABLE ID DROP COLUMN campos_c PTCOMA'
reporte_bnf.append("<alterTable_insrt> ::= ALTER TABLE ID DROP COLUMN <campos_c> PTCOMA")
t[0] = Crear_altertable(TIPO_ALTER_TABLE.DROP_COLUMN, t[3], None,None,None,t[6],None)
def p_alterTable4(t):
'alterTable_insrt : ALTER TABLE ID RENAME COLUMN ID TO ID PTCOMA'
reporte_bnf.append("<alterTable_insrt> ::= ALTER TABLE ID RENAME COLUMN ID TO ID PTCOMA")
t[0] = Crear_altertable(TIPO_ALTER_TABLE.RENAME_COLUMN,t[3],t[6],t[8],None,None,None)
def p_alterTable5(t):
'alterTable_insrt : ALTER TABLE ID ADD COLUMN campos_add_Column PTCOMA'
reporte_bnf.append("<alterTable_insrt> ::= ALTER TABLE ID ADD COLUMN campos_add_Column PTCOMA")
t[0] = Crear_altertable(TIPO_ALTER_TABLE.ADD_COLUMN,t[3],None,None,None,t[6],None)
def p_alterTable_add_column(t):
'campos_add_Column : campos_add_Column COMA tipos_datos_columnas '
reporte_bnf.append("<campos_add_Column> ::= <campos_add_Column> COMA <tipos_datos_columnas>")
t[1].append(t[3])
t[0] = t[1]
def p_alterTable_add_columna(t):
'campos_add_Column : tipos_datos_columnas '
reporte_bnf.append("<campos_add_Column> ::= <tipos_datos_columnas>")
t[0] = [t[1]]
def p_alterTable_add_tipodato(t):
'tipos_datos_columnas : ID TIPO_DATO'
reporte_bnf.append("<tipos_datos_columnas> ::= ID <TIPO_DATO>")
if t[2][0] == 'VARCHAR':
t[0] = Crear_tipodato(ExpresionIdentificador(TIPO_VALOR.IDENTIFICADOR,t[1]),ExpresionIdentificador(TIPO_VALOR.IDENTIFICADOR,t[2][0]),t[2][1],None)
elif t[2][0] == 'DECIMAL':
t[0] = Crear_tipodato(ExpresionIdentificador(TIPO_VALOR.IDENTIFICADOR,t[1]),ExpresionIdentificador(TIPO_VALOR.IDENTIFICADOR,t[2][0]),t[2][1],t[2][2])
elif t[2][0] == 'NUMERIC':
t[0] = Crear_tipodato(ExpresionIdentificador(TIPO_VALOR.IDENTIFICADOR,t[1]),ExpresionIdentificador(TIPO_VALOR.IDENTIFICADOR,t[2][0]),t[2][1],t[2][2])
elif t[2][0] == 'VARYING':
t[0] = Crear_tipodato(ExpresionIdentificador(TIPO_VALOR.IDENTIFICADOR,t[1]),ExpresionIdentificador(TIPO_VALOR.IDENTIFICADOR,t[2][0]),t[2][1],None)
elif t[2][0] == 'CHAR':
t[0] = Crear_tipodato(ExpresionIdentificador(TIPO_VALOR.IDENTIFICADOR,t[1]),ExpresionIdentificador(TIPO_VALOR.IDENTIFICADOR,t[2][0]),t[2][1],None)
elif t[2][0] == 'CHARACTER':
t[0] = Crear_tipodato(ExpresionIdentificador(TIPO_VALOR.IDENTIFICADOR,t[1]),ExpresionIdentificador(TIPO_VALOR.IDENTIFICADOR,t[2][0]),t[2][1],None)
elif t[2][0] == 'INTERVAL' and t[2][1] == 'TO':
t[0] = Crear_tipodato(ExpresionIdentificador(TIPO_VALOR.IDENTIFICADOR,t[1]),ExpresionIdentificador(TIPO_VALOR.IDENTIFICADOR,t[2][0]),t[2][2],t[2][3])
else:
t[0] = Crear_tipodato(ExpresionIdentificador(TIPO_VALOR.IDENTIFICADOR,t[1]),t[2][0],None,None)
def p_alterTable6(t):
'alterTable_insrt : ALTER TABLE ID ADD CHECK PAR_A expresion_logica PAR_C PTCOMA'
reporte_bnf.append("<alterTable_insrt> ::= ALTER TABLE ID ADD CHECK PAR_A <expresion_logica> PAR_C PTCOMA")
t[0] = Crear_altertable(TIPO_ALTER_TABLE.ADD_CHECK,t[3],None,None,t[7],None,None)
def p_alterTable8(t):
'alterTable_insrt : ALTER TABLE ID ADD FOREIGN KEY PAR_A ID PAR_C REFERENCES ID PAR_A ID PAR_C PTCOMA'
reporte_bnf.append("<alterTable_insrt> ::= ALTER TABLE ID ADD FOREIGN KEY PAR_A ID PAR_C REFERENCES ID PAR_A ID PAR_C PTCOMA")
t[0] = Crear_altertable(TIPO_ALTER_TABLE.ADD_FOREIGN,t[3],t[8],t[11],None,t[13],None)
def p_alterTable7(t):
'alterTable_insrt : ALTER TABLE ID ADD CONSTRAINT ID CHECK PAR_A expresion_logica PAR_C PTCOMA'
reporte_bnf.append("<alterTable_insrt> ::= ER TABLE ID ADD CONSTRAINT ID CHECK PAR_A <expresion_logica> PAR_C PTCOMA")
t[0] = Crear_altertable(TIPO_ALTER_TABLE.ADD_CONSTRAINT_CHECK,t[3],t[6],None,t[9],None,None)
def p_constraint_esp(t):
'alterTable_insrt : ALTER TABLE ID ADD CONSTRAINT ID UNIQUE PAR_A campos_c PAR_C PTCOMA'
reporte_bnf.append("<alterTable_insrt> ::= ALTER TABLE ID ADD CONSTRAINT ID UNIQUE PAR_A <campos_c> PAR_C PTCOMA")
t[0] = Crear_altertable(TIPO_ALTER_TABLE.ADD_CONSTRAINT_UNIQUE,t[3],t[6],None,None,t[9],None)
def p_constraint_esp_1(t):
'alterTable_insrt : ALTER TABLE ID ADD CONSTRAINT ID FOREIGN KEY PAR_A ID PAR_C REFERENCES ID PAR_A ID PAR_C PTCOMA'
reporte_bnf.append("<alterTable_insrt> ::= ALTER TABLE ID ADD CONSTRAINT ID FOREIGN KEY PAR_A ID PAR_C REFERENCES ID PAR_A ID PAR_C PTCOMA")
t[0] = Crear_altertable(TIPO_ALTER_TABLE.ADD_CONSTRAINT_FOREIGN,t[3],t[6],t[10],None,t[13],t[15])
def p_constraint_esp_null(t):
'alterTable_insrt : ALTER TABLE ID ALTER COLUMN ID SET NULL PTCOMA'
reporte_bnf.append("<alterTable_insrt> ::= ALTER TABLE ID ALTER COLUMN ID SET NULL PTCOMA")
t[0] = Crear_altertable(TIPO_ALTER_TABLE.ALTER_COLUMN_NULL,t[3],t[6],None,None,None,None)
def p_constraint_esp_Notnull(t):
'alterTable_insrt : ALTER TABLE ID ALTER COLUMN ID SET NOT NULL PTCOMA'
reporte_bnf.append("<alterTable_insrt> ::= ALTER TABLE ID ALTER COLUMN ID SET NOT NULL PTCOMA")
t[0] = Crear_altertable(TIPO_ALTER_TABLE.ALTER_COLUMN_NOT_NULL,t[3],t[6],None,None,None,None)
def p_alterTable2(t):
'alterTable_insrt : ALTER TABLE ID alterTable_alter PTCOMA'
reporte_bnf.append("<alterTable_insrt> ::= ALTER TABLE ID <alterTable_alter> PTCOMA")
t[0] = Crear_altertable(TIPO_ALTER_TABLE.ALTER_COLUMN,t[3],None,None,None,t[4],None)
def p_alerTable_alter(t):
'alterTable_alter : alterTable_alter COMA Table_alter'
reporte_bnf.append("<alterTable_alter> ::= <alterTable_alter> COMA <Table_alter>")
t[1].append(t[3])
t[0] = t[1]
def p_alerTable_alter_1(t):
'alterTable_alter : Table_alter'
reporte_bnf.append("<alterTable_alter> ::= <Table_alter>")
t[0] = [t[1]]
# DROP
#?######################################################
# TODO DROP TABLE GRAMMAR
#?######################################################
def p_dropTable(t):
' drop_insrt : DROP TABLE lista_drop_id PTCOMA'
reporte_bnf.append("<drop_insrt> ::= DROP TABLE <lista_drop_id> PTCOMA")
t[0] = Crear_Drop(t[3])
def p_lista_tabla_lista(t):
' lista_drop_id : lista_drop_id COMA ID '
reporte_bnf.append("<lista_drop_id> ::= <lista_drop_id> COMA ID")
t[1].append(ExpresionIdentificador(TIPO_VALOR.IDENTIFICADOR,t[3]))
t[0] = t[1]
def p_lista_tabla_lista2(t):
' lista_drop_id : ID '
reporte_bnf.append("<lista_drop_id> ::= ID")
t[0] = [ExpresionIdentificador(TIPO_VALOR.IDENTIFICADOR,t[1])]
#?######################################################
# TODO UPDATE TABLE GRAMMAR
#?######################################################
def p_update_insrt(t):
' update_insrt : UPDATE ID SET lista_update cond_where PTCOMA'
reporte_bnf.append("<update_insrt> ::= UPDATE ID SET <lista_update> <cond_where> PTCOMA")
t[0] = Create_update(ExpresionIdentificador(TIPO_VALOR.IDENTIFICADOR,(t[2])),t[5],t[4])
def p_lista_update(t):
' lista_update : lista_update COMA parametro_update'
reporte_bnf.append("<lista_update> ::= <lista_update> COMA <parametro_update>")
t[1].append(t[3])
t[0] = t[1]
def p_lista_update_lista(t):
' lista_update : parametro_update'
reporte_bnf.append("<lista_update> ::= <parametro_update>")
t[0] = [t[1]]
def p_parametro_update(t):
' parametro_update : ID IGUAL expresion'
reporte_bnf.append("<parametro_update> ::= ID IGUAL <expresion>")
t[0] = Create_Parametro_update(ExpresionIdentificador(TIPO_VALOR.IDENTIFICADOR,t[1]),t[3])
'''def p_cond_where(t):
'cond_where : WHERE expresion_where'
t[0] = Create_hijo_select(OPCIONES_SELECT.WHERE,None,None,t[2])'''
def p_expresion_dato(t):
''' expresion_dato : string_type '''
reporte_bnf.append("<expresion_dato> ::= <string_type>")
t[0] = t[1]
def p_expresion_dato2(t):
' expresion_dato : RESTA ENTERO %prec UMINUS '
reporte_bnf.append("<expresion_dato> ::= RESTA ENTERO %prec UMINUS")
t[0] = ExpresionNegativo(TIPO_VALOR.NEGATIVO,-t[2])
def p_expresion_dato3(t):
' expresion_dato : ID PUNTO ID'
reporte_bnf.append("<expresion_dato> ::= ID PUNTO ID")
t[0] = ExpresionIdentificadorDoble(TIPO_VALOR.DOBLE,t[1],t[3])
def p_expresion_dato_numero(t):
'expresion_dato : expresion_numero'
reporte_bnf.append("<expresion_dato> ::= <expresion_numero>")
t[0] = t[1]
def p_expresion_numero(t):
'expresion_numero : ENTERO'
reporte_bnf.append("<expresion_numero> ::= ENTERO")
t[0] = ExpresionEntero(TIPO_VALOR.NUMERO,t[1])
def p_expresion_numero1(t):
'expresion_numero : FLOTANTE'
reporte_bnf.append("<expresion_numero> ::= FLOTANTE")
t[0] = ExpresionEntero(TIPO_VALOR.NUMERO,t[1])
#?######################################################
# TODO ALTER DATABASE GRAMMAR
#?######################################################
def p_AlterDB_opc1(t):
' alterDB_insrt : ALTER DATABASE ID RENAME TO ID PTCOMA'
reporte_bnf.append("<alterDB_insrt> ::= ALTER DATABASE ID RENAME TO ID PTCOMA")
t[0] = Create_Alterdatabase(t[3],t[6])
def p_AlterDB_opc2(t):
' alterDB_insrt : ALTER DATABASE ID OWNER TO usuariosDB PTCOMA'
reporte_bnf.append("<alterDB_insrt> ::= ALTER DATABASE ID OWNER TO <usuariosDB> PTCOMA")
t[0] = Create_Alterdatabase(t[3],t[6])
def p_usuarioDB(t):
' usuariosDB : ID '
reporte_bnf.append("<usuariosDB> ::= ID")
t[0] = ExpresionIdentificador(TIPO_VALOR.IDENTIFICADOR,t[1])
def p_usuarioDB2(t):
' usuariosDB : CURRENT_USER '
reporte_bnf.append("<usuariosDB> ::= CURRENT_USER")
t[0] = ExpresionIdentificador(TIPO_VALOR.IDENTIFICADOR,t[1])
def p_usuarioDB3(t):
' usuariosDB : SESSION_USER '
reporte_bnf.append("<usuariosDB> ::= SESSION_USER")
t[0] = ExpresionIdentificador(TIPO_VALOR.IDENTIFICADOR,t[1])
def p_usuarioDB4(t):
' usuariosDB : CADENA '
reporte_bnf.append("<usuariosDB> ::= CADENA")
t[0] = ExpresionComillaSimple(TIPO_VALOR.CADENA,t[1])
#?######################################################
# TODO USE DATABASE GRAMMAR
#?######################################################
def p_instruccion_use_database(t):
'use_database_instr : USE ID PTCOMA'
reporte_bnf.append("<use_database_instr> ::= USE ID PTCOMA")
t[0] = useDatabase(ExpresionIdentificador(TIPO_VALOR.IDENTIFICADOR,t[2]))
#?######################################################
# TODO DROP DATABASE GRAMMAR
#?######################################################
def p_instruccion_drop_database(t):
'''drop_database_instr : DROP DATABASE ID PTCOMA
| DROP DATABASE IF EXISTS ID PTCOMA'''
if t[4] == ';':
reporte_bnf.append("<drop_database_instr> ::= DROP DATABASE ID PTCOMA")
t[0] = dropDatabase(ExpresionIdentificador(TIPO_VALOR.IDENTIFICADOR,t[3]), 0)
else:
reporte_bnf.append("<drop_database_instr> ::= DROP DATABASE IF EXISTS ID PTCOMA")
t[0] = dropDatabase(ExpresionIdentificador(TIPO_VALOR.IDENTIFICADOR,t[5]), 1)
#?######################################################
# TODO SHOW DATABASE GRAMMAR
#?######################################################
def p_instruccion_show_databases(t):
'show_databases_instr : SHOW DATABASES PTCOMA'
reporte_bnf.append("<show_databases_instr> ::= SHOW DATABASES PTCOMA")
t[0] = showDatabases()
#?######################################################
# ANCHOR SHOW TABLES STATEMENT GRAMMAR
#?######################################################
def p_instruccion_showTables(t):
'show_tables_instr : SHOW TABLES PTCOMA'
reporte_bnf.append("<show_tables_instr> ::= SHOW TABLES PTCOMA")
t[0] = showTables()
#?######################################################
# TODO CREATE DATABASE STATEMENT GRAMMAR
#?######################################################
#?######################################################
# ANCHOR SIMPLE
#?######################################################
def p_createDB(t):
'createDB_insrt : CREATE DATABASE ID PTCOMA'
reporte_bnf.append("<createDB_insrt> ::= CREATE DATABASE ID PTCOMA")
t[0] = CreateDatabase(ExpresionIdentificador(TIPO_VALOR.IDENTIFICADOR,t[3]), ExpresionIdentificador(TIPO_VALOR.IDENTIFICADOR,""), ExpresionNumeroSimple(1), 0)
def p_createDB_wRP(t):
'createDB_insrt : CREATE OR REPLACE DATABASE ID PTCOMA'
reporte_bnf.append("<createDB_insrt> ::= CREATE OR REPLACE DATABASE ID PTCOMA")
t[0] = CreateDatabase(ExpresionIdentificador(TIPO_VALOR.IDENTIFICADOR,t[5]), ExpresionIdentificador(TIPO_VALOR.IDENTIFICADOR,""), ExpresionNumeroSimple(1), 1)
def p_createDB_wIfNot(t):
'createDB_insrt : CREATE DATABASE IF NOT EXISTS ID PTCOMA'
reporte_bnf.append("<createDB_insrt> ::= CREATE DATABASE IF NOT EXISTS ID PTCOMA")
t[0] = CreateDatabase(ExpresionIdentificador(TIPO_VALOR.IDENTIFICADOR,t[6]), ExpresionIdentificador(TIPO_VALOR.IDENTIFICADOR,""), ExpresionNumeroSimple(1), 0)
def p_createDB_wRP_wIN(t):
'createDB_insrt : CREATE OR REPLACE DATABASE IF NOT EXISTS ID PTCOMA'
reporte_bnf.append("<createDB_insrt> ::= CREATE OR REPLACE DATABASE IF NOT EXISTS ID PTCOMA")
t[0] = CreateDatabase(ExpresionIdentificador(TIPO_VALOR.IDENTIFICADOR,t[8]), ExpresionIdentificador(TIPO_VALOR.IDENTIFICADOR,""), ExpresionNumeroSimple(1), 1)
#?######################################################
# ANCHOR ONE PARAMETER
#?######################################################
def p_createDB_up(t):
'createDB_insrt : CREATE DATABASE ID createDB_unParam PTCOMA'
reporte_bnf.append("<createDB_insrt> ::= CREATE DATABASE ID <createDB_unParam> PTCOMA")
if type(t[4]) == ExpresionIdentificador:
t[0] = CreateDatabase(ExpresionIdentificador(TIPO_VALOR.IDENTIFICADOR,t[3]), t[4], ExpresionNumeroSimple(1),0)
else:
t[0] = CreateDatabase(ExpresionIdentificador(TIPO_VALOR.IDENTIFICADOR,t[3]), ExpresionIdentificador(TIPO_VALOR.IDENTIFICADOR,""), t[4],0)
def p_createDB_wRP_up(t):
'createDB_insrt : CREATE OR REPLACE DATABASE ID createDB_unParam PTCOMA'
reporte_bnf.append("<createDB_insrt> ::= CREATE OR REPLACE DATABASE ID <createDB_unParam> PTCOMA")
if type(t[6]) == ExpresionIdentificador:
t[0] = CreateDatabase(ExpresionIdentificador(TIPO_VALOR.IDENTIFICADOR,t[5]), t[6], ExpresionNumeroSimple(1),1)
else:
t[0] = CreateDatabase(ExpresionIdentificador(TIPO_VALOR.IDENTIFICADOR,t[5]), ExpresionIdentificador(TIPO_VALOR.IDENTIFICADOR,""), t[6],1)
def p_createDB_wIfNot_up(t):
'createDB_insrt : CREATE DATABASE IF NOT EXISTS ID createDB_unParam PTCOMA'
reporte_bnf.append("<createDB_insrt> ::= CREATE DATABASE IF NOT EXISTS ID createDB_unParam PTCOMA")
if type(t[7]) == ExpresionIdentificador:
t[0] = CreateDatabase(ExpresionIdentificador(TIPO_VALOR.IDENTIFICADOR,t[6]), t[7], ExpresionNumeroSimple(1),0)
else:
t[0] = CreateDatabase(ExpresionIdentificador(TIPO_VALOR.IDENTIFICADOR,t[6]), ExpresionIdentificador(TIPO_VALOR.IDENTIFICADOR,""), t[7],0)
def p_createDB_wRP_wIN_up(t):
'createDB_insrt : CREATE OR REPLACE DATABASE IF NOT EXISTS ID createDB_unParam PTCOMA'
reporte_bnf.append("<createDB_insrt> ::= CREATE OR REPLACE DATABASE IF NOT EXISTS ID <createDB_unParam> PTCOMA")
if type(t[9]) == ExpresionIdentificador:
t[0] = CreateDatabase(ExpresionIdentificador(TIPO_VALOR.IDENTIFICADOR,t[8]), t[9], ExpresionNumeroSimple(1),1)
else:
t[0] = CreateDatabase(ExpresionIdentificador(TIPO_VALOR.IDENTIFICADOR,t[8]), ExpresionIdentificador(TIPO_VALOR.IDENTIFICADOR,""), t[9],1)
def p_createDB_unParam_Owner(t):
'''createDB_unParam : OWNER string_type
| OWNER IGUAL string_type
| MODE ENTERO
| MODE IGUAL ENTERO'''
if t[1].upper() == 'OWNER':
if t[2] == '=':
reporte_bnf.append("<createDB_unParam> ::= OWNER IGUAL <string_type>")
t[0] = t[3]
else:
reporte_bnf.append("<createDB_unParam> ::= OWNER <string_type>")
t[0] = t[2]
elif t[1].upper() == 'MODE':
if t[2] == '=':
reporte_bnf.append("<createDB_unParam> ::= MODE IGUAL ENTERO")
t[0] = ExpresionNumeroSimple(t[3])
else:
reporte_bnf.append("<createDB_unParam> ::= MODE ENTERO")
t[0] = ExpresionNumeroSimple(t[2])
#?######################################################
# ANCHOR TWO PARAMETERS
#?######################################################
def p_createDB_dp(t):
'createDB_insrt : CREATE DATABASE ID createDB_dosParam PTCOMA'
reporte_bnf.append("<createDB_insrt> ::= CREATE DATABASE ID <createDB_dosParam> PTCOMA")
t[0] = CreateDatabase(ExpresionIdentificador(TIPO_VALOR.IDENTIFICADOR,t[3]), t[4][0], t[4][1],0)
def p_createDB_wRP_dp(t):
'createDB_insrt : CREATE OR REPLACE DATABASE ID createDB_dosParam PTCOMA'
reporte_bnf.append("<createDB_insrt> ::= CREATE OR REPLACE DATABASE ID <createDB_dosParam> PTCOMA")
t[0] = CreateDatabase(ExpresionIdentificador(TIPO_VALOR.IDENTIFICADOR,t[5]), t[6][0], t[6][1],1)
def p_createDB_wIfNot_dp(t):
'createDB_insrt : CREATE DATABASE IF NOT EXISTS ID createDB_dosParam PTCOMA'
reporte_bnf.append("<createDB_insrt> ::= CREATE DATABASE IF NOT EXISTS ID <createDB_dosParam> PTCOMA")
t[0] = CreateDatabase(ExpresionIdentificador(TIPO_VALOR.IDENTIFICADOR,t[6]), t[7][0], t[7][1],0)
def p_createDB_wRP_wIN_dp(t):
'createDB_insrt : CREATE OR REPLACE DATABASE IF NOT EXISTS ID createDB_dosParam PTCOMA'
reporte_bnf.append("<createDB_insrt> ::= CREATE OR REPLACE DATABASE IF NOT EXISTS ID <createDB_dosParam> PTCOMA")
t[0] = CreateDatabase(ExpresionIdentificador(TIPO_VALOR.IDENTIFICADOR,t[8]), t[9][0], t[9][1],1)
def p_createDB_dosParam_Owner(t):
'''createDB_dosParam : OWNER string_type MODE ENTERO
| OWNER string_type MODE IGUAL ENTERO
| OWNER IGUAL string_type MODE ENTERO
| OWNER IGUAL string_type MODE IGUAL ENTERO
| MODE ENTERO OWNER string_type
| MODE ENTERO OWNER IGUAL string_type
| MODE IGUAL ENTERO OWNER ID
| MODE IGUAL ENTERO OWNER IGUAL ID'''
temp = []
if t[1].upper() == 'OWNER' and t[3].upper() == 'MODE':
if t[4] == '=':
reporte_bnf.append("<createDB_dosParam> ::= OWNER <string_type> MODE IGUAL ENTERO")
temp.append(t[2])
temp.append(ExpresionNumeroSimple(t[5]))
else:
reporte_bnf.append("<createDB_dosParam> ::= OWNER <string_type> MODE ENTERO")
temp.append(t[2])
temp.append(ExpresionNumeroSimple(t[4]))
elif t[1].upper() == 'OWNER' and t[4].upper() == 'MODE':
if t[5] == '=':
reporte_bnf.append("<createDB_dosParam> ::= OWNER IGUAL <string_type> MODE IGUAL ENTERO")
temp.append(t[3])
temp.append(ExpresionNumeroSimple(t[6]))
else:
reporte_bnf.append("<createDB_dosParam> ::= OWNER IGUAL <string_type> MODE ENTERO")
temp.append(t[3])
temp.append(ExpresionNumeroSimple(t[5]))
elif t[1].upper() == 'MODE' and type(t[3]) != int:
if t[4] == '=':
reporte_bnf.append("<createDB_dosParam> ::= MODE ENTERO OWNER IGUAL <string_type>")
temp.append(t[5])
temp.append(ExpresionNumeroSimple(t[2]))
else:
reporte_bnf.append("<createDB_dosParam> ::= MODE ENTERO OWNER <string_type>")
temp.append(t[4])
temp.append(ExpresionNumeroSimple(t[2]))
elif t[1].upper() == 'MODE' and type(t[3]) == int:
if t[5] == '=':
reporte_bnf.append("<createDB_dosParam> ::= MODE IGUAL ENTERO OWNER IGUAL ID")
temp.append(t[6])
temp.append(ExpresionNumeroSimple(t[3]))
else:
reporte_bnf.append("<createDB_dosParam> ::= MODE IGUAL ENTERO OWNER ID")
temp.append(t[5])
temp.append(ExpresionNumeroSimple(t[3]))
t[0] = temp
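# Note (added): a statement like
#   CREATE DATABASE ventas OWNER 'admin' MODE 1;
# (hypothetical names) reaches p_createDB_dp with t[4] built here: whichever order OWNER and
# MODE appear in, temp is normalised to [owner_expression, ExpresionNumeroSimple(mode)].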
#?######################################################
# TODO ADD PRODUCTIONS
#?######################################################
def p_constraint_esp_(t):
'constraint_esp : CHECK PAR_A expresion_logica PAR_C '
reporte_bnf.append("<constraint_esp> ::= CHECK PAR_A <expresion_logica> PAR_C")
temp = []
temp.append(t[1].upper())
temp.append([t[3]])
t[0] = temp
def p_constraint_esp1(t):
'constraint_esp : UNIQUE PAR_A campos_c PAR_C '
reporte_bnf.append("<constraint_esp> ::= UNIQUE PAR_A <campos_c> PAR_C")
temp = []
temp.append(t[1].upper())
temp.append(t[3])
t[0] = temp
def p_constraint_esp2(t):
'constraint_esp : FOREIGN KEY PAR_A ID PAR_C REFERENCES ID PAR_A ID PAR_C '
reporte_bnf.append("<constraint_esp> ::= FOREIGN KEY PAR_A ID PAR_C REFERENCES ID PAR_A ID PAR_C")
temp = []
temp.append(t[1].upper())
temp.append(t[4])
temp.append(t[7])
temp.append([t[9]])
t[0] = temp
# done
def p_cons_campos(t):
'campos_c : campos_c COMA ID '
reporte_bnf.append("<campos_c> ::= <campos_c> COMA ID")
t[1].append(ExpresionIdentificador(TIPO_VALOR.IDENTIFICADOR,t[3]))
t[0] = t[1]
def p_cons_campos_id(t):
' campos_c : ID'
reporte_bnf.append("<campos_c> ::= ID")
t[0] = [ExpresionIdentificador(TIPO_VALOR.IDENTIFICADOR,t[1])]
#?######################################################
# TODO CREATE TABLE STATEMENT
#?######################################################
def p_create_table(t):
''' create_Table_isnrt : CREATE TABLE ID PAR_A cuerpo_createTable_lista PAR_C PTCOMA
| CREATE TABLE ID PAR_A cuerpo_createTable_lista PAR_C INHERITS PAR_A ID PAR_C PTCOMA '''
if t[7] == ';' :
reporte_bnf.append("<create_Table_isnrt> ::= CREATE TABLE ID PAR_A <cuerpo_createTable_lista> PAR_C PTCOMA ")
t[0] = Create_Table(t[3], None , t[5])
else:
reporte_bnf.append("<create_Table_isnrt> ::= CREATE TABLE ID PAR_A <cuerpo_createTable_lista> PAR_C INHERITS PAR_A ID PAR_C PTCOMA")
t[0] = Create_Table(t[3], t[9], t[5])
def p_cuerpo_createTable_lista(t):
' cuerpo_createTable_lista : cuerpo_createTable_lista COMA cuerpo_createTable'
reporte_bnf.append("<cuerpo_createTable_lista> ::= <cuerpo_createTable_lista> COMA <cuerpo_createTable>")
t[1].append(t[3])
t[0] = t[1]
def p_cuerpo_createTable(t):
' cuerpo_createTable_lista : cuerpo_createTable'
reporte_bnf.append("<cuerpo_createTable_lista> ::= <cuerpo_createTable>")
t[0] = [t[1]]
def p_createTable(t):
' cuerpo_createTable : ID TIPO_DATO_DEF '
reporte_bnf.append("<campos_c> ::= ID <TIPO_DATO_DEF>")
t[0] = Definicion_Columnas(t[1],t[2], None,None,None)
def p_createTable_id_pk(t):
' cuerpo_createTable : ID TIPO_DATO_DEF createTable_options'
reporte_bnf.append("<cuerpo_createTable> ::= ID <TIPO_DATO_DEF> <createTable_options>")
t[0] = Definicion_Columnas(t[1],t[2], None,None,t[3])
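# Note (added): a column definition such as
#   nombre VARCHAR(50) NOT NULL UNIQUE
# (hypothetical) is parsed by p_createTable_id_pk: TIPO_DATO_DEF yields
# Expresion_Caracter(TIPO_DE_DATOS.varchar, 50) and createTable_options collects the
# NOT NULL and UNIQUE constraints as definicion_constraint objects.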
# -------------------------------------------
def p_createTable_combs1(t):
' createTable_options : createTable_options cT_options'
reporte_bnf.append("<createTable_options> ::= <createTable_options> <cT_options>")
t[1].append(t[2])
t[0] = t[1]
def p_createTable_combs2(t):
' createTable_options : cT_options'
reporte_bnf.append("<createTable_options> ::= <cT_options>")
t[0] = [t[1]]
def p_cT_options(t):
' cT_options : N_null'
reporte_bnf.append("<cT_options> ::= <N_null>")
t[0] = t[1]
def p_cT_options1(t):
' cT_options : C_unique'
reporte_bnf.append("<cT_options> ::= <C_unique>")
t[0] = t[1]
def p_cT_options2(t):
' cT_options : C_check'
reporte_bnf.append("<cT_options> ::= <C_check>")
t[0] = t[1]
def p_cT_options3(t):
' cT_options : llave'
reporte_bnf.append("<cT_options> ::= <llave>")
t[0] = t[1]
def p_cT_options4(t):
' cT_options : O_DEFAULT'
reporte_bnf.append("<cT_options> ::= <O_DEFAULT>")
t[0] = t[1]
#--------------------------------------------------
def p_default(t):
' O_DEFAULT : DEFAULT expresion_dato_default '
reporte_bnf.append("<O_DEFAULT> ::= DEFAULT <expresion_dato_default>")
t[0] = definicion_constraint(None,OPCIONES_CONSTRAINT.DEFAULT,None,None,t[2])
def p_N_null(t):
''' N_null : NULL
| NOT NULL'''
if t[1].upper() == 'NULL':
reporte_bnf.append("<N_null> ::= NULL")
t[0] = definicion_constraint(None,OPCIONES_CONSTRAINT.NULL,None,None,None)
else:
reporte_bnf.append("<N_null> ::= NOT NULL")
t[0] = definicion_constraint(None,OPCIONES_CONSTRAINT.NOT_NULL,None,None,None)
def p_C_unique(t):
''' C_unique : UNIQUE
| CONSTRAINT ID UNIQUE'''
if t[1].upper() == 'UNIQUE':
reporte_bnf.append("<C_unique> ::= UNIQUE")
t[0] = definicion_constraint(None,OPCIONES_CONSTRAINT.UNIQUE,None,None,None)
else:
reporte_bnf.append("<C_unique> ::= CONSTRAINT ID UNIQUE")
t[0] = definicion_constraint(t[2],OPCIONES_CONSTRAINT.UNIQUE,None,None,None)
def p_Ccheck(t):
''' C_check : CHECK PAR_A expresion_logica PAR_C
| CONSTRAINT ID CHECK PAR_A expresion_logica PAR_C '''
if t[1].upper() == 'CHECK':
reporte_bnf.append("<C_check> ::= CHECK PAR_A <expresion_logica> PAR_C")
t[0] = definicion_constraint(None,OPCIONES_CONSTRAINT.CHECK,None,None,t[3])
else:
reporte_bnf.append("<C_check> ::= CONSTRAINT ID CHECK PAR_A <expresion_logica> PAR_C")
t[0] = definicion_constraint(t[2],OPCIONES_CONSTRAINT.CHECK,None,None,t[3])
def p_llave(t):
''' llave : PRIMARY KEY
| FOREIGN KEY'''
if t[1].upper() == 'PRIMARY':
reporte_bnf.append("<llave> ::= PRIMARY KEY")
t[0] = definicion_constraint(None,OPCIONES_CONSTRAINT.PRIMARY,None,None,None)
else:
reporte_bnf.append("<llave> ::= FOREIGN KEY")
t[0] = definicion_constraint(None,OPCIONES_CONSTRAINT.FOREIGN,None,None,None)
def p_expresion_cadena_DEFAULT(t):
'expresion_dato_default : CADENA'
reporte_bnf.append("<expresion_dato_default> ::= CADENA")
t[0] = ExpresionComillaSimple(TIPO_VALOR.CADENA,t[1])
def p_expresion1_DEFAULT(t):
'expresion_dato_default : ENTERO'
reporte_bnf.append("<expresion_dato_default> ::= ENTERO")
t[0] = ExpresionEntero(TIPO_VALOR.NUMERO,t[1])
def p_expresion1_DEFAULT1(t):
'expresion_dato_default : FLOTANTE'
reporte_bnf.append("<expresion_dato_default> ::= FLOTANTE")
t[0] = ExpresionEntero(TIPO_VALOR.NUMERO,t[1])
##########################################################
##########################################################
##########################################################
####################################
def p_createTable_pk(t):
' cuerpo_createTable : PRIMARY KEY PAR_A campos_c PAR_C'
reporte_bnf.append("<cuerpo_createTable> ::= PRIMARY KEY PAR_A <campos_c> PAR_C")
t[0] = LLave_Primaria(t[4])
def p_createTable_fk(t):
' cuerpo_createTable : FOREIGN KEY PAR_A ID PAR_C REFERENCES ID PAR_A ID PAR_C'
reporte_bnf.append("<cuerpo_createTable> ::= FOREIGN KEY PAR_A ID PAR_C REFERENCES ID PAR_A ID PAR_C")
t[0] = Definicon_Foranea(t[4], t[7], t[9])
def p_createTable_unique(t):
' cuerpo_createTable : UNIQUE PAR_A campos_c PAR_C '
reporte_bnf.append("<cuerpo_createTable> ::= UNIQUE PAR_A <campos_c> PAR_C")
t[0] = Lista_Parametros(t[3])
def p_createTable_constraint(t):
' cuerpo_createTable : CONSTRAINT ID constraint_esp '
reporte_bnf.append("<cuerpo_createTable> ::= CONSTRAINT ID <constraint_esp>")
if t[3][0] == 'CHECK':
t[0] = definicion_constraint(t[2], t[3][0], None, None ,t[3][1])
elif t[3][0] == 'UNIQUE':
t[0] = definicion_constraint(t[2], t[3][0], None, None ,t[3][1])
elif t[3][0] == 'FOREIGN':
t[0] = definicion_constraint(t[2], t[3][0], t[3][2], t[3][1] ,t[3][3])
#?######################################################
# TODO DATA TYPE
#?######################################################
def p_tipo_dato_text(t):
' TIPO_DATO : TEXT'
reporte_bnf.append("<TIPO_DATO> ::= TEXT")
temp = []
temp.append(ExpresionIdentificador(TIPO_VALOR.IDENTIFICADOR,t[1]))
t[0] = temp
def p_tipo_dato_float(t):
' TIPO_DATO : FLOAT'
reporte_bnf.append("<TIPO_DATO> ::= FLOAT")
temp = []
temp.append(ExpresionIdentificador(TIPO_VALOR.IDENTIFICADOR,t[1]))
t[0] = temp
def p_tipo_dato_integer(t):
' TIPO_DATO : INTEGER'
reporte_bnf.append("<TIPO_DATO> ::= INTEGER")
temp = []
temp.append(ExpresionIdentificador(TIPO_VALOR.IDENTIFICADOR,t[1]))
t[0] = temp
def p_tipo_dato_BOOLEAN(t):
' TIPO_DATO : BOOLEAN'
reporte_bnf.append("<TIPO_DATO> ::= BOOLEAN")
temp = []
temp.append(ExpresionIdentificador(TIPO_VALOR.IDENTIFICADOR,t[1]))
t[0] = temp
def p_tipo_dato_smallint(t):
' TIPO_DATO : SMALLINT'
reporte_bnf.append("<TIPO_DATO> ::= SMALLINT")
temp = []
temp.append(ExpresionIdentificador(TIPO_VALOR.IDENTIFICADOR,t[1]))
t[0] = temp
def p_tipo_dato_money(t):
' TIPO_DATO : MONEY'
reporte_bnf.append("<TIPO_DATO> ::= MONEY")
temp = []
temp.append(ExpresionIdentificador(TIPO_VALOR.IDENTIFICADOR,t[1]))
t[0] = temp
def p_tipo_dato_decimal(t):
' TIPO_DATO : DECIMAL PAR_A ENTERO COMA ENTERO PAR_C'
reporte_bnf.append("<TIPO_DATO> ::= DECIMAL PAR_A ENTERO COMA ENTERO PAR_C")
temp = []
temp.append(t[1].upper())
temp.append(t[3])
temp.append(t[5])
t[0] = temp
def p_tipo_dato_numerico(t):
' TIPO_DATO : NUMERIC PAR_A ENTERO COMA ENTERO PAR_C'
reporte_bnf.append("<TIPO_DATO> ::= NUMERIC PAR_A ENTERO COMA ENTERO PAR_C")
temp = []
temp.append(t[1].upper())
temp.append(t[3])
temp.append(t[5])
t[0] = temp
def p_tipo_dato_bigint(t):
' TIPO_DATO : BIGINT'
reporte_bnf.append("<TIPO_DATO> ::= BIGINT")
temp = []
temp.append(ExpresionIdentificador(TIPO_VALOR.IDENTIFICADOR,t[1]))
t[0] = temp
def p_tipo_dato_real(t):
' TIPO_DATO : REAL'
reporte_bnf.append("<TIPO_DATO> ::= REAL")
temp = []
temp.append(ExpresionIdentificador(TIPO_VALOR.IDENTIFICADOR,t[1]))
t[0] = temp
def p_tipo_dato_double_precision(t):
' TIPO_DATO : DOUBLE PRECISION'
reporte_bnf.append("<TIPO_DATO> ::= DOUBLE PRECISION")
temp = []
temp.append(ExpresionIdentificador(TIPO_VALOR.IDENTIFICADOR,t[1]))
t[0] = temp
def p_tipo_dato_interval_to(t):
' TIPO_DATO : INTERVAL extract_time TO extract_time'
reporte_bnf.append("<TIPO_DATO> ::= INTERVAL <extract_time> TO <extract_time>")
temp = []
temp.append(t[1].upper())
temp.append(t[3].upper())
temp.append(t[2])
temp.append(t[4])
t[0] = temp
def p_tipo_dato_interval(t):
' TIPO_DATO : INTERVAL'
reporte_bnf.append("<TIPO_DATO> ::= INTERVAL")
temp = []
temp.append(ExpresionIdentificador(TIPO_VALOR.IDENTIFICADOR,t[1]))
t[0] = temp
def p_tipo_dato_time(t):
' TIPO_DATO : TIME'
reporte_bnf.append("<TIPO_DATO> ::= TIME")
temp = []
temp.append(ExpresionIdentificador(TIPO_VALOR.IDENTIFICADOR,t[1]))
t[0] = temp
def p_tipo_dato_interval_tsmp(t):
' TIPO_DATO : TIMESTAMP'
reporte_bnf.append("<TIPO_DATO> ::= TIMESTAMP")
temp = []
temp.append(ExpresionIdentificador(TIPO_VALOR.IDENTIFICADOR,t[1]))
t[0] = temp
def p_tipo_dato(t):
'TIPO_DATO : DATE'
reporte_bnf.append("<TIPO_DATO> ::= DATE")
temp = []
temp.append(ExpresionIdentificador(TIPO_VALOR.IDENTIFICADOR,t[1]))
t[0] = temp
def p_tipo_dato_character_varying(t):
' TIPO_DATO : CHARACTER VARYING PAR_A ENTERO PAR_C'
reporte_bnf.append("<TIPO_DATO> ::= CHARACTER VARYING PAR_A ENTERO PAR_C")
temp = []
temp.append(t[2].upper())
temp.append(t[3])
t[0] = temp
def p_tipo_dato_varchar(t):
' TIPO_DATO : VARCHAR PAR_A ENTERO PAR_C'
reporte_bnf.append("<TIPO_DATO> ::= VARCHAR PAR_A ENTERO PAR_C")
temp = []
temp.append(t[1].upper())
temp.append(t[3])
t[0] = temp
def p_tipo_dato_char(t):
' TIPO_DATO : CHAR PAR_A ENTERO PAR_C'
reporte_bnf.append("<TIPO_DATO> ::= CHAR PAR_A ENTERO PAR_C")
temp = []
temp.append(t[1].upper())
temp.append(t[3])
t[0] = temp
def p_tipo_dato_character(t):
' TIPO_DATO : CHARACTER PAR_A ENTERO PAR_C'
reporte_bnf.append("<TIPO_DATO> ::= CHARACTER PAR_A ENTERO PAR_C")
temp = []
temp.append(t[1].upper())
temp.append(t[3])
t[0] = temp
def p_tipo_dato_char_no_esp(t):
' TIPO_DATO : CHAR PAR_A PAR_C'
reporte_bnf.append("<TIPO_DATO> ::= CHAR PAR_A PAR_C")
temp = []
temp.append(ExpresionIdentificador(TIPO_VALOR.IDENTIFICADOR,t[1]))
t[0] = temp
def p_tipo_dato_character_no_esp(t):
' TIPO_DATO : CHARACTER PAR_A PAR_C'
reporte_bnf.append("<TIPO_DATO> ::= CHARACTER PAR_A PAR_C")
temp = []
temp.append(ExpresionIdentificador(TIPO_VALOR.IDENTIFICADOR,t[1]))
t[0] = temp
def p_extract_time(t):
' extract_time : YEAR'
reporte_bnf.append("<extract_time> ::= YEAR")
t[0] = ExpresionIdentificador(TIPO_VALOR.IDENTIFICADOR,t[1])
def p_extract_time1(t):
' extract_time : DAY'
reporte_bnf.append("<extract_time> ::= DAY")
t[0] = ExpresionIdentificador(TIPO_VALOR.IDENTIFICADOR,t[1])
def p_extract_time2(t):
' extract_time : MONTH'
reporte_bnf.append("<extract_time> ::= MONTH")
t[0] = ExpresionIdentificador(TIPO_VALOR.IDENTIFICADOR,t[1])
def p_extract_time3(t):
' extract_time : HOUR'
reporte_bnf.append("<extract_time> ::= HOUR")
t[0] = ExpresionIdentificador(TIPO_VALOR.IDENTIFICADOR,t[1])
def p_extract_time4(t):
' extract_time : MINUTE'
reporte_bnf.append("<extract_time> ::= MINUTE")
t[0] = ExpresionIdentificador(TIPO_VALOR.IDENTIFICADOR,t[1])
def p_extract_time5(t):
' extract_time : SECOND '
reporte_bnf.append("<extract_time> ::= SECOND")
t[0] = ExpresionIdentificador(TIPO_VALOR.IDENTIFICADOR,t[1])
def p_tipo_dato_text_DEF(t):
' TIPO_DATO_DEF : TEXT'
reporte_bnf.append("<TIPO_DATO_DEF> ::= TEXT")
t[0] = Etiqueta_tipo(TIPO_DE_DATOS.text_)
def p_tipo_dato_float_DEF(t):
' TIPO_DATO_DEF : FLOAT'
reporte_bnf.append("<TIPO_DATO_DEF> ::= FLOAT")
t[0] = Etiqueta_tipo(TIPO_DE_DATOS.float_)
def p_tipo_dato_integer_DEF(t):
' TIPO_DATO_DEF : INTEGER'
reporte_bnf.append("<TIPO_DATO_DEF> ::= INTEGER")
t[0] = Etiqueta_tipo(TIPO_DE_DATOS.integer_)
def p_tipo_dato_boolean_DEF(t):
' TIPO_DATO_DEF : BOOLEAN'
reporte_bnf.append("<TIPO_DATO_DEF> ::= BOOLEAN")
t[0] = Etiqueta_tipo(TIPO_DE_DATOS.boolean)
def p_tipo_dato_smallint_DEF(t):
' TIPO_DATO_DEF : SMALLINT'
reporte_bnf.append("<TIPO_DATO_DEF> ::= SMALLINT")
t[0] = Etiqueta_tipo(TIPO_DE_DATOS.smallint_)
def p_tipo_dato_money_DEF(t):
' TIPO_DATO_DEF : MONEY'
reporte_bnf.append("<TIPO_DATO_DEF> ::= MONEY")
t[0] = Etiqueta_tipo(TIPO_DE_DATOS.money)
def p_tipo_dato_decimal_DEF(t):
' TIPO_DATO_DEF : DECIMAL PAR_A ENTERO COMA ENTERO PAR_C'
reporte_bnf.append("<TIPO_DATO_DEF> ::= DECIMAL PAR_A ENTERO COMA ENTERO PAR_C")
t[0] = ExpresionNumero(TIPO_DE_DATOS.decimal,t[3], t[5])
def p_tipo_dato_numerico_DEF(t):
' TIPO_DATO_DEF : NUMERIC PAR_A ENTERO COMA ENTERO PAR_C'
reporte_bnf.append("<TIPO_DATO_DEF> ::= NUMERIC PAR_A ENTERO COMA ENTERO PAR_C")
t[0] = ExpresionNumero(TIPO_DE_DATOS.numeric,t[3],t[5])
def p_tipo_dato_bigint_DEF(t):
' TIPO_DATO_DEF : BIGINT'
reporte_bnf.append("<TIPO_DATO_DEF> ::= BIGINT")
t[0] = Etiqueta_tipo(TIPO_DE_DATOS.bigint)
def p_tipo_dato_real_DEF(t):
' TIPO_DATO_DEF : REAL'
reporte_bnf.append("<TIPO_DATO_DEF> ::= REAL")
t[0] = Etiqueta_tipo(TIPO_DE_DATOS.real)
def p_tipo_dato_double_precision_DEF(t):
' TIPO_DATO_DEF : DOUBLE PRECISION'
reporte_bnf.append("<TIPO_DATO_DEF> ::= DOUBLE PRECISION")
t[0] = Etiqueta_tipo(TIPO_DE_DATOS.double_precision)
def p_tipo_dato_interval_to_DEF(t):
' TIPO_DATO_DEF : INTERVAL extract_time TO extract_time'
reporte_bnf.append("<TIPO_DATO_DEF> ::= INTERVAL <extract_time> TO <extract_time>")
t[0] = Etiqueta_Interval(t[2],t[4], TIPO_DE_DATOS.interval)
def p_tipo_dato_interval_DEF(t):
' TIPO_DATO_DEF : INTERVAL'
reporte_bnf.append("<TIPO_DATO_DEF> ::= INTERVAL")
t[0] = ExpresionTiempo(OPERACION_TIEMPO.YEAR)
def p_tipo_dato_time_DEF(t):
' TIPO_DATO_DEF : TIME'
reporte_bnf.append("<TIPO_DATO_DEF> ::= TIME")
t[0] = Etiqueta_tipo(TIPO_DE_DATOS.time)
def p_tipo_dato_interval_tsmp_DEF(t):
' TIPO_DATO_DEF : TIMESTAMP'
reporte_bnf.append("<TIPO_DATO_DEF> ::= TIMESTAMP")
t[0] = Etiqueta_tipo(TIPO_DE_DATOS.timestamp)
def p_tipo_dato_DEF(t):
'TIPO_DATO_DEF : DATE'
reporte_bnf.append("<TIPO_DATO_DEF> ::= DATE")
t[0] = Etiqueta_tipo(TIPO_DE_DATOS.date)
def p_tipo_dato_character_varying_DEF(t):
' TIPO_DATO_DEF : CHARACTER VARYING PAR_A ENTERO PAR_C'
reporte_bnf.append("<TIPO_DATO_DEF> ::= CHARACTER VARYING PAR_A ENTERO PAR_C")
t[0] = Expresion_Caracter(TIPO_DE_DATOS.varying, t[4])
def p_tipo_dato_varchar_DEF(t):
' TIPO_DATO_DEF : VARCHAR PAR_A ENTERO PAR_C'
reporte_bnf.append("<TIPO_DATO_DEF> ::= VARCHAR PAR_A ENTERO PAR_C")
t[0] = Expresion_Caracter(TIPO_DE_DATOS.varchar,t[3])
def p_tipo_dato_char_DEF(t):
' TIPO_DATO_DEF : CHAR PAR_A ENTERO PAR_C'
reporte_bnf.append("<TIPO_DATO_DEF> ::= CHAR PAR_A ENTERO PAR_C")
t[0] = Expresion_Caracter(TIPO_DE_DATOS.char,t[3])
def p_tipo_dato_character_DEF(t):
' TIPO_DATO_DEF : CHARACTER PAR_A ENTERO PAR_C'
reporte_bnf.append("<TIPO_DATO_DEF> ::= CHARACTER PAR_A ENTERO PAR_C")
t[0] = Expresion_Caracter(TIPO_DE_DATOS.character,t[3])
def p_tipo_dato_char_no_esp_DEF(t):
' TIPO_DATO_DEF : CHAR PAR_A PAR_C'
reporte_bnf.append("<TIPO_DATO_DEF> ::= CHAR PAR_A PAR_C")
t[0] = Etiqueta_tipo(TIPO_DE_DATOS.char)
def p_tipo_dato_character_no_esp_DEF(t):
' TIPO_DATO_DEF : CHARACTER PAR_A PAR_C'
reporte_bnf.append("<TIPO_DATO_DEF> ::= CHARACTER PAR_A PAR_C")
t[0] = Etiqueta_tipo(TIPO_DE_DATOS.character)
#?######################################################
# TODO SELECT STATEMENT
#?######################################################
def p_instruccion_select_insrt(t):
' select_insrt : SELECT opcion_select_tm'
reporte_bnf.append("<select_insrt> ::= SELECT <opcion_select_tm>")
t[0] = t[2]
def p_instruccion_select_insrt_union(t):
''' select_uniones : select_uniones tipo_union select_insrt'''
reporte_bnf.append("<select_uniones> ::= <select_uniones> <tipo_union> <select_insrt>")
temp = []
if t[2].upper() == 'UNION':
temp.append(OPCIONES_UNIONES.UNION)
t[1].append(t[3])
temp.append(t[1])
elif t[2].upper() == 'INTERSECT':
temp.append(OPCIONES_UNIONES.INTERSECT)
t[1].append(t[3])
temp.append(t[1])
elif t[2].upper() == 'EXCEPT':
temp.append(OPCIONES_UNIONES.EXCEPTS)
t[1].append(t[3])
temp.append(t[1])
t[0] = temp
def p_instruccion_select_insrt_union_ALL(t):
''' select_uniones : select_uniones tipo_union ALL select_insrt'''
reporte_bnf.append("<select_uniones> ::= <select_uniones> <tipo_union> ALL <select_insrt>")
temp = []
if t[2].upper() == 'UNION':
temp.append(OPCIONES_UNIONES.UNION_ALL)
t[1].append(t[4])
temp.append(t[1])
elif t[2].upper() == 'INTERSECT':
temp.append(OPCIONES_UNIONES.INTERSECT_ALL)
t[1].append(t[4])
temp.append(t[1])
elif t[2].upper() == 'EXCEPT':
temp.append(OPCIONES_UNIONES.EXCEPTS_ALL)
t[1].append(t[4])
temp.append(t[1])
t[0] = temp
def p_instruccion_select_insrt_union2(t):
' select_uniones : select_insrt '
reporte_bnf.append("<select_uniones> ::= <select_insrt>")
t[0] = [t[1]]
def p_instruccion_select_uniones(t):
' tipo_union : UNION'
reporte_bnf.append("<tipo_union> ::= UNION")
t[0] = t[1]
def p_instruccion_select_uniones1(t):
' tipo_union : INTERSECT'
reporte_bnf.append("<tipo_union> ::= INTERSECT")
t[0] = t[1]
def p_instruccion_select_uniones2(t):
' tipo_union : EXCEPT'
reporte_bnf.append("<tipo_union> ::= EXCEPT")
t[0] = t[1]
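# Illustrative note (comment only; the sample query is an assumed input, not taken from
# the grammar itself): for  SELECT a FROM t1 UNION SELECT b FROM t2;  the production
# <select_uniones> first reduces the left query to a one-element list and the recursive
# rule then yields [OPCIONES_UNIONES.UNION, [select_1, select_2]]. UNION ALL, INTERSECT
# and EXCEPT produce the same shape with their respective OPCIONES_UNIONES members.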
def p_opcion_select_tm3(t):
'opcion_select_tm : greatest_insrt' #YA ESTA
reporte_bnf.append("<opcion_select_tm> ::= <greatest_insrt>")
t[0] = t[1]
def p_select_lista(t):
' opcion_select_lista : DISTINCT campos_c '
reporte_bnf.append("<opcion_select_lista> ::= DISTINCT <campos_c>")
t[0] = Create_select_uno(OPCIONES_SELECT.DISTINCT,None,None,None,None,t[2],None) #YA ESTA
def p_select_lista2(t):
' opcion_select_lista : opciones_select_lista'
reporte_bnf.append("<opcion_select_lista> ::= <opciones_select_lista>")
t[0] = Create_select_uno(OPCIONES_SELECT.SUBCONSULTA,None,None,None,t[1],None,None)
def p_opciones_select_lista(t):
''' opciones_select_lista : opciones_select_lista COMA opcion_select '''
reporte_bnf.append("<opciones_select_lista> ::= <opciones_select_lista> COMA <opcion_select>")
t[1].append(t[3])
t[0] = t[1]
def p_opciones_select_lista2(t):
' opciones_select_lista : opcion_select'
reporte_bnf.append("<opciones_select_lista> ::= <opcion_select>")
t[0] = [t[1]]
def p_opcion_select_tm1(t):
'opcion_select_tm : opcion_select_lista FROM opciones_sobrenombres '
reporte_bnf.append("<opcion_select_tm> ::= <opcion_select_lista> FROM <opciones_sobrenombres>")
t[0] = Create_select_general(OPCIONES_SELECT.SELECT,t[1],None,None,None,t[3])
def p_opcion_select_tm2(t):
'opcion_select_tm : opcion_select_lista FROM opciones_sobrenombres opcion_from '
reporte_bnf.append("<opcion_select_tm> ::= <opcion_select_lista> FROM <opciones_sobrenombres> <opcion_from>")
t[0] = Create_select_general(OPCIONES_SELECT.SELECT,t[1],t[4],None,None,t[3])
def p_opciones_sobrenombre(t):
'''opciones_sobrenombres : opciones_sobrenombres COMA opcion_sobrenombre '''
reporte_bnf.append("<opciones_sobrenombres> ::= <opciones_sobrenombres> COMA <opcion_sobrenombre>")
t[1].append(t[3])
t[0] = t[1]
def p_opciones_sobrenombre2(t):
' opciones_sobrenombres : opcion_sobrenombre '
reporte_bnf.append("<opciones_sobrenombres> ::= <opcion_sobrenombre>")
t[0] = [t[1]]
def p_opcion_select_tm_op1(t):
'opcion_select_tm : opcion_select_lista seguir_sobrenombre FROM otros_froms '
reporte_bnf.append("<opcion_select_tm> ::= <opcion_select_lista> <seguir_sobrenombre> FROM <otros_froms>")
    t[0] = Create_select_general(OPCIONES_SELECT.SELECT,None,t[1],t[2],t[4],None)
def p_otros_from(t):
'otros_froms : otros_froms COMA otro_from'
reporte_bnf.append("<otros_froms> ::= <otros_froms> COMA <otro_from>")
t[1].append(t[3])
t[0] = t[1]
def p_otros_from2(t):
'otros_froms : otro_from'
reporte_bnf.append("<otros_froms> ::= <otro_from>")
t[0] = [t[1]]
def p_opcion_select_tm(t):
'opcion_select_tm : opcion_select_lista FROM opciones_from opcion_from'
reporte_bnf.append("<opcion_select_tm> ::= <opcion_select_lista> FROM <opciones_from> <opcion_from>")
t[0] = Create_select_general(OPCIONES_SELECT.SELECT,t[1],None,t[4],t[3],None)
def p_opciones_from(t):
'''opciones_from : opciones_from COMA from_s'''
reporte_bnf.append("<opciones_from> ::= <opciones_from> COMA <from_s>")
t[1].append(t[3])
t[0] = t[1]
def p_opciones_from2(t):
'opciones_from : from_s'
reporte_bnf.append("<opciones_from> ::= <from_s>")
t[0] = [t[1]]
def p_ins_1(t):
'opcion_select_tm : varias_funciones'
reporte_bnf.append("<opcion_select_tm> ::= <varias_funciones>")
t[0] = Create_select_general(OPCIONES_SELECT.SELECT,None,None,None,None,t[1])
def p_varias_funciones(t):
'varias_funciones : varias_funciones COMA funcion'
reporte_bnf.append("<varias_funciones> ::= <varias_funciones> COMA <funcion>")
t[1].append(t[3])
t[0] = t[1]
def p_varias_funciones1(t):
'varias_funciones : funcion'
reporte_bnf.append("<varias_funciones> ::= <funcion>")
t[0] = [t[1]]
def p_funcion(t):
'funcion : funciones_select seguir_sobrenombre'
reporte_bnf.append("<funcion> ::= <funciones_select> <seguir_sobrenombre>")
t[0] = Create_select_uno(OPCIONES_SELECT.FUNCIONES,None,t[1],t[2],None,None,None)
def p_funcion1(t):
'funcion : funciones_select'
reporte_bnf.append("<funcion> ::= <funciones_select>")
t[0] = Create_select_uno(OPCIONES_SELECT.FUNCIONES,None,t[1],None,None,None,None)
def p_opcion_select_tm_op2(t):
'''otro_from : from_s '''
reporte_bnf.append("<otro_from> ::= <from_s>")
t[0] = Create_select_general(OPCIONES_SELECT.SELECT,t[1],None,None,None,None)
def p_opcion_select_tm_op3(t):
'otro_from : from_s opcion_from'
reporte_bnf.append("<otro_from> ::= <from_s><opcion_from>")
t[0] = Create_select_general(OPCIONES_SELECT.SELECT,t[1],t[2],None,None,None)
def p_opcion_s(t):
''' from_s : ID'''
reporte_bnf.append("<from_s> ::= ID")
t[0] = ExpresionIdentificador(TIPO_VALOR.IDENTIFICADOR,t[1])
def p_opcion_s2(t):
' from_s : PAR_A'
reporte_bnf.append("<from_s> ::= PAR_A")
t[0] = t[1]
def p_sobre_Nombre(t):
''' opcion_sobrenombre : ID seguir_sobrenombre'''
reporte_bnf.append("<opcion_sobrenombre> ::= ID <seguir_sobrenombre>")
if t[2][0] == TIPO_VALOR.AS_ID:
t[0] = ExpresionIdentificadorDoble(t[2][0],t[1],t[2][1])
elif t[2][0] == TIPO_VALOR.DOBLE:
t[0] = ExpresionIdentificadorDoble(t[2][0],t[1],t[2][1])
else:
t[0] = ExpresionIdentificadorDoble(TIPO_VALOR.IDENTIFICADOR,t[1],t[2])
def p_sobre_Nombre2(t):
' opcion_sobrenombre : ID '
reporte_bnf.append("<opcion_sobrenombre> ::= ID")
t[0] = ExpresionIdentificadorDoble(TIPO_VALOR.IDENTIFICADOR,t[1],None)
def p_as_ID(t):
''' as_ID : ID '''
reporte_bnf.append("<as_ID> ::= ID")
t[0] = ExpresionIdentificador(TIPO_VALOR.IDENTIFICADOR,t[1])
def p_as_ID2(t):
'as_ID : CADENA'
reporte_bnf.append("<as_ID> ::= CADENA")
t[0] = ExpresionComillaSimple(TIPO_VALOR.NUMERO,t[1])
#---------------------------------------------------------
def p_alias(t):
''' seguir_sobrenombre : AS as_ID'''
reporte_bnf.append("<seguir_sobrenombre> ::= AS <as_ID>")
temp = []
temp.append(TIPO_VALOR.AS_ID)
temp.append(t[2])
t[0] = temp
def p_alias2(t):
'seguir_sobrenombre : ID'
reporte_bnf.append("<seguir_sobrenombre> ::= ID")
t[0] = t[1]
def p_alias3(t):
'seguir_sobrenombre : PUNTO ID'
reporte_bnf.append("<seguir_sobrenombre> ::= PUNTO ID")
temp = []
temp.append(TIPO_VALOR.DOBLE)
temp.append(t[2])
t[0] = temp
def p_opcion_select_tm_extract(t):
'opcion_select_tm : EXTRACT PAR_A extract_time FROM TIMESTAMP CADENA PAR_C '
reporte_bnf.append("<opcion_select_tm> ::= EXTRACT PAR_A <extract_time> FROM TIMESTAMP CADENA PAR_C")
t[0] = Create_select_time(SELECT_TIME.EXTRACT,t[3],t[6])
def p_opcion_select_tm_date(t):
'opcion_select_tm : DATE_PART PAR_A CADENA COMA INTERVAL CADENA PAR_C '
reporte_bnf.append("<opcion_select_tm> ::= DATE_PART PAR_A CADENA COMA INTERVAL CADENA PAR_C")
t[0] = Create_select_time(SELECT_TIME.DATE_PART,t[3],t[6])
def p_opcion_select_tm_now(t):
'opcion_select_tm : NOW PAR_A PAR_C '
reporte_bnf.append("<opcion_select_tm> ::= NOW PAR_A PAR_C")
t[0] = Create_select_time(SELECT_TIME.NOW,None,None)
def p_opcion_select_tm_current(t):
'opcion_select_tm : CURRENT_DATE '
reporte_bnf.append("<opcion_select_tm> ::= CURRENT_DATE")
t[0] = Create_select_time(SELECT_TIME.CURRENT_DATE,None,None)
def p_opcion_select_tm_crtm(t):
'opcion_select_tm : CURRENT_TIME '
reporte_bnf.append("<opcion_select_tm> ::= CURRENT_TIME")
t[0] = Create_select_time(SELECT_TIME.CURRENT_TIME,None,None)
def p_opcion_select_tm_timestamp(t):
'opcion_select_tm : TIMESTAMP CADENA '
reporte_bnf.append("<opcion_select_tm> ::= TIMESTAMP CADENA")
t[0] = Create_select_time(SELECT_TIME.TIMESTAMP,t[2],None)
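# Illustrative examples (comments only; sample inputs are assumptions for documentation):
#   SELECT EXTRACT(HOUR FROM TIMESTAMP '2001-02-16 20:38:40');   -- SELECT_TIME.EXTRACT
#   SELECT DATE_PART('hours', INTERVAL '4 hours 3 minutes');     -- SELECT_TIME.DATE_PART
#   SELECT NOW();  SELECT CURRENT_DATE;  SELECT CURRENT_TIME;    -- no operands stored
# each form reduces to a Create_select_time node with the matching SELECT_TIME member,
# assuming <extract_time> accepts the HOUR field used in the first example.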
#def p_opcion_select_tm_extract(t):
# 'opcion_select_tm : tiempo'
# t[0] = t[1]
#def p_tiempo(t):
# '''tiempo : EXTRACT PAR_A extract_time FROM string_type PAR_C
# | DATE_PART PAR_A CADENA COMA INTERVAL CADENA PAR_C
# | NOW PAR_A PAR_C
# | CURRENT_DATE
# | CURRENT_TIME
# | TIMESTAMP CADENA '''
#?######################################################
# TODO OFFSET
#?######################################################
def p_opcion_from_0_0_1_1_1_1_1_0(t):
'opcion_from : cond_where cond_gb cond_having cond_ob orden cond_limit cond_offset'
reporte_bnf.append("<opcion_from> ::= <cond_where> <cond_gb> <cond_having> <cond_ob> <orden> <cond_limit> <cond_offset>")
t[0] = Create_padre_select(t[1],t[2],t[3],t[4],t[5],t[6],t[7],None)
def p_opcion_from_0_0_0_1_1_1_1_0(t):
'opcion_from : cond_gb cond_having cond_ob orden cond_limit cond_offset'
reporte_bnf.append("<opcion_from> ::= <cond_gb> <cond_having> <cond_ob> <orden> <cond_limit> <cond_offset>")
t[0] = Create_padre_select(None,t[1],t[2],t[3],t[4],t[5],t[6],None)
def p_opcion_from_0_0_1_0_1_1_1_0(t):
'opcion_from : cond_where cond_having cond_ob orden cond_limit OFFSET ENTERO'
reporte_bnf.append("<opcion_from> ::= <cond_where> <cond_having> <cond_ob> <orden> <cond_limit> OFFSET ENTERO")
t[0] = Create_padre_select(t[1],None,t[2],t[3],t[4],t[5],None,t[7])
def p_opcion_from_0_0_0_0_1_1_1_0(t):
'opcion_from : cond_having cond_ob orden cond_limit cond_offset'
reporte_bnf.append("<opcion_from> ::= <cond_having> <cond_ob> <orden> <cond_limit> <cond_offset>")
t[0] = Create_padre_select(None,None,t[1],t[2],t[3],t[4],t[5],None)
def p_opcion_from_0_0_1_1_0_1_1_0(t):
'opcion_from : cond_where cond_gb cond_ob orden cond_limit cond_offset'
reporte_bnf.append("<opcion_from> ::= <cond_where> <cond_gb> <cond_ob> <orden> <cond_limit> <cond_offset>")
t[0] = Create_padre_select(t[1],t[2],None,t[3],t[4],t[5],t[6],None)
def p_opcion_from_0_0_0_1_0_1_1_0(t):
'opcion_from : cond_gb cond_ob orden cond_limit cond_offset'
reporte_bnf.append("<opcion_from> ::= <cond_gb> <cond_ob> <orden> <cond_limit> <cond_offset>")
t[0] = Create_padre_select(None,t[1],None,t[2],t[3],t[4],t[5],None)
def p_opcion_from_0_0_1_0_0_1_1_0(t):
'opcion_from : cond_where cond_ob orden cond_limit cond_offset'
reporte_bnf.append("<opcion_from> ::= <cond_where> <cond_ob> <orden> <cond_limit> <cond_offset>")
t[0] = Create_padre_select(t[1],None,None,t[2],t[3],t[4],t[5],None)
def p_opcion_from_0_0_0_0_0_1_1_0(t):
'opcion_from : cond_ob orden cond_limit cond_offset'
reporte_bnf.append("<opcion_from> ::= <cond_ob> <orden> <cond_limit> <cond_offset>")
t[0] = Create_padre_select(None,None,None,t[1],t[2],t[3],t[4],None)
def p_opcion_from_0_0_1_1_1_1_1_0_ordeno(t):
'opcion_from : cond_where cond_gb cond_having cond_ob cond_limit cond_offset'
reporte_bnf.append("<opcion_from> ::= <cond_where> <cond_gb> <cond_having> <cond_ob> <cond_limit> <cond_offset>")
t[0] = Create_padre_select(t[1],t[2],t[3],t[4],None,t[5],t[6],None)
def p_opcion_from_0_0_0_1_1_1_1_0_ordeno(t):
'opcion_from : cond_gb cond_having cond_ob cond_limit cond_offset'
reporte_bnf.append("<opcion_from> ::= <cond_gb> <cond_having> <cond_ob> <cond_limit> <cond_offset>")
t[0] = Create_padre_select(None,t[1],t[2],t[3],None,t[4],t[5],None)
def p_opcion_from_0_0_1_0_1_1_1_0_ordeno(t):
'opcion_from : cond_where cond_having cond_ob cond_limit cond_offset'
reporte_bnf.append("<opcion_from> ::= <cond_where> <cond_having> <cond_ob> <cond_limit> <cond_offset>")
t[0] = Create_padre_select(t[1],None,t[2],t[3],None,t[4],t[5],None)
def p_opcion_from_0_0_0_0_1_1_1_0_ordeno(t):
'opcion_from : cond_having cond_ob cond_limit cond_offset'
reporte_bnf.append("<opcion_from> ::= <cond_having> <cond_ob> <cond_limit> <cond_offset>")
t[0] = Create_padre_select(None,None,t[1],t[2],None,t[3],t[4],None)
def p_opcion_from_0_0_1_1_0_1_1_0_ordeno(t):
'opcion_from : cond_where cond_gb cond_ob cond_limit cond_offset'
reporte_bnf.append("<opcion_from> ::= <cond_where> <cond_gb> <cond_ob> <cond_limit> <cond_offset>")
t[0] = Create_padre_select(t[1],t[2],None,t[3],None,t[4],t[5],None)
def p_opcion_from_0_0_0_1_0_1_1_0_ordeno(t):
'opcion_from : cond_gb cond_ob cond_limit cond_offset'
reporte_bnf.append("<opcion_from> ::= <cond_gb> <cond_ob> <cond_limit> <cond_offset>")
t[0] = Create_padre_select(None,t[1],None,t[2],None,t[3],t[4],None)
def p_opcion_from_0_0_1_0_0_1_1_0_ordeno(t):
'opcion_from : cond_where cond_ob cond_limit cond_offset'
reporte_bnf.append("<opcion_from> ::= <cond_where> <cond_ob> <cond_limit> <cond_offset>")
t[0] = Create_padre_select(t[1],None,None,t[2],None,t[3],t[4],None)
def p_opcion_from_0_0_0_0_0_1_1_0_ordeno(t):
'opcion_from : cond_ob cond_limit cond_offset'
reporte_bnf.append("<opcion_from> ::= <cond_ob> <cond_limit> <cond_offset>")
t[0] = Create_padre_select(None,None,None,t[1],None,t[2],t[3],None)
def p_opcion_from_0_0_1_1_1_0_1_0(t):
'opcion_from : cond_where cond_gb cond_having cond_limit cond_offset'
reporte_bnf.append("<opcion_from> ::= <cond_where> <cond_gb> <cond_having> <cond_limit> <cond_offset>")
t[0] = Create_padre_select(t[1],t[2],t[3],None,None,t[4],t[5],None)
def p_opcion_from_0_0_0_1_1_0_1_0(t):
'opcion_from : cond_gb cond_having cond_limit cond_offset'
reporte_bnf.append("<opcion_from> ::= <cond_gb> <cond_having> <cond_limit> <cond_offset>")
t[0] = Create_padre_select(None,t[1],t[2],None,None,t[3],t[4],None)
def p_opcion_from_0_0_1_0_1_0_1_0(t):
'opcion_from : cond_where cond_having cond_limit cond_offset'
reporte_bnf.append("<opcion_from> ::= <cond_where> <cond_having> <cond_limit> <cond_offset>")
t[0] = Create_padre_select(t[1],None,t[2],None,None,t[3],t[4],None)
def p_opcion_from_0_0_0_0_1_0_1_0(t):
'opcion_from : cond_having cond_limit cond_offset'
reporte_bnf.append("<opcion_from> ::= <cond_having> <cond_limit> <cond_offset>")
t[0] = Create_padre_select(None,None,t[1],None,None,t[2],t[3],None)
def p_opcion_from_0_0_1_1_0_0_1_0(t):
'opcion_from : cond_where cond_gb cond_limit cond_offset'
reporte_bnf.append("<opcion_from> ::= <cond_where> <cond_gb> <cond_limit> <cond_offset>")
t[0] = Create_padre_select(t[1],t[2],None,None,None,t[3],t[4],None)
def p_opcion_from_0_0_0_1_0_0_1_0(t):
'opcion_from : cond_gb cond_limit cond_offset'
reporte_bnf.append("<opcion_from> ::= <cond_gb> <cond_limit> <cond_offset>")
t[0] = Create_padre_select(None,t[1],None,None,None,t[2],t[3],None)
def p_opcion_from_0_0_1_0_0_0_1_0(t):
'opcion_from : cond_where cond_limit cond_offset'
reporte_bnf.append("<opcion_from> ::= <cond_where> <cond_limit> <cond_offset>")
t[0] = Create_padre_select(t[1],None,None,None,None,t[2],t[3],None)
def p_opcion_from_0_0_0_0_0_0_1_0(t):
'opcion_from : cond_limit cond_offset'
reporte_bnf.append("<opcion_from> ::= <cond_limit> <cond_offset>")
t[0] = Create_padre_select(None,None,None,None,None,t[1],t[2],None)
def p_opcion_from_0_0_1_1_1_1_1_0_offno(t):
'opcion_from : cond_where cond_gb cond_having cond_ob orden cond_limit'
reporte_bnf.append("<opcion_from> ::= <cond_where> <cond_gb> <cond_having> <cond_ob> <orden> <cond_limit>")
t[0] = Create_padre_select(t[1],t[2],t[3],t[4],t[5],t[6],None,None)
def p_opcion_from_0_0_0_1_1_1_1_0_offno(t):
'opcion_from : cond_gb cond_having cond_ob orden cond_limit'
reporte_bnf.append("<opcion_from> ::= <cond_gb> <cond_having> <cond_ob> <orden> <cond_limit>")
t[0] = Create_padre_select(None,t[1],t[2],t[3],t[4],t[5],None,None)
def p_opcion_from_0_0_1_0_1_1_1_0_offno(t):
'opcion_from : cond_where cond_having cond_ob orden cond_limit'
reporte_bnf.append("<opcion_from> ::= <cond_where> <cond_having> <cond_ob> <orden> <cond_limit>")
t[0] = Create_padre_select(t[1],None,t[2],t[3],t[4],t[5],None,None)
def p_opcion_from_0_0_0_0_1_1_1_0_offno(t):
'opcion_from : cond_having cond_ob orden cond_limit'
reporte_bnf.append("<opcion_from> ::= <cond_having> <cond_ob> <orden> <cond_limit>")
t[0] = Create_padre_select(None,None,t[1],t[2],t[3],t[4],None,None)
def p_opcion_from_0_0_1_1_0_1_1_0_offno(t):
'opcion_from : cond_where cond_gb cond_ob orden cond_limit'
reporte_bnf.append("<opcion_from> ::= <cond_where> <cond_gb> <cond_ob> <orden> <cond_limit>")
t[0] = Create_padre_select(t[1],t[2],None,t[3],t[4],t[5],None,None)
def p_opcion_from_0_0_0_1_0_1_1_0_offno(t):
'opcion_from : cond_gb cond_ob orden cond_limit'
reporte_bnf.append("<opcion_from> ::= <cond_gb> <cond_ob> <orden> <cond_limit>")
t[0] = Create_padre_select(None,t[1],None,t[2],t[3],t[4],None,None)
def p_opcion_from_0_0_1_0_0_1_1_0_offno(t):
'opcion_from : cond_where cond_ob orden cond_limit'
reporte_bnf.append("<opcion_from> ::= <cond_where> <cond_ob> <orden> <cond_limit>")
t[0] = Create_padre_select(t[1],None,None,t[2],t[3],t[4],None,None)
def p_opcion_from_0_0_0_0_0_1_1_0_offno(t):
'opcion_from : cond_ob orden cond_limit'
reporte_bnf.append("<opcion_from> ::= <cond_ob> <orden> <cond_limit>")
t[0] = Create_padre_select(None,None,None,t[1],t[2],t[3],None,None)
def p_opcion_from_0_0_1_1_1_1_1_0_offno_ordeno(t):
'opcion_from : cond_where cond_gb cond_having cond_ob cond_limit'
reporte_bnf.append("<opcion_from> ::= <cond_where> <cond_gb> <cond_having> <cond_ob> <cond_limit>")
t[0] = Create_padre_select(t[1],t[2],t[3],t[4],None,t[5],None,None)
def p_opcion_from_0_0_0_1_1_1_1_0_offno_ordeno(t):
'opcion_from : cond_gb cond_having cond_ob cond_limit'
reporte_bnf.append("<opcion_from> ::= <cond_gb> <cond_having> <cond_ob> <cond_limit>")
t[0] = Create_padre_select(None,t[1],t[2],t[3],None,t[4],None,None)
def p_opcion_from_0_0_1_0_1_1_1_0_offno_ordeno(t):
'opcion_from : cond_where cond_having cond_ob cond_limit'
reporte_bnf.append("<opcion_from> ::= <cond_where> <cond_having> <cond_ob> <cond_limit>")
t[0] = Create_padre_select(t[1],None,t[2],t[3],None,t[4],None,None)
def p_opcion_from_0_0_0_0_1_1_1_0_offno_ordeno(t):
'opcion_from : cond_having cond_ob cond_limit'
reporte_bnf.append("<opcion_from> ::= <cond_having> <cond_ob> <cond_limit>")
t[0] = Create_padre_select(None,None,t[1],t[2],None,t[3],None,None)
def p_opcion_from_0_0_1_1_0_1_1_0_offno_ordeno(t):
'opcion_from : cond_where cond_gb cond_ob cond_limit'
reporte_bnf.append("<opcion_from> ::= <cond_where> <cond_gb> <cond_ob> <cond_limit>")
t[0] = Create_padre_select(t[1],t[2],None,t[3],None,t[4],None,None)
def p_opcion_from_0_0_0_1_0_1_1_0_offno_ordeno(t):
'opcion_from : cond_gb cond_ob cond_limit'
reporte_bnf.append("<opcion_from> ::= <cond_gb> <cond_ob> <cond_limit>")
t[0] = Create_padre_select(None,t[1],None,t[2],None,t[3],None,None)
def p_opcion_from_0_0_1_0_0_1_1_0_offno_ordeno(t):
'opcion_from : cond_where cond_ob cond_limit'
reporte_bnf.append("<opcion_from> ::= <cond_where> <cond_ob> <cond_limit>")
t[0] = Create_padre_select(t[1],None,None,t[2],None,t[3],None,None)
def p_opcion_from_0_0_0_0_0_1_1_0_offno_ordeno(t):
'opcion_from : cond_ob cond_limit'
reporte_bnf.append("<opcion_from> ::= <cond_ob> <cond_limit>")
t[0] = Create_padre_select(None,None,None,t[1],None,t[2],None,None)
def p_opcion_from_0_0_1_1_1_0_1_0_offno(t):
'opcion_from : cond_where cond_gb cond_having cond_limit'
reporte_bnf.append("<opcion_from> ::= <cond_where> <cond_gb> <cond_having> <cond_limit>")
t[0] = Create_padre_select(t[1],t[2],t[3],None,None,t[4],None,None)
def p_opcion_from_0_0_0_1_1_0_1_0_offno(t):
'opcion_from : cond_gb cond_having cond_limit'
reporte_bnf.append("<opcion_from> ::= <cond_gb> <cond_having> <cond_limit>")
t[0] = Create_padre_select(None,t[1],t[2],None,None,t[3],None,None)
def p_opcion_from_0_0_1_0_1_0_1_0_offno(t):
'opcion_from : cond_where cond_having cond_limit'
reporte_bnf.append("<opcion_from> ::= <cond_where> <cond_having> <cond_limit>")
t[0] = Create_padre_select(t[1],None,t[2],None,None,t[3],None,None)
def p_opcion_from_0_0_0_0_1_0_1_0_offno(t):
'opcion_from : cond_having cond_limit'
reporte_bnf.append("<opcion_from> ::= <cond_having> <cond_limit>")
t[0] = Create_padre_select(None,None,t[1],None,None,t[2],None,None)
def p_opcion_from_0_0_1_1_0_0_1_0_offno(t):
'opcion_from : cond_where cond_gb cond_limit'
reporte_bnf.append("<opcion_from> ::= <cond_where> <cond_gb> <cond_limit>")
t[0] = Create_padre_select(t[1],t[2],None,None,None,t[3],None,None)
def p_opcion_from_0_0_0_1_0_0_1_0_offno(t):
'opcion_from : cond_gb cond_limit'
reporte_bnf.append("<opcion_from> ::= <cond_gb> <cond_limit>")
t[0] = Create_padre_select(None,t[1],None,None,None,t[2],None,None)
def p_opcion_from_0_0_1_0_0_0_1_0_offno(t):
'opcion_from : cond_where cond_limit'
reporte_bnf.append("<opcion_from> ::= <cond_where> <cond_limit>")
t[0] = Create_padre_select(t[1],None,None,None,None,t[2],None,None)
def p_opcion_from_0_0_0_0_0_0_1_0_offno(t):
'opcion_from : cond_limit'
reporte_bnf.append("<opcion_from> ::= <cond_limit>")
t[0] = Create_padre_select(None,None,None,None,None,t[1],None,None)
def p_opcion_from_0_0_1_1_1_1_0_0(t):
'opcion_from : cond_where cond_gb cond_having cond_ob orden'
reporte_bnf.append("<opcion_from> ::= <cond_where> <cond_gb> <cond_having> <cond_ob> <orden>")
t[0] = Create_padre_select(t[1],t[2],t[3],t[4],t[5],None,None,None)
def p_opcion_from_0_0_0_1_1_1_0_0(t):
'opcion_from : cond_gb cond_having cond_ob orden'
reporte_bnf.append("<opcion_from> ::= <cond_gb> <cond_having> <cond_ob> <orden>")
t[0] = Create_padre_select(None,t[1],t[2],t[3],t[4],None,None,None)
def p_opcion_from_0_0_1_0_1_1_0_0(t):
'opcion_from : cond_where cond_having cond_ob orden'
reporte_bnf.append("<opcion_from> ::= <cond_where> <cond_having> <cond_ob> <orden>")
t[0] = Create_padre_select(t[1],None,t[2],t[3],t[4],None,None,None)
def p_opcion_from_0_0_0_0_1_1_0_0(t):
'opcion_from : cond_having cond_ob orden'
reporte_bnf.append("<opcion_from> ::= <cond_having> <cond_ob> <orden>")
t[0] = Create_padre_select(None,None,t[1],t[2],t[3],None,None,None)
def p_opcion_from_0_0_1_1_0_1_0_0(t):
'opcion_from : cond_where cond_gb cond_ob orden'
reporte_bnf.append("<opcion_from> ::= <cond_where> <cond_gb> <cond_ob> <orden>")
t[0] = Create_padre_select(t[1],t[2],None,t[3],t[4],None,None,None)
def p_opcion_from_0_0_0_1_0_1_0_0(t):
'opcion_from : cond_gb cond_ob orden'
reporte_bnf.append("<opcion_from> ::= <cond_gb> <cond_ob> <orden>")
t[0] = Create_padre_select(None,t[1],None,t[2],t[3],None,None,None)
def p_opcion_from_0_0_1_0_0_1_0_0(t):
'opcion_from : cond_where cond_ob orden'
reporte_bnf.append("<opcion_from> ::= <cond_where> <cond_ob> <orden>")
t[0] = Create_padre_select(t[1],None,None,t[2],t[3],None,None,None)
def p_opcion_from_0_0_0_0_0_1_0_0(t):
'opcion_from : cond_ob'
reporte_bnf.append("<opcion_from> ::= <cond_ob>")
t[0] = Create_padre_select(None,None,None,t[1],None,None,None,None)
def p_opcion_from_0_0_1_1_1_1_0_0_ordeno(t):
'opcion_from : cond_where cond_gb cond_having cond_ob'
reporte_bnf.append("<opcion_from> ::= <cond_where> <cond_gb> <cond_having> <cond_ob>")
t[0] = Create_padre_select(t[1],t[2],t[3],t[4],None,None,None,None)
def p_opcion_from_0_0_0_1_1_1_0_0_ordeno(t):
'opcion_from : cond_gb cond_having cond_ob'
reporte_bnf.append("<opcion_from> ::= <cond_gb> <cond_having> <cond_ob>")
t[0] = Create_padre_select(None,t[1],t[2],t[3],None,None,None,None)
def p_opcion_from_0_0_1_0_1_1_0_0_ordeno(t):
'opcion_from : cond_where cond_having cond_ob'
reporte_bnf.append("<opcion_from> ::= <cond_where> <cond_having> <cond_ob>")
t[0] = Create_padre_select(t[1],None,t[2],t[3],None,None,None,None)
def p_opcion_from_0_0_0_0_1_1_0_0_ordeno(t):
'opcion_from : cond_having cond_ob'
reporte_bnf.append("<opcion_from> ::= <cond_having> <cond_ob>")
t[0] = Create_padre_select(None,None,t[1],t[2],None,None,None,None)
def p_opcion_from_0_0_1_1_0_1_0_0_ordeno(t):
'opcion_from : cond_where cond_gb cond_ob'
reporte_bnf.append("<opcion_from> ::= <cond_where> <cond_gb> <cond_ob>")
    t[0] = Create_padre_select(t[1],t[2],None,t[3],None,None,None,None)
def p_opcion_from_0_0_0_1_0_1_0_0_ordeno(t):
'opcion_from : cond_gb cond_ob'
reporte_bnf.append("<opcion_from> ::= <cond_gb> <cond_ob>")
t[0] = Create_padre_select(None,t[1],None,t[2],None,None,None,None)
def p_opcion_from_0_0_1_0_0_1_0_0_ordeno(t):
'opcion_from : cond_where cond_ob'
reporte_bnf.append("<opcion_from> ::= <cond_where> <cond_ob>")
t[0] = Create_padre_select(t[1],None,None,t[2],None,None,None,None)
#def p_opcion_from_0_0_0_0_0_1_0_0_ordeno(t):
# 'opcion_from : cond_ob'
def p_opcion_from_0_0_1_1_1_0_0_0(t):
'opcion_from : cond_where cond_gb cond_having'
reporte_bnf.append("<opcion_from> ::= <cond_where> <cond_gb> <cond_having>")
t[0] = Create_padre_select(t[1],t[2],t[3],None,None,None,None,None)
def p_opcion_from_0_0_0_1_1_0_0_0(t):
'opcion_from : cond_gb cond_having'
reporte_bnf.append("<opcion_from> ::= <cond_gb> <cond_having>")
t[0] = Create_padre_select(None,t[1],t[2],None,None,None,None,None)
def p_opcion_from_0_0_1_0_1_0_0_0(t):
'opcion_from : cond_where cond_having'
reporte_bnf.append("<opcion_from> ::= <cond_where> <cond_having>")
t[0] = Create_padre_select(t[1],None,t[2],None,None,None,None,None)
def p_opcion_from_0_0_0_0_1_0_0_0(t):
'opcion_from : cond_having'
reporte_bnf.append("<opcion_from> ::= <cond_having>")
t[0] = Create_padre_select(None,None,t[1],None,None,None,None,None)
def p_opcion_from_0_0_1_1_0_0_0_0(t):
'opcion_from : cond_where cond_gb '
reporte_bnf.append("<opcion_from> ::= <cond_where> <cond_gb>")
t[0] = Create_padre_select(t[1],t[2],None,None,None,None,None,None)
def p_opcion_from_0_0_0_1_0_0_0_0(t):
'opcion_from : cond_gb '
reporte_bnf.append("<opcion_from> ::= <cond_gb>")
t[0] = Create_padre_select(None,t[1],None,None,None,None,None,None)
def p_opcion_from_0_0_1_0_0_0_0_0(t):
'opcion_from : cond_where'
reporte_bnf.append("<opcion_from> ::= <cond_where>")
t[0] = Create_padre_select(t[1],None,None,None,None,None,None,None)
#? ####################################################################
# TODO OPCIONES DE FROM
#? ####################################################################
def p_opcion_from_2(t):
'opcion_from : select_insrt PAR_C ID '
reporte_bnf.append("<opcion_from> ::= <select_insrt> PAR_C ID")
t[0] = Create_hijo_select(OPCIONES_SELECT.SUBCONSULTA,t[1],t[3])
def p_opcion_from_3(t):
'opcion_from : select_insrt PAR_C'
reporte_bnf.append("<opcion_from> ::= <select_insrt> PAR_C")
t[0] = Create_hijo_select(OPCIONES_SELECT.SUBCONSULTA,t[1],None)
def p_cond_where(t):
'cond_where : WHERE expresion_where'
reporte_bnf.append("<cond_where> ::= WHERE <expresion_where>")
t[0] = Create_hijo_select(OPCIONES_SELECT.WHERE,t[2],None)
def p_cond_GB(t):
'cond_gb : GROUP BY campos_c '
reporte_bnf.append("<cond_gb> ::= GROUP BY <campos_c>")
t[0] = Create_hijo_select(OPCIONES_SELECT.GROUP_BY,t[3],None)
def p_cond_Having(t):
'cond_having : HAVING expresion_logica'
reporte_bnf.append("<cond_having> ::= HAVING <expresion_logica>")
    t[0] = Create_hijo_select(OPCIONES_SELECT.HAVING,t[2],None)
def p_cond_OB(t):
'cond_ob : ORDER BY campos_c' #######
reporte_bnf.append("<cond_ob> ::= ORDER BY <campos_c>")
t[0] = Create_hijo_select(OPCIONES_SELECT.ORDER_BY,t[3],None)
def p_cond_limit(t):
'cond_limit : LIMIT opc_lim'
reporte_bnf.append("<cond_limit> ::= LIMIT <opc_lim>")
t[0] = Create_hijo_select(OPCIONES_SELECT.LIMIT,t[2],None)
def p_cond_offset(t):
'cond_offset : OFFSET ENTERO'
reporte_bnf.append("<cond_offset> ::= OFFSET ENTERO")
t[0] = Create_hijo_select(OPCIONES_SELECT.OFFSET,ExpresionEntero(TIPO_VALOR.NUMERO,t[2]),None)
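# Illustrative note (comment only): the long list of <opcion_from> productions above
# enumerates every combination of the optional clauses in the fixed order
#   WHERE ... GROUP BY ... HAVING ... ORDER BY ... [ASC|DESC] LIMIT ... OFFSET ...
# For example (assumed input)
#   ... WHERE nota > 60 GROUP BY carnet HAVING COUNT(nota) > 1 ORDER BY carnet ASC LIMIT 10 OFFSET 5
# each present clause reduces to its Create_hijo_select node and the whole combination is
# packed into a single Create_padre_select, with None in the positions of absent clauses.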
#? ####################################################################
# TODO LIM,ORDEN
#? ####################################################################
def p_opc_lim(t):
'''opc_lim : ENTERO'''
reporte_bnf.append("<opc_lim> ::= ENTERO")
t[0] = ExpresionEntero(TIPO_VALOR.NUMERO,t[1])
def p_opc_lim2(t):
' opc_lim : ASTERISCO '
reporte_bnf.append("<opc_lim> ::= ASTERISCO")
t[0] = ExpresionIdentificador(TIPO_VALOR.ASTERISCO,t[1])
def p_ORDER(t):
''' orden : DESC '''
reporte_bnf.append("<orden> ::= DESC")
t[0] = ExpresionIdentificador(TIPO_VALOR.IDENTIFICADOR,t[1])
def p_ORDER2(t):
''' orden : ASC '''
reporte_bnf.append("<orden> ::= ASC")
t[0] = ExpresionIdentificador(TIPO_VALOR.IDENTIFICADOR,t[1])
#? ####################################################################
# TODO CASE
#? ####################################################################
def p_case_insrt(t):
' case_insrt : CASE estructura_when_lista ELSE expresion END '
reporte_bnf.append("<case_insrt> ::= CASE <estructura_when_lista> ELSE <expresion> END")
t[0] = Create_select_uno(OPCIONES_SELECT.CASE,None,t[4],None,None,None,t[2])
def p_estructura_when_lista(t):
' estructura_when_lista : estructura_when_lista estructura_when '
reporte_bnf.append("<estructura_when_lista> ::= <estructura_when_lista> <estructura_when>")
    # <estructura_when> already returns a list, so concatenate (not append) to keep it flat
    t[1] += t[2]
t[0] = t[1]
def p_opcion_estructura_when(t):
' estructura_when_lista : estructura_when'
reporte_bnf.append("<estructura_when_lista> ::= <estructura_when>")
t[0] = t[1]
def p_estructura_when(t):
' estructura_when : WHEN expresion_logica THEN expresion'
reporte_bnf.append("<estructura_when> ::= WHEN <expresion_logica> THEN <expresion>")
t[0] = [ExpresionRelacional(t[2],t[4],OPERACION_LOGICA.THEN)]
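# Illustrative note (comment only): for an assumed input such as
#   CASE WHEN nota >= 61 THEN 1 ELSE 0 END
# every WHEN/THEN pair becomes an ExpresionRelacional tagged with OPERACION_LOGICA.THEN,
# the pairs are collected in <estructura_when_lista>, and p_case_insrt wraps the list and
# the ELSE expression in Create_select_uno with OPCIONES_SELECT.CASE.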
#? ####################################################################
# TODO EXPRESION
#? ####################################################################
def p_agrupacion_expresion(t):
' agrupacion_expresion : PAR_A expresion PAR_C'
reporte_bnf.append("<agrupacion_expresion> ::= PAR_A <expresion> PAR_C")
t[0] = t[2]
#! modificaciones
def p_expresion(t):
''' expresion : expresion SUMA expresion
| expresion RESTA expresion
| expresion ASTERISCO expresion
| expresion DIVISION expresion
| expresion MODULO expresion
| expresion MAYMAY expresion
| expresion MENMEN expresion
| CEJILLA expresion
| expresion HASHTAG expresion
| S_OR expresion
| D_OR expresion
| expresion Y expresion
| AVG PAR_A expresion PAR_C
| MAX PAR_A expresion PAR_C
| MIN PAR_A expresion PAR_C
| ALL PAR_A select_insrt PAR_C
| SOME PAR_A select_insrt PAR_C
| expresion D_OR expresion'''
if t[2] == '+':
reporte_bnf.append("<expresion> ::= <expresion> SUMA <expresion>")
t[0] = ExpresionBinaria(t[1],t[3],OPERACION_ARITMETICA.MAS)
elif t[2] == '-':
reporte_bnf.append("<expresion> ::= <expresion> RESTA <expresion>")
t[0] = ExpresionBinaria(t[1],t[3],OPERACION_ARITMETICA.MENOS)
elif t[2] == '*':
reporte_bnf.append("<expresion> ::= <expresion> ASTERISCO <expresion>")
t[0] = ExpresionBinaria(t[1],t[3],OPERACION_ARITMETICA.ASTERISCO)
elif t[2] == '/':
reporte_bnf.append("<expresion> ::= <expresion> DIVISION <expresion>")
t[0] = ExpresionBinaria(t[1],t[3],OPERACION_ARITMETICA.DIVIDIDO)
elif t[2] == '%':
reporte_bnf.append("<expresion> ::= <expresion> MODULO <expresion>")
t[0] = ExpresionBinaria(t[1],t[3],OPERACION_ARITMETICA.MODULO)
elif t[2] == '>>':
reporte_bnf.append("<expresion> ::= <expresion> MAYMAY <expresion>")
t[0] = ExpresionBinaria(t[1],t[3],OPERACION_ARITMETICA.MAYMAY)
elif t[2] == '<<':
reporte_bnf.append("<expresion> ::= <expresion> MENMEN <expresion>")
t[0] = ExpresionBinaria(t[1],t[3],OPERACION_ARITMETICA.MENMEN)
elif t[1] == '~':
reporte_bnf.append("<expresion> ::= CEJILLA <expresion>")
t[0] = ExpresionBinaria(t[2],None,OPERACION_ARITMETICA.CEJILLA)
elif t[2] == '#':
reporte_bnf.append("<expresion> ::= <expresion> HASHTAG <expresion>")
t[0] = ExpresionBinaria(t[1],t[3],OPERACION_ARITMETICA.HASTAG)
elif t[1] == '|':
reporte_bnf.append("<expresion> ::= S_OR <expresion>")
t[0] = ExpresionBinaria(t[2],None,OPERACION_ARITMETICA.S_OR)
elif t[1] == '||':
reporte_bnf.append("<expresion> ::= D_OR <expresion>")
t[0] = ExpresionBinaria(t[2],None,OPERACION_ARITMETICA.D_OR)
elif t[2] == '&':
reporte_bnf.append("<expresion> ::= <expresion> Y <expresion>")
t[0] = ExpresionBinaria(t[1],t[3],OPERACION_ARITMETICA.AMPERSON)
elif t[1] == 'AVG':
reporte_bnf.append("<expresion> ::= AVG PAR_A <expresion> PAR_C")
t[0] = ExpresionBinaria(t[3],None,OPERACION_ARITMETICA.AVG)
elif t[1] == 'MAX':
reporte_bnf.append("<expresion> ::= MAX PAR_A <expresion> PAR_C")
t[0] = ExpresionBinaria(t[3],None,OPERACION_ARITMETICA.MAX)
elif t[1] == 'MIN':
reporte_bnf.append("<expresion> ::= MIN PAR_A <expresion> PAR_C")
t[0] = ExpresionBinaria(t[3],None,OPERACION_ARITMETICA.MIN)
    elif t[1] == 'ALL':
        reporte_bnf.append("<expresion> ::= ALL PAR_A <select_insrt> PAR_C")
        t[0] = ExpresionBinaria(t[3],None,OPERACION_ARITMETICA.ALL)
    elif t[1] == 'SOME':
        reporte_bnf.append("<expresion> ::= SOME PAR_A <select_insrt> PAR_C")
        t[0] = ExpresionBinaria(t[3],None,OPERACION_ARITMETICA.SOME)
    elif t[2] == '||':
        # binary alternative (expresion D_OR expresion); reuses the D_OR operator tag
        reporte_bnf.append("<expresion> ::= <expresion> D_OR <expresion>")
        t[0] = ExpresionBinaria(t[1],t[3],OPERACION_ARITMETICA.D_OR)
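# Illustrative note (comment only): for an assumed input such as  precio * cantidad + 10
# the branches of p_expresion build nested ExpresionBinaria nodes; with the usual operator
# precedence (as declared by the grammar's precedence table, if any) the result is
#   ExpresionBinaria(ExpresionBinaria(precio, cantidad, ASTERISCO), 10, MAS)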
#? ####################################################################
# TODO EXPRESION DATOS
#? ####################################################################
def p_expresion3(t):
' expresion : PAR_A expresion_logica PAR_C '
reporte_bnf.append("<expresion> ::= PAR_A <expresion_logica> PAR_C")
t[0] = t[2]
def p_expresion_boolean_true(t):
''' expresion : TRUE'''
reporte_bnf.append("<expresion> ::= TRUE")
t[0] = ExpresionBooleana(OPERACION_LOGICA.TRUE,True)
def p_expresion_boolean_false(t):
''' expresion : FALSE'''
reporte_bnf.append("<expresion> ::= FALSE")
t[0] = ExpresionBooleana(OPERACION_LOGICA.FALSE,False)
def p_sin_some_any(t):
'''sin_some_any : SOME '''
reporte_bnf.append("<sin_some_any> ::= SOM")
t[0] = ExpresionIdentificador(TIPO_VALOR.IDENTIFICADOR,t[1])
def p_sin_some_any2(t):
'''sin_some_any : ANY '''
reporte_bnf.append("<sin_some_any> ::= ANY")
t[0] = ExpresionIdentificador(TIPO_VALOR.IDENTIFICADOR,t[1])
def p_string_type(t):
''' string_type : CADENA '''
reporte_bnf.append("<string_type> ::= CADENA")
t[0] = ExpresionComillaSimple(TIPO_VALOR.IDENTIFICADOR,t[1])
def p_string_type2(t):
' string_type : ID'
reporte_bnf.append("<string_type> ::= ID")
t[0] = ExpresionIdentificador(TIPO_VALOR.IDENTIFICADOR,t[1])
#? ####################################################################
# TODO GRAMATICA PARA EXPRESION
#? ####################################################################
def p_expresion_relacional(t):
''' expresion_relacional : expresion MAYQUE expresion
| expresion MENQUE expresion
| expresion MAYIGQUE expresion
| expresion MENIGQUE expresion
| expresion DOBLEIG expresion
| expresion IGUAL expresion
| expresion NOIG expresion
| expresion NOIGUAL expresion'''
if t[2] == '>':
reporte_bnf.append("<expresion_relacional> ::= <expresion> MAYQUE <expresion>")
t[0] = ExpresionRelacional(t[1],t[3],OPERACION_RELACIONAL.MAYQUE)
elif t[2] == '<':
reporte_bnf.append("<expresion_relacional> ::= <expresion> MENQUE <expresion>")
t[0] = ExpresionRelacional(t[1],t[3],OPERACION_RELACIONAL.MENQUE)
elif t[2] == '>=':
reporte_bnf.append("<expresion_relacional> ::= <expresion> MAYIGQUE <expresion>")
t[0] = ExpresionRelacional(t[1],t[3],OPERACION_RELACIONAL.MAYIGQUE)
elif t[2] == '<=':
reporte_bnf.append("<expresion_relacional> ::= <expresion> MENIGQUE <expresion>")
t[0] = ExpresionRelacional(t[1],t[3],OPERACION_RELACIONAL.MENIGQUE)
elif t[2] == '==':
reporte_bnf.append("<expresion_relacional> ::= <expresion> DOBLEIG <expresion>")
t[0] = ExpresionRelacional(t[1],t[3],OPERACION_RELACIONAL.DOBLEIGUAL)
elif t[2] == '=':
reporte_bnf.append("<expresion_relacional> ::= <expresion> IGUAL <expresion>")
t[0] = ExpresionRelacional(t[1],t[3],OPERACION_RELACIONAL.IGUAL)
elif t[2] == '<>':
reporte_bnf.append("<expresion_relacional> ::= <expresion> NOIG <expresion>")
t[0] = ExpresionRelacional(t[1],t[3],OPERACION_RELACIONAL.NOIG)
elif t[2] == '!=':
reporte_bnf.append("<expresion_relacional> ::= <expresion> NOIGUAL <expresion>")
t[0] = ExpresionRelacional(t[1],t[3],OPERACION_RELACIONAL.DIFERENTE)
def p_expresion_relacional_exp(t):
' expresion_relacional : expresion '
reporte_bnf.append("<expresion_relacional> ::= <expresion>")
t[0] = t[1]
def p_expresion_logica(t):
''' expresion_logica : expresion_relacional AND expresion_logica
| expresion_relacional OR expresion_logica'''
if t[2].upper() == 'AND':
reporte_bnf.append("<expresion_logica> ::= <expresion_relacional> AND <expresion_logica>")
t[0] = ExpresionLogica(t[1],t[3],OPERACION_LOGICA.AND)
elif t[2].upper() == 'OR':
reporte_bnf.append("<expresion_logica> ::= <expresion_relacional> OR <expresion_logica>")
        t[0] = ExpresionLogica(t[1],t[3],OPERACION_LOGICA.OR)
def p_expresion_logica_not(t):
''' expresion_logica : NOT expresion_logica'''
reporte_bnf.append("<expresion_logica> ::= NOT <expresion_logica>")
t[0] = ExpresionLogica(t[2],None,OPERACION_LOGICA.NOT)
def p_expresion_logica_rel(t):
''' expresion_logica : expresion_relacional'''
reporte_bnf.append("<expresion_logica> ::= <expresion_relacion>")
t[0] = t[1]
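# Illustrative note (comment only): for an assumed input such as  a > 5 AND NOT b <= 3
# the rules above produce
#   ExpresionLogica(ExpresionRelacional(a, 5, MAYQUE),
#                   ExpresionLogica(ExpresionRelacional(b, 3, MENIGQUE), None, NOT),
#                   OPERACION_LOGICA.AND)
# where a and b stand for the <expresion> nodes built from those identifiers.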
def p_expresion2(t):
''' expresion : expresion_dato '''
reporte_bnf.append("<expresion> ::= <expresion_dato>")
t[0] = t[1]
def p_expresion31(t):
''' expresion : select_insrt '''
reporte_bnf.append("<expresion> ::= <select_insrt>")
t[0] = t[1]
def p_expresion4(t):
''' expresion : sum_insrt '''
reporte_bnf.append("<expresion> ::= <sum_insrt>")
t[0] = t[1]
def p_expresion5(t):
''' expresion : count_insrt '''
reporte_bnf.append("<expresion> ::= <count_insrt>")
t[0] = t[1]
#? ####################################################################
# TODO GRAMATICA PARA LA INSTRUCCION DE SUM ----------
#? ####################################################################
def p_sum_insert(t):
    ' sum_insrt : SUM agrupacion_expresion'
    reporte_bnf.append("<sum_insrt> ::= SUM <agrupacion_expresion>")
    # build the aggregate node; assumes OPERACION_ARITMETICA defines a SUM member
    t[0] = Expresiondatos(OPERACION_ARITMETICA.SUM, t[2], None, None, None)
#? ####################################################################
# TODO GRAMATICA PAR LA INSTRUCCIONN DE COUNT ---------
#? ####################################################################
def p_count_insrt(t):
    ' count_insrt : COUNT agrupacion_expresion '
    reporte_bnf.append("<count_insrt> ::= COUNT <agrupacion_expresion>")
    # build the aggregate node; assumes OPERACION_ARITMETICA defines a COUNT member
    t[0] = Expresiondatos(OPERACION_ARITMETICA.COUNT, t[2], None, None, None)
#? ####################################################################
# TODO EXPRESION SELECT
#? ####################################################################
def p_opcion_select(t):
' opcion_select : case_insrt '
reporte_bnf.append("<opcion_select> ::= <case_insrt>")
t[0] = t[1]
def p_opcion_select1(t):
' opcion_select : PAR_A select_insrt PAR_C '
reporte_bnf.append("<opcion_select> ::= PAR_A <select_insrt> PAR_C")
t[0] = t[2]
def p_opcion_select2(t):
' opcion_select : expresion '
reporte_bnf.append("<opcion_select> ::= <expresion>")
t[0] = t[1]
def p_opcion_select3(t):
'opcion_select : funciones_select '
reporte_bnf.append("<opcion_select> ::= <funciones_select>")
t[0] = t[1]
def p_opcion_select4(t):
'opcion_select : ASTERISCO '
reporte_bnf.append("<opcion_select> ::= ASTERISCO")
t[0] = ExpresionIdentificador(TIPO_VALOR.ASTERISCO,t[1])
def p_opcion_select5(t):
' opcion_select : ID PUNTO ASTERISCO '
reporte_bnf.append("<opcion_select> ::= ID PUNTO ASTERISCO")
t[0] = ExpresionIdentificadorDoble(TIPO_VALOR.ID_ASTERISCO,t[1],t[3])
def p_greatest_insrt(t):
''' greatest_insrt : GREATEST PAR_A greatest_val PAR_C
| LEAST PAR_A greatest_val PAR_C'''
if t[1].upper() == 'GREATEST':
reporte_bnf.append("<greates_insrt> ::= GREATEST PAR_A <greatest_val> PAR_C")
t[0] = Create_select_uno(OPCIONES_SELECT.GREATEST,None,None,None,t[3],None,None)
elif t[1].upper() == 'LEAST':
reporte_bnf.append("<greates_insrt> ::= LEAST PAR_A <greatest_val> PAR_C")
t[0] = Create_select_uno(OPCIONES_SELECT.LEAST,None,None,None,t[3],None,None)
def p_greatest_insrt1(t):
' greatest_val : greatest_val COMA expresion_dato '
reporte_bnf.append("<greates_val> ::= <greatest_val> COMA <expresion_dato>")
t[1].append(t[3])
t[0] = t[1]
def p_greatest_insrt2(t):
' greatest_val : expresion_dato'
reporte_bnf.append("<greatest_val> ::= <expresion_dato>")
t[0] = [t[1]]
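# Illustrative note (comment only): for an assumed input such as  GREATEST(1, 5, 3)
# <greatest_val> collects the arguments as a list of <expresion_dato> nodes and
# p_greatest_insrt wraps it in Create_select_uno with OPCIONES_SELECT.GREATEST;
# LEAST behaves identically but tags the node with OPCIONES_SELECT.LEAST.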
##################################EXPRESIONES#####################################
def p_funciones_select(t):
''' funciones_select : ABS PAR_A expresion PAR_C
| CBRT PAR_A expresion PAR_C
| CEIL PAR_A expresion PAR_C
| CEILING PAR_A expresion PAR_C
| DEGREES PAR_A expresion PAR_C
| DIV PAR_A expresion COMA expresion PAR_C
| EXP PAR_A expresion PAR_C
| FACTORIAL PAR_A expresion PAR_C
| FLOOR PAR_A expresion PAR_C
| GCD PAR_A expresion COMA expresion PAR_C
| LN PAR_A expresion PAR_C
| LOG PAR_A expresion PAR_C
| MOD PAR_A expresion COMA expresion PAR_C
| PI PAR_A PAR_C
| POWER PAR_A expresion COMA expresion PAR_C
| RADIANS PAR_A expresion PAR_C
| ROUND PAR_A expresion PAR_C
| SIGN PAR_A expresion PAR_C
| SQRT PAR_A expresion PAR_C
| WIDTH_BUCKET PAR_A expresion COMA expresion COMA expresion COMA expresion PAR_C
| TRUNC PAR_A expresion COMA ENTERO PAR_C
| TRUNC PAR_A expresion PAR_C
| RANDOM PAR_A PAR_C
| ACOS PAR_A expresion PAR_C
| ASIND PAR_A expresion PAR_C
| ATAN2 PAR_A expresion COMA expresion PAR_C
| ATAN2D PAR_A expresion COMA expresion PAR_C
| ATAN PAR_A expresion PAR_C
| ATAND PAR_A expresion PAR_C
| COS PAR_A expresion PAR_C
| COT PAR_A expresion PAR_C
| COTD PAR_A expresion PAR_C
| SIN PAR_A expresion PAR_C
| SIND PAR_A expresion PAR_C
| TAN PAR_A expresion PAR_C
| TAND PAR_A expresion PAR_C
| SINH PAR_A expresion PAR_C
| COSH PAR_A expresion PAR_C
| TANH PAR_A expresion PAR_C
| ASINH PAR_A expresion PAR_C
| ATANH PAR_A expresion PAR_C
| COSD PAR_A expresion PAR_C
| ACOSH PAR_A expresion PAR_C
| ASIN PAR_A expresion PAR_C
| ACOSD PAR_A expresion PAR_C
| LENGTH PAR_A string_type PAR_C
| SUBSTRING PAR_A string_type COMA expresion COMA expresion PAR_C
| TRIM PAR_A string_type PAR_C
| SUBSTR PAR_A string_type COMA expresion COMA expresion PAR_C
| GET_BYTE PAR_A string_type D_DOSPTS BYTEA COMA ENTERO PAR_C
| SET_BYTE PAR_A string_type D_DOSPTS BYTEA COMA ENTERO COMA ENTERO PAR_C
| SHA256 PAR_A string_type PAR_C
| ENCODE PAR_A string_type D_DOSPTS BYTEA COMA formato_texto PAR_C
| DECODE PAR_A string_type D_DOSPTS BYTEA COMA formato_texto PAR_C
| CONVERT PAR_A string_type AS TIPO_DATO PAR_C
'''
if t[1].upper() == 'ABS':
reporte_bnf.append("<funciones_select> ::= ABS PAR_A <expresion> PAR_C")
t[0] = Expresiondatos(OPERACION_ARITMETICA.ABS, t[3],None,None,None)
elif t[1].upper() == 'CBRT':
reporte_bnf.append("<funciones_select> ::= CBRT PAR_A <expresion> PAR_C")
t[0] = Expresiondatos(OPERACION_ARITMETICA.CBRT, t[3],None,None,None)
elif t[1].upper() == 'CEIL':
reporte_bnf.append("<funciones_select> ::= CEIL PAR_A <expresion> PAR_C")
t[0] = Expresiondatos(OPERACION_ARITMETICA.CEIL, t[3],None,None,None)
elif t[1].upper() == 'CEILING':
reporte_bnf.append("<funciones_select> ::= CEILING PAR_A <expresion> PAR_C")
t[0] = Expresiondatos(OPERACION_ARITMETICA.CEILING, t[3],None,None,None)
elif t[1].upper() == 'DEGREES':
reporte_bnf.append("<funciones_select> ::= DEGREES PAR_A <expresion> PAR_C")
t[0] = Expresiondatos(OPERACION_ARITMETICA.DEGREES, t[3],None,None,None)
elif t[1].upper() == 'DIV':
reporte_bnf.append("<funciones_select> ::= DIV PAR_A <expresion> COMA <expresion> PAR_C")
t[0] = Expresiondatos(OPERACION_ARITMETICA.E_DIV, t[3],t[5],None,None)
elif t[1].upper() == 'EXP':
reporte_bnf.append("<funciones_select> ::= EXP PAR_A <expresion> PAR_C")
t[0] = Expresiondatos(OPERACION_ARITMETICA.EXP, t[3],None,None,None)
elif t[1].upper() == 'FACTORIAL':
reporte_bnf.append("<funciones_select> ::= FACTORIAL PAR_A <expresion> PAR_C")
t[0] = Expresiondatos(OPERACION_ARITMETICA.FACTORIAL, t[3],None,None,None)
elif t[1].upper() == 'FLOOR':
reporte_bnf.append("<funciones_select> ::= FLOOR PAR_A <expresion> PAR_C")
t[0] = Expresiondatos(OPERACION_ARITMETICA.FLOOR, t[3],None,None,None)
elif t[1].upper() == 'GCD':
reporte_bnf.append("<funciones_select> ::= GCD PAR_A <expresion> COMA <expresion> PAR_C")
t[0] = Expresiondatos(OPERACION_ARITMETICA.GCD, t[3],t[5],None,None)
elif t[1].upper() == 'LN':
reporte_bnf.append("<funciones_select> ::= LN PAR_A <expresion> PAR_C")
t[0] = Expresiondatos(OPERACION_ARITMETICA.LN, t[3],None,None,None)
elif t[1].upper() == 'LOG':
reporte_bnf.append("<funciones_select> ::= LOG PAR_A <expresion> PAR_C")
t[0] = Expresiondatos(OPERACION_ARITMETICA.LOG, t[3],None,None,None)
elif t[1].upper() == 'MOD':
reporte_bnf.append("<funciones_select> ::= MOD PAR_A <expresion> COMA <expresion> PAR_C")
t[0] = Expresiondatos(OPERACION_ARITMETICA.MOD, t[3],t[5],None,None)
elif t[1].upper() == 'PI':
reporte_bnf.append("<funciones_select> ::= PI PAR_A PAR_C")
t[0] = Expresiondatos(OPERACION_ARITMETICA.PI, None,None,None,None)
elif t[1].upper() == 'POWER':
reporte_bnf.append("<funciones_select> ::= POWER PAR_A <expresion> COMA <expresion> PAR_C")
t[0] = Expresiondatos(OPERACION_ARITMETICA.POWER, t[3],t[5],None,None)
elif t[1].upper() == 'RADIANS':
reporte_bnf.append("<funciones_select> ::= RADIANS PAR_A <expresion> PAR_C")
t[0] = Expresiondatos(OPERACION_ARITMETICA.RADIANS, t[3],None,None,None)
elif t[1].upper() == 'ROUND':
reporte_bnf.append("<funciones_select> ::= ROUND PAR_A <expresion> PAR_C")
t[0] = Expresiondatos(OPERACION_ARITMETICA.ROUND, t[3],None,None,None)
elif t[1].upper() == 'SIGN':
reporte_bnf.append("<funciones_select> ::= SIGN PAR_A <expresion> PAR_C")
t[0] = Expresiondatos(OPERACION_ARITMETICA.SIGN, t[3],None,None,None)
elif t[1].upper() == 'SQRT':
reporte_bnf.append("<funciones_select> ::= SQRT PAR_A <expresion> PAR_C")
t[0] = Expresiondatos(OPERACION_ARITMETICA.SQRT, t[3],None,None,None)
elif t[1].upper() == 'WIDTH_BUCKET':
reporte_bnf.append("<funciones_select> ::= WIDTH_BUCKET PAR_A <expresion> COMA <expresion> COMA <expresion> COMA <expresion> PAR_C")
t[0] = Expresiondatos(OPERACION_ARITMETICA.WIDTH_BUCKET, t[3],t[5],t[7],t[9])
elif t[1].upper() == 'TRUNC' and t[4] == ',':
reporte_bnf.append("<funciones_select> ::= TRUNC PAR_A <expresion> COMA ENTERO PAR_C ")
t[0] = Expresiondatos(OPERACION_ARITMETICA.TRUNC, t[3],ExpresionEntero(TIPO_VALOR.NUMERO,t[5]),None,None)
elif t[1].upper() == 'TRUNC':
reporte_bnf.append("<funciones_select> ::= TRUNC PAR_A <expresion> PAR_C")
t[0] = Expresiondatos(OPERACION_ARITMETICA.S_TRUNC, t[3],None,None,None)
elif t[1].upper() == 'RANDOM':
reporte_bnf.append("<funciones_select> ::= RANDOM PAR_A PAR_C")
        # RANDOM() takes no arguments, so no operand is stored
        t[0] = Expresiondatos(OPERACION_ARITMETICA.RANDOM, None,None,None,None)
elif t[1].upper() == 'ACOS':
reporte_bnf.append("<funciones_select> ::= ACOS PAR_A <expresion> PAR_C")
t[0] = Expresiondatos(OPERACION_ARITMETICA.ACOS, t[3],None,None,None)
elif t[1].upper() == 'ASIND':
reporte_bnf.append("<funciones_select> ::= ASIND PAR_A <expresion> PAR_C")
t[0] = Expresiondatos(OPERACION_ARITMETICA.ASIND, t[3],None,None,None)
elif t[1].upper() == 'ATAN2':
reporte_bnf.append("<funciones_select> ::= ATAN2 PAR_A <expresion> COMA <expresion> PAR_C")
t[0] = Expresiondatos(OPERACION_ARITMETICA.ATAN2, t[3],t[5],None,None)
elif t[1].upper() == 'ATAN2D':
reporte_bnf.append("<funciones_select> ::= ATAN2D PAR_A <expresion> COMA <expresion> PAR_C")
t[0] = Expresiondatos(OPERACION_ARITMETICA.ATAN2D, t[3],t[5],None,None)
elif t[1].upper() == 'ATAN':
reporte_bnf.append("<funciones_select> ::= ATAN PAR_A <expresion> PAR_C")
t[0] = Expresiondatos(OPERACION_ARITMETICA.ATAN, t[3],None,None,None)
elif t[1].upper() == 'ATAND':
reporte_bnf.append("<funciones_select> ::= ATAND PAR_A <expresion> PAR_C")
t[0] = Expresiondatos(OPERACION_ARITMETICA.ATAND, t[3],None,None,None)
elif t[1].upper() == 'COS':
reporte_bnf.append("<funciones_select> ::= COS PAR_A <expresion> PAR_C")
t[0] = Expresiondatos(OPERACION_ARITMETICA.COS, t[3],None,None,None)
elif t[1].upper() == 'COT':
reporte_bnf.append("<funciones_select> ::= COT PAR_A <expresion> PAR_C")
t[0] = Expresiondatos(OPERACION_ARITMETICA.COT, t[3],None,None,None)
elif t[1].upper() == 'COTD':
reporte_bnf.append("<funciones_select> ::= COTD PAR_A <expresion> PAR_C")
t[0] = Expresiondatos(OPERACION_ARITMETICA.COTD, t[3],None,None,None)
elif t[1].upper() == 'SIN':
reporte_bnf.append("<funciones_select> ::= SIN PAR_A <expresion> PAR_C")
t[0] = Expresiondatos(OPERACION_ARITMETICA.SIN, t[3],None,None,None)
elif t[1].upper() == 'SIND':
reporte_bnf.append("<funciones_select> ::= SIND PAR_A <expresion> PAR_C")
t[0] = Expresiondatos(OPERACION_ARITMETICA.SIND, t[3],None,None,None)
elif t[1].upper() == 'TAN':
reporte_bnf.append("<funciones_select> ::= TAN PAR_A <expresion> PAR_C")
t[0] = Expresiondatos(OPERACION_ARITMETICA.TAN, t[3],None,None,None)
elif t[1].upper() == 'TAND':
reporte_bnf.append("<funciones_select> ::= TAND PAR_A <expresion> PAR_C")
t[0] = Expresiondatos(OPERACION_ARITMETICA.TAND, t[3],None,None,None)
elif t[1].upper() == 'SINH':
reporte_bnf.append("<funciones_select> ::= SINH PAR_A <expresion> PAR_C")
t[0] = Expresiondatos(OPERACION_ARITMETICA.SINH, t[3],None,None,None)
elif t[1].upper() == 'COSH':
reporte_bnf.append("<funciones_select> ::= COSH PAR_A <expresion> PAR_C")
t[0] = Expresiondatos(OPERACION_ARITMETICA.COSH, t[3],None,None,None)
elif t[1].upper() == 'TANH':
reporte_bnf.append("<funciones_select> ::= TANH PAR_A <expresion> PAR_C")
t[0] = Expresiondatos(OPERACION_ARITMETICA.TANH, t[3],None,None,None)
elif t[1].upper() == 'ASINH':
reporte_bnf.append("<funciones_select> ::= ASINH PAR_A <expresion> PAR_C")
t[0] = Expresiondatos(OPERACION_ARITMETICA.ASINH, t[3],None,None,None)
elif t[1].upper() == 'ATANH':
reporte_bnf.append("<funciones_select> ::= ATANH PAR_A <expresion> PAR_C")
t[0] = Expresiondatos(OPERACION_ARITMETICA.ATANH, t[3],None,None,None)
elif t[1].upper() == 'COSD':
reporte_bnf.append("<funciones_select> ::= COSD PAR_A <expresion> PAR_C")
t[0] = Expresiondatos(OPERACION_ARITMETICA.COSD, t[3],None,None,None)
elif t[1].upper() == 'ACOSH':
reporte_bnf.append("<funciones_select> ::= ACOSH PAR_A <expresion> PAR_C")
t[0] = Expresiondatos(OPERACION_ARITMETICA.ACOSH, t[3],None,None,None)
elif t[1].upper() == 'ASIN':
reporte_bnf.append("<funciones_select> ::= ASIN PAR_A <expresion> PAR_C")
t[0] = Expresiondatos(OPERACION_ARITMETICA.ASIN, t[3],None,None,None)
elif t[1].upper() == 'ACOSD':
reporte_bnf.append("<funciones_select> ::= ACOSD PAR_A <expresion> PAR_C")
t[0] = Expresiondatos(OPERACION_ARITMETICA.ACOSD, t[3],None,None,None)
elif t[1].upper() == 'LENGTH':
reporte_bnf.append("<funciones_select> ::= LENGTH PAR_A <expresion> PAR_C")
t[0] = Expresiondatos(CADENA_BINARIA.LENGTH, t[3],None,None,None)
elif t[1].upper() == 'SUBSTRING':
reporte_bnf.append("<funciones_select> ::= SUBSTRING PAR_A <string_type> COMA <expresion> COMA <expresion> PAR_C")
t[0] = Expresiondatos(CADENA_BINARIA.SUBSTRING, t[3],t[5],t[7],None)
elif t[1].upper() == 'TRIM':
reporte_bnf.append("<funciones_select> :: TRIM PAR_A <string_type> PAR_C")
t[0] = Expresiondatos(CADENA_BINARIA.TRIM, t[3],None,None,None)
elif t[1].upper() == 'SUBSTR':
reporte_bnf.append("<funciones_select> :: SUBSTR PAR_A <string_type> COMA ENTERO COMA ENTERO PAR_C")
t[0] = Expresiondatos(CADENA_BINARIA.SUBSTR, t[3],t[5],t[7],None)
elif t[1].upper() == 'GET_BYTE':
reporte_bnf.append("<funciones_select> :: GET_BYTE PAR_A <string_type> D_DOSPTS BYTEA COMA ENTERO PAR_C")
t[0] = Expresiondatos(CADENA_BINARIA.GET_BYTE, t[3],ExpresionEntero(TIPO_VALOR.NUMERO,t[7]),None,None)
elif t[1].upper() == 'SET_BYTE':
reporte_bnf.append("<funciones_select> :: SET_BYTE PAR_A <string_type> D_DOSPTS BYTEA COMA ENTERO COMA ENTERO PAR_C")
        t[0] = Expresiondatos(CADENA_BINARIA.SET_BYTE, t[3],ExpresionEntero(TIPO_VALOR.NUMERO,t[7]),ExpresionEntero(TIPO_VALOR.NUMERO,t[9]),None)
elif t[1].upper() == 'SHA256':
reporte_bnf.append("<funciones_select> :: SHA256 PAR_A <string_typ>e PAR_C")
t[0] = Expresiondatos(CADENA_BINARIA.SHA256, t[3],None,None,None)
elif t[1].upper() == 'ENCODE':
reporte_bnf.append("<funciones_select> :: ENCODE PAR_A <string_type> D_DOSPTS BYTEA COMA formato_texto PAR_C")
t[0] = Expresiondatos(CADENA_BINARIA.ENCODE, t[3],t[7],None,None)
elif t[1].upper() == 'DECODE':
reporte_bnf.append("<funciones_select> :: DECODE PAR_A <string_type> D_DOSPTS BYTEA COMA formato_texto PAR_C")
t[0] = Expresiondatos(CADENA_BINARIA.DECODE, t[3],t[7],None,None)
elif t[1].upper() == 'CONVERT':
reporte_bnf.append("<funciones_select> :: CONVERT PAR_A <string_type> AS TIPO_DATO PAR_C ")
t[0] = Expresiondatos(CADENA_BINARIA.CONVERT, t[3],t[5],None,None)
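# Illustrative examples (comments only; sample calls are assumed inputs):
#   ABS(nota)          -> Expresiondatos(OPERACION_ARITMETICA.ABS, <expresion>, None, None, None)
#   POWER(2, 10)       -> Expresiondatos(OPERACION_ARITMETICA.POWER, <expresion>, <expresion>, None, None)
#   TRUNC(3.1416, 2)   -> Expresiondatos(OPERACION_ARITMETICA.TRUNC, <expresion>, ExpresionEntero(...), None, None)
#   LENGTH('abc')      -> Expresiondatos(CADENA_BINARIA.LENGTH, <string_type>, None, None, None)
# unused operand positions are always filled with None, following the branches above.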
def p_formato_texto(t):
''' formato_texto : ESCAPE '''
reporte_bnf.append("<formato_texto> ::= ESCAPE")
t[0] = t[1]
def p_formato_texto_hex(t):
'formato_texto : HEX'
reporte_bnf.append("<formato_texto> ::= HEX")
t[0] = t[1]
def p_formato_texto_base64(t):
' formato_texto : BASE64'
reporte_bnf.append("<formato_texto> ::= BASE64")
t[0] = t[1]
#? ###################################################################
# TODO EXPRESION WHERE
#? ###################################################################
def p_expresion_where2(t):
'expresion_where : expresion_logica_w'
reporte_bnf.append("<expresion_where> ::= <expresion_logica_w>")
t[0] = t[1]
def p_expresion_where(t):
''' expresion_where : expresion_dato NOT IN PAR_A select_insrt PAR_C
| expresion_dato IN PAR_A select_insrt PAR_C
| NOT EXISTS PAR_A select_insrt PAR_C
'''
if t[2].upper() == 'NOT' and t[3].upper() == 'IN':
reporte_bnf.append("<expresion_where> ::= <expresion_dato> NOT IN PAR_A <select_insrt> PAR_C")
t[0] = Expresiondatos(OPCION_VERIFICAR.NOT_IN, t[1],t[5],None,None)
elif t[2].upper() == 'IN':
reporte_bnf.append("<expresion_where> ::= <expresion_dato> IN PAR_A <select_insrt> PAR_C")
t[0] = Expresiondatos(OPCION_VERIFICAR.INN,t[1],t[4],None,None)
elif t[1].upper() == 'NOT' and t[2].upper() == 'EXISTS':
reporte_bnf.append("<expresion_where> ::= NOT EXISTS PAR_A <select_insrt> PAR_C")
t[0] = Expresiondatos(OPCION_VERIFICAR.NOT_EXISTS,t[4],None,None,None)
def p_expresion_where_3(t):
''' expresion_where : expresion_dato NOT BETWEEN SYMMETRIC expresion_dato AND expresion_dato'''
if t[2].upper() == 'NOT' and t[4].upper() == 'SYMMETRIC':
reporte_bnf.append("<expresion_where> ::= <expresion_dato> NOT BETWEEN SYMMETRIC <expresion_dato> AND <expresion_dato>")
t[0] = Expresiondatos(OPCION_VERIFICAR.NOT_BETWEEN_SYMETRIC,t[1],t[5],t[7],None)
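# Illustrative examples (comments only; sample predicates are assumed inputs):
#   WHERE id IN (SELECT id FROM otros)          -> Expresiondatos(OPCION_VERIFICAR.INN, ...)
#   WHERE id NOT IN (SELECT id FROM otros)      -> Expresiondatos(OPCION_VERIFICAR.NOT_IN, ...)
#   WHERE NOT EXISTS (SELECT id FROM otros)     -> Expresiondatos(OPCION_VERIFICAR.NOT_EXISTS, ...)
#   WHERE edad NOT BETWEEN SYMMETRIC 10 AND 20  -> Expresiondatos(OPCION_VERIFICAR.NOT_BETWEEN_SYMETRIC, ...)
# each node keeps the left-hand <expresion_dato> and the subquery / bounds in the operand slots.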
def p_expresion_wherea(t):
'''expresion_wherea : ABS PAR_A expresion PAR_C
| LENGTH PAR_A string_type PAR_C
| CBRT PAR_A expresion PAR_C
| CEIL PAR_A expresion PAR_C
| CEILING PAR_A expresion PAR_C
| SUBSTRING PAR_A string_type COMA expresion COMA expresion PAR_C
| TRIM PAR_A string_type D_DOSPTS BYTEA FROM string_type D_DOSPTS BYTEA PAR_C
| SUBSTR PAR_A string_type COMA expresion COMA expresion PAR_C
| sin_some_any PAR_A select_insrt PAR_C
| EXTRACT PAR_A extract_time FROM string_type PAR_C '''
    if not isinstance(t[1], str):
        # the <sin_some_any> alternative yields an AST node rather than a keyword string,
        # so it is handled first to avoid calling .upper() on an object
        reporte_bnf.append("<expresion_wherea> ::= <sin_some_any> PAR_A <select_insrt> PAR_C")
        t[0] = Expresiondatos(OPCIONES_DATOS.SOME, t[3], None, None, None)
    elif t[1].upper() == 'ABS':
        reporte_bnf.append("<expresion_wherea> ::= ABS PAR_A <expresion> PAR_C")
t[0] = Expresiondatos(OPERACION_ARITMETICA.ABS, t[3],None,None,None)
elif t[1].upper() == 'LENGTH':
reporte_bnf.append("<expresion_wherea> ::= LENGTH PAR_A <string_type> PAR_C")
t[0] = Expresiondatos(OPERACION_ARITMETICA.LENGTH, t[3],None,None,None)
elif t[1].upper() == 'CBRT':
reporte_bnf.append("<expresion_wherea> ::= CBRT PAR_A <expresion> PAR_C")
t[0] = Expresiondatos(OPERACION_ARITMETICA.CBRT, t[3],None,None,None)
elif t[1].upper() == 'CEIL':
reporte_bnf.append("<expresion_wherea> ::= CEIL PAR_A <expresion> PAR_C")
t[0] = Expresiondatos(OPERACION_ARITMETICA.CEIL, t[3],None,None,None)
elif t[1].upper() == 'CEILING':
reporte_bnf.append("<expresion_wherea> ::= CEILING PAR_A <expresion> PAR_C")
t[0] = Expresiondatos(OPERACION_ARITMETICA.CEILING, t[3],None,None,None)
elif t[1].upper() == 'SUBSTRING':
reporte_bnf.append("<expresion_wherea> ::= SUBSTRING PAR_A <string_type> COMA <expresion> COMA <expresion> PAR_C")
t[0] = Expresiondatos(OPCIONES_DATOS.SUBSTRING, t[3],t[5],t[7],None)
elif t[1].upper() == 'TRIM':
reporte_bnf.append("<expresion_wherea> ::= TRIM PAR_A <string_type> D_DOSPTS BYTEA FROM <string_type> D_DOSPTS BYTEA PAR_C")
t[0] = Expresiondatos(OPCIONES_DATOS.TRIM, t[3],t[7],None,None)
elif t[1].upper() == 'SUBSTR':
reporte_bnf.append("<expresion_wherea> ::= SUBSTR PAR_A <string_type> COMA ENTERO COMA ENTERO PAR_C")
t[0] = Expresiondatos(OPCIONES_DATOS.SUBSTR, t[3],t[5],t[7],None)
elif t[1].upper() == 'EXTRACT':
reporte_bnf.append("<expresion_wherea> ::= EXTRACT PAR_A <expresion_time> FROM <string_type> PAR_C")
t[0] = Expresiondatos(OPCIONES_DATOS.EXTRACT, t[3],t[5],None,None)
def p_expresion_wherea2(t):
''' expresion_wherea : expresion '''
reporte_bnf.append("<expresion_wherea> ::= <expresion>")
t[0] = t[1]
#? #########################################################
#ANCHOR EXPRESIONES AGREGADAS AL WHERE
#? ##################################################
def p_expresion_wherea3(t):
    ''' expresion_wherea : LOWER PAR_A string_type PAR_C '''
    reporte_bnf.append("<expresion_wherea> ::= LOWER PAR_A <string_type> PAR_C")
    # NOTE: no AST node is built for LOWER yet, so t[0] is left unset
def p_expresion_wherea4(t):
    ''' expresion_wherea : ID PAR_A ID PAR_C'''
    reporte_bnf.append("<expresion_wherea> ::= ID PAR_A ID PAR_C")
    # NOTE: user-defined function calls are only recognized here; no AST node is built yet
def p_expresion_isnull_(t):
''' expresion_whereb : expresion_dato IS NULL '''
reporte_bnf.append("<expresion_whereb> ::= <expresion_dato> IS NULL")
t[0] = ExpresionRelacional(t[1],'',OPCION_VERIFICAR.NULL)
def p_experesion_isnull_2(t):
' expresion_whereb : expresion_dato ISNULL'
reporte_bnf.append("<expresion_whereb> ::= <expresion_dato> ISNULL")
t[0] = ExpresionRelacional(t[1],'',OPCION_VERIFICAR.ISNULL)
def p_expresion_notnull(t):
' expresion_whereb : expresion_dato NOTNULL'
reporte_bnf.append("<expresion_whereb> ::= <expresion_dato> NOTNULL")
t[0] = ExpresionRelacional(t[1],'',OPCION_VERIFICAR.NOTNULL)
def p_expresion_true(t):
' expresion_whereb : expresion_dato IS TRUE'
reporte_bnf.append("<expresion_whereb> ::= <expresion_dato> IS TRUE")
t[0] = ExpresionRelacional(t[1],'',OPCION_VERIFICAR.TRUE)
def p_expresion_not_true(t):
' expresion_whereb : expresion_dato IS NOT TRUE'
reporte_bnf.append("<expresion_whereb> ::= <expresion_dato> IS NOT TRUE")
t[0] = ExpresionRelacional(t[1],'',OPCION_VERIFICAR.N_TRUE)
def p_expresion_false(t):
'expresion_whereb : expresion_dato IS FALSE'
reporte_bnf.append("<expresion_whereb> ::= <expresion_dato> IS FALSE")
t[0] = ExpresionRelacional(t[1],'',OPCION_VERIFICAR.FALSE)
def p_expresion_UNKNOWN(t):
' expresion_whereb : expresion_dato IS UNKNOWN'
reporte_bnf.append("<expresion_whereb> ::= <expresion_dato> IS UNKNOWN")
t[0] = ExpresionRelacional(t[1],'',OPCION_VERIFICAR.UNKNOWN)
def p_expresion_UNKNOWN_(t):
' expresion_whereb : expresion_dato IS NOT UNKNOWN'
reporte_bnf.append("<expresion_whereb> ::= <expresion_dato> IS NOT UNKNOWN")
t[0] = ExpresionRelacional(t[1],'',OPCION_VERIFICAR.UNKNOWN)
def p_expresion_whereb(t):
'''expresion_whereb : expresion_wherea MAYQUE expresion_wherea
| expresion_wherea MENQUE expresion_wherea
| expresion_wherea MAYIGQUE expresion_wherea
| expresion_wherea MENIGQUE expresion_wherea
| expresion_wherea DOBLEIG expresion_wherea
| expresion_wherea IGUAL expresion_wherea
| expresion_wherea NOIG expresion_wherea
| expresion_wherea NOIGUAL expresion_wherea '''
if t[2] == '>':
reporte_bnf.append("<expresion_whereb> ::= <expresion_wherea> MAYQUE <expresion_wherea>")
t[0] = ExpresionRelacional(t[1],t[3],OPERACION_RELACIONAL.MAYQUE)
elif t[2] == '<':
reporte_bnf.append("<expresion_whereb> ::= <expresion_wherea> MENQUE <expresion_wherea>")
t[0] = ExpresionRelacional(t[1],t[3],OPERACION_RELACIONAL.MENQUE)
elif t[2] == '>=':
reporte_bnf.append("<expresion_whereb> ::= <expresion_wherea> MAYIGQUE <expresion_wherea>")
t[0] = ExpresionRelacional(t[1],t[3],OPERACION_RELACIONAL.MAYIGQUE)
elif t[2] == '<=':
reporte_bnf.append("<expresion_whereb> ::= <expresion_wherea> MENIGQUE <expresion_wherea>")
t[0] = ExpresionRelacional(t[1],t[3],OPERACION_RELACIONAL.MENIGQUE)
elif t[2] == '==':
reporte_bnf.append("<expresion_whereb> ::= <expresion_wherea> DOBLEIG <expresion_wherea>")
t[0] = ExpresionRelacional(t[1],t[3],OPERACION_RELACIONAL.DOBLEIGUAL)
elif t[2] == '=':
reporte_bnf.append("<expresion_whereb> ::= <expresion_wherea> IGUAL <expresion_wherea>")
t[0] = ExpresionRelacional(t[1],t[3],OPERACION_RELACIONAL.IGUAL)
elif t[2] == '<>':
reporte_bnf.append("<expresion_whereb> ::= <expresion_wherea> NOIG <expresion_wherea>")
t[0] = ExpresionRelacional(t[1],t[3],OPERACION_RELACIONAL.NOIG)
elif t[2] == '!=':
reporte_bnf.append("<expresion_whereb> ::= <expresion_wherea> NOIGUAL <expresion_wherea>")
t[0] = ExpresionRelacional(t[1],t[3],OPERACION_RELACIONAL.DIFERENTE)
def p_expresion_whereb2(t):
' expresion_whereb : expresion_wherea '
reporte_bnf.append("<expresion_whereb> ::= <expresion_wherea>")
t[0] = t[1]
def p_expresion_logica_w(t):
''' expresion_logica_w : expresion_logica_w AND expresion_whereb
| expresion_logica_w OR expresion_whereb '''
if t[2].upper() == 'AND':
reporte_bnf.append("<expresion_logica_w> ::= <expresion_logica_w> AND <expresion_whereb>")
t[0] = ExpresionLogica(t[1],t[3],OPERACION_LOGICA.AND)
elif t[2].upper() == 'OR':
reporte_bnf.append("<expresion_logica_w> ::= <expresion_logica_w> OR <expresion_whereb>")
t[0] = ExpresionLogica(t[1],t[3],OPERACION_LOGICA.OR)
def p_expresion_logica_between(t):
' expresion_logica_w : expresion_logica_w BETWEEN expresion_whereb'
reporte_bnf.append("<expresion_logica_w> ::= <expresion_logica_w> BETWEEN <expresion_whereb>")
if t[2].upper() == 'BETWEEN' : t[0] = ExpresionLogica(t[1],t[3],OPCION_VERIFICAR.BETWEEN)
def p_expresion_logica_between_1(t):
' expresion_logica_w : expresion_wherea BETWEEN expresion_wherea AND expresion_wherea'
reporte_bnf.append("<expresion_logica_w> ::= <expresion_wherea> BETWEEN <expresion_wherea> AND <expresion_wherea>")
if t[2].upper() == 'BETWEEN' and t[4].upper() == 'AND' : t[0] = ExpresionLogica(ExpresionRelacional(t[1],t[3],OPERACION_RELACIONAL.MAYQUE),ExpresionRelacional(t[1],t[5],OPERACION_RELACIONAL.MENQUE),OPCION_VERIFICAR.BETWEEN_1)
def p_expresion_logica_between_NOT(t):
' expresion_logica_w : expresion_dato NOT BETWEEN expresion_dato AND expresion_dato'
reporte_bnf.append("<expresion_logica_w> ::= <expresion_dato> NOT BETWEEN <expresion_dato> AND <expresion_dato>")
if t[3].upper() == 'BETWEEN' and t[2].upper() == 'NOT' : t[0] = ExpresionLogica(ExpresionRelacional(t[1],t[4],OPERACION_RELACIONAL.MAYQUE),ExpresionRelacional(t[1],t[6],OPERACION_RELACIONAL.MENQUE),OPCION_VERIFICAR.N_BETWEEN)
def p_expresion_logica_between_distict(t):
' expresion_logica_w : expresion_dato IS DISTINCT FROM expresion_dato'
reporte_bnf.append("<expresion_logica_w> ::= <expresion_dato> IS DISTINCT FROM <expresion_dato>")
if t[3].upper() == 'DISTINCT' : t[0] = ExpresionLogica(ExpresionRelacional(t[1],t[5],OPERACION_RELACIONAL.DIFERENTE), ExpresionRelacional(t[1],t[5],OPERACION_RELACIONAL.DIFERENTE), OPCION_VERIFICAR.ISDISTINCT)
def p_expresion_logica_between_notdistict(t):
' expresion_logica_w : expresion_dato IS NOT DISTINCT FROM expresion_dato'
reporte_bnf.append("<expresion_logica_w> ::= <expresion_dato> IS NOT DISTINCT FROM expresion_dato")
if t[3].upper() == 'NOT' and t[4].upper() == 'DISTINCT' : t[0] = ExpresionLogica(ExpresionRelacional(t[1],t[6],OPERACION_RELACIONAL.DOBLEIGUAL), ExpresionRelacional(t[1],t[6],OPERACION_RELACIONAL.DOBLEIGUAL), OPCION_VERIFICAR.NOT_DISTINCT)
def p_expresion_logica_between_like(t):
'expresion_logica_w : expresion_dato LIKE CADENA'
reporte_bnf.append("<expresion_logica_w> ::= <expresion_dato> LIKE CADENA")
if t[2].upper() == 'LIKE' : t[0] = ExpresionLogica(ExpresionRelacional(t[1],ExpresionComillaSimple(TIPO_VALOR.NUMERO,t[3]),OPERACION_RELACIONAL.DOBLEIGUAL), ExpresionRelacional(t[1],ExpresionComillaSimple(TIPO_VALOR.NUMERO,t[3]),OPERACION_RELACIONAL.DOBLEIGUAL), OPCION_VERIFICAR.LIKE)
def p_expresion_logica_between_NOTLIKE(t):
'expresion_logica_w : expresion_dato NOT LIKE CADENA'
reporte_bnf.append("<expresion_logica_w> ::= <expresion_dato> NOT LIKE CADENA")
if t[3].upper() == 'LIKE' and t[2].upper() == 'NOT' : t[0] = ExpresionLogica(ExpresionRelacional(t[1],ExpresionComillaSimple(TIPO_VALOR.NUMERO,t[4]),OPERACION_RELACIONAL.DIFERENTE), ExpresionRelacional(t[1],ExpresionComillaSimple(TIPO_VALOR.NUMERO,t[4]),OPERACION_RELACIONAL.DIFERENTE), OPCION_VERIFICAR.NOT_LIKE)
def p_expresion_logica_w2(t):
' expresion_logica_w : NOT expresion_logica_w '
reporte_bnf.append("<expresion_logica_w> ::= NOT <expresion_logica_w>")
t[0] = ExpresionLogica(t[2],None,OPERACION_LOGICA.NOT)
def p_expresion_logica_w3(t):
' expresion_logica_w : expresion_whereb '
reporte_bnf.append("<expresion_logica_w> ::= <expresion_whereb>")
t[0] = t[1]
#? ###################################################################
# SECTION ADDITIONS FOR CHAPTER 11
#? ###################################################################
#? ###################################################################
# TODO INDEX
#? ###################################################################
def p_ins_createIndex(t):
'instruccion : createIndex'
t[0] = t[1]
def p_createIndex(t):
' createIndex : CREATE INDEX ID ON ID opc_index PTCOMA '
t[0] = Funcion_Index(INDEX.INDEX,t[3],t[5],t[6],None)
def p_createIndex1(t):
' createIndex : CREATE INDEX ID ON ID opc_index cond_where PTCOMA '
t[0] = Funcion_Index(INDEX.INDEX_WHERE,t[3],t[5],t[6],t[7])
def p_createIndex2(t):
' createIndex : CREATE INDEX ID ON ID opc_index INCLUDE opc_index PTCOMA '
t[0] = Funcion_Index(INDEX.INDEX_INCLUDE,t[3],t[5],t[6],t[8])
def p_createIndex3(t):
' createIndex : CREATE UNIQUE INDEX ID ON ID opc_index PTCOMA '
t[0] = Funcion_Index(INDEX.INDEX_UNIQUE_WHERE,t[4],t[6],t[7],None)
def p_createIndex4(t):
' createIndex : CREATE UNIQUE INDEX ID ON ID opc_index cond_where PTCOMA '
t[0] = Funcion_Index(INDEX.INDEX_INCLUDE,t[4],t[6],t[7],t[8])
def p_createIndex5(t):
' createIndex : CREATE UNIQUE INDEX ID ON ID opc_index INCLUDE opc_index PTCOMA '
t[0] = Funcion_Index(INDEX.INDEX_INCLUDE,t[4],t[6],t[7],t[9])
def p_otro_index(t):
'createIndex : CREATE INDEX ID ON ID PAR_A ID opclass PAR_C PTCOMA'
t[0] = Funcion_Index(INDEX.INDEX_CLASS,t[3],t[5],t[7],t[8])
def p_otro_index1(t):
'createIndex : CREATE INDEX ID ON ID PAR_A ID opclass sortoptions PAR_C PTCOMA'
    # Index over a single column with an operator class plus sort options (grouped as one payload).
    t[0] = Funcion_Index(INDEX.INDEX_CLASS, t[3], t[5], t[7], (t[8], t[9]))
def p_createIndex6(t):
'''opc_index : USING HASH PAR_A ID PAR_C
| PAR_A opc_index_par PAR_C'''
if t[1].upper() == 'USING':
t[0] = index_cuerpo(TIPO_INDEX.USING_HASH,t[4],None)
else:
t[0]= t[2]
def p_createIndex2_0(t):
' opc_index_par : campos_c '
t[0] = index_cuerpo(TIPO_INDEX.CAMPOS,t[1],None)
def p_createIndex2_1(t):
' opc_index_par : ID NULLS first_last'
t[0] = index_cuerpo(TIPO_INDEX.NULLS,t[1],t[3])
def p_createIndex2_1_1(t):
' opc_index_par : ID orden NULLS first_last '
t[0] = index_cuerpo(TIPO_INDEX.NULLS,t[1], t[4])
def p_createIndex2_3(t):
' opc_index_par : ID COLLATE string_type '
t[0] = index_cuerpo(TIPO_INDEX.COLLATE,t[1],t[3])
def p_createIndex2_30(t):
' opc_index_par : LOWER PAR_A ID PAR_C '
t[0] = index_cuerpo(TIPO_INDEX.LOWER,t[3],None)
def p_createIndex_5(t):
' opc_index_par : ID PAR_A ID PAR_C '
t[0] = index_cuerpo(TIPO_INDEX.WITH_IDS,t[1],t[3])
def p_first_last(t):
''' first_last : FIRST
| LAST'''
t[0] = t[1]
def p_sortoptions(t):
'sortoptions : sortoptions sortoption'
t[1].append(t[2])
t[0] = t[1]
def p_sortoptions0(t):
'sortoptions : sortoption'
    # The base case must yield a list so p_sortoptions can append to it.
    t[0] = [t[1]]
def p_sortoptions1(t):
'''sortoption : COLLATE
| ASC
| DESC '''
t[0] = t[1]
def p_sortoptions2(t):
'''sortoption : NULLS FIRST
| NULLS LAST '''
t[0] = t[2]
def p_opclass(t):
'''opclass : TEXT_PATTERN_OPS
| VARCHAR_PATTERN_OPS
| BPCHAR_PATTERN_OPS '''
t[0] = t[1]
def p_error(t):
    global reporte_sintactico
    if t is None:
        # Unexpected end of input; there is no token to report a position for.
        return
    # entradaa holds the source text saved by parse().
    print("Error sintáctico en '%s'" % t.value, str(t.lineno), find_column(entradaa, t))
    reporte_sintactico += "<tr> <td> Sintactico </td> <td>" + t.value + "</td>" + "<td>" + str(t.lineno) + "</td> <td> " + str(find_column(entradaa, t)) + "</td></tr>"
def find_column(input, token):
    # Column of the token, counted from the start of its line (1-based).
    line_start = input.rfind('\n', 0, token.lexpos) + 1
    return (token.lexpos - line_start) + 1
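# Illustrative: for the source "select\n *" with a token whose lexpos points at '*' (index 8), the
# last newline before it is at index 6, so line_start = 7 and the reported column is (8 - 7) + 1 = 2.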
import ply.yacc as yacc
parser = yacc.yacc()
'''f = open("./entrada.txt", "r")
input = f.read()
print(input)'''
def parse(input) :
global entradaa
entradaa = input
return parser.parse(input)
def get_array(lista):
    # Write the collected BNF productions to the report file, newest production first.
    reverse_list = lista[::-1]
    w_jumps = '\n \n'.join(reverse_list)
    with open("reportes/reportebnf.bnf", "w") as f:
        f.write(w_jumps)
'''parser.parse(input)'''
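# Illustrative driver, not part of the original module: it assumes an "entrada.txt" file next to
# this script (as hinted by the commented-out block above) and that reporte_bnf is the module-level
# list the grammar actions append their productions to.
if __name__ == "__main__":
    with open("entrada.txt", "r") as entrada:
        codigo = entrada.read()
    resultado = parse(codigo)
    print(resultado)
    get_array(reporte_bnf)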
|
py
|
1a59899bb0364052bf0585680b324a7487165b1a
|
from django.core.exceptions import FieldError
from django.test import TestCase
from .models.default_related_name import Author, Book, Editor
class DefaultRelatedNameTests(TestCase):
@classmethod
def setUpTestData(cls):
cls.author = Author.objects.create(first_name="Dave", last_name="Loper")
cls.editor = Editor.objects.create(
name="Test Editions", bestselling_author=cls.author
)
cls.book = Book.objects.create(title="Test Book", editor=cls.editor)
cls.book.authors.add(cls.author)
def test_no_default_related_name(self):
self.assertEqual(list(self.author.editor_set.all()), [self.editor])
def test_default_related_name(self):
self.assertEqual(list(self.author.books.all()), [self.book])
def test_default_related_name_in_queryset_lookup(self):
self.assertEqual(Author.objects.get(books=self.book), self.author)
def test_model_name_not_available_in_queryset_lookup(self):
msg = "Cannot resolve keyword 'book' into field."
with self.assertRaisesMessage(FieldError, msg):
Author.objects.get(book=self.book)
def test_related_name_overrides_default_related_name(self):
self.assertEqual(list(self.editor.edited_books.all()), [self.book])
def test_inheritance(self):
# model_options is the name of the application for this test.
self.assertEqual(list(self.book.model_options_bookstores.all()), [])
def test_inheritance_with_overridden_default_related_name(self):
self.assertEqual(list(self.book.editor_stores.all()), [])
|
py
|
1a5989f0caa2e6aaff5266411188a411a143a409
|
# qubit number=3
# total number=70
import numpy as np
from qiskit import QuantumCircuit, execute, Aer, QuantumRegister, ClassicalRegister, transpile, BasicAer, IBMQ
from qiskit.visualization import plot_histogram
from typing import *
from pprint import pprint
from math import log2
from collections import Counter
from qiskit.test.mock import FakeVigo, FakeYorktown
kernel = 'circuit/bernstein'
def bitwise_xor(s: str, t: str) -> str:
length = len(s)
res = []
for i in range(length):
res.append(str(int(s[i]) ^ int(t[i])))
return ''.join(res[::-1])
def bitwise_dot(s: str, t: str) -> str:
length = len(s)
res = 0
for i in range(length):
res += int(s[i]) * int(t[i])
return str(res % 2)
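# Illustrative examples of the two helpers above:
#   bitwise_xor("001", "111") -> "011"  (per-index XOR gives "110", which is then reversed)
#   bitwise_dot("011", "110") -> "1"    (0*1 + 1*1 + 1*0 = 1, reduced mod 2)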
def build_oracle(n: int, f: Callable[[str], str]) -> QuantumCircuit:
# implement the oracle O_f
# NOTE: use multi_control_toffoli_gate ('noancilla' mode)
# https://qiskit.org/documentation/_modules/qiskit/aqua/circuits/gates/multi_control_toffoli_gate.html
# https://quantumcomputing.stackexchange.com/questions/3943/how-do-you-implement-the-toffoli-gate-using-only-single-qubit-and-cnot-gates
# https://quantumcomputing.stackexchange.com/questions/2177/how-can-i-implement-an-n-bit-toffoli-gate
controls = QuantumRegister(n, "ofc")
target = QuantumRegister(1, "oft")
oracle = QuantumCircuit(controls, target, name="Of")
for i in range(2 ** n):
rep = np.binary_repr(i, n)
if f(rep) == "1":
for j in range(n):
if rep[j] == "0":
oracle.x(controls[j])
oracle.mct(controls, target[0], None, mode='noancilla')
for j in range(n):
if rep[j] == "0":
oracle.x(controls[j])
# oracle.barrier()
# oracle.draw('mpl', filename=(kernel + '-oracle.png'))
return oracle
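# Illustrative: for n = 2 and an f that maps only "11" to "1", the loop above adds a single
# multi-controlled X, so the target qubit flips exactly when both control qubits are |1>.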
def build_circuit(n: int, f: Callable[[str], str]) -> QuantumCircuit:
# implement the Bernstein-Vazirani circuit
zero = np.binary_repr(0, n)
b = f(zero)
# initial n + 1 bits
input_qubit = QuantumRegister(n+1, "qc")
classicals = ClassicalRegister(n, "qm")
prog = QuantumCircuit(input_qubit, classicals)
# inverse last one (can be omitted if using O_f^\pm)
prog.x(input_qubit[n])
# circuit begin
prog.h(input_qubit[1]) # number=1
prog.h(input_qubit[2]) # number=38
prog.cz(input_qubit[0],input_qubit[2]) # number=39
prog.h(input_qubit[2]) # number=40
prog.h(input_qubit[2]) # number=59
prog.cz(input_qubit[0],input_qubit[2]) # number=60
prog.h(input_qubit[2]) # number=61
prog.h(input_qubit[2]) # number=42
prog.cz(input_qubit[0],input_qubit[2]) # number=43
prog.h(input_qubit[2]) # number=44
prog.h(input_qubit[2]) # number=48
prog.cz(input_qubit[0],input_qubit[2]) # number=49
prog.h(input_qubit[2]) # number=50
prog.h(input_qubit[2]) # number=67
prog.cz(input_qubit[0],input_qubit[2]) # number=68
prog.h(input_qubit[2]) # number=69
prog.x(input_qubit[2]) # number=55
prog.cx(input_qubit[0],input_qubit[2]) # number=56
prog.h(input_qubit[2]) # number=64
prog.cz(input_qubit[0],input_qubit[2]) # number=65
prog.h(input_qubit[2]) # number=66
prog.cx(input_qubit[0],input_qubit[2]) # number=37
prog.h(input_qubit[2]) # number=51
prog.cz(input_qubit[0],input_qubit[2]) # number=52
prog.h(input_qubit[2]) # number=53
prog.h(input_qubit[2]) # number=25
prog.cz(input_qubit[0],input_qubit[2]) # number=26
prog.h(input_qubit[2]) # number=27
prog.h(input_qubit[1]) # number=7
prog.cz(input_qubit[2],input_qubit[1]) # number=8
prog.rx(0.17592918860102857,input_qubit[2]) # number=34
prog.rx(-0.3989822670059037,input_qubit[1]) # number=30
prog.h(input_qubit[1]) # number=9
prog.h(input_qubit[1]) # number=18
prog.rx(2.3310617489636263,input_qubit[2]) # number=58
prog.cz(input_qubit[2],input_qubit[1]) # number=19
prog.h(input_qubit[1]) # number=20
prog.x(input_qubit[1]) # number=62
prog.y(input_qubit[1]) # number=14
prog.h(input_qubit[1]) # number=22
prog.cz(input_qubit[2],input_qubit[1]) # number=23
prog.rx(-0.9173450548482197,input_qubit[1]) # number=57
prog.cx(input_qubit[2],input_qubit[1]) # number=63
prog.h(input_qubit[1]) # number=24
prog.z(input_qubit[2]) # number=3
prog.z(input_qubit[1]) # number=41
prog.x(input_qubit[1]) # number=17
prog.y(input_qubit[2]) # number=5
prog.x(input_qubit[2]) # number=21
# apply H to get superposition
for i in range(n):
prog.h(input_qubit[i])
prog.h(input_qubit[n])
prog.barrier()
# apply oracle O_f
oracle = build_oracle(n, f)
prog.append(
oracle.to_gate(),
[input_qubit[i] for i in range(n)] + [input_qubit[n]])
# apply H back (QFT on Z_2^n)
for i in range(n):
prog.h(input_qubit[i])
prog.barrier()
# measure
return prog
def get_statevector(prog: QuantumCircuit) -> Any:
state_backend = Aer.get_backend('statevector_simulator')
statevec = execute(prog, state_backend).result()
quantum_state = statevec.get_statevector()
qubits = round(log2(len(quantum_state)))
quantum_state = {
"|" + np.binary_repr(i, qubits) + ">": quantum_state[i]
for i in range(2 ** qubits)
}
return quantum_state
def evaluate(backend_str: str, prog: QuantumCircuit, shots: int, b: str) -> Any:
# Q: which backend should we use?
# get state vector
quantum_state = get_statevector(prog)
# get simulate results
# provider = IBMQ.load_account()
# backend = provider.get_backend(backend_str)
# qobj = compile(prog, backend, shots)
# job = backend.run(qobj)
# job.result()
backend = Aer.get_backend(backend_str)
# transpile/schedule -> assemble -> backend.run
results = execute(prog, backend, shots=shots).result()
counts = results.get_counts()
a = Counter(counts).most_common(1)[0][0][::-1]
return {
"measurements": counts,
# "state": statevec,
"quantum_state": quantum_state,
"a": a,
"b": b
}
def bernstein_test_1(rep: str):
"""011 . x + 1"""
a = "011"
b = "1"
return bitwise_xor(bitwise_dot(a, rep), b)
def bernstein_test_2(rep: str):
"""000 . x + 0"""
a = "000"
b = "0"
return bitwise_xor(bitwise_dot(a, rep), b)
def bernstein_test_3(rep: str):
"""111 . x + 1"""
a = "111"
b = "1"
return bitwise_xor(bitwise_dot(a, rep), b)
if __name__ == "__main__":
n = 2
a = "11"
b = "1"
f = lambda rep: \
bitwise_xor(bitwise_dot(a, rep), b)
prog = build_circuit(n, f)
    sample_shot = 4000
writefile = open("../data/startQiskit_noisy350.csv", "w")
# prog.draw('mpl', filename=(kernel + '.png'))
backend = FakeYorktown()
circuit1 = transpile(prog, FakeYorktown())
circuit1.h(qubit=2)
circuit1.x(qubit=3)
circuit1.measure_all()
info = execute(circuit1,backend=backend, shots=sample_shot).result().get_counts()
print(info, file=writefile)
print("results end", file=writefile)
print(circuit1.depth(), file=writefile)
print(circuit1, file=writefile)
writefile.close()
|
py
|
1a598ae224f5153b0829585c3c39cc9ffcd1c95e
|
import torch
from pytorch_lightning import LightningModule
from torch import nn
from inferface.config import NetworkLayerSizes, LossNames, FairFaceColumnKeys
class AgeGenderRaceClassifier(LightningModule):
def __init__(self,
input_size: int = NetworkLayerSizes.INPUT.value,
output_size_age: int = NetworkLayerSizes.AGE_9_OUTPUT.value,
output_size_gender: int = NetworkLayerSizes.GENDER_2_OUTPUT.value,
output_size_race: int = NetworkLayerSizes.RACE_7_OUTPUT.value,
lr: float = 1e-3,
dropout: float = 0.4
):
super().__init__()
self.lr = lr
self.dropout = dropout
self.fc_age = nn.Sequential(nn.Linear(input_size, 256),
nn.ReLU(),
nn.Dropout(self.dropout),
nn.Linear(256, 64),
nn.ReLU(),
nn.Dropout(self.dropout),
nn.Linear(64, output_size_age),
nn.LogSoftmax(dim=1))
self.fc_gender = nn.Sequential(nn.Linear(input_size, 256),
nn.ReLU(),
nn.Dropout(self.dropout),
nn.Linear(256, 64),
nn.ReLU(),
nn.Dropout(self.dropout),
nn.Linear(64, output_size_gender),
nn.Sigmoid())
self.fc_race = nn.Sequential(nn.Linear(input_size, 256),
nn.ReLU(),
nn.Dropout(self.dropout),
nn.Linear(256, 64),
nn.ReLU(),
nn.Dropout(self.dropout),
nn.Linear(64, output_size_race),
nn.LogSoftmax(dim=1))
self.criterion_binary = nn.BCELoss()
self.criterion_multioutput = nn.CrossEntropyLoss()
def forward(self, x):
age = self.fc_age(x)
gender = self.fc_gender(x)
race = self.fc_race(x)
return age, gender, race
def _loop(self, batch, batch_idx, stage):
image_path, embedding, age, gender, race = batch[FairFaceColumnKeys.KEY_FILE.value], \
batch[FairFaceColumnKeys.KEY_EMBEDDING.value], \
batch[FairFaceColumnKeys.KEY_AGE.value], \
batch[FairFaceColumnKeys.KEY_GENDER.value], \
batch[FairFaceColumnKeys.KEY_RACE.value]
age_hat, gender_hat, race_hat = self(embedding)
loss_age = self.criterion_multioutput(age_hat, age)
self.log(f"{stage}_{LossNames.LOSS_AGE.value}", loss_age)
loss_gender = self.criterion_binary(gender_hat, gender)
self.log(f"{stage}_{LossNames.LOSS_GENDER.value}", loss_gender)
loss_race = self.criterion_multioutput(race_hat, race)
self.log(f"{stage}_{LossNames.LOSS_RACE.value}", loss_race)
loss = loss_age + loss_gender + loss_race
self.log(f"{stage}_{LossNames.LOSS_TOTAL.value}", loss)
return loss
def training_step(self, batch, batch_idx):
stage = 'train'
return self._loop(batch, batch_idx, stage)
def validation_step(self, batch, batch_idx):
stage = 'val'
return self._loop(batch, batch_idx, stage)
def test_step(self, batch, batch_idx):
stage = 'test'
return self._loop(batch, batch_idx, stage)
def configure_optimizers(self):
optimizer = torch.optim.Adam(self.parameters(), lr=self.lr)
return optimizer
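# Illustrative usage sketch, not part of the original module: build the classifier with its
# defaults and run a forward pass on a random embedding batch. The embedding width comes from
# NetworkLayerSizes.INPUT in inferface.config, so no concrete size is assumed here.
if __name__ == "__main__":
    model = AgeGenderRaceClassifier()
    dummy_batch = torch.randn(4, NetworkLayerSizes.INPUT.value)
    age_out, gender_out, race_out = model(dummy_batch)
    print(age_out.shape, gender_out.shape, race_out.shape)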
|
py
|
1a598d5315460f124870c72f9c110f9a32980080
|
from django.shortcuts import render, redirect
from urllib import request
from http import cookiejar
import urllib
import json
from django.shortcuts import HttpResponse
from urllib import parse
from bs4 import BeautifulSoup
import pymysql.cursors
import pymysql
import numpy as np
from main.fdu_cookie import FduCookie
# import tesserocr
# Create your views here.
def index(req):
login_url = 'https://gsas.fudan.edu.cn/sscjcx/index'
code_url = 'https://gsas.fudan.edu.cn/captcha/imageCode'
user_agent = r'Mozilla/5.0 (Windows NT 6.2; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) ' \
r'Chrome/27.0.1453.94 Safari/537.36'
head = {'User-Agent': user_agent, 'Connection': 'keep-alive'}
cookie = cookiejar.CookieJar()
handler = request.HTTPCookieProcessor(cookie)
opener = request.build_opener(handler)
req_crawler = request.Request(url=login_url, headers=head)
req_code = request.Request(url=code_url, headers=head)
response = opener.open(req_crawler)
for item in cookie:
cookie_name = item.name
cookie_value = item.value
response_code = opener.open(req_code)
code = response_code.read()
with open('main/static/'+str(cookie_value)+'.png','wb') as code_img:
code_img.write(code)
response.close()
response_code.close()
return render(req, 'base.html', {'string': str(cookie_value)})
def cookie_homepage(req):
return render(req, 'base_cookie.html')
def test_post(req):
if req.method == 'GET':
return HttpResponse('get')
else:
req_data = json.loads(req.body)
username = req_data['username']
password = req_data['password']
varycode = req_data['varycode']
cookie = req_data['crawcookie']
print('username', username)
# print(type(username))
# print(username, password, varycode, cookie)
# img_file = 'main/static/'+str(cookie)+'.png'
# varycode = tesserocr.file_to_text(img_file)
# varycode = str(varycode).strip().strip(b'\x00'.decode())
# print('varycode', varycode)
user_agent = r'Mozilla/5.0 (Windows NT 6.2; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) ' \
r'Chrome/27.0.1453.94 Safari/537.36'
head = {'User-Agent': user_agent, 'Connection': 'keep-alive'}
head['Cookie'] = 'cn_com_southsoft_gms='+str(cookie)
post_url = 'https://gsas.fudan.edu.cn/sscjcx/28198B369067E88DAB9FEFE85484DBF4'
try:
post_data = {}
post_data['nd'] = '2021'
post_data['username'] = username
post_data['password'] = password
post_data['validateCode'] = varycode
# print(post_data)
datepostdata = parse.urlencode(post_data).encode('utf-8')
craw_req = request.Request(url=post_url, data=datepostdata, headers=head)
craw_response = urllib.request.urlopen(craw_req)
html = craw_response.read().decode('utf-8')
soup = BeautifulSoup(html, "lxml")
craw_response.close()
result = soup.select("#errorInfo")
if len(result)<1:
# print('success')
table = soup.table
trs = table.find_all('tr')
total_grade_tr = trs[-1]
name_tr = trs[0]
type_tr = trs[1]
                # total score
total_grade = total_grade_tr.find_all('td')[-1].get_text()
                # type of program applied for
st_type = type_tr.find_all('td')[-1].get_text()
st_type = str(st_type).strip().strip(b'\x00'.decode())
# print(st_type)
                # applicant name
st_name = name_tr.find_all('td')[-1].get_text()
st_name = str(st_name).strip().strip(b'\x00'.decode())
student_type = 0
                # professional master's degree
if '085211' in st_type:
student_type = 0
                # academic master's degree
elif '081201' in st_type or '081202' in st_type or '081203' in st_type or '083900' in st_type:
student_type = 1
else:
student_type = 2
rep = {'status': 0, 'st_type': student_type, 'total_grade': total_grade, 'st_name': st_name}
if student_type !=2:
                    # insert into the database
connect = pymysql.Connect(
host='127.0.0.1',
port=3306,
user='root',
passwd='',
db='student',
charset='utf8'
)
cursor = connect.cursor()
sql = "SELECT * FROM student WHERE number = %d;"
cursor.execute(sql % int(username))
if cursor.rowcount > 0:
pass
else:
sql = "INSERT INTO student(number, type, grade) VALUES (%s, %d, %d);"
insert_data = (str(username), student_type, int(total_grade))
cursor.execute(sql % insert_data)
connect.commit()
if student_type == 0:
sql = "SELECT grade FROM student WHERE type = 0 ORDER BY grade desc;"
cursor.execute(sql)
grade_list = []
for item in cursor.fetchall():
grade_list.append(int(item[0]))
# print(grade_list)
total_grade = int(total_grade)
index = grade_list.index(total_grade)
total = cursor.rowcount
rep['rank'] = str(index+1) + '/' + str(total)
elif student_type == 1:
sql = "SELECT grade FROM student WHERE type = 1 ORDER BY grade desc;"
cursor.execute(sql)
grade_list = []
for item in cursor.fetchall():
grade_list.append(int(item[0]))
# print(grade_list)
total_grade = int(total_grade)
index = grade_list.index(total_grade)
total = cursor.rowcount
rep['rank'] = str(index+1) + '/' + str(total)
cursor.close()
connect.close()
                return HttpResponse(json.dumps(rep, ensure_ascii=False), content_type="application/json; charset=utf-8")
else:
result = result[0].get_text()
result = str(result).strip().strip(b'\x00'.decode())
print(result)
rep = {'status': 1, 'data': result}
                return HttpResponse(json.dumps(rep, ensure_ascii=False), content_type="application/json; charset=utf-8")
except urllib.error.URLError as e:
print(e.reason)
rep = {'status': 1, 'data': 'error'}
            return HttpResponse(json.dumps(rep, ensure_ascii=False), content_type="application/json; charset=utf-8")
def cookie_crow(req):
if req.method == 'GET':
return HttpResponse('get')
else:
req_data = json.loads(req.body)
cookie = req_data['crawcookie']
# print(cookie)
try:
fducookie = FduCookie(cookie)
suffix = fducookie.get_suffix()
# print('suffix', suffix)
if suffix is not None:
student_info = fducookie.get_score(suffix)
# print('student info', student_info)
username = student_info['uid']
st_name = student_info['st_name']
total_grade = student_info['score']
student_type = student_info['type']
rep = {'status': 0, 'st_type': student_type, 'total_grade': total_grade, 'st_name': st_name}
# print(rep)
if student_type !=2:
                    # insert into the database
connect = pymysql.Connect(
host='127.0.0.1',
port=3306,
user='root',
passwd='',
db='student',
charset='utf8'
)
cursor = connect.cursor()
sql = "SELECT * FROM student WHERE number = %d;"
cursor.execute(sql % int(username))
if cursor.rowcount > 0:
pass
else:
sql = "INSERT INTO student(number, type, grade) VALUES (%s, %d, %d);"
insert_data = (str(username), student_type, int(total_grade))
cursor.execute(sql % insert_data)
connect.commit()
if student_type == 0:
sql = "SELECT grade FROM student WHERE type = 0 ORDER BY grade desc;"
cursor.execute(sql)
grade_list = []
for item in cursor.fetchall():
grade_list.append(int(item[0]))
# print(grade_list)
total_grade = int(total_grade)
index = grade_list.index(total_grade)
total = cursor.rowcount
rep['rank'] = str(index+1) + '/' + str(total)
elif student_type == 1:
sql = "SELECT grade FROM student WHERE type = 1 ORDER BY grade desc;"
cursor.execute(sql)
grade_list = []
for item in cursor.fetchall():
grade_list.append(int(item[0]))
# print(grade_list)
total_grade = int(total_grade)
index = grade_list.index(total_grade)
total = cursor.rowcount
rep['rank'] = str(index+1) + '/' + str(total)
cursor.close()
connect.close()
                return HttpResponse(json.dumps(rep, ensure_ascii=False), content_type="application/json; charset=utf-8")
else:
rep = {'status': 1, 'data': "cookie is invalid"}
                return HttpResponse(json.dumps(rep, ensure_ascii=False), content_type="application/json; charset=utf-8")
except urllib.error.URLError as e:
print(e.reason)
rep = {'status': 1, 'data': 'error'}
            return HttpResponse(json.dumps(rep, ensure_ascii=False), content_type="application/json; charset=utf-8")
def rank(req):
stu_type = int(req.GET.get('type'))
# stu_type = int(req.GET.get('type'))
# print('stu_type', stu_type)
connect = pymysql.Connect(
host='127.0.0.1',
port=3306,
user='root',
passwd='zhangzhao1996',
db='student',
charset='utf8'
)
cursor = connect.cursor()
sql = ''
if stu_type == 0:
sql = "SELECT grade FROM student WHERE type = 0 ORDER BY grade desc;"
elif stu_type == 1:
sql = "SELECT grade FROM student WHERE type = 1 ORDER BY grade desc;"
else:
resp = HttpResponse()
resp.status_code = 404
return resp
cursor.execute(sql)
grade_list = []
for item in cursor.fetchall():
grade_list.append(int(item[0]))
# print(grade_list)
cursor.close()
connect.close()
grade_set = np.unique(grade_list)
rank_list = []
# for grade in grade_set[::-1]:
# rank_list.append({'grade': str(grade), 'rank': str(grade_list.index(grade)+1)})
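    # Competition-style ranking over the descending grade list: only the first occurrence of each
    # distinct grade is kept, with its 1-based position as the rank. Illustrative example:
    # [400, 398, 398, 395] -> ranks '1', '2', '4' (the duplicate 398 is skipped).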
max_grade = 510
for i, grade in enumerate(grade_list):
if max_grade > grade:
max_grade = grade
rank_list.append({'grade': str(grade), 'rank': str(i+1)})
    return HttpResponse(json.dumps(rank_list, ensure_ascii=False), content_type="application/json; charset=utf-8")
def ranking(req):
return render(req, 'rank.html')
def rankinglm(req):
return render(req, 'ranklm.html')
def verify_student(req):
if req.method == 'GET':
return HttpResponse('get')
else:
req_data = json.loads(req.body)
uid = req_data['uid']
# print(uid)
connect = pymysql.Connect(
host='127.0.0.1',
port=3306,
user='root',
passwd='',
db='student',
charset='utf8'
)
cursor = connect.cursor()
sql = "SELECT * FROM student WHERE number = %d;"
cursor.execute(sql % int(uid))
if cursor.rowcount > 0:
item = cursor.fetchone()
# print(item)
type = int(item[-2])
score = int(item[-1])
rep = {'status': 0, 'st_type': type, 'score': score}
# print(rep)
cursor.close()
connect.close()
            return HttpResponse(json.dumps(rep, ensure_ascii=False), content_type="application/json; charset=utf-8")
else:
rep = {'status': 1}
            return HttpResponse(json.dumps(rep, ensure_ascii=False), content_type="application/json; charset=utf-8")
def verify_homepage(req):
return render(req, 'verify_student.html')
|
py
|
1a598e24c08421f3f4780cd39d6149797ccda312
|
def main():
print("foo")
if __name__ == '__main__':
main()
|
py
|
1a598ffd2ec0d3b498e9c7f1325b09678569bcf1
|
# Copyright 2021 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Supervisord Tessia instance runner
"""
#
# IMPORTS
#
#
# CONSTANTS AND DEFINITIONS
#
#
# CODE
#
class SupervisordInstance:
"""Tessia instance for supervisord"""
def __init__(self, configuration) -> None:
self._conf = configuration
# __init__()
def setup(self) -> None:
"""
Create configuration files for components, check prerequisites etc.
"""
raise NotImplementedError()
# setup()
def run(self) -> None:
"""
Run the instance
"""
raise NotImplementedError()
# run()
def stop(self) -> None:
"""
Stop the instance
"""
raise NotImplementedError()
# stop()
def cleanup(self) -> None:
"""
Cleanup as much as possible
"""
raise NotImplementedError()
# cleanup()
# SupervisordInstance
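# Illustrative only: a concrete runner would subclass SupervisordInstance and implement the four
# lifecycle hooks, e.g. setup() rendering supervisord program sections from self._conf, run()
# launching supervisord, stop() shutting it down, and cleanup() removing generated files.
# The hook names come from the class above; the rest of this description is an assumption.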
|
py
|
1a5990bc9583a79faceb4b138bd9526f3626ad2c
|
#!/usr/bin/env python
import os
import sys
import django
from django.conf import settings
DIRNAME = os.path.dirname(__file__)
if django.VERSION[1] < 4:
    # For Django versions older than 1.4 (which introduced USE_TZ),
    # configure the settings without the TZ option.
settings.configure(DEBUG=True,
DATABASES={
'default': {
'ENGINE': 'django.db.backends.sqlite3', }
},
INSTALLED_APPS=('django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.admin',
'downtime',
'eultheme',))
else:
settings.configure(DEBUG=True,
DATABASES={
'default': {
'ENGINE': 'django.db.backends.sqlite3', }
},
INSTALLED_APPS=('django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.admin',
'downtime',
'eultheme',),
USE_TZ=True)
try:
    # Django 1.7+ needs this; older versions don't have django.setup().
django.setup()
except AttributeError:
pass
try:
from django.test.simple import DjangoTestSuiteRunner
test_runner = DjangoTestSuiteRunner(verbosity=1)
except ImportError:
from django.test.runner import DiscoverRunner
test_runner = DiscoverRunner(verbosity=1)
failures = test_runner.run_tests(['eultheme', ])
if failures:
sys.exit(failures)
|
py
|
1a599110ef5339f1a6e24cf07dd25d0122b6eb71
|
from django.db import models # type: ignore
from .jurisdiction import Jurisdiction, LegislativeSession
OBJECT_TYPES = (
("jurisdiction", "Jurisdiction"),
("person", "Person"),
("organization", "Organization"),
("post", "Post"),
("membership", "Membership"),
("bill", "Bill"),
("vote_event", "VoteEvent"),
("event", "Event"),
)
class RunPlan(models.Model):
jurisdiction = models.ForeignKey(
Jurisdiction, related_name="runs", on_delete=models.CASCADE
)
success = models.BooleanField(default=True)
start_time = models.DateTimeField()
end_time = models.DateTimeField()
exception = models.TextField(blank=True, default="")
traceback = models.TextField(blank=True, default="")
class Meta:
db_table = "pupa_runplan"
class ScrapeReport(models.Model):
plan = models.ForeignKey(RunPlan, related_name="scrapers", on_delete=models.CASCADE)
scraper = models.CharField(max_length=300)
args = models.CharField(max_length=300)
start_time = models.DateTimeField()
end_time = models.DateTimeField()
class Meta:
db_table = "pupa_scrapereport"
class ScrapeObjects(models.Model):
report = models.ForeignKey(
ScrapeReport, related_name="scraped_objects", on_delete=models.CASCADE
)
object_type = models.CharField(max_length=20, choices=OBJECT_TYPES)
count = models.PositiveIntegerField()
class Meta:
db_table = "pupa_scrapeobjects"
class ImportObjects(models.Model):
report = models.ForeignKey(
RunPlan, related_name="imported_objects", on_delete=models.CASCADE
)
object_type = models.CharField(max_length=20, choices=OBJECT_TYPES)
insert_count = models.PositiveIntegerField()
update_count = models.PositiveIntegerField()
noop_count = models.PositiveIntegerField()
start_time = models.DateTimeField()
end_time = models.DateTimeField()
records = models.JSONField(blank=True, null=True)
class Meta:
db_table = "pupa_importobjects"
class SessionDataQualityReport(models.Model):
legislative_session = models.ForeignKey(
LegislativeSession, on_delete=models.CASCADE
)
bills_missing_actions = models.PositiveIntegerField()
bills_missing_sponsors = models.PositiveIntegerField()
bills_missing_versions = models.PositiveIntegerField()
votes_missing_voters = models.PositiveIntegerField()
votes_missing_bill = models.PositiveIntegerField()
votes_missing_yes_count = models.PositiveIntegerField()
votes_missing_no_count = models.PositiveIntegerField()
votes_with_bad_counts = models.PositiveIntegerField()
    # these fields store names mapped to their numbers of occurrences
unmatched_sponsor_people = models.JSONField()
unmatched_sponsor_organizations = models.JSONField()
unmatched_voters = models.JSONField()
class Meta:
db_table = "pupa_sessiondataqualityreport"
|
py
|
1a5991b6fcd51e13b5206f5e07664dc17e210b03
|
import pytest
from django.test import RequestFactory
from vapor_manager.users.models import User
from vapor_manager.users.tests.factories import UserFactory
@pytest.fixture(autouse=True)
def media_storage(settings, tmpdir):
settings.MEDIA_ROOT = tmpdir.strpath
@pytest.fixture
def user() -> User:
return UserFactory()
@pytest.fixture
def request_factory() -> RequestFactory:
return RequestFactory()
|
py
|
1a599261e40c47a7d99d56df4fa9d5217e159216
|
"""Utility functions for attitude dynamics."""
import numpy as np
def cross_matrix(vector) -> np.ndarray:
"""The cross-product 'tilde' matrix of a 3x1 vector."""
return np.array(
[
[0, -vector[2], vector[1]],
[vector[2], 0, -vector[0]],
[-vector[1], vector[0], 0]
]
)
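if __name__ == "__main__":
    # Quick sanity check (illustrative, not part of the original module): the tilde matrix
    # reproduces the cross product, i.e. cross_matrix(a) @ b equals np.cross(a, b).
    a = np.array([1.0, 2.0, 3.0])
    b = np.array([4.0, 5.0, 6.0])
    assert np.allclose(cross_matrix(a) @ b, np.cross(a, b))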
|
py
|
1a5994fa510c84efe7133e7b01fbb7601369d00a
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class AlipayInsUnderwriteClaimReportQueryModel(object):
def __init__(self):
self._claim_report_no = None
@property
def claim_report_no(self):
return self._claim_report_no
@claim_report_no.setter
def claim_report_no(self, value):
self._claim_report_no = value
def to_alipay_dict(self):
params = dict()
if self.claim_report_no:
if hasattr(self.claim_report_no, 'to_alipay_dict'):
params['claim_report_no'] = self.claim_report_no.to_alipay_dict()
else:
params['claim_report_no'] = self.claim_report_no
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = AlipayInsUnderwriteClaimReportQueryModel()
if 'claim_report_no' in d:
o.claim_report_no = d['claim_report_no']
return o
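# Illustrative round trip, not part of the SDK sources; 'CR123456' is a made-up report number.
if __name__ == "__main__":
    model = AlipayInsUnderwriteClaimReportQueryModel.from_alipay_dict({'claim_report_no': 'CR123456'})
    print(model.to_alipay_dict())  # -> {'claim_report_no': 'CR123456'}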
|