# ===== the-stack_0_23669 =====
import logging
from collections import defaultdict
from copy import deepcopy
from decimal import Decimal
from typing import TYPE_CHECKING, Dict, Iterable, List, Optional, Tuple
from django.contrib.sites.models import Site
from django.db import transaction
from ..account.models import User
from ..core import analytics
from ..core.exceptions import AllocationError, InsufficientStock, InsufficientStockData
from ..payment import (
ChargeStatus,
CustomPaymentChoices,
PaymentError,
TransactionKind,
gateway,
)
from ..payment.models import Payment, Transaction
from ..payment.utils import create_payment
from ..warehouse.management import (
deallocate_stock,
deallocate_stock_for_order,
decrease_stock,
get_order_lines_with_track_inventory,
)
from ..warehouse.models import Stock
from . import (
FulfillmentLineData,
FulfillmentStatus,
OrderLineData,
OrderStatus,
events,
utils,
)
from .events import (
draft_order_created_from_replace_event,
fulfillment_refunded_event,
fulfillment_replaced_event,
order_replacement_created,
order_returned_event,
)
from .models import Fulfillment, FulfillmentLine, Order, OrderLine
from .notifications import (
send_fulfillment_confirmation_to_customer,
send_order_canceled_confirmation,
send_order_confirmed,
send_order_refunded_confirmation,
send_payment_confirmation,
)
from .utils import (
order_line_needs_automatic_fulfillment,
recalculate_order,
restock_fulfillment_lines,
update_order_status,
)
if TYPE_CHECKING:
from ..plugins.manager import PluginsManager
from ..warehouse.models import Warehouse
logger = logging.getLogger(__name__)
OrderLineIDType = int
QuantityType = int
def order_created(
order: "Order", user: "User", manager: "PluginsManager", from_draft: bool = False
):
events.order_created_event(order=order, user=user, from_draft=from_draft)
manager.order_created(order)
payment = order.get_last_payment()
if payment:
if order.is_captured():
order_captured(
order=order,
user=user,
amount=payment.total,
payment=payment,
manager=manager,
)
elif order.is_pre_authorized():
order_authorized(
order=order,
user=user,
amount=payment.total,
payment=payment,
manager=manager,
)
site_settings = Site.objects.get_current().settings
if site_settings.automatically_confirm_all_new_orders:
order_confirmed(order, user, manager)
def order_confirmed(
order: "Order",
user: "User",
manager: "PluginsManager",
send_confirmation_email: bool = False,
):
"""Order confirmed.
Trigger event, plugin hooks and optionally confirmation email.
"""
events.order_confirmed_event(order=order, user=user)
manager.order_confirmed(order)
if send_confirmation_email:
send_order_confirmed(order, user, manager)
def handle_fully_paid_order(
manager: "PluginsManager", order: "Order", user: Optional["User"] = None
):
events.order_fully_paid_event(order=order, user=user)
if order.get_customer_email():
send_payment_confirmation(order, manager)
if utils.order_needs_automatic_fulfillment(order):
automatically_fulfill_digital_lines(order, manager)
try:
analytics.report_order(order.tracking_client_id, order)
except Exception:
# Analytics failing should not abort the checkout flow
logger.exception("Recording order in analytics failed")
manager.order_fully_paid(order)
manager.order_updated(order)
@transaction.atomic
def cancel_order(order: "Order", user: Optional["User"], manager: "PluginsManager"):
"""Cancel order.
Release allocation of unfulfilled order items.
"""
events.order_canceled_event(order=order, user=user)
deallocate_stock_for_order(order)
order.status = OrderStatus.CANCELED
order.save(update_fields=["status"])
manager.order_cancelled(order)
manager.order_updated(order)
send_order_canceled_confirmation(order, user, manager)
def order_refunded(
order: "Order",
user: Optional["User"],
amount: "Decimal",
payment: "Payment",
manager: "PluginsManager",
):
events.payment_refunded_event(
order=order, user=user, amount=amount, payment=payment
)
manager.order_updated(order)
send_order_refunded_confirmation(order, user, amount, payment.currency, manager)
def order_voided(
order: "Order", user: "User", payment: "Payment", manager: "PluginsManager"
):
events.payment_voided_event(order=order, user=user, payment=payment)
manager.order_updated(order)
def order_returned(
order: "Order",
user: Optional["User"],
returned_lines: List[Tuple[QuantityType, OrderLine]],
manager: "PluginsManager",
):
order_returned_event(order=order, user=user, returned_lines=returned_lines)
update_order_status(order)
manager.order_updated(order)
@transaction.atomic
def order_fulfilled(
fulfillments: List["Fulfillment"],
user: "User",
fulfillment_lines: List["FulfillmentLine"],
manager: "PluginsManager",
notify_customer=True,
):
order = fulfillments[0].order
update_order_status(order)
events.fulfillment_fulfilled_items_event(
order=order, user=user, fulfillment_lines=fulfillment_lines
)
manager.order_updated(order)
for fulfillment in fulfillments:
manager.fulfillment_created(fulfillment)
if order.status == OrderStatus.FULFILLED:
manager.order_fulfilled(order)
if notify_customer:
for fulfillment in fulfillments:
send_fulfillment_confirmation_to_customer(order, fulfillment, user, manager)
def order_shipping_updated(order: "Order", manager: "PluginsManager"):
recalculate_order(order)
manager.order_updated(order)
def order_authorized(
order: "Order",
user: Optional["User"],
amount: "Decimal",
payment: "Payment",
manager: "PluginsManager",
):
events.payment_authorized_event(
order=order, user=user, amount=amount, payment=payment
)
manager.order_updated(order)
def order_captured(
order: "Order",
user: Optional["User"],
amount: "Decimal",
payment: "Payment",
manager: "PluginsManager",
):
events.payment_captured_event(
order=order, user=user, amount=amount, payment=payment
)
manager.order_updated(order)
if order.is_fully_paid():
handle_fully_paid_order(manager, order, user)
def fulfillment_tracking_updated(
fulfillment: "Fulfillment",
user: "User",
tracking_number: str,
manager: "PluginsManager",
):
events.fulfillment_tracking_updated_event(
order=fulfillment.order,
user=user,
tracking_number=tracking_number,
fulfillment=fulfillment,
)
manager.order_updated(fulfillment.order)
@transaction.atomic
def cancel_fulfillment(
fulfillment: "Fulfillment",
user: "User",
warehouse: "Warehouse",
manager: "PluginsManager",
):
"""Cancel fulfillment.
Return products to corresponding stocks.
"""
fulfillment = Fulfillment.objects.select_for_update().get(pk=fulfillment.pk)
restock_fulfillment_lines(fulfillment, warehouse)
events.fulfillment_canceled_event(
order=fulfillment.order, user=user, fulfillment=fulfillment
)
events.fulfillment_restocked_items_event(
order=fulfillment.order,
user=user,
fulfillment=fulfillment,
warehouse_pk=warehouse.pk,
)
fulfillment.status = FulfillmentStatus.CANCELED
fulfillment.save(update_fields=["status"])
update_order_status(fulfillment.order)
manager.order_updated(fulfillment.order)
@transaction.atomic
def mark_order_as_paid(
order: "Order",
request_user: "User",
manager: "PluginsManager",
external_reference: Optional[str] = None,
):
"""Mark order as paid.
    Allows creating a payment for an order without actually performing any
    payment through the gateway.
"""
payment = create_payment(
gateway=CustomPaymentChoices.MANUAL,
payment_token="",
currency=order.total.gross.currency,
email=order.user_email,
total=order.total.gross.amount,
order=order,
)
payment.charge_status = ChargeStatus.FULLY_CHARGED
payment.captured_amount = order.total.gross.amount
payment.save(update_fields=["captured_amount", "charge_status", "modified"])
Transaction.objects.create(
payment=payment,
action_required=False,
kind=TransactionKind.EXTERNAL,
token=external_reference or "",
is_success=True,
amount=order.total.gross.amount,
currency=order.total.gross.currency,
searchable_key=external_reference or "",
gateway_response={},
)
events.order_manually_marked_as_paid_event(
order=order, user=request_user, transaction_reference=external_reference
)
manager.order_fully_paid(order)
manager.order_updated(order)
def clean_mark_order_as_paid(order: "Order"):
"""Check if an order can be marked as paid."""
if order.payments.exists():
raise PaymentError(
"Orders with payments can not be manually marked as paid.",
)
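# Illustrative sketch, not part of the original module: validating an order and
# then manually marking it as paid, e.g. from an admin mutation. The external
# reference string below is a hypothetical placeholder.
def _example_mark_order_as_paid(order, request_user, manager):
    # Raises PaymentError if the order already has payments attached.
    clean_mark_order_as_paid(order)
    mark_order_as_paid(
        order, request_user, manager, external_reference="bank-transfer-123"
    )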
@transaction.atomic
def fulfill_order_lines(order_lines_info: Iterable["OrderLineData"]):
"""Fulfill order line with given quantity."""
lines_to_decrease_stock = get_order_lines_with_track_inventory(order_lines_info)
if lines_to_decrease_stock:
decrease_stock(lines_to_decrease_stock)
order_lines = []
for line_info in order_lines_info:
line = line_info.line
line.quantity_fulfilled += line_info.quantity
order_lines.append(line)
OrderLine.objects.bulk_update(order_lines, ["quantity_fulfilled"])
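# Illustrative sketch, not part of the original module: building OrderLineData
# records for every unfulfilled line of an order and passing them to
# fulfill_order_lines. The warehouse_pk argument is a hypothetical placeholder
# supplied by the caller.
def _example_fulfill_all_unfulfilled_lines(order, warehouse_pk):
    lines_info = [
        OrderLineData(
            line=line,
            quantity=line.quantity_unfulfilled,
            variant=line.variant,
            warehouse_pk=warehouse_pk,
        )
        for line in order.lines.all()
        if line.quantity_unfulfilled > 0
    ]
    # Decreases stock for tracked variants and bumps quantity_fulfilled.
    fulfill_order_lines(lines_info)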
@transaction.atomic
def automatically_fulfill_digital_lines(order: "Order", manager: "PluginsManager"):
"""Fulfill all digital lines which have enabled automatic fulfillment setting.
Send confirmation email afterward.
"""
digital_lines = order.lines.filter(
is_shipping_required=False, variant__digital_content__isnull=False
)
digital_lines = digital_lines.prefetch_related("variant__digital_content")
if not digital_lines:
return
fulfillment, _ = Fulfillment.objects.get_or_create(order=order)
fulfillments = []
lines_info = []
for line in digital_lines:
if not order_line_needs_automatic_fulfillment(line):
continue
variant = line.variant
if variant:
digital_content = variant.digital_content
digital_content.urls.create(line=line)
quantity = line.quantity
fulfillments.append(
FulfillmentLine(fulfillment=fulfillment, order_line=line, quantity=quantity)
)
warehouse_pk = line.allocations.first().stock.warehouse.pk # type: ignore
lines_info.append(
OrderLineData(
line=line,
quantity=quantity,
variant=line.variant,
warehouse_pk=warehouse_pk,
)
)
FulfillmentLine.objects.bulk_create(fulfillments)
fulfill_order_lines(lines_info)
send_fulfillment_confirmation_to_customer(
order, fulfillment, user=order.user, manager=manager
)
update_order_status(order)
def _create_fulfillment_lines(
fulfillment: Fulfillment,
warehouse_pk: str,
lines_data: List[Dict],
channel_slug: str,
) -> List[FulfillmentLine]:
"""Modify stocks and allocations. Return list of unsaved FulfillmentLines.
Args:
        fulfillment (Fulfillment): Fulfillment to create lines for.
        warehouse_pk (str): Warehouse from which to fulfill the order.
        lines_data (List[Dict]): List with the information needed to
            create FulfillmentLines. Example:
[
{
"order_line": (OrderLine),
"quantity": (int),
},
...
]
channel_slug (str): Channel for which fulfillment lines should be created.
    Return:
        List[FulfillmentLine]: Unsaved fulfillment lines created for this
            fulfillment based on the information from `lines_data`.
    Raise:
        InsufficientStock: If there is not enough of an item in stock for any line.
"""
lines = [line_data["order_line"] for line_data in lines_data]
variants = [line.variant for line in lines]
stocks = (
Stock.objects.for_channel(channel_slug)
.filter(warehouse_id=warehouse_pk, product_variant__in=variants)
.select_related("product_variant")
)
variant_to_stock: Dict[str, List[Stock]] = defaultdict(list)
for stock in stocks:
variant_to_stock[stock.product_variant_id].append(stock)
insufficient_stocks = []
fulfillment_lines = []
lines_info = []
for line in lines_data:
quantity = line["quantity"]
order_line = line["order_line"]
if quantity > 0:
line_stocks = variant_to_stock.get(order_line.variant_id)
if line_stocks is None:
error_data = InsufficientStockData(
variant=order_line.variant,
order_line=order_line,
warehouse_pk=warehouse_pk,
)
insufficient_stocks.append(error_data)
continue
stock = line_stocks[0]
lines_info.append(
OrderLineData(
line=order_line,
quantity=quantity,
variant=order_line.variant,
warehouse_pk=warehouse_pk,
)
)
if order_line.is_digital:
order_line.variant.digital_content.urls.create(line=order_line)
fulfillment_lines.append(
FulfillmentLine(
order_line=order_line,
fulfillment=fulfillment,
quantity=quantity,
stock=stock,
)
)
if insufficient_stocks:
raise InsufficientStock(insufficient_stocks)
if lines_info:
fulfill_order_lines(lines_info)
return fulfillment_lines
@transaction.atomic()
def create_fulfillments(
requester: "User",
order: "Order",
fulfillment_lines_for_warehouses: Dict,
manager: "PluginsManager",
notify_customer: bool = True,
) -> List[Fulfillment]:
"""Fulfill order.
    Create fulfillments with lines, then update the Order based on the created
    fulfillments.
Args:
        requester (User): Requester who triggered this action.
        order (Order): Order to fulfill.
        fulfillment_lines_for_warehouses (Dict): Dict with the information
            needed to create fulfillments. Example:
{
(Warehouse.pk): [
{
"order_line": (OrderLine),
"quantity": (int),
},
...
]
}
manager (PluginsManager): Base manager for handling plugins logic.
        notify_customer (bool): If `True`, the system sends an email about the
            fulfillments to the customer.
    Return:
        List[Fulfillment]: Fulfillments with lines created for this order
            based on the information from `fulfillment_lines_for_warehouses`.
    Raise:
        InsufficientStock: If there is not enough of an item in stock for any line.
"""
fulfillments: List[Fulfillment] = []
fulfillment_lines: List[FulfillmentLine] = []
for warehouse_pk in fulfillment_lines_for_warehouses:
fulfillment = Fulfillment.objects.create(order=order)
fulfillments.append(fulfillment)
fulfillment_lines.extend(
_create_fulfillment_lines(
fulfillment,
warehouse_pk,
fulfillment_lines_for_warehouses[warehouse_pk],
order.channel.slug,
)
)
FulfillmentLine.objects.bulk_create(fulfillment_lines)
transaction.on_commit(
lambda: order_fulfilled(
fulfillments,
requester,
fulfillment_lines,
manager,
notify_customer,
)
)
return fulfillments
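# Illustrative sketch, not part of the original module: assembling the
# `fulfillment_lines_for_warehouses` mapping documented above and calling
# create_fulfillments for a single warehouse. The warehouse primary key is a
# hypothetical placeholder.
def _example_create_fulfillments(requester, order, warehouse_pk, manager):
    lines_for_warehouses = {
        warehouse_pk: [
            {"order_line": line, "quantity": line.quantity_unfulfilled}
            for line in order.lines.all()
            if line.quantity_unfulfilled > 0
        ]
    }
    # Raises InsufficientStock if the warehouse cannot cover the quantities.
    return create_fulfillments(
        requester, order, lines_for_warehouses, manager, notify_customer=True
    )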
def _get_fulfillment_line_if_exists(
fulfillment_lines: List[FulfillmentLine], order_line_id, stock_id=None
):
for line in fulfillment_lines:
if line.order_line_id == order_line_id and line.stock_id == stock_id:
return line
return None
def _get_fulfillment_line(
target_fulfillment: Fulfillment,
lines_in_target_fulfillment: List[FulfillmentLine],
order_line_id: int,
stock_id: Optional[int] = None,
) -> Tuple[FulfillmentLine, bool]:
"""Get fulfillment line if extists or create new fulfillment line object."""
    # Check if a line for this order_line_id and stock_id already exists among
    # the lines in the target fulfillment.
moved_line = _get_fulfillment_line_if_exists(
lines_in_target_fulfillment,
order_line_id,
stock_id,
)
fulfillment_line_existed = True
if not moved_line:
        # Create a new, unsaved FulfillmentLine object and assign it to the
        # target fulfillment
fulfillment_line_existed = False
moved_line = FulfillmentLine(
fulfillment=target_fulfillment,
order_line_id=order_line_id,
stock_id=stock_id,
quantity=0,
)
return moved_line, fulfillment_line_existed
@transaction.atomic()
def _move_order_lines_to_target_fulfillment(
order_lines_to_move: List[OrderLineData],
lines_in_target_fulfillment: List[FulfillmentLine],
target_fulfillment: Fulfillment,
):
"""Move order lines with given quantity to the target fulfillment."""
fulfillment_lines_to_create: List[FulfillmentLine] = []
fulfillment_lines_to_update: List[FulfillmentLine] = []
order_lines_to_update: List[OrderLine] = []
    lines_to_deallocate: List[OrderLineData] = []
for line_data in order_lines_to_move:
line_to_move = line_data.line
quantity_to_move = line_data.quantity
moved_line, fulfillment_line_existed = _get_fulfillment_line(
target_fulfillment=target_fulfillment,
lines_in_target_fulfillment=lines_in_target_fulfillment,
order_line_id=line_to_move.id,
stock_id=None,
)
# calculate the quantity fulfilled/unfulfilled to move
unfulfilled_to_move = min(line_to_move.quantity_unfulfilled, quantity_to_move)
quantity_to_move -= unfulfilled_to_move
line_to_move.quantity_fulfilled += unfulfilled_to_move
moved_line.quantity += unfulfilled_to_move
# update current lines with new value of quantity
order_lines_to_update.append(line_to_move)
        if moved_line.quantity > 0 and not fulfillment_line_existed:
            # If this is a new (order_line, stock) pair, create a new
            # fulfillment line
            fulfillment_lines_to_create.append(moved_line)
        elif fulfillment_line_existed:
            # if the target fulfillment already has the same line, just update
            # the quantity
            fulfillment_lines_to_update.append(moved_line)
line_allocations_exists = line_to_move.allocations.exists()
if line_allocations_exists:
            lines_to_deallocate.append(
OrderLineData(line=line_to_move, quantity=unfulfilled_to_move)
)
    if lines_to_deallocate:
try:
            deallocate_stock(lines_to_deallocate)
except AllocationError as e:
logger.warning(
f"Unable to deallocate stock for line {', '.join(e.order_lines)}."
)
# update the fulfillment lines with new values
FulfillmentLine.objects.bulk_update(fulfillment_lines_to_update, ["quantity"])
FulfillmentLine.objects.bulk_create(fulfillment_lines_to_create)
OrderLine.objects.bulk_update(order_lines_to_update, ["quantity_fulfilled"])
@transaction.atomic()
def _move_fulfillment_lines_to_target_fulfillment(
fulfillment_lines_to_move: List[FulfillmentLineData],
lines_in_target_fulfillment: List[FulfillmentLine],
target_fulfillment: Fulfillment,
):
"""Move fulfillment lines with given quantity to the target fulfillment."""
fulfillment_lines_to_create: List[FulfillmentLine] = []
fulfillment_lines_to_update: List[FulfillmentLine] = []
empty_fulfillment_lines_to_delete: List[FulfillmentLine] = []
for fulfillment_line_data in fulfillment_lines_to_move:
fulfillment_line = fulfillment_line_data.line
quantity_to_move = fulfillment_line_data.quantity
moved_line, fulfillment_line_existed = _get_fulfillment_line(
target_fulfillment=target_fulfillment,
lines_in_target_fulfillment=lines_in_target_fulfillment,
order_line_id=fulfillment_line.order_line_id,
stock_id=fulfillment_line.stock_id,
)
# calculate the quantity fulfilled/unfulfilled/to move
fulfilled_to_move = min(fulfillment_line.quantity, quantity_to_move)
quantity_to_move -= fulfilled_to_move
moved_line.quantity += fulfilled_to_move
fulfillment_line.quantity -= fulfilled_to_move
if fulfillment_line.quantity == 0:
# the fulfillment line without any items will be deleted
empty_fulfillment_lines_to_delete.append(fulfillment_line)
else:
# update with new quantity value
fulfillment_lines_to_update.append(fulfillment_line)
        if moved_line.quantity > 0 and not fulfillment_line_existed:
            # If this is a new (order_line, stock) pair, create a new
            # fulfillment line
            fulfillment_lines_to_create.append(moved_line)
        elif fulfillment_line_existed:
            # if the target fulfillment already has the same line, just update
            # the quantity
            fulfillment_lines_to_update.append(moved_line)
# update the fulfillment lines with new values
FulfillmentLine.objects.bulk_update(fulfillment_lines_to_update, ["quantity"])
FulfillmentLine.objects.bulk_create(fulfillment_lines_to_create)
# Remove the empty fulfillment lines
FulfillmentLine.objects.filter(
id__in=[f.id for f in empty_fulfillment_lines_to_delete]
).delete()
def create_refund_fulfillment(
requester: Optional["User"],
order,
payment,
order_lines_to_refund: List[OrderLineData],
fulfillment_lines_to_refund: List[FulfillmentLineData],
manager: "PluginsManager",
amount=None,
refund_shipping_costs=False,
):
"""Proceed with all steps required for refunding products.
Calculate refunds for products based on the order's order lines and fulfillment
    lines. The logic takes the list of order lines, fulfillment lines, and their
    quantities, which are used to create the refund fulfillment. The stock for
    unfulfilled lines will be deallocated. Only a single refund fulfillment is
    created for each order; calling the method N times increases the quantities
    of the already refunded lines. The refund fulfillment can have lines assigned
    with the same products but from different stocks.
"""
_process_refund(
requester=requester,
order=order,
payment=payment,
order_lines_to_refund=order_lines_to_refund,
fulfillment_lines_to_refund=fulfillment_lines_to_refund,
amount=amount,
refund_shipping_costs=refund_shipping_costs,
manager=manager,
)
with transaction.atomic():
refunded_fulfillment, _ = Fulfillment.objects.get_or_create(
status=FulfillmentStatus.REFUNDED, order=order
)
already_refunded_lines = list(refunded_fulfillment.lines.all())
_move_order_lines_to_target_fulfillment(
order_lines_to_move=order_lines_to_refund,
lines_in_target_fulfillment=already_refunded_lines,
target_fulfillment=refunded_fulfillment,
)
_move_fulfillment_lines_to_target_fulfillment(
fulfillment_lines_to_move=fulfillment_lines_to_refund,
lines_in_target_fulfillment=already_refunded_lines,
target_fulfillment=refunded_fulfillment,
)
Fulfillment.objects.filter(order=order, lines=None).delete()
return refunded_fulfillment
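# Illustrative sketch, not part of the original module: refunding a single unit
# of the first order line against the order's last payment. The helper assumes
# the order has at least one line and one payment.
def _example_refund_one_unit(requester, order, manager):
    payment = order.get_last_payment()
    line = order.lines.first()
    return create_refund_fulfillment(
        requester=requester,
        order=order,
        payment=payment,
        order_lines_to_refund=[OrderLineData(line=line, quantity=1)],
        fulfillment_lines_to_refund=[],
        manager=manager,
        refund_shipping_costs=False,
    )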
def _populate_replace_order_fields(original_order: "Order"):
replace_order = Order()
replace_order.status = OrderStatus.DRAFT
replace_order.user_id = original_order.user_id
replace_order.language_code = original_order.language_code
replace_order.user_email = original_order.user_email
replace_order.currency = original_order.currency
replace_order.channel = original_order.channel
replace_order.display_gross_prices = original_order.display_gross_prices
replace_order.redirect_url = original_order.redirect_url
if original_order.billing_address:
original_order.billing_address.pk = None
replace_order.billing_address = original_order.billing_address
replace_order.billing_address.save()
if original_order.shipping_address:
original_order.shipping_address.pk = None
replace_order.shipping_address = original_order.shipping_address
replace_order.shipping_address.save()
replace_order.save()
original_order.refresh_from_db()
return replace_order
@transaction.atomic
def create_replace_order(
requester: Optional["User"],
original_order: "Order",
order_lines_to_replace: List[OrderLineData],
fulfillment_lines_to_replace: List[FulfillmentLineData],
) -> "Order":
"""Create draft order with lines to replace."""
replace_order = _populate_replace_order_fields(original_order)
order_line_to_create: Dict[OrderLineIDType, OrderLine] = dict()
# iterate over lines without fulfillment to get the items for replace.
    # deepcopy to avoid losing the reference to lines assigned to the original order
for line_data in deepcopy(order_lines_to_replace):
order_line = line_data.line
order_line_id = order_line.pk
order_line.pk = None
order_line.order = replace_order
order_line.quantity = line_data.quantity
order_line.quantity_fulfilled = 0
# we set order_line_id as a key to use it for iterating over fulfillment items
order_line_to_create[order_line_id] = order_line
order_lines_with_fulfillment = OrderLine.objects.in_bulk(
[line_data.line.order_line_id for line_data in fulfillment_lines_to_replace]
)
for fulfillment_line_data in fulfillment_lines_to_replace:
fulfillment_line = fulfillment_line_data.line
order_line_id = fulfillment_line.order_line_id
        # if order_line_id exists in order_line_to_create, it means that we already
        # have prepared a new order_line for this fulfillment. In that case we need
        # to increase the quantity of the new order_line by fulfillment_line.quantity
if order_line_id in order_line_to_create:
order_line_to_create[
order_line_id
].quantity += fulfillment_line_data.quantity
continue
order_line_from_fulfillment = order_lines_with_fulfillment.get(order_line_id)
order_line = order_line_from_fulfillment # type: ignore
order_line_id = order_line.pk
order_line.pk = None
order_line.order = replace_order
order_line.quantity = fulfillment_line_data.quantity
order_line.quantity_fulfilled = 0
order_line_to_create[order_line_id] = order_line
lines_to_create = order_line_to_create.values()
OrderLine.objects.bulk_create(lines_to_create)
recalculate_order(replace_order)
draft_order_created_from_replace_event(
draft_order=replace_order,
original_order=original_order,
user=requester,
lines=[(line.quantity, line) for line in lines_to_create],
)
return replace_order
def _move_lines_to_return_fulfillment(
order_lines: List[OrderLineData],
fulfillment_lines: List[FulfillmentLineData],
fulfillment_status: str,
order: "Order",
) -> Fulfillment:
target_fulfillment, _ = Fulfillment.objects.get_or_create(
status=fulfillment_status, order=order
)
lines_in_target_fulfillment = list(target_fulfillment.lines.all())
_move_order_lines_to_target_fulfillment(
order_lines_to_move=order_lines,
lines_in_target_fulfillment=lines_in_target_fulfillment,
target_fulfillment=target_fulfillment,
)
fulfillment_lines_already_refunded = FulfillmentLine.objects.filter(
fulfillment__order=order, fulfillment__status=FulfillmentStatus.REFUNDED
).values_list("id", flat=True)
refunded_fulfillment_lines_to_return = []
fulfillment_lines_to_return = []
for line_data in fulfillment_lines:
if line_data.line.id in fulfillment_lines_already_refunded:
            # items already refunded should be moved to the fulfillment with
            # status REFUNDED_AND_RETURNED
refunded_fulfillment_lines_to_return.append(line_data)
else:
# the rest of the items should be moved to target fulfillment
fulfillment_lines_to_return.append(line_data)
_move_fulfillment_lines_to_target_fulfillment(
fulfillment_lines_to_move=fulfillment_lines_to_return,
lines_in_target_fulfillment=lines_in_target_fulfillment,
target_fulfillment=target_fulfillment,
)
if refunded_fulfillment_lines_to_return:
refund_and_return_fulfillment, _ = Fulfillment.objects.get_or_create(
status=FulfillmentStatus.REFUNDED_AND_RETURNED, order=order
)
lines_in_target_fulfillment = list(refund_and_return_fulfillment.lines.all())
_move_fulfillment_lines_to_target_fulfillment(
fulfillment_lines_to_move=refunded_fulfillment_lines_to_return,
lines_in_target_fulfillment=lines_in_target_fulfillment,
target_fulfillment=refund_and_return_fulfillment,
)
return target_fulfillment
def _move_lines_to_replace_fulfillment(
order_lines_to_replace: List[OrderLineData],
fulfillment_lines_to_replace: List[FulfillmentLineData],
order: "Order",
) -> Fulfillment:
target_fulfillment, _ = Fulfillment.objects.get_or_create(
status=FulfillmentStatus.REPLACED, order=order
)
lines_in_target_fulfillment = list(target_fulfillment.lines.all())
_move_order_lines_to_target_fulfillment(
order_lines_to_move=order_lines_to_replace,
lines_in_target_fulfillment=lines_in_target_fulfillment,
target_fulfillment=target_fulfillment,
)
_move_fulfillment_lines_to_target_fulfillment(
fulfillment_lines_to_move=fulfillment_lines_to_replace,
lines_in_target_fulfillment=lines_in_target_fulfillment,
target_fulfillment=target_fulfillment,
)
return target_fulfillment
@transaction.atomic
def create_return_fulfillment(
requester: Optional["User"],
order: "Order",
order_lines: List[OrderLineData],
fulfillment_lines: List[FulfillmentLineData],
manager: "PluginsManager",
refund: bool = False,
) -> Fulfillment:
status = FulfillmentStatus.RETURNED
if refund:
status = FulfillmentStatus.REFUNDED_AND_RETURNED
with transaction.atomic():
return_fulfillment = _move_lines_to_return_fulfillment(
order_lines=order_lines,
fulfillment_lines=fulfillment_lines,
fulfillment_status=status,
order=order,
)
returned_lines: Dict[OrderLineIDType, Tuple[QuantityType, OrderLine]] = dict()
order_lines_with_fulfillment = OrderLine.objects.in_bulk(
[line_data.line.order_line_id for line_data in fulfillment_lines]
)
for line_data in order_lines:
returned_lines[line_data.line.id] = (line_data.quantity, line_data.line)
for line_data in fulfillment_lines:
order_line = order_lines_with_fulfillment.get(line_data.line.order_line_id)
returned_line = returned_lines.get(order_line.id) # type: ignore
if returned_line:
quantity, line = returned_line
quantity += line_data.quantity
returned_lines[order_line.id] = (quantity, line) # type: ignore
else:
returned_lines[order_line.id] = ( # type: ignore
line_data.quantity,
order_line,
)
returned_lines_list = list(returned_lines.values())
transaction.on_commit(
lambda: order_returned(
order,
user=requester,
returned_lines=returned_lines_list,
manager=manager,
)
)
return return_fulfillment
@transaction.atomic
def process_replace(
requester: Optional["User"],
order: "Order",
order_lines: List[OrderLineData],
fulfillment_lines: List[FulfillmentLineData],
) -> Tuple[Fulfillment, Optional["Order"]]:
"""Create replace fulfillment and new draft order.
    Move all requested lines to a fulfillment with status REPLACED. Based on the
    original order, create a draft order with all user details and the requested lines.
"""
replace_fulfillment = _move_lines_to_replace_fulfillment(
order_lines_to_replace=order_lines,
fulfillment_lines_to_replace=fulfillment_lines,
order=order,
)
new_order = create_replace_order(
requester=requester,
original_order=order,
order_lines_to_replace=order_lines,
fulfillment_lines_to_replace=fulfillment_lines,
)
replaced_lines = [(line.quantity, line) for line in new_order.lines.all()]
fulfillment_replaced_event(
order=order,
user=requester,
replaced_lines=replaced_lines,
)
order_replacement_created(
original_order=order,
replace_order=new_order,
user=requester,
)
return replace_fulfillment, new_order
def create_fulfillments_for_returned_products(
requester: Optional["User"],
order: "Order",
payment: Optional[Payment],
order_lines: List[OrderLineData],
fulfillment_lines: List[FulfillmentLineData],
manager: "PluginsManager",
refund: bool = False,
amount: Optional[Decimal] = None,
refund_shipping_costs=False,
) -> Tuple[Fulfillment, Optional[Fulfillment], Optional[Order]]:
"""Process the request for replacing or returning the products.
    Process the refund when `refund` is set to True. The refund amount will be
    calculated for all lines with statuses other than refunded. Lines that are
    set to be replaced will not be included in the refund amount.
    If an amount is provided, it will be used as the refund amount.
    If refund_shipping_costs is True, the calculated refund amount will include
    shipping costs.
    All lines with replace set to True will be used to create a new draft order
    with the same order details as the original order. These lines will be moved
    to a fulfillment with status REPLACED, and events related to the new order
    will be created.
    All lines with replace set to False will be moved to a fulfillment with status
    RETURNED or REFUNDED_AND_RETURNED, depending on the refund flag and the current
    line status. If a fulfillment line already has REFUNDED status, it will be
    moved to REFUNDED_AND_RETURNED.
    """
return_order_lines = [data for data in order_lines if not data.replace]
return_fulfillment_lines = [data for data in fulfillment_lines if not data.replace]
if refund and payment:
_process_refund(
requester=requester,
order=order,
payment=payment,
order_lines_to_refund=return_order_lines,
fulfillment_lines_to_refund=return_fulfillment_lines,
amount=amount,
refund_shipping_costs=refund_shipping_costs,
manager=manager,
)
with transaction.atomic():
replace_order_lines = [data for data in order_lines if data.replace]
replace_fulfillment_lines = [data for data in fulfillment_lines if data.replace]
replace_fulfillment, new_order = None, None
if replace_order_lines or replace_fulfillment_lines:
replace_fulfillment, new_order = process_replace(
requester=requester,
order=order,
order_lines=replace_order_lines,
fulfillment_lines=replace_fulfillment_lines,
)
return_fulfillment = create_return_fulfillment(
requester=requester,
order=order,
order_lines=return_order_lines,
fulfillment_lines=return_fulfillment_lines,
manager=manager,
refund=refund,
)
Fulfillment.objects.filter(order=order, lines=None).delete()
return return_fulfillment, replace_fulfillment, new_order
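# Illustrative sketch, not part of the original module: requesting a return
# where one unit is refunded and another is replaced. It assumes OrderLineData
# exposes a `replace` flag (as read via `data.replace` above) and that the order
# has at least two lines; the concrete quantities are hypothetical.
def _example_return_and_replace(requester, order, manager):
    payment = order.get_last_payment()
    first_line, second_line = list(order.lines.all())[:2]
    order_lines = [
        OrderLineData(line=first_line, quantity=1, replace=False),  # refund + return
        OrderLineData(line=second_line, quantity=1, replace=True),  # replace
    ]
    return create_fulfillments_for_returned_products(
        requester=requester,
        order=order,
        payment=payment,
        order_lines=order_lines,
        fulfillment_lines=[],
        manager=manager,
        refund=True,
    )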
def _calculate_refund_amount(
return_order_lines: List[OrderLineData],
return_fulfillment_lines: List[FulfillmentLineData],
lines_to_refund: Dict[OrderLineIDType, Tuple[QuantityType, OrderLine]],
) -> Decimal:
refund_amount = Decimal(0)
for line_data in return_order_lines:
refund_amount += line_data.quantity * line_data.line.unit_price_gross_amount
lines_to_refund[line_data.line.id] = (line_data.quantity, line_data.line)
if not return_fulfillment_lines:
return refund_amount
order_lines_with_fulfillment = OrderLine.objects.in_bulk(
[line_data.line.order_line_id for line_data in return_fulfillment_lines]
)
for line_data in return_fulfillment_lines:
# skip lines which were already refunded
if line_data.line.fulfillment.status == FulfillmentStatus.REFUNDED:
continue
order_line = order_lines_with_fulfillment[line_data.line.order_line_id]
refund_amount += line_data.quantity * order_line.unit_price_gross_amount
data_from_all_refunded_lines = lines_to_refund.get(order_line.id)
if data_from_all_refunded_lines:
quantity, line = data_from_all_refunded_lines
quantity += line_data.quantity
lines_to_refund[order_line.id] = (quantity, line)
else:
lines_to_refund[order_line.id] = (line_data.quantity, order_line)
return refund_amount
def _process_refund(
requester: Optional["User"],
order: "Order",
payment: Payment,
order_lines_to_refund: List[OrderLineData],
fulfillment_lines_to_refund: List[FulfillmentLineData],
amount: Optional[Decimal],
refund_shipping_costs: bool,
manager: "PluginsManager",
):
lines_to_refund: Dict[OrderLineIDType, Tuple[QuantityType, OrderLine]] = dict()
refund_amount = _calculate_refund_amount(
order_lines_to_refund, fulfillment_lines_to_refund, lines_to_refund
)
if amount is None:
amount = refund_amount
# we take into consideration the shipping costs only when amount is not
# provided.
if refund_shipping_costs:
amount += order.shipping_price_gross_amount
if amount:
amount = min(payment.captured_amount, amount)
gateway.refund(payment, manager, amount)
order_refunded(order, requester, amount, payment, manager=manager)
fulfillment_refunded_event(
order=order,
user=requester,
refunded_lines=list(lines_to_refund.values()),
amount=amount,
shipping_costs_included=refund_shipping_costs,
)
# ===== the-stack_0_23670 =====
import io
import salt.proxy.junos as junos
from tests.support.mixins import LoaderModuleMockMixin
from tests.support.mock import ANY, patch
from tests.support.unit import TestCase, skipIf
try:
from jnpr.junos.device import Device # pylint: disable=unused-import
from jnpr.junos.exception import ConnectError
import jxmlease # pylint: disable=unused-import
HAS_JUNOS = True
except ImportError:
HAS_JUNOS = False
@skipIf(not HAS_JUNOS, "The junos-eznc and jxmlease modules are required")
class JunosProxyTestCase(TestCase, LoaderModuleMockMixin):
def setup_loader_modules(self):
return {junos: {"DETAILS": {}, "__pillar__": {}}}
def setUp(self):
self.opts = {
"proxy": {
"username": "xxxx",
"password]": "xxx",
"host": "junos",
"port": "960",
}
}
@patch("ncclient.manager.connect")
def test_init(self, mock_connect):
junos.init(self.opts)
self.assertTrue(junos.thisproxy.get("initialized"))
mock_connect.assert_called_with(
allow_agent=True,
device_params={"name": "junos", "local": False, "use_filter": False},
host="junos",
hostkey_verify=False,
key_filename=None,
password=None,
port="960",
sock_fd=None,
ssh_config=ANY,
username="xxxx",
)
@patch("ncclient.manager.connect")
def test_init_err(self, mock_connect):
mock_connect.side_effect = ConnectError
junos.init(self.opts)
self.assertFalse(junos.thisproxy.get("initialized"))
@patch("ncclient.manager.connect")
def test_alive(self, mock_connect):
junos.init(self.opts)
junos.thisproxy["conn"]._conn._session._buffer = io.BytesIO()
self.assertTrue(junos.alive(self.opts))
self.assertTrue(junos.thisproxy.get("initialized"))
# ===== the-stack_0_23671 =====
"""
Make a fasta file of all the proteins that are not in a cluster.
"""
import os
import sys
import argparse
from pppf_accessories import color
from pppf_databases import connect_to_db, disconnect
if __name__ == '__main__':
parser = argparse.ArgumentParser(description="Sequences in the phage database not in a cluster")
parser.add_argument('-p', help='phage database', required=True)
parser.add_argument('-c', help='cluster database', required=True)
parser.add_argument('-v', help='verbose output', action='store_true')
args = parser.parse_args()
pbc = connect_to_db(args.p, args.v)
pcur = pbc.cursor()
dbc = connect_to_db(args.c, args.v)
ccur = dbc.cursor()
cl = set()
ex = ccur.execute("select protein_md5sum, cluster from md5cluster")
for (m, c) in ex.fetchall():
cl.add(m)
if args.v:
sys.stderr.write(f"{color.GREEN}Loaded {len(cl)} proteins{color.ENDC}\n")
ex = pcur.execute("select protein_md5sum, protein_sequence from protein_sequence")
n = 0
for (m, s) in ex.fetchall():
n += 1
if m not in cl:
print(f">{m}\n{s}")
if args.v:
sys.stderr.write(f"{color.GREEN}Tested {n} proteins{color.ENDC}\n")
# ===== the-stack_0_23674 =====
"""Downloads filings from the EDGAR database.
Examples:
simple:
singledownload("edgar/data/1645148/0001213900-15-004775.txt")
for slow internet connections:
singledownload("edgar/data/1645148/0001213900-15-004775.txt", timeout=120)
with different folder structure:
singledownload("edgar/data/1645148/0001213900-15-004775.txt",
folder="myfolder/", sub="")
"""
import urllib.request
import uuid
import datetime
import os
def singledownload(url, edgar_url="https://www.sec.gov/Archives/",
folder="data/", sub="filings/", timeout=30):
"""Download filings from the EDGAR database.
Args:
url (str): Relative path on the EDGAR server.
edgar_url (str): URL to EDGAR archive parent folder on server.
Defaults to "https://www.sec.gov/Archives/".
folder (str): Path to working directory. Defaults to "data/".
sub (str): Path to subdirectory in working directory.
Defaults to "filings/".
timeout (int): Number of seconds to wait for the download to complete
before the download attempt is counted as failed.
Defaults to 30 seconds.
Returns:
        result (list): Information on what was downloaded, the local filename,
            and the date and time the file was downloaded.
Raises:
None
"""
full_url = edgar_url + url
fname = str(uuid.uuid4()) + ".txt"
accessed = datetime.datetime.now().strftime('%Y%m%d_%H%M%S')
try:
txt = urllib.request.urlopen(full_url, timeout=timeout).read()
except Exception as e:
return [url, e, "Error"]
if os.path.isdir(folder + sub) is False:
try:
os.makedirs(folder + sub)
except Exception as e:
print(e)
try:
f = open(folder + sub + fname, 'x')
except Exception:
f = open(folder + sub + fname, 'w+')
f.write(txt.decode('utf-8'))
    f.close()
result = [url, fname, accessed]
return result
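# Illustrative sketch, not part of the original module: checking the value
# returned by singledownload, which ends with "Error" when the download failed.
# The EDGAR path is the one used in the module docstring examples.
def _example_download_with_check():
    result = singledownload("edgar/data/1645148/0001213900-15-004775.txt")
    if result[-1] == "Error":
        print("download failed:", result[1])
    else:
        url, fname, accessed = result
        print("saved", url, "as", fname, "at", accessed)
    return result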
# ===== the-stack_0_23676 =====
import time
import json
import logging
import requests
import socket
import threading
from flask import request, Flask
from werkzeug.middleware.dispatcher import DispatcherMiddleware
from prometheus_client import make_wsgi_app
from werkzeug.serving import make_server
def wait_for_port(port, host="localhost", timeout=5.0):
"""Wait until a port starts accepting TCP connections.
Args:
port (int): Port number.
host (str): Host address on which the port should exist.
timeout (float): In seconds. How long to wait before raising errors.
Raises:
        TimeoutError: The port isn't accepting connections after the time specified in `timeout`.
"""
start_time = time.perf_counter()
while True:
try:
with socket.create_connection((host, port), timeout=timeout):
break
except OSError as ex:
time.sleep(0.01)
if time.perf_counter() - start_time >= timeout:
raise TimeoutError(
"Waited too long for the port {} on host {} to start accepting "
"connections.".format(port, host)
) from ex
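# Illustrative sketch, not part of the original module: waiting for a locally
# started service before connecting to it. The port number is a hypothetical
# placeholder.
def _example_wait_for_service():
    try:
        wait_for_port(8080, host="localhost", timeout=10.0)
    except TimeoutError:
        logging.error("service did not start listening within 10 seconds")
        raise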
class ServerThread(threading.Thread):
def __init__(self, master_addr, master_port, app):
threading.Thread.__init__(self)
self.srv = make_server(master_addr, master_port, app)
self.ctx = app.app_context()
self.ctx.push()
def run(self):
self.srv.serve_forever()
def shutdown(self):
self.srv.shutdown()
def pick_n_free_ports(n: int):
socks = []
for i in range(n):
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.bind(("localhost", 0))
socks.append(sock)
n_free_ports = [sock.getsockname()[1] for sock in socks]
for sock in socks:
sock.close()
return n_free_ports
def setup_app(slots: list, server_addr: str, server_port: int):
# run http server
app = Flask(__name__)
app.use_reloader = False # type: ignore
@app.route("/get_fuselib_server_addr", methods=["POST"])
def get_fuselib_server_addr():
req: dict = request.get_json(force=True)
hostname: str = req["hostname"]
rank: int = req["rank"]
slots[rank] = hostname
return json.dumps({"server_addr": server_addr, "server_port": server_port})
# Add prometheus wsgi middleware to route /metrics requests
app.wsgi_app = DispatcherMiddleware(app.wsgi_app, {"/metrics": make_wsgi_app()}) # type: ignore
return app
def generate_and_broadcast_server_addr(
master_addr,
master_port,
world_size: int,
my_rank: int,
proxies={
"http": None,
"https": None,
},
):
# import torch
# torch.distributed.init_process_group(backend="gloo", init_method="tcp://{}:{}".format(master_addr, master_port), world_size=world_size, rank=my_rank)
# if my_rank == 0:
# objects = [pick_n_free_ports(1)[0]]
# else:
# objects = [None]
# dist.broadcast_object_list(objects, src=0)
# dist.destroy_process_group()
# server_port = broadcast_objects[0]
# return (master_addr, server_port)
slots = [None] * world_size
if my_rank == 0:
server_addr = master_addr
server_port = pick_n_free_ports(1)[0]
slots[my_rank] = socket.gethostname() # type: ignore
app = setup_app(slots, server_addr, server_port)
server = ServerThread("0.0.0.0", master_port, app)
server.start()
while True:
n_empty_slot = len([x for x in slots if x is None])
if n_empty_slot == 0:
break
time.sleep(1)
server.shutdown()
else:
        # Wait for the service discovery service to be ready
timeout = time.time() + 60 # 60s timeout
rsp = None
wait_for_port(host=master_addr, port=master_port)
while time.time() < timeout:
try:
with requests.session() as sess:
rsp = sess.post(
"http://{}:{}/get_fuselib_server_addr".format(
master_addr, master_port
),
json={"rank": my_rank, "hostname": socket.gethostname()},
proxies=proxies,
timeout=timeout,
)
except requests.exceptions.ConnectionError as ex:
logging.info(ex)
if rsp and rsp.status_code == 200:
break
time.sleep(1)
if rsp is None or rsp.status_code != 200:
raise RuntimeError("Waiting for service discovery service start timeout")
server_addr = json.loads(rsp.content)["server_addr"]
server_port = json.loads(rsp.content)["server_port"]
return (master_addr, server_port, slots)
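# Illustrative sketch, not part of the original module: how each rank might call
# generate_and_broadcast_server_addr. Rank 0 hosts the discovery endpoint and
# blocks until all ranks have registered; the address, port and world size are
# hypothetical placeholders.
def _example_rendezvous(my_rank):
    server_addr, server_port, slots = generate_and_broadcast_server_addr(
        master_addr="127.0.0.1",
        master_port=29500,
        world_size=4,
        my_rank=my_rank,
    )
    # On rank 0, `slots` maps rank -> hostname for every participant.
    return server_addr, server_port, slots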
# ===== the-stack_0_23677 =====
"""
Tests the accuracy of the opt_einsum paths in addition to unit tests for
the various path helper functions.
"""
import itertools
import sys
import numpy as np
import pytest
import opt_einsum as oe
explicit_path_tests = {
'GEMM1': ([set('abd'), set('ac'), set('bdc')], set(''), {
'a': 1,
'b': 2,
'c': 3,
'd': 4
}),
'Inner1': ([set('abcd'), set('abc'), set('bc')], set(''), {
'a': 5,
'b': 2,
'c': 3,
'd': 4
}),
}
# note that these tests have no unique solution due to the chosen dimensions
path_edge_tests = [
['greedy', 'eb,cb,fb->cef', ((0, 2), (0, 1))],
['branch-all', 'eb,cb,fb->cef', ((0, 2), (0, 1))],
['branch-2', 'eb,cb,fb->cef', ((0, 2), (0, 1))],
['optimal', 'eb,cb,fb->cef', ((0, 2), (0, 1))],
['dp', 'eb,cb,fb->cef', ((1, 2), (0, 1))],
['greedy', 'dd,fb,be,cdb->cef', ((0, 3), (0, 1), (0, 1))],
['branch-all', 'dd,fb,be,cdb->cef', ((0, 3), (0, 1), (0, 1))],
['branch-2', 'dd,fb,be,cdb->cef', ((0, 3), (0, 1), (0, 1))],
['optimal', 'dd,fb,be,cdb->cef', ((0, 3), (0, 1), (0, 1))],
['optimal', 'dd,fb,be,cdb->cef', ((0, 3), (0, 1), (0, 1))],
['dp', 'dd,fb,be,cdb->cef', ((0, 3), (0, 2), (0, 1))],
['greedy', 'bca,cdb,dbf,afc->', ((1, 2), (0, 2), (0, 1))],
['branch-all', 'bca,cdb,dbf,afc->', ((1, 2), (0, 2), (0, 1))],
['branch-2', 'bca,cdb,dbf,afc->', ((1, 2), (0, 2), (0, 1))],
['optimal', 'bca,cdb,dbf,afc->', ((1, 2), (0, 2), (0, 1))],
['dp', 'bca,cdb,dbf,afc->', ((1, 2), (1, 2), (0, 1))],
['greedy', 'dcc,fce,ea,dbf->ab', ((1, 2), (0, 1), (0, 1))],
['branch-all', 'dcc,fce,ea,dbf->ab', ((1, 2), (0, 2), (0, 1))],
['branch-2', 'dcc,fce,ea,dbf->ab', ((1, 2), (0, 2), (0, 1))],
['optimal', 'dcc,fce,ea,dbf->ab', ((1, 2), (0, 2), (0, 1))],
['dp', 'dcc,fce,ea,dbf->ab', ((1, 2), (0, 2), (0, 1))],
]
def check_path(test_output, benchmark, bypass=False):
if not isinstance(test_output, list):
return False
if len(test_output) != len(benchmark):
return False
ret = True
for pos in range(len(test_output)):
ret &= isinstance(test_output[pos], tuple)
ret &= test_output[pos] == benchmark[pos]
return ret
def assert_contract_order(func, test_data, max_size, benchmark):
test_output = func(test_data[0], test_data[1], test_data[2], max_size)
assert check_path(test_output, benchmark)
def test_size_by_dict():
sizes_dict = {}
for ind, val in zip('abcdez', [2, 5, 9, 11, 13, 0]):
sizes_dict[ind] = val
path_func = oe.helpers.compute_size_by_dict
assert 1 == path_func('', sizes_dict)
assert 2 == path_func('a', sizes_dict)
assert 5 == path_func('b', sizes_dict)
assert 0 == path_func('z', sizes_dict)
assert 0 == path_func('az', sizes_dict)
assert 0 == path_func('zbc', sizes_dict)
assert 104 == path_func('aaae', sizes_dict)
assert 12870 == path_func('abcde', sizes_dict)
def test_flop_cost():
size_dict = {v: 10 for v in "abcdef"}
# Loop over an array
assert 10 == oe.helpers.flop_count("a", False, 1, size_dict)
# Hadamard product (*)
assert 10 == oe.helpers.flop_count("a", False, 2, size_dict)
assert 100 == oe.helpers.flop_count("ab", False, 2, size_dict)
# Inner product (+, *)
assert 20 == oe.helpers.flop_count("a", True, 2, size_dict)
assert 200 == oe.helpers.flop_count("ab", True, 2, size_dict)
# Inner product x3 (+, *, *)
assert 30 == oe.helpers.flop_count("a", True, 3, size_dict)
# GEMM
assert 2000 == oe.helpers.flop_count("abc", True, 2, size_dict)
def test_bad_path_option():
with pytest.raises(KeyError):
oe.contract("a,b,c", [1], [2], [3], optimize='optimall')
def test_explicit_path():
x = oe.contract("a,b,c", [1], [2], [3], optimize=[(1, 2), (0, 1)])
assert x.item() == 6
def test_path_optimal():
test_func = oe.paths.optimal
test_data = explicit_path_tests['GEMM1']
assert_contract_order(test_func, test_data, 5000, [(0, 2), (0, 1)])
assert_contract_order(test_func, test_data, 0, [(0, 1, 2)])
def test_path_greedy():
test_func = oe.paths.greedy
test_data = explicit_path_tests['GEMM1']
assert_contract_order(test_func, test_data, 5000, [(0, 2), (0, 1)])
assert_contract_order(test_func, test_data, 0, [(0, 1, 2)])
def test_memory_paths():
expression = "abc,bdef,fghj,cem,mhk,ljk->adgl"
views = oe.helpers.build_views(expression)
# Test tiny memory limit
path_ret = oe.contract_path(expression, *views, optimize="optimal", memory_limit=5)
assert check_path(path_ret[0], [(0, 1, 2, 3, 4, 5)])
path_ret = oe.contract_path(expression, *views, optimize="greedy", memory_limit=5)
assert check_path(path_ret[0], [(0, 1, 2, 3, 4, 5)])
# Check the possibilities, greedy is capped
path_ret = oe.contract_path(expression, *views, optimize="optimal", memory_limit=-1)
assert check_path(path_ret[0], [(0, 3), (0, 4), (0, 2), (0, 2), (0, 1)])
path_ret = oe.contract_path(expression, *views, optimize="greedy", memory_limit=-1)
assert check_path(path_ret[0], [(0, 3), (0, 4), (0, 2), (0, 2), (0, 1)])
@pytest.mark.parametrize("alg,expression,order", path_edge_tests)
def test_path_edge_cases(alg, expression, order):
views = oe.helpers.build_views(expression)
# Test tiny memory limit
path_ret = oe.contract_path(expression, *views, optimize=alg)
assert check_path(path_ret[0], order)
def test_optimal_edge_cases():
# Edge test5
expression = 'a,ac,ab,ad,cd,bd,bc->'
edge_test4 = oe.helpers.build_views(expression, dimension_dict={"a": 20, "b": 20, "c": 20, "d": 20})
path, path_str = oe.contract_path(expression, *edge_test4, optimize='greedy', memory_limit='max_input')
assert check_path(path, [(0, 1), (0, 1, 2, 3, 4, 5)])
path, path_str = oe.contract_path(expression, *edge_test4, optimize='optimal', memory_limit='max_input')
assert check_path(path, [(0, 1), (0, 1, 2, 3, 4, 5)])
def test_greedy_edge_cases():
expression = "abc,cfd,dbe,efa"
dim_dict = {k: 20 for k in expression.replace(",", "")}
tensors = oe.helpers.build_views(expression, dimension_dict=dim_dict)
path, path_str = oe.contract_path(expression, *tensors, optimize='greedy', memory_limit='max_input')
assert check_path(path, [(0, 1, 2, 3)])
path, path_str = oe.contract_path(expression, *tensors, optimize='greedy', memory_limit=-1)
assert check_path(path, [(0, 1), (0, 2), (0, 1)])
def test_dp_edge_cases_dimension_1():
eq = 'nlp,nlq,pl->n'
shapes = [(1, 1, 1), (1, 1, 1), (1, 1)]
info = oe.contract_path(eq, *shapes, shapes=True, optimize='dp')[1]
assert max(info.scale_list) == 3
def test_dp_edge_cases_all_singlet_indices():
eq = 'a,bcd,efg->'
shapes = [(2, ), (2, 2, 2), (2, 2, 2)]
info = oe.contract_path(eq, *shapes, shapes=True, optimize='dp')[1]
assert max(info.scale_list) == 3
def test_custom_dp_can_optimize_for_outer_products():
eq = "a,b,abc->c"
da, db, dc = 2, 2, 3
shapes = [(da, ), (db, ), (da, db, dc)]
opt1 = oe.DynamicProgramming(search_outer=False)
opt2 = oe.DynamicProgramming(search_outer=True)
info1 = oe.contract_path(eq, *shapes, shapes=True, optimize=opt1)[1]
info2 = oe.contract_path(eq, *shapes, shapes=True, optimize=opt2)[1]
assert info2.opt_cost < info1.opt_cost
def test_custom_dp_can_optimize_for_size():
eq, shapes = oe.helpers.rand_equation(10, 4, seed=43)
opt1 = oe.DynamicProgramming(minimize='flops')
opt2 = oe.DynamicProgramming(minimize='size')
info1 = oe.contract_path(eq, *shapes, shapes=True, optimize=opt1)[1]
info2 = oe.contract_path(eq, *shapes, shapes=True, optimize=opt2)[1]
assert (info1.opt_cost < info2.opt_cost)
assert (info1.largest_intermediate > info2.largest_intermediate)
def test_custom_dp_can_set_cost_cap():
eq, shapes = oe.helpers.rand_equation(5, 3, seed=42)
opt1 = oe.DynamicProgramming(cost_cap=True)
opt2 = oe.DynamicProgramming(cost_cap=False)
opt3 = oe.DynamicProgramming(cost_cap=100)
info1 = oe.contract_path(eq, *shapes, shapes=True, optimize=opt1)[1]
info2 = oe.contract_path(eq, *shapes, shapes=True, optimize=opt2)[1]
info3 = oe.contract_path(eq, *shapes, shapes=True, optimize=opt3)[1]
assert info1.opt_cost == info2.opt_cost == info3.opt_cost
def test_dp_errors_when_no_contractions_found():
eq, shapes, size_dict = oe.helpers.rand_equation(10, 3, seed=42, return_size_dict=True)
# first get the actual minimum cost
opt = oe.DynamicProgramming(minimize='size')
path, info = oe.contract_path(eq, *shapes, shapes=True, optimize=opt)
mincost = info.largest_intermediate
# check we can still find it without minimizing size explicitly
oe.contract_path(eq, *shapes, shapes=True, memory_limit=mincost, optimize='dp')
# but check just below this threshold raises
with pytest.raises(RuntimeError):
oe.contract_path(eq, *shapes, shapes=True, memory_limit=mincost - 1, optimize='dp')
@pytest.mark.parametrize("optimize", ['greedy', 'branch-2', 'branch-all', 'optimal', 'dp'])
def test_can_optimize_outer_products(optimize):
a, b, c = [np.random.randn(10, 10) for _ in range(3)]
d = np.random.randn(10, 2)
assert oe.contract_path("ab,cd,ef,fg", a, b, c, d, optimize=optimize)[0] == [(2, 3), (0, 2), (0, 1)]
@pytest.mark.parametrize('num_symbols', [2, 3, 26, 26 + 26, 256 - 140, 300])
def test_large_path(num_symbols):
symbols = ''.join(oe.get_symbol(i) for i in range(num_symbols))
dimension_dict = dict(zip(symbols, itertools.cycle([2, 3, 4])))
expression = ','.join(symbols[t:t + 2] for t in range(num_symbols - 1))
tensors = oe.helpers.build_views(expression, dimension_dict=dimension_dict)
# Check that path construction does not crash
oe.contract_path(expression, *tensors, optimize='greedy')
def test_custom_random_greedy():
eq, shapes = oe.helpers.rand_equation(10, 4, seed=42)
views = list(map(np.ones, shapes))
with pytest.raises(ValueError):
oe.RandomGreedy(minimize='something')
optimizer = oe.RandomGreedy(max_repeats=10, minimize='flops')
path, path_info = oe.contract_path(eq, *views, optimize=optimizer)
assert len(optimizer.costs) == 10
assert len(optimizer.sizes) == 10
assert path == optimizer.path
assert optimizer.best['flops'] == min(optimizer.costs)
assert path_info.largest_intermediate == optimizer.best['size']
assert path_info.opt_cost == optimizer.best['flops']
# check can change settings and run again
optimizer.temperature = 0.0
optimizer.max_repeats = 6
path, path_info = oe.contract_path(eq, *views, optimize=optimizer)
assert len(optimizer.costs) == 16
assert len(optimizer.sizes) == 16
assert path == optimizer.path
assert optimizer.best['size'] == min(optimizer.sizes)
assert path_info.largest_intermediate == optimizer.best['size']
assert path_info.opt_cost == optimizer.best['flops']
# check error if we try and reuse the optimizer on a different expression
eq, shapes = oe.helpers.rand_equation(10, 4, seed=41)
views = list(map(np.ones, shapes))
with pytest.raises(ValueError):
path, path_info = oe.contract_path(eq, *views, optimize=optimizer)
def test_custom_branchbound():
eq, shapes = oe.helpers.rand_equation(8, 4, seed=42)
views = list(map(np.ones, shapes))
optimizer = oe.BranchBound(nbranch=2, cutoff_flops_factor=10, minimize='size')
path, path_info = oe.contract_path(eq, *views, optimize=optimizer)
assert path == optimizer.path
assert path_info.largest_intermediate == optimizer.best['size']
assert path_info.opt_cost == optimizer.best['flops']
# tweak settings and run again
optimizer.nbranch = 3
optimizer.cutoff_flops_factor = 4
path, path_info = oe.contract_path(eq, *views, optimize=optimizer)
assert path == optimizer.path
assert path_info.largest_intermediate == optimizer.best['size']
assert path_info.opt_cost == optimizer.best['flops']
# check error if we try and reuse the optimizer on a different expression
eq, shapes = oe.helpers.rand_equation(8, 4, seed=41)
views = list(map(np.ones, shapes))
with pytest.raises(ValueError):
path, path_info = oe.contract_path(eq, *views, optimize=optimizer)
@pytest.mark.skipif(sys.version_info < (3, 2), reason="requires python3.2 or higher")
def test_parallel_random_greedy():
from concurrent.futures import ProcessPoolExecutor
pool = ProcessPoolExecutor(2)
eq, shapes = oe.helpers.rand_equation(10, 4, seed=42)
views = list(map(np.ones, shapes))
optimizer = oe.RandomGreedy(max_repeats=10, parallel=pool)
path, path_info = oe.contract_path(eq, *views, optimize=optimizer)
assert len(optimizer.costs) == 10
assert len(optimizer.sizes) == 10
assert path == optimizer.path
assert optimizer.parallel is pool
assert optimizer._executor is pool
assert optimizer.best['flops'] == min(optimizer.costs)
assert path_info.largest_intermediate == optimizer.best['size']
assert path_info.opt_cost == optimizer.best['flops']
# now switch to max time algorithm
optimizer.max_repeats = int(1e6)
optimizer.max_time = 0.2
optimizer.parallel = 2
path, path_info = oe.contract_path(eq, *views, optimize=optimizer)
assert len(optimizer.costs) > 10
assert len(optimizer.sizes) > 10
assert path == optimizer.path
assert optimizer.best['flops'] == min(optimizer.costs)
assert path_info.largest_intermediate == optimizer.best['size']
assert path_info.opt_cost == optimizer.best['flops']
optimizer.parallel = True
assert optimizer._executor is not None
assert optimizer._executor is not pool
are_done = [f.running() or f.done() for f in optimizer._futures]
assert all(are_done)
def test_custom_path_optimizer():
class NaiveOptimizer(oe.paths.PathOptimizer):
def __call__(self, inputs, output, size_dict, memory_limit=None):
self.was_used = True
return [(0, 1)] * (len(inputs) - 1)
eq, shapes = oe.helpers.rand_equation(5, 3, seed=42, d_max=3)
views = list(map(np.ones, shapes))
exp = oe.contract(eq, *views, optimize=False)
optimizer = NaiveOptimizer()
out = oe.contract(eq, *views, optimize=optimizer)
assert exp == out
assert optimizer.was_used
def test_custom_random_optimizer():
class NaiveRandomOptimizer(oe.path_random.RandomOptimizer):
@staticmethod
def random_path(r, n, inputs, output, size_dict):
"""Picks a completely random contraction order.
"""
np.random.seed(r)
ssa_path = []
remaining = set(range(n))
while len(remaining) > 1:
i, j = np.random.choice(list(remaining), size=2, replace=False)
remaining.add(n + len(ssa_path))
remaining.remove(i)
remaining.remove(j)
ssa_path.append((i, j))
cost, size = oe.path_random.ssa_path_compute_cost(ssa_path, inputs, output, size_dict)
return ssa_path, cost, size
def setup(self, inputs, output, size_dict):
self.was_used = True
n = len(inputs)
trial_fn = self.random_path
trial_args = (n, inputs, output, size_dict)
return trial_fn, trial_args
eq, shapes = oe.helpers.rand_equation(5, 3, seed=42, d_max=3)
views = list(map(np.ones, shapes))
exp = oe.contract(eq, *views, optimize=False)
optimizer = NaiveRandomOptimizer(max_repeats=16)
out = oe.contract(eq, *views, optimize=optimizer)
assert exp == out
assert optimizer.was_used
assert len(optimizer.costs) == 16
def test_optimizer_registration():
def custom_optimizer(inputs, output, size_dict, memory_limit):
return [(0, 1)] * (len(inputs) - 1)
with pytest.raises(KeyError):
oe.paths.register_path_fn('optimal', custom_optimizer)
oe.paths.register_path_fn('custom', custom_optimizer)
assert 'custom' in oe.paths._PATH_OPTIONS
eq = 'ab,bc,cd'
shapes = [(2, 3), (3, 4), (4, 5)]
path, path_info = oe.contract_path(eq, *shapes, shapes=True, optimize='custom')
assert path == [(0, 1), (0, 1)]
del oe.paths._PATH_OPTIONS['custom']
# ===== the-stack_0_23678 =====
# Copyright 2017 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ------------------------------------------------------------------------------
import logging
import unittest
from sawtooth_validator.database.dict_database import DictDatabase
from sawtooth_validator.exceptions import PossibleForkDetectedError
from sawtooth_validator.journal.block_store import BlockStore
from sawtooth_validator.journal.block_wrapper import NULL_BLOCK_IDENTIFIER
from sawtooth_validator.journal.block_wrapper import BlockWrapper
from sawtooth_validator.protobuf.block_pb2 import Block
from sawtooth_validator.protobuf.block_pb2 import BlockHeader
from test_journal.block_tree_manager import BlockTreeManager
LOGGER = logging.getLogger(__name__)
class BlockStoreTest(unittest.TestCase):
def setUp(self):
self.block_tree_manager = BlockTreeManager()
def test_chain_head(self):
""" Test that the chain head can be retrieved from the
BlockStore.
"""
block = self.create_block()
block_store = self.create_block_store(
{
'chain_head_id': 'head',
'head': self.encode_block(block)
})
chain_head = block_store.chain_head
self.assert_blocks_equal(chain_head, block)
def test_get(self):
""" Test BlockStore block get operations.
"""
block = self.create_block()
block_store = self.create_block_store(
{
'chain_head_id': 'head',
'head': self.encode_block(block),
'txn': 'head'
})
chain_head = block_store['head']
self.assert_blocks_equal(chain_head, block)
with self.assertRaises(KeyError):
block_store['txn']
with self.assertRaises(KeyError):
chain_head = block_store['missing']
def test_set(self):
""" Test BlockStore block set operations.
"""
block = self.create_block()
block_store = self.create_block_store(
{
'chain_head_id': 'head',
'head': self.encode_block(block),
'txn': 'head'
})
block2 = self.create_block()
with self.assertRaises(KeyError):
block_store['head'] = block2
block_store[block2.identifier] = block2
stored_block = block_store[block2.identifier]
self.assert_blocks_equal(stored_block, block2)
with self.assertRaises(AttributeError):
block_store['batch'] = 'head'
def test_has(self):
""" Test BlockStore tests if Transactions and Batches
are committed to the current chain.
"""
block_store = self.create_block_store(
{
'chain_head_id': 'block',
'block': self.create_serialized_block(),
'txn': 'block',
'batch': 'block'
})
self.assertTrue(block_store.has_transaction('txn'))
self.assertFalse(block_store.has_transaction('txn_missing'))
self.assertTrue(block_store.has_batch('batch'))
self.assertFalse(block_store.has_batch('batch_missing'))
self.assertTrue('block' in block_store)
self.assertTrue('batch' in block_store)
self.assertTrue('txn' in block_store)
self.assertFalse('block_missing' in block_store)
self.assertFalse('batch_missing' in block_store)
self.assertFalse('txn_missing' in block_store)
def test_get_block_by_batch_id(self):
""" Test BlockStore retrieval of a Block that contains a specific
batch.
"""
block = self.create_block()
block_store = self.create_block_store()
block_store.update_chain([block])
batch_id = block.batches[0].header_signature
stored = block_store.get_block_by_batch_id(batch_id)
self.assert_blocks_equal(stored, block)
with self.assertRaises(ValueError):
block_store.get_block_by_batch_id("bad")
def test_get_batch_by_transaction(self):
""" Test BlockStore retrieval of a Batch that contains a specific
transaction.
"""
block = self.create_block()
block_store = self.create_block_store()
block_store.update_chain([block])
batch = block.batches[0]
txn_id = batch.transactions[0].header_signature
stored = block_store.get_batch_by_transaction(txn_id)
self.assert_protobufs_equal(stored, batch)
with self.assertRaises(ValueError):
block_store.get_batch_by_transaction("bad")
def test_get_block_by_transaction_id(self):
""" Test BlockStore retrieval of a Block that contains a specific
transaction.
"""
block = self.create_block()
block_store = self.create_block_store()
block_store.update_chain([block])
txn_id = block.batches[0].transactions[0].header_signature
stored = block_store.get_block_by_transaction_id(txn_id)
self.assert_blocks_equal(stored, block)
with self.assertRaises(ValueError):
stored = block_store.get_block_by_transaction_id("bad")
def test_get_batch(self):
""" Test BlockStore retrieval of a batch by id.
"""
block = self.create_block()
block_store = self.create_block_store()
block_store.update_chain([block])
batch = block.batches[0]
batch_id = batch.header_signature
stored = block_store.get_batch(batch_id)
self.assert_protobufs_equal(stored, batch)
with self.assertRaises(ValueError):
stored = block_store.get_batch("bad")
def test_get_transaction(self):
""" Test BlockStore retrieval of a transaction by id.
"""
block = self.create_block()
block_store = self.create_block_store()
block_store.update_chain([block])
txn = block.batches[0].transactions[0]
txn_id = txn.header_signature
stored = block_store.get_transaction(txn_id)
self.assert_protobufs_equal(stored, txn)
with self.assertRaises(ValueError):
stored = block_store.get_transaction("bad")
def assert_blocks_equal(self, stored, reference):
self.assert_protobufs_equal(stored.block,
reference.block)
def assert_protobufs_equal(self, stored, reference):
self.assertEqual(self.encode(stored),
self.encode(reference))
@staticmethod
def create_block_store(data=None):
return BlockStore(DictDatabase(data))
def create_block(self):
return self.block_tree_manager.create_block()
def create_serialized_block(self):
block_wrapper = self.block_tree_manager.create_block()
return block_wrapper.block.SerializeToString()
@staticmethod
def encode_block(obj):
return obj.block.SerializeToString()
@staticmethod
def encode(obj):
return obj.SerializeToString()
class BlockStorePredecessorIteratorTest(unittest.TestCase):
def test_iterate_chain(self):
"""Given a block store, create an predecessor iterator.
1. Create a chain of length 5.
2. Iterate the chain using the get_predecessor_iter from the chain head
3. Verify that the block ids match the chain, in reverse order
"""
block_store = BlockStore(DictDatabase())
chain = self._create_chain(5)
block_store.update_chain(chain)
ids = [b.identifier for b in block_store.get_predecessor_iter()]
self.assertEqual(
['abcd4', 'abcd3', 'abcd2', 'abcd1', 'abcd0'],
ids)
def test_iterate_chain_from_starting_block(self):
"""Given a block store, iterate if using an predecessor iterator from
a particular start point in the chain.
1. Create a chain of length 5.
2. Iterate the chain using the get_predecessor_iter from block 3
3. Verify that the block ids match the chain, in reverse order
"""
block_store = BlockStore(DictDatabase())
chain = self._create_chain(5)
block_store.update_chain(chain)
block = block_store['abcd2']
ids = [b.identifier
for b in block_store.get_predecessor_iter(block)]
self.assertEqual(
['abcd2', 'abcd1', 'abcd0'],
ids)
def test_iterate_chain_on_empty_block_store(self):
"""Given a block store with no blocks, iterate using predecessor iterator
and verify that it results in an empty list.
"""
block_store = BlockStore(DictDatabase())
self.assertEqual([], [b for b in block_store.get_predecessor_iter()])
def test_fork_detection_on_iteration(self):
"""Given a block store where a fork occurred while using the predecessor
iterator, it should throw a PossibleForkDetectedError.
The fork occurrence will be simulated.
"""
block_store = BlockStore(DictDatabase())
chain = self._create_chain(5)
block_store.update_chain(chain)
iterator = block_store.get_predecessor_iter()
self.assertEqual('abcd4', next(iterator).identifier)
del block_store['abcd3']
with self.assertRaises(PossibleForkDetectedError):
next(iterator)
def _create_chain(self, length):
chain = []
previous_block_id = NULL_BLOCK_IDENTIFIER
for i in range(length):
block = BlockWrapper(
Block(header_signature='abcd{}'.format(i),
header=BlockHeader(
block_num=i,
previous_block_id=previous_block_id
).SerializeToString()))
previous_block_id = block.identifier
chain.append(block)
chain.reverse()
return chain
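# Illustrative sketch (not part of the original test module): a minimal,
# self-contained walk through BlockStore using the same helpers the tests
# above rely on.  The chain length and the 'example*' ids are arbitrary.
def _example_block_store_walk():
    block_store = BlockStore(DictDatabase())
    chain = []
    previous_block_id = NULL_BLOCK_IDENTIFIER
    for i in range(3):
        block = BlockWrapper(
            Block(header_signature='example{}'.format(i),
                  header=BlockHeader(
                      block_num=i,
                      previous_block_id=previous_block_id
                  ).SerializeToString()))
        previous_block_id = block.identifier
        chain.append(block)
    chain.reverse()
    block_store.update_chain(chain)
    # Predecessor iteration walks from the chain head backwards.
    return [b.identifier for b in block_store.get_predecessor_iter()]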
|
the-stack_0_23679 | #!/usr/bin/env python3
import numpy as np
import re
# the first version was actually visual inspection with matplotlib
def solve(input):
data = np.array([(int(x),int(y),int(vx),int(vy))
for (x,y,vx,vy)
in re.findall("[^<]+<([^,]+),([^>]+)>[^<]+<([^,]+),([^>]+)>", input)])
p = data[:,:2]
v = data[:,2:]
i = 0
s0 = 2**32
while True:
s1 = p[:,0].max() - p[:,0].min() + p[:,1].max() - p[:,1].min()
if s0 < s1:
p -= v
i -= 1
break
p += v
i += 1
s0 = s1
min_x = p[:,0].min()
min_y = p[:,1].min()
width = p[:,0].max() - min_x
height = p[:,1].max() - min_y
b = np.full((height+1, width+1), ".")
for x,y in p:
b[y-min_y,x-min_x] = "*"
print("Seconds:", i)
for row in b:
print(''.join(row))
with open("../data/day10.input") as fd:
solve(fd.read()) # pt1: XLZAKBGZ, pt2: 10656
|
the-stack_0_23680 | def tree(cls, level=0, last_in_level=True):
yield cls.__name__, level, last_in_level
subclasses = cls.__subclasses__()
if subclasses:
last = subclasses[-1]
for sub_cls in subclasses:
yield from tree(sub_cls, level+1, sub_cls is last)
def display(cls):
for cls_name, level, _ in tree(cls):
indent = ' ' * 4 * level
print(f'{indent}{cls_name}')
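# Illustrative sketch (not part of the original script): the generator works
# on any class hierarchy, not only BaseException.  The Shape classes below
# are made up purely for demonstration.
def demo_custom_hierarchy():
    class Shape: pass
    class Circle(Shape): pass
    class Square(Shape): pass
    class RoundedSquare(Square): pass
    # display(Shape) prints:
    # Shape
    #     Circle
    #     Square
    #         RoundedSquare
    display(Shape)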
if __name__ == '__main__':
display(BaseException)
|
the-stack_0_23682 | import tensorflow as tf
import numpy as np
from absl.flags import FLAGS
@tf.function
def transform_targets_for_output(y_true, grid_size, anchor_idxs):
# y_true: (N, boxes, (x1, y1, x2, y2, class, best_anchor))
N = tf.shape(y_true)[0]
# y_true_out: (N, grid, grid, anchors, [x, y, w, h, obj, class])
y_true_out = tf.zeros(
(N, grid_size, grid_size, tf.shape(anchor_idxs)[0], 6))
anchor_idxs = tf.cast(anchor_idxs, tf.int32)
indexes = tf.TensorArray(tf.int32, 1, dynamic_size=True)
updates = tf.TensorArray(tf.float32, 1, dynamic_size=True)
idx = 0
for i in tf.range(N):
for j in tf.range(tf.shape(y_true)[1]):
if tf.equal(y_true[i][j][2], 0):
continue
anchor_eq = tf.equal(
anchor_idxs, tf.cast(y_true[i][j][5], tf.int32))
if tf.reduce_any(anchor_eq):
box = y_true[i][j][0:4]
box_xy = (y_true[i][j][0:2] + y_true[i][j][2:4]) / 2
anchor_idx = tf.cast(tf.where(anchor_eq), tf.int32)
grid_xy = tf.cast(box_xy // (1/grid_size), tf.int32)
# grid[y][x][anchor] = (tx, ty, bw, bh, obj, class)
indexes = indexes.write(
idx, [i, grid_xy[1], grid_xy[0], anchor_idx[0][0]])
updates = updates.write(
idx, [box[0], box[1], box[2], box[3], 1, y_true[i][j][4]])
idx += 1
# tf.print(indexes.stack())
# tf.print(updates.stack())
return tf.tensor_scatter_nd_update(
y_true_out, indexes.stack(), updates.stack())
def transform_targets(y_train, anchors, anchor_masks, size):
y_outs = []
grid_size = size // 32
# calculate anchor index for true boxes
anchors = tf.cast(anchors, tf.float32)
anchor_area = anchors[..., 0] * anchors[..., 1]
box_wh = y_train[..., 2:4] - y_train[..., 0:2]
box_wh = tf.tile(tf.expand_dims(box_wh, -2),
(1, 1, tf.shape(anchors)[0], 1))
box_area = box_wh[..., 0] * box_wh[..., 1]
intersection = tf.minimum(box_wh[..., 0], anchors[..., 0]) * \
tf.minimum(box_wh[..., 1], anchors[..., 1])
iou = intersection / (box_area + anchor_area - intersection)
anchor_idx = tf.cast(tf.argmax(iou, axis=-1), tf.float32)
anchor_idx = tf.expand_dims(anchor_idx, axis=-1)
y_train = tf.concat([y_train, anchor_idx], axis=-1)
for anchor_idxs in anchor_masks:
y_outs.append(transform_targets_for_output(
y_train, grid_size, anchor_idxs))
grid_size *= 2
return tuple(y_outs)
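# Illustrative sketch (not part of the original module): how transform_targets
# is typically called.  The anchors below are the standard YOLOv3 anchors
# normalised to a 416x416 input; treat them and the single example box as
# placeholders, not requirements of this code.
def _example_transform_targets():
    anchors = np.array([(10, 13), (16, 30), (33, 23), (30, 61), (62, 45),
                        (59, 119), (116, 90), (156, 198), (373, 326)],
                       np.float32) / 416
    anchor_masks = np.array([[6, 7, 8], [3, 4, 5], [0, 1, 2]])
    # One image with one box: normalised (x1, y1, x2, y2, class).
    y_train = tf.constant([[[0.1, 0.2, 0.4, 0.6, 0.0]]], tf.float32)
    # Returns one target grid per output scale (13x13, 26x26, 52x52 at 416).
    return transform_targets(y_train, anchors, anchor_masks, size=416)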
IMAGE_FEATURE_MAP = {
"image": tf.io.FixedLenFeature([], tf.string),
"xmins": tf.io.VarLenFeature(tf.float32),
"ymins": tf.io.VarLenFeature(tf.float32),
"xmaxs": tf.io.VarLenFeature(tf.float32),
"ymaxs": tf.io.VarLenFeature(tf.float32),
"classes": tf.io.VarLenFeature(tf.int64)
}
def transform_images(x_train, size, pad=False, augment=False):
if pad:
x_train = tf.image.resize_with_pad(
x_train, size, size, method='bicubic', antialias=True)
else:
x_train = tf.image.resize(x_train, (size, size),
method='bicubic', antialias=True)
if augment:
x_train = augment_image(x_train)
x_train = x_train / 255
return x_train
def augment_image(image):
choice = np.random.randint(4)
if choice == 0:
image = tf.image.random_brightness(image, 0.05)
elif choice == 1:
image = tf.image.random_contrast(image, 0.75, 1.25)
elif choice == 2:
image = tf.image.random_hue(image, 0.01)
else:
image = tf.image.random_saturation(image, 0.75, 1.5)
return image
def parse_tfrecord(tfrecord, size, image_type):
x = tf.io.parse_single_example(tfrecord, IMAGE_FEATURE_MAP)
if image_type == 'png':
x_train = tf.image.decode_png(x['image'], channels=3)
elif image_type == 'jpg':
x_train = tf.image.decode_jpeg(x['image'], channels=3)
x_train = tf.image.resize(x_train, (size, size),
method='bicubic', antialias=True)
y_train = tf.stack([tf.sparse.to_dense(x['xmins']),
tf.sparse.to_dense(x['ymins']),
tf.sparse.to_dense(x['xmaxs']),
tf.sparse.to_dense(x['ymaxs']),
tf.cast(tf.sparse.to_dense(x['classes']),
tf.float32)], axis=1)
paddings = [[0, FLAGS.yolo_max_boxes - tf.shape(y_train)[0]], [0, 0]]
y_train = tf.pad(y_train, paddings)
return x_train, y_train
def load_tfrecord_dataset(file_pattern, size, image_type):
files = tf.data.Dataset.list_files(file_pattern)
dataset = files.flat_map(tf.data.TFRecordDataset)
return dataset.map(lambda x: parse_tfrecord(x, size, image_type))
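# Illustrative sketch (not part of the original module): a typical training
# input pipeline built from the helpers above.  The file pattern, image type,
# batch size and anchor arguments are placeholders supplied by the caller.
def _example_input_pipeline(file_pattern, anchors, anchor_masks,
                            size=416, batch_size=8):
    dataset = load_tfrecord_dataset(file_pattern, size, image_type='jpg')
    dataset = dataset.shuffle(buffer_size=512)
    dataset = dataset.batch(batch_size)
    dataset = dataset.map(lambda x, y: (
        transform_images(x, size, augment=True),
        transform_targets(y, anchors, anchor_masks, size)))
    return dataset.prefetch(buffer_size=tf.data.experimental.AUTOTUNE)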
|
the-stack_0_23684 | import os
import random, math
import torch
import numpy as np
import glob
import cv2
from tqdm import tqdm
from skimage import io
from ISP_implement import ISP
if __name__ == '__main__':
isp = ISP()
source_dir = './source/'
target_dir = './target/'
if not os.path.isdir(target_dir):
os.makedirs(target_dir)
fns = glob.glob(os.path.join(source_dir, '*.png'))
patch_size = 256
for fn in tqdm(fns):
img_rgb = cv2.imread(fn)[:, :, ::-1] / 255.0
H = img_rgb.shape[0]
W = img_rgb.shape[1]
H_s = H // patch_size
W_s = W // patch_size
patch_id = 0
for i in range(H_s):
for j in range(W_s):
yy = i * patch_size
xx = j * patch_size
patch_img_rgb = img_rgb[yy:yy+patch_size, xx:xx+patch_size, :]
gt, noise = isp.noise_generate_srgb(patch_img_rgb)
filename = os.path.basename(fn)
foldername = filename.split('.')[0]
out_folder = os.path.join(target_dir, foldername)
if not os.path.isdir(out_folder):
os.makedirs(out_folder)
io.imsave(os.path.join(out_folder, 'GT_SRGB_%d_%d.png' % (i, j)), gt)
io.imsave(os.path.join(out_folder, 'NOISY_SRGB_%d_%d.png' % (i, j)), noise)
|
the-stack_0_23688 | # Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Interaction main program."""
import argparse
from collections import namedtuple
import paddle
import paddle.fluid as fluid
from termcolor import colored, cprint
import knover.models as models
from knover.tasks.dialog_generation import DialogGeneration
from knover.utils import check_cuda, parse_args
def setup_args():
"""Setup arguments."""
parser = argparse.ArgumentParser()
models.add_cmdline_args(parser)
DialogGeneration.add_cmdline_args(parser)
args = parse_args(parser)
args.load(args.config_path, "Model")
args.run_infer = True # only build infer program
args.display()
return args
def interact(args):
"""Interaction main function."""
dev_count = 1
gpu_id = 0
place = fluid.CUDAPlace(gpu_id)
task = DialogGeneration(args)
model = models.create_model(args, place)
task.debug()
Example = namedtuple("Example", ["src", "data_id"])
context = []
start_info = "Enter [EXIT] to quit the interaction, [NEXT] to start a new conversation."
cprint(start_info, "yellow", attrs=["bold"])
while True:
user_utt = input(colored("[Human]: ", "red", attrs=["bold"])).strip()
if user_utt == "[EXIT]":
break
elif user_utt == "[NEXT]":
context = []
cprint(start_info, "yellow", attrs=["bold"])
else:
context.append(user_utt)
example = Example(src=" [SEP] ".join(context), data_id=0)
task.reader.features[0] = example
record = task.reader._convert_example_to_record(example, is_infer=True)
data = task.reader._pad_batch_records([record], is_infer=True)
pred = task.infer_step(model, data)[0]
bot_response = pred["response"]
print(colored("[Bot]:", "blue", attrs=["bold"]), colored(bot_response, attrs=["bold"]))
context.append(bot_response)
return
if __name__ == "__main__":
if hasattr(paddle, "enable_static"):
paddle.enable_static()
args = setup_args()
check_cuda(True)
interact(args)
|
the-stack_0_23689 | import json
from betamax.serializers.json_serializer import JSONSerializer
class JSONBodySerializer(JSONSerializer):
name = 'json_body'
stored_as_binary = False
def _get_content_type(self, ct):
return ct[0].split(';')[0].strip().lower()
def _add_json_body(self, r):
body_string = r['body'].get('string')
if body_string:
content_type = self._get_content_type(r['headers']['Content-Type'])
if content_type == 'application/json':
r['json_body'] = json.loads(body_string)
def serialize(self, cassette_data):
for interaction in cassette_data['http_interactions']:
self._add_json_body(interaction['request'])
self._add_json_body(interaction['response'])
return json.dumps(
cassette_data,
sort_keys=True,
indent=2,
separators=(',', ': '),
)
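# Illustrative sketch (not part of the original module): the serializer has to
# be registered with Betamax before a cassette can request it by name via
# ``serialize_with``.  The cassette directory, cassette name and URL are
# placeholders.
def _example_register_and_record(session):
    from betamax import Betamax
    Betamax.register_serializer(JSONBodySerializer)
    with Betamax(session, cassette_library_dir='tests/cassettes') as recorder:
        recorder.use_cassette('example', serialize_with='json_body')
        session.get('https://example.com/api')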
|
the-stack_0_23692 | #!/usr/bin/env python3
"""Precompute Wav2Vec features."""
import os
import json
from pathlib import Path
from tempfile import mkstemp
from multiprocessing import cpu_count
import tqdm
import torch
from torch.utils.data import DataLoader
from jsonargparse import ArgumentParser, ActionConfigFile
from models import load_pretrained_wav2vec
from data import PreprocessDataset
def parse_args():
"""Parse command-line arguments."""
parser = ArgumentParser()
parser.add_argument("data_dirs", type=str, nargs="+")
parser.add_argument("wav2vec_path", type=str)
parser.add_argument("out_dir", type=str)
parser.add_argument("--trim_method", choices=["librosa", "vad"], default="vad")
parser.add_argument("--n_workers", type=int, default=cpu_count())
parser.add_argument("--sample_rate", type=int, default=16000)
parser.add_argument("--preemph", type=float, default=0.97)
parser.add_argument("--hop_len", type=int, default=320)
parser.add_argument("--win_len", type=int, default=1280)
parser.add_argument("--n_fft", type=int, default=1280)
parser.add_argument("--n_mels", type=int, default=80)
parser.add_argument("--f_min", type=int, default=80)
parser.add_argument("--f_max", type=int, default=None)
parser.add_argument("--audio_config", action=ActionConfigFile)
return vars(parser.parse_args())
def main(
data_dirs,
wav2vec_path,
out_dir,
trim_method,
n_workers,
sample_rate,
preemph,
hop_len,
win_len,
n_fft,
n_mels,
f_min,
f_max,
**kwargs,
):
"""Main function."""
out_dir_path = Path(out_dir)
if out_dir_path.exists():
assert out_dir_path.is_dir()
else:
out_dir_path.mkdir(parents=True)
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
dataset = PreprocessDataset(
data_dirs,
trim_method,
sample_rate,
preemph,
hop_len,
win_len,
n_fft,
n_mels,
f_min,
f_max
)
dataloader = DataLoader(
dataset, batch_size=1, shuffle=False, drop_last=False, num_workers=n_workers
)
wav2vec = load_pretrained_wav2vec(wav2vec_path).to(device)
speaker_infos = {}
pbar = tqdm.tqdm(total=len(dataset), ncols=0)
for speaker_name, audio_path, wav, mel in dataloader:
if wav.size(-1) < 10:
continue
wav = wav.to(device)
speaker_name = speaker_name[0]
audio_path = audio_path[0]
with torch.no_grad():
feat = wav2vec.extract_features(wav, None)[0]
feat = feat.detach().cpu().squeeze(0)
mel = mel.squeeze(0)
assert mel.shape == feat.shape
fd, temp_file = mkstemp(suffix=".tar", prefix="utterance-", dir=out_dir_path)
torch.save({"feat": feat, "mel": mel}, temp_file)
os.close(fd)
if speaker_name not in speaker_infos.keys():
speaker_infos[speaker_name] = []
speaker_infos[speaker_name].append(
{
"feature_path": Path(temp_file).name,
"audio_path": audio_path,
"feat_len": len(feat),
"mel_len": len(mel),
}
)
pbar.update(dataloader.batch_size)
with open(out_dir_path / "metadata.json", "w") as f:
json.dump(speaker_infos, f, indent=2)
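# Illustrative sketch (not part of the original script): how a downstream
# loader might read back the features this script writes.  "out_dir" and
# "speaker_name" are placeholders; the keys mirror what main() stores above.
def load_example_utterance(out_dir, speaker_name):
    out_dir = Path(out_dir)
    with open(out_dir / "metadata.json") as f:
        metadata = json.load(f)
    info = metadata[speaker_name][0]
    saved = torch.load(out_dir / info["feature_path"])
    # "feat" is the wav2vec feature matrix, "mel" the matching mel spectrogram.
    return saved["feat"], saved["mel"]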
if __name__ == "__main__":
main(**parse_args())
|
the-stack_0_23693 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Simple image classification with Inception.
Run image classification with Inception trained on ImageNet 2012 Challenge data
set.
This program creates a graph from a saved GraphDef protocol buffer,
and runs inference on an input JPEG image. It outputs human readable
strings of the top 5 predictions along with their probabilities.
Change the --image_file argument to any jpg image to compute a
classification of that image.
Please see the tutorial and website for a detailed description of how
to use this script to perform image recognition.
https://tensorflow.org/tutorials/image_recognition/
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os.path
import re
import sys
import tarfile
import numpy as np
from six.moves import urllib
import tensorflow as tf
FLAGS = tf.app.flags.FLAGS
# classify_image_graph_def.pb:
# Binary representation of the GraphDef protocol buffer.
# imagenet_synset_to_human_label_map.txt:
# Map from synset ID to a human readable string.
# imagenet_2012_challenge_label_map_proto.pbtxt:
# Text representation of a protocol buffer mapping a label to synset ID.
tf.app.flags.DEFINE_string(
'model_dir', 'imagenet',
"""Path to classify_image_graph_def.pb, """
"""imagenet_synset_to_human_label_map.txt, and """
"""imagenet_2012_challenge_label_map_proto.pbtxt.""")
tf.app.flags.DEFINE_string('image_file', '',
"""Absolute path to image file.""")
tf.app.flags.DEFINE_integer('num_top_predictions', 5,
"""Display this many predictions.""")
# pylint: disable=line-too-long
DATA_URL = 'http://download.tensorflow.org/models/image/imagenet/inception-2015-12-05.tgz'
# pylint: enable=line-too-long
class NodeLookup(object):
"""Converts integer node ID's to human readable labels."""
def __init__(self,
label_lookup_path=None,
uid_lookup_path=None):
if not label_lookup_path:
label_lookup_path = os.path.join(
FLAGS.model_dir, 'imagenet_2012_challenge_label_map_proto.pbtxt')
if not uid_lookup_path:
uid_lookup_path = os.path.join(
FLAGS.model_dir, 'imagenet_synset_to_human_label_map.txt')
self.node_lookup = self.load(label_lookup_path, uid_lookup_path)
def load(self, label_lookup_path, uid_lookup_path):
"""Loads a human readable English name for each softmax node.
Args:
label_lookup_path: string UID to integer node ID.
uid_lookup_path: string UID to human-readable string.
Returns:
dict from integer node ID to human-readable string.
"""
if not tf.gfile.Exists(uid_lookup_path):
tf.logging.fatal('File does not exist %s', uid_lookup_path)
if not tf.gfile.Exists(label_lookup_path):
tf.logging.fatal('File does not exist %s', label_lookup_path)
# Loads mapping from string UID to human-readable string
proto_as_ascii_lines = tf.gfile.GFile(uid_lookup_path).readlines()
uid_to_human = {}
p = re.compile(r'[n\d]*[ \S,]*')
for line in proto_as_ascii_lines:
parsed_items = p.findall(line)
uid = parsed_items[0]
human_string = parsed_items[2]
uid_to_human[uid] = human_string
# Loads mapping from string UID to integer node ID.
node_id_to_uid = {}
proto_as_ascii = tf.gfile.GFile(label_lookup_path).readlines()
for line in proto_as_ascii:
if line.startswith(' target_class:'):
target_class = int(line.split(': ')[1])
if line.startswith(' target_class_string:'):
target_class_string = line.split(': ')[1]
node_id_to_uid[target_class] = target_class_string[1:-2]
# Loads the final mapping of integer node ID to human-readable string
node_id_to_name = {}
for key, val in node_id_to_uid.items():
if val not in uid_to_human:
tf.logging.fatal('Failed to locate: %s', val)
name = uid_to_human[val]
node_id_to_name[key] = name
return node_id_to_name
def id_to_string(self, node_id):
if node_id not in self.node_lookup:
return ''
return self.node_lookup[node_id]
def create_graph():
"""Creates a graph from saved GraphDef file and returns a saver."""
# Creates graph from saved graph_def.pb.
with tf.gfile.FastGFile(os.path.join(
FLAGS.model_dir, 'classify_image_graph_def.pb'), 'rb') as f:
graph_def = tf.GraphDef()
graph_def.ParseFromString(f.read())
_ = tf.import_graph_def(graph_def, name='')
def run_inference_on_image(image):
"""Runs inference on an image.
Args:
image: Image file name.
Returns:
Nothing
"""
if not tf.gfile.Exists(image):
tf.logging.fatal('File does not exist %s', image)
image_data = tf.gfile.FastGFile(image, 'rb').read()
# Creates graph from saved GraphDef.
create_graph()
with tf.Session() as sess:
# Some useful tensors:
# 'softmax:0': A tensor containing the normalized prediction across
# 1000 labels.
# 'pool_3:0': A tensor containing the next-to-last layer containing 2048
# float description of the image.
# 'DecodeJpeg/contents:0': A tensor containing a string providing JPEG
# encoding of the image.
# Runs the softmax tensor by feeding the image_data as input to the graph.
softmax_tensor = sess.graph.get_tensor_by_name('softmax:0')
predictions = sess.run(softmax_tensor,
{'DecodeJpeg/contents:0': image_data})
predictions = np.squeeze(predictions)
# Creates node ID --> English string lookup.
node_lookup = NodeLookup()
top_k = predictions.argsort()[-FLAGS.num_top_predictions:][::-1]
for node_id in top_k:
human_string = node_lookup.id_to_string(node_id)
score = predictions[node_id]
print('%s (score = %.5f)' % (human_string, score))
def maybe_download_and_extract():
"""Download and extract model tar file."""
dest_directory = FLAGS.model_dir
if not os.path.exists(dest_directory):
os.makedirs(dest_directory)
filename = DATA_URL.split('/')[-1]
filepath = os.path.join(dest_directory, filename)
if not os.path.exists(filepath):
def _progress(count, block_size, total_size):
sys.stdout.write('\r>> Downloading %s %.1f%%' % (
filename, float(count * block_size) / float(total_size) * 100.0))
sys.stdout.flush()
filepath, _ = urllib.request.urlretrieve(DATA_URL, filepath, _progress)
print()
statinfo = os.stat(filepath)
print('Successfully downloaded', filename, statinfo.st_size, 'bytes.')
tarfile.open(filepath, 'r:gz').extractall(dest_directory)
def testForFun():
path = './test'
# filenames = next(os.walk(path))[2]
filenames = [f for f in os.listdir(path) if f.endswith('.jpg')]
print(filenames)
for f in filenames:
print('Inference image %s:' %(f))
image = os.path.join(path, f)
run_inference_on_image(image)
def main(_):
# maybe_download_and_extract()
# image = (FLAGS.image_file if FLAGS.image_file else
# os.path.join(FLAGS.model_dir, 'cropped_panda.jpg'))
# run_inference_on_image(image)
testForFun()
if __name__ == '__main__':
tf.app.run()
|
the-stack_0_23701 | import json
import logging
import discord
from .document import FullDocument
log = logging.getLogger(__name__)
class Collection:
def __init__(self, client, channel):
self.id = channel.id
self.chan = channel
self.client = client
def __repr__(self):
return f'Collection({self.chan!r})'
async def get_single(self, message_id: int) -> 'Document':
"""Get a single document"""
message_id = int(message_id)
m = await self.chan.get_message(message_id)
if m is not None:
return FullDocument(self.client, m)
return
async def simple_query(self, query: dict) -> 'Document':
"""Make a Simple Query to a collection.
Parameters
----------
query: dict
Query object to the collection.
Returns
-------
FullDocument
The document found.
None
If no documents were found.
"""
if '_id' in query:
return await self.get_single(query['_id'])
if 'raw' in query:
# Search raw-y
raw = query['raw']
for message_id in self.client.indexdb[self.chan.id]:
try:
m = await self.chan.get_message(message_id)
except discord.NotFound:
return
if raw in m.content:
log.debug(m.content)
return FullDocument(self.client, m)
return
# search by JSON, the most expensive
for message_id in self.client.indexdb[self.chan.id]:
try:
m = await self.chan.get_message(message_id)
except discord.NotFound:
m = None
if m is not None:
doc = FullDocument(self.client, m)
if doc.match(query):
return doc
return
async def insert(self, document):
m = await self.chan.send(json.dumps(document.to_raw))
self.client.indexdb[self.chan.id].append(m.id)
async def delete(self, document):
try:
self.client.indexdb[self.id].remove(document.message.id)
except ValueError:
pass
return await document.message.delete()
async def update(self, document):
await document.message.edit(content=document.to_raw_json)
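# Illustrative sketch (not part of the original module): the calls a bot
# command handler might make against a Collection.  The query field "name"
# is an arbitrary example of a document attribute.
async def _example_collection_usage(collection):
    doc = await collection.simple_query({'name': 'example'})
    if doc is None:
        log.debug('no matching document found')
        return None
    # Persist any in-memory changes made to the document back to the channel.
    await collection.update(doc)
    return doc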
|
the-stack_0_23702 | # Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Tests for L{twisted.application.app} and L{twisted.scripts.twistd}.
"""
import errno
import inspect
import pickle
import signal
import os
import sys
try:
import pwd as _pwd
import grp as _grp
except ImportError:
pwd = None
grp = None
else:
pwd = _pwd
grp = _grp
from io import StringIO
from unittest import skipIf
from zope.interface import implementer
from zope.interface.verify import verifyObject
from twisted.trial.unittest import TestCase
from twisted.test.test_process import MockOS
from twisted import plugin, logger, internet
from twisted.application import service, app, reactors
from twisted.application.service import IServiceMaker
from twisted.internet.defer import Deferred
from twisted.internet.interfaces import IReactorDaemonize, _ISupportsExitSignalCapturing
from twisted.internet.test.modulehelpers import AlternateReactor
from twisted.logger import globalLogBeginner, globalLogPublisher, ILogObserver
from twisted.internet.base import ReactorBase
from twisted.test.proto_helpers import MemoryReactor
from twisted.python.components import Componentized
from twisted.python import util
from twisted.python.log import ILogObserver as LegacyILogObserver, textFromEventDict
from twisted.python.reflect import requireModule
from twisted.python.runtime import platformType
from twisted.python.usage import UsageError
from twisted.python.fakepwd import UserDatabase
from twisted.scripts import twistd
_twistd_unix = requireModule("twistd.scripts._twistd_unix")
if _twistd_unix:
from twisted.scripts._twistd_unix import checkPID
from twisted.scripts._twistd_unix import UnixApplicationRunner
from twisted.scripts._twistd_unix import UnixAppLogger
syslog = requireModule("twistd.python.syslog")
profile = requireModule("profile")
pstats = requireModule("pstats")
cProfile = requireModule("cProfile")
def patchUserDatabase(patch, user, uid, group, gid):
"""
Patch L{pwd.getpwnam} so that it behaves as though only one user exists
and patch L{grp.getgrnam} so that it behaves as though only one group
exists.
@param patch: A function like L{TestCase.patch} which will be used to
install the fake implementations.
@type user: C{str}
@param user: The name of the single user which will exist.
@type uid: C{int}
@param uid: The UID of the single user which will exist.
@type group: C{str}
@param group: The name of the single user which will exist.
@type gid: C{int}
@param gid: The GID of the single group which will exist.
"""
# Try not to be an unverified fake, but try not to depend on quirks of
# the system either (eg, run as a process with a uid and gid which
# equal each other, and so doesn't reliably test that uid is used where
# uid should be used and gid is used where gid should be used). -exarkun
pwent = pwd.getpwuid(os.getuid())
grent = grp.getgrgid(os.getgid())
database = UserDatabase()
database.addUser(
user, pwent.pw_passwd, uid, gid, pwent.pw_gecos, pwent.pw_dir, pwent.pw_shell
)
def getgrnam(name):
result = list(grent)
result[result.index(grent.gr_name)] = group
result[result.index(grent.gr_gid)] = gid
result = tuple(result)
return {group: result}[name]
patch(pwd, "getpwnam", database.getpwnam)
patch(grp, "getgrnam", getgrnam)
patch(pwd, "getpwuid", database.getpwuid)
class MockServiceMaker:
"""
A non-implementation of L{twisted.application.service.IServiceMaker}.
"""
tapname = "ueoa"
def makeService(self, options):
"""
Take a L{usage.Options} instance and return a
L{service.IService} provider.
"""
self.options = options
self.service = service.Service()
return self.service
class CrippledAppLogger(app.AppLogger):
"""
@see: CrippledApplicationRunner.
"""
def start(self, application):
pass
class CrippledApplicationRunner(twistd._SomeApplicationRunner):
"""
An application runner that cripples the platform-specific runner and
nasty side-effect-having code so that we can use it without actually
running any environment-affecting code.
"""
loggerFactory = CrippledAppLogger
def preApplication(self):
pass
def postApplication(self):
pass
class ServerOptionsTests(TestCase):
"""
Non-platform-specific tests for the platform-specific ServerOptions class.
"""
def test_subCommands(self):
"""
subCommands is built from IServiceMaker plugins, and is sorted
alphabetically.
"""
class FakePlugin:
def __init__(self, name):
self.tapname = name
self._options = "options for " + name
self.description = "description of " + name
def options(self):
return self._options
apple = FakePlugin("apple")
banana = FakePlugin("banana")
coconut = FakePlugin("coconut")
donut = FakePlugin("donut")
def getPlugins(interface):
self.assertEqual(interface, IServiceMaker)
yield coconut
yield banana
yield donut
yield apple
config = twistd.ServerOptions()
self.assertEqual(config._getPlugins, plugin.getPlugins)
config._getPlugins = getPlugins
# "subCommands is a list of 4-tuples of (command name, command
# shortcut, parser class, documentation)."
subCommands = config.subCommands
expectedOrder = [apple, banana, coconut, donut]
for subCommand, expectedCommand in zip(subCommands, expectedOrder):
name, shortcut, parserClass, documentation = subCommand
self.assertEqual(name, expectedCommand.tapname)
self.assertIsNone(shortcut)
self.assertEqual(parserClass(), expectedCommand._options)
self.assertEqual(documentation, expectedCommand.description)
def test_sortedReactorHelp(self):
"""
Reactor names are listed alphabetically by I{--help-reactors}.
"""
class FakeReactorInstaller:
def __init__(self, name):
self.shortName = "name of " + name
self.description = "description of " + name
self.moduleName = "twisted.internet.default"
apple = FakeReactorInstaller("apple")
banana = FakeReactorInstaller("banana")
coconut = FakeReactorInstaller("coconut")
donut = FakeReactorInstaller("donut")
def getReactorTypes():
yield coconut
yield banana
yield donut
yield apple
config = twistd.ServerOptions()
self.assertEqual(config._getReactorTypes, reactors.getReactorTypes)
config._getReactorTypes = getReactorTypes
config.messageOutput = StringIO()
self.assertRaises(SystemExit, config.parseOptions, ["--help-reactors"])
helpOutput = config.messageOutput.getvalue()
indexes = []
for reactor in apple, banana, coconut, donut:
def getIndex(s):
self.assertIn(s, helpOutput)
indexes.append(helpOutput.index(s))
getIndex(reactor.shortName)
getIndex(reactor.description)
self.assertEqual(
indexes,
sorted(indexes),
"reactor descriptions were not in alphabetical order: {!r}".format(
helpOutput
),
)
def test_postOptionsSubCommandCausesNoSave(self):
"""
postOptions should set no_save to True when a subcommand is used.
"""
config = twistd.ServerOptions()
config.subCommand = "ueoa"
config.postOptions()
self.assertTrue(config["no_save"])
def test_postOptionsNoSubCommandSavesAsUsual(self):
"""
If no sub command is used, postOptions should not touch no_save.
"""
config = twistd.ServerOptions()
config.postOptions()
self.assertFalse(config["no_save"])
def test_listAllProfilers(self):
"""
All the profilers that can be used in L{app.AppProfiler} are listed in
the help output.
"""
config = twistd.ServerOptions()
helpOutput = str(config)
for profiler in app.AppProfiler.profilers:
self.assertIn(profiler, helpOutput)
@skipIf(not _twistd_unix, "twistd unix not available")
def test_defaultUmask(self):
"""
The default value for the C{umask} option is L{None}.
"""
config = twistd.ServerOptions()
self.assertIsNone(config["umask"])
@skipIf(not _twistd_unix, "twistd unix not available")
def test_umask(self):
"""
The value given for the C{umask} option is parsed as an octal integer
literal.
"""
config = twistd.ServerOptions()
config.parseOptions(["--umask", "123"])
self.assertEqual(config["umask"], 83)
config.parseOptions(["--umask", "0123"])
self.assertEqual(config["umask"], 83)
@skipIf(not _twistd_unix, "twistd unix not available")
def test_invalidUmask(self):
"""
If a value is given for the C{umask} option which cannot be parsed as
an integer, L{UsageError} is raised by L{ServerOptions.parseOptions}.
"""
config = twistd.ServerOptions()
self.assertRaises(UsageError, config.parseOptions, ["--umask", "abcdef"])
def test_unimportableConfiguredLogObserver(self):
"""
C{--logger} with an unimportable module raises a L{UsageError}.
"""
config = twistd.ServerOptions()
e = self.assertRaises(
UsageError, config.parseOptions, ["--logger", "no.such.module.I.hope"]
)
self.assertTrue(
e.args[0].startswith(
"Logger 'no.such.module.I.hope' could not be imported: "
"'no.such.module.I.hope' does not name an object"
)
)
self.assertNotIn("\n", e.args[0])
def test_badAttributeWithConfiguredLogObserver(self):
"""
C{--logger} with a non-existent object raises a L{UsageError}.
"""
config = twistd.ServerOptions()
e = self.assertRaises(
UsageError,
config.parseOptions,
["--logger", "twisted.test.test_twistd.FOOBAR"],
)
self.assertTrue(
e.args[0].startswith(
"Logger 'twisted.test.test_twistd.FOOBAR' could not be "
"imported: module 'twisted.test.test_twistd' "
"has no attribute 'FOOBAR'"
)
)
self.assertNotIn("\n", e.args[0])
def test_version(self):
"""
C{--version} prints the version.
"""
from twisted import copyright
if platformType == "win32":
name = "(the Twisted Windows runner)"
else:
name = "(the Twisted daemon)"
expectedOutput = "twistd {} {}\n{}\n".format(
name, copyright.version, copyright.copyright
)
stdout = StringIO()
config = twistd.ServerOptions(stdout=stdout)
e = self.assertRaises(SystemExit, config.parseOptions, ["--version"])
self.assertIs(e.code, None)
self.assertEqual(stdout.getvalue(), expectedOutput)
def test_printSubCommandForUsageError(self):
"""
Command is printed when an invalid option is requested.
"""
stdout = StringIO()
config = twistd.ServerOptions(stdout=stdout)
self.assertRaises(UsageError, config.parseOptions, ["web --foo"])
@skipIf(not _twistd_unix, "twistd unix not available")
class CheckPIDTests(TestCase):
"""
Tests for L{checkPID}.
"""
def test_notExists(self):
"""
Nonexistent PID file is not an error.
"""
self.patch(os.path, "exists", lambda _: False)
checkPID("non-existent PID file")
def test_nonNumeric(self):
"""
Non-numeric content in a PID file causes a system exit.
"""
pidfile = self.mktemp()
with open(pidfile, "w") as f:
f.write("non-numeric")
e = self.assertRaises(SystemExit, checkPID, pidfile)
self.assertIn("non-numeric value", e.code)
def test_anotherRunning(self):
"""
Another running twistd server causes a system exit.
"""
pidfile = self.mktemp()
with open(pidfile, "w") as f:
f.write("42")
def kill(pid, sig):
pass
self.patch(os, "kill", kill)
e = self.assertRaises(SystemExit, checkPID, pidfile)
self.assertIn("Another twistd server", e.code)
def test_stale(self):
"""
Stale PID file is removed without causing a system exit.
"""
pidfile = self.mktemp()
with open(pidfile, "w") as f:
f.write(str(os.getpid() + 1))
def kill(pid, sig):
raise OSError(errno.ESRCH, "fake")
self.patch(os, "kill", kill)
checkPID(pidfile)
self.assertFalse(os.path.exists(pidfile))
def test_unexpectedOSError(self):
"""
An unexpected L{OSError} when checking the validity of a
PID in a C{pidfile} terminates the process via L{SystemExit}.
"""
pidfile = self.mktemp()
with open(pidfile, "w") as f:
f.write("3581")
def kill(pid, sig):
raise OSError(errno.EBADF, "fake")
self.patch(os, "kill", kill)
e = self.assertRaises(SystemExit, checkPID, pidfile)
self.assertIsNot(e.code, None)
self.assertTrue(e.args[0].startswith("Can't check status of PID"))
class TapFileTests(TestCase):
"""
Test twistd-related functionality that requires a tap file on disk.
"""
def setUp(self):
"""
Create a trivial Application and put it in a tap file on disk.
"""
self.tapfile = self.mktemp()
with open(self.tapfile, "wb") as f:
pickle.dump(service.Application("Hi!"), f)
def test_createOrGetApplicationWithTapFile(self):
"""
Ensure that the createOrGetApplication call that 'twistd -f foo.tap'
makes will load the Application out of foo.tap.
"""
config = twistd.ServerOptions()
config.parseOptions(["-f", self.tapfile])
application = CrippledApplicationRunner(config).createOrGetApplication()
self.assertEqual(service.IService(application).name, "Hi!")
class TestLoggerFactory:
"""
A logger factory for L{TestApplicationRunner}.
"""
def __init__(self, runner):
self.runner = runner
def start(self, application):
"""
Save the logging start on the C{runner} instance.
"""
self.runner.order.append("log")
self.runner.hadApplicationLogObserver = hasattr(self.runner, "application")
def stop(self):
"""
Don't log anything.
"""
class TestApplicationRunner(app.ApplicationRunner):
"""
An ApplicationRunner which tracks the environment in which its methods are
called.
"""
def __init__(self, options):
app.ApplicationRunner.__init__(self, options)
self.order = []
self.logger = TestLoggerFactory(self)
def preApplication(self):
self.order.append("pre")
self.hadApplicationPreApplication = hasattr(self, "application")
def postApplication(self):
self.order.append("post")
self.hadApplicationPostApplication = hasattr(self, "application")
class ApplicationRunnerTests(TestCase):
"""
Non-platform-specific tests for the platform-specific ApplicationRunner.
"""
def setUp(self):
config = twistd.ServerOptions()
self.serviceMaker = MockServiceMaker()
# Set up a config object like it's been parsed with a subcommand
config.loadedPlugins = {"test_command": self.serviceMaker}
config.subOptions = object()
config.subCommand = "test_command"
self.config = config
def test_applicationRunnerGetsCorrectApplication(self):
"""
Ensure that a twistd plugin gets used in appropriate ways: it
is passed its Options instance, and the service it returns is
added to the application.
"""
arunner = CrippledApplicationRunner(self.config)
arunner.run()
self.assertIs(
self.serviceMaker.options,
self.config.subOptions,
"ServiceMaker.makeService needs to be passed the correct "
"sub Command object.",
)
self.assertIs(
self.serviceMaker.service,
service.IService(arunner.application).services[0],
"ServiceMaker.makeService's result needs to be set as a child "
"of the Application.",
)
def test_preAndPostApplication(self):
"""
Test that preApplication and postApplication methods are
called by ApplicationRunner.run() when appropriate.
"""
s = TestApplicationRunner(self.config)
s.run()
self.assertFalse(s.hadApplicationPreApplication)
self.assertTrue(s.hadApplicationPostApplication)
self.assertTrue(s.hadApplicationLogObserver)
self.assertEqual(s.order, ["pre", "log", "post"])
def _applicationStartsWithConfiguredID(self, argv, uid, gid):
"""
Assert that given a particular command line, an application is started
as a particular UID/GID.
@param argv: A list of strings giving the options to parse.
@param uid: An integer giving the expected UID.
@param gid: An integer giving the expected GID.
"""
self.config.parseOptions(argv)
events = []
class FakeUnixApplicationRunner(twistd._SomeApplicationRunner):
def setupEnvironment(self, chroot, rundir, nodaemon, umask, pidfile):
events.append("environment")
def shedPrivileges(self, euid, uid, gid):
events.append(("privileges", euid, uid, gid))
def startReactor(self, reactor, oldstdout, oldstderr):
events.append("reactor")
def removePID(self, pidfile):
pass
@implementer(service.IService, service.IProcess)
class FakeService:
parent = None
running = None
name = None
processName = None
uid = None
gid = None
def setName(self, name):
pass
def setServiceParent(self, parent):
pass
def disownServiceParent(self):
pass
def privilegedStartService(self):
events.append("privilegedStartService")
def startService(self):
events.append("startService")
def stopService(self):
pass
application = FakeService()
verifyObject(service.IService, application)
verifyObject(service.IProcess, application)
runner = FakeUnixApplicationRunner(self.config)
runner.preApplication()
runner.application = application
runner.postApplication()
self.assertEqual(
events,
[
"environment",
"privilegedStartService",
("privileges", False, uid, gid),
"startService",
"reactor",
],
)
@skipIf(
not getattr(os, "setuid", None),
"Platform does not support --uid/--gid twistd options.",
)
def test_applicationStartsWithConfiguredNumericIDs(self):
"""
L{postApplication} should change the UID and GID to the values
specified as numeric strings by the configuration after running
L{service.IService.privilegedStartService} and before running
L{service.IService.startService}.
"""
uid = 1234
gid = 4321
self._applicationStartsWithConfiguredID(
["--uid", str(uid), "--gid", str(gid)], uid, gid
)
@skipIf(
not getattr(os, "setuid", None),
"Platform does not support --uid/--gid twistd options.",
)
def test_applicationStartsWithConfiguredNameIDs(self):
"""
L{postApplication} should change the UID and GID to the values
specified as user and group names by the configuration after running
L{service.IService.privilegedStartService} and before running
L{service.IService.startService}.
"""
user = "foo"
uid = 1234
group = "bar"
gid = 4321
patchUserDatabase(self.patch, user, uid, group, gid)
self._applicationStartsWithConfiguredID(
["--uid", user, "--gid", group], uid, gid
)
def test_startReactorRunsTheReactor(self):
"""
L{startReactor} calls L{reactor.run}.
"""
reactor = DummyReactor()
runner = app.ApplicationRunner(
{"profile": False, "profiler": "profile", "debug": False}
)
runner.startReactor(reactor, None, None)
self.assertTrue(reactor.called, "startReactor did not call reactor.run()")
def test_applicationRunnerChoosesReactorIfNone(self):
"""
L{ApplicationRunner} chooses a reactor if none is specified.
"""
reactor = DummyReactor()
self.patch(internet, "reactor", reactor)
runner = app.ApplicationRunner(
{"profile": False, "profiler": "profile", "debug": False}
)
runner.startReactor(None, None, None)
self.assertTrue(reactor.called)
def test_applicationRunnerCapturesSignal(self):
"""
If the reactor exits with a signal, the application runner caches
the signal.
"""
class DummyReactorWithSignal(ReactorBase):
"""
A dummy reactor, providing a C{run} method, and setting the
_exitSignal attribute to a nonzero value.
"""
def installWaker(self):
"""
Dummy method, does nothing.
"""
def run(self):
"""
A fake run method setting _exitSignal to a nonzero value
"""
self._exitSignal = 2
reactor = DummyReactorWithSignal()
runner = app.ApplicationRunner(
{"profile": False, "profiler": "profile", "debug": False}
)
runner.startReactor(reactor, None, None)
self.assertEquals(2, runner._exitSignal)
def test_applicationRunnerIgnoresNoSignal(self):
"""
The runner sets its _exitSignal instance attribute to None if
the reactor does not implement L{_ISupportsExitSignalCapturing}.
"""
class DummyReactorWithExitSignalAttribute:
"""
A dummy reactor, providing a C{run} method, and setting the
_exitSignal attribute to a nonzero value.
"""
def installWaker(self):
"""
Dummy method, does nothing.
"""
def run(self):
"""
A fake run method setting _exitSignal to a nonzero value
that should be ignored.
"""
self._exitSignal = 2
reactor = DummyReactorWithExitSignalAttribute()
runner = app.ApplicationRunner(
{"profile": False, "profiler": "profile", "debug": False}
)
runner.startReactor(reactor, None, None)
self.assertEquals(None, runner._exitSignal)
@skipIf(not _twistd_unix, "twistd unix not available")
class UnixApplicationRunnerSetupEnvironmentTests(TestCase):
"""
Tests for L{UnixApplicationRunner.setupEnvironment}.
@ivar root: The root of the filesystem, or C{unset} if none has been
specified with a call to L{os.chroot} (patched for this TestCase with
L{UnixApplicationRunnerSetupEnvironmentTests.chroot}).
@ivar cwd: The current working directory of the process, or C{unset} if
none has been specified with a call to L{os.chdir} (patched for this
TestCase with L{UnixApplicationRunnerSetupEnvironmentTests.chdir}).
@ivar mask: The current file creation mask of the process, or C{unset} if
none has been specified with a call to L{os.umask} (patched for this
TestCase with L{UnixApplicationRunnerSetupEnvironmentTests.umask}).
@ivar daemon: A boolean indicating whether daemonization has been performed
by a call to L{_twistd_unix.daemonize} (patched for this TestCase with
L{UnixApplicationRunnerSetupEnvironmentTests.daemonize}).
"""
unset = object()
def setUp(self):
self.root = self.unset
self.cwd = self.unset
self.mask = self.unset
self.daemon = False
self.pid = os.getpid()
self.patch(os, "chroot", lambda path: setattr(self, "root", path))
self.patch(os, "chdir", lambda path: setattr(self, "cwd", path))
self.patch(os, "umask", lambda mask: setattr(self, "mask", mask))
self.runner = UnixApplicationRunner(twistd.ServerOptions())
self.runner.daemonize = self.daemonize
def daemonize(self, reactor):
"""
Indicate that daemonization has happened and change the PID so that the
value written to the pidfile can be tested in the daemonization case.
"""
self.daemon = True
self.patch(os, "getpid", lambda: self.pid + 1)
def test_chroot(self):
"""
L{UnixApplicationRunner.setupEnvironment} changes the root of the
filesystem if passed a non-L{None} value for the C{chroot} parameter.
"""
self.runner.setupEnvironment("/foo/bar", ".", True, None, None)
self.assertEqual(self.root, "/foo/bar")
def test_noChroot(self):
"""
L{UnixApplicationRunner.setupEnvironment} does not change the root of
the filesystem if passed L{None} for the C{chroot} parameter.
"""
self.runner.setupEnvironment(None, ".", True, None, None)
self.assertIs(self.root, self.unset)
def test_changeWorkingDirectory(self):
"""
L{UnixApplicationRunner.setupEnvironment} changes the working directory
of the process to the path given for the C{rundir} parameter.
"""
self.runner.setupEnvironment(None, "/foo/bar", True, None, None)
self.assertEqual(self.cwd, "/foo/bar")
def test_daemonize(self):
"""
L{UnixApplicationRunner.setupEnvironment} daemonizes the process if
C{False} is passed for the C{nodaemon} parameter.
"""
with AlternateReactor(FakeDaemonizingReactor()):
self.runner.setupEnvironment(None, ".", False, None, None)
self.assertTrue(self.daemon)
def test_noDaemonize(self):
"""
L{UnixApplicationRunner.setupEnvironment} does not daemonize the
process if C{True} is passed for the C{nodaemon} parameter.
"""
self.runner.setupEnvironment(None, ".", True, None, None)
self.assertFalse(self.daemon)
def test_nonDaemonPIDFile(self):
"""
L{UnixApplicationRunner.setupEnvironment} writes the process's PID to
the file specified by the C{pidfile} parameter.
"""
pidfile = self.mktemp()
self.runner.setupEnvironment(None, ".", True, None, pidfile)
with open(pidfile, "rb") as f:
pid = int(f.read())
self.assertEqual(pid, self.pid)
def test_daemonPIDFile(self):
"""
L{UnixApplicationRunner.setupEnvironment} writes the daemonized
process's PID to the file specified by the C{pidfile} parameter if
C{nodaemon} is C{False}.
"""
pidfile = self.mktemp()
with AlternateReactor(FakeDaemonizingReactor()):
self.runner.setupEnvironment(None, ".", False, None, pidfile)
with open(pidfile, "rb") as f:
pid = int(f.read())
self.assertEqual(pid, self.pid + 1)
def test_umask(self):
"""
L{UnixApplicationRunner.setupEnvironment} changes the process umask to
the value specified by the C{umask} parameter.
"""
with AlternateReactor(FakeDaemonizingReactor()):
self.runner.setupEnvironment(None, ".", False, 123, None)
self.assertEqual(self.mask, 123)
def test_noDaemonizeNoUmask(self):
"""
L{UnixApplicationRunner.setupEnvironment} doesn't change the process
umask if L{None} is passed for the C{umask} parameter and C{True} is
passed for the C{nodaemon} parameter.
"""
self.runner.setupEnvironment(None, ".", True, None, None)
self.assertIs(self.mask, self.unset)
def test_daemonizedNoUmask(self):
"""
L{UnixApplicationRunner.setupEnvironment} changes the process umask to
C{0077} if L{None} is passed for the C{umask} parameter and C{False} is
passed for the C{nodaemon} parameter.
"""
with AlternateReactor(FakeDaemonizingReactor()):
self.runner.setupEnvironment(None, ".", False, None, None)
self.assertEqual(self.mask, 0o077)
@skipIf(not _twistd_unix, "twistd unix not available")
class UnixApplicationRunnerStartApplicationTests(TestCase):
"""
Tests for L{UnixApplicationRunner.startApplication}.
"""
def test_setupEnvironment(self):
"""
L{UnixApplicationRunner.startApplication} calls
L{UnixApplicationRunner.setupEnvironment} with the chroot, rundir,
nodaemon, umask, and pidfile parameters from the configuration it is
constructed with.
"""
options = twistd.ServerOptions()
options.parseOptions(
[
"--nodaemon",
"--umask",
"0070",
"--chroot",
"/foo/chroot",
"--rundir",
"/foo/rundir",
"--pidfile",
"/foo/pidfile",
]
)
application = service.Application("test_setupEnvironment")
self.runner = UnixApplicationRunner(options)
args = []
def fakeSetupEnvironment(self, chroot, rundir, nodaemon, umask, pidfile):
args.extend((chroot, rundir, nodaemon, umask, pidfile))
# Sanity check
setupEnvironmentParameters = inspect.signature(
self.runner.setupEnvironment
).parameters
fakeSetupEnvironmentParameters = inspect.signature(
fakeSetupEnvironment
).parameters
# inspect.signature() does not return "self" in the signature of
# a class method, so we need to omit it when comparing the
# signature of a plain method
fakeSetupEnvironmentParameters = fakeSetupEnvironmentParameters.copy()
fakeSetupEnvironmentParameters.pop("self")
self.assertEqual(setupEnvironmentParameters, fakeSetupEnvironmentParameters)
self.patch(UnixApplicationRunner, "setupEnvironment", fakeSetupEnvironment)
self.patch(UnixApplicationRunner, "shedPrivileges", lambda *a, **kw: None)
self.patch(app, "startApplication", lambda *a, **kw: None)
self.runner.startApplication(application)
self.assertEqual(args, ["/foo/chroot", "/foo/rundir", True, 56, "/foo/pidfile"])
def test_shedPrivileges(self):
"""
L{UnixApplicationRunner.shedPrivileges} switches the user ID
of the process.
"""
def switchUIDPass(uid, gid, euid):
self.assertEqual(uid, 200)
self.assertEqual(gid, 54)
self.assertEqual(euid, 35)
self.patch(_twistd_unix, "switchUID", switchUIDPass)
runner = UnixApplicationRunner({})
runner.shedPrivileges(35, 200, 54)
def test_shedPrivilegesError(self):
"""
An unexpected L{OSError} when calling
L{twisted.scripts._twistd_unix.shedPrivileges}
terminates the process via L{SystemExit}.
"""
def switchUIDFail(uid, gid, euid):
raise OSError(errno.EBADF, "fake")
runner = UnixApplicationRunner({})
self.patch(_twistd_unix, "switchUID", switchUIDFail)
exc = self.assertRaises(SystemExit, runner.shedPrivileges, 35, 200, None)
self.assertEqual(exc.code, 1)
def _setUID(self, wantedUser, wantedUid, wantedGroup, wantedGid):
"""
Common code for tests which try to pass the the UID to
L{UnixApplicationRunner}.
"""
patchUserDatabase(self.patch, wantedUser, wantedUid, wantedGroup, wantedGid)
def initgroups(uid, gid):
self.assertEqual(uid, wantedUid)
self.assertEqual(gid, wantedGid)
def setuid(uid):
self.assertEqual(uid, wantedUid)
def setgid(gid):
self.assertEqual(gid, wantedGid)
self.patch(util, "initgroups", initgroups)
self.patch(os, "setuid", setuid)
self.patch(os, "setgid", setgid)
options = twistd.ServerOptions()
options.parseOptions(["--nodaemon", "--uid", str(wantedUid)])
application = service.Application("test_setupEnvironment")
self.runner = UnixApplicationRunner(options)
runner = UnixApplicationRunner(options)
runner.startApplication(application)
def test_setUidWithoutGid(self):
"""
Starting an application with L{UnixApplicationRunner} configured
with a UID and no GID will result in the GID being
set to the default GID for that UID.
"""
self._setUID("foo", 5151, "bar", 4242)
def test_setUidSameAsCurrentUid(self):
"""
If the specified UID is the same as the current UID of the process,
then a warning is displayed.
"""
currentUid = os.getuid()
self._setUID("morefoo", currentUid, "morebar", 4343)
warningsShown = self.flushWarnings()
self.assertEqual(1, len(warningsShown))
expectedWarning = (
"tried to drop privileges and setuid {} but uid is already {}; "
"should we be root? Continuing.".format(currentUid, currentUid)
)
self.assertEqual(expectedWarning, warningsShown[0]["message"])
@skipIf(not _twistd_unix, "twistd unix not available")
class UnixApplicationRunnerRemovePIDTests(TestCase):
"""
Tests for L{UnixApplicationRunner.removePID}.
"""
def test_removePID(self):
"""
L{UnixApplicationRunner.removePID} deletes the file the name of
which is passed to it.
"""
runner = UnixApplicationRunner({})
path = self.mktemp()
os.makedirs(path)
pidfile = os.path.join(path, "foo.pid")
open(pidfile, "w").close()
runner.removePID(pidfile)
self.assertFalse(os.path.exists(pidfile))
def test_removePIDErrors(self):
"""
Calling L{UnixApplicationRunner.removePID} with a non-existent filename
logs an OSError.
"""
runner = UnixApplicationRunner({})
runner.removePID("fakepid")
errors = self.flushLoggedErrors(OSError)
self.assertEqual(len(errors), 1)
self.assertEqual(errors[0].value.errno, errno.ENOENT)
class FakeNonDaemonizingReactor:
"""
A dummy reactor, providing C{beforeDaemonize} and C{afterDaemonize}
methods, but not announcing this, and logging whether the methods have been
called.
@ivar _beforeDaemonizeCalled: if C{beforeDaemonize} has been called or not.
@type _beforeDaemonizeCalled: C{bool}
@ivar _afterDaemonizeCalled: if C{afterDaemonize} has been called or not.
@type _afterDaemonizeCalled: C{bool}
"""
def __init__(self):
self._beforeDaemonizeCalled = False
self._afterDaemonizeCalled = False
def beforeDaemonize(self):
self._beforeDaemonizeCalled = True
def afterDaemonize(self):
self._afterDaemonizeCalled = True
def addSystemEventTrigger(self, *args, **kw):
"""
Skip event registration.
"""
@implementer(IReactorDaemonize)
class FakeDaemonizingReactor(FakeNonDaemonizingReactor):
"""
A dummy reactor, providing C{beforeDaemonize} and C{afterDaemonize}
methods, announcing this, and logging whether the methods have been called.
"""
class DummyReactor:
"""
A dummy reactor, only providing a C{run} method and checking that it
has been called.
@ivar called: if C{run} has been called or not.
@type called: C{bool}
"""
called = False
def run(self):
"""
        A fake run method, checking that it's been called exactly once.
"""
if self.called:
raise RuntimeError("Already called")
self.called = True
class AppProfilingTests(TestCase):
"""
Tests for L{app.AppProfiler}.
"""
@skipIf(not profile, "profile module not available")
def test_profile(self):
"""
L{app.ProfileRunner.run} should call the C{run} method of the reactor
and save profile data in the specified file.
"""
config = twistd.ServerOptions()
config["profile"] = self.mktemp()
config["profiler"] = "profile"
profiler = app.AppProfiler(config)
reactor = DummyReactor()
profiler.run(reactor)
self.assertTrue(reactor.called)
with open(config["profile"]) as f:
data = f.read()
self.assertIn("DummyReactor.run", data)
self.assertIn("function calls", data)
def _testStats(self, statsClass, profile):
out = StringIO()
# Patch before creating the pstats, because pstats binds self.stream to
# sys.stdout early in 2.5 and newer.
stdout = self.patch(sys, "stdout", out)
# If pstats.Stats can load the data and then reformat it, then the
# right thing probably happened.
stats = statsClass(profile)
stats.print_stats()
stdout.restore()
data = out.getvalue()
self.assertIn("function calls", data)
self.assertIn("(run)", data)
@skipIf(not profile, "profile module not available")
def test_profileSaveStats(self):
"""
With the C{savestats} option specified, L{app.ProfileRunner.run}
should save the raw stats object instead of a summary output.
"""
config = twistd.ServerOptions()
config["profile"] = self.mktemp()
config["profiler"] = "profile"
config["savestats"] = True
profiler = app.AppProfiler(config)
reactor = DummyReactor()
profiler.run(reactor)
self.assertTrue(reactor.called)
self._testStats(pstats.Stats, config["profile"])
def test_withoutProfile(self):
"""
        When the C{profile} module is not present, L{app.ProfileRunner.run}
should raise a C{SystemExit} exception.
"""
savedModules = sys.modules.copy()
config = twistd.ServerOptions()
config["profiler"] = "profile"
profiler = app.AppProfiler(config)
sys.modules["profile"] = None
try:
self.assertRaises(SystemExit, profiler.run, None)
finally:
sys.modules.clear()
sys.modules.update(savedModules)
@skipIf(not profile, "profile module not available")
def test_profilePrintStatsError(self):
"""
When an error happens during the print of the stats, C{sys.stdout}
should be restored to its initial value.
"""
class ErroneousProfile(profile.Profile):
def print_stats(self):
raise RuntimeError("Boom")
self.patch(profile, "Profile", ErroneousProfile)
config = twistd.ServerOptions()
config["profile"] = self.mktemp()
config["profiler"] = "profile"
profiler = app.AppProfiler(config)
reactor = DummyReactor()
oldStdout = sys.stdout
self.assertRaises(RuntimeError, profiler.run, reactor)
self.assertIs(sys.stdout, oldStdout)
@skipIf(not cProfile, "cProfile module not available")
def test_cProfile(self):
"""
L{app.CProfileRunner.run} should call the C{run} method of the
reactor and save profile data in the specified file.
"""
config = twistd.ServerOptions()
config["profile"] = self.mktemp()
config["profiler"] = "cProfile"
profiler = app.AppProfiler(config)
reactor = DummyReactor()
profiler.run(reactor)
self.assertTrue(reactor.called)
with open(config["profile"]) as f:
data = f.read()
self.assertIn("run", data)
self.assertIn("function calls", data)
@skipIf(not cProfile, "cProfile module not available")
def test_cProfileSaveStats(self):
"""
With the C{savestats} option specified,
L{app.CProfileRunner.run} should save the raw stats object
instead of a summary output.
"""
config = twistd.ServerOptions()
config["profile"] = self.mktemp()
config["profiler"] = "cProfile"
config["savestats"] = True
profiler = app.AppProfiler(config)
reactor = DummyReactor()
profiler.run(reactor)
self.assertTrue(reactor.called)
self._testStats(pstats.Stats, config["profile"])
def test_withoutCProfile(self):
"""
When the C{cProfile} module is not present,
L{app.CProfileRunner.run} should raise a C{SystemExit}
exception and log the C{ImportError}.
"""
savedModules = sys.modules.copy()
sys.modules["cProfile"] = None
config = twistd.ServerOptions()
config["profiler"] = "cProfile"
profiler = app.AppProfiler(config)
try:
self.assertRaises(SystemExit, profiler.run, None)
finally:
sys.modules.clear()
sys.modules.update(savedModules)
def test_unknownProfiler(self):
"""
Check that L{app.AppProfiler} raises L{SystemExit} when given an
unknown profiler name.
"""
config = twistd.ServerOptions()
config["profile"] = self.mktemp()
config["profiler"] = "foobar"
error = self.assertRaises(SystemExit, app.AppProfiler, config)
self.assertEqual(str(error), "Unsupported profiler name: foobar")
def test_defaultProfiler(self):
"""
        L{app.AppProfiler} defaults to the C{cprofile} profiler if not specified.
"""
profiler = app.AppProfiler({})
self.assertEqual(profiler.profiler, "cprofile")
    def test_profilerNameCaseInsensitive(self):
"""
The case of the profiler name passed to L{app.AppProfiler} is not
relevant.
"""
profiler = app.AppProfiler({"profiler": "CprOfile"})
self.assertEqual(profiler.profiler, "cprofile")
def _patchTextFileLogObserver(patch):
"""
Patch L{logger.textFileLogObserver} to record every call and keep a
reference to the passed log file for tests.
@param patch: a callback for patching (usually L{TestCase.patch}).
@return: the list that keeps track of the log files.
@rtype: C{list}
"""
logFiles = []
oldFileLogObserver = logger.textFileLogObserver
def observer(logFile, *args, **kwargs):
logFiles.append(logFile)
return oldFileLogObserver(logFile, *args, **kwargs)
patch(logger, "textFileLogObserver", observer)
return logFiles
def _setupSyslog(testCase):
"""
    Install a fake syslog observer and return the list to which the prefix and
    any subsequently logged messages will be appended if it is used.
"""
logMessages = []
class fakesyslogobserver:
def __init__(self, prefix):
logMessages.append(prefix)
def emit(self, eventDict):
logMessages.append(eventDict)
testCase.patch(syslog, "SyslogObserver", fakesyslogobserver)
return logMessages
class AppLoggerTests(TestCase):
"""
Tests for L{app.AppLogger}.
@ivar observers: list of observers installed during the tests.
@type observers: C{list}
"""
def setUp(self):
"""
        Override L{globalLogBeginner.beginLoggingTo} so that we can trace the
observers installed in C{self.observers}.
"""
self.observers = []
def beginLoggingTo(observers):
for observer in observers:
self.observers.append(observer)
globalLogPublisher.addObserver(observer)
self.patch(globalLogBeginner, "beginLoggingTo", beginLoggingTo)
def tearDown(self):
"""
Remove all installed observers.
"""
for observer in self.observers:
globalLogPublisher.removeObserver(observer)
def _makeObserver(self):
"""
Make a new observer which captures all logs sent to it.
@return: An observer that stores all logs sent to it.
@rtype: Callable that implements L{ILogObserver}.
"""
@implementer(ILogObserver)
class TestObserver:
_logs = []
def __call__(self, event):
self._logs.append(event)
return TestObserver()
def _checkObserver(self, observer):
"""
        Ensure that the initial C{twistd} log messages were written to the
        given observer.
        @param observer: The observer made by L{self._makeObserver}.
"""
self.assertEqual(self.observers, [observer])
self.assertIn("starting up", observer._logs[0]["log_format"])
self.assertIn("reactor class", observer._logs[1]["log_format"])
def test_start(self):
"""
        L{app.AppLogger.start} calls L{globalLogBeginner.beginLoggingTo}, and then
writes some messages about twistd and the reactor.
"""
logger = app.AppLogger({})
observer = self._makeObserver()
logger._getLogObserver = lambda: observer
logger.start(Componentized())
self._checkObserver(observer)
def test_startUsesApplicationLogObserver(self):
"""
When the L{ILogObserver} component is available on the application,
that object will be used as the log observer instead of constructing a
new one.
"""
application = Componentized()
observer = self._makeObserver()
application.setComponent(ILogObserver, observer)
logger = app.AppLogger({})
logger.start(application)
self._checkObserver(observer)
def _setupConfiguredLogger(
self, application, extraLogArgs={}, appLogger=app.AppLogger
):
"""
Set up an AppLogger which exercises the C{logger} configuration option.
@type application: L{Componentized}
@param application: The L{Application} object to pass to
L{app.AppLogger.start}.
@type extraLogArgs: C{dict}
@param extraLogArgs: extra values to pass to AppLogger.
@type appLogger: L{AppLogger} class, or a subclass
@param appLogger: factory for L{AppLogger} instances.
@rtype: C{list}
@return: The logs accumulated by the log observer.
"""
observer = self._makeObserver()
logArgs = {"logger": lambda: observer}
logArgs.update(extraLogArgs)
logger = appLogger(logArgs)
logger.start(application)
return observer
def test_startUsesConfiguredLogObserver(self):
"""
When the C{logger} key is specified in the configuration dictionary
(i.e., when C{--logger} is passed to twistd), the initial log observer
will be the log observer returned from the callable which the value
refers to in FQPN form.
"""
application = Componentized()
self._checkObserver(self._setupConfiguredLogger(application))
def test_configuredLogObserverBeatsComponent(self):
"""
C{--logger} takes precedence over a L{ILogObserver} component set on
Application.
"""
observer = self._makeObserver()
application = Componentized()
application.setComponent(ILogObserver, observer)
self._checkObserver(self._setupConfiguredLogger(application))
self.assertEqual(observer._logs, [])
def test_configuredLogObserverBeatsLegacyComponent(self):
"""
C{--logger} takes precedence over a L{LegacyILogObserver} component
set on Application.
"""
nonlogs = []
application = Componentized()
application.setComponent(LegacyILogObserver, nonlogs.append)
self._checkObserver(self._setupConfiguredLogger(application))
self.assertEqual(nonlogs, [])
def test_loggerComponentBeatsLegacyLoggerComponent(self):
"""
A L{ILogObserver} takes precedence over a L{LegacyILogObserver}
component set on Application.
"""
nonlogs = []
observer = self._makeObserver()
application = Componentized()
application.setComponent(ILogObserver, observer)
application.setComponent(LegacyILogObserver, nonlogs.append)
logger = app.AppLogger({})
logger.start(application)
self._checkObserver(observer)
self.assertEqual(nonlogs, [])
@skipIf(not _twistd_unix, "twistd unix not available")
@skipIf(not syslog, "syslog not available")
def test_configuredLogObserverBeatsSyslog(self):
"""
C{--logger} takes precedence over a C{--syslog} command line
argument.
"""
logs = _setupSyslog(self)
application = Componentized()
self._checkObserver(
self._setupConfiguredLogger(application, {"syslog": True}, UnixAppLogger)
)
self.assertEqual(logs, [])
def test_configuredLogObserverBeatsLogfile(self):
"""
C{--logger} takes precedence over a C{--logfile} command line
argument.
"""
application = Componentized()
path = self.mktemp()
self._checkObserver(
            self._setupConfiguredLogger(application, {"logfile": path})
)
self.assertFalse(os.path.exists(path))
def test_getLogObserverStdout(self):
"""
When logfile is empty or set to C{-}, L{app.AppLogger._getLogObserver}
returns a log observer pointing at C{sys.stdout}.
"""
logger = app.AppLogger({"logfile": "-"})
logFiles = _patchTextFileLogObserver(self.patch)
logger._getLogObserver()
self.assertEqual(len(logFiles), 1)
self.assertIs(logFiles[0], sys.stdout)
logger = app.AppLogger({"logfile": ""})
logger._getLogObserver()
self.assertEqual(len(logFiles), 2)
self.assertIs(logFiles[1], sys.stdout)
def test_getLogObserverFile(self):
"""
When passing the C{logfile} option, L{app.AppLogger._getLogObserver}
returns a log observer pointing at the specified path.
"""
logFiles = _patchTextFileLogObserver(self.patch)
filename = self.mktemp()
logger = app.AppLogger({"logfile": filename})
logger._getLogObserver()
self.assertEqual(len(logFiles), 1)
self.assertEqual(logFiles[0].path, os.path.abspath(filename))
def test_stop(self):
"""
L{app.AppLogger.stop} removes the observer created in C{start}, and
        reinitializes its C{_observer} so that calling C{stop} several
        times doesn't break.
"""
removed = []
observer = object()
def remove(observer):
removed.append(observer)
self.patch(globalLogPublisher, "removeObserver", remove)
logger = app.AppLogger({})
logger._observer = observer
logger.stop()
self.assertEqual(removed, [observer])
logger.stop()
self.assertEqual(removed, [observer])
self.assertIsNone(logger._observer)
def test_legacyObservers(self):
"""
L{app.AppLogger} using a legacy logger observer still works, wrapping
it in a compat shim.
"""
logs = []
logger = app.AppLogger({})
@implementer(LegacyILogObserver)
class LoggerObserver:
"""
An observer which implements the legacy L{LegacyILogObserver}.
"""
def __call__(self, x):
"""
Add C{x} to the logs list.
"""
logs.append(x)
logger._observerFactory = lambda: LoggerObserver()
logger.start(Componentized())
self.assertIn("starting up", textFromEventDict(logs[0]))
warnings = self.flushWarnings([self.test_legacyObservers])
self.assertEqual(len(warnings), 0)
def test_unmarkedObserversDeprecated(self):
"""
L{app.AppLogger} using a logger observer which does not implement
L{ILogObserver} or L{LegacyILogObserver} will be wrapped in a compat
shim and raise a L{DeprecationWarning}.
"""
logs = []
logger = app.AppLogger({})
logger._getLogObserver = lambda: logs.append
logger.start(Componentized())
self.assertIn("starting up", textFromEventDict(logs[0]))
warnings = self.flushWarnings([self.test_unmarkedObserversDeprecated])
self.assertEqual(len(warnings), 1)
self.assertEqual(
warnings[0]["message"],
(
"Passing a logger factory which makes log observers "
"which do not implement twisted.logger.ILogObserver "
"or twisted.python.log.ILogObserver to "
"twisted.application.app.AppLogger was deprecated "
"in Twisted 16.2. Please use a factory that "
"produces twisted.logger.ILogObserver (or the "
"legacy twisted.python.log.ILogObserver) "
"implementing objects instead."
),
)
@skipIf(not _twistd_unix, "twistd unix not available")
class UnixAppLoggerTests(TestCase):
"""
Tests for L{UnixAppLogger}.
@ivar signals: list of signal handlers installed.
@type signals: C{list}
"""
def setUp(self):
"""
        Fake C{signal.signal} so that handlers are not actually installed but
        are instead saved in C{self.signals}.
"""
self.signals = []
def fakeSignal(sig, f):
self.signals.append((sig, f))
self.patch(signal, "signal", fakeSignal)
def test_getLogObserverStdout(self):
"""
When non-daemonized and C{logfile} is empty or set to C{-},
L{UnixAppLogger._getLogObserver} returns a log observer pointing at
C{sys.stdout}.
"""
logFiles = _patchTextFileLogObserver(self.patch)
logger = UnixAppLogger({"logfile": "-", "nodaemon": True})
logger._getLogObserver()
self.assertEqual(len(logFiles), 1)
self.assertIs(logFiles[0], sys.stdout)
logger = UnixAppLogger({"logfile": "", "nodaemon": True})
logger._getLogObserver()
self.assertEqual(len(logFiles), 2)
self.assertIs(logFiles[1], sys.stdout)
def test_getLogObserverStdoutDaemon(self):
"""
When daemonized and C{logfile} is set to C{-},
L{UnixAppLogger._getLogObserver} raises C{SystemExit}.
"""
logger = UnixAppLogger({"logfile": "-", "nodaemon": False})
error = self.assertRaises(SystemExit, logger._getLogObserver)
self.assertEqual(str(error), "Daemons cannot log to stdout, exiting!")
def test_getLogObserverFile(self):
"""
When C{logfile} contains a file name, L{app.AppLogger._getLogObserver}
returns a log observer pointing at the specified path, and a signal
handler rotating the log is installed.
"""
logFiles = _patchTextFileLogObserver(self.patch)
filename = self.mktemp()
logger = UnixAppLogger({"logfile": filename})
logger._getLogObserver()
self.assertEqual(len(logFiles), 1)
self.assertEqual(logFiles[0].path, os.path.abspath(filename))
self.assertEqual(len(self.signals), 1)
self.assertEqual(self.signals[0][0], signal.SIGUSR1)
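        # Deliver a fake SIGUSR1 through the installed handler and verify that
        # it rotates the log; the Deferred fires once rotate() is called.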
d = Deferred()
def rotate():
d.callback(None)
logFiles[0].rotate = rotate
rotateLog = self.signals[0][1]
rotateLog(None, None)
return d
def test_getLogObserverDontOverrideSignalHandler(self):
"""
If a signal handler is already installed,
L{UnixAppLogger._getLogObserver} doesn't override it.
"""
def fakeGetSignal(sig):
self.assertEqual(sig, signal.SIGUSR1)
return object()
self.patch(signal, "getsignal", fakeGetSignal)
filename = self.mktemp()
logger = UnixAppLogger({"logfile": filename})
logger._getLogObserver()
self.assertEqual(self.signals, [])
def test_getLogObserverDefaultFile(self):
"""
When daemonized and C{logfile} is empty, the observer returned by
L{UnixAppLogger._getLogObserver} points at C{twistd.log} in the current
directory.
"""
logFiles = _patchTextFileLogObserver(self.patch)
logger = UnixAppLogger({"logfile": "", "nodaemon": False})
logger._getLogObserver()
self.assertEqual(len(logFiles), 1)
self.assertEqual(logFiles[0].path, os.path.abspath("twistd.log"))
@skipIf(not _twistd_unix, "twistd unix not available")
def test_getLogObserverSyslog(self):
"""
If C{syslog} is set to C{True}, L{UnixAppLogger._getLogObserver} starts
        a L{syslog.SyslogObserver} with the given C{prefix}.
"""
logs = _setupSyslog(self)
logger = UnixAppLogger({"syslog": True, "prefix": "test-prefix"})
observer = logger._getLogObserver()
self.assertEqual(logs, ["test-prefix"])
observer({"a": "b"})
self.assertEqual(logs, ["test-prefix", {"a": "b"}])
@skipIf(not _twistd_unix, "twistd unix support not available")
class DaemonizeTests(TestCase):
"""
Tests for L{_twistd_unix.UnixApplicationRunner} daemonization.
"""
def setUp(self):
self.mockos = MockOS()
self.config = twistd.ServerOptions()
self.patch(_twistd_unix, "os", self.mockos)
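        # Substitute MockOS for the os module used by _twistd_unix so that
        # fork/setsid/pipe/exit calls are recorded in mockos.actions instead
        # of actually being executed.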
self.runner = _twistd_unix.UnixApplicationRunner(self.config)
self.runner.application = service.Application("Hi!")
self.runner.oldstdout = sys.stdout
self.runner.oldstderr = sys.stderr
self.runner.startReactor = lambda *args: None
def test_success(self):
"""
        When the double fork in C{daemonize} succeeds, the child process
        writes B{0} to the status pipe.
"""
with AlternateReactor(FakeDaemonizingReactor()):
self.runner.postApplication()
self.assertEqual(
self.mockos.actions,
[
("chdir", "."),
("umask", 0o077),
("fork", True),
"setsid",
("fork", True),
("write", -2, b"0"),
("unlink", "twistd.pid"),
],
)
self.assertEqual(self.mockos.closed, [-3, -2])
def test_successInParent(self):
"""
        The parent process initiating the C{daemonize} call reads data from
        the status pipe and then exits the process.
"""
self.mockos.child = False
self.mockos.readData = b"0"
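        # child=False makes MockOS act as the parent side of the fork, and
        # readData is the status byte it will "read" from the child over the
        # status pipe.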
with AlternateReactor(FakeDaemonizingReactor()):
self.assertRaises(SystemError, self.runner.postApplication)
self.assertEqual(
self.mockos.actions,
[
("chdir", "."),
("umask", 0o077),
("fork", True),
("read", -1, 100),
("exit", 0),
("unlink", "twistd.pid"),
],
)
self.assertEqual(self.mockos.closed, [-1])
def test_successEINTR(self):
"""
If the C{os.write} call to the status pipe raises an B{EINTR} error,
        the child process retries the write.
"""
written = []
def raisingWrite(fd, data):
written.append((fd, data))
if len(written) == 1:
raise OSError(errno.EINTR)
self.mockos.write = raisingWrite
with AlternateReactor(FakeDaemonizingReactor()):
self.runner.postApplication()
self.assertEqual(
self.mockos.actions,
[
("chdir", "."),
("umask", 0o077),
("fork", True),
"setsid",
("fork", True),
("unlink", "twistd.pid"),
],
)
self.assertEqual(self.mockos.closed, [-3, -2])
self.assertEqual([(-2, b"0"), (-2, b"0")], written)
def test_successInParentEINTR(self):
"""
If the C{os.read} call on the status pipe raises an B{EINTR} error, the
        parent process retries the read.
"""
read = []
def raisingRead(fd, size):
read.append((fd, size))
if len(read) == 1:
raise OSError(errno.EINTR)
return b"0"
self.mockos.read = raisingRead
self.mockos.child = False
with AlternateReactor(FakeDaemonizingReactor()):
self.assertRaises(SystemError, self.runner.postApplication)
self.assertEqual(
self.mockos.actions,
[
("chdir", "."),
("umask", 0o077),
("fork", True),
("exit", 0),
("unlink", "twistd.pid"),
],
)
self.assertEqual(self.mockos.closed, [-1])
self.assertEqual([(-1, 100), (-1, 100)], read)
def assertErrorWritten(self, raised, reported):
"""
Assert L{UnixApplicationRunner.postApplication} writes
C{reported} to its status pipe if the service raises an
exception whose message is C{raised}.
"""
class FakeService(service.Service):
def startService(self):
raise RuntimeError(raised)
errorService = FakeService()
errorService.setServiceParent(self.runner.application)
with AlternateReactor(FakeDaemonizingReactor()):
self.assertRaises(RuntimeError, self.runner.postApplication)
self.assertEqual(
self.mockos.actions,
[
("chdir", "."),
("umask", 0o077),
("fork", True),
"setsid",
("fork", True),
("write", -2, reported),
("unlink", "twistd.pid"),
],
)
self.assertEqual(self.mockos.closed, [-3, -2])
def test_error(self):
"""
If an error happens during daemonization, the child process writes the
        exception message to the status pipe.
"""
self.assertErrorWritten(
raised="Something is wrong", reported=b"1 RuntimeError: Something is wrong"
)
def test_unicodeError(self):
"""
If an error happens during daemonization, and that error's
message is Unicode, the child encodes the message as ascii
with backslash Unicode code points.
"""
self.assertErrorWritten(raised="\u2022", reported=b"1 RuntimeError: \\u2022")
def assertErrorInParentBehavior(self, readData, errorMessage, mockOSActions):
"""
Make L{os.read} appear to return C{readData}, and assert that
L{UnixApplicationRunner.postApplication} writes
C{errorMessage} to standard error and executes the calls
against L{os} functions specified in C{mockOSActions}.
"""
self.mockos.child = False
self.mockos.readData = readData
errorIO = StringIO()
self.patch(sys, "__stderr__", errorIO)
with AlternateReactor(FakeDaemonizingReactor()):
self.assertRaises(SystemError, self.runner.postApplication)
self.assertEqual(errorIO.getvalue(), errorMessage)
self.assertEqual(self.mockos.actions, mockOSActions)
self.assertEqual(self.mockos.closed, [-1])
def test_errorInParent(self):
"""
When the child writes an error message to the status pipe
during daemonization, the parent writes the repr of the
message to C{stderr} and exits with non-zero status code.
"""
self.assertErrorInParentBehavior(
readData=b"1 Exception: An identified error",
errorMessage=(
"An error has occurred: b'Exception: An identified error'\n"
"Please look at log file for more information.\n"
),
mockOSActions=[
("chdir", "."),
("umask", 0o077),
("fork", True),
("read", -1, 100),
("exit", 1),
("unlink", "twistd.pid"),
],
)
def test_nonASCIIErrorInParent(self):
"""
When the child writes a non-ASCII error message to the status
pipe during daemonization, the parent writes the repr of the
message to C{stderr} and exits with a non-zero status code.
"""
self.assertErrorInParentBehavior(
readData=b"1 Exception: \xff",
errorMessage=(
"An error has occurred: b'Exception: \\xff'\n"
"Please look at log file for more information.\n"
),
mockOSActions=[
("chdir", "."),
("umask", 0o077),
("fork", True),
("read", -1, 100),
("exit", 1),
("unlink", "twistd.pid"),
],
)
def test_errorInParentWithTruncatedUnicode(self):
"""
When the child writes a non-ASCII error message to the status
        pipe during daemonization, and that message is too long, the
parent writes the repr of the truncated message to C{stderr}
and exits with a non-zero status code.
"""
truncatedMessage = b"1 RuntimeError: " + b"\\u2022" * 14
# the escape sequence will appear to be escaped twice, because
# we're getting the repr
reportedMessage = "b'RuntimeError: {}'".format(r"\\u2022" * 14)
self.assertErrorInParentBehavior(
readData=truncatedMessage,
errorMessage=(
"An error has occurred: {}\n"
"Please look at log file for more information.\n".format(
reportedMessage
)
),
mockOSActions=[
("chdir", "."),
("umask", 0o077),
("fork", True),
("read", -1, 100),
("exit", 1),
("unlink", "twistd.pid"),
],
)
def test_errorMessageTruncated(self):
"""
If an error occurs during daemonization and its message is too
long, it's truncated by the child.
"""
self.assertErrorWritten(
raised="x" * 200, reported=b"1 RuntimeError: " + b"x" * 84
)
def test_unicodeErrorMessageTruncated(self):
"""
If an error occurs during daemonization and its message is
unicode and too long, it's truncated by the child, even if
this splits a unicode escape sequence.
"""
self.assertErrorWritten(
raised="\u2022" * 30,
reported=b"1 RuntimeError: " + b"\\u2022" * 14,
)
def test_hooksCalled(self):
"""
C{daemonize} indeed calls L{IReactorDaemonize.beforeDaemonize} and
L{IReactorDaemonize.afterDaemonize} if the reactor implements
L{IReactorDaemonize}.
"""
reactor = FakeDaemonizingReactor()
self.runner.daemonize(reactor)
self.assertTrue(reactor._beforeDaemonizeCalled)
self.assertTrue(reactor._afterDaemonizeCalled)
def test_hooksNotCalled(self):
"""
C{daemonize} does NOT call L{IReactorDaemonize.beforeDaemonize} or
L{IReactorDaemonize.afterDaemonize} if the reactor does NOT implement
L{IReactorDaemonize}.
"""
reactor = FakeNonDaemonizingReactor()
self.runner.daemonize(reactor)
self.assertFalse(reactor._beforeDaemonizeCalled)
self.assertFalse(reactor._afterDaemonizeCalled)
@implementer(_ISupportsExitSignalCapturing)
class SignalCapturingMemoryReactor(MemoryReactor):
"""
    MemoryReactor that implements the _ISupportsExitSignalCapturing interface;
    all other operations are identical to MemoryReactor.
"""
@property
def _exitSignal(self):
return self._val
@_exitSignal.setter
def _exitSignal(self, val):
self._val = val
class StubApplicationRunnerWithSignal(twistd._SomeApplicationRunner):
"""
An application runner that uses a SignalCapturingMemoryReactor and
has a _signalValue attribute that it will set in the reactor.
@ivar _signalValue: The signal value to set on the reactor's _exitSignal
attribute.
"""
loggerFactory = CrippledAppLogger
def __init__(self, config):
super().__init__(config)
self._signalValue = None
def preApplication(self):
"""
Does nothing.
"""
def postApplication(self):
"""
Instantiate a SignalCapturingMemoryReactor and start it
in the runner.
"""
reactor = SignalCapturingMemoryReactor()
reactor._exitSignal = self._signalValue
self.startReactor(reactor, sys.stdout, sys.stderr)
def stubApplicationRunnerFactoryCreator(signum):
"""
Create a factory function to instantiate a
    StubApplicationRunnerWithSignal that will report C{signum} as the captured
    signal.
@param signum: The integer signal number or None
@type signum: C{int} or C{None}
@return: A factory function to create stub runners.
@rtype: stubApplicationRunnerFactory
"""
def stubApplicationRunnerFactory(config):
"""
Create a StubApplicationRunnerWithSignal using a reactor that
implements _ISupportsExitSignalCapturing and whose _exitSignal
attribute is set to signum.
@param config: The runner configuration, platform dependent.
@type config: L{twisted.scripts.twistd.ServerOptions}
@return: A runner to use for the test.
@rtype: twisted.test.test_twistd.StubApplicationRunnerWithSignal
"""
runner = StubApplicationRunnerWithSignal(config)
runner._signalValue = signum
return runner
return stubApplicationRunnerFactory
class ExitWithSignalTests(TestCase):
"""
Tests for L{twisted.application.app._exitWithSignal}.
"""
def setUp(self):
"""
        Set up the server options and a fake C{os.kill} for use by test cases.
"""
self.config = twistd.ServerOptions()
self.config.loadedPlugins = {"test_command": MockServiceMaker()}
self.config.subOptions = object()
self.config.subCommand = "test_command"
self.fakeKillArgs = [None, None]
def fakeKill(pid, sig):
"""
Fake method to capture arguments passed to os.kill.
@param pid: The pid of the process being killed.
@param sig: The signal sent to the process.
"""
self.fakeKillArgs[0] = pid
self.fakeKillArgs[1] = sig
self.patch(os, "kill", fakeKill)
def test_exitWithSignal(self):
"""
exitWithSignal replaces the existing signal handler with the default
handler and sends the replaced signal to the current process.
"""
fakeSignalArgs = [None, None]
def fake_signal(sig, handler):
fakeSignalArgs[0] = sig
fakeSignalArgs[1] = handler
self.patch(signal, "signal", fake_signal)
app._exitWithSignal(signal.SIGINT)
        self.assertEqual(fakeSignalArgs[0], signal.SIGINT)
        self.assertEqual(fakeSignalArgs[1], signal.SIG_DFL)
        self.assertEqual(self.fakeKillArgs[0], os.getpid())
        self.assertEqual(self.fakeKillArgs[1], signal.SIGINT)
def test_normalExit(self):
"""
_exitWithSignal is not called if the runner does not exit with a
signal.
"""
self.patch(
twistd, "_SomeApplicationRunner", stubApplicationRunnerFactoryCreator(None)
)
twistd.runApp(self.config)
self.assertIsNone(self.fakeKillArgs[0])
self.assertIsNone(self.fakeKillArgs[1])
def test_runnerExitsWithSignal(self):
"""
_exitWithSignal is called when the runner exits with a signal.
"""
self.patch(
twistd,
"_SomeApplicationRunner",
stubApplicationRunnerFactoryCreator(signal.SIGINT),
)
twistd.runApp(self.config)
        self.assertEqual(self.fakeKillArgs[0], os.getpid())
        self.assertEqual(self.fakeKillArgs[1], signal.SIGINT)
|
the-stack_0_23707 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .resource_py3 import Resource
class VirtualNetworkGatewayConnectionListEntity(Resource):
"""A common class for general resource information.
Variables are only populated by the server, and will be ignored when
sending a request.
All required parameters must be populated in order to send to Azure.
:param id: Resource ID.
:type id: str
:ivar name: Resource name.
:vartype name: str
:ivar type: Resource type.
:vartype type: str
:param location: Resource location.
:type location: str
:param tags: Resource tags.
:type tags: dict[str, str]
:param authorization_key: The authorizationKey.
:type authorization_key: str
:param virtual_network_gateway1: Required. The reference to virtual
network gateway resource.
:type virtual_network_gateway1:
~azure.mgmt.network.v2018_02_01.models.VirtualNetworkConnectionGatewayReference
:param virtual_network_gateway2: The reference to virtual network gateway
resource.
:type virtual_network_gateway2:
~azure.mgmt.network.v2018_02_01.models.VirtualNetworkConnectionGatewayReference
:param local_network_gateway2: The reference to local network gateway
resource.
:type local_network_gateway2:
~azure.mgmt.network.v2018_02_01.models.VirtualNetworkConnectionGatewayReference
:param connection_type: Required. Gateway connection type. Possible values
     are: 'IPsec', 'Vnet2Vnet', 'ExpressRoute', and 'VPNClient'. Possible values
     include: 'IPsec', 'Vnet2Vnet', 'ExpressRoute', 'VPNClient'
:type connection_type: str or
~azure.mgmt.network.v2018_02_01.models.VirtualNetworkGatewayConnectionType
:param routing_weight: The routing weight.
:type routing_weight: int
:param shared_key: The IPSec shared key.
:type shared_key: str
:ivar connection_status: Virtual network Gateway connection status.
Possible values are 'Unknown', 'Connecting', 'Connected' and
'NotConnected'. Possible values include: 'Unknown', 'Connecting',
'Connected', 'NotConnected'
:vartype connection_status: str or
~azure.mgmt.network.v2018_02_01.models.VirtualNetworkGatewayConnectionStatus
:ivar tunnel_connection_status: Collection of all tunnels' connection
health status.
:vartype tunnel_connection_status:
list[~azure.mgmt.network.v2018_02_01.models.TunnelConnectionHealth]
:ivar egress_bytes_transferred: The egress bytes transferred in this
connection.
:vartype egress_bytes_transferred: long
:ivar ingress_bytes_transferred: The ingress bytes transferred in this
connection.
:vartype ingress_bytes_transferred: long
:param peer: The reference to peerings resource.
:type peer: ~azure.mgmt.network.v2018_02_01.models.SubResource
:param enable_bgp: EnableBgp flag
:type enable_bgp: bool
:param use_policy_based_traffic_selectors: Enable policy-based traffic
selectors.
:type use_policy_based_traffic_selectors: bool
:param ipsec_policies: The IPSec Policies to be considered by this
connection.
:type ipsec_policies:
list[~azure.mgmt.network.v2018_02_01.models.IpsecPolicy]
:param resource_guid: The resource GUID property of the
VirtualNetworkGatewayConnection resource.
:type resource_guid: str
:ivar provisioning_state: The provisioning state of the
VirtualNetworkGatewayConnection resource. Possible values are: 'Updating',
'Deleting', and 'Failed'.
:vartype provisioning_state: str
:param etag: Gets a unique read-only string that changes whenever the
resource is updated.
:type etag: str
"""
_validation = {
'name': {'readonly': True},
'type': {'readonly': True},
'virtual_network_gateway1': {'required': True},
'connection_type': {'required': True},
'connection_status': {'readonly': True},
'tunnel_connection_status': {'readonly': True},
'egress_bytes_transferred': {'readonly': True},
'ingress_bytes_transferred': {'readonly': True},
'provisioning_state': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'location': {'key': 'location', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
'authorization_key': {'key': 'properties.authorizationKey', 'type': 'str'},
'virtual_network_gateway1': {'key': 'properties.virtualNetworkGateway1', 'type': 'VirtualNetworkConnectionGatewayReference'},
'virtual_network_gateway2': {'key': 'properties.virtualNetworkGateway2', 'type': 'VirtualNetworkConnectionGatewayReference'},
'local_network_gateway2': {'key': 'properties.localNetworkGateway2', 'type': 'VirtualNetworkConnectionGatewayReference'},
'connection_type': {'key': 'properties.connectionType', 'type': 'str'},
'routing_weight': {'key': 'properties.routingWeight', 'type': 'int'},
'shared_key': {'key': 'properties.sharedKey', 'type': 'str'},
'connection_status': {'key': 'properties.connectionStatus', 'type': 'str'},
'tunnel_connection_status': {'key': 'properties.tunnelConnectionStatus', 'type': '[TunnelConnectionHealth]'},
'egress_bytes_transferred': {'key': 'properties.egressBytesTransferred', 'type': 'long'},
'ingress_bytes_transferred': {'key': 'properties.ingressBytesTransferred', 'type': 'long'},
'peer': {'key': 'properties.peer', 'type': 'SubResource'},
'enable_bgp': {'key': 'properties.enableBgp', 'type': 'bool'},
'use_policy_based_traffic_selectors': {'key': 'properties.usePolicyBasedTrafficSelectors', 'type': 'bool'},
'ipsec_policies': {'key': 'properties.ipsecPolicies', 'type': '[IpsecPolicy]'},
'resource_guid': {'key': 'properties.resourceGuid', 'type': 'str'},
'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
'etag': {'key': 'etag', 'type': 'str'},
}
def __init__(self, *, virtual_network_gateway1, connection_type, id: str=None, location: str=None, tags=None, authorization_key: str=None, virtual_network_gateway2=None, local_network_gateway2=None, routing_weight: int=None, shared_key: str=None, peer=None, enable_bgp: bool=None, use_policy_based_traffic_selectors: bool=None, ipsec_policies=None, resource_guid: str=None, etag: str=None, **kwargs) -> None:
super(VirtualNetworkGatewayConnectionListEntity, self).__init__(id=id, location=location, tags=tags, **kwargs)
self.authorization_key = authorization_key
self.virtual_network_gateway1 = virtual_network_gateway1
self.virtual_network_gateway2 = virtual_network_gateway2
self.local_network_gateway2 = local_network_gateway2
self.connection_type = connection_type
self.routing_weight = routing_weight
self.shared_key = shared_key
self.connection_status = None
self.tunnel_connection_status = None
self.egress_bytes_transferred = None
self.ingress_bytes_transferred = None
self.peer = peer
self.enable_bgp = enable_bgp
self.use_policy_based_traffic_selectors = use_policy_based_traffic_selectors
self.ipsec_policies = ipsec_policies
self.resource_guid = resource_guid
self.provisioning_state = None
self.etag = etag
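
# A minimal, hypothetical usage sketch (not part of the generated code). The
# resource ID and key below are placeholders, and the referenced
# VirtualNetworkConnectionGatewayReference class would have to be imported
# from the same models package:
#
#     gw1 = VirtualNetworkConnectionGatewayReference(
#         id='/subscriptions/<sub>/resourceGroups/<rg>/providers/'
#            'Microsoft.Network/virtualNetworkGateways/gw1')
#     connection = VirtualNetworkGatewayConnectionListEntity(
#         virtual_network_gateway1=gw1,
#         connection_type='IPsec',
#         location='westus',
#         shared_key='<shared-key>')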
|
the-stack_0_23709 | import datetime
from itertools import count
import os
import threading
import time
from six.moves import range
from six.moves import urllib
import pytest
import cherrypy
from cherrypy.lib import httputil
from cherrypy.test import helper
curdir = os.path.join(os.getcwd(), os.path.dirname(__file__))
gif_bytes = (
b'GIF89a\x01\x00\x01\x00\x82\x00\x01\x99"\x1e\x00\x00\x00\x00\x00'
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x00,\x00\x00\x00\x00\x01\x00\x01\x00\x02\x03\x02\x08\t\x00;'
)
class CacheTest(helper.CPWebCase):
@staticmethod
def setup_server():
@cherrypy.config(**{'tools.caching.on': True})
class Root:
def __init__(self):
self.counter = 0
self.control_counter = 0
self.longlock = threading.Lock()
@cherrypy.expose
def index(self):
self.counter += 1
msg = 'visit #%s' % self.counter
return msg
@cherrypy.expose
def control(self):
self.control_counter += 1
return 'visit #%s' % self.control_counter
@cherrypy.expose
def a_gif(self):
cherrypy.response.headers[
'Last-Modified'] = httputil.HTTPDate()
return gif_bytes
@cherrypy.expose
def long_process(self, seconds='1'):
try:
self.longlock.acquire()
time.sleep(float(seconds))
finally:
self.longlock.release()
return 'success!'
@cherrypy.expose
def clear_cache(self, path):
cherrypy._cache.store[cherrypy.request.base + path].clear()
@cherrypy.config(**{
'tools.caching.on': True,
'tools.response_headers.on': True,
'tools.response_headers.headers': [
('Vary', 'Our-Varying-Header')
],
})
class VaryHeaderCachingServer(object):
def __init__(self):
self.counter = count(1)
@cherrypy.expose
def index(self):
return 'visit #%s' % next(self.counter)
@cherrypy.config(**{
'tools.expires.on': True,
'tools.expires.secs': 60,
'tools.staticdir.on': True,
'tools.staticdir.dir': 'static',
'tools.staticdir.root': curdir,
})
class UnCached(object):
@cherrypy.expose
@cherrypy.config(**{'tools.expires.secs': 0})
def force(self):
cherrypy.response.headers['Etag'] = 'bibbitybobbityboo'
self._cp_config['tools.expires.force'] = True
self._cp_config['tools.expires.secs'] = 0
return 'being forceful'
@cherrypy.expose
def dynamic(self):
cherrypy.response.headers['Etag'] = 'bibbitybobbityboo'
cherrypy.response.headers['Cache-Control'] = 'private'
return 'D-d-d-dynamic!'
@cherrypy.expose
def cacheable(self):
cherrypy.response.headers['Etag'] = 'bibbitybobbityboo'
return "Hi, I'm cacheable."
@cherrypy.expose
@cherrypy.config(**{'tools.expires.secs': 86400})
def specific(self):
cherrypy.response.headers[
'Etag'] = 'need_this_to_make_me_cacheable'
return 'I am being specific'
class Foo(object):
pass
@cherrypy.expose
@cherrypy.config(**{'tools.expires.secs': Foo()})
def wrongtype(self):
cherrypy.response.headers[
'Etag'] = 'need_this_to_make_me_cacheable'
return 'Woops'
@cherrypy.config(**{
'tools.gzip.mime_types': ['text/*', 'image/*'],
'tools.caching.on': True,
'tools.staticdir.on': True,
'tools.staticdir.dir': 'static',
'tools.staticdir.root': curdir
})
class GzipStaticCache(object):
pass
cherrypy.tree.mount(Root())
cherrypy.tree.mount(UnCached(), '/expires')
cherrypy.tree.mount(VaryHeaderCachingServer(), '/varying_headers')
cherrypy.tree.mount(GzipStaticCache(), '/gzip_static_cache')
cherrypy.config.update({'tools.gzip.on': True})
def testCaching(self):
elapsed = 0.0
for trial in range(10):
self.getPage('/')
# The response should be the same every time,
# except for the Age response header.
self.assertBody('visit #1')
if trial != 0:
age = int(self.assertHeader('Age'))
                self.assertTrue(age >= elapsed)
elapsed = age
# POST, PUT, DELETE should not be cached.
self.getPage('/', method='POST')
self.assertBody('visit #2')
# Because gzip is turned on, the Vary header should always Vary for
# content-encoding
self.assertHeader('Vary', 'Accept-Encoding')
# The previous request should have invalidated the cache,
# so this request will recalc the response.
self.getPage('/', method='GET')
self.assertBody('visit #3')
# ...but this request should get the cached copy.
self.getPage('/', method='GET')
self.assertBody('visit #3')
self.getPage('/', method='DELETE')
self.assertBody('visit #4')
# The previous request should have invalidated the cache,
# so this request will recalc the response.
self.getPage('/', method='GET', headers=[('Accept-Encoding', 'gzip')])
self.assertHeader('Content-Encoding', 'gzip')
self.assertHeader('Vary')
self.assertEqual(
cherrypy.lib.encoding.decompress(self.body), b'visit #5')
# Now check that a second request gets the gzip header and gzipped body
# This also tests a bug in 3.0 to 3.0.2 whereby the cached, gzipped
# response body was being gzipped a second time.
self.getPage('/', method='GET', headers=[('Accept-Encoding', 'gzip')])
self.assertHeader('Content-Encoding', 'gzip')
self.assertEqual(
cherrypy.lib.encoding.decompress(self.body), b'visit #5')
# Now check that a third request that doesn't accept gzip
# skips the cache (because the 'Vary' header denies it).
self.getPage('/', method='GET')
self.assertNoHeader('Content-Encoding')
self.assertBody('visit #6')
def testVaryHeader(self):
self.getPage('/varying_headers/')
self.assertStatus('200 OK')
self.assertHeaderItemValue('Vary', 'Our-Varying-Header')
self.assertBody('visit #1')
# Now check that different 'Vary'-fields don't evict each other.
# This test creates 2 requests with different 'Our-Varying-Header'
# and then tests if the first one still exists.
self.getPage('/varying_headers/',
headers=[('Our-Varying-Header', 'request 2')])
self.assertStatus('200 OK')
self.assertBody('visit #2')
self.getPage('/varying_headers/',
headers=[('Our-Varying-Header', 'request 2')])
self.assertStatus('200 OK')
self.assertBody('visit #2')
self.getPage('/varying_headers/')
self.assertStatus('200 OK')
self.assertBody('visit #1')
def testExpiresTool(self):
# test setting an expires header
self.getPage('/expires/specific')
self.assertStatus('200 OK')
self.assertHeader('Expires')
# test exceptions for bad time values
self.getPage('/expires/wrongtype')
self.assertStatus(500)
self.assertInBody('TypeError')
# static content should not have "cache prevention" headers
self.getPage('/expires/index.html')
self.assertStatus('200 OK')
self.assertNoHeader('Pragma')
self.assertNoHeader('Cache-Control')
self.assertHeader('Expires')
# dynamic content that sets indicators should not have
# "cache prevention" headers
self.getPage('/expires/cacheable')
self.assertStatus('200 OK')
self.assertNoHeader('Pragma')
self.assertNoHeader('Cache-Control')
self.assertHeader('Expires')
self.getPage('/expires/dynamic')
self.assertBody('D-d-d-dynamic!')
# the Cache-Control header should be untouched
self.assertHeader('Cache-Control', 'private')
self.assertHeader('Expires')
# configure the tool to ignore indicators and replace existing headers
self.getPage('/expires/force')
self.assertStatus('200 OK')
# This also gives us a chance to test 0 expiry with no other headers
self.assertHeader('Pragma', 'no-cache')
if cherrypy.server.protocol_version == 'HTTP/1.1':
self.assertHeader('Cache-Control', 'no-cache, must-revalidate')
self.assertHeader('Expires', 'Sun, 28 Jan 2007 00:00:00 GMT')
# static content should now have "cache prevention" headers
self.getPage('/expires/index.html')
self.assertStatus('200 OK')
self.assertHeader('Pragma', 'no-cache')
if cherrypy.server.protocol_version == 'HTTP/1.1':
self.assertHeader('Cache-Control', 'no-cache, must-revalidate')
self.assertHeader('Expires', 'Sun, 28 Jan 2007 00:00:00 GMT')
# the cacheable handler should now have "cache prevention" headers
self.getPage('/expires/cacheable')
self.assertStatus('200 OK')
self.assertHeader('Pragma', 'no-cache')
if cherrypy.server.protocol_version == 'HTTP/1.1':
self.assertHeader('Cache-Control', 'no-cache, must-revalidate')
self.assertHeader('Expires', 'Sun, 28 Jan 2007 00:00:00 GMT')
self.getPage('/expires/dynamic')
self.assertBody('D-d-d-dynamic!')
# dynamic sets Cache-Control to private but it should be
# overwritten here ...
self.assertHeader('Pragma', 'no-cache')
if cherrypy.server.protocol_version == 'HTTP/1.1':
self.assertHeader('Cache-Control', 'no-cache, must-revalidate')
self.assertHeader('Expires', 'Sun, 28 Jan 2007 00:00:00 GMT')
def _assert_resp_len_and_enc_for_gzip(self, uri):
"""
Test that after querying gzipped content it's remains valid in
cache and available non-gzipped as well.
"""
ACCEPT_GZIP_HEADERS = [('Accept-Encoding', 'gzip')]
content_len = None
for _ in range(3):
self.getPage(uri, method='GET', headers=ACCEPT_GZIP_HEADERS)
if content_len is not None:
# all requests should get the same length
self.assertHeader('Content-Length', content_len)
self.assertHeader('Content-Encoding', 'gzip')
content_len = dict(self.headers)['Content-Length']
# check that we can still get non-gzipped version
self.getPage(uri, method='GET')
self.assertNoHeader('Content-Encoding')
# non-gzipped version should have a different content length
self.assertNoHeaderItemValue('Content-Length', content_len)
def testGzipStaticCache(self):
"""Test that cache and gzip tools play well together when both enabled.
Ref GitHub issue #1190.
"""
GZIP_STATIC_CACHE_TMPL = '/gzip_static_cache/{}'
resource_files = ('index.html', 'dirback.jpg')
for f in resource_files:
uri = GZIP_STATIC_CACHE_TMPL.format(f)
self._assert_resp_len_and_enc_for_gzip(uri)
def testLastModified(self):
self.getPage('/a.gif')
self.assertStatus(200)
self.assertBody(gif_bytes)
lm1 = self.assertHeader('Last-Modified')
# this request should get the cached copy.
self.getPage('/a.gif')
self.assertStatus(200)
self.assertBody(gif_bytes)
self.assertHeader('Age')
lm2 = self.assertHeader('Last-Modified')
self.assertEqual(lm1, lm2)
# this request should match the cached copy, but raise 304.
self.getPage('/a.gif', [('If-Modified-Since', lm1)])
self.assertStatus(304)
self.assertNoHeader('Last-Modified')
if not getattr(cherrypy.server, 'using_apache', False):
self.assertHeader('Age')
@pytest.mark.xfail(reason='#1536')
def test_antistampede(self):
SECONDS = 4
slow_url = '/long_process?seconds={SECONDS}'.format(**locals())
# We MUST make an initial synchronous request in order to create the
# AntiStampedeCache object, and populate its selecting_headers,
# before the actual stampede.
self.getPage(slow_url)
self.assertBody('success!')
path = urllib.parse.quote(slow_url, safe='')
self.getPage('/clear_cache?path=' + path)
self.assertStatus(200)
start = datetime.datetime.now()
def run():
self.getPage(slow_url)
# The response should be the same every time
self.assertBody('success!')
ts = [threading.Thread(target=run) for i in range(100)]
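        # Fire 100 concurrent requests at the slow handler: with antistampede
        # caching only one of them should actually run the handler, so the
        # total wall-clock time should stay close to a single SECONDS-long
        # request (checked against `allowance` below).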
for t in ts:
t.start()
for t in ts:
t.join()
finish = datetime.datetime.now()
# Allow for overhead, two seconds for slow hosts
allowance = SECONDS + 2
self.assertEqualDates(start, finish, seconds=allowance)
def test_cache_control(self):
self.getPage('/control')
self.assertBody('visit #1')
self.getPage('/control')
self.assertBody('visit #1')
self.getPage('/control', headers=[('Cache-Control', 'no-cache')])
self.assertBody('visit #2')
self.getPage('/control')
self.assertBody('visit #2')
self.getPage('/control', headers=[('Pragma', 'no-cache')])
self.assertBody('visit #3')
self.getPage('/control')
self.assertBody('visit #3')
time.sleep(1)
self.getPage('/control', headers=[('Cache-Control', 'max-age=0')])
self.assertBody('visit #4')
self.getPage('/control')
self.assertBody('visit #4')
|
the-stack_0_23710 | #!/usr/bin/env python3
import json
import os
from unidiff import PatchSet # type: ignore
from build_download_helper import get_with_retries
from env_helper import (
GITHUB_REPOSITORY,
GITHUB_SERVER_URL,
GITHUB_RUN_ID,
GITHUB_EVENT_PATH,
)
DIFF_IN_DOCUMENTATION_EXT = [
".html",
".md",
".yml",
".txt",
".css",
".js",
".xml",
".ico",
".conf",
".svg",
".png",
".jpg",
".py",
".sh",
".json",
]
RETRY_SLEEP = 0
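# Passed as the ``sleep`` argument to get_with_retries(); 0 presumably means
# no extra delay between retry attempts.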
def get_pr_for_commit(sha, ref):
if not ref:
return None
try_get_pr_url = (
f"https://api.github.com/repos/{GITHUB_REPOSITORY}/commits/{sha}/pulls"
)
try:
response = get_with_retries(try_get_pr_url, sleep=RETRY_SLEEP)
data = response.json()
if len(data) > 1:
print("Got more than one pr for commit", sha)
for pr in data:
            # refs for pushes look like refs/heads/XX
            # refs for PRs look like XX
if pr["head"]["ref"] in ref:
return pr
print("Cannot find PR with required ref", ref, "returning first one")
first_pr = data[0]
return first_pr
except Exception as ex:
print("Cannot fetch PR info from commit", ex)
return None
class PRInfo:
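    # Fallback event used when no GitHub event file is available; it mimics a
    # push event covering the single commit HEAD~..HEAD.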
default_event = {
"commits": 1,
"before": "HEAD~",
"after": "HEAD",
"ref": None,
}
def __init__(
self,
github_event=None,
need_orgs=False,
need_changed_files=False,
pr_event_from_api=False,
):
if not github_event:
if GITHUB_EVENT_PATH:
with open(GITHUB_EVENT_PATH, "r", encoding="utf-8") as event_file:
github_event = json.load(event_file)
else:
github_event = PRInfo.default_event.copy()
self.event = github_event
self.changed_files = set([])
self.body = ""
        ref = github_event.get("ref", "refs/heads/master")
if ref and ref.startswith("refs/heads/"):
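            # Strip the "refs/heads/" prefix (11 characters) so that ``ref``
            # holds the bare branch name.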
ref = ref[11:]
# workflow completed event, used for PRs only
if "action" in github_event and github_event["action"] == "completed":
self.sha = github_event["workflow_run"]["head_sha"]
prs_for_sha = get_with_retries(
f"https://api.github.com/repos/{GITHUB_REPOSITORY}/commits/{self.sha}"
"/pulls",
sleep=RETRY_SLEEP,
).json()
if len(prs_for_sha) != 0:
github_event["pull_request"] = prs_for_sha[0]
if "pull_request" in github_event: # pull request and other similar events
self.number = github_event["pull_request"]["number"]
if pr_event_from_api:
response = get_with_retries(
f"https://api.github.com/repos/{GITHUB_REPOSITORY}"
f"/pulls/{self.number}",
sleep=RETRY_SLEEP,
)
github_event["pull_request"] = response.json()
if "after" in github_event:
self.sha = github_event["after"]
else:
self.sha = github_event["pull_request"]["head"]["sha"]
repo_prefix = f"{GITHUB_SERVER_URL}/{GITHUB_REPOSITORY}"
self.task_url = f"{repo_prefix}/actions/runs/{GITHUB_RUN_ID or '0'}"
self.repo_full_name = GITHUB_REPOSITORY
self.commit_html_url = f"{repo_prefix}/commits/{self.sha}"
self.pr_html_url = f"{repo_prefix}/pull/{self.number}"
self.base_ref = github_event["pull_request"]["base"]["ref"]
self.base_name = github_event["pull_request"]["base"]["repo"]["full_name"]
self.head_ref = github_event["pull_request"]["head"]["ref"]
self.head_name = github_event["pull_request"]["head"]["repo"]["full_name"]
self.body = github_event["pull_request"]["body"]
self.labels = {
label["name"] for label in github_event["pull_request"]["labels"]
}
self.user_login = github_event["pull_request"]["user"]["login"]
self.user_orgs = set([])
if need_orgs:
user_orgs_response = get_with_retries(
github_event["pull_request"]["user"]["organizations_url"],
sleep=RETRY_SLEEP,
)
if user_orgs_response.ok:
response_json = user_orgs_response.json()
self.user_orgs = set(org["id"] for org in response_json)
self.diff_url = github_event["pull_request"]["diff_url"]
elif "commits" in github_event:
self.sha = github_event["after"]
pull_request = get_pr_for_commit(self.sha, github_event["ref"])
repo_prefix = f"{GITHUB_SERVER_URL}/{GITHUB_REPOSITORY}"
self.task_url = f"{repo_prefix}/actions/runs/{GITHUB_RUN_ID or '0'}"
self.commit_html_url = f"{repo_prefix}/commits/{self.sha}"
self.repo_full_name = GITHUB_REPOSITORY
if pull_request is None or pull_request["state"] == "closed":
# it's merged PR to master
self.number = 0
self.labels = {}
self.pr_html_url = f"{repo_prefix}/commits/{ref}"
self.base_ref = ref
self.base_name = self.repo_full_name
self.head_ref = ref
self.head_name = self.repo_full_name
self.diff_url = (
f"https://api.github.com/repos/{GITHUB_REPOSITORY}/"
f"compare/{github_event['before']}...{self.sha}"
)
else:
self.labels = {label["name"] for label in pull_request["labels"]}
self.base_ref = pull_request["base"]["ref"]
self.base_name = pull_request["base"]["repo"]["full_name"]
self.head_ref = pull_request["head"]["ref"]
self.head_name = pull_request["head"]["repo"]["full_name"]
self.pr_html_url = pull_request["html_url"]
if "pr-backport" in self.labels:
self.diff_url = (
f"https://github.com/{GITHUB_REPOSITORY}/"
f"compare/master...{self.head_ref}.diff"
)
else:
self.diff_url = pull_request["diff_url"]
else:
print(json.dumps(github_event, sort_keys=True, indent=4))
self.sha = os.getenv("GITHUB_SHA")
self.number = 0
self.labels = {}
repo_prefix = f"{GITHUB_SERVER_URL}/{GITHUB_REPOSITORY}"
self.task_url = f"{repo_prefix}/actions/runs/{GITHUB_RUN_ID or '0'}"
self.commit_html_url = f"{repo_prefix}/commits/{self.sha}"
self.repo_full_name = GITHUB_REPOSITORY
self.pr_html_url = f"{repo_prefix}/commits/{ref}"
self.base_ref = ref
self.base_name = self.repo_full_name
self.head_ref = ref
self.head_name = self.repo_full_name
if need_changed_files:
self.fetch_changed_files()
def fetch_changed_files(self):
if not self.diff_url:
raise Exception("Diff URL cannot be find for event")
response = get_with_retries(
self.diff_url,
sleep=RETRY_SLEEP,
)
response.raise_for_status()
if "commits" in self.event and self.number == 0:
diff = response.json()
if "files" in diff:
self.changed_files = [f["filename"] for f in diff["files"]]
else:
diff_object = PatchSet(response.text)
self.changed_files = {f.path for f in diff_object}
def get_dict(self):
return {
"sha": self.sha,
"number": self.number,
"labels": self.labels,
"user_login": self.user_login,
"user_orgs": self.user_orgs,
}
def has_changes_in_documentation(self):
# If the list wasn't built yet the best we can do is to
# assume that there were changes.
if self.changed_files is None or not self.changed_files:
return True
for f in self.changed_files:
_, ext = os.path.splitext(f)
path_in_docs = "docs" in f
path_in_website = "website" in f
if (
ext in DIFF_IN_DOCUMENTATION_EXT and (path_in_docs or path_in_website)
) or "docker/docs" in f:
return True
return False
def can_skip_builds_and_use_version_from_master(self):
# TODO: See a broken loop
if "force tests" in self.labels:
return False
if self.changed_files is None or not self.changed_files:
return False
for f in self.changed_files:
# TODO: this logic is broken, should be fixed before using
if (
not f.startswith("tests/queries")
or not f.startswith("tests/integration")
or not f.startswith("tests/performance")
):
return False
return True
def can_skip_integration_tests(self):
        if "force tests" in self.labels:
            return False
        if self.changed_files is None or not self.changed_files:
            return False
        # Integration tests can be skipped only when every changed file lives
        # under the functional-test or performance-test directories.
        for f in self.changed_files:
            if not (
                f.startswith("tests/queries")
                or f.startswith("tests/performance")
            ):
                return False
        return True
def can_skip_functional_tests(self):
        if "force tests" in self.labels:
            return False
        if self.changed_files is None or not self.changed_files:
            return False
        # Functional tests can be skipped only when every changed file lives
        # under the integration-test or performance-test directories.
        for f in self.changed_files:
            if not (
                f.startswith("tests/integration")
                or f.startswith("tests/performance")
            ):
                return False
        return True
class FakePRInfo:
def __init__(self):
self.number = 11111
self.sha = "xxxxxxxxxxxxxxxxxx"
|
the-stack_0_23711 | from typing import List
from overrides import overrides
from allennlp.common.util import JsonDict, sanitize
from allennlp.data import Instance
from allennlp.predictors.predictor import Predictor
@Predictor.register('SeqClassificationPredictor')
class SeqClassificationPredictor(Predictor):
"""
Predictor for the abstruct model
"""
def predict_json(self, json_dict: JsonDict) -> JsonDict:
self._dataset_reader.predict = True
pred_labels = []
sentences = json_dict['sentences']
for sentences_loop, _, _, _ in self._dataset_reader.enforce_max_sent_per_example(sentences):
instance = self._dataset_reader.text_to_instance(sentences=sentences_loop)
output = self._model.forward_on_instance(instance)
idx = output['action_probs'].argmax(axis=1).tolist()
labels = [self._model.vocab.get_token_from_index(i, namespace='labels') for i in idx]
pred_labels.extend(labels)
try:
assert len(pred_labels) == len(sentences)
json_dict['pred_labels_truncated'] = False
            # If a sequence is too long and gets truncated, then some sentences
            # don't get labels. I couldn't find where this happens, but it only
            # seems to affect a small number of cases.
except AssertionError:
#print('Added other because of truncation: {:}'.format(json_dict['paper_id']))
assert len(pred_labels) < len(sentences)
dif_len = len(sentences)-len(pred_labels)
pred_labels.extend(['other_label']*dif_len)
json_dict['pred_labels_truncated'] = True
json_dict['pred_labels'] = pred_labels
return json_dict
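# Illustrative input/output shape for predict_json (the label strings below are
# made up; only the 'sentences' key is required by the code above):
#
#   predictor.predict_json({'sentences': ['First sentence.', 'Second sentence.']})
#   # -> {'sentences': [...], 'pred_labels': ['background', 'method'],
#   #     'pred_labels_truncated': False}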
|
the-stack_0_23714 | # def warn(*args, **kwargs):
# pass
# import warnings
# warnings.warn = warn # to ignore all warnings.
import sys
import pickle
import numpy as np
import pandas as pd
from matplotlib import pyplot as plt
import utils
import loadData
import fairRecourse
from scatter import *
from sklearn.svm import SVC
from sklearn.model_selection import GridSearchCV
from sklearn.metrics import accuracy_score
from sklearn.tree import DecisionTreeClassifier
from sklearn.neural_network import MLPClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import LogisticRegression
from _third_party.svm_recourse import RecourseSVM
from debug import ipsh
from random import seed
RANDOM_SEED = 54321
seed(RANDOM_SEED) # set the random seed so that the random permutations can be reproduced again
np.random.seed(RANDOM_SEED)
# TODO: change to be like _data_main below, and make python module
# this answer https://stackoverflow.com/a/50474562 and others
try:
import treeUtils
except:
print('[ENV WARNING] treeUtils not available')
SIMPLIFY_TREES = False
def trainFairClassifier(model_class, fair_kernel_type):
if model_class != 'iw_fair_svm':
if 'svm' in model_class:
if fair_kernel_type == 'linear':
param_grid = [{'C': np.logspace(0, 2, 3), 'kernel': ['linear']}]
elif fair_kernel_type == 'poly':
param_grid = [{'C': np.logspace(0, 2, 3), 'kernel': ['poly'], 'degree':[2, 3, 5]}]
elif fair_kernel_type == 'rbf':
param_grid = [{'C': np.logspace(0, 2, 3), 'gamma': np.logspace(-3,0,4), 'kernel': ['rbf']}]
elif fair_kernel_type == 'all':
param_grid = [
{'C': np.logspace(0, 2, 3), 'kernel': ['linear']},
{'C': np.logspace(0, 2, 3), 'kernel': ['poly'], 'degree':[2, 3, 5]},
{'C': np.logspace(0, 2, 3), 'gamma': np.logspace(-3,0,4), 'kernel': ['rbf']},
]
else:
raise Exception(f'unrecognized fair_kernel_type: {fair_kernel_type}')
return GridSearchCV(estimator=SVC(probability=True), param_grid=param_grid, n_jobs=-1)
elif 'lr' in model_class:
return LogisticRegression()
elif 'mlp' in model_class:
return MLPClassifier(hidden_layer_sizes = (10, 10))
else:
raise Exception(f'unrecognized model_class: {model_class}')
else:
# Note: regularisation strength C is referred to as 'ups' in RecourseSVM and is fixed to 10 by default;
        # (this corresponds to the Greek nu in the paper, see the primal form on p.3 of https://arxiv.org/pdf/1909.03166.pdf )
lams = [0.2, 0.5, 1, 2, 10, 50, 100]
if fair_kernel_type == 'linear':
param_grid = [{'lam': lams, 'kernel_fn': ['linear']}]
elif fair_kernel_type == 'poly':
param_grid = [{'lam': lams, 'kernel_fn': ['poly'], 'degree':[2, 3, 5]}]
elif fair_kernel_type == 'rbf':
param_grid = [{'lam': lams, 'kernel_fn': ['rbf'], 'gamma': np.logspace(-3,0,4)}]
elif fair_kernel_type == 'all':
param_grid = [
{'lam': lams, 'kernel_fn': ['linear']},
{'lam': lams, 'kernel_fn': ['poly'], 'degree':[2, 3, 5]},
{'lam': lams, 'kernel_fn': ['rbf'], 'gamma': np.logspace(-3,0,4)},
]
else:
raise Exception(f'unrecognized fair_kernel_type: {fair_kernel_type}')
return GridSearchCV(estimator=RecourseSVM(), param_grid=param_grid, n_jobs=-1)
@utils.Memoize
def loadModelForDataset(model_class, dataset_class, scm_class = None, num_train_samples = int(1e5), fair_nodes = None, fair_kernel_type = None, experiment_folder_name = None):
log_file = sys.stdout if experiment_folder_name == None else open(f'{experiment_folder_name}/log_training.txt','w')
if not (model_class in {'lr', 'mlp', 'tree', 'forest'}) and not (model_class in fairRecourse.FAIR_MODELS):
raise Exception(f'{model_class} not supported.')
if not (dataset_class in {'synthetic', 'mortgage', 'twomoon', 'german', 'credit', 'compass', 'adult', 'test'}):
raise Exception(f'{dataset_class} not supported.')
if dataset_class == 'adult':
dataset_obj = loadData.loadDataset(dataset_class, return_one_hot = False, load_from_cache = False, index_offset = 1)
else:
dataset_obj = loadData.loadDataset(dataset_class, return_one_hot = True, load_from_cache = False, meta_param = scm_class)
if model_class not in fairRecourse.FAIR_MODELS:
X_train, X_test, y_train, y_test = dataset_obj.getTrainTestSplit()
y_all = pd.concat([y_train, y_test], axis = 0)
assert sum(y_all) / len(y_all) == 0.5, 'Expected class balance should be 50/50%.'
else:
if dataset_class == 'adult':
X_train, X_test, y_train, y_test = dataset_obj.getTrainTestSplit(with_meta = False, balanced = False)
X_train = pd.concat([X_train], axis = 1)[fair_nodes]
X_test = pd.concat([X_test], axis = 1)[fair_nodes]
else:
X_train, X_test, U_train, U_test, y_train, y_test = dataset_obj.getTrainTestSplit(with_meta = True, balanced = False)
X_train = pd.concat([X_train, U_train], axis = 1)[fair_nodes]
X_test = pd.concat([X_test, U_test], axis = 1)[fair_nodes]
if model_class == 'tree':
model_pretrain = DecisionTreeClassifier()
elif model_class == 'forest':
model_pretrain = RandomForestClassifier()
elif model_class == 'lr':
# IMPORTANT: The default solver changed from ‘liblinear’ to ‘lbfgs’ in 0.22;
# therefore, results may differ slightly from paper.
model_pretrain = LogisticRegression() # default penalty='l2', i.e., ridge
elif model_class == 'mlp':
model_pretrain = MLPClassifier(hidden_layer_sizes = (10, 10))
else:
model_pretrain = trainFairClassifier(model_class, fair_kernel_type)
X_train = np.array(X_train)
X_test = np.array(X_test)
y_train = np.array(y_train)
y_test = np.array(y_test)
X_train = X_train[:num_train_samples]
y_train = y_train[:num_train_samples]
training_setup_string = f'[INFO] Training `{model_class}` on {X_train.shape[0]:,} samples ' + \
                            f'(%{100 * X_train.shape[0] / (X_train.shape[0] + X_test.shape[0]):.2f} ' + \
                            f'of {X_train.shape[0] + X_test.shape[0]:,} samples)...'
print(training_setup_string, file=log_file)
print(training_setup_string)
model_trained = model_pretrain.fit(X_train, y_train)
train_accuracy_string = f'\t[INFO] Training accuracy: %{accuracy_score(y_train, model_trained.predict(X_train)) * 100:.2f}.'
test_accuracy_string = f'\t[INFO] Testing accuracy: %{accuracy_score(y_test, model_trained.predict(X_test)) * 100:.2f}.'
print(train_accuracy_string, file=log_file)
print(test_accuracy_string, file=log_file)
print(train_accuracy_string)
print(test_accuracy_string)
if hasattr(model_trained, 'best_estimator_'):
hyperparams_string = f'\t[INFO] Hyper-parameters of best classifier selected by CV:\n\t{model_trained.best_estimator_}'
print(hyperparams_string, file=log_file)
print(hyperparams_string)
    # shouldn't deal with a bad model; arbitrarily set the acceptance threshold at 70% training accuracy
tmp = accuracy_score(y_train, model_trained.predict(X_train))
# TODO (fair): added try except loop for use of nonlinear classifiers in fairness experiments
try:
assert tmp > 0.70, f'Model accuracy only {tmp}'
    except AssertionError:
        print('[INFO] classifier training accuracy may be low (<70%)')
pass
classifier_obj = model_trained
visualizeDatasetAndFixedModel(dataset_obj, classifier_obj, experiment_folder_name)
    feature_names = dataset_obj.getInputAttributeNames('kurz') # easier to read (nothing to do with one-hot vs non-hot!)
if model_class == 'tree':
if SIMPLIFY_TREES:
print('[INFO] Simplifying decision tree...', end = '', file=log_file)
model_trained.tree_ = treeUtils.simplifyDecisionTree(model_trained, False)
print('\tdone.', file=log_file)
# treeUtils.saveTreeVisualization(model_trained, model_class, '', X_test, feature_names, experiment_folder_name)
elif model_class == 'forest':
for tree_idx in range(len(model_trained.estimators_)):
if SIMPLIFY_TREES:
print(f'[INFO] Simplifying decision tree (#{tree_idx + 1}/{len(model_trained.estimators_)})...', end = '', file=log_file)
model_trained.estimators_[tree_idx].tree_ = treeUtils.simplifyDecisionTree(model_trained.estimators_[tree_idx], False)
print('\tdone.', file=log_file)
# treeUtils.saveTreeVisualization(model_trained.estimators_[tree_idx], model_class, f'tree{tree_idx}', X_test, feature_names, experiment_folder_name)
if experiment_folder_name:
pickle.dump(model_trained, open(f'{experiment_folder_name}/_model_trained', 'wb'))
return model_trained
|
the-stack_0_23716 |
from support.logging import log
import edp_modules.edp_db as edp_db
import edp_modules.edp_system as edp_system
import edp_modules.edp_router as edp_router
def run_tests():
edp_db.tests()
edp_system.tests()
edp_router.tests()
def compute_route(system_names_list, start_name, end_name):
system_list = _create_system_list(system_names_list, start_name, end_name)
return edp_router.create_route(system_list, start_name, end_name)
def _complete_system_names_list(systems_list, start, end):
if start is not None and start not in systems_list:
systems_list.append(start)
if end is not None and end not in systems_list:
systems_list.append(end)
def _warn_for_unresolved_names(system_list, system_names_list):
resolved_names = []
for s in system_list:
resolved_names.append(s.name)
for n in system_names_list:
if n not in resolved_names:
log('Warning: Unable to resolve system {0}. Skipping it.'.format(n))
def _create_system_list(system_names_list, start_system_name, end_system_name):
cache = edp_db.load_data()
system_list = []
_complete_system_names_list(system_names_list, start_system_name, end_system_name)
for system_descriptor in cache:
if system_descriptor['name'].upper() in system_names_list:
system = edp_system.EdpSystem(system_descriptor)
system_list.append(system)
if len(system_list) == len(system_names_list):
break
_warn_for_unresolved_names(system_list, system_names_list)
return system_list
|
the-stack_0_23718 | #!/usr/bin/env python3
#
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Big number routines.
This file is copied from python-alphaconlib.
"""
import struct
# generic big endian MPI format
def bn_bytes(v, have_ext=False):
ext = 0
if have_ext:
ext = 1
return ((v.bit_length()+7)//8) + ext
def bn2bin(v):
s = bytearray()
i = bn_bytes(v)
while i > 0:
s.append((v >> ((i-1) * 8)) & 0xff)
i -= 1
return s
def bn2mpi(v):
have_ext = False
if v.bit_length() > 0:
have_ext = (v.bit_length() & 0x07) == 0
neg = False
if v < 0:
neg = True
v = -v
s = struct.pack(b">I", bn_bytes(v, have_ext))
ext = bytearray()
if have_ext:
ext.append(0)
v_bin = bn2bin(v)
if neg:
if have_ext:
ext[0] |= 0x80
else:
v_bin[0] |= 0x80
return s + ext + v_bin
# alphacon-specific little endian format, with implicit size
def mpi2vch(s):
r = s[4:] # strip size
r = r[::-1] # reverse string, converting BE->LE
return r
def bn2vch(v):
return bytes(mpi2vch(bn2mpi(v)))
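# Minimal self-check sketch (the expected byte strings below were worked out by
# hand from the routines above; treat them as illustrative examples rather than
# official test vectors).
if __name__ == "__main__":
    assert bn2vch(0) == b""                # zero encodes to an empty byte string
    assert bn2vch(1) == b"\x01"
    assert bn2vch(-1) == b"\x81"           # sign bit set on the most significant byte
    assert bn2vch(255) == b"\xff\x00"      # pad byte keeps 0xff from reading as negative
    print("bn2vch examples OK")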
|
the-stack_0_23719 | import random
import torch
import pandas as pd
import numpy as np
from model import get_model
from intent_initializer import read_all_intents, read_all_responses
from preprocessing import stem, tokenize, bag_of_words
PATH = './config/'
BOT_NAME = 'Bavardez'
def load_bot():
model_details = torch.load(PATH+'model_details.pt')
model = get_model(model_details['input_size'], model_details['hidden_size'], model_details['output_size'])
model.load_state_dict(model_details['model_state'])
model.eval()
tags = model_details['tags']
all_words = model_details['all_words']
return model, tags, all_words
def main():
model, tags, all_words = load_bot()
df_responses = read_all_responses()
activation = torch.nn.Softmax(1)
print("Let's chat! (BOF Version) Type \"quit\" to exit.")
while True:
sentence = input("You:\t")
if sentence == "quit":
break
sentence = tokenize(sentence)
bof = bag_of_words(sentence, all_words)
bof = np.expand_dims(bof, axis=0)
bof = torch.from_numpy(bof)
output = model(bof)
probs = activation(output).flatten()
predicted_label = torch.argmax(probs)
tag = tags[predicted_label.item()]
if probs[predicted_label]>0.5:
if tag in list(df_responses.keys()):
answer = random.choice(df_responses[tag])
else:
answer = "Sorry there's an error in OUR SYSTEM! Please re-phrase"
else:
answer = "I do not understand you."
print(BOT_NAME+":\t"+answer)
print("Thankyou for using "+BOT_NAME)
if __name__ == '__main__':
main() |
the-stack_0_23720 | """Collections of linestrings and related utilities
"""
import sys
if sys.version_info[0] < 3:
range = xrange
from ctypes import c_void_p, cast
from private_tools.Shapely.shapely.geos import lgeos
from private_tools.Shapely.shapely.geometry.base import BaseMultipartGeometry, geos_geom_from_py
from private_tools.Shapely.shapely.geometry import linestring
from private_tools.Shapely.shapely.geometry.proxy import CachingGeometryProxy
__all__ = ['MultiLineString', 'asMultiLineString']
class MultiLineString(BaseMultipartGeometry):
"""
A collection of one or more line strings
A MultiLineString has non-zero length and zero area.
Attributes
----------
geoms : sequence
A sequence of LineStrings
"""
def __init__(self, lines=None):
"""
Parameters
----------
lines : sequence
A sequence of line-like coordinate sequences or objects that
provide the numpy array interface, including instances of
LineString.
Example
-------
Construct a collection containing one line string.
>>> lines = MultiLineString( [[[0.0, 0.0], [1.0, 2.0]]] )
"""
super(MultiLineString, self).__init__()
if not lines:
# allow creation of empty multilinestrings, to support unpickling
pass
else:
self._geom, self._ndim = geos_multilinestring_from_py(lines)
def shape_factory(self, *args):
return linestring.LineString(*args)
@property
def __geo_interface__(self):
return {
'type': 'MultiLineString',
'coordinates': tuple(tuple(c for c in g.coords) for g in self.geoms)
}
def svg(self, scale_factor=1., stroke_color=None):
"""Returns a group of SVG polyline elements for the LineString geometry.
Parameters
==========
scale_factor : float
Multiplication factor for the SVG stroke-width. Default is 1.
stroke_color : str, optional
Hex string for stroke color. Default is to use "#66cc99" if
geometry is valid, and "#ff3333" if invalid.
"""
if self.is_empty:
return '<g />'
if stroke_color is None:
stroke_color = "#66cc99" if self.is_valid else "#ff3333"
return '<g>' + \
''.join(p.svg(scale_factor, stroke_color) for p in self) + \
'</g>'
class MultiLineStringAdapter(CachingGeometryProxy, MultiLineString):
context = None
_other_owned = False
def __init__(self, context):
self.context = context
self.factory = geos_multilinestring_from_py
@property
def _ndim(self):
try:
# From array protocol
array = self.context[0].__array_interface__
n = array['shape'][1]
assert n == 2 or n == 3
return n
except AttributeError:
# Fall back on list
return len(self.context[0][0])
def asMultiLineString(context):
"""Adapts a sequence of objects to the MultiLineString interface"""
return MultiLineStringAdapter(context)
def geos_multilinestring_from_py(ob):
# ob must be either a MultiLineString, a sequence, or
# array of sequences or arrays
if isinstance(ob, MultiLineString):
return geos_geom_from_py(ob)
obs = getattr(ob, 'geoms', ob)
L = len(obs)
assert L >= 1
exemplar = obs[0]
try:
N = len(exemplar[0])
except TypeError:
N = exemplar._ndim
if N not in (2, 3):
raise ValueError("Invalid coordinate dimensionality")
    # Array of pointers to the member linestring geometries
    subs = (c_void_p * L)()
    # add each linestring to the collection
for l in range(L):
geom, ndims = linestring.geos_linestring_from_py(obs[l])
subs[l] = cast(geom, c_void_p)
return (lgeos.GEOSGeom_createCollection(5, subs, L), N)
# Test runner
def _test():
import doctest
doctest.testmod()
if __name__ == "__main__":
_test()
|
the-stack_0_23724 | import numpy as np
import pandas as pd
from sklearn.preprocessing import MaxAbsScaler
import matplotlib.pylab as plt
import pickle
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
from tensorflow.keras.regularizers import l2, l1
from tensorflow.keras import layers, Model
from tensorflow.keras.optimizers import Adam
# Helper Functions
def dump(obj, filename):
with open(filename+".pickle", 'wb') as config_dictionary_file:
pickle.dump(obj, config_dictionary_file)
def get(filename):
a = open(filename + ".pickle", "rb")
print("Unpickling...")
return pickle.load(a)
def mse(true, predicted):
"""
    A function to compute the mean squared error
"""
error = 0
for i in range(len(predicted)):
error += pow(true[i] - predicted[i], 2)
return error/len(predicted)
def rmse(true, predicted):
"""
    A function to compute the root mean squared error
"""
error = 0
for i in range(len(predicted)):
error += pow(true[i] - predicted[i], 2)
return np.sqrt(error/len(predicted))
def parseData(data):
subtables = ["election", "info", "questionsRaw", "weights", "questionsInfo"]
columns = [['voterID', 'electionID', 'sourceTYPE', 'source','recTIME','recTYPE','questTYPE', 'soc_completion','N_answers','quest_completion'],
['districtID', 'language', 'birthYEAR','age','gender','zip','education','interest','position','pref_party'],
['language', 'birthYEAR', 'gender', 'zip', 'education', 'interest', 'position','pref_party'], [range(74,126)]]
df = dict()
print("init length", data.shape)
dataNaN = data.replace(-9, np.nan)
noQuestionNanIndex = dataNaN.iloc[:,20:73].dropna().index #.iloc[;,20:73]
print("no NaN length", len(noQuestionNanIndex))
df["election"] = data.iloc[noQuestionNanIndex][columns[0]]
df["info"] = data.iloc[noQuestionNanIndex][columns[1]].replace([-9, -1, -1977, -990], np.nan)
df["questionsRaw"] = data.iloc[noQuestionNanIndex,20:73]
df["weights"] = data.iloc[noQuestionNanIndex,74:127]
df["questionsInfo"] = pd.DataFrame()
for sub in subtables:
print("size", sub, df[sub].shape)
df["questions"] = MaxAbsScaler().fit_transform(pd.concat([df["questionsRaw"], df["info"]], axis=1)) #MaxAbsScaler().fit_transform(
df["questions"] = pd.DataFrame(df["questions"], columns=np.concatenate((df["questionsRaw"].columns,df["info"].columns)))
return df
def plot_metric(h, metric):
h = h.history
train_metrics = h[metric]
val_metrics = h['val_'+metric]
epochs = range(1, len(train_metrics) + 1)
plt.plot(epochs, train_metrics)
plt.plot(epochs, val_metrics)
plt.title('Training and validation '+ metric)
plt.xlabel("Epochs")
plt.ylabel(metric)
plt.legend(["train_"+metric, 'val_'+metric])
plt.show()
class EmbeddingLayer:
def __init__(self, n_items, n_factors, name="", reg=l2(1e-5)):
self.n_items = n_items
self.n_factors = n_factors
self.name = name
self.reg = reg
def __call__(self, x):
x = layers.Embedding(self.n_items, self.n_factors, name=self.name,
embeddings_regularizer=self.reg)(x)
x = layers.Reshape((self.n_factors,))(x)
return x
def MF(df, n_factors, epoch, reg):
n_users, n_items = df.shape
train = makeForKeras(df)
user = layers.Input(shape=(1,))
u = EmbeddingLayer(n_users, n_factors, name='U', reg=l2(1e-4))(user)
ub = EmbeddingLayer(n_users, 1, name='U-bias', reg=l1(0))(user)
items = layers.Input(shape=(1,))
m = EmbeddingLayer(n_items, n_factors, name="V", reg=l2(1e-4))(items)
mb = EmbeddingLayer(n_items, 1, name="V-bias", reg=l1(reg))(items)
x = layers.Dot(axes=1, activity_regularizer=None)([u, m])
x = layers.Add()([x, ub, mb])
model = Model(inputs=[user, items], outputs=x)
opt = Adam(0.075, beta_1=0.9, beta_2=0.999)
model.compile(optimizer=opt, loss='mean_squared_error', metrics=['mae'])
bs = int(len(train)/6)
history = model.fit([train["index"].values, train.variable.values], train.value.values,batch_size=bs, epochs=epoch, verbose=0)
U, V = model.get_layer(name='U').get_weights()[0], model.get_layer(name='V').get_weights()[0]
Ubias = model.get_layer(name='U-bias').get_weights()[0]
Vbias = model.get_layer(name='V-bias').get_weights()[0]
return U, V, Ubias, Vbias
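# Illustrative helper (an addition, not part of the original script): rebuild
# the predicted matrix from the factors returned by MF(), mirroring the
# Dot + Add structure of the Keras graph above.
def reconstruct_predictions(U, V, Ubias, Vbias):
    # prediction[i, j] = U[i] . V[j] + Ubias[i] + Vbias[j]
    return U @ V.T + Ubias + Vbias.T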
|
the-stack_0_23726 | """The `vitamins.comms` package contains classes for convenient communications between
Python processes via websockets. This is useful for sending commands to your bot
interactively, or for having your bot send data out to another Python program.
Data is sent in the form of `Message` objects, each of which has three members:
* `id` is an integer that is unique to that message.
* `tag` is a string.
* `data` is any pickleable Python object.
The `id` is assigned for you when the message is sent. The `tag` and `data` are
specified by you when you send a message. I use the `tag` as a convenient identifier for
the type of message, e.g. command, status, query, etc. Finally, the `data` can be any
pickleable Python object. This could be a number, a string, `None` (in case the `tag` is
meaningful all by itself), a dictionary, or even a class instance (assuming the program
on the other end of the connection has the same class definition available). A minimal
usage sketch appears at the bottom of this module.
"""
from collections import deque, namedtuple
from typing import Any, Optional
import io
import pickle
import socket
from time import monotonic as now
HOST: str = "localhost"
PORT: int = 16849
BUFFSIZE: int = 4096
DEFAULT_RATE: int = 5
NOCLIENT_SPAM_INTERVAL: int = 10
Message = namedtuple("Message", "id, tag, data")
# id is int, tag is str, data is anything.
class SocketQueue:
"""This is the base class for Host and Client, which should be used for
communication over a socket. This class is not intended to be instantiated directly.
"""
first_index: int
def __init__(
self,
host: str = HOST,
port_offset: int = 0,
buffsize: int = BUFFSIZE,
rate: int = DEFAULT_RATE,
):
self.host = host
self.port = PORT + port_offset
self.buffsize = buffsize
self.inq, self.outq = deque(), deque()
self.connected = False
self.endpoint = None
self.socket = None
self.msg_index = self.first_index # host uses evens, client uses odds
self.poll_interval = 1 / rate
self.next_poll = now()
self.next_send = now()
self.last_spam = -1e9
def _recv(self) -> None:
"""Read any available data from the socket, decode it, and put it into the
incoming queue.
"""
if not self.connected or now() < self.next_poll:
return
self.next_poll += self.poll_interval
data = []
while True:
try:
data.append(self.endpoint.recv(BUFFSIZE))
except BlockingIOError:
break
if data:
stream = io.BytesIO(b"".join(data))
while True:
try:
info = pickle.load(stream)
msg = Message(*info)
self.inq.append(msg)
except EOFError:
break
def _send(self) -> None:
"""Encode any data in the outgoing queue and send it to the socket."""
if not self.connected or now() < self.next_send:
return
self.next_send += self.poll_interval
buff = []
while self.outq:
msg_id, tag, data = self.outq.popleft()
buff.append(pickle.dumps((msg_id, tag, data)))
if buff:
stream = b"".join(buff)
self.endpoint.sendall(stream)
def put(self, tag: str, data: Any = None) -> int:
"""Put (tag, data) into the queue. Return the index.
"""
msg = (self.msg_index, tag, data)
self.msg_index += 2
self.outq.append(msg)
self._send()
return msg[0]
def get(self) -> Optional[Message]:
"""Return the next incoming message, or None.
"""
self._recv()
if not self.inq:
return None
return self.inq.popleft()
def close(self) -> None:
if self.socket is not None:
self.socket.close()
class Host(SocketQueue):
first_index: int = 0
def connect(self) -> bool:
"""Try to connect to a client. Return True if a connection was successfully
made (or already existed), False if not. A return value of False is expected,
e.g., when there is no client trying to connect.
"""
if self.socket is None:
self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.socket.bind((self.host, self.port))
self.socket.setblocking(False)
self.socket.listen(1)
if self.endpoint is None:
if self.socket is not None:
try:
self.endpoint, _ = self.socket.accept()
self.connected = True
return True
                except BlockingIOError:
                    # No client is waiting to connect yet; try again next call.
                    pass
                except OSError:
                    # The listening socket has gone bad; replace it with a
                    # fresh one so the next call can accept again.
                    self.socket.close()
                    self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
                    self.socket.bind((self.host, self.port))
                    self.socket.setblocking(False)
                    self.socket.listen(1)
return self.connected
class Client(SocketQueue):
first_index: int = 1
def connect(self, timeout: float = 5) -> bool:
"""Try to connect to a host. Return True if a connection was successfully
made (or already existed), False if not.
"""
if not self.connected:
self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.socket.settimeout(timeout)
try:
self.socket.connect((self.host, self.port))
self.connected = True
self.socket.setblocking(False)
self.endpoint = self.socket
except ConnectionRefusedError:
pass
return self.connected
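# ---------------------------------------------------------------------------
# Minimal usage sketch referenced in the module docstring. Everything below is
# an illustrative assumption about how Host and Client are meant to be wired
# together, not part of the original API: run one process with the argument
# "host" and a second one without arguments.
if __name__ == "__main__":
    import sys
    if "host" in sys.argv[1:]:
        peer = Host()
        # Poll until a client attaches, then send a single tagged message.
        while not peer.connect():
            pass
        peer.put("status", {"ready": True})
    else:
        peer = Client()
        if peer.connect(timeout=5):
            # get() returns None until a complete Message has arrived.
            msg = None
            while msg is None:
                msg = peer.get()
            print(msg.tag, msg.data)
    peer.close()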
|
the-stack_0_23727 | # Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the LICENSE file in
# the root directory of this source tree. An additional grant of patent rights
# can be found in the PATENTS file in the same directory.
import argparse
import torch
from fairseq.criterions import CRITERION_REGISTRY
from fairseq.models import ARCH_MODEL_REGISTRY, ARCH_CONFIG_REGISTRY
from fairseq.optim import OPTIMIZER_REGISTRY
from fairseq.optim.lr_scheduler import LR_SCHEDULER_REGISTRY
from fairseq.tasks import TASK_REGISTRY
def get_training_parser(default_task='translation'):
parser = get_parser('Trainer', default_task)
add_dataset_args(parser, train=True)
add_distributed_training_args(parser)
add_model_args(parser)
add_optimization_args(parser)
add_checkpoint_args(parser)
return parser
def get_generation_parser(interactive=False, default_task='translation'):
parser = get_parser('Generation', default_task)
add_dataset_args(parser, gen=True)
add_generation_args(parser)
if interactive:
add_interactive_args(parser)
return parser
def get_interactive_generation_parser(default_task='translation'):
return get_generation_parser(interactive=True, default_task=default_task)
def get_eval_lm_parser(default_task='language_modeling'):
parser = get_parser('Evaluate Language Model', default_task)
add_dataset_args(parser, gen=True)
add_eval_lm_args(parser)
return parser
def get_inference_parser(default_task='glue'):
parser = get_parser('Inference', default_task)
add_dataset_args(parser, gen=True)
add_inference_args(parser)
return parser
def eval_str_list(x, type=float):
if x is None:
return None
if isinstance(x, str):
x = eval(x)
try:
return list(map(type, x))
except TypeError:
return [type(x)]
def eval_bool(x, default=False):
if x is None:
return default
try:
return bool(eval(x))
except TypeError:
return default
def parse_args_and_arch(parser, input_args=None, parse_known=False):
# The parser doesn't know about model/criterion/optimizer-specific args, so
# we parse twice. First we parse the model/criterion/optimizer, then we
# parse a second time after adding the *-specific arguments.
# If input_args is given, we will parse those args instead of sys.argv.
args, _ = parser.parse_known_args(input_args)
# Add model-specific args to parser.
if hasattr(args, 'arch'):
model_specific_group = parser.add_argument_group(
'Model-specific configuration',
# Only include attributes which are explicitly given as command-line
# arguments or which have default values.
argument_default=argparse.SUPPRESS,
)
ARCH_MODEL_REGISTRY[args.arch].add_args(model_specific_group)
# Add *-specific args to parser.
if hasattr(args, 'criterion'):
CRITERION_REGISTRY[args.criterion].add_args(parser)
if hasattr(args, 'optimizer'):
OPTIMIZER_REGISTRY[args.optimizer].add_args(parser)
if hasattr(args, 'lr_scheduler'):
LR_SCHEDULER_REGISTRY[args.lr_scheduler].add_args(parser)
if hasattr(args, 'task'):
TASK_REGISTRY[args.task].add_args(parser)
# Parse a second time.
if parse_known:
args, extra = parser.parse_known_args(input_args)
else:
args = parser.parse_args(input_args)
extra = None
# Post-process args.
if hasattr(args, 'lr'):
args.lr = eval_str_list(args.lr, type=float)
if hasattr(args, 'update_freq'):
args.update_freq = eval_str_list(args.update_freq, type=int)
if hasattr(args, 'max_sentences_valid') and args.max_sentences_valid is None:
args.max_sentences_valid = args.max_sentences
# Apply architecture configuration.
if hasattr(args, 'arch'):
ARCH_CONFIG_REGISTRY[args.arch](args)
if parse_known:
return args, extra
else:
return args
def get_parser(desc, default_task='translation'):
parser = argparse.ArgumentParser()
parser.add_argument('--no-progress-bar', action='store_true', help='disable progress bar')
parser.add_argument('--log-interval', type=int, default=1000, metavar='N',
help='log progress every N batches (when progress bar is disabled)')
parser.add_argument('--log-format', default=None, help='log format to use',
choices=['json', 'none', 'simple', 'tqdm'])
parser.add_argument('--seed', default=1, type=int, metavar='N',
help='pseudo random number generator seed')
parser.add_argument('--fp16', action='store_true', help='use FP16')
parser.add_argument('--fp16-init-scale', default=2**7, type=int,
help='default FP16 loss scale')
# Task definitions can be found under fairseq/tasks/
parser.add_argument(
'--task', metavar='TASK', default=default_task,
choices=TASK_REGISTRY.keys(),
help='task',
)
return parser
def add_dataset_args(parser, train=False, gen=False):
group = parser.add_argument_group('Dataset and data loading')
group.add_argument('--skip-invalid-size-inputs-valid-test', action='store_true',
help='ignore too long or too short lines in valid and test set')
group.add_argument('--max-tokens', type=int, metavar='N',
help='maximum number of tokens in a batch')
group.add_argument('--max-sentences', '--batch-size', type=int, metavar='N',
help='maximum number of sentences in a batch')
if train:
group.add_argument('--train-subset', default='train', metavar='SPLIT',
choices=['train', 'valid', 'test'],
help='data subset to use for training (train, valid, test)')
group.add_argument('--valid-subset', default='valid', metavar='SPLIT',
help='comma separated list of data subsets to use for validation'
' (train, valid, valid1, test, test1)')
group.add_argument('--max-sentences-valid', type=int, metavar='N',
help='maximum number of sentences in a validation batch'
' (defaults to --max-sentences)')
if gen:
group.add_argument('--gen-subset', default='test', metavar='SPLIT',
help='data subset to generate (train, valid, test)')
group.add_argument('--num-shards', default=1, type=int, metavar='N',
help='shard generation over N shards')
group.add_argument('--shard-id', default=0, type=int, metavar='ID',
help='id of the shard to generate (id < num_shards)')
return group
def add_distributed_training_args(parser):
group = parser.add_argument_group('Distributed training')
group.add_argument('--distributed-world-size', type=int, metavar='N',
default=torch.cuda.device_count(),
help='total number of GPUs across all nodes (default: all visible GPUs)')
group.add_argument('--distributed-rank', default=0, type=int,
help='rank of the current worker')
group.add_argument('--distributed-backend', default='nccl', type=str,
help='distributed backend')
group.add_argument('--distributed-init-method', default=None, type=str,
help='typically tcp://hostname:port that will be used to '
                            'establish initial connection')
group.add_argument('--distributed-port', default=-1, type=int,
help='port number (not required if using --distributed-init-method)')
group.add_argument('--device-id', default=0, type=int,
help='which GPU to use (usually configured automatically)')
group.add_argument('--ddp-backend', default='c10d', type=str,
choices=['c10d', 'no_c10d'],
help='DistributedDataParallel backend')
group.add_argument('--bucket-cap-mb', default=150, type=int, metavar='MB',
help='bucket size for reduction')
return group
def add_optimization_args(parser):
group = parser.add_argument_group('Optimization')
group.add_argument('--max-epoch', '--me', default=0, type=int, metavar='N',
help='force stop training at specified epoch')
group.add_argument('--max-update', '--mu', default=0, type=int, metavar='N',
help='force stop training at specified update')
group.add_argument('--clip-norm', default=25, type=float, metavar='NORM',
help='clip threshold of gradients')
group.add_argument('--sentence-avg', action='store_true',
help='normalize gradients by the number of sentences in a batch'
' (default is to normalize by number of tokens)')
group.add_argument('--update-freq', default='1', metavar='N',
help='update parameters every N_i batches, when in epoch i')
# Optimizer definitions can be found under fairseq/optim/
group.add_argument('--optimizer', default='nag', metavar='OPT',
choices=OPTIMIZER_REGISTRY.keys(),
help='Optimizer')
group.add_argument('--lr', '--learning-rate', default='0.25', metavar='LR_1,LR_2,...,LR_N',
help='learning rate for the first N epochs; all epochs >N using LR_N'
' (note: this may be interpreted differently depending on --lr-scheduler)')
group.add_argument('--momentum', default=0.99, type=float, metavar='M',
help='momentum factor')
group.add_argument('--weight-decay', '--wd', default=0.0, type=float, metavar='WD',
help='weight decay')
# Learning rate schedulers can be found under fairseq/optim/lr_scheduler/
group.add_argument('--lr-scheduler', default='reduce_lr_on_plateau',
choices=LR_SCHEDULER_REGISTRY.keys(),
help='Learning Rate Scheduler')
group.add_argument('--lr-shrink', default=0.1, type=float, metavar='LS',
help='learning rate shrink factor for annealing, lr_new = (lr * lr_shrink)')
group.add_argument('--min-lr', default=1e-5, type=float, metavar='LR',
help='minimum learning rate')
group.add_argument('--min-loss-scale', default=1e-4, type=float, metavar='D',
help='minimum loss scale (for FP16 training)')
return group
def add_checkpoint_args(parser):
group = parser.add_argument_group('Checkpointing')
group.add_argument('--save-dir', metavar='DIR', default='checkpoints',
help='path to save checkpoints')
group.add_argument('--restore-file', default='checkpoint_last.pt',
help='filename in save-dir from which to load checkpoint')
group.add_argument('--reset-optimizer', action='store_true',
help='if set, does not load optimizer state from the checkpoint')
group.add_argument('--reset-lr-scheduler', action='store_true',
help='if set, does not load lr scheduler state from the checkpoint')
group.add_argument('--optimizer-overrides', default="{}", type=str, metavar='DICT',
help='a dictionary used to override optimizer args when loading a checkpoint')
group.add_argument('--save-interval', type=int, default=1, metavar='N',
help='save a checkpoint every N epochs')
group.add_argument('--save-interval-updates', type=int, default=0, metavar='N',
help='save a checkpoint (and validate) every N updates')
group.add_argument('--keep-interval-updates', type=int, default=-1, metavar='N',
help='keep last N checkpoints saved with --save-interval-updates')
group.add_argument('--no-save', action='store_true',
help='don\'t save models or checkpoints')
group.add_argument('--no-epoch-checkpoints', action='store_true',
help='only store last and best checkpoints')
group.add_argument('--validate-interval', type=int, default=1, metavar='N',
help='validate every N epochs')
group.add_argument('--load-bert', metavar='PATH', type=str,
help='pretrained bert encoder to load from file')
group.add_argument('--load-type', metavar='TYPE', default="all", type=str,
help='which part pretrained bert encoder to load ("all", "no_fc", "no_out", "no_out_no_fc")')
return group
def add_common_eval_args(group):
group.add_argument('--path', metavar='FILE',
help='path(s) to model file(s), colon separated')
group.add_argument('--remove-bpe', nargs='?', const='@@ ', default=None,
help='remove BPE tokens before scoring')
group.add_argument('--cpu', action='store_true', help='generate on CPU')
group.add_argument('--quiet', action='store_true',
help='only print final scores')
def add_eval_lm_args(parser):
group = parser.add_argument_group('LM Evaluation')
add_common_eval_args(group)
group.add_argument('--output-word-probs', action='store_true',
help='if set, outputs words and their predicted log probabilities to standard output')
group.add_argument('--output-word-stats', action='store_true',
help='if set, outputs word statistics such as word count, average probability, etc')
def add_generation_args(parser):
group = parser.add_argument_group('Generation')
add_common_eval_args(group)
group.add_argument('--beam', default=5, type=int, metavar='N',
help='beam size')
group.add_argument('--nbest', default=1, type=int, metavar='N',
help='number of hypotheses to output')
group.add_argument('--max-len-a', default=0, type=float, metavar='N',
help=('generate sequences of maximum length ax + b, '
'where x is the source length'))
group.add_argument('--max-len-b', default=200, type=int, metavar='N',
help=('generate sequences of maximum length ax + b, '
'where x is the source length'))
group.add_argument('--min-len', default=1, type=float, metavar='N',
help=('minimum generation length'))
group.add_argument('--no-early-stop', action='store_true',
help=('continue searching even after finalizing k=beam '
'hypotheses; this is more correct, but increases '
'generation time by 50%%'))
group.add_argument('--unnormalized', action='store_true',
help='compare unnormalized hypothesis scores')
group.add_argument('--no-beamable-mm', action='store_true',
help='don\'t use BeamableMM in attention layers')
group.add_argument('--lenpen', default=1, type=float,
help='length penalty: <1.0 favors shorter, >1.0 favors longer sentences')
group.add_argument('--unkpen', default=0, type=float,
help='unknown word penalty: <0 produces more unks, >0 produces fewer')
group.add_argument('--replace-unk', nargs='?', const=True, default=None,
help='perform unknown replacement (optionally with alignment dictionary)')
group.add_argument('--score-reference', action='store_true',
help='just score the reference translation')
group.add_argument('--prefix-size', default=0, type=int, metavar='PS',
help='initialize generation by target prefix of given length')
group.add_argument('--sampling', action='store_true',
help='sample hypotheses instead of using beam search')
group.add_argument('--sampling-topk', default=-1, type=int, metavar='PS',
help='sample from top K likely next words instead of all words')
group.add_argument('--sampling-temperature', default=1, type=float, metavar='N',
help='temperature for random sampling')
group.add_argument('--diverse-beam-groups', default=1, type=int, metavar='N',
help='number of groups for Diverse Beam Search')
group.add_argument('--diverse-beam-strength', default=0.5, type=float, metavar='N',
help='strength of diversity penalty for Diverse Beam Search')
group.add_argument('--print-alignment', action='store_true',
help='if set, uses attention feedback to compute and print alignment to source tokens')
group.add_argument('--model-overrides', default="{}", type=str, metavar='DICT',
help='a dictionary used to override model args at generation that were used during model training')
return group
def add_interactive_args(parser):
group = parser.add_argument_group('Interactive')
group.add_argument('--buffer-size', default=0, type=int, metavar='N',
help='read this many sentences into a buffer before processing them')
def add_inference_args(parser):
group = parser.add_argument_group('Inference')
add_common_eval_args(group)
group.add_argument('--output', type=str, metavar='PATH',
help='path of inference output')
return group
def add_model_args(parser):
group = parser.add_argument_group('Model configuration')
# Model definitions can be found under fairseq/models/
#
# The model architecture can be specified in several ways.
# In increasing order of priority:
# 1) model defaults (lowest priority)
# 2) --arch argument
# 3) --encoder/decoder-* arguments (highest priority)
group.add_argument(
'--arch', '-a', default='fconv', metavar='ARCH', required=True,
choices=ARCH_MODEL_REGISTRY.keys(),
help='Model Architecture',
)
# Criterion definitions can be found under fairseq/criterions/
group.add_argument(
'--criterion', default='cross_entropy', metavar='CRIT',
choices=CRITERION_REGISTRY.keys(),
help='Training Criterion',
)
return group
|
the-stack_0_23728 | # Copyright 2013-2018 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Utility functions for working with Android devices through adb.
"""
# pylint: disable=E1103
import glob
import logging
import os
import pexpect
import re
import subprocess
import sys
import tempfile
import time
import uuid
import zipfile
from collections import defaultdict
from io import StringIO
from lxml import etree
try:
from shlex import quote
except ImportError:
from pipes import quote
from devlib.exception import TargetTransientError, TargetStableError, HostError
from devlib.utils.misc import check_output, which, ABI_MAP, redirect_streams, get_subprocess
from devlib.connection import ConnectionBase, AdbBackgroundCommand, PopenBackgroundCommand, PopenTransferManager
logger = logging.getLogger('android')
MAX_ATTEMPTS = 5
AM_START_ERROR = re.compile(r"Error: Activity.*")
AAPT_BADGING_OUTPUT = re.compile(r"no dump ((file)|(apk)) specified", re.IGNORECASE)
# See:
# http://developer.android.com/guide/topics/manifest/uses-sdk-element.html#ApiLevels
ANDROID_VERSION_MAP = {
29: 'Q',
28: 'PIE',
27: 'OREO_MR1',
26: 'OREO',
25: 'NOUGAT_MR1',
24: 'NOUGAT',
23: 'MARSHMALLOW',
22: 'LOLLYPOP_MR1',
21: 'LOLLYPOP',
20: 'KITKAT_WATCH',
19: 'KITKAT',
18: 'JELLY_BEAN_MR2',
17: 'JELLY_BEAN_MR1',
16: 'JELLY_BEAN',
15: 'ICE_CREAM_SANDWICH_MR1',
14: 'ICE_CREAM_SANDWICH',
13: 'HONEYCOMB_MR2',
12: 'HONEYCOMB_MR1',
11: 'HONEYCOMB',
10: 'GINGERBREAD_MR1',
9: 'GINGERBREAD',
8: 'FROYO',
7: 'ECLAIR_MR1',
6: 'ECLAIR_0_1',
5: 'ECLAIR',
4: 'DONUT',
3: 'CUPCAKE',
2: 'BASE_1_1',
1: 'BASE',
}
# See https://developer.android.com/reference/android/content/Intent.html#setFlags(int)
INTENT_FLAGS = {
'ACTIVITY_NEW_TASK' : 0x10000000,
'ACTIVITY_CLEAR_TASK' : 0x00008000
}
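# For example, launching an activity in a fresh, cleared task combines the two
# flags below; passing the combined value to `am start -f` is an illustration,
# not something this module does here:
#
#   flags = INTENT_FLAGS['ACTIVITY_NEW_TASK'] | INTENT_FLAGS['ACTIVITY_CLEAR_TASK']
#   # hex(flags) == '0x10008000'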
# Initialized in functions near the bottom of the file
android_home = None
platform_tools = None
adb = None
aapt = None
aapt_version = None
fastboot = None
class AndroidProperties(object):
def __init__(self, text):
self._properties = {}
self.parse(text)
def parse(self, text):
self._properties = dict(re.findall(r'\[(.*?)\]:\s+\[(.*?)\]', text))
def iteritems(self):
return iter(self._properties.items())
def __iter__(self):
return iter(self._properties)
def __getattr__(self, name):
return self._properties.get(name)
__getitem__ = __getattr__
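# Illustrative use of AndroidProperties (the property lines below are made-up
# samples of `adb shell getprop` output, not values read from a real device):
#
#   props = AndroidProperties('[ro.build.version.sdk]: [29]\n[ro.product.model]: [generic]')
#   props['ro.build.version.sdk']   # -> '29'
#   props.missing_key               # -> None (unknown keys fall back to None)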
class AdbDevice(object):
def __init__(self, name, status):
self.name = name
self.status = status
# pylint: disable=undefined-variable
def __cmp__(self, other):
if isinstance(other, AdbDevice):
return cmp(self.name, other.name)
else:
return cmp(self.name, other)
def __str__(self):
return 'AdbDevice({}, {})'.format(self.name, self.status)
__repr__ = __str__
class ApkInfo(object):
version_regex = re.compile(r"name='(?P<name>[^']+)' versionCode='(?P<vcode>[^']+)' versionName='(?P<vname>[^']+)'")
name_regex = re.compile(r"name='(?P<name>[^']+)'")
permission_regex = re.compile(r"name='(?P<permission>[^']+)'")
activity_regex = re.compile(r'\s*A:\s*android:name\(0x\d+\)=".(?P<name>\w+)"')
def __init__(self, path=None):
self.path = path
self.package = None
self.activity = None
self.label = None
self.version_name = None
self.version_code = None
self.native_code = None
self.permissions = []
self._apk_path = None
self._activities = None
self._methods = None
if path:
self.parse(path)
# pylint: disable=too-many-branches
def parse(self, apk_path):
_check_env()
output = self._run([aapt, 'dump', 'badging', apk_path])
for line in output.split('\n'):
if line.startswith('application-label:'):
self.label = line.split(':')[1].strip().replace('\'', '')
elif line.startswith('package:'):
match = self.version_regex.search(line)
if match:
self.package = match.group('name')
self.version_code = match.group('vcode')
self.version_name = match.group('vname')
elif line.startswith('launchable-activity:'):
match = self.name_regex.search(line)
self.activity = match.group('name')
elif line.startswith('native-code'):
apk_abis = [entry.strip() for entry in line.split(':')[1].split("'") if entry.strip()]
mapped_abis = []
for apk_abi in apk_abis:
found = False
for abi, architectures in ABI_MAP.items():
if apk_abi in architectures:
mapped_abis.append(abi)
found = True
break
if not found:
mapped_abis.append(apk_abi)
self.native_code = mapped_abis
elif line.startswith('uses-permission:'):
match = self.permission_regex.search(line)
if match:
self.permissions.append(match.group('permission'))
else:
pass # not interested
self._apk_path = apk_path
self._activities = None
self._methods = None
@property
def activities(self):
if self._activities is None:
cmd = [aapt, 'dump', 'xmltree', self._apk_path]
if aapt_version == 2:
cmd += ['--file']
cmd += ['AndroidManifest.xml']
matched_activities = self.activity_regex.finditer(self._run(cmd))
self._activities = [m.group('name') for m in matched_activities]
return self._activities
@property
def methods(self):
if self._methods is None:
# Only try to extract once
self._methods = []
with tempfile.TemporaryDirectory() as tmp_dir:
with zipfile.ZipFile(self._apk_path, 'r') as z:
try:
extracted = z.extract('classes.dex', tmp_dir)
except KeyError:
return []
dexdump = os.path.join(os.path.dirname(aapt), 'dexdump')
command = [dexdump, '-l', 'xml', extracted]
dump = self._run(command)
# Dexdump from build tools v30.0.X does not seem to produce
# valid xml from certain APKs so ignore errors and attempt to recover.
parser = etree.XMLParser(encoding='utf-8', recover=True)
xml_tree = etree.parse(StringIO(dump), parser)
package = next((i for i in xml_tree.iter('package')
if i.attrib['name'] == self.package), None)
self._methods = [(meth.attrib['name'], klass.attrib['name'])
for klass in package.iter('class')
for meth in klass.iter('method')] if package else []
return self._methods
def _run(self, command):
logger.debug(' '.join(command))
try:
output = subprocess.check_output(command, stderr=subprocess.STDOUT)
if sys.version_info[0] == 3:
output = output.decode(sys.stdout.encoding or 'utf-8', 'replace')
except subprocess.CalledProcessError as e:
raise HostError('Error while running "{}":\n{}'
.format(command, e.output))
return output
class AdbConnection(ConnectionBase):
# maintains the count of parallel active connections to a device, so that
# adb disconnect is not invoked untill all connections are closed
active_connections = defaultdict(int)
# Track connected as root status per device
_connected_as_root = defaultdict(lambda: None)
default_timeout = 10
ls_command = 'ls'
su_cmd = 'su -c {}'
@property
def name(self):
return self.device
@property
def connected_as_root(self):
if self._connected_as_root[self.device] is None:
result = self.execute('id')
self._connected_as_root[self.device] = 'uid=0(' in result
return self._connected_as_root[self.device]
@connected_as_root.setter
def connected_as_root(self, state):
self._connected_as_root[self.device] = state
# pylint: disable=unused-argument
def __init__(self, device=None, timeout=None, platform=None, adb_server=None,
adb_as_root=False, connection_attempts=MAX_ATTEMPTS,
poll_transfers=False,
start_transfer_poll_delay=30,
total_transfer_timeout=3600,
transfer_poll_period=30,):
super().__init__()
self.timeout = timeout if timeout is not None else self.default_timeout
if device is None:
device = adb_get_device(timeout=timeout, adb_server=adb_server)
self.device = device
self.adb_server = adb_server
self.adb_as_root = adb_as_root
self.poll_transfers = poll_transfers
if poll_transfers:
transfer_opts = {'start_transfer_poll_delay': start_transfer_poll_delay,
'total_timeout': total_transfer_timeout,
'poll_period': transfer_poll_period,
}
self.transfer_mgr = PopenTransferManager(self, **transfer_opts) if poll_transfers else None
if self.adb_as_root:
self.adb_root(enable=True)
adb_connect(self.device, adb_server=self.adb_server, attempts=connection_attempts)
AdbConnection.active_connections[self.device] += 1
self._setup_ls()
self._setup_su()
def push(self, sources, dest, timeout=None):
return self._push_pull('push', sources, dest, timeout)
def pull(self, sources, dest, timeout=None):
return self._push_pull('pull', sources, dest, timeout)
def _push_pull(self, action, sources, dest, timeout):
paths = sources + [dest]
# Quote twice to avoid expansion by host shell, then ADB globbing
do_quote = lambda x: quote(glob.escape(x))
paths = ' '.join(map(do_quote, paths))
command = "{} {}".format(action, paths)
if timeout or not self.poll_transfers:
adb_command(self.device, command, timeout=timeout, adb_server=self.adb_server)
else:
with self.transfer_mgr.manage(sources, dest, action):
bg_cmd = adb_command_background(self.device, command, adb_server=self.adb_server)
self.transfer_mgr.set_transfer_and_wait(bg_cmd)
# pylint: disable=unused-argument
def execute(self, command, timeout=None, check_exit_code=False,
as_root=False, strip_colors=True, will_succeed=False):
try:
return adb_shell(self.device, command, timeout, check_exit_code,
as_root, adb_server=self.adb_server, su_cmd=self.su_cmd)
except TargetStableError as e:
if will_succeed:
raise TargetTransientError(e)
else:
raise
def background(self, command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, as_root=False):
bg_cmd = self._background(command, stdout, stderr, as_root)
self._current_bg_cmds.add(bg_cmd)
return bg_cmd
def _background(self, command, stdout, stderr, as_root):
adb_shell, pid = adb_background_shell(self, command, stdout, stderr, as_root)
bg_cmd = AdbBackgroundCommand(
conn=self,
adb_popen=adb_shell,
pid=pid,
as_root=as_root
)
return bg_cmd
def _close(self):
AdbConnection.active_connections[self.device] -= 1
if AdbConnection.active_connections[self.device] <= 0:
if self.adb_as_root:
self.adb_root(enable=False)
adb_disconnect(self.device, self.adb_server)
del AdbConnection.active_connections[self.device]
def cancel_running_command(self):
        # adbd multiplexes commands so that they don't interfere with each
# other, so there is no need to explicitly cancel a running command
# before the next one can be issued.
pass
def adb_root(self, enable=True):
cmd = 'root' if enable else 'unroot'
output = adb_command(self.device, cmd, timeout=30, adb_server=self.adb_server)
if 'cannot run as root in production builds' in output:
raise TargetStableError(output)
AdbConnection._connected_as_root[self.device] = enable
def wait_for_device(self, timeout=30):
adb_command(self.device, 'wait-for-device', timeout, self.adb_server)
def reboot_bootloader(self, timeout=30):
adb_command(self.device, 'reboot-bootloader', timeout, self.adb_server)
# Again, we need to handle boards where the default output format from ls is
# single column *and* boards where the default output is multi-column.
# We need to do this purely because the '-1' option causes errors on older
# versions of the ls tool in Android pre-v7.
def _setup_ls(self):
command = "shell '(ls -1); echo \"\n$?\"'"
try:
output = adb_command(self.device, command, timeout=self.timeout, adb_server=self.adb_server)
except subprocess.CalledProcessError as e:
raise HostError(
'Failed to set up ls command on Android device. Output:\n'
+ e.output)
lines = output.splitlines()
retval = lines[-1].strip()
if int(retval) == 0:
self.ls_command = 'ls -1'
else:
self.ls_command = 'ls'
logger.debug("ls command is set to {}".format(self.ls_command))
def _setup_su(self):
try:
# Try the new style of invoking `su`
self.execute('ls', timeout=self.timeout, as_root=True,
check_exit_code=True)
        # If this fails, assume either the old style or an unrooted device. Here
        # we assume old style; root status will be verified later.
except (TargetStableError, TargetTransientError, TimeoutError):
self.su_cmd = 'echo {} | su'
logger.debug("su command is set to {}".format(quote(self.su_cmd)))
def fastboot_command(command, timeout=None, device=None):
_check_env()
target = '-s {}'.format(quote(device)) if device else ''
full_command = 'fastboot {} {}'.format(target, command)
logger.debug(full_command)
output, _ = check_output(full_command, timeout, shell=True)
return output
def fastboot_flash_partition(partition, path_to_image):
command = 'flash {} {}'.format(quote(partition), quote(path_to_image))
fastboot_command(command)
def adb_get_device(timeout=None, adb_server=None):
"""
Returns the serial number of a connected android device.
    If more than one device is connected to the machine, or no connected device
    could be found, :class:`devlib.exceptions.HostError` is raised.
"""
    # TODO this is a hacky way to issue an adb command to all listed devices
# Ensure server is started so the 'daemon started successfully' message
# doesn't confuse the parsing below
adb_command(None, 'start-server', adb_server=adb_server)
    # The output of calling adb devices consists of a heading line, then
    # a list of the devices separated by newlines.
    # The last line is a blank new line. In other words, if a device is found,
    # then the output length is 2 + (1 for each device).
start = time.time()
while True:
output = adb_command(None, "devices", adb_server=adb_server).splitlines() # pylint: disable=E1103
output_length = len(output)
if output_length == 3:
# output[1] is the 2nd line in the output which has the device name
# Splitting the line by '\t' gives a list of two indexes, which has
# device serial in 0 number and device type in 1.
return output[1].split('\t')[0]
elif output_length > 3:
message = '{} Android devices found; either explicitly specify ' +\
'the device you want, or make sure only one is connected.'
raise HostError(message.format(output_length - 2))
else:
if timeout < time.time() - start:
raise HostError('No device is connected and available')
time.sleep(1)
def adb_connect(device, timeout=None, attempts=MAX_ATTEMPTS, adb_server=None):
_check_env()
tries = 0
output = None
while tries <= attempts:
tries += 1
if device:
if "." in device: # Connect is required only for ADB-over-IP
                # ADB does not automatically remove a network device from its
# devices list when the connection is broken by the remote, so the
# adb connection may have gone "stale", resulting in adb blocking
# indefinitely when making calls to the device. To avoid this,
# always disconnect first.
adb_disconnect(device, adb_server)
adb_cmd = get_adb_command(None, 'connect', adb_server)
command = '{} {}'.format(adb_cmd, quote(device))
logger.debug(command)
output, _ = check_output(command, shell=True, timeout=timeout)
if _ping(device, adb_server):
break
time.sleep(10)
else: # did not connect to the device
message = 'Could not connect to {}'.format(device or 'a device')
if output:
message += '; got: "{}"'.format(output)
raise HostError(message)
def adb_disconnect(device, adb_server=None):
_check_env()
if not device:
return
if ":" in device and device in adb_list_devices(adb_server):
adb_cmd = get_adb_command(None, 'disconnect', adb_server)
command = "{} {}".format(adb_cmd, device)
logger.debug(command)
retval = subprocess.call(command, stdout=open(os.devnull, 'wb'), shell=True)
if retval:
raise TargetTransientError('"{}" returned {}'.format(command, retval))
def _ping(device, adb_server=None):
_check_env()
adb_cmd = get_adb_command(device, 'shell', adb_server)
command = "{} {}".format(adb_cmd, quote('ls /data/local/tmp > /dev/null'))
logger.debug(command)
result = subprocess.call(command, stderr=subprocess.PIPE, shell=True)
if not result: # pylint: disable=simplifiable-if-statement
return True
else:
return False
# pylint: disable=too-many-locals
def adb_shell(device, command, timeout=None, check_exit_code=False,
as_root=False, adb_server=None, su_cmd='su -c {}'): # NOQA
_check_env()
# On older combinations of ADB/Android versions, the adb host command always
# exits with 0 if it was able to run the command on the target, even if the
# command failed (https://code.google.com/p/android/issues/detail?id=3254).
# Homogenise this behaviour by running the command then echoing the exit
# code of the executed command itself.
command = r'({}); echo "\n$?"'.format(command)
parts = ['adb']
if adb_server is not None:
parts += ['-H', adb_server]
if device is not None:
parts += ['-s', device]
parts += ['shell',
command if not as_root else su_cmd.format(quote(command))]
logger.debug(' '.join(quote(part) for part in parts))
try:
raw_output, error = check_output(parts, timeout, shell=False)
except subprocess.CalledProcessError as e:
raise TargetStableError(str(e))
if raw_output:
try:
output, exit_code, _ = raw_output.replace('\r\n', '\n').replace('\r', '\n').rsplit('\n', 2)
except ValueError:
exit_code, _ = raw_output.replace('\r\n', '\n').replace('\r', '\n').rsplit('\n', 1)
output = ''
else: # raw_output is empty
exit_code = '969696' # just because
output = ''
if check_exit_code:
exit_code = exit_code.strip()
re_search = AM_START_ERROR.findall(output)
if exit_code.isdigit():
if int(exit_code):
message = ('Got exit code {}\nfrom target command: {}\n'
'OUTPUT: {}\nSTDERR: {}\n')
raise TargetStableError(message.format(exit_code, command, output, error))
elif re_search:
message = 'Could not start activity; got the following:\n{}'
raise TargetStableError(message.format(re_search[0]))
else: # not all digits
if re_search:
message = 'Could not start activity; got the following:\n{}'
raise TargetStableError(message.format(re_search[0]))
else:
message = 'adb has returned early; did not get an exit code. '\
'Was kill-server invoked?\nOUTPUT:\n-----\n{}\n'\
'-----\nSTDERR:\n-----\n{}\n-----'
raise TargetTransientError(message.format(raw_output, error))
return output + error
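# Illustrative sketch (not part of the original module): invoking adb_shell with
# root escalation. The device serial, command, and timeout are hypothetical; the
# su_cmd mirrors the 'echo {} | su' form used elsewhere in this file.
def _example_adb_shell_as_root(device='emulator-5554'):
    # Run a root-only command and raise if it returns a non-zero exit code.
    return adb_shell(device, 'cat /proc/kmsg | head -n 1',
                     timeout=30, check_exit_code=True,
                     as_root=True, su_cmd='echo {} | su')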
def adb_background_shell(conn, command,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
as_root=False):
"""Runs the specified command in a subprocess, returning the the Popen object."""
device = conn.device
adb_server = conn.adb_server
_check_env()
stdout, stderr, command = redirect_streams(stdout, stderr, command)
if as_root:
command = 'echo {} | su'.format(quote(command))
# Attach a unique UUID to the command line so it can be looked for without
# any ambiguity with ps
uuid_ = uuid.uuid4().hex
uuid_var = 'BACKGROUND_COMMAND_UUID={}'.format(uuid_)
command = "{} sh -c {}".format(uuid_var, quote(command))
adb_cmd = get_adb_command(device, 'shell', adb_server)
full_command = '{} {}'.format(adb_cmd, quote(command))
logger.debug(full_command)
p = subprocess.Popen(full_command, stdout=stdout, stderr=stderr, shell=True)
# Out of band PID lookup, to avoid conflicting needs with stdout redirection
find_pid = '{} ps -A -o pid,args | grep {}'.format(conn.busybox, quote(uuid_var))
ps_out = conn.execute(find_pid)
pids = [
int(line.strip().split(' ', 1)[0])
for line in ps_out.splitlines()
]
    # The PID we are looking for is the smallest one, since the background
    # command was started before any of the lookup commands
pid = sorted(pids)[0]
return (p, pid)
def adb_kill_server(timeout=30, adb_server=None):
adb_command(None, 'kill-server', timeout, adb_server)
def adb_list_devices(adb_server=None):
output = adb_command(None, 'devices', adb_server=adb_server)
devices = []
for line in output.splitlines():
parts = [p.strip() for p in line.split()]
if len(parts) == 2:
devices.append(AdbDevice(*parts))
return devices
def get_adb_command(device, command, adb_server=None):
_check_env()
device_string = ""
    if adb_server is not None:
device_string = ' -H {}'.format(adb_server)
device_string += ' -s {}'.format(device) if device else ''
return "adb{} {}".format(device_string, command)
def adb_command(device, command, timeout=None, adb_server=None):
full_command = get_adb_command(device, command, adb_server)
logger.debug(full_command)
output, _ = check_output(full_command, timeout, shell=True)
return output
def adb_command_background(device, command, adb_server=None):
full_command = get_adb_command(device, command, adb_server)
logger.debug(full_command)
proc = get_subprocess(full_command, shell=True)
cmd = PopenBackgroundCommand(proc)
return cmd
def grant_app_permissions(target, package):
"""
Grant an app all the permissions it may ask for
"""
dumpsys = target.execute('dumpsys package {}'.format(package))
permissions = re.search(
r'requested permissions:\s*(?P<permissions>(android.permission.+\s*)+)', dumpsys
)
if permissions is None:
return
permissions = permissions.group('permissions').replace(" ", "").splitlines()
for permission in permissions:
try:
target.execute('pm grant {} {}'.format(package, permission))
except TargetStableError:
logger.debug('Cannot grant {}'.format(permission))
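# Illustrative sketch (not part of the original module): granting all runtime
# permissions an installed package requests. The package name is hypothetical and
# `target` is assumed to be an AndroidTarget-like object providing execute().
def _example_grant_permissions(target, package='com.example.app'):
    grant_app_permissions(target, package)
    # The permissions actually granted can then be inspected via dumpsys.
    return target.execute('dumpsys package {}'.format(package))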
# Messy environment initialisation stuff...
class _AndroidEnvironment(object):
def __init__(self):
self.android_home = None
self.platform_tools = None
self.build_tools = None
self.adb = None
self.aapt = None
self.aapt_version = None
self.fastboot = None
def _initialize_with_android_home(env):
logger.debug('Using ANDROID_HOME from the environment.')
env.android_home = android_home
env.platform_tools = os.path.join(android_home, 'platform-tools')
os.environ['PATH'] = env.platform_tools + os.pathsep + os.environ['PATH']
_init_common(env)
return env
def _initialize_without_android_home(env):
adb_full_path = which('adb')
if adb_full_path:
env.adb = 'adb'
else:
raise HostError('ANDROID_HOME is not set and adb is not in PATH. '
'Have you installed Android SDK?')
logger.debug('Discovering ANDROID_HOME from adb path.')
env.platform_tools = os.path.dirname(adb_full_path)
env.android_home = os.path.dirname(env.platform_tools)
_init_common(env)
return env
def _init_common(env):
_discover_build_tools(env)
_discover_aapt(env)
def _discover_build_tools(env):
logger.debug('ANDROID_HOME: {}'.format(env.android_home))
build_tools_directory = os.path.join(env.android_home, 'build-tools')
if os.path.isdir(build_tools_directory):
env.build_tools = build_tools_directory
def _check_supported_aapt2(binary):
# At time of writing the version argument of aapt2 is not helpful as
# the output is only a placeholder that does not distinguish between versions
    # with and without support for badging. Unfortunately aapt has been
    # deprecated and fails to parse some valid APKs, so we try to favour
    # aapt2 if possible and fall back to aapt otherwise.
# Try to execute the badging command and check if we get an expected error
# message as opposed to an unknown command error to determine if we have a
# suitable version.
cmd = '{} dump badging'.format(binary)
result = subprocess.run(cmd.encode('utf-8'), shell=True, stderr=subprocess.PIPE)
supported = bool(AAPT_BADGING_OUTPUT.search(result.stderr.decode('utf-8')))
msg = 'Found a {} aapt2 binary at: {}'
logger.debug(msg.format('supported' if supported else 'unsupported', binary))
return supported
def _discover_aapt(env):
if env.build_tools:
aapt_path = ''
aapt2_path = ''
versions = os.listdir(env.build_tools)
for version in reversed(sorted(versions)):
if not os.path.isfile(aapt2_path):
aapt2_path = os.path.join(env.build_tools, version, 'aapt2')
if not os.path.isfile(aapt_path):
aapt_path = os.path.join(env.build_tools, version, 'aapt')
aapt_version = 1
            # Use latest available version for aapt/aapt2 but ensure at least one is valid.
if os.path.isfile(aapt2_path) or os.path.isfile(aapt_path):
break
# Use aapt2 only if present and we have a suitable version
if aapt2_path and _check_supported_aapt2(aapt2_path):
aapt_path = aapt2_path
aapt_version = 2
        # Use the aapt version discovered from build tools.
if aapt_path:
logger.debug('Using {} for version {}'.format(aapt_path, version))
env.aapt = aapt_path
env.aapt_version = aapt_version
return
# Try detecting aapt2 and aapt from PATH
if not env.aapt:
aapt2_path = which('aapt2')
        if aapt2_path and _check_supported_aapt2(aapt2_path):
env.aapt = aapt2_path
env.aapt_version = 2
else:
env.aapt = which('aapt')
env.aapt_version = 1
if not env.aapt:
        raise HostError('aapt/aapt2 not found. Please make sure it is available in PATH'
' or at least one Android platform is installed')
def _check_env():
global android_home, platform_tools, adb, aapt, aapt_version # pylint: disable=W0603
if not android_home:
android_home = os.getenv('ANDROID_HOME')
if android_home:
_env = _initialize_with_android_home(_AndroidEnvironment())
else:
_env = _initialize_without_android_home(_AndroidEnvironment())
android_home = _env.android_home
platform_tools = _env.platform_tools
adb = _env.adb
aapt = _env.aapt
aapt_version = _env.aapt_version
class LogcatMonitor(object):
"""
    Helper class for monitoring Android's logcat
:param target: Android target to monitor
:type target: :class:`AndroidTarget`
:param regexps: List of uncompiled regular expressions to filter on the
device. Logcat entries that don't match any will not be
seen. If omitted, all entries will be sent to host.
:type regexps: list(str)
"""
@property
def logfile(self):
return self._logfile
def __init__(self, target, regexps=None, logcat_format=None):
super(LogcatMonitor, self).__init__()
self.target = target
self._regexps = regexps
self._logcat_format = logcat_format
self._logcat = None
self._logfile = None
def start(self, outfile=None):
"""
Start logcat and begin monitoring
:param outfile: Optional path to file to store all logcat entries
:type outfile: str
"""
if outfile:
self._logfile = open(outfile, 'w')
else:
self._logfile = tempfile.NamedTemporaryFile(mode='w')
self.target.clear_logcat()
logcat_cmd = 'logcat'
# Join all requested regexps with an 'or'
if self._regexps:
regexp = '{}'.format('|'.join(self._regexps))
if len(self._regexps) > 1:
regexp = '({})'.format(regexp)
            # Logcat on older versions of Android does not support the -e argument
# so fall back to using grep.
if self.target.get_sdk_version() > 23:
logcat_cmd = '{} -e {}'.format(logcat_cmd, quote(regexp))
else:
logcat_cmd = '{} | grep {}'.format(logcat_cmd, quote(regexp))
if self._logcat_format:
logcat_cmd = "{} -v {}".format(logcat_cmd, quote(self._logcat_format))
logcat_cmd = get_adb_command(self.target.conn.device, logcat_cmd, self.target.adb_server)
logger.debug('logcat command ="{}"'.format(logcat_cmd))
self._logcat = pexpect.spawn(logcat_cmd, logfile=self._logfile, encoding='utf-8')
def stop(self):
self.flush_log()
self._logcat.terminate()
self._logfile.close()
def get_log(self):
"""
Return the list of lines found by the monitor
"""
self.flush_log()
with open(self._logfile.name) as fh:
return [line for line in fh]
def flush_log(self):
        # Unless we tell pexpect to 'expect' something, it won't read from
# logcat's buffer or write into our logfile. We'll need to force it to
# read any pending logcat output.
while True:
try:
read_size = 1024 * 8
# This will read up to read_size bytes, but only those that are
# already ready (i.e. it won't block). If there aren't any bytes
# already available it raises pexpect.TIMEOUT.
buf = self._logcat.read_nonblocking(read_size, timeout=0)
# We can't just keep calling read_nonblocking until we get a
# pexpect.TIMEOUT (i.e. until we don't find any available
# bytes), because logcat might be writing bytes the whole time -
# in that case we might never return from this function. In
# fact, we only care about bytes that were written before we
# entered this function. So, if we read read_size bytes (as many
# as we were allowed to), then we'll assume there are more bytes
# that have already been sitting in the output buffer of the
# logcat command. If not, we'll assume we read everything that
# had already been written.
if len(buf) == read_size:
continue
else:
break
except pexpect.TIMEOUT:
# No available bytes to read. No prob, logcat just hasn't
# printed anything since pexpect last read from its buffer.
break
def clear_log(self):
with open(self._logfile.name, 'w') as _:
pass
def search(self, regexp):
"""
Search a line that matches a regexp in the logcat log
        Return immediately
"""
return [line for line in self.get_log() if re.match(regexp, line)]
def wait_for(self, regexp, timeout=30):
"""
Search a line that matches a regexp in the logcat log
Wait for it to appear if it's not found
:param regexp: regexp to search
:type regexp: str
        :param timeout: Timeout in seconds, before raising RuntimeError.
``None`` means wait indefinitely
:type timeout: number
:returns: List of matched strings
"""
log = self.get_log()
res = [line for line in log if re.match(regexp, line)]
# Found some matches, return them
if res:
return res
# Store the number of lines we've searched already, so we don't have to
# re-grep them after 'expect' returns
next_line_num = len(log)
try:
self._logcat.expect(regexp, timeout=timeout)
except pexpect.TIMEOUT:
raise RuntimeError('Logcat monitor timeout ({}s)'.format(timeout))
return [line for line in self.get_log()[next_line_num:]
if re.match(regexp, line)]
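# Illustrative usage sketch (not part of the original module): monitor logcat for
# ActivityManager entries and wait for an app-start message. The regexps, output
# path, and timeout are hypothetical.
def _example_logcat_monitor(target):
    monitor = LogcatMonitor(target, regexps=[r'ActivityManager'])
    monitor.start('logcat.txt')
    try:
        # Blocks until a matching line appears or the timeout expires.
        return monitor.wait_for(r'Displayed', timeout=60)
    finally:
        monitor.stop()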
|
the-stack_0_23729 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator 2.3.33.0
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class BatchMetricV2Dto(Model):
"""BatchMetricV2Dto.
:param values:
:type values: list[~_restclient.models.MetricV2Dto]
:param report_errors:
:type report_errors: bool
"""
_attribute_map = {
'values': {'key': 'values', 'type': '[MetricV2Dto]'},
'report_errors': {'key': 'reportErrors', 'type': 'bool'},
}
def __init__(self, values=None, report_errors=None):
super(BatchMetricV2Dto, self).__init__()
self.values = values
self.report_errors = report_errors
|
the-stack_0_23730 | import sys, os
from bmtk.simulator import bionet
import numpy as np
import h5py
import pickle
from neuron import h
import pandas as pd
np.random.seed(2129)
# Calculates the maximum polarization of the given array: the value at index
# `time` is taken as the baseline, and the minimum value after that point is
# the trough; the return value is trough - baseline.
def calc_pol(arr, time=4998):
base = arr[time]
trough = min(arr[time:])
return trough - base
# if __name__ == '__main__':
# if __file__ != sys.argv[-1]:
# inp = sys.argv[-1]
# else:
# raise Exception("no work" + str(sys.argv[-1]))
# dist = float(inp)
#synapses.set_pyr_w(weight)
#synapses.load()
config_file = 'simulation_config.json'
conf = bionet.Config.from_json(config_file, validate=True)
conf.build_env()
graph = bionet.BioNetwork.from_config(conf)
sim = bionet.BioSimulator.from_config(conf, network=graph)
cells = graph.get_local_cells()
cell = cells[0]
hobj = cell.hobj
soma = cell.hobj.soma[0](0.5)
soma_v = h.Vector()
soma_v.record(soma._ref_v)
segs = cell._morph.seg_prop
dic = {}
seg_names = []
i = 0
h.distance(sec=cell.hobj.soma[0])
for sec in hobj.all:
fullsecname = sec.name()
sec_type = fullsecname.split(".")[1][:4]
for seg in sec:
i+= 1
#if (i % 100 == 0):
rec = h.Vector()
rec.record(seg._ref_v)
dic[str(seg)] = [sec_type, h.distance(seg.x), rec]
# import pdb; pdb.set_trace()
# seg_names.append(str(seg))
#import pdb; pdb.set_trace()
# v_ref = cell.connections()[0]._connector.postseg()._ref_v
# rec = h.Vector()
# rec.record(v_ref)
sim.run()
import pdb; pdb.set_trace()
soma_pol = calc_pol(np.array(soma_v))
df = pd.DataFrame()
attenuations = []
distances = []
parts = []
for key in dic.keys():
parts.append(dic[key][0])
distances.append(dic[key][1])
attenuations.append(calc_pol(np.array(dic[key][2])) / soma_pol)
df["type"] = parts
df["distance"] = distances
df["attenuation"] = attenuations
df.to_csv("results.csv", index=False)
# syn_volt = rec.as_numpy()
# mem_pot_file = 'output/v_report.h5'
# # load
# f = h5py.File(mem_pot_file,'r')
# mem_potential = f['report']['exc_stim']['data']
# import matplotlib.pyplot as plt
# plt.plot(mem_potential[:, 0], label='soma')
# plt.plot(syn_volt, label='synapse')
# plt.legend()
# plt.show()
# low = mem_potential[3998, 0]
# high = max(mem_potential[3998:, 0])
# soma_mag = high - low
# #print(soma_mag)
# low = syn_volt[3998]
# high = max(syn_volt[3998:])
# dend_mag = high - low
# #print(dend_mag)
# attenuation = soma_mag / dend_mag
# print(attenuation)
# f = open('syn_att.pkl', 'rb')
# res = pickle.load(f)
# if dist in res.keys():
# res[dist].append(attenuation)
# else:
# res[dist] = [attenuation]
# f.close()
# #f = open('syn_epsps.pkl', 'wb')
# f = open('syn_att.pkl', 'wb')
# pickle.dump(res, f)
# f.close()
|
the-stack_0_23731 | from scrapeconfig_orm.models import Schema
from .projects import BaseProjectModelRoute
from ..jsonapi.exceptions import JsonApiBadRequestError, JsonApiNotFoundError
class SchemaRoute(BaseProjectModelRoute):
lookup_url_kwarg = 'schema_id'
default_model = Schema
def get_instance(self):
return self.get_collection()[self.kwargs.get('schema_id')]
def get_collection(self):
return self.project.schemas
def get_list_kwargs(self):
return {
'fields_map': {
'schemas': [
'name',
'default',
'project',
],
}
}
def update(self, *args, **kwargs):
# Reset default schema if current schema will be default
if self.data.get('data', {}).get('attributes', {}).get('default'):
for schema in self.get_collection():
if schema.default:
schema.default = False
schema.save()
return super(SchemaRoute, self).update(*args, **kwargs)
def destroy(self, *args, **kwargs):
try:
schema = self.get_instance()
except KeyError:
raise JsonApiNotFoundError('Unable to find the requested schema')
for spider in self.project.spiders:
for sample in spider.samples:
for item in sample.items:
if self._item_uses_schema(item):
raise JsonApiBadRequestError(
'Unable to delete the data format "%s" as it is used '
"by a spider's sample." % schema.name)
return super(SchemaRoute, self).destroy(*args, **kwargs)
def _item_uses_schema(self, item):
schema = self.get_instance()
if item.schema.id == schema.id:
return True
for item in item.annotations:
if hasattr(item, 'schema') and self._item_uses_schema(item):
return True
return False
|
the-stack_0_23733 | from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
import os, sys, warnings
def fn_name(): return sys._getframe(1).f_code.co_name
if six.PY3:
warnings.warn(
"The gtk* backends have not been tested with Python 3.x",
ImportWarning)
try:
import gobject
import gtk; gdk = gtk.gdk
import pango
except ImportError:
raise ImportError("Gtk* backend requires pygtk to be installed.")
pygtk_version_required = (2,4,0)
if gtk.pygtk_version < pygtk_version_required:
raise ImportError ("PyGTK %d.%d.%d is installed\n"
"PyGTK %d.%d.%d or later is required"
% (gtk.pygtk_version + pygtk_version_required))
del pygtk_version_required
_new_tooltip_api = (gtk.pygtk_version[1] >= 12)
import matplotlib
from matplotlib._pylab_helpers import Gcf
from matplotlib.backend_bases import RendererBase, GraphicsContextBase, \
FigureManagerBase, FigureCanvasBase, NavigationToolbar2, cursors, TimerBase
from matplotlib.backend_bases import ShowBase
from matplotlib.backends.backend_gdk import RendererGDK, FigureCanvasGDK
from matplotlib.cbook import is_string_like, is_writable_file_like
from matplotlib.colors import colorConverter
from matplotlib.figure import Figure
from matplotlib.widgets import SubplotTool
from matplotlib import lines
from matplotlib import markers
from matplotlib import cbook
from matplotlib import verbose
from matplotlib import rcParams
backend_version = "%d.%d.%d" % gtk.pygtk_version
_debug = False
#_debug = True
# the true dots per inch on the screen; should be display dependent
# see http://groups.google.com/groups?q=screen+dpi+x11&hl=en&lr=&ie=UTF-8&oe=UTF-8&safe=off&selm=7077.26e81ad5%40swift.cs.tcd.ie&rnum=5 for some info about screen dpi
PIXELS_PER_INCH = 96
# Hide the benign warning that it can't stat a file that doesn't exist.
warnings.filterwarnings('ignore', '.*Unable to retrieve the file info for.*', gtk.Warning)
cursord = {
cursors.MOVE : gdk.Cursor(gdk.FLEUR),
cursors.HAND : gdk.Cursor(gdk.HAND2),
cursors.POINTER : gdk.Cursor(gdk.LEFT_PTR),
cursors.SELECT_REGION : gdk.Cursor(gdk.TCROSS),
}
# ref gtk+/gtk/gtkwidget.h
def GTK_WIDGET_DRAWABLE(w):
flags = w.flags();
return flags & gtk.VISIBLE != 0 and flags & gtk.MAPPED != 0
def draw_if_interactive():
"""
Is called after every pylab drawing command
"""
if matplotlib.is_interactive():
figManager = Gcf.get_active()
if figManager is not None:
figManager.canvas.draw_idle()
class Show(ShowBase):
def mainloop(self):
if gtk.main_level() == 0:
gtk.main()
show = Show()
def new_figure_manager(num, *args, **kwargs):
"""
Create a new figure manager instance
"""
FigureClass = kwargs.pop('FigureClass', Figure)
thisFig = FigureClass(*args, **kwargs)
return new_figure_manager_given_figure(num, thisFig)
def new_figure_manager_given_figure(num, figure):
"""
Create a new figure manager instance for the given figure.
"""
canvas = FigureCanvasGTK(figure)
manager = FigureManagerGTK(canvas, num)
return manager
class TimerGTK(TimerBase):
'''
Subclass of :class:`backend_bases.TimerBase` that uses GTK for timer events.
Attributes:
* interval: The time between timer events in milliseconds. Default
is 1000 ms.
* single_shot: Boolean flag indicating whether this timer should
operate as single shot (run once and then stop). Defaults to False.
* callbacks: Stores list of (func, args) tuples that will be called
upon timer events. This list can be manipulated directly, or the
functions add_callback and remove_callback can be used.
'''
def _timer_start(self):
# Need to stop it, otherwise we potentially leak a timer id that will
# never be stopped.
self._timer_stop()
self._timer = gobject.timeout_add(self._interval, self._on_timer)
def _timer_stop(self):
if self._timer is not None:
gobject.source_remove(self._timer)
self._timer = None
def _timer_set_interval(self):
# Only stop and restart it if the timer has already been started
if self._timer is not None:
self._timer_stop()
self._timer_start()
def _on_timer(self):
TimerBase._on_timer(self)
# Gtk timeout_add() requires that the callback returns True if it
# is to be called again.
if len(self.callbacks) > 0 and not self._single:
return True
else:
self._timer = None
return False
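# Illustrative usage sketch (not part of the original module): timers are normally
# created through the canvas rather than by instantiating TimerGTK directly. The
# 100 ms interval and the draw_idle callback are hypothetical choices.
def _example_periodic_redraw(canvas):
    timer = canvas.new_timer(interval=100)
    timer.add_callback(canvas.draw_idle)
    timer.start()
    return timer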
class FigureCanvasGTK (gtk.DrawingArea, FigureCanvasBase):
keyvald = {65507 : 'control',
65505 : 'shift',
65513 : 'alt',
65508 : 'control',
65506 : 'shift',
65514 : 'alt',
65361 : 'left',
65362 : 'up',
65363 : 'right',
65364 : 'down',
65307 : 'escape',
65470 : 'f1',
65471 : 'f2',
65472 : 'f3',
65473 : 'f4',
65474 : 'f5',
65475 : 'f6',
65476 : 'f7',
65477 : 'f8',
65478 : 'f9',
65479 : 'f10',
65480 : 'f11',
65481 : 'f12',
65300 : 'scroll_lock',
65299 : 'break',
65288 : 'backspace',
65293 : 'enter',
65379 : 'insert',
65535 : 'delete',
65360 : 'home',
65367 : 'end',
65365 : 'pageup',
65366 : 'pagedown',
65438 : '0',
65436 : '1',
65433 : '2',
65435 : '3',
65430 : '4',
65437 : '5',
65432 : '6',
65429 : '7',
65431 : '8',
65434 : '9',
65451 : '+',
65453 : '-',
65450 : '*',
65455 : '/',
65439 : 'dec',
65421 : 'enter',
65511 : 'super',
65512 : 'super',
65406 : 'alt',
65289 : 'tab',
}
# Setting this as a static constant prevents
# this resulting expression from leaking
event_mask = (gdk.BUTTON_PRESS_MASK |
gdk.BUTTON_RELEASE_MASK |
gdk.EXPOSURE_MASK |
gdk.KEY_PRESS_MASK |
gdk.KEY_RELEASE_MASK |
gdk.ENTER_NOTIFY_MASK |
gdk.LEAVE_NOTIFY_MASK |
gdk.POINTER_MOTION_MASK |
gdk.POINTER_MOTION_HINT_MASK)
def __init__(self, figure):
if _debug: print('FigureCanvasGTK.%s' % fn_name())
FigureCanvasBase.__init__(self, figure)
gtk.DrawingArea.__init__(self)
self._idle_draw_id = 0
self._need_redraw = True
self._pixmap_width = -1
self._pixmap_height = -1
self._lastCursor = None
self.connect('scroll_event', self.scroll_event)
self.connect('button_press_event', self.button_press_event)
self.connect('button_release_event', self.button_release_event)
self.connect('configure_event', self.configure_event)
self.connect('expose_event', self.expose_event)
self.connect('key_press_event', self.key_press_event)
self.connect('key_release_event', self.key_release_event)
self.connect('motion_notify_event', self.motion_notify_event)
self.connect('leave_notify_event', self.leave_notify_event)
self.connect('enter_notify_event', self.enter_notify_event)
self.set_events(self.__class__.event_mask)
self.set_double_buffered(False)
self.set_flags(gtk.CAN_FOCUS)
self._renderer_init()
self._idle_event_id = gobject.idle_add(self.idle_event)
self.last_downclick = {}
def destroy(self):
#gtk.DrawingArea.destroy(self)
self.close_event()
gobject.source_remove(self._idle_event_id)
if self._idle_draw_id != 0:
gobject.source_remove(self._idle_draw_id)
def scroll_event(self, widget, event):
if _debug: print('FigureCanvasGTK.%s' % fn_name())
x = event.x
# flipy so y=0 is bottom of canvas
y = self.allocation.height - event.y
if event.direction==gdk.SCROLL_UP:
step = 1
else:
step = -1
FigureCanvasBase.scroll_event(self, x, y, step, guiEvent=event)
return False # finish event propagation?
def button_press_event(self, widget, event):
if _debug: print('FigureCanvasGTK.%s' % fn_name())
x = event.x
# flipy so y=0 is bottom of canvas
y = self.allocation.height - event.y
dblclick = (event.type == gdk._2BUTTON_PRESS)
if not dblclick:
# GTK is the only backend that generates a DOWN-UP-DOWN-DBLCLICK-UP event
# sequence for a double click. All other backends have a DOWN-UP-DBLCLICK-UP
# sequence. In order to provide consistency to matplotlib users, we will
# eat the extra DOWN event in the case that we detect it is part of a double
# click.
# first, get the double click time in milliseconds.
current_time = event.get_time()
last_time = self.last_downclick.get(event.button,0)
dblclick_time = gtk.settings_get_for_screen(gdk.screen_get_default()).get_property('gtk-double-click-time')
delta_time = current_time-last_time
if delta_time < dblclick_time:
del self.last_downclick[event.button] # we do not want to eat more than one event.
return False # eat.
self.last_downclick[event.button] = current_time
FigureCanvasBase.button_press_event(self, x, y, event.button, dblclick=dblclick, guiEvent=event)
return False # finish event propagation?
def button_release_event(self, widget, event):
if _debug: print('FigureCanvasGTK.%s' % fn_name())
x = event.x
# flipy so y=0 is bottom of canvas
y = self.allocation.height - event.y
FigureCanvasBase.button_release_event(self, x, y, event.button, guiEvent=event)
return False # finish event propagation?
def key_press_event(self, widget, event):
if _debug: print('FigureCanvasGTK.%s' % fn_name())
key = self._get_key(event)
if _debug: print("hit", key)
FigureCanvasBase.key_press_event(self, key, guiEvent=event)
return False # finish event propagation?
def key_release_event(self, widget, event):
if _debug: print('FigureCanvasGTK.%s' % fn_name())
key = self._get_key(event)
if _debug: print("release", key)
FigureCanvasBase.key_release_event(self, key, guiEvent=event)
return False # finish event propagation?
def motion_notify_event(self, widget, event):
if _debug: print('FigureCanvasGTK.%s' % fn_name())
if event.is_hint:
x, y, state = event.window.get_pointer()
else:
x, y, state = event.x, event.y, event.state
# flipy so y=0 is bottom of canvas
y = self.allocation.height - y
FigureCanvasBase.motion_notify_event(self, x, y, guiEvent=event)
return False # finish event propagation?
def leave_notify_event(self, widget, event):
FigureCanvasBase.leave_notify_event(self, event)
def enter_notify_event(self, widget, event):
x, y, state = event.window.get_pointer()
FigureCanvasBase.enter_notify_event(self, event, xy=(x, y))
def _get_key(self, event):
if event.keyval in self.keyvald:
key = self.keyvald[event.keyval]
elif event.keyval < 256:
key = chr(event.keyval)
else:
key = None
for key_mask, prefix in (
[gdk.MOD4_MASK, 'super'],
[gdk.MOD1_MASK, 'alt'],
[gdk.CONTROL_MASK, 'ctrl'], ):
if event.state & key_mask:
key = '{0}+{1}'.format(prefix, key)
return key
def configure_event(self, widget, event):
if _debug: print('FigureCanvasGTK.%s' % fn_name())
if widget.window is None:
return
w, h = event.width, event.height
if w < 3 or h < 3:
return # empty fig
# resize the figure (in inches)
dpi = self.figure.dpi
self.figure.set_size_inches (w/dpi, h/dpi)
self._need_redraw = True
return False # finish event propagation?
def draw(self):
# Note: FigureCanvasBase.draw() is inconveniently named as it clashes
# with the deprecated gtk.Widget.draw()
self._need_redraw = True
if GTK_WIDGET_DRAWABLE(self):
self.queue_draw()
# do a synchronous draw (its less efficient than an async draw,
# but is required if/when animation is used)
self.window.process_updates (False)
def draw_idle(self):
def idle_draw(*args):
try:
self.draw()
finally:
self._idle_draw_id = 0
return False
if self._idle_draw_id == 0:
self._idle_draw_id = gobject.idle_add(idle_draw)
def _renderer_init(self):
"""Override by GTK backends to select a different renderer
Renderer should provide the methods:
set_pixmap ()
set_width_height ()
that are used by
_render_figure() / _pixmap_prepare()
"""
self._renderer = RendererGDK (self, self.figure.dpi)
def _pixmap_prepare(self, width, height):
"""
Make sure _._pixmap is at least width, height,
create new pixmap if necessary
"""
if _debug: print('FigureCanvasGTK.%s' % fn_name())
create_pixmap = False
if width > self._pixmap_width:
# increase the pixmap in 10%+ (rather than 1 pixel) steps
self._pixmap_width = max (int (self._pixmap_width * 1.1),
width)
create_pixmap = True
if height > self._pixmap_height:
self._pixmap_height = max (int (self._pixmap_height * 1.1),
height)
create_pixmap = True
if create_pixmap:
self._pixmap = gdk.Pixmap (self.window, self._pixmap_width,
self._pixmap_height)
self._renderer.set_pixmap (self._pixmap)
def _render_figure(self, pixmap, width, height):
"""used by GTK and GTKcairo. GTKAgg overrides
"""
self._renderer.set_width_height (width, height)
self.figure.draw (self._renderer)
def expose_event(self, widget, event):
"""Expose_event for all GTK backends. Should not be overridden.
"""
if _debug: print('FigureCanvasGTK.%s' % fn_name())
if GTK_WIDGET_DRAWABLE(self):
if self._need_redraw:
x, y, w, h = self.allocation
self._pixmap_prepare (w, h)
self._render_figure(self._pixmap, w, h)
self._need_redraw = False
x, y, w, h = event.area
self.window.draw_drawable (self.style.fg_gc[self.state],
self._pixmap, x, y, x, y, w, h)
return False # finish event propagation?
filetypes = FigureCanvasBase.filetypes.copy()
filetypes['jpg'] = 'JPEG'
filetypes['jpeg'] = 'JPEG'
filetypes['png'] = 'Portable Network Graphics'
def print_jpeg(self, filename, *args, **kwargs):
return self._print_image(filename, 'jpeg')
print_jpg = print_jpeg
def print_png(self, filename, *args, **kwargs):
return self._print_image(filename, 'png')
def _print_image(self, filename, format, *args, **kwargs):
if self.flags() & gtk.REALIZED == 0:
            # Realization is needed for self.window (used for the pixmap) and has
            # a side effect of altering figure width/height (via configure-event?)
gtk.DrawingArea.realize(self)
width, height = self.get_width_height()
pixmap = gdk.Pixmap (self.window, width, height)
self._renderer.set_pixmap (pixmap)
self._render_figure(pixmap, width, height)
# jpg colors don't match the display very well, png colors match
# better
pixbuf = gdk.Pixbuf(gdk.COLORSPACE_RGB, 0, 8, width, height)
pixbuf.get_from_drawable(pixmap, pixmap.get_colormap(),
0, 0, 0, 0, width, height)
# set the default quality, if we are writing a JPEG.
# http://www.pygtk.org/docs/pygtk/class-gdkpixbuf.html#method-gdkpixbuf--save
options = cbook.restrict_dict(kwargs, ['quality'])
if format in ['jpg','jpeg']:
if 'quality' not in options:
options['quality'] = rcParams['savefig.jpeg_quality']
options['quality'] = str(options['quality'])
if is_string_like(filename):
try:
pixbuf.save(filename, format, options=options)
except gobject.GError as exc:
error_msg_gtk('Save figure failure:\n%s' % (exc,), parent=self)
elif is_writable_file_like(filename):
if hasattr(pixbuf, 'save_to_callback'):
def save_callback(buf, data=None):
data.write(buf)
try:
pixbuf.save_to_callback(save_callback, format, user_data=filename, options=options)
except gobject.GError as exc:
error_msg_gtk('Save figure failure:\n%s' % (exc,), parent=self)
else:
raise ValueError("Saving to a Python file-like object is only supported by PyGTK >= 2.8")
else:
raise ValueError("filename must be a path or a file-like object")
def new_timer(self, *args, **kwargs):
"""
Creates a new backend-specific subclass of :class:`backend_bases.Timer`.
This is useful for getting periodic events through the backend's native
event loop. Implemented only for backends with GUIs.
optional arguments:
*interval*
Timer interval in milliseconds
*callbacks*
Sequence of (func, args, kwargs) where func(*args, **kwargs) will
be executed by the timer every *interval*.
"""
return TimerGTK(*args, **kwargs)
def flush_events(self):
gtk.gdk.threads_enter()
while gtk.events_pending():
gtk.main_iteration(True)
gtk.gdk.flush()
gtk.gdk.threads_leave()
def start_event_loop(self,timeout):
FigureCanvasBase.start_event_loop_default(self,timeout)
start_event_loop.__doc__=FigureCanvasBase.start_event_loop_default.__doc__
def stop_event_loop(self):
FigureCanvasBase.stop_event_loop_default(self)
stop_event_loop.__doc__=FigureCanvasBase.stop_event_loop_default.__doc__
class FigureManagerGTK(FigureManagerBase):
"""
Public attributes
canvas : The FigureCanvas instance
num : The Figure number
toolbar : The gtk.Toolbar (gtk only)
vbox : The gtk.VBox containing the canvas and toolbar (gtk only)
window : The gtk.Window (gtk only)
"""
def __init__(self, canvas, num):
if _debug: print('FigureManagerGTK.%s' % fn_name())
FigureManagerBase.__init__(self, canvas, num)
self.window = gtk.Window()
self.set_window_title("Figure %d" % num)
if (window_icon):
try:
self.window.set_icon_from_file(window_icon)
except:
# some versions of gtk throw a glib.GError but not
# all, so I am not sure how to catch it. I am unhappy
# diong a blanket catch here, but an not sure what a
# better way is - JDH
verbose.report('Could not load matplotlib icon: %s' % sys.exc_info()[1])
self.vbox = gtk.VBox()
self.window.add(self.vbox)
self.vbox.show()
self.canvas.show()
self.vbox.pack_start(self.canvas, True, True)
self.toolbar = self._get_toolbar(canvas)
# calculate size for window
w = int (self.canvas.figure.bbox.width)
h = int (self.canvas.figure.bbox.height)
if self.toolbar is not None:
self.toolbar.show()
self.vbox.pack_end(self.toolbar, False, False)
tb_w, tb_h = self.toolbar.size_request()
h += tb_h
self.window.set_default_size (w, h)
def destroy(*args):
Gcf.destroy(num)
self.window.connect("destroy", destroy)
self.window.connect("delete_event", destroy)
if matplotlib.is_interactive():
self.window.show()
def notify_axes_change(fig):
'this will be called whenever the current axes is changed'
if self.toolbar is not None: self.toolbar.update()
self.canvas.figure.add_axobserver(notify_axes_change)
self.canvas.grab_focus()
def destroy(self, *args):
if _debug: print('FigureManagerGTK.%s' % fn_name())
if hasattr(self, 'toolbar') and self.toolbar is not None:
self.toolbar.destroy()
if hasattr(self, 'vbox'):
self.vbox.destroy()
if hasattr(self, 'window'):
self.window.destroy()
if hasattr(self, 'canvas'):
self.canvas.destroy()
self.__dict__.clear() #Is this needed? Other backends don't have it.
if Gcf.get_num_fig_managers()==0 and \
not matplotlib.is_interactive() and \
gtk.main_level() >= 1:
gtk.main_quit()
def show(self):
# show the figure window
self.window.show()
def full_screen_toggle(self):
self._full_screen_flag = not self._full_screen_flag
if self._full_screen_flag:
self.window.fullscreen()
else:
self.window.unfullscreen()
_full_screen_flag = False
def _get_toolbar(self, canvas):
# must be inited after the window, drawingArea and figure
# attrs are set
if rcParams['toolbar'] == 'toolbar2':
toolbar = NavigationToolbar2GTK (canvas, self.window)
else:
toolbar = None
return toolbar
def get_window_title(self):
return self.window.get_title()
def set_window_title(self, title):
self.window.set_title(title)
def resize(self, width, height):
'set the canvas size in pixels'
#_, _, cw, ch = self.canvas.allocation
#_, _, ww, wh = self.window.allocation
#self.window.resize (width-cw+ww, height-ch+wh)
self.window.resize(width, height)
class NavigationToolbar2GTK(NavigationToolbar2, gtk.Toolbar):
def __init__(self, canvas, window):
self.win = window
gtk.Toolbar.__init__(self)
NavigationToolbar2.__init__(self, canvas)
def set_message(self, s):
self.message.set_label(s)
def set_cursor(self, cursor):
self.canvas.window.set_cursor(cursord[cursor])
def release(self, event):
try: del self._pixmapBack
except AttributeError: pass
def dynamic_update(self):
# legacy method; new method is canvas.draw_idle
self.canvas.draw_idle()
def draw_rubberband(self, event, x0, y0, x1, y1):
'adapted from http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/189744'
drawable = self.canvas.window
if drawable is None:
return
gc = drawable.new_gc()
height = self.canvas.figure.bbox.height
y1 = height - y1
y0 = height - y0
w = abs(x1 - x0)
h = abs(y1 - y0)
rect = [int(val)for val in (min(x0,x1), min(y0, y1), w, h)]
try:
lastrect, pixmapBack = self._pixmapBack
except AttributeError:
#snap image back
if event.inaxes is None:
return
ax = event.inaxes
l,b,w,h = [int(val) for val in ax.bbox.bounds]
b = int(height)-(b+h)
axrect = l,b,w,h
self._pixmapBack = axrect, gtk.gdk.Pixmap(drawable, w, h)
self._pixmapBack[1].draw_drawable(gc, drawable, l, b, 0, 0, w, h)
else:
drawable.draw_drawable(gc, pixmapBack, 0, 0, *lastrect)
drawable.draw_rectangle(gc, False, *rect)
def _init_toolbar(self):
self.set_style(gtk.TOOLBAR_ICONS)
self._init_toolbar2_4()
def _init_toolbar2_4(self):
basedir = os.path.join(rcParams['datapath'],'images')
if not _new_tooltip_api:
self.tooltips = gtk.Tooltips()
for text, tooltip_text, image_file, callback in self.toolitems:
if text is None:
self.insert( gtk.SeparatorToolItem(), -1 )
continue
fname = os.path.join(basedir, image_file + '.png')
image = gtk.Image()
image.set_from_file(fname)
tbutton = gtk.ToolButton(image, text)
self.insert(tbutton, -1)
tbutton.connect('clicked', getattr(self, callback))
if _new_tooltip_api:
tbutton.set_tooltip_text(tooltip_text)
else:
tbutton.set_tooltip(self.tooltips, tooltip_text, 'Private')
toolitem = gtk.SeparatorToolItem()
self.insert(toolitem, -1)
# set_draw() not making separator invisible,
# bug #143692 fixed Jun 06 2004, will be in GTK+ 2.6
toolitem.set_draw(False)
toolitem.set_expand(True)
toolitem = gtk.ToolItem()
self.insert(toolitem, -1)
self.message = gtk.Label()
toolitem.add(self.message)
self.show_all()
def get_filechooser(self):
fc = FileChooserDialog(
title='Save the figure',
parent=self.win,
path=os.path.expanduser(rcParams.get('savefig.directory', '')),
filetypes=self.canvas.get_supported_filetypes(),
default_filetype=self.canvas.get_default_filetype())
fc.set_current_name(self.canvas.get_default_filename())
return fc
def save_figure(self, *args):
chooser = self.get_filechooser()
fname, format = chooser.get_filename_from_user()
chooser.destroy()
if fname:
startpath = os.path.expanduser(rcParams.get('savefig.directory', ''))
if startpath == '':
# explicitly missing key or empty str signals to use cwd
rcParams['savefig.directory'] = startpath
else:
# save dir for next time
rcParams['savefig.directory'] = os.path.dirname(six.text_type(fname))
try:
self.canvas.print_figure(fname, format=format)
except Exception as e:
error_msg_gtk(str(e), parent=self)
def configure_subplots(self, button):
toolfig = Figure(figsize=(6,3))
canvas = self._get_canvas(toolfig)
toolfig.subplots_adjust(top=0.9)
tool = SubplotTool(self.canvas.figure, toolfig)
w = int (toolfig.bbox.width)
h = int (toolfig.bbox.height)
window = gtk.Window()
if (window_icon):
try: window.set_icon_from_file(window_icon)
except:
# we presumably already logged a message on the
# failure of the main plot, don't keep reporting
pass
window.set_title("Subplot Configuration Tool")
window.set_default_size(w, h)
vbox = gtk.VBox()
window.add(vbox)
vbox.show()
canvas.show()
vbox.pack_start(canvas, True, True)
window.show()
def _get_canvas(self, fig):
return FigureCanvasGTK(fig)
class FileChooserDialog(gtk.FileChooserDialog):
"""GTK+ 2.4 file selector which presents the user with a menu
of supported image formats
"""
def __init__ (self,
title = 'Save file',
parent = None,
action = gtk.FILE_CHOOSER_ACTION_SAVE,
buttons = (gtk.STOCK_CANCEL, gtk.RESPONSE_CANCEL,
gtk.STOCK_SAVE, gtk.RESPONSE_OK),
path = None,
filetypes = [],
default_filetype = None
):
super(FileChooserDialog, self).__init__ (title, parent, action,
buttons)
super(FileChooserDialog, self).set_do_overwrite_confirmation(True)
self.set_default_response (gtk.RESPONSE_OK)
if not path: path = os.getcwd() + os.sep
# create an extra widget to list supported image formats
self.set_current_folder (path)
self.set_current_name ('image.' + default_filetype)
hbox = gtk.HBox (spacing=10)
hbox.pack_start (gtk.Label ("File Format:"), expand=False)
liststore = gtk.ListStore(gobject.TYPE_STRING)
cbox = gtk.ComboBox(liststore)
cell = gtk.CellRendererText()
cbox.pack_start(cell, True)
cbox.add_attribute(cell, 'text', 0)
hbox.pack_start (cbox)
self.filetypes = filetypes
self.sorted_filetypes = list(six.iteritems(filetypes))
self.sorted_filetypes.sort()
default = 0
for i, (ext, name) in enumerate(self.sorted_filetypes):
cbox.append_text ("%s (*.%s)" % (name, ext))
if ext == default_filetype:
default = i
cbox.set_active(default)
self.ext = default_filetype
def cb_cbox_changed (cbox, data=None):
"""File extension changed"""
head, filename = os.path.split(self.get_filename())
root, ext = os.path.splitext(filename)
ext = ext[1:]
new_ext = self.sorted_filetypes[cbox.get_active()][0]
self.ext = new_ext
if ext in self.filetypes:
filename = root + '.' + new_ext
elif ext == '':
filename = filename.rstrip('.') + '.' + new_ext
self.set_current_name (filename)
cbox.connect ("changed", cb_cbox_changed)
hbox.show_all()
self.set_extra_widget(hbox)
def get_filename_from_user (self):
while True:
filename = None
if self.run() != int(gtk.RESPONSE_OK):
break
filename = self.get_filename()
break
return filename, self.ext
class DialogLineprops(object):
"""
A GUI dialog for controlling lineprops
"""
signals = (
'on_combobox_lineprops_changed',
'on_combobox_linestyle_changed',
'on_combobox_marker_changed',
'on_colorbutton_linestyle_color_set',
'on_colorbutton_markerface_color_set',
'on_dialog_lineprops_okbutton_clicked',
'on_dialog_lineprops_cancelbutton_clicked',
)
linestyles = [ls for ls in lines.Line2D.lineStyles if ls.strip()]
linestyled = dict([ (s,i) for i,s in enumerate(linestyles)])
markers = [m for m in markers.MarkerStyle.markers if cbook.is_string_like(m)]
markerd = dict([(s,i) for i,s in enumerate(markers)])
def __init__(self, lines):
import gtk.glade
datadir = matplotlib.get_data_path()
gladefile = os.path.join(datadir, 'lineprops.glade')
if not os.path.exists(gladefile):
raise IOError('Could not find gladefile lineprops.glade in %s'%datadir)
self._inited = False
self._updateson = True # suppress updates when setting widgets manually
self.wtree = gtk.glade.XML(gladefile, 'dialog_lineprops')
self.wtree.signal_autoconnect(dict([(s, getattr(self, s)) for s in self.signals]))
self.dlg = self.wtree.get_widget('dialog_lineprops')
self.lines = lines
cbox = self.wtree.get_widget('combobox_lineprops')
cbox.set_active(0)
self.cbox_lineprops = cbox
cbox = self.wtree.get_widget('combobox_linestyles')
for ls in self.linestyles:
cbox.append_text(ls)
cbox.set_active(0)
self.cbox_linestyles = cbox
cbox = self.wtree.get_widget('combobox_markers')
for m in self.markers:
cbox.append_text(m)
cbox.set_active(0)
self.cbox_markers = cbox
self._lastcnt = 0
self._inited = True
def show(self):
'populate the combo box'
self._updateson = False
# flush the old
cbox = self.cbox_lineprops
for i in range(self._lastcnt-1,-1,-1):
cbox.remove_text(i)
# add the new
for line in self.lines:
cbox.append_text(line.get_label())
cbox.set_active(0)
self._updateson = True
self._lastcnt = len(self.lines)
self.dlg.show()
def get_active_line(self):
'get the active line'
ind = self.cbox_lineprops.get_active()
line = self.lines[ind]
return line
def get_active_linestyle(self):
        'get the active linestyle'
ind = self.cbox_linestyles.get_active()
ls = self.linestyles[ind]
return ls
def get_active_marker(self):
        'get the active marker'
ind = self.cbox_markers.get_active()
m = self.markers[ind]
return m
def _update(self):
'update the active line props from the widgets'
if not self._inited or not self._updateson: return
line = self.get_active_line()
ls = self.get_active_linestyle()
marker = self.get_active_marker()
line.set_linestyle(ls)
line.set_marker(marker)
button = self.wtree.get_widget('colorbutton_linestyle')
color = button.get_color()
r, g, b = [val/65535. for val in (color.red, color.green, color.blue)]
line.set_color((r,g,b))
button = self.wtree.get_widget('colorbutton_markerface')
color = button.get_color()
r, g, b = [val/65535. for val in (color.red, color.green, color.blue)]
line.set_markerfacecolor((r,g,b))
line.figure.canvas.draw()
def on_combobox_lineprops_changed(self, item):
'update the widgets from the active line'
if not self._inited: return
self._updateson = False
line = self.get_active_line()
ls = line.get_linestyle()
if ls is None: ls = 'None'
self.cbox_linestyles.set_active(self.linestyled[ls])
marker = line.get_marker()
if marker is None: marker = 'None'
self.cbox_markers.set_active(self.markerd[marker])
r,g,b = colorConverter.to_rgb(line.get_color())
color = gtk.gdk.Color(*[int(val*65535) for val in (r,g,b)])
button = self.wtree.get_widget('colorbutton_linestyle')
button.set_color(color)
r,g,b = colorConverter.to_rgb(line.get_markerfacecolor())
color = gtk.gdk.Color(*[int(val*65535) for val in (r,g,b)])
button = self.wtree.get_widget('colorbutton_markerface')
button.set_color(color)
self._updateson = True
def on_combobox_linestyle_changed(self, item):
self._update()
def on_combobox_marker_changed(self, item):
self._update()
def on_colorbutton_linestyle_color_set(self, button):
self._update()
def on_colorbutton_markerface_color_set(self, button):
        'called when the marker face color button is set'
self._update()
def on_dialog_lineprops_okbutton_clicked(self, button):
self._update()
self.dlg.hide()
def on_dialog_lineprops_cancelbutton_clicked(self, button):
self.dlg.hide()
# set icon used when windows are minimized
# Unfortunately, the SVG renderer (rsvg) leaks memory under earlier
# versions of pygtk, so we have to use a PNG file instead.
try:
if gtk.pygtk_version < (2, 8, 0) or sys.platform == 'win32':
icon_filename = 'matplotlib.png'
else:
icon_filename = 'matplotlib.svg'
window_icon = os.path.join(rcParams['datapath'], 'images', icon_filename)
except:
window_icon = None
verbose.report('Could not load matplotlib icon: %s' % sys.exc_info()[1])
def error_msg_gtk(msg, parent=None):
if parent is not None: # find the toplevel gtk.Window
parent = parent.get_toplevel()
if parent.flags() & gtk.TOPLEVEL == 0:
parent = None
if not is_string_like(msg):
msg = ','.join(map(str,msg))
dialog = gtk.MessageDialog(
parent = parent,
type = gtk.MESSAGE_ERROR,
buttons = gtk.BUTTONS_OK,
message_format = msg)
dialog.run()
dialog.destroy()
FigureCanvas = FigureCanvasGTK
FigureManager = FigureManagerGTK
|
the-stack_0_23735 | import _plotly_utils.basevalidators
class ZautoValidator(_plotly_utils.basevalidators.BooleanValidator):
def __init__(self, plotly_name="zauto", parent_name="contourcarpet", **kwargs):
super(ZautoValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "calc"),
implied_edits=kwargs.pop("implied_edits", {}),
role=kwargs.pop("role", "info"),
**kwargs
)
|
the-stack_0_23736 | # Lint as: python2, python3
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""TensorFlow Lite tooling helper functionality."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import enum
import functools
import pprint
import shutil
import tempfile
import time
import warnings
from absl import logging
import six
from six import PY2
from google.protobuf import text_format as _text_format
from google.protobuf.message import DecodeError
from tensorflow.core.framework import graph_pb2 as _graph_pb2
from tensorflow.lite.experimental.microfrontend.python.ops import audio_microfrontend_op # pylint: disable=unused-import
from tensorflow.lite.python import lite_constants as constants
from tensorflow.lite.python.convert import build_toco_convert_protos # pylint: disable=unused-import
from tensorflow.lite.python.convert import convert_saved_model as _convert_saved_model
from tensorflow.lite.python.convert import ConverterError # pylint: disable=unused-import
from tensorflow.lite.python.convert import mlir_quantize as _mlir_quantize
from tensorflow.lite.python.convert import mlir_sparsify as _mlir_sparsify
from tensorflow.lite.python.convert import OpsSet
from tensorflow.lite.python.convert import toco_convert # pylint: disable=unused-import
from tensorflow.lite.python.convert import toco_convert_graph_def as _toco_convert_graph_def
from tensorflow.lite.python.convert import toco_convert_impl as _toco_convert_impl
from tensorflow.lite.python.convert import toco_convert_protos # pylint: disable=unused-import
from tensorflow.lite.python.convert_phase import Component
from tensorflow.lite.python.convert_phase import convert_phase
from tensorflow.lite.python.convert_phase import SubComponent
from tensorflow.lite.python.convert_saved_model import freeze_saved_model as _freeze_saved_model
from tensorflow.lite.python.interpreter import Interpreter # pylint: disable=unused-import
from tensorflow.lite.python.interpreter import load_delegate # pylint: disable=unused-import
from tensorflow.lite.python.interpreter import OpResolverType # pylint: disable=unused-import
from tensorflow.lite.python.op_hint import convert_op_hints_to_stubs # pylint: disable=unused-import
from tensorflow.lite.python.op_hint import is_ophint_converted as _is_ophint_converted
from tensorflow.lite.python.op_hint import OpHint # pylint: disable=unused-import
from tensorflow.lite.python.optimize import calibrator as _calibrator
from tensorflow.lite.python.util import build_debug_info_func as _build_debug_info_func
from tensorflow.lite.python.util import convert_debug_info_func as _convert_debug_info_func
from tensorflow.lite.python.util import freeze_graph as _freeze_graph
from tensorflow.lite.python.util import get_debug_info as _get_debug_info
from tensorflow.lite.python.util import get_grappler_config as _get_grappler_config
from tensorflow.lite.python.util import get_tensor_name as _get_tensor_name
from tensorflow.lite.python.util import get_tensors_from_tensor_names as _get_tensors_from_tensor_names
from tensorflow.lite.python.util import get_tf_type_name as _get_tf_type_name
from tensorflow.lite.python.util import is_frozen_graph as _is_frozen_graph
from tensorflow.lite.python.util import model_input_signature as _model_input_signature
from tensorflow.lite.python.util import modify_model_io_type as _modify_model_io_type
from tensorflow.lite.python.util import run_graph_optimizations as _run_graph_optimizations
from tensorflow.lite.python.util import set_tensor_shapes as _set_tensor_shapes
from tensorflow.lite.python.util import trace_model_call as _trace_model_call
from tensorflow.python import saved_model as _saved_model
from tensorflow.python.client import session as _session
from tensorflow.python.eager import context
from tensorflow.python.eager import def_function as _def_function
from tensorflow.python.eager import function as _function
from tensorflow.python.framework import convert_to_constants as _convert_to_constants
from tensorflow.python.framework import dtypes as _dtypes
from tensorflow.python.framework import ops as _ops
from tensorflow.python.framework.errors_impl import NotFoundError as _NotFoundError
from tensorflow.python.framework.importer import import_graph_def as _import_graph_def
from tensorflow.python.platform import gfile
from tensorflow.python.saved_model import loader_impl as _loader_impl
from tensorflow.python.saved_model import save_options as _save_options
from tensorflow.python.saved_model import signature_constants as _signature_constants
from tensorflow.python.saved_model import tag_constants as _tag_constants
from tensorflow.python.saved_model.load import load as _load
from tensorflow.python.saved_model.loader_impl import parse_saved_model_with_debug_info as _parse_saved_model_with_debug_info
from tensorflow.python.util import deprecation as _deprecation
from tensorflow.python.util import keras_deps
from tensorflow.python.util.tf_export import tf_export as _tf_export
# pylint: disable=g-import-not-at-top
try:
from tensorflow.lite.python import metrics_portable as metrics
except ImportError:
from tensorflow.lite.python import metrics_nonportable as metrics
# pylint: enable=g-import-not-at-top
@_tf_export("lite.Optimize")
class Optimize(enum.Enum):
"""Enum defining the optimizations to apply when generating a tflite model.
DEFAULT
Default optimization strategy that quantizes model weights. Enhanced
optimizations are gained by providing a representative dataset that
quantizes biases and activations as well.
Converter will do its best to reduce size and latency, while minimizing
the loss in accuracy.
OPTIMIZE_FOR_SIZE
Deprecated. Does the same as DEFAULT.
OPTIMIZE_FOR_LATENCY
Deprecated. Does the same as DEFAULT.
EXPERIMENTAL_SPARSITY
Experimental flag, subject to change.
Enable optimization by taking advantage of the sparse model weights
trained with pruning.
The converter will inspect the sparsity pattern of the model weights and
do its best to improve size and latency.
The flag can be used alone to optimize float32 models with sparse weights.
It can also be used together with the DEFAULT optimization mode to
optimize quantized models with sparse weights.
"""
# Default optimization strategy that quantizes model weights. Enhanced
# optimizations are gained by providing a representative dataset that
# quantizes biases and activations as well.
# Converter will do its best to reduce size and latency, while minimizing
# the loss in accuracy.
DEFAULT = "DEFAULT"
# Deprecated. Does the same as DEFAULT.
OPTIMIZE_FOR_SIZE = "OPTIMIZE_FOR_SIZE"
# Deprecated. Does the same as DEFAULT.
OPTIMIZE_FOR_LATENCY = "OPTIMIZE_FOR_LATENCY"
# Experimental flag, subject to change.
# Enable optimization by taking advantage of the sparse model weights trained
# with pruning.
#
# The converter will inspect the sparsity pattern of the model weights and do
# its best to improve size and latency.
# The flag can be used alone to optimize float32 models with sparse weights.
# It can also be used together with the DEFAULT optimization mode to optimize
# quantized models with sparse weights.
# TODO(b/161560631): Add log message when this optimization is applied.
EXPERIMENTAL_SPARSITY = "EXPERIMENTAL_SPARSITY"
def __str__(self):
return str(self.value)
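# Illustrative usage sketch (not part of the original module): enabling the default
# weight quantization on a converter. The saved-model path is hypothetical and
# `TFLiteConverter` refers to the public tf.lite API backed by this module.
#
#   converter = tf.lite.TFLiteConverter.from_saved_model('/tmp/my_saved_model')
#   converter.optimizations = [tf.lite.Optimize.DEFAULT]
#   tflite_model = converter.convert()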
@_tf_export("lite.RepresentativeDataset")
class RepresentativeDataset(object):
"""Representative dataset used to optimize the model.
This is a generator function that provides a small dataset to calibrate or
estimate the range, i.e, (min, max) of all floating-point arrays in the model
(such as model input, activation outputs of intermediate layers, and model
output) for quantization. Usually, this is a small subset of a few hundred
samples randomly chosen, in no particular order, from the training or
evaluation dataset.
"""
def __init__(self, input_gen):
"""Creates a representative dataset.
Args:
input_gen: A generator function that generates input samples for the
model and has the same order, type and shape as the inputs to the model.
Usually, this is a small subset of a few hundred samples randomly
chosen, in no particular order, from the training or evaluation dataset.
"""
self.input_gen = input_gen
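# Illustrative sketch (not part of the original module): a representative dataset
# generator built from an in-memory calibration array. The array name, dtype, and
# sample count are hypothetical.
#
#   def representative_gen():
#     for sample in calibration_samples[:100]:
#       # Each yielded element is a list with one entry per model input.
#       yield [np.expand_dims(sample, axis=0).astype(np.float32)]
#
#   converter.representative_dataset = tf.lite.RepresentativeDataset(representative_gen)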
@_tf_export("lite.TargetSpec")
class TargetSpec(object):
"""Specification of target device used to optimize the model.
Attributes:
supported_ops: Experimental flag, subject to change. Set of `tf.lite.OpsSet`
options, where each option represents a set of operators supported by the
      target device. (default {tf.lite.OpsSet.TFLITE_BUILTINS})
supported_types: Set of `tf.dtypes.DType` data types supported on the target
device. If initialized, optimization might be driven by the smallest type
in this set. (default set())
experimental_select_user_tf_ops: Experimental flag, subject to change. Set
of user's TensorFlow operators' names that are required in the TensorFlow
Lite runtime. These ops will be exported as select TensorFlow ops in the
model (in conjunction with the tf.lite.OpsSet.SELECT_TF_OPS flag). This is
an advanced feature that should only be used if the client is using TF ops
that may not be linked in by default with the TF ops that are provided
when using the SELECT_TF_OPS path. The client is responsible for linking
these ops into the target runtime.
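  Example usage (an illustrative sketch; `converter` is assumed to be an
  already-created `tf.lite.TFLiteConverter`):

  ```python
  # Restrict the model to int8 TFLite builtin ops and int8 tensor types.
  converter.target_spec.supported_ops = {tf.lite.OpsSet.TFLITE_BUILTINS_INT8}
  converter.target_spec.supported_types = {tf.int8}
  # Full-integer conversion with these settings also requires a
  # representative dataset to be provided on the converter.
  ```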
"""
def __init__(self,
supported_ops=None,
supported_types=None,
experimental_select_user_tf_ops=None):
if supported_ops is None:
supported_ops = {OpsSet.TFLITE_BUILTINS}
self.supported_ops = supported_ops
if supported_types is None:
supported_types = set()
self.supported_types = supported_types
if experimental_select_user_tf_ops is None:
experimental_select_user_tf_ops = set()
self.experimental_select_user_tf_ops = experimental_select_user_tf_ops
self._experimental_custom_op_registerers = []
# Hint for the supported accumulation type used for inference. Typically
# used for fp16 post-training quantization, where some models can use fp16
# accumulators instead of the typical fp32 type.
# TODO(b/188185962): Provide full API and authoring support for
# reduced precision accumulation types.
self._experimental_supported_accumulation_type = None
class QuantizationMode(object):
"""QuantizationMode determines the quantization type from user options."""
def __init__(self, optimizations, target_spec, representative_dataset,
graph_def):
self._optimizations = optimizations
for deprecated_optimization in [
Optimize.OPTIMIZE_FOR_SIZE, Optimize.OPTIMIZE_FOR_LATENCY
]:
if deprecated_optimization in self._optimizations:
logging.warning(
"Optimization option %s is deprecated, please use optimizations="
"[Optimize.DEFAULT] instead.", deprecated_optimization)
self._target_spec = target_spec
self._representative_dataset = representative_dataset
self._graph_def = graph_def
self._validate_int8_required()
# TODO(b/162537905): Refactor the following quantization functions -
# re-organize and refactor for better readability.
def post_training_int8_no_float(self):
return (self.any_optimization_enabled() and
self._is_int8_target_required() and
not self._is_int16x8_target_required() and
not self.is_allow_float() and
self._representative_dataset is not None)
def post_training_int8_allow_float(self):
return (self.any_optimization_enabled() and
not self._is_int16x8_target_required() and
self._representative_dataset is not None and
self._smallest_supported_type() == _dtypes.int8)
def is_post_training_integer_quantize_8(self):
return (self.post_training_int8_no_float() or
self.post_training_int8_allow_float())
def is_post_training_integer_quantize_16x8(self):
return (self.post_training_int16x8_no_float() or
self.post_training_int16x8_allow_float())
def is_post_training_integer_quantize(self):
return (self.is_post_training_integer_quantize_8() or
self.is_post_training_integer_quantize_16x8())
def is_integer_quantize(self):
return (self.is_post_training_integer_quantize() or
self.is_training_time_int8_allow_float())
def is_training_time_int8_allow_float(self):
return (self.any_optimization_enabled() and
self.contains_training_quant_op())
def is_bfloat16_inference_allowed(self):
return (self.any_optimization_enabled() and
self._smallest_supported_type().size == 2 and
_dtypes.bfloat16 in self._target_spec.supported_types)
def post_training_int16x8_no_float(self):
return (self.any_optimization_enabled() and
not self._is_int8_target_required() and
self._is_int16x8_target_required() and
not self.is_allow_float() and
self._representative_dataset is not None)
def post_training_int16x8_allow_float(self):
return (self.any_optimization_enabled() and
self._is_int16x8_target_required() and
self.is_allow_float())
def post_training_dynamic_range_int8(self):
    # Post-training dynamic range quantization is only enabled if post-training
    # int8 quantization and training-time quantization were not enabled.
return (self.any_optimization_enabled() and
self._representative_dataset is None and
not self.contains_training_quant_op() and
self._smallest_supported_type() == _dtypes.int8)
def post_training_fp16(self):
return (self.any_optimization_enabled() and
self._smallest_supported_type().size == 2 and
_dtypes.float16 in self._target_spec.supported_types)
def fp32_execution(self):
"""If none of the above are true."""
return not (self.is_integer_quantize() or
self.post_training_dynamic_range_int8() or
self.post_training_fp16())
def activations_type(self):
if self.is_integer_quantize():
if self._is_int16x8_target_required():
return _dtypes.int16
else:
return _dtypes.int8
else:
return _dtypes.float32
def converter_flags(self, inference_ty=None, inference_input_ty=None):
"""Flags to the converter."""
if self.is_integer_quantize():
return {
"inference_type": (
inference_ty if inference_ty else self.activations_type()),
"inference_input_type": _dtypes.float32,
"post_training_quantize": False, # disable dynamic range quantization
"quantize_to_float16": False # disable float16 quantization
}
elif self.post_training_dynamic_range_int8():
return {
"inference_type": _dtypes.float32,
"inference_input_type": _dtypes.float32,
"post_training_quantize": True, # enable dynamic range quantization
"quantize_to_float16": False # disable float16 quantization
}
elif self.post_training_fp16():
return {
"inference_type": _dtypes.float32,
"inference_input_type": _dtypes.float32,
"post_training_quantize": True,
"quantize_to_float16": True, # enable float16 quantization
"accumulation_type":
self._target_spec._experimental_supported_accumulation_type,
"allow_bfloat16":
self.is_bfloat16_inference_allowed()
}
else:
# Note this might still trigger (uint8) quantization to be compatible with
# TOCO.
return {
"inference_type": inference_ty if inference_ty else _dtypes.float32,
"inference_input_type": inference_input_ty,
"post_training_quantize": False, # enable dynamic range quantization
"quantize_to_float16": False, # disable float16 quantization
"allow_bfloat16": self.is_bfloat16_inference_allowed()
}
# Below are helpers for the above functions.
def _validate_int8_required(self):
"""Int8 mode requires certain parameters to exist and be compatible."""
if not self._is_int8_target_required():
return
if self._target_spec.supported_types and (self._smallest_supported_type() !=
_dtypes.int8):
raise ValueError("TFLITE_BUILTINS_INT8 requires smallest supported "
"type to be INT8.")
if self._representative_dataset:
if not isinstance(self._representative_dataset, RepresentativeDataset):
self._representative_dataset = RepresentativeDataset(
self._representative_dataset)
if self._representative_dataset.input_gen is None:
raise ValueError(
"Provide an input generator for representative_dataset")
else:
# TODO(b/162537905): Relax this check for QAT.
raise ValueError("representative_dataset is required when specifying "
"TFLITE_BUILTINS_INT8 or INT8 supported types.")
def _is_int8_target_required(self):
return (OpsSet.TFLITE_BUILTINS_INT8 in set(
self._target_spec.supported_ops)) or (set(
self._target_spec.supported_types) == set([_dtypes.int8]))
def _is_int16x8_target_required(self):
return (OpsSet.EXPERIMENTAL_TFLITE_BUILTINS_ACTIVATIONS_INT16_WEIGHTS_INT8
in set(self._target_spec.supported_ops))
def is_allow_float(self):
return (OpsSet.TFLITE_BUILTINS in set(
self._target_spec.supported_ops)) or (OpsSet.SELECT_TF_OPS in set(
self._target_spec.supported_ops))
def any_optimization_enabled(self):
return bool(
set(self._optimizations).intersection([
Optimize.OPTIMIZE_FOR_LATENCY, Optimize.OPTIMIZE_FOR_SIZE,
Optimize.DEFAULT
]))
def _smallest_supported_type(self):
if self._target_spec.supported_types:
return min(self._target_spec.supported_types, key=lambda x: x.size)
else:
# The default smallest supported type is INT8.
return _dtypes.int8
def contains_training_quant_op(self):
"""Checks if the graph contains any training-time quantization ops."""
training_quant_ops = frozenset({
"FakeQuantWithMinMaxVars", "FakeQuantWithMinMaxVarsPerChannel",
"FakeQuantWithMinMaxArgs", "FakeQuantWithMinMaxArgsPerChannel",
"QuantizeAndDequantizeV2", "QuantizeAndDequantizeV3"
})
if self._graph_def:
for node_def in self._graph_def.node:
if node_def.op in training_quant_ops:
return True
for function in self._graph_def.library.function:
for node_def in function.node_def:
if node_def.op in training_quant_ops:
return True
return False
class TFLiteConverterBase(object):
"""Converter subclass to share functionality between V1 and V2 converters."""
def __init__(self):
self.optimizations = set()
self.representative_dataset = None
self.target_spec = TargetSpec()
self.allow_custom_ops = False
self.experimental_new_converter = True
self.experimental_new_quantizer = True
self.experimental_enable_resource_variables = False
self._experimental_new_quantizer = None
self._experimental_calibrate_only = False
self._experimental_sparsify_model = False
self._experimental_disable_per_channel = False
    self._debug_info = None  # contains the stack traces of all the original
    # nodes in the `GraphDef` passed to the converter.
self.saved_model_dir = None
self._saved_model_tags = None
self._saved_model_version = 0
self._saved_model_exported_names = []
self._tflite_metrics = metrics.TFLiteConverterMetrics()
self._collected_converter_params = {}
self._experimental_disable_batchmatmul_unfold = False
self._experimental_lower_tensor_list_ops = True
def _grappler_config(self, optimizers=None):
"""Creates a tf.compat.v1.ConfigProto for configuring Grappler.
Args:
optimizers: List of strings that represents the list of optimizers.
Returns:
tf.ConfigProto.
"""
if not optimizers:
optimizers = []
# MLIR converter will take care of constant folding instead of grappler.
if not self.experimental_new_converter:
optimizers.append("constfold")
is_only_flex_enabled = (
set([OpsSet.SELECT_TF_OPS]) == set(self.target_spec.supported_ops))
if is_only_flex_enabled:
      # The layout optimizer turns NHWC to NCHW. This provides performance
# optimizations when Flex mode is enabled. However, this is not compatible
# with builtin ops.
optimizers.append("layout")
return _get_grappler_config(optimizers)
def _quantize(self, result, input_type, output_type, activations_type,
allow_float):
"""Quantize the model."""
# pylint: disable=protected-access
custom_op_registerers_by_name = [
x for x in self.target_spec._experimental_custom_op_registerers
if isinstance(x, str)
]
custom_op_registerers_by_func = [
x for x in self.target_spec._experimental_custom_op_registerers
if not isinstance(x, str)
]
# pylint: enable=protected-access
if not isinstance(self.representative_dataset, RepresentativeDataset):
self.representative_dataset = RepresentativeDataset(
self.representative_dataset)
# Add intermediate tensors to the model if needed.
result = _calibrator.add_intermediate_tensors(result)
calibrate_quantize = _calibrator.Calibrator(result,
custom_op_registerers_by_name,
custom_op_registerers_by_func)
if self._experimental_calibrate_only or self.experimental_new_quantizer:
calibrated = calibrate_quantize.calibrate(
self.representative_dataset.input_gen)
if self._experimental_calibrate_only:
return calibrated
elif self.experimental_new_quantizer and (
activations_type != _dtypes.int16):
# TODO(b/175659372): remove the activations_type restriction and enable
# it for all the activation types.
return _mlir_quantize(
calibrated,
self._experimental_disable_per_channel,
input_data_type=input_type,
output_data_type=output_type)
else:
return calibrate_quantize.calibrate_and_quantize(
self.representative_dataset.input_gen, input_type, output_type,
allow_float, activations_type,
disable_per_channel=self._experimental_disable_per_channel)
def _is_unknown_shapes_allowed(self):
# Unknown dimensions are only allowed with the new converter.
return self.experimental_new_converter
def _get_base_converter_args(self):
"""Returns the base converter args.
Returns:
{key str: val}
"""
args = {
"input_format": constants.TENSORFLOW_GRAPHDEF,
"allow_custom_ops": self.allow_custom_ops,
"debug_info": self._debug_info,
"target_ops": self.target_spec.supported_ops,
"enable_mlir_converter": self.experimental_new_converter,
"select_user_tf_ops": self.target_spec.experimental_select_user_tf_ops,
"unfold_batchmatmul": not self._experimental_disable_batchmatmul_unfold,
"lower_tensor_list_ops": self._experimental_lower_tensor_list_ops,
}
if self.saved_model_dir:
args.update({
"saved_model_dir": self.saved_model_dir,
"saved_model_version": self._saved_model_version,
"saved_model_tags": self._saved_model_tags,
"saved_model_exported_names": self._saved_model_exported_names,
})
return args
def _contains_function_with_implements_attr(self, saved_model_proto):
meta_graph = saved_model_proto.meta_graphs[0]
for function in meta_graph.graph_def.library.function:
if function.attr.get("_implements", None) or function.attr.get(
"api_implements", None):
return True
return False
def _parse_saved_model_args(self, always_enable_saved_model_import=False):
"""Parses SavedModel arguments from the given Keras/RNN SavedModel.
Args:
      always_enable_saved_model_import: Bool. When true, it enables the MLIR
        SavedModel import path regardless of the other conditions.
"""
if not self.experimental_new_converter:
self.saved_model_dir = None
return
if self.saved_model_dir:
try:
saved_model_proto, _ = (
_parse_saved_model_with_debug_info(self.saved_model_dir))
except OSError:
# If it fails to read the given saved model, it will fall back to the
# frozen graph def path.
self.saved_model_dir = None
return
if (not always_enable_saved_model_import and
not self._contains_function_with_implements_attr(saved_model_proto)):
self.saved_model_dir = None
return
if not self._saved_model_exported_names:
self._saved_model_exported_names = []
self._saved_model_version = saved_model_proto.saved_model_schema_version
if self._saved_model_version == 0:
self.saved_model_dir = None
logging.warning("SavedModel schema version is zero.")
return
if self._saved_model_version not in [1, 2]:
raise ValueError("SavedModel file format({0}) is not supported".format(
self._saved_model_version))
def _sparsify_model(self):
return Optimize.EXPERIMENTAL_SPARSITY in self.optimizations
def _validate_experimental_new_quantizer_flag(self):
if self._experimental_new_quantizer is not None:
raise ValueError("Please use 'experimental_new_quantizer' instead.")
def _increase_conversion_attempt_metric(self):
self._tflite_metrics.increase_counter_converter_attempt()
def _increase_conversion_success_metric(self):
self._tflite_metrics.increase_counter_converter_success()
def _save_conversion_params_metric(self,
graph_def=None,
inference_type=None,
inference_input_type=None):
"""Set conversion parameter metrics."""
converter_kwargs = self._collected_converter_params
converter_kwargs.update(self._get_base_converter_args())
# Optimization parameters.
quant_mode = QuantizationMode(self.optimizations, self.target_spec,
self.representative_dataset, graph_def)
converter_kwargs.update({
"optimization_default":
quant_mode.any_optimization_enabled(),
"optimization_post_training_dynamic_range":
quant_mode.post_training_dynamic_range_int8(),
"optimization_post_training_float16":
quant_mode.post_training_fp16(),
"optimization_post_training_integer_quantize":
quant_mode.is_post_training_integer_quantize(),
"optimization_qat":
quant_mode.is_training_time_int8_allow_float(),
"optimization_sparsify":
self._sparsify_model(),
"activations_type":
quant_mode.activations_type()
})
converter_kwargs.update(
quant_mode.converter_flags(inference_type, inference_input_type))
# pylint: disable=protected-access
if self.target_spec._experimental_supported_accumulation_type:
converter_kwargs.update({
"accumulation_type":
self.target_spec._experimental_supported_accumulation_type
})
# pylint: enable=protected-access
def format_element(elem):
if isinstance(elem, enum.Enum):
return str(elem.value)
return pprint.pformat(elem)
def format_param(param):
if isinstance(param, (list, tuple, set)):
if not param:
return "None" # Return None if empty.
string_list = [format_element(x) for x in param]
return ",".join(sorted(string_list))
return format_element(param)
for key, value in converter_kwargs.items():
self._tflite_metrics.set_converter_param(key, format_param(value))
self._tflite_metrics.set_export_required()
def _set_conversion_latency_metric(self, value):
self._tflite_metrics.set_converter_latency(value)
@convert_phase(Component.OPTIMIZE_TFLITE_MODEL)
def _optimize_tflite_model(self, model, quant_mode, quant_io=True):
"""Apply optimizations on a TFLite model."""
if quant_mode.is_integer_quantize():
in_type, out_type = self.inference_input_type, self.inference_output_type
if quant_mode.is_post_training_integer_quantize():
q_in_type = in_type if in_type and quant_io else _dtypes.float32
q_out_type = out_type if out_type and quant_io else _dtypes.float32
q_activations_type = quant_mode.activations_type()
q_allow_float = quant_mode.is_allow_float()
model = self._quantize(
model, q_in_type, q_out_type, q_activations_type, q_allow_float)
m_in_type = in_type if in_type else _dtypes.float32
m_out_type = out_type if out_type else _dtypes.float32
model = _modify_model_io_type(model, m_in_type, m_out_type)
if self._sparsify_model():
model = _mlir_sparsify(model)
return model
def _convert_and_export_metrics(self, convert_func, *args, **kwargs):
"""Wraps around convert function to export metrics.
Args:
convert_func: The convert function to wrap.
*args: Positional arguments of the convert function.
**kwargs: The keyword arguments of the convert function.
Returns:
The decorator to wrap the convert function.
"""
self._increase_conversion_attempt_metric()
self._save_conversion_params_metric()
start_time = time.process_time()
result = convert_func(self, *args, **kwargs)
elapsed_time_ms = (time.process_time() - start_time) * 1000
if result:
self._increase_conversion_success_metric()
self._set_conversion_latency_metric(round(elapsed_time_ms))
self._tflite_metrics.export_metrics()
return result
def _export_metrics(convert_func):
"""The decorator around convert function to export metrics."""
@functools.wraps(convert_func)
def wrapper(self, *args, **kwargs):
# pylint: disable=protected-access
return self._convert_and_export_metrics(convert_func, *args, **kwargs)
# pylint: enable=protected-access
return wrapper
class TFLiteConverterBaseV2(TFLiteConverterBase):
"""Converter subclass to share functionality between V2 converters."""
def __init__(self):
"""Constructor for TFLiteConverter."""
super(TFLiteConverterBaseV2, self).__init__()
self.inference_input_type = _dtypes.float32
self.inference_output_type = _dtypes.float32
self._collected_converter_params.update({"api_version": 2})
def _validate_inference_input_output_types(self, quant_mode):
"""Validate inference_input_type and inference_output_type flags."""
default_types = [_dtypes.float32]
# We support integer input/output for integer quantized models only.
if quant_mode.is_integer_quantize():
if quant_mode.is_post_training_integer_quantize_16x8():
all_types = default_types + [_dtypes.int16]
else:
all_types = default_types + [_dtypes.int8, _dtypes.uint8]
if (self.inference_input_type not in all_types or
self.inference_output_type not in all_types):
all_types_names = ["tf." + t.name for t in all_types]
raise ValueError("The inference_input_type and inference_output_type "
"must be in {}.".format(all_types_names))
elif (self.inference_input_type not in default_types or
self.inference_output_type not in default_types):
raise ValueError("The inference_input_type and inference_output_type "
"must be tf.float32.")
@convert_phase(Component.PREPARE_TF_MODEL, SubComponent.LOAD_SAVED_MODEL)
def _load_saved_model(self, saved_model_dir, saved_model_tags):
"""Load graph_def from saved model with the default serving signature key.
Args:
saved_model_dir: Directory of the SavedModel.
saved_model_tags: Set of tags identifying the MetaGraphDef within the
SavedModel to analyze.
Returns:
graph_def: The loaded GraphDef.
input_tensors: List of input tensors.
output_tensors: List of output tensors.
"""
graph = _ops.Graph()
saved_model = _loader_impl.SavedModelLoader(saved_model_dir)
saved_model.load_graph(graph, tags=saved_model_tags)
meta_graph = saved_model.get_meta_graph_def_from_tags(saved_model_tags)
graph_def = meta_graph.graph_def
signature_def = meta_graph.signature_def[
_signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY]
input_tensors = [
graph.get_tensor_by_name(signature_def.inputs[key].name)
for key in signature_def.inputs
]
output_tensors = [
graph.get_tensor_by_name(signature_def.outputs[key].name)
for key in signature_def.outputs
]
return graph_def, input_tensors, output_tensors
@convert_phase(Component.PREPARE_TF_MODEL, SubComponent.VALIDATE_INPUTS)
def _validate_inputs(self, graph_def, input_tensors):
"""Validate the input parameters.
Args:
graph_def: The TensorFlow GraphDef.
input_tensors: List of input tensors.
    Raises:
ValueError:
Input shape is not specified.
Invalid quantization parameters.
"""
# Update conversion params with graph_def.
self._save_conversion_params_metric(graph_def)
self._quant_mode = QuantizationMode(self.optimizations, self.target_spec,
self.representative_dataset, graph_def)
self._validate_inference_input_output_types(self._quant_mode)
self._validate_experimental_new_quantizer_flag()
if not self._is_unknown_shapes_allowed():
# Checks dimensions in input tensor.
for tensor in input_tensors:
# Note that shape_list might be empty for scalar shapes.
shape_list = tensor.shape.as_list()
if None in shape_list[1:]:
raise ValueError(
"None is only supported in the 1st dimension. Tensor '{0}' has "
"invalid shape '{1}'.".format(
_get_tensor_name(tensor), shape_list))
elif shape_list and shape_list[0] is None:
# Set the batch size to 1 if undefined.
shape = tensor.shape.as_list()
shape[0] = 1
tensor.set_shape(shape)
if (self._trackable_obj is None or
not hasattr(self._trackable_obj, "graph_debug_info")):
self._debug_info = _get_debug_info(
_build_debug_info_func(self._funcs[0].graph), graph_def)
else:
self._debug_info = _get_debug_info(
_convert_debug_info_func(self._trackable_obj.graph_debug_info),
graph_def)
@convert_phase(Component.PREPARE_TF_MODEL, SubComponent.OPTIMIZE_TF_MODEL)
def _optimize_tf_model(self, graph_def, input_tensors, output_tensors,
frozen_func):
"""Run a Grappler pass to optimize the TensorFlow graph.
Args:
graph_def: Frozen GraphDef to be optimized.
input_tensors: List of input tensors.
output_tensors: List of output tensors.
frozen_func: TensorFlow Graph.
Returns:
The optimized TensorFlow graph.
"""
grappler_config = self._grappler_config()
    # Skip running Grappler when there are no optimizers to run. Otherwise,
    # Grappler will run with the default optimizer set, which can lead to
    # unexpected behavior.
if grappler_config.graph_options.rewrite_options.optimizers:
graph_def = _run_graph_optimizations(
graph_def,
input_tensors,
output_tensors,
config=grappler_config,
graph=frozen_func.graph)
return graph_def
def convert(self, graph_def, input_tensors, output_tensors):
"""Converts a TensorFlow GraphDef based on instance variables.
Args:
graph_def: Frozen TensorFlow GraphDef.
input_tensors: List of input tensors.
output_tensors: List of output tensors.
Returns:
The converted data in serialized format.
Raises:
ValueError:
        No concrete function is specified.
Multiple concrete functions are specified.
Input shape is not specified.
Invalid quantization parameters.
"""
self._validate_inputs(graph_def, input_tensors)
converter_kwargs = self._get_base_converter_args()
converter_kwargs.update(self._quant_mode.converter_flags())
if not self.experimental_new_converter:
logging.warning(
"Please consider switching to the new converter by setting "
"experimental_new_converter=True. "
"The old converter (TOCO) is deprecated.")
else:
logging.info("Using new converter: If you encounter a problem "
"please file a bug. You can opt-out "
"by setting experimental_new_converter=False")
# Converts model.
result = _toco_convert_impl(
input_data=graph_def,
input_tensors=input_tensors,
output_tensors=output_tensors,
**converter_kwargs)
return self._optimize_tflite_model(
result, self._quant_mode, quant_io=self.experimental_new_quantizer)
class TFLiteSavedModelConverterV2(TFLiteConverterBaseV2):
"""Converts the given SavedModel into TensorFlow Lite model.
Attributes:
saved_model_dir: Directory of the SavedModel.
"""
def __init__(self,
saved_model_dir,
saved_model_tags=None,
saved_model_exported_names=None,
trackable_obj=None):
"""Constructor for TFLiteConverter.
Args:
saved_model_dir: Directory of the SavedModel.
saved_model_tags: Set of tags identifying the MetaGraphDef within the
SavedModel to analyze. All tags in the tag set must be present. (default
{tf.saved_model.SERVING}).
saved_model_exported_names: Names to be exported when the saved model
import path is on.
trackable_obj: tf.AutoTrackable object associated with `funcs`. A
reference to this object needs to be maintained so that Variables do not
get garbage collected since functions have a weak reference to
Variables. This is only required when the tf.AutoTrackable object is not
maintained by the user (e.g. `from_saved_model`).
"""
super(TFLiteSavedModelConverterV2, self).__init__()
self.saved_model_dir = saved_model_dir
self._saved_model_tags = saved_model_tags
self._saved_model_exported_names = saved_model_exported_names
self._trackable_obj = trackable_obj
self._parse_saved_model_args(always_enable_saved_model_import=True)
@_export_metrics
def convert(self):
"""Converts a TensorFlow GraphDef based on instance variables.
Returns:
The converted data in serialized format.
Raises:
ValueError:
        No concrete function is specified.
Multiple concrete functions are specified.
Input shape is not specified.
Invalid quantization parameters.
"""
graph_def, input_tensors, output_tensors = self._load_saved_model(
self.saved_model_dir, self._saved_model_tags)
# If we can't use saved model importer, then fallback
# to frozen graph conversion path.
if self.saved_model_dir is None or not self.experimental_new_converter:
graph_def, _, _, _ = _freeze_saved_model(
self.saved_model_dir, None, None, None, self._saved_model_tags,
_signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY)
# We make sure to clear the saved_model_dir as there is some
# legacy code down in the caller that checks this.
# TODO(b/162537905): Clean these indirect dependencies.
self.saved_model_dir = None
return super(TFLiteSavedModelConverterV2,
self).convert(graph_def, input_tensors, output_tensors)
if self._trackable_obj is None:
self._debug_info = _get_debug_info(
_build_debug_info_func(self._funcs[0].graph), graph_def)
else:
self._debug_info = _get_debug_info(
_convert_debug_info_func(self._trackable_obj.graph_debug_info),
graph_def)
# Update conversion params with graph_def.
self._save_conversion_params_metric(graph_def)
# Get quantization options and do some sanity checks.
quant_mode = QuantizationMode(self.optimizations, self.target_spec,
self.representative_dataset, graph_def)
self._validate_inference_input_output_types(quant_mode)
converter_kwargs = {
"enable_tflite_resource_variables":
self.experimental_enable_resource_variables
}
converter_kwargs.update(self._get_base_converter_args())
converter_kwargs.update(quant_mode.converter_flags())
result = _convert_saved_model(**converter_kwargs)
return self._optimize_tflite_model(
result, quant_mode, quant_io=self.experimental_new_quantizer)
class TFLiteKerasModelConverterV2(TFLiteConverterBaseV2):
"""Converts the given Keras model into TensorFlow Lite model."""
def __init__(self, keras_model, trackable_obj=None):
"""Constructor for TFLiteConverter.
Args:
keras_model: tf.Keras.Model.
trackable_obj: tf.AutoTrackable object associated with `funcs`. A
reference to this object needs to be maintained so that Variables do not
get garbage collected since functions have a weak reference to
Variables. This is only required when the tf.AutoTrackable object is not
maintained by the user (e.g. `from_saved_model`).
"""
super(TFLiteKerasModelConverterV2, self).__init__()
self._keras_model = keras_model
self._trackable_obj = trackable_obj
self.experimental_lower_to_saved_model = False
@convert_phase(Component.PREPARE_TF_MODEL,
SubComponent.CONVERT_KERAS_TO_SAVED_MODEL)
def _convert_keras_to_saved_model(self, output_dir):
"""Save Keras model to the SavedModel format.
Args:
output_dir: The output directory to save the SavedModel.
Returns:
graph_def: The frozen GraphDef.
input_tensors: List of input tensors.
output_tensors: List of output tensors.
"""
try:
_saved_model.save(
self._keras_model,
output_dir,
options=_save_options.SaveOptions(save_debug_info=True))
except Exception: # pylint: disable=broad-except
      # When saving the given Keras model to a SavedModel fails, fall back to
      # the original Keras model conversion pipeline.
return None, None, None
self.saved_model_dir = output_dir
self._saved_model_tags = set([_tag_constants.SERVING])
self._saved_model_exported_names = [
_signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY
]
self._parse_saved_model_args(
always_enable_saved_model_import=self.experimental_lower_to_saved_model)
if self.saved_model_dir:
graph_def, input_tensors, output_tensors = self._load_saved_model(
self.saved_model_dir, self._saved_model_tags)
self._trackable_obj = _load(self.saved_model_dir, self._saved_model_tags)
return graph_def, input_tensors, output_tensors
return None, None, None
@convert_phase(Component.PREPARE_TF_MODEL, SubComponent.FREEZE_KERAS_MODEL)
def _freeze_keras_model(self):
"""Freeze Keras model to frozen graph.
Returns:
graph_def: The frozen GraphDef.
input_tensors: List of input tensors.
output_tensors: List of output tensors.
frozen_func: The frozen ConcreteFunction.
"""
input_signature = None
# If the model's call is not a `tf.function`, then we need to first get its
# input signature from `model_input_signature` method. We can't directly
# call `trace_model_call` because otherwise the batch dimension is set
# to None.
# Once we have better support for dynamic shapes, we can remove this.
if not isinstance(self._keras_model.call, _def_function.Function):
      # Passing `keep_original_batch_size=True` ensures that we get an input
      # signature including the batch dimension specified by the user.
# TODO(b/169898786): Use the Keras public API when TFLite moves out of TF
input_signature = _model_input_signature(
self._keras_model, keep_original_batch_size=True)
# TODO(b/169898786): Use the Keras public API when TFLite moves out of TF
func = _trace_model_call(self._keras_model, input_signature)
concrete_func = func.get_concrete_function()
self._funcs = [concrete_func]
frozen_func, graph_def = (
_convert_to_constants.convert_variables_to_constants_v2_as_graph(
self._funcs[0], lower_control_flow=False))
input_tensors = [
tensor for tensor in frozen_func.inputs
if tensor.dtype != _dtypes.resource
]
output_tensors = frozen_func.outputs
return graph_def, input_tensors, output_tensors, frozen_func
def _convert_as_saved_model(self):
"""Converts a Keras model as a saved model.
Returns:
The converted data in serialized format.
"""
temp_dir = tempfile.mkdtemp()
try:
graph_def, input_tensors, output_tensors = (
self._convert_keras_to_saved_model(temp_dir))
if self.saved_model_dir:
return super(TFLiteKerasModelConverterV2,
self).convert(graph_def, input_tensors, output_tensors)
finally:
shutil.rmtree(temp_dir, True)
@_export_metrics
def convert(self):
"""Converts a keras model based on instance variables.
Returns:
The converted data in serialized format.
Raises:
ValueError:
Multiple concrete functions are specified.
Input shape is not specified.
Invalid quantization parameters.
"""
saved_model_convert_result = self._convert_as_saved_model()
if saved_model_convert_result:
return saved_model_convert_result
graph_def, input_tensors, output_tensors, frozen_func = (
self._freeze_keras_model())
graph_def = self._optimize_tf_model(graph_def, input_tensors,
output_tensors, frozen_func)
return super(TFLiteKerasModelConverterV2,
self).convert(graph_def, input_tensors, output_tensors)
class TFLiteFrozenGraphConverterV2(TFLiteConverterBaseV2):
"""Converts the given frozen graph into TensorFlow Lite model."""
def __init__(self, funcs, trackable_obj=None):
"""Constructor for TFLiteConverter.
Args:
funcs: List of TensorFlow ConcreteFunctions. The list should not contain
duplicate elements.
trackable_obj: tf.AutoTrackable object associated with `funcs`. A
reference to this object needs to be maintained so that Variables do not
get garbage collected since functions have a weak reference to
Variables. This is only required when the tf.AutoTrackable object is not
maintained by the user (e.g. `from_saved_model`).
"""
super(TFLiteFrozenGraphConverterV2, self).__init__()
self._funcs = funcs
self._trackable_obj = trackable_obj
self.experimental_lower_to_saved_model = True
@convert_phase(Component.PREPARE_TF_MODEL,
SubComponent.FREEZE_CONCRETE_FUNCTION)
def _freeze_concrete_function(self):
"""Convert the given ConcreteFunction to frozen graph.
Returns:
graph_def: The frozen GraphDef.
input_tensors: List of input tensors.
output_tensors: List of output tensors.
frozen_func: The frozen ConcreteFunction.
Raises:
ValueError: none or multiple ConcreteFunctions provided.
"""
    # TODO(b/130297984): Add support for converting multiple functions.
if len(self._funcs) == 0: # pylint: disable=g-explicit-length-test
raise ValueError("No ConcreteFunction is specified.")
if len(self._funcs) > 1:
raise ValueError("This converter can only convert a single "
"ConcreteFunction. Converting multiple functions is "
"under development.")
frozen_func, graph_def = (
_convert_to_constants.convert_variables_to_constants_v2_as_graph(
self._funcs[0], lower_control_flow=False))
input_tensors = [
tensor for tensor in frozen_func.inputs
if tensor.dtype != _dtypes.resource
]
output_tensors = frozen_func.outputs
return graph_def, input_tensors, output_tensors, frozen_func
@convert_phase(Component.PREPARE_TF_MODEL,
SubComponent.CONVERT_CONCRETE_FUNCTIONS_TO_SAVED_MODEL)
def _convert_concrete_functions_to_saved_model(self, output_dir):
"""Save concrete functions to the SavedModel format.
Args:
output_dir: The output directory to save the SavedModel.
Returns:
graph_def: The frozen GraphDef.
input_tensors: List of input tensors.
output_tensors: List of output tensors.
"""
func = self._funcs[0]
if not self.experimental_lower_to_saved_model:
return None, None, None
    # Without a provided trackable object, the given concrete functions cannot
    # be serialized in the SavedModel format.
if not self._trackable_obj:
return None, None, None
try:
_saved_model.save(
self._trackable_obj,
output_dir,
signatures={"serving_default": func},
options=_save_options.SaveOptions(save_debug_info=True))
except Exception: # pylint: disable=broad-except
      # When saving the given concrete function to a SavedModel fails, fall
      # back to the original concrete function conversion pipeline.
return None, None, None
self.saved_model_dir = output_dir
self._saved_model_tags = set([_tag_constants.SERVING])
self._saved_model_exported_names = [
_signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY
]
self._parse_saved_model_args(always_enable_saved_model_import=True)
if self.saved_model_dir:
graph_def, input_tensors, output_tensors = self._load_saved_model(
self.saved_model_dir, self._saved_model_tags)
self._trackable_obj = _load(self.saved_model_dir, self._saved_model_tags)
return graph_def, input_tensors, output_tensors
return None, None, None
def _convert_as_saved_model(self):
"""Converts the given concrete functions as a saved model format.
Returns:
The converted data in serialized format.
"""
temp_dir = tempfile.mkdtemp()
try:
graph_def, input_tensors, output_tensors = (
self._convert_concrete_functions_to_saved_model(temp_dir))
if self.saved_model_dir:
return super(TFLiteFrozenGraphConverterV2,
self).convert(graph_def, input_tensors, output_tensors)
finally:
shutil.rmtree(temp_dir, True)
return None
@_export_metrics
def convert(self):
"""Converts a TensorFlow GraphDef based on instance variables.
Returns:
The converted data in serialized format.
Raises:
ValueError:
        No concrete function is specified.
Multiple concrete functions are specified.
Input shape is not specified.
Invalid quantization parameters.
"""
if self.experimental_lower_to_saved_model:
saved_model_convert_result = self._convert_as_saved_model()
if saved_model_convert_result:
return saved_model_convert_result
graph_def, input_tensors, output_tensors, frozen_func = (
self._freeze_concrete_function())
graph_def = self._optimize_tf_model(graph_def, input_tensors,
output_tensors, frozen_func)
return super(TFLiteFrozenGraphConverterV2,
self).convert(graph_def, input_tensors, output_tensors)
@_tf_export("lite.TFLiteConverter", v1=[])
class TFLiteConverterV2(TFLiteFrozenGraphConverterV2):
"""Converts a TensorFlow model into TensorFlow Lite model.
Attributes:
optimizations: Experimental flag, subject to change. Set of optimizations
      to apply, e.g. {tf.lite.Optimize.DEFAULT}. (default None, must be None or a
set of values of type `tf.lite.Optimize`)
representative_dataset: A generator function used for integer quantization
where each generated sample has the same order, type and shape as the
inputs to the model. Usually, this is a small subset of a few hundred
samples randomly chosen, in no particular order, from the training or
evaluation dataset. This is an optional attribute, but required for full
      integer quantization, i.e., if `tf.int8` is the only supported type in
`target_spec.supported_types`. Refer to `tf.lite.RepresentativeDataset`.
(default None)
target_spec: Experimental flag, subject to change. Specifications of target
device, including supported ops set, supported types and a set of user's
defined TensorFlow operators required in the TensorFlow Lite runtime.
Refer to `tf.lite.TargetSpec`.
inference_input_type: Data type of the input layer. Note that integer types
      (tf.int8 and tf.uint8) are currently only supported for post-training
      integer quantization and quantization-aware training. (default tf.float32,
must be in {tf.float32, tf.int8, tf.uint8})
inference_output_type: Data type of the output layer. Note that integer
      types (tf.int8 and tf.uint8) are currently only supported for
      post-training integer quantization and quantization-aware training. (default
tf.float32, must be in {tf.float32, tf.int8, tf.uint8})
allow_custom_ops: Boolean indicating whether to allow custom operations.
When False, any unknown operation is an error. When True, custom ops are
created for any op that is unknown. The developer needs to provide these
to the TensorFlow Lite runtime with a custom resolver. (default False)
experimental_new_converter: Experimental flag, subject to change. Enables
MLIR-based conversion instead of TOCO conversion. (default True)
experimental_new_quantizer: Experimental flag, subject to change. Enables
MLIR-based quantization conversion instead of Flatbuffer-based conversion.
(default True)
experimental_enable_resource_variables: Experimental flag, subject to
change. Enables resource variables to be converted by this converter.
      This is only allowed if the from_saved_model interface is used.
(default False)
Example usage:
```python
# Converting a SavedModel to a TensorFlow Lite model.
converter = tf.lite.TFLiteConverter.from_saved_model(saved_model_dir)
tflite_model = converter.convert()
# Converting a tf.Keras model to a TensorFlow Lite model.
converter = tf.lite.TFLiteConverter.from_keras_model(model)
tflite_model = converter.convert()
# Converting ConcreteFunctions to a TensorFlow Lite model.
converter = tf.lite.TFLiteConverter.from_concrete_functions([func])
tflite_model = converter.convert()
```
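  A minimal post-training dynamic range quantization sketch (illustrative;
  assumes `saved_model_dir` points to an existing SavedModel):

  ```python
  converter = tf.lite.TFLiteConverter.from_saved_model(saved_model_dir)
  converter.optimizations = {tf.lite.Optimize.DEFAULT}
  tflite_quant_model = converter.convert()
  ```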
"""
# pylint: disable=useless-super-delegation
def __init__(self, funcs, trackable_obj=None):
"""Constructor for TFLiteConverter.
Args:
funcs: List of TensorFlow ConcreteFunctions. The list should not contain
duplicate elements.
trackable_obj: tf.AutoTrackable object associated with `funcs`. A
reference to this object needs to be maintained so that Variables do not
get garbage collected since functions have a weak reference to
Variables. This is only required when the tf.AutoTrackable object is not
maintained by the user (e.g. `from_saved_model`).
"""
super(TFLiteConverterV2, self).__init__(funcs, trackable_obj)
@classmethod
def from_concrete_functions(cls, funcs, trackable_obj=None):
"""Creates a TFLiteConverter object from ConcreteFunctions.
Args:
funcs: List of TensorFlow ConcreteFunctions. The list should not contain
duplicate elements. Currently converter can only convert a single
ConcreteFunction. Converting multiple functions is under development.
trackable_obj: An `AutoTrackable` object (typically `tf.module`)
associated with `funcs`. A reference to this object needs to be
maintained so that Variables do not get garbage collected since
functions have a weak reference to Variables.
Returns:
TFLiteConverter object.
Raises:
Invalid input type.
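    Example usage (an illustrative sketch; `module` stands for a hypothetical
    `tf.Module` exposing a `tf.function` named `func`):

    ```python
    concrete_func = module.func.get_concrete_function()
    converter = tf.lite.TFLiteConverter.from_concrete_functions(
        [concrete_func], module)
    tflite_model = converter.convert()
    ```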
"""
for func in funcs:
if not isinstance(func, _function.ConcreteFunction):
message = "This function takes in a list of ConcreteFunction."
if isinstance(func, _def_function.Function):
message += (" To get the ConcreteFunction from a Function,"
" call get_concrete_function.")
raise ValueError(message)
return cls(funcs, trackable_obj)
@classmethod
def from_saved_model(cls, saved_model_dir, signature_keys=None, tags=None):
"""Creates a TFLiteConverter object from a SavedModel directory.
Args:
saved_model_dir: SavedModel directory to convert.
signature_keys: List of keys identifying SignatureDef containing inputs
and outputs. Elements should not be duplicated. By default the
        `signatures` attribute of the MetaGraphDef is used. (default
saved_model.signatures)
tags: Set of tags identifying the MetaGraphDef within the SavedModel to
analyze. All tags in the tag set must be present. (default
{tf.saved_model.SERVING} or {'serve'})
Returns:
TFLiteConverter object.
Raises:
Invalid signature keys.
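    Example usage (an illustrative sketch; `saved_model_dir` is assumed to be
    an existing SavedModel directory):

    ```python
    converter = tf.lite.TFLiteConverter.from_saved_model(
        saved_model_dir, signature_keys=["serving_default"])
    tflite_model = converter.convert()
    ```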
"""
# When run without eager enabled, this will return the legacy
# TFLiteConverter.
if not context.executing_eagerly():
signature_key = None
if signature_keys:
if len(signature_keys) != 1:
raise ValueError("Only support a single signature key.")
else:
signature_key = signature_keys[0]
logging.warning("Invoking the TF1 implementation of TFLiteConverter "
"because eager is disabled. Consider enabling eager.")
return TFLiteConverter.from_saved_model(
saved_model_dir, signature_key=signature_key, tag_set=tags)
# Ensures any graphs created in Eager mode are able to run. This is required
# in order to create a tf.estimator.Exporter that exports a TFLite model.
if tags is None:
tags = set([_tag_constants.SERVING])
with context.eager_mode():
saved_model = _load(saved_model_dir, tags)
if not signature_keys:
signature_keys = saved_model.signatures
if not signature_keys:
raise ValueError("Only support at least one signature key.")
funcs = []
for key in signature_keys:
if key not in saved_model.signatures:
raise ValueError("Invalid signature key '{}' found. Valid keys are "
"'{}'.".format(key, ",".join(saved_model.signatures)))
funcs.append(saved_model.signatures[key])
saved_model_converter = TFLiteSavedModelConverterV2(saved_model_dir, tags,
signature_keys,
saved_model)
if saved_model_converter.saved_model_dir:
return saved_model_converter
return cls(funcs, saved_model)
@classmethod
def from_keras_model(cls, model):
"""Creates a TFLiteConverter object from a Keras model.
Args:
model: tf.Keras.Model
Returns:
TFLiteConverter object.
"""
return TFLiteKerasModelConverterV2(model)
# pylint: disable=useless-super-delegation
def convert(self):
"""Converts a TensorFlow GraphDef based on instance variables.
Returns:
The converted data in serialized format.
Raises:
ValueError:
        No concrete function is specified.
Multiple concrete functions are specified.
Input shape is not specified.
Invalid quantization parameters.
"""
return super(TFLiteConverterV2, self).convert()
class TFLiteConverterBaseV1(TFLiteConverterBase):
"""Converter subclass to share functionality between V1 converters."""
def __init__(self, experimental_debug_info_func):
"""Constructor for TFLiteConverter.
Args:
experimental_debug_info_func: An experimental function to retrieve the
graph debug info for a set of nodes from the `graph_def`.
"""
super(TFLiteConverterBaseV1, self).__init__()
self.inference_type = _dtypes.float32
self.inference_input_type = None
self.inference_output_type = None
self.output_format = constants.TFLITE
self.quantized_input_stats = {}
self.default_ranges_stats = None
self.drop_control_dependency = True
self.reorder_across_fake_quant = False
self.change_concat_input_ranges = False
self.dump_graphviz_dir = None
self.dump_graphviz_video = False
self.conversion_summary_dir = None
self._debug_info_func = experimental_debug_info_func
self._experimental_allow_all_select_tf_ops = False
def __setattr__(self, name, value):
if name == "post_training_quantize":
warnings.warn("Property %s is deprecated, "
"please use optimizations=[Optimize.DEFAULT]"
" instead." % name)
if value:
self.optimizations = [Optimize.DEFAULT]
else:
self.optimizations = []
return
if name == "target_ops":
warnings.warn("Property %s is deprecated, please use "
"target_spec.supported_ops instead." % name)
self.target_spec.supported_ops = value
return
object.__setattr__(self, name, value)
def __getattribute__(self, name):
if name == "post_training_quantize":
warnings.warn("Property %s is deprecated, "
"please use optimizations=[Optimize.DEFAULT]"
" instead." % name)
return Optimize.DEFAULT in set(self.optimizations)
if name == "target_ops":
warnings.warn("Property %s is deprecated, please use "
"target_spec.supported_ops instead." % name)
return self.target_spec.supported_ops
return object.__getattribute__(self, name)
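  # Illustrative examples of the deprecated aliases handled above (assignments
  # on the left are rewritten to the ones on the right):
  #   converter.post_training_quantize = True
  #     -> converter.optimizations = [Optimize.DEFAULT]
  #   converter.target_ops = {OpsSet.SELECT_TF_OPS}
  #     -> converter.target_spec.supported_ops = {OpsSet.SELECT_TF_OPS}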
def _validate_quantized_input_stats(self, converter_kwargs, quant_mode):
"""Ensure the `quantized_input_stats` flag is provided if required."""
quantized_types = frozenset({_dtypes.int8, _dtypes.uint8})
requires_quantized_input_stats = (
(converter_kwargs["inference_type"] in quantized_types or
converter_kwargs["inference_input_type"] in quantized_types) and
not quant_mode.is_post_training_integer_quantize())
if (requires_quantized_input_stats and
not converter_kwargs["quantized_input_stats"]):
raise ValueError(
"The `quantized_input_stats` flag must be defined when either "
"`inference_type` flag or `inference_input_type` flag is set to "
"tf.int8 or tf.uint8. Currently, `inference_type={}` and "
"`inference_input_type={}`.".format(
_get_tf_type_name(converter_kwargs["inference_type"]),
_get_tf_type_name(converter_kwargs["inference_input_type"])))
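  # Illustrative example (hypothetical values): for a uint8 input whose model
  # was trained on data normalized to [-1, 1],
  # `quantized_input_stats={"input": (127.5, 127.5)}` describes the mapping
  # real_value = (quantized_value - mean) / std.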
@convert_phase(Component.PREPARE_TF_MODEL, SubComponent.VALIDATE_INPUTS)
def _validate_inputs(self, input_tensors, quantized_input_stats):
"""Validate input parameters.
Args:
input_tensors: List of input tensors.
quantized_input_stats: Map of input tensor names to a tuple of floats
representing the mean and standard deviation of the training data.
Raises:
ValueError:
Input shape is not specified.
Quantization input stats is required but not provided.
"""
if (not self._is_unknown_shapes_allowed() and self._has_valid_tensors()):
# Checks dimensions in input tensor.
for tensor in input_tensors:
shape = tensor.shape
if not shape:
raise ValueError("Provide an input shape for input array "
"'{0}'.".format(_get_tensor_name(tensor)))
# Note that shape_list might be empty for scalar shapes.
shape_list = shape.as_list()
if None in shape_list[1:]:
raise ValueError(
"None is only supported in the 1st dimension. Tensor '{0}' has "
"invalid shape '{1}'.".format(
_get_tensor_name(tensor), shape_list))
elif shape_list and shape_list[0] is None:
self._set_batch_size(batch_size=1)
# Get quantization stats. Ensures there is one stat per name if the stats
# are specified.
if quantized_input_stats:
self._quantized_stats = []
invalid_stats = []
for name in self.get_input_arrays():
if name in quantized_input_stats:
self._quantized_stats.append(quantized_input_stats[name])
else:
invalid_stats.append(name)
if invalid_stats:
raise ValueError("Quantization input stats are not available for input "
"tensors '{0}'.".format(",".join(invalid_stats)))
else:
self._quantized_stats = None
@convert_phase(Component.PREPARE_TF_MODEL, SubComponent.OPTIMIZE_TF_MODEL)
def _optimize_tf_model(self, graph_def, input_tensors, output_tensors,
quant_mode):
"""Run a Grappler pass to optimize the TensorFlow graph.
Args:
graph_def: Frozen GraphDef to be optimized.
input_tensors: List of input tensors.
output_tensors: List of output tensors.
quant_mode: the quantization mode.
Returns:
The optimized TensorFlow graph.
"""
# Disable grappler constant folding if there are training quant ops.
if self.saved_model_dir or quant_mode.contains_training_quant_op():
return graph_def
try:
# TODO(b/150163103): Merge `disabling lower using switch merge' calls.
# Grappler will also try to lower while loop into switch merge
# representation which is undesired for Ophints, so we simply remove
# those attributes to prevent Grappler from doing so.
graph = _convert_to_constants.disable_lower_using_switch_merge(graph_def)
# Run function inlining optimization to ensure any models generated
# through the from_frozen_graph path have been inlined.
optimized_graph = _run_graph_optimizations(
graph,
input_tensors,
output_tensors,
config=self._grappler_config(["function"]))
return optimized_graph
except Exception: # pylint: disable=broad-except
return graph_def
def convert(self):
"""Converts a TensorFlow GraphDef based on instance variables.
Returns:
The converted data in serialized format. Either a TFLite Flatbuffer or a
Graphviz graph depending on value in `output_format`.
Raises:
ValueError:
Input shape is not specified.
None value for dimension in input_tensor.
"""
self._validate_inputs(self._input_tensors, self.quantized_input_stats)
quant_mode = QuantizationMode(self.optimizations, self.target_spec,
self.representative_dataset, self._graph_def)
optimized_graph = self._optimize_tf_model(self._graph_def,
self._input_tensors,
self._output_tensors, quant_mode)
self._debug_info = _get_debug_info(self._debug_info_func, optimized_graph)
converter_kwargs = self._get_base_converter_args()
converter_kwargs.update(
quant_mode.converter_flags(self.inference_type,
self.inference_input_type))
converter_kwargs.update({
"output_format": self.output_format,
"quantized_input_stats": self._quantized_stats,
"default_ranges_stats": self.default_ranges_stats,
"drop_control_dependency": self.drop_control_dependency,
"reorder_across_fake_quant": self.reorder_across_fake_quant,
"change_concat_input_ranges": self.change_concat_input_ranges,
"dump_graphviz_dir": self.dump_graphviz_dir,
"dump_graphviz_video": self.dump_graphviz_video,
"conversion_summary_dir": self.conversion_summary_dir,
"allow_all_select_tf_ops": self._experimental_allow_all_select_tf_ops,
})
if not self.experimental_new_converter:
logging.warning(
"Please consider switching to the new converter by setting "
"experimental_new_converter=True. "
"The old converter (TOCO) is deprecated.")
else:
logging.info("Using experimental converter: If you encountered a problem "
"please file a bug. You can opt-out "
"by setting experimental_new_converter=False")
self._validate_quantized_input_stats(converter_kwargs, quant_mode)
self._validate_experimental_new_quantizer_flag()
# Converts model.
if self._has_valid_tensors():
result = _toco_convert_impl(
input_data=optimized_graph,
input_tensors=self._input_tensors,
output_tensors=self._output_tensors,
**converter_kwargs)
else:
result = _toco_convert_graph_def(
input_data=optimized_graph,
input_arrays_with_shape=self._input_arrays_with_shape,
output_arrays=self._output_arrays,
control_output_arrays=self._control_output_arrays,
**converter_kwargs)
return self._optimize_tflite_model(
result, quant_mode, quant_io=not self.experimental_new_converter)
def get_input_arrays(self):
"""Returns a list of the names of the input tensors.
Returns:
List of strings.
"""
if self._has_valid_tensors():
return [_get_tensor_name(tensor) for tensor in self._input_tensors]
else:
return [name for name, _ in self._input_arrays_with_shape]
def _has_valid_tensors(self):
"""Checks if the input and output tensors have been initialized.
Returns:
Bool.
"""
return self._input_tensors is not None and self._output_tensors
def _set_batch_size(self, batch_size):
"""Sets the first dimension of the input tensor to `batch_size`.
Args:
batch_size: Batch size for the model. Replaces the first dimension of an
input size array if undefined. (default 1)
Raises:
ValueError: input_tensor is not defined.
"""
if not self._has_valid_tensors():
raise ValueError("The batch size cannot be set for this model. Please "
"use input_shapes parameter.")
for tensor in self._input_tensors:
shape = tensor.shape.as_list()
if shape[0] is None:
shape[0] = batch_size
tensor.set_shape(shape)
def _is_unknown_shapes_allowed(self):
# Ophint Converted nodes will need the shapes to be known.
if _is_ophint_converted(self._graph_def):
return False
if not super(TFLiteConverterBaseV1, self)._is_unknown_shapes_allowed():
return False
# `conversion_summary_dir` calls TOCO. Unknown shapes are only supported by
# the MLIR converter.
if self.conversion_summary_dir:
logging.warning(
"`conversion_summary_dir` does not work with unknown shapes. "
"Graphs with unknown shapes might be different than when this flag "
"is disabled.")
return False
return True
def _save_conversion_params_metric(self):
self._collected_converter_params.update({
"output_format": self.output_format,
"default_ranges_stats": self.default_ranges_stats,
"drop_control_dependency": self.drop_control_dependency,
"reorder_across_fake_quant": self.reorder_across_fake_quant,
"change_concat_input_ranges": self.change_concat_input_ranges,
"dump_graphviz_dir": self.dump_graphviz_dir,
"dump_graphviz_video": self.dump_graphviz_video,
"conversion_summary_dir": self.conversion_summary_dir,
"api_version": 1,
})
super(TFLiteConverterBaseV1,
self)._save_conversion_params_metric(self._graph_def,
self.inference_type,
self.inference_input_type)
class TFLiteSavedModelConverter(TFLiteConverterBaseV1):
"""Converts the given SavedModel into TensorFlow Lite model.
Attributes:
saved_model_dir: Directory of the SavedModel.
"""
def __init__(self,
saved_model_dir,
saved_model_tags,
saved_model_exported_names,
experimental_debug_info_func=None):
"""Constructor for TFLiteConverter.
Args:
saved_model_dir: Directory of the SavedModel.
saved_model_tags: Set of tags identifying the MetaGraphDef within the
SavedModel to analyze. All tags in the tag set must be present. (default
{tf.saved_model.SERVING}).
saved_model_exported_names: Names to be exported when the saved model
import path is on.
experimental_debug_info_func: An experimental function to retrieve the
graph debug info for a set of nodes from the `graph_def`.
Raises:
ValueError: Invalid arguments.
"""
super(TFLiteSavedModelConverter,
self).__init__(experimental_debug_info_func)
self.saved_model_dir = saved_model_dir
self._saved_model_tags = saved_model_tags
self._saved_model_exported_names = saved_model_exported_names
signature_key = _signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY
if len(self._saved_model_exported_names) != 1:
raise ValueError("Only support a single signature key.")
signature_key = self._saved_model_exported_names[0]
result = _freeze_saved_model(self.saved_model_dir, None, None, None,
self._saved_model_tags, signature_key)
self._graph_def = result[0]
self._input_tensors = result[1]
self._output_tensors = result[2]
self._parse_saved_model_args()
@_export_metrics
def convert(self):
"""Converts a TensorFlow GraphDef based on instance variables.
Returns:
The converted data in serialized format. Either a TFLite Flatbuffer or a
Graphviz graph depending on value in `output_format`.
Raises:
ValueError:
Input shape is not specified.
None value for dimension in input_tensor.
"""
return super(TFLiteSavedModelConverter, self).convert()
class TFLiteKerasModelConverter(TFLiteConverterBaseV1):
"""Converts the given SavedModel into TensorFlow Lite model."""
def __init__(self,
model_file,
input_arrays=None,
input_shapes=None,
output_arrays=None,
custom_objects=None):
"""Constructor for TFLiteConverter.
Args:
model_file: Full filepath of HDF5 file containing the tf.keras model.
input_arrays: List of input tensors to freeze graph with. Uses input
arrays from SignatureDef when none are provided. (default None)
input_shapes: Dict of strings representing input tensor names to list of
integers representing input shapes (e.g., {"foo" : [1, 16, 16, 3]}).
Automatically determined when input shapes is None (e.g., {"foo" :
None}). (default None)
output_arrays: List of output tensors to freeze graph with. Uses output
arrays from SignatureDef when none are provided. (default None)
custom_objects: Dict mapping names (strings) to custom classes or
functions to be considered during model deserialization. (default None)
Raises:
ValueError: Invalid arguments.
"""
super(TFLiteKerasModelConverter,
self).__init__(experimental_debug_info_func=None)
# Handles Keras when Eager mode is enabled.
if context.executing_eagerly():
if input_arrays or output_arrays:
raise ValueError("`input_arrays` and `output_arrays` are unsupported "
"with Eager mode. If your model requires any of these "
"parameters, please use disable_eager_execution().")
keras_model = keras_deps.get_load_model_function()(model_file,
custom_objects)
function = _trace_model_call(keras_model)
concrete_func = function.get_concrete_function()
frozen_func = _convert_to_constants.convert_variables_to_constants_v2(
concrete_func, lower_control_flow=False)
_set_tensor_shapes(frozen_func.inputs, input_shapes)
self._keras_model = keras_model
self._graph_def = frozen_func.graph.as_graph_def()
self._input_tensors = frozen_func.inputs
self._output_tensors = frozen_func.outputs
self._debug_info_func = _build_debug_info_func(frozen_func.graph)
return
# Handles Keras when Eager mode is disabled.
keras_deps.get_clear_session_function()()
keras_model = keras_deps.get_load_model_function()(model_file,
custom_objects)
sess = keras_deps.get_get_session_function()()
# Get input and output tensors.
if input_arrays:
input_tensors = _get_tensors_from_tensor_names(sess.graph, input_arrays)
else:
input_tensors = keras_model.inputs
if output_arrays:
output_tensors = _get_tensors_from_tensor_names(sess.graph, output_arrays)
else:
output_tensors = keras_model.outputs
_set_tensor_shapes(input_tensors, input_shapes)
graph_def = _freeze_graph(sess, input_tensors, output_tensors)
self._keras_model = keras_model
self._graph_def = graph_def
self._input_tensors = input_tensors
self._output_tensors = output_tensors
self._debug_info_func = _build_debug_info_func(sess.graph)
@convert_phase(Component.PREPARE_TF_MODEL, SubComponent.FREEZE_KERAS_MODEL)
def _freeze_keras_model(self, output_dir):
"""Save Keras model to Saved Model format.
Args:
output_dir: The output directory to save the SavedModel.
"""
try:
self._keras_model.save(output_dir, save_format="tf")
except Exception: # pylint: disable=broad-except
      # If saving the given Keras model as a SavedModel fails, fall back to the
      # original Keras model conversion pipeline.
return None
tag_set = set([_tag_constants.SERVING])
signature_key = _signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY
graph_def, input_tensors, output_tensors, sess_graph = _freeze_saved_model(
output_dir, None, None, None, tag_set, signature_key)
self.saved_model_dir = output_dir
self._saved_model_tags = tag_set
self._saved_model_exported_names = [signature_key]
self._parse_saved_model_args()
if self.saved_model_dir:
self._graph_def = graph_def
self._input_tensors = input_tensors
self._output_tensors = output_tensors
self._debug_info_func = _build_debug_info_func(sess_graph)
def _convert_as_saved_model(self):
"""Converts a Keras model as a saved model.
Returns:
The converted data in serialized format.
"""
temp_dir = tempfile.mkdtemp()
try:
self._freeze_keras_model(temp_dir)
if self.saved_model_dir:
return super(TFLiteKerasModelConverter, self).convert()
finally:
shutil.rmtree(temp_dir, True)
@_export_metrics
def convert(self):
"""Converts a Keras model based on instance variables.
Returns:
The converted data in serialized format. Either a TFLite Flatbuffer or a
Graphviz graph depending on value in `output_format`.
Raises:
ValueError:
Input shape is not specified.
None value for dimension in input_tensor.
"""
saved_model_convert_result = self._convert_as_saved_model()
if saved_model_convert_result:
return saved_model_convert_result
return super(TFLiteKerasModelConverter, self).convert()
class TFLiteFrozenGraphConverter(TFLiteConverterBaseV1):
"""Converts the given frozen graph def into TensorFlow Lite model."""
def __init__(self,
graph_def,
input_tensors,
output_tensors,
input_arrays_with_shape=None,
output_arrays=None,
experimental_debug_info_func=None):
"""Constructor for TFLiteConverter.
Args:
graph_def: Frozen TensorFlow GraphDef.
input_tensors: List of input tensors. Type and shape are computed using
`foo.shape` and `foo.dtype`.
output_tensors: List of output tensors (only .name is used from this).
input_arrays_with_shape: Tuple of strings representing input tensor names
and list of integers representing input shapes
(e.g., [("foo", [1, 16, 16, 3])]). Use only when graph cannot be loaded
into TensorFlow and when `input_tensors` and `output_tensors` are
None. (default None)
output_arrays: List of output tensors to freeze graph with. Use only when
graph cannot be loaded into TensorFlow and when `input_tensors` and
`output_tensors` are None. (default None)
experimental_debug_info_func: An experimental function to retrieve the
graph debug info for a set of nodes from the `graph_def`.
Raises:
ValueError: Invalid arguments.
"""
super(TFLiteFrozenGraphConverter,
self).__init__(experimental_debug_info_func)
self._graph_def = graph_def
self._input_tensors = input_tensors
self._output_tensors = output_tensors
self._control_output_arrays = None
# Attributes are used by models that cannot be loaded into TensorFlow.
if not self._has_valid_tensors():
self._input_arrays_with_shape = input_arrays_with_shape
self._output_arrays = output_arrays
if input_tensors is not None and input_arrays_with_shape is not None:
logging.warning("input_arrays_with_shape will be ignored when both the "
"given input_tensors and input_arrays_with_shape are not "
"None.")
if output_tensors is not None and output_arrays is not None:
logging.warning("output_arrays will be ignored when both the given "
"output_tensors and output_arrays are not None.")
@_export_metrics
def convert(self):
"""Converts a TensorFlow GraphDef based on instance variables.
Returns:
The converted data in serialized format. Either a TFLite Flatbuffer or a
Graphviz graph depending on value in `output_format`.
Raises:
ValueError:
Input shape is not specified.
None value for dimension in input_tensor.
"""
if not self._has_valid_tensors():
if not self._input_arrays_with_shape or not (self._output_arrays or
self._control_output_arrays):
raise ValueError(
"If input_tensors and output_tensors are None, both "
"input_arrays_with_shape and output_arrays|control_output_arrays "
"must be defined.")
return super(TFLiteFrozenGraphConverter, self).convert()
@_tf_export(v1=["lite.TFLiteConverter"])
class TFLiteConverter(TFLiteFrozenGraphConverter):
"""Convert a TensorFlow model into `output_format`.
This is used to convert from a TensorFlow GraphDef, SavedModel or tf.keras
model into either a TFLite FlatBuffer or graph visualization.
Attributes:
optimizations: Experimental flag, subject to change. Set of optimizations to
apply. e.g {tf.lite.Optimize.DEFAULT}. (default None, must be None or a
set of values of type `tf.lite.Optimize`)
representative_dataset: A generator function used for integer quantization
where each generated sample has the same order, type and shape as the
inputs to the model. Usually, this is a small subset of a few hundred
samples randomly chosen, in no particular order, from the training or
evaluation dataset. This is an optional attribute, but required for full
integer quantization, i.e, if `tf.int8` is the only supported type in
`target_spec.supported_types`. Refer to `tf.lite.RepresentativeDataset`.
(default None)
target_spec: Experimental flag, subject to change. Specifications of target
device, including supported ops set, supported types and a set of user's
defined TensorFlow operators required in the TensorFlow Lite runtime.
Refer to `tf.lite.TargetSpec`.
inference_type: Data type of numeric arrays, excluding the input layer.
(default tf.float32, must be in {tf.float32, tf.int8, tf.uint8})
inference_input_type: Data type of the numeric arrays in the input layer. If
`inference_input_type` is in {tf.int8, tf.uint8}, then
`quantized_input_stats` must be provided. (default is the value assigned
to `inference_type`, must be in {tf.float32, tf.int8, tf.uint8})
inference_output_type: Data type of the numeric arrays in the output layer.
(default is the value assigned to `inference_type`, must be in
{tf.float32, tf.int8, tf.uint8})
quantized_input_stats: Map of input tensor names to a tuple of floats
representing the mean and standard deviation of the training data.
(e.g., {"foo" : (0., 1.)}). Required if `inference_input_type` is tf.int8
or tf.uint8. (default None)
default_ranges_stats: Tuple of integers (min, max) representing range values
for all numeric arrays without a specified range. Intended for
experimenting with quantization via "dummy quantization". (default None)
allow_custom_ops: Boolean indicating whether to allow custom operations.
When False any unknown operation is an error. When True, custom ops are
created for any op that is unknown. The developer will need to provide
these to the TensorFlow Lite runtime with a custom resolver. (default
False)
drop_control_dependency: Boolean indicating whether to drop control
dependencies silently. This is due to TFLite not supporting control
dependencies. (default True)
reorder_across_fake_quant: Boolean indicating whether to reorder FakeQuant
nodes in unexpected locations. Used when the location of the FakeQuant
nodes is preventing graph transformations necessary to convert the graph.
Results in a graph that differs from the quantized training graph,
potentially causing differing arithmetic behavior. (default False)
change_concat_input_ranges: Boolean to change behavior of min/max ranges for
inputs and outputs of the concat operator for quantized models. Changes
the ranges of concat operator overlap when true. (default False)
output_format: Output file format. (default
tf.compat.v1.lite.constants.TFLITE, must be in
{tf.compat.v1.lite.constants.TFLITE,
tf.compat.v1.lite.constants.GRAPHVIZ_DOT})
dump_graphviz_dir: Full filepath of folder to dump the graphs at various
stages of processing GraphViz .dot files. Preferred over
`output_format=tf.compat.v1.lite.constants.GRAPHVIZ_DOT` in order to keep
the requirements of the output file. (default None)
dump_graphviz_video: Boolean indicating whether to dump the GraphViz .dot
files after every graph transformation. Requires the `dump_graphviz_dir`
flag to be specified. (default False)
conversion_summary_dir: Full path of the directory to store conversion logs.
(default None)
target_ops: Deprecated. Please use `target_spec.supported_ops` instead.
post_training_quantize: Deprecated. Please use `optimizations` instead and
set it to `{tf.lite.Optimize.DEFAULT}`. (default False)
experimental_new_converter: Experimental flag, subject to change. Enables
MLIR-based conversion instead of TOCO conversion. (default True)
experimental_new_quantizer: Experimental flag, subject to change. Enables
MLIR-based quantization conversion instead of Flatbuffer-based conversion.
(default True)
Example usage:
```python
# Converting a GraphDef from session.
converter = tf.compat.v1.lite.TFLiteConverter.from_session(
sess, in_tensors, out_tensors)
tflite_model = converter.convert()
open("converted_model.tflite", "wb").write(tflite_model)
# Converting a GraphDef from file.
converter = tf.compat.v1.lite.TFLiteConverter.from_frozen_graph(
graph_def_file, input_arrays, output_arrays)
tflite_model = converter.convert()
open("converted_model.tflite", "wb").write(tflite_model)
# Converting a SavedModel.
converter = tf.compat.v1.lite.TFLiteConverter.from_saved_model(
saved_model_dir)
tflite_model = converter.convert()
open("converted_model.tflite", "wb").write(tflite_model)
# Converting a tf.keras model.
converter = tf.compat.v1.lite.TFLiteConverter.from_keras_model_file(
keras_model)
tflite_model = converter.convert()
open("converted_model.tflite", "wb").write(tflite_model)
```
"""
# pylint: disable=useless-super-delegation
def __init__(self,
graph_def,
input_tensors,
output_tensors,
input_arrays_with_shape=None,
output_arrays=None,
experimental_debug_info_func=None):
"""Constructor for TFLiteConverter.
Args:
graph_def: Frozen TensorFlow GraphDef.
input_tensors: List of input tensors. Type and shape are computed using
`foo.shape` and `foo.dtype`.
output_tensors: List of output tensors (only .name is used from this).
input_arrays_with_shape: Tuple of strings representing input tensor names
and list of integers representing input shapes
(e.g., [("foo" : [1, 16, 16, 3])]). Use only when graph cannot be loaded
into TensorFlow and when `input_tensors` and `output_tensors` are
None. (default None)
output_arrays: List of output tensors to freeze graph with. Use only when
graph cannot be loaded into TensorFlow and when `input_tensors` and
`output_tensors` are None. (default None)
experimental_debug_info_func: An experimental function to retrieve the
graph debug info for a set of nodes from the `graph_def`.
Raises:
ValueError: Invalid arguments.
"""
super(TFLiteConverter,
self).__init__(graph_def, input_tensors, output_tensors,
input_arrays_with_shape, output_arrays,
experimental_debug_info_func)
@classmethod
def from_session(cls, sess, input_tensors, output_tensors):
"""Creates a TFLiteConverter class from a TensorFlow Session.
Args:
sess: TensorFlow Session.
input_tensors: List of input tensors. Type and shape are computed using
`foo.shape` and `foo.dtype`.
output_tensors: List of output tensors (only .name is used from this).
Returns:
TFLiteConverter class.
"""
graph_def = _freeze_graph(sess, input_tensors, output_tensors)
return cls(
graph_def,
input_tensors,
output_tensors,
experimental_debug_info_func=_build_debug_info_func(sess.graph))
@classmethod
def from_frozen_graph(cls,
graph_def_file,
input_arrays,
output_arrays,
input_shapes=None):
"""Creates a TFLiteConverter class from a file containing a frozen GraphDef.
Args:
graph_def_file: Full filepath of file containing frozen GraphDef.
input_arrays: List of input tensors to freeze graph with.
output_arrays: List of output tensors to freeze graph with.
input_shapes: Dict of strings representing input tensor names to list of
integers representing input shapes (e.g., {"foo" : [1, 16, 16, 3]}).
Automatically determined when input shapes is None (e.g., {"foo" :
None}). (default None)
Returns:
TFLiteConverter class.
Raises:
IOError:
File not found.
Unable to parse input file.
ValueError:
The graph is not frozen.
input_arrays or output_arrays contains an invalid tensor name.
input_shapes is not correctly defined when required
"""
with _ops.Graph().as_default():
with _session.Session() as sess:
# Read GraphDef from file.
if not gfile.Exists(graph_def_file):
raise IOError("File '{0}' does not exist.".format(graph_def_file))
with gfile.GFile(graph_def_file, "rb") as f:
file_content = f.read()
try:
graph_def = _graph_pb2.GraphDef()
graph_def.ParseFromString(file_content)
except (_text_format.ParseError, DecodeError):
try:
print("Ignore 'tcmalloc: large alloc' warnings.")
if not isinstance(file_content, str):
if PY2:
file_content = six.ensure_binary(file_content, "utf-8")
else:
file_content = six.ensure_text(file_content, "utf-8")
graph_def = _graph_pb2.GraphDef()
_text_format.Merge(file_content, graph_def)
except (_text_format.ParseError, DecodeError):
raise IOError(
"Unable to parse input file '{}'.".format(graph_def_file))
# Handles models with custom TFLite ops that cannot be resolved in
# TensorFlow.
load_model_in_session = True
try:
_import_graph_def(graph_def, name="")
except _NotFoundError:
load_model_in_session = False
if load_model_in_session:
# Check if graph is frozen.
if not _is_frozen_graph(sess):
raise ValueError("Please freeze the graph using freeze_graph.py.")
# Get input and output tensors.
input_tensors = _get_tensors_from_tensor_names(
sess.graph, input_arrays)
output_tensors = _get_tensors_from_tensor_names(
sess.graph, output_arrays)
_set_tensor_shapes(input_tensors, input_shapes)
return cls(sess.graph_def, input_tensors, output_tensors)
else:
if not input_shapes:
raise ValueError("input_shapes must be defined for this model.")
if set(input_arrays) != set(input_shapes.keys()):
raise ValueError("input_shapes must contain a value for each item "
"in input_array.")
input_arrays_with_shape = [
(name, input_shapes[name]) for name in input_arrays
]
return cls(
graph_def,
input_tensors=None,
output_tensors=None,
input_arrays_with_shape=input_arrays_with_shape,
output_arrays=output_arrays)
@classmethod
def from_saved_model(cls,
saved_model_dir,
input_arrays=None,
input_shapes=None,
output_arrays=None,
tag_set=None,
signature_key=None):
"""Creates a TFLiteConverter class from a SavedModel.
Args:
saved_model_dir: SavedModel directory to convert.
input_arrays: List of input tensors to freeze graph with. Uses input
arrays from SignatureDef when none are provided. (default None)
input_shapes: Dict of strings representing input tensor names to list of
integers representing input shapes (e.g., {"foo" : [1, 16, 16, 3]}).
Automatically determined when input shapes is None (e.g., {"foo" :
None}). (default None)
output_arrays: List of output tensors to freeze graph with. Uses output
arrays from SignatureDef when none are provided. (default None)
tag_set: Set of tags identifying the MetaGraphDef within the SavedModel to
analyze. All tags in the tag set must be present. (default
{tf.saved_model.SERVING})
signature_key: Key identifying SignatureDef containing inputs and outputs.
(default tf.saved_model.DEFAULT_SERVING_SIGNATURE_DEF_KEY)
Returns:
TFLiteConverter class.
"""
if tag_set is None:
tag_set = set([_tag_constants.SERVING])
if signature_key is None:
signature_key = _signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY
saved_model_converter = TFLiteSavedModelConverter(saved_model_dir, tag_set,
[signature_key])
if saved_model_converter.saved_model_dir:
return saved_model_converter
result = _freeze_saved_model(saved_model_dir, input_arrays, input_shapes,
output_arrays, tag_set, signature_key)
return cls(
graph_def=result[0],
input_tensors=result[1],
output_tensors=result[2],
experimental_debug_info_func=_build_debug_info_func(result[3]))
@classmethod
def from_keras_model_file(cls,
model_file,
input_arrays=None,
input_shapes=None,
output_arrays=None,
custom_objects=None):
"""Creates a TFLiteConverter class from a tf.keras model file.
Args:
model_file: Full filepath of HDF5 file containing the tf.keras model.
input_arrays: List of input tensors to freeze graph with. Uses input
arrays from SignatureDef when none are provided. (default None)
input_shapes: Dict of strings representing input tensor names to list of
integers representing input shapes (e.g., {"foo" : [1, 16, 16, 3]}).
Automatically determined when input shapes is None (e.g., {"foo" :
None}). (default None)
output_arrays: List of output tensors to freeze graph with. Uses output
arrays from SignatureDef when none are provided. (default None)
custom_objects: Dict mapping names (strings) to custom classes or
functions to be considered during model deserialization. (default None)
Returns:
TFLiteConverter class.
"""
return TFLiteKerasModelConverter(model_file, input_arrays, input_shapes,
output_arrays, custom_objects)
# pylint: disable=useless-super-delegation
def convert(self):
"""Converts a TensorFlow GraphDef based on instance variables.
Returns:
The converted data in serialized format. Either a TFLite Flatbuffer or a
Graphviz graph depending on value in `output_format`.
Raises:
ValueError:
Input shape is not specified.
None value for dimension in input_tensor.
"""
return super(TFLiteConverter, self).convert()
@_tf_export(v1=["lite.TocoConverter"])
class TocoConverter(object):
"""Convert a TensorFlow model into `output_format` using TOCO.
This class has been deprecated. Please use `lite.TFLiteConverter` instead.
"""
@classmethod
@_deprecation.deprecated(None,
"Use `lite.TFLiteConverter.from_session` instead.")
def from_session(cls, sess, input_tensors, output_tensors):
"""Creates a TocoConverter class from a TensorFlow Session."""
return TFLiteConverter.from_session(sess, input_tensors, output_tensors)
@classmethod
@_deprecation.deprecated(
None, "Use `lite.TFLiteConverter.from_frozen_graph` instead.")
def from_frozen_graph(cls,
graph_def_file,
input_arrays,
output_arrays,
input_shapes=None):
"""Creates a TocoConverter class from a file containing a frozen graph."""
return TFLiteConverter.from_frozen_graph(graph_def_file, input_arrays,
output_arrays, input_shapes)
@classmethod
@_deprecation.deprecated(
None, "Use `lite.TFLiteConverter.from_saved_model` instead.")
def from_saved_model(cls,
saved_model_dir,
input_arrays=None,
input_shapes=None,
output_arrays=None,
tag_set=None,
signature_key=None):
"""Creates a TocoConverter class from a SavedModel."""
return TFLiteConverter.from_saved_model(saved_model_dir, input_arrays,
input_shapes, output_arrays,
tag_set, signature_key)
@classmethod
@_deprecation.deprecated(
None, "Use `lite.TFLiteConverter.from_keras_model_file` instead.")
def from_keras_model_file(cls,
model_file,
input_arrays=None,
input_shapes=None,
output_arrays=None):
"""Creates a TocoConverter class from a tf.keras model file."""
return TFLiteConverter.from_keras_model_file(model_file, input_arrays,
input_shapes, output_arrays)
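# Migration sketch: every TocoConverter factory above simply forwards to the
# corresponding TFLiteConverter factory, so for example
#   tf.compat.v1.lite.TocoConverter.from_saved_model(saved_model_dir)
# can be replaced one-for-one with
#   tf.compat.v1.lite.TFLiteConverter.from_saved_model(saved_model_dir)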
|
the-stack_0_23737 | """
Genetic Algorithms for Digital Signal Processing
"""
import numpy as np
"""
cal_pop_fitness: Calculating the population fitness for Digital Signal Processing
The Signal to Noise Ratio (SNR) was used as the fitness function, with higher signal to noise ratios giving better results
"""
def cal_pop_fitness(Waveform, pop):
    # Calculate the fitness value of each solution in the current population.
    # Fitness is evaluated with the waveform's PM() method (the SNR-based measure described above).
    fitness = np.empty((pop.shape[0], 1))
    for i in range(pop.shape[0]):
        fitness[i, 0] = Waveform.PM(pop[i])
return fitness
"""
select_mating_pool: Choose the mating pool for the parent genomes
The parents are returned
"""
def select_mating_pool(pop, fitness, num_parents):
# Selecting the best individuals in the current generation as parents for producing the offspring of the next generation.
parents = np.empty((num_parents, pop.shape[1]))
for parent_num in range(num_parents):
        # Take the individual with the best remaining fitness, then push its
        # fitness very low so the next best one is picked on the next iteration.
max_fitness_idx = np.where(fitness == np.max(fitness))
max_fitness_idx = max_fitness_idx[0][0]
parents[parent_num, :] = pop[max_fitness_idx, :]
fitness[max_fitness_idx] = -99999999999
return parents
"""
crossover: The crossover function swaps the genes of the parents
to find more optimal genes.
"""
def crossover(parents, offspring_size):
offspring = np.empty(offspring_size)
# The point at which crossover takes place between two parents. Usually, it is at the center.
crossover_point = np.uint8(offspring_size[1]/2) # Gene Swapping here
    # Swap the genes of the paired parents.
for k in range(offspring_size[0]):
# Index of the first parent to mate.
parent1_idx = k%parents.shape[0]
# Index of the second parent to mate.
parent2_idx = (k+1)%parents.shape[0]
# The new offspring will have its first half of its genes taken from the first parent.
offspring[k, 0:crossover_point] = parents[parent1_idx, 0:crossover_point]
# The new offspring will have its second half of its genes taken from the second parent.
offspring[k, crossover_point:] = parents[parent2_idx, crossover_point:]
return offspring
"""
mutation: Mutation adds random variation to genes to find
other gene combinations
"""
def mutation(offspring_crossover, num_mutations=1):
mutations_counter = np.uint8(offspring_crossover.shape[1] / num_mutations)
# Mutation changes a number of genes as defined by the num_mutations argument. The changes are random.
for idx in range(offspring_crossover.shape[0]):
gene_idx = mutations_counter - 1
for mutation_num in range(num_mutations):
# The random value to be added to the gene.
random_value = np.random.uniform(-1.0, 1.0, 1)
offspring_crossover[idx, gene_idx] = offspring_crossover[idx, gene_idx] + random_value
gene_idx = gene_idx + mutations_counter
return offspring_crossover
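# End-to-end sketch of one GA generation using the helpers in this module.
# `waveform` is assumed to be any object exposing a PM(genome) fitness method,
# as required by cal_pop_fitness above; sizes are illustrative only.
#   pop = create_population((8, 2))                     # 8 genomes of 2 genes (f1, f2)
#   fitness = cal_pop_fitness(waveform, pop)
#   parents = select_mating_pool(pop, fitness, num_parents=4)
#   offspring = crossover(parents, offspring_size=(4, pop.shape[1]))
#   offspring = mutation(offspring, num_mutations=1)
#   pop = np.vstack((parents, offspring))               # next generation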
"""
create_population: The create_population function initialises the population data
with appropriate data values.
"""
def create_population(pop_size):
f1 = np.random.uniform(low=30, high=50, size=(pop_size[0], 1))
f2 = np.random.uniform(low=50, high=100, size=(pop_size[0], 1))
# TW = np.random.uniform(low=0, high=5, size=(pop_size[0], 1))
# BW = np.random.uniform(low=5, high=10, size=(pop_size[0], 1))
return np.column_stack((f1, f2)) |
the-stack_0_23738 | #!/usr/bin/env python
#
# Electrum - lightweight Bitcoin client
# Copyright (C) 2014 Thomas Voegtlin
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import os
import hmac
import math
import hashlib
import unicodedata
import string
import ecdsa
import pbkdf2
from util import print_error
from rubycoin import is_old_seed, is_new_seed
import version
import i18n
# http://www.asahi-net.or.jp/~ax2s-kmtn/ref/unicode/e_asia.html
CJK_INTERVALS = [
(0x4E00, 0x9FFF, 'CJK Unified Ideographs'),
(0x3400, 0x4DBF, 'CJK Unified Ideographs Extension A'),
(0x20000, 0x2A6DF, 'CJK Unified Ideographs Extension B'),
(0x2A700, 0x2B73F, 'CJK Unified Ideographs Extension C'),
(0x2B740, 0x2B81F, 'CJK Unified Ideographs Extension D'),
(0xF900, 0xFAFF, 'CJK Compatibility Ideographs'),
(0x2F800, 0x2FA1D, 'CJK Compatibility Ideographs Supplement'),
(0x3190, 0x319F , 'Kanbun'),
(0x2E80, 0x2EFF, 'CJK Radicals Supplement'),
(0x2F00, 0x2FDF, 'CJK Radicals'),
(0x31C0, 0x31EF, 'CJK Strokes'),
(0x2FF0, 0x2FFF, 'Ideographic Description Characters'),
(0xE0100, 0xE01EF, 'Variation Selectors Supplement'),
(0x3100, 0x312F, 'Bopomofo'),
(0x31A0, 0x31BF, 'Bopomofo Extended'),
(0xFF00, 0xFFEF, 'Halfwidth and Fullwidth Forms'),
(0x3040, 0x309F, 'Hiragana'),
(0x30A0, 0x30FF, 'Katakana'),
(0x31F0, 0x31FF, 'Katakana Phonetic Extensions'),
(0x1B000, 0x1B0FF, 'Kana Supplement'),
(0xAC00, 0xD7AF, 'Hangul Syllables'),
(0x1100, 0x11FF, 'Hangul Jamo'),
(0xA960, 0xA97F, 'Hangul Jamo Extended A'),
(0xD7B0, 0xD7FF, 'Hangul Jamo Extended B'),
(0x3130, 0x318F, 'Hangul Compatibility Jamo'),
(0xA4D0, 0xA4FF, 'Lisu'),
(0x16F00, 0x16F9F, 'Miao'),
(0xA000, 0xA48F, 'Yi Syllables'),
(0xA490, 0xA4CF, 'Yi Radicals'),
]
def is_CJK(c):
n = ord(c)
for imin,imax,name in CJK_INTERVALS:
if n>=imin and n<=imax: return True
return False
def normalize_text(seed):
# normalize
seed = unicodedata.normalize('NFKD', unicode(seed))
# lower
seed = seed.lower()
# remove accents
seed = u''.join([c for c in seed if not unicodedata.combining(c)])
# normalize whitespaces
seed = u' '.join(seed.split())
# remove whitespaces between CJK
seed = u''.join([seed[i] for i in range(len(seed)) if not (seed[i] in string.whitespace and is_CJK(seed[i-1]) and is_CJK(seed[i+1]))])
return seed
filenames = {
'en':'english.txt',
'es':'spanish.txt',
'ja':'japanese.txt',
'pt':'portuguese.txt',
'zh':'chinese_simplified.txt'
}
class Mnemonic(object):
# Seed derivation no longer follows BIP39
# Mnemonic phrase uses a hash based checksum, instead of a wordlist-dependent checksum
def __init__(self, lang=None):
lang = lang or 'en'
print_error('language', lang)
filename = filenames.get(lang[0:2], 'english.txt')
path = os.path.join(os.path.dirname(__file__), 'wordlist', filename)
s = open(path,'r').read().strip()
s = unicodedata.normalize('NFKD', s.decode('utf8'))
lines = s.split('\n')
self.wordlist = []
for line in lines:
line = line.split('#')[0]
line = line.strip(' \r')
assert ' ' not in line
if line:
self.wordlist.append(line)
print_error("wordlist has %d words"%len(self.wordlist))
@classmethod
def mnemonic_to_seed(self, mnemonic, passphrase):
PBKDF2_ROUNDS = 2048
mnemonic = normalize_text(mnemonic)
passphrase = normalize_text(passphrase)
return pbkdf2.PBKDF2(mnemonic, 'electrum' + passphrase, iterations = PBKDF2_ROUNDS, macmodule = hmac, digestmodule = hashlib.sha512).read(64)
def mnemonic_encode(self, i):
n = len(self.wordlist)
words = []
while i:
x = i%n
i = i/n
words.append(self.wordlist[x])
return ' '.join(words)
def get_suggestions(self, prefix):
for w in self.wordlist:
if w.startswith(prefix):
yield w
def mnemonic_decode(self, seed):
n = len(self.wordlist)
words = seed.split()
i = 0
while words:
w = words.pop()
k = self.wordlist.index(w)
i = i*n + k
return i
def check_seed(self, seed, custom_entropy):
assert is_new_seed(seed)
i = self.mnemonic_decode(seed)
return i % custom_entropy == 0
def make_seed(self, num_bits=128, prefix=version.SEED_PREFIX, custom_entropy=1):
        # increase num_bits in order to obtain a uniform distribution for the last word
bpw = math.log(len(self.wordlist), 2)
num_bits = int(math.ceil(num_bits/bpw)) * bpw
# handle custom entropy; make sure we add at least 16 bits
n_custom = int(math.ceil(math.log(custom_entropy, 2)))
n = max(16, num_bits - n_custom)
print_error("make_seed", prefix, "adding %d bits"%n)
my_entropy = ecdsa.util.randrange(pow(2, n))
nonce = 0
while True:
nonce += 1
i = custom_entropy * (my_entropy + nonce)
seed = self.mnemonic_encode(i)
assert i == self.mnemonic_decode(seed)
if is_old_seed(seed):
continue
if is_new_seed(seed, prefix):
break
print_error('%d words'%len(seed.split()))
return seed
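# Usage sketch (Python 2, matching this module): generate a new seed phrase and
# derive the 64-byte wallet seed from it. The passphrase is optional.
#   m = Mnemonic('en')
#   phrase = m.make_seed(num_bits=128)
#   seed_bytes = Mnemonic.mnemonic_to_seed(phrase, passphrase='')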
|
the-stack_0_23741 |
import numpy as np
import collections as c
import torch
class Buffer(object):
def __init__(self, o_dim, a_dim, bs, device='cpu'):
self.o_dim = o_dim
self.a_dim = a_dim
self.bs = bs
self.device = device
self.o_buf, self.a_buf, self.r_buf, self.logpb_buf, self.distb_buf, self.done_buf = \
c.deque(), c.deque(), c.deque(), c.deque(), c.deque(), c.deque()
self.op = np.zeros((1, o_dim), dtype=np.float32)
def store(self, o, a, r, op, logpb, dist, done):
self.o_buf.append(o)
self.a_buf.append(a)
self.r_buf.append(r)
self.logpb_buf.append(logpb)
self.distb_buf.append(dist)
self.done_buf.append(float(done))
self.op[:] = op
def pop(self):
self.o_buf.popleft()
self.a_buf.popleft()
self.r_buf.popleft()
self.logpb_buf.popleft()
self.distb_buf.popleft()
self.done_buf.popleft()
def clear(self):
self.o_buf.clear()
self.a_buf.clear()
self.r_buf.clear()
self.logpb_buf.clear()
self.distb_buf.clear()
self.done_buf.clear()
def get(self, dist_stack):
rang = range(self.bs)
os = torch.as_tensor(np.array([self.o_buf[i] for i in rang]), dtype=torch.float32, device=self.device).view(-1, self.o_dim)
acts = torch.as_tensor(np.array([self.a_buf[i] for i in rang]), dtype=torch.float32, device=self.device).view(-1, self.a_dim)
rs = torch.as_tensor(np.array([self.r_buf[i] for i in rang]), dtype=torch.float32, device=self.device).view(-1, 1)
op = torch.as_tensor(self.op, device=self.device).view(-1, self.o_dim)
logpbs = torch.as_tensor(np.array([self.logpb_buf[i] for i in rang]), dtype=torch.float32, device=self.device).view(-1, 1)
distbs = dist_stack([self.distb_buf[i] for i in rang], device=self.device)
dones = torch.as_tensor(np.array([self.done_buf[i] for i in rang]), dtype=torch.float32, device=self.device).view(-1, 1)
return os, acts, rs, op, logpbs, distbs, dones
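# Usage sketch (illustrative; `o`, `a`, `r`, ... come from an environment rollout and
# `dist_stack` must match the distribution type stored by the policy):
#   buf = Buffer(o_dim=4, a_dim=1, bs=32)
#   buf.store(o, a, r, op, logpb, dist, done)            # repeat until bs items are stored
#   os, acts, rs, op, logpbs, distbs, dones = buf.get(dist_stack)
#   buf.pop()                                            # slide the window by one transition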
|
the-stack_0_23742 | import _plotly_utils.basevalidators
class DomainValidator(_plotly_utils.basevalidators.CompoundValidator):
def __init__(
self, plotly_name='domain', parent_name='layout.grid', **kwargs
):
super(DomainValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
data_class_str=kwargs.pop('data_class_str', 'Domain'),
data_docs=kwargs.pop(
'data_docs', """
x
Sets the horizontal domain of this grid subplot
(in plot fraction). The first and last cells
end exactly at the domain edges, with no grout
around the edges.
y
Sets the vertical domain of this grid subplot
(in plot fraction). The first and last cells
end exactly at the domain edges, with no grout
around the edges.
"""
),
**kwargs
)
|
the-stack_0_23743 | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import pytest
from tests.test_utils.amazon_system_helpers import AWS_DAG_FOLDER, AmazonSystemTest
@pytest.mark.backend("postgres", "mysql")
class ECSSystemTest(AmazonSystemTest):
"""
ECS System Test to run and test example ECS dags
Required variables.env file content (from your account):
# Auto-export all variables
set -a
# aws parameters
REGION_NAME="eu-west-1"
REGISTRY_ID="123456789012"
IMAGE="alpine:3.9"
SUBNET_ID="subnet-068e9654a3c357a"
SECURITY_GROUP_ID="sg-054dc69874a651"
EXECUTION_ROLE_ARN="arn:aws:iam::123456789012:role/FooBarRole"
# remove all created/existing resources flag
# comment out to keep resources or use empty string
# REMOVE_RESOURCES="True"
"""
# should be same as in the example dag
aws_conn_id = "aws_ecs"
cluster = "c"
task_definition = "hello-world"
container = "hello-world-container"
awslogs_group = "/ecs/hello-world"
awslogs_stream_prefix = "prefix_b" # only prefix without container name
@classmethod
def setup_class(cls):
cls.create_connection(
aws_conn_id=cls.aws_conn_id,
region=cls._region_name(),
)
# create ecs cluster if it does not exist
cls.create_ecs_cluster(
aws_conn_id=cls.aws_conn_id,
cluster_name=cls.cluster,
)
# create task_definition if it does not exist
task_definition_exists = cls.is_ecs_task_definition_exists(
aws_conn_id=cls.aws_conn_id,
task_definition=cls.task_definition,
)
if not task_definition_exists:
cls.create_ecs_task_definition(
aws_conn_id=cls.aws_conn_id,
task_definition=cls.task_definition,
container=cls.container,
image=cls._image(),
execution_role_arn=cls._execution_role_arn(),
awslogs_group=cls.awslogs_group,
awslogs_region=cls._region_name(),
awslogs_stream_prefix=cls.awslogs_stream_prefix,
)
@classmethod
def teardown_class(cls):
# remove all created/existing resources in tear down
if cls._remove_resources():
cls.delete_ecs_cluster(
aws_conn_id=cls.aws_conn_id,
cluster_name=cls.cluster,
)
cls.delete_ecs_task_definition(
aws_conn_id=cls.aws_conn_id,
task_definition=cls.task_definition,
)
def test_run_example_dag_ecs_fargate_dag(self):
self.run_dag("ecs_fargate_dag", AWS_DAG_FOLDER)
|
the-stack_0_23746 | import sublime
import sublime_plugin
from os.path import basename, dirname
from .core import Core
from .tools import Tools
class VDeployProjectCommand(sublime_plugin.WindowCommand):
def deploy_dj(self, project_id):
Core().deploy_django_project(project_id)
def deploy_php(self, project_id):
"""Copying project local files to project server files"""
Core().deploy_php_project(project_id)
def deploy_html(self, project_id):
"""Copying project local files to project server files"""
Core().deploy_html_project(project_id)
def deploy_go(self, project_id):
"""Copying project_local_dir/bin/* to project_server_dir"""
Core().deploy_go_project(project_id)
def deploy(self, project):
"""
        Deploy the project according to its type:
deploy_dj, deploy_go, deploy_php, deploy_html
"""
c = Core()
# exclude for applying rights
exclude = ''
for p in project['exclude']:
exclude += " -not -path '*{}*' ".format(p)
# set 755 for all directories inside project directory
cmd = "find {} -type d {} -exec chmod 755 {{}} +".format(
project['project_dir_local'], exclude)
c.run_command_in_subprocess(cmd)
# set 644 for all files inside project directory
if project['type'] != 'GO':
cmd = "find {} -type f {} -exec chmod 644 {{}} +".format(
project['project_dir_local'], exclude)
c.run_command_in_subprocess(cmd)
# run func by project type
func = getattr(self, 'deploy_{}'.format(project['type'].lower()))
return func(project['id'])
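    # The permission pass above expands to shell commands of the form (sketch;
    # paths and exclude patterns depend on the project settings):
    #   find <project_dir_local> -type d -not -path '*<pattern>*' -exec chmod 755 {} +
    #   find <project_dir_local> -type f -not -path '*<pattern>*' -exec chmod 644 {} +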
def onselect(self, index):
if index == -1:
return
project = self.data[index]
self.deploy(project)
def run(self, **kw):
w = sublime.active_window()
# get project path
path = w.project_file_name()
if not path:
sublime.error_message('Project file has not found.')
return
# One level up - config directory with project file:
# config/project.sublime-project
# Two levels up - projectname directory (dirname = project name)
# projectname/config/project.sublime-project
# projectname/project/project_data_files
name = basename(dirname(dirname(path)))
# find project by name
c = Core()
result = c.get_project_by_name(name)
# Can return project or list of projects
if isinstance(result, list):
self.data = result
self.items = [project['name'] for project in result]
Tools.show_quick_panel(self.items, self.onselect)
else:
project = result
self.deploy(project)
|
the-stack_0_23747 | import sys
from test import list_tests
from test.support import cpython_only
import pickle
import unittest
class ListTest(list_tests.CommonTest):
type2test = list
def test_basic(self):
self.assertEqual(list([]), [])
l0_3 = [0, 1, 2, 3]
l0_3_bis = list(l0_3)
self.assertEqual(l0_3, l0_3_bis)
self.assertTrue(l0_3 is not l0_3_bis)
self.assertEqual(list(()), [])
self.assertEqual(list((0, 1, 2, 3)), [0, 1, 2, 3])
self.assertEqual(list(''), [])
self.assertEqual(list('spam'), ['s', 'p', 'a', 'm'])
self.assertEqual(list(x for x in range(10) if x % 2),
[1, 3, 5, 7, 9])
# XXX RUSTPYTHON TODO: catch ooms
if sys.maxsize == 0x7fffffff and False:
# This test can currently only work on 32-bit machines.
# XXX If/when PySequence_Length() returns a ssize_t, it should be
# XXX re-enabled.
# Verify clearing of bug #556025.
# This assumes that the max data size (sys.maxint) == max
# address size this also assumes that the address size is at
# least 4 bytes with 8 byte addresses, the bug is not well
# tested
#
# Note: This test is expected to SEGV under Cygwin 1.3.12 or
# earlier due to a newlib bug. See the following mailing list
# thread for the details:
# http://sources.redhat.com/ml/newlib/2002/msg00369.html
self.assertRaises(MemoryError, list, range(sys.maxsize // 2))
# This code used to segfault in Py2.4a3
x = []
x.extend(-y for y in x)
self.assertEqual(x, [])
def test_keyword_args(self):
with self.assertRaisesRegex(TypeError, 'keyword argument'):
list(sequence=[])
def test_truth(self):
super().test_truth()
self.assertTrue(not [])
self.assertTrue([42])
def test_identity(self):
self.assertTrue([] is not [])
def test_len(self):
super().test_len()
self.assertEqual(len([]), 0)
self.assertEqual(len([0]), 1)
self.assertEqual(len([0, 1, 2]), 3)
@unittest.skip("TODO: RUSTPYTHON, thread 'main' panicked at 'capacity overflow'")
def test_overflow(self):
lst = [4, 5, 6, 7]
n = int((sys.maxsize*2+2) // len(lst))
def mul(a, b): return a * b
def imul(a, b): a *= b
self.assertRaises((MemoryError, OverflowError), mul, lst, n)
self.assertRaises((MemoryError, OverflowError), imul, lst, n)
def test_repr_large(self):
# Check the repr of large list objects
def check(n):
l = [0] * n
s = repr(l)
self.assertEqual(s,
'[' + ', '.join(['0'] * n) + ']')
check(10) # check our checking code
check(1000000)
def test_iterator_pickle(self):
orig = self.type2test([4, 5, 6, 7])
data = [10, 11, 12, 13, 14, 15]
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
# initial iterator
itorig = iter(orig)
d = pickle.dumps((itorig, orig), proto)
it, a = pickle.loads(d)
a[:] = data
self.assertEqual(type(it), type(itorig))
self.assertEqual(list(it), data)
# running iterator
next(itorig)
d = pickle.dumps((itorig, orig), proto)
it, a = pickle.loads(d)
a[:] = data
self.assertEqual(type(it), type(itorig))
self.assertEqual(list(it), data[1:])
# empty iterator
for i in range(1, len(orig)):
next(itorig)
d = pickle.dumps((itorig, orig), proto)
it, a = pickle.loads(d)
a[:] = data
self.assertEqual(type(it), type(itorig))
self.assertEqual(list(it), data[len(orig):])
# exhausted iterator
self.assertRaises(StopIteration, next, itorig)
d = pickle.dumps((itorig, orig), proto)
it, a = pickle.loads(d)
a[:] = data
self.assertEqual(list(it), [])
def test_reversed_pickle(self):
orig = self.type2test([4, 5, 6, 7])
data = [10, 11, 12, 13, 14, 15]
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
# initial iterator
itorig = reversed(orig)
d = pickle.dumps((itorig, orig), proto)
it, a = pickle.loads(d)
a[:] = data
self.assertEqual(type(it), type(itorig))
self.assertEqual(list(it), data[len(orig)-1::-1])
# running iterator
next(itorig)
d = pickle.dumps((itorig, orig), proto)
it, a = pickle.loads(d)
a[:] = data
self.assertEqual(type(it), type(itorig))
self.assertEqual(list(it), data[len(orig)-2::-1])
# empty iterator
for i in range(1, len(orig)):
next(itorig)
d = pickle.dumps((itorig, orig), proto)
it, a = pickle.loads(d)
a[:] = data
self.assertEqual(type(it), type(itorig))
self.assertEqual(list(it), [])
# exhausted iterator
self.assertRaises(StopIteration, next, itorig)
d = pickle.dumps((itorig, orig), proto)
it, a = pickle.loads(d)
a[:] = data
self.assertEqual(list(it), [])
def test_no_comdat_folding(self):
# Issue 8847: In the PGO build, the MSVC linker's COMDAT folding
# optimization causes failures in code that relies on distinct
# function addresses.
class L(list): pass
with self.assertRaises(TypeError):
(3,) + L([1,2])
@cpython_only
def test_preallocation(self):
iterable = [0] * 10
iter_size = sys.getsizeof(iterable)
self.assertEqual(iter_size, sys.getsizeof(list([0] * 10)))
self.assertEqual(iter_size, sys.getsizeof(list(range(10))))
if __name__ == "__main__":
unittest.main()
|
the-stack_0_23749 | from __future__ import annotations # To avoid circular import.
from typing import Callable, List, TYPE_CHECKING
from .ulist import BooleanList
from .typedef import ELEM, LIST_PY
from .ulist import select_bool as _select_bool
from .ulist import select_float as _select_float
from .ulist import select_int as _select_int
from .ulist import select_string as _select_string
if TYPE_CHECKING: # To avoid circular import.
from . import UltraFastList
def select(
conditions: List[UltraFastList],
choices: LIST_PY,
default: ELEM,
) -> UltraFastList:
"""Return a ulist drawn from elements in `choices`, depending on`conditions`.
Args:
conditions (List[UltraFastList]):
The list of conditions which determine from which array in
`choices` the output elements are taken. When multiple conditions
are satisfied, the first one encountered in `conditions` is used.
choices (LIST_PY):
The list of ulist from which the output elements are taken.
It has to be of the same length as `conditions`.
default (ELEM):
The element inserted in output when all conditions evaluate
to False.
Raises:
TypeError:
The type of parameter `default` should be bool, float, int or str!
Returns:
UltraFastList: A ulist object.
Examples
--------
>>> import ulist as ul
>>> arr = ul.arange(6)
>>> arr
UltraFastList([0, 1, 2, 3, 4, 5])
>>> conditions = [arr < 2, arr < 4]
>>> conditions
[
UltraFastList([True, True, False, False, False, False]),
UltraFastList([True, True, True, True, False, False])
]
>>> result = ul.select(conditions, choices=[0, 1], default=2)
>>> result
UltraFastList([0, 0, 1, 1, 2, 2])
"""
assert len(conditions) == len(choices)
if type(default) is bool:
fn: Callable = _select_bool
elif type(default) is float:
fn = _select_float
elif type(default) is int:
fn = _select_int
elif type(default) is str:
fn = _select_string
else:
raise TypeError(
"The type of parameter `default` should be" +
" bool, float, int or str!"
)
_conditions = []
for cond in conditions:
assert isinstance(cond._values, BooleanList)
_conditions.append(cond._values)
result = fn(_conditions, choices, default)
from . import UltraFastList # To avoid circular import.
return UltraFastList(result)
class CaseObject:
"""
    This is designed to implement the `case` method for UltraFastList,
    providing an interface similar to SQL's `case` statement.
"""
def __init__(self, nums: UltraFastList, default: ELEM) -> None:
self._values = nums
self._default = default
self._conditions: List[UltraFastList] = []
self._choices: list = []
def when(
self,
fn: Callable[[UltraFastList], UltraFastList],
then: ELEM
) -> 'CaseObject':
"""Calculate the condition, and keep the condition and element to use.
Args:
fn (Callable[[UltraFastList], UltraFastList]):
Function to calculate the condition.
then (ELEM):
The element to use when the condition is satisfied.
Raises:
TypeError:
Calling parameter `fn` should return a ulist with dtype bool!
TypeError:
The type of parameter `then` should be the same as `default`!
Returns:
CaseObject
"""
cond = fn(self._values)
if cond.dtype != "bool":
raise TypeError(
"Calling parameter `fn` should return a ulist with dtype bool!"
)
self._conditions.append(cond)
if not isinstance(then, type(self._default)):
raise TypeError(
"The type of parameter `then` should be the same as `default`!"
)
self._choices.append(then)
return self
def end(self) -> UltraFastList:
"""Execute the case statement."""
return select(self._conditions, self._choices, self._default)
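# Usage sketch, assuming UltraFastList exposes a `case(default=...)` method that
# constructs this object (names follow the `select` example above):
#   arr = ul.arange(6)
#   result = arr.case(default=2).when(lambda x: x < 2, then=0) \
#                               .when(lambda x: x < 4, then=1).end()
#   # -> UltraFastList([0, 0, 1, 1, 2, 2])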
|
the-stack_0_23750 | """
The question is to group all same integers together - Sort colors in place
The challenge is: - They can give a pivot index
They can also to do this in-place
Generic version of the problem:
- Given an I/P array - rearrange the elements such that all elements less than pivot appear first,
- followed by element equal to the pivot
- followed by elements greater than the pivot
"""
def sortColors(nums):
"""The sort Colors() problem is just a variant of the dutch national flag problem, where the pivot is 1"""
dutch_flag_partition(nums, pivot=1)
def dutch_flag_partition(nums, pivot):
"""Idea is to group the elements in-place"""
n = len(nums)
left = 0
# Group elements smaller than pivot
for i in range(n):
if nums[i] < pivot:
nums[i], nums[left] = nums[left], nums[i]
left += 1
# Second pass group elements larger than the pivot
right = n - 1
for i in reversed(range(n)):
if nums[i] > pivot:
nums[i], nums[right] = nums[right], nums[i]
right -= 1
def dutch_flag_partition_optimized(nums, pivot):
"""
    The idea is:
    1. If the value is less than the pivot - we exchange it with the first pivot occurrence
    2. If the value is equal to the pivot - we advance to the next unclassified element
    3. If the value is greater than the pivot - we exchange it with the last unclassified element
"""
smaller = 0
equal = 0
larger = len(nums) - 1
while equal < larger:
if nums[equal] < pivot:
nums[smaller], nums[equal] = nums[equal], nums[smaller]
smaller += 1
equal += 1
elif nums[equal] == pivot:
equal += 1
elif nums[equal] > pivot:
nums[equal], nums[larger] = nums[larger], nums[equal]
larger -= 1
if __name__ == "__main__":
pass
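    # Minimal self-check (a sketch): group 0s, 1s and 2s in place.
    demo = [2, 0, 2, 1, 1, 0]
    sortColors(demo)
    assert demo == [0, 0, 1, 1, 2, 2], demo
    demo2 = [2, 0, 2, 1, 1, 0]
    dutch_flag_partition_optimized(demo2, pivot=1)
    assert demo2 == [0, 0, 1, 1, 2, 2], demo2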
|
the-stack_0_23751 | #!/usr/bin/env python
# Perform radial raster scan on specified target(s). Mostly used for beam pattern mapping.
import numpy as np
from katcorelib import (standard_script_options, verify_and_connect,
collect_targets, start_session, user_logger)
# Set up standard script options
description = 'Perform radial raster scan across one or more sources. ' \
'Mostly used for beam pattern mapping and on-the-fly mapping. ' \
'Some options are **required**.'
parser = standard_script_options(usage="%prog [options] <'target/catalogue'> [<'target/catalogue'> ...]",
description=description)
# Add experiment-specific options
parser.add_option('-k', '--num-scans', type='int', default=3,
help='Number of scans across target (default=%default)')
parser.add_option('-t', '--scan-duration', type='float', default=20.0,
help='Minimum duration of each scan across target, in seconds (default=%default)')
parser.add_option('-l', '--scan-extent', type='float', default=2.0,
help='Length of each scan, in degrees (default=%default)')
# Set default value for any option (both standard and experiment-specific options)
parser.set_defaults(description='Radial raster scan')
# Parse the command line
opts, args = parser.parse_args()
if len(args) == 0:
raise ValueError("Please specify at least one target argument via name ('Cygnus A'), "
"description ('azel, 20, 30') or catalogue file name ('sources.csv')")
# Check options and build KAT configuration, connecting to proxies and devices
with verify_and_connect(opts) as kat:
observation_sources = collect_targets(kat, args)
# Start capture session, which creates HDF5 file
with start_session(kat, **vars(opts)) as session:
session.standard_setup(**vars(opts))
session.capture_start()
for target in observation_sources:
session.label('raster')
user_logger.info("Initiating radial scan (%d %g-second scans "
"extending %g degrees) on target '%s'",
opts.num_scans, opts.scan_duration,
opts.scan_extent, target.name)
# Calculate average time that noise diode is operated per scan, to add to scan duration in check below
nd_time = session.nd_params['on'] + session.nd_params['off']
nd_time *= opts.scan_duration / max(session.nd_params['period'], opts.scan_duration)
nd_time = nd_time if session.nd_params['period'] >= 0 else 0.
# Check whether the target will be visible for entire duration of radial scan
if not session.target_visible(target, (opts.scan_duration + nd_time) * opts.num_scans):
user_logger.warning("Skipping radial scan, as target '%s' will be below horizon",
target.name)
continue
# Iterate through angles and scan across target
for ind, angle in enumerate(np.arange(0., np.pi, np.pi / opts.num_scans)):
offset = np.array((np.cos(angle), -np.sin(angle))) * opts.scan_extent / 2. * (-1) ** ind
session.scan(target, duration=opts.scan_duration, start=-offset, end=offset, index=ind,
projection=opts.projection, announce=False)
|
the-stack_0_23752 | import os
import cv2
import torch
import argparse
from torch.nn import functional as F
from model.RIFE_HDv3 import Model
import warnings
warnings.filterwarnings("ignore")
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
torch.set_grad_enabled(False)
if torch.cuda.is_available():
torch.backends.cudnn.enabled = True
torch.backends.cudnn.benchmark = True
parser = argparse.ArgumentParser(description='Interpolation for a pair of images')
parser.add_argument('--img', dest='img', nargs=2, required=True)
parser.add_argument('--exp', default=4, type=int)
parser.add_argument('--ratio', default=0, type=float, help='inference ratio between two images with 0 - 1 range')
parser.add_argument('--rthreshold', default=0.02, type=float, help='returns image when actual ratio falls in given range threshold')
parser.add_argument('--rmaxcycles', default=8, type=int, help='limit max number of bisectional cycles')
parser.add_argument('--model', dest='modelDir', type=str, default='train_log', help='directory with trained model files')
args = parser.parse_args()
model = Model()
model.load_model(args.modelDir, -1)
print("Loaded v3.x HD model.")
model.eval()
model.device()
if args.img[0].endswith('.exr') and args.img[1].endswith('.exr'):
img0 = cv2.imread(args.img[0], cv2.IMREAD_COLOR | cv2.IMREAD_ANYDEPTH)
img1 = cv2.imread(args.img[1], cv2.IMREAD_COLOR | cv2.IMREAD_ANYDEPTH)
img0 = (torch.tensor(img0.transpose(2, 0, 1)).to(device)).unsqueeze(0)
img1 = (torch.tensor(img1.transpose(2, 0, 1)).to(device)).unsqueeze(0)
else:
img0 = cv2.imread(args.img[0], cv2.IMREAD_UNCHANGED)
img1 = cv2.imread(args.img[1], cv2.IMREAD_UNCHANGED)
img0 = (torch.tensor(img0.transpose(2, 0, 1)).to(device) / 255.).unsqueeze(0)
img1 = (torch.tensor(img1.transpose(2, 0, 1)).to(device) / 255.).unsqueeze(0)
n, c, h, w = img0.shape
ph = ((h - 1) // 32 + 1) * 32
pw = ((w - 1) // 32 + 1) * 32
padding = (0, pw - w, 0, ph - h)
img0 = F.pad(img0, padding)
img1 = F.pad(img1, padding)
if args.ratio:
img_list = [img0]
img0_ratio = 0.0
img1_ratio = 1.0
if args.ratio <= img0_ratio + args.rthreshold / 2:
middle = img0
elif args.ratio >= img1_ratio - args.rthreshold / 2:
middle = img1
else:
tmp_img0 = img0
tmp_img1 = img1
for inference_cycle in range(args.rmaxcycles):
middle = model.inference(tmp_img0, tmp_img1)
middle_ratio = ( img0_ratio + img1_ratio ) / 2
if args.ratio - (args.rthreshold / 2) <= middle_ratio <= args.ratio + (args.rthreshold / 2):
break
if args.ratio > middle_ratio:
tmp_img0 = middle
img0_ratio = middle_ratio
else:
tmp_img1 = middle
img1_ratio = middle_ratio
img_list.append(middle)
img_list.append(img1)
else:
img_list = [img0, img1]
for i in range(args.exp):
tmp = []
for j in range(len(img_list) - 1):
mid = model.inference(img_list[j], img_list[j + 1])
tmp.append(img_list[j])
tmp.append(mid)
tmp.append(img1)
img_list = tmp
if not os.path.exists('output'):
os.mkdir('output')
for i in range(len(img_list)):
if args.img[0].endswith('.exr') and args.img[1].endswith('.exr'):
cv2.imwrite('output/img{}.exr'.format(i), (img_list[i][0]).cpu().numpy().transpose(1, 2, 0)[:h, :w], [cv2.IMWRITE_EXR_TYPE, cv2.IMWRITE_EXR_TYPE_HALF])
else:
cv2.imwrite('output/img{}.png'.format(i), (img_list[i][0] * 255).byte().cpu().numpy().transpose(1, 2, 0)[:h, :w])
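# Example invocations (a sketch; the script and image file names are illustrative):
#   python3 inference_img.py --img frame0.png frame1.png --exp 4
#   python3 inference_img.py --img frame0.png frame1.png --ratio 0.5
# Interpolated frames are written to ./output as img0, img1, ... imgN.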
|
the-stack_0_23753 | # Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===========================================================================
"""generate json desc for squeeze"""
from ._utils import Expander, ExpanderInfoValidator as VLD
@VLD.check_attrs('axis')
class Squeeze(Expander):
"""Squeeze expander"""
def _expand(self, graph_builder):
input_x = self.inputs[0]
out_shape = self.infer_shape(input_x.shape, self.attrs['axis'])
result = graph_builder.emit('Reshape', [input_x], attrs={'shape': out_shape})
return result
@staticmethod
def infer_shape(shape, axis):
"""infer shape for squeeze"""
def squeeze_axis(shape, axis):
if not axis:
out_shape = [d for d in shape if d != 1]
else:
out_shape = []
for idx, dim in enumerate(shape):
if idx not in axis:
out_shape.append(dim)
if not out_shape:
out_shape = [1]
return out_shape
if isinstance(shape, (list, tuple)):
if isinstance(axis, int):
axis = [axis]
if isinstance(axis, (list, tuple)):
return squeeze_axis(shape, axis)
raise ValueError("Invalid axis for Squeeze.")
|
the-stack_0_23754 | # Copyright 2020 Bluefog Team. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from typing import List, Callable, Optional
import atexit
import contextlib
import ctypes
import logging
import networkx
import bluefog.common.util as util
import bluefog.common.topology_util as topology_util
logger = logging.getLogger("bluefog")
logger.setLevel(logging.INFO)
ch = logging.StreamHandler()
ch.setLevel(logging.INFO)
formatter = logging.Formatter("%(asctime)-15s %(levelname)s %(message)s")
ch.setFormatter(formatter)
logger.addHandler(ch)
class BlueFogBasics(object):
"""Wrapper class for the basic BlueFog API."""
def __init__(self, pkg_path, *args):
full_path = util.get_extension_full_path(pkg_path, *args)
self._topology = None
self._machine_topology = None
self._MPI_LIB_CTYPES = ctypes.CDLL(full_path, mode=ctypes.RTLD_GLOBAL)
self._is_topo_weighted = False
self._is_machine_topo_weighted = False
self.warn_timeline = False
def init(
self,
topology_fn: Optional[Callable[[int], networkx.DiGraph]] = None,
is_weighted: bool = False,
):
"""A function that initializes BlueFog.
Args:
          topology_fn: A callable function that takes the size as input and returns a
            networkx.DiGraph object to decide the topology. If not provided,
            a default exponential graph (base 2) structure is used.
          is_weighted: If set to true, the neighbor ops (e.g. win_update, neighbor_allreduce)
            will execute the weighted average instead, where the weights are the values used
            in the topology matrix (including self).
"""
self._MPI_LIB_CTYPES.bluefog_init()
if topology_fn:
topo = topology_fn(self.size())
else:
topo = topology_util.ExponentialGraph(self.size())
self.set_topology(topo, is_weighted)
atexit.register(self.shutdown)
def shutdown(self) -> None:
"""A function that shuts BlueFog down."""
self._MPI_LIB_CTYPES.bluefog_shutdown()
self._topology = None
self._machine_topology = None
def size(self) -> int:
"""A function that returns the number of BlueFog processes.
Returns:
An integer scalar containing the number of BlueFog processes.
"""
size = self._MPI_LIB_CTYPES.bluefog_size()
if size == -1:
raise ValueError("BlueFog has not been initialized; use bf.init().")
return size
def local_size(self) -> int:
"""A function that returns the number of BlueFog processes within the
node the current process is running on.
Returns:
An integer scalar containing the number of local BlueFog processes.
"""
local_size = self._MPI_LIB_CTYPES.bluefog_local_size()
if local_size == -1:
raise ValueError("BlueFog has not been initialized; use bf.init().")
return local_size
def rank(self) -> int:
"""A function that returns the BlueFog rank of the calling process.
Returns:
An integer scalar with the BlueFog rank of the calling process.
"""
rank = self._MPI_LIB_CTYPES.bluefog_rank()
if rank == -1:
raise ValueError("BlueFog has not been initialized; use bf.init().")
return rank
def local_rank(self) -> int:
"""A function that returns the local BlueFog rank of the calling process, within the
node that it is running on. For example, if there are seven processes running
on a node, their local ranks will be zero through six, inclusive.
Returns:
An integer scalar with the local BlueFog rank of the calling process.
"""
local_rank = self._MPI_LIB_CTYPES.bluefog_local_rank()
if local_rank == -1:
raise ValueError("BlueFog has not been initialized; use bf.init().")
return local_rank
def machine_rank(self) -> int:
"""A function that returns the BlueFog rank of the machine.
Returns:
An integer scalar with the BlueFog rank of the machine.
"""
        # TODO(hhb) This only supports the homogeneous environment now. Currently it assumes all
# machines share the same local_size()
assert self.is_homogeneous(), "Only supports homogeneous environment now"
return self.rank() // self.local_size()
def machine_size(self) -> int:
"""A function that returns the BlueFog size of the machine.
Returns:
An integer scalar with the BlueFog size of the machine.
"""
        # TODO(hhb) This only supports the homogeneous environment now. Currently it assumes all
# machines share the same local_size()
assert self.is_homogeneous(), "Only supports homogeneous environment now"
return self.size() // self.local_size()
def unified_mpi_window_model_supported(self) -> bool:
"""Returns a boolean value to indicate the MPI_Win model is unified or not.
        Unfortunately, it is a collective call. We have to create a fake win to get
this information.
"""
is_unified = self._MPI_LIB_CTYPES.bluefog_unified_mpi_window_model_supported()
if is_unified == -1:
raise ValueError("BlueFog has not been initialized; use bf.init().")
return is_unified == 1
def mpi_threads_supported(self) -> bool:
"""A function that returns a flag indicating whether MPI multi-threading is supported.
If MPI multi-threading is supported, users may mix and match BlueFog usage with other
MPI libraries, such as `mpi4py`.
Returns:
A boolean value indicating whether MPI multi-threading is supported.
"""
mpi_threads_supported = self._MPI_LIB_CTYPES.bluefog_mpi_threads_supported()
if mpi_threads_supported == -1:
raise ValueError("BlueFog has not been initialized; use bf.init().")
return mpi_threads_supported
def is_topo_weighted(self) -> bool:
"""A function that returns if the virtual topology weights are used
Returns:
A boolean value indicating if the topology weights are used.
"""
return self._is_topo_weighted
def is_machine_topo_weighted(self) -> bool:
"""A function that returns if the virtual machine topology weights are used
Returns:
A boolean value indicating if the machine topology weights are used.
"""
return self._is_machine_topo_weighted
def load_machine_topology(self) -> networkx.DiGraph:
"""A function that returns the virtual topology for the machine.
Returns:
machine_topology: networkx.DiGraph.
"""
return self._machine_topology
def load_topology(self) -> networkx.DiGraph:
"""A funnction that returns the virtual topology MPI used.
Returns:
topology: networkx.DiGraph.
"""
return self._topology
def in_neighbor_machine_ranks(self) -> List[int]:
"""Return the machine ranks of all in-neighbors.
        Notice: Whether or not a self-loop is present, the self machine rank will not be included.
Returns:
in_neighbor_machine_ranks
"""
if self._machine_topology is None:
return []
_machine_rank = self.machine_rank()
in_neighbor_machine_ranks = [
r
for r in self._machine_topology.predecessors(self.machine_rank())
if r != _machine_rank
]
return in_neighbor_machine_ranks
def in_neighbor_ranks(self) -> List[int]:
"""Return the ranks of all in-neighbors.
        Notice: Whether or not a self-loop is present, the self rank will not be included.
Returns:
in_neighbor_ranks
"""
if self._topology is None:
return []
_rank = self.rank()
in_neighbor_ranks = [
r for r in self._topology.predecessors(self.rank()) if r != _rank
]
return in_neighbor_ranks
def out_neighbor_machine_ranks(self) -> List[int]:
"""Return the machine ranks of all out-neighbors.
        Notice: Whether or not a self-loop is present, the self machine rank will not be included.
Returns:
out_neighbor_machine_ranks
"""
if self._machine_topology is None:
return []
_machine_rank = self.machine_rank()
out_neighbor_machine_ranks = [
r
for r in self._machine_topology.successors(self.machine_rank())
if r != _machine_rank
]
return out_neighbor_machine_ranks
def out_neighbor_ranks(self) -> List[int]:
"""Return the ranks of all out-neighbors.
        Notice: Whether or not a self-loop is present, the self rank will not be included.
Returns:
out_neighbor_ranks
"""
if self._topology is None:
return []
_rank = self.rank()
out_neighbor_ranks = [
r for r in self._topology.successors(self.rank()) if r != _rank
]
return out_neighbor_ranks
def set_machine_topology(
self, topology: Optional[networkx.DiGraph], is_weighted: bool = False
) -> bool:
"""A function that sets the virtual machine topology.
Args:
            topology: A networkx.DiGraph object to decide the machine topology. It shall not be None.
            is_weighted: If set to true, hierarchical_neighbor_allreduce will execute the
                weighted average instead, where the weights are the values used in the machine
                topology matrix (including the self weight).
Returns:
A boolean value that whether machine topology is set correctly or not.
Example:
>>> import bluefog.torch as bf
>>> from bluefog.common import topology_util
>>> bf.init()
>>> bf.set_machine_topology(topology_util.RingGraph(bf.machine_size()))
"""
if topology is None:
raise ValueError("Machine topology shall not be None.")
if not isinstance(topology, networkx.DiGraph):
raise TypeError("Machine topology must be a networkx.DiGraph obejct.")
if topology.number_of_nodes() != self.machine_size():
raise TypeError(
"topology must be a networkx.DiGraph obejct with same number of nodes "
"as bf.machine_size()."
)
assert self.is_homogeneous(), "Only supports homogeneous environment now"
if topology_util.IsTopologyEquivalent(topology, self._machine_topology):
if self.local_rank() == 0:
logger.debug(
"Machine topology to set is the same as old one. Skip the setting."
)
return True
self._machine_topology = topology
self._is_machine_topo_weighted = is_weighted
return True
def set_topology(
self, topology: Optional[networkx.DiGraph] = None, is_weighted: bool = False
) -> bool:
"""A function that sets the virtual topology MPI used.
Args:
            topology: A networkx.DiGraph object to decide the topology. If not provided,
                a default exponential graph (base 2) structure is used.
            is_weighted: If set to true, the win_update and neighbor_allreduce will execute the
                weighted average instead, where the weights are the values used in the topology
                matrix (including the self weight). Note win_get/win_put/win_accumulate do not use this weight
since win_update already uses these weights.
Returns:
A boolean value that whether topology is set correctly or not.
Example:
>>> import bluefog.torch as bf
>>> from bluefog.common import topology_util
>>> bf.init()
>>> bf.set_topology(topology_util.RingGraph(bf.size()))
"""
if topology is None:
topology = topology_util.ExponentialGraph(size=self.size())
if self.local_rank() == 0:
logger.info(
"Topology is not specified. Default Exponential Two topology is used."
)
if not isinstance(topology, networkx.DiGraph):
raise TypeError("topology must be a networkx.DiGraph obejct.")
if topology.number_of_nodes() != self.size():
raise TypeError(
"topology must be a networkx.DiGraph obejct with same number of nodes as bf.size()."
)
if topology_util.IsTopologyEquivalent(topology, self._topology):
if self.local_rank() == 0:
logger.debug(
"Topology to set is the same as old one. Skip the setting."
)
return True
# We remove the self-rank for any cases because MPI graph_comm do not include it.
destinations = sorted(
[r for r in topology.successors(self.rank()) if r != self.rank()]
)
sources = sorted(
[r for r in topology.predecessors(self.rank()) if r != self.rank()]
)
indegree = len(sources)
outdegree = len(destinations)
sources_type = ctypes.c_int * indegree
destinations_type = ctypes.c_int * outdegree
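        # ctypes array types sized to the neighbor counts; instantiating them below with
        # *sources / *destinations marshals the Python ints into C int[] buffers that the
        # bluefog_set_topology* C functions expect.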
if not is_weighted:
self._MPI_LIB_CTYPES.bluefog_set_topology.argtypes = [
ctypes.c_int,
ctypes.POINTER(ctypes.c_int),
ctypes.c_int,
ctypes.POINTER(ctypes.c_int),
]
ret = self._MPI_LIB_CTYPES.bluefog_set_topology(
indegree,
sources_type(*sources),
outdegree,
destinations_type(*destinations),
)
else:
# Here the source_weights is a vector containing weights from source, i.e.,
# (in-)neighbors, converted from the neighbor_weights dictionary.
self_weight, neighbor_weights = topology_util.GetRecvWeights(
topology, self.rank()
)
source_weights = [
neighbor_weights[r] for r in sorted(neighbor_weights.keys())
]
source_weights_type = ctypes.c_float * indegree
self._MPI_LIB_CTYPES.bluefog_set_topology_with_weights.argtypes = [
ctypes.c_int,
ctypes.POINTER(ctypes.c_int),
ctypes.c_int,
ctypes.POINTER(ctypes.c_int),
ctypes.c_float,
ctypes.POINTER(ctypes.c_float),
]
ret = self._MPI_LIB_CTYPES.bluefog_set_topology_with_weights(
indegree,
sources_type(*sources),
outdegree,
destinations_type(*destinations),
self_weight,
source_weights_type(*source_weights),
)
if ret != 1:
if self.local_rank() == 0:
logger.error(
"Cannot set topology correctly. Three common reasons caused this. \n"
"1. Has Bluefog been initialized? use bf.init(). \n"
"2. The win_create has been called. It is not allowed to change\n"
" the topology after that. You can call win_free() to unregister\n"
" all window object first, then set the topology. \n"
"3. Make sure all previous MPI ops are done. It is not allowed to \n"
" change the topology while there is undone MPI ops."
)
return False
self._topology = topology
self._is_topo_weighted = is_weighted
return True
def is_homogeneous(self) -> bool:
"""Returns True if the cluster is homogeneous.
Returns:
A boolean value indicating whether every node in the cluster has same number of ranks
and if it is true it also indicates the ranks are continuous in machines.
"""
is_homogeneous = self._MPI_LIB_CTYPES.bluefog_is_homogeneous()
if is_homogeneous == -1:
raise ValueError("BlueFog has not been initialized; use bf.init().")
return bool(is_homogeneous)
def nccl_built(self) -> bool:
"""Returns True if BlueFog was compiled with NCCL support.
Returns:
A boolean value indicating whether NCCL support was compiled.
"""
return bool(self._MPI_LIB_CTYPES.bluefog_nccl_built())
def set_skip_negotiate_stage(self, value: bool) -> None:
"""Skip the negotiate stage or not. (Default state is no skip).
        Some MPI implementations do not support multiple threads. To use the win ops
        with such an implementation, the negotiate stage has to be turned off.
        After the negotiate stage is turned off, errors in collective calls, such as
        a size mismatch or a randomized tensor order, may not be handled properly,
        but skipping the stage may help to boost the performance.
"""
self._MPI_LIB_CTYPES.bluefog_set_skip_negotiate_stage(value)
def get_skip_negotiate_stage(self) -> bool:
"""Get the value of skip the negotiate stage. (Default state is no skip)."""
return bool(self._MPI_LIB_CTYPES.bluefog_get_skip_negotiate_stage())
def timeline_start_activity(self, tensor_name: str, activity_name: str) -> bool:
"""A python interface to call the timeline for StartActivity.
If you want to use this function, please make sure to turn on the timeline first by
setting the ENV variable BLUEFOG_TIMELINE = {file_name}, or use
bfrun --timeline-filename {file_name} ...
Args:
tensor_name (str): The activity associated tensor name.
activity_name (str): The activity type.
Returns:
A boolean value that whether timeline is executed correctly or not.
Example:
>>> import bluefog.torch as bf
>>> from bluefog.common.util import env
>>> with env(BLUEFOG_TIMELINE="./timeline_file"):
>>> bf.init()
>>> bf.timeline_start_activity(tensor_name, activity_name)
>>> ...
>>> bf.timeline_end_activity(tensor_name)
"""
if self.warn_timeline:
# We know timeline didn't turn on. No need to repeat it.
return False
self._MPI_LIB_CTYPES.bluefog_timeline.argtypes = [
ctypes.c_bool,
ctypes.c_char_p,
ctypes.c_char_p,
]
ret = self._MPI_LIB_CTYPES.bluefog_timeline(
True, tensor_name.encode("utf-8"), activity_name.encode("utf-8")
)
if ret != 1 and not self.warn_timeline:
logger.error(
"Cannot start activity in the timeline. "
"Most common reason is you didn't turn on the timeline function. "
"Use bfrun --timeline-filename file_name ... or "
"setting the ENV variable BLUEFOG_TIMELINE = file_name"
)
self.warn_timeline = True
return False
return True
def timeline_end_activity(self, tensor_name: str) -> bool:
"""A python interface to call the timeline for EndActivity.
Please check comments in timeline_start_activity for more explanation.
"""
if self.warn_timeline:
# We know timeline didn't turn on. No need to repeat it.
return False
self._MPI_LIB_CTYPES.bluefog_timeline.argtypes = [
ctypes.c_bool,
ctypes.c_char_p,
ctypes.c_char_p,
]
ret = self._MPI_LIB_CTYPES.bluefog_timeline(
False, tensor_name.encode("utf-8"), "".encode("utf-8")
)
if ret != 1 and not self.warn_timeline:
logger.error(
"Cannot end activity in the timeline. Check "
"Most common reason is you didn't turn on the timeline function. "
"Use bfrun --timeline-filename file_name ... or "
"setting the ENV variable BLUEFOG_TIMELINE = file_name"
)
self.warn_timeline = True
return False
return True
@contextlib.contextmanager
def timeline_context(self, tensor_name: str, activity_name: str):
"""Context manager for activating timeline record.
If you want to use this function, please make sure to turn on the timeline first by
setting the ENV variable BLUEFOG_TIMELINE = {file_name}, or use
bfrun --timeline-filename {file_name} ...
Args:
tensor_name (str): The activity associated tensor name.
activity_name (str): The activity type.
Example:
>>> with bf.timeline_context(tensor_name, activity_name):
>>> time.sleep(1.0)
"""
self.timeline_start_activity(tensor_name, activity_name)
try:
yield
finally:
self.timeline_end_activity(tensor_name)
def suspend(self):
"""Suspend the background thread of BlueFog.
It should be used under interactive python environment only.
"""
        if not util.is_running_from_ipython():
raise EnvironmentError(
"This function should be used only when you are under ipython environment."
)
self._MPI_LIB_CTYPES.bluefog_suspend()
def resume(self):
"""Resume the background thread of BlueFog.
It should be used under interactive python environment only.
"""
        if not util.is_running_from_ipython():
raise EnvironmentError(
"This function should be used only when you are under ipython environment."
)
self._MPI_LIB_CTYPES.bluefog_resume()
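# A minimal usage sketch (not part of the original file). It assumes, as the docstring
# examples above suggest, that the torch binding "bluefog.torch" exposes a module-level
# instance of BlueFogBasics:
#
#   import bluefog.torch as bf
#   from bluefog.common import topology_util
#
#   bf.init()
#   bf.set_topology(topology_util.RingGraph(bf.size()), is_weighted=False)
#   print(bf.rank(), bf.local_rank(), bf.in_neighbor_ranks(), bf.out_neighbor_ranks())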
|
the-stack_0_23756 | import unittest
from werkzeug.exceptions import NotFound
from app import create_app
import os.path
from shutil import copyfile
from .test_client import TestClient
UPLOAD_DIRECTORY = '/var/hotmaps/cm_files_uploaded'
if not os.path.exists(UPLOAD_DIRECTORY):
os.makedirs(UPLOAD_DIRECTORY)
os.chmod(UPLOAD_DIRECTORY, 0o777)
class TestAPI(unittest.TestCase):
def setUp(self):
self.app = create_app(os.environ.get('FLASK_CONFIG', 'development'))
self.ctx = self.app.app_context()
self.ctx.push()
        self.client = TestClient(self.app)
def tearDown(self):
self.ctx.pop()
def test_compute(self):
raster_file_path = 'tests/data/raster_for_test.tif'
# simulate copy from HTAPI to CM
save_path = UPLOAD_DIRECTORY+"/raster_for_test.tif"
copyfile(raster_file_path, save_path)
inputs_raster_selection = {}
inputs_parameter_selection = {}
inputs_vector_selection = {}
inputs_raster_selection["heat_tot_curr_density"] = save_path
inputs_vector_selection["heating_technologies_eu28"] = {}
inputs_parameter_selection["reduction_factor"] = 2
        # register the calculation module
payload = {"inputs_raster_selection": inputs_raster_selection,
"inputs_parameter_selection": inputs_parameter_selection,
"inputs_vector_selection": inputs_vector_selection}
rv, json = self.client.post('computation-module/compute/', data=payload)
self.assertTrue(rv.status_code == 200)
|
the-stack_0_23757 | #!/usr/bin/env python
# -*- coding: UTF-8 -*-
import logging
from plot import *
from cluster import *
def plot(data, density_threshold, distance_threshold, auto_select_dc=False):
logging.basicConfig(
format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)
dpcluster = DensityPeakCluster()
rho, delta, nneigh = dpcluster.cluster(
load_paperdata, data, density_threshold, distance_threshold, auto_select_dc=auto_select_dc)
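    # In density-peak clustering, rho is each point's local density, delta is its distance
    # to the nearest point of higher density, and nneigh is the index of that neighbor;
    # points with both high rho and high delta are selected as cluster centers.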
    logger.info(str(len(dpcluster.ccenter)) + ' cluster centers as below')
for idx, center in dpcluster.ccenter.items():
logger.info('%d %f %f' % (idx, rho[center], delta[center]))
    # plot_rho_delta(rho, delta)  # plot to choose the threshold
plot_cluster(dpcluster)
if __name__ == '__main__':
# plot('./data/data_in_paper/example_distances.dat', 20, 0.1)
plot('./data/data_iris_flower/iris.forcluster',
40.7, 0.9, auto_select_dc=True)
|
the-stack_0_23758 | #
# pytest documentation build configuration file, created by
# sphinx-quickstart on Fri Oct 8 17:54:28 2010.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The full version, including alpha/beta/rc tags.
# The short X.Y version.
import os
import sys
from _pytest import __version__ as version
release = ".".join(version.split(".")[:2])
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# sys.path.insert(0, os.path.abspath('.'))
autodoc_member_order = "bysource"
todo_include_todos = 1
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = [
"pygments_pytest",
"sphinx.ext.autodoc",
"sphinx.ext.autosummary",
"sphinx.ext.intersphinx",
"sphinx.ext.todo",
"sphinx.ext.viewcode",
"sphinx_removed_in",
"sphinxcontrib_trio",
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# The suffix of source filenames.
source_suffix = ".rst"
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = "contents"
# General information about the project.
project = "pytest"
copyright = "2015–2019, holger krekel and pytest-dev team"
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
# language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = [
"links.inc",
"_build",
"naming20.rst",
"test/*",
"old_*",
"*attic*",
"*/attic*",
"funcargs.rst",
"setup.rst",
"example/remoteinterp.rst",
]
# The reST default role (used for this markup: `text`) to use for all documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
add_module_names = False
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = "sphinx"
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
sys.path.append(os.path.abspath("_themes"))
html_theme_path = ["_themes"]
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = "flask"
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {"index_logo": None}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
html_title = "pytest documentation"
# A shorter title for the navigation bar. Default is the same as html_title.
html_short_title = "pytest-%s" % release
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
html_logo = "img/pytest1.png"
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
html_favicon = "img/pytest1favi.ico"
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
# html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# html_sidebars = {'index': 'indexsidebar.html'}
html_sidebars = {
"index": [
"slim_searchbox.html",
"sidebarintro.html",
"globaltoc.html",
"links.html",
"sourcelink.html",
],
"**": [
"slim_searchbox.html",
"globaltoc.html",
"relations.html",
"links.html",
"sourcelink.html",
],
}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# html_additional_pages = {'index': 'index.html'}
# If false, no module index is generated.
html_domain_indices = True
# If false, no index is generated.
html_use_index = False
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
html_show_sourcelink = False
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = "pytestdoc"
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
# latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
# latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
(
"contents",
"pytest.tex",
"pytest Documentation",
"holger krekel, trainer and consultant, http://merlinux.eu",
"manual",
)
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
latex_logo = "img/pytest1.png"
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Additional stuff for the LaTeX preamble.
# latex_preamble = ''
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
latex_domain_indices = False
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [("usage", "pytest", "pytest usage", ["holger krekel at merlinux eu"], 1)]
# -- Options for Epub output ---------------------------------------------------
# Bibliographic Dublin Core info.
epub_title = "pytest"
epub_author = "holger krekel at merlinux eu"
epub_publisher = "holger krekel at merlinux eu"
epub_copyright = "2013, holger krekel et alii"
# The language of the text. It defaults to the language option
# or en if the language is not set.
# epub_language = ''
# The scheme of the identifier. Typical schemes are ISBN or URL.
# epub_scheme = ''
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
# epub_identifier = ''
# A unique identification for the text.
# epub_uid = ''
# HTML files that should be inserted before the pages created by sphinx.
# The format is a list of tuples containing the path and title.
# epub_pre_files = []
# HTML files that should be inserted after the pages created by sphinx.
# The format is a list of tuples containing the path and title.
# epub_post_files = []
# A list of files that should not be packed into the epub file.
# epub_exclude_files = []
# The depth of the table of contents in toc.ncx.
# epub_tocdepth = 3
# Allow duplicate toc entries.
# epub_tocdup = True
# -- Options for texinfo output ------------------------------------------------
texinfo_documents = [
(
master_doc,
"pytest",
"pytest Documentation",
(
"Holger Krekel@*Benjamin Peterson@*Ronny Pfannschmidt@*"
"Floris Bruynooghe@*others"
),
"pytest",
"simple powerful testing with Python",
"Programming",
1,
)
]
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {"python": ("https://docs.python.org/3", None)}
def setup(app):
# from sphinx.ext.autodoc import cut_lines
# app.connect('autodoc-process-docstring', cut_lines(4, what=['module']))
app.add_object_type(
"confval",
"confval",
objname="configuration value",
indextemplate="pair: %s; configuration value",
)
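    # With the directive registered above, configuration values can be documented in the
    # .rst sources roughly like this (hypothetical snippet, shown here only as an example):
    #
    #   .. confval:: some_option
    #
    #      Description of what this configuration value controls.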
|
the-stack_0_23759 | import pandas as pd
import numpy as np
from statistics import mode
class autodataclean:
'''
A.1) Automated Data Cleaning; identify invalid values and/or rows and automatically solve the problem-
    NaN, missing values, outliers, unreliable values, out-of-range values, automated data input.
    (Your group decides a solution for each problem!)
Reference - http://pandas.pydata.org/pandas-docs/stable/missing_data.html
Process -
1. Check type of column - numeric/non-numeric
2. For non-numeric -
a. Replace missing and out of range by most common (mode) in dev
3. For numeric -
a. Compute dev mean, median, min and max excluding outliers and unreliable values
b. For automated -
i. Replace NA and unreliable by mean of dev
ii. Replace outliers and out of range by min or max of dev as applicable
c. For human assisted -
i. For NAs and unreliable values, give option of replacing by mean, median or user input value
ii. For outliers and out of range values, give option of replacing by mean, median, min, max or user input
Note - Replacement values are always computed on dev and replacements in val are always same as dev treatment
Note - Exclude ID and target from cleaning process
Note - case 1 : one file, like MBD_FA2; case 2 : multiple files, one dev and others val, test, oot etc.
'''
def __init__(self, traindata, testdata = None):
'''Constructor for this class'''
self.traindata = pd.DataFrame(traindata)
if testdata is not None:
self.testdata = pd.DataFrame(testdata)
else:
self.testdata = None
self.main()
def main(self):
numerics = ['int16', 'int32', 'int64', 'float16', 'float32', 'float64']
idtargetlist = ['id', 'ob_target', 'ID', 'TARGET']
dev_num_auto = self.traindata.select_dtypes(include=numerics)
dev_nonnum_auto = self.traindata.select_dtypes(exclude=numerics)
if self.testdata is not None:
val_num_auto = self.testdata.select_dtypes(include=numerics)
val_nonnum_auto = self.testdata.select_dtypes(exclude=numerics)
colnames_num_auto = list(dev_num_auto.columns.values)
colnames_nonnum_auto = list(dev_nonnum_auto.columns.values)
for names in idtargetlist:
if names in colnames_num_auto:
colnames_num_auto.remove(names)
if names in colnames_nonnum_auto:
colnames_nonnum_auto.remove(names)
print("Processing non-numeric variables")
for column in colnames_nonnum_auto:
print("Processing variable ", column)
            colmode = mode(dev_nonnum_auto.loc[:, column])
            # Replace missing values by the most common value (mode) observed in dev.
            dev_nonnum_auto.loc[:, column] = dev_nonnum_auto.loc[:, column].replace(to_replace="", value=colmode)
            allvalues = np.unique(dev_nonnum_auto.loc[:, column])
            if self.testdata is not None:
                val_nonnum_auto.loc[:, column] = val_nonnum_auto.loc[:, column].replace(to_replace="", value=colmode)
                # Values never seen in dev (out of range) are also mapped to the dev mode.
                unseen = ~val_nonnum_auto.loc[:, column].isin(allvalues)
                val_nonnum_auto.loc[unseen, column] = colmode
print("Variable ", column, "is clean")
print("Processing numeric variables")
for column in colnames_num_auto:
print("Processing variable ", column)
            colmeanorig = np.mean(dev_num_auto.loc[:, column])
            colstdev = np.std(dev_num_auto.loc[:, column])
            # Compute replacement statistics on dev, excluding outliers beyond 3 standard deviations.
            temp = [i for i in dev_num_auto.loc[:, column].tolist()
                    if np.abs(i - colmeanorig) <= 3 * colstdev]
            colmean = np.mean(temp)
            colmedian = np.median(temp)
            colmin = np.min(temp)
            colmax = np.max(temp)
            # Replace NAs by the dev mean and push outliers to the dev min/max.
            dev_num_auto.loc[:, column] = dev_num_auto.loc[:, column].fillna(colmean)
            dev_num_auto.loc[dev_num_auto.loc[:, column] < colmeanorig - 3 * colstdev, column] = colmin
            dev_num_auto.loc[dev_num_auto.loc[:, column] > colmeanorig + 3 * colstdev, column] = colmax
            if self.testdata is not None:
                val_num_auto.loc[:, column] = val_num_auto.loc[:, column].fillna(colmean)
                too_low = (val_num_auto.loc[:, column] < colmin) | (
                    val_num_auto.loc[:, column] < colmeanorig - 3 * colstdev)
                too_high = (val_num_auto.loc[:, column] > colmax) | (
                    val_num_auto.loc[:, column] > colmeanorig + 3 * colstdev)
                val_num_auto.loc[too_low, column] = colmin
                val_num_auto.loc[too_high, column] = colmax
print("Variable ", column, "is clean")
print("Automated cleaning is complete")
print("Cleaned numeric variables are available in dev_num_auto and val_num_auto")
print("Cleaned non-numeric variables are available in dev_nonnum_auto and val_nonnum_auto")
# dev = pd.read_csv("dev.csv")
# oot = pd.read_csv("oot0.csv")
# A = autodataclean(dev,oot)
|
the-stack_0_23760 | import uvicore
from uvicore.http import status
from uvicore.http import Request
from uvicore.auth import User
from uvicore.support import module
from fastapi.params import Security
from fastapi.security import SecurityScopes
from uvicore.support.dumper import dump, dd
from uvicore.typing import Optional, Sequence, Callable, Any, List, Dict, Tuple
from uvicore.http.exceptions import HTTPException, PermissionDenied, NotAuthenticated, InvalidCredentials
from uvicore.contracts import UserProvider
@uvicore.service()
class Guard(Security):
"""Uvicore Auth Guard"""
def __init__(self, scopes: Optional[Sequence[str]] = None, guard: str = None):
# # Swap guard and scopes
# if scopes is None and type(guard) == list:
# scopes: Sequence[str] = guard
# guard = None
# Ensure scopes is a List, to allow for singles
if scopes: scopes = [scopes] if isinstance(scopes, str) else list(scopes)
        # Do NOT apply a default guard to self.guard; let it be None
        # so I know it's blank and can overwrite it with a parent guard if needed.
self.scopes = scopes
self.guard = guard
# Get auth_config from app config
auth_config = uvicore.config.app.auth
        # Set default guard if none provided; get the actual guard from the app config
if guard is None: guard = auth_config.default
# Get actual guard config
if guard not in auth_config.guards:
raise Exception('Guard {} not found in app config'.format(guard))
        guard_config = auth_config.guards[guard].clone()  # Clone because I add name below
# Add name to guard config (its a clone, its ok)
guard_config.name = guard
# Get all providers from auth_config
providers = auth_config.providers
# Get all authenticator options from auth_config
options = auth_config.options
# Call parent Depends passing in the multi-middleware Authenticator
super().__init__(dependency=Authenticator(guard_config, options, providers), scopes=scopes, use_cache=True)
@uvicore.service()
class Authenticator:
def __init__(self, guard: Dict, options, providers: Dict):
# Guard is the full config SuperDict from app config matching the proper guard string
self.guard = guard
# Authenticator Default Options
self.options = options
# Providers is all providers from app config
self.providers = providers
async def __call__(self, scopes: SecurityScopes, request: Request):
#dump(self.guard)
# Dict({
# 'authenticators': Dict({
# 'jwt': Dict({
# 'module': 'uvicore.auth.middleware.Jwt',
# 'verify_signature': True,
# 'algorithms': ['RS256'],
# 'secret':
# '-----BEGIN PUBLIC KEY-----\nMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAnc84SDViVX8JNye2GVQZ\n'
# 'ixAwG2PWXoOhkj++wGASoAXs2LN0Ue48conxf/0bgEtq6kcbLPR23SieqBZA77vc\n'
# 'yulimMbzfwNczyP3FRo8wSCqRgJipTse87WItd8ga2MUCzSS8q19V4swUT4T23Su\n'
# 'DiG/Ry5f1sYbvxP2kJAJMUCzVbS7STxh33h65Bj+P6JdzrCJi+yrLqg928RHjLIF\n'
# 'gDy4MyFBLTI8w5u6IJi1TLm6h9lj3YqSa/qDkkIardnnZa7Xj0IJCEB9c+RD4Q7C\n'
# '+jco6g2Vr9oLP8Mg3c5lZPNVzcXC67UMVk9lK+zrlfPDI/m2+9kyTc/58S9ZUTFJ\nQwIDAQAB\n-----END PUBLIC KEY-----'
# }),
# 'basic': Dict({
# 'module': 'uvicore.auth.middleware.Basic',
# 'provider': 'users',
# 'realm': 'App1'
# })
# }),
# 'name': 'api'
# })
for authenticator in self.guard.authenticators.values():
# Get authenticator options by deep merging defaults and proper providers
options = authenticator.clone()
if 'options' in options:
# Deep merge default options
option_key = options.options
if option_key not in self.options:
# This is an application error, not an HTTPException
raise Exception('Default options key {} not found in app config'.format(option_key))
options.defaults(self.options[option_key]) # Merge seems to do a clone too!
# Merge provider into options
if 'provider' in options:
if options.provider not in self.providers:
# This is an application error, not an HTTPException
raise Exception('Provider {} not found in app config'.format(options.provider))
options.provider = self.providers[options.provider].clone()
options.guard = self.guard.name
#dump(options)
# Dict({
# 'default_options': 'jwt',
# 'module': 'uvicore.auth.middleware.Jwt',
# 'provider': Dict({
# 'module': 'uvicore.auth.models.user.User',
# 'method': 'userinfo',
# 'model': 'uvicore.auth.models.user.User',
# 'includes': ['roles', 'roles.permissions', 'groups', 'groups.roles', 'groups.roles.permissions']
# }),
# 'sync': Dict({'auto_create_user': True}),
# 'verify_signature': True,
# 'audience': '222b06eb-85ce-472b-af30-ec09244e3bf0',
# 'algorithms': ['RS256'],
# 'secret':
# '-----BEGIN PUBLIC KEY-----\nMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAnc84SDViVX8JNye2GVQZ\n'
# 'ixAwG2PWXoOhkj++wGASoAXs2LN0Ue48conxf/0bgEtq6kcbLPR23SieqBZA77vc\n'
# 'yulimMbzfwNczyP3FRo8wSCqRgJipTse87WItd8ga2MUCzSS8q19V4swUT4T23Su\n'
# 'DiG/Ry5f1sYbvxP2kJAJMUCzVbS7STxh33h65Bj+P6JdzrCJi+yrLqg928RHjLIF\n'
# 'gDy4MyFBLTI8w5u6IJi1TLm6h9lj3YqSa/qDkkIardnnZa7Xj0IJCEB9c+RD4Q7C\n'
# '+jco6g2Vr9oLP8Mg3c5lZPNVzcXC67UMVk9lK+zrlfPDI/m2+9kyTc/58S9ZUTFJ\nQwIDAQAB\n-----END PUBLIC KEY-----'
# })
# Import the auth middleware module
middleware = module.load(options.module).object(options)
# Fire the middleware __call__ callable and get the returned value
value = await middleware(scopes, request)
# If value is returned, auth was successful with this authenticator. Return value, stop the middleware stack.
# If value is None means auth headers not found. Continue to next middleware in auth stack
if value is not None:
return value
# If we are here, no auth middleware returned a value, meaning NOT logged in
# If no value is ever returned we are not logged in.
# Maybe here is the place to add an anonymous user to the request? with user.authenticated = False ???????
# NO, think about this. If a route is NOT guarded, this code will never run therefore request.user will
# never exist. Would have to do global auth middleware to accomplish an always present anonymous user.
# I could have a built-in hidden global middleware that adds request.user as an anonymous model?
raise NotAuthenticated('MASTER STACKER')
@uvicore.service()
class Auth:
"""Base Auth middleware class"""
async def retrieve_user(self, username: str, password: str, provider: Dict) -> Optional[User]:
"""Retrieve user from User Provider backend"""
# Import our user provider defined in auth config
user_provider: UserProvider = module.load(provider.module).object()
# Get user from user provider and validate password
# If returned user is None, validation has failed, user is disabled or user not found
user = await user_provider.retrieve_by_credentials(username, password, **provider.options)
# Do not throw error if no user or not validated here. We let the middleware handle that
return user
def validate_permissions(self, user: User, scopes: SecurityScopes) -> None:
"""Validate logged in users permissions again route permissions"""
# Superadmin is always allowed
if user.superadmin: return
# Get permissions defined on this route
route_permissions = scopes.scopes
# If route does not specify permissions, then anyone that is authenticated can access.
if not route_permissions: return
# Compare users permissions with route permissions
for permission in route_permissions:
if permission in user.permissions:
# This is an OR, if any one of these, then pass
return
        # No matching permissions means they are logged in, but they don't have the proper permissions.
raise PermissionDenied(route_permissions)
def auth_header(self, request) -> Tuple[str, str, str]:
"""Extract authorization header parts"""
authorization = request.headers.get('Authorization')
if not authorization: return (authorization, '', '')
scheme, _, param = authorization.partition(' ')
return authorization, scheme.lower(), param
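# A minimal usage sketch (not part of the original file). Because Guard subclasses
# fastapi.params.Security, it can be used as a parameter default in a route handler;
# the router object, scope name and guard name below are illustrative assumptions only:
#
#   @router.get('/posts')
#   async def posts(user: User = Guard(['posts.read'], guard='api')):
#       return {'user': user}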
|
the-stack_0_23761 | import textwrap
from pytype.pytd import escape
from pytype.pytd import pytd
from pytype.pytd import pytd_utils
from pytype.pytd import visitors
from pytype.pytd.parse import parser_test_base
import six
import unittest
# All of these tests implicitly test pytd_utils.Print because
# parser_test_base.AssertSourceEquals() uses pytd_utils.Print.
DEFAULT_PYI = """
from typing import Any
def __getattr__(name) -> Any: ...
"""
def pytd_src(text):
return textwrap.dedent(escape.preprocess_pytd(text))
class TestVisitors(parser_test_base.ParserTest):
"""Tests the classes in parse/visitors."""
def test_invent_starargs_params(self):
call = lambda x: tuple(f.name for f in visitors.InventStarArgParams(x))
self.assertEqual(("args", "kwargs"), call({}))
self.assertEqual(("args", "kwargs"), call({"a"}))
self.assertEqual(("_args", "kwargs"), call({"args"}))
self.assertEqual(("args", "_kwargs"), call({"kwargs"}))
self.assertEqual(("_args", "_kwargs"), call({"args", "kwargs"}))
self.assertEqual(("__args", "_kwargs"), call({"args", "_args", "kwargs"}))
self.assertEqual(("args", "__kwargs"), call({"kwargs", "_kwargs"}))
def test_lookup_classes(self):
src = textwrap.dedent("""
from typing import Union
class object:
pass
class A:
def a(self, a: A, b: B) -> Union[A, B]:
raise A()
raise B()
class B:
def b(self, a: A, b: B) -> Union[A, B]:
raise A()
raise B()
""")
tree = self.Parse(src)
new_tree = visitors.LookupClasses(tree)
self.AssertSourceEquals(new_tree, src)
new_tree.Visit(visitors.VerifyLookup())
def test_maybe_fill_in_local_pointers(self):
src = textwrap.dedent("""
from typing import Union
class A:
def a(self, a: A, b: B) -> Union[A, B]:
raise A()
raise B()
""")
tree = self.Parse(src)
ty_a = pytd.ClassType("A")
ty_a.Visit(visitors.FillInLocalPointers({"": tree}))
self.assertIsNotNone(ty_a.cls)
ty_b = pytd.ClassType("B")
ty_b.Visit(visitors.FillInLocalPointers({"": tree}))
self.assertIsNone(ty_b.cls)
def test_deface_unresolved(self):
builtins = self.Parse(textwrap.dedent("""
class int:
pass
"""))
src = textwrap.dedent("""
class A(X):
def a(self, a: A, b: X, c: int) -> X:
raise X()
def b(self) -> X[int]: ...
""")
expected = textwrap.dedent("""
from typing import Any
class A(Any):
def a(self, a: A, b: Any, c: int) -> Any:
raise Any
def b(self) -> Any: ...
""")
tree = self.Parse(src)
new_tree = tree.Visit(visitors.DefaceUnresolved([tree, builtins]))
new_tree.Visit(visitors.VerifyVisitor())
self.AssertSourceEquals(new_tree, expected)
def test_deface_unresolved2(self):
builtins = self.Parse(textwrap.dedent("""
from typing import Generic, TypeVar
class int:
pass
T = TypeVar("T")
class list(Generic[T]):
pass
"""))
src = textwrap.dedent("""
from typing import Union
class A(X):
def a(self, a: A, b: X, c: int) -> X:
raise X()
def c(self) -> Union[list[X], int]: ...
""")
expected = textwrap.dedent("""
from typing import Any, Union
class A(Any):
def a(self, a: A, b: Any, c: int) -> Any:
raise Any
def c(self) -> Union[list[Any], int]: ...
""")
tree = self.Parse(src)
new_tree = tree.Visit(visitors.DefaceUnresolved([tree, builtins]))
new_tree.Visit(visitors.VerifyVisitor())
self.AssertSourceEquals(new_tree, expected)
def test_replace_types(self):
src = textwrap.dedent("""
from typing import Union
class A:
def a(self, a: Union[A, B]) -> Union[A, B]:
raise A()
raise B()
""")
expected = textwrap.dedent("""
from typing import Union
class A:
def a(self: A2, a: Union[A2, B]) -> Union[A2, B]:
raise A2()
raise B()
""")
tree = self.Parse(src)
new_tree = tree.Visit(visitors.ReplaceTypes({"A": pytd.NamedType("A2")}))
self.AssertSourceEquals(new_tree, expected)
def test_superclasses_by_name(self):
src = textwrap.dedent("""
class A():
pass
class B():
pass
class C(A):
pass
class D(A,B):
pass
class E(C,D,A):
pass
""")
tree = self.Parse(src)
data = tree.Visit(visitors.ExtractSuperClassesByName())
six.assertCountEqual(self, ("object",), data["A"])
six.assertCountEqual(self, ("object",), data["B"])
six.assertCountEqual(self, ("A",), data["C"])
six.assertCountEqual(self, ("A", "B"), data["D"])
six.assertCountEqual(self, ("A", "C", "D"), data["E"])
def test_strip_self(self):
src = textwrap.dedent("""
def add(x: int, y: int) -> int: ...
class A:
def bar(self, x: int) -> float: ...
def baz(self) -> float: ...
def foo(self, x: int, y: float) -> float: ...
""")
expected = textwrap.dedent("""
def add(x: int, y: int) -> int: ...
class A:
def bar(x: int) -> float: ...
def baz() -> float: ...
def foo(x: int, y: float) -> float: ...
""")
tree = self.Parse(src)
new_tree = tree.Visit(visitors.StripSelf())
self.AssertSourceEquals(new_tree, expected)
def test_remove_unknown_classes(self):
src = pytd_src("""
from typing import Union
class `~unknown1`():
pass
class `~unknown2`():
pass
class A:
def foobar(x: `~unknown1`, y: `~unknown2`) -> Union[`~unknown1`, int]: ...
""")
expected = textwrap.dedent("""
from typing import Any, Union
class A:
def foobar(x, y) -> Union[Any, int]: ...
""")
tree = self.Parse(src)
tree = tree.Visit(visitors.RemoveUnknownClasses())
tree = tree.Visit(visitors.DropBuiltinPrefix())
self.AssertSourceEquals(tree, expected)
def test_find_unknown_visitor(self):
src = pytd_src("""
from typing import Any
class object:
pass
class `~unknown1`():
pass
class `~unknown_foobar`():
pass
class `~int`():
pass
class A():
def foobar(self, x: `~unknown1`) -> Any: ...
class B():
def foobar(self, x: `~int`) -> Any: ...
class C():
x = ... # type: `~unknown_foobar`
class D(`~unknown1`):
pass
""")
tree = self.Parse(src)
tree = visitors.LookupClasses(tree)
find_on = lambda x: tree.Lookup(x).Visit(visitors.RaiseIfContainsUnknown())
self.assertRaises(visitors.RaiseIfContainsUnknown.HasUnknown, find_on, "A")
find_on("B") # shouldn't raise
self.assertRaises(visitors.RaiseIfContainsUnknown.HasUnknown, find_on, "C")
self.assertRaises(visitors.RaiseIfContainsUnknown.HasUnknown, find_on, "D")
def test_in_place_lookup_external_classes(self):
src1 = textwrap.dedent("""
def f1() -> bar.Bar: ...
class Foo:
pass
""")
src2 = textwrap.dedent("""
def f2() -> foo.Foo: ...
class Bar:
pass
""")
ast1 = self.Parse(src1, name="foo")
ast2 = self.Parse(src2, name="bar")
ast1 = ast1.Visit(visitors.LookupExternalTypes(dict(foo=ast1, bar=ast2)))
ast2 = ast2.Visit(visitors.LookupExternalTypes(dict(foo=ast1, bar=ast2)))
f1, = ast1.Lookup("foo.f1").signatures
f2, = ast2.Lookup("bar.f2").signatures
self.assertIs(ast2.Lookup("bar.Bar"), f1.return_type.cls)
self.assertIs(ast1.Lookup("foo.Foo"), f2.return_type.cls)
def test_lookup_constant(self):
src1 = textwrap.dedent("""
Foo = ... # type: type
""")
src2 = textwrap.dedent("""
class Bar:
bar = ... # type: foo.Foo
""")
ast1 = self.Parse(src1, name="foo").Visit(
visitors.LookupBuiltins(self.loader.builtins))
ast2 = self.Parse(src2, name="bar")
ast2 = ast2.Visit(visitors.LookupExternalTypes({"foo": ast1, "bar": ast2}))
self.assertEqual(ast2.Lookup("bar.Bar").constants[0],
pytd.Constant(name="bar", type=pytd.AnythingType()))
def test_lookup_star_alias(self):
src1 = textwrap.dedent("""
x = ... # type: int
T = TypeVar("T")
class A: ...
def f(x: T) -> T: ...
B = A
""")
src2 = "from foo import *"
ast1 = self.Parse(src1).Replace(name="foo").Visit(visitors.AddNamePrefix())
ast2 = self.Parse(src2).Replace(name="bar").Visit(visitors.AddNamePrefix())
ast2 = ast2.Visit(visitors.LookupExternalTypes(
{"foo": ast1, "bar": ast2}, self_name="bar"))
self.assertEqual("bar", ast2.name)
self.assertSetEqual({a.name for a in ast2.aliases},
{"bar.x", "bar.T", "bar.A", "bar.f", "bar.B"})
def test_lookup_star_alias_in_unnamed_module(self):
src1 = textwrap.dedent("""
class A: ...
""")
src2 = "from foo import *"
ast1 = self.Parse(src1).Replace(name="foo").Visit(visitors.AddNamePrefix())
ast2 = self.Parse(src2)
name = ast2.name
ast2 = ast2.Visit(visitors.LookupExternalTypes(
{"foo": ast1}, self_name=None))
self.assertEqual(name, ast2.name)
self.assertMultiLineEqual(pytd_utils.Print(ast2), textwrap.dedent("""
import foo
A = foo.A
""").strip())
def test_lookup_two_star_aliases(self):
src1 = "class A: ..."
src2 = "class B: ..."
src3 = textwrap.dedent("""
from foo import *
from bar import *
""")
ast1 = self.Parse(src1).Replace(name="foo").Visit(visitors.AddNamePrefix())
ast2 = self.Parse(src2).Replace(name="bar").Visit(visitors.AddNamePrefix())
ast3 = self.Parse(src3).Replace(name="baz").Visit(visitors.AddNamePrefix())
ast3 = ast3.Visit(visitors.LookupExternalTypes(
{"foo": ast1, "bar": ast2, "baz": ast3}, self_name="baz"))
self.assertSetEqual({a.name for a in ast3.aliases}, {"baz.A", "baz.B"})
def test_lookup_two_star_aliases_with_same_class(self):
src1 = "class A: ..."
src2 = "class A: ..."
src3 = textwrap.dedent("""
from foo import *
from bar import *
""")
ast1 = self.Parse(src1).Replace(name="foo").Visit(visitors.AddNamePrefix())
ast2 = self.Parse(src2).Replace(name="bar").Visit(visitors.AddNamePrefix())
ast3 = self.Parse(src3).Replace(name="baz").Visit(visitors.AddNamePrefix())
self.assertRaises(KeyError, ast3.Visit, visitors.LookupExternalTypes(
{"foo": ast1, "bar": ast2, "baz": ast3}, self_name="baz"))
def test_lookup_star_alias_with_duplicate_class(self):
src1 = "class A: ..."
src2 = textwrap.dedent("""
from foo import *
class A:
x = ... # type: int
""")
ast1 = self.Parse(src1).Replace(name="foo").Visit(visitors.AddNamePrefix())
ast2 = self.Parse(src2).Replace(name="bar").Visit(visitors.AddNamePrefix())
ast2 = ast2.Visit(visitors.LookupExternalTypes(
{"foo": ast1, "bar": ast2}, self_name="bar"))
self.assertMultiLineEqual(pytd_utils.Print(ast2), textwrap.dedent("""
class bar.A:
x: int
""").strip())
def test_lookup_two_star_aliases_with_default_pyi(self):
src1 = DEFAULT_PYI
src2 = DEFAULT_PYI
src3 = textwrap.dedent("""
from foo import *
from bar import *
""")
ast1 = self.Parse(src1).Replace(name="foo").Visit(visitors.AddNamePrefix())
ast2 = self.Parse(src2).Replace(name="bar").Visit(visitors.AddNamePrefix())
ast3 = self.Parse(src3).Replace(name="baz").Visit(visitors.AddNamePrefix())
ast3 = ast3.Visit(visitors.LookupExternalTypes(
{"foo": ast1, "bar": ast2, "baz": ast3}, self_name="baz"))
self.assertMultiLineEqual(pytd_utils.Print(ast3), textwrap.dedent("""
from typing import Any
def baz.__getattr__(name) -> Any: ...
""").strip())
def test_lookup_star_alias_with_duplicate_getattr(self):
src1 = DEFAULT_PYI
src2 = textwrap.dedent("""
from typing import Any
from foo import *
def __getattr__(name) -> Any: ...
""")
ast1 = self.Parse(src1).Replace(name="foo").Visit(visitors.AddNamePrefix())
ast2 = self.Parse(src2).Replace(name="bar").Visit(visitors.AddNamePrefix())
ast2 = ast2.Visit(visitors.LookupExternalTypes(
{"foo": ast1, "bar": ast2}, self_name="bar"))
self.assertMultiLineEqual(pytd_utils.Print(ast2), textwrap.dedent("""
from typing import Any
def bar.__getattr__(name) -> Any: ...
""").strip())
def test_lookup_two_star_aliases_with_different_getattrs(self):
src1 = "def __getattr__(name) -> int: ..."
src2 = "def __getattr__(name) -> str: ..."
src3 = textwrap.dedent("""
from foo import *
from bar import *
""")
ast1 = self.Parse(src1).Replace(name="foo").Visit(visitors.AddNamePrefix())
ast2 = self.Parse(src2).Replace(name="bar").Visit(visitors.AddNamePrefix())
ast3 = self.Parse(src3).Replace(name="baz").Visit(visitors.AddNamePrefix())
self.assertRaises(KeyError, ast3.Visit, visitors.LookupExternalTypes(
{"foo": ast1, "bar": ast2, "baz": ast3}, self_name="baz"))
def test_lookup_star_alias_with_different_getattr(self):
src1 = "def __getattr__(name) -> int: ..."
src2 = textwrap.dedent("""
from foo import *
def __getattr__(name) -> str: ...
""")
ast1 = self.Parse(src1).Replace(name="foo").Visit(visitors.AddNamePrefix())
ast2 = self.Parse(src2).Replace(name="bar").Visit(visitors.AddNamePrefix())
ast2 = ast2.Visit(visitors.LookupExternalTypes(
{"foo": ast1, "bar": ast2}, self_name="bar"))
self.assertMultiLineEqual(pytd_utils.Print(ast2), textwrap.dedent("""
def bar.__getattr__(name) -> str: ...
""").strip())
def test_collect_dependencies(self):
src = textwrap.dedent("""
from typing import Union
l = ... # type: list[Union[int, baz.BigInt]]
def f1() -> bar.Bar: ...
def f2() -> foo.bar.Baz: ...
""")
deps = visitors.CollectDependencies()
self.Parse(src).Visit(deps)
six.assertCountEqual(self, {"baz", "bar", "foo.bar"}, deps.dependencies)
def test_expand(self):
src = textwrap.dedent("""
from typing import Union
def foo(a: Union[int, float], z: Union[complex, str], u: bool) -> file: ...
def bar(a: int) -> Union[str, unicode]: ...
""")
new_src = textwrap.dedent("""
from typing import Union
def foo(a: int, z: complex, u: bool) -> file: ...
def foo(a: int, z: str, u: bool) -> file: ...
def foo(a: float, z: complex, u: bool) -> file: ...
def foo(a: float, z: str, u: bool) -> file: ...
def bar(a: int) -> Union[str, unicode]: ...
""")
self.AssertSourceEquals(
self.ApplyVisitorToString(src, visitors.ExpandSignatures()),
new_src)
def test_print_imports(self):
src = textwrap.dedent("""
from typing import Any, List, Tuple, Union
def f(x: Union[int, slice]) -> List[Any]: ...
def g(x: foo.C.C2) -> None: ...
""")
expected = textwrap.dedent("""
import foo.C
from typing import Any, List, Union
def f(x: Union[int, slice]) -> List[Any]: ...
def g(x: foo.C.C2) -> None: ...
""").strip()
tree = self.Parse(src)
res = pytd_utils.Print(tree)
self.AssertSourceEquals(res, src)
self.assertMultiLineEqual(res, expected)
def test_print_imports_named_type(self):
# Can't get tree by parsing so build explicitly
node = pytd.Constant("x", pytd.NamedType("typing.List"))
tree = pytd_utils.CreateModule(name=None, constants=(node,))
expected_src = textwrap.dedent("""
from typing import List
x: List
""").strip()
res = pytd_utils.Print(tree)
self.assertMultiLineEqual(res, expected_src)
def test_print_imports_ignores_existing(self):
src = "from foo import b"
tree = self.Parse(src)
res = pytd_utils.Print(tree)
self.assertMultiLineEqual(res, src)
@unittest.skip("depended on `or`")
def test_print_union_name_conflict(self):
src = textwrap.dedent("""
class Union: ...
def g(x: Union) -> Union[int, float]: ...
""")
tree = self.Parse(src)
res = pytd_utils.Print(tree)
self.AssertSourceEquals(res, src)
def test_adjust_type_parameters(self):
ast = self.Parse("""
from typing import Union
T = TypeVar("T")
T2 = TypeVar("T2")
def f(x: T) -> T: ...
class A(Generic[T]):
def a(self, x: T2) -> None:
self = A[Union[T, T2]]
""")
f = ast.Lookup("f")
sig, = f.signatures
p_x, = sig.params
self.assertEqual(sig.template,
(pytd.TemplateItem(pytd.TypeParameter("T", scope="f")),))
self.assertEqual(p_x.type, pytd.TypeParameter("T", scope="f"))
cls = ast.Lookup("A")
f_cls, = cls.methods
sig_cls, = f_cls.signatures
p_self, p_x_cls = sig_cls.params
self.assertEqual(cls.template,
(pytd.TemplateItem(pytd.TypeParameter("T", scope="A")),))
self.assertEqual(sig_cls.template, (pytd.TemplateItem(
pytd.TypeParameter("T2", scope="A.a")),))
self.assertEqual(p_self.type.parameters,
(pytd.TypeParameter("T", scope="A"),))
self.assertEqual(p_x_cls.type, pytd.TypeParameter("T2", scope="A.a"))
def test_adjust_type_parameters_with_builtins(self):
ast = self.ParseWithBuiltins("""
T = TypeVar("T")
K = TypeVar("K")
V = TypeVar("V")
class Foo(List[int]): pass
class Bar(Dict[T, int]): pass
class Baz(Generic[K, V]): pass
class Qux(Baz[str, int]): pass
""")
foo = ast.Lookup("Foo")
bar = ast.Lookup("Bar")
qux = ast.Lookup("Qux")
foo_parent, = foo.parents
bar_parent, = bar.parents
qux_parent, = qux.parents
# Expected:
# Class(Foo, parent=GenericType(List, parameters=(int,)), template=())
# Class(Bar, parent=GenericType(Dict, parameters=(T, int)), template=(T))
# Class(Qux, parent=GenericType(Baz, parameters=(str, int)), template=())
self.assertEqual((pytd.ClassType("int"),), foo_parent.parameters)
self.assertEqual((), foo.template)
self.assertEqual(
(pytd.TypeParameter("T", scope="Bar"), pytd.ClassType("int")),
bar_parent.parameters)
self.assertEqual(
(pytd.TemplateItem(pytd.TypeParameter("T", scope="Bar")),),
bar.template)
self.assertEqual((pytd.ClassType("str"), pytd.ClassType("int")),
qux_parent.parameters)
self.assertEqual((), qux.template)
def test_adjust_type_parameters_with_duplicates(self):
ast = self.ParseWithBuiltins("""
T = TypeVar("T")
class A(Dict[T, T], Generic[T]): pass
""")
a = ast.Lookup("A")
self.assertEqual(
(pytd.TemplateItem(pytd.TypeParameter("T", (), None, "A")),),
a.template)
def test_adjust_type_parameters_with_duplicates_in_generic(self):
src = textwrap.dedent("""
T = TypeVar("T")
class A(Generic[T, T]): pass
""")
self.assertRaises(visitors.ContainerError, lambda: self.Parse(src))
def test_verify_containers(self):
ast1 = self.ParseWithBuiltins("""
from typing import SupportsInt, TypeVar
T = TypeVar("T")
class Foo(SupportsInt[T]): pass
""")
ast2 = self.ParseWithBuiltins("""
from typing import SupportsInt
class Foo(SupportsInt[int]): pass
""")
ast3 = self.ParseWithBuiltins("""
from typing import Generic
class Foo(Generic[int]): pass
""")
ast4 = self.ParseWithBuiltins("""
from typing import List
class Foo(List[int, str]): pass
""")
self.assertRaises(visitors.ContainerError,
lambda: ast1.Visit(visitors.VerifyContainers()))
self.assertRaises(visitors.ContainerError,
lambda: ast2.Visit(visitors.VerifyContainers()))
self.assertRaises(visitors.ContainerError,
lambda: ast3.Visit(visitors.VerifyContainers()))
self.assertRaises(visitors.ContainerError,
lambda: ast4.Visit(visitors.VerifyContainers()))
def test_clear_class_pointers(self):
cls = pytd.Class("foo", None, (), (), (), (), (), None, ())
t = pytd.ClassType("foo", cls)
t = t.Visit(visitors.ClearClassPointers())
self.assertIsNone(t.cls)
def test_expand_compatible_builtins(self):
src = textwrap.dedent("""
from typing import Tuple, Union, TypeVar
T = TypeVar('T', float, bool)
def f1(a: float) -> None: ...
def f2() -> float: ...
def f3(a: bool) -> None: ...
def f4() -> bool: ...
def f5(a: Union[bool, int]) -> None: ...
def f6(a: Tuple[bool, int]) -> None: ...
def f7(x: T) -> T: ...
""")
expected = textwrap.dedent("""
from typing import Tuple, TypeVar, Union
T = TypeVar('T', float, bool)
def f1(a: Union[float, int]) -> None: ...
def f2() -> float: ...
def f3(a: Union[bool, None]) -> None: ...
def f4() -> bool: ...
def f5(a: Union[bool, None, int]) -> None: ...
def f6(a: Tuple[Union[bool, None], int]) -> None: ...
def f7(x: T) -> T: ...
""")
src_tree, expected_tree = (
self.Parse(s).Visit(visitors.LookupBuiltins(self.loader.builtins))
for s in (src, expected))
new_tree = src_tree.Visit(visitors.ExpandCompatibleBuiltins(
self.loader.builtins))
self.AssertSourceEquals(new_tree, expected_tree)
def test_add_name_prefix(self):
src = textwrap.dedent("""
from typing import TypeVar
def f(a: T) -> T: ...
T = TypeVar("T")
class X(Generic[T]):
pass
""")
tree = self.Parse(src)
self.assertIsNone(tree.Lookup("T").scope)
self.assertEqual("X",
tree.Lookup("X").template[0].type_param.scope)
tree = tree.Replace(name="foo").Visit(visitors.AddNamePrefix())
self.assertIsNotNone(tree.Lookup("foo.f"))
self.assertIsNotNone(tree.Lookup("foo.X"))
self.assertEqual("foo", tree.Lookup("foo.T").scope)
self.assertEqual("foo.X",
tree.Lookup("foo.X").template[0].type_param.scope)
def test_add_name_prefix_twice(self):
src = textwrap.dedent("""
from typing import Any, TypeVar
x = ... # type: Any
T = TypeVar("T")
class X(Generic[T]): ...
""")
tree = self.Parse(src)
tree = tree.Replace(name="foo").Visit(visitors.AddNamePrefix())
tree = tree.Replace(name="foo").Visit(visitors.AddNamePrefix())
self.assertIsNotNone(tree.Lookup("foo.foo.x"))
self.assertEqual("foo.foo", tree.Lookup("foo.foo.T").scope)
self.assertEqual("foo.foo.X",
tree.Lookup("foo.foo.X").template[0].type_param.scope)
def test_add_name_prefix_on_class_type(self):
src = textwrap.dedent("""
x = ... # type: y
class Y: ...
""")
tree = self.Parse(src)
x = tree.Lookup("x")
x = x.Replace(type=pytd.ClassType("Y"))
tree = tree.Replace(constants=(x,), name="foo")
tree = tree.Visit(visitors.AddNamePrefix())
self.assertEqual("foo.Y", tree.Lookup("foo.x").type.name)
def test_add_name_prefix_on_nested_class_alias(self):
src = textwrap.dedent("""
class A:
class B:
class C: ...
D = A.B.C
""")
expected = textwrap.dedent("""
from typing import Type
class foo.A:
class foo.A.B:
class foo.A.B.C: ...
D: Type[foo.A.B.C]
""").strip()
self.assertMultiLineEqual(expected, pytd_utils.Print(
self.Parse(src).Replace(name="foo").Visit(visitors.AddNamePrefix())))
def test_add_name_prefix_on_nested_class_outside_ref(self):
src = textwrap.dedent("""
class A:
class B: ...
b: A.B
C = A.B
def f(x: A.B) -> A.B: ...
class D:
b: A.B
def f(self, x: A.B) -> A.B: ...
""")
expected = textwrap.dedent("""
from typing import Type
foo.b: foo.A.B
foo.C: Type[foo.A.B]
class foo.A:
class foo.A.B: ...
class foo.D:
b: foo.A.B
def f(self, x: foo.A.B) -> foo.A.B: ...
def foo.f(x: foo.A.B) -> foo.A.B: ...
""").strip()
self.assertMultiLineEqual(expected, pytd_utils.Print(
self.Parse(src).Replace(name="foo").Visit(visitors.AddNamePrefix())))
def test_add_name_prefix_on_nested_class_method(self):
src = textwrap.dedent("""
class A:
class B:
def copy(self) -> A.B: ...
""")
expected = textwrap.dedent("""
class foo.A:
class foo.A.B:
def copy(self) -> foo.A.B: ...
""").strip()
self.assertMultiLineEqual(expected, pytd_utils.Print(
self.Parse(src).Replace(name="foo").Visit(visitors.AddNamePrefix())))
def test_print_merge_types(self):
src = textwrap.dedent("""
from typing import Union
def a(a: float) -> int: ...
def b(a: Union[int, float]) -> int: ...
def c(a: object) -> Union[float, int]: ...
def d(a: float) -> int: ...
def e(a: Union[bool, None]) -> Union[bool, None]: ...
""")
expected = textwrap.dedent("""
from typing import Optional, Union
def a(a: float) -> int: ...
def b(a: float) -> int: ...
def c(a: object) -> Union[float, int]: ...
def d(a: float) -> int: ...
def e(a: bool) -> Optional[bool]: ...
""")
self.assertMultiLineEqual(expected.strip(),
pytd_utils.Print(self.ToAST(src)).strip())
def test_print_heterogeneous_tuple(self):
t = pytd.TupleType(pytd.NamedType("tuple"),
(pytd.NamedType("str"), pytd.NamedType("float")))
self.assertEqual("Tuple[str, float]", pytd_utils.Print(t))
def test_verify_heterogeneous_tuple(self):
# Error: does not inherit from Generic
base = pytd.ClassType("tuple")
base.cls = pytd.Class("tuple", None, (), (), (), (), (), None, ())
t1 = pytd.TupleType(base, (pytd.NamedType("str"), pytd.NamedType("float")))
self.assertRaises(visitors.ContainerError,
lambda: t1.Visit(visitors.VerifyContainers()))
# Error: Generic[str, float]
gen = pytd.ClassType("typing.Generic")
gen.cls = pytd.Class("typing.Generic", None, (), (), (), (), (), None, ())
t2 = pytd.TupleType(gen, (pytd.NamedType("str"), pytd.NamedType("float")))
self.assertRaises(visitors.ContainerError,
lambda: t2.Visit(visitors.VerifyContainers()))
# Okay
param = pytd.TypeParameter("T")
parent = pytd.GenericType(gen, (param,))
base.cls = pytd.Class(
"tuple", None, (parent,), (), (), (), (), None,
(pytd.TemplateItem(param),))
t3 = pytd.TupleType(base, (pytd.NamedType("str"), pytd.NamedType("float")))
t3.Visit(visitors.VerifyContainers())
def test_typevar_value_conflict(self):
# Conflicting values for _T.
ast = self.ParseWithBuiltins("""
from typing import List
class A(List[int], List[str]): ...
""")
self.assertRaises(visitors.ContainerError,
lambda: ast.Visit(visitors.VerifyContainers()))
def test_typevar_value_conflict_hidden(self):
# Conflicting value for _T hidden in MRO.
ast = self.ParseWithBuiltins("""
from typing import List
class A(List[int]): ...
class B(A, List[str]): ...
""")
self.assertRaises(visitors.ContainerError,
lambda: ast.Visit(visitors.VerifyContainers()))
def test_typevar_value_conflict_related_containers(self):
# List inherits from Sequence, so they share a type parameter.
ast = self.ParseWithBuiltins("""
from typing import List, Sequence
class A(List[int], Sequence[str]): ...
""")
self.assertRaises(visitors.ContainerError,
lambda: ast.Visit(visitors.VerifyContainers()))
def test_typevar_value_no_conflict(self):
# Not an error if the containers are unrelated, even if they use the same
# type parameter name.
ast = self.ParseWithBuiltins("""
from typing import ContextManager, SupportsAbs
class Foo(SupportsAbs[float], ContextManager[Foo]): ...
""")
ast.Visit(visitors.VerifyContainers())
def test_typevar_value_consistency(self):
# Type renaming makes all type parameters represent the same type `T1`.
ast = self.ParseWithBuiltins("""
from typing import Generic, TypeVar
T1 = TypeVar("T1")
T2 = TypeVar("T2")
T3 = TypeVar("T3")
T4 = TypeVar("T4")
T5 = TypeVar("T5")
class A(Generic[T1]): ...
class B1(A[T2]): ...
class B2(A[T3]): ...
class C(B1[T4], B2[T5]): ...
class D(C[str, str], A[str]): ...
""")
ast.Visit(visitors.VerifyContainers())
def test_typevar_value_and_alias_conflict(self):
ast = self.ParseWithBuiltins("""
from typing import Generic, TypeVar
T = TypeVar("T")
class A(Generic[T]): ...
class B(A[int], A[T]): ...
""")
self.assertRaises(visitors.ContainerError,
lambda: ast.Visit(visitors.VerifyContainers()))
def test_typevar_alias_and_value_conflict(self):
ast = self.ParseWithBuiltins("""
from typing import Generic, TypeVar
T = TypeVar("T")
class A(Generic[T]): ...
class B(A[T], A[int]): ...
""")
self.assertRaises(visitors.ContainerError,
lambda: ast.Visit(visitors.VerifyContainers()))
def test_verify_container_with_mro_error(self):
# Make sure we don't crash.
ast = self.ParseWithBuiltins("""
from typing import List
class A(List[str]): ...
class B(List[str], A): ...
""")
ast.Visit(visitors.VerifyContainers())
def test_alias_printing(self):
a = pytd.Alias("MyList", pytd.GenericType(
pytd.NamedType("typing.List"), (pytd.AnythingType(),)))
ty = pytd_utils.CreateModule("test", aliases=(a,))
expected = textwrap.dedent("""
from typing import Any, List
MyList = List[Any]""")
self.assertMultiLineEqual(expected.strip(), pytd_utils.Print(ty).strip())
def test_print_none_union(self):
src = textwrap.dedent("""
from typing import Union
def f(x: Union[str, None]) -> None: ...
def g(x: Union[str, int, None]) -> None: ...
def h(x: Union[None]) -> None: ...
""")
expected = textwrap.dedent("""
from typing import Optional, Union
def f(x: Optional[str]) -> None: ...
def g(x: Optional[Union[str, int]]) -> None: ...
def h(x: None) -> None: ...
""")
self.assertMultiLineEqual(expected.strip(),
pytd_utils.Print(self.ToAST(src)).strip())
def test_lookup_typing_class(self):
node = visitors.LookupClasses(pytd.NamedType("typing.Sequence"),
self.loader.concat_all())
assert node.cls
def test_create_type_parameters_from_unknowns(self):
src = pytd_src("""
from typing import Dict
def f(x: `~unknown1`) -> `~unknown1`: ...
def g(x: `~unknown2`, y: `~unknown2`) -> None: ...
def h(x: `~unknown3`) -> None: ...
def i(x: Dict[`~unknown4`, `~unknown4`]) -> None: ...
# Should not be changed
class `~unknown5`:
def __add__(self, x: `~unknown6`) -> `~unknown6`: ...
def `~f`(x: `~unknown7`) -> `~unknown7`: ...
""")
expected = pytd_src("""
from typing import Dict
_T0 = TypeVar('_T0')
def f(x: _T0) -> _T0: ...
def g(x: _T0, y: _T0) -> None: ...
def h(x: `~unknown3`) -> None: ...
def i(x: Dict[_T0, _T0]) -> None: ...
class `~unknown5`:
def __add__(self, x: `~unknown6`) -> `~unknown6`: ...
def `~f`(x: `~unknown7`) -> `~unknown7`: ...
""")
ast1 = self.Parse(src)
ast1 = ast1.Visit(visitors.CreateTypeParametersForSignatures())
self.AssertSourceEquals(ast1, expected)
@unittest.skip("We no longer support redefining TypeVar")
def test_redefine_typevar(self):
src = pytd_src("""
def f(x: `~unknown1`) -> `~unknown1`: ...
class `TypeVar`: ...
""")
ast = self.Parse(src).Visit(visitors.CreateTypeParametersForSignatures())
self.assertMultiLineEqual(pytd_utils.Print(ast), textwrap.dedent("""
import typing
_T0 = TypeVar('_T0')
class `TypeVar`: ...
def f(x: _T0) -> _T0: ...""").strip())
def test_create_type_parameters_for_new(self):
src = textwrap.dedent("""
class Foo:
def __new__(cls: Type[Foo]) -> Foo: ...
class Bar:
def __new__(cls: Type[Bar], x, y, z) -> Bar: ...
""")
ast = self.Parse(src).Visit(visitors.CreateTypeParametersForSignatures())
self.assertMultiLineEqual(pytd_utils.Print(ast), textwrap.dedent("""
from typing import TypeVar
_TBar = TypeVar('_TBar', bound=Bar)
_TFoo = TypeVar('_TFoo', bound=Foo)
class Foo:
def __new__(cls: Type[_TFoo]) -> _TFoo: ...
class Bar:
def __new__(cls: Type[_TBar], x, y, z) -> _TBar: ...
""").strip())
def test_keep_custom_new(self):
src = textwrap.dedent("""
class Foo:
def __new__(cls: Type[X]) -> X: ...
class Bar:
def __new__(cls, x: Type[Bar]) -> Bar: ...
""").strip()
ast = self.Parse(src).Visit(visitors.CreateTypeParametersForSignatures())
self.assertMultiLineEqual(pytd_utils.Print(ast), src)
def test_print_type_parameter_bound(self):
src = textwrap.dedent("""
from typing import TypeVar
T = TypeVar("T", bound=str)
""")
self.assertMultiLineEqual(pytd_utils.Print(self.Parse(src)),
textwrap.dedent("""
from typing import TypeVar
T = TypeVar('T', bound=str)""").lstrip())
def test_print_cls(self):
src = textwrap.dedent("""
class A:
def __new__(cls: Type[A]) -> A: ...
""")
self.assertMultiLineEqual(pytd_utils.Print(self.Parse(src)),
textwrap.dedent("""
class A:
def __new__(cls) -> A: ...
""").strip())
def test_print_no_return(self):
src = textwrap.dedent("""
def f() -> nothing: ...
""")
self.assertMultiLineEqual(pytd_utils.Print(self.Parse(src)),
textwrap.dedent("""
from typing import NoReturn
def f() -> NoReturn: ...""").lstrip())
def test_print_multiline_signature(self):
src = textwrap.dedent("""
def f(x: int, y: str, z: bool) -> list[str]:
pass
""")
self.assertMultiLineEqual(
pytd_utils.Print(self.Parse(src), multiline_args=True),
textwrap.dedent("""
from typing import List
def f(
x: int,
y: str,
z: bool
) -> List[str]: ...
""").strip())
def test_rename_builtins_prefix(self):
"""__builtin__.foo should get rewritten to builtins.foo and then to foo."""
src = textwrap.dedent("""
import __builtin__
class MyError(__builtin__.KeyError): ...
""")
self.assertMultiLineEqual(pytd_utils.Print(self.Parse(src)),
"class MyError(KeyError): ...")
class ReplaceModulesWithAnyTest(unittest.TestCase):
def test_any_replacement(self):
class_type_match = pytd.ClassType("match.foo")
named_type_match = pytd.NamedType("match.bar")
class_type_no_match = pytd.ClassType("match_no.foo")
named_type_no_match = pytd.NamedType("match_no.bar")
generic_type_match = pytd.GenericType(class_type_match, ())
generic_type_no_match = pytd.GenericType(class_type_no_match, ())
visitor = visitors.ReplaceModulesWithAny(["match."])
self.assertEqual(class_type_no_match, class_type_no_match.Visit(visitor))
self.assertEqual(named_type_no_match, named_type_no_match.Visit(visitor))
self.assertEqual(generic_type_no_match,
generic_type_no_match.Visit(visitor))
self.assertEqual(pytd.AnythingType,
class_type_match.Visit(visitor).__class__)
self.assertEqual(pytd.AnythingType,
named_type_match.Visit(visitor).__class__)
self.assertEqual(pytd.AnythingType,
generic_type_match.Visit(visitor).__class__)
class ReplaceUnionsWithAnyTest(unittest.TestCase):
def test_any_replacement(self):
union = pytd.UnionType((pytd.NamedType("a"), pytd.NamedType("b")))
self.assertEqual(
union.Visit(visitors.ReplaceUnionsWithAny()), pytd.AnythingType())
if __name__ == "__main__":
unittest.main()
|
the-stack_0_23762 | from sklearn.base import BaseEstimator, ClassifierMixin
from sklearn.exceptions import NotFittedError
from sklearn.metrics import confusion_matrix
import tensorflow as tf
import numpy as np
import random
from dataset_wave import Dataset
from alsNetHistory import AlsNetHistory
import os
import sys
BASE_DIR = os.path.dirname(__file__)
sys.path.append(BASE_DIR)
sys.path.append(os.path.join(BASE_DIR, '../utils'))
import tf_util
from pointnet_util import pointnet_sa_module, pointnet_fp_module, pointnet_sa_module_2D
def simple_loss(labels, logits):
return tf.losses.sparse_softmax_cross_entropy(labels, logits, scope='loss')
def fp_high_loss(labels, logits, factor=100):
#weights = [tf.where(tf.logical_and(labels != 2, tf.argmax(logits) == 2), factor, 1)]
    # Per-class weights (same shape as labels); any class not listed falls back to weight 1.
    ones = tf.ones_like(labels, dtype=tf.float32)
    weights = tf.where(tf.equal(labels, 0), 5 * ones, ones)
    weights = tf.where(tf.equal(labels, 1), ones, weights)
    weights = tf.where(tf.equal(labels, 2), 10 * ones, weights)
    weights = tf.where(tf.equal(labels, 3), 200 * ones, weights)
    weights = tf.where(tf.equal(labels, 4), 200 * ones, weights)
    weights = tf.where(tf.equal(labels, 5), 200 * ones, weights)
    classify_loss = tf.losses.sparse_softmax_cross_entropy(labels=labels, logits=logits, scope='loss', weights=weights)
return classify_loss
class AlsNetContainer(BaseEstimator, ClassifierMixin):
def __init__(self,
output_base,
num_classes,
num_feat,
num_points,
learning_rate=0.1,
dropout=0.5,
activation_fn=tf.nn.relu,
optimizer_cls=tf.train.AdamOptimizer,
loss_fn=simple_loss,
initalizer=None,
arch=None,
score_sample=1):
self.output_dir = output_base
self.num_classes = num_classes
self.num_feat = num_feat
self.num_points = num_points
self.learning_rate = learning_rate
self.dropout = dropout
self.activation_fn = activation_fn
self.optimizer_cls = optimizer_cls
self.loss_fn = loss_fn
self.initalizer = initalizer
self.arch = arch
self.score_sample=score_sample
self._session = None
self._graph = None
self._config = tf.ConfigProto()
self._config.gpu_options.allow_growth = True
self._config.allow_soft_placement = False
self._config.log_device_placement = False
self.savefiles = True
self._train_points_seen = 0
self.train_history = AlsNetHistory()
self.eval_history = AlsNetHistory()
def _build_graph(self):
"""
Build the graph
:return: Nothing
"""
# define placeholders
# input points
points_in = tf.placeholder(tf.float32, shape=(1, self.num_points, 3), name='points_in')
        waves_in = tf.placeholder(tf.float32, shape=(1, self.num_points, self.num_feat), name='waves_in')
# reference labels
labels_ref = tf.placeholder(tf.int64, shape=(1, self.num_points), name='labels_ref')
# training flag
is_training = tf.placeholder(tf.bool, shape=(), name='is_training')
# create network
        logits = self._dnn(points_in, waves_in, is_training)
# get loss
loss = self.loss_fn(labels_ref, logits)
# create optimizer
optimizer = self.optimizer_cls(learning_rate=self.learning_rate)
# set operations
train_op = optimizer.minimize(loss, name='train')
softmax_op = tf.nn.softmax(logits, name='softmax')
        # initialize variables
init_op = tf.global_variables_initializer()
saver = tf.train.Saver()
        # Make important vars/ops available instance-wide
self._points_in = points_in
self._labels_ref = labels_ref
self._is_training = is_training
self._logits = logits
self._loss = loss
self._train_op = train_op
self._softmax_op = softmax_op
self._init_op = init_op
self._saver = saver
    def _dnn(self, points_in, waves_in, is_training):
        """
        Central definition of the deep neural network: creates the sa and fp layers
        and handles dropout.
        :param points_in: tensor (batch x num_points x (3+num_feat)). input points (x,y,z,attr...)
        :param waves_in: tensor (batch x num_points x num_feat). per-point waveform attributes
        :param is_training: bool.
        :return: last layer of net
        """
with tf.variable_scope('dnn'), tf.device('/gpu:0'):
ln_xyz = [tf.slice(points_in, [0, 0, 0], [-1, -1, 3])] # point coordinates
ln_feat_in = [tf.slice(points_in, [0, 0, 3], [-1, -1, -1])] # point attributes
ln_feat = [tf.slice(points_in, [0, 0, 3], [-1, -1, -1])] # point attributes
ln_wave_in = [tf.slice(waves_in, [0, 0, 3], [-1, -1, -1])] # point attributes
ln_wave = [tf.slice(waves_in, [0, 0, 3], [-1, -1, -1])] # point attributes
if self.savefiles:
self._ln_xyz = ln_xyz
self._ln_feat_in = ln_feat_in
self._ln_feat = ln_feat
for depth, step_dict in enumerate(self.arch): # set abstraction
xyz, feat = self._pointnet_sa(step_dict,
ln_xyz[depth], ln_feat[depth],
is_training,
'sa_layer_%d' % (depth + 1))
ln_xyz.append(xyz)
ln_feat.append(feat)
ln_feat_in.append(feat)
for depth, step_dict in enumerate(reversed(self.arch)): # feature propagation
depth = len(self.arch) - depth
feat = self._pointnet_fp(step_dict,
ln_xyz[depth-1], ln_xyz[depth],
ln_feat[depth-1], ln_feat[depth],
is_training,
'fp_layer_%d' % (depth - 1))
ln_feat[depth - 1] = feat
l0_feats = ln_feat[0]
net = tf_util.conv1d(l0_feats, 128, 1, padding='VALID', bn=True,
is_training=is_training, scope='fc1', bn_decay=None)
net = tf_util.dropout(net, keep_prob=(1-self.dropout), is_training=is_training, scope='dp1')
net = tf_util.conv1d(net, self.num_classes, 1, padding='VALID', activation_fn=None, scope='fc2', name='net')
return net
def _pointnet_sa(self, arch_dict, xyz, feat, is_training, scope=""):
"""
PointNet Set Abstraction layer (Qi et al. 2017)
:param arch_dict: dictionary describing the architecture of this layer
:param xyz: Tensor (batch x num_points x 3). coordinate triplets
:param feat: Tensor (batch x num_points x num_feat). features for each point
:param scope: name for the layers
:return: xyz and features of the superpoint layer
"""
li_xyz, li_feats, li_indices = pointnet_sa_module_2D(xyz, feat,
npoint=arch_dict['npoint'],
radius=arch_dict['radius'],
nsample=arch_dict['nsample'],
mlp=arch_dict['mlp'],
pooling=arch_dict['pooling'],
mlp2=arch_dict['mlp2'],
group_all=False,
is_training=is_training,
bn_decay=None,
scope=scope)
return li_xyz, li_feats
def _pointnet_fp(self, arch_dict, xyz_to, xyz_from, feat_to, feat_from, is_training, scope=""):
"""
PointNet Feature Propagation layer (Qi et al. 2017)
:param arch_dict: dictionary describing the architecture of this layer
:param xyz_to: Tensor (batch x num_points x 3). coordinate triplets
:param xyz_from: Tensor (batch x num_points x 3). coordinate triplets
:param feat_to: Tensor (batch x num_points x num_feat). features for each point
:param feat_from: Tensor (batch x num_points x num_feat). features for each point
:param scope: name for the layers
:return: features interpolated to the next layer
"""
li_feats = pointnet_fp_module(xyz_to, xyz_from,
feat_to, feat_from,
arch_dict['reverse_mlp'],
is_training,
bn_decay=None,
scope=scope)
return li_feats
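    # Example entry of self.arch as consumed by the two helpers above
    # (the keys match the dictionary lookups; the values are purely illustrative):
    #   {'npoint': 8192, 'radius': 1.0, 'nsample': 32, 'mlp': [64, 64, 128],
    #    'pooling': 'max', 'mlp2': None, 'reverse_mlp': [128, 64]}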
def close_session(self):
if self._session:
self._session.close()
def fit_file(self, filenames_in, new_session=True, **kwargs):
if new_session or self._graph is None:
self.close_session()
self._train_points_seen = 0
self._graph = tf.Graph()
with self._graph.as_default():
self._build_graph()
self._session = tf.Session(graph=self._graph, config=self._config)
with self._session.as_default() as sess:
sess.run(self._init_op)
for filename in filenames_in:
ds = Dataset(filename)
self.fit_one_epoch(ds.points_and_features, ds.labels)
return self
def fit(self, datasets, new_session=True):
if new_session or self._graph is None:
self.close_session()
self._train_points_seen = 0
self._graph = tf.Graph()
with self._graph.as_default():
self._build_graph()
self._session = tf.Session(graph=self._graph, config=self._config)
with self._session.as_default() as sess:
sess.run(self._init_op)
for ds in datasets:
points_in_single_ds, labels_single_ds = ds.points_and_features, ds.labels
self.fit_one_epoch(points_in_single_ds, labels_single_ds)
ds.unload()
def fit_one_epoch(self, points_in, labels):
with self._session.as_default() as sess:
points_in = np.expand_dims(points_in, 0)
labels = np.expand_dims(labels, 0)
train, loss, class_prob = sess.run([self._train_op, self._loss, self._softmax_op],
feed_dict={self._points_in: points_in,
self._labels_ref: labels,
self._is_training: True})
new_classes = np.argmax(class_prob, axis=2)
            cm = confusion_matrix(labels[0], new_classes[0], labels=list(range(self.num_classes)))
self._train_points_seen += len(labels[0]) *1e-6
self.train_history.add_history_step(cm, self._train_points_seen, loss)
def predict_probability(self, points_in):
if not self._session:
raise NotFittedError("This %s instance is not fitted yet" % self.__class__.__name__)
with self._session.as_default() as sess:
points_in = np.expand_dims(points_in, 0)
softmax, feat, feat_in, xyz = sess.run((self._softmax_op, self._ln_feat, self._ln_feat_in, self._ln_xyz), feed_dict={self._points_in: points_in,
self._is_training: False})
#if self.savefiles:
# for level, x in enumerate(xyz):
# print(xyz[level][0], feat[level][0])
# print(xyz[level][0].shape, feat[level][0].shape)
# np.savetxt(os.path.join(self.output_dir, 'xyz%i.xyz' % level), np.hstack((xyz[level][0], feat[level][0])))
# np.savetxt(os.path.join(self.output_dir, 'xyz%i_in.xyz' % level), np.hstack((xyz[level][0], feat_in[level][0])))
return softmax
def predict_one_epoch(self, points_in):
class_indices = np.argmax(self.predict_probability(points_in), axis=2)
return class_indices
def predict(self, points_in_mult):
results = []
for points_in in points_in_mult:
pred_res = self.predict_one_epoch(points_in)
results.append(pred_res[0])
return results
def _get_model_params(self):
"""Get all variable values (used for early stopping, faster than saving to disk)"""
with self._graph.as_default():
gvars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES)
return {gvar.op.name: value for gvar, value in zip(gvars, self._session.run(gvars))}
def _restore_model_params(self, model_params):
"""Set all variables to the given values (for early stopping, faster than loading from disk)"""
gvar_names = list(model_params.keys())
assign_ops = {gvar_name: self._graph.get_operation_by_name(gvar_name + "/Assign")
for gvar_name in gvar_names}
init_values = {gvar_name: assign_op.inputs[1] for gvar_name, assign_op in assign_ops.items()}
feed_dict = {init_values[gvar_name]: model_params[gvar_name] for gvar_name in gvar_names}
self._session.run(assign_ops, feed_dict=feed_dict)
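    # Typical early-stopping pattern (sketch): snapshot the variables with
    # _get_model_params() whenever validation improves, and roll back with
    # _restore_model_params(best_params) once it stops improving.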
def test_single(self, file_in, save_to=None, save_prob=False, unload=True):
if isinstance(file_in, Dataset):
ds = file_in
else:
print(file_in)
ds = Dataset(file_in)
probs = self.predict_probability(ds.points_and_features)
new_classes = np.argmax(probs, axis=2)
if save_to:
Dataset.Save(save_to, ds.points_and_features,
ds.names, ds.labels, new_classes[0],
probs[0] if save_prob else None)
        cm = confusion_matrix(ds.labels, new_classes[0], labels=list(range(self.num_classes)))
self.eval_history.add_history_step(cm, self._train_points_seen, 0)
if unload: ds.unload()
return np.count_nonzero(ds.labels == new_classes[0]) / self.num_points
def save_model(self, path):
if not os.path.exists(os.path.dirname(path)):
os.makedirs(os.path.dirname(path))
self._saver.save(self._session, path)
def load_model(self, path):
if self._graph is None or self._session is None:
self.close_session()
self._train_points_seen = 0
self._graph = tf.Graph()
with self._graph.as_default():
self._build_graph()
self._session = tf.Session(graph=self._graph, config=self._config)
with self._session.as_default() as sess:
self._saver.restore(sess, path)
def score(self, ds, sample_weight=None):
from sklearn.metrics import accuracy_score
try:
samples = random.sample(ds, self.score_sample)
except ValueError: # too few samples --> take whatever we have
samples = ds
scores = []
for sample in samples:
X = sample.points_and_features
y = sample.labels
score = accuracy_score(np.array(y), np.array(self.predict_one_epoch(X)[0]), sample_weight=sample_weight)
print("Current Accuracy score: %s" % score)
scores.append(score)
sample.unload()
return np.mean(scores)
|
the-stack_0_23766 | # Copyright (c) 2011 Openstack, LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Least Cost Scheduler is a mechanism for choosing which host machines to
provision a set of resources to. The input of the least-cost-scheduler is a
set of objective-functions, called the 'cost-functions', a weight for each
cost-function, and a list of candidate hosts (gathered via FilterHosts).
The cost-function and weights are tabulated, and the host with the least cost
is then selected for provisioning.
"""
import collections
from nova import flags
from nova import log as logging
from nova.scheduler import base_scheduler
from nova import utils
from nova import exception
LOG = logging.getLogger('nova.scheduler.least_cost')
FLAGS = flags.FLAGS
flags.DEFINE_list('least_cost_scheduler_cost_functions',
['nova.scheduler.least_cost.noop_cost_fn'],
'Which cost functions the LeastCostScheduler should use.')
# TODO(sirp): Once we have enough of these rules, we can break them out into a
# cost_functions.py file (perhaps in a least_cost_scheduler directory)
flags.DEFINE_integer('noop_cost_fn_weight', 1,
'How much weight to give the noop cost function')
flags.DEFINE_integer('compute_fill_first_cost_fn_weight', 1,
'How much weight to give the fill-first cost function')
def noop_cost_fn(host):
"""Return a pre-weight cost of 1 for each host"""
return 1
def compute_fill_first_cost_fn(host):
"""Prefer hosts that have less ram available, filter_hosts will exclude
hosts that don't have enough ram.
"""
hostname, service = host
caps = service.get("compute", {})
free_mem = caps.get("host_memory_free", 0)
return free_mem
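# Example host entry as consumed by the cost functions above (values illustrative):
#   ("host1", {"compute": {"host_memory_free": 2048}})
# compute_fill_first_cost_fn returns the free memory (2048 here), so hosts with
# less free RAM cost less and are preferred by the least-cost selection.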
def normalize_list(L):
"""Normalize an array of numbers such that each element satisfies:
0 <= e <= 1
"""
if not L:
return L
max_ = max(L)
if max_ > 0:
return [(float(e) / max_) for e in L]
return L
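# For example, normalize_list([2.0, 4.0, 8.0]) returns [0.25, 0.5, 1.0];
# empty or all-zero lists are returned unchanged.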
def weighted_sum(domain, weighted_fns, normalize=True):
"""Use the weighted-sum method to compute a score for an array of objects.
Normalize the results of the objective-functions so that the weights are
meaningful regardless of objective-function's range.
domain - input to be scored
weighted_fns - list of weights and functions like:
[(weight, objective-functions)]
Returns an unsorted list of scores. To pair with hosts do:
zip(scores, hosts)
"""
# Table of form:
# { domain1: [score1, score2, ..., scoreM]
# ...
# domainN: [score1, score2, ..., scoreM] }
score_table = collections.defaultdict(list)
for weight, fn in weighted_fns:
scores = [fn(elem) for elem in domain]
if normalize:
norm_scores = normalize_list(scores)
else:
norm_scores = scores
for idx, score in enumerate(norm_scores):
weighted_score = score * weight
score_table[idx].append(weighted_score)
# Sum rows in table to compute score for each element in domain
domain_scores = []
for idx in sorted(score_table):
elem_score = sum(score_table[idx])
domain_scores.append(elem_score)
return domain_scores
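# Minimal illustration with two hypothetical (hostname, capabilities) tuples h1 and h2
# that already passed FilterHosts:
#   scores = weighted_sum([h1, h2],
#                         [(FLAGS.noop_cost_fn_weight, noop_cost_fn),
#                          (FLAGS.compute_fill_first_cost_fn_weight,
#                           compute_fill_first_cost_fn)])
#   ranked = sorted(zip(scores, [h1, h2]), key=lambda pair: pair[0])  # least cost first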
class LeastCostScheduler(base_scheduler.BaseScheduler):
def __init__(self, *args, **kwargs):
self.cost_fns_cache = {}
super(LeastCostScheduler, self).__init__(*args, **kwargs)
def get_cost_fns(self, topic):
"""Returns a list of tuples containing weights and cost functions to
use for weighing hosts
"""
if topic in self.cost_fns_cache:
return self.cost_fns_cache[topic]
cost_fns = []
for cost_fn_str in FLAGS.least_cost_scheduler_cost_functions:
if '.' in cost_fn_str:
short_name = cost_fn_str.split('.')[-1]
else:
short_name = cost_fn_str
cost_fn_str = "%s.%s.%s" % (
__name__, self.__class__.__name__, short_name)
if not (short_name.startswith('%s_' % topic) or
short_name.startswith('noop')):
continue
try:
                # NOTE(sirp): import_class is somewhat misnamed since it can
                # import any callable from a module
cost_fn = utils.import_class(cost_fn_str)
except exception.ClassNotFound:
raise exception.SchedulerCostFunctionNotFound(
cost_fn_str=cost_fn_str)
try:
flag_name = "%s_weight" % cost_fn.__name__
weight = getattr(FLAGS, flag_name)
except AttributeError:
raise exception.SchedulerWeightFlagNotFound(
flag_name=flag_name)
cost_fns.append((weight, cost_fn))
self.cost_fns_cache[topic] = cost_fns
return cost_fns
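    # The weight flag for each cost function is derived from its name, e.g.
    # compute_fill_first_cost_fn is scaled by FLAGS.compute_fill_first_cost_fn_weight
    # (both defined at the top of this module).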
def weigh_hosts(self, topic, request_spec, hosts):
"""Returns a list of dictionaries of form:
[ {weight: weight, hostname: hostname, capabilities: capabs} ]
"""
cost_fns = self.get_cost_fns(topic)
costs = weighted_sum(domain=hosts, weighted_fns=cost_fns)
weighted = []
weight_log = []
for cost, (hostname, caps) in zip(costs, hosts):
weight_log.append("%s: %s" % (hostname, "%.2f" % cost))
weight_dict = dict(weight=cost, hostname=hostname,
capabilities=caps)
weighted.append(weight_dict)
LOG.debug(_("Weighted Costs => %s") % weight_log)
return weighted
|
the-stack_0_23767 | import logging
from base.wrapper import DB, doc
from math import pi
from .my_geom import MyPoints
def calc_angle_to_ver_or_hor_side(main_vector, second_vector):
"""
Calc angle between main and second
Then transform it to main vector or it perpendicular and make angle less than 90
:param main_vector: DB.XYZ
:param second_vector: DB.XYZ, for example UpDirection of view
:return: Angle between main and second < 90
:rtype: float
"""
angle = main_vector.AngleTo(second_vector)
logging.debug('Calc first rotation angle: {:.2f}'.format(angle * 180 / pi))
if pi / 4 < angle <= pi / 2:
angle -= pi / 2
elif pi / 2 < angle <= 3 * pi / 4:
angle += pi / 2 - pi
elif 3 * pi / 4 < angle <= pi:
angle -= pi
logging.debug('Calc change rotation angle: {:.2f}'.format(angle * 180 / pi))
sign_angle = MyPoints.calc_sign(main_vector, second_vector) * angle
logging.debug('Calc sign rotation angle: {:.2f}'.format(sign_angle * 180 / pi))
return sign_angle
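# For example, an angle of 120 degrees between the vectors falls into the (90, 135] branch
# and is folded to 120 - 90 = 30 degrees, which is then signed via MyPoints.calc_sign.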
def get_depends_elems_id_by_class(view, cur_class):
my_filter = DB.ElementClassFilter(cur_class)
elems_ids = view.GetDependentElements(my_filter)
logging.debug('View #{}. Get {} id elems by class: {}'.format(view.Id, len(elems_ids), cur_class))
return elems_ids
def get_depends_elems_by_class(view, cur_class):
elems_ids = get_depends_elems_id_by_class(view, cur_class)
elems = get_elems_by_ids(elems_ids)
logging.debug('View #{}. Get {} elems by class: {}'.format(view.Id, len(elems), cur_class))
return elems
def get_elems_by_ids(list_ids):
elems = []
for elem_id in list_ids:
elem = doc.GetElement(elem_id)
if elem is not None:
elems.append(elem)
return elems
|
the-stack_0_23770 | '''
Module : Main
Description : The main entry point for the program.
Copyright : (c) RD, 29 Nov 2018
License : MIT
Maintainer : [email protected]
Portability : POSIX
The program reads one or more input FASTA files. For each file it computes a
variety of statistics, and then prints a summary of the statistics as output.
'''
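# Example usage (assuming the package installs a `biodemo` console script; file names are illustrative):
#   biodemo --minlen 100 sample_1.fasta sample_2.fasta
#   biodemo < sample_1.fasta
# Each output row follows the tab-separated HEADER layout defined below.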
from argparse import ArgumentParser
from math import floor
import sys
import logging
import pkg_resources
from Bio import SeqIO
EXIT_FILE_IO_ERROR = 1
EXIT_COMMAND_LINE_ERROR = 2
EXIT_FASTA_FILE_ERROR = 3
DEFAULT_MIN_LEN = 0
DEFAULT_VERBOSE = False
HEADER = 'FILENAME\tNUMSEQ\tTOTAL\tMIN\tAVG\tMAX'
PROGRAM_NAME = "biodemo"
try:
PROGRAM_VERSION = pkg_resources.require(PROGRAM_NAME)[0].version
except pkg_resources.DistributionNotFound:
PROGRAM_VERSION = "undefined_version"
def exit_with_error(message, exit_status):
'''Print an error message to stderr, prefixed by the program name and 'ERROR'.
Then exit program with supplied exit status.
Arguments:
message: an error message as a string.
exit_status: a positive integer representing the exit status of the
program.
'''
logging.error(message)
print("{} ERROR: {}, exiting".format(PROGRAM_NAME, message), file=sys.stderr)
sys.exit(exit_status)
def parse_args():
'''Parse command line arguments.
Returns Options object with command line argument values as attributes.
Will exit the program on a command line error.
'''
description = 'Read one or more FASTA files, compute simple stats for each file'
parser = ArgumentParser(description=description)
parser.add_argument(
'--minlen',
metavar='N',
type=int,
default=DEFAULT_MIN_LEN,
help='Minimum length sequence to include in stats (default {})'.format(
DEFAULT_MIN_LEN))
parser.add_argument('--version',
action='version',
version='%(prog)s ' + PROGRAM_VERSION)
parser.add_argument('--log',
metavar='LOG_FILE',
type=str,
help='record program progress in LOG_FILE')
parser.add_argument('fasta_files',
nargs='*',
metavar='FASTA_FILE',
type=str,
help='Input FASTA files')
return parser.parse_args()
class FastaStats(object):
'''Compute various statistics for a FASTA file:
num_seqs: the number of sequences in the file satisfying the minimum
length requirement (minlen_threshold).
num_bases: the total length of all the counted sequences.
min_len: the minimum length of the counted sequences.
max_len: the maximum length of the counted sequences.
average: the average length of the counted sequences rounded down
to an integer.
'''
#pylint: disable=too-many-arguments
def __init__(self,
num_seqs=None,
num_bases=None,
min_len=None,
max_len=None,
average=None):
"Build an empty FastaStats object"
self.num_seqs = num_seqs
self.num_bases = num_bases
self.min_len = min_len
self.max_len = max_len
self.average = average
def __eq__(self, other):
"Two FastaStats objects are equal iff their attributes are equal"
if type(other) is type(self):
return self.__dict__ == other.__dict__
return False
def __repr__(self):
"Generate a printable representation of a FastaStats object"
return "FastaStats(num_seqs={}, num_bases={}, min_len={}, max_len={}, " \
"average={})".format(
self.num_seqs, self.num_bases, self.min_len, self.max_len,
self.average)
def from_file(self, fasta_file, minlen_threshold=DEFAULT_MIN_LEN):
'''Compute a FastaStats object from an input FASTA file.
Arguments:
fasta_file: an open file object for the FASTA file
minlen_threshold: the minimum length sequence to consider in
computing the statistics. Sequences in the input FASTA file
which have a length less than this value are ignored and not
considered in the resulting statistics.
Result:
A FastaStats object
'''
num_seqs = num_bases = 0
min_len = max_len = None
for seq in SeqIO.parse(fasta_file, "fasta"):
this_len = len(seq)
if this_len >= minlen_threshold:
if num_seqs == 0:
min_len = max_len = this_len
else:
min_len = min(this_len, min_len)
max_len = max(this_len, max_len)
num_seqs += 1
num_bases += this_len
if num_seqs > 0:
self.average = int(floor(float(num_bases) / num_seqs))
else:
self.average = None
self.num_seqs = num_seqs
self.num_bases = num_bases
self.min_len = min_len
self.max_len = max_len
return self
def pretty(self, filename):
'''Generate a pretty printable representation of a FastaStats object
suitable for output of the program. The output is a tab-delimited
string containing the filename of the input FASTA file followed by
the attributes of the object. If 0 sequences were read from the FASTA
file then num_seqs and num_bases are output as 0, and min_len, average
and max_len are output as a dash "-".
Arguments:
filename: the name of the input FASTA file
Result:
A string suitable for pretty printed output
'''
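        # For example, a file with two counted sequences of lengths 120 and 180
        # (minlen 0) is rendered as "example.fasta\t2\t300\t120\t150\t180"
        # (file name illustrative).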
if self.num_seqs > 0:
num_seqs = str(self.num_seqs)
num_bases = str(self.num_bases)
min_len = str(self.min_len)
average = str(self.average)
max_len = str(self.max_len)
else:
num_seqs = num_bases = "0"
min_len = average = max_len = "-"
return "\t".join([filename, num_seqs, num_bases, min_len, average,
max_len])
def process_files(options):
'''Compute and print FastaStats for each input FASTA file specified on the
command line. If no FASTA files are specified on the command line then
read from the standard input (stdin).
Arguments:
options: the command line options of the program
Result:
None
'''
if options.fasta_files:
for fasta_filename in options.fasta_files:
logging.info("Processing FASTA file from %s", fasta_filename)
try:
fasta_file = open(fasta_filename)
except IOError as exception:
exit_with_error(str(exception), EXIT_FILE_IO_ERROR)
else:
with fasta_file:
stats = FastaStats().from_file(fasta_file, options.minlen)
print(stats.pretty(fasta_filename))
else:
logging.info("Processing FASTA file from stdin")
stats = FastaStats().from_file(sys.stdin, options.minlen)
print(stats.pretty("stdin"))
def init_logging(log_filename):
'''If the log_filename is defined, then
initialise the logging facility, and write log statement
indicating the program has started, and also write out the
command line from sys.argv
Arguments:
log_filename: either None, if logging is not required, or the
string name of the log file to write to
Result:
None
'''
if log_filename is not None:
logging.basicConfig(filename=log_filename,
level=logging.DEBUG,
filemode='w',
format='%(asctime)s %(levelname)s - %(message)s',
datefmt='%m-%d-%Y %H:%M:%S')
logging.info('program started')
logging.info('command line: %s', ' '.join(sys.argv))
def main():
"Orchestrate the execution of the program"
options = parse_args()
init_logging(options.log)
print(HEADER)
process_files(options)
# If this script is run from the command line then call the main function.
if __name__ == '__main__':
main()
|
the-stack_0_23771 | # @author:leacoder
# @des: brute force rotation of the array, O(n*k)
class Solution:
def rotate(self, nums: List[int], k: int) -> None:
# 旋转 k 次,每次将数组旋转 1 个元素。
for i in range(k):
prev = nums[len(nums)-1] # 从数组尾取数据
for j in range(len(nums)): # 将数组尾数据 放在数组头 并前后搬移数据
temp = nums[j]
nums[j] = prev
prev = temp
class Solution:
def rotate(self, nums: List[int], k: int) -> None:
n = len(nums)
k %= n
# 旋转 k 次,每次将数组旋转 1 个元素。
for _ in range(k):
nums.insert(0, nums.pop())
# @author:leacoder
# @des: rotate the array by slice concatenation (uses an extra array)
class Solution:
def rotate(self, nums: List[int], k: int) -> None:
n = len(nums)
        k %= n  # handle the case k > len(nums)
        # Slices: nums[-k:] is the last k elements, nums[:-k] is the first len(nums) - k elements
nums[:] = nums[-k:] + nums[:-k]
# @author:leacoder
# @des: rotate the array by reversing segments in place
class Solution:
def rotate(self, nums: List[int], k: int) -> None:
n = len(nums)
        k %= n  # handle the case k > len(nums)
        nums[:] = nums[::-1]  # reverse the whole array
        nums[:k] = nums[:k][::-1]  # reverse the first k elements
        nums[k:] = nums[k:][::-1]  # reverse the remaining elements
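# Quick sanity check for the reversal approach above (illustrative values):
#   nums = [1, 2, 3, 4, 5, 6, 7]; Solution().rotate(nums, 3)  ->  nums == [5, 6, 7, 1, 2, 3, 4]
# Note: the List annotations assume the usual "from typing import List" provided by the judge harness.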
|
the-stack_0_23773 | from diesel import Application, Loop, sleep
import time
def l(ident):
for x in range(2):
print('* [{}] hi.{}: {}'.format(time.strftime('%H:%M:%S'), ident, x))
time.sleep(1)
sleep(5)
a.halt()
a = Application()
a.add_loop(Loop(l, 1))
a.add_loop(Loop(l, 2))
a.run()
|
the-stack_0_23774 | # Copyright 2017 The Sonnet Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Example script to train a stacked LSTM on the Tiny Shakespeare dataset."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# Dependency imports
import sonnet as snt
from sonnet.examples import dataset_shakespeare
import tensorflow.compat.v1 as tf
FLAGS = tf.flags.FLAGS
tf.flags.DEFINE_integer("num_training_iterations", 10000,
"Number of iterations to train for.")
tf.flags.DEFINE_integer("report_interval", 1000,
"Iterations between reports (samples, valid loss).")
tf.flags.DEFINE_integer("reduce_learning_rate_interval", 2500,
"Iterations between learning rate reductions.")
tf.flags.DEFINE_integer("lstm_depth", 3, "Number of LSTM layers.")
tf.flags.DEFINE_integer("batch_size", 32, "Batch size for training.")
tf.flags.DEFINE_integer("num_embedding", 32, "Size of embedding layer.")
tf.flags.DEFINE_integer("num_hidden", 128, "Size of LSTM hidden layer.")
tf.flags.DEFINE_integer("truncation_length", 64, "Sequence size for training.")
tf.flags.DEFINE_integer("sample_length", 1000, "Sequence size for sampling.")
tf.flags.DEFINE_float("max_grad_norm", 5, "Gradient clipping norm limit.")
tf.flags.DEFINE_float("learning_rate", 0.1, "Optimizer learning rate.")
tf.flags.DEFINE_float("reduce_learning_rate_multiplier", 0.1,
"Learning rate is multiplied by this when reduced.")
tf.flags.DEFINE_float("optimizer_epsilon", 0.01,
"Epsilon used for Adam optimizer.")
tf.flags.DEFINE_string("checkpoint_dir", "/tmp/tf/rnn_shakespeare",
"Checkpointing directory.")
tf.flags.DEFINE_integer("checkpoint_interval", 500,
"Checkpointing step interval.")
def _configure_saver(checkpoint_dir, checkpoint_interval):
"""Returns a tf.train.CheckpointSaverHook for autosaving checkpoints."""
saver = tf.train.Saver()
return tf.train.CheckpointSaverHook(
checkpoint_dir=checkpoint_dir,
save_steps=checkpoint_interval,
saver=saver)
def build_graph(lstm_depth=3, batch_size=32, num_embedding=32, num_hidden=128,
truncation_length=64, sample_length=1000, max_grad_norm=5,
initial_learning_rate=0.1, reduce_learning_rate_multiplier=0.1,
optimizer_epsilon=0.01):
"""Constructs the computation graph."""
# Get datasets.
dataset_train = dataset_shakespeare.TinyShakespeareDataset(
num_steps=truncation_length,
batch_size=batch_size,
subset="train",
random=True,
name="shake_train")
dataset_valid = dataset_shakespeare.TinyShakespeareDataset(
num_steps=truncation_length,
batch_size=batch_size,
subset="valid",
random=False,
name="shake_valid")
dataset_test = dataset_shakespeare.TinyShakespeareDataset(
num_steps=truncation_length,
batch_size=batch_size,
subset="test",
random=False,
name="shake_test")
# Define model.
model = TextModel(
num_embedding=num_embedding,
num_hidden=num_hidden,
lstm_depth=lstm_depth,
output_size=dataset_valid.vocab_size,
use_dynamic_rnn=True,
use_skip_connections=True)
# Get the training loss.
train_input_sequence, train_target_sequence = dataset_train()
train_output_sequence_logits, train_final_state = model(train_input_sequence) # pylint: disable=not-callable
train_loss = dataset_train.cost(train_output_sequence_logits,
train_target_sequence)
# Get the validation loss.
valid_input_sequence, valid_target_sequence = dataset_valid()
valid_output_sequence_logits, _ = model(valid_input_sequence) # pylint: disable=not-callable
valid_loss = dataset_valid.cost(valid_output_sequence_logits,
valid_target_sequence)
# Get the test loss.
test_input_sequence, test_target_sequence = dataset_test()
test_output_sequence_logits, _ = model(test_input_sequence) # pylint: disable=not-callable
test_loss = dataset_test.cost(test_output_sequence_logits,
test_target_sequence)
# Build graph to sample some strings during training.
initial_logits = train_output_sequence_logits[truncation_length - 1]
train_generated_string = model.generate_string(
initial_logits=initial_logits,
initial_state=train_final_state,
sequence_length=sample_length)
# Set up global norm clipping of gradients.
trainable_variables = tf.trainable_variables()
grads, _ = tf.clip_by_global_norm(
tf.gradients(train_loss, trainable_variables), max_grad_norm)
# Get learning rate and define annealing.
learning_rate = tf.get_variable(
"learning_rate",
shape=[],
dtype=tf.float32,
initializer=tf.constant_initializer(initial_learning_rate),
trainable=False)
reduce_learning_rate = learning_rate.assign(
learning_rate * reduce_learning_rate_multiplier)
# Get training step counter.
global_step = tf.get_variable(
name="global_step",
shape=[],
dtype=tf.int64,
initializer=tf.zeros_initializer(),
trainable=False,
collections=[tf.GraphKeys.GLOBAL_VARIABLES,
tf.GraphKeys.GLOBAL_STEP])
# Define optimizer and training step.
optimizer = tf.train.AdamOptimizer(
learning_rate, epsilon=optimizer_epsilon)
train_step = optimizer.apply_gradients(
zip(grads, trainable_variables),
global_step=global_step)
graph_tensors = {
"train_loss": train_loss,
"valid_loss": valid_loss,
"test_loss": test_loss,
"train_generated_string": train_generated_string,
"reduce_learning_rate": reduce_learning_rate,
"global_step": global_step,
"train_step": train_step
}
# Return dataset_train for translation to human readable text.
return graph_tensors, dataset_train
def train(num_training_iterations, report_interval,
reduce_learning_rate_interval):
"""Trains a deep LSTM model on the Tiny Shakespeare dataset."""
# Build the computation graph.
graph_tensors, dataset_train = build_graph(
lstm_depth=FLAGS.lstm_depth, batch_size=FLAGS.batch_size,
num_embedding=FLAGS.num_embedding, num_hidden=FLAGS.num_hidden,
truncation_length=FLAGS.truncation_length,
sample_length=FLAGS.sample_length, max_grad_norm=FLAGS.max_grad_norm,
initial_learning_rate=FLAGS.learning_rate,
reduce_learning_rate_multiplier=FLAGS.reduce_learning_rate_multiplier,
optimizer_epsilon=FLAGS.optimizer_epsilon)
# Configure a checkpoint saver.
saver_hook = _configure_saver(FLAGS.checkpoint_dir,
FLAGS.checkpoint_interval)
# Train the network.
with tf.train.SingularMonitoredSession(
hooks=[saver_hook], checkpoint_dir=FLAGS.checkpoint_dir) as sess:
start_iteration = sess.run(graph_tensors["global_step"])
for train_iteration in range(start_iteration, num_training_iterations):
if (train_iteration + 1) % report_interval == 0:
train_loss_v, valid_loss_v, _ = sess.run(
(graph_tensors["train_loss"],
graph_tensors["valid_loss"],
graph_tensors["train_step"]))
train_generated_string_v = sess.run(
graph_tensors["train_generated_string"])
train_generated_string_human = dataset_train.to_human_readable(
(train_generated_string_v, 0), indices=[0])
tf.logging.info("%d: Training loss %f. Validation loss %f. Sample = %s",
train_iteration,
train_loss_v,
valid_loss_v,
train_generated_string_human)
else:
train_loss_v, _ = sess.run((graph_tensors["train_loss"],
graph_tensors["train_step"]))
tf.logging.info("%d: Training loss %f.", train_iteration, train_loss_v)
if (train_iteration + 1) % reduce_learning_rate_interval == 0:
sess.run(graph_tensors["reduce_learning_rate"])
tf.logging.info("Reducing learning rate.")
test_loss = sess.run(graph_tensors["test_loss"])
tf.logging.info("Test loss %f", test_loss)
class TextModel(snt.AbstractModule):
"""A deep LSTM model, for use on the Tiny Shakespeare dataset."""
def __init__(self, num_embedding, num_hidden, lstm_depth, output_size,
use_dynamic_rnn=True, use_skip_connections=True,
name="text_model"):
"""Constructs a `TextModel`.
Args:
num_embedding: Size of embedding representation, used directly after the
one-hot encoded input.
num_hidden: Number of hidden units in each LSTM layer.
lstm_depth: Number of LSTM layers.
output_size: Size of the output layer on top of the DeepRNN.
use_dynamic_rnn: Whether to use dynamic RNN unrolling. If `False`, it uses
static unrolling. Default is `True`.
use_skip_connections: Whether to use skip connections in the
`snt.DeepRNN`. Default is `True`.
name: Name of the module.
"""
super(TextModel, self).__init__(name=name)
self._num_embedding = num_embedding
self._num_hidden = num_hidden
self._lstm_depth = lstm_depth
self._output_size = output_size
self._use_dynamic_rnn = use_dynamic_rnn
self._use_skip_connections = use_skip_connections
with self._enter_variable_scope():
self._embed_module = snt.Linear(self._num_embedding, name="linear_embed")
self._output_module = snt.Linear(self._output_size, name="linear_output")
self._subcores = [
snt.LSTM(self._num_hidden, name="lstm_{}".format(i))
for i in range(self._lstm_depth)
]
if self._use_skip_connections:
skips = []
current_input_shape = self._num_embedding
for lstm in self._subcores:
input_shape = tf.TensorShape([current_input_shape])
skip = snt.SkipConnectionCore(
lstm,
input_shape=input_shape,
name="skip_{}".format(lstm.module_name))
skips.append(skip)
# SkipConnectionCore concatenates the input with the output, so the
# dimensionality increases with depth.
current_input_shape += self._num_hidden
self._subcores = skips
self._core = snt.DeepRNN(self._subcores, skip_connections=False,
name="deep_lstm")
def _build(self, one_hot_input_sequence):
"""Builds the deep LSTM model sub-graph.
Args:
one_hot_input_sequence: A Tensor with the input sequence encoded as a
one-hot representation. Its dimensions should be `[truncation_length,
batch_size, output_size]`.
Returns:
Tuple of the Tensor of output logits for the batch, with dimensions
`[truncation_length, batch_size, output_size]`, and the
      final state of the unrolled core.
"""
input_shape = one_hot_input_sequence.get_shape()
batch_size = input_shape[1]
batch_embed_module = snt.BatchApply(self._embed_module)
input_sequence = batch_embed_module(one_hot_input_sequence)
input_sequence = tf.nn.relu(input_sequence)
initial_state = self._core.initial_state(batch_size)
if self._use_dynamic_rnn:
output_sequence, final_state = tf.nn.dynamic_rnn(
cell=self._core,
inputs=input_sequence,
time_major=True,
initial_state=initial_state)
else:
rnn_input_sequence = tf.unstack(input_sequence)
output, final_state = tf.nn.static_rnn(
cell=self._core,
inputs=rnn_input_sequence,
initial_state=initial_state)
output_sequence = tf.stack(output)
batch_output_module = snt.BatchApply(self._output_module)
output_sequence_logits = batch_output_module(output_sequence)
return output_sequence_logits, final_state
@snt.reuse_variables
def generate_string(self, initial_logits, initial_state, sequence_length):
"""Builds sub-graph to generate a string, sampled from the model.
Args:
initial_logits: Starting logits to sample from.
initial_state: Starting state for the RNN core.
sequence_length: Number of characters to sample.
Returns:
A Tensor of characters, with dimensions `[sequence_length, batch_size,
output_size]`.
"""
current_logits = initial_logits
current_state = initial_state
generated_letters = []
for _ in range(sequence_length):
# Sample a character index from distribution.
char_index = tf.squeeze(tf.multinomial(current_logits, 1))
char_one_hot = tf.one_hot(char_index, self._output_size, 1.0, 0.0)
generated_letters.append(char_one_hot)
# Feed character back into the deep_lstm.
gen_out_seq, current_state = self._core(
tf.nn.relu(self._embed_module(char_one_hot)),
current_state)
current_logits = self._output_module(gen_out_seq)
generated_string = tf.stack(generated_letters)
return generated_string
def main(unused_argv):
train(
num_training_iterations=FLAGS.num_training_iterations,
report_interval=FLAGS.report_interval,
reduce_learning_rate_interval=FLAGS.reduce_learning_rate_interval)
if __name__ == "__main__":
tf.app.run()
|
the-stack_0_23775 | from bs4 import BeautifulSoup as Soup
from .fixtures import (
app_client,
make_app_client,
TABLES,
TEMP_PLUGIN_SECRET_FILE,
TestClient as _TestClient,
) # noqa
from datasette.app import Datasette
from datasette import cli
from datasette.plugins import get_plugins, DEFAULT_PLUGINS, pm
from datasette.utils import sqlite3, CustomRow
from jinja2.environment import Template
import base64
import json
import os
import pathlib
import re
import textwrap
import pytest
import urllib
at_memory_re = re.compile(r" at 0x\w+")
@pytest.mark.parametrize(
"plugin_hook", [name for name in dir(pm.hook) if not name.startswith("_")]
)
def test_plugin_hooks_have_tests(plugin_hook):
"Every plugin hook should be referenced in this test module"
tests_in_this_module = [t for t in globals().keys() if t.startswith("test_")]
ok = False
for test in tests_in_this_module:
if plugin_hook in test:
ok = True
assert ok, "Plugin hook is missing tests: {}".format(plugin_hook)
def test_plugins_dir_plugin_prepare_connection(app_client):
response = app_client.get(
"/fixtures.json?sql=select+convert_units(100%2C+'m'%2C+'ft')"
)
assert pytest.approx(328.0839) == response.json["rows"][0][0]
def test_plugin_prepare_connection_arguments(app_client):
response = app_client.get(
"/fixtures.json?sql=select+prepare_connection_args()&_shape=arrayfirst"
)
assert [
"database=fixtures, datasette.plugin_config(\"name-of-plugin\")={'depth': 'root'}"
] == response.json
@pytest.mark.parametrize(
"path,expected_decoded_object",
[
("/", {"template": "index.html", "database": None, "table": None}),
(
"/fixtures/",
{"template": "database.html", "database": "fixtures", "table": None},
),
(
"/fixtures/sortable",
{"template": "table.html", "database": "fixtures", "table": "sortable"},
),
],
)
def test_plugin_extra_css_urls(app_client, path, expected_decoded_object):
response = app_client.get(path)
links = Soup(response.body, "html.parser").findAll("link")
special_href = [
l for l in links if l.attrs["href"].endswith("/extra-css-urls-demo.css")
][0]["href"]
# This link has a base64-encoded JSON blob in it
encoded = special_href.split("/")[3]
assert expected_decoded_object == json.loads(
base64.b64decode(encoded).decode("utf8")
)
def test_plugin_extra_js_urls(app_client):
response = app_client.get("/")
scripts = Soup(response.body, "html.parser").findAll("script")
assert [
s
for s in scripts
if s.attrs
== {
"integrity": "SRIHASH",
"crossorigin": "anonymous",
"src": "https://plugin-example.com/jquery.js",
}
]
def test_plugins_with_duplicate_js_urls(app_client):
# If two plugins both require jQuery, jQuery should be loaded only once
response = app_client.get("/fixtures")
# This test is a little tricky, as if the user has any other plugins in
# their current virtual environment those may affect what comes back too.
# What matters is that https://plugin-example.com/jquery.js is only there once
# and it comes before plugin1.js and plugin2.js which could be in either
# order
scripts = Soup(response.body, "html.parser").findAll("script")
srcs = [s["src"] for s in scripts if s.get("src")]
# No duplicates allowed:
assert len(srcs) == len(set(srcs))
# jquery.js loaded once:
assert 1 == srcs.count("https://plugin-example.com/jquery.js")
# plugin1.js and plugin2.js are both there:
assert 1 == srcs.count("https://plugin-example.com/plugin1.js")
assert 1 == srcs.count("https://plugin-example.com/plugin2.js")
# jquery comes before them both
assert srcs.index("https://plugin-example.com/jquery.js") < srcs.index(
"https://plugin-example.com/plugin1.js"
)
assert srcs.index("https://plugin-example.com/jquery.js") < srcs.index(
"https://plugin-example.com/plugin2.js"
)
def test_plugins_render_cell_link_from_json(app_client):
sql = """
select '{"href": "http://example.com/", "label":"Example"}'
""".strip()
path = "/fixtures?" + urllib.parse.urlencode({"sql": sql})
response = app_client.get(path)
td = Soup(response.body, "html.parser").find("table").find("tbody").find("td")
a = td.find("a")
assert a is not None, str(a)
assert a.attrs["href"] == "http://example.com/"
assert a.attrs["data-database"] == "fixtures"
assert a.text == "Example"
def test_plugins_render_cell_demo(app_client):
response = app_client.get("/fixtures/simple_primary_key?id=4")
soup = Soup(response.body, "html.parser")
td = soup.find("td", {"class": "col-content"})
assert {
"column": "content",
"table": "simple_primary_key",
"database": "fixtures",
"config": {"depth": "table", "special": "this-is-simple_primary_key"},
} == json.loads(td.string)
def test_plugin_config(app_client):
assert {"depth": "table"} == app_client.ds.plugin_config(
"name-of-plugin", database="fixtures", table="sortable"
)
assert {"depth": "database"} == app_client.ds.plugin_config(
"name-of-plugin", database="fixtures", table="unknown_table"
)
assert {"depth": "database"} == app_client.ds.plugin_config(
"name-of-plugin", database="fixtures"
)
assert {"depth": "root"} == app_client.ds.plugin_config(
"name-of-plugin", database="unknown_database"
)
assert {"depth": "root"} == app_client.ds.plugin_config("name-of-plugin")
assert None is app_client.ds.plugin_config("unknown-plugin")
def test_plugin_config_env(app_client):
os.environ["FOO_ENV"] = "FROM_ENVIRONMENT"
assert {"foo": "FROM_ENVIRONMENT"} == app_client.ds.plugin_config("env-plugin")
# Ensure secrets aren't visible in /-/metadata.json
metadata = app_client.get("/-/metadata.json")
assert {"foo": {"$env": "FOO_ENV"}} == metadata.json["plugins"]["env-plugin"]
del os.environ["FOO_ENV"]
def test_plugin_config_env_from_list(app_client):
os.environ["FOO_ENV"] = "FROM_ENVIRONMENT"
assert [{"in_a_list": "FROM_ENVIRONMENT"}] == app_client.ds.plugin_config(
"env-plugin-list"
)
# Ensure secrets aren't visible in /-/metadata.json
metadata = app_client.get("/-/metadata.json")
assert [{"in_a_list": {"$env": "FOO_ENV"}}] == metadata.json["plugins"][
"env-plugin-list"
]
del os.environ["FOO_ENV"]
def test_plugin_config_file(app_client):
open(TEMP_PLUGIN_SECRET_FILE, "w").write("FROM_FILE")
assert {"foo": "FROM_FILE"} == app_client.ds.plugin_config("file-plugin")
# Ensure secrets aren't visible in /-/metadata.json
metadata = app_client.get("/-/metadata.json")
assert {"foo": {"$file": TEMP_PLUGIN_SECRET_FILE}} == metadata.json["plugins"][
"file-plugin"
]
os.remove(TEMP_PLUGIN_SECRET_FILE)
@pytest.mark.parametrize(
"path,expected_extra_body_script",
[
(
"/",
{
"template": "index.html",
"database": None,
"table": None,
"config": {"depth": "root"},
},
),
(
"/fixtures/",
{
"template": "database.html",
"database": "fixtures",
"table": None,
"config": {"depth": "database"},
},
),
(
"/fixtures/sortable",
{
"template": "table.html",
"database": "fixtures",
"table": "sortable",
"config": {"depth": "table"},
},
),
],
)
def test_plugins_extra_body_script(app_client, path, expected_extra_body_script):
r = re.compile(r"<script>var extra_body_script = (.*?);</script>")
json_data = r.search(app_client.get(path).text).group(1)
actual_data = json.loads(json_data)
assert expected_extra_body_script == actual_data
def test_plugins_asgi_wrapper(app_client):
response = app_client.get("/fixtures")
assert "fixtures" == response.headers["x-databases"]
def test_plugins_extra_template_vars(restore_working_directory):
with make_app_client(
template_dir=str(pathlib.Path(__file__).parent / "test_templates")
) as client:
response = client.get("/-/metadata")
assert response.status == 200
extra_template_vars = json.loads(
Soup(response.body, "html.parser").select("pre.extra_template_vars")[0].text
)
assert {
"template": "show_json.html",
"scope_path": "/-/metadata",
} == extra_template_vars
extra_template_vars_from_awaitable = json.loads(
Soup(response.body, "html.parser")
.select("pre.extra_template_vars_from_awaitable")[0]
.text
)
assert {
"template": "show_json.html",
"awaitable": True,
"scope_path": "/-/metadata",
} == extra_template_vars_from_awaitable
def test_plugins_async_template_function(restore_working_directory):
with make_app_client(
template_dir=str(pathlib.Path(__file__).parent / "test_templates")
) as client:
response = client.get("/-/metadata")
assert response.status == 200
extra_from_awaitable_function = (
Soup(response.body, "html.parser")
.select("pre.extra_from_awaitable_function")[0]
.text
)
expected = (
sqlite3.connect(":memory:").execute("select sqlite_version()").fetchone()[0]
)
assert expected == extra_from_awaitable_function
def test_default_plugins_have_no_templates_path_or_static_path():
# The default plugins that ship with Datasette should have their static_path and
# templates_path all set to None
plugins = get_plugins()
for plugin in plugins:
if plugin["name"] in DEFAULT_PLUGINS:
assert None is plugin["static_path"]
assert None is plugin["templates_path"]
@pytest.fixture(scope="session")
def view_names_client(tmp_path_factory):
tmpdir = tmp_path_factory.mktemp("test-view-names")
templates = tmpdir / "templates"
templates.mkdir()
plugins = tmpdir / "plugins"
plugins.mkdir()
for template in (
"index.html",
"database.html",
"table.html",
"row.html",
"show_json.html",
"query.html",
):
(templates / template).write_text("view_name:{{ view_name }}", "utf-8")
(plugins / "extra_vars.py").write_text(
textwrap.dedent(
"""
from datasette import hookimpl
@hookimpl
def extra_template_vars(view_name):
return {"view_name": view_name}
"""
),
"utf-8",
)
db_path = str(tmpdir / "fixtures.db")
conn = sqlite3.connect(db_path)
conn.executescript(TABLES)
return _TestClient(
Datasette(
[db_path], template_dir=str(templates), plugins_dir=str(plugins)
).app()
)
@pytest.mark.parametrize(
"path,view_name",
(
("/", "index"),
("/fixtures", "database"),
("/fixtures/units", "table"),
("/fixtures/units/1", "row"),
("/-/metadata", "json_data"),
("/fixtures?sql=select+1", "database"),
),
)
def test_view_names(view_names_client, path, view_name):
response = view_names_client.get(path)
assert response.status == 200
assert "view_name:{}".format(view_name) == response.text
def test_register_output_renderer_no_parameters(app_client):
response = app_client.get("/fixtures/facetable.testnone")
assert 200 == response.status
assert b"Hello" == response.body
def test_register_output_renderer_all_parameters(app_client):
response = app_client.get("/fixtures/facetable.testall")
assert 200 == response.status
# Lots of 'at 0x103a4a690' in here - replace those so we can do
# an easy comparison
body = at_memory_re.sub(" at 0xXXX", response.text)
assert {
"1+1": 2,
"datasette": "<datasette.app.Datasette object at 0xXXX>",
"columns": [
"pk",
"created",
"planet_int",
"on_earth",
"state",
"city_id",
"neighborhood",
"tags",
"complex_array",
"distinct_some_null",
],
"rows": [
"<sqlite3.Row object at 0xXXX>",
"<sqlite3.Row object at 0xXXX>",
"<sqlite3.Row object at 0xXXX>",
"<sqlite3.Row object at 0xXXX>",
"<sqlite3.Row object at 0xXXX>",
"<sqlite3.Row object at 0xXXX>",
"<sqlite3.Row object at 0xXXX>",
"<sqlite3.Row object at 0xXXX>",
"<sqlite3.Row object at 0xXXX>",
"<sqlite3.Row object at 0xXXX>",
"<sqlite3.Row object at 0xXXX>",
"<sqlite3.Row object at 0xXXX>",
"<sqlite3.Row object at 0xXXX>",
"<sqlite3.Row object at 0xXXX>",
"<sqlite3.Row object at 0xXXX>",
],
"sql": "select pk, created, planet_int, on_earth, state, city_id, neighborhood, tags, complex_array, distinct_some_null from facetable order by pk limit 51",
"query_name": None,
"database": "fixtures",
"table": "facetable",
"request": "<datasette.utils.asgi.Request object at 0xXXX>",
"view_name": "table",
} == json.loads(body)
# Test that query_name is set correctly
query_response = app_client.get("/fixtures/pragma_cache_size.testall")
assert "pragma_cache_size" == json.loads(query_response.body)["query_name"]
def test_register_output_renderer_custom_status_code(app_client):
response = app_client.get("/fixtures/pragma_cache_size.testall?status_code=202")
assert 202 == response.status
def test_register_output_renderer_custom_content_type(app_client):
response = app_client.get(
"/fixtures/pragma_cache_size.testall?content_type=text/blah"
)
assert "text/blah" == response.headers["content-type"]
def test_register_output_renderer_custom_headers(app_client):
response = app_client.get(
"/fixtures/pragma_cache_size.testall?header=x-wow:1&header=x-gosh:2"
)
assert "1" == response.headers["x-wow"]
assert "2" == response.headers["x-gosh"]
def test_register_output_renderer_can_render(app_client):
response = app_client.get("/fixtures/facetable?_no_can_render=1")
assert response.status == 200
links = (
Soup(response.body, "html.parser")
.find("p", {"class": "export-links"})
.findAll("a")
)
actual = [l["href"].split("/")[-1] for l in links]
# Should not be present because we sent ?_no_can_render=1
assert "facetable.testall?_labels=on" not in actual
# Check that it was passed the values we expected
assert hasattr(app_client.ds, "_can_render_saw")
assert {
"datasette": app_client.ds,
"columns": [
"pk",
"created",
"planet_int",
"on_earth",
"state",
"city_id",
"neighborhood",
"tags",
"complex_array",
"distinct_some_null",
],
"sql": "select pk, created, planet_int, on_earth, state, city_id, neighborhood, tags, complex_array, distinct_some_null from facetable order by pk limit 51",
"query_name": None,
"database": "fixtures",
"table": "facetable",
"view_name": "table",
}.items() <= app_client.ds._can_render_saw.items()
@pytest.mark.asyncio
async def test_prepare_jinja2_environment(app_client):
template = app_client.ds.jinja_env.from_string(
"Hello there, {{ a|format_numeric }}", {"a": 3412341}
)
rendered = await app_client.ds.render_template(template)
assert "Hello there, 3,412,341" == rendered
def test_publish_subcommand():
# This is hard to test properly, because publish subcommand plugins
# cannot be loaded using the --plugins-dir mechanism - they need
# to be installed using "pip install". So I'm cheating and taking
# advantage of the fact that cloudrun/heroku use the plugin hook
# to register themselves as default plugins.
assert ["cloudrun", "heroku"] == cli.publish.list_commands({})
def test_register_facet_classes(app_client):
response = app_client.get(
"/fixtures/compound_three_primary_keys.json?_dummy_facet=1"
)
assert [
{
"name": "pk1",
"toggle_url": "http://localhost/fixtures/compound_three_primary_keys.json?_dummy_facet=1&_facet_dummy=pk1",
"type": "dummy",
},
{
"name": "pk2",
"toggle_url": "http://localhost/fixtures/compound_three_primary_keys.json?_dummy_facet=1&_facet_dummy=pk2",
"type": "dummy",
},
{
"name": "pk3",
"toggle_url": "http://localhost/fixtures/compound_three_primary_keys.json?_dummy_facet=1&_facet_dummy=pk3",
"type": "dummy",
},
{
"name": "content",
"toggle_url": "http://localhost/fixtures/compound_three_primary_keys.json?_dummy_facet=1&_facet_dummy=content",
"type": "dummy",
},
{
"name": "pk1",
"toggle_url": "http://localhost/fixtures/compound_three_primary_keys.json?_dummy_facet=1&_facet=pk1",
},
{
"name": "pk2",
"toggle_url": "http://localhost/fixtures/compound_three_primary_keys.json?_dummy_facet=1&_facet=pk2",
},
{
"name": "pk3",
"toggle_url": "http://localhost/fixtures/compound_three_primary_keys.json?_dummy_facet=1&_facet=pk3",
},
] == response.json["suggested_facets"]
def test_actor_from_request(app_client):
app_client.get("/")
# Should have no actor
assert None == app_client.ds._last_request.scope["actor"]
app_client.get("/?_bot=1")
# Should have bot actor
assert {"id": "bot"} == app_client.ds._last_request.scope["actor"]
def test_actor_from_request_async(app_client):
app_client.get("/")
# Should have no actor
assert None == app_client.ds._last_request.scope["actor"]
app_client.get("/?_bot2=1")
# Should have bot2 actor
assert {"id": "bot2", "1+1": 2} == app_client.ds._last_request.scope["actor"]
def test_existing_scope_actor_respected(app_client):
app_client.get("/?_actor_in_scope=1")
assert {"id": "from-scope"} == app_client.ds._last_request.scope["actor"]
@pytest.mark.asyncio
@pytest.mark.parametrize(
"action,expected",
[
("this_is_allowed", True),
("this_is_denied", False),
("this_is_allowed_async", True),
("this_is_denied_async", False),
("no_match", None),
],
)
async def test_permission_allowed(app_client, action, expected):
actual = await app_client.ds.permission_allowed(
{"id": "actor"}, action, default=None
)
assert expected == actual
def test_actor_json(app_client):
assert {"actor": None} == app_client.get("/-/actor.json").json
assert {"actor": {"id": "bot2", "1+1": 2}} == app_client.get(
"/-/actor.json/?_bot2=1"
).json
@pytest.mark.parametrize(
"path,body", [("/one/", "2"), ("/two/Ray?greeting=Hail", "Hail Ray"),]
)
def test_register_routes(app_client, path, body):
response = app_client.get(path)
assert 200 == response.status
assert body == response.text
def test_register_routes_post(app_client):
response = app_client.post("/post/", {"this is": "post data"}, csrftoken_from=True)
assert 200 == response.status
assert "csrftoken" in response.json
assert "post data" == response.json["this is"]
def test_register_routes_csrftoken(restore_working_directory, tmpdir_factory):
templates = tmpdir_factory.mktemp("templates")
(templates / "csrftoken_form.html").write_text(
"CSRFTOKEN: {{ csrftoken() }}", "utf-8"
)
with make_app_client(template_dir=templates) as client:
response = client.get("/csrftoken-form/")
expected_token = client.ds._last_request.scope["csrftoken"]()
assert "CSRFTOKEN: {}".format(expected_token) == response.text
def test_register_routes_asgi(app_client):
response = app_client.get("/three/")
assert {"hello": "world"} == response.json
assert "1" == response.headers["x-three"]
@pytest.mark.asyncio
async def test_startup(app_client):
await app_client.ds.invoke_startup()
assert app_client.ds._startup_hook_fired
assert 2 == app_client.ds._startup_hook_calculation
def test_canned_queries(app_client):
queries = app_client.get("/fixtures.json").json["queries"]
queries_by_name = {q["name"]: q for q in queries}
assert {
"sql": "select 2",
"name": "from_async_hook",
"private": False,
} == queries_by_name["from_async_hook"]
assert {
"sql": "select 1, 'null' as actor_id",
"name": "from_hook",
"private": False,
} == queries_by_name["from_hook"]
def test_canned_queries_non_async(app_client):
response = app_client.get("/fixtures/from_hook.json?_shape=array")
assert [{"1": 1, "actor_id": "null"}] == response.json
def test_canned_queries_async(app_client):
response = app_client.get("/fixtures/from_async_hook.json?_shape=array")
assert [{"2": 2}] == response.json
def test_canned_queries_actor(app_client):
assert [{"1": 1, "actor_id": "bot"}] == app_client.get(
"/fixtures/from_hook.json?_bot=1&_shape=array"
).json
|
the-stack_0_23776 | """
Model definitions.
Note: some models are denormalized by design, this greatly simplifies (and speeds up)
the queries necessary to fetch a certain entry.
"""
import os, random, hashlib, string
from django.db import models
from django.db import transaction
from django.contrib.auth.models import User, Group
from django.contrib import admin
from django.conf import settings
from mptt.models import MPTTModel, TreeForeignKey
from datetime import datetime, timedelta
from main.server import html, notegen
# import all constants
from main.server.const import *
import markdown
class UserProfile( models.Model ):
"""
Stores user options
>>> user, flag = User.objects.get_or_create(first_name='Jane', last_name='Doe', username='jane', email='jane')
>>> prof = user.get_profile()
>>> prof.json = dict( message='Hello world' )
>>> prof.save()
"""
user = models.OneToOneField(User, unique=True, related_name='profile')
# this designates a user as moderator
type = models.IntegerField(choices=USER_TYPES, default=USER_NORMAL)
# globally unique id
uuid = models.TextField(null=False, db_index=True, unique=True)
score = models.IntegerField(default=0, blank=True)
reputation = models.IntegerField(default=0, blank=True, db_index=True)
views = models.IntegerField(default=0, blank=True)
bronze_badges = models.IntegerField(default=0)
silver_badges = models.IntegerField(default=0)
gold_badges = models.IntegerField(default=0)
json = models.TextField(default="", null=True)
last_visited = models.DateTimeField(auto_now=True)
suspended = models.BooleanField(default=False, null=False)
about_me = models.TextField(default="(about me)", null=True)
html = models.TextField(default="", null=True)
location = models.TextField(default="", null=True)
website = models.URLField(default="", null=True, max_length=100)
openid = models.URLField(default="http://www.biostars.org", null=True)
display_name = models.CharField(max_length=35, default='User', null=False, db_index=True)
last_login_ip = models.IPAddressField(default="0.0.0.0", null=True)
openid_merge = models.NullBooleanField(default=False, null=True)
@property
def is_moderator(self):
return (self.type == USER_MODERATOR) or (self.type == USER_ADMIN)
@property
def is_admin(self):
return self.type == USER_ADMIN
@property
def is_active(self):
if self.suspended:
return False
if self.is_moderator or self.score >= settings.MINIMUM_REPUTATION:
return True
        # right now we let it fall through to True
        # additional throttles may go here
return True
def get_absolute_url(self):
return "/user/show/%i/" % self.user.id
def status(self):
if self.suspended:
return 'suspended'
else:
return 'active'
def authorize(self, moderator):
"Authorizes access to a user data moderator"
# we will cascade through options here
# no access to anonymous users
if moderator.is_anonymous():
return False
# other admins may only be changed via direct database access
if self.is_admin:
return False
# moderator that is also an admin may access everyone else
if moderator.profile.is_admin:
return True
# a moderator's private info may not be accessed past this point
if self.is_moderator:
return False
return moderator.profile.is_moderator
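    # Hedged summary of the cascade above (added commentary, not in the original source):
    #   anonymous moderator   -> False
    #   target is an admin    -> False
    #   moderator is an admin -> True
    #   target is a moderator -> False
    #   otherwise             -> whether the requesting user is a moderator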
def editable(self, moderator):
"Is this users content editable by a moderator"
# everyone can access themselves
if self.user == moderator:
return True
return self.authorize(moderator)
@property
def note_count(self):
note_count = Note.objects.filter(target=self.user).count()
new_count = Note.objects.filter(target=self.user, unread=True).count()
return (note_count, new_count)
class Tag(models.Model):
name = models.TextField(max_length=50)
count = models.IntegerField(default=0)
class TagAdmin(admin.ModelAdmin):
list_display = ('name', 'count')
admin.site.register(Tag, TagAdmin)
class PostManager(models.Manager):
    ''' Returns comment posts (post_type=POST_COMMENT) with author and profile data preloaded '''
def get_query_set(self):
return super(PostManager, self).get_query_set().select_related('author','author__profile','children',).filter(post_type=POST_COMMENT)
class AnswerManager(models.Manager):
    ''' Returns answer posts (post_type=POST_ANSWER) with author and profile data preloaded '''
def get_query_set(self):
return super(AnswerManager, self).get_query_set().select_related('author','author__profile','children').filter(post_type=POST_ANSWER)
class Post(MPTTModel):
"""
A posting is the basic content generated by a user
>>> user, flag = User.objects.get_or_create(first_name='Jane', last_name='Doe', username='jane', email='jane')
>>> post = Post.objects.create(author=user, post_type=POST_QUESTION)
>>> content ='*A*'
>>> post.create_revision(content=content)
>>> post.html
u'<p><em>A</em></p>'
"""
author = models.ForeignKey(User)
content = models.TextField(blank=True) # The underlying Markdown
html = models.TextField(blank=True) # this is the sanitized HTML for display
title = models.TextField(blank=True)
slug = models.SlugField(blank=True, max_length=200)
tag_string = models.CharField(max_length=200) # The tag string is the canonical form of the post's tags
tag_set = models.ManyToManyField(Tag) # The tag set is built from the tag string and used only for fast filtering
views = models.IntegerField(default=0, blank=True)
score = models.IntegerField(default=0, blank=True)
creation_date = models.DateTimeField(db_index=True)
lastedit_date = models.DateTimeField()
lastedit_user = models.ForeignKey(User, related_name='editor')
deleted = models.BooleanField()
closed = models.BooleanField()
post_type = models.IntegerField(choices=POST_TYPES, db_index=True)
    # this will maintain parent-child relationships between posts
#parent = models.ForeignKey('self', related_name="children", null=True, blank=True)
parent = TreeForeignKey('self', null=True, blank=True, related_name='children')
#
# denormalized fields only that only apply to specific cases
#
comment_count = models.IntegerField(default=0)
revision_count = models.IntegerField(default=0)
child_count = models.IntegerField(default=0, blank=True) # number of children (other posts associated with the post)
post_accepted = models.BooleanField(default=False) # the post has been accepted
    answer_accepted = models.BooleanField()  # set when one of this question's answers has been accepted
unanswered = models.BooleanField(db_index=True) # this is a question with no answers
answer_count = models.IntegerField(default=0, blank=True)
# this field will be used to allow posts to float back into relevance
touch_date = models.DateTimeField(db_index=True)
class MPTTMeta:
order_insertion_by = ['creation_date']
def get_absolute_url(self):
return "/post/show/%i/" % self.id
@property
def status(self):
        # some say this is ugly but it greatly simplifies the templates
if self.post_accepted:
return 'answer-accepted'
elif self.answer_count:
return 'answered'
else:
return 'unanswered'
@property
def post_type_name(self):
"Returns a user friendly name for the post type"
return POST_REV_MAP[self.post_type]
    def is_owner(self, user):
        "True if the given user authored this post"
        return (self.author == user)
@transaction.commit_on_success
def notify(self):
"Generates notifications to all users related with this post. Invoked only on the creation of the post"
# create a notification for the post that includes all authors of every child
root = self.get_root()
authors = set( [ root.author] )
for child in root.get_descendants():
authors.add( child.author )
text = notegen.post_action(user=self.author, post=self)
# the current author will get a message that is not new
authors.remove(self.author)
for target in authors:
Note.send(sender=self.author, target=target, content=text, type=NOTE_USER, unread=True, date=self.creation_date)
# for the current post author this is not a new message
Note.send(sender=self.author, target=self.author, content=text, type=NOTE_USER, unread=False, date=self.creation_date)
def changed(self, content=None, title=None, tag_string=None):
"Tests post parameters"
return (content == self.content and tag_string == self.tag_string and title == self.title)
def create_revision(self, content=None, title=None, tag_string=None, author=None, date=None, action=REV_NONE):
"""
Creates a new revision of the post with the given data.
Content, title and tags are assumed to be unmodified if not given.
Author is assumed to be same as original author if not given.
Date is assumed to be now if not given.
"""
content = content or self.content
title = title or self.title
tag_string = tag_string or self.tag_string
author = author or self.author
date = date or datetime.now()
# transform the content to UNIX style line endings
content = content.replace('\r\n', '\n')
content = content.replace('\r', '\n')
# creates a new revision for the post
revision = PostRevision(post=self, content=content, tag_string=tag_string, title=title, author=author, date=date, action=action)
revision.save()
# Update our metadata
self.lastedit_user = author
self.content = content
self.title = title
self.set_tags(tag_string)
self.save()
def get_title(self):
title = self.title
if self.deleted:
title = "%s [deleted ]" % self.title
elif self.closed:
title = "%s [closed]" % self.title
return title
    def current_revision(self):
        """
        Returns the most recent revision of the post. Primarily useful for getting the
        current raw text of the post
        """
        return self.revisions.order_by('-date')[0]
def moderator_action(self, action, moderator, date=None):
"""
Performs a moderator action on the post. Takes an action (one of REV_ACTIONS)
and a user. Date is assumed to be now if not provided
"""
text = notegen.post_moderator_action(user=moderator, post=self, action=action)
Note.send(target=self.author, sender=moderator, post=self, content=text, type=NOTE_MODERATOR)
self.create_revision(action=action)
if action == REV_CLOSE:
self.closed = True
elif action == REV_REOPEN:
self.closed = False
elif action == REV_DELETE:
self.deleted = True
elif action == REV_UNDELETE:
self.deleted = False
else:
raise Exception('Invalid moderator action %s' % action)
self.save()
def authorize(self, user, strict=True):
"Verfifies access by a request object. Strict mode fails immediately."
# no access to anonymous users
if user.is_anonymous():
return False
# everyone may access posts they have authored
if user == self.author:
return True
return user.profile.is_moderator
def get_vote(self, user, vote_type):
if user.is_anonymous():
return None
try:
return self.votes.get(author=user, type=vote_type)
except Vote.DoesNotExist:
return None
def add_vote(self, user, vote_type):
vote = Vote(author=user, type=vote_type, post=self)
vote.save()
return vote
def remove_vote(self, user, vote_type):
''' Removes a vote from a user of a certain type if it exists
Returns True if removed, False if it didn't exist'''
vote = self.get_vote(user, vote_type)
if vote:
vote.delete()
return True
return False
def set_tags(self, tag_string):
''' Sets the post's tags to a space-separated string of tags '''
self.tag_string = tag_string
self.save()
self.tag_set.clear()
if not tag_string:
return
tags = []
for tag_name in tag_string.split(' '):
try:
tags.append(Tag.objects.get(name=tag_name))
except Tag.DoesNotExist:
tag = Tag(name=tag_name)
tag.save()
tags.append(tag)
self.tag_set.add(*tags)
def get_tags(self):
''' Returns the post's tags as a list of strings '''
return self.tag_string.split(' ')
def details(self):
return
def apply(self, dir):
is_answer = self.parent and self.post_type == POST_ANSWER
is_comment = self.parent and self.post_type == POST_COMMENT
if is_answer:
self.parent.answer_count += dir
self.parent.save()
if is_comment:
self.parent.comment_count += dir
self.parent.save()
def comments(self):
objs = Post.objects.filter(parent=self, post_type=POST_COMMENT).select_related('author','author__profile')
return objs
def css(self):
"Used during rendering"
if self.deleted:
return "post-deleted"
elif self.closed:
return "post-closed"
else:
return "post-active"
objects = models.Manager()
answers = AnswerManager()
class PostRevision(models.Model):
"""
Represents various revisions of a single post
"""
post = models.ForeignKey(Post, related_name='revisions')
content = models.TextField()
tag_string = models.CharField(max_length=200)
title = models.TextField(blank=True)
# Moderator action performed in this revision, if applicable
action = models.IntegerField(choices=REV_ACTIONS, default=REV_NONE)
author = models.ForeignKey(User)
date = models.DateTimeField()
def html(self):
'''We won't cache the HTML in the DB because revisions are viewed fairly infrequently '''
return html.generate(self.content)
def get_tags(self):
''' Returns the revision's tags as a list of strings '''
return self.tag_string.split(' ')
def apply(self, dir=1):
self.post.revision_count += dir
self.post.save()
class PostAdmin(admin.ModelAdmin):
list_display = ('id', 'title', )
admin.site.register(Post, PostAdmin)
class PostRevisionAdmin(admin.ModelAdmin):
list_display = ('id', 'title', )
admin.site.register(PostRevision, PostRevisionAdmin)
class Note(models.Model):
"""
Creates simple notifications that are active until the user deletes them
"""
sender = models.ForeignKey(User, related_name="note_sender") # the creator of the notification
target = models.ForeignKey(User, related_name="note_target", db_index=True) # the user that will get the note
    post = models.ForeignKey(Post, related_name="note_post", null=True, blank=True) # the post that the note refers to (if any)
    content = models.CharField(max_length=5000, default='') # this contains the raw message
    html = models.CharField(max_length=5000, default='') # this contains the sanitized content
date = models.DateTimeField(null=False)
unread = models.BooleanField(default=True)
type = models.IntegerField(choices=NOTE_TYPES, default=NOTE_USER)
@classmethod
    def send(cls, **params):
note = Note.objects.create(**params)
return note
def get_absolute_url(self):
return "/user/show/%s/" % self.target.id
@property
def status(self):
return 'new' if self.unread else 'old'
class Vote(models.Model):
"""
>>> user, flag = User.objects.get_or_create(first_name='Jane', last_name='Doe', username='jane', email='jane')
>>> post = Post.objects.create(author=user, post_type=POST_QUESTION)
>>> post.create_revision(content='ABC')
>>> vote = Vote(author=user, post=post, type=VOTE_UP)
>>> vote.score()
1
"""
author = models.ForeignKey(User)
post = models.ForeignKey(Post, related_name='votes')
type = models.IntegerField(choices=VOTE_TYPES)
def score(self):
return POST_SCORE.get(self.type, 0)
def reputation(self):
return USER_REP.get(self.type, 0)
def voter_reputation(self):
return VOTER_REP.get(self.type, 0)
def apply(self, dir=1):
"Applies the score and reputation changes. Direction can be set to -1 to undo (ie delete vote)"
if self.reputation():
prof = self.post.author.get_profile()
prof.score += dir * self.reputation()
prof.save()
if self.voter_reputation():
prof = self.author.get_profile()
prof.score += dir * self.voter_reputation()
prof.save()
if self.score():
self.post.score += dir * self.score()
self.post.save()
if self.type == VOTE_ACCEPT:
answer = self.post
question = self.post.parent
if dir == 1:
answer.post_accepted = True
question.answer_accepted = True
else:
answer.post_accepted = False
question.answer_accepted = False
answer.save()
#question.save()
class Badge(models.Model):
name = models.CharField(max_length=50)
description = models.CharField(max_length=200)
type = models.IntegerField(choices=BADGE_TYPES)
unique = models.BooleanField(default=False) # Unique badges may be earned only once
secret = models.BooleanField(default=False) # Secret badges are not listed on the badge list
count = models.IntegerField(default=0) # Total number of times awarded
def get_absolute_url(self):
return "/badge/show/%s/" % self.id
class Award(models.Model):
'''
    A badge being awarded to a user. This cannot be a ManyToManyField
    because some badges may be earned multiple times
'''
badge = models.ForeignKey(Badge)
user = models.ForeignKey(User)
date = models.DateTimeField()
def apply(self, dir=1):
type = self.badge.type
prof = self.user.get_profile()
if type == BADGE_BRONZE:
prof.bronze_badges += dir
if type == BADGE_SILVER:
prof.silver_badges += dir
if type == BADGE_GOLD:
prof.gold_badges += dir
prof.save()
self.badge.count += dir
self.badge.save()
def apply_award(request, user, badge_name, messages=None):
badge = Badge.objects.get(name=badge_name)
award = Award.objects.filter(badge=badge, user=user)
if award and badge.unique:
# this badge has already been awarded
return
community = User.objects.get(username='community')
award = Award.objects.create(badge=badge, user=user)
text = notegen.badgenote(award.badge)
note = Note.send(sender=community, target=user, content=text)
if messages:
messages.info(request, note.html)
# most of the site functionality, reputation changes
# and voting are applied automatically via database signals
#
# data migration will need to route through
# these models (this application) to ensure that all actions
# get applied properly
#
from django.db.models import signals
# Many models have apply() methods that need to be called when they are created
# and called with dir=-1 when deleted to update something.
MODELS_WITH_APPLY = [ Post, Vote, Award, PostRevision ]
def apply_instance(sender, instance, created, raw, *args, **kwargs):
"Applies changes from an instance with an apply() method"
if created and not raw: # Raw is true when importing from fixtures, in which case votes are already applied
instance.apply(+1)
def unapply_instance(sender, instance, *args, **kwargs):
"Unapplies an instance when it is deleted"
instance.apply(-1)
for model in MODELS_WITH_APPLY:
signals.post_save.connect(apply_instance, sender=model)
signals.post_delete.connect(unapply_instance, sender=model)
def make_uuid():
"Returns a unique id"
x = random.getrandbits(256)
u = hashlib.md5(str(x)).hexdigest()
return u
def create_profile(sender, instance, created, *args, **kwargs):
"Post save hook for creating user profiles on user save"
if created:
uuid = make_uuid()
display_name = html.nuke(instance.get_full_name())
UserProfile.objects.create(user=instance, uuid=uuid, display_name=display_name)
def update_profile(sender, instance, *args, **kwargs):
"Pre save hook for profiles"
instance.html = html.generate(instance.about_me)
from django.template.defaultfilters import slugify
def create_post(sender, instance, *args, **kwargs):
"Pre save post information that needs to be applied"
if not hasattr(instance, 'lastedit_user'):
instance.lastedit_user = instance.author
if not instance.creation_date:
instance.creation_date = datetime.now()
if not instance.lastedit_date:
instance.lastedit_date = datetime.now()
if not instance.title:
instance.title = "%s: %s" %(POST_MAP[instance.post_type], instance.get_root().title)
instance.slug = slugify(instance.title)
# generate the HTML from the content
instance.html = html.generate(instance.content)
# set the touch date
instance.touch_date = datetime.now()
def create_post_note(sender, instance, created, *args, **kwargs):
"Post save notice on a post"
if created:
# when a new post is created all descendants are notified
instance.notify()
def create_award(sender, instance, *args, **kwargs):
"Pre save award function"
if not instance.date:
instance.date = datetime.now()
def create_note(sender, instance, *args, **kwargs):
"Pre save notice function"
if not instance.date:
instance.date = datetime.now()
instance.html = html.generate(instance.content)
def tags_changed(sender, instance, action, pk_set, *args, **kwargs):
"Applies tag count updates upon post changes"
if action == 'post_add':
for pk in pk_set:
tag = Tag.objects.get(pk=pk)
tag.count += 1
tag.save()
if action == 'post_delete':
for pk in pk_set:
tag = Tag.objects.get(pk=pk)
tag.count -= 1
tag.save()
if action == 'pre_clear': # Must be pre so we know what was cleared
for tag in instance.tag_set.all():
tag.count -= 1
tag.save()
def tag_created(sender, instance, created, *args, **kwargs):
"Zero out the count of a newly created Tag instance to avoid double counting in import"
if created and instance.count != 0:
# To avoid infinite recursion, we must disconnect the signal temporarily
signals.post_save.disconnect(tag_created, sender=Tag)
instance.count = 0
instance.save()
signals.post_save.connect(tag_created, sender=Tag)
# now connect all the signals
signals.post_save.connect( create_profile, sender=User )
signals.pre_save.connect( update_profile, sender=UserProfile )
signals.pre_save.connect( create_post, sender=Post )
signals.post_save.connect( create_post_note, sender=Post )
signals.pre_save.connect( create_note, sender=Note )
signals.pre_save.connect( create_award, sender=Award )
signals.m2m_changed.connect( tags_changed, sender=Post.tag_set.through )
signals.post_save.connect( tag_created, sender=Tag )
# adding full text search capabilities
from whoosh import store, fields, index
WhooshSchema = fields.Schema(content=fields.TEXT(), pid=fields.NUMERIC(stored=True))
def create_index(sender=None, **kwargs):
if not os.path.exists(settings.WHOOSH_INDEX):
os.mkdir(settings.WHOOSH_INDEX)
ix = index.create_in(settings.WHOOSH_INDEX, WhooshSchema)
writer = ix.writer()
signals.post_syncdb.connect(create_index)
def update_index(sender, instance, created, **kwargs):
ix = index.open_dir(settings.WHOOSH_INDEX)
writer = ix.writer()
if instance.post_type in POST_FULL_FORM:
text = instance.title + instance.content
else:
text = instance.content
if created:
writer.add_document(content=text, pid=instance.id)
writer.commit()
else:
writer.update_document(content=text, pid=instance.id)
writer.commit()
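# Hedged sketch (added, not part of the original module): a helper along these lines
# is assumed for querying the Whoosh index maintained above; only the 'content' field
# is indexed and the stored 'pid' field maps each hit back to a Post id.
def search_index_sketch(text):
    "Return the Post ids whose indexed content matches the given query text"
    from whoosh.qparser import QueryParser
    ix = index.open_dir(settings.WHOOSH_INDEX)
    with ix.searcher() as searcher:
        query = QueryParser("content", ix.schema).parse(text)
        return [hit['pid'] for hit in searcher.search(query)]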
def set_text_indexing(switch):
if switch:
signals.post_save.connect(update_index, sender=Post)
else:
signals.post_save.disconnect(update_index, sender=Post)
set_text_indexing(True) |
the-stack_0_23777 | import inspect
import os
import sys
from random import choice
from typing import List
__author__ = "GLNB"
__copyright__ = "GLNB"
__license__ = "mit"
try:
from .dictionaries import invisible_chars, dict_latin
except ImportError:
from dictionaries import invisible_chars, dict_latin
__location__ = os.path.join(
os.getcwd(), os.path.dirname(inspect.getfile(inspect.currentframe()))
)
class Scrambler:
    # The look-alike character tables are built by parsing the Unicode list of confusable characters.
"""
.. code:: python
>>> from text_scrambler import Scrambler
>>> scr = Scrambler()
>>> text = "This is an example"
>>> text_1 = scr.scramble(text, level=1)
>>> #############
>>> # adding only zwj/zwnj characters
>>> print(text, text_1, sep="\\n")
This is an example
This is an example
>>> assert text != text_1
>>> print(len(text), len(text_1))
18 35
>>> # though the texts look similar, the second one has more characters
>>> #############
>>> text_2 = scr.scramble(text, level=2)
>>> # replacing some latin letters by their cyrillic/greek equivalent
>>> print(text_2)
Тhiѕ iѕ an ехаmple
>>> for char, char_2 in zip(text, text_2):
... if char != char_2:
... print(char, char_2)
...
T Т
s ѕ
s ѕ
e е
x х
a а
>>> #############
>>> text_3 = scr.scramble(text, level=3)
>>> # adding zwj/zwnj characters and replacing latin letters
>>> print(text_3)
Thіs iѕ аn eхаmple
>>> print(text, text_3, sep="\\n")
This is an example
Thіs iѕ аn eхаmple
>>> assert text_3 != text
>>> #############
>>> text_4 = scr.scramble(text, level=4)
>>> # replacing all characters by any unicode looking like character
>>> print(text_4)
⊤𝒽𝐢𝘴 𝘪𝙨 𝞪ռ 𝙚⨯𝚊mρ𝟙ҽ
>>> #
>>> # generating several versions
>>> versions = scr.generate(text, 10, level=4)
>>> for txt in versions:
... print(txt)
...
𝕋𝗵𝕚𝔰 𝙞ѕ ɑ𝗇 ꬲ𝗑𝒂m𝛠Ⲓ𝚎
𝔗һ𑣃ƽ ˛ꜱ 𝛼𝐧 𝐞𝖝𝛼m𝜌𝟏ℯ
Th𝓲𝔰 ⅈ𝔰 αn ꬲ⤬αm⍴𞸀e
𝗧𝗵i𝑠 i𝖘 ⍺𝘯 𝗲𝔁аm𝘱𝙸𝔢
⊤𝚑𝑖s ɪ𝚜 𝜶𝑛 𝖾𝘅𝒶m𝛒𝑙𝓮
𝘛h𝙞ꮪ ⅈ𝗌 𝗮𝐧 ꬲᕽ𝓪m𝜌⏽𝓮
𝙏𝕙і𝓈 ıꜱ 𝔞𝕟 𝗲𝕩𝛂mр𐌉𝚎
𝕿Ꮒℹ𝐬 𝗶𝗌 𝛼𝔫 𝗲𝐱𝓪m𝞎𝙡𝖊
⟙h𝜾ꮪ i𝘴 𝝰𝒏 𝙚ᕽ𝗮m𝗽𝗜𝗲
𝖳հ𝒊s 𝕚𝙨 𝖆𝑛 𝘦𝔁аm𝜌𝐈𝗲
>>> versions = scr.generate(text, 1000, level=1)
>>> assert len(versions) == len(set(versions))
>>> # all unique
"""
def __init__(
self,
confusables_file=os.path.join(
__location__, "txt_files", "confusablesSummary.txt"
),
):
# The confusables can be found at:
# https://www.unicode.org/Public/security/13.0.0/confusables.txt
self.confusables_file = confusables_file
self.invisible_chars = invisible_chars
self.dict_latin = dict_latin
self._parse_unicode_file()
def __str__(self):
return self.scramble("<__main__.Scrambler object>", level=4)
__repr__ = __str__
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, exc_traceback):
if exc_type:
print(f"exc_type: {exc_type}", file=sys.stderr)
print(f"exc_value: {exc_value}", file=sys.stderr)
print(f"exc_traceback: {exc_traceback}", file=sys.stderr)
    def _parse_unicode_file(self) -> None:
        """Build self.unicode_dict, mapping each character to its list of
        unicode confusables, by parsing self.confusables_file"""
self.unicode_dict = {}
file = open(self.confusables_file, encoding="utf-8")
ls_lines_confusable = []
for _ in range(32):
file.readline()
for line in file:
if line.startswith("#"):
ls_lines_confusable.append(line[:-1]) # not taking the \n
file.close()
ls_lines_confusable = ls_lines_confusable[
:-1
] # not taking the last line (total)
for line in ls_lines_confusable:
_, char, *ls_chars = line.split("\t")
if len(char) > 1:
continue
self.unicode_dict[char] = ls_chars
def scramble(self, text: str, level: int = 1) -> str:
"""return the text scrambled
:param text: the text to scramble
:type text: str
:param level: default to 1
:type level: int, optional
**level**:
1: insert non printable characters within the text
            2: replace some latin letters with their Greek or Cyrillic equivalent
            3: insert non printable characters and replace some latin letters with their Greek or Cyrillic equivalent
            4: insert non printable characters and replace every possible character with a randomly picked look-alike unicode character
:return: the scrambled string
:rtype: str
"""
if level not in range(1, 5):
raise ValueError(f"level {level} not implemented")
new_text = ""
if level == 1:
for char in text:
new_text += char + choice(self.invisible_chars)
elif level == 2:
for char in text:
new_text += choice(self.dict_latin.get(char, []) + [char])
new_text += " "
elif level == 3:
for char in text:
new_text += choice(self.dict_latin.get(char, []) + [char]) + choice(
self.invisible_chars
)
elif level == 4:
for char in text:
new_text += choice(self.unicode_dict.get(char, []) + [char]) + choice(
self.invisible_chars
)
else:
raise ValueError(f"level '{level}' not implemented")
return new_text[:-1]
def generate(self, text: str, n: int = 1000, level: int = 3) -> List[str]:
"""return a list containing n versions of the text jammed
:param text: the text to be scrambled
:type text: str
:param n: the number of time the text should be scrambled, defaults to 1000
:type n: int, optional
:param level: the level of the scrambling, defaults to 3
:type level: int, optional
:return: a list of scrambled texts, all differents
:rtype: List[str]
.. code:: python
>>> from text_scrambler import Scrambler
>>> scr = Scrambler()
>>> text = "A cranial nerve nucleus is a collection of neurons in the brain stem that is associated with one or more of the cranial nerves."
>>> texts = scr.generate(text, 1000, level=1)
>>> assert texts[0] != text
>>> for scrambled_text in texts:
... assert text != scrambled_text
...
>>> print(texts[0])
A cranial nerve nucleus is a collection of neurons in the brain stem that is associated with one or more of the cranial nerves.
>>> # different from the original text
"""
ls_new_text = []
num_generated = 0
while True:
new_text = self.scramble(text, level=level)
if new_text not in ls_new_text:
ls_new_text.append(new_text)
num_generated += 1
if num_generated == n:
break
return ls_new_text
|
the-stack_0_23778 | from ops.framework import Object, StoredState
class KeepalivedPeers(Object):
state = StoredState()
def __init__(self, charm, relation_name):
super().__init__(charm, relation_name)
self._relation_name = relation_name
self._relation = self.framework.model.get_relation(self._relation_name)
self.framework.observe(charm.on.keepalived_initialized, self)
@property
def is_joined(self):
return self._relation is not None
def on_keepalived_initialized(self, event):
if not self.framework.model.unit.is_leader():
return
# A workaround for LP: #1859769.
if not self.is_joined:
event.defer()
return
self._relation.data[self.model.app]['initial_unit'] = self.framework.model.unit.name
@property
def initial_unit(self):
"""Return the unit that is supposed to have an initial MASTER state."""
if self.is_joined:
return self._relation.data[self.model.app].get('initial_unit')
else:
return None
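# Hedged wiring sketch (added commentary, not part of the original library): a charm
# is assumed to instantiate the helper with its peer relation name, for example
#   self.peers = KeepalivedPeers(self, "keepalived")
# and later read self.peers.initial_unit to decide which unit starts out as MASTER.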
|
the-stack_0_23779 | FILTER_WORDS = [
"who",
"http",
"twitch",
"onlyfans",
"instagram",
"snapchat",
"website",
"profile",
"ethereum",
"commentmessage"
"snap",
"youtube",
]
def is_valid_comment(raw_text):
""" Makes sure the comment is not spam, is not just asking for who it is, does not contain link, and is somewhat long """
comment_text = raw_text.lower()
if len(comment_text) <= 14:
return False
for word in FILTER_WORDS:
if comment_text.find(word) != -1:
return False
return True
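if __name__ == "__main__":
    # Hedged examples (added, not part of the original module) illustrating the filter:
    # very short comments, guessing requests and link/handle spam are all rejected.
    assert is_valid_comment("The second chorus is a sample from a 1978 disco track.")
    assert not is_valid_comment("who is this?")  # too short and asks who it is
    assert not is_valid_comment("check out my instagram for more clips and follows")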
|
the-stack_0_23781 | """ Module for testing postgres update"""
import os
import psycopg2
class DBConnector:
"""Establishes connection to local postgres"""
def __init__(self):
""" init method that establishes connection
- *dbname*: the database name
- *user*: user name used to authenticate
- *password*: password used to authenticate
- *host*: database host address (defaults to UNIX socket if not provided)
- *port*: connection port number (defaults to 5432 if not provided)
"""
self.host = 'localhost'
self.user = os.environ['user']
def connect(self):
""" Connects to somedb in local postgres"""
connection = psycopg2.connect(
host=self.host,
user=self.user,
password='',
database='somedb'
)
return connection
def get_all_changed_rows(self):
""" Get all"""
sql = "Select user_id from users where email like '%test_changed%'"
connection = self.connect()
with connection:
with connection.cursor() as curs:
curs.execute(sql)
return [r[0] for r in curs.fetchall()]
def bulk_insert(self):
""" bulk inserts million records in no time """
connection = self.connect()
with connection:
with connection.cursor() as curs:
with open('users.csv', 'r') as f:
next(f)
curs.copy_from(f, 'users', sep=',')
def update(self, row):
""" updates and test concurrency """
sql = f"""Update users set email='test_changed_{row}' where username='test_{row}'"""
print(sql)
connection = self.connect()
with connection:
with connection.cursor() as curs:
curs.execute(sql)
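if __name__ == "__main__":
    # Hedged usage sketch (added, not part of the original module): assumes the 'user'
    # environment variable is set and a local 'somedb' database with a users table
    # matching users.csv exists, as the methods above already require.
    connector = DBConnector()
    connector.bulk_insert()
    connector.update(1)
    print(connector.get_all_changed_rows())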
|
the-stack_0_23782 | class ListNode:
def __init__(self, key, val):
self.key = key
self.val = val
self.next = None
class MyHashMap:
def __init__(self):
"""
Initialize your data structure here.
"""
self.mod = 100
self.data = [None] * self.mod
def put(self, key: int, value: int) -> None:
"""
value will always be non-negative.
"""
index = key % self.mod
node = ListNode(key, value)
node.next = self.data[index]
self.data[index] = node
def get(self, key: int) -> int:
"""
Returns the value to which the specified key is mapped, or -1 if this map contains no mapping for the key
"""
index = key % self.mod
node = self.data[index]
while node is not None:
if node.key == key:
return node.val
node = node.next
return -1
def remove(self, key: int) -> None:
"""
Removes the mapping of the specified value key if this map contains a mapping for the key
"""
index = key % self.mod
head = node = ListNode(None,None)
node.next = self.data[index]
while node.next is not None:
if node.next.key == key:
node.next = node.next.next
else:
node = node.next
self.data[index] = head.next
# Your MyHashMap object will be instantiated and called as such:
obj = MyHashMap()
obj.put(1, 2)
param_2 = obj.get(1)
print(param_2)
obj.remove(1)
param_2 = obj.get(1)
print(param_2)
|
the-stack_0_23784 | # Welcome to the gCloud Datastore Demo! (hit enter)
# We're going to walk through some of the basics...
# Don't worry though. You don't need to do anything, just keep hitting enter...
# Copyright 2014 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Let's start by importing the demo module and initializing our connection.
from gcloud import datastore
from gcloud.datastore import demo
demo.initialize()
# Let's import the package containing our helper classes:
# Let's create a new entity of type "Thing" and name it 'Toy':
key = datastore.Key('Thing')
toy = datastore.Entity(key)
toy.update({'name': 'Toy'})
# Now let's save it to our datastore:
datastore.put(toy)
# If we look it up by its key, we should find it...
print(datastore.get(toy.key))
# And we should be able to delete it...
datastore.delete(toy.key)
# Since we deleted it, if we do another lookup it shouldn't be there again:
print(datastore.get(toy.key))
# Now let's try a more advanced query.
# First, let's create some entities.
SAMPLE_DATA = [
(1234, 'Computer', 10),
(2345, 'Computer', 8),
(3456, 'Laptop', 10),
(4567, 'Printer', 11),
(5678, 'Printer', 12),
(6789, 'Computer', 13)]
sample_keys = []
for id, name, age in SAMPLE_DATA:
key = datastore.Key('Thing', id)
sample_keys.append(key)
entity = datastore.Entity(key)
entity['name'] = name
entity['age'] = age
datastore.put(entity)
# We'll start by look at all Thing entities:
query = datastore.Query(kind='Thing')
# Let's look at the first two.
print(list(query.fetch(limit=2)))
# Now let's check for Thing entities named 'Computer'
query.add_filter('name', '=', 'Computer')
print(list(query.fetch()))
# If you want to filter by multiple attributes,
# you can call .add_filter multiple times on the query.
query.add_filter('age', '=', 10)
print(list(query.fetch()))
# Now delete them.
datastore.delete_multi(sample_keys)
# You can also work inside a transaction.
# (Check the official docs for explanations of what's happening here.)
with datastore.Transaction() as xact:
print('Creating and saving an entity...')
key = datastore.Key('Thing', 'foo')
thing = datastore.Entity(key)
thing['age'] = 10
xact.put(thing)
print('Creating and saving another entity...')
key2 = datastore.Key('Thing', 'bar')
thing2 = datastore.Entity(key2)
thing2['age'] = 15
xact.put(thing2)
print('Committing the transaction...')
# Now that the transaction is commited, let's delete the entities.
datastore.delete_multi([key, key2])
# To rollback a transaction, just call .rollback()
with datastore.Transaction() as xact:
key = datastore.Key('Thing', 'another')
thing = datastore.Entity(key)
xact.put(thing)
xact.rollback()
# Let's check if the entity was actually created:
created = datastore.get(key)
print('yes' if created else 'no')
# Remember, a key won't be complete until the transaction is commited.
# That is, while inside the transaction block, thing.key will be incomplete.
with datastore.Transaction() as xact:
key = datastore.Key('Thing') # partial
thing = datastore.Entity(key)
xact.put(thing)
print(thing.key) # This will still be partial
print(thing.key) # This will be complete
# Now let's delete the entity.
datastore.delete(thing.key)
|
the-stack_0_23785 | import networkx as nx
__all__ = ["cytoscape_data", "cytoscape_graph"]
_attrs = dict(name="name", ident="id")
def cytoscape_data(G, attrs=None):
"""Returns data in Cytoscape JSON format (cyjs).
Parameters
----------
G : NetworkX Graph
Returns
-------
data: dict
A dictionary with cyjs formatted data.
Raises
------
NetworkXError
If values in attrs are not unique.
"""
if not attrs:
attrs = _attrs
else:
attrs.update({k: v for (k, v) in _attrs.items() if k not in attrs})
name = attrs["name"]
ident = attrs["ident"]
if len({name, ident}) < 2:
raise nx.NetworkXError("Attribute names are not unique.")
jsondata = {"data": list(G.graph.items())}
jsondata["directed"] = G.is_directed()
jsondata["multigraph"] = G.is_multigraph()
jsondata["elements"] = {"nodes": [], "edges": []}
nodes = jsondata["elements"]["nodes"]
edges = jsondata["elements"]["edges"]
for i, j in G.nodes.items():
n = {"data": j.copy()}
n["data"]["id"] = j.get(ident) or str(i)
n["data"]["value"] = i
n["data"]["name"] = j.get(name) or str(i)
nodes.append(n)
if G.is_multigraph():
for e in G.edges(keys=True):
n = {"data": G.adj[e[0]][e[1]][e[2]].copy()}
n["data"]["source"] = e[0]
n["data"]["target"] = e[1]
n["data"]["key"] = e[2]
edges.append(n)
else:
for e in G.edges():
n = {"data": G.adj[e[0]][e[1]].copy()}
n["data"]["source"] = e[0]
n["data"]["target"] = e[1]
edges.append(n)
return jsondata
def cytoscape_graph(data, attrs=None):
    """Create a NetworkX graph from a dictionary in cytoscape JSON format (cyjs).

    This is the inverse of cytoscape_data: graph, node and edge attributes are
    restored from the "data" and "elements" entries of the dictionary.
    """
if not attrs:
attrs = _attrs
else:
attrs.update({k: v for (k, v) in _attrs.items() if k not in attrs})
name = attrs["name"]
ident = attrs["ident"]
if len({ident, name}) < 2:
raise nx.NetworkXError("Attribute names are not unique.")
multigraph = data.get("multigraph")
directed = data.get("directed")
if multigraph:
graph = nx.MultiGraph()
else:
graph = nx.Graph()
if directed:
graph = graph.to_directed()
graph.graph = dict(data.get("data"))
for d in data["elements"]["nodes"]:
node_data = d["data"].copy()
node = d["data"]["value"]
if d["data"].get(name):
node_data[name] = d["data"].get(name)
if d["data"].get(ident):
node_data[ident] = d["data"].get(ident)
graph.add_node(node)
graph.nodes[node].update(node_data)
for d in data["elements"]["edges"]:
edge_data = d["data"].copy()
sour = d["data"].pop("source")
targ = d["data"].pop("target")
if multigraph:
key = d["data"].get("key", 0)
graph.add_edge(sour, targ, key=key)
graph.edges[sour, targ, key].update(edge_data)
else:
graph.add_edge(sour, targ)
graph.edges[sour, targ].update(edge_data)
return graph
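if __name__ == "__main__":
    # Hedged round-trip sketch (added, not part of the upstream module): serialise a
    # small graph to cyjs and rebuild it, checking that node ids and edge attributes
    # survive the conversion.
    G = nx.Graph(name="demo")
    G.add_edge("a", "b", weight=2)
    data = cytoscape_data(G)
    H = cytoscape_graph(data)
    assert set(H.nodes) == {"a", "b"}
    assert H.edges["a", "b"]["weight"] == 2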
|
the-stack_0_23787 | # Copyright (c) 2012 OpenStack Foundation
# All Rights Reserved.
# Copyright 2013 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Functionality related to notifications common to multiple layers of
the system.
"""
import datetime
from keystoneauth1 import exceptions as ks_exc
from oslo_log import log
from oslo_utils import excutils
from oslo_utils import timeutils
import nova.conf
import nova.context
from nova import exception
from nova import image as image_api
from nova import network
from nova.network import model as network_model
from nova.notifications.objects import base as notification_base
from nova.notifications.objects import instance as instance_notification
from nova import objects
from nova.objects import base as obj_base
from nova.objects import fields
from nova import rpc
from nova import utils
LOG = log.getLogger(__name__)
CONF = nova.conf.CONF
def send_update(context, old_instance, new_instance, service="compute",
host=None):
"""Send compute.instance.update notification to report any changes occurred
in that instance
"""
if not CONF.notifications.notify_on_state_change:
# skip all this if updates are disabled
return
update_with_state_change = False
old_vm_state = old_instance["vm_state"]
new_vm_state = new_instance["vm_state"]
old_task_state = old_instance["task_state"]
new_task_state = new_instance["task_state"]
# we should check if we need to send a state change or a regular
# notification
if old_vm_state != new_vm_state:
# yes, the vm state is changing:
update_with_state_change = True
elif (CONF.notifications.notify_on_state_change == "vm_and_task_state" and
old_task_state != new_task_state):
# yes, the task state is changing:
update_with_state_change = True
if update_with_state_change:
# send a notification with state changes
# value of verify_states need not be True as the check for states is
# already done here
send_update_with_states(context, new_instance, old_vm_state,
new_vm_state, old_task_state, new_task_state, service, host)
else:
try:
old_display_name = None
if new_instance["display_name"] != old_instance["display_name"]:
old_display_name = old_instance["display_name"]
send_instance_update_notification(context, new_instance,
service=service, host=host,
old_display_name=old_display_name)
except exception.InstanceNotFound:
LOG.debug('Failed to send instance update notification. The '
'instance could not be found and was most likely '
'deleted.', instance=new_instance)
except Exception:
LOG.exception("Failed to send state update notification",
instance=new_instance)
def send_update_with_states(context, instance, old_vm_state, new_vm_state,
old_task_state, new_task_state, service="compute", host=None,
verify_states=False):
"""Send compute.instance.update notification to report changes if there
are any, in the instance
"""
if not CONF.notifications.notify_on_state_change:
# skip all this if updates are disabled
return
fire_update = True
# send update notification by default
if verify_states:
# check whether we need to send notification related to state changes
fire_update = False
# do not send notification if the conditions for vm and(or) task state
# are not satisfied
if old_vm_state != new_vm_state:
# yes, the vm state is changing:
fire_update = True
elif (CONF.notifications.notify_on_state_change == "vm_and_task_state"
and old_task_state != new_task_state):
# yes, the task state is changing:
fire_update = True
if fire_update:
# send either a state change or a regular notification
try:
send_instance_update_notification(context, instance,
old_vm_state=old_vm_state, old_task_state=old_task_state,
new_vm_state=new_vm_state, new_task_state=new_task_state,
service=service, host=host)
except exception.InstanceNotFound:
LOG.debug('Failed to send instance update notification. The '
'instance could not be found and was most likely '
'deleted.', instance=instance)
except Exception:
LOG.exception("Failed to send state update notification",
instance=instance)
def _compute_states_payload(instance, old_vm_state=None,
old_task_state=None, new_vm_state=None, new_task_state=None):
# If the states were not specified we assume the current instance
# states are the correct information. This is important to do for
# both old and new states because otherwise we create some really
# confusing notifications like:
#
# None(None) => Building(none)
#
# When we really were just continuing to build
if new_vm_state is None:
new_vm_state = instance["vm_state"]
if new_task_state is None:
new_task_state = instance["task_state"]
if old_vm_state is None:
old_vm_state = instance["vm_state"]
if old_task_state is None:
old_task_state = instance["task_state"]
states_payload = {
"old_state": old_vm_state,
"state": new_vm_state,
"old_task_state": old_task_state,
"new_task_state": new_task_state,
}
return states_payload
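# Example (illustrative values): for an instance currently in vm_state
# 'building' with task_state None, _compute_states_payload(instance) with no
# explicit old/new states returns
#   {'old_state': 'building', 'state': 'building',
#    'old_task_state': None, 'new_task_state': None}
# since unspecified states default to the instance's current states.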
def send_instance_update_notification(context, instance, old_vm_state=None,
old_task_state=None, new_vm_state=None, new_task_state=None,
service="compute", host=None, old_display_name=None):
"""Send 'compute.instance.update' notification to inform observers
about instance state changes.
"""
# NOTE(gibi): The image_ref_url is only used in unversioned notifications.
# Calling the generate_image_url() could be costly as it calls
# the Keystone API. So only do the call if the actual value will be
# used.
populate_image_ref_url = (CONF.notifications.notification_format in
('both', 'unversioned'))
payload = info_from_instance(context, instance, None,
populate_image_ref_url=populate_image_ref_url)
# determine how we'll report states
payload.update(
_compute_states_payload(
instance, old_vm_state, old_task_state,
new_vm_state, new_task_state))
# add audit fields:
(audit_start, audit_end) = audit_period_bounds(current_period=True)
payload["audit_period_beginning"] = null_safe_isotime(audit_start)
payload["audit_period_ending"] = null_safe_isotime(audit_end)
# add bw usage info:
bw = bandwidth_usage(context, instance, audit_start)
payload["bandwidth"] = bw
# add old display name if it is changed
if old_display_name:
payload["old_display_name"] = old_display_name
rpc.get_notifier(service, host).info(context,
'compute.instance.update', payload)
_send_versioned_instance_update(context, instance, payload, host, service)
@rpc.if_notifications_enabled
def _send_versioned_instance_update(context, instance, payload, host, service):
def _map_legacy_service_to_source(legacy_service):
        if not legacy_service.startswith('nova-'):
            return 'nova-' + legacy_service
        else:
            return legacy_service
state_update = instance_notification.InstanceStateUpdatePayload(
old_state=payload.get('old_state'),
state=payload.get('state'),
old_task_state=payload.get('old_task_state'),
new_task_state=payload.get('new_task_state'))
audit_period = instance_notification.AuditPeriodPayload(
audit_period_beginning=payload.get('audit_period_beginning'),
audit_period_ending=payload.get('audit_period_ending'))
bandwidth = [instance_notification.BandwidthPayload(
network_name=label,
in_bytes=bw['bw_in'],
out_bytes=bw['bw_out'])
for label, bw in payload['bandwidth'].items()]
versioned_payload = instance_notification.InstanceUpdatePayload(
context=context,
instance=instance,
state_update=state_update,
audit_period=audit_period,
bandwidth=bandwidth,
old_display_name=payload.get('old_display_name'))
notification = instance_notification.InstanceUpdateNotification(
priority=fields.NotificationPriority.INFO,
event_type=notification_base.EventType(
object='instance',
action=fields.NotificationAction.UPDATE),
publisher=notification_base.NotificationPublisher(
host=host or CONF.host,
source=_map_legacy_service_to_source(service)),
payload=versioned_payload)
notification.emit(context)
def audit_period_bounds(current_period=False):
"""Get the start and end of the relevant audit usage period
:param current_period: if True, this will generate a usage for the
current usage period; if False, this will generate a usage for the
previous audit period.
"""
begin, end = utils.last_completed_audit_period()
if current_period:
audit_start = end
audit_end = timeutils.utcnow()
else:
audit_start = begin
audit_end = end
return (audit_start, audit_end)
def bandwidth_usage(context, instance_ref, audit_start,
ignore_missing_network_data=True):
"""Get bandwidth usage information for the instance for the
specified audit period.
"""
admin_context = context.elevated(read_deleted='yes')
def _get_nwinfo_old_skool():
"""Support for getting network info without objects."""
if (instance_ref.get('info_cache') and
instance_ref['info_cache'].get('network_info') is not None):
cached_info = instance_ref['info_cache']['network_info']
if isinstance(cached_info, network_model.NetworkInfo):
return cached_info
return network_model.NetworkInfo.hydrate(cached_info)
try:
return network.API().get_instance_nw_info(admin_context,
instance_ref)
except Exception:
try:
with excutils.save_and_reraise_exception():
LOG.exception('Failed to get nw_info',
instance=instance_ref)
except Exception:
if ignore_missing_network_data:
return
raise
# FIXME(comstud): Temporary as we transition to objects.
if isinstance(instance_ref, obj_base.NovaObject):
nw_info = instance_ref.info_cache.network_info
if nw_info is None:
nw_info = network_model.NetworkInfo()
else:
nw_info = _get_nwinfo_old_skool()
macs = [vif['address'] for vif in nw_info]
uuids = [instance_ref["uuid"]]
bw_usages = objects.BandwidthUsageList.get_by_uuids(admin_context, uuids,
audit_start)
bw = {}
for b in bw_usages:
if b.mac in macs:
label = 'net-name-not-found-%s' % b.mac
for vif in nw_info:
if vif['address'] == b.mac:
label = vif['network']['label']
break
bw[label] = dict(bw_in=b.bw_in, bw_out=b.bw_out)
return bw
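# The mapping returned above is keyed by network label (or a fallback label
# embedding the MAC when no matching VIF is found), e.g. (illustrative values):
#   {'private': {'bw_in': 123456, 'bw_out': 654321}}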
def image_meta(system_metadata):
"""Format image metadata for use in notifications from the instance
system metadata.
"""
image_meta = {}
for md_key, md_value in system_metadata.items():
if md_key.startswith('image_'):
image_meta[md_key[6:]] = md_value
return image_meta
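# Example (illustrative): image_meta({'image_min_ram': '0', 'image_min_disk': '1',
# 'os_type': 'linux'}) returns {'min_ram': '0', 'min_disk': '1'} -- only
# 'image_'-prefixed keys are kept, with the prefix stripped.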
def null_safe_str(s):
return str(s) if s else ''
def null_safe_isotime(s):
if isinstance(s, datetime.datetime):
return utils.strtime(s)
else:
return str(s) if s else ''
def info_from_instance(context, instance, network_info,
populate_image_ref_url=False, **kw):
"""Get detailed instance information for an instance which is common to all
notifications.
:param:instance: nova.objects.Instance
:param:network_info: network_info provided if not None
:param:populate_image_ref_url: If True then the full URL of the image of
the instance is generated and returned.
This, depending on the configuration, might
mean a call to Keystone. If false, None
value is returned in the dict at the
image_ref_url key.
"""
image_ref_url = None
if populate_image_ref_url:
try:
# NOTE(mriedem): We can eventually drop this when we no longer
# support legacy notifications since versioned notifications don't
# use this.
image_ref_url = image_api.API().generate_image_url(
instance.image_ref, context)
except ks_exc.EndpointNotFound:
# We might be running from a periodic task with no auth token and
# CONF.glance.api_servers isn't set, so we can't get the image API
# endpoint URL from the service catalog, therefore just use the
# image id for the URL (yes it's a lie, but it's best effort at
# this point).
with excutils.save_and_reraise_exception() as exc_ctx:
if context.auth_token is None:
image_ref_url = instance.image_ref
exc_ctx.reraise = False
instance_type = instance.get_flavor()
instance_type_name = instance_type.get('name', '')
instance_flavorid = instance_type.get('flavorid', '')
instance_info = dict(
# Owner properties
tenant_id=instance.project_id,
user_id=instance.user_id,
# Identity properties
instance_id=instance.uuid,
display_name=instance.display_name,
reservation_id=instance.reservation_id,
hostname=instance.hostname,
# Type properties
instance_type=instance_type_name,
instance_type_id=instance.instance_type_id,
instance_flavor_id=instance_flavorid,
architecture=instance.architecture,
# Capacity properties
memory_mb=instance.flavor.memory_mb,
disk_gb=instance.flavor.root_gb + instance.flavor.ephemeral_gb,
vcpus=instance.flavor.vcpus,
# Note(dhellmann): This makes the disk_gb value redundant, but
# we are keeping it for backwards-compatibility with existing
# users of notifications.
root_gb=instance.flavor.root_gb,
ephemeral_gb=instance.flavor.ephemeral_gb,
# Location properties
host=instance.host,
node=instance.node,
availability_zone=instance.availability_zone,
cell_name=null_safe_str(instance.cell_name),
# Date properties
created_at=str(instance.created_at),
# Terminated and Deleted are slightly different (although being
# terminated and not deleted is a transient state), so include
# both and let the recipient decide which they want to use.
terminated_at=null_safe_isotime(instance.get('terminated_at', None)),
deleted_at=null_safe_isotime(instance.get('deleted_at', None)),
launched_at=null_safe_isotime(instance.get('launched_at', None)),
# Image properties
image_ref_url=image_ref_url,
os_type=instance.os_type,
kernel_id=instance.kernel_id,
ramdisk_id=instance.ramdisk_id,
# Status properties
state=instance.vm_state,
state_description=null_safe_str(instance.task_state),
        # NOTE(gibi): It might seem wrong to default the progress to an empty
        # string, but this is how the legacy code works and this code is only
        # used by the legacy notification, so try to keep compatibility here
        # but also keep it contained.
progress=int(instance.progress) if instance.progress else '',
# accessIPs
access_ip_v4=instance.access_ip_v4,
access_ip_v6=instance.access_ip_v6,
)
if network_info is not None:
fixed_ips = []
for vif in network_info:
for ip in vif.fixed_ips():
ip["label"] = vif["network"]["label"]
ip["vif_mac"] = vif["address"]
fixed_ips.append(ip)
instance_info['fixed_ips'] = fixed_ips
# add image metadata
image_meta_props = image_meta(instance.system_metadata)
instance_info["image_meta"] = image_meta_props
# add instance metadata
instance_info['metadata'] = instance.metadata
instance_info.update(kw)
return instance_info
the-stack_0_23789 | import numpy as np
from yaglm.utils import count_support
def est_dof_support(coef, intercept=None, transform=None, zero_tol=1e-6):
"""
    The size of the support of the estimated coefficient (or a transform
    thereof) is sometimes a reasonable estimate for the degrees of freedom,
    e.g. in Lasso, SCAD and some generalized Lasso problems.
Parameters
----------
coef: array-like
The estimated coefficient.
intercept: None, float, array-like
        (Optional) The estimated intercept.
transform: None, callable(coef) -> array-like
(Optional) The transformation applied to the coefficient e.g. for the generalized Lasso.
zero_tol: float
Tolerance for declaring a small value equal to zero. This addresses numerical issues where some solvers may not return exact zeros.
Output
------
dof: int
        The estimated number of degrees of freedom. The DoF of the coefficient
        is given by either ||coef||_0 or ||transform(coef)||_0.
References
----------
Zou, H., Hastie, T. and Tibshirani, R., 2007. On the “degrees of freedom” of the lasso. The Annals of Statistics, 35(5), pp.2173-2192.
Park, M.Y. and Hastie, T., 2007. L1‐regularization path algorithm for generalized linear models. Journal of the Royal Statistical Society: Series B (Statistical Methodology), 69(4), pp.659-677.
Zhang, Y., Li, R. and Tsai, C.L., 2010. Regularization parameter selections via generalized information criterion. Journal of the American Statistical Association, 105(489), pp.312-323.
"""
# count support of estimated coefficient
coef = np.array(coef)
if transform is None:
n_nonzero_coef = count_support(coef, zero_tol=zero_tol)
else:
n_nonzero_coef = count_support(transform(coef), zero_tol=zero_tol)
# maybe add intercept
if intercept is not None:
n_vals_intercept = np.array(intercept).size
else:
n_vals_intercept = 0
DoF = n_nonzero_coef + n_vals_intercept
return DoF
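# Example (illustrative values, assuming the default zero_tol):
#   est_dof_support(np.array([0.0, 1.3, 0.0, -2.1]), intercept=0.5)
#   # -> 3 (two nonzero coefficients plus one intercept value)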
def est_dof_enet(coef, pen_val, mix_val, X, intercept=None, zero_tol=1e-6):
"""
    The estimated degrees of freedom of the elastic net fit at a particular
    penalty and mixing value.
ElasticNet penalty:
pen_val * mix_val ||coef||_1 + pen_val * (1 - mix_val) * ||coef||_2^2
Parameters
----------
coef: array-like
The estimated coefficient.
pen_val: float,
current penalty value in the elastic net penalty
mix_val: float,
current mixing value in the elastic net penalty
X: (n,d)-array,
design matrix excluding the intercept column
intercept: None, float, array-like
        (Optional) The estimated intercept.
zero_tol: float
Tolerance for declaring a small value equal to zero. This addresses numerical issues where some solvers may not return exact zeros.
Output
------
DoF: int
        The estimated number of degrees of freedom, computed as
        tr(X_A (X_A^T X_A + lambda_2 I)^{-1} X_A^T), where A is the estimated
        support of the coefficient (plus the intercept column, if given).
References
----------
Zou, H., Hastie, T. and Tibshirani, R., 2007. On the “degrees of freedom” of the lasso. The Annals of Statistics, 35(5), pp.2173-2192.
"""
# Get the estimated support from the fitted coefficient
if intercept is not None:
coef = np.concatenate([[intercept], coef])
support = (abs(coef) > zero_tol)
# tuning parameter attached to the ridge penalty
lambda_2 = pen_val * (1 - mix_val)
# Get the columns of the design matrix that correspond to the non-zero coef
if intercept is not None:
ones = np.ones((X.shape[0], 1))
X = np.concatenate([ones, X], axis = 1)
X_A = X[:, support].copy()
xtx_li_inv = np.linalg.inv(X_A.T @ X_A + lambda_2 * np.identity(X_A.shape[1]))
DoF = np.trace(X_A @ xtx_li_inv @ X_A.T)
    return DoF
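# Example sketch (X and coef are hypothetical fitted quantities):
#   est_dof_enet(coef, pen_val=0.1, mix_val=0.5, X=X)
# uses the ridge weight lambda_2 = 0.1 * (1 - 0.5) = 0.05 and returns
# tr(X_A @ (X_A.T @ X_A + 0.05 * I)^(-1) @ X_A.T) over the estimated support A.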
the-stack_0_23790 | #!/usr/bin/env python
import re
import numpy as np
if __name__ == "__main__":
with open("input") as fh:
data = [line.rstrip() for line in fh.readlines()]
grid = np.zeros(shape=(1000, 1000))
pattern = re.compile(r"\#(\S+) \@ (\d+),(\d+): (\d+)x(\d+)")
for line in data:
match = pattern.match(line)
idx, x, y, width, height = match.groups()
x = int(x)
y = int(y)
width = int(width)
height = int(height)
grid[x:x+width, y:y+height] += 1
overlaps = np.where(grid > 1)
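    # Both index arrays returned by np.where have the same length: the number
    # of grid cells covered by more than one rectangle.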
print(len(overlaps[0]), len(overlaps[1]))
the-stack_0_23791 | """
Distributed Prioritized Experience Replay (Ape-X)
=================================================
This file defines a DQN trainer using the Ape-X architecture.
Ape-X uses a single GPU learner and many CPU workers for experience collection.
Experience collection can scale to hundreds of CPU workers due to the
distributed prioritization of experience prior to storage in replay buffers.
Detailed documentation:
https://docs.ray.io/en/master/rllib-algorithms.html#distributed-prioritized-experience-replay-ape-x
""" # noqa: E501
import collections
import copy
import platform
from typing import Tuple
import ray
from ray.actor import ActorHandle
from ray.rllib.agents.dqn.dqn import (
calculate_rr_weights,
DEFAULT_CONFIG as DQN_DEFAULT_CONFIG,
DQNTrainer,
)
from ray.rllib.agents.dqn.learner_thread import LearnerThread
from ray.rllib.evaluation.worker_set import WorkerSet
from ray.rllib.execution.common import (
STEPS_TRAINED_COUNTER,
STEPS_TRAINED_THIS_ITER_COUNTER,
_get_global_vars,
_get_shared_metrics,
)
from ray.rllib.execution.concurrency_ops import Concurrently, Dequeue, Enqueue
from ray.rllib.execution.metric_ops import StandardMetricsReporting
from ray.rllib.execution.buffers.multi_agent_replay_buffer import ReplayActor
from ray.rllib.execution.replay_ops import Replay, StoreToReplayBuffer
from ray.rllib.execution.rollout_ops import ParallelRollouts
from ray.rllib.execution.train_ops import UpdateTargetNetwork
from ray.rllib.utils import merge_dicts
from ray.rllib.utils.actors import create_colocated_actors
from ray.rllib.utils.annotations import override
from ray.rllib.utils.metrics.learner_info import LEARNER_INFO
from ray.rllib.utils.typing import SampleBatchType, TrainerConfigDict
from ray.tune.trainable import Trainable
from ray.tune.utils.placement_groups import PlacementGroupFactory
from ray.util.iter import LocalIterator
# fmt: off
# __sphinx_doc_begin__
APEX_DEFAULT_CONFIG = merge_dicts(
# See also the options in dqn.py, which are also supported.
DQN_DEFAULT_CONFIG,
{
"optimizer": merge_dicts(
DQN_DEFAULT_CONFIG["optimizer"], {
"max_weight_sync_delay": 400,
"num_replay_buffer_shards": 4,
"debug": False
}),
"n_step": 3,
"num_gpus": 1,
"num_workers": 32,
# TODO(jungong) : add proper replay_buffer_config after
# DistributedReplayBuffer type is supported.
"replay_buffer_config": {
# For now we don't use the new ReplayBuffer API here
"_enable_replay_buffer_api": False,
"no_local_replay_buffer": True,
"type": "MultiAgentReplayBuffer",
"capacity": 2000000,
"replay_batch_size": 32,
"prioritized_replay_alpha": 0.6,
# Beta parameter for sampling from prioritized replay buffer.
"prioritized_replay_beta": 0.4,
# Epsilon to add to the TD errors when updating priorities.
"prioritized_replay_eps": 1e-6,
},
# Whether all shards of the replay buffer must be co-located
# with the learner process (running the execution plan).
# This is preferred b/c the learner process should have quick
# access to the data from the buffer shards, avoiding network
# traffic each time samples from the buffer(s) are drawn.
# Set this to False for relaxing this constraint and allowing
# replay shards to be created on node(s) other than the one
# on which the learner is located.
"replay_buffer_shards_colocated_with_driver": True,
"learning_starts": 50000,
"train_batch_size": 512,
"rollout_fragment_length": 50,
"target_network_update_freq": 500000,
"timesteps_per_iteration": 25000,
"exploration_config": {"type": "PerWorkerEpsilonGreedy"},
"worker_side_prioritization": True,
"min_time_s_per_reporting": 30,
        # If set, this will fix the ratio of timesteps replayed from the buffer
        # (and learned on) to timesteps sampled from the environment (and
        # stored in the replay buffer). Otherwise, replay will proceed as fast
        # as possible.
"training_intensity": None,
# Experimental flag.
# If True, the execution plan API will not be used. Instead,
# a Trainer's `training_iteration` method will be called as-is each
# training iteration.
"_disable_execution_plan_api": False,
},
)
# __sphinx_doc_end__
# fmt: on
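# Example usage (illustrative sketch; the environment name and worker/GPU
# counts are hypothetical, not taken from this config):
#
#   import ray
#   ray.init()
#   trainer = ApexTrainer(env="CartPole-v1",
#                         config={"num_workers": 4, "num_gpus": 0})
#   for _ in range(3):
#       print(trainer.train()["episode_reward_mean"])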
# Update worker weights as they finish generating experiences.
class UpdateWorkerWeights:
def __init__(
self,
learner_thread: LearnerThread,
workers: WorkerSet,
max_weight_sync_delay: int,
):
self.learner_thread = learner_thread
self.workers = workers
self.steps_since_update = collections.defaultdict(int)
self.max_weight_sync_delay = max_weight_sync_delay
self.weights = None
def __call__(self, item: Tuple[ActorHandle, SampleBatchType]):
actor, batch = item
self.steps_since_update[actor] += batch.count
if self.steps_since_update[actor] >= self.max_weight_sync_delay:
# Note that it's important to pull new weights once
# updated to avoid excessive correlation between actors.
if self.weights is None or self.learner_thread.weights_updated:
self.learner_thread.weights_updated = False
self.weights = ray.put(self.workers.local_worker().get_weights())
actor.set_weights.remote(self.weights, _get_global_vars())
# Also update global vars of the local worker.
self.workers.local_worker().set_global_vars(_get_global_vars())
self.steps_since_update[actor] = 0
# Update metrics.
metrics = _get_shared_metrics()
metrics.counters["num_weight_syncs"] += 1
class ApexTrainer(DQNTrainer):
@classmethod
@override(DQNTrainer)
def get_default_config(cls) -> TrainerConfigDict:
return APEX_DEFAULT_CONFIG
@override(DQNTrainer)
def validate_config(self, config):
if config["num_gpus"] > 1:
raise ValueError("`num_gpus` > 1 not yet supported for APEX-DQN!")
# Call DQN's validation method.
super().validate_config(config)
@staticmethod
@override(DQNTrainer)
def execution_plan(
workers: WorkerSet, config: dict, **kwargs
) -> LocalIterator[dict]:
assert (
len(kwargs) == 0
), "Apex execution_plan does NOT take any additional parameters"
# Create a number of replay buffer actors.
num_replay_buffer_shards = config["optimizer"]["num_replay_buffer_shards"]
replay_actor_args = [
num_replay_buffer_shards,
config["learning_starts"],
config["replay_buffer_config"]["capacity"],
config["train_batch_size"],
config["replay_buffer_config"]["prioritized_replay_alpha"],
config["replay_buffer_config"]["prioritized_replay_beta"],
config["replay_buffer_config"]["prioritized_replay_eps"],
config["multiagent"]["replay_mode"],
config["replay_buffer_config"].get("replay_sequence_length", 1),
]
# Place all replay buffer shards on the same node as the learner
# (driver process that runs this execution plan).
if config["replay_buffer_shards_colocated_with_driver"]:
replay_actors = create_colocated_actors(
actor_specs=[
# (class, args, kwargs={}, count)
(ReplayActor, replay_actor_args, {}, num_replay_buffer_shards)
],
node=platform.node(), # localhost
)[
0
] # [0]=only one item in `actor_specs`.
# Place replay buffer shards on any node(s).
else:
replay_actors = [
ReplayActor(*replay_actor_args) for _ in range(num_replay_buffer_shards)
]
# Start the learner thread.
learner_thread = LearnerThread(workers.local_worker())
learner_thread.start()
# Update experience priorities post learning.
def update_prio_and_stats(item: Tuple[ActorHandle, dict, int]) -> None:
actor, prio_dict, count = item
if config.get("prioritized_replay"):
actor.update_priorities.remote(prio_dict)
metrics = _get_shared_metrics()
# Manually update the steps trained counter since the learner
# thread is executing outside the pipeline.
metrics.counters[STEPS_TRAINED_THIS_ITER_COUNTER] = count
metrics.counters[STEPS_TRAINED_COUNTER] += count
metrics.timers["learner_dequeue"] = learner_thread.queue_timer
metrics.timers["learner_grad"] = learner_thread.grad_timer
metrics.timers["learner_overall"] = learner_thread.overall_timer
# We execute the following steps concurrently:
# (1) Generate rollouts and store them in one of our replay buffer
# actors. Update the weights of the worker that generated the batch.
rollouts = ParallelRollouts(workers, mode="async", num_async=2)
store_op = rollouts.for_each(StoreToReplayBuffer(actors=replay_actors))
# Only need to update workers if there are remote workers.
if workers.remote_workers():
store_op = store_op.zip_with_source_actor().for_each(
UpdateWorkerWeights(
learner_thread,
workers,
max_weight_sync_delay=(
config["optimizer"]["max_weight_sync_delay"]
),
)
)
# (2) Read experiences from one of the replay buffer actors and send
# to the learner thread via its in-queue.
post_fn = config.get("before_learn_on_batch") or (lambda b, *a: b)
replay_op = (
Replay(actors=replay_actors, num_async=4)
.for_each(lambda x: post_fn(x, workers, config))
.zip_with_source_actor()
.for_each(Enqueue(learner_thread.inqueue))
)
# (3) Get priorities back from learner thread and apply them to the
# replay buffer actors.
update_op = (
Dequeue(learner_thread.outqueue, check=learner_thread.is_alive)
.for_each(update_prio_and_stats)
.for_each(
UpdateTargetNetwork(
workers, config["target_network_update_freq"], by_steps_trained=True
)
)
)
if config["training_intensity"]:
# Execute (1), (2) with a fixed intensity ratio.
rr_weights = calculate_rr_weights(config) + ["*"]
merged_op = Concurrently(
[store_op, replay_op, update_op],
mode="round_robin",
output_indexes=[2],
round_robin_weights=rr_weights,
)
else:
# Execute (1), (2), (3) asynchronously as fast as possible. Only
# output items from (3) since metrics aren't available before
# then.
merged_op = Concurrently(
[store_op, replay_op, update_op], mode="async", output_indexes=[2]
)
# Add in extra replay and learner metrics to the training result.
def add_apex_metrics(result: dict) -> dict:
replay_stats = ray.get(
replay_actors[0].stats.remote(config["optimizer"].get("debug"))
)
exploration_infos = workers.foreach_policy_to_train(
lambda p, _: p.get_exploration_state()
)
result["info"].update(
{
"exploration_infos": exploration_infos,
"learner_queue": learner_thread.learner_queue_size.stats(),
LEARNER_INFO: copy.deepcopy(learner_thread.learner_info),
"replay_shard_0": replay_stats,
}
)
return result
# Only report metrics from the workers with the lowest 1/3 of
# epsilons.
selected_workers = workers.remote_workers()[
-len(workers.remote_workers()) // 3 :
]
return StandardMetricsReporting(
merged_op, workers, config, selected_workers=selected_workers
).for_each(add_apex_metrics)
@classmethod
@override(Trainable)
def default_resource_request(cls, config):
cf = dict(cls.get_default_config(), **config)
eval_config = cf["evaluation_config"]
# Return PlacementGroupFactory containing all needed resources
# (already properly defined as device bundles).
return PlacementGroupFactory(
bundles=[
{
# Local worker + replay buffer actors.
# Force replay buffers to be on same node to maximize
# data bandwidth between buffers and the learner (driver).
# Replay buffer actors each contain one shard of the total
# replay buffer and use 1 CPU each.
"CPU": cf["num_cpus_for_driver"]
+ cf["optimizer"]["num_replay_buffer_shards"],
"GPU": 0 if cf["_fake_gpus"] else cf["num_gpus"],
}
]
+ [
{
# RolloutWorkers.
"CPU": cf["num_cpus_per_worker"],
"GPU": cf["num_gpus_per_worker"],
}
for _ in range(cf["num_workers"])
]
+ (
[
{
# Evaluation workers.
# Note: The local eval worker is located on the driver
# CPU.
"CPU": eval_config.get(
"num_cpus_per_worker", cf["num_cpus_per_worker"]
),
"GPU": eval_config.get(
"num_gpus_per_worker", cf["num_gpus_per_worker"]
),
}
for _ in range(cf["evaluation_num_workers"])
]
if cf["evaluation_interval"]
else []
),
strategy=config.get("placement_strategy", "PACK"),
)
the-stack_0_23797 | # qubit number=3
# total number=59
import numpy as np
from qiskit import QuantumCircuit, execute, Aer, QuantumRegister, ClassicalRegister, transpile, BasicAer, IBMQ
from qiskit.visualization import plot_histogram
from typing import *
from pprint import pprint
from math import log2
from collections import Counter
from qiskit.test.mock import FakeVigo, FakeYorktown
kernel = 'circuit/bernstein'
def bitwise_xor(s: str, t: str) -> str:
length = len(s)
res = []
for i in range(length):
res.append(str(int(s[i]) ^ int(t[i])))
return ''.join(res[::-1])
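# Note: the joined result is bit-reversed relative to the input order, e.g.
# bitwise_xor("011", "010") computes ['0', '0', '1'] element-wise and
# returns "100".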
def bitwise_dot(s: str, t: str) -> str:
length = len(s)
res = 0
for i in range(length):
res += int(s[i]) * int(t[i])
return str(res % 2)
def build_oracle(n: int, f: Callable[[str], str]) -> QuantumCircuit:
# implement the oracle O_f
# NOTE: use multi_control_toffoli_gate ('noancilla' mode)
# https://qiskit.org/documentation/_modules/qiskit/aqua/circuits/gates/multi_control_toffoli_gate.html
# https://quantumcomputing.stackexchange.com/questions/3943/how-do-you-implement-the-toffoli-gate-using-only-single-qubit-and-cnot-gates
# https://quantumcomputing.stackexchange.com/questions/2177/how-can-i-implement-an-n-bit-toffoli-gate
controls = QuantumRegister(n, "ofc")
target = QuantumRegister(1, "oft")
oracle = QuantumCircuit(controls, target, name="Of")
for i in range(2 ** n):
rep = np.binary_repr(i, n)
if f(rep) == "1":
for j in range(n):
if rep[j] == "0":
oracle.x(controls[j])
oracle.mct(controls, target[0], None, mode='noancilla')
for j in range(n):
if rep[j] == "0":
oracle.x(controls[j])
# oracle.barrier()
# oracle.draw('mpl', filename=(kernel + '-oracle.png'))
return oracle
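# Example (illustrative): for n=2 and an f with f('11') == '1' and '0'
# elsewhere, the loop applies no X gates for rep='11' and a single mct on both
# controls, so the oracle reduces to a Toffoli (CCX) onto the target qubit.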
def build_circuit(n: int, f: Callable[[str], str]) -> QuantumCircuit:
# implement the Bernstein-Vazirani circuit
zero = np.binary_repr(0, n)
b = f(zero)
# initial n + 1 bits
input_qubit = QuantumRegister(n+1, "qc")
classicals = ClassicalRegister(n, "qm")
prog = QuantumCircuit(input_qubit, classicals)
# inverse last one (can be omitted if using O_f^\pm)
prog.x(input_qubit[n])
# circuit begin
prog.h(input_qubit[1]) # number=1
prog.rx(-0.09738937226128368,input_qubit[2]) # number=2
prog.h(input_qubit[1]) # number=33
prog.cz(input_qubit[2],input_qubit[1]) # number=34
prog.h(input_qubit[1]) # number=35
prog.h(input_qubit[1]) # number=3
# apply H to get superposition
for i in range(n):
prog.h(input_qubit[i])
prog.h(input_qubit[n])
prog.barrier()
# apply oracle O_f
oracle = build_oracle(n, f)
prog.append(
oracle.to_gate(),
[input_qubit[i] for i in range(n)] + [input_qubit[n]])
# apply H back (QFT on Z_2^n)
for i in range(n):
prog.h(input_qubit[i])
prog.barrier()
# measure
return prog
def get_statevector(prog: QuantumCircuit) -> Any:
state_backend = Aer.get_backend('statevector_simulator')
statevec = execute(prog, state_backend).result()
quantum_state = statevec.get_statevector()
qubits = round(log2(len(quantum_state)))
quantum_state = {
"|" + np.binary_repr(i, qubits) + ">": quantum_state[i]
for i in range(2 ** qubits)
}
return quantum_state
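# e.g. (illustrative) for a single-qubit circuit left in |0>, the returned dict
# is {'|0>': (1+0j), '|1>': 0j} -- keys are basis-state labels, values are
# complex amplitudes.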
def evaluate(backend_str: str, prog: QuantumCircuit, shots: int, b: str) -> Any:
# Q: which backend should we use?
# get state vector
quantum_state = get_statevector(prog)
# get simulate results
# provider = IBMQ.load_account()
# backend = provider.get_backend(backend_str)
# qobj = compile(prog, backend, shots)
# job = backend.run(qobj)
# job.result()
backend = Aer.get_backend(backend_str)
# transpile/schedule -> assemble -> backend.run
results = execute(prog, backend, shots=shots).result()
counts = results.get_counts()
a = Counter(counts).most_common(1)[0][0][::-1]
return {
"measurements": counts,
# "state": statevec,
"quantum_state": quantum_state,
"a": a,
"b": b
}
def bernstein_test_1(rep: str):
"""011 . x + 1"""
a = "011"
b = "1"
return bitwise_xor(bitwise_dot(a, rep), b)
def bernstein_test_2(rep: str):
"""000 . x + 0"""
a = "000"
b = "0"
return bitwise_xor(bitwise_dot(a, rep), b)
def bernstein_test_3(rep: str):
"""111 . x + 1"""
a = "111"
b = "1"
return bitwise_xor(bitwise_dot(a, rep), b)
if __name__ == "__main__":
n = 2
a = "11"
b = "1"
f = lambda rep: \
bitwise_xor(bitwise_dot(a, rep), b)
prog = build_circuit(n, f)
sample_shot =4000
writefile = open("../data/startQiskit_QC322.csv", "w")
# prog.draw('mpl', filename=(kernel + '.png'))
IBMQ.load_account()
provider = IBMQ.get_provider(hub='ibm-q')
provider.backends()
backend = provider.get_backend("ibmq_belem")
circuit1 = transpile(prog, FakeYorktown())
circuit1.h(qubit=2)
circuit1.x(qubit=3)
circuit1.measure_all()
info = execute(circuit1,backend=backend, shots=sample_shot).result().get_counts()
print(info, file=writefile)
print("results end", file=writefile)
print(circuit1.depth(), file=writefile)
print(circuit1, file=writefile)
writefile.close()
the-stack_0_23798 | #!/usr/bin/python
# -*- encoding: utf-8; py-indent-offset: 4 -*-
# +------------------------------------------------------------------+
# | ____ _ _ __ __ _ __ |
# | / ___| |__ ___ ___| | __ | \/ | |/ / |
# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / |
# | | |___| | | | __/ (__| < | | | | . \ |
# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ |
# | |
# | Copyright Mathias Kettner 2014 [email protected] |
# +------------------------------------------------------------------+
#
# This file is part of Check_MK.
# The official homepage is at http://mathias-kettner.de/check_mk.
#
# check_mk is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by
# the Free Software Foundation in version 2. check_mk is distributed
# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with-
# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A
# PARTICULAR PURPOSE. See the GNU General Public License for more de-
# tails. You should have received a copy of the GNU General Public
# License along with GNU Make; see the file COPYING. If not, write
# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
# Boston, MA 02110-1301 USA.
# ____ _
# | _ \ ___ | | ___ ___
# | |_) / _ \| |/ _ \/ __|
# | _ < (_) | | __/\__ \
# |_| \_\___/|_|\___||___/
#
roles = {} # User supplied roles
# define default values for all settings
debug = False
screenshotmode = False
profile = False
users = []
admin_users = []
guest_users = []
default_user_role = "user"
save_user_access_times = False
user_online_maxage = 30 # seconds
# New style, used by WATO
multisite_users = {}
# ____ _ _ _
# / ___|(_) __| | ___| |__ __ _ _ __
# \___ \| |/ _` |/ _ \ '_ \ / _` | '__|
# ___) | | (_| | __/ |_) | (_| | |
# |____/|_|\__,_|\___|_.__/ \__,_|_|
#
sidebar = [
('tactical_overview', 'open'),
('search', 'open'),
('views', 'open'),
    ('reports', 'closed'), # does no harm if not available
('bookmarks', 'open'),
('admin', 'open'),
('master_control', 'closed')
]
# Interval of snapin updates in seconds
sidebar_update_interval = 30.0
# It is possible (but ugly) to enable a scrollbar in the sidebar
sidebar_show_scrollbar = False
# Enable regular checking for popup notifications
sidebar_notify_interval = None
# _ _ _ _
# | | (_)_ __ ___ (_) |_ ___
# | | | | '_ ` _ \| | __/ __|
# | |___| | | | | | | | |_\__ \
# |_____|_|_| |_| |_|_|\__|___/
#
soft_query_limit = 1000
hard_query_limit = 5000
# ____ _
# / ___| ___ _ _ _ __ __| |___
# \___ \ / _ \| | | | '_ \ / _` / __|
# ___) | (_) | |_| | | | | (_| \__ \
# |____/ \___/ \__,_|_| |_|\__,_|___/
#
sound_url = "sounds/"
enable_sounds = False
sounds = [
( "down", "down.wav" ),
( "critical", "critical.wav" ),
( "unknown", "unknown.wav" ),
( "warning", "warning.wav" ),
# ( None, "ok.wav" ),
]
# __ ___ _ _
# \ \ / (_) _____ __ ___ _ __ | |_(_) ___ _ __ ___
# \ \ / /| |/ _ \ \ /\ / / / _ \| '_ \| __| |/ _ \| '_ \/ __|
# \ V / | | __/\ V V / | (_) | |_) | |_| | (_) | | | \__ \
# \_/ |_|\___| \_/\_/ \___/| .__/ \__|_|\___/|_| |_|___/
# |_|
view_option_refreshes = [ 30, 60, 90, 0 ]
view_option_columns = [ 1, 2, 3, 4, 5, 6, 8, 10, 12 ]
# MISC
doculink_urlformat = "http://mathias-kettner.de/checkmk_%s.html"
# ____ _ _ _ _
# / ___| _ ___| |_ ___ _ __ ___ | | (_)_ __ | | _____
# | | | | | / __| __/ _ \| '_ ` _ \ | | | | '_ \| |/ / __|
# | |__| |_| \__ \ || (_) | | | | | | | |___| | | | | <\__ \
# \____\__,_|___/\__\___/|_| |_| |_| |_____|_|_| |_|_|\_\___/
#
custom_links = {}
# __ __ _
# \ \ / /_ _ _ __(_) ___ _ _ ___
# \ \ / / _` | '__| |/ _ \| | | / __|
# \ V / (_| | | | | (_) | |_| \__ \
# \_/ \__,_|_| |_|\___/ \__,_|___/
#
debug_livestatus_queries = False
# Show livestatus errors in multi site setup if some sites are
# not reachable.
show_livestatus_errors = True
# Whether the livestatus proxy daemon is available
liveproxyd_enabled = False
# Set this to a list in order to globally control which views are
# being displayed in the sidebar snapin "Views"
visible_views = None
# Set this list in order to actively hide certain views
hidden_views = None
# Custom user stylesheet to load (resides in htdocs/)
custom_style_sheet = None
# URL for start page in main frame (welcome page)
start_url = "dashboard.py"
# Page heading for main frame set
page_heading = "Check_MK %s"
# Timeout for rescheduling of host- and servicechecks
reschedule_timeout = 10.0
# Number of columns in "Filter" form
filter_columns = 2
# Default language for l10n
default_language = None
# Hide these languages from user selection
hide_languages = []
# Default timestamp format to be used in multisite
default_ts_format = 'mixed'
# Default authentication type. Can be changed to e.g. "cookie" for
# using the cookie auth
auth_type = 'basic'
# Show only most used buttons, set to None if you want
# always all buttons to be shown
context_buttons_to_show = 5
# Buffering of HTML output stream
buffered_http_stream = True
# Maximum lifetime of unmodified selections
selection_livetime = 3600
# Configure HTTP header to read usernames from
auth_by_http_header = False
# Number of rows to display by default in tables rendered with
# the table.py module
table_row_limit = 100
# Add an icon pointing to the WATO rule to each service
multisite_draw_ruleicon = True
# Default downtime configuration
adhoc_downtime = {}
# Display dashboard date
pagetitle_date_format = None
# Value of the host_staleness/service_staleness field to make hosts/services
# appear in a stale state
staleness_threshold = 1.5
# Escape HTML in plugin output / log messages
escape_plugin_output = True
# Virtual host trees for the "Virtual Host Trees" snapin
virtual_host_trees = []
# Fall back to PNP4Nagios as graphing GUI even on CEE
force_pnp_graphing = False
# Target email address for "Crashed Check" page
crash_report_target = "[email protected]"
# GUI Tests (see cmk-guitest)
guitests_enabled = False
# _ _ ____ ____
# | | | |___ ___ _ __| _ \| __ )
# | | | / __|/ _ \ '__| | | | _ \
# | |_| \__ \ __/ | | |_| | |_) |
# \___/|___/\___|_| |____/|____/
#
userdb_automatic_sync = "master"
# Holds dicts defining user connector instances and their properties
user_connections = []
default_user_profile = {
'roles': ['user'],
}
lock_on_logon_failures = False
password_policy = {}
user_localizations = {
u'Agent type': { "de": u"Art des Agenten", },
u'Business critical': { "de": u"Geschäftskritisch", },
u'Check_MK Agent (Server)': { "de": u"Check_MK Agent (Server)", },
u'Criticality': { "de": u"Kritikalität", },
    u'DMZ (low latency, secure access)': { "de": u"DMZ (geringe Latenz, hohe Sicherheit)", },
u'Do not monitor this host': { "de": u"Diesen Host nicht überwachen", },
u'Dual: Check_MK Agent + SNMP': { "de": u"Dual: Check_MK Agent + SNMP", },
u'Legacy SNMP device (using V1)': { "de": u"Alte SNMP-Geräte (mit Version 1)", },
u'Local network (low latency)': { "de": u"Lokales Netzwerk (geringe Latenz)", },
u'Networking Segment': { "de": u"Netzwerksegment", },
u'No Agent': { "de": u"Kein Agent", },
u'Productive system': { "de": u"Produktivsystem", },
u'Test system': { "de": u"Testsystem", },
u'WAN (high latency)': { "de": u"WAN (hohe Latenz)", },
u'monitor via Check_MK Agent': { "de": u"Überwachung via Check_MK Agent", },
u'monitor via SNMP': { "de": u"Überwachung via SNMP", },
u'SNMP (Networking device, Appliance)': { "de": u"SNMP (Netzwerkgerät, Appliance)", },
}
# Contains user specified icons and actions for hosts and services
user_icons_and_actions = {}
# Override toplevel and sort_index settings of builtin icons
builtin_icon_visibility = {}
# Write WATO folder permissions to auth.php file
export_folder_permissions = False
# Name of the hostgroup to filter the network topology view by default
topology_default_filter_group = None
the-stack_0_23799 | """Training-related part of the Keras engine.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import warnings
import copy
import numpy as np
from .network import Network
from .base_layer import Layer
from .training_utils import collect_metrics
from .training_utils import check_array_length_consistency
from .training_utils import check_loss_and_target_compatibility
from .training_utils import standardize_class_weights
from .training_utils import standardize_input_data
from .training_utils import standardize_sample_weights
from .training_utils import standardize_weights
from .training_utils import weighted_masked_objective
from . import training_arrays
from . import training_generator
from .. import backend as K
from .. import optimizers
from .. import losses
from .. import metrics as metrics_module
from ..utils.generic_utils import slice_arrays
from ..utils.generic_utils import to_list
from ..utils.generic_utils import unpack_singleton
from ..legacy import interfaces
class Model(Network):
"""The `Model` class adds training & evaluation routines to a `Network`.
"""
def compile(self, optimizer,
loss=None,
metrics=None,
loss_weights=None,
sample_weight_mode=None,
weighted_metrics=None,
target_tensors=None,
**kwargs):
"""Configures the model for training.
# Arguments
optimizer: String (name of optimizer) or optimizer instance.
See [optimizers](/optimizers).
loss: String (name of objective function) or objective function.
See [losses](/losses).
If the model has multiple outputs, you can use a different loss
on each output by passing a dictionary or a list of losses.
The loss value that will be minimized by the model
will then be the sum of all individual losses.
metrics: List of metrics to be evaluated by the model
during training and testing.
Typically you will use `metrics=['accuracy']`.
To specify different metrics for different outputs of a
multi-output model, you could also pass a dictionary,
such as `metrics={'output_a': 'accuracy'}`.
loss_weights: Optional list or dictionary specifying scalar
coefficients (Python floats) to weight the loss contributions
of different model outputs.
The loss value that will be minimized by the model
will then be the *weighted sum* of all individual losses,
weighted by the `loss_weights` coefficients.
If a list, it is expected to have a 1:1 mapping
to the model's outputs. If a tensor, it is expected to map
output names (strings) to scalar coefficients.
sample_weight_mode: If you need to do timestep-wise
sample weighting (2D weights), set this to `"temporal"`.
`None` defaults to sample-wise weights (1D).
If the model has multiple outputs, you can use a different
`sample_weight_mode` on each output by passing a
dictionary or a list of modes.
weighted_metrics: List of metrics to be evaluated and weighted
by sample_weight or class_weight during training and testing.
target_tensors: By default, Keras will create placeholders for the
model's target, which will be fed with the target data during
training. If instead you would like to use your own
target tensors (in turn, Keras will not expect external
Numpy data for these targets at training time), you
can specify them via the `target_tensors` argument. It can be
a single tensor (for a single-output model), a list of tensors,
or a dict mapping output names to target tensors.
**kwargs: When using the Theano/CNTK backends, these arguments
are passed into `K.function`.
When using the TensorFlow backend,
these arguments are passed into `tf.Session.run`.
# Raises
ValueError: In case of invalid arguments for
`optimizer`, `loss`, `metrics` or `sample_weight_mode`.
"""
self.optimizer = optimizers.get(optimizer)
self.loss = loss or []
self.metrics = metrics or []
self.loss_weights = loss_weights
self.sample_weight_mode = sample_weight_mode
self.weighted_metrics = weighted_metrics
if not self.built:
# Model is not compilable because
# it does not know its number of inputs
# and outputs, nor their shapes and names.
# We will compile after the first
# time the model gets called on training data.
return
self._is_compiled = True
# Prepare loss functions.
if isinstance(loss, dict):
for name in loss:
if name not in self.output_names:
raise ValueError('Unknown entry in loss '
'dictionary: "' + name + '". '
'Only expected the following keys: ' +
str(self.output_names))
loss_functions = []
for name in self.output_names:
if name not in loss:
warnings.warn('Output "' + name +
'" missing from loss dictionary. '
'We assume this was done on purpose, '
'and we will not be expecting '
'any data to be passed to "' + name +
'" during training.', stacklevel=2)
loss_functions.append(losses.get(loss.get(name)))
elif isinstance(loss, list):
if len(loss) != len(self.outputs):
raise ValueError('When passing a list as loss, '
                                 'it should have one entry per model output. '
'The model has ' + str(len(self.outputs)) +
' outputs, but you passed loss=' +
str(loss))
loss_functions = [losses.get(l) for l in loss]
else:
loss_function = losses.get(loss)
loss_functions = [loss_function for _ in range(len(self.outputs))]
self.loss_functions = loss_functions
weighted_losses = [
weighted_masked_objective(fn) for fn in loss_functions]
skip_target_indices = []
skip_target_weighing_indices = []
self._feed_outputs = []
self._feed_output_names = []
self._feed_output_shapes = []
self._feed_loss_fns = []
for i in range(len(weighted_losses)):
if weighted_losses[i] is None:
skip_target_indices.append(i)
skip_target_weighing_indices.append(i)
# Prepare output masks.
masks = self.compute_mask(self.inputs, mask=None)
if masks is None:
masks = [None for _ in self.outputs]
masks = to_list(masks)
# Prepare loss weights.
if loss_weights is None:
loss_weights_list = [1. for _ in range(len(self.outputs))]
elif isinstance(loss_weights, dict):
for name in loss_weights:
if name not in self.output_names:
raise ValueError('Unknown entry in loss_weights '
'dictionary: "' + name + '". '
'Only expected the following keys: ' +
str(self.output_names))
loss_weights_list = []
for name in self.output_names:
loss_weights_list.append(loss_weights.get(name, 1.))
elif isinstance(loss_weights, list):
if len(loss_weights) != len(self.outputs):
raise ValueError('When passing a list as loss_weights, '
'it should have one entry per model output. '
'The model has ' + str(len(self.outputs)) +
' outputs, but you passed loss_weights=' +
str(loss_weights))
loss_weights_list = loss_weights
else:
raise TypeError('Could not interpret loss_weights argument: ' +
str(loss_weights) +
                            ' - expected a list or a dict.')
# Prepare targets of model.
self.targets = []
self._feed_targets = []
if target_tensors is not None:
if isinstance(target_tensors, list):
if len(target_tensors) != len(self.outputs):
raise ValueError(
'When passing a list as `target_tensors`, '
'it should have one entry per model output. '
'The model has ' + str(len(self.outputs)) +
' outputs, but you passed target_tensors=' +
str(target_tensors))
elif isinstance(target_tensors, dict):
for name in target_tensors:
if name not in self.output_names:
raise ValueError('Unknown entry in `target_tensors` '
'dictionary: "' + name + '". '
'Only expected the following keys: ' +
str(self.output_names))
tmp_target_tensors = []
for name in self.output_names:
tmp_target_tensors.append(target_tensors.get(name, None))
target_tensors = tmp_target_tensors
elif K.is_tensor(target_tensors):
if len(self.outputs) != 1:
raise ValueError('The model has ' + str(len(self.outputs)) +
' outputs, but you passed a single tensor as '
'`target_tensors`. Expected a list or a dict '
'of tensors.')
target_tensors = [target_tensors]
else:
raise TypeError('Expected `target_tensors` to be a tensor, '
'a list of tensors, or dict of tensors, but got:', target_tensors)
for i in range(len(self.outputs)):
if i in skip_target_indices:
self.targets.append(None)
else:
shape = K.int_shape(self.outputs[i])
name = self.output_names[i]
if target_tensors is not None:
target = target_tensors[i]
else:
target = None
if target is None or K.is_placeholder(target):
if target is None:
target = K.placeholder(
ndim=len(shape),
name=name + '_target',
sparse=K.is_sparse(self.outputs[i]),
dtype=K.dtype(self.outputs[i]))
self._feed_targets.append(target)
self._feed_outputs.append(self.outputs[i])
self._feed_output_names.append(name)
self._feed_output_shapes.append(shape)
self._feed_loss_fns.append(self.loss_functions[i])
else:
skip_target_weighing_indices.append(i)
self.targets.append(target)
# Prepare sample weights.
sample_weights = []
sample_weight_modes = []
if isinstance(sample_weight_mode, dict):
for name in sample_weight_mode:
if name not in self.output_names:
raise ValueError('Unknown entry in '
'sample_weight_mode dictionary: "' +
name + '". '
'Only expected the following keys: ' +
str(self.output_names))
for i, name in enumerate(self.output_names):
if i in skip_target_weighing_indices:
weight = None
sample_weight_modes.append(None)
else:
if name not in sample_weight_mode:
raise ValueError('Output "' + name +
'" missing from sample_weight_modes '
'dictionary')
if sample_weight_mode.get(name) == 'temporal':
weight = K.placeholder(ndim=2,
name=name + '_sample_weights')
sample_weight_modes.append('temporal')
else:
weight = K.placeholder(ndim=1,
name=name + '_sample_weights')
sample_weight_modes.append(None)
sample_weights.append(weight)
elif isinstance(sample_weight_mode, list):
if len(sample_weight_mode) != len(self.outputs):
raise ValueError('When passing a list as sample_weight_mode, '
'it should have one entry per model output. '
'The model has ' + str(len(self.outputs)) +
' outputs, but you passed '
'sample_weight_mode=' +
str(sample_weight_mode))
for i in range(len(self.output_names)):
if i in skip_target_weighing_indices:
weight = None
sample_weight_modes.append(None)
else:
mode = sample_weight_mode[i]
name = self.output_names[i]
if mode == 'temporal':
weight = K.placeholder(ndim=2,
name=name + '_sample_weights')
sample_weight_modes.append('temporal')
else:
weight = K.placeholder(ndim=1,
name=name + '_sample_weights')
sample_weight_modes.append(None)
sample_weights.append(weight)
else:
for i, name in enumerate(self.output_names):
if i in skip_target_weighing_indices:
sample_weight_modes.append(None)
sample_weights.append(None)
else:
if sample_weight_mode == 'temporal':
sample_weights.append(
K.placeholder(ndim=2,
name=name + '_sample_weights'))
sample_weight_modes.append('temporal')
else:
sample_weights.append(
K.placeholder(ndim=1,
name=name + '_sample_weights'))
sample_weight_modes.append(None)
self.sample_weight_modes = sample_weight_modes
self._feed_sample_weight_modes = []
for i in range(len(self.outputs)):
if i not in skip_target_weighing_indices:
self._feed_sample_weight_modes.append(
self.sample_weight_modes[i])
# Prepare metrics.
self.metrics_names = ['loss']
self.metrics_tensors = []
# Compute total loss.
total_loss = None
with K.name_scope('loss'):
for i in range(len(self.outputs)):
if i in skip_target_indices:
continue
y_true = self.targets[i]
y_pred = self.outputs[i]
weighted_loss = weighted_losses[i]
sample_weight = sample_weights[i]
mask = masks[i]
loss_weight = loss_weights_list[i]
with K.name_scope(self.output_names[i] + '_loss'):
output_loss = weighted_loss(y_true, y_pred,
sample_weight, mask)
if len(self.outputs) > 1:
self.metrics_tensors.append(output_loss)
self.metrics_names.append(self.output_names[i] + '_loss')
if total_loss is None:
total_loss = loss_weight * output_loss
else:
total_loss += loss_weight * output_loss
if total_loss is None:
if not self.losses:
raise ValueError('The model cannot be compiled '
'because it has no loss to optimize.')
else:
total_loss = 0.
# Add regularization penalties
# and other layer-specific losses.
for loss_tensor in self.losses:
total_loss += loss_tensor
# List of same size as output_names.
# contains tuples (metrics for output, names of metrics).
nested_metrics = collect_metrics(metrics, self.output_names)
nested_weighted_metrics = collect_metrics(weighted_metrics,
self.output_names)
self.metrics_updates = []
self.stateful_metric_names = []
self.stateful_metric_functions = []
def handle_metrics(metrics, weights=None):
metric_name_prefix = 'weighted_' if weights is not None else ''
for metric in metrics:
if metric in ('accuracy', 'acc', 'crossentropy', 'ce'):
# custom handling of accuracy/crossentropy
# (because of class mode duality)
output_shape = K.int_shape(self.outputs[i])
if (output_shape[-1] == 1 or
self.loss_functions[i] == losses.binary_crossentropy):
# case: binary accuracy/crossentropy
if metric in ('accuracy', 'acc'):
metric_fn = metrics_module.binary_accuracy
elif metric in ('crossentropy', 'ce'):
metric_fn = metrics_module.binary_crossentropy
elif self.loss_functions[i] == losses.sparse_categorical_crossentropy:
# case: categorical accuracy/crossentropy
# with sparse targets
if metric in ('accuracy', 'acc'):
metric_fn = metrics_module.sparse_categorical_accuracy
elif metric in ('crossentropy', 'ce'):
metric_fn = metrics_module.sparse_categorical_crossentropy
else:
# case: categorical accuracy/crossentropy
if metric in ('accuracy', 'acc'):
metric_fn = metrics_module.categorical_accuracy
elif metric in ('crossentropy', 'ce'):
metric_fn = metrics_module.categorical_crossentropy
if metric in ('accuracy', 'acc'):
suffix = 'acc'
elif metric in ('crossentropy', 'ce'):
suffix = 'ce'
weighted_metric_fn = weighted_masked_objective(metric_fn)
metric_name = metric_name_prefix + suffix
else:
metric_fn = metrics_module.get(metric)
weighted_metric_fn = weighted_masked_objective(metric_fn)
# Get metric name as string
if hasattr(metric_fn, 'name'):
metric_name = metric_fn.name
else:
metric_name = metric_fn.__name__
metric_name = metric_name_prefix + metric_name
with K.name_scope(metric_name):
metric_result = weighted_metric_fn(y_true, y_pred,
weights=weights,
mask=masks[i])
# Append to self.metrics_names, self.metric_tensors,
# self.stateful_metric_names
if len(self.output_names) > 1:
metric_name = self.output_names[i] + '_' + metric_name
# Dedupe name
j = 1
base_metric_name = metric_name
while metric_name in self.metrics_names:
metric_name = base_metric_name + '_' + str(j)
j += 1
self.metrics_names.append(metric_name)
self.metrics_tensors.append(metric_result)
# Keep track of state updates created by
# stateful metrics (i.e. metrics layers).
if isinstance(metric_fn, Layer) and metric_fn.stateful:
self.stateful_metric_names.append(metric_name)
self.stateful_metric_functions.append(metric_fn)
self.metrics_updates += metric_fn.updates
with K.name_scope('metrics'):
for i in range(len(self.outputs)):
if i in skip_target_indices:
continue
y_true = self.targets[i]
y_pred = self.outputs[i]
weights = sample_weights[i]
output_metrics = nested_metrics[i]
output_weighted_metrics = nested_weighted_metrics[i]
handle_metrics(output_metrics)
handle_metrics(output_weighted_metrics, weights=weights)
# Prepare gradient updates and state updates.
self.total_loss = total_loss
self.sample_weights = sample_weights
self._feed_sample_weights = []
for i in range(len(self.sample_weights)):
if i not in skip_target_weighing_indices:
self._feed_sample_weights.append(sample_weights[i])
# Functions for train, test and predict will
# be compiled lazily when required.
# This saves time when the user is not using all functions.
self._function_kwargs = kwargs
self.train_function = None
self.test_function = None
self.predict_function = None
# Collected trainable weights, sorted in topological order.
trainable_weights = self.trainable_weights
self._collected_trainable_weights = trainable_weights
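    # Example (illustrative sketch; the optimizer/loss/metric choices are
    # hypothetical):
    #
    #   model.compile(optimizer='rmsprop',
    #                 loss='categorical_crossentropy',
    #                 metrics=['accuracy'])
    #
    # For multi-output models, `loss`, `metrics` and `loss_weights` may instead
    # be dicts keyed by the model's output names, as handled above.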
def _check_trainable_weights_consistency(self):
"""Check trainable weights count consistency.
This will raise a warning if `trainable_weights` and
`_collected_trainable_weights` are inconsistent (i.e. have different
number of parameters).
Inconsistency will typically arise when one modifies `model.trainable`
without calling `model.compile` again.
"""
if not hasattr(self, '_collected_trainable_weights'):
return
if (len(self.trainable_weights) !=
len(self._collected_trainable_weights)):
warnings.warn(UserWarning(
'Discrepancy between trainable weights and collected trainable'
' weights, did you set `model.trainable` without calling'
' `model.compile` after ?'))
def _make_train_function(self):
if not hasattr(self, 'train_function'):
raise RuntimeError('You must compile your model before using it.')
self._check_trainable_weights_consistency()
if self.train_function is None:
inputs = (self._feed_inputs +
self._feed_targets +
self._feed_sample_weights)
if self._uses_dynamic_learning_phase():
inputs += [K.learning_phase()]
with K.name_scope('training'):
with K.name_scope(self.optimizer.__class__.__name__):
training_updates = self.optimizer.get_updates(
params=self._collected_trainable_weights,
loss=self.total_loss)
updates = (self.updates +
training_updates +
self.metrics_updates)
# Gets loss and metrics. Updates weights at each call.
self.train_function = K.function(
inputs,
[self.total_loss] + self.metrics_tensors,
updates=updates,
name='train_function',
**self._function_kwargs)
def _make_test_function(self):
if not hasattr(self, 'test_function'):
raise RuntimeError('You must compile your model before using it.')
if self.test_function is None:
inputs = (self._feed_inputs +
self._feed_targets +
self._feed_sample_weights)
if self._uses_dynamic_learning_phase():
inputs += [K.learning_phase()]
# Return loss and metrics, no gradient updates.
# Does update the network states.
self.test_function = K.function(
inputs,
[self.total_loss] + self.metrics_tensors,
updates=self.state_updates + self.metrics_updates,
name='test_function',
**self._function_kwargs)
def _make_predict_function(self):
if not hasattr(self, 'predict_function'):
self.predict_function = None
if self.predict_function is None:
if self._uses_dynamic_learning_phase():
inputs = self._feed_inputs + [K.learning_phase()]
else:
inputs = self._feed_inputs
# Gets network outputs. Does not update weights.
# Does update the network states.
kwargs = getattr(self, '_function_kwargs', {})
self.predict_function = K.function(inputs,
self.outputs,
updates=self.state_updates,
name='predict_function',
**kwargs)
def _uses_dynamic_learning_phase(self):
return (self.uses_learning_phase and
not isinstance(K.learning_phase(), int))
def _set_inputs(self, inputs, outputs=None, training=None):
"""Set model's input and output specs based on the input data received.
This is to be used for Model subclasses, which do not know at instantiation
time what their inputs look like.
# Arguments
inputs: Single array, or list of arrays. The arrays could be placeholders,
Numpy arrays, or data tensors.
- if placeholders: the model is built on top of these placeholders,
and we expect Numpy data to be fed for them when calling `fit`/etc.
- if Numpy data: we create placeholders matching the shape of the Numpy
arrays. We expect Numpy data to be fed for these placeholders
when calling `fit`/etc.
- if data tensors: the model is built on top of these tensors.
We do not expect any Numpy data to be provided when calling `fit`/etc.
outputs: Optional output tensors (if already computed by running the model).
training: Boolean or None. Only relevant in symbolic mode. Specifies
whether to build the model's graph in inference mode (False), training
mode (True), or using the Keras learning phase (None).
"""
if self.__class__.__name__ == 'Sequential':
# Note: we can't test whether the model
# is `Sequential` via `isinstance`
# since `Sequential` depends on `Model`.
if isinstance(inputs, list):
assert len(inputs) == 1
inputs = inputs[0]
self.build(input_shape=(None,) + inputs.shape[1:])
return
if self.inputs:
raise ValueError('Model inputs are already set.')
# On-the-fly setting of symbolic model inputs
# (either by using the tensor provided,
# or by creating a placeholder if Numpy data was provided).
self.inputs = []
self.input_names = []
self._feed_inputs = []
self._feed_input_names = []
self._feed_input_shapes = []
inputs = to_list(inputs, allow_tuple=True)
for i, v in enumerate(inputs):
name = 'input_%d' % (i + 1)
self.input_names.append(name)
if isinstance(v, list):
v = np.asarray(v)
if v.ndim == 1:
v = np.expand_dims(v, 1)
if isinstance(v, np.ndarray):
# We fix the placeholder shape except the batch size.
# This is suboptimal, but it is the best we can do with the info
# we have. The user should call `model._set_inputs(placeholders)`
# to specify custom placeholders if the need arises.
shape = (None,) + v.shape[1:]
placeholder = K.placeholder(shape=shape, name=name)
self.inputs.append(placeholder)
self._feed_inputs.append(placeholder)
self._feed_input_names.append(name)
self._feed_input_shapes.append(shape)
else:
# Assumed tensor - TODO(fchollet) additional type check?
self.inputs.append(v)
if K.is_placeholder(v):
self._feed_inputs.append(v)
self._feed_input_names.append(name)
self._feed_input_shapes.append(K.int_shape(v))
if outputs is None:
# Obtain symbolic outputs by calling the model.
if self._expects_training_arg:
outputs = self.call(unpack_singleton(self.inputs), training=training)
else:
outputs = self.call(unpack_singleton(self.inputs))
outputs = to_list(outputs, allow_tuple=True)
self.outputs = outputs
self.output_names = [
'output_%d' % (i + 1) for i in range(len(self.outputs))]
self.built = True
def _standardize_user_data(self, x,
y=None,
sample_weight=None,
class_weight=None,
check_array_lengths=True,
batch_size=None):
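"""Standardize and validate user-provided `x`, `y` and sample weights.
Converts the inputs into lists of arrays matching the model's feed
structure, building and/or compiling the model on the fly when it has
not been built/compiled yet.
# Returns
A tuple `(x, y, sample_weights)` of lists of Numpy arrays; the lists
are empty when the inputs are symbolic data tensors and nothing needs
to be fed.
"""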
all_inputs = []
if not self.built:
# We need to use `x` to set the model inputs.
# We type-check that `x` and `y` are either single arrays
# or lists of arrays.
if isinstance(x, (list, tuple)):
if not all(isinstance(v, np.ndarray) or
K.is_tensor(v) for v in x):
raise ValueError('Please provide as model inputs '
'either a single '
'array or a list of arrays. '
'You passed: x=' + str(x))
all_inputs += list(x)
elif isinstance(x, dict):
raise ValueError('Please do not pass a dictionary '
'as model inputs.')
else:
if not isinstance(x, np.ndarray) and not K.is_tensor(x):
raise ValueError('Please provide as model inputs '
'either a single '
'array or a list of arrays. '
'You passed: x=' + str(x))
all_inputs.append(x)
# Build the model using the retrieved inputs (value or symbolic).
# If values, then in symbolic-mode placeholders will be created
# to match the value shapes.
if not self.inputs:
self._set_inputs(x)
if y is not None:
if not self.optimizer:
raise RuntimeError('You must compile a model before '
'training/testing. '
'Use `model.compile(optimizer, loss)`.')
if not self._is_compiled:
# On-the-fly compilation of the model.
# We need to use `y` to set the model targets.
if isinstance(y, (list, tuple)):
if not all(isinstance(v, np.ndarray) or
K.is_tensor(v) for v in y):
raise ValueError('Please provide as model targets '
'either a single '
'array or a list of arrays. '
'You passed: y=' + str(y))
elif isinstance(y, dict):
raise ValueError('Please do not pass a dictionary '
'as model targets.')
else:
if not isinstance(y, np.ndarray) and not K.is_tensor(y):
raise ValueError('Please provide as model targets '
'either a single '
'array or a list of arrays. '
'You passed: y=' + str(y))
# Typecheck that all inputs are *either* value *or* symbolic.
if y is not None:
all_inputs += to_list(y, allow_tuple=True)
if any(K.is_tensor(v) for v in all_inputs):
if not all(K.is_tensor(v) for v in all_inputs):
raise ValueError('Do not pass inputs that mix Numpy '
'arrays and symbolic tensors. '
'You passed: x=' + str(x) +
'; y=' + str(y))
# Handle target tensors if any passed.
y = to_list(y, allow_tuple=True)
target_tensors = [v for v in y if K.is_tensor(v)]
if not target_tensors:
target_tensors = None
self.compile(optimizer=self.optimizer,
loss=self.loss,
metrics=self.metrics,
loss_weights=self.loss_weights,
target_tensors=target_tensors)
# If `x` and `y` were all symbolic,
# then the model should not be fed any inputs and targets.
# Note: in this case, `any` and `all` are equivalent since we disallow
# mixed symbolic/value inputs.
if any(K.is_tensor(v) for v in all_inputs):
return [], [], []
# What follows is input validation and standardization to list format,
# in the case where all inputs are value arrays.
if not self._is_graph_network:
# Case: symbolic-mode subclassed network.
# Do not do shape validation.
feed_input_names = self._feed_input_names
feed_input_shapes = None
else:
# Case: symbolic-mode graph network.
# In this case, we run extensive shape validation checks.
feed_input_names = self._feed_input_names
feed_input_shapes = self._feed_input_shapes
# Standardize the inputs.
x = standardize_input_data(
x,
feed_input_names,
feed_input_shapes,
check_batch_axis=False, # Don't enforce the batch size.
exception_prefix='input')
if y is not None:
if not self._is_graph_network:
feed_output_names = self._feed_output_names
feed_output_shapes = None
# Sample weighting not supported in this case.
# TODO: consider supporting it.
feed_sample_weight_modes = [None for _ in self.outputs]
else:
feed_output_names = self._feed_output_names
feed_sample_weight_modes = self._feed_sample_weight_modes
feed_output_shapes = []
for output_shape, loss_fn in zip(self._feed_output_shapes,
self._feed_loss_fns):
if loss_fn is losses.sparse_categorical_crossentropy:
if K.image_data_format() == 'channels_first' and len(
output_shape) in [4, 5]:
feed_output_shapes.append(
(output_shape[0], 1) + output_shape[2:])
else:
feed_output_shapes.append(output_shape[:-1] + (1,))
elif (not hasattr(loss_fn, '__name__') or
getattr(losses, loss_fn.__name__, None) is None):
# If `loss_fn` is not a function (e.g. callable class)
# or if it is not in the `losses` module, then
# it is a user-defined loss and we make no assumptions
# about it.
feed_output_shapes.append(None)
else:
feed_output_shapes.append(output_shape)
# Standardize the outputs.
y = standardize_input_data(
y,
feed_output_names,
feed_output_shapes,
check_batch_axis=False, # Don't enforce the batch size.
exception_prefix='target')
# Generate sample-wise weight values given the `sample_weight` and
# `class_weight` arguments.
sample_weights = standardize_sample_weights(
sample_weight, feed_output_names)
class_weights = standardize_class_weights(
class_weight, feed_output_names)
sample_weights = [
standardize_weights(ref, sw, cw, mode)
for (ref, sw, cw, mode) in
zip(y, sample_weights, class_weights,
feed_sample_weight_modes)
]
# Check that all arrays have the same length.
check_array_length_consistency(x, y, sample_weights)
if self._is_graph_network:
# Additional checks to avoid users mistakenly
# using improper loss fns.
check_loss_and_target_compatibility(
y, self._feed_loss_fns, feed_output_shapes)
else:
y = []
sample_weights = []
if self.stateful and batch_size:
# Check that for stateful networks, number of samples is a multiple
# of the static batch size.
if x[0].shape[0] % batch_size != 0:
raise ValueError('In a stateful network, '
'you should only pass inputs with '
'a number of samples that can be '
'divided by the batch size. Found: ' +
str(x[0].shape[0]) + ' samples')
return x, y, sample_weights
def fit(self,
x=None,
y=None,
batch_size=None,
epochs=1,
verbose=1,
callbacks=None,
validation_split=0.,
validation_data=None,
shuffle=True,
class_weight=None,
sample_weight=None,
initial_epoch=0,
steps_per_epoch=None,
validation_steps=None,
**kwargs):
"""Trains the model for a given number of epochs (iterations on a dataset).
# Arguments
x: Numpy array of training data (if the model has a single input),
or list of Numpy arrays (if the model has multiple inputs).
If input layers in the model are named, you can also pass a
dictionary mapping input names to Numpy arrays.
`x` can be `None` (default) if feeding from
framework-native tensors (e.g. TensorFlow data tensors).
y: Numpy array of target (label) data
(if the model has a single output),
or list of Numpy arrays (if the model has multiple outputs).
If output layers in the model are named, you can also pass a
dictionary mapping output names to Numpy arrays.
`y` can be `None` (default) if feeding from
framework-native tensors (e.g. TensorFlow data tensors).
batch_size: Integer or `None`.
Number of samples per gradient update.
If unspecified, `batch_size` will default to 32.
epochs: Integer. Number of epochs to train the model.
An epoch is an iteration over the entire `x` and `y`
data provided.
Note that in conjunction with `initial_epoch`,
`epochs` is to be understood as "final epoch".
The model is not trained for a number of iterations
given by `epochs`, but merely until the epoch
of index `epochs` is reached.
verbose: Integer. 0, 1, or 2. Verbosity mode.
0 = silent, 1 = progress bar, 2 = one line per epoch.
callbacks: List of `keras.callbacks.Callback` instances.
List of callbacks to apply during training.
See [callbacks](/callbacks).
validation_split: Float between 0 and 1.
Fraction of the training data to be used as validation data.
The model will set apart this fraction of the training data,
will not train on it, and will evaluate
the loss and any model metrics
on this data at the end of each epoch.
The validation data is selected from the last samples
in the `x` and `y` data provided, before shuffling.
validation_data: tuple `(x_val, y_val)` or tuple
`(x_val, y_val, val_sample_weights)` on which to evaluate
the loss and any model metrics at the end of each epoch.
The model will not be trained on this data.
`validation_data` will override `validation_split`.
shuffle: Boolean (whether to shuffle the training data
before each epoch) or str (for 'batch').
'batch' is a special option for dealing with the
limitations of HDF5 data; it shuffles in batch-sized chunks.
Has no effect when `steps_per_epoch` is not `None`.
class_weight: Optional dictionary mapping class indices (integers)
to a weight (float) value, used for weighting the loss function
(during training only).
This can be useful to tell the model to
"pay more attention" to samples from
an under-represented class.
sample_weight: Optional Numpy array of weights for
the training samples, used for weighting the loss function
(during training only). You can either pass a flat (1D)
Numpy array with the same length as the input samples
(1:1 mapping between weights and samples),
or in the case of temporal data,
you can pass a 2D array with shape
`(samples, sequence_length)`,
to apply a different weight to every timestep of every sample.
In this case you should make sure to specify
`sample_weight_mode="temporal"` in `compile()`.
initial_epoch: Integer.
Epoch at which to start training
(useful for resuming a previous training run).
steps_per_epoch: Integer or `None`.
Total number of steps (batches of samples)
before declaring one epoch finished and starting the
next epoch. When training with input tensors such as
TensorFlow data tensors, the default `None` is equal to
the number of samples in your dataset divided by
the batch size, or 1 if that cannot be determined.
validation_steps: Only relevant if `steps_per_epoch`
is specified. Total number of steps (batches of samples)
to validate before stopping.
# Returns
A `History` object. Its `History.history` attribute is
a record of training loss values and metrics values
at successive epochs, as well as validation loss values
and validation metrics values (if applicable).
# Raises
RuntimeError: If the model was never compiled.
ValueError: In case of mismatch between the provided input data
and what the model expects.
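# Example
A minimal usage sketch (assuming a compiled model and Numpy arrays
`x_train` and `y_train` with matching first dimensions):
```python
history = model.fit(x_train, y_train,
                    batch_size=32,
                    epochs=10,
                    validation_split=0.2)
print(history.history['loss'])      # per-epoch training loss
print(history.history['val_loss'])  # per-epoch validation loss
```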
"""
# Backwards compatibility
if batch_size is None and steps_per_epoch is None:
batch_size = 32
# Legacy support
if 'nb_epoch' in kwargs:
warnings.warn('The `nb_epoch` argument in `fit` '
'has been renamed `epochs`.', stacklevel=2)
epochs = kwargs.pop('nb_epoch')
if kwargs:
raise TypeError('Unrecognized keyword arguments: ' + str(kwargs))
if x is None and y is None and steps_per_epoch is None:
raise ValueError('If fitting from data tensors, '
'you should specify the `steps_per_epoch` '
'argument.')
# Validate user data.
x, y, sample_weights = self._standardize_user_data(
x, y,
sample_weight=sample_weight,
class_weight=class_weight,
batch_size=batch_size)
# Prepare validation data.
do_validation = False
if validation_data:
do_validation = True
if len(validation_data) == 2:
val_x, val_y = validation_data
val_sample_weight = None
elif len(validation_data) == 3:
val_x, val_y, val_sample_weight = validation_data
else:
raise ValueError('When passing validation_data, '
'it must contain either 2 items (x_val, y_val) '
'or 3 items (x_val, y_val, val_sample_weights); '
'however, it contains %d items' %
len(validation_data))
val_x, val_y, val_sample_weights = self._standardize_user_data(
val_x, val_y,
sample_weight=val_sample_weight,
batch_size=batch_size)
if self._uses_dynamic_learning_phase():
val_ins = val_x + val_y + val_sample_weights + [0.]
else:
val_ins = val_x + val_y + val_sample_weights
elif validation_split and 0. < validation_split < 1.:
if any(K.is_tensor(t) for t in x):
raise ValueError(
'If your data is in the form of symbolic tensors, '
'you cannot use `validation_split`.')
do_validation = True
if hasattr(x[0], 'shape'):
split_at = int(int(x[0].shape[0]) * (1. - validation_split))
else:
split_at = int(len(x[0]) * (1. - validation_split))
x, val_x = (slice_arrays(x, 0, split_at),
slice_arrays(x, split_at))
y, val_y = (slice_arrays(y, 0, split_at),
slice_arrays(y, split_at))
sample_weights, val_sample_weights = (
slice_arrays(sample_weights, 0, split_at),
slice_arrays(sample_weights, split_at))
if self._uses_dynamic_learning_phase():
val_ins = val_x + val_y + val_sample_weights + [0.]
else:
val_ins = val_x + val_y + val_sample_weights
elif validation_steps:
do_validation = True
if self._uses_dynamic_learning_phase():
val_ins = [0.]
# Prepare input arrays and training function.
if self._uses_dynamic_learning_phase():
ins = x + y + sample_weights + [1.]
else:
ins = x + y + sample_weights
self._make_train_function()
f = self.train_function
# Prepare display labels.
out_labels = self.metrics_names
if do_validation:
self._make_test_function()
val_f = self.test_function
callback_metrics = copy.copy(out_labels) + [
'val_' + n for n in out_labels]
else:
callback_metrics = copy.copy(out_labels)
val_f = None
val_ins = []
# Delegate logic to `fit_loop`.
return training_arrays.fit_loop(self, f, ins,
out_labels=out_labels,
batch_size=batch_size,
epochs=epochs,
verbose=verbose,
callbacks=callbacks,
val_f=val_f,
val_ins=val_ins,
shuffle=shuffle,
callback_metrics=callback_metrics,
initial_epoch=initial_epoch,
steps_per_epoch=steps_per_epoch,
validation_steps=validation_steps)
def evaluate(self, x=None, y=None,
batch_size=None,
verbose=1,
sample_weight=None,
steps=None):
"""Returns the loss value & metrics values for the model in test mode.
Computation is done in batches.
# Arguments
x: Numpy array of test data (if the model has a single input),
or list of Numpy arrays (if the model has multiple inputs).
If input layers in the model are named, you can also pass a
dictionary mapping input names to Numpy arrays.
`x` can be `None` (default) if feeding from
framework-native tensors (e.g. TensorFlow data tensors).
y: Numpy array of target (label) data
(if the model has a single output),
or list of Numpy arrays (if the model has multiple outputs).
If output layers in the model are named, you can also pass a
dictionary mapping output names to Numpy arrays.
`y` can be `None` (default) if feeding from
framework-native tensors (e.g. TensorFlow data tensors).
batch_size: Integer or `None`.
Number of samples per evaluation step.
If unspecified, `batch_size` will default to 32.
verbose: 0 or 1. Verbosity mode.
0 = silent, 1 = progress bar.
sample_weight: Optional Numpy array of weights for
the test samples, used for weighting the loss function.
You can either pass a flat (1D)
Numpy array with the same length as the input samples
(1:1 mapping between weights and samples),
or in the case of temporal data,
you can pass a 2D array with shape
`(samples, sequence_length)`,
to apply a different weight to every timestep of every sample.
In this case you should make sure to specify
`sample_weight_mode="temporal"` in `compile()`.
steps: Integer or `None`.
Total number of steps (batches of samples)
before declaring the evaluation round finished.
Ignored with the default value of `None`.
# Returns
Scalar test loss (if the model has a single output and no metrics)
or list of scalars (if the model has multiple outputs
and/or metrics). The attribute `model.metrics_names` will give you
the display labels for the scalar outputs.
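# Example
A minimal sketch (assuming a compiled model and Numpy test arrays
`x_test` and `y_test`):
```python
results = model.evaluate(x_test, y_test, batch_size=128)
# `model.metrics_names` gives the label for each returned scalar.
print(model.metrics_names, results)
```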
"""
# Backwards compatibility.
if batch_size is None and steps is None:
batch_size = 32
if x is None and y is None and steps is None:
raise ValueError('If evaluating from data tensors, '
'you should specify the `steps` '
'argument.')
# Validate user data.
x, y, sample_weights = self._standardize_user_data(
x, y,
sample_weight=sample_weight,
batch_size=batch_size)
# Prepare inputs, delegate logic to `test_loop`.
if self._uses_dynamic_learning_phase():
ins = x + y + sample_weights + [0.]
else:
ins = x + y + sample_weights
self._make_test_function()
f = self.test_function
return training_arrays.test_loop(self, f, ins,
batch_size=batch_size,
verbose=verbose,
steps=steps)
def predict(self, x,
batch_size=None,
verbose=0,
steps=None):
"""Generates output predictions for the input samples.
Computation is done in batches.
# Arguments
x: The input data, as a Numpy array
(or list of Numpy arrays if the model has multiple inputs).
batch_size: Integer. If unspecified, it will default to 32.
verbose: Verbosity mode, 0 or 1.
steps: Total number of steps (batches of samples)
before declaring the prediction round finished.
Ignored with the default value of `None`.
# Returns
Numpy array(s) of predictions.
# Raises
ValueError: In case of mismatch between the provided
input data and the model's expectations,
or in case a stateful model receives a number of samples
that is not a multiple of the batch size.
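# Example
A minimal sketch (assuming a built, single-output model and a Numpy
array `x_new` shaped like the training inputs):
```python
predictions = model.predict(x_new, batch_size=32, verbose=1)
print(predictions.shape)  # (num_samples,) + output shape
```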
"""
# Backwards compatibility.
if batch_size is None and steps is None:
batch_size = 32
if x is None and steps is None:
raise ValueError('If predicting from data tensors, '
'you should specify the `steps` '
'argument.')
# Validate user data.
x, _, _ = self._standardize_user_data(x)
if self.stateful:
if x[0].shape[0] > batch_size and x[0].shape[0] % batch_size != 0:
raise ValueError('In a stateful network, '
'you should only pass inputs with '
'a number of samples that can be '
'divided by the batch size. Found: ' +
str(x[0].shape[0]) + ' samples. '
'Batch size: ' + str(batch_size) + '.')
# Prepare inputs, delegate logic to `predict_loop`.
if self._uses_dynamic_learning_phase():
ins = x + [0.]
else:
ins = x
self._make_predict_function()
f = self.predict_function
return training_arrays.predict_loop(self, f, ins,
batch_size=batch_size,
verbose=verbose,
steps=steps)
def train_on_batch(self, x, y,
sample_weight=None,
class_weight=None):
"""Runs a single gradient update on a single batch of data.
# Arguments
x: Numpy array of training data,
or list of Numpy arrays if the model has multiple inputs.
If all inputs in the model are named,
you can also pass a dictionary
mapping input names to Numpy arrays.
y: Numpy array of target data,
or list of Numpy arrays if the model has multiple outputs.
If all outputs in the model are named,
you can also pass a dictionary
mapping output names to Numpy arrays.
sample_weight: Optional array of the same length as x, containing
weights to apply to the model's loss for each sample.
In the case of temporal data, you can pass a 2D array
with shape (samples, sequence_length),
to apply a different weight to every timestep of every sample.
In this case you should make sure to specify
sample_weight_mode="temporal" in compile().
class_weight: Optional dictionary mapping
class indices (integers) to
a weight (float) to apply to the model's loss for the samples
from this class during training.
This can be useful to tell the model to "pay more attention" to
samples from an under-represented class.
# Returns
Scalar training loss
(if the model has a single output and no metrics)
or list of scalars (if the model has multiple outputs
and/or metrics). The attribute `model.metrics_names` will give you
the display labels for the scalar outputs.
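# Example
A minimal sketch of a custom training loop (assuming a compiled model
and an iterable `batches` yielding Numpy `(x_batch, y_batch)` pairs):
```python
for x_batch, y_batch in batches:
    loss = model.train_on_batch(x_batch, y_batch)
```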
"""
x, y, sample_weights = self._standardize_user_data(
x, y,
sample_weight=sample_weight,
class_weight=class_weight)
if self._uses_dynamic_learning_phase():
ins = x + y + sample_weights + [1.]
else:
ins = x + y + sample_weights
self._make_train_function()
outputs = self.train_function(ins)
return unpack_singleton(outputs)
def test_on_batch(self, x, y, sample_weight=None):
"""Test the model on a single batch of samples.
# Arguments
x: Numpy array of test data,
or list of Numpy arrays if the model has multiple inputs.
If all inputs in the model are named,
you can also pass a dictionary
mapping input names to Numpy arrays.
y: Numpy array of target data,
or list of Numpy arrays if the model has multiple outputs.
If all outputs in the model are named,
you can also pass a dictionary
mapping output names to Numpy arrays.
sample_weight: Optional array of the same length as x, containing
weights to apply to the model's loss for each sample.
In the case of temporal data, you can pass a 2D array
with shape (samples, sequence_length),
to apply a different weight to every timestep of every sample.
In this case you should make sure to specify
sample_weight_mode="temporal" in compile().
# Returns
Scalar test loss (if the model has a single output and no metrics)
or list of scalars (if the model has multiple outputs
and/or metrics). The attribute `model.metrics_names` will give you
the display labels for the scalar outputs.
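# Example
A minimal sketch (assuming a compiled model and a single Numpy batch
`x_batch`, `y_batch`):
```python
val_loss = model.test_on_batch(x_batch, y_batch)
```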
"""
x, y, sample_weights = self._standardize_user_data(
x, y,
sample_weight=sample_weight)
if self._uses_dynamic_learning_phase():
ins = x + y + sample_weights + [0.]
else:
ins = x + y + sample_weights
self._make_test_function()
outputs = self.test_function(ins)
return unpack_singleton(outputs)
def predict_on_batch(self, x):
"""Returns predictions for a single batch of samples.
# Arguments
x: Input samples, as a Numpy array.
# Returns
Numpy array(s) of predictions.
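# Example
A minimal sketch (assuming a built model and a single Numpy batch
`x_batch`):
```python
batch_predictions = model.predict_on_batch(x_batch)
```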
"""
x, _, _ = self._standardize_user_data(x)
if self._uses_dynamic_learning_phase():
ins = x + [0.]
else:
ins = x
self._make_predict_function()
outputs = self.predict_function(ins)
return unpack_singleton(outputs)
@interfaces.legacy_generator_methods_support
def fit_generator(self, generator,
steps_per_epoch=None,
epochs=1,
verbose=1,
callbacks=None,
validation_data=None,
validation_steps=None,
class_weight=None,
max_queue_size=10,
workers=1,
use_multiprocessing=False,
shuffle=True,
initial_epoch=0):
"""Trains the model on data generated batch-by-batch by a Python generator (or an instance of `Sequence`).
The generator is run in parallel to the model, for efficiency.
For instance, this allows you to do real-time data augmentation
on images on CPU in parallel to training your model on GPU.
The use of `keras.utils.Sequence` guarantees the ordering
and guarantees the single use of every input per epoch when
using `use_multiprocessing=True`.
# Arguments
generator: A generator or an instance of `Sequence`
(`keras.utils.Sequence`) object in order to avoid
duplicate data when using multiprocessing.
The output of the generator must be either
- a tuple `(inputs, targets)`
- a tuple `(inputs, targets, sample_weights)`.
This tuple (a single output of the generator) makes a single
batch. Therefore, all arrays in this tuple must have the same
length (equal to the size of this batch). Different batches may
have different sizes. For example, the last batch of the epoch
is commonly smaller than the others, if the size of the dataset
is not divisible by the batch size.
The generator is expected to loop over its data
indefinitely. An epoch finishes when `steps_per_epoch`
batches have been seen by the model.
steps_per_epoch: Integer.
Total number of steps (batches of samples)
to yield from `generator` before declaring one epoch
finished and starting the next epoch. It should typically
be equal to the number of samples of your dataset
divided by the batch size.
Optional for `Sequence`: if unspecified, will use
the `len(generator)` as a number of steps.
epochs: Integer. Number of epochs to train the model.
An epoch is an iteration over the entire data provided,
as defined by `steps_per_epoch`.
Note that in conjunction with `initial_epoch`,
`epochs` is to be understood as "final epoch".
The model is not trained for a number of iterations
given by `epochs`, but merely until the epoch
of index `epochs` is reached.
verbose: Integer. 0, 1, or 2. Verbosity mode.
0 = silent, 1 = progress bar, 2 = one line per epoch.
callbacks: List of `keras.callbacks.Callback` instances.
List of callbacks to apply during training.
See [callbacks](/callbacks).
validation_data: This can be either
- a generator or a `Sequence` object for the validation data
- tuple `(x_val, y_val)`
- tuple `(x_val, y_val, val_sample_weights)`
on which to evaluate
the loss and any model metrics at the end of each epoch.
The model will not be trained on this data.
validation_steps: Only relevant if `validation_data`
is a generator. Total number of steps (batches of samples)
to yield from `validation_data` generator before stopping
at the end of every epoch. It should typically
be equal to the number of samples of your
validation dataset divided by the batch size.
Optional for `Sequence`: if unspecified, will use
the `len(validation_data)` as a number of steps.
class_weight: Optional dictionary mapping class indices (integers)
to a weight (float) value, used for weighting the loss function
(during training only). This can be useful to tell the model to
"pay more attention" to samples
from an under-represented class.
max_queue_size: Integer. Maximum size for the generator queue.
If unspecified, `max_queue_size` will default to 10.
workers: Integer. Maximum number of processes to spin up
when using process-based threading.
If unspecified, `workers` will default to 1. If 0, will
execute the generator on the main thread.
use_multiprocessing: Boolean.
If `True`, use process-based threading.
If unspecified, `use_multiprocessing` will default to `False`.
Note that because this implementation
relies on multiprocessing,
you should not pass non-picklable arguments to the generator
as they can't be passed easily to children processes.
shuffle: Boolean. Whether to shuffle the order of the batches at
the beginning of each epoch. Only used with instances
of `Sequence` (`keras.utils.Sequence`).
Has no effect when `steps_per_epoch` is not `None`.
initial_epoch: Integer.
Epoch at which to start training
(useful for resuming a previous training run).
# Returns
A `History` object. Its `History.history` attribute is
a record of training loss values and metrics values
at successive epochs, as well as validation loss values
and validation metrics values (if applicable).
# Raises
ValueError: In case the generator yields data in an invalid format.
# Example
```python
def generate_arrays_from_file(path):
    while True:
        with open(path) as f:
            for line in f:
                # create numpy arrays of input data
                # and labels, from each line in the file
                x1, x2, y = process_line(line)
                yield ({'input_1': x1, 'input_2': x2}, {'output': y})
model.fit_generator(generate_arrays_from_file('/my_file.txt'),
                    steps_per_epoch=10000, epochs=10)
```
"""
return training_generator.fit_generator(
self, generator,
steps_per_epoch=steps_per_epoch,
epochs=epochs,
verbose=verbose,
callbacks=callbacks,
validation_data=validation_data,
validation_steps=validation_steps,
class_weight=class_weight,
max_queue_size=max_queue_size,
workers=workers,
use_multiprocessing=use_multiprocessing,
shuffle=shuffle,
initial_epoch=initial_epoch)
@interfaces.legacy_generator_methods_support
def evaluate_generator(self, generator,
steps=None,
max_queue_size=10,
workers=1,
use_multiprocessing=False,
verbose=0):
"""Evaluates the model on a data generator.
The generator should return the same kind of data
as accepted by `test_on_batch`.
# Arguments
generator: Generator yielding tuples (inputs, targets)
or (inputs, targets, sample_weights)
or an instance of Sequence (keras.utils.Sequence)
object in order to avoid duplicate data
when using multiprocessing.
steps: Total number of steps (batches of samples)
to yield from `generator` before stopping.
Optional for `Sequence`: if unspecified, will use
the `len(generator)` as a number of steps.
max_queue_size: Maximum size for the generator queue.
workers: Integer. Maximum number of processes to spin up
when using process-based threading.
If unspecified, `workers` will default to 1. If 0, will
execute the generator on the main thread.
use_multiprocessing: If `True`, use process-based threading.
Note that because
this implementation relies on multiprocessing,
you should not pass
non-picklable arguments to the generator
as they can't be passed
easily to children processes.
verbose: Verbosity mode, 0 or 1.
# Returns
Scalar test loss (if the model has a single output and no metrics)
or list of scalars (if the model has multiple outputs
and/or metrics). The attribute `model.metrics_names` will give you
the display labels for the scalar outputs.
# Raises
ValueError: In case the generator yields
data in an invalid format.
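# Example
A minimal sketch (assuming a compiled model and a `keras.utils.Sequence`
instance `val_sequence` yielding `(inputs, targets)` batches):
```python
scores = model.evaluate_generator(val_sequence, workers=2)
print(model.metrics_names, scores)
```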
"""
return training_generator.evaluate_generator(
self, generator,
steps=steps,
max_queue_size=max_queue_size,
workers=workers,
use_multiprocessing=use_multiprocessing,
verbose=verbose)
@interfaces.legacy_generator_methods_support
def predict_generator(self, generator,
steps=None,
max_queue_size=10,
workers=1,
use_multiprocessing=False,
verbose=0):
"""Generates predictions for the input samples from a data generator.
The generator should return the same kind of data as accepted by
`predict_on_batch`.
# Arguments
generator: Generator yielding batches of input samples
or an instance of Sequence (keras.utils.Sequence)
object in order to avoid duplicate data
when using multiprocessing.
steps: Total number of steps (batches of samples)
to yield from `generator` before stopping.
Optional for `Sequence`: if unspecified, will use
the `len(generator)` as a number of steps.
max_queue_size: Maximum size for the generator queue.
workers: Integer. Maximum number of processes to spin up
when using process-based threading.
If unspecified, `workers` will default to 1. If 0, will
execute the generator on the main thread.
use_multiprocessing: If `True`, use process-based threading.
Note that because
this implementation relies on multiprocessing,
you should not pass
non-picklable arguments to the generator
as they can't be passed
easily to children processes.
verbose: Verbosity mode, 0 or 1.
# Returns
Numpy array(s) of predictions.
# Raises
ValueError: In case the generator yields
data in an invalid format.
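# Example
A minimal sketch (assuming a built model and a `keras.utils.Sequence`
instance `test_sequence` yielding batches of inputs only):
```python
predictions = model.predict_generator(test_sequence, verbose=1)
```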
"""
return training_generator.predict_generator(
self, generator,
steps=steps,
max_queue_size=max_queue_size,
workers=workers,
use_multiprocessing=use_multiprocessing,
verbose=verbose)
the-stack_0_23800 | """Tests for abstract.py."""
from pytype import abstract
from pytype import abstract_utils
from pytype import annotations_util
from pytype import config
from pytype import errors
from pytype import function
from pytype import load_pytd
from pytype import special_builtins
from pytype import state as frame_state
from pytype import vm
from pytype.pytd import pytd
from pytype.pytd import pytd_utils
from pytype.typegraph import cfg
import six
import unittest
class AbstractTestBase(unittest.TestCase):
PYTHON_VERSION = (2, 7)
def setUp(self):
options = config.Options.create()
self._vm = vm.VirtualMachine(
errors.ErrorLog(), options, load_pytd.Loader(None, self.PYTHON_VERSION))
self._program = self._vm.program
self._node = self._vm.root_cfg_node.ConnectNew("test_node")
def new_var(self, *values):
"""Create a Variable bound to the given values."""
var = self._program.NewVariable()
for value in values:
var.AddBinding(value, source_set=(), where=self._node)
return var
def new_dict(self, **kwargs):
"""Create a Dict from keywords mapping names to Variable objects."""
d = abstract.Dict(self._vm)
for name, var in kwargs.items():
d.set_str_item(self._node, name, var)
return d
class InstanceTest(AbstractTestBase):
# TODO(dbaum): Is it worth adding a test for frozenset()? There isn't
# an easy way to create one directly from the vm, it is already covered
# in test_splits.py, and there aren't any new code paths. Perhaps it isn't
# worth the effort.
def test_compatible_with_non_container(self):
# Compatible with either True or False.
i = abstract.Instance(self._vm.convert.object_type, self._vm)
self.assertIs(True, i.compatible_with(True))
self.assertIs(True, i.compatible_with(False))
def test_compatible_with_list(self):
i = abstract.List([], self._vm)
# Empty list is not compatible with True.
self.assertIs(False, i.compatible_with(True))
self.assertIs(True, i.compatible_with(False))
# Once a type parameter is set, list is compatible with True and False.
i.merge_instance_type_parameter(
self._node, abstract_utils.T,
self._vm.convert.object_type.to_variable(self._vm.root_cfg_node))
self.assertIs(True, i.compatible_with(True))
self.assertIs(True, i.compatible_with(False))
def test_compatible_with_set(self):
i = abstract.Instance(self._vm.convert.set_type, self._vm)
# Empty set is not compatible with True.
self.assertIs(False, i.compatible_with(True))
self.assertIs(True, i.compatible_with(False))
# Once a type parameter is set, the set is compatible with True and False.
i.merge_instance_type_parameter(
self._node, abstract_utils.T,
self._vm.convert.object_type.to_variable(self._vm.root_cfg_node))
self.assertIs(True, i.compatible_with(True))
self.assertIs(True, i.compatible_with(False))
def test_compatible_with_none(self):
# This test is specifically for abstract.Instance, so we don't use
# self._vm.convert.none, which is an AbstractOrConcreteValue.
i = abstract.Instance(self._vm.convert.none_type, self._vm)
self.assertIs(False, i.compatible_with(True))
self.assertIs(True, i.compatible_with(False))
class TupleTest(AbstractTestBase):
def setUp(self):
super(TupleTest, self).setUp()
self._var = self._program.NewVariable()
self._var.AddBinding(abstract.Unknown(self._vm), [], self._node)
def test_compatible_with__not_empty(self):
t = abstract.Tuple((self._var,), self._vm)
self.assertIs(True, t.compatible_with(True))
self.assertIs(False, t.compatible_with(False))
def test_compatible_with__empty(self):
t = abstract.Tuple((), self._vm)
self.assertIs(False, t.compatible_with(True))
self.assertIs(True, t.compatible_with(False))
def test_getitem__concrete_index(self):
t = abstract.Tuple((self._var,), self._vm)
index = self._vm.convert.constant_to_var(0)
node, var = t.cls.getitem_slot(self._node, index)
self.assertIs(node, self._node)
self.assertIs(abstract_utils.get_atomic_value(var),
abstract_utils.get_atomic_value(self._var))
def test_getitem__abstract_index(self):
t = abstract.Tuple((self._var,), self._vm)
index = self._vm.convert.build_int(self._node)
node, var = t.cls.getitem_slot(self._node, index)
self.assertIs(node, self._node)
self.assertIs(abstract_utils.get_atomic_value(var),
abstract_utils.get_atomic_value(self._var))
class DictTest(AbstractTestBase):
def setUp(self):
super(DictTest, self).setUp()
self._d = abstract.Dict(self._vm)
self._var = self._program.NewVariable()
self._var.AddBinding(abstract.Unknown(self._vm), [], self._node)
def test_compatible_with__when_empty(self):
self.assertIs(False, self._d.compatible_with(True))
self.assertIs(True, self._d.compatible_with(False))
def test_compatible_with__after_setitem(self):
# Once a slot is added, dict is ambiguous.
self._d.setitem_slot(self._node, self._var, self._var)
self.assertIs(True, self._d.compatible_with(True))
self.assertIs(True, self._d.compatible_with(False))
def test_compatible_with__after_set_str_item(self):
self._d.set_str_item(self._node, "key", self._var)
self.assertIs(True, self._d.compatible_with(True))
self.assertIs(False, self._d.compatible_with(False))
def test_compatible_with__after_unknown_update(self):
# Updating an empty dict with an unknown value makes the former ambiguous.
self._d.update(self._node, abstract.Unknown(self._vm))
self.assertIs(True, self._d.compatible_with(True))
self.assertIs(True, self._d.compatible_with(False))
def test_compatible_with__after_empty_update(self):
empty_dict = abstract.Dict(self._vm)
self._d.update(self._node, empty_dict)
self.assertIs(False, self._d.compatible_with(True))
self.assertIs(True, self._d.compatible_with(False))
def test_compatible_with__after_unambiguous_update(self):
unambiguous_dict = abstract.Dict(self._vm)
unambiguous_dict.set_str_item(
self._node, "a", self._vm.convert.create_new_unsolvable(self._node))
self._d.update(self._node, unambiguous_dict)
self.assertIs(True, self._d.compatible_with(True))
self.assertIs(False, self._d.compatible_with(False))
def test_compatible_with__after_ambiguous_update(self):
ambiguous_dict = abstract.Dict(self._vm)
ambiguous_dict.merge_instance_type_parameter(
self._node, abstract_utils.K,
self._vm.convert.create_new_unsolvable(self._node))
ambiguous_dict.could_contain_anything = True
self._d.update(self._node, ambiguous_dict)
self.assertIs(True, self._d.compatible_with(True))
self.assertIs(True, self._d.compatible_with(False))
def test_compatible_with__after_concrete_update(self):
self._d.update(self._node, {})
self.assertIs(False, self._d.compatible_with(True))
self.assertIs(True, self._d.compatible_with(False))
self._d.update(
self._node, {"a": self._vm.convert.create_new_unsolvable(self._node)})
self.assertIs(True, self._d.compatible_with(True))
self.assertIs(False, self._d.compatible_with(False))
def test_pop(self):
self._d.set_str_item(self._node, "a", self._var)
node, ret = self._d.pop_slot(
self._node, self._vm.convert.build_string(self._node, "a"))
self.assertIs(False, self._d.compatible_with(True))
self.assertIs(True, self._d.compatible_with(False))
self.assertIs(node, self._node)
self.assertIs(ret, self._var)
def test_pop_with_default(self):
self._d.set_str_item(self._node, "a", self._var)
node, ret = self._d.pop_slot(
self._node, self._vm.convert.build_string(self._node, "a"),
self._vm.convert.none.to_variable(self._node)) # default is ignored
self.assertIs(False, self._d.compatible_with(True))
self.assertIs(True, self._d.compatible_with(False))
self.assertIs(node, self._node)
self.assertIs(ret, self._var)
def test_bad_pop(self):
self._d.set_str_item(self._node, "a", self._var)
self.assertRaises(function.DictKeyMissing, self._d.pop_slot, self._node,
self._vm.convert.build_string(self._node, "b"))
self.assertIs(True, self._d.compatible_with(True))
self.assertIs(False, self._d.compatible_with(False))
def test_bad_pop_with_default(self):
val = self._vm.convert.primitive_class_instances[int]
self._d.set_str_item(self._node, "a", val.to_variable(self._node))
node, ret = self._d.pop_slot(
self._node, self._vm.convert.build_string(self._node, "b"),
self._vm.convert.none.to_variable(self._node))
self.assertIs(True, self._d.compatible_with(True))
self.assertIs(False, self._d.compatible_with(False))
self.assertIs(node, self._node)
self.assertListEqual(ret.data, [self._vm.convert.none])
def test_ambiguous_pop(self):
val = self._vm.convert.primitive_class_instances[int]
self._d.set_str_item(self._node, "a", val.to_variable(self._node))
ambiguous_key = self._vm.convert.primitive_class_instances[str]
node, ret = self._d.pop_slot(
self._node, ambiguous_key.to_variable(self._node))
self.assertIs(True, self._d.compatible_with(True))
self.assertIs(True, self._d.compatible_with(False))
self.assertIs(node, self._node)
self.assertListEqual(ret.data, [val])
def test_ambiguous_pop_with_default(self):
val = self._vm.convert.primitive_class_instances[int]
self._d.set_str_item(self._node, "a", val.to_variable(self._node))
ambiguous_key = self._vm.convert.primitive_class_instances[str]
default_var = self._vm.convert.none.to_variable(self._node)
node, ret = self._d.pop_slot(
self._node, ambiguous_key.to_variable(self._node), default_var)
self.assertIs(True, self._d.compatible_with(True))
self.assertIs(True, self._d.compatible_with(False))
self.assertIs(node, self._node)
self.assertSetEqual(set(ret.data), {val, self._vm.convert.none})
def test_ambiguous_dict_after_pop(self):
ambiguous_key = self._vm.convert.primitive_class_instances[str]
val = self._vm.convert.primitive_class_instances[int]
node, _ = self._d.setitem_slot(
self._node, ambiguous_key.to_variable(self._node),
val.to_variable(self._node))
_, ret = self._d.pop_slot(node, self._vm.convert.build_string(node, "a"))
self.assertIs(True, self._d.compatible_with(True))
self.assertIs(True, self._d.compatible_with(False))
self.assertListEqual(ret.data, [val])
def test_ambiguous_dict_after_pop_with_default(self):
ambiguous_key = self._vm.convert.primitive_class_instances[str]
val = self._vm.convert.primitive_class_instances[int]
node, _ = self._d.setitem_slot(
self._node, ambiguous_key.to_variable(self._node),
val.to_variable(self._node))
_, ret = self._d.pop_slot(node, self._vm.convert.build_string(node, "a"),
self._vm.convert.none.to_variable(node))
self.assertIs(True, self._d.compatible_with(True))
self.assertIs(True, self._d.compatible_with(False))
self.assertSetEqual(set(ret.data), {val, self._vm.convert.none})
class IsInstanceTest(AbstractTestBase):
def setUp(self):
super(IsInstanceTest, self).setUp()
self._is_instance = special_builtins.IsInstance.make(self._vm)
# Easier access to some primitive instances.
self._bool = self._vm.convert.primitive_class_instances[bool]
self._int = self._vm.convert.primitive_class_instances[int]
self._str = self._vm.convert.primitive_class_instances[str]
# Values that represent primitive classes.
self._obj_class = self._vm.convert.primitive_classes[object]
self._int_class = self._vm.convert.primitive_classes[int]
self._str_class = self._vm.convert.primitive_classes[str]
def assert_call(self, expected, left, right):
"""Check that call() returned the desired results.
Args:
expected: A dict from values to source sets, where a source set is
represented by the sorted binding names separated by spaces, for
example "left:0 right:1" would indicate binding #0 of variable
"left" and binding #1 of variable "right".
left: A Variable to use as the first arg to call().
right: A Variable to use as the second arg to call().
"""
name_map = {left: "left", right: "right"}
node, result = self._is_instance.call(
self._node, None, function.Args(
(left, right), self.new_dict(), None, None))
self.assertIn(node, self._node.outgoing)
result_map = {}
# Turning source sets into canonical string representations of the binding
# names makes it much easier to debug failures.
for b in result.bindings:
terms = set()
for o in b.origins:
self.assertEqual(node, o.where)
for sources in o.source_sets:
terms.add(" ".join(sorted(
"%s:%d" % (name_map[b.variable], b.variable.bindings.index(b))
for b in sources)))
result_map[b.data] = terms
self.assertEqual(expected, result_map)
def test_call_single_bindings(self):
right = self.new_var(self._str_class)
left = self.new_var(self._str)
self.assert_call(
{self._vm.convert.true: {"left:0 right:0"}},
left, right)
left = self.new_var(self._int)
self.assert_call(
{self._vm.convert.false: {"left:0 right:0"}},
left, right)
left = self.new_var(abstract.Unknown(self._vm))
self.assert_call(
{self._bool: {"left:0 right:0"}},
left, right)
def test_call_multiple_bindings(self):
left = self.new_var(self._int, self._str)
right = self.new_var(self._int_class, self._str_class)
self.assert_call(
{
self._vm.convert.true: {"left:0 right:0", "left:1 right:1"},
self._vm.convert.false: {"left:0 right:1", "left:1 right:0"},
}, left, right)
def test_call_wrong_argcount(self):
self._vm.push_frame(frame_state.SimpleFrame())
node, result = self._is_instance.call(
self._node, None, function.Args((), self.new_dict(), None, None))
self.assertEqual(self._node, node)
self.assertIsInstance(abstract_utils.get_atomic_value(result),
abstract.Unsolvable)
self.assertRegexpMatches(str(self._vm.errorlog), "missing-parameter")
def test_call_wrong_keywords(self):
self._vm.push_frame(frame_state.SimpleFrame())
x = self.new_var(abstract.Unknown(self._vm))
node, result = self._is_instance.call(
self._node, None, function.Args(
(x, x), self.new_dict(foo=x), None, None))
self.assertEqual(self._node, node)
self.assertIsInstance(abstract_utils.get_atomic_value(result),
abstract.Unsolvable)
self.assertRegexpMatches(
str(self._vm.errorlog),
r"foo.*isinstance.*\[wrong-keyword-args\]")
def test_is_instance(self):
def check(expected, left, right):
self.assertEqual(expected, self._is_instance._is_instance(left, right))
# Unknown and Unsolvable are ambiguous.
check(None, abstract.Unknown(self._vm), self._obj_class)
check(None, abstract.Unsolvable(self._vm), self._obj_class)
# If the object's class has multiple bindings, result is ambiguous.
obj = abstract.SimpleAbstractValue("foo", self._vm)
check(None, obj, self._obj_class)
obj.set_class(self._node, self.new_var(
self._str_class, self._int_class))
check(None, obj, self._str_class)
# If the class_spec is not a class, result is ambiguous.
check(None, self._str, self._str)
# Result is True/False depending on if the class is in the object's mro.
check(True, self._str, self._obj_class)
check(True, self._str, self._str_class)
check(False, self._str, self._int_class)
def test_flatten(self):
def maybe_var(v):
return v if isinstance(v, cfg.Variable) else self.new_var(v)
def new_tuple(*args):
pyval = tuple(maybe_var(a) for a in args)
return self._vm.convert.tuple_to_value(pyval)
def check(expected_ambiguous, expected_classes, value):
classes = []
ambiguous = special_builtins._flatten(value, classes)
self.assertEqual(expected_ambiguous, ambiguous)
self.assertEqual(expected_classes, classes)
unknown = abstract.Unknown(self._vm)
# Simple values.
check(False, [self._str_class], self._str_class)
check(True, [], self._str)
check(True, [], unknown)
# (str, int)
check(False, [self._str_class, self._int_class],
new_tuple(self._str_class, self._int_class))
# (str, ?, int)
check(True, [self._str_class, self._int_class],
new_tuple(self._str_class, unknown, self._int_class))
# (str, (int, object))
check(False, [self._str_class, self._int_class, self._obj_class],
new_tuple(
self._str_class,
new_tuple(self._int_class, self._obj_class)))
# (str, (?, object))
check(True, [self._str_class, self._obj_class],
new_tuple(
self._str_class,
new_tuple(unknown, self._obj_class)))
# A variable with multiple bindings is ambiguous.
# (str, int | object)
check(True, [self._str_class],
new_tuple(self._str_class,
self.new_var(self._int_class, self._obj_class)))
class PyTDTest(AbstractTestBase):
"""Tests for abstract -> pytd type conversions."""
def test_metaclass(self):
cls = abstract.InterpreterClass("X", [], {}, None, self._vm)
meta = abstract.InterpreterClass("M", [], {}, None, self._vm)
meta.official_name = "M"
cls.cls = meta
pytd_cls = cls.to_pytd_def(self._vm.root_cfg_node, "X")
self.assertEqual(pytd_cls.metaclass, pytd.NamedType("M"))
def test_inherited_metaclass(self):
parent = abstract.InterpreterClass("X", [], {}, None, self._vm)
parent.official_name = "X"
meta = abstract.InterpreterClass("M", [], {}, None, self._vm)
meta.official_name = "M"
parent.cls = meta
child = abstract.InterpreterClass(
"Y", [parent.to_variable(self._vm.root_cfg_node)], {}, None, self._vm)
self.assertIs(child.cls, parent.cls)
pytd_cls = child.to_pytd_def(self._vm.root_cfg_node, "Y")
self.assertIs(pytd_cls.metaclass, None)
def test_metaclass_union(self):
cls = abstract.InterpreterClass("X", [], {}, None, self._vm)
meta1 = abstract.InterpreterClass("M1", [], {}, None, self._vm)
meta2 = abstract.InterpreterClass("M2", [], {}, None, self._vm)
meta1.official_name = "M1"
meta2.official_name = "M2"
cls.cls = abstract.Union([meta1, meta2], self._vm)
pytd_cls = cls.to_pytd_def(self._vm.root_cfg_node, "X")
self.assertEqual(pytd_cls.metaclass, pytd.UnionType(
(pytd.NamedType("M1"), pytd.NamedType("M2"))))
def test_to_type_with_view1(self):
# to_type(<instance of List[int or unsolvable]>, view={T: int})
instance = abstract.List([], self._vm)
instance.merge_instance_type_parameter(
self._vm.root_cfg_node, abstract_utils.T, self._vm.program.NewVariable(
[self._vm.convert.unsolvable], [], self._vm.root_cfg_node))
param_binding = instance.get_instance_type_parameter(
abstract_utils.T).AddBinding(
self._vm.convert.primitive_class_instances[int], [],
self._vm.root_cfg_node)
view = {
instance.get_instance_type_parameter(abstract_utils.T): param_binding}
pytd_type = instance.to_type(self._vm.root_cfg_node, seen=None, view=view)
self.assertEqual("__builtin__.list", pytd_type.base_type.name)
self.assertSetEqual({"__builtin__.int"},
{t.name for t in pytd_type.parameters})
def test_to_type_with_view2(self):
# to_type(<tuple (int or str,)>, view={0: str})
param1 = self._vm.convert.primitive_class_instances[int]
param2 = self._vm.convert.primitive_class_instances[str]
param_var = param1.to_variable(self._vm.root_cfg_node)
str_binding = param_var.AddBinding(param2, [], self._vm.root_cfg_node)
instance = abstract.Tuple((param_var,), self._vm)
view = {param_var: str_binding}
pytd_type = instance.to_type(self._vm.root_cfg_node, seen=None, view=view)
self.assertEqual(pytd_type.parameters[0],
pytd.NamedType("__builtin__.str"))
def test_to_type_with_view_and_empty_param(self):
instance = abstract.List([], self._vm)
pytd_type = instance.to_type(self._vm.root_cfg_node, seen=None, view={})
self.assertEqual("__builtin__.list", pytd_type.base_type.name)
self.assertSequenceEqual((pytd.NothingType(),), pytd_type.parameters)
def test_typing_container(self):
cls = self._vm.convert.list_type
container = abstract.AnnotationContainer("List", self._vm, cls)
expected = pytd.GenericType(pytd.NamedType("__builtin__.list"),
(pytd.AnythingType(),))
actual = container.get_instance_type(self._vm.root_cfg_node)
self.assertEqual(expected, actual)
# TODO(rechen): Test InterpreterFunction.
class FunctionTest(AbstractTestBase):
def _make_pytd_function(self, params, name="f"):
pytd_params = []
for i, p in enumerate(params):
p_type = pytd.ClassType(p.name)
p_type.cls = p
pytd_params.append(
pytd.Parameter(function.argname(i), p_type, False, False, None))
pytd_sig = pytd.Signature(
tuple(pytd_params), None, None, pytd.AnythingType(), (), ())
sig = function.PyTDSignature(name, pytd_sig, self._vm)
return abstract.PyTDFunction(name, (sig,), pytd.METHOD, self._vm)
def _call_pytd_function(self, f, args):
b = f.to_binding(self._vm.root_cfg_node)
return f.call(self._vm.root_cfg_node, b, function.Args(posargs=args))
def test_call_with_empty_arg(self):
self.assertRaises(AssertionError, self._call_pytd_function,
self._make_pytd_function(params=()),
(self._vm.program.NewVariable(),))
def test_call_with_bad_arg(self):
f = self._make_pytd_function(
(self._vm.lookup_builtin("__builtin__.str"),))
arg = self._vm.convert.primitive_class_instances[int].to_variable(
self._vm.root_cfg_node)
self.assertRaises(
function.WrongArgTypes, self._call_pytd_function, f, (arg,))
def test_simple_call(self):
f = self._make_pytd_function(
(self._vm.lookup_builtin("__builtin__.str"),))
arg = self._vm.convert.primitive_class_instances[str].to_variable(
self._vm.root_cfg_node)
node, ret = self._call_pytd_function(f, (arg,))
self.assertIs(node, self._vm.root_cfg_node)
retval, = ret.bindings
self.assertIs(retval.data, self._vm.convert.unsolvable)
def test_call_with_multiple_arg_bindings(self):
f = self._make_pytd_function(
(self._vm.lookup_builtin("__builtin__.str"),))
arg = self._vm.program.NewVariable()
arg.AddBinding(self._vm.convert.primitive_class_instances[str], [],
self._vm.root_cfg_node)
arg.AddBinding(self._vm.convert.primitive_class_instances[int], [],
self._vm.root_cfg_node)
node, ret = self._call_pytd_function(f, (arg,))
self.assertIs(node, self._vm.root_cfg_node)
retval, = ret.bindings
self.assertIs(retval.data, self._vm.convert.unsolvable)
def test_call_with_skipped_combination(self):
f = self._make_pytd_function(
(self._vm.lookup_builtin("__builtin__.str"),))
node = self._vm.root_cfg_node.ConnectNew()
arg = self._vm.convert.primitive_class_instances[str].to_variable(node)
node, ret = self._call_pytd_function(f, (arg,))
self.assertIs(node, self._vm.root_cfg_node)
self.assertFalse(ret.bindings)
def test_signature_from_pytd(self):
# def f(self: Any, *args: Any)
self_param = pytd.Parameter("self", pytd.AnythingType(), False, False, None)
args_param = pytd.Parameter("args", pytd.AnythingType(), False, True, None)
sig = function.Signature.from_pytd(
self._vm, "f", pytd.Signature(
(self_param,), args_param, None, pytd.AnythingType(), (), ()))
self.assertEqual(repr(sig), "def f(self: Any, *args: Any) -> Any")
self.assertEqual(sig.name, "f")
self.assertSequenceEqual(sig.param_names, ("self",))
self.assertEqual(sig.varargs_name, "args")
self.assertFalse(sig.kwonly_params)
self.assertIs(sig.kwargs_name, None)
self.assertSetEqual(set(sig.annotations), {"self", "args", "return"})
self.assertFalse(sig.late_annotations)
self.assertTrue(sig.has_return_annotation)
self.assertTrue(sig.has_param_annotations)
def test_signature_from_callable(self):
# Callable[[int, str], Any]
params = {0: self._vm.convert.int_type, 1: self._vm.convert.str_type}
params[abstract_utils.ARGS] = abstract.Union(
(params[0], params[1]), self._vm)
params[abstract_utils.RET] = self._vm.convert.unsolvable
callable_val = abstract.Callable(
self._vm.convert.function_type, params, self._vm)
sig = function.Signature.from_callable(callable_val)
self.assertEqual(repr(sig), "def <callable>(_0: int, _1: str) -> Any")
self.assertEqual(sig.name, "<callable>")
self.assertSequenceEqual(sig.param_names, ("_0", "_1"))
self.assertIs(sig.varargs_name, None)
self.assertFalse(sig.kwonly_params)
self.assertIs(sig.kwargs_name, None)
six.assertCountEqual(self, sig.annotations.keys(), sig.param_names)
self.assertFalse(sig.late_annotations)
self.assertFalse(sig.has_return_annotation)
self.assertTrue(sig.has_param_annotations)
def test_signature_annotations(self):
# def f(self: Any, *args: Any)
self_param = pytd.Parameter("self", pytd.AnythingType(), False, False, None)
# Imitate the parser's conversion of '*args: Any' to '*args: Tuple[Any]'.
tup = pytd.ClassType("__builtin__.tuple")
tup.cls = self._vm.convert.tuple_type.pytd_cls
any_tuple = pytd.GenericType(tup, (pytd.AnythingType(),))
args_param = pytd.Parameter("args", any_tuple, False, True, None)
sig = function.Signature.from_pytd(
self._vm, "f", pytd.Signature(
(self_param,), args_param, None, pytd.AnythingType(), (), ()))
self.assertEqual(repr(sig),
"def f(self: Any, *args: Tuple[Any, ...]) -> Any")
self.assertIs(sig.annotations["self"], self._vm.convert.unsolvable)
args_type = sig.annotations["args"]
self.assertIsInstance(args_type, abstract.ParameterizedClass)
self.assertIs(args_type.base_cls, self._vm.convert.tuple_type)
self.assertListEqual(list(args_type.formal_type_parameters.items()),
[(abstract_utils.T, self._vm.convert.unsolvable)])
self.assertIs(sig.drop_first_parameter().annotations["args"], args_type)
def test_signature_annotations_existence(self):
# def f(v: "X") -> "Y"
sig = function.Signature(
name="f",
param_names=("v",),
varargs_name=None,
kwonly_params=(),
kwargs_name=None,
defaults={},
annotations={},
late_annotations={
"v": annotations_util.LateAnnotation("X", "v", None),
"return": annotations_util.LateAnnotation("Y", "return", None)
}
)
self.assertEqual(repr(sig), "def f(v: 'X') -> 'Y'")
self.assertFalse(sig.has_param_annotations)
self.assertFalse(sig.has_return_annotation)
sig.set_annotation("v", self._vm.convert.unsolvable)
self.assertTrue(sig.has_param_annotations)
self.assertFalse(sig.has_return_annotation)
sig.set_annotation("return", self._vm.convert.unsolvable)
self.assertTrue(sig.has_param_annotations)
self.assertTrue(sig.has_return_annotation)
def test_signature_posarg_only_param_count(self):
# def f(x): ...
sig = function.Signature(
name="f",
param_names=("x",),
varargs_name=None,
kwonly_params=(),
kwargs_name=None,
defaults={},
annotations={},
late_annotations={},
)
self.assertEqual(repr(sig), "def f(x) -> Any")
self.assertEqual(sig.mandatory_param_count(), 1)
self.assertEqual(sig.maximum_param_count(), 1)
def test_signature_posarg_and_kwarg_param_count(self):
# def f(x, y=None): ...
sig = function.Signature(
name="f",
param_names=("x", "y",),
varargs_name=None,
kwonly_params=(),
kwargs_name=None,
defaults={"y": self._vm.convert.none_type.to_variable(self._node)},
annotations={},
late_annotations={},
)
self.assertEqual(repr(sig), "def f(x, y = None) -> Any")
self.assertEqual(sig.mandatory_param_count(), 1)
self.assertEqual(sig.maximum_param_count(), 2)
def test_signature_varargs_param_count(self):
# def f(*args): ...
sig = function.Signature(
name="f",
param_names=(),
varargs_name="args",
kwonly_params=(),
kwargs_name=None,
defaults={},
annotations={},
late_annotations={},
)
self.assertEqual(repr(sig), "def f(*args) -> Any")
self.assertEqual(sig.mandatory_param_count(), 0)
self.assertIsNone(sig.maximum_param_count())
def test_signature_kwargs_param_count(self):
# def f(**kwargs): ...
sig = function.Signature(
name="f",
param_names=(),
varargs_name=None,
kwonly_params=(),
kwargs_name="kwargs",
defaults={},
annotations={},
late_annotations={},
)
self.assertEqual(repr(sig), "def f(**kwargs) -> Any")
self.assertEqual(sig.mandatory_param_count(), 0)
self.assertIsNone(sig.maximum_param_count())
def test_signature_kwonly_param_count(self):
# def f(*, y=None): ...
sig = function.Signature(
name="f",
param_names=(),
varargs_name=None,
kwonly_params=("y",),
kwargs_name=None,
defaults={"y": self._vm.convert.none_type.to_variable(self._node)},
annotations={},
late_annotations={},
)
self.assertEqual(repr(sig), "def f(*, y = None) -> Any")
self.assertEqual(sig.mandatory_param_count(), 0)
self.assertEqual(sig.maximum_param_count(), 1)
def test_signature_has_param(self):
# def f(x, *args, y, **kwargs): ...
sig = function.Signature(
name="f",
param_names=("x",),
varargs_name="args",
kwonly_params={"y"},
kwargs_name="kwargs",
defaults={},
annotations={},
late_annotations={},
)
self.assertEqual(repr(sig), "def f(x, *args, y, **kwargs) -> Any")
for param in ("x", "args", "y", "kwargs"):
self.assertTrue(sig.has_param(param))
self.assertFalse(sig.has_param("rumpelstiltskin"))
def test_signature_insert_varargs_and_kwargs(self):
# def f(x, *args, y, **kwargs): ...
sig = function.Signature(
name="f",
param_names=("x",),
varargs_name="args",
kwonly_params={"y"},
kwargs_name="kwargs",
defaults={},
annotations={},
late_annotations={},
)
# f(1, 2, y=3, z=4)
int_inst = self._vm.convert.primitive_class_instances[int]
int_binding = int_inst.to_binding(self._node)
arg_dict = {
"x": int_binding, "_1": int_binding, "y": int_binding, "z": int_binding}
sig = sig.insert_varargs_and_kwargs(arg_dict)
self.assertEqual(sig.name, "f")
self.assertSequenceEqual(sig.param_names, ("x", "_1", "z"))
self.assertEqual(sig.varargs_name, "args")
self.assertSetEqual(sig.kwonly_params, {"y"})
self.assertEqual(sig.kwargs_name, "kwargs")
self.assertFalse(sig.annotations)
self.assertFalse(sig.late_annotations)
def test_signature_del_param_annotation(self):
# def f(x) -> int: ...
sig = function.Signature(
name="f",
param_names=("x",),
varargs_name=None,
kwonly_params=(),
kwargs_name=None,
defaults={},
annotations={"x": self._vm.convert.unsolvable,
"return": self._vm.convert.unsolvable},
late_annotations={}
)
sig.del_annotation("x")
six.assertCountEqual(self, sig.annotations.keys(), {"return"})
self.assertFalse(sig.has_param_annotations)
self.assertTrue(sig.has_return_annotation)
def test_signature_del_return_annotation(self):
# def f(x) -> int: ...
sig = function.Signature(
name="f",
param_names=("x",),
varargs_name=None,
kwonly_params=(),
kwargs_name=None,
defaults={},
annotations={"x": self._vm.convert.unsolvable,
"return": self._vm.convert.unsolvable},
late_annotations={}
)
sig.del_annotation("return")
six.assertCountEqual(self, sig.annotations.keys(), {"x"})
self.assertTrue(sig.has_param_annotations)
self.assertFalse(sig.has_return_annotation)
def test_signature_del_nonexistent_annotation(self):
# def f(): ...
sig = function.Signature(
name="f",
param_names=(),
varargs_name=None,
kwonly_params=(),
kwargs_name=None,
defaults={},
annotations={},
late_annotations={}
)
self.assertRaises(KeyError, sig.del_annotation, "rumpelstiltskin")
def test_constructor_args(self):
f = abstract.PyTDFunction.make("open", self._vm, "__builtin__")
self.assertEqual(f.name, "__builtin__.open")
six.assertCountEqual(
self,
{sig.pytd_sig for sig in f.signatures},
self._vm.lookup_builtin("__builtin__.open").signatures)
self.assertIs(f.kind, pytd.METHOD)
self.assertIs(f.vm, self._vm)
def test_constructor_args_pyval(self):
sig = pytd.Signature((), None, None, pytd.AnythingType(), (), ())
pyval = pytd.Function("blah", (sig,), pytd.STATICMETHOD, 0)
f = abstract.PyTDFunction.make("open", self._vm, "__builtin__", pyval=pyval)
self.assertEqual(f.name, "__builtin__.open")
f_sig, = f.signatures
self.assertIs(f_sig.pytd_sig, sig)
self.assertIs(f.kind, pytd.STATICMETHOD)
self.assertIs(f.vm, self._vm)
def test_get_constructor_args(self):
f = abstract.PyTDFunction.make(
"TypeVar", self._vm, "typing", pyval_name="_typevar_new")
self.assertEqual(f.name, "typing.TypeVar")
six.assertCountEqual(
self,
{sig.pytd_sig for sig in f.signatures},
self._vm.loader.import_name("typing").Lookup(
"typing._typevar_new").signatures)
self.assertIs(f.kind, pytd.METHOD)
self.assertIs(f.vm, self._vm)
def test_bound_function_repr(self):
f = self._make_pytd_function(params=())
callself = self._vm.program.NewVariable(
[abstract.AtomicAbstractValue(name, self._vm)
for name in ("test1", "test2")], [], self._vm.root_cfg_node)
bound = abstract.BoundFunction(callself, f)
six.assertCountEqual(self, bound.repr_names(), ["test1.f", "test2.f"])
six.assertRegex(self, repr(bound), r"test(1|2)\.f")
def test_bound_function_callself_repr(self):
f = self._make_pytd_function(params=())
callself = self._vm.program.NewVariable(
[abstract.AtomicAbstractValue("test", self._vm)],
[], self._vm.root_cfg_node)
bound = abstract.BoundFunction(callself, f)
callself_repr = lambda v: v.name + "foo"
six.assertCountEqual(self, bound.repr_names(callself_repr), ["testfoo.f"])
def test_bound_function_nested_repr(self):
f = self._make_pytd_function(params=())
callself1 = self._vm.program.NewVariable(
[abstract.AtomicAbstractValue("test1", self._vm)],
[], self._vm.root_cfg_node)
bound1 = abstract.BoundFunction(callself1, f)
callself2 = self._vm.program.NewVariable(
[abstract.AtomicAbstractValue("test2", self._vm)],
[], self._vm.root_cfg_node)
bound2 = abstract.BoundFunction(callself2, bound1)
# `bound2` is BoundFunction(test2, BoundFunction(test1, f))
six.assertCountEqual(self, bound2.repr_names(), ["test2.f"])
def test_bound_function_repr_no_callself(self):
f = self._make_pytd_function(params=())
callself = self._vm.program.NewVariable()
bound = abstract.BoundFunction(callself, f)
six.assertCountEqual(self, bound.repr_names(), ["<class>.f"])
def test_bound_function_repr_replace_parent(self):
f = self._make_pytd_function(params=(), name="foo.f")
callself = self._vm.program.NewVariable(
[abstract.AtomicAbstractValue("test", self._vm)],
[], self._vm.root_cfg_node)
bound = abstract.BoundFunction(callself, f)
six.assertCountEqual(self, bound.repr_names(), ["test.f"])
class AbstractMethodsTest(AbstractTestBase):
def test_abstract_method(self):
func = abstract.Function("f", self._vm).to_variable(self._vm.root_cfg_node)
func.data[0].is_abstract = True
cls = abstract.InterpreterClass("X", [], {"f": func}, None, self._vm)
six.assertCountEqual(self, cls.abstract_methods, {"f"})
def test_inherited_abstract_method(self):
sized_pytd = self._vm.loader.typing.Lookup("typing.Sized")
sized = self._vm.convert.constant_to_value(
sized_pytd, {}, self._vm.root_cfg_node)
cls = abstract.InterpreterClass(
"X", [sized.to_variable(self._vm.root_cfg_node)], {}, None, self._vm)
six.assertCountEqual(self, cls.abstract_methods, {"__len__"})
def test_overridden_abstract_method(self):
sized_pytd = self._vm.loader.typing.Lookup("typing.Sized")
sized = self._vm.convert.constant_to_value(
sized_pytd, {}, self._vm.root_cfg_node)
bases = [sized.to_variable(self._vm.root_cfg_node)]
members = {"__len__":
self._vm.convert.create_new_unsolvable(self._vm.root_cfg_node)}
cls = abstract.InterpreterClass("X", bases, members, None, self._vm)
self.assertFalse(cls.abstract_methods)
def test_overridden_abstract_method_still_abstract(self):
sized_pytd = self._vm.loader.typing.Lookup("typing.Sized")
sized = self._vm.convert.constant_to_value(
sized_pytd, {}, self._vm.root_cfg_node)
bases = [sized.to_variable(self._vm.root_cfg_node)]
func = abstract.Function("__len__", self._vm)
func.is_abstract = True
members = {"__len__": func.to_variable(self._vm.root_cfg_node)}
cls = abstract.InterpreterClass("X", bases, members, None, self._vm)
six.assertCountEqual(self, cls.abstract_methods, {"__len__"})
class SimpleFunctionTest(AbstractTestBase):
def _make_func(self, name="_", param_names=None, varargs_name=None,
kwonly_params=(), kwargs_name=None, defaults=(),
annotations=None, late_annotations=None):
return abstract.SimpleFunction(name, param_names or (), varargs_name,
kwonly_params, kwargs_name, defaults,
annotations or {}, late_annotations or {},
self._vm)
def _simple_sig(self, param_types, ret_type=None):
annots = {("_%d" % i): t for i, t in enumerate(param_types)}
params = tuple(annots.keys())
if ret_type:
annots["return"] = ret_type
return self._make_func(param_names=params, annotations=annots)
def test_simple_call(self):
f = self._simple_sig([self._vm.convert.str_type],
ret_type=self._vm.convert.int_type)
args = function.Args(
(self._vm.convert.build_string(self._vm.root_cfg_node, "hello"),))
node, ret = f.call(self._vm.root_cfg_node, f, args)
self.assertIs(node, self._vm.root_cfg_node)
ret_val, = ret.data
self.assertEqual(ret_val.cls, self._vm.convert.int_type)
def test_call_with_bad_arg(self):
f = self._make_func(param_names=("test",),
annotations={"test": self._vm.convert.str_type})
args = function.Args((self._vm.convert.build_int(self._vm.root_cfg_node),))
self.assertRaises(function.WrongArgTypes, f.call,
self._vm.root_cfg_node, f, args)
def test_call_with_no_args(self):
f = self._simple_sig([self._vm.convert.str_type, self._vm.convert.int_type])
args = function.Args(())
self.assertRaises(function.MissingParameter, f.call,
self._vm.root_cfg_node, f, args)
def test_call_with_multiple_arg_bindings(self):
f = self._simple_sig([self._vm.convert.str_type])
arg = self._vm.program.NewVariable()
arg.AddBinding(self._vm.convert.primitive_class_instances[str], [],
self._vm.root_cfg_node)
arg.AddBinding(self._vm.convert.primitive_class_instances[int], [],
self._vm.root_cfg_node)
args = function.Args((arg,))
node, ret = f.call(self._vm.root_cfg_node, f, args)
self.assertIs(node, self._vm.root_cfg_node)
self.assertIs(ret.data[0], self._vm.convert.none)
def test_call_with_varargs(self):
f = self._make_func(
varargs_name="arg",
annotations={"arg": self._vm.convert.str_type,
"return": self._vm.convert.str_type}
)
starargs = abstract.Tuple(
(self._vm.convert.build_string(self._vm.root_cfg_node, ""),),
self._vm).to_variable(self._vm.root_cfg_node)
args = function.Args(posargs=(), starargs=starargs)
node, ret = f.call(self._vm.root_cfg_node, f, args)
self.assertIs(node, self._vm.root_cfg_node)
self.assertIs(ret.data[0].cls, self._vm.convert.str_type)
def test_call_with_bad_varargs(self):
f = self._make_func(
varargs_name="arg",
annotations={"arg": self._vm.convert.str_type})
starargs = abstract.Tuple(
(self._vm.convert.build_string(self._vm.root_cfg_node, ""),
self._vm.convert.build_int(self._vm.root_cfg_node)),
self._vm
).to_variable(self._vm.root_cfg_node)
args = function.Args(posargs=(), starargs=starargs)
self.assertRaises(function.WrongArgTypes, f.call,
self._vm.root_cfg_node, f, args)
def test_call_with_multiple_varargs_bindings(self):
f = self._make_func(
varargs_name="arg",
annotations={"arg": self._vm.convert.str_type})
arg = self._vm.program.NewVariable()
arg.AddBinding(self._vm.convert.primitive_class_instances[str], [],
self._vm.root_cfg_node)
arg.AddBinding(self._vm.convert.primitive_class_instances[int], [],
self._vm.root_cfg_node)
starargs = abstract.Tuple((arg,), self._vm)
starargs = starargs.to_variable(self._vm.root_cfg_node)
args = function.Args(posargs=(), starargs=starargs)
f.call(self._vm.root_cfg_node, f, args)
def test_call_with_kwargs(self):
f = self._make_func(
kwargs_name="kwarg",
annotations={"kwarg": self._vm.convert.str_type})
kwargs = abstract.Dict(self._vm)
kwargs.update(
self._vm.root_cfg_node,
{
"_1": self._vm.convert.build_string(self._vm.root_cfg_node, "1"),
"_2": self._vm.convert.build_string(self._vm.root_cfg_node, "2")
})
kwargs = kwargs.to_variable(self._vm.root_cfg_node)
args = function.Args(
posargs=(),
namedargs=abstract.Dict(self._vm),
starstarargs=kwargs
)
f.call(self._vm.root_cfg_node, f, args)
def test_call_with_bad_kwargs(self):
f = self._make_func(
kwargs_name="kwarg",
annotations={"kwarg": self._vm.convert.str_type})
kwargs = abstract.Dict(self._vm)
kwargs.update(self._vm.root_cfg_node,
{"_1": self._vm.convert.build_int(self._vm.root_cfg_node)})
kwargs = kwargs.to_variable(self._vm.root_cfg_node)
args = function.Args(
posargs=(),
namedargs=abstract.Dict(self._vm),
starstarargs=kwargs
)
self.assertRaises(function.WrongArgTypes, f.call,
self._vm.root_cfg_node, f, args)
def test_call_with_kwonly_args(self):
f = self._make_func(
param_names=("test",),
kwonly_params=("a", "b"),
annotations={
"test": self._vm.convert.str_type,
"a": self._vm.convert.str_type,
"b": self._vm.convert.str_type
}
)
kwargs = abstract.Dict(self._vm)
kwargs.update(
self._vm.root_cfg_node,
{
"a": self._vm.convert.build_string(self._vm.root_cfg_node, "2"),
"b": self._vm.convert.build_string(self._vm.root_cfg_node, "3")
}
)
kwargs = kwargs.to_variable(self._vm.root_cfg_node)
args = function.Args(
posargs=(self._vm.convert.build_string(self._vm.root_cfg_node, "1"),),
namedargs=abstract.Dict(self._vm),
starstarargs=kwargs
)
f.call(self._vm.root_cfg_node, f, args)
kwargs = abstract.Dict(self._vm)
kwargs.update(
self._vm.root_cfg_node,
{"b": self._vm.convert.build_string(self._vm.root_cfg_node, "3")}
)
kwargs = kwargs.to_variable(self._vm.root_cfg_node)
args = function.Args(
posargs=(self._vm.convert.build_string(self._vm.root_cfg_node, "1"),
self._vm.convert.build_int(self._vm.root_cfg_node)),
namedargs=abstract.Dict(self._vm),
starstarargs=kwargs
)
self.assertRaises(function.MissingParameter, f.call,
self._vm.root_cfg_node, f, args)
def test_call_with_all_args(self):
f = self._make_func(
param_names=("a", "b", "c"),
varargs_name="arg",
kwargs_name="kwarg",
defaults=(self._vm.convert.build_int(self._vm.root_cfg_node),),
annotations={
"a": self._vm.convert.str_type,
"b": self._vm.convert.int_type,
"c": self._vm.convert.int_type,
"arg": self._vm.convert.primitive_classes[float],
"kwarg": self._vm.convert.primitive_classes[bool]
}
)
posargs = (self._vm.convert.build_string(self._vm.root_cfg_node, "1"),
self._vm.convert.build_int(self._vm.root_cfg_node))
float_inst = self._vm.convert.primitive_class_instances[float]
stararg = abstract.Tuple((float_inst.to_variable(self._vm.root_cfg_node),),
self._vm).to_variable(self._vm.root_cfg_node)
namedargs = abstract.Dict(self._vm)
kwarg = abstract.Dict(self._vm)
kwarg.update(self._vm.root_cfg_node,
{"x": self._vm.convert.build_bool(self._vm.root_cfg_node),
"y": self._vm.convert.build_bool(self._vm.root_cfg_node)})
kwarg = kwarg.to_variable(self._vm.root_cfg_node)
args = function.Args(posargs, namedargs, stararg, kwarg)
f.call(self._vm.root_cfg_node, f, args)
def test_call_with_defaults(self):
f = self._make_func(
param_names=("a", "b", "c"),
defaults=(self._vm.convert.build_int(self._vm.root_cfg_node),),
annotations={
"a": self._vm.convert.int_type,
"b": self._vm.convert.int_type,
"c": self._vm.convert.int_type
}
)
args = function.Args(
posargs=(self._vm.convert.build_int(self._vm.root_cfg_node),
self._vm.convert.build_int(self._vm.root_cfg_node))
)
f.call(self._vm.root_cfg_node, f, args)
args = function.Args(
posargs=(self._vm.convert.build_int(self._vm.root_cfg_node),
self._vm.convert.build_int(self._vm.root_cfg_node),
self._vm.convert.build_int(self._vm.root_cfg_node))
)
f.call(self._vm.root_cfg_node, f, args)
args = function.Args(
posargs=(self._vm.convert.build_int(self._vm.root_cfg_node),))
self.assertRaises(
function.MissingParameter, f.call, self._vm.root_cfg_node, f, args)
def test_call_with_bad_default(self):
f = self._make_func(
param_names=("a", "b"),
defaults=(self._vm.convert.build_string(self._vm.root_cfg_node, ""),),
annotations={
"a": self._vm.convert.int_type,
"b": self._vm.convert.str_type
}
)
args = function.Args(
posargs=(self._vm.convert.build_int(self._vm.root_cfg_node),
self._vm.convert.build_int(self._vm.root_cfg_node))
)
self.assertRaises(
function.WrongArgTypes, f.call, self._vm.root_cfg_node, f, args)
def test_call_with_duplicate_keyword(self):
f = self._simple_sig([self._vm.convert.int_type]*2)
args = function.Args(
posargs=(self._vm.convert.build_int(self._vm.root_cfg_node),
self._vm.convert.build_int(self._vm.root_cfg_node)),
namedargs={"_1": self._vm.convert.build_int(self._vm.root_cfg_node)}
)
self.assertRaises(
function.DuplicateKeyword, f.call, self._vm.root_cfg_node, f, args)
def test_call_with_wrong_arg_count(self):
f = self._simple_sig([self._vm.convert.int_type])
args = function.Args(
posargs=(self._vm.convert.build_int(self._vm.root_cfg_node),
self._vm.convert.build_int(self._vm.root_cfg_node))
)
self.assertRaises(
function.WrongArgCount, f.call, self._vm.root_cfg_node, f, args)
def test_change_defaults(self):
f = self._make_func(
param_names=("a", "b", "c"),
defaults=(self._vm.convert.build_int(self._vm.root_cfg_node),)
)
args = function.Args(
posargs=(self._vm.convert.build_int(self._vm.root_cfg_node),
self._vm.convert.build_int(self._vm.root_cfg_node))
)
f.call(self._vm.root_cfg_node, f, args)
new_defaults = abstract.Tuple(
(self._vm.convert.build_int(self._vm.root_cfg_node),
self._vm.convert.build_int(self._vm.root_cfg_node)),
self._vm).to_variable(self._vm.root_cfg_node)
f.set_function_defaults(new_defaults)
f.call(self._vm.root_cfg_node, f, args)
args = function.Args(
posargs=(self._vm.convert.build_int(self._vm.root_cfg_node),)
)
f.call(self._vm.root_cfg_node, f, args)
def test_call_with_type_parameter(self):
ret_cls = abstract.ParameterizedClass(
self._vm.convert.list_type,
{abstract_utils.T: abstract.TypeParameter(abstract_utils.T, self._vm)},
self._vm
)
f = self._make_func(
param_names=("test",),
annotations={
"test": abstract.TypeParameter(abstract_utils.T, self._vm),
"return": ret_cls
}
)
args = function.Args(
posargs=(self._vm.convert.build_int(self._vm.root_cfg_node),))
_, ret = f.call(self._vm.root_cfg_node, f, args)
# ret is an Instance(ParameterizedClass(list, {abstract_utils.T: int}))
# but we really only care about T.
self.assertIs(ret.data[0].cls.formal_type_parameters[abstract_utils.T],
self._vm.convert.int_type)
def test_signature_func_output_basic(self):
node = self._vm.root_cfg_node
f = self._make_func(name="basic", param_names=("a", "b"))
fp = self._vm.convert.pytd_convert.value_to_pytd_def(node, f, f.name)
self.assertEqual(pytd.Print(fp), "def basic(a, b) -> None: ...")
def test_signature_func_output_annotations(self):
node = self._vm.root_cfg_node
f = self._make_func(
name="annots",
param_names=("a", "b"),
annotations={
"a": self._vm.convert.int_type,
"b": self._vm.convert.str_type,
"return": self._vm.convert.int_type
}
)
fp = self._vm.convert.pytd_convert.value_to_pytd_def(node, f, f.name)
self.assertEqual(pytd.Print(fp), "def annots(a: int, b: str) -> int: ...")
def test_signature_func_output(self):
node = self._vm.root_cfg_node
dict_type = abstract.ParameterizedClass(
self._vm.convert.dict_type,
{abstract_utils.K: self._vm.convert.str_type,
abstract_utils.V: self._vm.convert.int_type},
self._vm)
f = self._make_func(
name="test",
param_names=("a", "b"),
varargs_name="c",
kwonly_params=("d", "e"),
kwargs_name="f",
defaults={
"b": self._vm.convert.build_int(node),
"d": self._vm.convert.build_int(node)
},
annotations={
"a": self._vm.convert.str_type,
"b": self._vm.convert.int_type,
"c": self._vm.convert.str_type,
"d": dict_type,
"e": self._vm.convert.int_type,
"f": self._vm.convert.str_type,
"return": self._vm.convert.str_type
}
)
fp = self._vm.convert.pytd_convert.value_to_pytd_def(node, f, f.name)
f_str = ("def test(a: str, b: int = ..., *c: str, d: Dict[str, int] = ...,"
" e: int, **f: str) -> str: ...")
self.assertEqual(pytd.Print(fp), f_str)
class AbstractTest(AbstractTestBase):
def test_interpreter_class_official_name(self):
cls = abstract.InterpreterClass("X", [], {}, None, self._vm)
cls.update_official_name("Z")
self.assertEqual(cls.official_name, "Z")
cls.update_official_name("A") # takes effect because A < Z
self.assertEqual(cls.official_name, "A")
cls.update_official_name("Z") # no effect
self.assertEqual(cls.official_name, "A")
cls.update_official_name("X") # takes effect because X == cls.name
self.assertEqual(cls.official_name, "X")
cls.update_official_name("A") # no effect
self.assertEqual(cls.official_name, "X")
def test_type_parameter_official_name(self):
param = abstract.TypeParameter("T", self._vm)
self._vm.frame = frame_state.SimpleFrame() # for error logging
param.update_official_name("T")
self.assertFalse(self._vm.errorlog.has_error())
param.update_official_name("Q")
self.assertTrue(self._vm.errorlog.has_error())
def test_type_parameter_equality(self):
param1 = abstract.TypeParameter("S", self._vm)
param2 = abstract.TypeParameter("T", self._vm)
cls = abstract.InterpreterClass("S", [], {}, None, self._vm)
self.assertEqual(param1, param1)
self.assertNotEqual(param1, param2)
self.assertNotEqual(param1, cls)
def test_union_equality(self):
union1 = abstract.Union((self._vm.convert.unsolvable,), self._vm)
union2 = abstract.Union((self._vm.convert.none,), self._vm)
cls = abstract.InterpreterClass("Union", [], {}, None, self._vm)
self.assertEqual(union1, union1)
self.assertNotEqual(union1, union2)
self.assertNotEqual(union1, cls)
def test_instantiate_type_parameter_type(self):
params = {
abstract_utils.T: abstract.TypeParameter(abstract_utils.T, self._vm)}
cls = abstract.ParameterizedClass(
self._vm.convert.type_type, params, self._vm)
self.assertListEqual(cls.instantiate(self._node).data,
[self._vm.convert.unsolvable])
def test_super_type(self):
supercls = special_builtins.Super(self._vm)
self.assertEqual(supercls.get_class(), self._vm.convert.type_type)
def test_instantiate_interpreter_class(self):
cls = abstract.InterpreterClass("X", [], {}, None, self._vm)
# When there is no current frame, create a new instance every time.
v1 = abstract_utils.get_atomic_value(cls.instantiate(self._node))
v2 = abstract_utils.get_atomic_value(cls.instantiate(self._node))
self.assertIsNot(v1, v2)
# Create one instance per opcode.
fake_opcode = object()
self._vm.push_frame(frame_state.SimpleFrame(fake_opcode))
v3 = abstract_utils.get_atomic_value(cls.instantiate(self._node))
v4 = abstract_utils.get_atomic_value(cls.instantiate(self._node))
self.assertIsNot(v1, v3)
self.assertIsNot(v2, v3)
self.assertIs(v3, v4)
def test_set_module_on_module(self):
# A module's 'module' attribute should always remain None, and no one
# should attempt to set it to something besides the module's name or None.
ast = pytd_utils.CreateModule("some_mod")
mod = abstract.Module(self._vm, ast.name, {}, ast)
mod.module = ast.name
self.assertIsNone(mod.module)
self.assertEqual(ast.name, mod.full_name)
mod.module = None
self.assertIsNone(mod.module)
self.assertEqual(ast.name, mod.full_name)
def set_module():
mod.module = "other_mod"
self.assertRaises(AssertionError, set_module)
def test_call_type_parameter_instance(self):
instance = abstract.Instance(self._vm.convert.list_type, self._vm)
instance.merge_instance_type_parameter(
self._vm.root_cfg_node, abstract_utils.T,
self._vm.convert.int_type.to_variable(self._vm.root_cfg_node))
t = abstract.TypeParameter(abstract_utils.T, self._vm)
t_instance = abstract.TypeParameterInstance(t, instance, self._vm)
node, ret = t_instance.call(self._node, t_instance.to_binding(self._node),
function.Args(posargs=()))
self.assertIs(node, self._node)
retval, = ret.data
self.assertEqual(retval.cls, self._vm.convert.int_type)
def test_call_empty_type_parameter_instance(self):
instance = abstract.Instance(self._vm.convert.list_type, self._vm)
t = abstract.TypeParameter(abstract_utils.T, self._vm)
t_instance = abstract.TypeParameterInstance(t, instance, self._vm)
node, ret = t_instance.call(self._node, t_instance.to_binding(self._node),
function.Args(posargs=()))
self.assertIs(node, self._node)
retval, = ret.data
self.assertIs(retval, self._vm.convert.empty)
def test_call_type_parameter_instance_with_wrong_args(self):
instance = abstract.Instance(self._vm.convert.list_type, self._vm)
instance.merge_instance_type_parameter(
self._vm.root_cfg_node, abstract_utils.T,
self._vm.convert.int_type.to_variable(self._vm.root_cfg_node))
t = abstract.TypeParameter(abstract_utils.T, self._vm)
t_instance = abstract.TypeParameterInstance(t, instance, self._vm)
posargs = (self._vm.convert.create_new_unsolvable(self._node),) * 3
node, ret = t_instance.call(self._node, t_instance.to_binding(self._node),
function.Args(posargs=posargs))
self.assertIs(node, self._node)
self.assertTrue(ret.bindings)
error, = self._vm.errorlog
self.assertEqual(error.name, "wrong-arg-count")
def test_instantiate_tuple_class_for_sub(self):
type_param = abstract.TypeParameter(abstract_utils.K, self._vm)
cls = abstract.TupleClass(
self._vm.convert.tuple_type,
{0: type_param, abstract_utils.T: type_param}, self._vm)
# Instantiate the tuple class.
subst_value = self._vm.annotations_util.instantiate_for_sub(
self._vm.root_cfg_node, cls)
# Recover the class from the instance.
subbed_cls = self._vm.annotations_util.sub_one_annotation(
self._vm.root_cfg_node, type_param, [{abstract_utils.K: subst_value}])
self.assertEqual(cls, subbed_cls)
if __name__ == "__main__":
unittest.main()
|
the-stack_0_23801 | # -*- coding: utf-8 -*-
"""
TencentBlueKing is pleased to support the open source community by making 蓝鲸智云-节点管理(BlueKing-BK-NODEMAN) available.
Copyright (C) 2017-2021 THL A29 Limited, a Tencent company. All rights reserved.
Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License.
You may obtain a copy of the License at https://opensource.org/licenses/MIT
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
"""
from django.conf.urls import include, url
from rest_framework import routers as drf_routers
from apps.backend import views
from apps.backend.healthz.views import HealthzViewSet
from apps.backend.plugin.views import (
PluginViewSet,
export_download,
upload_package,
upload_package_by_cos,
)
from apps.backend.subscription.views import SubscriptionViewSet
routers = drf_routers.DefaultRouter(trailing_slash=True)
routers.register("plugin", PluginViewSet, basename="plugin")
routers.register("subscription", SubscriptionViewSet, basename="subscription")
routers.register("healthz", HealthzViewSet, basename="healthz")
export_routers = drf_routers.DefaultRouter(trailing_slash=True)
urlpatterns = [
url(r"api/", include(routers.urls)),
url(r"^package/upload/$", upload_package),
url(r"^package/upload_cos/$", upload_package_by_cos),
url(r"^export/download/$", export_download, name="export_download"),
url(r"^export/", include(export_routers.urls)),
url(r"^get_gse_config/", views.get_gse_config),
url(r"^report_log/", views.report_log),
url(r"^api/job_callback/", views.job_callback),
]
|
the-stack_0_23802 |
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import seaborn as sns
get_ipython().run_line_magic('matplotlib', 'inline')
np.random.seed(2)
from sklearn.model_selection import train_test_split
from sklearn.metrics import confusion_matrix
import itertools
from keras.utils.np_utils import to_categorical # convert to one-hot-encoding
from keras.models import Sequential
from keras.layers import Dense, Dropout, Flatten, Conv2D, MaxPool2D
from keras.optimizers import RMSprop
from keras.preprocessing.image import ImageDataGenerator
from keras.callbacks import ReduceLROnPlateau
sns.set(style='white', context='notebook', palette='deep')
# Load the data
train = pd.read_csv("./input/train.csv")
test = pd.read_csv("./input/test.csv")
Y_train = train["label"]
# Drop 'label' column
X_train = train.drop(labels = ["label"],axis = 1)
# free some space
del train
# Count the values
Y_train.value_counts()
# Normalize the data
X_train = X_train / 255.0
test = test / 255.0
# Reshape images to 3 dimensions (height = 28px, width = 28px, channel = 1)
X_train = X_train.values.reshape(-1,28,28,1)
test = test.values.reshape(-1,28,28,1)
# Encode labels to one-hot vectors (e.g. 2 -> [0,0,1,0,0,0,0,0,0,0])
Y_train = to_categorical(Y_train, num_classes = 10)
pd.DataFrame(Y_train).head()
# Set the random seed
random_seed = 2
# Split the train and the validation set for the fitting
X_train, X_val, Y_train, Y_val = train_test_split(X_train, Y_train, test_size = 0.1, random_state=random_seed)
# Some examples
g = plt.imshow(X_train[0][:,:,0])
# Set the CNN model
# CNN architecture: [[Conv2D->relu]*2 -> MaxPool2D -> Dropout]*2 -> Flatten -> Dense -> Dropout -> Out
model = Sequential()
model.add(Conv2D(filters = 32, kernel_size = (5,5),padding = 'Same',
activation ='relu', input_shape = (28,28,1)))
model.add(Conv2D(filters = 32, kernel_size = (5,5),padding = 'Same',
activation ='relu'))
model.add(MaxPool2D(pool_size=(2,2)))
model.add(Dropout(0.25))
model.add(Conv2D(filters = 64, kernel_size = (3,3),padding = 'Same',
activation ='relu'))
model.add(Conv2D(filters = 64, kernel_size = (3,3),padding = 'Same',
activation ='relu'))
model.add(MaxPool2D(pool_size=(2,2), strides=(2,2)))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(256, activation = "relu"))
model.add(Dropout(0.5))
model.add(Dense(10, activation = "softmax"))
# Define the optimizer
optimizer = RMSprop(lr=0.001, rho=0.9, epsilon=1e-08, decay=0.0)
# Compile the model
model.compile(optimizer = optimizer , loss = "categorical_crossentropy", metrics=["accuracy"])
model.summary()
# Set a learning rate annealer
learning_rate_reduction = ReduceLROnPlateau(monitor='val_acc',
patience=3,
verbose=1,
factor=0.5,
min_lr=0.00001)
epochs = 35
batch_size = 86
# With data augmentation to prevent overfitting (accuracy 0.99286)
datagen = ImageDataGenerator(
featurewise_center=False, # set input mean to 0 over the dataset
samplewise_center=False, # set each sample mean to 0
featurewise_std_normalization=False, # divide inputs by std of the dataset
samplewise_std_normalization=False, # divide each input by its std
zca_whitening=False, # apply ZCA whitening
rotation_range=10, # randomly rotate images in the range (degrees, 0 to 180)
zoom_range = 0.1, # Randomly zoom image
width_shift_range=0.1, # randomly shift images horizontally (fraction of total width)
height_shift_range=0.1, # randomly shift images vertically (fraction of total height)
horizontal_flip=False, # randomly flip images
vertical_flip=False) # randomly flip images
datagen.fit(X_train)
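# Illustrative sketch (not part of the original notebook): preview one batch of
# augmented digits to sanity-check the ImageDataGenerator settings above. It
# only relies on the `datagen`, `X_train` and `Y_train` objects defined earlier.
preview_x, _ = next(datagen.flow(X_train, Y_train, batch_size=9))
fig_preview, ax_preview = plt.subplots(3, 3)
for i in range(9):
    ax_preview[i // 3, i % 3].imshow(preview_x[i][:, :, 0], cmap="gray")
    ax_preview[i // 3, i % 3].axis("off")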
# Fit the model
history = model.fit_generator(datagen.flow(X_train,Y_train, batch_size=batch_size),
epochs = epochs, validation_data = (X_val,Y_val),
verbose = 2, steps_per_epoch=X_train.shape[0] // batch_size
, callbacks=[learning_rate_reduction])
# Plot the loss and accuracy curves for training and validation
fig, ax = plt.subplots(2,1)
ax[0].plot(history.history['loss'], color='b', label="Training loss")
ax[0].plot(history.history['val_loss'], color='r', label="validation loss",axes =ax[0])
legend = ax[0].legend(loc='best', shadow=True)
ax[1].plot(history.history['acc'], color='b', label="Training accuracy")
ax[1].plot(history.history['val_acc'], color='r',label="Validation accuracy")
legend = ax[1].legend(loc='best', shadow=True)
# Look at confusion matrix
def plot_confusion_matrix(cm, classes,
normalize=False,
title='Confusion matrix',
cmap=plt.cm.Blues):
"""
This function prints and plots the confusion matrix.
Normalization can be applied by setting `normalize=True`.
"""
    if normalize:
        cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
    plt.imshow(cm, interpolation='nearest', cmap=cmap)
    plt.title(title)
    plt.colorbar()
    tick_marks = np.arange(len(classes))
    plt.xticks(tick_marks, classes, rotation=45)
    plt.yticks(tick_marks, classes)
thresh = cm.max() / 2.
for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
plt.text(j, i, cm[i, j],
horizontalalignment="center",
color="white" if cm[i, j] > thresh else "black")
plt.tight_layout()
plt.ylabel('True label')
plt.xlabel('Predicted label')
# Predict the values from the validation dataset
Y_pred = model.predict(X_val)
# Convert predictions classes to one hot vectors
Y_pred_classes = np.argmax(Y_pred,axis = 1)
# Convert validation observations to one hot vectors
Y_true = np.argmax(Y_val,axis = 1)
# compute the confusion matrix
confusion_mtx = confusion_matrix(Y_true, Y_pred_classes)
# plot the confusion matrix
plot_confusion_matrix(confusion_mtx, classes = range(10))
# Display some error results
# Errors are differences between predicted labels and true labels
errors = (Y_pred_classes - Y_true != 0)
Y_pred_classes_errors = Y_pred_classes[errors]
Y_pred_errors = Y_pred[errors]
Y_true_errors = Y_true[errors]
X_val_errors = X_val[errors]
def display_errors(errors_index,img_errors,pred_errors, obs_errors):
""" This function shows 6 images with their predicted and real labels"""
n = 0
nrows = 2
ncols = 3
fig, ax = plt.subplots(nrows,ncols,sharex=True,sharey=True)
for row in range(nrows):
for col in range(ncols):
error = errors_index[n]
ax[row,col].imshow((img_errors[error]).reshape((28,28)))
ax[row,col].set_title("Predicted label :{}\nTrue label :{}".format(pred_errors[error],obs_errors[error]))
n += 1
# Probabilities of the wrongly predicted numbers
Y_pred_errors_prob = np.max(Y_pred_errors,axis = 1)
# Predicted probabilities of the true values in the error set
true_prob_errors = np.diagonal(np.take(Y_pred_errors, Y_true_errors, axis=1))
# Difference between the probability of the predicted label and the true label
delta_pred_true_errors = Y_pred_errors_prob - true_prob_errors
# Sorted list of the delta prob errors
sorted_delta_errors = np.argsort(delta_pred_true_errors)
# Top 6 errors
most_important_errors = sorted_delta_errors[-6:]
# Show the top 6 errors
display_errors(most_important_errors, X_val_errors, Y_pred_classes_errors, Y_true_errors)
# predict results
results = model.predict(test)
# select the index with the maximum probability
results = np.argmax(results,axis = 1)
results = pd.Series(results,name="Label")
submission = pd.concat([pd.Series(range(1,28001),name = "ImageId"),results],axis = 1)
submission.to_csv("cnn_mnist_datagen.csv",index=False)
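# Optional, illustrative step (not in the original notebook): persist the trained
# network so it can be reloaded later without retraining. The filename
# "cnn_mnist_datagen.h5" is an assumption, not taken from the original.
model.save("cnn_mnist_datagen.h5")
# Reloading it later would look like:
# from keras.models import load_model
# model = load_model("cnn_mnist_datagen.h5")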
|
the-stack_0_23803 | import numpy as np
import keras
from keras.models import Sequential
from keras.layers import Dense, Dropout, Flatten, BatchNormalization, Conv2DTranspose, Reshape
from keras.layers import Conv2D, MaxPooling2D
from keras.optimizers import SGD
from keras.callbacks import TensorBoard
from matplotlib import pyplot as plt
from utils.DataLoader import load_gaps, load_stl10_data
def gen_model():
model = Sequential()
model.add(Conv2D(32, (3, 3), activation='relu',
padding='same', input_shape=(64, 64, 1)))
model.add(Conv2D(32, (3, 3), activation='relu', padding='same'))
model.add(BatchNormalization(momentum=0.8))
model.add(MaxPooling2D((2, 2)))
model.add(Conv2D(32, (3, 3), activation='relu', padding='same'))
model.add(Conv2D(32, (3, 3), activation='relu', padding='same'))
model.add(BatchNormalization(momentum=0.8))
model.add(MaxPooling2D((2, 2)))
model.add(Conv2D(32, (3, 3), activation='relu', padding='same'))
model.add(Conv2D(32, (3, 3), activation='relu', padding='same'))
model.add(BatchNormalization(momentum=0.8))
model.add(MaxPooling2D((2, 2)))
model.add(Flatten())
# model.add(Dense(100, activation='relu'))
model.add(Dense(10, activation='relu', name="encoder_output"))
# model.add(Dense(100, activation='relu'))
# model.add(Dense(512, activation='relu'))
# model.add(Reshape((2, 2, 128)))
model.add(Dense(2048, activation='relu'))
model.add(Reshape((8,8,32)))
model.add(Conv2DTranspose(128, (3, 3), strides=2,
activation='relu', padding='same'))
model.add(Conv2DTranspose(64, (3, 3), strides=2,
activation='relu', padding='same'))
model.add(Conv2DTranspose(32, (3, 3), strides=2,
activation='relu', padding='same'))
model.add(Conv2D(1, (3, 3), padding='same', activation='relu'))
model.compile(optimizer='sgd', loss='mae')
return model
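# Illustrative helper (not part of the original script): because the bottleneck
# Dense layer above is named "encoder_output", the trained autoencoder can be
# reused as a feature extractor by cutting the graph at that layer. This is a
# sketch using the standard Keras functional API.
def get_encoder(trained_model):
    from keras.models import Model
    return Model(inputs=trained_model.input,
                 outputs=trained_model.get_layer("encoder_output").output)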
def train():
model = gen_model()
data = np.load('images/64px_image_x.npy')
data = np.reshape(data, (40000, 64, 64, 1))
model.fit(x=data, y=data, epochs=100, batch_size=40, validation_split=0.1)
model.save_weights("models/ae_0113_64px.h5")
idx = np.random.randint(0, 40000, size=8)
x = data[idx]
x = np.reshape(x, (8, 64, 64, 1))
ret = model.predict(x)
for i in range(4):
for j in range(2):
plt.subplot(4, 4, (i*4+j*2+1))
plt.imshow(x[i*2+j, :, :, 0], cmap='gray')
plt.subplot(4, 4, (i*4+j*2+2))
plt.imshow(ret[i*2+j, :, :, 0], cmap='gray')
plt.show()
if __name__ == "__main__":
train()
|
the-stack_0_23804 | # -*- coding: utf-8 -*-
import sys
from setuptools import setup, find_packages
from setuptools.extension import Extension
try:
from Cython.Build import cythonize
except ImportError:
raise RuntimeError(
"Cython required for running the package installation\n"
+ "Try installing it with:\n"
+ "$> pip install cython"
)
try:
import numpy
except ImportError:
raise RuntimeError(
"Numpy required for running the package installation\n"
+ "Try installing it with:\n"
+ "$> pip install numpy"
)
# Define common arguments used to compile the extensions
common_link_args = ["-fopenmp"]
common_compile_args = ["-fopenmp", "-O3", "-ffast-math"]
common_include = [numpy.get_include()]
if sys.platform.startswith("darwin"):
common_link_args.append("-Wl,-rpath,/usr/local/opt/gcc/lib/gcc/9/")
extensions_data = {
"pysteps.motion._proesmans": {"sources": ["pysteps/motion/_proesmans.pyx"]},
"pysteps.motion._vet": {"sources": ["pysteps/motion/_vet.pyx"]},
}
extensions = []
for name, data in extensions_data.items():
include = data.get("include", common_include)
extra_compile_args = data.get("extra_compile_args", common_compile_args)
extra_link_args = data.get("extra_link_args", common_link_args)
pysteps_extension = Extension(
name,
sources=data["sources"],
depends=data.get("depends", []),
include_dirs=include,
language=data.get("language", "c"),
define_macros=data.get("macros", []),
extra_compile_args=extra_compile_args,
extra_link_args=extra_link_args,
)
extensions.append(pysteps_extension)
external_modules = cythonize(extensions, force=True, language_level=3)
requirements = [
"numpy",
"jsmin",
"scipy",
"matplotlib",
"jsonschema",
]
setup(
name="pysteps",
version="1.4.1",
author="PySteps developers",
packages=find_packages(),
license="LICENSE",
include_package_data=True,
description="Python framework for short-term ensemble prediction systems",
long_description=open("README.rst").read(),
long_description_content_type="text/x-rst",
url="https://pysteps.github.io/",
classifiers=[
"Development Status :: 5 - Production/Stable",
"Intended Audience :: Science/Research",
"Topic :: Scientific/Engineering",
"Topic :: Scientific/Engineering :: Atmospheric Science",
"Topic :: Scientific/Engineering :: Hydrology",
"License :: OSI Approved :: BSD License",
"Programming Language :: Python :: 3",
"Operating System :: OS Independent",
],
ext_modules=external_modules,
setup_requires=requirements,
install_requires=requirements,
)
|
the-stack_0_23809 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# This file handles all flask-restful resources for /v3/users
import base64
import os
import secrets
import uuid
import flask
import http.client
from oslo_serialization import jsonutils
from werkzeug import exceptions
from keystone.api._shared import json_home_relations
from keystone.application_credential import schema as app_cred_schema
from keystone.common import json_home
from keystone.common import provider_api
from keystone.common import rbac_enforcer
from keystone.common import utils
from keystone.common import validation
import keystone.conf
from keystone import exception as ks_exception
from keystone.i18n import _
from keystone.identity import schema
from keystone import notifications
from keystone.server import flask as ks_flask
CRED_TYPE_EC2 = 'ec2'
CONF = keystone.conf.CONF
ENFORCER = rbac_enforcer.RBACEnforcer
PROVIDERS = provider_api.ProviderAPIs
ACCESS_TOKEN_ID_PARAMETER_RELATION = (
json_home_relations.os_oauth1_parameter_rel_func(
parameter_name='access_token_id')
)
def _convert_v3_to_ec2_credential(credential):
# Prior to bug #1259584 fix, blob was stored unserialized
# but it should be stored as a json string for compatibility
# with the v3 credentials API. Fall back to the old behavior
# for backwards compatibility with existing DB contents
try:
blob = jsonutils.loads(credential['blob'])
except TypeError:
blob = credential['blob']
return {'user_id': credential.get('user_id'),
'tenant_id': credential.get('project_id'),
'access': blob.get('access'),
'secret': blob.get('secret'),
'trust_id': blob.get('trust_id')}
def _format_token_entity(entity):
formatted_entity = entity.copy()
access_token_id = formatted_entity['id']
user_id = formatted_entity.get('authorizing_user_id', '')
if 'role_ids' in entity:
formatted_entity.pop('role_ids')
if 'access_secret' in entity:
formatted_entity.pop('access_secret')
url = ('/users/%(user_id)s/OS-OAUTH1/access_tokens/%(access_token_id)s'
'/roles' % {'user_id': user_id,
'access_token_id': access_token_id})
formatted_entity.setdefault('links', {})
formatted_entity['links']['roles'] = (ks_flask.base_url(url))
return formatted_entity
def _check_unrestricted_application_credential(token):
if 'application_credential' in token.methods:
if not token.application_credential['unrestricted']:
action = _("Using method 'application_credential' is not "
"allowed for managing additional application "
"credentials.")
raise ks_exception.ForbiddenAction(action=action)
def _build_user_target_enforcement():
target = {}
try:
target['user'] = PROVIDERS.identity_api.get_user(
flask.request.view_args.get('user_id')
)
if flask.request.view_args.get('group_id'):
target['group'] = PROVIDERS.identity_api.get_group(
flask.request.view_args.get('group_id')
)
except ks_exception.NotFound: # nosec
# Defer existence in the event the user doesn't exist, we'll
# check this later anyway.
pass
return target
def _build_enforcer_target_data_owner_and_user_id_match():
ref = {}
if flask.request.view_args:
credential_id = flask.request.view_args.get('credential_id')
if credential_id is not None:
hashed_id = utils.hash_access_key(credential_id)
ref['credential'] = PROVIDERS.credential_api.get_credential(
hashed_id)
return ref
def _update_request_user_id_attribute():
# This method handles a special case in policy enforcement. The application
# credential API is underneath the user path (e.g.,
# /v3/users/{user_id}/application_credentials/{application_credential_id}).
# The RBAC enforcer thinks the user to evaluate for application credential
# ownership comes from the path, but it should come from the actual
# application credential reference. By ensuring we pull the user ID from
    # the application credential, we close a loophole where users could
# effectively bypass authorization to view or delete any application
# credential in the system, assuming the attacker knows the application
# credential ID of another user. So long as the attacker matches the user
# ID in the request path to the user in the token of the request, they can
# pass the `rule:owner` policy check. This method protects against that by
# ensuring we use the application credential user ID and not something
# determined from the client.
try:
app_cred = (
PROVIDERS.application_credential_api.get_application_credential(
flask.request.view_args.get('application_credential_id')
)
)
flask.request.view_args['user_id'] = app_cred['user_id']
# This target isn't really used in the default policy for application
# credentials, but we return it since we're using this method as a hook
# to update the flask request variables, which are used later in the
# keystone RBAC enforcer to populate the policy_dict, which ultimately
# turns into target attributes.
return {'user_id': app_cred['user_id']}
except ks_exception.NotFound: # nosec
        # Defer existence in the event the application credential doesn't
# exist, we'll check this later anyway.
pass
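# Minimal illustrative sketch (not part of keystone) of the principle described
# in the comment above: ownership checks must key off the user_id stored on the
# application credential itself, never the caller-controlled user_id from the
# request path. `stored_app_cred` is assumed to be the dict returned by
# get_application_credential().
def _sketch_app_cred_owner_matches(stored_app_cred, token_user_id):
    return stored_app_cred['user_id'] == token_user_id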
def _format_role_entity(role_id):
role = PROVIDERS.role_api.get_role(role_id)
formatted_entity = role.copy()
if 'description' in role:
formatted_entity.pop('description')
if 'enabled' in role:
formatted_entity.pop('enabled')
return formatted_entity
class UserResource(ks_flask.ResourceBase):
collection_key = 'users'
member_key = 'user'
get_member_from_driver = PROVIDERS.deferred_provider_lookup(
api='identity_api', method='get_user')
def get(self, user_id=None):
"""Get a user resource or list users.
GET/HEAD /v3/users
GET/HEAD /v3/users/{user_id}
"""
if user_id is not None:
return self._get_user(user_id)
return self._list_users()
def _get_user(self, user_id):
"""Get a user resource.
GET/HEAD /v3/users/{user_id}
"""
ENFORCER.enforce_call(
action='identity:get_user',
build_target=_build_user_target_enforcement
)
ref = PROVIDERS.identity_api.get_user(user_id)
return self.wrap_member(ref)
def _list_users(self):
"""List users.
GET/HEAD /v3/users
"""
filters = ('domain_id', 'enabled', 'idp_id', 'name', 'protocol_id',
'unique_id', 'password_expires_at')
target = None
if self.oslo_context.domain_id:
target = {'domain_id': self.oslo_context.domain_id}
hints = self.build_driver_hints(filters)
ENFORCER.enforce_call(
action='identity:list_users', filters=filters, target_attr=target
)
domain = self._get_domain_id_for_list_request()
if domain is None and self.oslo_context.domain_id:
domain = self.oslo_context.domain_id
refs = PROVIDERS.identity_api.list_users(
domain_scope=domain, hints=hints)
# If the user making the request used a domain-scoped token, let's make
# sure we filter out users that are not in that domain. Otherwise, we'd
# be exposing users in other domains. This if statement is needed in
# case _get_domain_id_for_list_request() short-circuits due to
# configuration and protects against information from other domains
# leaking to people who shouldn't see it.
if self.oslo_context.domain_id:
domain_id = self.oslo_context.domain_id
users = [user for user in refs if user['domain_id'] == domain_id]
else:
users = refs
return self.wrap_collection(users, hints=hints)
def post(self):
"""Create a user.
POST /v3/users
"""
user_data = self.request_body_json.get('user', {})
target = {'user': user_data}
ENFORCER.enforce_call(
action='identity:create_user', target_attr=target
)
validation.lazy_validate(schema.user_create, user_data)
user_data = self._normalize_dict(user_data)
user_data = self._normalize_domain_id(user_data)
ref = PROVIDERS.identity_api.create_user(
user_data,
initiator=self.audit_initiator)
return self.wrap_member(ref), http.client.CREATED
def patch(self, user_id):
"""Update a user.
PATCH /v3/users/{user_id}
"""
ENFORCER.enforce_call(
action='identity:update_user',
build_target=_build_user_target_enforcement
)
PROVIDERS.identity_api.get_user(user_id)
user_data = self.request_body_json.get('user', {})
validation.lazy_validate(schema.user_update, user_data)
self._require_matching_id(user_data)
ref = PROVIDERS.identity_api.update_user(
user_id, user_data, initiator=self.audit_initiator)
return self.wrap_member(ref)
def delete(self, user_id):
"""Delete a user.
DELETE /v3/users/{user_id}
"""
ENFORCER.enforce_call(
action='identity:delete_user',
build_target=_build_user_target_enforcement
)
PROVIDERS.identity_api.delete_user(user_id)
return None, http.client.NO_CONTENT
class UserChangePasswordResource(ks_flask.ResourceBase):
@ks_flask.unenforced_api
def get(self, user_id):
# Special case, GET is not allowed.
raise exceptions.MethodNotAllowed(valid_methods=['POST'])
@ks_flask.unenforced_api
def post(self, user_id):
user_data = self.request_body_json.get('user', {})
validation.lazy_validate(schema.password_change, user_data)
try:
PROVIDERS.identity_api.change_password(
user_id=user_id,
original_password=user_data['original_password'],
new_password=user_data['password'],
initiator=self.audit_initiator)
except AssertionError as e:
raise ks_exception.Unauthorized(
_('Error when changing user password: %s') % e
)
return None, http.client.NO_CONTENT
class UserProjectsResource(ks_flask.ResourceBase):
collection_key = 'projects'
member_key = 'project'
get_member_from_driver = PROVIDERS.deferred_provider_lookup(
api='resource_api', method='get_project')
def get(self, user_id):
filters = ('domain_id', 'enabled', 'name')
ENFORCER.enforce_call(action='identity:list_user_projects',
filters=filters,
build_target=_build_user_target_enforcement)
hints = self.build_driver_hints(filters)
refs = PROVIDERS.assignment_api.list_projects_for_user(user_id)
return self.wrap_collection(refs, hints=hints)
class UserGroupsResource(ks_flask.ResourceBase):
collection_key = 'groups'
member_key = 'group'
get_member_from_driver = PROVIDERS.deferred_provider_lookup(
api='identity_api', method='get_group')
def get(self, user_id):
"""Get groups for a user.
GET/HEAD /v3/users/{user_id}/groups
"""
filters = ('name',)
hints = self.build_driver_hints(filters)
ENFORCER.enforce_call(action='identity:list_groups_for_user',
build_target=_build_user_target_enforcement,
filters=filters)
refs = PROVIDERS.identity_api.list_groups_for_user(user_id=user_id,
hints=hints)
if (self.oslo_context.domain_id):
filtered_refs = []
for ref in refs:
if ref['domain_id'] == self.oslo_context.domain_id:
filtered_refs.append(ref)
refs = filtered_refs
return self.wrap_collection(refs, hints=hints)
class _UserOSEC2CredBaseResource(ks_flask.ResourceBase):
collection_key = 'credentials'
member_key = 'credential'
@classmethod
def _add_self_referential_link(cls, ref, collection_name=None):
# NOTE(morgan): This should be refactored to have an EC2 Cred API with
# a sane prefix instead of overloading the "_add_self_referential_link"
# method. This was chosen as it more closely mirrors the pre-flask
# code (for transition).
path = '/users/%(user_id)s/credentials/OS-EC2/%(credential_id)s'
url = ks_flask.base_url(path) % {
'user_id': ref['user_id'],
'credential_id': ref['access']}
ref.setdefault('links', {})
ref['links']['self'] = url
class UserOSEC2CredentialsResourceListCreate(_UserOSEC2CredBaseResource):
def get(self, user_id):
"""List EC2 Credentials for user.
GET/HEAD /v3/users/{user_id}/credentials/OS-EC2
"""
ENFORCER.enforce_call(action='identity:ec2_list_credentials')
PROVIDERS.identity_api.get_user(user_id)
credential_refs = PROVIDERS.credential_api.list_credentials_for_user(
user_id, type=CRED_TYPE_EC2)
collection_refs = [
_convert_v3_to_ec2_credential(cred)
for cred in credential_refs
]
return self.wrap_collection(collection_refs)
def post(self, user_id):
"""Create EC2 Credential for user.
POST /v3/users/{user_id}/credentials/OS-EC2
"""
target = {}
target['credential'] = {'user_id': user_id}
ENFORCER.enforce_call(action='identity:ec2_create_credential',
target_attr=target)
PROVIDERS.identity_api.get_user(user_id)
tenant_id = self.request_body_json.get('tenant_id')
PROVIDERS.resource_api.get_project(tenant_id)
blob = dict(
access=uuid.uuid4().hex,
secret=uuid.uuid4().hex,
trust_id=self.oslo_context.trust_id
)
credential_id = utils.hash_access_key(blob['access'])
cred_data = dict(
user_id=user_id,
project_id=tenant_id,
blob=jsonutils.dumps(blob),
id=credential_id,
type=CRED_TYPE_EC2
)
PROVIDERS.credential_api.create_credential(credential_id, cred_data)
ref = _convert_v3_to_ec2_credential(cred_data)
return self.wrap_member(ref), http.client.CREATED
class UserOSEC2CredentialsResourceGetDelete(_UserOSEC2CredBaseResource):
@staticmethod
def _get_cred_data(credential_id):
cred = PROVIDERS.credential_api.get_credential(credential_id)
if not cred or cred['type'] != CRED_TYPE_EC2:
raise ks_exception.Unauthorized(
message=_('EC2 access key not found.'))
return _convert_v3_to_ec2_credential(cred)
def get(self, user_id, credential_id):
"""Get a specific EC2 credential.
GET/HEAD /users/{user_id}/credentials/OS-EC2/{credential_id}
"""
func = _build_enforcer_target_data_owner_and_user_id_match
ENFORCER.enforce_call(
action='identity:ec2_get_credential',
build_target=func)
PROVIDERS.identity_api.get_user(user_id)
ec2_cred_id = utils.hash_access_key(credential_id)
cred_data = self._get_cred_data(ec2_cred_id)
return self.wrap_member(cred_data)
def delete(self, user_id, credential_id):
"""Delete a specific EC2 credential.
DELETE /users/{user_id}/credentials/OS-EC2/{credential_id}
"""
func = _build_enforcer_target_data_owner_and_user_id_match
ENFORCER.enforce_call(action='identity:ec2_delete_credential',
build_target=func)
PROVIDERS.identity_api.get_user(user_id)
ec2_cred_id = utils.hash_access_key(credential_id)
self._get_cred_data(ec2_cred_id)
PROVIDERS.credential_api.delete_credential(ec2_cred_id)
return None, http.client.NO_CONTENT
class _OAuth1ResourceBase(ks_flask.ResourceBase):
collection_key = 'access_tokens'
member_key = 'access_token'
@classmethod
def _add_self_referential_link(cls, ref, collection_name=None):
# NOTE(morgan): This should be refactored to have an OAuth1 API with
# a sane prefix instead of overloading the "_add_self_referential_link"
# method. This was chosen as it more closely mirrors the pre-flask
# code (for transition).
ref.setdefault('links', {})
path = '/users/%(user_id)s/OS-OAUTH1/access_tokens' % {
'user_id': ref.get('authorizing_user_id', '')
}
ref['links']['self'] = ks_flask.base_url(path) + '/' + ref['id']
class OAuth1ListAccessTokensResource(_OAuth1ResourceBase):
def get(self, user_id):
"""List OAuth1 Access Tokens for user.
GET /v3/users/{user_id}/OS-OAUTH1/access_tokens
"""
ENFORCER.enforce_call(action='identity:list_access_tokens')
if self.oslo_context.is_delegated_auth:
raise ks_exception.Forbidden(
_('Cannot list request tokens with a token '
'issued via delegation.'))
refs = PROVIDERS.oauth_api.list_access_tokens(user_id)
formatted_refs = ([_format_token_entity(x) for x in refs])
return self.wrap_collection(formatted_refs)
class OAuth1AccessTokenCRUDResource(_OAuth1ResourceBase):
def get(self, user_id, access_token_id):
"""Get specific access token.
GET/HEAD /v3/users/{user_id}/OS-OAUTH1/access_tokens/{access_token_id}
"""
ENFORCER.enforce_call(action='identity:get_access_token')
access_token = PROVIDERS.oauth_api.get_access_token(access_token_id)
if access_token['authorizing_user_id'] != user_id:
raise ks_exception.NotFound()
access_token = _format_token_entity(access_token)
return self.wrap_member(access_token)
def delete(self, user_id, access_token_id):
"""Delete specific access token.
DELETE /v3/users/{user_id}/OS-OAUTH1/access_tokens/{access_token_id}
"""
ENFORCER.enforce_call(
action='identity:ec2_delete_credential',
build_target=_build_enforcer_target_data_owner_and_user_id_match)
access_token = PROVIDERS.oauth_api.get_access_token(access_token_id)
reason = (
'Invalidating the token cache because an access token for '
'consumer %(consumer_id)s has been deleted. Authorization for '
'users with OAuth tokens will be recalculated and enforced '
'accordingly the next time they authenticate or validate a '
'token.' % {'consumer_id': access_token['consumer_id']}
)
notifications.invalidate_token_cache_notification(reason)
PROVIDERS.oauth_api.delete_access_token(
user_id, access_token_id, initiator=self.audit_initiator)
return None, http.client.NO_CONTENT
class OAuth1AccessTokenRoleListResource(ks_flask.ResourceBase):
collection_key = 'roles'
member_key = 'role'
def get(self, user_id, access_token_id):
"""List roles for a user access token.
GET/HEAD /v3/users/{user_id}/OS-OAUTH1/access_tokens/
{access_token_id}/roles
"""
ENFORCER.enforce_call(action='identity:list_access_token_roles')
access_token = PROVIDERS.oauth_api.get_access_token(access_token_id)
if access_token['authorizing_user_id'] != user_id:
raise ks_exception.NotFound()
authed_role_ids = access_token['role_ids']
authed_role_ids = jsonutils.loads(authed_role_ids)
refs = ([_format_role_entity(x) for x in authed_role_ids])
return self.wrap_collection(refs)
class OAuth1AccessTokenRoleResource(ks_flask.ResourceBase):
collection_key = 'roles'
member_key = 'role'
def get(self, user_id, access_token_id, role_id):
"""Get role for access token.
GET/HEAD /v3/users/{user_id}/OS-OAUTH1/access_tokens/
{access_token_id}/roles/{role_id}
"""
ENFORCER.enforce_call(action='identity:get_access_token_role')
access_token = PROVIDERS.oauth_api.get_access_token(access_token_id)
if access_token['authorizing_user_id'] != user_id:
raise ks_exception.Unauthorized(_('User IDs do not match'))
authed_role_ids = access_token['role_ids']
authed_role_ids = jsonutils.loads(authed_role_ids)
for authed_role_id in authed_role_ids:
if authed_role_id == role_id:
role = _format_role_entity(role_id)
return self.wrap_member(role)
raise ks_exception.RoleNotFound(role_id=role_id)
class UserAppCredListCreateResource(ks_flask.ResourceBase):
collection_key = 'application_credentials'
member_key = 'application_credential'
_public_parameters = frozenset([
'id',
'name',
'description',
'expires_at',
'project_id',
'roles',
# secret is only exposed after create, it is not stored
'secret',
'links',
'unrestricted',
'access_rules'
])
@staticmethod
def _generate_secret():
length = 64
secret = secrets.token_bytes(length)
secret = base64.urlsafe_b64encode(secret)
secret = secret.rstrip(b'=')
secret = secret.decode('utf-8')
return secret
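    # For reference (illustrative numbers, not from the original source): 64
    # random bytes encode to 88 urlsafe-base64 characters including two '=' of
    # padding, so the secret returned by _generate_secret above is an
    # 86-character string; per the _public_parameters note it is only exposed
    # in the create response.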
@staticmethod
def _normalize_role_list(app_cred_roles):
roles = []
for role in app_cred_roles:
if role.get('id'):
roles.append(role)
else:
roles.append(PROVIDERS.role_api.get_unique_role_by_name(
role['name']))
return roles
def _get_roles(self, app_cred_data, token):
if app_cred_data.get('roles'):
roles = self._normalize_role_list(app_cred_data['roles'])
# NOTE(cmurphy): The user is not allowed to add a role that is not
# in their token. This is to prevent trustees or application
            # credential users from escalating their privileges to include
# additional roles that the trustor or application credential
# creator has assigned on the project.
token_roles = [r['id'] for r in token.roles]
for role in roles:
if role['id'] not in token_roles:
detail = _('Cannot create an application credential with '
'unassigned role')
raise ks_exception.ApplicationCredentialValidationError(
detail=detail)
else:
roles = token.roles
return roles
def get(self, user_id):
"""List application credentials for user.
GET/HEAD /v3/users/{user_id}/application_credentials
"""
filters = ('name',)
ENFORCER.enforce_call(action='identity:list_application_credentials',
filters=filters)
app_cred_api = PROVIDERS.application_credential_api
hints = self.build_driver_hints(filters)
refs = app_cred_api.list_application_credentials(user_id, hints=hints)
return self.wrap_collection(refs, hints=hints)
def post(self, user_id):
"""Create application credential.
POST /v3/users/{user_id}/application_credentials
"""
ENFORCER.enforce_call(action='identity:create_application_credential')
app_cred_data = self.request_body_json.get(
'application_credential', {})
validation.lazy_validate(app_cred_schema.application_credential_create,
app_cred_data)
token = self.auth_context['token']
_check_unrestricted_application_credential(token)
if self.oslo_context.user_id != user_id:
action = _('Cannot create an application credential for another '
'user.')
raise ks_exception.ForbiddenAction(action=action)
project_id = self.oslo_context.project_id
app_cred_data = self._assign_unique_id(app_cred_data)
if not app_cred_data.get('secret'):
app_cred_data['secret'] = self._generate_secret()
app_cred_data['user_id'] = user_id
app_cred_data['project_id'] = project_id
app_cred_data['roles'] = self._get_roles(app_cred_data, token)
if app_cred_data.get('expires_at'):
app_cred_data['expires_at'] = utils.parse_expiration_date(
app_cred_data['expires_at'])
if app_cred_data.get('access_rules'):
for access_rule in app_cred_data['access_rules']:
# If user provides an access rule by ID, it will be looked up
# by ID. If user provides an access rule that is identical to
# an existing one, the ID generated here will be ignored and
# the pre-existing access rule will be used.
if 'id' not in access_rule:
# Generate directly, rather than using _assign_unique_id,
# so that there is no deep copy made
access_rule['id'] = uuid.uuid4().hex
app_cred_data = self._normalize_dict(app_cred_data)
app_cred_api = PROVIDERS.application_credential_api
try:
ref = app_cred_api.create_application_credential(
app_cred_data, initiator=self.audit_initiator)
except ks_exception.RoleAssignmentNotFound as e:
# Raise a Bad Request, not a Not Found, in accordance with the
# API-SIG recommendations:
# https://specs.openstack.org/openstack/api-wg/guidelines/http.html#failure-code-clarifications
raise ks_exception.ApplicationCredentialValidationError(
detail=str(e))
return self.wrap_member(ref), http.client.CREATED
class UserAppCredGetDeleteResource(ks_flask.ResourceBase):
collection_key = 'application_credentials'
member_key = 'application_credential'
def get(self, user_id, application_credential_id):
"""Get application credential resource.
GET/HEAD /v3/users/{user_id}/application_credentials/
{application_credential_id}
"""
target = _update_request_user_id_attribute()
ENFORCER.enforce_call(
action='identity:get_application_credential',
target_attr=target,
)
ref = PROVIDERS.application_credential_api.get_application_credential(
application_credential_id)
return self.wrap_member(ref)
def delete(self, user_id, application_credential_id):
"""Delete application credential resource.
DELETE /v3/users/{user_id}/application_credentials/
{application_credential_id}
"""
target = _update_request_user_id_attribute()
ENFORCER.enforce_call(
action='identity:delete_application_credential',
target_attr=target
)
token = self.auth_context['token']
_check_unrestricted_application_credential(token)
PROVIDERS.application_credential_api.delete_application_credential(
application_credential_id, initiator=self.audit_initiator)
return None, http.client.NO_CONTENT
class UserAccessRuleListResource(ks_flask.ResourceBase):
collection_key = 'access_rules'
member_key = 'access_rule'
def get(self, user_id):
"""List access rules for user.
GET/HEAD /v3/users/{user_id}/access_rules
"""
filters = ('service', 'path', 'method',)
ENFORCER.enforce_call(action='identity:list_access_rules',
filters=filters,
build_target=_build_user_target_enforcement)
app_cred_api = PROVIDERS.application_credential_api
hints = self.build_driver_hints(filters)
refs = app_cred_api.list_access_rules_for_user(user_id, hints=hints)
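        # (Presumably the driver call above may consume the filters attached to
        # the first hints object, so a fresh one is built for wrap_collection.)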
hints = self.build_driver_hints(filters)
return self.wrap_collection(refs, hints=hints)
class UserAccessRuleGetDeleteResource(ks_flask.ResourceBase):
collection_key = 'access_rules'
member_key = 'access_rule'
def get(self, user_id, access_rule_id):
"""Get access rule resource.
GET/HEAD /v3/users/{user_id}/access_rules/{access_rule_id}
"""
ENFORCER.enforce_call(
action='identity:get_access_rule',
build_target=_build_user_target_enforcement
)
ref = PROVIDERS.application_credential_api.get_access_rule(
access_rule_id)
return self.wrap_member(ref)
def delete(self, user_id, access_rule_id):
"""Delete access rule resource.
DELETE /v3/users/{user_id}/access_rules/{access_rule_id}
"""
ENFORCER.enforce_call(
action='identity:delete_access_rule',
build_target=_build_user_target_enforcement
)
PROVIDERS.application_credential_api.delete_access_rule(
access_rule_id, initiator=self.audit_initiator)
return None, http.client.NO_CONTENT
class UserAPI(ks_flask.APIBase):
_name = 'users'
_import_name = __name__
resources = [UserResource]
resource_mapping = [
ks_flask.construct_resource_map(
resource=UserChangePasswordResource,
url='/users/<string:user_id>/password',
resource_kwargs={},
rel='user_change_password',
path_vars={'user_id': json_home.Parameters.USER_ID}
),
ks_flask.construct_resource_map(
resource=UserGroupsResource,
url='/users/<string:user_id>/groups',
resource_kwargs={},
rel='user_groups',
path_vars={'user_id': json_home.Parameters.USER_ID}
),
ks_flask.construct_resource_map(
resource=UserProjectsResource,
url='/users/<string:user_id>/projects',
resource_kwargs={},
rel='user_projects',
path_vars={'user_id': json_home.Parameters.USER_ID}
),
ks_flask.construct_resource_map(
resource=UserOSEC2CredentialsResourceListCreate,
url='/users/<string:user_id>/credentials/OS-EC2',
resource_kwargs={},
rel='user_credentials',
resource_relation_func=(
json_home_relations.os_ec2_resource_rel_func),
path_vars={'user_id': json_home.Parameters.USER_ID}
),
ks_flask.construct_resource_map(
resource=UserOSEC2CredentialsResourceGetDelete,
url=('/users/<string:user_id>/credentials/OS-EC2/'
'<string:credential_id>'),
resource_kwargs={},
rel='user_credential',
resource_relation_func=(
json_home_relations.os_ec2_resource_rel_func),
path_vars={
'credential_id': json_home.build_v3_parameter_relation(
'credential_id'),
'user_id': json_home.Parameters.USER_ID}
),
ks_flask.construct_resource_map(
resource=OAuth1ListAccessTokensResource,
url='/users/<string:user_id>/OS-OAUTH1/access_tokens',
resource_kwargs={},
rel='user_access_tokens',
resource_relation_func=(
json_home_relations.os_oauth1_resource_rel_func),
path_vars={'user_id': json_home.Parameters.USER_ID}
),
ks_flask.construct_resource_map(
resource=OAuth1AccessTokenCRUDResource,
url=('/users/<string:user_id>/OS-OAUTH1/'
'access_tokens/<string:access_token_id>'),
resource_kwargs={},
rel='user_access_token',
resource_relation_func=(
json_home_relations.os_oauth1_resource_rel_func),
path_vars={
'access_token_id': ACCESS_TOKEN_ID_PARAMETER_RELATION,
'user_id': json_home.Parameters.USER_ID}
),
ks_flask.construct_resource_map(
resource=OAuth1AccessTokenRoleListResource,
url=('/users/<string:user_id>/OS-OAUTH1/access_tokens/'
'<string:access_token_id>/roles'),
resource_kwargs={},
rel='user_access_token_roles',
resource_relation_func=(
json_home_relations.os_oauth1_resource_rel_func),
path_vars={'access_token_id': ACCESS_TOKEN_ID_PARAMETER_RELATION,
'user_id': json_home.Parameters.USER_ID}
),
ks_flask.construct_resource_map(
resource=OAuth1AccessTokenRoleResource,
url=('/users/<string:user_id>/OS-OAUTH1/access_tokens/'
'<string:access_token_id>/roles/<string:role_id>'),
resource_kwargs={},
rel='user_access_token_role',
resource_relation_func=(
json_home_relations.os_oauth1_resource_rel_func),
path_vars={'access_token_id': ACCESS_TOKEN_ID_PARAMETER_RELATION,
'role_id': json_home.Parameters.ROLE_ID,
'user_id': json_home.Parameters.USER_ID}
),
ks_flask.construct_resource_map(
resource=UserAppCredListCreateResource,
url='/users/<string:user_id>/application_credentials',
resource_kwargs={},
rel='application_credentials',
path_vars={'user_id': json_home.Parameters.USER_ID}
),
ks_flask.construct_resource_map(
resource=UserAppCredGetDeleteResource,
url=('/users/<string:user_id>/application_credentials/'
'<string:application_credential_id>'),
resource_kwargs={},
rel='application_credential',
path_vars={
'user_id': json_home.Parameters.USER_ID,
'application_credential_id':
json_home.Parameters.APPLICATION_CRED_ID}
),
ks_flask.construct_resource_map(
resource=UserAccessRuleListResource,
url='/users/<string:user_id>/access_rules',
resource_kwargs={},
rel='access_rules',
path_vars={'user_id': json_home.Parameters.USER_ID}
),
ks_flask.construct_resource_map(
resource=UserAccessRuleGetDeleteResource,
url=('/users/<string:user_id>/access_rules/'
'<string:access_rule_id>'),
resource_kwargs={},
rel='access_rule',
path_vars={
'user_id': json_home.Parameters.USER_ID,
'access_rule_id':
json_home.Parameters.ACCESS_RULE_ID}
)
]
APIs = (UserAPI,)
|
the-stack_0_23810 | #!/usr/bin/env python
import numpy as np
from astropy.io import fits
from specutils.io import read_fits
import EchelleJSON as ej
c_ang = 2.99792458e18 #A s^-1
c_kms = 2.99792458e5 #km s^-1
#n @ 3000: 1.0002915686329712
#n @ 6000: 1.0002769832562917
#n @ 8000: 1.0002750477973053
n_air = 1.000277
c_ang_air = c_ang/n_air
c_kms_air = c_kms/n_air
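# Worked example (illustrative): for a barycentric velocity BCV = +15 km/s the
# Doppler factor computed below is sqrt((c_kms_air + 15) / (c_kms_air - 15))
# ~= 1.0000500, i.e. wavelengths are stretched by about 5 parts in 1e5
# (~0.3 A at 6000 A) when shifted to the barycentric frame.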
def convert_spectrum(fraw, fout, BCV=False):
    '''
    param fraw: the raw counts
    param fout: the EchelleJSON (.json) file to save to
    param BCV: if True, apply the barycentric velocity correction from the FITS header
    '''
    raw_list = read_fits.read_fits_spectrum1d(fraw)
    head = fits.getheader(fraw)
    try:
        bcv = head["BCV"]
    except KeyError:
        print("No BCV correction for", fraw)
        bcv = 0.0
    BCV_cor = np.sqrt((c_kms_air + bcv) / (c_kms_air - bcv))
echelle_dict = {}
npix = len(raw_list[0].wavelength)
# Do this for each order in the spectrum
for i,raw in enumerate(raw_list):
# Correct for the barycentric shift
wl = raw.wavelength.value
# Scale the sigma values by the same blaze function that the raw fluxes were scaled by
# raw_flux = raw.flux.value
# raw_flux[raw_flux==0] = 1.0
# Where the ratio values are 0, just set it to 1, since the noise will be 0 here too.
# ratio = blaze.flux.value/raw_flux
# sigma = ratio * np.sqrt(raw.flux.value)
if BCV:
wl = wl * BCV_cor
order_dict = { "wl":wl,
"fl":raw.flux.value,
"mask": np.ones((npix,), dtype="bool")}
echelle_dict["order_{}".format(i)] = order_dict
UT = head["DATE-OBS"]
echelle_dict["UT"] = UT
echelle_dict["AIR"] = head["AIR"]
echelle_dict["EXPTIME"] = head["EXPTIME"]
try:
echelle_dict["HJD"] = head["HJDN"]
except KeyError:
print("Spectrum does not have HJDN keyword", UT)
print("Setting to 0.0")
echelle_dict["HJD"] = 0.0
    # the missing-keyword case was already handled above, so just record the value
    echelle_dict["BCV"] = bcv
ej.write(fout, echelle_dict)
if __name__=="__main__":
import argparse
parser = argparse.ArgumentParser(description="Process TRES echelle spectra into an EchelleJSON file.")
parser.add_argument("rawfile", help="The un-blaze-corrected, un-flux-calibrated FITS file.")
parser.add_argument("outfile", help="Output Filename to contain the processed file. Should have no extension, *.hdf5 or *.npy added automatically.")
parser.add_argument("-t", "--trim", type=int, default=6, help="How many pixels to trim from the front of the file. Default is 6")
parser.add_argument("--BCV", action="store_true", help="If provided, do the barycentric correction.")
parser.add_argument("--clobber", action="store_true", help="Overwrite existing outfile?")
args = parser.parse_args()
convert_spectrum(args.rawfile, args.outfile, args.BCV)
|
the-stack_0_23813 | """
Write a function that takes in a list of ints and uses
the Bubble Sort algorithm to sort the list 'in place'
in ascending order. The method should return the same,
in-place sorted list. Note: Bubble sort is one of the
most inefficient ways to sort a large list of integers.
Nevertheless, it is an interview favorite. Bubble sort
has a time complexity of O(n2). However, if the sample
size is small, bubble sort provides a simple
implementation of a classic sorting algorithm.
"""
def bubble_sort(a_list):
if a_list is None or len(a_list) < 2:
return a_list
for i in range(len(a_list) - 1, 0, -1):
swap = False
for j in range(0, i):
if a_list[j] > a_list[j+1]:
a_list[j], a_list[j+1] = a_list[j+1], a_list[j]
swap = True
if not swap:
break
return a_list
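# Illustrative usage (not part of the original exercise solution); it shows the
# in-place behaviour and the guard for trivially small inputs.
if __name__ == "__main__":
    data = [5, 1, 4, 2, 8]
    result = bubble_sort(data)
    print(result)            # [1, 2, 4, 5, 8]
    print(result is data)    # True -- the same list object, sorted in place
    print(bubble_sort([]))   # [] -- lists shorter than 2 are returned unchanged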
|
the-stack_0_23814 | import torch
import torch.nn as nn
from pointnet2_lib.pointnet2.pointnet2_modules import PointnetFPModule, PointnetSAModuleMSG
from lib.config import cfg
from transformer import TransformerBlock, TransformerLayer
def get_model(input_channels=6, use_xyz=True):
return Pointnet2MSG(input_channels=input_channels, use_xyz=use_xyz)
class Pointnet2MSG(nn.Module):
def __init__(self, input_channels=6, use_xyz=True):
super().__init__()
self.SA_modules = nn.ModuleList()
self.transformer_modules = nn.ModuleList()
channel_in = input_channels
        '''
        # Prepend the current input channel count to each MLP spec, then feed the
        # result into the SA module. After this modification the per-stage values are:
        # mlps : [[6, 16, 16, 32], [6, 32, 32, 64]]
        # channel_out: 96
        # mlps : [[96, 64, 64, 128], [96, 64, 96, 128]]
        # channel_out: 256
        # mlps : [[256, 128, 196, 256], [256, 128, 196, 256]]
        # channel_out: 512
        # mlps : [[512, 256, 256, 512], [512, 256, 384, 512]]
        # channel_out: 1024
        '''
skip_channel_list = [input_channels]
for k in range(cfg.RPN.SA_CONFIG.NPOINTS.__len__()):
# for k in range(2):
mlps = cfg.RPN.SA_CONFIG.MLPS[k].copy()
channel_out = 0
for idx in range(mlps.__len__()):
mlps[idx] = [channel_in] + mlps[idx]
                # modified ======= pointnet ================
                # channel_out += mlps[idx][-1] + 3
                # modified ===================================
channel_out += mlps[idx][-1]
# print("mlps -->", mlps)
self.SA_modules.append(
PointnetSAModuleMSG(
npoint=cfg.RPN.SA_CONFIG.NPOINTS[k],
radii=cfg.RPN.SA_CONFIG.RADIUS[k],
nsamples=cfg.RPN.SA_CONFIG.NSAMPLE[k],
mlps=mlps,
use_xyz=use_xyz,
bn=cfg.RPN.USE_BN
)
)
# self.transformer_modules.append(
# TransformerLayer(cfg.RPN.SA_CONFIG.NPOINTS[k], channel_out)
# )
# print("transformer_modules", self.transformer_modules)
skip_channel_list.append(channel_out)
channel_in = channel_out
        '''
        # mlp values over the four FP iterations:
        # [262, 128, 128]
        # [608, 256, 256]
        # [768, 512, 512]
        # [1536, 512, 512]
        # RPN.FP_MLPS = [[128, 128], [256, 256], [512, 512], [512, 512]]
        '''
test = [[256], [364]]
self.FP_modules = nn.ModuleList()
for k in range(cfg.RPN.FP_MLPS.__len__()):
# for k in range(2):
pre_channel = cfg.RPN.FP_MLPS[k + 1][-1] if k + \
1 < len(cfg.RPN.FP_MLPS) else channel_out
self.FP_modules.append(
PointnetFPModule(
mlp=[pre_channel + skip_channel_list[k]] + cfg.RPN.FP_MLPS[k])
)
# self.FP_modules.append(
# PointnetFPModule(
# mlp=test[k] + cfg.RPN.FP_MLPS[k])
# )
# print("mlp -->", [pre_channel + skip_channel_list[k]] + cfg.RPN.FP_MLPS[k])
def _break_up_pc(self, pc):
xyz = pc[..., 0:3].contiguous()
features = (
pc[..., 3:].transpose(1, 2).contiguous()
if pc.size(-1) > 3 else None
)
return xyz, features
def forward(self, pointcloud: torch.cuda.FloatTensor):
xyz, features = self._break_up_pc(pointcloud)
l_xyz, l_features = [xyz], [features]
for i in range(len(self.SA_modules)):
li_xyz, li_features = self.SA_modules[i](l_xyz[i], l_features[i])
# print("li_features shape -->", li_features.shape)
# print("li_xyz shape -->", li_xyz.shape)
# li_features = self.transformer_modules[i](li_xyz, li_features)
l_xyz.append(li_xyz)
l_features.append(li_features)
for i in range(-1, -(len(self.FP_modules) + 1), -1):
# print("[i - 1] -->", [i - 1])
# print("l_xyz[i - 1] -->", l_xyz[i-1].shape)
# print("l_xyz[i] -->", l_xyz[i].shape)
# print("l_features[i] -->", l_features[i].shape)
l_features[i - 1] = self.FP_modules[i](
l_xyz[i - 1], l_xyz[i], l_features[i - 1], l_features[i]
)
# print("l_features[i - 1] -->", l_features[i-1].shape)
return l_xyz[0], l_features[0]
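# Illustrative usage sketch (not part of the original module). It assumes the
# pointnet2 CUDA ops are compiled, a GPU is available, and cfg carries the
# default PointRCNN RPN settings, in which case FP_MLPS[0][-1] == 128 and the
# returned per-point feature map has 128 channels. Batch/point counts are arbitrary.
if __name__ == '__main__':
    net = get_model(input_channels=3, use_xyz=True).cuda()
    pts = torch.rand(2, 16384, 6).cuda()   # (B, N, xyz + 3 extra feature channels)
    xyz_out, feat_out = net(pts)
    print(xyz_out.shape)   # expected: torch.Size([2, 16384, 3])
    print(feat_out.shape)  # expected: torch.Size([2, 128, 16384])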
|
the-stack_0_23815 | """Unit tests for the io module."""
# Tests of io are scattered over the test suite:
# * test_bufio - tests file buffering
# * test_memoryio - tests BytesIO and StringIO
# * test_fileio - tests FileIO
# * test_file - tests the file interface
# * test_io - tests everything else in the io module
# * test_univnewlines - tests universal newline support
# * test_largefile - tests operations on a file greater than 2**32 bytes
# (only enabled with -ulargefile)
################################################################################
# ATTENTION TEST WRITERS!!!
################################################################################
# When writing tests for io, it's important to test both the C and Python
# implementations. This is usually done by writing a base test that refers to
# the type it is testing as an attribute. Then it provides custom subclasses to
# test both implementations. This file has lots of examples.
################################################################################
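# For example, in this file IOTest holds the shared assertions, and paired
# subclasses such as CIOTest/PyIOTest or CBufferedReaderTest/PyBufferedReaderTest
# run them against the C-accelerated io module and the pure-Python _pyio module
# respectively.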
from __future__ import print_function
from __future__ import unicode_literals
import os
import sys
import time
import array
import random
import unittest
import weakref
import warnings
import abc
import signal
import errno
from itertools import cycle, count
from collections import deque
from UserList import UserList
from test import test_support as support
import contextlib
import codecs
import io # C implementation of io
import _pyio as pyio # Python implementation of io
try:
import threading
except ImportError:
threading = None
try:
import fcntl
except ImportError:
fcntl = None
__metaclass__ = type
bytes = support.py3k_bytes
def _default_chunk_size():
"""Get the default TextIOWrapper chunk size"""
with io.open(__file__, "r", encoding="latin1") as f:
return f._CHUNK_SIZE
class MockRawIOWithoutRead:
"""A RawIO implementation without read(), so as to exercise the default
RawIO.read() which calls readinto()."""
def __init__(self, read_stack=()):
self._read_stack = list(read_stack)
self._write_stack = []
self._reads = 0
self._extraneous_reads = 0
def write(self, b):
self._write_stack.append(bytes(b))
return len(b)
def writable(self):
return True
def fileno(self):
return 42
def readable(self):
return True
def seekable(self):
return True
def seek(self, pos, whence):
return 0 # wrong but we gotta return something
def tell(self):
return 0 # same comment as above
def readinto(self, buf):
self._reads += 1
max_len = len(buf)
try:
data = self._read_stack[0]
except IndexError:
self._extraneous_reads += 1
return 0
if data is None:
del self._read_stack[0]
return None
n = len(data)
if len(data) <= max_len:
del self._read_stack[0]
buf[:n] = data
return n
else:
buf[:] = data[:max_len]
self._read_stack[0] = data[max_len:]
return max_len
def truncate(self, pos=None):
return pos
class CMockRawIOWithoutRead(MockRawIOWithoutRead, io.RawIOBase):
pass
class PyMockRawIOWithoutRead(MockRawIOWithoutRead, pyio.RawIOBase):
pass
class MockRawIO(MockRawIOWithoutRead):
def read(self, n=None):
self._reads += 1
try:
return self._read_stack.pop(0)
except:
self._extraneous_reads += 1
return b""
class CMockRawIO(MockRawIO, io.RawIOBase):
pass
class PyMockRawIO(MockRawIO, pyio.RawIOBase):
pass
class MisbehavedRawIO(MockRawIO):
def write(self, b):
return MockRawIO.write(self, b) * 2
def read(self, n=None):
return MockRawIO.read(self, n) * 2
def seek(self, pos, whence):
return -123
def tell(self):
return -456
def readinto(self, buf):
MockRawIO.readinto(self, buf)
return len(buf) * 5
class CMisbehavedRawIO(MisbehavedRawIO, io.RawIOBase):
pass
class PyMisbehavedRawIO(MisbehavedRawIO, pyio.RawIOBase):
pass
class CloseFailureIO(MockRawIO):
closed = 0
def close(self):
if not self.closed:
self.closed = 1
raise IOError
class CCloseFailureIO(CloseFailureIO, io.RawIOBase):
pass
class PyCloseFailureIO(CloseFailureIO, pyio.RawIOBase):
pass
class MockFileIO:
def __init__(self, data):
self.read_history = []
super(MockFileIO, self).__init__(data)
def read(self, n=None):
res = super(MockFileIO, self).read(n)
self.read_history.append(None if res is None else len(res))
return res
def readinto(self, b):
res = super(MockFileIO, self).readinto(b)
self.read_history.append(res)
return res
class CMockFileIO(MockFileIO, io.BytesIO):
pass
class PyMockFileIO(MockFileIO, pyio.BytesIO):
pass
class MockNonBlockWriterIO:
def __init__(self):
self._write_stack = []
self._blocker_char = None
def pop_written(self):
s = b"".join(self._write_stack)
self._write_stack[:] = []
return s
def block_on(self, char):
"""Block when a given char is encountered."""
self._blocker_char = char
def readable(self):
return True
def seekable(self):
return True
def writable(self):
return True
def write(self, b):
b = bytes(b)
n = -1
if self._blocker_char:
try:
n = b.index(self._blocker_char)
except ValueError:
pass
else:
if n > 0:
# write data up to the first blocker
self._write_stack.append(b[:n])
return n
else:
# cancel blocker and indicate would block
self._blocker_char = None
return None
self._write_stack.append(b)
return len(b)
class CMockNonBlockWriterIO(MockNonBlockWriterIO, io.RawIOBase):
BlockingIOError = io.BlockingIOError
class PyMockNonBlockWriterIO(MockNonBlockWriterIO, pyio.RawIOBase):
BlockingIOError = pyio.BlockingIOError
class IOTest(unittest.TestCase):
def setUp(self):
support.unlink(support.TESTFN)
def tearDown(self):
support.unlink(support.TESTFN)
def write_ops(self, f):
self.assertEqual(f.write(b"blah."), 5)
f.truncate(0)
self.assertEqual(f.tell(), 5)
f.seek(0)
self.assertEqual(f.write(b"blah."), 5)
self.assertEqual(f.seek(0), 0)
self.assertEqual(f.write(b"Hello."), 6)
self.assertEqual(f.tell(), 6)
self.assertEqual(f.seek(-1, 1), 5)
self.assertEqual(f.tell(), 5)
self.assertEqual(f.write(bytearray(b" world\n\n\n")), 9)
self.assertEqual(f.seek(0), 0)
self.assertEqual(f.write(b"h"), 1)
self.assertEqual(f.seek(-1, 2), 13)
self.assertEqual(f.tell(), 13)
self.assertEqual(f.truncate(12), 12)
self.assertEqual(f.tell(), 13)
self.assertRaises(TypeError, f.seek, 0.0)
def read_ops(self, f, buffered=False):
data = f.read(5)
self.assertEqual(data, b"hello")
data = bytearray(data)
self.assertEqual(f.readinto(data), 5)
self.assertEqual(data, b" worl")
self.assertEqual(f.readinto(data), 2)
self.assertEqual(len(data), 5)
self.assertEqual(data[:2], b"d\n")
self.assertEqual(f.seek(0), 0)
self.assertEqual(f.read(20), b"hello world\n")
self.assertEqual(f.read(1), b"")
self.assertEqual(f.readinto(bytearray(b"x")), 0)
self.assertEqual(f.seek(-6, 2), 6)
self.assertEqual(f.read(5), b"world")
self.assertEqual(f.read(0), b"")
self.assertEqual(f.readinto(bytearray()), 0)
self.assertEqual(f.seek(-6, 1), 5)
self.assertEqual(f.read(5), b" worl")
self.assertEqual(f.tell(), 10)
self.assertRaises(TypeError, f.seek, 0.0)
if buffered:
f.seek(0)
self.assertEqual(f.read(), b"hello world\n")
f.seek(6)
self.assertEqual(f.read(), b"world\n")
self.assertEqual(f.read(), b"")
LARGE = 2**31
def large_file_ops(self, f):
assert f.readable()
assert f.writable()
self.assertEqual(f.seek(self.LARGE), self.LARGE)
self.assertEqual(f.tell(), self.LARGE)
self.assertEqual(f.write(b"xxx"), 3)
self.assertEqual(f.tell(), self.LARGE + 3)
self.assertEqual(f.seek(-1, 1), self.LARGE + 2)
self.assertEqual(f.truncate(), self.LARGE + 2)
self.assertEqual(f.tell(), self.LARGE + 2)
self.assertEqual(f.seek(0, 2), self.LARGE + 2)
self.assertEqual(f.truncate(self.LARGE + 1), self.LARGE + 1)
self.assertEqual(f.tell(), self.LARGE + 2)
self.assertEqual(f.seek(0, 2), self.LARGE + 1)
self.assertEqual(f.seek(-1, 2), self.LARGE)
self.assertEqual(f.read(2), b"x")
def test_invalid_operations(self):
# Try writing on a file opened in read mode and vice-versa.
for mode in ("w", "wb"):
with self.open(support.TESTFN, mode) as fp:
self.assertRaises(IOError, fp.read)
self.assertRaises(IOError, fp.readline)
with self.open(support.TESTFN, "rb") as fp:
self.assertRaises(IOError, fp.write, b"blah")
self.assertRaises(IOError, fp.writelines, [b"blah\n"])
with self.open(support.TESTFN, "r") as fp:
self.assertRaises(IOError, fp.write, "blah")
self.assertRaises(IOError, fp.writelines, ["blah\n"])
def test_raw_file_io(self):
with self.open(support.TESTFN, "wb", buffering=0) as f:
self.assertEqual(f.readable(), False)
self.assertEqual(f.writable(), True)
self.assertEqual(f.seekable(), True)
self.write_ops(f)
with self.open(support.TESTFN, "rb", buffering=0) as f:
self.assertEqual(f.readable(), True)
self.assertEqual(f.writable(), False)
self.assertEqual(f.seekable(), True)
self.read_ops(f)
def test_buffered_file_io(self):
with self.open(support.TESTFN, "wb") as f:
self.assertEqual(f.readable(), False)
self.assertEqual(f.writable(), True)
self.assertEqual(f.seekable(), True)
self.write_ops(f)
with self.open(support.TESTFN, "rb") as f:
self.assertEqual(f.readable(), True)
self.assertEqual(f.writable(), False)
self.assertEqual(f.seekable(), True)
self.read_ops(f, True)
def test_readline(self):
with self.open(support.TESTFN, "wb") as f:
f.write(b"abc\ndef\nxyzzy\nfoo\x00bar\nanother line")
with self.open(support.TESTFN, "rb") as f:
self.assertEqual(f.readline(), b"abc\n")
self.assertEqual(f.readline(10), b"def\n")
self.assertEqual(f.readline(2), b"xy")
self.assertEqual(f.readline(4), b"zzy\n")
self.assertEqual(f.readline(), b"foo\x00bar\n")
self.assertEqual(f.readline(None), b"another line")
self.assertRaises(TypeError, f.readline, 5.3)
with self.open(support.TESTFN, "r") as f:
self.assertRaises(TypeError, f.readline, 5.3)
def test_raw_bytes_io(self):
f = self.BytesIO()
self.write_ops(f)
data = f.getvalue()
self.assertEqual(data, b"hello world\n")
f = self.BytesIO(data)
self.read_ops(f, True)
def test_large_file_ops(self):
        # On Windows and Mac OSX this test consumes large resources; it takes
        # a long time to build the >2GB file and takes >2GB of disk space,
        # therefore the resource must be enabled to run this test.
if sys.platform[:3] == 'win' or sys.platform == 'darwin':
support.requires(
'largefile',
'test requires %s bytes and a long time to run' % self.LARGE)
with self.open(support.TESTFN, "w+b", 0) as f:
self.large_file_ops(f)
with self.open(support.TESTFN, "w+b") as f:
self.large_file_ops(f)
def test_with_open(self):
for bufsize in (0, 1, 100):
f = None
with self.open(support.TESTFN, "wb", bufsize) as f:
f.write(b"xxx")
self.assertEqual(f.closed, True)
f = None
try:
with self.open(support.TESTFN, "wb", bufsize) as f:
1 // 0
except ZeroDivisionError:
self.assertEqual(f.closed, True)
else:
self.fail("1 // 0 didn't raise an exception")
# issue 5008
def test_append_mode_tell(self):
with self.open(support.TESTFN, "wb") as f:
f.write(b"xxx")
with self.open(support.TESTFN, "ab", buffering=0) as f:
self.assertEqual(f.tell(), 3)
with self.open(support.TESTFN, "ab") as f:
self.assertEqual(f.tell(), 3)
with self.open(support.TESTFN, "a") as f:
self.assertTrue(f.tell() > 0)
def test_destructor(self):
record = []
class MyFileIO(self.FileIO):
def __del__(self):
record.append(1)
try:
f = super(MyFileIO, self).__del__
except AttributeError:
pass
else:
f()
def close(self):
record.append(2)
super(MyFileIO, self).close()
def flush(self):
record.append(3)
super(MyFileIO, self).flush()
f = MyFileIO(support.TESTFN, "wb")
f.write(b"xxx")
del f
support.gc_collect()
self.assertEqual(record, [1, 2, 3])
with self.open(support.TESTFN, "rb") as f:
self.assertEqual(f.read(), b"xxx")
def _check_base_destructor(self, base):
record = []
class MyIO(base):
def __init__(self):
# This exercises the availability of attributes on object
# destruction.
# (in the C version, close() is called by the tp_dealloc
# function, not by __del__)
self.on_del = 1
self.on_close = 2
self.on_flush = 3
def __del__(self):
record.append(self.on_del)
try:
f = super(MyIO, self).__del__
except AttributeError:
pass
else:
f()
def close(self):
record.append(self.on_close)
super(MyIO, self).close()
def flush(self):
record.append(self.on_flush)
super(MyIO, self).flush()
f = MyIO()
del f
support.gc_collect()
self.assertEqual(record, [1, 2, 3])
def test_IOBase_destructor(self):
self._check_base_destructor(self.IOBase)
def test_RawIOBase_destructor(self):
self._check_base_destructor(self.RawIOBase)
def test_BufferedIOBase_destructor(self):
self._check_base_destructor(self.BufferedIOBase)
def test_TextIOBase_destructor(self):
self._check_base_destructor(self.TextIOBase)
def test_close_flushes(self):
with self.open(support.TESTFN, "wb") as f:
f.write(b"xxx")
with self.open(support.TESTFN, "rb") as f:
self.assertEqual(f.read(), b"xxx")
def test_array_writes(self):
a = array.array(b'i', range(10))
n = len(a.tostring())
with self.open(support.TESTFN, "wb", 0) as f:
self.assertEqual(f.write(a), n)
with self.open(support.TESTFN, "wb") as f:
self.assertEqual(f.write(a), n)
def test_closefd(self):
self.assertRaises(ValueError, self.open, support.TESTFN, 'w',
closefd=False)
def test_read_closed(self):
with self.open(support.TESTFN, "w") as f:
f.write("egg\n")
with self.open(support.TESTFN, "r") as f:
file = self.open(f.fileno(), "r", closefd=False)
self.assertEqual(file.read(), "egg\n")
file.seek(0)
file.close()
self.assertRaises(ValueError, file.read)
def test_no_closefd_with_filename(self):
# can't use closefd in combination with a file name
self.assertRaises(ValueError, self.open, support.TESTFN, "r", closefd=False)
def test_closefd_attr(self):
with self.open(support.TESTFN, "wb") as f:
f.write(b"egg\n")
with self.open(support.TESTFN, "r") as f:
self.assertEqual(f.buffer.raw.closefd, True)
file = self.open(f.fileno(), "r", closefd=False)
self.assertEqual(file.buffer.raw.closefd, False)
def test_garbage_collection(self):
# FileIO objects are collected, and collecting them flushes
# all data to disk.
f = self.FileIO(support.TESTFN, "wb")
f.write(b"abcxxx")
f.f = f
wr = weakref.ref(f)
del f
support.gc_collect()
self.assertTrue(wr() is None, wr)
with self.open(support.TESTFN, "rb") as f:
self.assertEqual(f.read(), b"abcxxx")
def test_unbounded_file(self):
# Issue #1174606: reading from an unbounded stream such as /dev/zero.
zero = "/dev/zero"
if not os.path.exists(zero):
self.skipTest("{0} does not exist".format(zero))
if sys.maxsize > 0x7FFFFFFF:
self.skipTest("test can only run in a 32-bit address space")
if support.real_max_memuse < support._2G:
self.skipTest("test requires at least 2GB of memory")
with self.open(zero, "rb", buffering=0) as f:
self.assertRaises(OverflowError, f.read)
with self.open(zero, "rb") as f:
self.assertRaises(OverflowError, f.read)
with self.open(zero, "r") as f:
self.assertRaises(OverflowError, f.read)
def test_flush_error_on_close(self):
f = self.open(support.TESTFN, "wb", buffering=0)
def bad_flush():
raise IOError()
f.flush = bad_flush
self.assertRaises(IOError, f.close) # exception not swallowed
self.assertTrue(f.closed)
def test_multi_close(self):
f = self.open(support.TESTFN, "wb", buffering=0)
f.close()
f.close()
f.close()
self.assertRaises(ValueError, f.flush)
def test_RawIOBase_read(self):
# Exercise the default RawIOBase.read() implementation (which calls
# readinto() internally).
rawio = self.MockRawIOWithoutRead((b"abc", b"d", None, b"efg", None))
self.assertEqual(rawio.read(2), b"ab")
self.assertEqual(rawio.read(2), b"c")
self.assertEqual(rawio.read(2), b"d")
self.assertEqual(rawio.read(2), None)
self.assertEqual(rawio.read(2), b"ef")
self.assertEqual(rawio.read(2), b"g")
self.assertEqual(rawio.read(2), None)
self.assertEqual(rawio.read(2), b"")
def test_fileio_closefd(self):
# Issue #4841
with self.open(__file__, 'rb') as f1, \
self.open(__file__, 'rb') as f2:
fileio = self.FileIO(f1.fileno(), closefd=False)
# .__init__() must not close f1
fileio.__init__(f2.fileno(), closefd=False)
f1.readline()
# .close() must not close f2
fileio.close()
f2.readline()
def test_nonbuffered_textio(self):
with warnings.catch_warnings(record=True) as recorded:
with self.assertRaises(ValueError):
self.open(support.TESTFN, 'w', buffering=0)
support.gc_collect()
self.assertEqual(recorded, [])
def test_invalid_newline(self):
with warnings.catch_warnings(record=True) as recorded:
with self.assertRaises(ValueError):
self.open(support.TESTFN, 'w', newline='invalid')
support.gc_collect()
self.assertEqual(recorded, [])
class CIOTest(IOTest):
def test_IOBase_finalize(self):
# Issue #12149: segmentation fault on _PyIOBase_finalize when both a
# class which inherits IOBase and an object of this class are caught
# in a reference cycle and close() is already in the method cache.
class MyIO(self.IOBase):
def close(self):
pass
# create an instance to populate the method cache
MyIO()
obj = MyIO()
obj.obj = obj
wr = weakref.ref(obj)
del MyIO
del obj
support.gc_collect()
self.assertTrue(wr() is None, wr)
class PyIOTest(IOTest):
test_array_writes = unittest.skip(
"len(array.array) returns number of elements rather than bytelength"
)(IOTest.test_array_writes)
class CommonBufferedTests:
# Tests common to BufferedReader, BufferedWriter and BufferedRandom
def test_detach(self):
raw = self.MockRawIO()
buf = self.tp(raw)
self.assertIs(buf.detach(), raw)
self.assertRaises(ValueError, buf.detach)
def test_fileno(self):
rawio = self.MockRawIO()
bufio = self.tp(rawio)
self.assertEqual(42, bufio.fileno())
@unittest.skip('test having existential crisis')
def test_no_fileno(self):
# XXX will we always have fileno() function? If so, kill
# this test. Else, write it.
pass
def test_invalid_args(self):
rawio = self.MockRawIO()
bufio = self.tp(rawio)
# Invalid whence
self.assertRaises(ValueError, bufio.seek, 0, -1)
self.assertRaises(ValueError, bufio.seek, 0, 3)
def test_override_destructor(self):
tp = self.tp
record = []
class MyBufferedIO(tp):
def __del__(self):
record.append(1)
try:
f = super(MyBufferedIO, self).__del__
except AttributeError:
pass
else:
f()
def close(self):
record.append(2)
super(MyBufferedIO, self).close()
def flush(self):
record.append(3)
super(MyBufferedIO, self).flush()
rawio = self.MockRawIO()
bufio = MyBufferedIO(rawio)
writable = bufio.writable()
del bufio
support.gc_collect()
if writable:
self.assertEqual(record, [1, 2, 3])
else:
self.assertEqual(record, [1, 2])
def test_context_manager(self):
# Test usability as a context manager
rawio = self.MockRawIO()
bufio = self.tp(rawio)
def _with():
with bufio:
pass
_with()
# bufio should now be closed, and using it a second time should raise
# a ValueError.
self.assertRaises(ValueError, _with)
def test_error_through_destructor(self):
# Test that the exception state is not modified by a destructor,
# even if close() fails.
rawio = self.CloseFailureIO()
def f():
self.tp(rawio).xyzzy
with support.captured_output("stderr") as s:
self.assertRaises(AttributeError, f)
s = s.getvalue().strip()
if s:
# The destructor *may* have printed an unraisable error, check it
self.assertEqual(len(s.splitlines()), 1)
self.assertTrue(s.startswith("Exception IOError: "), s)
self.assertTrue(s.endswith(" ignored"), s)
def test_repr(self):
raw = self.MockRawIO()
b = self.tp(raw)
clsname = "%s.%s" % (self.tp.__module__, self.tp.__name__)
self.assertEqual(repr(b), "<%s>" % clsname)
raw.name = "dummy"
self.assertEqual(repr(b), "<%s name=u'dummy'>" % clsname)
raw.name = b"dummy"
self.assertEqual(repr(b), "<%s name='dummy'>" % clsname)
def test_flush_error_on_close(self):
raw = self.MockRawIO()
def bad_flush():
raise IOError()
raw.flush = bad_flush
b = self.tp(raw)
self.assertRaises(IOError, b.close) # exception not swallowed
self.assertTrue(b.closed)
def test_close_error_on_close(self):
raw = self.MockRawIO()
def bad_flush():
raise IOError('flush')
def bad_close():
raise IOError('close')
raw.close = bad_close
b = self.tp(raw)
b.flush = bad_flush
with self.assertRaises(IOError) as err: # exception not swallowed
b.close()
self.assertEqual(err.exception.args, ('close',))
self.assertFalse(b.closed)
def test_multi_close(self):
raw = self.MockRawIO()
b = self.tp(raw)
b.close()
b.close()
b.close()
self.assertRaises(ValueError, b.flush)
def test_readonly_attributes(self):
raw = self.MockRawIO()
buf = self.tp(raw)
x = self.MockRawIO()
with self.assertRaises((AttributeError, TypeError)):
buf.raw = x
class SizeofTest:
@support.cpython_only
def test_sizeof(self):
bufsize1 = 4096
bufsize2 = 8192
rawio = self.MockRawIO()
bufio = self.tp(rawio, buffer_size=bufsize1)
size = sys.getsizeof(bufio) - bufsize1
rawio = self.MockRawIO()
bufio = self.tp(rawio, buffer_size=bufsize2)
self.assertEqual(sys.getsizeof(bufio), size + bufsize2)
class BufferedReaderTest(unittest.TestCase, CommonBufferedTests):
read_mode = "rb"
def test_constructor(self):
rawio = self.MockRawIO([b"abc"])
bufio = self.tp(rawio)
bufio.__init__(rawio)
bufio.__init__(rawio, buffer_size=1024)
bufio.__init__(rawio, buffer_size=16)
self.assertEqual(b"abc", bufio.read())
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=0)
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=-16)
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=-1)
rawio = self.MockRawIO([b"abc"])
bufio.__init__(rawio)
self.assertEqual(b"abc", bufio.read())
def test_uninitialized(self):
bufio = self.tp.__new__(self.tp)
del bufio
bufio = self.tp.__new__(self.tp)
self.assertRaisesRegexp((ValueError, AttributeError),
'uninitialized|has no attribute',
bufio.read, 0)
bufio.__init__(self.MockRawIO())
self.assertEqual(bufio.read(0), b'')
def test_read(self):
for arg in (None, 7):
rawio = self.MockRawIO((b"abc", b"d", b"efg"))
bufio = self.tp(rawio)
self.assertEqual(b"abcdefg", bufio.read(arg))
# Invalid args
self.assertRaises(ValueError, bufio.read, -2)
def test_read1(self):
rawio = self.MockRawIO((b"abc", b"d", b"efg"))
bufio = self.tp(rawio)
self.assertEqual(b"a", bufio.read(1))
self.assertEqual(b"b", bufio.read1(1))
self.assertEqual(rawio._reads, 1)
self.assertEqual(b"c", bufio.read1(100))
self.assertEqual(rawio._reads, 1)
self.assertEqual(b"d", bufio.read1(100))
self.assertEqual(rawio._reads, 2)
self.assertEqual(b"efg", bufio.read1(100))
self.assertEqual(rawio._reads, 3)
self.assertEqual(b"", bufio.read1(100))
self.assertEqual(rawio._reads, 4)
# Invalid args
self.assertRaises(ValueError, bufio.read1, -1)
def test_readinto(self):
rawio = self.MockRawIO((b"abc", b"d", b"efg"))
bufio = self.tp(rawio)
b = bytearray(2)
self.assertEqual(bufio.readinto(b), 2)
self.assertEqual(b, b"ab")
self.assertEqual(bufio.readinto(b), 2)
self.assertEqual(b, b"cd")
self.assertEqual(bufio.readinto(b), 2)
self.assertEqual(b, b"ef")
self.assertEqual(bufio.readinto(b), 1)
self.assertEqual(b, b"gf")
self.assertEqual(bufio.readinto(b), 0)
self.assertEqual(b, b"gf")
def test_readlines(self):
def bufio():
rawio = self.MockRawIO((b"abc\n", b"d\n", b"ef"))
return self.tp(rawio)
self.assertEqual(bufio().readlines(), [b"abc\n", b"d\n", b"ef"])
self.assertEqual(bufio().readlines(5), [b"abc\n", b"d\n"])
self.assertEqual(bufio().readlines(None), [b"abc\n", b"d\n", b"ef"])
def test_buffering(self):
data = b"abcdefghi"
dlen = len(data)
tests = [
[ 100, [ 3, 1, 4, 8 ], [ dlen, 0 ] ],
[ 100, [ 3, 3, 3], [ dlen ] ],
[ 4, [ 1, 2, 4, 2 ], [ 4, 4, 1 ] ],
]
for bufsize, buf_read_sizes, raw_read_sizes in tests:
rawio = self.MockFileIO(data)
bufio = self.tp(rawio, buffer_size=bufsize)
pos = 0
for nbytes in buf_read_sizes:
self.assertEqual(bufio.read(nbytes), data[pos:pos+nbytes])
pos += nbytes
# this is mildly implementation-dependent
self.assertEqual(rawio.read_history, raw_read_sizes)
def test_read_non_blocking(self):
# Inject some None's in there to simulate EWOULDBLOCK
rawio = self.MockRawIO((b"abc", b"d", None, b"efg", None, None, None))
bufio = self.tp(rawio)
self.assertEqual(b"abcd", bufio.read(6))
self.assertEqual(b"e", bufio.read(1))
self.assertEqual(b"fg", bufio.read())
self.assertEqual(b"", bufio.peek(1))
self.assertIsNone(bufio.read())
self.assertEqual(b"", bufio.read())
rawio = self.MockRawIO((b"a", None, None))
self.assertEqual(b"a", rawio.readall())
self.assertIsNone(rawio.readall())
def test_read_past_eof(self):
rawio = self.MockRawIO((b"abc", b"d", b"efg"))
bufio = self.tp(rawio)
self.assertEqual(b"abcdefg", bufio.read(9000))
def test_read_all(self):
rawio = self.MockRawIO((b"abc", b"d", b"efg"))
bufio = self.tp(rawio)
self.assertEqual(b"abcdefg", bufio.read())
@unittest.skipUnless(threading, 'Threading required for this test.')
@support.requires_resource('cpu')
def test_threads(self):
try:
# Write out many bytes with exactly the same number of 0's,
# 1's... 255's. This will help us check that concurrent reading
# doesn't duplicate or forget contents.
N = 1000
l = list(range(256)) * N
random.shuffle(l)
s = bytes(bytearray(l))
with self.open(support.TESTFN, "wb") as f:
f.write(s)
with self.open(support.TESTFN, self.read_mode, buffering=0) as raw:
bufio = self.tp(raw, 8)
errors = []
results = []
def f():
try:
# Intra-buffer read then buffer-flushing read
for n in cycle([1, 19]):
s = bufio.read(n)
if not s:
break
# list.append() is atomic
results.append(s)
except Exception as e:
errors.append(e)
raise
threads = [threading.Thread(target=f) for x in range(20)]
for t in threads:
t.start()
time.sleep(0.02) # yield
for t in threads:
t.join()
self.assertFalse(errors,
"the following exceptions were caught: %r" % errors)
s = b''.join(results)
for i in range(256):
c = bytes(bytearray([i]))
self.assertEqual(s.count(c), N)
finally:
support.unlink(support.TESTFN)
def test_misbehaved_io(self):
rawio = self.MisbehavedRawIO((b"abc", b"d", b"efg"))
bufio = self.tp(rawio)
self.assertRaises(IOError, bufio.seek, 0)
self.assertRaises(IOError, bufio.tell)
def test_no_extraneous_read(self):
# Issue #9550; when the raw IO object has satisfied the read request,
# we should not issue any additional reads, otherwise it may block
# (e.g. socket).
bufsize = 16
for n in (2, bufsize - 1, bufsize, bufsize + 1, bufsize * 2):
rawio = self.MockRawIO([b"x" * n])
bufio = self.tp(rawio, bufsize)
self.assertEqual(bufio.read(n), b"x" * n)
# Simple case: one raw read is enough to satisfy the request.
self.assertEqual(rawio._extraneous_reads, 0,
"failed for {}: {} != 0".format(n, rawio._extraneous_reads))
# A more complex case where two raw reads are needed to satisfy
# the request.
rawio = self.MockRawIO([b"x" * (n - 1), b"x"])
bufio = self.tp(rawio, bufsize)
self.assertEqual(bufio.read(n), b"x" * n)
self.assertEqual(rawio._extraneous_reads, 0,
"failed for {}: {} != 0".format(n, rawio._extraneous_reads))
class CBufferedReaderTest(BufferedReaderTest, SizeofTest):
tp = io.BufferedReader
def test_constructor(self):
BufferedReaderTest.test_constructor(self)
# The allocation can succeed on 32-bit builds, e.g. with more
# than 2GB RAM and a 64-bit kernel.
if sys.maxsize > 0x7FFFFFFF:
rawio = self.MockRawIO()
bufio = self.tp(rawio)
self.assertRaises((OverflowError, MemoryError, ValueError),
bufio.__init__, rawio, sys.maxsize)
def test_initialization(self):
rawio = self.MockRawIO([b"abc"])
bufio = self.tp(rawio)
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=0)
self.assertRaises(ValueError, bufio.read)
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=-16)
self.assertRaises(ValueError, bufio.read)
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=-1)
self.assertRaises(ValueError, bufio.read)
def test_misbehaved_io_read(self):
rawio = self.MisbehavedRawIO((b"abc", b"d", b"efg"))
bufio = self.tp(rawio)
        # _pyio.BufferedReader seems to implement reading differently, so
        # checking this is not so easy.
self.assertRaises(IOError, bufio.read, 10)
def test_garbage_collection(self):
# C BufferedReader objects are collected.
# The Python version has __del__, so it ends into gc.garbage instead
rawio = self.FileIO(support.TESTFN, "w+b")
f = self.tp(rawio)
f.f = f
wr = weakref.ref(f)
del f
support.gc_collect()
self.assertTrue(wr() is None, wr)
@support.impl_detail(cpython=True)
def test_args_error(self):
# Issue #17275
with self.assertRaisesRegexp(TypeError, "BufferedReader"):
self.tp(io.BytesIO(), 1024, 1024, 1024)
class PyBufferedReaderTest(BufferedReaderTest):
tp = pyio.BufferedReader
class BufferedWriterTest(unittest.TestCase, CommonBufferedTests):
write_mode = "wb"
def test_constructor(self):
rawio = self.MockRawIO()
bufio = self.tp(rawio)
bufio.__init__(rawio)
bufio.__init__(rawio, buffer_size=1024)
bufio.__init__(rawio, buffer_size=16)
self.assertEqual(3, bufio.write(b"abc"))
bufio.flush()
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=0)
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=-16)
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=-1)
bufio.__init__(rawio)
self.assertEqual(3, bufio.write(b"ghi"))
bufio.flush()
self.assertEqual(b"".join(rawio._write_stack), b"abcghi")
def test_uninitialized(self):
bufio = self.tp.__new__(self.tp)
del bufio
bufio = self.tp.__new__(self.tp)
self.assertRaisesRegexp((ValueError, AttributeError),
'uninitialized|has no attribute',
bufio.write, b'')
bufio.__init__(self.MockRawIO())
self.assertEqual(bufio.write(b''), 0)
def test_detach_flush(self):
raw = self.MockRawIO()
buf = self.tp(raw)
buf.write(b"howdy!")
self.assertFalse(raw._write_stack)
buf.detach()
self.assertEqual(raw._write_stack, [b"howdy!"])
def test_write(self):
# Write to the buffered IO but don't overflow the buffer.
writer = self.MockRawIO()
bufio = self.tp(writer, 8)
bufio.write(b"abc")
self.assertFalse(writer._write_stack)
def test_write_overflow(self):
writer = self.MockRawIO()
bufio = self.tp(writer, 8)
contents = b"abcdefghijklmnop"
for n in range(0, len(contents), 3):
bufio.write(contents[n:n+3])
flushed = b"".join(writer._write_stack)
# At least (total - 8) bytes were implicitly flushed, perhaps more
# depending on the implementation.
self.assertTrue(flushed.startswith(contents[:-8]), flushed)
def check_writes(self, intermediate_func):
# Lots of writes, test the flushed output is as expected.
contents = bytes(range(256)) * 1000
n = 0
writer = self.MockRawIO()
bufio = self.tp(writer, 13)
# Generator of write sizes: repeat each N 15 times then proceed to N+1
def gen_sizes():
for size in count(1):
for i in range(15):
yield size
sizes = gen_sizes()
while n < len(contents):
size = min(next(sizes), len(contents) - n)
self.assertEqual(bufio.write(contents[n:n+size]), size)
intermediate_func(bufio)
n += size
bufio.flush()
self.assertEqual(contents,
b"".join(writer._write_stack))
def test_writes(self):
self.check_writes(lambda bufio: None)
def test_writes_and_flushes(self):
self.check_writes(lambda bufio: bufio.flush())
def test_writes_and_seeks(self):
def _seekabs(bufio):
pos = bufio.tell()
bufio.seek(pos + 1, 0)
bufio.seek(pos - 1, 0)
bufio.seek(pos, 0)
self.check_writes(_seekabs)
def _seekrel(bufio):
pos = bufio.seek(0, 1)
bufio.seek(+1, 1)
bufio.seek(-1, 1)
bufio.seek(pos, 0)
self.check_writes(_seekrel)
def test_writes_and_truncates(self):
self.check_writes(lambda bufio: bufio.truncate(bufio.tell()))
def test_write_non_blocking(self):
raw = self.MockNonBlockWriterIO()
bufio = self.tp(raw, 8)
self.assertEqual(bufio.write(b"abcd"), 4)
self.assertEqual(bufio.write(b"efghi"), 5)
# 1 byte will be written, the rest will be buffered
raw.block_on(b"k")
self.assertEqual(bufio.write(b"jklmn"), 5)
# 8 bytes will be written, 8 will be buffered and the rest will be lost
raw.block_on(b"0")
try:
bufio.write(b"opqrwxyz0123456789")
except self.BlockingIOError as e:
written = e.characters_written
else:
self.fail("BlockingIOError should have been raised")
self.assertEqual(written, 16)
self.assertEqual(raw.pop_written(),
b"abcdefghijklmnopqrwxyz")
self.assertEqual(bufio.write(b"ABCDEFGHI"), 9)
s = raw.pop_written()
# Previously buffered bytes were flushed
self.assertTrue(s.startswith(b"01234567A"), s)
def test_write_and_rewind(self):
raw = io.BytesIO()
bufio = self.tp(raw, 4)
self.assertEqual(bufio.write(b"abcdef"), 6)
self.assertEqual(bufio.tell(), 6)
bufio.seek(0, 0)
self.assertEqual(bufio.write(b"XY"), 2)
bufio.seek(6, 0)
self.assertEqual(raw.getvalue(), b"XYcdef")
self.assertEqual(bufio.write(b"123456"), 6)
bufio.flush()
self.assertEqual(raw.getvalue(), b"XYcdef123456")
def test_flush(self):
writer = self.MockRawIO()
bufio = self.tp(writer, 8)
bufio.write(b"abc")
bufio.flush()
self.assertEqual(b"abc", writer._write_stack[0])
def test_writelines(self):
l = [b'ab', b'cd', b'ef']
writer = self.MockRawIO()
bufio = self.tp(writer, 8)
bufio.writelines(l)
bufio.flush()
self.assertEqual(b''.join(writer._write_stack), b'abcdef')
def test_writelines_userlist(self):
l = UserList([b'ab', b'cd', b'ef'])
writer = self.MockRawIO()
bufio = self.tp(writer, 8)
bufio.writelines(l)
bufio.flush()
self.assertEqual(b''.join(writer._write_stack), b'abcdef')
def test_writelines_error(self):
writer = self.MockRawIO()
bufio = self.tp(writer, 8)
self.assertRaises(TypeError, bufio.writelines, [1, 2, 3])
self.assertRaises(TypeError, bufio.writelines, None)
def test_destructor(self):
writer = self.MockRawIO()
bufio = self.tp(writer, 8)
bufio.write(b"abc")
del bufio
support.gc_collect()
self.assertEqual(b"abc", writer._write_stack[0])
def test_truncate(self):
# Truncate implicitly flushes the buffer.
with self.open(support.TESTFN, self.write_mode, buffering=0) as raw:
bufio = self.tp(raw, 8)
bufio.write(b"abcdef")
self.assertEqual(bufio.truncate(3), 3)
self.assertEqual(bufio.tell(), 6)
with self.open(support.TESTFN, "rb", buffering=0) as f:
self.assertEqual(f.read(), b"abc")
@unittest.skipUnless(threading, 'Threading required for this test.')
@support.requires_resource('cpu')
def test_threads(self):
try:
# Write out many bytes from many threads and test they were
# all flushed.
N = 1000
contents = bytes(range(256)) * N
sizes = cycle([1, 19])
n = 0
queue = deque()
while n < len(contents):
size = next(sizes)
queue.append(contents[n:n+size])
n += size
del contents
# We use a real file object because it allows us to
# exercise situations where the GIL is released before
# writing the buffer to the raw streams. This is in addition
# to concurrency issues due to switching threads in the middle
# of Python code.
with self.open(support.TESTFN, self.write_mode, buffering=0) as raw:
bufio = self.tp(raw, 8)
errors = []
def f():
try:
while True:
try:
s = queue.popleft()
except IndexError:
return
bufio.write(s)
except Exception as e:
errors.append(e)
raise
threads = [threading.Thread(target=f) for x in range(20)]
for t in threads:
t.start()
time.sleep(0.02) # yield
for t in threads:
t.join()
self.assertFalse(errors,
"the following exceptions were caught: %r" % errors)
bufio.close()
with self.open(support.TESTFN, "rb") as f:
s = f.read()
for i in range(256):
self.assertEqual(s.count(bytes([i])), N)
finally:
support.unlink(support.TESTFN)
def test_misbehaved_io(self):
rawio = self.MisbehavedRawIO()
bufio = self.tp(rawio, 5)
self.assertRaises(IOError, bufio.seek, 0)
self.assertRaises(IOError, bufio.tell)
self.assertRaises(IOError, bufio.write, b"abcdef")
def test_max_buffer_size_deprecation(self):
with support.check_warnings(("max_buffer_size is deprecated",
DeprecationWarning)):
self.tp(self.MockRawIO(), 8, 12)
def test_write_error_on_close(self):
raw = self.MockRawIO()
def bad_write(b):
raise IOError()
raw.write = bad_write
b = self.tp(raw)
b.write(b'spam')
self.assertRaises(IOError, b.close) # exception not swallowed
self.assertTrue(b.closed)
class CBufferedWriterTest(BufferedWriterTest, SizeofTest):
tp = io.BufferedWriter
def test_constructor(self):
BufferedWriterTest.test_constructor(self)
# The allocation can succeed on 32-bit builds, e.g. with more
# than 2GB RAM and a 64-bit kernel.
if sys.maxsize > 0x7FFFFFFF:
rawio = self.MockRawIO()
bufio = self.tp(rawio)
self.assertRaises((OverflowError, MemoryError, ValueError),
bufio.__init__, rawio, sys.maxsize)
def test_initialization(self):
rawio = self.MockRawIO()
bufio = self.tp(rawio)
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=0)
self.assertRaises(ValueError, bufio.write, b"def")
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=-16)
self.assertRaises(ValueError, bufio.write, b"def")
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=-1)
self.assertRaises(ValueError, bufio.write, b"def")
def test_garbage_collection(self):
# C BufferedWriter objects are collected, and collecting them flushes
# all data to disk.
# The Python version has __del__, so it ends into gc.garbage instead
rawio = self.FileIO(support.TESTFN, "w+b")
f = self.tp(rawio)
f.write(b"123xxx")
f.x = f
wr = weakref.ref(f)
del f
support.gc_collect()
self.assertTrue(wr() is None, wr)
with self.open(support.TESTFN, "rb") as f:
self.assertEqual(f.read(), b"123xxx")
@support.impl_detail(cpython=True)
def test_args_error(self):
# Issue #17275
with self.assertRaisesRegexp(TypeError, "BufferedWriter"):
self.tp(io.BytesIO(), 1024, 1024, 1024)
class PyBufferedWriterTest(BufferedWriterTest):
tp = pyio.BufferedWriter
class BufferedRWPairTest(unittest.TestCase):
def test_constructor(self):
pair = self.tp(self.MockRawIO(), self.MockRawIO())
self.assertFalse(pair.closed)
def test_uninitialized(self):
pair = self.tp.__new__(self.tp)
del pair
pair = self.tp.__new__(self.tp)
self.assertRaisesRegexp((ValueError, AttributeError),
'uninitialized|has no attribute',
pair.read, 0)
self.assertRaisesRegexp((ValueError, AttributeError),
'uninitialized|has no attribute',
pair.write, b'')
pair.__init__(self.MockRawIO(), self.MockRawIO())
self.assertEqual(pair.read(0), b'')
self.assertEqual(pair.write(b''), 0)
def test_detach(self):
pair = self.tp(self.MockRawIO(), self.MockRawIO())
self.assertRaises(self.UnsupportedOperation, pair.detach)
def test_constructor_max_buffer_size_deprecation(self):
with support.check_warnings(("max_buffer_size is deprecated",
DeprecationWarning)):
self.tp(self.MockRawIO(), self.MockRawIO(), 8, 12)
def test_constructor_with_not_readable(self):
class NotReadable(MockRawIO):
def readable(self):
return False
self.assertRaises(IOError, self.tp, NotReadable(), self.MockRawIO())
def test_constructor_with_not_writeable(self):
class NotWriteable(MockRawIO):
def writable(self):
return False
self.assertRaises(IOError, self.tp, self.MockRawIO(), NotWriteable())
def test_read(self):
pair = self.tp(self.BytesIO(b"abcdef"), self.MockRawIO())
self.assertEqual(pair.read(3), b"abc")
self.assertEqual(pair.read(1), b"d")
self.assertEqual(pair.read(), b"ef")
pair = self.tp(self.BytesIO(b"abc"), self.MockRawIO())
self.assertEqual(pair.read(None), b"abc")
def test_readlines(self):
pair = lambda: self.tp(self.BytesIO(b"abc\ndef\nh"), self.MockRawIO())
self.assertEqual(pair().readlines(), [b"abc\n", b"def\n", b"h"])
self.assertEqual(pair().readlines(), [b"abc\n", b"def\n", b"h"])
self.assertEqual(pair().readlines(5), [b"abc\n", b"def\n"])
def test_read1(self):
# .read1() is delegated to the underlying reader object, so this test
# can be shallow.
pair = self.tp(self.BytesIO(b"abcdef"), self.MockRawIO())
self.assertEqual(pair.read1(3), b"abc")
def test_readinto(self):
pair = self.tp(self.BytesIO(b"abcdef"), self.MockRawIO())
data = bytearray(5)
self.assertEqual(pair.readinto(data), 5)
self.assertEqual(data, b"abcde")
def test_write(self):
w = self.MockRawIO()
pair = self.tp(self.MockRawIO(), w)
pair.write(b"abc")
pair.flush()
pair.write(b"def")
pair.flush()
self.assertEqual(w._write_stack, [b"abc", b"def"])
def test_peek(self):
pair = self.tp(self.BytesIO(b"abcdef"), self.MockRawIO())
self.assertTrue(pair.peek(3).startswith(b"abc"))
self.assertEqual(pair.read(3), b"abc")
def test_readable(self):
pair = self.tp(self.MockRawIO(), self.MockRawIO())
self.assertTrue(pair.readable())
def test_writeable(self):
pair = self.tp(self.MockRawIO(), self.MockRawIO())
self.assertTrue(pair.writable())
def test_seekable(self):
# BufferedRWPairs are never seekable, even if their readers and writers
# are.
pair = self.tp(self.MockRawIO(), self.MockRawIO())
self.assertFalse(pair.seekable())
# .flush() is delegated to the underlying writer object and has been
# tested in the test_write method.
def test_close_and_closed(self):
pair = self.tp(self.MockRawIO(), self.MockRawIO())
self.assertFalse(pair.closed)
pair.close()
self.assertTrue(pair.closed)
def test_isatty(self):
class SelectableIsAtty(MockRawIO):
def __init__(self, isatty):
MockRawIO.__init__(self)
self._isatty = isatty
def isatty(self):
return self._isatty
pair = self.tp(SelectableIsAtty(False), SelectableIsAtty(False))
self.assertFalse(pair.isatty())
pair = self.tp(SelectableIsAtty(True), SelectableIsAtty(False))
self.assertTrue(pair.isatty())
pair = self.tp(SelectableIsAtty(False), SelectableIsAtty(True))
self.assertTrue(pair.isatty())
pair = self.tp(SelectableIsAtty(True), SelectableIsAtty(True))
self.assertTrue(pair.isatty())
class CBufferedRWPairTest(BufferedRWPairTest):
tp = io.BufferedRWPair
class PyBufferedRWPairTest(BufferedRWPairTest):
tp = pyio.BufferedRWPair
class BufferedRandomTest(BufferedReaderTest, BufferedWriterTest):
read_mode = "rb+"
write_mode = "wb+"
def test_constructor(self):
BufferedReaderTest.test_constructor(self)
BufferedWriterTest.test_constructor(self)
def test_uninitialized(self):
BufferedReaderTest.test_uninitialized(self)
BufferedWriterTest.test_uninitialized(self)
def test_read_and_write(self):
raw = self.MockRawIO((b"asdf", b"ghjk"))
rw = self.tp(raw, 8)
self.assertEqual(b"as", rw.read(2))
rw.write(b"ddd")
rw.write(b"eee")
self.assertFalse(raw._write_stack) # Buffer writes
self.assertEqual(b"ghjk", rw.read())
self.assertEqual(b"dddeee", raw._write_stack[0])
def test_seek_and_tell(self):
raw = self.BytesIO(b"asdfghjkl")
rw = self.tp(raw)
self.assertEqual(b"as", rw.read(2))
self.assertEqual(2, rw.tell())
rw.seek(0, 0)
self.assertEqual(b"asdf", rw.read(4))
rw.write(b"123f")
rw.seek(0, 0)
self.assertEqual(b"asdf123fl", rw.read())
self.assertEqual(9, rw.tell())
rw.seek(-4, 2)
self.assertEqual(5, rw.tell())
rw.seek(2, 1)
self.assertEqual(7, rw.tell())
self.assertEqual(b"fl", rw.read(11))
rw.flush()
self.assertEqual(b"asdf123fl", raw.getvalue())
self.assertRaises(TypeError, rw.seek, 0.0)
def check_flush_and_read(self, read_func):
raw = self.BytesIO(b"abcdefghi")
bufio = self.tp(raw)
self.assertEqual(b"ab", read_func(bufio, 2))
bufio.write(b"12")
self.assertEqual(b"ef", read_func(bufio, 2))
self.assertEqual(6, bufio.tell())
bufio.flush()
self.assertEqual(6, bufio.tell())
self.assertEqual(b"ghi", read_func(bufio))
raw.seek(0, 0)
raw.write(b"XYZ")
# flush() resets the read buffer
bufio.flush()
bufio.seek(0, 0)
self.assertEqual(b"XYZ", read_func(bufio, 3))
def test_flush_and_read(self):
self.check_flush_and_read(lambda bufio, *args: bufio.read(*args))
def test_flush_and_readinto(self):
def _readinto(bufio, n=-1):
b = bytearray(n if n >= 0 else 9999)
n = bufio.readinto(b)
return bytes(b[:n])
self.check_flush_and_read(_readinto)
def test_flush_and_peek(self):
def _peek(bufio, n=-1):
# This relies on the fact that the buffer can contain the whole
# raw stream, otherwise peek() can return less.
b = bufio.peek(n)
if n != -1:
b = b[:n]
bufio.seek(len(b), 1)
return b
self.check_flush_and_read(_peek)
def test_flush_and_write(self):
raw = self.BytesIO(b"abcdefghi")
bufio = self.tp(raw)
bufio.write(b"123")
bufio.flush()
bufio.write(b"45")
bufio.flush()
bufio.seek(0, 0)
self.assertEqual(b"12345fghi", raw.getvalue())
self.assertEqual(b"12345fghi", bufio.read())
def test_threads(self):
BufferedReaderTest.test_threads(self)
BufferedWriterTest.test_threads(self)
def test_writes_and_peek(self):
def _peek(bufio):
bufio.peek(1)
self.check_writes(_peek)
def _peek(bufio):
pos = bufio.tell()
bufio.seek(-1, 1)
bufio.peek(1)
bufio.seek(pos, 0)
self.check_writes(_peek)
def test_writes_and_reads(self):
def _read(bufio):
bufio.seek(-1, 1)
bufio.read(1)
self.check_writes(_read)
def test_writes_and_read1s(self):
def _read1(bufio):
bufio.seek(-1, 1)
bufio.read1(1)
self.check_writes(_read1)
def test_writes_and_readintos(self):
def _read(bufio):
bufio.seek(-1, 1)
bufio.readinto(bytearray(1))
self.check_writes(_read)
def test_write_after_readahead(self):
# Issue #6629: writing after the buffer was filled by readahead should
# first rewind the raw stream.
for overwrite_size in [1, 5]:
raw = self.BytesIO(b"A" * 10)
bufio = self.tp(raw, 4)
# Trigger readahead
self.assertEqual(bufio.read(1), b"A")
self.assertEqual(bufio.tell(), 1)
# Overwriting should rewind the raw stream if it needs so
bufio.write(b"B" * overwrite_size)
self.assertEqual(bufio.tell(), overwrite_size + 1)
# If the write size was smaller than the buffer size, flush() and
# check that rewind happens.
bufio.flush()
self.assertEqual(bufio.tell(), overwrite_size + 1)
s = raw.getvalue()
self.assertEqual(s,
b"A" + b"B" * overwrite_size + b"A" * (9 - overwrite_size))
def test_write_rewind_write(self):
# Various combinations of reading / writing / seeking backwards / writing again
def mutate(bufio, pos1, pos2):
assert pos2 >= pos1
# Fill the buffer
bufio.seek(pos1)
bufio.read(pos2 - pos1)
bufio.write(b'\x02')
# This writes earlier than the previous write, but still inside
# the buffer.
bufio.seek(pos1)
bufio.write(b'\x01')
b = b"\x80\x81\x82\x83\x84"
for i in range(0, len(b)):
for j in range(i, len(b)):
raw = self.BytesIO(b)
bufio = self.tp(raw, 100)
mutate(bufio, i, j)
bufio.flush()
expected = bytearray(b)
expected[j] = 2
expected[i] = 1
self.assertEqual(raw.getvalue(), expected,
"failed result for i=%d, j=%d" % (i, j))
def test_truncate_after_read_or_write(self):
raw = self.BytesIO(b"A" * 10)
bufio = self.tp(raw, 100)
self.assertEqual(bufio.read(2), b"AA") # the read buffer gets filled
self.assertEqual(bufio.truncate(), 2)
self.assertEqual(bufio.write(b"BB"), 2) # the write buffer increases
self.assertEqual(bufio.truncate(), 4)
def test_misbehaved_io(self):
BufferedReaderTest.test_misbehaved_io(self)
BufferedWriterTest.test_misbehaved_io(self)
def test_interleaved_read_write(self):
# Test for issue #12213
with self.BytesIO(b'abcdefgh') as raw:
with self.tp(raw, 100) as f:
f.write(b"1")
self.assertEqual(f.read(1), b'b')
f.write(b'2')
self.assertEqual(f.read1(1), b'd')
f.write(b'3')
buf = bytearray(1)
f.readinto(buf)
self.assertEqual(buf, b'f')
f.write(b'4')
self.assertEqual(f.peek(1), b'h')
f.flush()
self.assertEqual(raw.getvalue(), b'1b2d3f4h')
with self.BytesIO(b'abc') as raw:
with self.tp(raw, 100) as f:
self.assertEqual(f.read(1), b'a')
f.write(b"2")
self.assertEqual(f.read(1), b'c')
f.flush()
self.assertEqual(raw.getvalue(), b'a2c')
def test_interleaved_readline_write(self):
with self.BytesIO(b'ab\ncdef\ng\n') as raw:
with self.tp(raw) as f:
f.write(b'1')
self.assertEqual(f.readline(), b'b\n')
f.write(b'2')
self.assertEqual(f.readline(), b'def\n')
f.write(b'3')
self.assertEqual(f.readline(), b'\n')
f.flush()
self.assertEqual(raw.getvalue(), b'1b\n2def\n3\n')
class CBufferedRandomTest(CBufferedReaderTest, CBufferedWriterTest,
BufferedRandomTest, SizeofTest):
tp = io.BufferedRandom
def test_constructor(self):
BufferedRandomTest.test_constructor(self)
# The allocation can succeed on 32-bit builds, e.g. with more
# than 2GB RAM and a 64-bit kernel.
if sys.maxsize > 0x7FFFFFFF:
rawio = self.MockRawIO()
bufio = self.tp(rawio)
self.assertRaises((OverflowError, MemoryError, ValueError),
bufio.__init__, rawio, sys.maxsize)
def test_garbage_collection(self):
CBufferedReaderTest.test_garbage_collection(self)
CBufferedWriterTest.test_garbage_collection(self)
@support.impl_detail(cpython=True)
def test_args_error(self):
# Issue #17275
with self.assertRaisesRegexp(TypeError, "BufferedRandom"):
self.tp(io.BytesIO(), 1024, 1024, 1024)
class PyBufferedRandomTest(BufferedRandomTest):
tp = pyio.BufferedRandom
# To fully exercise seek/tell, the StatefulIncrementalDecoder has these
# properties:
# - A single output character can correspond to many bytes of input.
# - The number of input bytes to complete the character can be
# undetermined until the last input byte is received.
# - The number of input bytes can vary depending on previous input.
# - A single input byte can correspond to many characters of output.
# - The number of output characters can be undetermined until the
# last input byte is received.
# - The number of output characters can vary depending on previous input.
class StatefulIncrementalDecoder(codecs.IncrementalDecoder):
"""
For testing seek/tell behavior with a stateful, buffering decoder.
Input is a sequence of words. Words may be fixed-length (length set
by input) or variable-length (period-terminated). In variable-length
mode, extra periods are ignored. Possible words are:
- 'i' followed by a number sets the input length, I (maximum 99).
When I is set to 0, words are period-terminated.
- 'o' followed by a number sets the output length, O (maximum 99).
- Any other word is converted into a word followed by a period on
the output. The output word consists of the input word truncated
or padded out with hyphens to make its length equal to O. If O
is 0, the word is output verbatim without truncating or padding.
I and O are initially set to 1. When I changes, any buffered input is
re-scanned according to the new I. EOF also terminates the last word.
"""
def __init__(self, errors='strict'):
codecs.IncrementalDecoder.__init__(self, errors)
self.reset()
def __repr__(self):
return '<SID %x>' % id(self)
def reset(self):
self.i = 1
self.o = 1
self.buffer = bytearray()
def getstate(self):
i, o = self.i ^ 1, self.o ^ 1 # so that flags = 0 after reset()
return bytes(self.buffer), i*100 + o
def setstate(self, state):
buffer, io = state
self.buffer = bytearray(buffer)
i, o = divmod(io, 100)
self.i, self.o = i ^ 1, o ^ 1
def decode(self, input, final=False):
output = ''
for b in input:
if self.i == 0: # variable-length, terminated with period
if b == '.':
if self.buffer:
output += self.process_word()
else:
self.buffer.append(b)
else: # fixed-length, terminate after self.i bytes
self.buffer.append(b)
if len(self.buffer) == self.i:
output += self.process_word()
if final and self.buffer: # EOF terminates the last word
output += self.process_word()
return output
def process_word(self):
output = ''
if self.buffer[0] == ord('i'):
self.i = min(99, int(self.buffer[1:] or 0)) # set input length
elif self.buffer[0] == ord('o'):
self.o = min(99, int(self.buffer[1:] or 0)) # set output length
else:
output = self.buffer.decode('ascii')
if len(output) < self.o:
output += '-'*self.o # pad out with hyphens
if self.o:
output = output[:self.o] # truncate to output length
output += '.'
self.buffer = bytearray()
return output
codecEnabled = False
@classmethod
def lookupTestDecoder(cls, name):
if cls.codecEnabled and name == 'test_decoder':
latin1 = codecs.lookup('latin-1')
return codecs.CodecInfo(
name='test_decoder', encode=latin1.encode, decode=None,
incrementalencoder=None,
streamreader=None, streamwriter=None,
incrementaldecoder=cls)
# Register the previous decoder for testing.
# Disabled by default, tests will enable it.
codecs.register(StatefulIncrementalDecoder.lookupTestDecoder)
class StatefulIncrementalDecoderTest(unittest.TestCase):
"""
Make sure the StatefulIncrementalDecoder actually works.
"""
test_cases = [
# I=1, O=1 (fixed-length input == fixed-length output)
(b'abcd', False, 'a.b.c.d.'),
# I=0, O=0 (variable-length input, variable-length output)
(b'oiabcd', True, 'abcd.'),
# I=0, O=0 (should ignore extra periods)
(b'oi...abcd...', True, 'abcd.'),
# I=0, O=6 (variable-length input, fixed-length output)
(b'i.o6.x.xyz.toolongtofit.', False, 'x-----.xyz---.toolon.'),
# I=2, O=6 (fixed-length input < fixed-length output)
(b'i.i2.o6xyz', True, 'xy----.z-----.'),
# I=6, O=3 (fixed-length input > fixed-length output)
(b'i.o3.i6.abcdefghijklmnop', True, 'abc.ghi.mno.'),
# I=0, then 3; O=29, then 15 (with longer output)
(b'i.o29.a.b.cde.o15.abcdefghijabcdefghij.i3.a.b.c.d.ei00k.l.m', True,
'a----------------------------.' +
'b----------------------------.' +
'cde--------------------------.' +
'abcdefghijabcde.' +
'a.b------------.' +
'.c.------------.' +
'd.e------------.' +
'k--------------.' +
'l--------------.' +
'm--------------.')
]
def test_decoder(self):
# Try a few one-shot test cases.
for input, eof, output in self.test_cases:
d = StatefulIncrementalDecoder()
self.assertEqual(d.decode(input, eof), output)
# Also test an unfinished decode, followed by forcing EOF.
d = StatefulIncrementalDecoder()
self.assertEqual(d.decode(b'oiabcd'), '')
self.assertEqual(d.decode(b'', 1), 'abcd.')
class TextIOWrapperTest(unittest.TestCase):
def setUp(self):
self.testdata = b"AAA\r\nBBB\rCCC\r\nDDD\nEEE\r\n"
self.normalized = b"AAA\nBBB\nCCC\nDDD\nEEE\n".decode("ascii")
support.unlink(support.TESTFN)
def tearDown(self):
support.unlink(support.TESTFN)
def test_constructor(self):
r = self.BytesIO(b"\xc3\xa9\n\n")
b = self.BufferedReader(r, 1000)
t = self.TextIOWrapper(b)
t.__init__(b, encoding="latin1", newline="\r\n")
self.assertEqual(t.encoding, "latin1")
self.assertEqual(t.line_buffering, False)
t.__init__(b, encoding="utf8", line_buffering=True)
self.assertEqual(t.encoding, "utf8")
self.assertEqual(t.line_buffering, True)
self.assertEqual("\xe9\n", t.readline())
self.assertRaises(TypeError, t.__init__, b, newline=42)
self.assertRaises(ValueError, t.__init__, b, newline='xyzzy')
def test_detach(self):
r = self.BytesIO()
b = self.BufferedWriter(r)
t = self.TextIOWrapper(b)
self.assertIs(t.detach(), b)
t = self.TextIOWrapper(b, encoding="ascii")
t.write("howdy")
self.assertFalse(r.getvalue())
t.detach()
self.assertEqual(r.getvalue(), b"howdy")
self.assertRaises(ValueError, t.detach)
def test_repr(self):
raw = self.BytesIO("hello".encode("utf-8"))
b = self.BufferedReader(raw)
t = self.TextIOWrapper(b, encoding="utf-8")
modname = self.TextIOWrapper.__module__
self.assertEqual(repr(t),
"<%s.TextIOWrapper encoding='utf-8'>" % modname)
raw.name = "dummy"
self.assertEqual(repr(t),
"<%s.TextIOWrapper name=u'dummy' encoding='utf-8'>" % modname)
raw.name = b"dummy"
self.assertEqual(repr(t),
"<%s.TextIOWrapper name='dummy' encoding='utf-8'>" % modname)
def test_line_buffering(self):
r = self.BytesIO()
b = self.BufferedWriter(r, 1000)
t = self.TextIOWrapper(b, newline="\n", line_buffering=True)
t.write("X")
self.assertEqual(r.getvalue(), b"") # No flush happened
t.write("Y\nZ")
self.assertEqual(r.getvalue(), b"XY\nZ") # All got flushed
t.write("A\rB")
self.assertEqual(r.getvalue(), b"XY\nZA\rB")
def test_encoding(self):
# Check the encoding attribute is always set, and valid
b = self.BytesIO()
t = self.TextIOWrapper(b, encoding="utf8")
self.assertEqual(t.encoding, "utf8")
t = self.TextIOWrapper(b)
self.assertTrue(t.encoding is not None)
codecs.lookup(t.encoding)
def test_encoding_errors_reading(self):
# (1) default
b = self.BytesIO(b"abc\n\xff\n")
t = self.TextIOWrapper(b, encoding="ascii")
self.assertRaises(UnicodeError, t.read)
# (2) explicit strict
b = self.BytesIO(b"abc\n\xff\n")
t = self.TextIOWrapper(b, encoding="ascii", errors="strict")
self.assertRaises(UnicodeError, t.read)
# (3) ignore
b = self.BytesIO(b"abc\n\xff\n")
t = self.TextIOWrapper(b, encoding="ascii", errors="ignore")
self.assertEqual(t.read(), "abc\n\n")
# (4) replace
b = self.BytesIO(b"abc\n\xff\n")
t = self.TextIOWrapper(b, encoding="ascii", errors="replace")
self.assertEqual(t.read(), "abc\n\ufffd\n")
def test_encoding_errors_writing(self):
# (1) default
b = self.BytesIO()
t = self.TextIOWrapper(b, encoding="ascii")
self.assertRaises(UnicodeError, t.write, "\xff")
# (2) explicit strict
b = self.BytesIO()
t = self.TextIOWrapper(b, encoding="ascii", errors="strict")
self.assertRaises(UnicodeError, t.write, "\xff")
# (3) ignore
b = self.BytesIO()
t = self.TextIOWrapper(b, encoding="ascii", errors="ignore",
newline="\n")
t.write("abc\xffdef\n")
t.flush()
self.assertEqual(b.getvalue(), b"abcdef\n")
# (4) replace
b = self.BytesIO()
t = self.TextIOWrapper(b, encoding="ascii", errors="replace",
newline="\n")
t.write("abc\xffdef\n")
t.flush()
self.assertEqual(b.getvalue(), b"abc?def\n")
def test_newlines(self):
input_lines = [ "unix\n", "windows\r\n", "os9\r", "last\n", "nonl" ]
tests = [
[ None, [ 'unix\n', 'windows\n', 'os9\n', 'last\n', 'nonl' ] ],
[ '', input_lines ],
[ '\n', [ "unix\n", "windows\r\n", "os9\rlast\n", "nonl" ] ],
[ '\r\n', [ "unix\nwindows\r\n", "os9\rlast\nnonl" ] ],
[ '\r', [ "unix\nwindows\r", "\nos9\r", "last\nnonl" ] ],
]
encodings = (
'utf-8', 'latin-1',
'utf-16', 'utf-16-le', 'utf-16-be',
'utf-32', 'utf-32-le', 'utf-32-be',
)
# Try a range of buffer sizes to test the case where \r is the last
# character in TextIOWrapper._pending_line.
for encoding in encodings:
# XXX: str.encode() should return bytes
data = bytes(''.join(input_lines).encode(encoding))
for do_reads in (False, True):
for bufsize in range(1, 10):
for newline, exp_lines in tests:
bufio = self.BufferedReader(self.BytesIO(data), bufsize)
textio = self.TextIOWrapper(bufio, newline=newline,
encoding=encoding)
if do_reads:
got_lines = []
while True:
c2 = textio.read(2)
if c2 == '':
break
self.assertEqual(len(c2), 2)
got_lines.append(c2 + textio.readline())
else:
got_lines = list(textio)
for got_line, exp_line in zip(got_lines, exp_lines):
self.assertEqual(got_line, exp_line)
self.assertEqual(len(got_lines), len(exp_lines))
def test_newlines_input(self):
testdata = b"AAA\nBB\x00B\nCCC\rDDD\rEEE\r\nFFF\r\nGGG"
normalized = testdata.replace(b"\r\n", b"\n").replace(b"\r", b"\n")
for newline, expected in [
(None, normalized.decode("ascii").splitlines(True)),
("", testdata.decode("ascii").splitlines(True)),
("\n", ["AAA\n", "BB\x00B\n", "CCC\rDDD\rEEE\r\n", "FFF\r\n", "GGG"]),
("\r\n", ["AAA\nBB\x00B\nCCC\rDDD\rEEE\r\n", "FFF\r\n", "GGG"]),
("\r", ["AAA\nBB\x00B\nCCC\r", "DDD\r", "EEE\r", "\nFFF\r", "\nGGG"]),
]:
buf = self.BytesIO(testdata)
txt = self.TextIOWrapper(buf, encoding="ascii", newline=newline)
self.assertEqual(txt.readlines(), expected)
txt.seek(0)
self.assertEqual(txt.read(), "".join(expected))
def test_newlines_output(self):
testdict = {
"": b"AAA\nBBB\nCCC\nX\rY\r\nZ",
"\n": b"AAA\nBBB\nCCC\nX\rY\r\nZ",
"\r": b"AAA\rBBB\rCCC\rX\rY\r\rZ",
"\r\n": b"AAA\r\nBBB\r\nCCC\r\nX\rY\r\r\nZ",
}
tests = [(None, testdict[os.linesep])] + sorted(testdict.items())
for newline, expected in tests:
buf = self.BytesIO()
txt = self.TextIOWrapper(buf, encoding="ascii", newline=newline)
txt.write("AAA\nB")
txt.write("BB\nCCC\n")
txt.write("X\rY\r\nZ")
txt.flush()
self.assertEqual(buf.closed, False)
self.assertEqual(buf.getvalue(), expected)
def test_destructor(self):
l = []
base = self.BytesIO
class MyBytesIO(base):
def close(self):
l.append(self.getvalue())
base.close(self)
b = MyBytesIO()
t = self.TextIOWrapper(b, encoding="ascii")
t.write("abc")
del t
support.gc_collect()
self.assertEqual([b"abc"], l)
def test_override_destructor(self):
record = []
class MyTextIO(self.TextIOWrapper):
def __del__(self):
record.append(1)
try:
f = super(MyTextIO, self).__del__
except AttributeError:
pass
else:
f()
def close(self):
record.append(2)
super(MyTextIO, self).close()
def flush(self):
record.append(3)
super(MyTextIO, self).flush()
b = self.BytesIO()
t = MyTextIO(b, encoding="ascii")
del t
support.gc_collect()
self.assertEqual(record, [1, 2, 3])
def test_error_through_destructor(self):
# Test that the exception state is not modified by a destructor,
# even if close() fails.
rawio = self.CloseFailureIO()
def f():
self.TextIOWrapper(rawio).xyzzy
with support.captured_output("stderr") as s:
self.assertRaises(AttributeError, f)
s = s.getvalue().strip()
if s:
# The destructor *may* have printed an unraisable error, check it
self.assertEqual(len(s.splitlines()), 1)
self.assertTrue(s.startswith("Exception IOError: "), s)
self.assertTrue(s.endswith(" ignored"), s)
# Systematic tests of the text I/O API
def test_basic_io(self):
for chunksize in (1, 2, 3, 4, 5, 15, 16, 17, 31, 32, 33, 63, 64, 65):
for enc in "ascii", "latin1", "utf8" :# , "utf-16-be", "utf-16-le":
f = self.open(support.TESTFN, "w+", encoding=enc)
f._CHUNK_SIZE = chunksize
self.assertEqual(f.write("abc"), 3)
f.close()
f = self.open(support.TESTFN, "r+", encoding=enc)
f._CHUNK_SIZE = chunksize
self.assertEqual(f.tell(), 0)
self.assertEqual(f.read(), "abc")
cookie = f.tell()
self.assertEqual(f.seek(0), 0)
self.assertEqual(f.read(None), "abc")
f.seek(0)
self.assertEqual(f.read(2), "ab")
self.assertEqual(f.read(1), "c")
self.assertEqual(f.read(1), "")
self.assertEqual(f.read(), "")
self.assertEqual(f.tell(), cookie)
self.assertEqual(f.seek(0), 0)
self.assertEqual(f.seek(0, 2), cookie)
self.assertEqual(f.write("def"), 3)
self.assertEqual(f.seek(cookie), cookie)
self.assertEqual(f.read(), "def")
if enc.startswith("utf"):
self.multi_line_test(f, enc)
f.close()
def multi_line_test(self, f, enc):
f.seek(0)
f.truncate()
sample = "s\xff\u0fff\uffff"
wlines = []
for size in (0, 1, 2, 3, 4, 5, 30, 31, 32, 33, 62, 63, 64, 65, 1000):
chars = []
for i in range(size):
chars.append(sample[i % len(sample)])
line = "".join(chars) + "\n"
wlines.append((f.tell(), line))
f.write(line)
f.seek(0)
rlines = []
while True:
pos = f.tell()
line = f.readline()
if not line:
break
rlines.append((pos, line))
self.assertEqual(rlines, wlines)
def test_telling(self):
f = self.open(support.TESTFN, "w+", encoding="utf8")
p0 = f.tell()
f.write("\xff\n")
p1 = f.tell()
f.write("\xff\n")
p2 = f.tell()
f.seek(0)
self.assertEqual(f.tell(), p0)
self.assertEqual(f.readline(), "\xff\n")
self.assertEqual(f.tell(), p1)
self.assertEqual(f.readline(), "\xff\n")
self.assertEqual(f.tell(), p2)
f.seek(0)
for line in f:
self.assertEqual(line, "\xff\n")
self.assertRaises(IOError, f.tell)
self.assertEqual(f.tell(), p2)
f.close()
def test_seeking(self):
chunk_size = _default_chunk_size()
prefix_size = chunk_size - 2
u_prefix = "a" * prefix_size
prefix = bytes(u_prefix.encode("utf-8"))
self.assertEqual(len(u_prefix), len(prefix))
u_suffix = "\u8888\n"
suffix = bytes(u_suffix.encode("utf-8"))
line = prefix + suffix
f = self.open(support.TESTFN, "wb")
f.write(line*2)
f.close()
f = self.open(support.TESTFN, "r", encoding="utf-8")
s = f.read(prefix_size)
self.assertEqual(s, prefix.decode("ascii"))
self.assertEqual(f.tell(), prefix_size)
self.assertEqual(f.readline(), u_suffix)
def test_seeking_too(self):
# Regression test for a specific bug
data = b'\xe0\xbf\xbf\n'
f = self.open(support.TESTFN, "wb")
f.write(data)
f.close()
f = self.open(support.TESTFN, "r", encoding="utf-8")
f._CHUNK_SIZE # Just test that it exists
f._CHUNK_SIZE = 2
f.readline()
f.tell()
def test_seek_and_tell(self):
#Test seek/tell using the StatefulIncrementalDecoder.
# Make test faster by doing smaller seeks
CHUNK_SIZE = 128
def test_seek_and_tell_with_data(data, min_pos=0):
"""Tell/seek to various points within a data stream and ensure
that the decoded data returned by read() is consistent."""
f = self.open(support.TESTFN, 'wb')
f.write(data)
f.close()
f = self.open(support.TESTFN, encoding='test_decoder')
f._CHUNK_SIZE = CHUNK_SIZE
decoded = f.read()
f.close()
for i in range(min_pos, len(decoded) + 1): # seek positions
for j in [1, 5, len(decoded) - i]: # read lengths
f = self.open(support.TESTFN, encoding='test_decoder')
self.assertEqual(f.read(i), decoded[:i])
cookie = f.tell()
self.assertEqual(f.read(j), decoded[i:i + j])
f.seek(cookie)
self.assertEqual(f.read(), decoded[i:])
f.close()
# Enable the test decoder.
StatefulIncrementalDecoder.codecEnabled = 1
# Run the tests.
try:
# Try each test case.
for input, _, _ in StatefulIncrementalDecoderTest.test_cases:
test_seek_and_tell_with_data(input)
# Position each test case so that it crosses a chunk boundary.
for input, _, _ in StatefulIncrementalDecoderTest.test_cases:
offset = CHUNK_SIZE - len(input)//2
prefix = b'.'*offset
# Don't bother seeking into the prefix (takes too long).
min_pos = offset*2
test_seek_and_tell_with_data(prefix + input, min_pos)
# Ensure our test decoder won't interfere with subsequent tests.
finally:
StatefulIncrementalDecoder.codecEnabled = 0
def test_encoded_writes(self):
data = "1234567890"
tests = ("utf-16",
"utf-16-le",
"utf-16-be",
"utf-32",
"utf-32-le",
"utf-32-be")
for encoding in tests:
buf = self.BytesIO()
f = self.TextIOWrapper(buf, encoding=encoding)
# Check if the BOM is written only once (see issue1753).
f.write(data)
f.write(data)
f.seek(0)
self.assertEqual(f.read(), data * 2)
f.seek(0)
self.assertEqual(f.read(), data * 2)
self.assertEqual(buf.getvalue(), (data * 2).encode(encoding))
def test_unreadable(self):
class UnReadable(self.BytesIO):
def readable(self):
return False
txt = self.TextIOWrapper(UnReadable())
self.assertRaises(IOError, txt.read)
def test_read_one_by_one(self):
txt = self.TextIOWrapper(self.BytesIO(b"AA\r\nBB"))
reads = ""
while True:
c = txt.read(1)
if not c:
break
reads += c
self.assertEqual(reads, "AA\nBB")
def test_readlines(self):
txt = self.TextIOWrapper(self.BytesIO(b"AA\nBB\nCC"))
self.assertEqual(txt.readlines(), ["AA\n", "BB\n", "CC"])
txt.seek(0)
self.assertEqual(txt.readlines(None), ["AA\n", "BB\n", "CC"])
txt.seek(0)
self.assertEqual(txt.readlines(5), ["AA\n", "BB\n"])
# read in amounts equal to TextIOWrapper._CHUNK_SIZE which is 128.
def test_read_by_chunk(self):
# make sure "\r\n" straddles 128 char boundary.
txt = self.TextIOWrapper(self.BytesIO(b"A" * 127 + b"\r\nB"))
reads = ""
while True:
c = txt.read(128)
if not c:
break
reads += c
self.assertEqual(reads, "A"*127+"\nB")
def test_writelines(self):
l = ['ab', 'cd', 'ef']
buf = self.BytesIO()
txt = self.TextIOWrapper(buf)
txt.writelines(l)
txt.flush()
self.assertEqual(buf.getvalue(), b'abcdef')
def test_writelines_userlist(self):
l = UserList(['ab', 'cd', 'ef'])
buf = self.BytesIO()
txt = self.TextIOWrapper(buf)
txt.writelines(l)
txt.flush()
self.assertEqual(buf.getvalue(), b'abcdef')
def test_writelines_error(self):
txt = self.TextIOWrapper(self.BytesIO())
self.assertRaises(TypeError, txt.writelines, [1, 2, 3])
self.assertRaises(TypeError, txt.writelines, None)
self.assertRaises(TypeError, txt.writelines, b'abc')
def test_issue1395_1(self):
txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding="ascii")
# read one char at a time
reads = ""
while True:
c = txt.read(1)
if not c:
break
reads += c
self.assertEqual(reads, self.normalized)
def test_issue1395_2(self):
txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding="ascii")
txt._CHUNK_SIZE = 4
reads = ""
while True:
c = txt.read(4)
if not c:
break
reads += c
self.assertEqual(reads, self.normalized)
def test_issue1395_3(self):
txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding="ascii")
txt._CHUNK_SIZE = 4
reads = txt.read(4)
reads += txt.read(4)
reads += txt.readline()
reads += txt.readline()
reads += txt.readline()
self.assertEqual(reads, self.normalized)
def test_issue1395_4(self):
txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding="ascii")
txt._CHUNK_SIZE = 4
reads = txt.read(4)
reads += txt.read()
self.assertEqual(reads, self.normalized)
def test_issue1395_5(self):
txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding="ascii")
txt._CHUNK_SIZE = 4
reads = txt.read(4)
pos = txt.tell()
txt.seek(0)
txt.seek(pos)
self.assertEqual(txt.read(4), "BBB\n")
def test_issue2282(self):
buffer = self.BytesIO(self.testdata)
txt = self.TextIOWrapper(buffer, encoding="ascii")
self.assertEqual(buffer.seekable(), txt.seekable())
def test_append_bom(self):
# The BOM is not written again when appending to a non-empty file
filename = support.TESTFN
for charset in ('utf-8-sig', 'utf-16', 'utf-32'):
with self.open(filename, 'w', encoding=charset) as f:
f.write('aaa')
pos = f.tell()
with self.open(filename, 'rb') as f:
self.assertEqual(f.read(), 'aaa'.encode(charset))
with self.open(filename, 'a', encoding=charset) as f:
f.write('xxx')
with self.open(filename, 'rb') as f:
self.assertEqual(f.read(), 'aaaxxx'.encode(charset))
def test_seek_bom(self):
# Same test, but when seeking manually
filename = support.TESTFN
for charset in ('utf-8-sig', 'utf-16', 'utf-32'):
with self.open(filename, 'w', encoding=charset) as f:
f.write('aaa')
pos = f.tell()
with self.open(filename, 'r+', encoding=charset) as f:
f.seek(pos)
f.write('zzz')
f.seek(0)
f.write('bbb')
with self.open(filename, 'rb') as f:
self.assertEqual(f.read(), 'bbbzzz'.encode(charset))
def test_errors_property(self):
with self.open(support.TESTFN, "w") as f:
self.assertEqual(f.errors, "strict")
with self.open(support.TESTFN, "w", errors="replace") as f:
self.assertEqual(f.errors, "replace")
@unittest.skipUnless(threading, 'Threading required for this test.')
def test_threads_write(self):
# Issue6750: concurrent writes could duplicate data
event = threading.Event()
with self.open(support.TESTFN, "w", buffering=1) as f:
def run(n):
text = "Thread%03d\n" % n
event.wait()
f.write(text)
threads = [threading.Thread(target=lambda n=x: run(n))
for x in range(20)]
for t in threads:
t.start()
time.sleep(0.02)
event.set()
for t in threads:
t.join()
with self.open(support.TESTFN) as f:
content = f.read()
for n in range(20):
self.assertEqual(content.count("Thread%03d\n" % n), 1)
def test_flush_error_on_close(self):
txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding="ascii")
def bad_flush():
raise IOError()
txt.flush = bad_flush
self.assertRaises(IOError, txt.close) # exception not swallowed
self.assertTrue(txt.closed)
def test_multi_close(self):
txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding="ascii")
txt.close()
txt.close()
txt.close()
self.assertRaises(ValueError, txt.flush)
def test_readonly_attributes(self):
txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding="ascii")
buf = self.BytesIO(self.testdata)
with self.assertRaises((AttributeError, TypeError)):
txt.buffer = buf
def test_read_nonbytes(self):
# Issue #17106
# Crash when underlying read() returns non-bytes
class NonbytesStream(self.StringIO):
read1 = self.StringIO.read
t = self.TextIOWrapper(NonbytesStream('a'))
with self.maybeRaises(TypeError):
t.read(1)
t = self.TextIOWrapper(NonbytesStream('a'))
with self.maybeRaises(TypeError):
t.readline()
t = self.TextIOWrapper(NonbytesStream('a'))
self.assertEqual(t.read(), u'a')
def test_illegal_decoder(self):
# Issue #17106
# Crash when decoder returns non-string
t = self.TextIOWrapper(self.BytesIO(b'aaaaaa'), newline='\n',
encoding='quopri_codec')
with self.maybeRaises(TypeError):
t.read(1)
t = self.TextIOWrapper(self.BytesIO(b'aaaaaa'), newline='\n',
encoding='quopri_codec')
with self.maybeRaises(TypeError):
t.readline()
t = self.TextIOWrapper(self.BytesIO(b'aaaaaa'), newline='\n',
encoding='quopri_codec')
with self.maybeRaises(TypeError):
t.read()
class CTextIOWrapperTest(TextIOWrapperTest):
def test_initialization(self):
r = self.BytesIO(b"\xc3\xa9\n\n")
b = self.BufferedReader(r, 1000)
t = self.TextIOWrapper(b)
self.assertRaises(TypeError, t.__init__, b, newline=42)
self.assertRaises(ValueError, t.read)
self.assertRaises(ValueError, t.__init__, b, newline='xyzzy')
self.assertRaises(ValueError, t.read)
def test_garbage_collection(self):
# C TextIOWrapper objects are collected, and collecting them flushes
# all data to disk.
# The Python version has __del__, so it ends in gc.garbage instead.
rawio = io.FileIO(support.TESTFN, "wb")
b = self.BufferedWriter(rawio)
t = self.TextIOWrapper(b, encoding="ascii")
t.write("456def")
t.x = t
wr = weakref.ref(t)
del t
support.gc_collect()
self.assertTrue(wr() is None, wr)
with self.open(support.TESTFN, "rb") as f:
self.assertEqual(f.read(), b"456def")
def test_rwpair_cleared_before_textio(self):
# Issue 13070: TextIOWrapper's finalization would crash when called
# after the reference to the underlying BufferedRWPair's writer got
# cleared by the GC.
for i in range(1000):
b1 = self.BufferedRWPair(self.MockRawIO(), self.MockRawIO())
t1 = self.TextIOWrapper(b1, encoding="ascii")
b2 = self.BufferedRWPair(self.MockRawIO(), self.MockRawIO())
t2 = self.TextIOWrapper(b2, encoding="ascii")
# circular references
t1.buddy = t2
t2.buddy = t1
support.gc_collect()
maybeRaises = unittest.TestCase.assertRaises
class PyTextIOWrapperTest(TextIOWrapperTest):
@contextlib.contextmanager
def maybeRaises(self, *args, **kwds):
yield
class IncrementalNewlineDecoderTest(unittest.TestCase):
def check_newline_decoding_utf8(self, decoder):
# UTF-8 specific tests for a newline decoder
def _check_decode(b, s, **kwargs):
# We exercise getstate() / setstate() as well as decode()
state = decoder.getstate()
self.assertEqual(decoder.decode(b, **kwargs), s)
decoder.setstate(state)
self.assertEqual(decoder.decode(b, **kwargs), s)
_check_decode(b'\xe8\xa2\x88', "\u8888")
_check_decode(b'\xe8', "")
_check_decode(b'\xa2', "")
_check_decode(b'\x88', "\u8888")
_check_decode(b'\xe8', "")
_check_decode(b'\xa2', "")
_check_decode(b'\x88', "\u8888")
_check_decode(b'\xe8', "")
self.assertRaises(UnicodeDecodeError, decoder.decode, b'', final=True)
decoder.reset()
_check_decode(b'\n', "\n")
_check_decode(b'\r', "")
_check_decode(b'', "\n", final=True)
_check_decode(b'\r', "\n", final=True)
_check_decode(b'\r', "")
_check_decode(b'a', "\na")
_check_decode(b'\r\r\n', "\n\n")
_check_decode(b'\r', "")
_check_decode(b'\r', "\n")
_check_decode(b'\na', "\na")
_check_decode(b'\xe8\xa2\x88\r\n', "\u8888\n")
_check_decode(b'\xe8\xa2\x88', "\u8888")
_check_decode(b'\n', "\n")
_check_decode(b'\xe8\xa2\x88\r', "\u8888")
_check_decode(b'\n', "\n")
def check_newline_decoding(self, decoder, encoding):
result = []
if encoding is not None:
encoder = codecs.getincrementalencoder(encoding)()
def _decode_bytewise(s):
# Decode one byte at a time
for b in encoder.encode(s):
result.append(decoder.decode(b))
else:
encoder = None
def _decode_bytewise(s):
# Decode one char at a time
for c in s:
result.append(decoder.decode(c))
self.assertEqual(decoder.newlines, None)
_decode_bytewise("abc\n\r")
self.assertEqual(decoder.newlines, '\n')
_decode_bytewise("\nabc")
self.assertEqual(decoder.newlines, ('\n', '\r\n'))
_decode_bytewise("abc\r")
self.assertEqual(decoder.newlines, ('\n', '\r\n'))
_decode_bytewise("abc")
self.assertEqual(decoder.newlines, ('\r', '\n', '\r\n'))
_decode_bytewise("abc\r")
self.assertEqual("".join(result), "abc\n\nabcabc\nabcabc")
decoder.reset()
input = "abc"
if encoder is not None:
encoder.reset()
input = encoder.encode(input)
self.assertEqual(decoder.decode(input), "abc")
self.assertEqual(decoder.newlines, None)
def test_newline_decoder(self):
encodings = (
# None meaning the IncrementalNewlineDecoder takes unicode input
# rather than bytes input
None, 'utf-8', 'latin-1',
'utf-16', 'utf-16-le', 'utf-16-be',
'utf-32', 'utf-32-le', 'utf-32-be',
)
for enc in encodings:
decoder = enc and codecs.getincrementaldecoder(enc)()
decoder = self.IncrementalNewlineDecoder(decoder, translate=True)
self.check_newline_decoding(decoder, enc)
decoder = codecs.getincrementaldecoder("utf-8")()
decoder = self.IncrementalNewlineDecoder(decoder, translate=True)
self.check_newline_decoding_utf8(decoder)
def test_newline_bytes(self):
# Issue 5433: Excessive optimization in IncrementalNewlineDecoder
def _check(dec):
self.assertEqual(dec.newlines, None)
self.assertEqual(dec.decode("\u0D00"), "\u0D00")
self.assertEqual(dec.newlines, None)
self.assertEqual(dec.decode("\u0A00"), "\u0A00")
self.assertEqual(dec.newlines, None)
dec = self.IncrementalNewlineDecoder(None, translate=False)
_check(dec)
dec = self.IncrementalNewlineDecoder(None, translate=True)
_check(dec)
class CIncrementalNewlineDecoderTest(IncrementalNewlineDecoderTest):
pass
class PyIncrementalNewlineDecoderTest(IncrementalNewlineDecoderTest):
pass
# XXX Tests for open()
class MiscIOTest(unittest.TestCase):
def tearDown(self):
support.unlink(support.TESTFN)
def test___all__(self):
for name in self.io.__all__:
obj = getattr(self.io, name, None)
self.assertTrue(obj is not None, name)
if name == "open":
continue
elif "error" in name.lower() or name == "UnsupportedOperation":
self.assertTrue(issubclass(obj, Exception), name)
elif not name.startswith("SEEK_"):
self.assertTrue(issubclass(obj, self.IOBase))
def test_attributes(self):
f = self.open(support.TESTFN, "wb", buffering=0)
self.assertEqual(f.mode, "wb")
f.close()
f = self.open(support.TESTFN, "U")
self.assertEqual(f.name, support.TESTFN)
self.assertEqual(f.buffer.name, support.TESTFN)
self.assertEqual(f.buffer.raw.name, support.TESTFN)
self.assertEqual(f.mode, "U")
self.assertEqual(f.buffer.mode, "rb")
self.assertEqual(f.buffer.raw.mode, "rb")
f.close()
f = self.open(support.TESTFN, "w+")
self.assertEqual(f.mode, "w+")
self.assertEqual(f.buffer.mode, "rb+") # Does it really matter?
self.assertEqual(f.buffer.raw.mode, "rb+")
g = self.open(f.fileno(), "wb", closefd=False)
self.assertEqual(g.mode, "wb")
self.assertEqual(g.raw.mode, "wb")
self.assertEqual(g.name, f.fileno())
self.assertEqual(g.raw.name, f.fileno())
f.close()
g.close()
def test_io_after_close(self):
for kwargs in [
{"mode": "w"},
{"mode": "wb"},
{"mode": "w", "buffering": 1},
{"mode": "w", "buffering": 2},
{"mode": "wb", "buffering": 0},
{"mode": "r"},
{"mode": "rb"},
{"mode": "r", "buffering": 1},
{"mode": "r", "buffering": 2},
{"mode": "rb", "buffering": 0},
{"mode": "w+"},
{"mode": "w+b"},
{"mode": "w+", "buffering": 1},
{"mode": "w+", "buffering": 2},
{"mode": "w+b", "buffering": 0},
]:
f = self.open(support.TESTFN, **kwargs)
f.close()
self.assertRaises(ValueError, f.flush)
self.assertRaises(ValueError, f.fileno)
self.assertRaises(ValueError, f.isatty)
self.assertRaises(ValueError, f.__iter__)
if hasattr(f, "peek"):
self.assertRaises(ValueError, f.peek, 1)
self.assertRaises(ValueError, f.read)
if hasattr(f, "read1"):
self.assertRaises(ValueError, f.read1, 1024)
if hasattr(f, "readall"):
self.assertRaises(ValueError, f.readall)
if hasattr(f, "readinto"):
self.assertRaises(ValueError, f.readinto, bytearray(1024))
self.assertRaises(ValueError, f.readline)
self.assertRaises(ValueError, f.readlines)
self.assertRaises(ValueError, f.seek, 0)
self.assertRaises(ValueError, f.tell)
self.assertRaises(ValueError, f.truncate)
self.assertRaises(ValueError, f.write,
b"" if "b" in kwargs['mode'] else "")
self.assertRaises(ValueError, f.writelines, [])
self.assertRaises(ValueError, next, f)
def test_blockingioerror(self):
# Various BlockingIOError issues
self.assertRaises(TypeError, self.BlockingIOError)
self.assertRaises(TypeError, self.BlockingIOError, 1)
self.assertRaises(TypeError, self.BlockingIOError, 1, 2, 3, 4)
self.assertRaises(TypeError, self.BlockingIOError, 1, "", None)
b = self.BlockingIOError(1, "")
self.assertEqual(b.characters_written, 0)
class C(unicode):
pass
c = C("")
b = self.BlockingIOError(1, c)
c.b = b
b.c = c
wr = weakref.ref(c)
del c, b
support.gc_collect()
self.assertTrue(wr() is None, wr)
def test_abcs(self):
# Test the visible base classes are ABCs.
self.assertIsInstance(self.IOBase, abc.ABCMeta)
self.assertIsInstance(self.RawIOBase, abc.ABCMeta)
self.assertIsInstance(self.BufferedIOBase, abc.ABCMeta)
self.assertIsInstance(self.TextIOBase, abc.ABCMeta)
def _check_abc_inheritance(self, abcmodule):
with self.open(support.TESTFN, "wb", buffering=0) as f:
self.assertIsInstance(f, abcmodule.IOBase)
self.assertIsInstance(f, abcmodule.RawIOBase)
self.assertNotIsInstance(f, abcmodule.BufferedIOBase)
self.assertNotIsInstance(f, abcmodule.TextIOBase)
with self.open(support.TESTFN, "wb") as f:
self.assertIsInstance(f, abcmodule.IOBase)
self.assertNotIsInstance(f, abcmodule.RawIOBase)
self.assertIsInstance(f, abcmodule.BufferedIOBase)
self.assertNotIsInstance(f, abcmodule.TextIOBase)
with self.open(support.TESTFN, "w") as f:
self.assertIsInstance(f, abcmodule.IOBase)
self.assertNotIsInstance(f, abcmodule.RawIOBase)
self.assertNotIsInstance(f, abcmodule.BufferedIOBase)
self.assertIsInstance(f, abcmodule.TextIOBase)
def test_abc_inheritance(self):
# Test implementations inherit from their respective ABCs
self._check_abc_inheritance(self)
def test_abc_inheritance_official(self):
# Test implementations inherit from the official ABCs of the
# baseline "io" module.
self._check_abc_inheritance(io)
@unittest.skipUnless(fcntl, 'fcntl required for this test')
def test_nonblock_pipe_write_bigbuf(self):
self._test_nonblock_pipe_write(16*1024)
@unittest.skipUnless(fcntl, 'fcntl required for this test')
def test_nonblock_pipe_write_smallbuf(self):
self._test_nonblock_pipe_write(1024)
def _set_non_blocking(self, fd):
flags = fcntl.fcntl(fd, fcntl.F_GETFL)
self.assertNotEqual(flags, -1)
res = fcntl.fcntl(fd, fcntl.F_SETFL, flags | os.O_NONBLOCK)
self.assertEqual(res, 0)
def _test_nonblock_pipe_write(self, bufsize):
sent = []
received = []
r, w = os.pipe()
self._set_non_blocking(r)
self._set_non_blocking(w)
# To exercise all code paths in the C implementation we need
# to play with buffer sizes. For instance, if we choose a
# buffer size less than or equal to _PIPE_BUF (4096 on Linux)
# then we will never get a partial write of the buffer.
rf = self.open(r, mode='rb', closefd=True, buffering=bufsize)
wf = self.open(w, mode='wb', closefd=True, buffering=bufsize)
with rf, wf:
for N in 9999, 73, 7574:
try:
i = 0
while True:
msg = bytes([i % 26 + 97]) * N
sent.append(msg)
wf.write(msg)
i += 1
except self.BlockingIOError as e:
self.assertEqual(e.args[0], errno.EAGAIN)
sent[-1] = sent[-1][:e.characters_written]
received.append(rf.read())
msg = b'BLOCKED'
wf.write(msg)
sent.append(msg)
while True:
try:
wf.flush()
break
except self.BlockingIOError as e:
self.assertEqual(e.args[0], errno.EAGAIN)
self.assertEqual(e.characters_written, 0)
received.append(rf.read())
received += iter(rf.read, None)
sent, received = b''.join(sent), b''.join(received)
self.assertTrue(sent == received)
self.assertTrue(wf.closed)
self.assertTrue(rf.closed)
class CMiscIOTest(MiscIOTest):
io = io
class PyMiscIOTest(MiscIOTest):
io = pyio
@unittest.skipIf(os.name == 'nt', 'POSIX signals required for this test.')
class SignalsTest(unittest.TestCase):
def setUp(self):
self.oldalrm = signal.signal(signal.SIGALRM, self.alarm_interrupt)
def tearDown(self):
signal.signal(signal.SIGALRM, self.oldalrm)
def alarm_interrupt(self, sig, frame):
1 // 0
@unittest.skipUnless(threading, 'Threading required for this test.')
@unittest.skipIf(sys.platform in ('freebsd5', 'freebsd6', 'freebsd7'),
'issue #12429: skip test on FreeBSD <= 7')
def check_interrupted_write(self, item, bytes, **fdopen_kwargs):
"""Check that a partial write, when it gets interrupted, properly
invokes the signal handler, and bubbles up the exception raised
in the latter."""
# XXX This test has three flaws that appear when objects are
# XXX not reference counted.
# - if wio.write() happens to trigger a garbage collection,
# the signal exception may be raised when some __del__
# method is running; it will not reach the assertRaises()
# call.
# - more subtle, if the wio object is not destroyed at once
# and survives this function, the next opened file is likely
# to have the same fileno (since the file descriptor was
# actively closed). When wio.__del__ is finally called, it
# will close the other's test file... To trigger this with
# CPython, try adding "global wio" in this function.
# - This happens only for streams created by the _pyio module,
# because a wio.close() that fails still considers that the
# file needs to be closed again. You can try adding an
# "assert wio.closed" at the end of the function.
# Fortunately, a little gc.collect() seems to be enough to
# work around all these issues.
support.gc_collect()
read_results = []
def _read():
s = os.read(r, 1)
read_results.append(s)
t = threading.Thread(target=_read)
t.daemon = True
r, w = os.pipe()
try:
wio = self.io.open(w, **fdopen_kwargs)
t.start()
signal.alarm(1)
# Fill the pipe enough that the write will be blocking.
# It will be interrupted by the timer armed above. Since the
# other thread has read one byte, the low-level write will
# return with a successful (partial) result rather than an EINTR.
# The buffered IO layer must check for pending signal
# handlers, which in this case will invoke alarm_interrupt().
self.assertRaises(ZeroDivisionError,
wio.write, item * (support.PIPE_MAX_SIZE // len(item) + 1))
t.join()
# We got one byte, get another one and check that it isn't a
# repeat of the first one.
read_results.append(os.read(r, 1))
self.assertEqual(read_results, [bytes[0:1], bytes[1:2]])
finally:
os.close(w)
os.close(r)
# This is deliberate. If we didn't close the file descriptor
# before closing wio, wio would try to flush its internal
# buffer, and block again.
try:
wio.close()
except IOError as e:
if e.errno != errno.EBADF:
raise
def test_interrupted_write_unbuffered(self):
self.check_interrupted_write(b"xy", b"xy", mode="wb", buffering=0)
def test_interrupted_write_buffered(self):
self.check_interrupted_write(b"xy", b"xy", mode="wb")
def test_interrupted_write_text(self):
self.check_interrupted_write("xy", b"xy", mode="w", encoding="ascii")
def check_reentrant_write(self, data, **fdopen_kwargs):
def on_alarm(*args):
# Will be called reentrantly from the same thread
wio.write(data)
1//0
signal.signal(signal.SIGALRM, on_alarm)
r, w = os.pipe()
wio = self.io.open(w, **fdopen_kwargs)
try:
signal.alarm(1)
# Either the reentrant call to wio.write() fails with RuntimeError,
# or the signal handler raises ZeroDivisionError.
with self.assertRaises((ZeroDivisionError, RuntimeError)) as cm:
while 1:
for i in range(100):
wio.write(data)
wio.flush()
# Make sure the buffer doesn't fill up and block further writes
os.read(r, len(data) * 100)
exc = cm.exception
if isinstance(exc, RuntimeError):
self.assertTrue(str(exc).startswith("reentrant call"), str(exc))
finally:
wio.close()
os.close(r)
def test_reentrant_write_buffered(self):
self.check_reentrant_write(b"xy", mode="wb")
def test_reentrant_write_text(self):
self.check_reentrant_write("xy", mode="w", encoding="ascii")
def check_interrupted_read_retry(self, decode, **fdopen_kwargs):
"""Check that a buffered read, when it gets interrupted (either
returning a partial result or EINTR), properly invokes the signal
handler and retries if the latter returned successfully."""
r, w = os.pipe()
fdopen_kwargs["closefd"] = False
def alarm_handler(sig, frame):
os.write(w, b"bar")
signal.signal(signal.SIGALRM, alarm_handler)
try:
rio = self.io.open(r, **fdopen_kwargs)
os.write(w, b"foo")
signal.alarm(1)
# Expected behaviour:
# - first raw read() returns partial b"foo"
# - second raw read() returns EINTR
# - third raw read() returns b"bar"
self.assertEqual(decode(rio.read(6)), "foobar")
finally:
rio.close()
os.close(w)
os.close(r)
def test_interrupted_read_retry_buffered(self):
self.check_interrupted_read_retry(lambda x: x.decode('latin1'),
mode="rb")
def test_interrupted_read_retry_text(self):
self.check_interrupted_read_retry(lambda x: x,
mode="r")
@unittest.skipUnless(threading, 'Threading required for this test.')
def check_interrupted_write_retry(self, item, **fdopen_kwargs):
"""Check that a buffered write, when it gets interrupted (either
returning a partial result or EINTR), properly invokes the signal
handler and retries if the latter returned successfully."""
select = support.import_module("select")
# A quantity that exceeds the buffer size of an anonymous pipe's
# write end.
N = support.PIPE_MAX_SIZE
r, w = os.pipe()
fdopen_kwargs["closefd"] = False
# We need a separate thread to read from the pipe and allow the
# write() to finish. This thread is started after the SIGALRM is
# received (forcing a first EINTR in write()).
read_results = []
write_finished = False
def _read():
while not write_finished:
while r in select.select([r], [], [], 1.0)[0]:
s = os.read(r, 1024)
read_results.append(s)
t = threading.Thread(target=_read)
t.daemon = True
def alarm1(sig, frame):
signal.signal(signal.SIGALRM, alarm2)
signal.alarm(1)
def alarm2(sig, frame):
t.start()
signal.signal(signal.SIGALRM, alarm1)
try:
wio = self.io.open(w, **fdopen_kwargs)
signal.alarm(1)
# Expected behaviour:
# - first raw write() is partial (because of the limited pipe buffer
# and the first alarm)
# - second raw write() returns EINTR (because of the second alarm)
# - subsequent write()s are successful (either partial or complete)
self.assertEqual(N, wio.write(item * N))
wio.flush()
write_finished = True
t.join()
self.assertEqual(N, sum(len(x) for x in read_results))
finally:
write_finished = True
os.close(w)
os.close(r)
# This is deliberate. If we didn't close the file descriptor
# before closing wio, wio would try to flush its internal
# buffer, and could block (in case of failure).
try:
wio.close()
except IOError as e:
if e.errno != errno.EBADF:
raise
def test_interrupted_write_retry_buffered(self):
self.check_interrupted_write_retry(b"x", mode="wb")
def test_interrupted_write_retry_text(self):
self.check_interrupted_write_retry("x", mode="w", encoding="latin1")
class CSignalsTest(SignalsTest):
io = io
class PySignalsTest(SignalsTest):
io = pyio
# Handling reentrancy issues would slow down _pyio even more, so the
# tests are disabled.
test_reentrant_write_buffered = None
test_reentrant_write_text = None
def test_main():
tests = (CIOTest, PyIOTest,
CBufferedReaderTest, PyBufferedReaderTest,
CBufferedWriterTest, PyBufferedWriterTest,
CBufferedRWPairTest, PyBufferedRWPairTest,
CBufferedRandomTest, PyBufferedRandomTest,
StatefulIncrementalDecoderTest,
CIncrementalNewlineDecoderTest, PyIncrementalNewlineDecoderTest,
CTextIOWrapperTest, PyTextIOWrapperTest,
CMiscIOTest, PyMiscIOTest,
CSignalsTest, PySignalsTest,
)
# Put the namespaces of the IO module we are testing and some useful mock
# classes in the __dict__ of each test.
mocks = (MockRawIO, MisbehavedRawIO, MockFileIO, CloseFailureIO,
MockNonBlockWriterIO, MockRawIOWithoutRead)
all_members = io.__all__ + ["IncrementalNewlineDecoder"]
c_io_ns = dict((name, getattr(io, name)) for name in all_members)
py_io_ns = dict((name, getattr(pyio, name)) for name in all_members)
globs = globals()
c_io_ns.update((x.__name__, globs["C" + x.__name__]) for x in mocks)
py_io_ns.update((x.__name__, globs["Py" + x.__name__]) for x in mocks)
# Avoid turning open into a bound method.
py_io_ns["open"] = pyio.OpenWrapper
for test in tests:
if test.__name__.startswith("C"):
for name, obj in c_io_ns.items():
setattr(test, name, obj)
elif test.__name__.startswith("Py"):
for name, obj in py_io_ns.items():
setattr(test, name, obj)
support.run_unittest(*tests)
if __name__ == "__main__":
test_main()
|
the-stack_0_23819 | from __future__ import absolute_import, unicode_literals
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.10/howto/deployment/checklist/
PROJECT_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
BASE_DIR = os.path.dirname(PROJECT_DIR)
# Base URL to use when referring to full URLs within the Wagtail admin backend -
# e.g. in notification emails. Don't include '/admin' or a trailing slash
BASE_URL = 'http://localhost:8019'
# Django base settings.
# https://docs.djangoproject.com/en/2.2/ref/settings/
# Check if a setting fits into a more specific partial before adding it here.
INSTALLED_APPS = [
'wagtail_advanced_form_builder',
'build_test',
'wagtailextraicons',
'wagtail.contrib.forms',
'wagtail.contrib.redirects',
'wagtail.embeds',
'wagtail.sites',
'wagtail.users',
'wagtail.snippets',
'wagtail.documents',
'wagtail.images',
'wagtail.search',
'wagtail.admin',
'wagtail.core',
'wagtail.contrib.modeladmin',
'wagtail.contrib.settings',
'wagtail.contrib.routable_page',
'modelcluster',
'taggit',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sitemaps',
'django.contrib.messages',
'django.contrib.staticfiles',
'django_extensions',
]
MIDDLEWARE = [
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.security.SecurityMiddleware',
'wagtail.contrib.redirects.middleware.RedirectMiddleware',
]
ROOT_URLCONF = 'build_test.urls'
WSGI_APPLICATION = 'build_test.wsgi.application'
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'Pacific/Auckland'
USE_I18N = True
USE_L10N = True
USE_TZ = True
SESSION_ENGINE = 'django.contrib.sessions.backends.cache'
EMAIL_BACKEND = 'build_test.backends.mail_backend.DevEmailBackend'
EMAIL_HOST = '127.0.0.1'
EMAIL_PORT = 1025
DEBUG = True
ALLOWED_HOSTS = [
'*'
]
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
MEDIA_URL = '/media/'
STATICFILES_FINDERS = [
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
]
STATICFILES_DIRS = [
os.path.join(PROJECT_DIR, 'static'),
]
STATIC_ROOT = os.path.join(BASE_DIR, 'static')
STATIC_URL = '/static/'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [
os.path.join(PROJECT_DIR, 'templates'),
],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
'wagtail.contrib.settings.context_processors.settings',
],
},
},
]
WAGTAIL_SITE_NAME = 'Wagtail Advanced Form Builder Test'
try:
from .local import *
except ImportError:
pass
|
the-stack_0_23820 | # --------------------------------------------------------------------------
# Source file provided under Apache License, Version 2.0, January 2004,
# http://www.apache.org/licenses/
# (c) Copyright IBM Corp. 2021
# --------------------------------------------------------------------------
# Author: Olivier OUDOT, IBM Analytics, France Lab, Sophia-Antipolis
"""
A blackbox expression is a numerical expression for which the analytical form is not known or cannot be
formulated using CP Optimizer's classical expressions.
A blackbox expression is specified by giving a function that evaluates the expression at given points.
Defining a blackbox function
----------------------------
A blackbox function is defined by an instance of the class :class:`~docplex.cp.blackbox.CpoBlackboxFunction`
that contains:
* the name of the blackbox function, auto-allocated if not given,
* the number of values that are returned by the evaluation of the function, one by default,
* the list of argument types, auto-determined if not given,
* the implementation of the function,
* optionally an argument to pass known result bounds when evaluating the function,
* an indicator allowing parallel evaluation of the blackbox function, False by default.
Each argument type can be given using its symbolic name string, or using its corresponding
type descriptor constant object of class :class:`~docplex.cp.catalog.CpoType` listed in module
:mod:`~docplex.cp.catalog`. Allowed argument types are:
* 'Int' or :const:`~docplex.cp.catalog.Type_Int`
* 'IntVar' or :const:`~docplex.cp.catalog.Type_IntVar`
* 'IntExpr' or :const:`~docplex.cp.catalog.Type_IntExpr`
* 'Float' or :const:`~docplex.cp.catalog.Type_Float`
* 'FloatExpr' or :const:`~docplex.cp.catalog.Type_FloatExpr`
* 'IntervalVar' or :const:`~docplex.cp.catalog.Type_IntervalVar`
* 'SequenceVar' or :const:`~docplex.cp.catalog.Type_SequenceVar`
* 'IntArray' or :const:`~docplex.cp.catalog.Type_IntArray`
* 'IntVarArray' or :const:`~docplex.cp.catalog.Type_IntVarArray`
* 'IntExprArray' or :const:`~docplex.cp.catalog.Type_IntExprArray`
* 'FloatArray' or :const:`~docplex.cp.catalog.Type_FloatArray`
* 'FloatExprArray' or :const:`~docplex.cp.catalog.Type_FloatExprArray`
* 'IntervalVarArray' or :const:`~docplex.cp.catalog.Type_IntervalVarArray`
* 'SequenceVarArray' or :const:`~docplex.cp.catalog.Type_SequenceVarArray`
If not given, the list of argument types is automatically computed as the smallest list of types
that are common to all the references to the blackbox function in the model.
Evaluating a blackbox function
------------------------------
The implementation of the function is a function or a lambda expression that takes as many parameters as declared
in the list of argument types.
Each argument value is fixed and is implemented, for each argument type, as described below:
* 'Int': integer constant
* 'IntVar': integer constant
* 'IntExpr': integer constant
* 'Float': float constant
* 'FloatExpr': float constant
* 'IntervalVar': interval variable solution value, named tuple containing start, end and size of the variable,
* 'SequenceVar': sequence variable solution value, ordered list of interval variables in the sequence,
* 'IntArray': list of integer constants
* 'IntVarArray': list of integer constants
* 'IntExprArray': list of integer constants
* 'FloatArray': list of float constants
* 'FloatExprArray': list of float constants
* 'IntervalVarArray': list of interval variable solution value
* 'SequenceVarArray': list of sequence variable solution value
The function may return:
* one or several float results in a list,
* a single number value, automatically converted in a list with this single value,
* a boolean value, converted as an integer 0 or 1 put in a single value list,
* *None*, if the function has no solution for these arguments,
* an exception, if an error occurred during the evaluation.
If an exception is thrown, it is propagated to the solver, which rethrows an exception to exit from the solve.
As the evaluation of the blackbox function is required by the different CPO workers, multiple evaluation requests
may happen concurrently.
As Python does not support real multi-threading (see Global Interpreter Lock here: https://wiki.python.org/moin/GlobalInterpreterLock),
concurrent processing may introduce overhead, or computation problems if the blackbox evaluation uses services of
libraries that are not designed to run concurrently.
By default, blackbox function evaluation is then executed in mutual exclusion, but this can be changed by setting
the parameter *parallel* to True, or using method :meth:`~CpoBlackboxFunction.set_parallel_eval`.
To avoid calling the blackbox function multiple times with the same parameters, the solver can use a cache that may be
configured at the declaration of the blackbox.
This cache is by default local to each call instance of the blackbox function in the model, in case the evaluation
of the function depends on the calling context and may return different results with the same parameters depending
where the call is placed in the model.
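As a closing sketch of these evaluation options, the declaration below combines a bounds parameter with an
explicit cache configuration (the function and parameter names are illustrative only, not imposed by the API):
::
    def my_impl(x, y, bnds=None):
        # for dimension 1, bnds is a (lower, upper) tuple; unknown bounds are -inf/+inf
        return [x * y]
    bbf = CpoBlackboxFunction(impl=my_impl, dimension=1,
                              bounds_parameter='bnds', cachesize=100, globalcache=True)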
Using a blackbox function in a model
------------------------------------
Once defined, the blackbox function can be used in a model simply by calling the blackbox function descriptor
with appropriate model expressions as arguments.
Following is a simple example that shows how to use a blackbox function with 2 parameters:
::
bbf = CpoBlackboxFunction(lambda x, y: x + y)
mdl = CpoModel()
v1 = integer_var(0, 10, "V1")
v2 = integer_var(0, 10, "V2")
mdl.add(bbf(v1, v2) == 5)
If the blackbox function returns multiple values, using the function in a model is done in two steps, as follows:
::
bbf = CpoBlackboxFunction(impl=lambda x, y: (x + y, x - y), dimension=2)
mdl = CpoModel()
v1 = integer_var(0, 10, "V1")
v2 = integer_var(0, 10, "V2")
a, b = bbf(v1, v2)
mdl.add(a + b == 4)
Note that not all returned expressions need to be used in the model.
Detailed description
--------------------
"""
from docplex.cp.catalog import *
from docplex.cp.expression import build_cpo_expr, CpoFunctionCall
from docplex.cp.utils import *
from itertools import count
import threading  # used below for the blackbox evaluation mutual-exclusion lock
#-----------------------------------------------------------------------------
# Constants
#-----------------------------------------------------------------------------
# List of possible argument types (DO NOT CHANGE AS ENCODING DEPENDS ON IT)
ARGUMENT_TYPES = (Type_Int, Type_IntVar, Type_IntExpr,
Type_Float, Type_FloatExpr,
Type_IntervalVar, Type_SequenceVar,
Type_IntArray, Type_IntVarArray, Type_IntExprArray,
Type_FloatArray, Type_FloatExprArray,
Type_IntervalVarArray, Type_SequenceVarArray,)
# Encoding of blackbox types into integer (zero reserved)
BLACKBOX_ARGUMENT_TYPES_ENCODING = {t: (i + 1) for i, t in enumerate(ARGUMENT_TYPES)}
# Set of all argument types
_ARG_TYPES_SET = set(ARGUMENT_TYPES)
# Build allowed types per name, ignoring case. Key is type name in lower case, value is type descriptor.
_ARG_TYPES_DICT = {t.get_name().lower(): t for t in ARGUMENT_TYPES}
#-----------------------------------------------------------------------------
# Classes
#-----------------------------------------------------------------------------
class CpoBlackboxFunction(object):
""" This class represents the descriptor of a blackbox function.
A blackbox function is defined by:
* a name, that must not be equal to an existing modeling operation,
* the number of float values it returns (the dimension),
* the list argument types,
* the implementation of the function, that evaluates the result from a list of
fully evaluated arguments.
* a parameter allowing to pass the known bounds of the result.
Each argument type can be given using its symbolic name string, or using its corresponding
type descriptor constant object of class :class:`~docplex.cp.catalog.CpoType` listed in module
:mod:`~docplex.cp.catalog`.
Allowed argument types are:
* 'Int' or :const:`~docplex.cp.catalog.Type_Int`
* 'IntVar' or :const:`~docplex.cp.catalog.Type_IntVar`
* 'IntExpr' or :const:`~docplex.cp.catalog.Type_IntExpr`
* 'Float' or :const:`~docplex.cp.catalog.Type_Float`
* 'FloatExpr' or :const:`~docplex.cp.catalog.Type_FloatExpr`
* 'IntervalVar' or :const:`~docplex.cp.catalog.Type_IntervalVar`
* 'SequenceVar' or :const:`~docplex.cp.catalog.Type_SequenceVar`
* 'IntArray' or :const:`~docplex.cp.catalog.Type_IntArray`
* 'IntVarArray' or :const:`~docplex.cp.catalog.Type_IntVarArray`
* 'IntExprArray' or :const:`~docplex.cp.catalog.Type_IntExprArray`
* 'FloatArray' or :const:`~docplex.cp.catalog.Type_FloatArray`
* 'FloatExprArray' or :const:`~docplex.cp.catalog.Type_FloatExprArray`
* 'IntervalVarArray' or :const:`~docplex.cp.catalog.Type_IntervalVarArray`
* 'SequenceVarArray' or :const:`~docplex.cp.catalog.Type_SequenceVarArray`
"""
__slots__ = ('name', # Name of the blackbox function, None for auto-allocation
'dimension', # Number of result values
'argtypes', # List of argument types
'atypes_given', # Indicates that argument types where given at function declaration
'impl', # Implementation of the function
'bounds_param', # Name of the bounds parameter
'cachesize', # Size of the function call cache
'globalcache', # Global cache indicator
'operation', # Corresponding operation descriptor
                 'eval_count',          # Number of evaluation requests
'auto', # Auto-created blackbox (for smart parsing)
'eval_mutex', # Lock to ensure mutual exclusion of function evaluation
)
def __init__(self, impl=None, dimension=1, argtypes=None, name=None, parallel=False, bounds_parameter=None, cachesize=-1, globalcache=False):
""" **Constructor**
The list of function argument types is optional.
If not given, it is automatically determined as the most common types of the expression arguments used
in its different references in the model.
Function implementation is optional.
        This allows parsing a CPO file that contains references to blackbox function(s), which requires
        registering them in the model prior to parsing it.
        However, the model will not be solvable without an implementation.
Bound parameter is optional.
If defined, the known bounds of the function result is passed to the function implementation using this named
parameter.
        If the dimension of the function is 1, the bounds are a simple tuple containing lower and upper bounds.
        Otherwise, the bounds are a list of tuples, one for each returned value.
        If unknown, bounds are set to -inf or inf (float("inf") in Python).
The name of the function is optional.
If not given, a name is automatically allocated when solving the model.
If given, the name of the function must be a symbol (only letters and digits, starting by a letter)
that is not already used as the name of an existing modeling function.
A cache can be used by the solver to avoid calling the blackbox function multiple times with the same values.
By default (cachesize=-1), the size of the cache is automatically determined by the solver, but it can be
forced to a given value, or zero for no cache at all.
        By default, this cache is local to each call instance of the blackbox function in the model, in case the evaluation
        of the function depends on the calling context and may return different results with the same parameters
        depending on where the call is placed in the model.
The parameter *globalcache* can be set to *True* if the same cache can be used for all call instances.
Args:
impl: (Optional) Implementation of the function
dimension: (Optional) Number of float values that are returned by the function. Default is 1.
argtypes: (Optional) List of argument types or type names.
name: (Optional) Name of the function, restricted to symbol.
parallel: (Optional) Indicates that the blackbox function evaluation is allowed concurrently.
Default is False.
bounds_parameter: (Optional) Name of the parameter in which known return values bounds can be set.
Default is None.
            cachesize: (Optional) Size of the blackbox function evaluation cache.
Default value is -1, indicating that the cache is managed by the solver with default settings.
globalcache: (Optional) Indicates that the same cache can be used for all blackbox function call instances
in the model.
Default is False.
"""
# Check dimension
if dimension is not None:
assert is_int(dimension) and dimension >= 1, "Blackbox function dimension should be greater than zero"
self.dimension = dimension
self.bounds_param = bounds_parameter
# Check argument types
if argtypes is None:
self.atypes_given = False
self.argtypes = None
else:
self.atypes_given = True
self.argtypes = []
for t in argtypes:
if t in _ARG_TYPES_SET:
at = t
else:
# Consider t as a name and search in types map
at = _ARG_TYPES_DICT.get(str(t).lower())
if at is None:
raise AssertionError("Argument type '{}' is not allowed as blackbox function argument".format(t))
self.argtypes.append(at)
# Check function name
if name is not None:
if name in ALL_OPERATIONS_PER_NAME:
raise AssertionError("Function name {} is already used for a standard modeling operation".format(name))
self.name = name
# Store attributes
self.impl = impl
self.cachesize = cachesize
self.globalcache = globalcache
self.eval_count = 0
self.auto = (dimension is None)
self.set_parallel_eval(parallel)
# Build operation descriptor
if self.atypes_given:
self.operation = CpoOperation(name, name, None, -1, (CpoSignature(Type_FloatExprArray, self.argtypes),) )
else:
self.operation = CpoOperation(name, name, None, -1, () )
def set_name(self, nm):
""" Set the name of the blackbox function
Args:
nm: Name of the blackbox function
"""
self.name = nm
def get_name(self):
""" Get the name of the blackbox function
Returns:
Name of the blackbox function
"""
return self.name
def set_dimension(self, dim):
""" Set the dimension of this blackbox function, i.e. the number of values that it returns.
Args:
dim: Number of result values (size of the result list or array)
"""
self.dimension = dim
def get_dimension(self):
""" Get the dimension of this blackbox function
Returns:
Number of result values (size of the result list or array)
"""
return self.dimension
def set_implementation(self, impl):
""" Set the blackbox function implementation
Args:
impl: Blackbox function implementation
"""
self.impl = impl
def get_implementation(self):
""" Get the blackbox function implementation
Returns:
Blackbox function implementation
"""
return self.impl
def has_implementation(self):
""" Get if the blackbox function has an implementation
Returns:
True if the blackbox function has an implementation
"""
return self.impl is not None
def get_arg_types(self):
""" Get the list of argument types
Returns:
List of argument types, objects of class :class:`~docplex.cp.catalog.CpoType`
"""
return self.argtypes
def set_parallel_eval(self, par):
""" Set parallel evaluation enablement indicator.
Args:
par: Parallel evaluation indicator.
"""
self.eval_mutex = None if par else threading.Lock()
def is_parallel_eval(self):
""" Check if parallel evaluation is allowed.
Returns:
            True if parallel evaluation is allowed, False otherwise
"""
return self.eval_mutex is None
def set_cache_size(self, size):
""" Set the size of evaluation cache.
Args:
size: Cache size, -1 for default, 0 for none.
"""
self.cachesize = size
def get_cache_size(self):
""" Get the size of the evaluation cache
Returns:
Evaluation cache size, -1 for default, 0 for none.
"""
return self.cachesize
def set_global_cache(self, glob):
""" Set the global cache indicator.
When set, there is a single evaluation cache for all the blackbox function call instances.
Args:
glob: Global cache indicator
"""
self.globalcache = glob
def is_global_cache(self):
""" Check if a global cache has been set.
Returns:
True if there is a global cache for this function.
"""
return self.globalcache
def get_eval_count(self):
""" Get the number of times this blackbox function has been evaluated
Returns:
number of times this blackbox function has been evaluated
"""
return self.eval_count
def reset_eval_count(self):
""" Reset the number of times this blackbox function has been evaluated
"""
self.eval_count = 0
def build_model_call(self, *args):
""" Build a model expression representing a call to this blackbox function.
Args:
*args: List of expressions that are arguments of the function
Returns:
            List of model expressions representing access to the unitary result values,
            or a single result expression if dimension is 1.
"""
# Build argument expressions
argexprs = [build_cpo_expr(a) for a in args]
# Update/check argument types
self._update_check_arg_types(argexprs)
# Build function call expression
expr = CpoBlackboxFunctionCall(self, argexprs)
# Build list of result access expressions
res = tuple(CpoFunctionCall(Oper_eval, Type_FloatExpr, (expr, build_cpo_expr(i))) for i in range(self.dimension))
return res if self.dimension > 1 else res[0]
def __call__(self, *args):
""" Build a model expression representing a call to this blackbox function.
Args:
*args: List of expressions that are arguments of the function
Returns:
            List of model expressions representing access to the unitary result values,
            or a single result expression if dimension is 1.
"""
return self.build_model_call(*args)
def __str__(self):
""" Build a string representing this blackbox function.
Returns:
String representing this blackbox function.
"""
name = "Anonymous" if self.name is None else self.name
argtypes = "..." if self.argtypes is None else ', '.join(t.get_name() for t in self.argtypes)
        return "{}({}): {}".format(name, argtypes, self.dimension)
def _eval_function(self, rbnds, *args):
""" Evaluate the function from the list of parameter values
Args:
            rbnds: Known result bounds, None if unknown
*args: List of parameter values
Returns:
List of result float values
"""
#print("Evaluate blackbox function {}{}".format(self.name, args))
# Increment number of evaluation requests
self.eval_count += 1
# Get and check arguments
assert self.argtypes is not None, "Blackbox function '{}' argument types are unknown".format(self.name)
if len(args) != len(self.argtypes):
raise CpoException("Evaluation of blackbox function '{}' with wrong number of parameters {} when {} are expected.".format(self.name, len(args), len(self.argtypes)))
# Build associated arguments
kwargs = {}
if (rbnds is not None) and (self.bounds_param is not None):
if self.dimension == 1:
rbnds = rbnds[0]
kwargs[self.bounds_param] = rbnds
# Evaluate function
if self.impl is None:
raise CpoException("Blackbox function '{}' implementation is not provided".format(self.name))
res = self.impl(*args, **kwargs)
        # Check single numeric result (handled separately to cover the case of a zero value)
if is_number(res):
            assert self.dimension == 1, "Evaluation of blackbox function '{}' returned 1 result value instead of {} that have been declared.".format(self.name, self.dimension)
res = (res,)
elif is_bool(res):
            assert self.dimension == 1, "Evaluation of blackbox function '{}' returned 1 result value instead of {} that have been declared.".format(self.name, self.dimension)
res = (int(res),)
# Check result (None is allowed)
elif res:
assert is_array(res), "Evaluation of blackbox function '{}' should return a tuple or a list, not {}.".format(self.name, type(res))
assert len(res) == self.dimension, "Evaluation of blackbox function '{}' returned {} result values instead of {} that have been declared.".format(self.name, len(res), self.dimension)
assert all(is_number(v) for v in res), "Evaluation of blackbox function '{}' result should contain only numbers.".format(self.name)
#print("{}{} = {}".format(self.name, args, res))
return res
def eval(self, *args):
""" Evaluate the function from the list of parameter values
This function evaluates the blackbox function without providing bounds.
Args:
*args: List of parameter values
Returns:
List of result float values
"""
return self._eval_function(None, *args)
def _update_check_arg_types(self, argexprs):
""" Update function argument types from a list of argument expressions
Args:
argexprs: List of expressions that are arguments of the function
"""
# Check if argument types already known
if self.argtypes is None:
# Retrieve argument types from expressions
self.argtypes = [_get_argument_type(a) for a in argexprs]
# Set new signature in operation
self.operation.signatures = (CpoSignature(Type_FloatExprArray, self.argtypes),)
else:
            # Build and check the list of arguments
assert len(argexprs) == len(self.argtypes), "This blackbox function should be called with {} arguments".format(len(self.argtypes))
if self.atypes_given:
for i, a, t in zip(count(), argexprs, self.argtypes):
assert _get_argument_type(a).is_kind_of(t), "The argument {} of blackbox function '{}' should be a {}".format(i + 1, self.name, t.get_public_name())
else:
tchanged = False
for i, a, t in zip(count(), argexprs, self.argtypes):
# Determine most common type
ct = _get_argument_type(a).get_common_type(t)
assert ct is not None, "Argument type {} is not compatible with already used type {}".format(a.type, t)
assert ct in _ARG_TYPES_SET, "Common expression type {} is not allowed as blackbox function argument".format(ct)
if ct != t:
self.argtypes[i] = ct
tchanged = True
# Set new signature in operation if type changed
if tchanged:
self.operation.signatures = (CpoSignature(Type_FloatExprArray, self.argtypes),)
def _update_dimension(self, anx):
""" Update blackbox dimension with an evaluation index
Args:
anx: Evaluation index
"""
if (self.dimension is None) or (self.dimension <= anx):
self.dimension = anx + 1
class CpoBlackboxFunctionCall(CpoFunctionCall):
    """ This class represents a model expression node that calls a blackbox function.
"""
__slots__ = ('blackbox', # Blackbox function descriptor
)
def __init__(self, bbf, oprnds):
""" **Constructor**
Args:
bbf: Blackbox function descriptor
oprnds: List of operand expressions.
"""
assert isinstance(bbf, CpoBlackboxFunction), "Argument 'bbf' should be a CpoBlackboxFunction"
super(CpoBlackboxFunctionCall, self).__init__(bbf.operation, Type_Blackbox, oprnds)
self.blackbox = bbf
def _equals(self, other):
""" Checks the equality of this expression with another object.
This particular method just checks local attributes, but does not check recursively children if any.
Recursion is implemented by method equals() that uses a self-managed stack to avoid too many
recursive calls that may lead to an exception 'RuntimeError: maximum recursion depth exceeded'.
Args:
other: Other object to compare with.
Returns:
True if 'other' is semantically identical to this object, False otherwise.
"""
return super(CpoBlackboxFunctionCall, self)._equals(other) and (self.blackbox == other.blackbox)
#-----------------------------------------------------------------------------
# Private functions
#-----------------------------------------------------------------------------
# Dictionary of type mapping to accepted type
_ARG_TYPE_MAPPING = {t: t for t in ARGUMENT_TYPES}
_ARG_TYPE_MAPPING.update(
{
Type_Bool: Type_IntExpr,
Type_BoolExpr: Type_IntExpr,
Type_BoolArray: Type_IntArray,
Type_BoolExprArray: Type_IntExprArray,
})
def _get_argument_type(a):
""" Get the blackbox argument type corresponding to a given argument type
Args:
a: Argument value
Returns:
Authorized blackbox argument type
Raises:
CpoException if given argument type is not supported.
"""
at = a.type
nt = _ARG_TYPE_MAPPING.get(at)
if nt is None:
raise CpoException("Expression type {} is not allowed as blackbox function argument".format(at))
return nt
|
the-stack_0_23822 | import logging
import os
import threading
lock = threading.Lock()
from google.cloud import firestore  # firestore.Client() below comes from google.cloud, not firebase_admin
FORMAT = "%(levelname)s %(asctime)s %(funcName)s() %(lineno)i %(message)s"
formatter = logging.Formatter(FORMAT)
stream_handler = logging.StreamHandler()
stream_handler.setFormatter(formatter)
logger = logging.getLogger(__name__)
logger.addHandler(stream_handler)
class JslFirebaseUtil:
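    """Thread-safe singleton wrapper around a Firestore client.
    Provides helpers to clear a whole collection, delete documents whose content
    contains a given string, and read a single document as a dict.
    """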
__db = None
__instance = None
def __init__(self, credentials_path, log_level):
if self.__instance is None:
JslFirebaseUtil.__new__(self, credentials_path, log_level)
def __new__(cls, credentials_path, log_level):
if not cls.__instance:
lock.acquire()
if not cls.__instance:
cls.__instance = super(JslFirebaseUtil, cls).__new__(cls)
cls.initialize_firebase_client(cls, credentials_path)
logger.setLevel(log_level)
logger.info("Initialization of JslFirebaseUtil is successful")
lock.release()
return cls.__instance
def initialize_firebase_client(self, credentials_path=None):
if credentials_path:
            os.environ["GOOGLE_APPLICATION_CREDENTIALS"] = credentials_path
self.db = firestore.Client()
logger.info("Firebase client is initialized")
else:
logger.error(f"Credentials Path can not be empty {credentials_path}")
def clear_all_collection(self, collection_name):
deleted_count = 0
try:
if collection_name:
collection_reference = self.db.collection(collection_name)
docs = collection_reference.stream()
for doc in docs:
logger.info(f'Deleting doc {doc.id} => {doc.to_dict()}')
doc.reference.delete()
deleted_count += 1
else:
logger.error(f"{collection_name}: collection name can not be blank")
except Exception as e:
            logger.error(f"Exception while clearing all docs in {collection_name} with exception message: {e.args}")
logger.info(f"Total deleted records: {deleted_count}")
def delete_document_collection_with_contains(self, collection_name, limit=10, string_contains=None):
deleted_count = 0
try:
if collection_name and string_contains:
collection_reference = self.db.collection(collection_name)
docs = collection_reference.limit(limit).stream()
for doc in docs:
if string_contains in str(doc.to_dict()):
logger.info(f'Deleting doc {doc.id} => {doc.to_dict()}')
doc.reference.delete()
deleted_count += 1
else:
logger.error(f"{collection_name}:{string_contains} can not be blank")
except Exception as e:
logger.error(f"Exception while clearing all docs in {collection_name} With exception message: {e.args}")
logger.info(f"Total deleted records: {deleted_count}")
def read_document_as_dict(self, collection_name, id=None):
try:
if collection_name and id:
collection_reference = self.db.collection(collection_name)
doc = collection_reference.document(id)
return doc.get().to_dict()
else:
logger.error(f"{collection_name}:{id} can not be blank")
except Exception as e:
logger.error(
                f"Exception while read_document in {collection_name} with Id: {id} with exception message: {e.args}")
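# Minimal usage sketch (illustrative only; the credentials path, collection name and
# document id below are assumptions, not values required by this module):
# util = JslFirebaseUtil("/path/to/service-account.json", logging.INFO)
# doc = util.read_document_as_dict("my-collection", id="my-doc-id")
# util.delete_document_collection_with_contains("my-collection", limit=50, string_contains="stale")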
|
the-stack_0_23823 | #
# ImageViewJpw.py -- Module for a Ginga FITS viewer in a Jupyter web notebook.
#
# This is open-source software licensed under a BSD license.
# Please see the file LICENSE.txt for details.
#
"""
This example illustrates using a Ginga as the driver of a Jupyter web widget.
REQUIREMENTS:
To use this code you will need the "ipywidgets" and "ipyevents" python
modules installed. These are easily installed via:
$ pip install ipyevents
$ jupyter nbextension enable --py --sys-prefix ipyevents
or via conda:
$ conda install -c conda-forge ipyevents
Basic usage in a Jupyter notebook:
import ipywidgets as widgets
# create a Jupyter image that will be our display surface
# format can be 'jpeg' or 'png'; specify width and height to set viewer size
jp_img = widgets.Image(format='jpeg', width=500, height=500)
# Boilerplate to create a Ginga viewer connected to this widget
# this could be simplified by creating a class that created viewers
# as a factory.
from ginga.misc.log import get_logger
logger = get_logger("v1", log_stderr=True, level=40)
from ginga.web.jupyterw.ImageViewJpw import EnhancedCanvasView
v1 = EnhancedCanvasView(logger=logger)
v1.set_widget(jp_img)
bd = v1.get_bindings()
bd.enable_all(True)
# You can now build a GUI with the image widget and other Jupyter
# widgets. Here we just show the image widget.
v1.embed()
"""
from ipyevents import Event as EventListener
from ginga import AstroImage
from ginga import Mixins, Bindings
from ginga.util.toolbox import ModeIndicator
try:
# See if we have aggdraw module--best choice
from ginga.aggw.ImageViewAgg import ImageViewAgg as ImageView
except ImportError:
# fall back to pillow if aggdraw not available
from ginga.pilw.ImageViewPil import ImageViewPil as ImageView
from ginga.web.jupyterw import JpHelp
class ImageViewJpw(ImageView):
def __init__(self, logger=None, rgbmap=None, settings=None):
ImageView.__init__(self, logger=logger,
rgbmap=rgbmap,
settings=settings)
self.jp_img = None
self.jp_evt = None
# Format 'png' is ok with 'RGBA', but 'jpeg' only works with 'RGB'
self.rgb_order = 'RGB'
self._defer_task = None
self.msgtask = None
def set_widget(self, jp_img):
"""Call this method with the Jupyter image widget (image_w)
that will be used.
"""
self.jp_img = jp_img
# TODO: need configure (resize) event callback
# see reschedule_redraw() method
self._defer_task = JpHelp.Timer()
self._defer_task.add_callback('expired',
lambda timer: self.delayed_redraw())
self.msgtask = JpHelp.Timer()
self.msgtask.add_callback('expired',
lambda timer: self.onscreen_message(None))
# for some reason these are stored as strings!
wd, ht = int(jp_img.width), int(jp_img.height)
self.configure_surface(wd, ht)
def get_widget(self):
return self.jp_img
def update_image(self):
fmt = self.jp_img.format
web_img = self.get_rgb_image_as_bytes(format=fmt)
# this updates the model, and then the Jupyter image(s)
self.jp_img.value = web_img
def reschedule_redraw(self, time_sec):
self._defer_task.stop()
self._defer_task.start(time_sec)
def configure_window(self, width, height):
self.configure_surface(width, height)
def _resize_cb(self, event):
self.configure_window(event.width, event.height)
def set_cursor(self, cursor):
# TODO
pass
def onscreen_message(self, text, delay=None, redraw=True):
if self.jp_img is None:
return
self.msgtask.stop()
self.set_onscreen_message(text, redraw=redraw)
if delay is not None:
self.msgtask.start(delay)
class ImageViewEvent(ImageViewJpw):
def __init__(self, logger=None, rgbmap=None, settings=None):
ImageViewJpw.__init__(self, logger=logger, rgbmap=rgbmap,
settings=settings)
self._button = 0
# maps EventListener events to callback handlers
self._evt_dispatch = {
'mousedown': self.button_press_event,
'mouseup': self.button_release_event,
'mousemove': self.motion_notify_event,
'wheel': self.scroll_event,
'mouseenter': self.enter_notify_event,
'mouseleave': self.leave_notify_event,
'keydown': self.key_press_event,
'keyup': self.key_release_event,
}
# mapping from EventListener key events to ginga key events
self._keytbl = {
'shiftleft': 'shift_l',
'shiftright': 'shift_r',
'controlleft': 'control_l',
'controlright': 'control_r',
'altleft': 'alt_l',
'altright': 'alt_r',
'osleft': 'super_l',
'osright': 'super_r',
'contextmenu': 'menu_r',
'backslash': 'backslash',
'space': 'space',
'escape': 'escape',
'enter': 'return',
'tab': 'tab',
'arrowright': 'right',
'arrowleft': 'left',
'arrowup': 'up',
'arrowdown': 'down',
'pageup': 'page_up',
'pagedown': 'page_down',
'f1': 'f1',
'f2': 'f2',
'f3': 'f3',
'f4': 'f4',
'f5': 'f5',
'f6': 'f6',
'f7': 'f7',
'f8': 'f8',
'f9': 'f9',
'f10': 'f10',
'f11': 'f11',
'f12': 'f12',
}
self._keytbl2 = {
'`': 'backquote',
'"': 'doublequote',
"'": 'singlequote',
}
# Define cursors for pick and pan
#hand = openHandCursor()
hand = 'fleur'
self.define_cursor('pan', hand)
cross = 'cross'
self.define_cursor('pick', cross)
for name in ('motion', 'button-press', 'button-release',
'key-press', 'key-release', 'drag-drop',
'scroll', 'map', 'focus', 'enter', 'leave',
'pinch', 'rotate', 'pan', 'swipe', 'tap'):
self.enable_callback(name)
def set_widget(self, jp_imgw):
"""Call this method with the Jupyter image widget (image_w)
that will be used.
"""
super(ImageViewEvent, self).set_widget(jp_imgw)
self.jp_evt = EventListener(source=jp_imgw)
self.jp_evt.watched_events = [
'keydown', 'keyup', 'mouseenter', 'mouseleave',
'mousedown', 'mouseup', 'mousemove', 'wheel',
'contextmenu'
]
self.jp_evt.prevent_default_action = True
self.jp_evt.on_dom_event(self._handle_event)
self.logger.info("installed event handlers")
return self.make_callback('map')
def _handle_event(self, event):
# TODO: need focus events and maybe a map event
# TODO: Set up widget as a drag and drop destination
evt_kind = event['type']
handler = self._evt_dispatch.get(evt_kind, None)
if handler is not None:
return handler(event)
return False
def transkey(self, keycode, keyname=None):
keycode = str(keycode).lower()
if keyname is None:
keyname = keycode
self.logger.debug("key code in jupyter '%s'" % (keycode))
res = self._keytbl.get(keycode, None)
if res is None:
res = self._keytbl2.get(keyname, keyname)
return res
def get_keyTable(self):
return self._keytbl
def focus_event(self, event, has_focus):
return self.make_callback('focus', has_focus)
def enter_notify_event(self, event):
enter_focus = self.t_.get('enter_focus', False)
if enter_focus:
# TODO: set focus on canvas
pass
return self.make_callback('enter')
def leave_notify_event(self, event):
self.logger.debug("leaving widget...")
return self.make_callback('leave')
def key_press_event(self, event):
keyname = self.transkey(event['code'], keyname=event['key'])
self.logger.debug("key press event, key=%s" % (keyname))
return self.make_ui_callback('key-press', keyname)
def key_release_event(self, event):
keyname = self.transkey(event['code'], keyname=event['key'])
self.logger.debug("key release event, key=%s" % (keyname))
return self.make_ui_callback('key-release', keyname)
def button_press_event(self, event):
x, y = event['arrayX'], event['arrayY']
self.last_win_x, self.last_win_y = x, y
button = 0
button |= 0x1 << event['button']
self._button = button
self.logger.debug("button event at %dx%d, button=%x" % (x, y, button))
data_x, data_y = self.check_cursor_location()
return self.make_ui_callback('button-press', button, data_x, data_y)
def button_release_event(self, event):
x, y = event['arrayX'], event['arrayY']
self.last_win_x, self.last_win_y = x, y
button = 0
button |= 0x1 << event['button']
self._button = 0
self.logger.debug("button release at %dx%d button=%x" % (x, y, button))
data_x, data_y = self.check_cursor_location()
return self.make_ui_callback('button-release', button, data_x, data_y)
def motion_notify_event(self, event):
button = self._button
x, y = event['arrayX'], event['arrayY']
self.last_win_x, self.last_win_y = x, y
self.logger.debug("motion event at %dx%d, button=%x" % (x, y, button))
data_x, data_y = self.check_cursor_location()
return self.make_ui_callback('motion', button, data_x, data_y)
def scroll_event(self, event):
x, y = event['arrayX'], event['arrayY']
self.last_win_x, self.last_win_y = x, y
dx, dy = event['deltaX'], event['deltaY']
if (dx != 0 or dy != 0):
# <= This browser gives us deltas for x and y
# Synthesize this as a pan gesture event
self.make_ui_callback('pan', 'start', 0, 0)
self.make_ui_callback('pan', 'move', -dx, -dy)
return self.make_ui_callback('pan', 'stop', 0, 0)
# <= This code path should not be followed under normal
# circumstances.
# we leave it here in case we want to make the scroll
# callback configurable in the future
# TODO: calculate actual angle of direction
if dy < 0:
direction = 0.0 # up
elif dy > 0:
direction = 180.0 # down
else:
return False
# 15 deg is standard 1-click turn for a wheel mouse
num_deg = 15.0
self.logger.debug("scroll deg=%f direction=%f" % (
num_deg, direction))
data_x, data_y = self.check_cursor_location()
return self.make_ui_callback('scroll', direction, num_deg,
data_x, data_y)
class ImageViewZoom(Mixins.UIMixin, ImageViewEvent):
# class variables for binding map and bindings can be set
bindmapClass = Bindings.BindingMapper
bindingsClass = Bindings.ImageViewBindings
@classmethod
def set_bindingsClass(cls, klass):
cls.bindingsClass = klass
@classmethod
def set_bindmapClass(cls, klass):
cls.bindmapClass = klass
def __init__(self, logger=None, rgbmap=None, settings=None,
bindmap=None, bindings=None):
ImageViewEvent.__init__(self, logger=logger, rgbmap=rgbmap,
settings=settings)
Mixins.UIMixin.__init__(self)
self.ui_set_active(True)
if bindmap is None:
bindmap = ImageViewZoom.bindmapClass(self.logger)
self.bindmap = bindmap
bindmap.register_for_events(self)
if bindings is None:
bindings = ImageViewZoom.bindingsClass(self.logger)
self.set_bindings(bindings)
def get_bindmap(self):
return self.bindmap
def get_bindings(self):
return self.bindings
def set_bindings(self, bindings):
self.bindings = bindings
bindings.set_bindings(self)
class CanvasView(ImageViewZoom):
def __init__(self, logger=None, settings=None, rgbmap=None,
bindmap=None, bindings=None):
ImageViewZoom.__init__(self, logger=logger, settings=settings,
rgbmap=rgbmap,
bindmap=bindmap, bindings=bindings)
# Needed for UIMixin to propagate events correctly
self.objects = [self.private_canvas]
self._mi = ModeIndicator(self)
def set_canvas(self, canvas, private_canvas=None):
super(CanvasView, self).set_canvas(canvas,
private_canvas=private_canvas)
self.objects[0] = self.private_canvas
class EnhancedCanvasView(CanvasView):
"""
This just adds some convenience methods to the viewer for loading images,
grabbing screenshots, etc. You can subclass to add new methods.
"""
def embed(self):
"""
Embed a viewer into a Jupyter notebook.
"""
return self.jp_img
def open(self, new=1):
"""
Open this viewer in a new browser window or tab.
"""
# TBD
raise Exception("Not yet implemented!")
def show(self, fmt=None):
"""
Capture the window of a viewer.
"""
# force any delayed redraws
# TODO: this really needs to be addressed in get_rgb_image_as_bytes()
# of the various superclasses, as it affects other backends as well
self.redraw_now()
from IPython.display import Image
if fmt is None:
# what format are we using for the Jupyter image--use that
fmt = self.jp_img.format
return Image(data=bytes(self.get_rgb_image_as_bytes(format=fmt)),
format=fmt, embed=True)
def load_fits(self, filepath):
"""
Load a FITS file into the viewer.
"""
image = AstroImage.AstroImage(logger=self.logger)
image.load_file(filepath)
self.set_image(image)
load = load_fits
def load_hdu(self, hdu):
"""
Load an HDU into the viewer.
"""
image = AstroImage.AstroImage(logger=self.logger)
image.load_hdu(hdu)
self.set_image(image)
def load_data(self, data_np):
"""
Load raw numpy data into the viewer.
"""
image = AstroImage.AstroImage(logger=self.logger)
image.set_data(data_np)
self.set_image(image)
def add_canvas(self, tag=None):
# add a canvas to the view
my_canvas = self.get_canvas()
DrawingCanvas = my_canvas.get_draw_class('drawingcanvas')
canvas = DrawingCanvas()
# enable drawing on the canvas
canvas.enable_draw(True)
canvas.enable_edit(True)
canvas.set_drawtype(None)
canvas.ui_set_active(True)
canvas.set_surface(self)
canvas.register_for_cursor_drawing(self)
# add the canvas to the view.
my_canvas.add(canvas, tag=tag)
return canvas
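# A possible follow-up to the notebook setup shown in the module docstring above
# (the FITS file name and draw type are assumptions used only for illustration):
# v1.load_fits('example.fits')        # display a FITS image in the widget
# canvas = v1.add_canvas()            # overlay an interactive drawing canvas
# canvas.set_drawtype('rectangle')    # select a draw tool on that canvas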
|
the-stack_0_23824 | #!/usr/local/bin/python3
import smtplib
import time
import jinja2
import threading
import os
import csv
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from email.mime.image import MIMEImage
from sshtunnel import SSHTunnelForwarder
from paramiko import client
from operator import itemgetter
from matplotlib import pyplot as plt
class ssh:
client = None
def __init__(self, address, username, password):
print('Connecting to target node...')
self.client = client.SSHClient()
self.client.set_missing_host_key_policy(client.AutoAddPolicy())
self.client.connect(address, username=username, password=password, look_for_keys=False)
def sendCommand(self, command, logpath):
if (self.client):
stdin, stdout, stderr = self.client.exec_command(command)
log_output = stdout.read()
stdin.flush()
log_output_file = open(logpath, 'wb')
log_output_file.write(log_output)
log_output_file.close()
else:
print('Connection is not opened...')
class sshThread(threading.Thread):
def __init__(self, threadId, node, ip_addr, user, password):
threading.Thread.__init__(self)
self.threadId = threadId
self.node = node
self.ip_addr = ip_addr
self.user = user
self.password = password
def run(self):
print("Starting " + self.name)
# Get lock to synchronize threads
threadLock.acquire()
connectNode(self.node, self.ip_addr, self.user, self.password)
# Free lock to release next thread
threadLock.release()
def connectNode(node, ip_addr, user, password):
command_message = 'show task resources'
output_log = '/home/edruser/scripts/{}-tasklog'.format(node)
connection_node = ssh(ip_addr, user, password)
connection_node.sendCommand(command_message, output_log)
def parseOutput(logpath, hournow, node):
# Dict generator
items = []
nodename = node
cpu = ''
facility = ''
task_instance = ''
cpu_used = ''
cpu_alloc = ''
mem_used = ''
mem_alloc = ''
files_used = ''
files_alloc = ''
status = ''
hournow = hournow
with open(logpath, 'r') as infile:
for line in infile:
if 'sessmgr' and 'I' in line:
cpu = str(line.strip().split()[0])
facility = str(line.strip().split()[1])
task_instance = int(line.strip().split()[2])
cpu_used = str(line.strip().split()[3])
cpu_alloc = str(line.strip().split()[4])
mem_used = str(line.strip().split()[5])
mem_alloc = str(line.strip().split()[6])
files_used = str(line.strip().split()[7])
files_alloc = str(line.strip().split()[8])
status = str(line.strip().split()[12])
# Dict = {}
# We just have to quote the keys as needed
node_data = dict(nodename=nodename, cpu=cpu, facility=facility, task_instance=task_instance,
cpu_used=cpu_used, cpu_alloc=cpu_alloc, mem_used=mem_used,
mem_alloc=mem_alloc, files_used=files_used, files_alloc=files_alloc,
status=status, hournow=hournow)
items.append(node_data)
# Sort list of dict, descending, by task_instance id value
# and get the top 10
items_by_task_id = sorted(items, key=itemgetter('task_instance'), reverse=True)
items_by_task_id_top10 = items_by_task_id[:10]
return items_by_task_id_top10
def createCsv(fileinput, node_highest_dict):
with open(fileinput, 'a+') as csv_infile: # append mode with read mode implied
# File has been opened at this point
csv_infile.seek(0) # Ensure that we are at the start of the file
first_char = csv_infile.read(1) # Get the first char of the file
w = csv.DictWriter(csv_infile, node_highest_dict.keys()) # Set the dict writer object
if not first_char: # If first char is not found then deem as empty file
w.writeheader()
w.writerow(node_highest_dict)
else: # non-empty file
w.writerow(node_highest_dict)
def sendEmail(sender, recipient, subject, html_skel, *args):
# Create message container - the correct MIME type is
# multipart/alternative
msg = MIMEMultipart('alternative')
msg['Subject'] = subject
msg['From'] = sender
if type(recipient) is list:
msg['To'] = recipient[0]
msg['Cc'] = ','.join(recipient[1:])
msg['Bcc'] = recipient[-1]
else:
msg['To'] = recipient
msg['Cc'] = ''
msg['Bcc'] = ''
# Create the body of the message
text = 'Please find below the top 10 SessMgr Instance ID for All Cisco SGSN/MME Nodes'
# Record the MIME types of both parts
# text/plain and text/html
part1 = MIMEText(text, 'plain')
part2 = MIMEText(html_skel, 'html')
# Attach image to email body
with open('/home/edruser/scripts/sessmgr_plot/sessmgr_trending.png', 'rb') as f:
img = f.read()
msgImg = MIMEImage(img, 'png')
msgImg.add_header('Content-ID', '<image1>')
msgImg.add_header('Content-Disposition', 'inline', filename='/home/edruser/scripts/sessmgr_plot/sessmgr_trending.png')
# Attach parts into message container
# According to RFC 2046, the last part of a multipart message
# in this case the HTML message
# is best and preferred
msg.attach(part1)
msg.attach(part2)
msg.attach(msgImg)
try:
server = smtplib.SMTP('127.0.0.1', 10022)
server.ehlo()
server.sendmail(sender, recipient, msg.as_string())
server.close()
print('Successfully sent the mail')
except:
print('Failed to send the mail')
# Main
datenow = time.strftime('%s')
datenow_human = time.strftime('%a, %d %b %Y %H:%M:00')
datenow_subj = time.strftime('%Y-%m-%d %H:%M:00')
hournow = time.strftime('%H:00')
datenow_dateonly = time.strftime('%Y%m%d')
datenow_dateonly_dashed = time.strftime('%Y-%m-%d')
# Threading definition
threadLock = threading.Lock()
threads = []
# Create new ssh threads
thread1 = sshThread(1, 'VSGBTR05', '10.205.57.4', 'psi.gandhi', 'password*1')
thread2 = sshThread(2, 'VSGCBT04', '10.205.62.4', 'psi.gandhi', 'password*1')
thread3 = sshThread(3, 'VSGCBT05', '10.205.67.4', 'psi.gandhi', 'password*1')
# Start new ssh threads
thread1.start()
thread2.start()
thread3.start()
# Add threads to thread list
threads.append(thread1)
threads.append(thread2)
threads.append(thread3)
# Wait for all threads to complete
for t in threads:
t.join()
print('Exiting main thread')
# Parse log and generate list of dict container
# to be used later by Jinja2 template engine
output_log_vsgbtr05 = '/home/edruser/scripts/VSGBTR05-tasklog'
output_log_vsgcbt04 = '/home/edruser/scripts/VSGCBT04-tasklog'
output_log_vsgcbt05 = '/home/edruser/scripts/VSGCBT05-tasklog'
vsgcbt04_dict = parseOutput(output_log_vsgcbt04, hournow, 'VSGCBT04')
vsgbtr05_dict = parseOutput(output_log_vsgbtr05, hournow, 'VSGBTR05')
vsgcbt05_dict = parseOutput(output_log_vsgcbt05, hournow, 'VSGCBT05')
# HTML template engine
loader = jinja2.FileSystemLoader('/home/edruser/scripts')
env = jinja2.Environment(loader=loader)
template = env.get_template('templateSessmgr.jinja2.html')
# Render for each node
# manipulate using data contained in list of lists of dict
big_items = []
big_items.append(vsgbtr05_dict)
big_items.append(vsgcbt04_dict)
big_items.append(vsgcbt05_dict)
node_table_html = template.render(items=big_items)
# Print to csv file
# mode = append
vsgbtr05_highest_dict = vsgbtr05_dict[0]
vsgcbt04_highest_dict = vsgcbt04_dict[0]
vsgcbt05_highest_dict = vsgcbt05_dict[0]
vsgbtr05_highest_csv = '/home/edruser/scripts/sessmgr_trending_csv/vsgbtr05_highest_sessmgr_{}.csv'.format(datenow_dateonly)
vsgcbt04_highest_csv = '/home/edruser/scripts/sessmgr_trending_csv/vsgcbt04_highest_sessmgr_{}.csv'.format(datenow_dateonly)
vsgcbt05_highest_csv = '/home/edruser/scripts/sessmgr_trending_csv/vsgcbt05_highest_sessmgr_{}.csv'.format(datenow_dateonly)
# Create CSV
createCsv(vsgbtr05_highest_csv, vsgbtr05_highest_dict)
createCsv(vsgcbt04_highest_csv, vsgcbt04_highest_dict)
createCsv(vsgcbt05_highest_csv, vsgcbt05_highest_dict)
# Matplotlib graph generation
with open(vsgbtr05_highest_csv, 'r') as f_vsgbtr05:
with open(vsgcbt04_highest_csv, 'r') as f_vsgcbt04:
with open(vsgcbt05_highest_csv, 'r') as f_vsgcbt05:
data_vsgbtr05 = list(csv.reader(f_vsgbtr05))
data_vsgcbt04 = list(csv.reader(f_vsgcbt04))
data_vsgcbt05 = list(csv.reader(f_vsgcbt05))
task_instance_btr05 = [i[3]
for i in data_vsgbtr05[1::]] # skip header
time_instance = [i[11]
for i in data_vsgbtr05[1::]] # skip header
task_instance_cbt04 = [i[3]
for i in data_vsgcbt04[1::]] # skip header
task_instance_cbt05 = [i[3]
for i in data_vsgcbt05[1::]] # skip header
# Instantiate plot object
# which consists of 3 subplots
fig = plt.figure()
ax1 = fig.add_subplot(311) # first subplot in three
ax1.plot(time_instance, task_instance_btr05, 'ro') # red dot
# Set title for the whole graph
ax1.set_title('SessMgr Task Instance ID Changes Over Time - {}'.format(datenow_dateonly_dashed))
ax1.legend(['VSGBTR05'], loc='upper left') # subplot legend
ax2 = fig.add_subplot(312, sharex=ax1) # second subplot in three, use ax1's x-ticks
ax2.plot(time_instance, task_instance_cbt04, 'bs') # blue square
ax2.legend(['VSGCBT04'], loc='upper left')
ax2.set_ylabel('SessMgr Task Instance ID') # hacky hack to set a common-Y label across all subplots
ax3 = fig.add_subplot(313, sharex=ax1) # third subplot in three, use ax1's x-ticks
ax3.plot(time_instance, task_instance_cbt05, 'g^') # green triangle
ax3.legend(['VSGCBT05'], loc='upper left')
# Add legends and other graph properties
plt.xticks(rotation=30) # Rotate x-label 30 degrees
plt.setp(ax1.get_xticklabels(), visible=False) # Remove x ticks from ax1 subplot
plt.setp(ax2.get_xticklabels(), visible=False) # Remove x ticks from ax2 subplot
plt.xlabel('Hour')
plt.tight_layout() # Use tight layout in the graph
plt.savefig('/home/edruser/scripts/sessmgr_plot/sessmgr_trending.png')
# Create HTML Skeleton
html_skel = """\
<html>
<head>
<style>
table, th, td {{
border: 1px solid black;
border-collapse: collapse;
text-align: center;
table-layout: auto;
}}
.safe-limit {{ color: green; }}
.warn-limit {{ color: orange; }}
.over-limit {{ color: red; }}
</style>
</head>
<body>
<article>
<header>
<p>Please find below the top 10 SessMgr Instance ID for All Cisco SGSN/MME Nodes as per {}</p>
</header>
</article>
{}<br>
<h3><b>Trending Graph</b></h3>
<img src="cid:image1">
</body>
<footer>
<p><i>This email is automatically generated. <strong>Please do not reply</strong>.</i></p>
<p><i>Contact Information: <a
href="mailto:[email protected]">[email protected]</a>.</i></p>
</footer>
</html>
""".format(datenow_human, node_table_html)
# Write raw HTML file as point of reference
html_file = open('/home/edruser/scripts/sessMgr_table.html', 'w')
html_file.write(html_skel)
html_file.close()
# Forward local port to XL SMTP mail port
with SSHTunnelForwarder(
('10.23.33.125', 22),
ssh_username = 'root',
ssh_password = 'psi12345',
remote_bind_address = ('10.17.6.210', 25),
local_bind_address = ('127.0.0.1', 10022)
) as tunnel:
recipient = ['[email protected]', '[email protected]',
'[email protected]', '[email protected]','[email protected]',
'[email protected]', '[email protected]', '[email protected]',
'[email protected]', '[email protected]', '[email protected]',
'[email protected]']
sendEmail('[email protected]', recipient,
'[PSI/Cisco Internal] TOP 10 SessMgr Instance ID Cisco SGSN/MME {}'.format(datenow_subj), html_skel)
# Remove source logs
os.remove(output_log_vsgcbt04)
os.remove(output_log_vsgbtr05)
os.remove(output_log_vsgcbt05)
print('FINISH EXECUTING SCRIPT')
|
the-stack_0_23825 | from unittest import mock
import graphene
from django.utils.functional import SimpleLazyObject
from freezegun import freeze_time
from .....app.models import App
from .....webhook.event_types import WebhookEventAsyncType
from .....webhook.payloads import generate_meta, generate_requestor
from ....tests.utils import assert_no_permission, get_graphql_content
APP_DEACTIVATE_MUTATION = """
mutation AppDeactivate($id: ID!){
appDeactivate(id:$id){
app{
id
isActive
}
errors{
field
message
code
}
}
}
"""
def test_deactivate_app(app, staff_api_client, permission_manage_apps):
# given
app.is_active = True
app.save()
query = APP_DEACTIVATE_MUTATION
id = graphene.Node.to_global_id("App", app.id)
variables = {
"id": id,
}
# when
response = staff_api_client.post_graphql(
query, variables=variables, permissions=(permission_manage_apps,)
)
# then
get_graphql_content(response)
app.refresh_from_db()
assert not app.is_active
@freeze_time("2022-05-12 12:00:00")
@mock.patch("saleor.plugins.webhook.plugin.get_webhooks_for_event")
@mock.patch("saleor.plugins.webhook.plugin.trigger_webhooks_async")
def test_deactivate_app_trigger_webhook(
mocked_webhook_trigger,
mocked_get_webhooks_for_event,
any_webhook,
app,
staff_api_client,
permission_manage_apps,
settings,
):
# given
mocked_get_webhooks_for_event.return_value = [any_webhook]
settings.PLUGINS = ["saleor.plugins.webhook.plugin.WebhookPlugin"]
app.is_active = True
app.save()
variables = {
"id": graphene.Node.to_global_id("App", app.id),
}
# when
staff_api_client.post_graphql(
APP_DEACTIVATE_MUTATION,
variables=variables,
permissions=(permission_manage_apps,),
)
app.refresh_from_db()
# then
assert not app.is_active
mocked_webhook_trigger.assert_called_once_with(
{
"id": variables["id"],
"is_active": app.is_active,
"name": app.name,
"meta": generate_meta(
requestor_data=generate_requestor(
SimpleLazyObject(lambda: staff_api_client.user)
)
),
},
WebhookEventAsyncType.APP_STATUS_CHANGED,
[any_webhook],
app,
SimpleLazyObject(lambda: staff_api_client.user),
)
def test_deactivate_app_by_app(app, app_api_client, permission_manage_apps):
# given
app = App.objects.create(name="Sample app objects", is_active=True)
query = APP_DEACTIVATE_MUTATION
id = graphene.Node.to_global_id("App", app.id)
variables = {
"id": id,
}
app_api_client.app.permissions.set([permission_manage_apps])
# when
response = app_api_client.post_graphql(query, variables=variables)
# then
get_graphql_content(response)
app.refresh_from_db()
assert not app.is_active
def test_deactivate_app_missing_permission(
app, staff_api_client, permission_manage_orders
):
# given
app.is_active = True
app.save()
query = APP_DEACTIVATE_MUTATION
id = graphene.Node.to_global_id("App", app.id)
variables = {
"id": id,
}
# when
response = staff_api_client.post_graphql(
query, variables=variables, permissions=(permission_manage_orders,)
)
# then
assert_no_permission(response)
app.refresh_from_db()
assert app.is_active
def test_activate_app_by_app_missing_permission(
app, app_api_client, permission_manage_orders
):
# given
app = App.objects.create(name="Sample app objects", is_active=True)
query = APP_DEACTIVATE_MUTATION
id = graphene.Node.to_global_id("App", app.id)
variables = {
"id": id,
}
app_api_client.app.permissions.set([permission_manage_orders])
# when
response = app_api_client.post_graphql(query, variables=variables)
# then
assert_no_permission(response)
assert app.is_active
def test_app_has_more_permission_than_user_requestor(
app, staff_api_client, permission_manage_orders, permission_manage_apps
):
# given
app.permissions.add(permission_manage_orders)
app.is_active = True
app.save()
query = APP_DEACTIVATE_MUTATION
id = graphene.Node.to_global_id("App", app.id)
variables = {
"id": id,
}
# when
response = staff_api_client.post_graphql(
query, variables=variables, permissions=(permission_manage_apps,)
)
# then
content = get_graphql_content(response)
app_data = content["data"]["appDeactivate"]["app"]
app_errors = content["data"]["appDeactivate"]["errors"]
app.refresh_from_db()
assert not app_errors
assert not app.is_active
assert app_data["isActive"] is False
def test_app_has_more_permission_than_app_requestor(
app_api_client, permission_manage_orders, permission_manage_apps
):
# given
app = App.objects.create(name="Sample app objects", is_active=True)
app.permissions.add(permission_manage_orders)
query = APP_DEACTIVATE_MUTATION
id = graphene.Node.to_global_id("App", app.id)
variables = {
"id": id,
}
# when
response = app_api_client.post_graphql(
query, variables=variables, permissions=(permission_manage_apps,)
)
# then
content = get_graphql_content(response)
app_data = content["data"]["appDeactivate"]["app"]
app_errors = content["data"]["appDeactivate"]["errors"]
app.refresh_from_db()
assert not app_errors
assert not app.is_active
assert app_data["isActive"] is False
|
the-stack_0_23829 | import unittest
from exabel_data_sdk.client.api.data_classes.derived_signal import (
DerivedSignal,
DerivedSignalMetaData,
DerivedSignalUnit,
)
class TestDerivedSignal(unittest.TestCase):
def test_proto_conversion(self):
derived_signal = DerivedSignal(
name="derivedSignals/123",
label="test_signal",
expression="close_price + 1",
description="price plus one",
metadata=DerivedSignalMetaData(unit=DerivedSignalUnit.RATIO_DIFFERENCE, decimals=2),
)
self.assertEqual(derived_signal, DerivedSignal.from_proto(derived_signal.to_proto()))
|
the-stack_0_23830 | from sklearn import tree
clf = tree.DecisionTreeClassifier()
# [freshman_gpa, sophomore_gpa, junior_gpa]
X = [[3.3, 3.8, 4.0], [2.8, 3.1, 2.7], [3.8, 2.1, 1.7], [3.2, 2.8, 3.3], [3.9, 2.9, 4.0],
[1.9, 2.0, 2.8], [3.8, 3.6, 3.0],
[2.8, 2.5, 2.1], [3.4, 2.0, 0], [2.8, 3.1, 2.7], [2.8, 3.1, 3.0]]
# [thou shall fail or pass(graduate)]
Y = ['Graduated', 'Failed', 'Failed', 'Failed', 'Graduated', 'Failed', 'Graduated', 'Failed',
'Failed', 'Graduated', 'Graduated']
# train them on our data
clf = clf.fit(X, Y)
prediction = clf.predict([[3.6, 3.98, 3.7]])
# print your prediction
print(prediction)
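# Optional: if your scikit-learn version provides export_text, the learned rules can be
# inspected as text (feature names below are assumed to match the GPA columns above):
# print(tree.export_text(clf, feature_names=['freshman_gpa', 'sophomore_gpa', 'junior_gpa']))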
|
the-stack_0_23832 | #!/usr/bin/env python3
# coding: utf-8
# Example 35: Internet teru-teru-bozu (weather charm) [controls an IoT color LED]
# Connection diagram
#   [Weather information] (Internet)
#          ↓
#   [This device] ------> [IoT color LED] or [IoT full-color LED]
#
# Equipment
#   This device          converts the weather information into a color number and drives the IoT color LED
#   IoT color LED        example14_iot_btn.py with colors_full=False in its initial settings
#   IoT full-color LED   example19_iot_ledpwm.py with colors_full=True in its initial settings
#
# For ESP8266:
# https://github.com/bokunimowakaru/esp/blob/master/2_example/example16f_led/example16f_led.ino
#
# For ESP32:
# https://github.com/bokunimowakaru/esp/blob/master/2_example/example48f_led/example48f_led.ino
#
# Weather information reference:
# http://weather.livedoor.com/weather_hacks/webservice
# Initial settings
ip_leds = ['127.0.0.1'] # IP addresses of the IoT color LEDs
colors = ['消灯','赤色','緑色','黄色','青色','赤紫色','藍緑色','白色'] # off, red, green, yellow, blue, magenta, teal, white
# Set colors_full=True when using a full-color LED
colors_full = True # full-color enable flag
city_id = 270000 # Osaka city ID=270000
# Tokyo=130010 Kyoto=260010
# Yokohama=140010 Chiba=120010
# Nagoya=230010 Fukuoka=400010
url_wea_s = 'http://weather.livedoor.com/forecast/webservice/json/v1?city='
url_wea_s += str(city_id)
interval = 10 * 60 # run interval of 10 minutes (in seconds)
# Libraries
import urllib.request # HTTP communication library
import json # JSON conversion library
from time import sleep # sleep function
def getWeather(): # define the weather-information retrieval function
    try: # start watching for exceptions
        res = urllib.request.urlopen(url_wea_s) # perform the HTTP request
        res_s = res.read().decode() # store the received text in res_s
        res.close() # close the HTTP connection
        res_dict = json.loads(res_s) # parse the JSON into the dict res_dict
    except Exception as e:
        print(e) # print the error details
        return None # respond with None
    return res_dict['forecasts'][0]['telop'] # return the weather description (telop)
def led3(ip,color): # drive an IoT color LED
    if color is None or color < 0 or color > 7: # when the value is out of range
        return # return without doing anything
    url_led_s = 'http://' + ip # destination URL
    if colors_full: # full-color LED configuration
        colors_3 = ['R','G','B'] # names of the three primary colors
        colors_rgb = ['000','933','393','770','339','717','276','666'] # color table
        s = '/?' # initialize the query string s
        for i in range(len(colors_3)): # iterate over R, G and B
            s += colors_3[i] + "=" # append R=, G= or B= to s
            s += colors_rgb[color][i] # append the brightness (0-9) of each color
            if i < len(colors_3) - 1: # when another primary color follows
                s += '&' # append the '&' separator
    else:
        s = '/?COLOR=' + str(color) # set the color number (0-7)
    try:
        urllib.request.urlopen(url_led_s + s) # send the color information to the IoT color LED
    except urllib.error.URLError: # when an exception occurs
        print('URLError :',url_led_s) # print the error
        # Retry via port 8080 (the 5 lines below)
        url_led_s = 'http://' + ip + ':8080' # switch the port to 8080
        try:
            urllib.request.urlopen(url_led_s + s) # access again
        except urllib.error.URLError: # when an exception occurs
            print('URLError :',url_led_s) # print the error
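# Example of the generated request (derived from the tables above): with colors_full=True
# and color=3 (黄色, yellow), colors_rgb[3] is '770', so the request is 'http://<ip>/?R=7&G=7&B=0';
# with colors_full=False the same color would be sent as 'http://<ip>/?COLOR=3'.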
while True:
    telop = getWeather() # get the weather information
    print('telop =', telop) # show the content of telop
    if telop is not None:
        color = colors.index('消灯') # start from color number 0 (off) and mix colors in below
        if telop.find('晴') >= 0: # when the forecast contains 晴 (sunny)
            color |= colors.index('赤色') # mix in red
        if telop.find('曇') >= 0: # when the forecast contains 曇 (cloudy)
            color |= colors.index('緑色') # mix in green
        if telop.find('雨') >= 0 or telop.find('雪') >= 0: # when it contains 雨 (rain) or 雪 (snow)
            color |= colors.index('青色') # mix in blue
        color %= len(colors) # color stays within 0-7
        print('Color =',color,colors[color]) # show the color number and name
        for ip in ip_leds: # for each device's IP address
            led3(ip,color) # send the color to each IoT color LED
    sleep(interval) # wait for the run interval
'''
実行例
--------------------------------------------------------------------------------
pi@raspberrypi:~/iot/learning $ ./example35_srv_led3_wea.py
telop = 晴れのち曇り
Color = 3 黄色
telop = 曇り
Color = 2 緑色
--------------------------------------------------------------------------------
pi@raspberrypi:~/iot/learning $ sudo ./example19_iot_ledpwm.py
127.0.0.1 - - [11/Oct/2019 17:54:24] "GET /?R=3&G=9&B=3 HTTP/1.1" 200 17
Color = [7, 7, 0]
GPIO17 = 35
GPIO27 = 35
GPIO22 = 0
127.0.0.1 - - [11/Oct/2019 18:04:24] "GET /?R=3&G=9&B=3 HTTP/1.1" 200 17
Color = [3, 9, 3]
GPIO17 = 4
GPIO27 = 100
GPIO22 = 4
--------------------------------------------------------------------------------
'''
|
the-stack_0_23833 | import pytest
from easy_tenants import (
get_current_tenant,
tenant_context,
tenant_context_disabled,
)
from easy_tenants.exceptions import TenantError
from easy_tenants.models import TenantAbstract
from tests.models import Contact, Product, StoreTenant
def test_inheritance_tenant_model():
assert TenantAbstract in Product.__mro__
def test_create_object(tenant_ctx):
Product.objects.create(name="prod1")
assert Product.objects.count()
def test_set_tenant_in_instance_model(tenant_ctx):
prod = Product.objects.create(name="prod1")
assert prod.tenant_id
def test_get_objects_of_tenant(db):
store1 = StoreTenant.objects.create()
store2 = StoreTenant.objects.create()
with tenant_context(store1):
Product.objects.create(name="prod1")
with tenant_context(store2):
Product.objects.create(name="prod2")
assert Product.objects.count() == 1
def test_custom_queryset_in_manager(tenant_ctx):
Contact.objects.create(name="phone 222")
Contact.objects.create(name="email")
assert callable(Contact.objects.by_phone)
assert Contact.objects.by_phone().count() == 1
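# test_custom_queryset_in_manager above assumes that tests.models wires a custom
# queryset into the Contact manager; a minimal sketch of such a queryset
# (hypothetical names, not the actual tests.models code):
#
#   class ContactQuerySet(models.QuerySet):
#       def by_phone(self):
#           return self.filter(name__icontains="phone")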
def test_bulk_create(tenant_ctx):
objs = [
Product(name="prod1"),
Product(name="prod2"),
]
Product.objects.bulk_create(objs)
tenant = get_current_tenant()
assert Product.objects.count() == 2
assert objs[0].tenant == tenant
assert objs[1].tenant == tenant
def test_all_objects(db):
store1 = StoreTenant.objects.create()
store2 = StoreTenant.objects.create()
with tenant_context(store1):
Product.objects.create(name="prod1")
        assert Product.objects.count() == 1
with tenant_context(store2):
Product.objects.create(name="prod2")
        assert Product.objects.count() == 1
with tenant_context_disabled():
assert Product.objects.count() == 2
def test_tenant_required_error(db):
with pytest.raises(TenantError):
with tenant_context():
Product.objects.create(name="prod1")
|
the-stack_0_23835 | import sys
import numpy as np
import torch
import torch.nn as nn
from base import BaseModel
# append transformer_utils
import os
path = os.path.dirname(os.path.realpath(__file__))
sys.path.append(os.path.join(path, "transformer_utils"))
from multiheadattn import MultiHeadAttn
from embedding import Embedding, PositionalEmbedding
from positionwise_FF import PositionwiseFF
from init_weights import weights_init
__all__ = ["MemTransformerLM"]
class DecoderLayer(nn.Module):
def __init__(self, n_head, d_model, d_head, d_inner, dropout, **kwargs):
super(DecoderLayer, self).__init__()
self.dec_attn = MultiHeadAttn(n_head, d_model, d_head, dropout, **kwargs)
self.pos_ff = PositionwiseFF(
d_model, d_inner, dropout, pre_lnorm=kwargs.get("pre_lnorm")
)
def forward(self, dec_inp, dec_attn_mask=None, mems=None):
output = self.dec_attn(dec_inp, attn_mask=dec_attn_mask, mems=mems)
output = self.pos_ff(output)
return output
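    # Assumed tensor layout (sequence-first, matching _forward in MemTransformerLM below):
    #   dec_inp: (qlen, batch, d_model) -> output: (qlen, batch, d_model)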
class MemTransformerLM(BaseModel):
def __init__(
self,
n_token,
n_layer,
n_head,
d_model,
d_head,
d_inner,
dropout,
dropatt,
d_embed=None,
pre_lnorm=False,
tgt_len=None,
ext_len=None,
clamp_len=-1
):
super(MemTransformerLM, self).__init__()
self.n_token = n_token
d_embed = d_model if d_embed is None else d_embed
self.d_embed = d_embed
self.d_model = d_model
self.n_head = n_head
self.d_head = d_head
self.word_emb = Embedding(n_token, d_embed)
self.drop = nn.Dropout(dropout)
self.n_layer = n_layer
self.tgt_len = tgt_len
self.ext_len = ext_len
self.max_klen = tgt_len + ext_len
self.clamp_len = clamp_len
self.layers = nn.ModuleList()
for _ in range(n_layer):
self.layers.append(
DecoderLayer(
n_head,
d_model,
d_head,
d_inner,
dropout,
dropatt=dropatt,
pre_lnorm=pre_lnorm,
)
)
self.pos_emb = PositionalEmbedding(self.d_model)
self.loss = nn.BCEWithLogitsLoss()
self.fc = nn.Linear(
self.d_embed, self.n_token, bias=True
)
weights_init(self)
def _forward(self, dec_inp):
qlen, _, _ = dec_inp.size()
word_emb = self.word_emb(dec_inp)
klen = qlen
# decoder attention mask
dec_attn_mask = torch.triu(word_emb.new_ones(qlen, klen), diagonal=1).byte()[
:, :, None
]
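        # Causal mask: position i may attend only to positions j <= i.
        # As a worked example, for qlen = klen = 3 the (qlen, klen) part is
        #   [[0, 1, 1],
        #    [0, 0, 1],
        #    [0, 0, 0]]
        # before the trailing singleton axis is added for broadcasting.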
hids = []
pos_seq = torch.arange(
klen - 1, -1, -1.0, device=word_emb.device, dtype=word_emb.dtype
)
if self.clamp_len > 0:
pos_seq.clamp_(max=self.clamp_len)
pos_emb = self.pos_emb(pos_seq)
core_out = self.drop(word_emb + pos_emb[-qlen:])
hids.append(core_out)
for layer in self.layers:
core_out = layer(core_out, dec_attn_mask=dec_attn_mask.bool())
hids.append(core_out)
core_out = self.drop(core_out)
return core_out
def forward(self, data, target, **kwargs):
target_mask = kwargs.get("target_mask", None)
tgt_len = target.size(0)
hidden = self._forward(data)
if target_mask is not None:
target_mask = target_mask.unsqueeze(2)
hidden = torch.mul(hidden, target_mask)
pred_hid = hidden[-tgt_len:]
pred_hid = pred_hid.transpose(1, 0).contiguous().view(-1, pred_hid.size(-1))
logits = self.fc(pred_hid)
return [logits, self.word_emb.embedding_w]
def get_embedding(self):
return self.word_emb
def __str__(self):
"""
        String representation showing the number of trainable parameters
"""
model_parameters = filter(lambda p: p.requires_grad, self.parameters())
params = sum([np.prod(p.size()) for p in model_parameters if p is not None])
return "\nTrainable parameters: {}".format(params)
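# Minimal construction sketch (hyperparameter values are illustrative only, not
# taken from any config file shipped with this project):
#   model = MemTransformerLM(n_token=128, n_layer=2, n_head=2, d_model=64,
#                            d_head=32, d_inner=128, dropout=0.1, dropatt=0.0,
#                            tgt_len=32, ext_len=0)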
|
the-stack_0_23837 | from pathlib import Path
from ..base_classes.base_json_sprite_loader import BaseJsonSpriteLoader
from ..util import chunks_from_animation
class Mounts(BaseJsonSpriteLoader):
_sprite_sheet_path = Path("Data/Textures/mounts.png")
_chunk_size = 128
_chunk_map = {
"turkey": (0, 0, 1, 1),
"mount_turkey": (0, 0, 1, 1),
**chunks_from_animation("turkey_neck", (12, 1, 13, 2), 4),
"rockdog": (0, 4, 1, 5),
"axolotl": (0, 8, 1, 9),
"qilin": (0, 12, 1, 13),
"mount_rockdog": (0, 4, 1, 5),
"mount_axolotl": (0, 8, 1, 9),
"mount_qilin": (0, 12, 1, 13),
}
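    # chunks_from_animation("turkey_neck", (12, 1, 13, 2), 4) presumably expands into
    # four consecutive single-chunk frames starting at (12, 1), one chunk apart along
    # the x axis (an assumption; the exact key names come from the helper itself).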
_entity_names = [
"ENT_TYPE_MOUNT_TURKEY",
"ENT_TYPE_ITEM_TURKEY_NECK",
"ENT_TYPE_MOUNT_ROCKDOG",
"ENT_TYPE_MOUNT_AXOLOTL",
"ENT_TYPE_FX_AXOLOTL_HEAD_ENTERING_DOOR",
"ENT_TYPE_MOUNT_QILIN",
]
|