ext (string, 9 classes) | sha (string, 40 chars) | content (string, 3–1.04M chars)
---|---|---
py
|
1a5657ad7da5b155246c78e58d279842bb317521
|
# from -- https://cloud.google.com/ml-engine/docs/scikit/getting-started-training
import datetime
import os
import subprocess
import sys

import pandas as pd
from sklearn import svm
# sklearn.externals.joblib was removed in scikit-learn 0.23; fall back to the
# standalone joblib package on newer versions.
try:
    from sklearn.externals import joblib
except ImportError:
    import joblib

# Fill in your Cloud Storage bucket name (quoted so the script parses).
BUCKET_ID = '<YOUR_BUCKET_NAME>'

iris_data_filename = 'iris_data.csv'
iris_target_filename = 'iris_target.csv'
data_dir = 'gs://cloud-samples-data/ml-engine/iris'

# Download the training data from the public sample bucket.
subprocess.check_call(['gsutil', 'cp', os.path.join(data_dir,
                                                    iris_data_filename),
                       iris_data_filename], stderr=sys.stdout)
subprocess.check_call(['gsutil', 'cp', os.path.join(data_dir,
                                                    iris_target_filename),
                       iris_target_filename], stderr=sys.stdout)

iris_data = pd.read_csv(iris_data_filename).values
iris_target = pd.read_csv(iris_target_filename).values
iris_target = iris_target.reshape((iris_target.size,))

classifier = svm.SVC(verbose=True)
classifier.fit(iris_data, iris_target)

model = 'model.joblib'
joblib.dump(classifier, model)

# Upload the trained model to a timestamped folder in the bucket.
model_path = os.path.join('gs://', BUCKET_ID, datetime.datetime.now().strftime(
    'iris_%Y%m%d_%H%M%S'), model)
subprocess.check_call(['gsutil', 'cp', model, model_path], stderr=sys.stdout)
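# A minimal follow-up sketch (not part of the original tutorial; assumes the
# model was uploaded as above): restore the model and predict locally.
#   subprocess.check_call(['gsutil', 'cp', model_path, 'model.joblib'])
#   restored = joblib.load('model.joblib')
#   print(restored.predict(iris_data[:1]))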
|
py
|
1a565800ee6c72f1f790dcda85717d91ada3da4c
|
#!/usr/bin/env python
##
## Copyright (C) 2017, Amit Aides, all rights reserved.
##
## This file is part of Camera Network
## (see https://bitbucket.org/amitibo/cameranetwork_git).
##
## Redistribution and use in source and binary forms, with or without modification,
## are permitted provided that the following conditions are met:
##
## 1) The software is provided under the terms of this license strictly for
## academic, non-commercial, not-for-profit purposes.
## 2) Redistributions of source code must retain the above copyright notice, this
## list of conditions (license) and the following disclaimer.
## 3) Redistributions in binary form must reproduce the above copyright notice,
## this list of conditions (license) and the following disclaimer in the
## documentation and/or other materials provided with the distribution.
## 4) The name of the author may not be used to endorse or promote products derived
## from this software without specific prior written permission.
## 5) As this software depends on other libraries, the user must adhere to and keep
## in place any licensing terms of those libraries.
## 6) Any publications arising from the use of this software, including but not
## limited to academic journal and conference publications, technical reports and
## manuals, must cite the following works:
## Dmitry Veikherman, Amit Aides, Yoav Y. Schechner and Aviad Levis, "Clouds in The Cloud" Proc. ACCV, pp. 659-674 (2014).
##
## THIS SOFTWARE IS PROVIDED BY THE AUTHOR "AS IS" AND ANY EXPRESS OR IMPLIED
## WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
## MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
## EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
## INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
## BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
## DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
## LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
## OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
## ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.##
"""
Clean memory of the odroid.
The script moves captured date to a backup folder. To remove
the backup folder (and clear the memory) use the ``--delete`` flag.
"""
import argparse
import CameraNetwork.global_settings as gs
import datetime
import os
import shutil
import warnings
gs.initPaths()
BACKUP_FOLDER = os.path.expanduser(
datetime.datetime.now().strftime("~/BACKUP_%Y_%m_%d")
)
def move(src_path):
_, tmp = os.path.split(src_path)
dst_path = os.path.join(BACKUP_FOLDER, tmp)
if not os.path.exists(src_path):
print("Source path does not exist: {}".format(src_path))
return
assert not os.path.exists(dst_path),"Destination path exists: {}".format(dst_path)
shutil.move(src_path, dst_path)
def main(delete_backup=False):
if not os.path.exists(BACKUP_FOLDER):
os.makedirs(BACKUP_FOLDER)
print("Created backup folder: {}".format(BACKUP_FOLDER))
move(gs.CAPTURE_PATH)
move(gs.DEFAULT_LOG_FOLDER)
move(gs.MASK_PATH)
move(gs.SUN_POSITIONS_PATH)
if delete_backup:
answer = raw_input("Remove backup? [y/n]:")
if answer == 'y':
shutil.rmtree(BACKUP_FOLDER)
print("Backup folder removed!")
else:
print("Backup folder NOT removed!")
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Clean memory of the odroid.")
parser.add_argument(
'--delete',
action='store_true',
help='When set, the backup folder will be deleted.'
)
args = parser.parse_args()
main(args.delete)
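# Example invocation (the script's filename is assumed):
#   python clean_memory.py            # move captured data to ~/BACKUP_<date>
#   python clean_memory.py --delete   # afterwards, offer to remove the backup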
|
py
|
1a56587abcfb5807b58024f92ade2deafb94e57e
|
"""
"""
import time
import unittest
import threading
from pynetworking import ClientManager, MultiServerCommunicator
from pynetworking.tests.example_functions import DummyServerCommunicator, DummyMultiServerCommunicator, \
DummyClientCommunicator
from pynetworking.Logging import logger
from pynetworking import Communication_general
logger.setLevel(10)
class TestManyClients(unittest.TestCase):
address = "127.0.0.1", 5000
def setUp(self):
self.manager = ClientManager(self.address, DummyClientCommunicator)
self.manager.start()
Communication_general.ENCRYPTED_COMMUNICATION = False
def tearDown(self):
DummyMultiServerCommunicator.close_all_connections()
logger.debug(self.manager.clients)
self.manager.stop_listening()
self.manager.stop_connections()
def test_many(self):
clients = []
thread_pool = []
def client_exe(client: DummyMultiServerCommunicator):
client.connect(self.address)
c_id = client.remote_functions.return_client_id()
print(c_id)
time.sleep(5)
client.close_connection()
for i in range(20):
clients.append(DummyMultiServerCommunicator(i))
thread_pool.append(threading.Thread(target=client_exe, args=(clients[i],)))
thread_pool[i].start()
time.sleep(0.1)
time.sleep(1)
logger.debug("Finished")
|
py
|
1a5658978f5a10941f5c15fe8d2afca6fadf1391
|
# SPDX-FileCopyrightText: 2020 Hlib Babii <[email protected]>
#
# SPDX-License-Identifier: Apache-2.0
import os
import sys
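# Make the repository root importable when this file is run from a
# subdirectory (inferred from the path manipulation below).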
sys.path.insert(0, os.path.join(os.path.abspath(os.path.dirname(__file__)), '..'))
|
py
|
1a5658f00a937eeed58321a3b0ccc9fce6abc479
|
from pythonds.graphs import PriorityQueue, Graph, Vertex


def dijkstra(aGraph: Graph, start: Vertex):
    """Compute shortest-path distances from ``start`` over ``aGraph``."""
    pq = PriorityQueue()
    start.setDistance(0)
    pq.buildHeap([(v.getDistance(), v) for v in aGraph])
    while not pq.isEmpty():
        currentVert = pq.delMin()
        for nextVert in currentVert.getConnections():
            newDist = currentVert.getDistance() + currentVert.getWeight(nextVert)
            if newDist < nextVert.getDistance():
                nextVert.setDistance(newDist)
                nextVert.setPred(currentVert)
                # Reduce the neighbour's key in the priority queue.
                pq.decreaseKey(nextVert, newDist)
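# A minimal usage sketch (assumes the pythonds Graph API; keys and weights
# below are illustrative):
#   g = Graph()
#   g.addEdge('u', 'v', 2)
#   g.addEdge('v', 'w', 3)
#   dijkstra(g, g.getVertex('u'))
#   print(g.getVertex('w').getDistance())  # 5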
|
py
|
1a5658fadee9cb6dd5ff48f5225fff5f82049ed5
|
import asyncio
import json
from datetime import datetime
import aiohttp
from aiohttp import ClientConnectorError
from tracardi.config import tracardi
from tracardi.domain.resource import ResourceCredentials
from tracardi.domain.resources.token import Token
from tracardi.service.storage.driver import storage
from tracardi.service.plugin.domain.register import Plugin, Spec, MetaData, Form, FormGroup, FormField, FormComponent
from tracardi.service.plugin.domain.result import Result
from tracardi.service.plugin.runner import ActionRunner
from .model.configuration import Configuration
def validate(config: dict) -> Configuration:
return Configuration(**config)
class AmplitudeSendEvent(ActionRunner):
@staticmethod
async def build(**kwargs) -> 'AmplitudeSendEvent':
config = validate(kwargs)
resource = await storage.driver.resource.load(config.source.id)
return AmplitudeSendEvent(config, resource.credentials)
def __init__(self, config, credentials: ResourceCredentials):
self.config = config # type: Configuration
self.credentials = credentials.get_credentials(self, output=Token)
@staticmethod
def _get_value(dot, dot_notation, allow_custom_value=False):
if dot_notation is None:
return None
value = dot[dot_notation]
if value == dot_notation:
if allow_custom_value is True:
return value
return None
return value
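# Illustrative note: the dot accessor resolves paths such as
# "event@properties.color". If the lookup returns the notation string
# unchanged (it was not a resolvable path), the literal value is kept only
# when ``allow_custom_value`` is True; otherwise None is returned.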
async def run(self, payload):
try:
dot = self._get_dot_accessor(payload)
timeout = aiohttp.ClientTimeout(total=self.config.timeout)
async with aiohttp.ClientSession(timeout=timeout) as session:
platform = self._get_value(dot, self.config.platform)
properties = self._get_value(dot, self.config.event_properties)
pii = self._get_value(dot, self.config.user_properties)
event_type = self._get_value(dot, self.config.event_type)
ip = self._get_value(dot, self.config.ip, allow_custom_value=True)
event = {
"app_version": tracardi.version,
"insert_id": self.event.id if self.debug is False else None,
"user_id": self.profile.id,
"session_id": int(datetime.timestamp(
self.session.metadata.time.insert)) if self.profile.metadata.time.insert is not None else -1,
"event_type": self.event.type if event_type is None else event_type,
"event_properties": self.event.properties if not isinstance(properties, dict) else properties,
"user_properties": self.profile.pii.dict() if not isinstance(pii, dict) else pii,
"groups": self._get_value(dot, self.config.groups),
"ip": self.event.metadata.ip if ip is None else ip,
"location_lat": self._get_value(dot, self.config.location_lat),
"location_lng": self._get_value(dot, self.config.location_lng),
"revenueType": self._get_value(dot, self.config.revenueType),
"productId": self._get_value(dot, self.config.productId),
"revenue": self._get_value(dot, self.config.revenue),
"quantity": self._get_value(dot, self.config.quantity),
"price": self._get_value(dot, self.config.price),
"language": self._get_value(dot, self.config.language),
"dma": self._get_value(dot, self.config.dma),
"city": self._get_value(dot, self.config.city),
"region": self._get_value(dot, self.config.region),
"country": self._get_value(dot, self.config.country),
"carrier": self._get_value(dot, self.config.carrier),
"device_model": self._get_value(dot, self.config.device_model),
"device_manufacturer": self._get_value(dot, self.config.device_manufacturer),
"device_brand": self._get_value(dot, self.config.device_brand),
"os_version": self._get_value(dot, self.config.os_version),
"os_name": self._get_value(dot, self.config.os_name),
"platform": self.session.get_platform() if platform is None else platform,
}
params = {
"api_key": self.credentials.token,
"events": [
event
]
}
headers = {
'Content-Type': 'application/json',
'Accept': '*/*'
}
async with session.post(
url=str(self.config.url),
headers=headers,
data=json.dumps(params)
) as response:
result = {
"status": response.status,
"content": await response.json()
}
if response.status in [200, 201, 202, 203]:
return Result(port="response", value=result), Result(port="error", value=None)
else:
return Result(port="response", value=None), Result(port="error", value=result)
except ClientConnectorError as e:
return Result(port="response", value=None), Result(port="error", value=str(e))
except asyncio.exceptions.TimeoutError:
return Result(port="response", value=None), Result(port="error", value="API timed out.")
def register() -> Plugin:
return Plugin(
start=False,
spec=Spec(
module=__name__,
className='AmplitudeSendEvent',
inputs=['payload'],
outputs=["response", "error"],
init={
"source": {"id": None},
"url": "https://api2.amplitude.com/2/httpapi",
"timeout": 15,
"event_type": None,
"event_properties": None,
"user_properties": None,
"groups": None,
"ip": None,
"location_lat": None,
"location_lng": None,
"language": None,
"dma": None,
"city": None,
"region": None,
"country": None,
"revenueType": None,
"productId": None,
"revenue": None,
"quantity": None,
"price": None,
"carrier": None,
"device_model": None,
"device_manufacturer": None,
"device_brand": None,
"os_version": None,
"os_name": None,
"platform": None,
},
form=Form(groups=[
FormGroup(
name="Amplitude source",
fields=[
FormField(
id="source",
name="Amplitude resource",
description="Please select your Amplitude resource.",
component=FormComponent(type="resource", props={"label": "Resource", "tag": "token"})
),
]),
FormGroup(
name="Amplitude Api Configuration",
fields=[
FormField(
id="url",
name="API URL",
description="Please type API URL if it is different then default Amplitude URL.",
component=FormComponent(type="text", props={"label": "API URL"})
),
FormField(
id="timeout",
name="API timeout",
description="Please API timeout.",
component=FormComponent(type="text", props={"label": "API timeout"})
),
]),
FormGroup(
name="Amplitude Event Configuration",
description="Select what data should be sent to Amplitude. Leave empty if you want to send "
"default data.",
fields=[
FormField(
id="event_type",
name="Event type",
description="Leave empty if the current event type is to be copied.",
component=FormComponent(type="dotPath", props={"label": "Reference to event type",
"defaultSourceValue": "event",
"defaultPathValue": "type"
})
),
FormField(
id="event_properties",
name="Event properties",
description="Leave empty if the current event properties is to be copied.",
component=FormComponent(type="dotPath", props={"label": "Reference to event properties",
"defaultSourceValue": "event",
"defaultPathValue": "properties"
})
),
FormField(
id="user_properties",
name="User Personal Identifiable Information (PII)",
description="Leave empty if the current profile PII is to be copied.",
component=FormComponent(type="dotPath", props={"label": "Reference to profile PII",
"defaultSourceValue": "profile",
"defaultPathValue": "pii"
})
),
FormField(
id="groups",
name="Groups",
description="Group types are only available to Growth and Enterprise customers who have "
"purchased the Accounts add-on on Amplitude.",
component=FormComponent(type="dotPath", props={"label": "IP Groups"})
),
]),
FormGroup(
name="Amplitude Location Configuration",
description="Select what data should be sent to Amplitude. Leave empty if you want to send "
"default data.",
fields=[
FormField(
id="ip",
name="IP address",
description="Leave empty if the events ip to be copied.",
component=FormComponent(type="dotPath", props={"label": "IP address",
"defaultSourceValue": "event",
"defaultPathValue": "metadata.ip"
})
),
FormField(
id="location_lat",
name="Latitude",
description="Leave empty if you want Amplitude to read location from IP.",
component=FormComponent(type="dotPath", props={"label": "Path to latitude",
"defaultSourceValue": "payload"
})
),
FormField(
id="location_lng",
name="Longitude",
description="Leave empty if you want Amplitude to read location from IP.",
component=FormComponent(type="dotPath", props={"label": "Path to longitude",
"defaultSourceValue": "payload"
})
),
FormField(
id="language",
name="Language",
description="The language set by the user.",
component=FormComponent(type="dotPath", props={"label": "Path to Language",
"defaultSourceValue": "payload"
})
),
FormField(
id="dma",
name="Designated Market Area (DMA)",
description="The current Designated Market Area of the user.",
component=FormComponent(type="dotPath", props={"label": "Path to DMA",
"defaultSourceValue": "payload"
})
),
FormField(
id="city",
name="City",
description="The current city of the user.",
component=FormComponent(type="dotPath", props={"label": "Path to city",
"defaultSourceValue": "payload"
})
),
FormField(
id="region",
name="Region",
description="The current region of the user.",
component=FormComponent(type="dotPath", props={"label": "Path to region",
"defaultSourceValue": "payload"
})
),
FormField(
id="country",
name="Country",
description="The current country of the user.",
component=FormComponent(type="dotPath", props={"label": "Path to country",
"defaultSourceValue": "payload"
})
),
]),
FormGroup(
name="Amplitude Product Configuration",
description="Select what data should be sent to Amplitude. Leave empty if you want to send "
"default data.",
fields=[
FormField(
id="productId",
name="Product ID",
description="An identifier for the item purchased. You must send a price and quantity "
"or revenue with this field.",
component=FormComponent(type="dotPath", props={"label": "Path to productId",
"defaultSourceValue": "event"
})
),
FormField(
id="revenue",
name="Revenue",
description="revenue = price * quantity. If you send all 3 fields of price, quantity, and "
"revenue, then (price * quantity) will be used as the revenue value. You can "
"use negative values to indicate refunds.",
component=FormComponent(type="dotPath", props={"label": "Path to revenue",
"defaultSourceValue": "event"
})
),
FormField(
id="revenueType",
name="Revenue type",
description="The type of revenue for the item purchased. You must send a price and "
"quantity or revenue with this field.",
component=FormComponent(type="dotPath", props={"label": "Path to revenue type",
"defaultSourceValue": "event"})
),
FormField(
id="quantity",
name="Quantity",
description="The quantity of the item purchased. Defaults to 1 if not specified.",
component=FormComponent(type="dotPath", props={"label": "Path to quantity",
"defaultSourceValue": "event"
})
),
FormField(
id="price",
name="Price",
description="The price of the item purchased. Required for revenue data if the revenue "
"field is not sent. You can use negative values to indicate refunds.",
component=FormComponent(type="dotPath", props={"label": "Path to price",
"defaultSourceValue": "event"
})
),
]),
FormGroup(
name="Amplitude Device Configuration",
description="Select what data should be sent to Amplitude. Leave empty if you want to send "
"default data.",
fields=[
FormField(
id="platform",
name="Platform",
description="Platform of the device.",
component=FormComponent(type="dotPath", props={"label": "Path to platform",
"defaultSourceValue": "session"
})
),
FormField(
id="os_name",
name="Operation System Name (OS)",
description="The name of the mobile operating system or browser that the user is using.",
component=FormComponent(type="dotPath", props={"label": "Path to OS",
"defaultSourceValue": "session"
})
),
FormField(
id="os_version",
name="Operation System Version",
description="The version of the mobile operating system or browser the user is using.",
component=FormComponent(type="dotPath", props={"label": "Path to OS version",
"defaultSourceValue": "session"
})
),
FormField(
id="device_brand",
name="Device Brand",
description="The device brand that the user is using.",
component=FormComponent(type="dotPath", props={"label": "Path to device brand",
"defaultSourceValue": "session"
})
),
FormField(
id="device_manufacturer",
name="Device Manufacturer",
description="The device manufacturer that the user is using.",
component=FormComponent(type="dotPath", props={"label": "Path to device manufacturer",
"defaultSourceValue": "session"
})
),
FormField(
id="device_model",
name="Device Model",
description="The device model that the user is using.",
component=FormComponent(type="dotPath", props={"label": "Path to device model",
"defaultSourceValue": "session"
})
),
FormField(
id="carrier",
name="Carrier",
description="The carrier that the user is using.",
component=FormComponent(type="dotPath", props={"label": "Path to carrier",
"defaultSourceValue": "session"
})
),
])
]),
version="0.6.1",
author="Risto Kowaczewski",
license="MIT",
manual="amplitude_send_event"
),
metadata=MetaData(
name='Amplitude register event',
desc='Sends a request to the Amplitude API endpoint to register an event.',
icon='bar-chart',
group=["Stats"]
)
)
|
py
|
1a565971589940dc74f0c5af0143a58ed174bff0
|
# Copyright (c) 2010-2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# See http://code.google.com/p/python-nose/issues/detail?id=373
# The code below enables nosetests to work with i18n _() blocks
from __future__ import print_function
import sys
import os
try:
from unittest.util import safe_repr
except ImportError:
# Probably py26
_MAX_LENGTH = 80
def safe_repr(obj, short=False):
try:
result = repr(obj)
except Exception:
result = object.__repr__(obj)
if not short or len(result) < _MAX_LENGTH:
return result
return result[:_MAX_LENGTH] + ' [truncated]...'
from eventlet.green import socket
# Make unit tests pass on all locales.
import swift
setattr(swift, 'gettext_', lambda x: x)
from swift.common.utils import readconf
# Work around what seems to be a Python bug.
# c.f. https://bugs.launchpad.net/swift/+bug/820185.
import logging
logging.raiseExceptions = False
def get_config(section_name=None, defaults=None):
"""
Attempt to get a test config dictionary.
:param section_name: the section to read (all sections if not defined)
:param defaults: an optional dictionary namespace of defaults
"""
config = {}
if defaults is not None:
config.update(defaults)
config_file = os.environ.get('SWIFT_TEST_CONFIG_FILE',
'/etc/swift/test.conf')
try:
config = readconf(config_file, section_name)
except IOError:
if not os.path.exists(config_file):
print('Unable to read test config %s - file not found'
% config_file, file=sys.stderr)
elif not os.access(config_file, os.R_OK):
print('Unable to read test config %s - permission denied'
% config_file, file=sys.stderr)
except ValueError as e:
print(e)
return config
def listen_zero():
"""
eventlet.listen() always sets SO_REUSEPORT, so when called with
("localhost", 0) it can return the same port twice instead of returning
unique ports. That causes our tests to fail, so open-code it here
without SO_REUSEPORT.
"""
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.bind(("127.0.0.1", 0))
sock.listen(50)
return sock
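# Illustrative test usage (assumed, not part of this module):
#   sock = listen_zero()
#   port = sock.getsockname()[1]    # a unique ephemeral port
#   conf = get_config('func_test')  # {} if the config file is unreadable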
|
py
|
1a565a04707df65d9462c64d561ac8ee6dcd83d6
|
# Lint as: python3
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""API for Tensorflow Model Analysis."""
# TODO(b/149126671): Put ValidationResultsWriter in a separate file.
from __future__ import absolute_import
from __future__ import division
# Standard __future__ imports
from __future__ import print_function
import os
import tempfile
from typing import Any, Dict, Iterator, List, Optional, Set, Text, Union
from absl import logging
import apache_beam as beam
import pandas as pd
import pyarrow as pa
import tensorflow as tf
from tensorflow_model_analysis import config
from tensorflow_model_analysis import constants
from tensorflow_model_analysis import model_util
from tensorflow_model_analysis import types
from tensorflow_model_analysis.eval_saved_model import constants as eval_constants
from tensorflow_model_analysis.evaluators import evaluator
from tensorflow_model_analysis.evaluators import metrics_and_plots_evaluator
from tensorflow_model_analysis.evaluators import metrics_and_plots_evaluator_v2
from tensorflow_model_analysis.extractors import batched_input_extractor
from tensorflow_model_analysis.extractors import batched_predict_extractor_v2
from tensorflow_model_analysis.extractors import extractor
from tensorflow_model_analysis.extractors import input_extractor
from tensorflow_model_analysis.extractors import predict_extractor
from tensorflow_model_analysis.extractors import slice_key_extractor
from tensorflow_model_analysis.extractors import tfjs_predict_extractor
from tensorflow_model_analysis.extractors import tflite_predict_extractor
from tensorflow_model_analysis.extractors import unbatch_extractor
from tensorflow_model_analysis.post_export_metrics import post_export_metrics
from tensorflow_model_analysis.proto import metrics_for_slice_pb2
from tensorflow_model_analysis.proto import validation_result_pb2
from tensorflow_model_analysis.slicer import slicer_lib as slicer
from tensorflow_model_analysis.validators import validator
from tensorflow_model_analysis.view import util
from tensorflow_model_analysis.view import view_types
from tensorflow_model_analysis.writers import eval_config_writer
from tensorflow_model_analysis.writers import metrics_plots_and_validations_writer
from tensorflow_model_analysis.writers import writer
from tfx_bsl.arrow import table_util
from tfx_bsl.tfxio import tensor_adapter
from tfx_bsl.tfxio import tf_example_record
from tensorflow_metadata.proto.v0 import schema_pb2
def _assert_tensorflow_version():
"""Check that we're using a compatible TF version."""
# Fail with a clear error in case we are not using a compatible TF version.
major, minor, _ = tf.version.VERSION.split('.')
if (int(major) not in (1, 2)) or (int(major) == 1 and int(minor) < 15):
raise RuntimeError(
'Tensorflow version >= 1.15, < 3 is required. Found (%s). Please '
'install the latest 1.x or 2.x version from '
'https://github.com/tensorflow/tensorflow. ' % tf.version.VERSION)
if int(major) == 2:
logging.warning(
'Tensorflow version (%s) found. Note that TFMA support for TF 2.0 '
'is currently in beta', tf.version.VERSION)
def _is_legacy_eval(
eval_shared_model: Optional[types.MaybeMultipleEvalSharedModels],
eval_config: Optional[config.EvalConfig]):
"""Returns True if legacy evaluation is being used."""
# A legacy evaluation is an evaluation that uses only a single EvalSharedModel,
# has no tags (or uses "eval" as its tag), and does not specify an eval_config
# (or specifies an eval_config with no metrics). The legacy evaluation is
# based on using add_metrics_callbacks to create a modified version of the
# graph saved with an EvalSavedModel. The newer version of evaluation supports
# both add_metrics_callbacks as well as metrics defined in MetricsSpecs inside
# of EvalConfig. The newer version works with both "eval" and serving models
# and also supports multi-model evaluation. This function is used by code to
# support backwards compatibility for callers that have not updated to use the
# new EvalConfig.
return (eval_shared_model and not isinstance(eval_shared_model, dict) and
not isinstance(eval_shared_model, list) and
((not eval_shared_model.model_loader.tags or
eval_constants.EVAL_TAG in eval_shared_model.model_loader.tags) and
(not eval_config or not eval_config.metrics_specs)))
def _default_eval_config(eval_shared_models: List[types.EvalSharedModel],
slice_spec: Optional[List[slicer.SingleSliceSpec]],
write_config: Optional[bool],
compute_confidence_intervals: Optional[bool],
min_slice_size: int):
"""Creates default EvalConfig (for use in legacy evaluations)."""
model_specs = []
for shared_model in eval_shared_models:
example_weight_key = shared_model.example_weight_key
example_weight_keys = {}
if example_weight_key and isinstance(example_weight_key, dict):
example_weight_keys = example_weight_key
example_weight_key = ''
model_specs.append(
config.ModelSpec(
name=shared_model.model_name,
example_weight_key=example_weight_key,
example_weight_keys=example_weight_keys))
slicing_specs = None
if slice_spec:
slicing_specs = [s.to_proto() for s in slice_spec]
options = config.Options()
options.compute_confidence_intervals.value = compute_confidence_intervals
options.min_slice_size.value = min_slice_size
if not write_config:
options.disabled_outputs.values.append(eval_config_writer.EVAL_CONFIG_FILE)
return config.EvalConfig(
model_specs=model_specs, slicing_specs=slicing_specs, options=options)
def _model_types(
eval_shared_model: Optional[types.MaybeMultipleEvalSharedModels]
) -> Optional[Set[Text]]:
"""Returns model types associated with given EvalSharedModels."""
eval_shared_models = model_util.verify_and_update_eval_shared_models(
eval_shared_model)
if not eval_shared_models:
return None
else:
return set([m.model_type for m in eval_shared_models])
def _update_eval_config_with_defaults(
eval_config: config.EvalConfig,
eval_shared_model: Optional[types.MaybeMultipleEvalSharedModels]
) -> config.EvalConfig:
"""Returns updated eval config with default values."""
eval_shared_models = model_util.verify_and_update_eval_shared_models(
eval_shared_model)
maybe_add_baseline = eval_shared_models and len(eval_shared_models) == 2
return config.update_eval_config_with_defaults(
eval_config, maybe_add_baseline=maybe_add_baseline)
MetricsForSlice = metrics_for_slice_pb2.MetricsForSlice
def load_metrics(output_path: Text,
output_file_format: Text = '') -> Iterator[MetricsForSlice]:
"""Read and deserialize the MetricsForSlice records."""
for m in metrics_plots_and_validations_writer.load_and_deserialize_metrics(
output_path, output_file_format):
yield m
PlotsForSlice = metrics_for_slice_pb2.PlotsForSlice
def load_plots(output_path: Text,
output_file_format: Text = '') -> Iterator[PlotsForSlice]:
"""Read and deserialize the PlotsForSlice records."""
for p in metrics_plots_and_validations_writer.load_and_deserialize_plots(
output_path, output_file_format):
yield p
# Define types here to avoid type errors between OSS and internal code.
ValidationResult = validation_result_pb2.ValidationResult
def load_validation_result(output_path: Text,
output_file_format: Text = '') -> ValidationResult:
"""Read and deserialize the ValidationResult."""
return metrics_plots_and_validations_writer.load_and_deserialize_validation_result(
output_path, output_file_format)
def make_eval_results(results: List[view_types.EvalResult],
mode: Text) -> view_types.EvalResults:
"""Run model analysis for a single model on multiple data sets.
Args:
results: A list of TFMA evaluation results.
mode: The mode of the evaluation. Currently, tfma.DATA_CENTRIC_MODE and
tfma.MODEL_CENTRIC_MODE are supported.
Returns:
A `tfma.view.EvalResults` object containing all evaluation results. This
can be used to construct a time series view.
"""
return view_types.EvalResults(results, mode)
def load_eval_results(
output_paths: Union[Text, List[Text]],
output_file_format: Optional[Text] = '',
mode: Text = constants.MODEL_CENTRIC_MODE,
model_name: Optional[Text] = None) -> view_types.EvalResults:
"""Loads results for multiple models or multiple data sets.
Args:
output_paths: A single path or list of output paths of completed tfma runs.
output_file_format: Optional file extension to filter files by.
mode: The mode of the evaluation. Currently, tfma.DATA_CENTRIC_MODE and
tfma.MODEL_CENTRIC_MODE are supported.
model_name: Filters to only return results for given model. If unset all
models are returned.
Returns:
An EvalResults containing the evaluation results serialized at output_paths.
This can be used to construct a time series view.
"""
results = []
if not isinstance(output_paths, list):
output_paths = [output_paths]
for output_path in output_paths:
if model_name is None:
_, _, _, model_locations = eval_config_writer.load_eval_run(output_path)
model_names = list(model_locations.keys())
else:
model_names = [model_name]
for model_name in model_names:
results.append(
load_eval_result(
output_path, output_file_format, model_name=model_name))
return make_eval_results(results, mode)
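# Illustrative call (output paths assumed): load two completed runs and view
# them as a time series:
#   eval_results = load_eval_results(
#       ['/path/to/run1', '/path/to/run2'], mode=constants.MODEL_CENTRIC_MODE)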
def load_eval_result(
output_path: Text,
output_file_format: Optional[Text] = '',
model_name: Optional[Text] = None) -> view_types.EvalResult:
"""Loads EvalResult object for use with the visualization functions.
Args:
output_path: Output directory containing config, metrics, plots, etc.
output_file_format: Optional file extension to filter files by.
model_name: Optional model name. Required if multi-model evaluation was run.
Returns:
EvalResult object for use with the visualization functions.
"""
# Config, metrics, and plots files should all exist under the given output
# directory, but fairness plugin has a use-case where only the metrics are
# provided so we support all files as being optional (the EvalResult will have
# corresponding None values for files that are not present).
eval_config, data_location, file_format, model_locations = (
eval_config_writer.load_eval_run(output_path))
metrics_list = []
for p in metrics_plots_and_validations_writer.load_and_deserialize_metrics(
output_path, output_file_format):
metrics = util.convert_metrics_proto_to_dict(p, model_name=model_name)
if metrics is not None:
metrics_list.append(metrics)
plots_list = []
for p in metrics_plots_and_validations_writer.load_and_deserialize_plots(
output_path, output_file_format):
plots = util.convert_plots_proto_to_dict(p, model_name=model_name)
if plots is not None:
plots_list.append(plots)
if not model_locations:
model_location = ''
elif model_name is None:
model_location = list(model_locations.values())[0]
else:
model_location = model_locations[model_name]
return view_types.EvalResult(
slicing_metrics=metrics_list,
plots=plots_list,
config=eval_config,
data_location=data_location,
file_format=file_format,
model_location=model_location)
def default_eval_shared_model(
eval_saved_model_path: Text,
add_metrics_callbacks: Optional[List[types.AddMetricsCallbackType]] = None,
include_default_metrics: Optional[bool] = True,
example_weight_key: Optional[Union[Text, Dict[Text, Text]]] = None,
additional_fetches: Optional[List[Text]] = None,
blacklist_feature_fetches: Optional[List[Text]] = None,
tags: Optional[List[Text]] = None,
model_name: Text = '',
eval_config: Optional[config.EvalConfig] = None,
custom_model_loader: Optional[types.ModelLoader] = None
) -> types.EvalSharedModel:
"""Returns default EvalSharedModel.
Args:
eval_saved_model_path: Path to EvalSavedModel.
add_metrics_callbacks: Optional list of callbacks for adding additional
metrics to the graph (see EvalSharedModel for more information on how to
configure additional metrics). Metrics for example count and example
weights will be added automatically.
include_default_metrics: True to include the default metrics that are part
of the saved model graph during evaluation. Note that
eval_config.options.include_default_metrics must also be true.
example_weight_key: Example weight key (single-output model) or dict of
example weight keys (multi-output model) keyed by output name.
additional_fetches: Prefixes of additional tensors stored in
signature_def.inputs that should be fetched at prediction time. The
"features" and "labels" tensors are handled automatically and should not
be included.
blacklist_feature_fetches: List of tensor names in the features dictionary
which should be excluded from the fetches request. This is useful in
scenarios where features are large (e.g. images) and can lead to excessive
memory use if stored.
tags: Model tags (e.g. 'serve' for serving or 'eval' for EvalSavedModel).
model_name: Optional name of the model being created (should match
ModelSpecs.name). The name should only be provided if multiple models are
being evaluated.
eval_config: Eval config. Only used for setting default tags.
custom_model_loader: Optional custom model loader for non-TF models.
"""
if not eval_config:
model_type = constants.TF_ESTIMATOR
if tags is None:
tags = [eval_constants.EVAL_TAG]
else:
model_spec = model_util.get_model_spec(eval_config, model_name)
if not model_spec:
raise ValueError('ModelSpec for model name {} not found in EvalConfig: '
'config={}'.format(model_name, eval_config))
model_type = model_util.get_model_type(model_spec, eval_saved_model_path,
tags)
if tags is None:
# Default to serving unless estimator is used.
if model_type == constants.TF_ESTIMATOR:
tags = [eval_constants.EVAL_TAG]
else:
tags = [tf.saved_model.SERVING]
# Backwards compatibility for legacy add_metrics_callbacks implementation.
if model_type == constants.TF_ESTIMATOR and eval_constants.EVAL_TAG in tags:
# PyType doesn't know about the magic exports we do in post_export_metrics.
# Additionally, the lines seem to get reordered in compilation, so we can't
# just put the disable-attr on the add_metrics_callbacks lines.
# pytype: disable=module-attr
if not add_metrics_callbacks:
add_metrics_callbacks = []
# Always compute example weight and example count.
example_count_callback = post_export_metrics.example_count()
add_metrics_callbacks.append(example_count_callback)
if example_weight_key:
if isinstance(example_weight_key, dict):
for output_name, key in example_weight_key.items():
example_weight_callback = post_export_metrics.example_weight(
key, metric_tag=output_name)
add_metrics_callbacks.append(example_weight_callback)
else:
example_weight_callback = post_export_metrics.example_weight(
example_weight_key)
add_metrics_callbacks.append(example_weight_callback)
# pytype: enable=module-attr
model_loader = custom_model_loader
if not model_loader and model_type in constants.VALID_TF_MODEL_TYPES:
model_loader = types.ModelLoader(
construct_fn=model_util.model_construct_fn(
eval_saved_model_path=eval_saved_model_path,
add_metrics_callbacks=add_metrics_callbacks,
include_default_metrics=include_default_metrics,
additional_fetches=additional_fetches,
blacklist_feature_fetches=blacklist_feature_fetches,
model_type=model_type,
tags=tags),
tags=tags)
return types.EvalSharedModel(
model_name=model_name,
model_type=model_type,
model_path=eval_saved_model_path,
add_metrics_callbacks=add_metrics_callbacks,
include_default_metrics=include_default_metrics,
example_weight_key=example_weight_key,
additional_fetches=additional_fetches,
model_loader=model_loader)
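# A sketch of typical usage (path and tags are assumptions, not canonical):
#   shared_model = default_eval_shared_model(
#       eval_saved_model_path='/path/to/saved_model',
#       eval_config=eval_config,
#       tags=[tf.saved_model.SERVING])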
def default_extractors( # pylint: disable=invalid-name
eval_shared_model: Optional[types.MaybeMultipleEvalSharedModels] = None,
eval_config: Optional[config.EvalConfig] = None,
slice_spec: Optional[List[slicer.SingleSliceSpec]] = None,
materialize: Optional[bool] = True,
tensor_adapter_config: Optional[tensor_adapter.TensorAdapterConfig] = None,
custom_predict_extractor: Optional[extractor.Extractor] = None
) -> List[extractor.Extractor]:
"""Returns the default extractors for use in ExtractAndEvaluate.
Args:
eval_shared_model: Shared model (single-model evaluation) or list of shared
models (multi-model evaluation). Required unless the predictions are
provided alongside the features (i.e. model-agnostic evaluations).
eval_config: Eval config.
slice_spec: Deprecated (use EvalConfig).
materialize: True to have extractors create materialized output.
tensor_adapter_config: Tensor adapter config which specifies how to obtain
tensors from the Arrow RecordBatch. If None, we feed the raw examples to
the model.
custom_predict_extractor: Optional custom predict extractor for non-TF
models.
Raises:
NotImplementedError: If eval_config contains mixed serving and eval models.
"""
if slice_spec and eval_config:
raise ValueError('slice_spec is deprecated, only use eval_config')
if eval_config is not None:
eval_config = _update_eval_config_with_defaults(eval_config,
eval_shared_model)
if _is_legacy_eval(eval_shared_model, eval_config):
# Backwards compatibility for previous add_metrics_callbacks implementation.
if not eval_config and slice_spec:
eval_config = config.EvalConfig(
slicing_specs=[s.to_proto() for s in slice_spec])
return [
custom_predict_extractor or predict_extractor.PredictExtractor(
eval_shared_model, materialize=materialize),
slice_key_extractor.SliceKeyExtractor(
eval_config=eval_config, materialize=materialize)
]
elif eval_shared_model:
model_types = _model_types(eval_shared_model)
eval_shared_models = model_util.verify_and_update_eval_shared_models(
eval_shared_model)
if (not model_types.issubset(constants.VALID_TF_MODEL_TYPES) and
not custom_predict_extractor):
raise NotImplementedError(
'either a custom_predict_extractor must be used or model type must '
'be one of: {}. eval_config={}'.format(
str(constants.VALID_TF_MODEL_TYPES), eval_config))
if model_types == set([constants.TF_LITE]):
# TODO(b/163889779): Convert TFLite extractor to operate on batched
# extracts. Then we can remove the input extractor.
return [
input_extractor.InputExtractor(eval_config=eval_config),
(custom_predict_extractor or
tflite_predict_extractor.TFLitePredictExtractor(
eval_config=eval_config, eval_shared_model=eval_shared_model)),
slice_key_extractor.SliceKeyExtractor(
eval_config=eval_config, materialize=materialize)
]
elif constants.TF_LITE in model_types:
raise NotImplementedError(
'support for mixing tf_lite and non-tf_lite models is not '
'implemented: eval_config={}'.format(eval_config))
if model_types == set([constants.TF_JS]):
return [
input_extractor.InputExtractor(eval_config=eval_config),
(custom_predict_extractor or
tfjs_predict_extractor.TFJSPredictExtractor(
eval_config=eval_config, eval_shared_model=eval_shared_model)),
slice_key_extractor.SliceKeyExtractor(
eval_config=eval_config, materialize=materialize)
]
elif constants.TF_JS in model_types:
raise NotImplementedError(
'support for mixing tf_js and non-tf_js models is not '
'implemented: eval_config={}'.format(eval_config))
elif (eval_config and model_types == set([constants.TF_ESTIMATOR]) and
all(eval_constants.EVAL_TAG in m.model_loader.tags
for m in eval_shared_models)):
return [
custom_predict_extractor or predict_extractor.PredictExtractor(
eval_shared_model,
materialize=materialize,
eval_config=eval_config),
slice_key_extractor.SliceKeyExtractor(
eval_config=eval_config, materialize=materialize)
]
elif (eval_config and constants.TF_ESTIMATOR in model_types and
any(eval_constants.EVAL_TAG in m.model_loader.tags
for m in eval_shared_models)):
raise NotImplementedError(
'support for mixing eval and non-eval estimator models is not '
'implemented: eval_config={}'.format(eval_config))
else:
return [
batched_input_extractor.BatchedInputExtractor(
eval_config=eval_config),
(custom_predict_extractor or
batched_predict_extractor_v2.BatchedPredictExtractor(
eval_config=eval_config,
eval_shared_model=eval_shared_model,
tensor_adapter_config=tensor_adapter_config)),
unbatch_extractor.UnbatchExtractor(),
slice_key_extractor.SliceKeyExtractor(
eval_config=eval_config, materialize=materialize)
]
else:
return [
batched_input_extractor.BatchedInputExtractor(eval_config=eval_config),
unbatch_extractor.UnbatchExtractor(),
slice_key_extractor.SliceKeyExtractor(
eval_config=eval_config, materialize=materialize)
]
def default_evaluators( # pylint: disable=invalid-name
eval_shared_model: Optional[types.MaybeMultipleEvalSharedModels] = None,
eval_config: Optional[config.EvalConfig] = None,
schema: Optional[schema_pb2.Schema] = None,
compute_confidence_intervals: Optional[bool] = False,
min_slice_size: int = 1,
serialize: bool = False,
random_seed_for_testing: Optional[int] = None) -> List[evaluator.Evaluator]:
"""Returns the default evaluators for use in ExtractAndEvaluate.
Args:
eval_shared_model: Optional shared model (single-model evaluation) or list
of shared models (multi-model evaluation). Only required if there are
metrics to be computed in-graph using the model.
eval_config: Eval config.
schema: A schema to use for customizing default evaluators.
compute_confidence_intervals: Deprecated (use eval_config).
min_slice_size: Deprecated (use eval_config).
serialize: Deprecated.
random_seed_for_testing: Provide for deterministic tests only.
"""
disabled_outputs = []
if eval_config:
eval_config = _update_eval_config_with_defaults(eval_config,
eval_shared_model)
disabled_outputs = eval_config.options.disabled_outputs.values
if (_model_types(eval_shared_model) == set([constants.TF_LITE]) or
_model_types(eval_shared_model) == set([constants.TF_JS])):
# no in-graph metrics present when tflite or tfjs is used.
if eval_shared_model:
if isinstance(eval_shared_model, dict):
eval_shared_model = {
k: v._replace(include_default_metrics=False)
for k, v in eval_shared_model.items()
}
elif isinstance(eval_shared_model, list):
eval_shared_model = [
v._replace(include_default_metrics=False)
for v in eval_shared_model
]
else:
eval_shared_model = eval_shared_model._replace(
include_default_metrics=False)
if (constants.METRICS_KEY in disabled_outputs and
constants.PLOTS_KEY in disabled_outputs):
return []
if _is_legacy_eval(eval_shared_model, eval_config):
# Backwards compatibility for previous add_metrics_callbacks implementation.
if eval_config is not None:
if eval_config.options.HasField('compute_confidence_intervals'):
compute_confidence_intervals = (
eval_config.options.compute_confidence_intervals.value)
if eval_config.options.HasField('min_slice_size'):
min_slice_size = eval_config.options.min_slice_size.value
return [
metrics_and_plots_evaluator.MetricsAndPlotsEvaluator(
eval_shared_model,
compute_confidence_intervals=compute_confidence_intervals,
min_slice_size=min_slice_size,
serialize=serialize,
random_seed_for_testing=random_seed_for_testing)
]
else:
return [
metrics_and_plots_evaluator_v2.MetricsAndPlotsEvaluator(
eval_config=eval_config,
eval_shared_model=eval_shared_model,
schema=schema,
random_seed_for_testing=random_seed_for_testing)
]
def default_writers(
output_path: Optional[Text],
eval_shared_model: Optional[types.MaybeMultipleEvalSharedModels] = None,
eval_config: Optional[config.EvalConfig] = None,
display_only_data_location: Optional[Text] = None,
display_only_data_file_format: Optional[Text] = None,
output_file_format: Text = '',
add_metric_callbacks: List[types.AddMetricsCallbackType] = None
) -> List[writer.Writer]: # pylint: disable=invalid-name
"""Returns the default writers for use in WriteResults.
Note, sharding will be enabled by default if an output_file_format is
provided. Filenames will be <output_path>-SSSSS-of-NNNNN.<output_file_format>
where SSSSS is the shard number and NNNNN is the number of shards.
Args:
output_path: Output path.
eval_shared_model: Optional shared model (single-model evaluation) or list
of shared models (multi-model evaluation). Only required if legacy
add_metrics_callbacks are used.
eval_config: Eval config for writing out config along with results. Also
used to check for missing slices.
display_only_data_location: Optional path indicating where the examples were
read from. This is used only for display purposes - data will not actually
be read from this path.
display_only_data_file_format: Optional format of the input examples. This
is used only for display purposes.
output_file_format: File format to use when saving files. Currently only
'tfrecord' is supported.
add_metric_callbacks: Optional list of metric callbacks (if used).
"""
writers = []
if not add_metric_callbacks:
add_metric_callbacks = []
# The add_metric_callbacks are used in the metrics and plots serialization
# code to post process the metric data by calling populate_stats_and_pop.
# While both the legacy (V1) and new (V2) evaluation implementations support
# EvalSavedModels using add_metric_callbacks, this particular code is only
# required for the legacy evaluation based on the MetricsAndPlotsEvaluator.
# The V2 MetricsAndPlotsEvaluator output requires no additional processing.
# Since the V1 code only supports a single EvalSharedModel, we only set the
# add_metrics_callbacks if a dict is not passed.
if (eval_shared_model and not isinstance(eval_shared_model, dict) and
not isinstance(eval_shared_model, list)):
add_metric_callbacks = eval_shared_model.add_metrics_callbacks
if eval_config:
model_locations = {}
eval_shared_models = model_util.verify_and_update_eval_shared_models(
eval_shared_model)
for v in (eval_shared_models or [None]):
k = '' if v is None else v.model_name
model_locations[k] = ('<unknown>' if v is None or v.model_path is None
else v.model_path)
writers.append(
eval_config_writer.EvalConfigWriter(
output_path,
eval_config=eval_config,
data_location=display_only_data_location,
data_file_format=display_only_data_file_format,
model_locations=model_locations))
output_paths = {
constants.METRICS_KEY:
os.path.join(output_path, constants.METRICS_KEY),
constants.PLOTS_KEY:
os.path.join(output_path, constants.PLOTS_KEY),
constants.VALIDATIONS_KEY:
os.path.join(output_path, constants.VALIDATIONS_KEY)
}
writers.append(
metrics_plots_and_validations_writer.MetricsPlotsAndValidationsWriter(
output_paths=output_paths,
# Empty EvalConfig supported for backwards compatibility.
eval_config=eval_config or config.EvalConfig(),
add_metrics_callbacks=add_metric_callbacks,
output_file_format=output_file_format))
return writers
@beam.ptransform_fn
# TODO(b/156538355): Find out why str is also required instead of just bytes
# after adding types.Extracts.
@beam.typehints.with_input_types(Union[bytes, str, types.Extracts])
@beam.typehints.with_output_types(types.Extracts)
def InputsToExtracts( # pylint: disable=invalid-name
inputs: beam.pvalue.PCollection) -> beam.pvalue.PCollection:
"""Converts serialized inputs (e.g. examples) to Extracts if not already."""
def to_extracts(x: Union[bytes, str, types.Extracts]) -> types.Extracts:
result = {}
if isinstance(x, dict):
result.update(x)
else:
result[constants.INPUT_KEY] = x
return result
return inputs | 'AddInputKey' >> beam.Map(to_extracts)
@beam.ptransform_fn
@beam.typehints.with_input_types(Union[bytes, pa.RecordBatch])
@beam.typehints.with_output_types(types.Extracts)
def BatchedInputsToExtracts( # pylint: disable=invalid-name
batched_inputs: beam.pvalue.PCollection) -> beam.pvalue.PCollection:
"""Converts Arrow RecordBatch inputs to Extracts."""
def to_extracts(x: Union[bytes, pa.RecordBatch]) -> types.Extracts:
result = {}
if isinstance(x, dict):
result.update(x)
else:
result[constants.ARROW_RECORD_BATCH_KEY] = x
return result
return batched_inputs | 'AddArrowRecordBatchKey' >> beam.Map(to_extracts)
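# Sketch of feeding Arrow RecordBatches (assumed TFXIO usage):
#   tfxio = tf_example_record.TFExampleRecord(file_pattern=data_location)
#   batches = p | 'ReadRecordBatches' >> tfxio.BeamSource()
#   extracts = batches | 'BatchedInputsToExtracts' >> BatchedInputsToExtracts()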
@beam.ptransform_fn
@beam.typehints.with_input_types(types.Extracts)
@beam.typehints.with_output_types(Any)
def ExtractAndEvaluate( # pylint: disable=invalid-name
extracts: beam.pvalue.PCollection, extractors: List[extractor.Extractor],
evaluators: List[evaluator.Evaluator]) -> evaluator.Evaluation:
"""Performs Extractions and Evaluations in provided order."""
# evaluation[k] = list of values for k
evaluation = {}
def update(evaluation: Dict[Text, Any], new_evaluation: Dict[Text, Any]):
for k, v in new_evaluation.items():
if k not in evaluation:
evaluation[k] = []
evaluation[k].append(v)
return evaluation
# Run evaluators that run before extraction (i.e. that only require
# the incoming input extract added by ReadInputs)
for v in evaluators:
if not v.run_after:
update(evaluation, extracts | v.stage_name >> v.ptransform)
for x in extractors:
extracts = (extracts | x.stage_name >> x.ptransform)
for v in evaluators:
if v.run_after == x.stage_name:
update(evaluation, extracts | v.stage_name >> v.ptransform)
for v in evaluators:
if v.run_after == extractor.LAST_EXTRACTOR_STAGE_NAME:
update(evaluation, extracts | v.stage_name >> v.ptransform)
# Merge multi-valued keys if necessary.
result = {}
for k, v in evaluation.items():
if len(v) == 1:
result[k] = v[0]
continue
# Note that we assume that if a key is multivalued, its values are
# dictionaries with disjoint keys. The combined value will simply be the
# disjoint union of all the dictionaries.
result[k] = (
v
| 'FlattenEvaluationOutput(%s)' % k >> beam.Flatten()
| 'CombineEvaluationOutput(%s)' % k >> beam.CombinePerKey(
_CombineEvaluationDictionariesFn()))
return result
class _CombineEvaluationDictionariesFn(beam.CombineFn):
"""CombineFn to combine dictionaries generated by different evaluators."""
def create_accumulator(self) -> Dict[Text, Any]:
return {}
def _merge(self, accumulator: Dict[Text, Any],
output_dict: Dict[Text, Any]) -> None:
intersection = set(accumulator) & set(output_dict)
if intersection:
raise ValueError(
'Dictionaries generated by different evaluators should have '
'different keys, but keys %s appeared in the output of multiple '
'evaluators' % intersection)
accumulator.update(output_dict)
def add_input(self, accumulator: Dict[Text, Any],
output_dict: Dict[Text, Any]) -> Dict[Text, Any]:
if not isinstance(output_dict, dict):
raise TypeError(
'for outputs written to by multiple evaluators, the outputs must all '
'be dictionaries, but got output of type %s, value %s' %
(type(output_dict), str(output_dict)))
self._merge(accumulator, output_dict)
return accumulator
def merge_accumulators(
self, accumulators: List[Dict[Text, Any]]) -> Dict[Text, Any]:
result = self.create_accumulator()
for acc in accumulators:
self._merge(result, acc)
return result
def extract_output(self, accumulator: Dict[Text, Any]) -> Dict[Text, Any]:
return accumulator
@beam.ptransform_fn
# TODO(b/157600974): Add input typehint.
@beam.typehints.with_output_types(beam.pvalue.PDone)
def WriteResults( # pylint: disable=invalid-name
evaluation_or_validation: Union[evaluator.Evaluation, validator.Validation],
writers: List[writer.Writer]) -> beam.pvalue.PDone:
"""Writes Evaluation or Validation results using given writers.
Args:
evaluation_or_validation: Evaluation or Validation output.
writers: Writes to use for writing out output.
Raises:
ValueError: If Evaluation or Validation is empty.
Returns:
beam.pvalue.PDone.
"""
if not evaluation_or_validation:
raise ValueError('Evaluations and Validations cannot be empty')
for w in writers:
_ = evaluation_or_validation | w.stage_name >> w.ptransform
return beam.pvalue.PDone(list(evaluation_or_validation.values())[0].pipeline)
def is_batched_input(eval_shared_model: Optional[
types.MaybeMultipleEvalSharedModels] = None,
eval_config: Optional[config.EvalConfig] = None) -> bool:
"""Returns true if batched input should be used.
We will keep supporting the legacy unbatched V1 PredictExtractor as it parses
the features and labels, and is the only solution currently that allows for
slicing on transformed features. Eventually we should have support for
transformed features via keras preprocessing layers.
Args:
eval_shared_model: Shared model (single-model evaluation) or list of shared
models (multi-model evaluation). Required unless the predictions are
provided alongside the features (i.e. model-agnostic evaluations).
eval_config: Eval config.
Returns:
A boolean indicating if batched extractors should be used.
"""
if _is_legacy_eval(eval_shared_model, eval_config):
return False
elif eval_shared_model:
model_types = _model_types(eval_shared_model)
eval_shared_models = model_util.verify_and_update_eval_shared_models(
eval_shared_model)
if (model_types == set([constants.TF_LITE]) or
model_types == set([constants.TF_JS])):
return False
elif (eval_config and model_types == set([constants.TF_ESTIMATOR]) and
all(eval_constants.EVAL_TAG in m.model_loader.tags
for m in eval_shared_models)):
return False
return True
@beam.ptransform_fn
@beam.typehints.with_input_types(Any)
@beam.typehints.with_output_types(beam.pvalue.PDone)
def ExtractEvaluateAndWriteResults( # pylint: disable=invalid-name
examples: beam.pvalue.PCollection,
eval_shared_model: Optional[types.MaybeMultipleEvalSharedModels] = None,
eval_config: Optional[config.EvalConfig] = None,
extractors: Optional[List[extractor.Extractor]] = None,
evaluators: Optional[List[evaluator.Evaluator]] = None,
writers: Optional[List[writer.Writer]] = None,
output_path: Optional[Text] = None,
display_only_data_location: Optional[Text] = None,
display_only_file_format: Optional[Text] = None,
slice_spec: Optional[List[slicer.SingleSliceSpec]] = None,
write_config: Optional[bool] = True,
compute_confidence_intervals: Optional[bool] = False,
min_slice_size: int = 1,
random_seed_for_testing: Optional[int] = None,
tensor_adapter_config: Optional[tensor_adapter.TensorAdapterConfig] = None,
schema: Optional[schema_pb2.Schema] = None) -> beam.pvalue.PDone:
"""PTransform for performing extraction, evaluation, and writing results.
Users who want to construct their own Beam pipelines instead of using the
lightweight run_model_analysis functions should use this PTransform.
Example usage:
```python
eval_config = tfma.EvalConfig(slicing_specs=[...], metrics_specs=[...])
eval_shared_model = tfma.default_eval_shared_model(
eval_saved_model_path=model_location, eval_config=eval_config)
with beam.Pipeline(runner=...) as p:
_ = (p
| 'ReadData' >> beam.io.ReadFromTFRecord(data_location)
| 'ExtractEvaluateAndWriteResults' >>
tfma.ExtractEvaluateAndWriteResults(
eval_shared_model=eval_shared_model,
eval_config=eval_config,
...))
result = tfma.load_eval_result(output_path=output_path)
tfma.view.render_slicing_metrics(result)
```
Note that the exact serialization format is an internal implementation detail
and subject to change. Users should only use the TFMA functions to write and
read the results.
Args:
examples: PCollection of input examples or Arrow Record batches. Examples
can be any format the model accepts (e.g. string containing CSV row,
TensorFlow.Example, etc). If the examples are in the form of a dict it
will be assumed that input is already in the form of tfma.Extracts with
examples stored under tfma.INPUT_KEY (any other keys will be passed along
unchanged to downstream extractors and evaluators).
eval_shared_model: Optional shared model (single-model evaluation) or list
of shared models (multi-model evaluation). Only required if needed by
default extractors, evaluators, or writers and for display purposes of the
model path.
eval_config: Eval config.
extractors: Optional list of Extractors to apply to Extracts. Typically
these will be added by calling the default_extractors function. If no
extractors are provided, default_extractors (non-materialized) will be
used.
evaluators: Optional list of Evaluators for evaluating Extracts. Typically
these will be added by calling the default_evaluators function. If no
evaluators are provided, default_evaluators will be used.
writers: Optional list of Writers for writing Evaluation output. Typically
these will be added by calling the default_writers function. If no writers
are provided, default_writers will be used.
output_path: Path to output results to (config file, metrics, plots, etc).
display_only_data_location: Optional path indicating where the examples were
read from. This is used only for display purposes - data will not actually
be read from this path.
display_only_file_format: Optional format of the examples. This is used only
for display purposes.
slice_spec: Deprecated (use EvalConfig).
write_config: Deprecated (use EvalConfig).
compute_confidence_intervals: Deprecated (use EvalConfig).
min_slice_size: Deprecated (use EvalConfig).
random_seed_for_testing: Provide for deterministic tests only.
tensor_adapter_config: Tensor adapter config which specifies how to obtain
tensors from the Arrow RecordBatch. If None, we feed the raw examples to
the model.
schema: A schema to use for customizing evaluators.
Raises:
    ValueError: If the EvalConfig is invalid or a matching Extractor is not
      found for an Evaluator.
Returns:
PDone.
"""
eval_shared_models = model_util.verify_and_update_eval_shared_models(
eval_shared_model)
if eval_config is None:
eval_config = _default_eval_config(eval_shared_models, slice_spec,
write_config,
compute_confidence_intervals,
min_slice_size)
else:
eval_config = _update_eval_config_with_defaults(eval_config,
eval_shared_model)
config.verify_eval_config(eval_config)
if not extractors:
extractors = default_extractors(
eval_config=eval_config,
eval_shared_model=eval_shared_model,
materialize=False,
tensor_adapter_config=tensor_adapter_config)
if not evaluators:
evaluators = default_evaluators(
eval_config=eval_config,
eval_shared_model=eval_shared_model,
random_seed_for_testing=random_seed_for_testing,
schema=schema)
for v in evaluators:
evaluator.verify_evaluator(v, extractors)
if not writers:
writers = default_writers(
output_path=output_path,
eval_shared_model=eval_shared_model,
eval_config=eval_config,
display_only_data_location=display_only_data_location,
display_only_data_file_format=display_only_file_format)
# pylint: disable=no-value-for-parameter
if is_batched_input(eval_shared_model, eval_config):
extracts = (
examples
| 'BatchedInputsToExtracts' >> BatchedInputsToExtracts())
else:
extracts = (examples | 'InputsToExtracts' >> InputsToExtracts())
_ = (
extracts
| 'ExtractAndEvaluate' >> ExtractAndEvaluate(
extractors=extractors, evaluators=evaluators)
| 'WriteResults' >> WriteResults(writers=writers))
return beam.pvalue.PDone(examples.pipeline)
def run_model_analysis(
eval_shared_model: Optional[types.MaybeMultipleEvalSharedModels] = None,
    eval_config: Optional[config.EvalConfig] = None,
data_location: Text = '',
file_format: Text = 'tfrecords',
output_path: Optional[Text] = None,
extractors: Optional[List[extractor.Extractor]] = None,
evaluators: Optional[List[evaluator.Evaluator]] = None,
writers: Optional[List[writer.Writer]] = None,
pipeline_options: Optional[Any] = None,
slice_spec: Optional[List[slicer.SingleSliceSpec]] = None,
write_config: Optional[bool] = True,
compute_confidence_intervals: Optional[bool] = False,
min_slice_size: int = 1,
random_seed_for_testing: Optional[int] = None,
schema: Optional[schema_pb2.Schema] = None,
) -> Union[view_types.EvalResult, view_types.EvalResults]:
"""Runs TensorFlow model analysis.
It runs a Beam pipeline to compute the slicing metrics exported in TensorFlow
Eval SavedModel and returns the results.
This is a simplified API for users who want to quickly get something running
locally. Users who wish to create their own Beam pipelines can use the
Evaluate PTransform instead.
Args:
eval_shared_model: Optional shared model (single-model evaluation) or list
of shared models (multi-model evaluation). Only required if needed by
default extractors, evaluators, or writers.
eval_config: Eval config.
data_location: The location of the data files.
file_format: The file format of the data, can be either 'text' or
'tfrecords' for now. By default, 'tfrecords' will be used.
output_path: The directory to output metrics and results to. If None, we use
a temporary directory.
extractors: Optional list of Extractors to apply to Extracts. Typically
these will be added by calling the default_extractors function. If no
extractors are provided, default_extractors (non-materialized) will be
used.
evaluators: Optional list of Evaluators for evaluating Extracts. Typically
these will be added by calling the default_evaluators function. If no
evaluators are provided, default_evaluators will be used.
writers: Optional list of Writers for writing Evaluation output. Typically
these will be added by calling the default_writers function. If no writers
are provided, default_writers will be used.
pipeline_options: Optional arguments to run the Pipeline, for instance
whether to run directly.
slice_spec: Deprecated (use EvalConfig).
write_config: Deprecated (use EvalConfig).
compute_confidence_intervals: Deprecated (use EvalConfig).
min_slice_size: Deprecated (use EvalConfig).
random_seed_for_testing: Provide for deterministic tests only.
schema: Optional tf.Metadata schema of the input data.
Returns:
An EvalResult that can be used with the TFMA visualization functions.
Raises:
ValueError: If the file_format is unknown to us.
"""
_assert_tensorflow_version()
if output_path is None:
output_path = tempfile.mkdtemp()
if not tf.io.gfile.exists(output_path):
tf.io.gfile.makedirs(output_path)
if eval_config is None:
eval_shared_models = model_util.verify_and_update_eval_shared_models(
eval_shared_model)
eval_config = _default_eval_config(eval_shared_models, slice_spec,
write_config,
compute_confidence_intervals,
min_slice_size)
else:
eval_config = _update_eval_config_with_defaults(eval_config,
eval_shared_model)
tensor_adapter_config = None
with beam.Pipeline(options=pipeline_options) as p:
if file_format == 'tfrecords':
if is_batched_input(eval_shared_model, eval_config):
tfxio = tf_example_record.TFExampleRecord(
file_pattern=data_location,
schema=schema,
raw_record_column_name=constants.ARROW_INPUT_COLUMN)
if schema is not None:
tensor_adapter_config = tensor_adapter.TensorAdapterConfig(
arrow_schema=tfxio.ArrowSchema(),
tensor_representations=tfxio.TensorRepresentations())
data = p | 'ReadFromTFRecordToArrow' >> tfxio.BeamSource()
else:
data = p | 'ReadFromTFRecord' >> beam.io.ReadFromTFRecord(
file_pattern=data_location,
compression_type=beam.io.filesystem.CompressionTypes.AUTO)
elif file_format == 'text':
data = p | 'ReadFromText' >> beam.io.textio.ReadFromText(data_location)
else:
raise ValueError('unknown file_format: {}'.format(file_format))
# pylint: disable=no-value-for-parameter
_ = (
data
| 'ExtractEvaluateAndWriteResults' >> ExtractEvaluateAndWriteResults(
eval_config=eval_config,
eval_shared_model=eval_shared_model,
display_only_data_location=data_location,
display_only_file_format=file_format,
output_path=output_path,
extractors=extractors,
evaluators=evaluators,
writers=writers,
random_seed_for_testing=random_seed_for_testing,
tensor_adapter_config=tensor_adapter_config,
schema=schema))
# pylint: enable=no-value-for-parameter
if len(eval_config.model_specs) <= 1:
return load_eval_result(output_path)
else:
results = []
for spec in eval_config.model_specs:
results.append(load_eval_result(output_path, model_name=spec.name))
return view_types.EvalResults(results, constants.MODEL_CENTRIC_MODE)
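# A minimal usage sketch (illustrative only; the paths below are placeholders,
# not part of the library):
#
#   eval_result = run_model_analysis(
#       eval_shared_model=default_eval_shared_model(
#           eval_saved_model_path='/path/to/eval_saved_model'),
#       data_location='/path/to/data/*.tfrecord',
#       output_path='/path/to/output')
#   # eval_result can then be rendered with tfma.view.render_slicing_metrics.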
def single_model_analysis(
model_location: Text,
data_location: Text,
output_path: Text = None,
eval_config: Optional[config.EvalConfig] = None,
slice_spec: Optional[List[slicer.SingleSliceSpec]] = None
) -> view_types.EvalResult:
"""Run model analysis for a single model on a single data set.
This is a convenience wrapper around run_model_analysis for a single model
with a single data set. For more complex use cases, use
tfma.run_model_analysis.
Args:
model_location: Path to the export eval saved model.
data_location: The location of the data files.
output_path: The directory to output metrics and results to. If None, we use
a temporary directory.
eval_config: Eval config.
slice_spec: Deprecated (use EvalConfig).
Returns:
An EvalResult that can be used with the TFMA visualization functions.
"""
# Get working_dir ready.
if output_path is None:
output_path = tempfile.mkdtemp()
if not tf.io.gfile.exists(output_path):
tf.io.gfile.makedirs(output_path)
if slice_spec and eval_config:
raise ValueError('slice_spec is deprecated, only use eval_config')
if slice_spec:
eval_config = config.EvalConfig(
slicing_specs=[s.to_proto() for s in slice_spec])
return run_model_analysis(
eval_config=eval_config,
eval_shared_model=default_eval_shared_model(
eval_saved_model_path=model_location),
data_location=data_location,
output_path=output_path) # pytype: disable=bad-return-type
def multiple_model_analysis(model_locations: List[Text], data_location: Text,
**kwargs) -> view_types.EvalResults:
"""Run model analysis for multiple models on the same data set.
Args:
model_locations: A list of paths to the export eval saved model.
data_location: The location of the data files.
**kwargs: The args used for evaluation. See tfma.single_model_analysis() for
details.
Returns:
A tfma.EvalResults containing all the evaluation results with the same order
as model_locations.
"""
results = []
for m in model_locations:
results.append(single_model_analysis(m, data_location, **kwargs))
return view_types.EvalResults(results, constants.MODEL_CENTRIC_MODE)
def multiple_data_analysis(model_location: Text, data_locations: List[Text],
**kwargs) -> view_types.EvalResults:
"""Run model analysis for a single model on multiple data sets.
Args:
model_location: The location of the exported eval saved model.
data_locations: A list of data set locations.
**kwargs: The args used for evaluation. See tfma.run_model_analysis() for
details.
Returns:
A tfma.EvalResults containing all the evaluation results with the same order
as data_locations.
"""
results = []
for d in data_locations:
results.append(single_model_analysis(model_location, d, **kwargs))
return view_types.EvalResults(results, constants.DATA_CENTRIC_MODE)
def analyze_raw_data(
data: pd.DataFrame,
    eval_config: Optional[config.EvalConfig] = None,
    output_path: Optional[Text] = None,
    add_metric_callbacks: Optional[List[types.AddMetricsCallbackType]] = None
) -> view_types.EvalResult:
"""Runs TensorFlow model analysis on a pandas.DataFrame.
This function allows you to use TFMA with Pandas DataFrames. The dataframe
must include a 'predicted' column for the predicted label and a 'label' column
for the actual label.
  In addition to a DataFrame, this function requires an eval_config, a
  `tfma.EvalConfig` object containing various configuration parameters (see
  [config.proto](https://github.com/tensorflow/model-analysis/blob/master/tensorflow_model_analysis/proto/config.proto)
  for a comprehensive list), including:
* the metrics to compute
* the slices to compute metrics on
* the DataFrame's column names for example labels and predictions ('label'
and 'prediction' by default)
* confidence interval options
This function returns a `tfma.EvalResult`, which contains TFMA's computed
metrics and can be used to generate plots with
`tfma.view.render_slicing_metrics`.
Example usage:
```python
model_specs = [
config.ModelSpec(
prediction_key='prediction',
label_key='label')
]
metrics_specs = [
config.MetricsSpec(metrics=[
config.MetricConfig(class_name='Accuracy'),
config.MetricConfig(class_name='ExampleCount')
])
]
slicing_specs = [
config.SlicingSpec(), # the empty slice represents the overall dataset
config.SlicingSpec(feature_keys=['language'])
]
eval_config = config.EvalConfig(
model_specs=model_specs,
metrics_specs=metrics_specs,
slicing_specs=slicing_specs)
result = model_eval_lib.analyze_raw_data(df, eval_config)
tfma.view.render_slicing_metrics(result)
# Example with Fairness Indicators
from tensorflow_model_analysis.addons.fairness.post_export_metrics import
fairness_indicators
from tensorflow_model_analysis.addons.fairness.view import widget_view
add_metrics_callbacks = [
tfma.post_export_metrics.fairness_indicators(thresholds=[0.25, 0.5, 0.75])
]
  result = model_eval_lib.analyze_raw_data(
      data=df,
      eval_config=eval_config,
      add_metric_callbacks=add_metrics_callbacks
  )
widget_view.render_fairness_indicator(result)
```
Args:
data: A pandas.DataFrame, where rows correspond to examples and columns
correspond to features. One column must indicate a row's predicted label,
and one column must indicate a row's actual label.
eval_config: A `tfma.EvalConfig`, which contains various configuration
parameters including metrics, slices, and label/prediction column names.
output_path: Path to write EvalResult to.
    add_metric_callbacks: Optional list of add-metrics callbacks, used to
      compute additional metrics beyond those specified in the eval config.
Returns:
A tfma.EvalResult to extract metrics or generate visualizations from.
Raises:
KeyError: If the prediction or label columns are not found within the
DataFrame.
"""
for model_spec in eval_config.model_specs:
model_spec.prediction_key = model_spec.prediction_key or 'prediction'
model_spec.label_key = model_spec.label_key or 'label'
if model_spec.prediction_key not in data.columns:
raise KeyError(
'The prediction_key column was not found. Looked for %s but found: %s'
% (model_spec.prediction_key, list(data.columns)))
if model_spec.label_key not in data.columns:
raise KeyError(
'The label_key column was not found. Looked for %s but found: %s' %
(model_spec.label_key, list(data.columns)))
# TODO(b/153570803): Validity check / assertions for dataframe structure
if eval_config.slicing_specs is None:
eval_config.slicing_specs = [config.SlicingSpec(feature_keys=[''])]
if output_path is None:
output_path = tempfile.mkdtemp()
arrow_data = table_util.CanonicalizeRecordBatch(
table_util.DataFrameToRecordBatch(data))
beam_data = beam.Create([arrow_data])
writers = default_writers(
output_path,
eval_config=eval_config,
add_metric_callbacks=add_metric_callbacks)
with beam.Pipeline() as p:
_ = (
p
| beam_data
| 'ExtractEvaluateAndWriteResults' >> ExtractEvaluateAndWriteResults( # pylint: disable=no-value-for-parameter
writers=writers,
eval_config=eval_config,
output_path=output_path))
return load_eval_result(output_path)
|
py
|
1a565abe38b0122edf71ee04b2cd0a13991e0e19
|
"""
Utilities for general use
"""
import random
import string

import pandas as pd


def randomstring(length):
    """Return a random alphanumeric string of the given length.

    (Helper reconstructed from its usage in cartesianproduct below.)
    """
    return ''.join(
        random.choices(string.ascii_letters + string.digits, k=length))
def cartesianproduct(*args):
"""Create full cartesian product of all objects passed"""
# Create a random string to name a new random column for merging
key_col = randomstring(16)
out = pd.DataFrame(args[0].drop_duplicates())
out[key_col] = 1
for itm in args[1:]:
itm = pd.DataFrame(itm.drop_duplicates())
itm[key_col] = 1
out = out.merge(itm, on=key_col)
out.drop(columns=key_col, inplace=True)
return out
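# Example (sketch): a full cross join of two Series.
#   cartesianproduct(pd.Series([1, 2]), pd.Series(['a', 'b']))
# yields a 4-row DataFrame pairing every value of the first input with every
# value of the second.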
|
py
|
1a565ad8449061775c178a6193e11b7213160b05
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from bincrafters import build_template_default
import os
import platform
if __name__ == "__main__":
CONAN_USERNAME = os.environ.get("CONAN_USERNAME", "yjjnls")
CONAN_UPLOAD = 'https://api.bintray.com/conan/%s/%s' % (CONAN_USERNAME,
'stable')
os.environ['CONAN_UPLOAD'] = CONAN_UPLOAD
os.environ['CONAN_CHANNEL'] = 'stable'
os.environ['CONAN_UPLOAD_ONLY_WHEN_STABLE'] = 'False'
os.environ['CONAN_USERNAME'] = CONAN_USERNAME
DEPENDENT_BINTRAY_REPO = os.environ.get(
"DEPENDENT_BINTRAY_REPO", CONAN_USERNAME)
os.environ['DEPENDENT_BINTRAY_REPO'] = DEPENDENT_BINTRAY_REPO
builder = build_template_default.get_builder()
builds = []
for settings, options, env_vars, build_requires, reference in builder.items:
# dynamic only
if not options["gstreamer-custom:shared"]:
continue
# release only
if settings["build_type"] == "Debug":
continue
        # Visual Studio 14 (2015) only
if platform.system() == "Windows":
if settings["compiler"] == "Visual Studio":
if settings["compiler.version"] == '14':
builds.append(
[settings, options, env_vars, build_requires])
elif platform.system() == "Linux":
if settings["compiler"] == "gcc":
if settings["compiler.version"] == '4.9' and settings["arch"] == 'x86_64':
builds.append(
[settings, options,
{'DEPENDENT_BINTRAY_REPO':
DEPENDENT_BINTRAY_REPO},
build_requires])
builder.builds = builds
builder.run()
|
py
|
1a565b985347dca62c911fd14b9a35dfaa18f0c8
|
# Imports and data preparation (reconstructed so the script runs standalone;
# the original snippet assumed `km`, `kl`, and the noisy MNIST arrays already
# existed -- this is the standard Keras denoising-autoencoder setup).
import numpy as np
from tensorflow.keras import layers as kl
from tensorflow.keras import models as km
from tensorflow.keras.datasets import mnist

(x_train, _), (x_test, _) = mnist.load_data()
x_train_conv = (x_train.astype('float32') / 255.0).reshape(-1, 28, 28, 1)
x_test_conv = (x_test.astype('float32') / 255.0).reshape(-1, 28, 28, 1)

# Add Gaussian noise and clip back to [0, 1].
noise_factor = 0.5
x_train_noisy = np.clip(
    x_train_conv + noise_factor * np.random.normal(size=x_train_conv.shape),
    0.0, 1.0)
x_test_noisy = np.clip(
    x_test_conv + noise_factor * np.random.normal(size=x_test_conv.shape),
    0.0, 1.0)

conv_encoder = km.Sequential(name="ConvEncoderModel")
conv_encoder.add(kl.Conv2D(16, (3,3) , activation='relu', input_shape=(28,28,1) , padding='same' ))
conv_encoder.add(kl.MaxPooling2D((2, 2), padding='same'))
conv_encoder.add(kl.Conv2D(8, (3, 3), activation='relu', padding='same'))
conv_encoder.add(kl.MaxPooling2D((2, 2), padding='same'))
conv_encoder.add(kl.Conv2D(8, (3, 3), activation='relu', padding='same'))
conv_encoder.add(kl.MaxPooling2D((2, 2), padding='same'))
conv_decoder = km.Sequential(name="ConvDecoderModel")
conv_decoder.add(kl.Conv2D(8, (3, 3), activation='relu', input_shape = (4, 4, 8), padding='same'))
conv_decoder.add(kl.UpSampling2D((2, 2)))
conv_decoder.add(kl.Conv2D(8, (3, 3), activation='relu', padding='same'))
conv_decoder.add(kl.UpSampling2D((2, 2)))
conv_decoder.add(kl.Conv2D(16, (3, 3), activation='relu'))
conv_decoder.add(kl.UpSampling2D((2, 2)))
conv_decoder.add(kl.Conv2D(1, (3, 3), activation='sigmoid', padding='same'))
conv_autoencoder = km.Sequential(name="ConvAutoencoderModel")
conv_autoencoder.add(conv_encoder)
conv_autoencoder.add(conv_decoder)
conv_autoencoder.compile(optimizer='adam', loss='binary_crossentropy')
conv_autoencoder.fit(x_train_noisy, x_train_conv, epochs=10, batch_size=256, validation_data=(x_test_noisy, x_test_conv))
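# Quick check of denoising quality (sketch, not in the original snippet):
# reconstruct the noisy test digits with the trained autoencoder.
denoised = conv_autoencoder.predict(x_test_noisy)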
|
py
|
1a565cce4e1766f6eb30087c15882f74d383ecc5
|
import bluetooth
bd_addr = "98:D3:31:F5:B9:E6"
port = 1
sock = bluetooth.BluetoothSocket(bluetooth.RFCOMM)
sock.connect((bd_addr,port))
try:
    while True:
        data = sock.recv(1)
        if not data:  # remote end closed the connection
            break
        print(data)
finally:
    sock.close()
|
py
|
1a565cfe60062642ef0ce63b71f290c0005ec333
|
from nltk.stem.wordnet import WordNetLemmatizer
from SentimentAnalysis.common_functions import preprocess_one_line, remove_duplicates
lemmatizer = WordNetLemmatizer()
lemmatize_flag = True
with open('/home/data/positive-words-raw.txt', 'r') as pos_raw_file:
    positives_raw = pos_raw_file.readlines()
with open('/home/data/negative-words-raw.txt', 'r') as neg_raw_file:
    negatives_raw = neg_raw_file.readlines()

positives = []
for pos_line in positives_raw:
    # last arg is stopwords -- there are none by definition
    new_pos_line = preprocess_one_line(pos_line, lemmatizer, lemmatize_flag, [])
    positives.append(new_pos_line)
positives = remove_duplicates(positives)

negatives = []
for neg_line in negatives_raw:
    new_neg_line = preprocess_one_line(neg_line, lemmatizer, lemmatize_flag, [])
    negatives.append(new_neg_line)
negatives = remove_duplicates(negatives)

with open('/home/data/negative-words.txt', 'w') as neg_file:
    for word in negatives:
        neg_file.write(word + '\n')
with open('/home/data/positive-words.txt', 'w') as pos_file:
    for word in positives:
        pos_file.write(word + '\n')
|
py
|
1a565e11d9fe4e9cdd94cf5cce9a2a7f4faf814f
|
"""
Django tests for training in qcm app.
Generated by 'manage.py startapp' using Django 3.2.7.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/topics/testing/
"""
from django.test import TestCase
from user_data.tests import create_student
from ..models import Training
from .test_shortcuts import create_branch_training, create_questions_subset_training
class TrainingModelTest(TestCase):
"""class to test the Training model"""
def test_create_questions_subset_training(self):
"""test a training creation"""
student = create_student(username="test")
training = create_questions_subset_training(user=student)
trainings = Training.objects.all()
self.assertQuerysetEqual(trainings, [training])
def test_create_branch_training(self):
"""test a training creation"""
student = create_student(username="test")
training = create_branch_training(user=student)
trainings = Training.objects.all()
self.assertQuerysetEqual(trainings, [training])
|
py
|
1a565e18afdbd19bfc482e06b2d7e8f7a7f62aea
|
#!/usr/bin/env python3
#
# Copyright (c) 2021 Nordic Semiconductor ASA
#
# SPDX-License-Identifier: Apache-2.0
#
from unittest import TestCase, main
from subprocess import Popen, PIPE
from re import sub
from pathlib import Path
from pprint import pprint
# from ecdsa import VerifyingKey
from hashlib import sha256
import cbor2
try:
import cddl_gen
except ImportError:
print("""
The cddl_gen package must be installed to run these tests.
During development, install with `python3 setup.py develop` to install in a way
that picks up changes in the files without having to reinstall.
""")
import sys
sys.exit(1)
p_root = Path(__file__).absolute().parents[2]
p_tests = Path(p_root, 'tests')
p_manifest12 = Path(p_tests, 'cases', 'manifest12.cddl')
p_manifest14 = Path(p_tests, 'cases', 'manifest14.cddl')
p_test_vectors12 = tuple(Path(p_tests, 'cases', f'manifest12_example{i}.cborhex') for i in range(6))
p_test_vectors14 = tuple(Path(p_tests, 'cases', f'manifest14_example{i}.cborhex') for i in range(6))
p_optional = Path(p_tests, 'cases', 'optional.cddl')
p_cose = Path(p_tests, 'cases', 'cose.cddl')
p_manifest14_priv = Path(p_tests, 'cases', 'manifest14.priv')
p_manifest14_pub = Path(p_tests, 'cases', 'manifest14.pub')
class Testn(TestCase):
def decode_file(self, data_path, *cddl_paths):
data = bytes.fromhex(data_path.read_text().replace("\n", ""))
self.decode_string(data, *cddl_paths)
def decode_string(self, data_string, *cddl_paths):
cddl_str = " ".join((Path(p).read_text() for p in cddl_paths))
self.my_types = cddl_gen.DataTranslator.from_cddl(cddl_str, 16).my_types
cddl = self.my_types["SUIT_Envelope_Tagged"]
self.decoded = cddl.decode_str(data_string)
class Test0(Testn):
def __init__(self, *args, **kwargs):
super(Test0, self).__init__(*args, **kwargs)
self.decode_file(p_test_vectors12[0], p_manifest12)
def test_manifest_digest(self):
self.assertEqual(
bytes.fromhex("5c097ef64bf3bb9b494e71e1f2418eef8d466cc902f639a855ec9af3e9eddb99"),
self.decoded.suit_authentication_wrapper.SUIT_Digest_bstr.suit_digest_bytes)
def test_signature(self):
self.assertEqual(
1,
self.decoded.suit_authentication_wrapper.SUIT_Authentication_Block_bstr[0].COSE_Sign1_Tagged.protected.uintint[0].uintint_key)
self.assertEqual(
-7,
self.decoded.suit_authentication_wrapper.SUIT_Authentication_Block_bstr[0].COSE_Sign1_Tagged.protected.uintint[0].uintint)
self.assertEqual(
bytes.fromhex("a19fd1f23b17beed321cece7423dfb48c457b8f1f6ac83577a3c10c6773f6f3a7902376b59540920b6c5f57bac5fc8543d8f5d3d974faa2e6d03daa534b443a7"),
self.decoded.suit_authentication_wrapper.SUIT_Authentication_Block_bstr[0].COSE_Sign1_Tagged.signature)
def test_validate_run(self):
self.assertEqual(
"suit_condition_image_match",
self.decoded.suit_manifest.SUIT_Unseverable_Members.suit_validate[0].suit_validate.union[0].SUIT_Condition.union_choice)
self.assertEqual(
"suit_directive_run",
self.decoded.suit_manifest.SUIT_Unseverable_Members.suit_run[0].suit_run.union[0].SUIT_Directive.union_choice)
def test_image_size(self):
self.assertEqual(34768, self.decoded.suit_manifest.suit_common.suit_common_sequence[0].suit_common_sequence.union[0].SUIT_Common_Commands.suit_directive_override_parameters.map[3].suit_parameter_image_size)
class Test1(Testn):
def __init__(self, *args, **kwargs):
super(Test1, self).__init__(*args, **kwargs)
self.decode_file(p_test_vectors12[1], p_manifest12)
def test_components(self):
self.assertEqual(
[b'\x00'],
self.decoded.suit_manifest.suit_common.suit_components[0][0].bstr)
def test_uri(self):
self.assertEqual(
"http://example.com/file.bin",
self.decoded.suit_manifest.SUIT_Severable_Manifest_Members.suit_install[0].suit_install.union[0].SUIT_Directive.suit_directive_set_parameters.map[0].suit_parameter_uri)
class Test2(Testn):
def __init__(self, *args, **kwargs):
super(Test2, self).__init__(*args, **kwargs)
self.decode_file(p_test_vectors12[2], p_manifest12)
def test_severed_uri(self):
self.assertEqual(
"http://example.com/very/long/path/to/file/file.bin",
self.decoded.SUIT_Severable_Manifest_Members.suit_install[0].suit_install.union[0].SUIT_Directive.suit_directive_set_parameters.map[0].suit_parameter_uri)
def test_severed_text(self):
self.assertIn(
"Example 2",
self.decoded.SUIT_Severable_Manifest_Members.suit_text[0].suit_text.SUIT_Text_Keys.suit_text_manifest_description[0])
self.assertEqual(
[b'\x00'],
self.decoded.SUIT_Severable_Manifest_Members.suit_text[0].suit_text.SUIT_Component_Identifier[0].SUIT_Component_Identifier_key.bstr)
self.assertEqual(
"arm.com",
self.decoded.SUIT_Severable_Manifest_Members.suit_text[0].suit_text.SUIT_Component_Identifier[0].SUIT_Component_Identifier.SUIT_Text_Component_Keys.suit_text_vendor_domain[0])
self.assertEqual(
"This component is a demonstration. The digest is a sample pattern, not a real one.",
self.decoded.SUIT_Severable_Manifest_Members.suit_text[0].suit_text.SUIT_Component_Identifier[0].SUIT_Component_Identifier.SUIT_Text_Component_Keys.suit_text_component_description[0])
class Test3(Testn):
def __init__(self, *args, **kwargs):
super(Test3, self).__init__(*args, **kwargs)
self.decode_file(p_test_vectors12[3], p_manifest12)
def test_A_B_offset(self):
self.assertEqual(
33792,
self.decoded.suit_manifest.suit_common.suit_common_sequence[0].suit_common_sequence.union[1].SUIT_Common_Commands.suit_directive_try_each.SUIT_Directive_Try_Each_Argument.SUIT_Command_Sequence_bstr[0].union[0].SUIT_Directive.suit_directive_override_parameters.map[0].suit_parameter_component_offset)
self.assertEqual(
541696,
self.decoded.suit_manifest.suit_common.suit_common_sequence[0].suit_common_sequence.union[1].SUIT_Common_Commands.suit_directive_try_each.SUIT_Directive_Try_Each_Argument.SUIT_Command_Sequence_bstr[1].union[0].SUIT_Directive.suit_directive_override_parameters.map[0].suit_parameter_component_offset)
class Test4(Testn):
def __init__(self, *args, **kwargs):
super(Test4, self).__init__(*args, **kwargs)
self.decode_file(p_test_vectors12[4], p_manifest12)
def test_load_decompress(self):
self.assertEqual(
0,
self.decoded.suit_manifest.SUIT_Unseverable_Members.suit_load[0].suit_load.union[1].SUIT_Directive.suit_directive_set_parameters.map[3].suit_parameter_source_component)
self.assertEqual(
"SUIT_Compression_Algorithm_zlib",
self.decoded.suit_manifest.SUIT_Unseverable_Members.suit_load[0].suit_load.union[1].SUIT_Directive.suit_directive_set_parameters.map[2].suit_parameter_compression_info.suit_compression_algorithm)
class Test5(Testn):
def __init__(self, *args, **kwargs):
super(Test5, self).__init__(*args, **kwargs)
self.decode_file(p_test_vectors12[5], p_manifest12)
def test_two_image_match(self):
self.assertEqual(
"suit_condition_image_match",
self.decoded.suit_manifest.SUIT_Severable_Manifest_Members.suit_install[0].suit_install.union[3].SUIT_Condition.union_choice)
self.assertEqual(
"suit_condition_image_match",
self.decoded.suit_manifest.SUIT_Severable_Manifest_Members.suit_install[0].suit_install.union[7].SUIT_Condition.union_choice)
def dumps(obj):
return cbor2.dumps(obj, canonical=True)
def loads(string):
return cbor2.loads(string)
class Test6(Testn):
def __init__(self, *args, **kwargs):
super(Test6, self).__init__(*args, **kwargs)
self.decode_file(p_test_vectors14[0], p_manifest14, p_cose)
def test_authentication(self):
digest = bytes.fromhex("a6c4590ac53043a98e8c4106e1e31b305516d7cf0a655eddfac6d45c810e036a")
signature = bytes.fromhex("d11a2dd9610fb62a707335f584079225709f96e8117e7eeed98a2f207d05c8ecfba1755208f6abea977b8a6efe3bc2ca3215e1193be201467d052b42db6b7287")
sig_struct = bytes.fromhex("846a5369676e61747572653143a10126405820a6c4590ac53043a98e8c4106e1e31b305516d7cf0a655eddfac6d45c810e036a")
# key = VerifyingKey.from_pem(p_manifest14_pub.read_text())
# key.verify_digest(signature, digest)
# key.verify(signature, digest, hashfunc=sha256)
# key.verify(signature, sig_struct, hashfunc=sha256)
self.assertEqual("COSE_Sign1_Tagged", self.decoded.suit_authentication_wrapper.SUIT_Authentication_Block_bstr[0].union_choice)
self.assertEqual(-7, self.decoded.suit_authentication_wrapper.SUIT_Authentication_Block_bstr[0].COSE_Sign1_Tagged.Headers.protected.header_map_bstr.Generic_Headers.uint1union[0].int)
manifest_signature = self.decoded.suit_authentication_wrapper.SUIT_Authentication_Block_bstr[0].COSE_Sign1_Tagged.signature
sig_struct = ["Signature", self.decoded.suit_authentication_wrapper.SUIT_Authentication_Block_bstr[0].COSE_Sign1_Tagged.Headers.protected.header_map_bstr_bstr, b'', b'', b'']
sig_struct_encoded = dumps(sig_struct)
# self.assertEqual(dumps(self.decoded.suit_manifest.orig_obj), self.decoded.orig_obj[3])
manifest_str = dumps(self.decoded.suit_manifest_bstr)
# manifest_hash = sha256(manifest_str).digest()
manifest_hash = dumps(sha256(manifest_str).digest())
manifest_suit_digest = self.decoded.suit_authentication_wrapper.SUIT_Digest_bstr_bstr
# manifest_suit_digest = dumps(dumps(self.decoded.suit_authentication_wrapper.SUIT_Digest_bstr.orig_obj))
sig_struct_encoded = sig_struct_encoded[:-1] + manifest_hash
# sig_struct_encoded = sig_struct_encoded[:-1] + dumps(manifest_hash)
# sig_struct_encoded = sig_struct_encoded[:-1] + dumps(manifest_suit_digest)
# sig_struct_encoded = sig_struct_encoded[:-1] + dumps(dumps(manifest_suit_digest))
# res = self.my_types["Sig_structure"].validate_str(sig_struct_encoded)
# print (sig_struct_encoded.hex())
loaded = loads(sig_struct_encoded)
        # Full ECDSA verification of manifest_signature against the key in
        # manifest14.pub is left disabled; it requires the optional `ecdsa`
        # dependency that is commented out at the top of this file.
class Test7(Testn):
def __init__(self, *args, **kwargs):
super(Test7, self).__init__(*args, **kwargs)
self.decode_file(p_test_vectors14[1], p_manifest14, p_cose)
def test_structure(self):
self.assertEqual("COSE_Sign1_Tagged", self.decoded.suit_authentication_wrapper.SUIT_Authentication_Block_bstr[0].union_choice)
self.assertEqual(-7, self.decoded.suit_authentication_wrapper.SUIT_Authentication_Block_bstr[0].COSE_Sign1_Tagged.Headers.protected.header_map_bstr.Generic_Headers.uint1union[0].int)
self.assertEqual(bytes.fromhex("60c61d6eb7a1aaeddc49ce8157a55cff0821537eeee77a4ded44155b03045132"), self.decoded.suit_authentication_wrapper.SUIT_Digest_bstr.suit_digest_bytes)
self.assertEqual(1, self.decoded.suit_manifest.suit_manifest_sequence_number)
self.assertEqual(bytes.fromhex("fa6b4a53d5ad5fdfbe9de663e4d41ffe"), self.decoded.suit_manifest.suit_common.suit_common_sequence[0].suit_common_sequence.union[0].SUIT_Common_Commands.suit_directive_override_parameters.map[0].suit_parameter_vendor_identifier.RFC4122_UUID)
self.assertEqual(bytes.fromhex("1492af1425695e48bf429b2d51f2ab45"), self.decoded.suit_manifest.suit_common.suit_common_sequence[0].suit_common_sequence.union[0].SUIT_Common_Commands.suit_directive_override_parameters.map[1].suit_parameter_class_identifier)
self.assertEqual(bytes.fromhex("00112233445566778899aabbccddeeff0123456789abcdeffedcba9876543210"), self.decoded.suit_manifest.suit_common.suit_common_sequence[0].suit_common_sequence.union[0].SUIT_Common_Commands.suit_directive_override_parameters.map[2].suit_parameter_image_digest.suit_digest_bytes)
self.assertEqual('cose_alg_sha_256', self.decoded.suit_manifest.suit_common.suit_common_sequence[0].suit_common_sequence.union[0].SUIT_Common_Commands.suit_directive_override_parameters.map[2].suit_parameter_image_digest.suit_digest_algorithm_id.union_choice)
self.assertEqual(34768, self.decoded.suit_manifest.suit_common.suit_common_sequence[0].suit_common_sequence.union[0].SUIT_Common_Commands.suit_directive_override_parameters.map[3].suit_parameter_image_size)
self.assertEqual(4, len(self.decoded.suit_manifest.suit_common.suit_common_sequence[0].suit_common_sequence.union[0].SUIT_Common_Commands.suit_directive_override_parameters.map))
self.assertEqual(15, self.decoded.suit_manifest.suit_common.suit_common_sequence[0].suit_common_sequence.union[1].SUIT_Condition.suit_condition_vendor_identifier.SUIT_Rep_Policy)
self.assertEqual(15, self.decoded.suit_manifest.suit_common.suit_common_sequence[0].suit_common_sequence.union[2].SUIT_Condition.suit_condition_class_identifier.SUIT_Rep_Policy)
self.assertEqual(3, len(self.decoded.suit_manifest.suit_common.suit_common_sequence[0].suit_common_sequence.union))
self.assertEqual(2, len(self.decoded.suit_manifest.suit_common.suit_common_sequence[0].suit_common_sequence.union[0]))
self.assertEqual(2, len(self.decoded.suit_manifest.suit_common.suit_common_sequence[0].suit_common_sequence.union[0].SUIT_Common_Commands))
self.assertEqual(1, len(self.decoded.suit_manifest.suit_common.suit_common_sequence[0].suit_common_sequence.union[0].SUIT_Common_Commands.suit_directive_override_parameters))
self.assertEqual(4, len(self.decoded.suit_manifest.suit_common.suit_common_sequence[0].suit_common_sequence.union[0].SUIT_Common_Commands.suit_directive_override_parameters.map))
self.assertEqual(2, len(self.decoded.suit_manifest.suit_common.suit_common_sequence[0].suit_common_sequence.union[0].SUIT_Common_Commands.suit_directive_override_parameters.map[0]))
def test_cbor_pen(self):
data = bytes.fromhex(p_test_vectors14[1].read_text().replace("\n", ""))
struct = loads(data)
struct2 = loads(struct.value[3]) # manifest
struct3 = loads(struct2[3]) # common sequence
struct4 = loads(struct3[4]) # override params
self.assertEqual(struct4[0], 20)
self.assertTrue(isinstance(struct4[1][1], bytes))
struct4[1][1] = cbor2.CBORTag(112, struct4[1][1]) # Add the tag for cbor-pen
struct3[4] = dumps(struct4)
struct2[3] = dumps(struct3)
struct.value[3] = dumps(struct2)
data = dumps(struct)
self.decode_string(data, p_manifest14, p_cose)
class Test7Inv(Testn):
def test_inv0(self):
data = bytes.fromhex(p_test_vectors14[1].read_text().replace("\n", ""))
struct = loads(data)
struct2 = loads(struct.value[2]) # authentication
struct3 = loads(struct2[1])
struct3.tag = 99999 # invalid tag for COSE_Sign1
struct2[1] = dumps(struct3)
struct.value[2] = dumps(struct2)
data = dumps(struct)
try:
self.decode_string(data, p_manifest14, p_cose)
except cddl_gen.CddlValidationError as e:
return
else:
assert False, "Should have failed validation"
def test_inv1(self):
data = bytes.fromhex(p_test_vectors14[1].read_text().replace("\n", ""))
struct = loads(data)
struct2 = loads(struct.value[3]) # manifest
struct2[1] += 1 # invalid manifest version
struct.value[3] = dumps(struct2)
data = dumps(struct)
try:
self.decode_string(data, p_manifest14, p_cose)
except cddl_gen.CddlValidationError as e:
return
else:
assert False, "Should have failed validation"
def test_inv2(self):
data = bytes.fromhex(p_test_vectors14[1].read_text().replace("\n", ""))
struct = loads(data)
struct.value[23] = b'' # Invalid integrated payload key
data = dumps(struct)
try:
self.decode_string(data, p_manifest14, p_cose)
except (cddl_gen.CddlValidationError, cbor2.CBORDecodeEOF) as e:
return
else:
assert False, "Should have failed validation"
def test_inv3(self):
data = bytes.fromhex(p_test_vectors14[1].read_text().replace("\n", ""))
struct = loads(data)
struct2 = loads(struct.value[3]) # manifest
struct3 = loads(struct2[3]) # common sequence
struct4 = loads(struct3[4]) # override params
self.assertEqual(struct4[0], 20)
self.assertTrue(isinstance(struct4[1][1], bytes))
struct4[1][1] += b'x' # vendor ID: wrong length
struct3[4] = dumps(struct4)
struct2[3] = dumps(struct3)
struct.value[3] = dumps(struct2)
data = dumps(struct)
try:
self.decode_string(data, p_manifest14, p_cose)
except cddl_gen.CddlValidationError as e:
return
else:
assert False, "Should have failed validation"
class Test8(Testn):
def __init__(self, *args, **kwargs):
super(Test8, self).__init__(*args, **kwargs)
self.decode_file(p_test_vectors14[2], p_manifest14, p_cose)
def test_text(self):
self.assertEqual(
bytes.fromhex('2bfc4d0cc6680be7dd9f5ca30aa2bb5d1998145de33d54101b80e2ca49faf918'),
self.decoded.suit_manifest.SUIT_Severable_Members_Choice.suit_text[0].SUIT_Digest.suit_digest_bytes)
self.assertEqual(
bytes.fromhex('2bfc4d0cc6680be7dd9f5ca30aa2bb5d1998145de33d54101b80e2ca49faf918'),
sha256(dumps(self.decoded.SUIT_Severable_Manifest_Members.suit_text[0].suit_text_bstr)).digest())
self.assertEqual('arm.com', self.decoded.SUIT_Severable_Manifest_Members.suit_text[0].suit_text.SUIT_Component_Identifier[0].SUIT_Component_Identifier.SUIT_Text_Component_Keys.suit_text_vendor_domain[0])
self.assertEqual('This component is a demonstration. The digest is a sample pattern, not a real one.', self.decoded.SUIT_Severable_Manifest_Members.suit_text[0].suit_text.SUIT_Component_Identifier[0].SUIT_Component_Identifier.SUIT_Text_Component_Keys.suit_text_component_description[0])
# Check manifest description. The concatenation and .replace() call are there to add
# trailing whitespace to all blank lines except the first.
# This is done in this way to avoid editors automatically removing the whitespace.
self.assertEqual('''## Example 2: Simultaneous Download, Installation, Secure Boot, Severed Fields
''' + '''
This example covers the following templates:
* Compatibility Check ({{template-compatibility-check}})
* Secure Boot ({{template-secure-boot}})
* Firmware Download ({{firmware-download-template}})
This example also demonstrates severable elements ({{ovr-severable}}), and text ({{manifest-digest-text}}).'''.replace("\n\n", "\n \n"), self.decoded.SUIT_Severable_Manifest_Members.suit_text[0].suit_text.SUIT_Text_Keys.suit_text_manifest_description[0])
class Test9(Testn):
def __init__(self, *args, **kwargs):
super(Test9, self).__init__(*args, **kwargs)
self.decode_file(p_test_vectors14[3], p_manifest14, p_cose)
def test_try_each(self):
self.assertEqual(2, len(self.decoded.suit_manifest.SUIT_Severable_Members_Choice.suit_install[0].SUIT_Command_Sequence_bstr.union[0].SUIT_Directive.suit_directive_try_each.SUIT_Directive_Try_Each_Argument.SUIT_Command_Sequence_bstr))
self.assertEqual(33792, self.decoded.suit_manifest.SUIT_Severable_Members_Choice.suit_install[0].SUIT_Command_Sequence_bstr.union[0].SUIT_Directive.suit_directive_try_each.SUIT_Directive_Try_Each_Argument.SUIT_Command_Sequence_bstr[0].union[0].SUIT_Directive.suit_directive_set_parameters.map[0].suit_parameter_component_slot)
self.assertEqual(541696, self.decoded.suit_manifest.SUIT_Severable_Members_Choice.suit_install[0].SUIT_Command_Sequence_bstr.union[0].SUIT_Directive.suit_directive_try_each.SUIT_Directive_Try_Each_Argument.SUIT_Command_Sequence_bstr[1].union[0].SUIT_Directive.suit_directive_set_parameters.map[0].suit_parameter_component_slot)
class Test10(Testn):
def __init__(self, *args, **kwargs):
super(Test10, self).__init__(*args, **kwargs)
self.decode_file(p_test_vectors14[4], p_manifest14, p_cose)
def test_components(self):
self.assertEqual(3, len(self.decoded.suit_manifest.suit_common.suit_components[0]))
self.assertEqual(b'\x00', self.decoded.suit_manifest.suit_common.suit_components[0][0].bstr[0])
self.assertEqual(b'\x02', self.decoded.suit_manifest.suit_common.suit_components[0][1].bstr[0])
self.assertEqual(b'\x01', self.decoded.suit_manifest.suit_common.suit_components[0][2].bstr[0])
class Test11(Testn):
def __init__(self, *args, **kwargs):
super(Test11, self).__init__(*args, **kwargs)
self.decode_file(p_test_vectors14[5], p_manifest14, p_cose)
def test_validate(self):
self.assertEqual(4, len(self.decoded.suit_manifest.SUIT_Unseverable_Members.suit_validate[0].suit_validate.union))
self.assertEqual(15, self.decoded.suit_manifest.SUIT_Unseverable_Members.suit_validate[0].suit_validate.union[1].SUIT_Condition.suit_condition_image_match.SUIT_Rep_Policy)
class Test11Inv(Testn):
def test_invalid_rep_policy(self):
data = bytes.fromhex(p_test_vectors14[5].read_text().replace("\n", ""))
struct = loads(data)
struct2 = loads(struct.value[3]) # manifest
struct3 = loads(struct2[10]) # suit_validate
struct3[3] += 16 # invalid Rep_Policy
struct2[10] = dumps(struct3)
struct.value[3] = dumps(struct2)
data = dumps(struct)
try:
self.decode_string(data, p_manifest14, p_cose)
except cddl_gen.CddlValidationError as e:
return
else:
assert False, "Should have failed validation"
class TestCLI(TestCase):
def get_std_args(self, input):
return ["cddl-gen", "--cddl", p_manifest12, "--default-max-qty", "16", "convert", "--input", input, "-t", "SUIT_Envelope_Tagged"]
def do_testn(self, n):
call0 = Popen(self.get_std_args(p_test_vectors12[n]) + ["--output", "-", "--output-as", "cbor"], stdout=PIPE)
stdout0, _ = call0.communicate()
self.assertEqual(0, call0.returncode)
call1 = Popen(self.get_std_args("-") + ["--input-as", "cbor", "--output", "-", "--output-as", "json"], stdin=PIPE, stdout=PIPE)
stdout1, _ = call1.communicate(input=stdout0)
self.assertEqual(0, call1.returncode)
call2 = Popen(self.get_std_args("-") + ["--input-as", "json", "--output", "-", "--output-as", "yaml"], stdin=PIPE, stdout=PIPE)
stdout2, _ = call2.communicate(input=stdout1)
self.assertEqual(0, call2.returncode)
call3 = Popen(self.get_std_args("-") + ["--input-as", "yaml", "--output", "-", "--output-as", "cbor"], stdin=PIPE, stdout=PIPE)
stdout3, _ = call3.communicate(input=stdout2)
self.assertEqual(0, call3.returncode)
self.assertEqual(stdout0, stdout3)
call4 = Popen(self.get_std_args("-") + ["--input-as", "cbor", "--output", "-", "--output-as", "cborhex"], stdin=PIPE, stdout=PIPE)
stdout4, _ = call4.communicate(input=stdout3)
self.assertEqual(0, call4.returncode)
call5 = Popen(self.get_std_args("-") + ["--input-as", "cborhex", "--output", "-", "--output-as", "json"], stdin=PIPE, stdout=PIPE)
stdout5, _ = call5.communicate(input=stdout4)
self.assertEqual(0, call5.returncode)
self.assertEqual(stdout1, stdout5)
self.maxDiff = None
with open(p_test_vectors12[n], 'r') as f:
self.assertEqual(sub(r"\W+", "", f.read()), sub(r"\W+", "", stdout4.decode("utf-8")))
def test_0(self):
self.do_testn(0)
def test_1(self):
self.do_testn(1)
def test_2(self):
self.do_testn(2)
def test_3(self):
self.do_testn(3)
def test_4(self):
self.do_testn(4)
def test_5(self):
self.do_testn(5)
class TestOptional(TestCase):
def test_0(self):
with open(p_optional, 'r') as f:
cddl_res = cddl_gen.DataTranslator.from_cddl(f.read(), 16)
cddl = cddl_res.my_types['cfg']
test_yaml = """
mem_config:
- 0
- 5"""
decoded = cddl.decode_str_yaml(test_yaml)
self.assertEqual(decoded.mem_config[0].READ.union_choice, "uint0")
self.assertEqual(decoded.mem_config[0].N, [5])
if __name__ == "__main__":
main()
|
py
|
1a565fc90c3cfe5fd3c3e394c712032657739b3a
|
"""
Bug & Code by Yunhao Cao
"""
import typing
INCREASE_CREDIT = 100
class BankAccount:
def __init__(self, name : str, initialDeposit : int):
self.name = name
self.money = initialDeposit
self.amountLoaned = 0
self.credit = 100
self.totalCredit = 100
# transfer money into another account
def transferMoney(self, toAccount, amount : int) -> None:
# TODO: Q1 What check is missing here?
# TODO: Q2 Another error is presented here, please try to figure it out yourself.
toAccount.money -= amount
self.money += amount
def borrowMoney(self,amount : int) -> None:
# TODO: Q3 What check is missing here?
self.credit -= amount
self.amountLoaned += amount
self.money += amount
def payBackLoan(self,amount : int) -> None:
if self.money <= amount:
raise Exception('not enough money to pay back the loan')
# TODO: Q4 What check is missing here?
        # TODO: Q5 Another error is made here, please try to figure it out yourself.
self.credit += amount
self.amountLoaned -= amount
self.money -= amount
# increment credit each time when a pay back is made
increaseInCreditAmount = round(amount / self.totalCredit * INCREASE_CREDIT)
self.totalCredit += increaseInCreditAmount
"""
This function takes in parameters:
accounts - a python list of BankAccount instances
eachAmount - a python list of int, eachAmount[i] corresponds to the amount of money that needs to be sent to accounts[i]
Note: The function must fail and no money should be transferred if ANY of the transfers would fail.
"""
def groupTransfer(self, accounts : typing.List, eachAmount : typing.List[int]) -> None:
assert len(accounts) == len(eachAmount)
# TODO: Bug in this function.
sumAmount = 0
for i in range(len(eachAmount)):
ithAmount = eachAmount[i]
if ithAmount < 0:
raise Exception("cannot perform negative amount transfer")
sumAmount += ithAmount
if(self.money < sumAmount):
raise Exception("not enough money")
for i in range(len(accounts)):
ithAccount = accounts[i]
ithAmount = eachAmount[i]
ithAccount.money -= ithAmount
self.money += ithAmount
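# Quick repro sketch (illustrative, not part of the exercise): with the
# reversed transfer above, the sender ends up *gaining* the money it sends.
#   alice = BankAccount('alice', 100)
#   bob = BankAccount('bob', 0)
#   alice.groupTransfer([bob], [50])
#   print(alice.money, bob.money)  # 150 -50, rather than the expected 50 50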
|
py
|
1a565fd083cfea8a4b32dff5e7da48aca4c20432
|
# Synchronization classes using decorators. Provides synchronized, semaphore
# and event classes which provide transparent decorator patterns for
# Lock, BoundedSemaphore and Event objects in Python.
from threading import Thread, Lock, BoundedSemaphore, Event, currentThread
from time import sleep
from random import random
class synchronized(object):
""" Class enapsulating a lock and a function
allowing it to be used as a synchronizing
decorator making the wrapped function
thread-safe """
def __init__(self, *args):
self.lock = Lock()
    def __call__(self, f):
        def lockedfunc(*args, **kwargs):
            self.lock.acquire()
            print('Acquired lock =>', currentThread())
            try:
                return f(*args, **kwargs)
            finally:
                self.lock.release()
                print('Released lock =>', currentThread())
        return lockedfunc
class semaphore(object):
""" Class encapsulating a semaphore to limit
number of resources """
def __init__(self, *args):
self.sem = BoundedSemaphore(args[0])
    def __call__(self, f):
        def semfunc(*args, **kwargs):
            print('Trying to acquire sem =>', currentThread())
            self.sem.acquire()
            print('Acquired sem =>', currentThread())
            try:
                return f(*args, **kwargs)
            finally:
                self.sem.release()
                print('Released sem =>', currentThread())
        return semfunc
class event(object):
""" Class encapsulating an event object to control
sequential access to a resource """
def __init__(self, *args):
self.evt = Event()
self.evt.set()
    def __call__(self, f):
        def eventfunc(*args, **kwargs):
            print('Waiting on event =>', currentThread())
            self.evt.wait()
            # First thread will clear the event and make others wait; once it
            # is done with the job, it sets the event, which wakes up another
            # thread, which does the same thing... This provides sequential
            # access to a resource.
            self.evt.clear()
            print('Cleared event =>', currentThread())
            try:
                return f(*args, **kwargs)
            finally:
                # Wake up another thread...
                self.evt.set()
                print('Set event =>', currentThread())
        return eventfunc
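# A small demo sketch (not in the original module) exercising the synchronized
# decorator from a few threads; it uses the Thread/sleep/random imports above.
@synchronized()
def critical_section(ident):
    sleep(random() * 0.1)  # simulate some work while holding the lock
    print('Running critical section for', ident)

if __name__ == '__main__':
    threads = [Thread(target=critical_section, args=(i,)) for i in range(3)]
    for t in threads:
        t.start()
    for t in threads:
        t.join()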
|
py
|
1a56617c0d435c1d69bbe534eec5fafc5096dd7d
|
import argparse
import time
from pyspark.sql import SparkSession
from pyspark.sql.types import StructType, StructField, FloatType, LongType, DecimalType, IntegerType, StringType, DateType
def run_convert_files():
with SparkSession.builder.appName("convert_files").getOrCreate() as spark:
sc = spark.sparkContext
block_size = 1024 * 1024 * 1024 # 1GB
sc._jsc.hadoopConfiguration().setInt("dfs.blocksize", block_size)
sc._jsc.hadoopConfiguration().setInt("parquet.block.size", blockSize)
def convert_files(src, dst, schema):
            df = spark.read.option("header", "false").option("delimiter", "|")\
.schema(schema)\
.csv(src)
df.write.parquet(dst)
return
path_src = "s3://tpc-h-small/"
path_dst = "s3://tpc-h-small/parquet/"
src_lineitem = path_src + "lineitem.tbl"
dst_lineitem = path_dst + "lineitem"
schema_lineitem = StructType()\
.add("l_orderkey",LongType(),False)\
.add("l_partkey",LongType(),True)\
.add("l_suppkey",LongType(),True)\
.add("l_linenumber",IntegerType(),True)\
.add("l_quantity",DecimalType(10,2),True)\
.add("l_extendedprice",DecimalType(10,2),True)\
.add("l_discount",DecimalType(10,2),True)\
.add("l_tax",DecimalType(10,2),True)\
.add("l_returnflag",StringType(),True)\
.add("l_linestatus",StringType(),True)\
.add("l_shipdate",DateType(),True)\
.add("l_commitdate",DateType(),True)\
.add("l_receiptdate",DateType(),True)\
.add("l_shipinstruct",StringType(),True)\
.add("l_shipmode",StringType(),True)\
.add("l_comment",StringType(),True)
convert_files(src_lineitem, dst_lineitem, schema_lineitem)
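        # Optional sanity check (sketch): read the freshly written parquet
        # back and count the rows, e.g.
        #   print(spark.read.parquet(dst_lineitem).count())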
# src_orders = path_src + "orders.tbl"
# dst_orders = path_dst + "orders"
# schema_orders = StructType()\
# .add("o_orderkey",LongType(),False)\
# .add("o_custkey",LongType(),False)\
# .add("o_orderstatus",StringType(),True)\
# .add("o_totalprice",DecimalType(10,2),True)\
# .add("o_orderdate",DateType(),True)\
# .add("o_orderpriority",StringType(),True)\
# .add("o_clerk",StringType(),True)\
# .add("o_shippriority",IntegerType(),True)\
# .add("o_comment",StringType(),True)
# convert_files(src_orders, dst_orders, schema_orders)
if __name__ == "__main__":
run_convert_files()
|
py
|
1a5661bbc185a11fd8fdb784014b4abd6cdbca1d
|
# Copyright 2019 Xanadu Quantum Technologies Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""Unit tests for the states.py fock probabilities methods"""
import pytest
import numpy as np
import tensorflow as tf
from scipy.special import factorial as fac
from strawberryfields import backends
from strawberryfields import utils
MAG_ALPHAS = np.linspace(0, 0.8, 3)
PHASE_ALPHAS = np.linspace(0, 2 * np.pi, 3, endpoint=False)
@pytest.mark.parametrize("a", MAG_ALPHAS)
@pytest.mark.parametrize("phi", PHASE_ALPHAS)
class TestFockProbabilities:
"""Tests for the fock_prob state method"""
def test_gaussian(self, a, phi, setup_backend, cutoff, tol):
"""Tests that probabilities of particular Fock states
|n> are correct for a gaussian state."""
backend = setup_backend(1)
alpha = a * np.exp(1j * phi)
n = np.arange(cutoff)
ref_state = np.exp(-0.5 * np.abs(alpha) ** 2) * alpha ** n / np.sqrt(fac(n))
ref_probs = np.abs(ref_state) ** 2
backend.prepare_coherent_state(np.abs(alpha), np.angle(alpha), 0)
state = backend.state()
for n in range(cutoff):
prob_n = state.fock_prob([n])
assert np.allclose(prob_n, ref_probs[n], atol=tol, rtol=0)
@pytest.mark.backends("fock", "tf")
def test_nongaussian(self, a, phi, setup_backend, cutoff, tol):
"""Tests that probabilities of particular Fock states |n> are
correct for a nongaussian state."""
backend = setup_backend(2)
alpha = a * np.exp(1j * phi)
n = np.arange(cutoff)
ref_state = np.exp(-0.5 * np.abs(alpha) ** 2) * alpha ** n / np.sqrt(fac(n))
ref_probs = np.abs(ref_state) ** 2
backend.prepare_coherent_state(np.abs(alpha), np.angle(alpha), 0)
backend.prepare_fock_state(cutoff // 2, 1)
state = backend.state()
for n in range(cutoff):
prob_n = state.fock_prob([n, cutoff // 2])
assert np.allclose(prob_n, ref_probs[n], atol=tol, rtol=0)
@pytest.mark.backends("fock", "tf", "gaussian")
@pytest.mark.parametrize("a", MAG_ALPHAS)
@pytest.mark.parametrize("phi", PHASE_ALPHAS)
class TestAllFockProbs:
"""Tests for the all_fock_probs state method"""
def test_pure(self, a, phi, setup_backend, cutoff, batch_size, tol):
"""Tests that the numeric probabilities in the full Fock basis are
correct for a one-mode pure state."""
backend = setup_backend(1)
alpha = a * np.exp(1j * phi)
n = np.arange(cutoff)
ref_state = np.exp(-0.5 * np.abs(alpha) ** 2) * alpha ** n / np.sqrt(fac(n))
ref_probs = np.abs(ref_state) ** 2
backend.prepare_coherent_state(np.abs(alpha), np.angle(alpha), 0)
state = backend.state()
probs = state.all_fock_probs(cutoff=cutoff)
if isinstance(probs, tf.Tensor):
probs = probs.numpy()
probs = probs.flatten()
if batch_size is not None:
ref_probs = np.tile(ref_probs, batch_size)
assert np.allclose(probs, ref_probs, atol=tol, rtol=0)
def test_two_mode_gaussian(self, a, phi, setup_backend, batch_size, cutoff, tol):
"""Tests that the numeric probabilities in the full Fock basis are
correct for a two-mode gaussian state."""
if a == 0.0:
pytest.skip("Test only runs for states with non-zero displacement")
backend = setup_backend(2)
alpha = a * np.exp(1j * phi)
n = np.arange(cutoff)
ref_state1 = np.exp(-0.5 * np.abs(alpha) ** 2) * alpha ** n / np.sqrt(fac(n))
ref_state2 = (
np.exp(-0.5 * np.abs(-alpha) ** 2) * (-alpha) ** n / np.sqrt(fac(n))
)
ref_state = np.outer(ref_state1, ref_state2)
ref_probs = np.abs(np.reshape(ref_state ** 2, -1))
if batch_size is not None:
ref_probs = np.tile(ref_probs, batch_size)
backend.prepare_coherent_state(np.abs(alpha), np.angle(alpha), 0)
backend.prepare_coherent_state(np.abs(alpha), np.angle(alpha)+np.pi, 1)
state = backend.state()
        probs = state.all_fock_probs(cutoff=cutoff)
        if isinstance(probs, tf.Tensor):
            probs = probs.numpy()
        assert np.allclose(probs.flatten(), ref_probs, atol=tol, rtol=0)
|
py
|
1a56621981e327574ab1a7664a069d7a284ce00e
|
##############################################################################
#
# Copyright (c) 2002 Zope Foundation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""Adjustments are tunable parameters.
"""
import getopt
import socket
from waitress.compat import (
PY2,
WIN,
string_types,
HAS_IPV6,
)
truthy = frozenset(('t', 'true', 'y', 'yes', 'on', '1'))
def asbool(s):
""" Return the boolean value ``True`` if the case-lowered value of string
input ``s`` is any of ``t``, ``true``, ``y``, ``on``, or ``1``, otherwise
return the boolean value ``False``. If ``s`` is the value ``None``,
return ``False``. If ``s`` is already one of the boolean values ``True``
or ``False``, return it."""
if s is None:
return False
if isinstance(s, bool):
return s
s = str(s).strip()
return s.lower() in truthy
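# Examples (sketch): asbool('Yes') -> True, asbool('0') -> False,
# asbool(None) -> False, asbool(True) -> True.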
def asoctal(s):
"""Convert the given octal string to an actual number."""
return int(s, 8)
def aslist_cronly(value):
if isinstance(value, string_types):
value = filter(None, [x.strip() for x in value.splitlines()])
return list(value)
def aslist(value):
""" Return a list of strings, separating the input based on newlines
and, if flatten=True (the default), also split on spaces within
each line."""
values = aslist_cronly(value)
result = []
for value in values:
subvalues = value.split()
result.extend(subvalues)
return result
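# Example (sketch):
#   aslist('127.0.0.1:8080\n localhost:8081 localhost:8082')
#   -> ['127.0.0.1:8080', 'localhost:8081', 'localhost:8082']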
def slash_fixed_str(s):
s = s.strip()
if s:
# always have a leading slash, replace any number of leading slashes
# with a single slash, and strip any trailing slashes
s = '/' + s.lstrip('/').rstrip('/')
return s
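# Example (sketch): slash_fixed_str('///foo/bar//') -> '/foo/bar', and
# slash_fixed_str('') -> ''.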
class _str_marker(str):
pass
class _int_marker(int):
pass
class Adjustments(object):
"""This class contains tunable parameters.
"""
_params = (
('host', str),
('port', int),
('ipv4', asbool),
('ipv6', asbool),
('listen', aslist),
('threads', int),
('trusted_proxy', str),
('url_scheme', str),
('url_prefix', slash_fixed_str),
('backlog', int),
('recv_bytes', int),
('send_bytes', int),
('outbuf_overflow', int),
('inbuf_overflow', int),
('connection_limit', int),
('cleanup_interval', int),
('channel_timeout', int),
('log_socket_errors', asbool),
('max_request_header_size', int),
('max_request_body_size', int),
('expose_tracebacks', asbool),
('ident', str),
('asyncore_loop_timeout', int),
('asyncore_use_poll', asbool),
('unix_socket', str),
('unix_socket_perms', asoctal),
)
_param_map = dict(_params)
# hostname or IP address to listen on
host = _str_marker('0.0.0.0')
# TCP port to listen on
port = _int_marker(8080)
listen = ['{}:{}'.format(host, port)]
    # number of threads available for tasks
threads = 4
    # Host allowed to override ``wsgi.url_scheme`` via header
trusted_proxy = None
# default ``wsgi.url_scheme`` value
url_scheme = 'http'
# default ``SCRIPT_NAME`` value, also helps reset ``PATH_INFO``
# when nonempty
url_prefix = ''
# server identity (sent in Server: header)
ident = 'waitress'
    # backlog is the value waitress passes to socket.listen(). This is the
    # maximum number of incoming TCP connections that will wait in an OS
    # queue for an available channel. From listen(2): "If a connection
    # request arrives when the queue is full, the client may receive an error
    # with an indication of ECONNREFUSED or, if the underlying protocol
    # supports retransmission, the request may be ignored so that a later
    # reattempt at connection succeeds."
backlog = 1024
# recv_bytes is the argument to pass to socket.recv().
recv_bytes = 8192
# send_bytes is the number of bytes to send to socket.send(). Multiples
# of 9000 should avoid partly-filled packets, but don't set this larger
# than the TCP write buffer size. In Linux, /proc/sys/net/ipv4/tcp_wmem
# controls the minimum, default, and maximum sizes of TCP write buffers.
send_bytes = 18000
# A tempfile should be created if the pending output is larger than
# outbuf_overflow, which is measured in bytes. The default is 1MB. This
# is conservative.
outbuf_overflow = 1048576
# A tempfile should be created if the pending input is larger than
# inbuf_overflow, which is measured in bytes. The default is 512K. This
# is conservative.
inbuf_overflow = 524288
# Stop creating new channels if too many are already active (integer).
# Each channel consumes at least one file descriptor, and, depending on
# the input and output body sizes, potentially up to three. The default
# is conservative, but you may need to increase the number of file
# descriptors available to the Waitress process on most platforms in
# order to safely change it (see ``ulimit -a`` "open files" setting).
# Note that this doesn't control the maximum number of TCP connections
# that can be waiting for processing; the ``backlog`` argument controls
# that.
connection_limit = 100
# Minimum seconds between cleaning up inactive channels.
cleanup_interval = 30
# Maximum seconds to leave an inactive connection open.
channel_timeout = 120
# Boolean: turn off to not log premature client disconnects.
log_socket_errors = True
# maximum number of bytes of all request headers combined (256K default)
max_request_header_size = 262144
# maximum number of bytes in request body (1GB default)
max_request_body_size = 1073741824
# expose tracebacks of uncaught exceptions
expose_tracebacks = False
# Path to a Unix domain socket to use.
unix_socket = None
    # Octal permissions to use for the Unix domain socket.
unix_socket_perms = 0o600
# The socket options to set on receiving a connection. It is a list of
# (level, optname, value) tuples. TCP_NODELAY disables the Nagle
# algorithm for writes (Waitress already buffers its writes).
socket_options = [
(socket.SOL_TCP, socket.TCP_NODELAY, 1),
]
# The asyncore.loop timeout value
asyncore_loop_timeout = 1
# The asyncore.loop flag to use poll() instead of the default select().
asyncore_use_poll = False
# Enable IPv4 by default
ipv4 = True
# Enable IPv6 by default
ipv6 = True
def __init__(self, **kw):
if 'listen' in kw and ('host' in kw or 'port' in kw):
            raise ValueError('host and/or port may not be set if listen is set.')
for k, v in kw.items():
if k not in self._param_map:
raise ValueError('Unknown adjustment %r' % k)
setattr(self, k, self._param_map[k](v))
if (not isinstance(self.host, _str_marker) or
not isinstance(self.port, _int_marker)):
self.listen = ['{}:{}'.format(self.host, self.port)]
enabled_families = socket.AF_UNSPEC
if not self.ipv4 and not HAS_IPV6: # pragma: no cover
raise ValueError(
'IPv4 is disabled but IPv6 is not available. Cowardly refusing to start.'
)
if self.ipv4 and not self.ipv6:
enabled_families = socket.AF_INET
if not self.ipv4 and self.ipv6 and HAS_IPV6:
enabled_families = socket.AF_INET6
wanted_sockets = []
hp_pairs = []
for i in self.listen:
if ':' in i:
(host, port) = i.rsplit(":", 1)
                # For IPv6 we need to make sure we didn't split inside the
                # bracketed address itself
if ']' in port: # pragma: nocover
(host, port) = (i, str(self.port))
else:
(host, port) = (i, str(self.port))
if WIN and PY2: # pragma: no cover
try:
# Try turning the port into an integer
port = int(port)
            except ValueError:
raise ValueError(
'Windows does not support service names instead of port numbers'
)
try:
if '[' in host and ']' in host: # pragma: nocover
host = host.strip('[').rstrip(']')
if host == '*':
host = None
for s in socket.getaddrinfo(
host,
port,
enabled_families,
socket.SOCK_STREAM,
socket.IPPROTO_TCP,
socket.AI_PASSIVE
):
(family, socktype, proto, _, sockaddr) = s
# It seems that getaddrinfo() may sometimes happily return
# the same result multiple times, this of course makes
# bind() very unhappy...
#
# Split on %, and drop the zone-index from the host in the
# sockaddr. Works around a bug in OS X whereby
# getaddrinfo() returns the same link-local interface with
# two different zone-indices (which makes no sense what so
# ever...) yet treats them equally when we attempt to bind().
if (
sockaddr[1] == 0 or
(sockaddr[0].split('%', 1)[0], sockaddr[1]) not in hp_pairs
):
wanted_sockets.append((family, socktype, proto, sockaddr))
hp_pairs.append((sockaddr[0].split('%', 1)[0], sockaddr[1]))
        except Exception:
raise ValueError('Invalid host/port specified.')
self.listen = wanted_sockets
@classmethod
def parse_args(cls, argv):
"""Pre-parse command line arguments for input into __init__. Note that
this does not cast values into adjustment types, it just creates a
dictionary suitable for passing into __init__, where __init__ does the
casting.
"""
long_opts = ['help', 'call']
for opt, cast in cls._params:
opt = opt.replace('_', '-')
if cast is asbool:
long_opts.append(opt)
long_opts.append('no-' + opt)
else:
long_opts.append(opt + '=')
kw = {
'help': False,
'call': False,
}
opts, args = getopt.getopt(argv, '', long_opts)
for opt, value in opts:
param = opt.lstrip('-').replace('-', '_')
if param == 'listen':
kw['listen'] = '{} {}'.format(kw.get('listen', ''), value)
continue
if param.startswith('no_'):
param = param[3:]
kw[param] = 'false'
elif param in ('help', 'call'):
kw[param] = True
elif cls._param_map[param] is asbool:
kw[param] = 'true'
else:
kw[param] = value
return kw, args
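# A minimal usage sketch (hypothetical argv) showing how parse_args feeds
# __init__, which performs the actual type casting:
#   kw, args = Adjustments.parse_args(['--port=8000', '--no-ipv6'])
#   # kw == {'help': False, 'call': False, 'port': '8000', 'ipv6': 'false'}
#   adj = Adjustments(**{k: v for k, v in kw.items() if k not in ('help', 'call')})
#   # adj.port == 8000 and adj.ipv6 is False after casting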
|
py
|
1a5662bbd0e5d7cbe17b9f12538f40f12c02b43a
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Copyright (C) 2011-2016 Lorenzo Carbonell
# [email protected]
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
def redondea(valor):
valor = valor * 10.0
return int(valor) / 10.0
def redondea_digits(valor, digits=0):
if digits == 0:
return int(round(valor, digits))
return round(valor, digits)
def s2f(cadena):
try:
value = float(cadena)
    except (TypeError, ValueError):
value = 0.0
return value
def s2f_print(word):
try:
return float(word)
except Exception as e:
print('error:', str(e))
        return 0.0
def cambia(valor, a, SI=True):
if len(valor) == 0:
return ''
valor = float(valor)
if SI is False:
valor = redondea(5.0 / 9.0 * (valor - 32.0))
if a == 'F':
return str(redondea(valor * 9.0 / 5.0 + 32.0))
elif a == 'K':
return str(redondea(valor + 273.15))
return str(valor)
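# Illustrative usage (SI=True means the input is already in Celsius):
#   cambia('100', 'F')        -> '212.0'
#   cambia('0', 'K')          -> '273.1'
#   cambia('212', 'C', False) -> '100.0'   (ºF input converted to Celsius first)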
def change_temperature(valor, a):
valor = s2f(valor)
    # the input value is assumed to be in ºF
if a == 'C':
valor = 5.0 / 9.0 * (valor - 32.0)
elif a == 'K':
valor = 5.0 / 9.0 * (valor - 32.0) + 273.15
return str(redondea_digits(valor))
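# Illustrative usage (the input is read as ºF, per the note above):
#   change_temperature('98.6', 'C') -> '37'
#   change_temperature('32', 'K')   -> '273'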
def fa2f(temperature):
return (temperature - 273.15) * 9.0 / 5.0 + 32.0
def f2c(temperature):
return (s2f(temperature) - 32.0) * 5.0 / 9.0
|
py
|
1a5665845fc01240cc0a550b1683c4baf5b9d999
|
# Copyright 2018 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
import os
import subprocess
from abc import ABCMeta, abstractmethod
from collections import defaultdict
from dataclasses import dataclass
from typing import Any
from pants.backend.native.tasks.native_task import NativeTask
from pants.base.build_environment import get_buildroot
from pants.base.exceptions import TaskError
from pants.base.workunit import WorkUnit, WorkUnitLabel
from pants.util.memo import memoized_method, memoized_property
from pants.util.meta import classproperty
@dataclass(frozen=True)
class NativeCompileRequest:
compiler: Any
include_dirs: Any
sources: Any
compiler_options: Any
output_dir: Any
header_file_extensions: Any
# TODO(#5950): perform all process execution in the v2 engine!
@dataclass(frozen=True)
class ObjectFiles:
root_dir: Any
filenames: Any
def file_paths(self):
return [os.path.join(self.root_dir, fname) for fname in self.filenames]
class NativeCompile(NativeTask, metaclass=ABCMeta):
# `NativeCompile` will use the `source_target_constraint` to determine what targets have "sources"
# to compile, and the `dependent_target_constraint` to determine which dependent targets to
# operate on for `strict_deps` calculation.
# NB: `source_target_constraint` must be overridden.
source_target_constraint = None
@classproperty
@abstractmethod
def workunit_label(cls):
"""A string describing the work being done during compilation.
`NativeCompile` will use `workunit_label` as the name of the workunit when executing the
compiler process.
:rtype: str
"""
@classmethod
def product_types(cls):
return [ObjectFiles]
@property
def cache_target_dirs(self):
return True
@classmethod
def implementation_version(cls):
return super().implementation_version() + [('NativeCompile', 1)]
class NativeCompileError(TaskError):
"""Raised for errors in this class's logic.
Subclasses are advised to create their own exception class.
"""
def execute(self):
object_files_product = self.context.products.get(ObjectFiles)
source_targets = self.context.targets(self.source_target_constraint.satisfied_by)
with self.invalidated(source_targets, invalidate_dependents=True) as invalidation_check:
for vt in invalidation_check.all_vts:
if not vt.valid:
compile_request = self._make_compile_request(vt)
self.context.log.debug("compile_request: {}".format(compile_request))
self._compile(compile_request)
object_files = self.collect_cached_objects(vt)
self._add_product_at_target_base(object_files_product, vt.target, object_files)
# This may be calculated many times for a target, so we memoize it.
@memoized_method
def _include_dirs_for_target(self, target):
return os.path.join(get_buildroot(), target.address.spec_path)
@dataclass(frozen=True)
class NativeSourcesByType:
rel_root: Any
headers: Any
sources: Any
def get_sources_headers_for_target(self, target):
"""Return a list of file arguments to provide to the compiler.
NB: result list will contain both header and source files!
:raises: :class:`NativeCompile.NativeCompileError` if there is an error processing the sources.
"""
# Get source paths relative to the target base so the exception message with the target and
# paths makes sense.
target_relative_sources = target.sources_relative_to_target_base()
rel_root = target_relative_sources.rel_root
# Unique file names are required because we just dump object files into a single directory, and
# the compiler will silently just produce a single object file if provided non-unique filenames.
# TODO: add some shading to file names so we can remove this check.
# NB: It shouldn't matter if header files have the same name, but this will raise an error in
# that case as well. We won't need to do any shading of header file names.
seen_filenames = defaultdict(list)
for src in target_relative_sources:
seen_filenames[os.path.basename(src)].append(src)
duplicate_filename_err_msgs = []
for fname, source_paths in seen_filenames.items():
if len(source_paths) > 1:
duplicate_filename_err_msgs.append("filename: {}, paths: {}".format(fname, source_paths))
if duplicate_filename_err_msgs:
raise self.NativeCompileError(
"Error in target '{}': source files must have a unique filename within a '{}' target. "
"Conflicting filenames:\n{}"
.format(target.address.spec, target.alias(), '\n'.join(duplicate_filename_err_msgs)))
return [os.path.join(get_buildroot(), rel_root, src) for src in target_relative_sources]
@abstractmethod
def get_compile_settings(self):
"""Return an instance of NativeBuildStep.
NB: Subclasses will be queried for the compile settings once and the result cached.
"""
@memoized_property
def _compile_settings(self):
return self.get_compile_settings()
@abstractmethod
def get_compiler(self, native_library_target):
"""An instance of `_CompilerMixin` which can be invoked to compile files.
NB: Subclasses will be queried for the compiler instance once and the result cached.
:return: :class:`pants.backend.native.config.environment._CompilerMixin`
"""
def _compiler(self, native_library_target):
return self.get_compiler(native_library_target)
def _make_compile_request(self, versioned_target):
target = versioned_target.target
include_dirs = []
for dep in self.native_deps(target):
source_lib_base_dir = os.path.join(get_buildroot(),
dep._sources_field.rel_path)
include_dirs.append(source_lib_base_dir)
for ext_dep in self.packaged_native_deps(target):
external_lib_include_dir = os.path.join(get_buildroot(),
ext_dep._sources_field.rel_path,
ext_dep.include_relpath)
self.context.log.debug('ext_dep: {}, external_lib_include_dir: {}'
.format(ext_dep, external_lib_include_dir))
include_dirs.append(external_lib_include_dir)
sources_and_headers = self.get_sources_headers_for_target(target)
compiler_option_sets = (self._compile_settings.native_build_step
.get_compiler_option_sets_for_target(target))
self.context.log.debug('target: {}, compiler_option_sets: {}'.format(target, compiler_option_sets))
compile_request = NativeCompileRequest(
compiler=self._compiler(target),
include_dirs=include_dirs,
sources=sources_and_headers,
compiler_options=(self._compile_settings
.native_build_step
.get_merged_args_for_compiler_option_sets(compiler_option_sets)),
output_dir=versioned_target.results_dir,
header_file_extensions=self._compile_settings.header_file_extensions)
self.context.log.debug(repr(compile_request))
return compile_request
def _iter_sources_minus_headers(self, compile_request):
for s in compile_request.sources:
if not s.endswith(tuple(compile_request.header_file_extensions)):
yield s
class _HeaderOnlyLibrary(Exception): pass
def _make_compile_argv(self, compile_request):
"""Return a list of arguments to use to compile sources. Subclasses can override and append."""
sources_minus_headers = list(self._iter_sources_minus_headers(compile_request))
if len(sources_minus_headers) == 0:
raise self._HeaderOnlyLibrary()
compiler = compile_request.compiler
compiler_options = compile_request.compiler_options
# We are going to execute in the target output, so get absolute paths for everything.
buildroot = get_buildroot()
# TODO: add -v to every compiler and linker invocation!
argv = (
[compiler.exe_filename] +
list(compiler.extra_args) +
# TODO: If we need to produce static libs, don't add -fPIC! (could use Variants -- see #5788).
['-c', '-fPIC'] +
list(compiler_options) +
[
'-I{}'.format(os.path.join(buildroot, inc_dir))
for inc_dir in compile_request.include_dirs
] +
[os.path.join(buildroot, src) for src in sources_minus_headers])
self.context.log.info("selected compiler exe name: '{}'".format(compiler.exe_filename))
self.context.log.debug("compile argv: {}".format(argv))
return argv
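  # Illustrative shape of the resulting argv (hypothetical compiler, options
  # and paths; assumes empty extra_args):
  #   ['g++', '-c', '-fPIC', '-O2',
  #    '-I/buildroot/3rdparty/include', '/buildroot/src/native/foo.cpp']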
def _compile(self, compile_request):
"""Perform the process of compilation, writing object files to the request's 'output_dir'.
NB: This method must arrange the output files so that `collect_cached_objects()` can collect all
of the results (or vice versa)!
"""
try:
argv = self._make_compile_argv(compile_request)
except self._HeaderOnlyLibrary:
self.context.log.debug('{} is a header-only library'.format(compile_request))
return
compiler = compile_request.compiler
output_dir = compile_request.output_dir
env = compiler.invocation_environment_dict
with self.context.new_workunit(
name=self.workunit_label, labels=[WorkUnitLabel.COMPILER]) as workunit:
try:
process = subprocess.Popen(
argv,
cwd=output_dir,
stdout=workunit.output('stdout'),
stderr=workunit.output('stderr'),
env=env)
except OSError as e:
workunit.set_outcome(WorkUnit.FAILURE)
raise self.NativeCompileError(
"Error invoking '{exe}' with command {cmd} and environment {env} for request {req}: {err}"
.format(exe=compiler.exe_filename, cmd=argv, env=env, req=compile_request, err=e))
rc = process.wait()
if rc != 0:
workunit.set_outcome(WorkUnit.FAILURE)
raise self.NativeCompileError(
"Error in '{section_name}' with command {cmd} and environment {env} for request {req}. "
"Exit code was: {rc}."
.format(section_name=self.workunit_label, cmd=argv, env=env, req=compile_request, rc=rc))
def collect_cached_objects(self, versioned_target):
"""Scan `versioned_target`'s results directory and return the output files from that directory.
:return: :class:`ObjectFiles`
"""
return ObjectFiles(versioned_target.results_dir, os.listdir(versioned_target.results_dir))
|
py
|
1a5665e5500369b2444352e2c3c8acd2ec8ee7d1
|
#!/usr/bin/env python3
import argparse
import os
import sys
import logging
from vapi_json_parser import Field, Struct, Enum, Union, Message, JsonParser,\
SimpleType, StructType, Alias
class CField(Field):
def get_c_name(self):
return "vapi_type_%s" % self.name
def get_c_def(self):
if self.type.get_c_name() == 'vl_api_string_t':
if self.len:
return "u8 %s[%d];" % (self.name, self.len)
else:
return "vl_api_string_t %s;" % (self.name)
else:
if self.len is not None and type(self.len) != dict:
return "%s %s[%d];" % (self.type.get_c_name(), self.name, self.len)
else:
return "%s %s;" % (self.type.get_c_name(), self.name)
def get_swap_to_be_code(self, struct, var):
if self.len is not None and type(self.len) != dict:
if self.len > 0:
return "do { unsigned i; for (i = 0; i < %d; ++i) { %s } }"\
" while(0);" % (
self.len,
self.type.get_swap_to_be_code(struct, "%s[i]" % var))
else:
if self.nelem_field.needs_byte_swap():
nelem_field = "%s(%s%s)" % (
self.nelem_field.type.get_swap_to_host_func_name(),
struct, self.nelem_field.name)
else:
nelem_field = "%s%s" % (struct, self.nelem_field.name)
return (
"do { unsigned i; for (i = 0; i < %s; ++i) { %s } }"
" while(0);" %
(nelem_field, self.type.get_swap_to_be_code(
struct, "%s[i]" % var)))
return self.type.get_swap_to_be_code(struct, "%s" % var)
def get_swap_to_host_code(self, struct, var):
if self.len is not None and type(self.len) != dict:
if self.len > 0:
return "do { unsigned i; for (i = 0; i < %d; ++i) { %s } }"\
" while(0);" % (
self.len,
self.type.get_swap_to_host_code(struct, "%s[i]" % var))
else:
# nelem_field already swapped to host here...
return (
"do { unsigned i; for (i = 0; i < %s%s; ++i) { %s } }"
" while(0);" %
(struct, self.nelem_field.name,
self.type.get_swap_to_host_code(
struct, "%s[i]" % var)))
return self.type.get_swap_to_host_code(struct, "%s" % var)
def needs_byte_swap(self):
return self.type.needs_byte_swap()
def get_vla_field_length_name(self, path):
return "%s_%s_array_size" % ("_".join(path), self.name)
def get_alloc_vla_param_names(self, path):
if self.is_vla():
result = [self.get_vla_field_length_name(path)]
else:
result = []
if self.type.has_vla():
t = self.type.get_alloc_vla_param_names(path + [self.name])
result.extend(t)
return result
def get_vla_calc_size_code(self, prefix, path):
if self.is_vla():
result = ["sizeof(%s.%s[0]) * %s" % (
".".join([prefix] + path),
self.name,
self.get_vla_field_length_name(path))]
else:
result = []
if self.type.has_vla():
t = self.type.get_vla_calc_size_code(prefix, path + [self.name])
result.extend(t)
return result
def get_vla_assign_code(self, prefix, path):
result = []
if self.is_vla():
result.append("%s.%s = %s" % (
".".join([prefix] + path),
self.nelem_field.name,
self.get_vla_field_length_name(path)))
if self.type.has_vla():
t = self.type.get_vla_assign_code(prefix, path + [self.name])
result.extend(t)
return result
class CAlias(CField):
def get_c_name(self):
return "vapi_type_%s" % self.name
def get_c_def(self):
if self.len is not None:
return "typedef %s vapi_type_%s[%d];" % (
self.type.get_c_name(), self.name, self.len)
else:
return "typedef %s vapi_type_%s;" % (
self.type.get_c_name(), self.name)
class CStruct(Struct):
def get_c_def(self):
return "\n".join([
"typedef struct __attribute__((__packed__)) {\n%s" % (
"\n".join([" %s" % x.get_c_def()
for x in self.fields])),
"} %s;" % self.get_c_name()])
def get_vla_assign_code(self, prefix, path):
return [x for f in self.fields if f.has_vla()
for x in f.get_vla_assign_code(prefix, path)]
def get_alloc_vla_param_names(self, path):
return [x for f in self.fields
if f.has_vla()
for x in f.get_alloc_vla_param_names(path)]
def get_vla_calc_size_code(self, prefix, path):
return [x for f in self.fields if f.has_vla()
for x in f.get_vla_calc_size_code(prefix, path)]
class CSimpleType (SimpleType):
swap_to_be_dict = {
'i16': 'htobe16', 'u16': 'htobe16',
'i32': 'htobe32', 'u32': 'htobe32',
'i64': 'htobe64', 'u64': 'htobe64',
}
swap_to_host_dict = {
'i16': 'be16toh', 'u16': 'be16toh',
'i32': 'be32toh', 'u32': 'be32toh',
'i64': 'be64toh', 'u64': 'be64toh',
}
__packed = "__attribute__((packed))"
pack_dict = {
'i8': __packed, 'u8': __packed,
'i16': __packed, 'u16': __packed,
}
def get_c_name(self):
return self.name
def get_swap_to_be_func_name(self):
return self.swap_to_be_dict[self.name]
def get_swap_to_host_func_name(self):
return self.swap_to_host_dict[self.name]
def get_packed_string(self):
return self.pack_dict[self.name]
def get_swap_to_be_code(self, struct, var, cast=None):
x = "%s%s" % (struct, var)
return "%s = %s%s(%s);" % (x,
"(%s)" % cast if cast else "",
self.get_swap_to_be_func_name(), x)
def get_swap_to_host_code(self, struct, var, cast=None):
x = "%s%s" % (struct, var)
return "%s = %s%s(%s);" % (x,
"(%s)" % cast if cast else "",
self.get_swap_to_host_func_name(), x)
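    # Illustrative generated C (for a hypothetical u16 field "len" under "msg->"):
    #   get_swap_to_be_code("msg->", "len")   -> 'msg->len = htobe16(msg->len);'
    #   get_swap_to_host_code("msg->", "len") -> 'msg->len = be16toh(msg->len);'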
def needs_byte_swap(self):
try:
self.get_swap_to_host_func_name()
return True
except KeyError:
pass
return False
def get_packed(self):
return self.pack_dict.get(self.name, "")
class CEnum(Enum):
def get_c_name(self):
return "vapi_enum_%s" % self.name
def get_c_def(self):
return "typedef enum {\n%s\n} %s %s;" % (
"\n".join([" %s = %s," % (i, j) for i, j in self.value_pairs]),
self.type.get_packed(),
self.get_c_name()
)
def needs_byte_swap(self):
return self.type.needs_byte_swap()
def get_swap_to_be_code(self, struct, var):
return self.type.get_swap_to_be_code(struct, var, self.get_c_name())
def get_swap_to_host_code(self, struct, var):
return self.type.get_swap_to_host_code(struct, var, self.get_c_name())
class CUnion(Union):
def get_c_name(self):
return "vapi_union_%s" % self.name
def get_c_def(self):
return "typedef union {\n%s\n} %s;" % (
"\n".join([" %s %s;" % (i.get_c_name(), j)
for i, j in self.type_pairs]),
self.get_c_name()
)
def needs_byte_swap(self):
return False
class CStructType (StructType, CStruct):
def get_c_name(self):
return "vapi_type_%s" % self.name
def get_swap_to_be_func_name(self):
return "%s_hton" % self.get_c_name()
def get_swap_to_host_func_name(self):
return "%s_ntoh" % self.get_c_name()
def get_swap_to_be_func_decl(self):
return "void %s(%s *msg)" % (
self.get_swap_to_be_func_name(), self.get_c_name())
def get_swap_to_be_func_def(self):
return "%s\n{\n%s\n}" % (
self.get_swap_to_be_func_decl(),
"\n".join([
" %s" % p.get_swap_to_be_code("msg->", "%s" % p.name)
for p in self.fields if p.needs_byte_swap()]),
)
def get_swap_to_host_func_decl(self):
return "void %s(%s *msg)" % (
self.get_swap_to_host_func_name(), self.get_c_name())
def get_swap_to_host_func_def(self):
return "%s\n{\n%s\n}" % (
self.get_swap_to_host_func_decl(),
"\n".join([
" %s" % p.get_swap_to_host_code("msg->", "%s" % p.name)
for p in self.fields if p.needs_byte_swap()]),
)
def get_swap_to_be_code(self, struct, var):
return "%s(&%s%s);" % (self.get_swap_to_be_func_name(), struct, var)
def get_swap_to_host_code(self, struct, var):
return "%s(&%s%s);" % (self.get_swap_to_host_func_name(), struct, var)
def needs_byte_swap(self):
for f in self.fields:
if f.needs_byte_swap():
return True
return False
class CMessage (Message):
def __init__(self, logger, definition, json_parser):
super(CMessage, self).__init__(logger, definition, json_parser)
self.payload_members = [
" %s" % p.get_c_def()
for p in self.fields
if p.type != self.header
]
def has_payload(self):
return len(self.payload_members) > 0
def get_msg_id_name(self):
return "vapi_msg_id_%s" % self.name
def get_c_name(self):
return "vapi_msg_%s" % self.name
def get_payload_struct_name(self):
return "vapi_payload_%s" % self.name
def get_alloc_func_name(self):
return "vapi_alloc_%s" % self.name
def get_alloc_vla_param_names(self):
return [x for f in self.fields
if f.has_vla()
for x in f.get_alloc_vla_param_names([])]
def get_alloc_func_decl(self):
return "%s* %s(struct vapi_ctx_s *ctx%s)" % (
self.get_c_name(),
self.get_alloc_func_name(),
"".join([", size_t %s" % n for n in
self.get_alloc_vla_param_names()]))
def get_alloc_func_def(self):
extra = []
if self.header.has_field('client_index'):
extra.append(
" msg->header.client_index = vapi_get_client_index(ctx);")
if self.header.has_field('context'):
extra.append(" msg->header.context = 0;")
return "\n".join([
"%s" % self.get_alloc_func_decl(),
"{",
" %s *msg = NULL;" % self.get_c_name(),
" const size_t size = sizeof(%s)%s;" % (
self.get_c_name(),
"".join([" + %s" % x for f in self.fields if f.has_vla()
for x in f.get_vla_calc_size_code("msg->payload",
[])])),
" /* cast here required to play nicely with C++ world ... */",
" msg = (%s*)vapi_msg_alloc(ctx, size);" % self.get_c_name(),
" if (!msg) {",
" return NULL;",
" }",
] + extra + [
" msg->header._vl_msg_id = vapi_lookup_vl_msg_id(ctx, %s);" %
self.get_msg_id_name(),
"".join([" %s;\n" % line
for f in self.fields if f.has_vla()
for line in f.get_vla_assign_code("msg->payload", [])]),
" return msg;",
"}"])
def get_calc_msg_size_func_name(self):
return "vapi_calc_%s_msg_size" % self.name
def get_calc_msg_size_func_decl(self):
return "uword %s(%s *msg)" % (
self.get_calc_msg_size_func_name(),
self.get_c_name())
def get_calc_msg_size_func_def(self):
return "\n".join([
"%s" % self.get_calc_msg_size_func_decl(),
"{",
" return sizeof(*msg)%s;" %
"".join(["+ msg->payload.%s * sizeof(msg->payload.%s[0])" % (
f.nelem_field.name,
f.name)
for f in self.fields
if f.nelem_field is not None
]),
"}",
])
def get_c_def(self):
if self.has_payload():
return "\n".join([
"typedef struct __attribute__ ((__packed__)) {",
"%s " %
"\n".join(self.payload_members),
"} %s;" % self.get_payload_struct_name(),
"",
"typedef struct __attribute__ ((__packed__)) {",
(" %s %s;" % (self.header.get_c_name(),
self.fields[0].name)
if self.header is not None else ""),
" %s payload;" % self.get_payload_struct_name(),
"} %s;" % self.get_c_name(), ])
else:
return "\n".join([
"typedef struct __attribute__ ((__packed__)) {",
(" %s %s;" % (self.header.get_c_name(),
self.fields[0].name)
if self.header is not None else ""),
"} %s;" % self.get_c_name(), ])
def get_swap_payload_to_host_func_name(self):
return "%s_payload_ntoh" % self.get_c_name()
def get_swap_payload_to_be_func_name(self):
return "%s_payload_hton" % self.get_c_name()
def get_swap_payload_to_host_func_decl(self):
return "void %s(%s *payload)" % (
self.get_swap_payload_to_host_func_name(),
self.get_payload_struct_name())
def get_swap_payload_to_be_func_decl(self):
return "void %s(%s *payload)" % (
self.get_swap_payload_to_be_func_name(),
self.get_payload_struct_name())
def get_swap_payload_to_be_func_def(self):
return "%s\n{\n%s\n}" % (
self.get_swap_payload_to_be_func_decl(),
"\n".join([
" %s" % p.get_swap_to_be_code("payload->", "%s" % p.name)
for p in self.fields
if p.needs_byte_swap() and p.type != self.header]),
)
def get_swap_payload_to_host_func_def(self):
return "%s\n{\n%s\n}" % (
self.get_swap_payload_to_host_func_decl(),
"\n".join([
" %s" % p.get_swap_to_host_code("payload->", "%s" % p.name)
for p in self.fields
if p.needs_byte_swap() and p.type != self.header]),
)
def get_swap_to_host_func_name(self):
return "%s_ntoh" % self.get_c_name()
def get_swap_to_be_func_name(self):
return "%s_hton" % self.get_c_name()
def get_swap_to_host_func_decl(self):
return "void %s(%s *msg)" % (
self.get_swap_to_host_func_name(), self.get_c_name())
def get_swap_to_be_func_decl(self):
return "void %s(%s *msg)" % (
self.get_swap_to_be_func_name(), self.get_c_name())
def get_swap_to_be_func_def(self):
return "\n".join([
"%s" % self.get_swap_to_be_func_decl(),
"{",
(" VAPI_DBG(\"Swapping `%s'@%%p to big endian\", msg);" %
self.get_c_name()),
" %s(&msg->header);" % self.header.get_swap_to_be_func_name()
if self.header is not None else "",
" %s(&msg->payload);" % self.get_swap_payload_to_be_func_name()
if self.has_payload() else "",
"}",
])
def get_swap_to_host_func_def(self):
return "\n".join([
"%s" % self.get_swap_to_host_func_decl(),
"{",
(" VAPI_DBG(\"Swapping `%s'@%%p to host byte order\", msg);" %
self.get_c_name()),
" %s(&msg->header);" % self.header.get_swap_to_host_func_name()
if self.header is not None else "",
" %s(&msg->payload);" % self.get_swap_payload_to_host_func_name()
if self.has_payload() else "",
"}",
])
def get_op_func_name(self):
return "vapi_%s" % self.name
def get_op_func_decl(self):
if self.reply.has_payload():
return "vapi_error_e %s(%s)" % (
self.get_op_func_name(),
",\n ".join([
'struct vapi_ctx_s *ctx',
'%s *msg' % self.get_c_name(),
'vapi_error_e (*callback)(struct vapi_ctx_s *ctx',
' void *callback_ctx',
' vapi_error_e rv',
' bool is_last',
' %s *reply)' %
self.reply.get_payload_struct_name(),
'void *callback_ctx',
])
)
else:
return "vapi_error_e %s(%s)" % (
self.get_op_func_name(),
",\n ".join([
'struct vapi_ctx_s *ctx',
'%s *msg' % self.get_c_name(),
'vapi_error_e (*callback)(struct vapi_ctx_s *ctx',
' void *callback_ctx',
' vapi_error_e rv',
' bool is_last)',
'void *callback_ctx',
])
)
def get_op_func_def(self):
return "\n".join([
"%s" % self.get_op_func_decl(),
"{",
" if (!msg || !callback) {",
" return VAPI_EINVAL;",
" }",
" if (vapi_is_nonblocking(ctx) && vapi_requests_full(ctx)) {",
" return VAPI_EAGAIN;",
" }",
" vapi_error_e rv;",
" if (VAPI_OK != (rv = vapi_producer_lock (ctx))) {",
" return rv;",
" }",
" u32 req_context = vapi_gen_req_context(ctx);",
" msg->header.context = req_context;",
" %s(msg);" % self.get_swap_to_be_func_name(),
(" if (VAPI_OK == (rv = vapi_send_with_control_ping "
"(ctx, msg, req_context))) {"
if self.reply_is_stream else
" if (VAPI_OK == (rv = vapi_send (ctx, msg))) {"
),
(" vapi_store_request(ctx, req_context, %s, "
"(vapi_cb_t)callback, callback_ctx);" %
("true" if self.reply_is_stream else "false")),
" if (VAPI_OK != vapi_producer_unlock (ctx)) {",
" abort (); /* this really shouldn't happen */",
" }",
" if (vapi_is_nonblocking(ctx)) {",
" rv = VAPI_OK;",
" } else {",
" rv = vapi_dispatch(ctx);",
" }",
" } else {",
" %s(msg);" % self.get_swap_to_host_func_name(),
" if (VAPI_OK != vapi_producer_unlock (ctx)) {",
" abort (); /* this really shouldn't happen */",
" }",
" }",
" return rv;",
"}",
"",
])
def get_event_cb_func_decl(self):
if not self.is_reply and not self.is_event:
raise Exception(
"Cannot register event callback for non-reply message")
if self.has_payload():
return "\n".join([
"void vapi_set_%s_event_cb (" %
self.get_c_name(),
" struct vapi_ctx_s *ctx, ",
(" vapi_error_e (*callback)(struct vapi_ctx_s *ctx, "
"void *callback_ctx, %s *payload)," %
self.get_payload_struct_name()),
" void *callback_ctx)",
])
else:
return "\n".join([
"void vapi_set_%s_event_cb (" %
self.get_c_name(),
" struct vapi_ctx_s *ctx, ",
" vapi_error_e (*callback)(struct vapi_ctx_s *ctx, "
"void *callback_ctx),",
" void *callback_ctx)",
])
def get_event_cb_func_def(self):
if not self.is_reply and not self.is_event:
raise Exception(
"Cannot register event callback for non-reply function")
return "\n".join([
"%s" % self.get_event_cb_func_decl(),
"{",
(" vapi_set_event_cb(ctx, %s, (vapi_event_cb)callback, "
"callback_ctx);" %
self.get_msg_id_name()),
"}"])
def get_c_metadata_struct_name(self):
return "__vapi_metadata_%s" % self.name
def get_c_constructor(self):
has_context = False
if self.header is not None:
has_context = self.header.has_field('context')
return '\n'.join([
'static void __attribute__((constructor)) __vapi_constructor_%s()'
% self.name,
'{',
' static const char name[] = "%s";' % self.name,
' static const char name_with_crc[] = "%s_%s";'
% (self.name, self.crc[2:]),
' static vapi_message_desc_t %s = {' %
self.get_c_metadata_struct_name(),
' name,',
' sizeof(name) - 1,',
' name_with_crc,',
' sizeof(name_with_crc) - 1,',
' true,' if has_context else ' false,',
' offsetof(%s, context),' % self.header.get_c_name()
if has_context else ' 0,',
(' offsetof(%s, payload),' % self.get_c_name())
if self.has_payload() else ' VAPI_INVALID_MSG_ID,',
' sizeof(%s),' % self.get_c_name(),
' (generic_swap_fn_t)%s,' % self.get_swap_to_be_func_name(),
' (generic_swap_fn_t)%s,' % self.get_swap_to_host_func_name(),
' VAPI_INVALID_MSG_ID,',
' };',
'',
' %s = vapi_register_msg(&%s);' %
(self.get_msg_id_name(), self.get_c_metadata_struct_name()),
' VAPI_DBG("Assigned msg id %%d to %s", %s);' %
(self.name, self.get_msg_id_name()),
'}',
])
vapi_send_with_control_ping = """
static inline vapi_error_e
vapi_send_with_control_ping (vapi_ctx_t ctx, void *msg, u32 context)
{
vapi_msg_control_ping *ping = vapi_alloc_control_ping (ctx);
if (!ping)
{
return VAPI_ENOMEM;
}
ping->header.context = context;
vapi_msg_control_ping_hton (ping);
return vapi_send2 (ctx, msg, ping);
}
"""
def emit_definition(parser, json_file, emitted, o):
if o in emitted:
return
if o.name in ("msg_header1_t", "msg_header2_t"):
return
if hasattr(o, "depends"):
for x in o.depends:
emit_definition(parser, json_file, emitted, x)
if hasattr(o, "reply"):
emit_definition(parser, json_file, emitted, o.reply)
if hasattr(o, "get_c_def"):
if (o not in parser.enums_by_json[json_file] and
o not in parser.types_by_json[json_file] and
o not in parser.unions_by_json[json_file] and
o.name not in parser.messages_by_json[json_file] and
o not in parser.aliases_by_json[json_file]):
return
guard = "defined_%s" % o.get_c_name()
print("#ifndef %s" % guard)
print("#define %s" % guard)
print("%s" % o.get_c_def())
print("")
function_attrs = "static inline "
if o.name in parser.messages_by_json[json_file]:
if o.has_payload():
print("%s%s" % (function_attrs,
o.get_swap_payload_to_be_func_def()))
print("")
print("%s%s" % (function_attrs,
o.get_swap_payload_to_host_func_def()))
print("")
print("%s%s" % (function_attrs, o.get_swap_to_be_func_def()))
print("")
print("%s%s" % (function_attrs, o.get_swap_to_host_func_def()))
print("")
print("%s%s" % (function_attrs, o.get_calc_msg_size_func_def()))
if not o.is_reply and not o.is_event:
print("")
print("%s%s" % (function_attrs, o.get_alloc_func_def()))
print("")
print("%s%s" % (function_attrs, o.get_op_func_def()))
print("")
print("%s" % o.get_c_constructor())
if o.is_reply or o.is_event:
print("")
print("%s%s;" % (function_attrs, o.get_event_cb_func_def()))
elif hasattr(o, "get_swap_to_be_func_def"):
print("%s%s" % (function_attrs, o.get_swap_to_be_func_def()))
print("")
print("%s%s" % (function_attrs, o.get_swap_to_host_func_def()))
print("#endif")
print("")
emitted.append(o)
def gen_json_unified_header(parser, logger, j, io, name):
d, f = os.path.split(j)
logger.info("Generating header `%s'" % name)
orig_stdout = sys.stdout
sys.stdout = io
include_guard = "__included_%s" % (
j.replace(".", "_").replace("/", "_").replace("-", "_").replace("+", "_"))
print("#ifndef %s" % include_guard)
print("#define %s" % include_guard)
print("")
print("#include <stdlib.h>")
print("#include <stddef.h>")
print("#include <arpa/inet.h>")
print("#include <vapi/vapi_internal.h>")
print("#include <vapi/vapi.h>")
print("#include <vapi/vapi_dbg.h>")
print("")
print("#ifdef __cplusplus")
print("extern \"C\" {")
print("#endif")
if name == "memclnt.api.vapi.h":
print("")
print("static inline vapi_error_e vapi_send_with_control_ping "
"(vapi_ctx_t ctx, void * msg, u32 context);")
else:
print("#include <vapi/vlib.api.vapi.h>")
print("")
for m in parser.messages_by_json[j].values():
print("extern vapi_msg_id_t %s;" % m.get_msg_id_name())
print("")
print("#define DEFINE_VAPI_MSG_IDS_%s\\" %
f.replace(".", "_").replace("/", "_").replace("-", "_").upper())
print("\\\n".join([
" vapi_msg_id_t %s;" % m.get_msg_id_name()
for m in parser.messages_by_json[j].values()
]))
print("")
print("")
emitted = []
for e in parser.enums_by_json[j]:
emit_definition(parser, j, emitted, e)
for u in parser.unions_by_json[j]:
emit_definition(parser, j, emitted, u)
for t in parser.types_by_json[j]:
emit_definition(parser, j, emitted, t)
for a in parser.aliases_by_json[j]:
emit_definition(parser, j, emitted, a)
for m in parser.messages_by_json[j].values():
emit_definition(parser, j, emitted, m)
print("")
if name == "vlib.api.vapi.h":
print("%s" % vapi_send_with_control_ping)
print("")
print("#ifdef __cplusplus")
print("}")
print("#endif")
print("")
print("#endif")
sys.stdout = orig_stdout
def json_to_c_header_name(json_name):
if json_name.endswith(".json"):
return "%s.vapi.h" % os.path.splitext(json_name)[0]
raise Exception("Unexpected json name `%s'!" % json_name)
def gen_c_unified_headers(parser, logger, prefix, remove_path):
if prefix == "" or prefix is None:
prefix = ""
else:
prefix = "%s/" % prefix
for j in parser.json_files:
if remove_path:
d, f = os.path.split(j)
else:
f = j
with open('%s%s' % (prefix, json_to_c_header_name(f)), "w") as io:
gen_json_unified_header(
parser, logger, j, io, json_to_c_header_name(f))
if __name__ == '__main__':
try:
verbose = int(os.getenv("V", 0))
    except ValueError:
verbose = 0
if verbose >= 2:
log_level = 10
elif verbose == 1:
log_level = 20
else:
log_level = 40
logging.basicConfig(stream=sys.stdout, level=log_level)
logger = logging.getLogger("VAPI C GEN")
logger.setLevel(log_level)
argparser = argparse.ArgumentParser(description="VPP C API generator")
argparser.add_argument('files', metavar='api-file', action='append',
type=str, help='json api file'
'(may be specified multiple times)')
argparser.add_argument('--prefix', action='store', default=None,
help='path prefix')
argparser.add_argument('--remove-path', action='store_true',
help='remove path from filename')
args = argparser.parse_args()
jsonparser = JsonParser(logger, args.files,
simple_type_class=CSimpleType,
enum_class=CEnum,
union_class=CUnion,
struct_type_class=CStructType,
field_class=CField,
message_class=CMessage,
alias_class=CAlias)
# not using the model of having separate generated header and code files
# with generated symbols present in shared library (per discussion with
# Damjan), to avoid symbol version issues in .so
# gen_c_headers_and_code(jsonparser, logger, args.prefix)
gen_c_unified_headers(jsonparser, logger, args.prefix, args.remove_path)
for e in jsonparser.exceptions:
logger.warning(e)
|
py
|
1a5666d99c6225eb7059ac325c050a3dcd7cb76e
|
import pickle
from graph_features import GraphFeatures
import numpy as np
from loggers import BaseLogger, PrintLogger
import os
MOTIFS_VAR_PATH = os.path.join(__file__.rsplit(os.sep, 1)[0])
class MotifRatio:
def __init__(self, ftr: GraphFeatures, is_directed, logger: BaseLogger=None):
self._is_directed = is_directed # are the graphs directed
self._index_ftr = None # list of ftr names + counter [ ... (ftr_i, 0), (ftr_i, 1) ...]
self._logger = logger if logger else PrintLogger("graphs logger")
# self._graph_order = graph_order if graph_order else [g for g in sorted(graph_ftr_dict)]
self._gnx_ftr = ftr
self._set_index_to_ftr(self._gnx_ftr)
        # map motif index to the number of edges in that motif
self._motif_index_to_edge_num = {"motif3": self._motif_num_to_number_of_edges(3),
"motif4": self._motif_num_to_number_of_edges(4)}
self._ftr_mx = self._gnx_ftr.to_matrix(dtype=np.float32, mtype=np.matrix, should_zscore=False)
self._headers = []
self._motif_ratio_vec = None
self._motif_ratio_matrix = None
# load motif variation file
def _load_variations_file(self, level):
fname = "%d_%sdirected.pkl" % (level, "" if self._is_directed else "un")
fpath = os.path.join(MOTIFS_VAR_PATH, "motif_variations", fname)
return pickle.load(open(fpath, "rb"))
# return dictionary { motif_index: number_of_edges }
def _motif_num_to_number_of_edges(self, level):
motif_edge_num_dict = {}
for bit_sec, motif_num in self._load_variations_file(level).items():
motif_edge_num_dict[motif_num] = bin(bit_sec).count('1')
return motif_edge_num_dict
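    # Illustrative (hypothetical variation-file entry): if bit_sec 0b1011 maps
    # to motif_num 5, then motif_edge_num_dict[5] == 3, because
    # bin(0b1011).count('1') == 3 set bits == 3 edges in the motif.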
    # map matrix columns to features + count if there's more than one from a single feature
def _set_index_to_ftr(self, gnx_ftr):
if not self._index_ftr:
sorted_ftr = [f for f in sorted(gnx_ftr) if gnx_ftr[f].is_relevant()] # fix feature order (names)
self._index_ftr = []
for ftr in sorted_ftr:
len_ftr = len(gnx_ftr[ftr])
# fill list with (ftr, counter)
self._index_ftr += [(ftr, i) for i in range(len_ftr)]
# get feature vector for a graph
def _build_vector(self):
        # build the aggregated feature vector for the graph
final_vec = np.zeros((1, self._ftr_mx.shape[1]))
motif3_ratio = None
motif4_ratio = None
for i, (ftr, ftr_count) in enumerate(self._index_ftr):
if ftr == "motif3":
# calculate { motif_index: motif ratio }
motif3_ratio = self._count_subgraph_motif_by_size(self._ftr_mx, ftr) if not motif3_ratio else motif3_ratio
final_vec[0, i] = motif3_ratio[ftr_count]
self._headers.append("motif3_" + str(self._motif_index_to_edge_num["motif3"][ftr_count]) + "_edges")
elif ftr == "motif4":
# calculate { motif_index: motif ratio }
motif4_ratio = self._count_subgraph_motif_by_size(self._ftr_mx, ftr) if not motif4_ratio else motif4_ratio
final_vec[0, i] = motif4_ratio[ftr_count]
self._headers.append("motif4_" + str(self._motif_index_to_edge_num["motif4"][ftr_count]) + "_edges")
else:
# calculate average of column
final_vec[0, i] = np.sum(self._ftr_mx[:, i]) / self._ftr_mx.shape[0]
self._headers.append(ftr + "_" + str(ftr_count))
return final_vec
def _build_matrix(self):
sum_dictionaries_motifs3 = []
sum_dictionaries_motifs4 = []
# 3: [ ... row(node): { num_edges_in_motif3: count (for_this_node_only) } ... ]
for i in range(self._ftr_mx.shape[0]):
sum_dictionaries_motifs3.append({})
sum_dictionaries_motifs4.append({})
for j, (ftr, ftr_count) in enumerate(self._index_ftr):
if ftr == "motif3":
key = self._motif_index_to_edge_num[ftr][ftr_count]
sum_dictionaries_motifs3[i][key] = sum_dictionaries_motifs3[i].get(key, 1e-3) + self._ftr_mx[i, j]
elif ftr == "motif4":
key = self._motif_index_to_edge_num[ftr][ftr_count]
sum_dictionaries_motifs4[i][key] = sum_dictionaries_motifs4[i].get(key, 1e-3) + self._ftr_mx[i, j]
return_mx = self._ftr_mx.copy()
for i in range(self._ftr_mx.shape[0]):
for j, (ftr, ftr_count) in enumerate(self._index_ftr):
if ftr == "motif3":
# calculate { motif_index: motif ratio }
return_mx[i, j] /= sum_dictionaries_motifs3[i][self._motif_index_to_edge_num[ftr][ftr_count]]
if i == 0:
self._headers.append("motif3_" + str(self._motif_index_to_edge_num["motif3"][ftr_count]) + "_edges")
elif ftr == "motif4":
# calculate { motif_index: motif ratio }
return_mx[i, j] /= sum_dictionaries_motifs4[i][self._motif_index_to_edge_num[ftr][ftr_count]]
if i == 0:
self._headers.append("motif4_" + str(self._motif_index_to_edge_num["motif4"][ftr_count]) + "_edges")
else:
if i == 0:
self._headers.append(ftr + "_" + str(ftr_count))
return return_mx
    def motif_ratio_vector(self):
        # NB: compare against None - truth-testing a NumPy array/matrix raises ValueError
        if self._motif_ratio_vec is None:
            self._motif_ratio_vec = self._build_vector()
        return self._motif_ratio_vec[0]
    def motif_ratio_matrix(self):
        if self._motif_ratio_matrix is None:
            self._motif_ratio_matrix = self._build_matrix()
        return self._motif_ratio_matrix
def get_headers(self):
return self._headers
# return { motif_index: sum motif in index/ total motifs with same edge count }
def _count_subgraph_motif_by_size(self, ftr_mat, motif_type):
sum_dict = {ftr_count: np.sum(ftr_mat[:, i]) for i, (ftr, ftr_count) in enumerate(self._index_ftr)
if ftr == motif_type} # dictionary { motif_index: sum column }
        sum_by_edge = {}  # dictionary { num_edges_in_motif: total sum over motifs with that edge count }
for motif_count, sum_motif in sum_dict.items():
key = self._motif_index_to_edge_num[motif_type][motif_count]
sum_by_edge[key] = sum_by_edge.get(key, 0) + sum_motif
# rewrite dictionary { motif_index: sum column/ total motifs with same edge count }
for motif_count in sum_dict:
key = self._motif_index_to_edge_num[motif_type][motif_count]
sum_dict[motif_count] = sum_dict[motif_count] / sum_by_edge[key] if sum_by_edge[key] else 0
return sum_dict
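    # Illustrative (hypothetical column sums): if the motif3 columns sum to
    # {0: 2.0, 1: 6.0} and both motif indices have 2 edges, then
    # sum_by_edge == {2: 8.0} and the returned dict is {0: 0.25, 1: 0.75}.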
# return [ ... (motif_type, counter) ... ]
def _get_motif_type(self, motif_type, num_motifs):
header = []
for i in range(num_motifs):
header.append((motif_type, i))
return header
@staticmethod
def is_motif(ftr):
return ftr == 'motif4' or ftr == "motif3"
if __name__ == "__main__":
import networkx as nx
from feature_meta import NODE_FEATURES
gnx = nx.Graph()
gnx.add_edges_from([
(1, 2),
(1, 3),
(2, 3),
(2, 7),
(7, 8),
(3, 6),
(4, 6),
(6, 8),
(5, 6),
])
gnx_ftr = GraphFeatures(gnx, NODE_FEATURES, ".", is_max_connected=True)
gnx_ftr.build()
m = MotifRatio(gnx_ftr, False)
e = 0
|
py
|
1a56684c2e4fce374fe0480e4ca982bdb1b5066e
|
from datetime import datetime
from typing import Dict, List, Optional, Tuple # noqa
from sebs.cache import Cache
from sebs.faas.config import Config as DeploymentConfig
from sebs.faas.function import Function, ExecutionResult
from sebs.utils import LoggingHandlers
from sebs.experiments.config import Config as ExperimentConfig
class Result:
def __init__(
self,
experiment_config: ExperimentConfig,
deployment_config: DeploymentConfig,
invocations: Optional[Dict[str, Dict[str, ExecutionResult]]] = None,
metrics: Optional[Dict[str, dict]] = None,
result_bucket: Optional[str] = None,
):
self.config = {
"experiments": experiment_config,
"deployment": deployment_config,
}
if not invocations:
self._invocations = {}
else:
self._invocations = invocations
if not metrics:
self._metrics = {}
else:
self._metrics = metrics
self.result_bucket = result_bucket
def begin(self):
self.begin_time = datetime.now().timestamp()
def end(self):
self.end_time = datetime.now().timestamp()
    def times(self) -> Tuple[float, float]:
return self.begin_time, self.end_time
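    # Minimal usage sketch (hypothetical call sites):
    #   result.begin(); ... run invocations ...; result.end()
    #   start, stop = result.times()  # POSIX timestamps as floats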
def add_result_bucket(self, result_bucket: str):
self.result_bucket = result_bucket
def add_invocation(self, func: Function, invocation: ExecutionResult):
if func.name in self._invocations:
self._invocations.get(func.name)[invocation.request_id] = invocation # type: ignore
else:
self._invocations[func.name] = {invocation.request_id: invocation}
def functions(self) -> List[str]:
return list(self._invocations.keys())
def invocations(self, func: str) -> Dict[str, ExecutionResult]:
return self._invocations[func]
def metrics(self, func: str) -> dict:
if func not in self._metrics:
self._metrics[func] = {}
return self._metrics[func]
@staticmethod
def deserialize(cached_config: dict, cache: Cache, handlers: LoggingHandlers) -> "Result":
invocations: Dict[str, dict] = {}
for func, func_invocations in cached_config["_invocations"].items():
invocations[func] = {}
for invoc_id, invoc in func_invocations.items():
invocations[func][invoc_id] = ExecutionResult.deserialize(invoc)
ret = Result(
ExperimentConfig.deserialize(cached_config["config"]["experiments"]),
DeploymentConfig.deserialize(cached_config["config"]["deployment"], cache, handlers),
invocations,
# FIXME: compatibility with old results
cached_config["metrics"] if "metrics" in cached_config else {},
cached_config["result_bucket"],
)
ret.begin_time = cached_config["begin_time"]
ret.end_time = cached_config["end_time"]
return ret
|
py
|
1a56686332e75fc577fb6952b2d660a8ca0544a0
|
#!/usr/bin/env python3
# Copyright (c) 2017 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test mempool acceptance of raw transactions."""
from io import BytesIO
from test_framework.test_framework import SthcoinTestFramework
from test_framework.messages import (
BIP125_SEQUENCE_NUMBER,
COIN,
COutPoint,
CTransaction,
CTxOut,
MAX_BLOCK_BASE_SIZE,
)
from test_framework.script import (
hash160,
CScript,
OP_0,
OP_EQUAL,
OP_HASH160,
OP_RETURN,
)
from test_framework.util import (
assert_equal,
assert_raises_rpc_error,
bytes_to_hex_str,
hex_str_to_bytes,
wait_until,
)
class MempoolAcceptanceTest(SthcoinTestFramework):
def set_test_params(self):
self.num_nodes = 1
self.extra_args = [[
'-txindex',
'-reindex', # Need reindex for txindex
'-acceptnonstdtxn=0', # Try to mimic main-net
]] * self.num_nodes
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
def check_mempool_result(self, result_expected, *args, **kwargs):
"""Wrapper to check result of testmempoolaccept on node_0's mempool"""
result_test = self.nodes[0].testmempoolaccept(*args, **kwargs)
assert_equal(result_expected, result_test)
assert_equal(self.nodes[0].getmempoolinfo()['size'], self.mempool_size) # Must not change mempool state
def run_test(self):
node = self.nodes[0]
self.log.info('Start with empty mempool, and 200 blocks')
self.mempool_size = 0
wait_until(lambda: node.getblockcount() == 200)
assert_equal(node.getmempoolinfo()['size'], self.mempool_size)
self.log.info('Should not accept garbage to testmempoolaccept')
assert_raises_rpc_error(-3, 'Expected type array, got string', lambda: node.testmempoolaccept(rawtxs='ff00baar'))
assert_raises_rpc_error(-8, 'Array must contain exactly one raw transaction for now', lambda: node.testmempoolaccept(rawtxs=['ff00baar', 'ff22']))
assert_raises_rpc_error(-22, 'TX decode failed', lambda: node.testmempoolaccept(rawtxs=['ff00baar']))
self.log.info('A transaction already in the blockchain')
coin = node.listunspent()[0] # Pick a random coin(base) to spend
raw_tx_in_block = node.signrawtransactionwithwallet(node.createrawtransaction(
inputs=[{'txid': coin['txid'], 'vout': coin['vout']}],
outputs=[{node.getnewaddress(): 0.3}, {node.getnewaddress(): 49}],
))['hex']
txid_in_block = node.sendrawtransaction(hexstring=raw_tx_in_block, allowhighfees=True)
node.generate(1)
self.check_mempool_result(
result_expected=[{'txid': txid_in_block, 'allowed': False, 'reject-reason': '18: txn-already-known'}],
rawtxs=[raw_tx_in_block],
)
self.log.info('A transaction not in the mempool')
fee = 0.00000700
raw_tx_0 = node.signrawtransactionwithwallet(node.createrawtransaction(
inputs=[{"txid": txid_in_block, "vout": 0, "sequence": BIP125_SEQUENCE_NUMBER}], # RBF is used later
outputs=[{node.getnewaddress(): 0.3 - fee}],
))['hex']
tx = CTransaction()
tx.deserialize(BytesIO(hex_str_to_bytes(raw_tx_0)))
txid_0 = tx.rehash()
self.check_mempool_result(
result_expected=[{'txid': txid_0, 'allowed': True}],
rawtxs=[raw_tx_0],
)
self.log.info('A transaction in the mempool')
node.sendrawtransaction(hexstring=raw_tx_0)
self.mempool_size = 1
self.check_mempool_result(
result_expected=[{'txid': txid_0, 'allowed': False, 'reject-reason': '18: txn-already-in-mempool'}],
rawtxs=[raw_tx_0],
)
self.log.info('A transaction that replaces a mempool transaction')
tx.deserialize(BytesIO(hex_str_to_bytes(raw_tx_0)))
tx.vout[0].nValue -= int(fee * COIN) # Double the fee
tx.vin[0].nSequence = BIP125_SEQUENCE_NUMBER + 1 # Now, opt out of RBF
raw_tx_0 = node.signrawtransactionwithwallet(bytes_to_hex_str(tx.serialize()))['hex']
tx.deserialize(BytesIO(hex_str_to_bytes(raw_tx_0)))
txid_0 = tx.rehash()
self.check_mempool_result(
result_expected=[{'txid': txid_0, 'allowed': True}],
rawtxs=[raw_tx_0],
)
self.log.info('A transaction that conflicts with an unconfirmed tx')
# Send the transaction that replaces the mempool transaction and opts out of replaceability
node.sendrawtransaction(hexstring=bytes_to_hex_str(tx.serialize()), allowhighfees=True)
# take original raw_tx_0
tx.deserialize(BytesIO(hex_str_to_bytes(raw_tx_0)))
tx.vout[0].nValue -= int(4 * fee * COIN) # Set more fee
# skip re-signing the tx
self.check_mempool_result(
result_expected=[{'txid': tx.rehash(), 'allowed': False, 'reject-reason': '18: txn-mempool-conflict'}],
rawtxs=[bytes_to_hex_str(tx.serialize())],
allowhighfees=True,
)
self.log.info('A transaction with missing inputs, that never existed')
tx.deserialize(BytesIO(hex_str_to_bytes(raw_tx_0)))
tx.vin[0].prevout = COutPoint(hash=int('ff' * 32, 16), n=14)
# skip re-signing the tx
self.check_mempool_result(
result_expected=[{'txid': tx.rehash(), 'allowed': False, 'reject-reason': 'missing-inputs'}],
rawtxs=[bytes_to_hex_str(tx.serialize())],
)
self.log.info('A transaction with missing inputs, that existed once in the past')
tx.deserialize(BytesIO(hex_str_to_bytes(raw_tx_0)))
tx.vin[0].prevout.n = 1 # Set vout to 1, to spend the other outpoint (49 coins) of the in-chain-tx we want to double spend
raw_tx_1 = node.signrawtransactionwithwallet(bytes_to_hex_str(tx.serialize()))['hex']
txid_1 = node.sendrawtransaction(hexstring=raw_tx_1, allowhighfees=True)
        # Now spend both to "clearly hide" the outputs, i.e. remove the coins from the utxo set by spending them
raw_tx_spend_both = node.signrawtransactionwithwallet(node.createrawtransaction(
inputs=[
{'txid': txid_0, 'vout': 0},
{'txid': txid_1, 'vout': 0},
],
outputs=[{node.getnewaddress(): 0.1}]
))['hex']
txid_spend_both = node.sendrawtransaction(hexstring=raw_tx_spend_both, allowhighfees=True)
node.generate(1)
self.mempool_size = 0
# Now see if we can add the coins back to the utxo set by sending the exact txs again
self.check_mempool_result(
result_expected=[{'txid': txid_0, 'allowed': False, 'reject-reason': 'missing-inputs'}],
rawtxs=[raw_tx_0],
)
self.check_mempool_result(
result_expected=[{'txid': txid_1, 'allowed': False, 'reject-reason': 'missing-inputs'}],
rawtxs=[raw_tx_1],
)
self.log.info('Create a signed "reference" tx for later use')
raw_tx_reference = node.signrawtransactionwithwallet(node.createrawtransaction(
inputs=[{'txid': txid_spend_both, 'vout': 0}],
outputs=[{node.getnewaddress(): 0.05}],
))['hex']
tx.deserialize(BytesIO(hex_str_to_bytes(raw_tx_reference)))
# Reference tx should be valid on itself
self.check_mempool_result(
result_expected=[{'txid': tx.rehash(), 'allowed': True}],
rawtxs=[bytes_to_hex_str(tx.serialize())],
)
self.log.info('A transaction with no outputs')
tx.deserialize(BytesIO(hex_str_to_bytes(raw_tx_reference)))
tx.vout = []
# Skip re-signing the transaction for context independent checks from now on
# tx.deserialize(BytesIO(hex_str_to_bytes(node.signrawtransactionwithwallet(bytes_to_hex_str(tx.serialize()))['hex'])))
self.check_mempool_result(
result_expected=[{'txid': tx.rehash(), 'allowed': False, 'reject-reason': '16: bad-txns-vout-empty'}],
rawtxs=[bytes_to_hex_str(tx.serialize())],
)
self.log.info('A really large transaction')
tx.deserialize(BytesIO(hex_str_to_bytes(raw_tx_reference)))
tx.vin = [tx.vin[0]] * (MAX_BLOCK_BASE_SIZE // len(tx.vin[0].serialize()))
self.check_mempool_result(
result_expected=[{'txid': tx.rehash(), 'allowed': False, 'reject-reason': '16: bad-txns-oversize'}],
rawtxs=[bytes_to_hex_str(tx.serialize())],
)
self.log.info('A transaction with negative output value')
tx.deserialize(BytesIO(hex_str_to_bytes(raw_tx_reference)))
tx.vout[0].nValue *= -1
self.check_mempool_result(
result_expected=[{'txid': tx.rehash(), 'allowed': False, 'reject-reason': '16: bad-txns-vout-negative'}],
rawtxs=[bytes_to_hex_str(tx.serialize())],
)
self.log.info('A transaction with too large output value')
tx.deserialize(BytesIO(hex_str_to_bytes(raw_tx_reference)))
tx.vout[0].nValue = 21000000 * COIN + 1
self.check_mempool_result(
result_expected=[{'txid': tx.rehash(), 'allowed': False, 'reject-reason': '16: bad-txns-vout-toolarge'}],
rawtxs=[bytes_to_hex_str(tx.serialize())],
)
self.log.info('A transaction with too large sum of output values')
tx.deserialize(BytesIO(hex_str_to_bytes(raw_tx_reference)))
tx.vout = [tx.vout[0]] * 2
tx.vout[0].nValue = 21000000 * COIN
self.check_mempool_result(
result_expected=[{'txid': tx.rehash(), 'allowed': False, 'reject-reason': '16: bad-txns-txouttotal-toolarge'}],
rawtxs=[bytes_to_hex_str(tx.serialize())],
)
self.log.info('A transaction with duplicate inputs')
tx.deserialize(BytesIO(hex_str_to_bytes(raw_tx_reference)))
tx.vin = [tx.vin[0]] * 2
self.check_mempool_result(
result_expected=[{'txid': tx.rehash(), 'allowed': False, 'reject-reason': '16: bad-txns-inputs-duplicate'}],
rawtxs=[bytes_to_hex_str(tx.serialize())],
)
self.log.info('A coinbase transaction')
# Pick the input of the first tx we signed, so it has to be a coinbase tx
raw_tx_coinbase_spent = node.getrawtransaction(txid=node.decoderawtransaction(hexstring=raw_tx_in_block)['vin'][0]['txid'])
tx.deserialize(BytesIO(hex_str_to_bytes(raw_tx_coinbase_spent)))
self.check_mempool_result(
result_expected=[{'txid': tx.rehash(), 'allowed': False, 'reject-reason': '16: coinbase'}],
rawtxs=[bytes_to_hex_str(tx.serialize())],
)
self.log.info('Some nonstandard transactions')
tx.deserialize(BytesIO(hex_str_to_bytes(raw_tx_reference)))
tx.nVersion = 3 # A version currently non-standard
self.check_mempool_result(
result_expected=[{'txid': tx.rehash(), 'allowed': False, 'reject-reason': '64: version'}],
rawtxs=[bytes_to_hex_str(tx.serialize())],
)
tx.deserialize(BytesIO(hex_str_to_bytes(raw_tx_reference)))
tx.vout[0].scriptPubKey = CScript([OP_0]) # Some non-standard script
self.check_mempool_result(
result_expected=[{'txid': tx.rehash(), 'allowed': False, 'reject-reason': '64: scriptpubkey'}],
rawtxs=[bytes_to_hex_str(tx.serialize())],
)
tx.deserialize(BytesIO(hex_str_to_bytes(raw_tx_reference)))
tx.vin[0].scriptSig = CScript([OP_HASH160]) # Some not-pushonly scriptSig
self.check_mempool_result(
result_expected=[{'txid': tx.rehash(), 'allowed': False, 'reject-reason': '64: scriptsig-not-pushonly'}],
rawtxs=[bytes_to_hex_str(tx.serialize())],
)
tx.deserialize(BytesIO(hex_str_to_bytes(raw_tx_reference)))
output_p2sh_burn = CTxOut(nValue=540, scriptPubKey=CScript([OP_HASH160, hash160(b'burn'), OP_EQUAL]))
num_scripts = 100000 // len(output_p2sh_burn.serialize()) # Use enough outputs to make the tx too large for our policy
tx.vout = [output_p2sh_burn] * num_scripts
self.check_mempool_result(
result_expected=[{'txid': tx.rehash(), 'allowed': False, 'reject-reason': '64: tx-size'}],
rawtxs=[bytes_to_hex_str(tx.serialize())],
)
tx.deserialize(BytesIO(hex_str_to_bytes(raw_tx_reference)))
tx.vout[0] = output_p2sh_burn
tx.vout[0].nValue -= 1 # Make output smaller, such that it is dust for our policy
self.check_mempool_result(
result_expected=[{'txid': tx.rehash(), 'allowed': False, 'reject-reason': '64: dust'}],
rawtxs=[bytes_to_hex_str(tx.serialize())],
)
tx.deserialize(BytesIO(hex_str_to_bytes(raw_tx_reference)))
tx.vout[0].scriptPubKey = CScript([OP_RETURN, b'\xff'])
tx.vout = [tx.vout[0]] * 2
self.check_mempool_result(
result_expected=[{'txid': tx.rehash(), 'allowed': False, 'reject-reason': '64: multi-op-return'}],
rawtxs=[bytes_to_hex_str(tx.serialize())],
)
self.log.info('A timelocked transaction')
tx.deserialize(BytesIO(hex_str_to_bytes(raw_tx_reference)))
tx.vin[0].nSequence -= 1 # Should be non-max, so locktime is not ignored
tx.nLockTime = node.getblockcount() + 1
self.check_mempool_result(
result_expected=[{'txid': tx.rehash(), 'allowed': False, 'reject-reason': '64: non-final'}],
rawtxs=[bytes_to_hex_str(tx.serialize())],
)
self.log.info('A transaction that is locked by BIP68 sequence logic')
tx.deserialize(BytesIO(hex_str_to_bytes(raw_tx_reference)))
tx.vin[0].nSequence = 2 # We could include it in the second block mined from now, but not the very next one
# Can skip re-signing the tx because of early rejection
self.check_mempool_result(
result_expected=[{'txid': tx.rehash(), 'allowed': False, 'reject-reason': '64: non-BIP68-final'}],
rawtxs=[bytes_to_hex_str(tx.serialize())],
allowhighfees=True,
)
if __name__ == '__main__':
MempoolAcceptanceTest().main()
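# ---------------------------------------------------------------------------
# For reference, a minimal sketch of the RPC shape this test exercises
# (hedged; the result fields mirror the result_expected dicts used above):
#
#   node.testmempoolaccept(rawtxs=[raw_tx_hex], allowhighfees=True)
#   # -> [{'txid': ..., 'allowed': False, 'reject-reason': '16: ...'}]
# ---------------------------------------------------------------------------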
|
py
|
1a5668d9e70d8c7b4f3eeeac7794025f91712b6a
|
from collections import Counter
from collections import OrderedDict
players = ['Mike', 'Chris', 'Arnold']
standings = OrderedDict([(player, Counter()) for player in players])
print('standings:', standings)
standings['Mike']['game_played'] += 1
standings['Mike']['score'] = 2
standings['Mike']['game_played'] += 1
standings['Mike']['score'] = 3
standings['Arnold']['game_played'] += 1
standings['Arnold']['score'] = 5
standings['Chris']['game_played'] += 1
standings['Chris']['score'] = 5
rank = 1
print("standings.items:", standings.items())
standings_with_index = enumerate(standings.items())
print("standings.items.enum:", standings_with_index)
# Note: the Counter keys above are 'game_played', so that key must be used
# here as well (a mismatched key would silently read 0 from the Counter)
ranks = [(-counter['score'], counter['game_played'], i, name)
         for i, (name, counter) in enumerate(standings.items())]
print("ranks", ranks)
print("Winner: {}".format(sorted(ranks)[rank - 1][3]))
|
py
|
1a5668e78097ac1653261c8a43a730d0d1fe5e74
|
# Copyright (C) 2017 Beijing Didi Infinity Technology and Development Co.,Ltd.
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""This model extracts pitch features per frame."""
import delta.compat as tf
from core.ops import py_x_ops
from delta.utils.hparam import HParams
from delta.data.frontend.base_frontend import BaseFrontend
class Pitch(BaseFrontend):
"""
Compute pitch features of every frame in speech, return a float tensor
with size (num_frames, 2).
"""
def __init__(self, config: dict):
super().__init__(config)
@classmethod
def params(cls, config=None):
"""
Set params.
:param config: contains twenty optional parameters:
--sample_rate : Waveform data sample frequency (must match the waveform
file, if specified there). (float, default = 16000)
--delta-pitch : Smallest relative change in pitch that our algorithm
measures (float, default = 0.005)
--window_length : Frame length in seconds (float, default = 0.025)
--frame_length : Frame shift in seconds (float, default = 0.010)
--frames-per-chunk : Only relevant for offline pitch extraction (e.g.
compute-kaldi-pitch-feats), you can set it to a small
nonzero value, such as 10, for better feature
compatibility with online decoding (affects energy
normalization in the algorithm) (int, default = 0)
--lowpass-cutoff : cutoff frequency for LowPass filter (Hz).
(float, default = 1000)
--lowpass-filter-width : Integer that determines filter width of lowpass filter,
more gives sharper filter (int, default = 1)
--max-f0 : max. F0 to search for (Hz) (float, default = 400)
--max-frames-latency : Maximum number of frames of latency that we allow pitch
                       tracking to introduce into the feature processing
                       (affects output only if --frames-per-chunk > 0 and
                       --simulate-first-pass-online=true). (int, default = 0)
--min-f0 : min. F0 to search for (Hz) (float, default = 50)
--nccf-ballast : Increasing this factor reduces NCCF for quiet frames.
(float, default = 7000)
--nccf-ballast-online : This is useful mainly for debug; it affects how the NCCF
ballast is computed. (bool, default = false)
--penalty-factor : cost factor for FO change. (float, default = 0.1)
--preemphasis-coefficient : Coefficient for use in signal preemphasis (deprecated).
(float, default = 0)
--recompute-frame : Only relevant for online pitch extraction, or for
compatibility with online pitch extraction. A
non-critical parameter; the frame at which we recompute
some of the forward pointers, after revising our
estimate of the signal energy. Relevant
if --frames-per-chunk > 0. (int, default = 500)
--resample-frequency : Frequency that we down-sample the signal to. Must be
more than twice lowpass-cutoff (float, default = 4000)
--simulate-first-pass-online : If true, compute-kaldi-pitch-feats will output features
that correspond to what an online decoder would see in
the first pass of decoding-- not the final version of
the features, which is the default. Relevant if
--frames-per-chunk > 0 (bool, default = false)
--snip-edges : If this is set to false, the incomplete frames near the
ending edge won't be snipped, so that the number of
frames is the file size divided by the frame-shift.
This makes different types of features give the same
number of frames. (bool, default = true)
--soft-min-f0 : Minimum f0, applied in soft way, must not exceed min-f0.
(float, default = 10)
--upsample-filter-width : Integer that determines filter width when upsampling
NCCF. (int, default = 5)
:return: An object of class HParams, which is a set of hyperparameters as name-value pairs.
"""
hparams = HParams(cls=cls)
window_length = 0.025
frame_length = 0.010
sample_rate = 16000
snip_edges = True
preemph_coeff = 0.0
min_f0 = 50.0
max_f0 = 400.0
soft_min_f0 = 10.0
penalty_factor = 0.1
lowpass_cutoff = 1000.0
resample_freq = 4000.0
delta_pitch = 0.005
nccf_ballast = 7000.0
lowpass_filter_width = 1
upsample_filter_width = 5
max_frames_latency = 0
frames_per_chunk = 0
simulate_first_pass_online = False
recompute_frame = 500
nccf_ballast_online = False
hparams.add_hparam('window_length', window_length)
hparams.add_hparam('frame_length', frame_length)
hparams.add_hparam('sample_rate', sample_rate)
hparams.add_hparam('snip_edges', snip_edges)
hparams.add_hparam('preemph_coeff', preemph_coeff)
hparams.add_hparam('min_f0', min_f0)
hparams.add_hparam('max_f0', max_f0)
hparams.add_hparam('soft_min_f0', soft_min_f0)
hparams.add_hparam('penalty_factor', penalty_factor)
hparams.add_hparam('lowpass_cutoff', lowpass_cutoff)
hparams.add_hparam('resample_freq', resample_freq)
hparams.add_hparam('delta_pitch', delta_pitch)
hparams.add_hparam('nccf_ballast', nccf_ballast)
hparams.add_hparam('lowpass_filter_width', lowpass_filter_width)
hparams.add_hparam('upsample_filter_width', upsample_filter_width)
hparams.add_hparam('max_frames_latency', max_frames_latency)
hparams.add_hparam('frames_per_chunk', frames_per_chunk)
hparams.add_hparam('simulate_first_pass_online', simulate_first_pass_online)
hparams.add_hparam('recompute_frame', recompute_frame)
hparams.add_hparam('nccf_ballast_online', nccf_ballast_online)
if config is not None:
hparams.override_from_dict(config)
return hparams
def call(self, audio_data, sample_rate=None):
"""
Calculate pitch features of audio data.
:param audio_data: the audio signal from which to compute pitch features.
                   Should be a (1, N) tensor.
:param sample_rate: the sample rate of the signal we are working with.
:return: A float tensor of size (num_frames, 2) containing the
         pitch and POV features of every frame in speech.
"""
p = self.config
with tf.name_scope('pitch'):
if sample_rate is None:
sample_rate = tf.constant(p.sample_rate, dtype=tf.int32)
else:
if not tf.is_tensor(sample_rate):
sample_rate = tf.convert_to_tensor(sample_rate)
pitch = py_x_ops.pitch(
audio_data,
sample_rate,
window_length=p.window_length,
frame_length=p.frame_length,
snip_edges=p.snip_edges,
preemph_coeff=p.preemph_coeff,
min_f0=p.min_f0,
max_f0=p.max_f0,
soft_min_f0=p.soft_min_f0,
penalty_factor=p.penalty_factor,
lowpass_cutoff=p.lowpass_cutoff,
resample_freq=p.resample_freq,
delta_pitch=p.delta_pitch,
nccf_ballast=p.nccf_ballast,
lowpass_filter_width=p.lowpass_filter_width,
upsample_filter_width=p.upsample_filter_width,
max_frames_latency=p.max_frames_latency,
frames_per_chunk=p.frames_per_chunk,
simulate_first_pass_online=p.simulate_first_pass_online,
recompute_frame=p.recompute_frame,
nccf_ballast_online=p.nccf_ballast_online)
return pitch
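# ---------------------------------------------------------------------------
# Usage sketch (illustrative, not part of the original module): builds the
# frontend from params() as in other delta frontends and feeds it a (1, N)
# float waveform tensor; the zero-filled input is a placeholder for speech.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
  config = {'window_length': 0.025, 'frame_length': 0.010}
  pitch_frontend = Pitch(Pitch.params(config))
  # one second of (silent) 16 kHz audio as a stand-in for real input
  audio_data = tf.zeros([1, 16000], dtype=tf.float32)
  pitch_features = pitch_frontend.call(audio_data, sample_rate=16000)
  print(pitch_features.shape)  # expected: (num_frames, 2)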
|
py
|
1a566a3c232ee6aea79cb6c01c58baa12b928a71
|
# Test the runpy module
import contextlib
import importlib.machinery, importlib.util
import os.path
import pathlib
import py_compile
import re
import signal
import subprocess
import sys
import tempfile
import textwrap
import unittest
import warnings
from test.support import no_tracing, verbose
from test.support.import_helper import forget, make_legacy_pyc, unload
from test.support.os_helper import create_empty_file, temp_dir
from test.support.script_helper import make_script, make_zip_script
import runpy
from runpy import _run_code, _run_module_code, run_module, run_path
# Note: This module can't safely test _run_module_as_main as it
# runs its tests in the current process, which would mess with the
# real __main__ module (usually test.regrtest)
# See test_cmd_line_script for a test that executes that code path
# Set up the test code and expected results
example_source = """\
# Check basic code execution
result = ['Top level assignment']
def f():
result.append('Lower level reference')
f()
del f
# Check the sys module
import sys
run_argv0 = sys.argv[0]
run_name_in_sys_modules = __name__ in sys.modules
module_in_sys_modules = (run_name_in_sys_modules and
globals() is sys.modules[__name__].__dict__)
# Check nested operation
import runpy
nested = runpy._run_module_code('x=1\\n', mod_name='<run>')
"""
implicit_namespace = {
"__name__": None,
"__file__": None,
"__cached__": None,
"__package__": None,
"__doc__": None,
"__spec__": None
}
example_namespace = {
"sys": sys,
"runpy": runpy,
"result": ["Top level assignment", "Lower level reference"],
"run_argv0": sys.argv[0],
"run_name_in_sys_modules": False,
"module_in_sys_modules": False,
"nested": dict(implicit_namespace,
x=1, __name__="<run>", __loader__=None),
}
example_namespace.update(implicit_namespace)
class CodeExecutionMixin:
# Issue #15230 (run_path not handling run_name correctly) highlighted a
# problem with the way arguments were being passed from higher level APIs
# down to lower level code. This mixin makes it easier to ensure full
# testing occurs at those upper layers as well, not just at the utility
# layer
# Figuring out the loader details in advance is hard to do, so we skip
# checking the full details of loader and loader_state
CHECKED_SPEC_ATTRIBUTES = ["name", "parent", "origin", "cached",
"has_location", "submodule_search_locations"]
def assertNamespaceMatches(self, result_ns, expected_ns):
"""Check two namespaces match.
Ignores any unspecified interpreter created names
"""
# Avoid side effects
result_ns = result_ns.copy()
expected_ns = expected_ns.copy()
# Impls are permitted to add extra names, so filter them out
for k in list(result_ns):
if k.startswith("__") and k.endswith("__"):
if k not in expected_ns:
result_ns.pop(k)
if k not in expected_ns["nested"]:
result_ns["nested"].pop(k)
# Spec equality includes the loader, so we take the spec out of the
# result namespace and check that separately
result_spec = result_ns.pop("__spec__")
expected_spec = expected_ns.pop("__spec__")
if expected_spec is None:
self.assertIsNone(result_spec)
else:
# If an expected loader is set, we just check we got the right
# type, rather than checking for full equality
if expected_spec.loader is not None:
self.assertEqual(type(result_spec.loader),
type(expected_spec.loader))
for attr in self.CHECKED_SPEC_ATTRIBUTES:
k = "__spec__." + attr
actual = (k, getattr(result_spec, attr))
expected = (k, getattr(expected_spec, attr))
self.assertEqual(actual, expected)
# For the rest, we still don't use direct dict comparison on the
# namespace, as the diffs are too hard to debug if anything breaks
self.assertEqual(set(result_ns), set(expected_ns))
for k in result_ns:
actual = (k, result_ns[k])
expected = (k, expected_ns[k])
self.assertEqual(actual, expected)
def check_code_execution(self, create_namespace, expected_namespace):
"""Check that an interface runs the example code correctly
First argument is a callable accepting the initial globals and
using them to create the actual namespace
Second argument is the expected result
"""
sentinel = object()
expected_ns = expected_namespace.copy()
run_name = expected_ns["__name__"]
saved_argv0 = sys.argv[0]
saved_mod = sys.modules.get(run_name, sentinel)
# Check without initial globals
result_ns = create_namespace(None)
self.assertNamespaceMatches(result_ns, expected_ns)
self.assertIs(sys.argv[0], saved_argv0)
self.assertIs(sys.modules.get(run_name, sentinel), saved_mod)
# And then with initial globals
initial_ns = {"sentinel": sentinel}
expected_ns["sentinel"] = sentinel
result_ns = create_namespace(initial_ns)
self.assertIsNot(result_ns, initial_ns)
self.assertNamespaceMatches(result_ns, expected_ns)
self.assertIs(sys.argv[0], saved_argv0)
self.assertIs(sys.modules.get(run_name, sentinel), saved_mod)
class ExecutionLayerTestCase(unittest.TestCase, CodeExecutionMixin):
"""Unit tests for runpy._run_code and runpy._run_module_code"""
def test_run_code(self):
expected_ns = example_namespace.copy()
expected_ns.update({
"__loader__": None,
})
def create_ns(init_globals):
return _run_code(example_source, {}, init_globals)
self.check_code_execution(create_ns, expected_ns)
def test_run_module_code(self):
mod_name = "<Nonsense>"
mod_fname = "Some other nonsense"
mod_loader = "Now you're just being silly"
mod_package = '' # Treat as a top level module
mod_spec = importlib.machinery.ModuleSpec(mod_name,
origin=mod_fname,
loader=mod_loader)
expected_ns = example_namespace.copy()
expected_ns.update({
"__name__": mod_name,
"__file__": mod_fname,
"__loader__": mod_loader,
"__package__": mod_package,
"__spec__": mod_spec,
"run_argv0": mod_fname,
"run_name_in_sys_modules": True,
"module_in_sys_modules": True,
})
def create_ns(init_globals):
return _run_module_code(example_source,
init_globals,
mod_name,
mod_spec)
self.check_code_execution(create_ns, expected_ns)
# TODO: Use self.addCleanup to get rid of a lot of try-finally blocks
class RunModuleTestCase(unittest.TestCase, CodeExecutionMixin):
"""Unit tests for runpy.run_module"""
def expect_import_error(self, mod_name):
try:
run_module(mod_name)
except ImportError:
pass
else:
self.fail("Expected import error for " + mod_name)
def test_invalid_names(self):
# Builtin module
self.expect_import_error("sys")
# Non-existent modules
self.expect_import_error("sys.imp.eric")
self.expect_import_error("os.path.half")
self.expect_import_error("a.bee")
# Relative names not allowed
self.expect_import_error(".howard")
self.expect_import_error("..eaten")
self.expect_import_error(".test_runpy")
self.expect_import_error(".unittest")
# Package without __main__.py
self.expect_import_error("multiprocessing")
def test_library_module(self):
self.assertEqual(run_module("runpy")["__name__"], "runpy")
def _add_pkg_dir(self, pkg_dir, namespace=False):
os.mkdir(pkg_dir)
if namespace:
return None
pkg_fname = os.path.join(pkg_dir, "__init__.py")
create_empty_file(pkg_fname)
return pkg_fname
def _make_pkg(self, source, depth, mod_base="runpy_test",
*, namespace=False, parent_namespaces=False):
# Enforce a couple of internal sanity checks on test cases
if (namespace or parent_namespaces) and not depth:
raise RuntimeError("Can't mark top level module as a "
"namespace package")
pkg_name = "__runpy_pkg__"
test_fname = mod_base+os.extsep+"py"
pkg_dir = sub_dir = os.path.realpath(tempfile.mkdtemp())
if verbose > 1: print(" Package tree in:", sub_dir)
sys.path.insert(0, pkg_dir)
if verbose > 1: print(" Updated sys.path:", sys.path[0])
if depth:
namespace_flags = [parent_namespaces] * depth
namespace_flags[-1] = namespace
for namespace_flag in namespace_flags:
sub_dir = os.path.join(sub_dir, pkg_name)
pkg_fname = self._add_pkg_dir(sub_dir, namespace_flag)
if verbose > 1: print(" Next level in:", sub_dir)
if verbose > 1: print(" Created:", pkg_fname)
mod_fname = os.path.join(sub_dir, test_fname)
with open(mod_fname, "w") as mod_file:
mod_file.write(source)
if verbose > 1: print(" Created:", mod_fname)
mod_name = (pkg_name+".")*depth + mod_base
mod_spec = importlib.util.spec_from_file_location(mod_name,
mod_fname)
return pkg_dir, mod_fname, mod_name, mod_spec
def _del_pkg(self, top):
for entry in list(sys.modules):
if entry.startswith("__runpy_pkg__"):
del sys.modules[entry]
if verbose > 1: print(" Removed sys.modules entries")
del sys.path[0]
if verbose > 1: print(" Removed sys.path entry")
for root, dirs, files in os.walk(top, topdown=False):
for name in files:
try:
os.remove(os.path.join(root, name))
except OSError as ex:
if verbose > 1: print(ex) # Persist with cleaning up
for name in dirs:
fullname = os.path.join(root, name)
try:
os.rmdir(fullname)
except OSError as ex:
if verbose > 1: print(ex) # Persist with cleaning up
try:
os.rmdir(top)
if verbose > 1: print(" Removed package tree")
except OSError as ex:
if verbose > 1: print(ex) # Persist with cleaning up
def _fix_ns_for_legacy_pyc(self, ns, alter_sys):
char_to_add = "c"
ns["__file__"] += char_to_add
ns["__cached__"] = ns["__file__"]
spec = ns["__spec__"]
new_spec = importlib.util.spec_from_file_location(spec.name,
ns["__file__"])
ns["__spec__"] = new_spec
if alter_sys:
ns["run_argv0"] += char_to_add
def _check_module(self, depth, alter_sys=False,
*, namespace=False, parent_namespaces=False):
pkg_dir, mod_fname, mod_name, mod_spec = (
self._make_pkg(example_source, depth,
namespace=namespace,
parent_namespaces=parent_namespaces))
forget(mod_name)
expected_ns = example_namespace.copy()
expected_ns.update({
"__name__": mod_name,
"__file__": mod_fname,
"__cached__": mod_spec.cached,
"__package__": mod_name.rpartition(".")[0],
"__spec__": mod_spec,
})
if alter_sys:
expected_ns.update({
"run_argv0": mod_fname,
"run_name_in_sys_modules": True,
"module_in_sys_modules": True,
})
def create_ns(init_globals):
return run_module(mod_name, init_globals, alter_sys=alter_sys)
try:
if verbose > 1: print("Running from source:", mod_name)
self.check_code_execution(create_ns, expected_ns)
importlib.invalidate_caches()
__import__(mod_name)
os.remove(mod_fname)
if not sys.dont_write_bytecode:
make_legacy_pyc(mod_fname)
unload(mod_name) # In case loader caches paths
importlib.invalidate_caches()
if verbose > 1: print("Running from compiled:", mod_name)
self._fix_ns_for_legacy_pyc(expected_ns, alter_sys)
self.check_code_execution(create_ns, expected_ns)
finally:
self._del_pkg(pkg_dir)
if verbose > 1: print("Module executed successfully")
def _check_package(self, depth, alter_sys=False,
*, namespace=False, parent_namespaces=False):
pkg_dir, mod_fname, mod_name, mod_spec = (
self._make_pkg(example_source, depth, "__main__",
namespace=namespace,
parent_namespaces=parent_namespaces))
pkg_name = mod_name.rpartition(".")[0]
forget(mod_name)
expected_ns = example_namespace.copy()
expected_ns.update({
"__name__": mod_name,
"__file__": mod_fname,
"__cached__": importlib.util.cache_from_source(mod_fname),
"__package__": pkg_name,
"__spec__": mod_spec,
})
if alter_sys:
expected_ns.update({
"run_argv0": mod_fname,
"run_name_in_sys_modules": True,
"module_in_sys_modules": True,
})
def create_ns(init_globals):
return run_module(pkg_name, init_globals, alter_sys=alter_sys)
try:
if verbose > 1: print("Running from source:", pkg_name)
self.check_code_execution(create_ns, expected_ns)
importlib.invalidate_caches()
__import__(mod_name)
os.remove(mod_fname)
if not sys.dont_write_bytecode:
make_legacy_pyc(mod_fname)
unload(mod_name) # In case loader caches paths
if verbose > 1: print("Running from compiled:", pkg_name)
importlib.invalidate_caches()
self._fix_ns_for_legacy_pyc(expected_ns, alter_sys)
self.check_code_execution(create_ns, expected_ns)
finally:
self._del_pkg(pkg_dir)
if verbose > 1: print("Package executed successfully")
def _add_relative_modules(self, base_dir, source, depth):
if depth <= 1:
raise ValueError("Relative module test needs depth > 1")
pkg_name = "__runpy_pkg__"
module_dir = base_dir
for i in range(depth):
parent_dir = module_dir
module_dir = os.path.join(module_dir, pkg_name)
# Add sibling module
sibling_fname = os.path.join(module_dir, "sibling.py")
create_empty_file(sibling_fname)
if verbose > 1: print(" Added sibling module:", sibling_fname)
# Add nephew module
uncle_dir = os.path.join(parent_dir, "uncle")
self._add_pkg_dir(uncle_dir)
if verbose > 1: print(" Added uncle package:", uncle_dir)
cousin_dir = os.path.join(uncle_dir, "cousin")
self._add_pkg_dir(cousin_dir)
if verbose > 1: print(" Added cousin package:", cousin_dir)
nephew_fname = os.path.join(cousin_dir, "nephew.py")
create_empty_file(nephew_fname)
if verbose > 1: print(" Added nephew module:", nephew_fname)
def _check_relative_imports(self, depth, run_name=None):
contents = r"""\
from __future__ import absolute_import
from . import sibling
from ..uncle.cousin import nephew
"""
pkg_dir, mod_fname, mod_name, mod_spec = (
self._make_pkg(contents, depth))
if run_name is None:
expected_name = mod_name
else:
expected_name = run_name
try:
self._add_relative_modules(pkg_dir, contents, depth)
pkg_name = mod_name.rpartition('.')[0]
if verbose > 1: print("Running from source:", mod_name)
d1 = run_module(mod_name, run_name=run_name) # Read from source
self.assertEqual(d1["__name__"], expected_name)
self.assertEqual(d1["__package__"], pkg_name)
self.assertIn("sibling", d1)
self.assertIn("nephew", d1)
del d1 # Ensure __loader__ entry doesn't keep file open
importlib.invalidate_caches()
__import__(mod_name)
os.remove(mod_fname)
if not sys.dont_write_bytecode:
make_legacy_pyc(mod_fname)
unload(mod_name) # In case the loader caches paths
if verbose > 1: print("Running from compiled:", mod_name)
importlib.invalidate_caches()
d2 = run_module(mod_name, run_name=run_name) # Read from bytecode
self.assertEqual(d2["__name__"], expected_name)
self.assertEqual(d2["__package__"], pkg_name)
self.assertIn("sibling", d2)
self.assertIn("nephew", d2)
del d2 # Ensure __loader__ entry doesn't keep file open
finally:
self._del_pkg(pkg_dir)
if verbose > 1: print("Module executed successfully")
def test_run_module(self):
for depth in range(4):
if verbose > 1: print("Testing package depth:", depth)
self._check_module(depth)
def test_run_module_in_namespace_package(self):
for depth in range(1, 4):
if verbose > 1: print("Testing package depth:", depth)
self._check_module(depth, namespace=True, parent_namespaces=True)
def test_run_package(self):
for depth in range(1, 4):
if verbose > 1: print("Testing package depth:", depth)
self._check_package(depth)
def test_run_package_init_exceptions(self):
# These were previously wrapped in an ImportError; see Issue 14285
result = self._make_pkg("", 1, "__main__")
pkg_dir, _, mod_name, _ = result
mod_name = mod_name.replace(".__main__", "")
self.addCleanup(self._del_pkg, pkg_dir)
init = os.path.join(pkg_dir, "__runpy_pkg__", "__init__.py")
exceptions = (ImportError, AttributeError, TypeError, ValueError)
for exception in exceptions:
name = exception.__name__
with self.subTest(name):
source = "raise {0}('{0} in __init__.py.')".format(name)
with open(init, "wt", encoding="ascii") as mod_file:
mod_file.write(source)
try:
run_module(mod_name)
except exception as err:
self.assertNotIn("finding spec", format(err))
else:
self.fail("Nothing raised; expected {}".format(name))
try:
run_module(mod_name + ".submodule")
except exception as err:
self.assertNotIn("finding spec", format(err))
else:
self.fail("Nothing raised; expected {}".format(name))
def test_submodule_imported_warning(self):
pkg_dir, _, mod_name, _ = self._make_pkg("", 1)
try:
__import__(mod_name)
with self.assertWarnsRegex(RuntimeWarning,
r"found in sys\.modules"):
run_module(mod_name)
finally:
self._del_pkg(pkg_dir)
def test_package_imported_no_warning(self):
pkg_dir, _, mod_name, _ = self._make_pkg("", 1, "__main__")
self.addCleanup(self._del_pkg, pkg_dir)
package = mod_name.replace(".__main__", "")
# No warning should occur if we only imported the parent package
__import__(package)
self.assertIn(package, sys.modules)
with warnings.catch_warnings():
warnings.simplefilter("error", RuntimeWarning)
run_module(package)
# But the warning should occur if we imported the __main__ submodule
__import__(mod_name)
with self.assertWarnsRegex(RuntimeWarning, r"found in sys\.modules"):
run_module(package)
def test_run_package_in_namespace_package(self):
for depth in range(1, 4):
if verbose > 1: print("Testing package depth:", depth)
self._check_package(depth, parent_namespaces=True)
def test_run_namespace_package(self):
for depth in range(1, 4):
if verbose > 1: print("Testing package depth:", depth)
self._check_package(depth, namespace=True)
def test_run_namespace_package_in_namespace_package(self):
for depth in range(1, 4):
if verbose > 1: print("Testing package depth:", depth)
self._check_package(depth, namespace=True, parent_namespaces=True)
def test_run_module_alter_sys(self):
for depth in range(4):
if verbose > 1: print("Testing package depth:", depth)
self._check_module(depth, alter_sys=True)
def test_run_package_alter_sys(self):
for depth in range(1, 4):
if verbose > 1: print("Testing package depth:", depth)
self._check_package(depth, alter_sys=True)
def test_explicit_relative_import(self):
for depth in range(2, 5):
if verbose > 1: print("Testing relative imports at depth:", depth)
self._check_relative_imports(depth)
def test_main_relative_import(self):
for depth in range(2, 5):
if verbose > 1: print("Testing main relative imports at depth:", depth)
self._check_relative_imports(depth, "__main__")
def test_run_name(self):
depth = 1
run_name = "And now for something completely different"
pkg_dir, mod_fname, mod_name, mod_spec = (
self._make_pkg(example_source, depth))
forget(mod_name)
expected_ns = example_namespace.copy()
expected_ns.update({
"__name__": run_name,
"__file__": mod_fname,
"__cached__": importlib.util.cache_from_source(mod_fname),
"__package__": mod_name.rpartition(".")[0],
"__spec__": mod_spec,
})
def create_ns(init_globals):
return run_module(mod_name, init_globals, run_name)
try:
self.check_code_execution(create_ns, expected_ns)
finally:
self._del_pkg(pkg_dir)
def test_pkgutil_walk_packages(self):
# This is a dodgy hack to use the test_runpy infrastructure to test
# issue #15343. Issue #15348 declares this is indeed a dodgy hack ;)
import pkgutil
max_depth = 4
base_name = "__runpy_pkg__"
package_suffixes = ["uncle", "uncle.cousin"]
module_suffixes = ["uncle.cousin.nephew", base_name + ".sibling"]
expected_packages = set()
expected_modules = set()
for depth in range(1, max_depth):
pkg_name = ".".join([base_name] * depth)
expected_packages.add(pkg_name)
for name in package_suffixes:
expected_packages.add(pkg_name + "." + name)
for name in module_suffixes:
expected_modules.add(pkg_name + "." + name)
pkg_name = ".".join([base_name] * max_depth)
expected_packages.add(pkg_name)
expected_modules.add(pkg_name + ".runpy_test")
pkg_dir, mod_fname, mod_name, mod_spec = (
self._make_pkg("", max_depth))
self.addCleanup(self._del_pkg, pkg_dir)
for depth in range(2, max_depth+1):
self._add_relative_modules(pkg_dir, "", depth)
for moduleinfo in pkgutil.walk_packages([pkg_dir]):
self.assertIsInstance(moduleinfo, pkgutil.ModuleInfo)
self.assertIsInstance(moduleinfo.module_finder,
importlib.machinery.FileFinder)
if moduleinfo.ispkg:
expected_packages.remove(moduleinfo.name)
else:
expected_modules.remove(moduleinfo.name)
self.assertEqual(len(expected_packages), 0, expected_packages)
self.assertEqual(len(expected_modules), 0, expected_modules)
class RunPathTestCase(unittest.TestCase, CodeExecutionMixin):
"""Unit tests for runpy.run_path"""
def _make_test_script(self, script_dir, script_basename,
source=None, omit_suffix=False):
if source is None:
source = example_source
return make_script(script_dir, script_basename,
source, omit_suffix)
def _check_script(self, script_name, expected_name, expected_file,
expected_argv0, mod_name=None,
expect_spec=True, check_loader=True):
# First check is without run_name
def create_ns(init_globals):
return run_path(script_name, init_globals)
expected_ns = example_namespace.copy()
if mod_name is None:
spec_name = expected_name
else:
spec_name = mod_name
if expect_spec:
mod_spec = importlib.util.spec_from_file_location(spec_name,
expected_file)
mod_cached = mod_spec.cached
if not check_loader:
mod_spec.loader = None
else:
mod_spec = mod_cached = None
expected_ns.update({
"__name__": expected_name,
"__file__": expected_file,
"__cached__": mod_cached,
"__package__": "",
"__spec__": mod_spec,
"run_argv0": expected_argv0,
"run_name_in_sys_modules": True,
"module_in_sys_modules": True,
})
self.check_code_execution(create_ns, expected_ns)
# Second check makes sure run_name works in all cases
run_name = "prove.issue15230.is.fixed"
def create_ns(init_globals):
return run_path(script_name, init_globals, run_name)
if expect_spec and mod_name is None:
mod_spec = importlib.util.spec_from_file_location(run_name,
expected_file)
if not check_loader:
mod_spec.loader = None
expected_ns["__spec__"] = mod_spec
expected_ns["__name__"] = run_name
expected_ns["__package__"] = run_name.rpartition(".")[0]
self.check_code_execution(create_ns, expected_ns)
def _check_import_error(self, script_name, msg):
msg = re.escape(msg)
self.assertRaisesRegex(ImportError, msg, run_path, script_name)
def test_basic_script(self):
with temp_dir() as script_dir:
mod_name = 'script'
script_name = self._make_test_script(script_dir, mod_name)
self._check_script(script_name, "<run_path>", script_name,
script_name, expect_spec=False)
def test_basic_script_with_path_object(self):
with temp_dir() as script_dir:
mod_name = 'script'
script_name = pathlib.Path(self._make_test_script(script_dir,
mod_name))
self._check_script(script_name, "<run_path>", script_name,
script_name, expect_spec=False)
def test_basic_script_no_suffix(self):
with temp_dir() as script_dir:
mod_name = 'script'
script_name = self._make_test_script(script_dir, mod_name,
omit_suffix=True)
self._check_script(script_name, "<run_path>", script_name,
script_name, expect_spec=False)
def test_script_compiled(self):
with temp_dir() as script_dir:
mod_name = 'script'
script_name = self._make_test_script(script_dir, mod_name)
compiled_name = py_compile.compile(script_name, doraise=True)
os.remove(script_name)
self._check_script(compiled_name, "<run_path>", compiled_name,
compiled_name, expect_spec=False)
def test_directory(self):
with temp_dir() as script_dir:
mod_name = '__main__'
script_name = self._make_test_script(script_dir, mod_name)
self._check_script(script_dir, "<run_path>", script_name,
script_dir, mod_name=mod_name)
def test_directory_compiled(self):
with temp_dir() as script_dir:
mod_name = '__main__'
script_name = self._make_test_script(script_dir, mod_name)
compiled_name = py_compile.compile(script_name, doraise=True)
os.remove(script_name)
if not sys.dont_write_bytecode:
legacy_pyc = make_legacy_pyc(script_name)
self._check_script(script_dir, "<run_path>", legacy_pyc,
script_dir, mod_name=mod_name)
def test_directory_error(self):
with temp_dir() as script_dir:
mod_name = 'not_main'
script_name = self._make_test_script(script_dir, mod_name)
msg = "can't find '__main__' module in %r" % script_dir
self._check_import_error(script_dir, msg)
def test_zipfile(self):
with temp_dir() as script_dir:
mod_name = '__main__'
script_name = self._make_test_script(script_dir, mod_name)
zip_name, fname = make_zip_script(script_dir, 'test_zip', script_name)
self._check_script(zip_name, "<run_path>", fname, zip_name,
mod_name=mod_name, check_loader=False)
def test_zipfile_compiled(self):
with temp_dir() as script_dir:
mod_name = '__main__'
script_name = self._make_test_script(script_dir, mod_name)
compiled_name = py_compile.compile(script_name, doraise=True)
zip_name, fname = make_zip_script(script_dir, 'test_zip',
compiled_name)
self._check_script(zip_name, "<run_path>", fname, zip_name,
mod_name=mod_name, check_loader=False)
def test_zipfile_error(self):
with temp_dir() as script_dir:
mod_name = 'not_main'
script_name = self._make_test_script(script_dir, mod_name)
zip_name, fname = make_zip_script(script_dir, 'test_zip', script_name)
msg = "can't find '__main__' module in %r" % zip_name
self._check_import_error(zip_name, msg)
@no_tracing
def test_main_recursion_error(self):
with temp_dir() as script_dir, temp_dir() as dummy_dir:
mod_name = '__main__'
source = ("import runpy\n"
"runpy.run_path(%r)\n") % dummy_dir
script_name = self._make_test_script(script_dir, mod_name, source)
zip_name, fname = make_zip_script(script_dir, 'test_zip', script_name)
msg = "recursion depth exceeded"
self.assertRaisesRegex(RecursionError, msg, run_path, zip_name)
def test_encoding(self):
with temp_dir() as script_dir:
filename = os.path.join(script_dir, 'script.py')
with open(filename, 'w', encoding='latin1') as f:
f.write("""
#coding:latin1
s = "non-ASCII: h\xe9"
""")
result = run_path(filename)
self.assertEqual(result['s'], "non-ASCII: h\xe9")
class TestExit(unittest.TestCase):
STATUS_CONTROL_C_EXIT = 0xC000013A
EXPECTED_CODE = (
STATUS_CONTROL_C_EXIT
if sys.platform == "win32"
else -signal.SIGINT
)
@staticmethod
@contextlib.contextmanager
def tmp_path(*args, **kwargs):
with temp_dir() as tmp_fn:
yield pathlib.Path(tmp_fn)
def run(self, *args, **kwargs):
with self.tmp_path() as tmp:
self.ham = ham = tmp / "ham.py"
ham.write_text(
textwrap.dedent(
"""\
raise KeyboardInterrupt
"""
)
)
super().run(*args, **kwargs)
def assertSigInt(self, *args, **kwargs):
proc = subprocess.run(*args, **kwargs, text=True, stderr=subprocess.PIPE)
self.assertTrue(proc.stderr.endswith("\nKeyboardInterrupt\n"))
self.assertEqual(proc.returncode, self.EXPECTED_CODE)
def test_pymain_run_file(self):
self.assertSigInt([sys.executable, self.ham])
def test_pymain_run_file_runpy_run_module(self):
tmp = self.ham.parent
run_module = tmp / "run_module.py"
run_module.write_text(
textwrap.dedent(
"""\
import runpy
runpy.run_module("ham")
"""
)
)
self.assertSigInt([sys.executable, run_module], cwd=tmp)
def test_pymain_run_file_runpy_run_module_as_main(self):
tmp = self.ham.parent
run_module_as_main = tmp / "run_module_as_main.py"
run_module_as_main.write_text(
textwrap.dedent(
"""\
import runpy
runpy._run_module_as_main("ham")
"""
)
)
self.assertSigInt([sys.executable, run_module_as_main], cwd=tmp)
def test_pymain_run_command_run_module(self):
self.assertSigInt(
[sys.executable, "-c", "import runpy; runpy.run_module('ham')"],
cwd=self.ham.parent,
)
def test_pymain_run_command(self):
self.assertSigInt([sys.executable, "-c", "import ham"], cwd=self.ham.parent)
def test_pymain_run_stdin(self):
self.assertSigInt([sys.executable], input="import ham", cwd=self.ham.parent)
def test_pymain_run_module(self):
ham = self.ham
self.assertSigInt([sys.executable, "-m", ham.stem], cwd=ham.parent)
if __name__ == "__main__":
unittest.main()
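# ---------------------------------------------------------------------------
# A quick, hedged illustration of the main API under test: running a stdlib
# module under a non-__main__ run_name returns its namespace without firing
# its command-line entry point.
#
#   import runpy
#   ns = runpy.run_module("json.tool", run_name="not_main")
#   print(sorted(name for name in ns if not name.startswith("__")))
# ---------------------------------------------------------------------------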
|
py
|
1a566bd69302e8a79b7767cd2473d17ae6f43b46
|
import io
import re
import six
from boto3.session import Session
from botocore.config import Config
AWS_ACCESS_KEY = 'AKIAJXFC3JRVYNIHX2UA'
AWS_ACCESS_SECRET_KEY = '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'
S3_BUCKET_NAME_PATTERN = re.compile(r'^[a-z0-9][a-z0-9\-]{1,61}[a-z0-9]$')
S3_KEY_PATTERN = re.compile(r'^[a-zA-Z0-9][a-zA-Z0-9\-./_]{3,253}[a-zA-Z0-9]$')
class S3Error(AssertionError):
pass
def get_client():
session = Session(aws_access_key_id=AWS_ACCESS_KEY, aws_secret_access_key=AWS_ACCESS_SECRET_KEY)
return session.client('s3', config=Config(signature_version='s3v4'))
def validate_bucket_name(bucket):
if not S3_BUCKET_NAME_PATTERN.match(bucket) or '--' in bucket:
raise S3Error('invalid bucket name {}'.format(bucket))
def validate_key_name(key):
if not S3_KEY_PATTERN.match(key):
raise S3Error('invalid s3 key name {}'.format(key))
def validate_content(content):
if content is None:
raise S3Error('no content to upload')
if not isinstance(content, bytes) and not hasattr(content, 'read'):
raise S3Error('content is neither a string nor a file like object, content={}'.format(content))
def download(bucket, key):
"""
always returns a byte string
"""
validate_bucket_name(bucket)
validate_key_name(key)
client = get_client()
# do a buffered download
bytes_io = io.BytesIO()
client.download_fileobj(bucket, key, bytes_io)
# hope that stuff is not too big, and just return content
return bytes_io.getvalue()
def download_file(bucket, key, filename):
"""
downloads the object at bucket/key directly to a local file; returns None
"""
validate_bucket_name(bucket)
validate_key_name(key)
client = get_client()
client.download_file(bucket, key, filename)
def upload(bucket, key, content, extra_args=None):
    """upload content to bucket/key, replacing the object if the key exists"""
    # validate_content(content)
    validate_bucket_name(bucket)
    validate_key_name(key)
    client = get_client()
    if extra_args:
        client.put_object(Body=content, Bucket=bucket, Key=key, ContentType=extra_args['ContentType'])
    else:
        client.put_object(Body=content, Bucket=bucket, Key=key)
def delete(bucket, key):
validate_bucket_name(bucket)
validate_key_name(key)
client = get_client()
client.delete_object(Bucket=bucket, Key=key)
def modify_metadata():
from api.models import S3Upload
client = get_client()
for s3_upload in S3Upload.objects.filter(folder='uploads/pod').filter(id__gte=34783).order_by('-id'):
try:
s3_obj = client.get_object(Bucket=s3_upload.bucket, Key=s3_upload.key())
client.put_object(Body=s3_obj['Body'].read(), Bucket=s3_upload.bucket, Key=s3_upload.key(),
ContentType='image/jpeg')
print(s3_upload.id)
        except Exception:
            print(s3_upload.filename)
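# ---------------------------------------------------------------------------
# Usage sketch (hedged): exercises the helpers above. The bucket and key are
# hypothetical placeholders; real credentials should come from the
# environment rather than module-level constants.
# ---------------------------------------------------------------------------
def _usage_example():  # pragma: no cover
    upload('my-example-bucket', 'reports/summary.txt', b'hello world', None)
    assert download('my-example-bucket', 'reports/summary.txt') == b'hello world'
    delete('my-example-bucket', 'reports/summary.txt')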
|
py
|
1a566c6d07c2ce2e31a9202edd4c14c0e397f19d
|
"""Declarative scaffolding for frameworks"""
import collections
import uuid
import warnings
__all__ = ["ModelMetaclass", "Field", "TypeDefinition",
"TypeEngine", "DeclareException"]
__version__ = "0.9.12"
missing = object()
# These engines can't be cleared
_fixed_engines = collections.ChainMap()
class DeclareException(Exception):
"""
Custom exception for cases where raising a built-in exception
would be ambiguous (whether it was thrown by declare or a bound function)
"""
pass
class TypeEngineMeta(type):
"""
Factory for :class:`~TypeEngine` so that each engine is init'd only once.
This is necessary since if :meth:`~TypeEngine.__new__` returns an instance
of the class, the :meth:`~TypeEngine.__init__` method will be called.
"""
engines = _fixed_engines.new_child()
def __call__(cls, namespace, *args, **kwargs):
engine = TypeEngineMeta.engines.get(namespace)
if engine is None:
engine = cls.__new__(cls)
TypeEngineMeta.engines[namespace] = engine
cls.__init__(engine, namespace, *args, **kwargs)
return engine
@classmethod
def clear_engines(metaclass):
"""Clear all non-fixed engines"""
metaclass.engines.clear()
class TypeEngine(object, metaclass=TypeEngineMeta):
"""
Collection of bound :class:`~TypeDefinition` for a given namespace.
TypeEngines are unique by namespace::
assert TypeEngine("foo") is TypeEngine("foo")
This makes it easier for groups of components to use a single engine to
translate values by type. By default :meth:`~TypeEngine.load` and
:meth:`~TypeEngine.dump` require a reference to the typedef used to convert
values. A custom Engine could use the :class:`~TypeDefinition` attributes
``python_type`` and ``backing_type`` to find the correct typedef from the
set of available typedefs and automatically convert to the necessary
format.
"""
def __init__(self, namespace="global", *args, **kwargs):
self.namespace = namespace
self.unbound_types = set()
self.bound_types = {}
@classmethod
def unique(cls):
"""Return a unique type engine (using uuid4)"""
namespace = str(uuid.uuid4())
return TypeEngine(namespace)
def register(self, typedef):
"""
Add the typedef to this engine if it is compatible.
After registering a :class:`~TypeDefinition`, it will not be bound
until :meth:`~TypeEngine.bind` is next called.
Nothing will happen when register is called with a typedef that is
pending binding or already bound. Otherwise, the engine will ensure it
is compatible with the type using :meth:`~TypeEngine.is_compatible`
before adding it to the set of unbound types.
Parameters
----------
typedef : :class:`~TypeDefinition`
The typedef to register with this engine
Raises
------
exc : :class:`ValueError`
If :meth:`~TypeEngine.is_compatible` is falsey
"""
if typedef in self.bound_types:
return
if not self.is_compatible(typedef):
raise ValueError("Incompatible type {} for engine {}".format(
typedef, self))
if typedef not in self.unbound_types:
self.unbound_types.add(typedef)
typedef._register(self)
def bind(self, **config):
"""
Bind all unbound types to the engine.
Bind each unbound typedef to the engine, passing in the engine and
:attr:`config`. The resulting ``load`` and ``dump`` functions can
be found under ``self.bound_types[typedef]["load"]`` and
``self.bound_types[typedef]["dump"]``, respectively.
Parameters
----------
config : dict, optional
Engine-binding configuration to pass to each typedef that will be
bound. Examples include floating-point precision values, maximum
lengths for strings, or any other translation constraints/settings
that a typedef needs to construct a load/dump function pair.
"""
while self.unbound_types:
typedef = self.unbound_types.pop()
try:
load, dump = typedef.bind(self, **config)
self.bound_types[typedef] = {
"load": load, "dump": dump
}
except Exception:
self.unbound_types.add(typedef)
raise
def load(self, typedef, value, **kwargs):
"""
Return the result of the bound load method for a typedef
Looks up the load function that was bound to the engine for a typedef,
and return the result of passing the given `value` and any `context`
to that function.
Parameters
----------
typedef : :class:`~TypeDefinition`
The typedef whose bound load method should be used
value : object
The value to be passed into the bound load method
**kwargs : kwargs
Context for the value being loaded
Returns
-------
loaded_value : object
The return value of the load function for the input value
Raises
------
exc : :class:`DeclareException`
    If the input typedef is not bound to this engine
Example
-------
.. code-block:: python
class Account(TypeDefinition):
prefix = "::account"
def load(self, value, **context):
return value + Account.prefix
def dump(self, value, **context):
return value[:-len(Account.prefix)]
typedef = Account()
engine = TypeEngine("accounts")
engine.register(typedef)
engine.bind()
assert engine.load(typedef, "Jill") == "Jill::account"
"""
try:
bound_type = self.bound_types[typedef]
except KeyError:
raise DeclareException(
"Can't load unknown type {}".format(typedef))
else:
# Don't need to try/catch since load/dump are bound together
return bound_type["load"](value, **kwargs)
def dump(self, typedef, value, **kwargs):
"""
Return the result of the bound dump method for a typedef
Looks up the dump function that was bound to the engine for a typedef,
and return the result of passing the given `value` and any `context`
to that function.
Parameters
----------
typedef : :class:`~TypeDefinition`
The typedef whose bound dump method should be used
value : object
The value to be passed into the bound dump method
**kwargs : kwargs
Context for the value being dumped
Returns
-------
dumped_value : object
The return value of the dump function for the input value
Raises
------
exc : :class:`DeclareException`
    If the input typedef is not bound to this engine
Example
-------
.. code-block:: python
class Account(TypeDefinition):
prefix = "::account"
def load(self, value, **context):
    return value + Account.prefix
def dump(self, value, **context):
    return value[:-len(Account.prefix)]
typedef = Account()
engine = TypeEngine("accounts")
engine.register(typedef)
engine.bind()
assert engine.dump(typedef, "Jill::account") == "Jill"
"""
try:
bound_type = self.bound_types[typedef]
except KeyError:
raise DeclareException(
"Can't dump unknown type {}".format(typedef))
else:
# Don't need to try/catch since load/dump are bound together
return bound_type["dump"](value, **kwargs)
def is_compatible(self, typedef): # pragma: no cover
"""
Returns ``true`` if the typedef is compatible with this engine.
This function should return ``False`` otherwise. The default
implementation will always return ``True``.
"""
return True
def __contains__(self, typedef):
return typedef in self.bound_types
_fixed_engines["global"] = TypeEngine("global")
class TypeDefinition:
"""
Translates between python types and backend/storage/transport types
A single TypeDefinition can be used for multiple TypeEngines, by
implementing :meth:`~TypeDefinition.bind` and returning different
(load, dump) function tuples for each engine.
For TypeDefinitions that are loaded/dumped the same for every engine,
just implement :meth:`~TypeDefinition._load` and
:meth:`~TypeDefinition._dump`.
"""
python_type = None
backing_type = None
def bind(self, engine, **config):
"""
Return a pair of (load, dump) functions for a specific engine.
Some Types will load and dump values depending on certain config, or
for different :class:`~TypeEngine`.
By default, this function will return the functions
:meth:`~TypeDefinition._load` and :meth:`~TypeDefinition._dump`.
The default :meth:`~TypeDefinition._load` and
:meth:`~TypeDefinition._dump` functions simply return the input value.
Parameters
----------
engine : :class:`~TypeEngine`
The engine that will save these load, dump functions
config : dictionary
Optional configuration for creating the functions.
Returns
-------
(load, dump) : (func, func) tuple
Each function takes a value and context, and returns a single value
"""
return self._load, self._dump
def _register(self, engine):
"""Called when the type is registered with an engine."""
pass
def _load(self, value, **kwargs):
"""
Engine-agnostic load function. Implement this method for any
TypeDefinition whose load function does not depend on the TypeEngine
being used to load it.
NOTE: This will not be available at runtime -
TypeDefinitionMetaclass hides the reference at runtime to reduce the
chance of incorrectly using an engine-agnostic load method when the
TypeDefinition prefers an engine-specific load method.
By default, returns :attr:`value` unchanged.
"""
return value
def _dump(self, value, **kwargs):
"""
Engine-agnostic dump function. Implement this method for any
TypeDefinition whose dump function does not depend on the TypeEngine
being used to dump it.
NOTE: This will not be available at runtime -
TypeDefinitionMetaclass hides the reference at runtime to reduce the
chance of incorrectly using an engine-agnostic dump method when the
TypeDefinition prefers an engine-specific dump method.
By default, returns :attr:`value` unchanged.
"""
return value
def subclassof(obj, classinfo):
"""Wrap issubclass to only return True/False"""
try:
return issubclass(obj, classinfo)
except TypeError:
return False
def instanceof(obj, classinfo):
"""Wrap isinstance to only return True/False"""
try:
return isinstance(obj, classinfo)
except TypeError: # pragma: no cover
# No coverage since we never call this without a class,
# type, or tuple of classes, types, or such tuples.
return False
class Field:
def __init__(self, *, typedef=None, **kwargs):
self._model_name = None
if typedef is None:
self.typedef = typedef
else:
if subclassof(typedef, TypeDefinition):
typedef = typedef()
if instanceof(typedef, TypeDefinition):
self.typedef = typedef
else:
raise TypeError(("Expected {} to be None, instance of "
"TypeDefinition, or subclass of"
"TypeDefinition".format(typedef)))
super().__init__(**kwargs)
@property
def model_name(self):
"""Name of the model's attr that references self"""
return self._model_name
@model_name.setter
def model_name(self, value):
if self._model_name is not None:
raise AttributeError("{} model_name already set to '{}'".format(
self.__class__.__name__, self._model_name))
self._model_name = value
def set(self, obj, value):
if self._model_name is None:
raise AttributeError("Can't set field without binding to model")
obj.__dict__[self._model_name] = value
def get(self, obj):
if self._model_name is None:
raise AttributeError("Can't get field without binding to model")
try:
return obj.__dict__[self._model_name]
except KeyError:
raise AttributeError("'{}' has no attribute '{}'".format(
obj.__class__, self._model_name))
def delete(self, obj):
if self._model_name is None:
raise AttributeError("Can't delete field without binding to model")
try:
del obj.__dict__[self._model_name]
except KeyError:
raise AttributeError("'{}' has no attribute '{}'".format(
obj.__class__, self._model_name))
# Descriptor Protocol
# To override, use set, get, delete above
# https://docs.python.org/3.4/howto/descriptor.html
def __set__(self, obj, value):
self.set(obj, value)
def __get__(self, obj, type=None):
if obj is None:
return self
return self.get(obj)
def __delete__(self, obj):
self.delete(obj)
def index(objects, attr):
"""
Generate a mapping of a list of objects indexed by the given attr.
Parameters
----------
objects : :class:`list`, iterable
attr : string
The attribute to index the list of objects by
Returns
-------
dictionary : dict
keys are the value of each object's attr, and values are from objects
Example
-------
class Person(object):
def __init__(self, name, email, age):
self.name = name
self.email = email
self.age = age
people = [
Person('one', '[email protected]', 1),
Person('two', '[email protected]', 2),
Person('three', '[email protected]', 3)
]
by_email = index(people, 'email')
by_name = index(people, 'name')
assert by_name['one'] is people[0]
assert by_email['[email protected]'] is people[1]
"""
with warnings.catch_warnings():
warnings.simplefilter("ignore")
return {getattr(obj, attr): obj for obj in objects}
class ModelMetaclass(type, TypeDefinition):
"""
Track the order that ``Field`` attributes are declared, and
insert a Meta object (class) in the class
"""
@classmethod
def __prepare__(mcs, name, bases):
"""Returns an OrderedDict so attribute order is preserved"""
return collections.OrderedDict()
def __new__(mcs, name, bases, attrs):
"""Add a container class `Meta` to the class"""
Meta = attrs.get('Meta', missing)
if Meta is missing:
class Meta:
pass
attrs['Meta'] = Meta
if not isinstance(Meta, type):
raise TypeError("Expected `Meta` to be a class object")
cls = super().__new__(mcs, name, bases, attrs)
# Load and index fields by name
# ----------------------------------------------------------
fields = []
for name, attr in attrs.items():
if isinstance(attr, Field):
fields.append(attr)
# This will raise AttributeError if the field's
# name is already set
with warnings.catch_warnings():
warnings.simplefilter("ignore")
attr.model_name = name
Meta.fields_by_model_name = index(fields, 'model_name')
Meta.fields = fields
return cls
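# ---------------------------------------------------------------------------
# Usage sketch (illustrative; the model and its fields are hypothetical):
# demonstrates the declaration-order tracking and Meta indexing above.
# ---------------------------------------------------------------------------
if __name__ == "__main__":  # pragma: no cover
    class ExampleUser(metaclass=ModelMetaclass):
        name = Field()
        email = Field()

    assert [f.model_name for f in ExampleUser.Meta.fields] == ["name", "email"]
    assert ExampleUser.Meta.fields_by_model_name["email"] is ExampleUser.email
    user = ExampleUser()
    user.name = "Jill"
    assert user.name == "Jill"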
|
py
|
1a566cb5a64376cf6fcba9203f648186d44984c3
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 OpenStack LLC.
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""RequestContext: context for requests that persist through all of nova."""
import copy
import uuid
from nova.openstack.common import local
from nova.openstack.common import log as logging
from nova.openstack.common import timeutils
from nova import policy
LOG = logging.getLogger(__name__)
def generate_request_id():
return 'req-' + str(uuid.uuid4())
class RequestContext(object):
"""Security context and request information.
Represents the user taking a given action within the system.
"""
def __init__(self, user_id, project_id, is_admin=None, read_deleted="no",
roles=None, remote_address=None, timestamp=None,
request_id=None, auth_token=None, overwrite=True,
quota_class=None, user_name=None, project_name=None,
service_catalog=None, instance_lock_checked=False, **kwargs):
"""
:param read_deleted: 'no' indicates deleted records are hidden, 'yes'
indicates deleted records are visible, 'only' indicates that
*only* deleted records are visible.
:param overwrite: Set to False to ensure that the greenthread local
copy of the index is not overwritten.
:param kwargs: Extra arguments that might be present, but we ignore
because they possibly came in from older rpc messages.
"""
if kwargs:
LOG.warn(_('Arguments dropped when creating context: %s') %
str(kwargs))
self.user_id = user_id
self.project_id = project_id
self.roles = roles or []
self.is_admin = is_admin
if self.is_admin is None:
self.is_admin = policy.check_is_admin(self.roles)
self.read_deleted = read_deleted
self.remote_address = remote_address
if not timestamp:
timestamp = timeutils.utcnow()
if isinstance(timestamp, basestring):
timestamp = timeutils.parse_strtime(timestamp)
self.timestamp = timestamp
if not request_id:
request_id = generate_request_id()
self.request_id = request_id
self.auth_token = auth_token
self.service_catalog = service_catalog
self.instance_lock_checked = instance_lock_checked
# NOTE(markmc): this attribute is currently only used by the
# rs_limits turnstile pre-processor.
# See https://lists.launchpad.net/openstack/msg12200.html
self.quota_class = quota_class
self.user_name = user_name
self.project_name = project_name
if overwrite or not hasattr(local.store, 'context'):
self.update_store()
def _get_read_deleted(self):
return self._read_deleted
def _set_read_deleted(self, read_deleted):
if read_deleted not in ('no', 'yes', 'only'):
raise ValueError(_("read_deleted can only be one of 'no', "
"'yes' or 'only', not %r") % read_deleted)
self._read_deleted = read_deleted
def _del_read_deleted(self):
del self._read_deleted
read_deleted = property(_get_read_deleted, _set_read_deleted,
_del_read_deleted)
def update_store(self):
local.store.context = self
def to_dict(self):
return {'user_id': self.user_id,
'project_id': self.project_id,
'is_admin': self.is_admin,
'read_deleted': self.read_deleted,
'roles': self.roles,
'remote_address': self.remote_address,
'timestamp': timeutils.strtime(self.timestamp),
'request_id': self.request_id,
'auth_token': self.auth_token,
'quota_class': self.quota_class,
'user_name': self.user_name,
'service_catalog': self.service_catalog,
'project_name': self.project_name,
'instance_lock_checked': self.instance_lock_checked,
'tenant': self.tenant,
'user': self.user}
@classmethod
def from_dict(cls, values):
return cls(**values)
def elevated(self, read_deleted=None, overwrite=False):
"""Return a version of this context with admin flag set."""
context = copy.copy(self)
context.is_admin = True
if 'admin' not in context.roles:
context.roles.append('admin')
if read_deleted is not None:
context.read_deleted = read_deleted
return context
# NOTE(sirp): the openstack/common version of RequestContext uses
# tenant/user whereas the Nova version uses project_id/user_id. We need
# this shim in order to use context-aware code from openstack/common, like
# logging, until we make the switch to using openstack/common's version of
# RequestContext.
@property
def tenant(self):
return self.project_id
@property
def user(self):
return self.user_id
def get_admin_context(read_deleted="no"):
return RequestContext(user_id=None,
project_id=None,
is_admin=True,
read_deleted=read_deleted,
overwrite=False)
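# Usage sketch (illustrative; the ids are hypothetical): build a context,
# round-trip it through a dict as is done for RPC, and elevate it to admin.
if __name__ == '__main__':
    ctxt = RequestContext(user_id='fake-user', project_id='fake-project',
                          roles=['member'], is_admin=False)
    rpc_copy = RequestContext.from_dict(ctxt.to_dict())
    admin_ctxt = rpc_copy.elevated()
    assert admin_ctxt.is_admin and 'admin' in admin_ctxt.roles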
|
py
|
1a566e725fe8aa198a20c06bbea7ecab796c779a
|
# import dependencies
from bs4 import BeautifulSoup as bs
from splinter import Browser
import os
import pandas as pd
import time
import requests
import urllib
from urllib.request import urlopen, urlretrieve
from urllib.parse import urljoin, urlsplit
from selenium import webdriver
from selenium.webdriver.firefox.firefox_binary import FirefoxBinary
#preparation steps:
import pymongo
#install flask
from flask import Flask, render_template
# Set up the Mongo connection (MongoDB Compass to Python)
conn = "mongodb://localhost:27017"  # the default port for MongoDB
client = pymongo.MongoClient(conn)  # databases are created lazily on first use
# connect to mongo db and collection
db = client.hemispheresDB
collection = db.collection
### NASA Mars News
##Connecting to Mars Space News Site site
url_space = "https://spacenews.com/segment/news"
# Retrieve page with the requests module
response = requests.get(url_space)
def scrape():
from webdriver_manager.chrome import ChromeDriverManager
executable_path = {'executable_path': ChromeDriverManager().install()}
browser = Browser('chrome', **executable_path, headless=True)
    # Create BeautifulSoup object; parse with 'lxml'
    soup = bs(response.text, 'lxml')
mars_dict = {}
#find the latest articles, search for a title
results = soup.find_all('div', class_='article-item__top')
for result in results:
title = result.find('a', class_='title').text
    # Extract the page <title> text and save it into a variable
    news_title = soup.title.text
mars_dict['a_title'] = news_title
paragraphs = soup.find_all("div", class_="article-meta")
for paragraph in paragraphs:
news_paragraph = paragraph.find('p', class_='post-excerpt').text
mars_dict['b_paragraph'] = news_paragraph
    # Reuse the browser session opened above
#Visit the url for the Featured Space Image site (https://spaceimages-mars.com), assign the url string to a variable
space_image = "https://spaceimages-mars.com"
browser.visit(space_image)
time.sleep(2)
    # Build the base url and parse the rendered page with BeautifulSoup
    base_url = "{0.scheme}://{0.netloc}/".format(urlsplit(space_image))
    html_image = browser.html
    soup = bs(html_image, "html.parser")
    # Resolve each image src against the site url and download it; the last
    # image found is the one recorded below
    for img in soup.find_all('img'):
        featured_image_url = urljoin(space_image, img['src'])
        file_name = img['src'].split('/')[-1]
        urlretrieve(featured_image_url, file_name)
    mars_dict['c_featured_image'] = featured_image_url
    mars_dict['d_featured_image_name'] = file_name
### Mars Facts
url_facts = 'https://galaxyfacts-mars.com/'
time.sleep(2)
table = pd.read_html(url_facts)
facts_table = table[0]
facts_table.columns = ["Description", "Mars", "Earth"]
facts_table.set_index("Description", inplace=True)
mars_dict["e_Mars_data_table"] = facts_table.to_html()
### Mars Hemispheres
    # Visit the url for the Mars Hemispheres site (https://marshemispheres.com/)
    hemisphere_images = "https://marshemispheres.com/"
    browser.visit(hemisphere_images)
    html_image = browser.html
    soup = bs(html_image, "html.parser")
    mars_hemispheres_list = []
    for i in range(4):  # one iteration per hemisphere
        time.sleep(5)  # pause so the page can load
# locate tag h3 (corresponding hemispheres)
images = browser.find_by_tag("h3")
# click on each image to get url
images[i].click()
# separate url
html = browser.html
soup = bs(html, "html.parser")
# search for HD image
url_hemisphere = soup.find("img", class_="wide-image")["src"]
# looking for image title
img_title = soup.find("h2",class_="title").text
# get image url
img_url = "https://marshemispheres.com/"+ url_hemisphere
# store the results into dictionary
dictionary={"title":img_title,"img_url":img_url}
        # append the dictionary to the mars hemispheres list
mars_hemispheres_list.append(dictionary)
browser.back()
mars_dict['f_Mars_hemispheres_list'] = mars_hemispheres_list
return mars_dict
|
py
|
1a566fba0617e69d9952b085fc0ac95e828b44c5
|
import numpy as np
def rnd_crc(radius=1, n=1):
    """Sample n points uniformly on a circle of the given radius, centred at the origin."""
    # Uniformly choose angles in [0, 2*pi) for each point
    theta = np.random.uniform(0, 2 * np.pi, n)
    # Convert angles to (x, y) coordinates on the circle; returns an (n, 2) array
    return np.vstack((np.cos(theta), np.sin(theta))).T * radius
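# Usage sketch (illustrative): draw three points on a circle of radius 2; each
# row is an (x, y) pair whose Euclidean norm equals the radius.
if __name__ == '__main__':
    pts = rnd_crc(radius=2.0, n=3)
    print(pts.shape)                    # (3, 2)
    print(np.linalg.norm(pts, axis=1))  # ~[2. 2. 2.]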
|
py
|
1a5671ec90962f92f91ca8f011ef9d1fb17e4071
|
# -*- coding: utf-8 -*-
"""
flask-rstblog
~~~~~~~~~~~~~
:copyright: (c) 2011 by Christoph Heer.
:license: BSD, see LICENSE for more details.
"""
import os
from datetime import date
from jinja2 import FileSystemLoader
from flask import Flask, render_template
from flaskrst.modules import manager
from flaskrst.templating import inject_navigation
class Flask(Flask):
def create_global_jinja_loader(self):
template_path = os.path.join(self.config.get('SOURCE', ''),
"_templates")
builtin_templates = os.path.join(self.root_path, self.template_folder)
return FileSystemLoader([template_path, builtin_templates])
def create_app(source=None, config=None):
app = Flask("flaskrst")
# Set default config values
app.config.setdefault('MODULES', {})
app.config.setdefault('STYLESHEETS', [])
app.config.setdefault('FEEDS', [])
# Load config
if config:
app.config.from_pyfile(config)
config_loaded = True
    # maybe a config file is declared via the environment
elif 'FLASK_RST_CONFIG' in os.environ:
app.config.from_envvar('FLASK_RST_CONFIG')
config_loaded = True
    # no config loaded yet; try again after the source path is set
else:
config_loaded = False
# Set source path
if source:
app.config['SOURCE'] = source
elif 'FLASK_RST_SOURCE' in os.environ:
app.config['SOURCE'] = os.environ['FLASK_RST_SOURCE']
else:
# Use current working directory as source
app.config['SOURCE'] = os.getcwd()
    # If no config was loaded yet, look for a config.py in the source path
if not config_loaded:
config_path = os.path.join(app.config['SOURCE'], 'config.py')
app.config.from_pyfile(config_path, silent=True)
# Set path of static folder
if 'STATIC_FOLDER' in app.config:
app.static_folder = app.config['STATIC_FOLDER']
else:
# Is a static folder called _static in source path?
source_static_folder = os.path.join(app.config['SOURCE'], "_static")
if os.path.isdir(source_static_folder):
app.static_folder = source_static_folder
# Load flask-rst modules
manager.init_app(app)
manager.load_from_config()
# Add some jinja globals and context processors
app.jinja_env.globals['date'] = date
app.context_processor(inject_navigation)
@app.errorhandler(404)
def page_not_found(error):
return render_template('404.html'), 404
return app
|
py
|
1a5671f685db4ebdb3686e91b46b0cdb6d947104
|
from .module import Module
from .linear import Linear
from .loss import BCEWithLogitsLoss
from .container import Sequential
from .rnn import RNN
|
py
|
1a5672a4637d9ae00c732fadf3f8b349ad8aaf2e
|
from copy import deepcopy
from typing import Optional
from warnings import warn
import numpy
from catsim import cat
from catsim.simulation import Estimator, Selector
from sklearn.linear_model import LogisticRegression
def _fit_log_reg(
items,
administered_items,
response_vector,
use_discriminations=True,
# use_guess_slip=True,
log_reg=None,
):
if log_reg is None:
log_reg = LogisticRegression(C=float("inf"))
X = items[administered_items][:, 1, numpy.newaxis]
sample_weight = None
if use_discriminations:
sample_weight = items[administered_items, 0]
# if use_guess_slip:
# response_vector = [
# slip if resp else guess
# for resp, (guess, slip) in zip(
# response_vector, items[administered_items, 2:4]
# )
# ]
log_reg.fit(X, response_vector, sample_weight=sample_weight)
return log_reg
"""
def _set_log_reg(mean, scale):
coef = 1 / scale
_log_reg.intercept_ = -mean * coef
_log_reg.coef_ = coef
"""
def _log_reg_scale(log_reg):
return -1 / log_reg.coef_[0, 0]
class LogisticEstimator(Estimator):
"""Estimator that uses a hill-climbing algorithm to maximize the likelihood function
:param precision: number of decimal points of precision
:param verbose: verbosity level of the maximization method
"""
def __init__(
self, use_discriminations=True # , return_scale=False, # , use_guess_slip=True
):
super().__init__()
self._use_discriminations = use_discriminations
# self._use_guess_slip = use_guess_slip
# self._return_scale = return_scale
def estimate(
self,
index: int = None,
items: numpy.ndarray = None,
administered_items: list = None,
response_vector: list = None,
est_theta: float = None,
**kwargs,
) -> float:
"""Returns the theta value that minimizes the negative log-likelihood function, given the current state of the
test for the given examinee.
:param index: index of the current examinee in the simulator
:param items: a matrix containing item parameters in the format that `catsim` understands
(see: :py:func:`catsim.cat.generate_item_bank`)
:param administered_items: a list containing the indexes of items that were already administered
:param response_vector: a boolean list containing the examinee's answers to the administered items
:param est_theta: a float containing the current estimated proficiency
:returns: the current :math:`\\hat\\theta`
"""
items, administered_items, response_vector, est_theta = self._prepare_args(
return_items=True,
return_response_vector=True,
return_est_theta=True,
index=index,
items=items,
administered_items=administered_items,
response_vector=response_vector,
est_theta=est_theta,
**kwargs,
)
assert items is not None
assert administered_items is not None
assert response_vector is not None
assert est_theta is not None
if len(set(response_vector)) == 1:
return cat.dodd(est_theta, items, response_vector[-1])
log_reg = _fit_log_reg(
items,
administered_items,
response_vector,
use_discriminations=self._use_discriminations,
# use_guess_slip=self._use_guess_slip,
)
# y = mx + c, max entropy when y = 0 => x = -c / m
theta = -log_reg.intercept_[0] / log_reg.coef_[0, 0]
return theta
# return theta, _log_reg_scale(log_reg)
def _all_future_scales(
log_reg, items, administered_items, response_vector, next_choice
):
res = numpy.zeros((items.shape[0],))
for item in items[:, 1].argsort():
log_reg = _fit_log_reg(
items,
administered_items + [item],
response_vector + [next_choice],
use_discriminations=True,
log_reg=log_reg,
)
scale = abs(_log_reg_scale(log_reg))
res[item] = scale
return res
class MinExpectedScaleSelector(Selector):
"""
Owens 1977,
"""
def select(
self,
index: int = None,
items: numpy.ndarray = None,
administered_items: list = None,
est_theta: float = None,
response_vector: list = None,
**kwargs,
) -> Optional[int]:
"""Returns the index of the next item to be administered.
:param index: the index of the current examinee in the simulator.
:param items: a matrix containing item parameters in the format that `catsim` understands
(see: :py:func:`catsim.cat.generate_item_bank`)
:param administered_items: a list containing the indexes of items that were already administered
:param est_theta: a float containing the current estimated proficiency
:returns: index of the next item to be applied or `None` if there are no more items in the item bank.
"""
items, administered_items, response_vector, est_theta = self._prepare_args(
return_items=True,
return_response_vector=True,
return_est_theta=True,
index=index,
items=items,
administered_items=administered_items,
response_vector=response_vector,
est_theta=est_theta,
**kwargs,
)
assert items is not None
assert administered_items is not None
assert response_vector is not None
assert est_theta is not None
def default():
# Fall back to max info
ordered_items = self._sort_by_info(items, est_theta)
valid_indexes = self._get_non_administered(
ordered_items, administered_items
)
return valid_indexes[0]
if len(administered_items) > 0 and len(set(response_vector)) >= 2:
log_reg = LogisticRegression(C=float("inf"), warm_start=True)
log_reg_before = _fit_log_reg(
items,
administered_items,
response_vector,
use_discriminations=True,
log_reg=log_reg,
)
if _log_reg_scale(log_reg_before) <= 0:
return default()
log_reg.tol = 0.05
neg_prob, pos_prob = log_reg_before.predict_proba(
items[:, 1, numpy.newaxis]
).T
else:
return default()
# TODO: Can instead use Dodd's like logic to find expected scale even when there is only one class
# min_theta = min(items[:, 1])
# max_theta = max(items[:, 1])
# _set_log_reg(
# est_theta, min(max_theta - est_theta, est_theta - min_theta)
# )
working_log_reg = deepcopy(log_reg)
false_scales = _all_future_scales(
working_log_reg, items, administered_items, response_vector, False
)
working_log_reg = deepcopy(log_reg)
true_scales = _all_future_scales(
working_log_reg, items, administered_items, response_vector, True
)
organized_items = [
x
for x in numpy.array(
[
pp * ts + np * fs
for np, pp, fs, ts in zip(
neg_prob, pos_prob, false_scales, true_scales
)
]
).argsort()
if x not in administered_items
]
if len(organized_items) == 0:
warn("There are no more items to apply.")
return None
return organized_items[0]
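# A self-contained sketch (illustrative) of the estimation idea used above: fit
# a logistic model of response vs. item difficulty, then read the proficiency
# estimate off the point of maximum entropy, theta = -intercept / coef.
if __name__ == '__main__':
    difficulties = numpy.array([[-1.0], [0.0], [1.0], [2.0]])
    responses = [True, True, False, False]
    lr = LogisticRegression(C=float("inf")).fit(difficulties, responses)
    theta_hat = -lr.intercept_[0] / lr.coef_[0, 0]
    print(theta_hat)  # falls between the hardest success (0.0) and easiest failure (1.0)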
|
py
|
1a567410b99303f292ad2af6a35618cf31f8c37c
|
# weather.py
'''
# Configuration
The weather module reads from the weather.yaml file stored in bobbit's
configuration directory and expects the following values:
default: This is the default zip code
'''
import logging
import re
import aiohttp.client_exceptions
# Metadata
NAME = 'weather'
ENABLE = True
USAGE = '''Usage: ![weather|forecast] <zipcode>
Given a zipcode, this returns the current weather or the daily forecast for
that location.
Examples:
> !weather # Default location
> !forecast 46556 # Specific zip code
'''
WEATHER_RX = r'^!weather\s*(?P<zipcode>\d{5})*$'
FORECAST_RX = r'^!forecast\s*(?P<zipcode>\d{5})*$'
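# Sketch (illustrative) of how the command regexes parse a message; the zipcode
# group is optional, so a bare "!weather" falls back to the channel default:
#   re.match(WEATHER_RX, '!weather 46556').group('zipcode')  -> '46556'
#   re.match(WEATHER_RX, '!weather').group('zipcode')        -> None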
# Constants
ZIPCODE = {
'#nd-cse' : 46556, # Notre Dame, IN
'#ndlug' : 46556, # Notre Dame, IN
'#lug' : 46556, # Notre Dame, IN
'#uwec-cs': 54702, # Eau Claire, WI
}
DEFAULT_ZIPCODE = None
WEATHER_GOV_URL = 'https://forecast.weather.gov'
# Functions
async def retrieve_weather_data(bot, zipcode):
url = WEATHER_GOV_URL + '/zipcity.php'
params = {
'inputstring': zipcode
}
async with bot.http_client.get(url, params=params) as response:
try:
text = await response.text()
xml_url = re.findall(r'<a href="(MapClick[^"]+dwml)"', text)[0]
json_url = WEATHER_GOV_URL + '/' + xml_url.replace('dwml', 'json')
logging.debug('JSON URL: %s', json_url)
except IndexError as e:
logging.warning('Unable to get weather data: %s', e)
return {}
async with bot.http_client.get(json_url) as response:
try:
return await response.json()
except aiohttp.client_exceptions.ContentTypeError:
            logging.warning('Unable to get weather data: %s', await response.text())
return {}
def get_location(data):
location = data['location']['areaDescription']
for prefix in re.findall(r'(\d+ Miles [ENSW]+)', location):
location = location.replace(prefix, '')
return location.strip()[:-3] + ", " + location.strip()[-2:]
# Commands
async def weather(bot, message, zipcode=None):
zipcode = zipcode or ZIPCODE.get(message.channel, DEFAULT_ZIPCODE)
data = await retrieve_weather_data(bot, zipcode)
if not data:
return message.with_body('No results')
location = get_location(data)
current = data['currentobservation']
return message.with_body(bot.client.format_text(
'{bold}Weather{bold} for {bold}{location}{bold}: {temp}°F, {weather}',
location = location,
temp = current['Temp'].strip(),
weather = current['Weather'].strip(),
))
async def forecast(bot, message, zipcode=None):
zipcode = zipcode or ZIPCODE.get(message.channel, DEFAULT_ZIPCODE)
data = await retrieve_weather_data(bot, zipcode)
if not data:
return message.with_body('No results')
location = get_location(data)
text = data['data']['text']
return message.with_body(bot.client.format_text(
'{bold}Forecast{bold} for {bold}{location}{bold}: {bold}Today{bold}: {today} {bold}Tonight{bold}: {tonight}',
location = location,
today = text[0].strip(),
tonight = text[1].strip(),
))
# Register
def register(bot):
global DEFAULT_ZIPCODE
config = bot.config.load_module_config('weather')
DEFAULT_ZIPCODE = config.get('default', ZIPCODE['#lug'])
return (
('command', WEATHER_RX , weather),
('command', FORECAST_RX, forecast),
)
# vim: set sts=4 sw=4 ts=8 expandtab ft=python:
|
py
|
1a56756eae5d39ae0d4a51b9237ec7192fad309b
|
#!/usr/bin/env python3
#
# Copyright (c) 2014, 2016, 2018, 2020 LexisNexis Risk Data Management Inc.
#
# This file is part of the RadSSH software package.
#
# RadSSH is free software, released under the Revised BSD License.
# You are permitted to use, modify, and redistribute this software
# according to the Revised BSD License, a copy of which should be
# included with the distribution as file LICENSE.txt
#
'''RadSSH setuptools interface'''
import sys
import os
import shutil
from setuptools import setup
from os import listdir
from os.path import isfile, join
import radssh
# Gather up all supplemental plugins from various directories
# and copy them into the core plugins directory prior to install
if not os.path.exists('radssh/plugins'):
os.mkdir('radssh/plugins')
for p, d, f in os.walk('radssh'):
for ignore in [subdir for subdir in d if not subdir.endswith('_plugins')]:
d.remove(ignore)
if p.endswith('_plugins'):
print('Merging plugins from %s' % p)
for plugin in f:
if not plugin.endswith('.pyc'):
shutil.copy2(os.path.join(p, plugin), 'radssh/plugins')
# Get list of non .py files in plugins directory to include as pkg_data
olddir = os.getcwd()
os.chdir('radssh')
pkg_data_files = [join('plugins', f) for f in listdir('plugins') if not f.endswith('.py') and isfile(join('plugins', f))]
os.chdir(olddir)
# Conditional requirements (colorama for Windows platform only)
required_packages = ['paramiko>=2.7.0', 'netaddr']
if sys.platform.startswith('win'):
required_packages.append('colorama>=0.3.9')
required_packages.append('pyreadline')
setup(name='radssh',
version=radssh.version,
description='RadSSH Module',
author=radssh.__author__,
author_email=radssh.__author_email__,
license='BSD',
keywords='ssh parallel paramiko',
url='https://github.com/radssh/radssh',
# See https://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers=[
'Development Status :: 5 - Production/Stable',
'Environment :: Console',
'Intended Audience :: System Administrators',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: POSIX',
'Operating System :: Microsoft :: Windows',
'Operating System :: MacOS :: MacOS X',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Topic :: System :: Shells',
'Topic :: Utilities'],
packages=['radssh', 'radssh.plugins'],
package_data={'': pkg_data_files},
install_requires=required_packages,
long_description='''
RadSSH Package
==============
RadSSH is a Python package that is built with Paramiko.
Documentation for the project is hosted on ReadTheDocs, at http://radssh.readthedocs.org
Frequently Asked Questions: https://github.com/radssh/radssh/blob/master/FAQ.md
RadSSH is installable via **pip**, using "**pip install radssh**".
----
The RadSSH shell behaves similar to a normal ssh command line client, but instead of connecting to one host (at a time), you can connect to dozens or even hundreds at a time, and issue interactive command lines to all hosts at once. It requires very little learning curve to get started, and leverages on existing command line syntax that you already know. ::
[paul@localhost ~]$ python -m radssh.shell huey dewey louie
Please enter a password for (paul) :
Connecting to 3 hosts...
...
RadSSH $ hostname
[huey] huey.example.org
[dewey] dewey.example.org
[louie] louie.example.org
Average completion time for 3 hosts: 0.058988s
RadSSH $ uptime
[huey] 15:21:28 up 6 days, 22:49, 17 users, load average: 0.30, 0.43, 0.39
[louie] 15:43 up 652 days, 4:59, 0 users, load averages: 0.44 0.20 0.17
[dewey] 15:21:28 up 109 days, 23:28, 3 users, load average: 0.27, 0.09, 0.07
Average completion time for 3 hosts: 0.044532s
RadSSH $ df -h /
[huey] Filesystem Size Used Avail Use% Mounted on
[huey] /dev/mapper/vg-Scientific
[huey] 24G 22G 694M 97% /
[louie] Filesystem Size Used Avail Capacity Mounted on
[louie] /dev/disk0s3 234G 134G 99G 57% /
[dewey] Filesystem Size Used Avail Use% Mounted on
[dewey] /dev/mapper/vg_pkapp745-LogVol00 20G 17G 2.1G 89% /
Average completion time for 3 hosts: 0.036792s
RadSSH $ *exit
Shell exiting
RadSSH includes a loadable plugin facility to extend the functionality of the shell with basic Python scripting, as well as a high level API that can be used to build stand alone applications for dedicated SSH control processing in a parallel environment.
Interested in more?
* Download at https://pypi.python.org/pypi/radssh
* Read the Docs at http://radssh.readthedocs.org/en/latest/index.html
* Participate at https://github.com/radssh/radssh
''',
)
|
py
|
1a5676e066e48adf1b8084a7cfb285a72c4dfef5
|
# --*-- coding:utf-8 --*--
"""
Extract IPs for scanning and for checking against databases
"""
import pika
from settings import *
class BaseMqProducer(object):
def __init__(self):
credentials = pika.PlainCredentials(MQ_USR, MQ_PWD)
conn_params = pika.ConnectionParameters(MQ_HOST, MQ_PORT, credentials=credentials)
self.conn_broker = pika.BlockingConnection(conn_params)
self.channel = self.conn_broker.channel()
def close(self):
self.conn_broker.close()
class ScanProducer(BaseMqProducer):
def __init__(self):
super(ScanProducer, self).__init__()
pass
class CheckMqProducer(BaseMqProducer):
def __init__(self):
super(CheckMqProducer, self).__init__()
pass
# # Interact with the broker through this channel
# channel.exchange_declare(exchange="hello-exchange",
# exchange_type="direct",
# passive=False,
# durable=True,
# auto_delete=False
# )
#
# for item in range(10000):
# msg_props = pika.BasicProperties()
# msg_props.content_type = "text/plain"
#
# channel.basic_publish(body=str(item),
# exchange="hello-exchange",
# properties=msg_props,
# routing_key="hola"
# )
#
#
# conn_broker.close()
|
py
|
1a5678ff882ca91f4c4656679b8722fb7dbdf330
|
################################################################################
# Module: __init__.py
# Description: OSMnx - Retrieve, construct, analyze, and visualize street
# networks from OpenStreetMap
# License: MIT, see full license in LICENSE.txt
# Web: https://github.com/gboeing/osmnx
################################################################################
from .buildings import *
from .core import *
from .elevation import *
from .footprints import *
from .plot import *
from .pois import *
from .projection import *
from .save_load import *
from .simplify import *
from .stats import *
from .utils import *
__version__ = '0.9'
|
py
|
1a5679414067eddafd708399d092a895d5c01928
|
'''
The actual saltkey functional code
'''
# Import python modules
import os
import shutil
import sys
import logging
# Import salt modules
import salt.crypt
import salt.output
import salt.utils
import salt.utils.event
log = logging.getLogger(__name__)
class Key(object):
'''
The object that encapsulates saltkey actions
'''
def __init__(self, opts):
self.opts = opts
self.event = salt.utils.event.SaltEvent(opts['sock_dir'], 'master')
self.colors = salt.utils.get_colors(
not bool(self.opts.get('no_color', False))
)
def _keys(self, key_type, full_path=False):
'''
Safely return the names of the unaccepted keys, pass True to return
the full key paths. Returns a set.
'''
ret = set()
subdir = ''
if key_type == 'pre':
subdir = 'minions_pre'
elif key_type == 'rej':
subdir = 'minions_rejected'
elif key_type == 'acc':
subdir = 'minions'
dir_ = os.path.join(self.opts['pki_dir'], subdir)
if not os.path.isdir(dir_):
err = ('The {0} directory is not present, ensure that '
'the master server has been started').format(subdir)
self._log(err, level='error')
sys.exit(42)
keys = os.listdir(dir_)
if full_path:
for key in keys:
ret.add(os.path.join(dir_, key))
else:
ret = set(keys)
return ret
def _log(self, message, level=''):
if hasattr(log, level):
log_msg = getattr(log, level)
log_msg(message)
if not self.opts['quiet']:
print(message)
def _list_pre(self, header=True, printer=None):
'''
List the unaccepted keys
'''
        if header:
self._log('{0}Unaccepted Keys:{1}'.format(
self.colors['LIGHT_RED'], self.colors['ENDC']
))
keys = self._keys('pre')
if printer is None:
for key in sorted(keys):
output = '{0}{1}{2}'.format(
self.colors['RED'],
key,
self.colors['ENDC']
)
self._log(output)
else:
printer(list(keys))
def _list_accepted(self, header=True, printer=None):
'''
List the accepted public keys
'''
        if header:
self._log('{0}Accepted Keys:{1}'.format(
self.colors['LIGHT_GREEN'], self.colors['ENDC']
))
keys = self._keys('acc')
if printer is None:
for key in sorted(keys):
self._log('{0}{1}{2}'.format(
self.colors['GREEN'], key, self.colors['ENDC']
))
else:
printer(list(keys))
def _list_rejected(self, header=True, printer=None):
'''
List the unaccepted keys
'''
        if header:
self._log('{0}Rejected:{1}'.format(
self.colors['LIGHT_BLUE'], self.colors['ENDC']
))
keys = self._keys('rej')
if printer is None:
for key in sorted(keys):
self._log('{0}{1}{2}'.format(
self.colors['BLUE'], key, self.colors['ENDC']
))
else:
printer(list(keys))
def _list(self, name):
'''
List keys
'''
printout = self._get_outputter()
if 'json_out' in self.opts and self.opts['json_out']:
printout.indent = 2
if name in ('pre', 'un', 'unaccept', 'unaccepted'):
self._list_pre(header=False, printer=printout)
elif name in ('acc', 'accept', 'accepted'):
self._list_accepted(header=False, printer=printout)
elif name in ('rej', 'reject', 'rejected'):
self._list_rejected(header=False, printer=printout)
elif name in ('all',):
if printout is not None:
keys = {
'rejected': list(self._keys('rej')),
'accepted': list(self._keys('acc')),
'unaccepted': list(self._keys('pre')),
}
printout(keys)
else:
self._list_pre(printer=printout)
self._list_accepted(printer=printout)
self._list_rejected(printer=printout)
else:
err = ('Unrecognized key type "{0}". Run with -h for options.'
).format(name)
self._log(err, level='error')
def _get_outputter(self):
get_outputter = salt.output.get_outputter
if self.opts['raw_out']:
printout = get_outputter('raw')
elif self.opts['json_out']:
printout = get_outputter('json')
elif self.opts['yaml_out']:
printout = get_outputter('yaml')
else:
printout = None # use default color output
return printout
def _print_key(self, name):
'''
Print out the specified public key
'''
keys = self._keys('pre', True).union(self._keys('acc', True))
for key in sorted(keys):
if key.endswith(name):
with open(key, 'r') as kfn:
self._log(kfn.read())
def _print_all(self):
'''
        Print out all of the public keys
'''
self._log('{0}Unaccepted keys:{1}'.format(
self.colors['LIGHT_RED'], self.colors['ENDC']
))
for key in sorted(self._keys('pre', True)):
self._log(' {0}{1}{2}'.format(
self.colors['RED'],
os.path.basename(key),
self.colors['ENDC']
))
with open(key, 'r') as kfn:
self._log(kfn.read())
self._log('{0}Accepted keys:{1}'.format(
self.colors['LIGHT_GREEN'], self.colors['ENDC']
))
for key in sorted(self._keys('acc', True)):
self._log(' {0}{1}{2}'.format(
self.colors['GREEN'],
os.path.basename(key),
self.colors['ENDC']
))
with open(key, 'r') as kfn:
self._log(kfn.read())
self._log('{0}Rejected keys:{1}'.format(
self.colors['LIGHT_BLUE'], self.colors['ENDC']
))
        for key in sorted(self._keys('rej', True)):
self._log(' {0}{1}{2}'.format(
self.colors['BLUE'],
os.path.basename(key),
self.colors['ENDC']))
with open(key, 'r') as kfn:
self._log(kfn.read())
def _accept(self, key):
'''
Accept a specified host's public key
'''
(minions_accepted,
minions_pre,
minions_rejected) = self._check_minions_directories()
pre = os.listdir(minions_pre)
if key not in pre:
err = ('The key named {0} does not exist, please accept an '
'available key').format(key)
#log.error(err)
self._log(err, level='error')
sys.exit(43)
shutil.move(os.path.join(minions_pre, key),
os.path.join(minions_accepted, key))
eload = {'result': True,
'act': 'accept',
'id': key}
self.event.fire_event(eload, 'key')
self._log(
'Key for {0} accepted.'.format(key),
level='info'
)
def _accept_all(self):
'''
Accept all keys in pre
'''
(minions_accepted,
minions_pre,
minions_rejected) = self._check_minions_directories()
for key in os.listdir(minions_pre):
self._accept(key)
def _delete_key(self, delete=None):
'''
Delete a key
'''
(minions_accepted,
minions_pre,
minions_rejected) = self._check_minions_directories()
if delete is None:
delete = self.opts['delete']
pre = os.path.join(minions_pre, delete)
acc = os.path.join(minions_accepted, delete)
rej = os.path.join(minions_rejected, delete)
if os.path.exists(pre):
os.remove(pre)
self._log('Removed pending key {0}'.format(delete),
level='info')
if os.path.exists(acc):
os.remove(acc)
self._log('Removed accepted key {0}'.format(delete),
level='info')
if os.path.exists(rej):
os.remove(rej)
self._log('Removed rejected key {0}'.format(delete),
level='info')
def _delete_all(self):
'''
Delete all keys
'''
for dir in ("acc", "rej", "pre"):
for key in self._keys(dir):
self._delete_key(key)
def _reject(self, key):
'''
Reject a specified host's public key
'''
(minions_accepted,
minions_pre,
minions_rejected) = self._check_minions_directories()
pre = os.listdir(minions_pre)
if key not in pre:
err = ('The host named {0} is unavailable, please accept an '
'available key').format(key)
self._log(err, level='error')
sys.exit(43)
shutil.move(os.path.join(minions_pre, key),
os.path.join(minions_rejected, key))
self._log('{0} key rejected.'.format(key), level='info')
def _reject_all(self):
'''
Reject all keys in pre
'''
(minions_accepted,
minions_pre,
minions_rejected) = self._check_minions_directories()
for key in os.listdir(minions_pre):
self._reject(key)
def _check_minions_directories(self):
minions_accepted = os.path.join(self.opts['pki_dir'], 'minions')
minions_pre = os.path.join(self.opts['pki_dir'], 'minions_pre')
minions_rejected = os.path.join(self.opts['pki_dir'],
'minions_rejected')
for dir_ in [minions_accepted, minions_pre, minions_rejected]:
if not os.path.isdir(dir_):
err = ('The minions directory {0} is not present, ensure '
'that the master server has been started'.format(dir_))
self._log(err, level='error')
sys.exit(42)
return minions_accepted, minions_pre, minions_rejected
def run(self):
'''
Run the logic for saltkey
'''
if self.opts['gen_keys']:
salt.crypt.gen_keys(
self.opts['gen_keys_dir'],
self.opts['gen_keys'],
self.opts['keysize'])
return
if self.opts['list']:
self._list(self.opts['list'])
elif self.opts['list_all']:
self._list('all')
elif self.opts['print']:
self._print_key(self.opts['print'])
elif self.opts['print_all']:
self._print_all()
elif self.opts['accept']:
self._accept(self.opts['accept'])
elif self.opts['accept_all']:
self._accept_all()
elif self.opts['reject']:
self._reject(self.opts['reject'])
elif self.opts['reject_all']:
self._reject_all()
elif self.opts['delete']:
self._delete_key()
elif self.opts['delete_all']:
self._delete_all()
else:
self._list('all')
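# Usage sketch (illustrative; values are hypothetical): saltkey is normally
# driven by the salt-key CLI, which assembles an opts dict with the pki/socket
# paths plus every action flag that run() checks (gen_keys, list, list_all,
# print, print_all, accept, accept_all, reject, reject_all, delete,
# delete_all), then calls run(). A partial example:
# opts = {'pki_dir': '/etc/salt/pki/master', 'sock_dir': '/var/run/salt',
#         'no_color': False, 'quiet': False, 'list_all': True}
# Key(opts).run()  # would list accepted, unaccepted and rejected keys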
|
py
|
1a567a543a7c3f8a4f361badc8f9bf8fa170e277
|
# Copyright (c) AIRBUS and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import annotations
import random
from enum import Enum
from typing import Any, Dict, List, Optional, Union
import numpy as np
from skdecide.discrete_optimization.generic_tools.do_mutation import Mutation
from skdecide.discrete_optimization.generic_tools.ea.deap_wrappers import (
generic_mutate_wrapper,
)
from skdecide.discrete_optimization.generic_tools.ea.ga import (
DeapCrossover,
DeapMutation,
DeapSelection,
ObjectiveHandling,
)
from skdecide.discrete_optimization.generic_tools.result_storage.result_storage import (
ResultStorage,
)
class ParametersGa:
mutation: Union[Mutation, DeapMutation] = None
crossover: DeapCrossover = None
selection: DeapSelection = None
encoding: str = None
objective_handling: ObjectiveHandling = None
objectives: Union[str, List[str]] = None
objective_weights: List[float] = None
pop_size: int = None
max_evals: int = None
mut_rate: float = None
crossover_rate: float = None
tournament_size: float = None
deap_verbose: bool = False
def __init__(
self,
mutation,
crossover,
selection,
encoding,
objective_handling,
objectives,
objective_weights,
pop_size,
max_evals,
mut_rate,
crossover_rate,
tournament_size,
deap_verbose,
):
self.mutation = mutation
self.crossover = crossover
self.selection = selection
self.encoding = encoding
self.objective_handling = objective_handling
self.objectives = objectives
self.objective_weights = objective_weights
self.pop_size = pop_size
self.max_evals = max_evals
self.mut_rate = mut_rate
self.crossover_rate = crossover_rate
self.tournament_size = tournament_size
self.deap_verbose = deap_verbose
@staticmethod
def default_rcpsp():
return ParametersGa(
mutation=DeapMutation.MUT_SHUFFLE_INDEXES,
crossover=DeapCrossover.CX_PARTIALY_MATCHED,
selection=DeapSelection.SEL_TOURNAMENT,
encoding="rcpsp_permutation",
objective_handling=ObjectiveHandling.AGGREGATE,
objectives=["makespan"],
objective_weights=[-1],
pop_size=100,
max_evals=10000,
mut_rate=0.1,
crossover_rate=0.9,
tournament_size=5,
deap_verbose=False,
)
class ParametersAltGa:
mutations: List[Union[Mutation, DeapMutation]] = None
crossovers: List[DeapCrossover] = None
selection: DeapSelection = None
encodings: List[str] = None
objective_handling: ObjectiveHandling = None
objectives: Union[str, List[str]] = None
objective_weights: List[float] = None
pop_size: int = None
max_evals: int = None
mut_rate: float = None
crossover_rate: float = None
tournament_size: float = None
deap_verbose: bool = False
sub_evals: List[int] = None
def __init__(
self,
mutations,
crossovers,
selection,
encodings,
objective_handling,
objectives,
objective_weights,
pop_size,
max_evals,
mut_rate,
crossover_rate,
tournament_size,
deap_verbose,
sub_evals,
):
self.mutations = mutations
self.crossovers = crossovers
self.selection = selection
self.encodings = encodings
self.objective_handling = objective_handling
self.objectives = objectives
self.objective_weights = objective_weights
self.pop_size = pop_size
self.max_evals = max_evals
self.mut_rate = mut_rate
self.crossover_rate = crossover_rate
self.tournament_size = tournament_size
self.deap_verbose = deap_verbose
self.sub_evals = sub_evals
@staticmethod
def default_mrcpsp():
return ParametersAltGa(
mutations=[DeapMutation.MUT_UNIFORM_INT, DeapMutation.MUT_SHUFFLE_INDEXES],
crossovers=[DeapCrossover.CX_ONE_POINT, DeapCrossover.CX_PARTIALY_MATCHED],
selection=DeapSelection.SEL_TOURNAMENT,
encodings=["rcpsp_modes_arrity_fix", "rcpsp_permutation"],
objective_handling=ObjectiveHandling.AGGREGATE,
objectives=["makespan"],
objective_weights=[-1],
pop_size=100,
max_evals=10000,
mut_rate=0.1,
crossover_rate=0.9,
tournament_size=5,
deap_verbose=False,
sub_evals=[1000, 1000],
)
@staticmethod
def default_msrcpsp():
return ParametersAltGa(
mutations=[
DeapMutation.MUT_UNIFORM_INT,
DeapMutation.MUT_SHUFFLE_INDEXES,
DeapMutation.MUT_SHUFFLE_INDEXES,
],
crossovers=[
DeapCrossover.CX_ONE_POINT,
DeapCrossover.CX_PARTIALY_MATCHED,
DeapCrossover.CX_PARTIALY_MATCHED,
],
selection=DeapSelection.SEL_TOURNAMENT,
# encodings=['modes_arrity_fix', 'priority_list_task', 'priority_worker_per_task_perm'],
encodings=[
"modes_arrity_fix_from_0",
"priority_list_task",
"priority_worker_per_task_perm",
],
objective_handling=ObjectiveHandling.AGGREGATE,
objectives=["makespan"],
objective_weights=[-1],
pop_size=100,
max_evals=10000,
mut_rate=0.1,
crossover_rate=0.9,
tournament_size=5,
deap_verbose=False,
sub_evals=[500, 500, 500],
)
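# Usage sketch (illustrative): the static constructors return ready-made
# parameter bundles that a GA solver can consume directly.
if __name__ == '__main__':
    params = ParametersAltGa.default_mrcpsp()
    print(params.encodings, params.pop_size, params.sub_evals)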
|
py
|
1a567a91a706b60d3b808a3e253b91090c558cf6
|
#!/usr/bin/python
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: azure
short_description: create or terminate a virtual machine in azure
description:
- Creates or terminates azure instances. When created optionally waits for it to be 'running'.
version_added: "1.7"
options:
name:
description:
- name of the virtual machine and associated cloud service.
required: true
default: null
location:
description:
- the azure location to use (e.g. 'East US')
required: true
default: null
subscription_id:
description:
- azure subscription id. Overrides the AZURE_SUBSCRIPTION_ID environment variable.
required: false
default: null
management_cert_path:
description:
- path to an azure management certificate associated with the subscription id. Overrides the AZURE_CERT_PATH environment variable.
required: false
default: null
storage_account:
description:
- the azure storage account in which to store the data disks.
required: true
image:
description:
- system image for creating the virtual machine
(e.g., b39f27a8b8c64d52b05eac6a62ebad85__Ubuntu_DAILY_BUILD-precise-12_04_3-LTS-amd64-server-20131205-en-us-30GB)
required: true
default: null
role_size:
description:
- azure role size for the new virtual machine (e.g., Small, ExtraLarge, A6). You have to pay attention to the fact that instances of
type G and DS are not available in all regions (locations). Make sure if you selected the size and type of instance available in your chosen location.
required: false
default: Small
endpoints:
description:
- a comma-separated list of TCP ports to expose on the virtual machine (e.g., "22,80")
required: false
default: 22
user:
description:
- the unix username for the new virtual machine.
required: false
default: null
password:
description:
- the unix password for the new virtual machine.
required: false
default: null
ssh_cert_path:
description:
- path to an X509 certificate containing the public ssh key to install in the virtual machine.
See http://www.windowsazure.com/en-us/manage/linux/tutorials/intro-to-linux/ for more details.
- if this option is specified, password-based ssh authentication will be disabled.
required: false
default: null
virtual_network_name:
description:
- Name of virtual network.
required: false
default: null
hostname:
description:
- hostname to write /etc/hostname. Defaults to <name>.cloudapp.net.
required: false
default: null
wait:
description:
- wait for the instance to be in state 'running' before returning
required: false
default: "no"
choices: [ "yes", "no" ]
aliases: []
wait_timeout:
description:
- how long before wait gives up, in seconds
default: 600
aliases: []
wait_timeout_redirects:
description:
- how long before wait gives up for redirects, in seconds
default: 300
aliases: []
state:
description:
- create or terminate instances
required: false
default: 'present'
aliases: []
auto_updates:
description:
- Enable Auto Updates on Windows Machines
required: false
version_added: "2.0"
default: "no"
choices: [ "yes", "no" ]
enable_winrm:
description:
- Enable winrm on Windows Machines
required: false
version_added: "2.0"
default: "yes"
choices: [ "yes", "no" ]
os_type:
description:
- The type of the os that is gettings provisioned
required: false
version_added: "2.0"
default: "linux"
choices: [ "windows", "linux" ]
requirements:
- "python >= 2.6"
- "azure >= 0.7.1"
author: "John Whitbeck (@jwhitbeck)"
'''
EXAMPLES = '''
# Note: None of these examples set subscription_id or management_cert_path
# It is assumed that their matching environment variables are set.
- name: Provision virtual machine example
azure:
name: my-virtual-machine
role_size: Small
image: b39f27a8b8c64d52b05eac6a62ebad85__Ubuntu_DAILY_BUILD-precise-12_04_3-LTS-amd64-server-20131205-en-us-30GB
location: East US
user: ubuntu
ssh_cert_path: /path/to/azure_x509_cert.pem
storage_account: my-storage-account
wait: True
state: present
delegate_to: localhost
- name: Terminate virtual machine example
azure:
name: my-virtual-machine
state: absent
delegate_to: localhost
- name: Create windows machine
azure:
name: ben-Winows-23
hostname: win123
os_type: windows
enable_winrm: True
subscription_id: '{{ azure_sub_id }}'
management_cert_path: '{{ azure_cert_path }}'
role_size: Small
image: bd507d3a70934695bc2128e3e5a255ba__RightImage-Windows-2012-x64-v13.5
location: East Asia
password: xxx
storage_account: benooytes
user: admin
wait: True
state: present
virtual_network_name: '{{ vnet_name }}'
delegate_to: localhost
'''
import base64
import datetime
import os
import signal
import time
from urlparse import urlparse
from ansible.module_utils.facts import * # TimeoutError
AZURE_LOCATIONS = ['South Central US',
'Central US',
'East US 2',
'East US',
'West US',
'North Central US',
'North Europe',
'West Europe',
'East Asia',
'Southeast Asia',
'Japan West',
'Japan East',
'Brazil South']
AZURE_ROLE_SIZES = ['ExtraSmall',
'Small',
'Medium',
'Large',
'ExtraLarge',
'A5',
'A6',
'A7',
'A8',
'A9',
'Basic_A0',
'Basic_A1',
'Basic_A2',
'Basic_A3',
'Basic_A4',
'Standard_D1',
'Standard_D2',
'Standard_D3',
'Standard_D4',
'Standard_D11',
'Standard_D12',
'Standard_D13',
'Standard_D14',
'Standard_D1_v2',
'Standard_D2_v2',
'Standard_D3_v2',
'Standard_D4_v2',
'Standard_D5_v2',
'Standard_D11_v2',
'Standard_D12_v2',
'Standard_D13_v2',
'Standard_D14_v2',
'Standard_DS1',
'Standard_DS2',
'Standard_DS3',
'Standard_DS4',
'Standard_DS11',
'Standard_DS12',
'Standard_DS13',
'Standard_DS14',
'Standard_G1',
'Standard_G2',
'Standard_G3',
'Standard_G4',
'Standard_G5']
from distutils.version import LooseVersion
try:
import azure as windows_azure
if hasattr(windows_azure, '__version__') and LooseVersion(windows_azure.__version__) <= "0.11.1":
from azure import WindowsAzureError as AzureException
from azure import WindowsAzureMissingResourceError as AzureMissingException
else:
from azure.common import AzureException as AzureException
from azure.common import AzureMissingResourceHttpError as AzureMissingException
from azure.servicemanagement import (ServiceManagementService, OSVirtualHardDisk, SSH, PublicKeys,
PublicKey, LinuxConfigurationSet, ConfigurationSetInputEndpoints,
ConfigurationSetInputEndpoint, Listener, WindowsConfigurationSet)
HAS_AZURE = True
except ImportError:
HAS_AZURE = False
from types import MethodType
import json
def _wait_for_completion(azure, promise, wait_timeout, msg):
if not promise:
return
wait_timeout = time.time() + wait_timeout
while wait_timeout > time.time():
operation_result = azure.get_operation_status(promise.request_id)
time.sleep(5)
if operation_result.status == "Succeeded":
return
raise AzureException('Timed out waiting for async operation ' + msg + ' "' + str(promise.request_id) + '" to complete.')
def _delete_disks_when_detached(azure, wait_timeout, disk_names):
def _handle_timeout(signum, frame):
raise TimeoutError("Timeout reached while waiting for disks to become detached.")
signal.signal(signal.SIGALRM, _handle_timeout)
signal.alarm(wait_timeout)
try:
while len(disk_names) > 0:
for disk_name in disk_names:
disk = azure.get_disk(disk_name)
if disk.attached_to is None:
azure.delete_disk(disk.name, True)
disk_names.remove(disk_name)
except AzureException as e:
module.fail_json(msg="failed to get or delete disk %s, error was: %s" % (disk_name, str(e)))
finally:
signal.alarm(0)
def get_ssh_certificate_tokens(module, ssh_cert_path):
"""
Returns the sha1 fingerprint and a base64-encoded PKCS12 version of the certificate.
"""
# This returns a string such as SHA1 Fingerprint=88:60:0B:13:A9:14:47:DA:4E:19:10:7D:34:92:2B:DF:A1:7D:CA:FF
rc, stdout, stderr = module.run_command(['openssl', 'x509', '-in', ssh_cert_path, '-fingerprint', '-noout'])
if rc != 0:
module.fail_json(msg="failed to generate the key fingerprint, error was: %s" % stderr)
fingerprint = stdout.strip()[17:].replace(':', '')
rc, stdout, stderr = module.run_command(['openssl', 'pkcs12', '-export', '-in', ssh_cert_path, '-nokeys', '-password', 'pass:'])
if rc != 0:
module.fail_json(msg="failed to generate the pkcs12 signature from the certificate, error was: %s" % stderr)
pkcs12_base64 = base64.b64encode(stdout.strip())
return (fingerprint, pkcs12_base64)
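# Worked sketch (illustrative) of the fingerprint parsing above: openssl prints
# a line like 'SHA1 Fingerprint=88:60:0B:...'; 'SHA1 Fingerprint=' is 17
# characters, so stdout.strip()[17:] keeps the hex pairs and replace(':', '')
# joins them:
#   'SHA1 Fingerprint=88:60:0B'.strip()[17:].replace(':', '')  -> '88600B'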
def create_virtual_machine(module, azure):
"""
Create new virtual machine
module : AnsibleModule object
azure: authenticated azure ServiceManagementService object
Returns:
True if a new virtual machine and/or cloud service was created, false otherwise
"""
name = module.params.get('name')
os_type = module.params.get('os_type')
hostname = module.params.get('hostname') or name + ".cloudapp.net"
endpoints = module.params.get('endpoints').split(',')
ssh_cert_path = module.params.get('ssh_cert_path')
user = module.params.get('user')
password = module.params.get('password')
location = module.params.get('location')
role_size = module.params.get('role_size')
storage_account = module.params.get('storage_account')
image = module.params.get('image')
virtual_network_name = module.params.get('virtual_network_name')
wait = module.params.get('wait')
wait_timeout = int(module.params.get('wait_timeout'))
changed = False
# Check if a deployment with the same name already exists
cloud_service_name_available = azure.check_hosted_service_name_availability(name)
if cloud_service_name_available.result:
# cloud service does not exist; create it
try:
result = azure.create_hosted_service(service_name=name, label=name, location=location)
_wait_for_completion(azure, result, wait_timeout, "create_hosted_service")
changed = True
except AzureException as e:
module.fail_json(msg="failed to create the new service, error was: %s" % str(e))
try:
# check to see if a vm with this name exists; if so, do nothing
azure.get_role(name, name, name)
except AzureMissingException:
# vm does not exist; create it
if os_type == 'linux':
# Create linux configuration
disable_ssh_password_authentication = not password
vm_config = LinuxConfigurationSet(hostname, user, password, disable_ssh_password_authentication)
else:
#Create Windows Config
vm_config = WindowsConfigurationSet(hostname, password, None, module.params.get('auto_updates'), None, user)
vm_config.domain_join = None
if module.params.get('enable_winrm'):
listener = Listener('Http')
vm_config.win_rm.listeners.listeners.append(listener)
else:
vm_config.win_rm = None
# Add ssh certificates if specified
if ssh_cert_path:
fingerprint, pkcs12_base64 = get_ssh_certificate_tokens(module, ssh_cert_path)
# Add certificate to cloud service
result = azure.add_service_certificate(name, pkcs12_base64, 'pfx', '')
_wait_for_completion(azure, result, wait_timeout, "add_service_certificate")
# Create ssh config
ssh_config = SSH()
ssh_config.public_keys = PublicKeys()
authorized_keys_path = u'/home/%s/.ssh/authorized_keys' % user
ssh_config.public_keys.public_keys.append(PublicKey(path=authorized_keys_path, fingerprint=fingerprint))
# Append ssh config to linux machine config
vm_config.ssh = ssh_config
# Create network configuration
network_config = ConfigurationSetInputEndpoints()
network_config.configuration_set_type = 'NetworkConfiguration'
network_config.subnet_names = []
network_config.public_ips = None
for port in endpoints:
network_config.input_endpoints.append(ConfigurationSetInputEndpoint(name='TCP-%s' % port,
protocol='TCP',
port=port,
local_port=port))
# First determine where to store disk
today = datetime.date.today().strftime('%Y-%m-%d')
disk_prefix = u'%s-%s' % (name, name)
media_link = u'http://%s.blob.core.windows.net/vhds/%s-%s.vhd' % (storage_account, disk_prefix, today)
# Create system hard disk
os_hd = OSVirtualHardDisk(image, media_link)
# Spin up virtual machine
try:
result = azure.create_virtual_machine_deployment(service_name=name,
deployment_name=name,
deployment_slot='production',
label=name,
role_name=name,
system_config=vm_config,
network_config=network_config,
os_virtual_hard_disk=os_hd,
role_size=role_size,
role_type='PersistentVMRole',
virtual_network_name=virtual_network_name)
_wait_for_completion(azure, result, wait_timeout, "create_virtual_machine_deployment")
changed = True
except AzureException as e:
module.fail_json(msg="failed to create the new virtual machine, error was: %s" % str(e))
try:
deployment = azure.get_deployment_by_name(service_name=name, deployment_name=name)
return (changed, urlparse(deployment.url).hostname, deployment)
except AzureException as e:
module.fail_json(msg="failed to lookup the deployment information for %s, error was: %s" % (name, str(e)))
def terminate_virtual_machine(module, azure):
"""
Terminates a virtual machine
module : AnsibleModule object
azure: authenticated azure ServiceManagementService object
Returns:
True if a new virtual machine was deleted, false otherwise
"""
# Whether to wait for termination to complete before returning
wait = module.params.get('wait')
wait_timeout = int(module.params.get('wait_timeout'))
name = module.params.get('name')
delete_empty_services = module.params.get('delete_empty_services')
changed = False
deployment = None
public_dns_name = None
disk_names = []
try:
deployment = azure.get_deployment_by_name(service_name=name, deployment_name=name)
except AzureMissingException as e:
pass # no such deployment or service
except AzureException as e:
module.fail_json(msg="failed to find the deployment, error was: %s" % str(e))
# Delete deployment
if deployment:
changed = True
try:
# gather disk info
results = []
for role in deployment.role_list:
role_props = azure.get_role(name, deployment.name, role.role_name)
if role_props.os_virtual_hard_disk.disk_name not in disk_names:
disk_names.append(role_props.os_virtual_hard_disk.disk_name)
except AzureException as e:
module.fail_json(msg="failed to get the role %s, error was: %s" % (role.role_name, str(e)))
try:
result = azure.delete_deployment(name, deployment.name)
_wait_for_completion(azure, result, wait_timeout, "delete_deployment")
except AzureException as e:
module.fail_json(msg="failed to delete the deployment %s, error was: %s" % (deployment.name, str(e)))
# It's unclear when disks associated with terminated deployment get detached.
# Thus, until the wait_timeout is reached, we continue to delete disks as they
# become detached by polling the list of remaining disks and examining the state.
try:
_delete_disks_when_detached(azure, wait_timeout, disk_names)
except (AzureException, TimeoutError) as e:
module.fail_json(msg=str(e))
try:
# Now that the vm is deleted, remove the cloud service
result = azure.delete_hosted_service(service_name=name)
_wait_for_completion(azure, result, wait_timeout, "delete_hosted_service")
except AzureException as e:
module.fail_json(msg="failed to delete the service %s, error was: %s" % (name, str(e)))
public_dns_name = urlparse(deployment.url).hostname
return changed, public_dns_name, deployment
def get_azure_creds(module):
# Check module args for credentials, then check environment vars
subscription_id = module.params.get('subscription_id')
if not subscription_id:
subscription_id = os.environ.get('AZURE_SUBSCRIPTION_ID', None)
if not subscription_id:
module.fail_json(msg="No subscription_id provided. Please set 'AZURE_SUBSCRIPTION_ID' or use the 'subscription_id' parameter")
management_cert_path = module.params.get('management_cert_path')
if not management_cert_path:
management_cert_path = os.environ.get('AZURE_CERT_PATH', None)
if not management_cert_path:
module.fail_json(msg="No management_cert_path provided. Please set 'AZURE_CERT_PATH' or use the 'management_cert_path' parameter")
return subscription_id, management_cert_path
def main():
module = AnsibleModule(
argument_spec=dict(
ssh_cert_path=dict(),
name=dict(),
hostname=dict(),
os_type=dict(default='linux', choices=['linux', 'windows']),
location=dict(choices=AZURE_LOCATIONS),
role_size=dict(choices=AZURE_ROLE_SIZES),
subscription_id=dict(no_log=True),
storage_account=dict(),
management_cert_path=dict(),
endpoints=dict(default='22'),
user=dict(),
password=dict(no_log=True),
image=dict(),
virtual_network_name=dict(default=None),
state=dict(default='present'),
wait=dict(type='bool', default=False),
wait_timeout=dict(default=600),
wait_timeout_redirects=dict(default=300),
auto_updates=dict(type='bool', default=False),
enable_winrm=dict(type='bool', default=True),
)
)
if not HAS_AZURE:
module.fail_json(msg='azure python module required for this module')
# create azure ServiceManagementService object
subscription_id, management_cert_path = get_azure_creds(module)
wait_timeout_redirects = int(module.params.get('wait_timeout_redirects'))
if hasattr(windows_azure, '__version__') and LooseVersion(windows_azure.__version__) <= "0.8.0":
# wrapper for handling redirects which the sdk <= 0.8.0 is not following
azure = Wrapper(ServiceManagementService(subscription_id, management_cert_path), wait_timeout_redirects)
else:
azure = ServiceManagementService(subscription_id, management_cert_path)
cloud_service_raw = None
if module.params.get('state') == 'absent':
(changed, public_dns_name, deployment) = terminate_virtual_machine(module, azure)
elif module.params.get('state') == 'present':
# Changed is always set to true when provisioning new instances
if not module.params.get('name'):
module.fail_json(msg='name parameter is required for new instance')
if not module.params.get('image'):
module.fail_json(msg='image parameter is required for new instance')
if not module.params.get('user'):
module.fail_json(msg='user parameter is required for new instance')
if not module.params.get('location'):
module.fail_json(msg='location parameter is required for new instance')
if not module.params.get('storage_account'):
module.fail_json(msg='storage_account parameter is required for new instance')
if not (module.params.get('password') or module.params.get('ssh_cert_path')):
module.fail_json(msg='password or ssh_cert_path parameter is required for new instance')
(changed, public_dns_name, deployment) = create_virtual_machine(module, azure)
module.exit_json(changed=changed, public_dns_name=public_dns_name, deployment=json.loads(json.dumps(deployment, default=lambda o: o.__dict__)))
class Wrapper(object):
def __init__(self, obj, wait_timeout):
self.other = obj
self.wait_timeout = wait_timeout
def __getattr__(self, name):
if hasattr(self.other, name):
func = getattr(self.other, name)
return lambda *args, **kwargs: self._wrap(func, args, kwargs)
raise AttributeError(name)
def _wrap(self, func, args, kwargs):
if isinstance(func, MethodType):
result = self._handle_temporary_redirects(lambda: func(*args, **kwargs))
else:
result = self._handle_temporary_redirects(lambda: func(self.other, *args, **kwargs))
return result
def _handle_temporary_redirects(self, f):
wait_timeout = time.time() + self.wait_timeout
while wait_timeout > time.time():
try:
return f()
except AzureException as e:
                if "temporary redirect" in str(e).lower():
                    time.sleep(5)
else:
raise e
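# Illustrative note: any attribute looked up on Wrapper resolves to the wrapped
# ServiceManagementService method and is retried while the SDK reports a
# "temporary redirect", until wait_timeout expires, e.g.
#
#   azure = Wrapper(ServiceManagementService(subscription_id, cert_path), 300)
#   azure.list_hosted_services()  # retried transparently on temporary redirects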
# import module snippets
from ansible.module_utils.basic import *
if __name__ == '__main__':
main()
|
py
|
1a567df5edc9b8f4ef7cf8f030fb516681f16d0d
|
import os
import keywords as kw
import mechanism_names as mn
import mechanism
from util import ParseUtil, make_readable_list_of_strings
# All parameters and their defaults.
PD = {kw.BEHAVIORS: set(), # set of (restricted) strings , REQ
kw.STIMULUS_ELEMENTS: set(), # set of (restricted) strings , REQ
kw.MECHANISM_NAME: '', # One of the available ones REQ
kw.START_V: 0, # Scalar or list of se->b:val or default:val ,
kw.START_VSS: 0, # Scalar or list of se->se:val or default:val ,
kw.ALPHA_V: 1, # -"- ,
kw.ALPHA_VSS: 1, # Scalar or list of se->se:val or default:val ,
kw.BETA: 1, # -"- ,
kw.MU: 0, # -"- ,
kw.DISCOUNT: 1, # Scalar
kw.TRACE: 0, # Scalar (number between 0 and 1)
kw.U: 0, # Scalar or list of se:val or default:val ,
kw.LAMBDA: 0, # Scalar or list of se:val or default:val ,
kw.START_W: 0, # -"- ,
kw.ALPHA_W: 1, # -"- ,
kw.BEHAVIOR_COST: 0, # Scalar or list of b:val or default:val ,
kw.RESPONSE_REQUIREMENTS: dict(), # List of b:se or b:(se1,se2,...) ,
kw.BIND_TRIALS: 'off', # on or off
kw.N_SUBJECTS: 1, # Positive integer
kw.TITLE: '', # String (,)
kw.SUBPLOTTITLE: '', # String (,)
      kw.RUNLABEL: '',              # String (restricted), for postprocessing only (,)
kw.SUBJECT: 'average', # average, all or zero-based index
kw.XSCALE: 'all', # all or s1->b1->s2->..., s=se1,se2,...
kw.XSCALE_MATCH: 'subset', # subset or exact
kw.EVAL_PHASES: 'all', # @post: all or list of phase labels ,
kw.CUMULATIVE: 'on', # on or off
kw.MATCH: 'subset', # subset or exact
kw.FILENAME: ''} # valid path REQ
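# Illustrative note: the list-style parameters above accept either a scalar
# applied to every key (later expanded by Parameters.scalar_expand()) or a
# comma-separated spec such as "s1->b1:0.4, default:0.5"; parsing is done by
# the _parse_* methods in Parameters below.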
def is_parameter_name(name):
return name in PD
def check_is_parameter_name(name):
if not is_parameter_name(name):
return f"Internal error: Invalid parameter name '{name}'."
return None
class Parameters():
def __init__(self):
        # All parameters and their values
self.val = dict(PD)
# self.got = dict.fromkeys(PD, False)
def str_append(self, prop, v_str, variables, phases, all_run_labels, to_be_continued):
err = check_is_parameter_name(prop)
if err:
return err
if not self.is_csv(prop):
return f"Internal error: Parameter '{prop}' is not of type list."
return self.str_set(prop, v_str, variables, phases, all_run_labels,
to_be_continued, True)
def str_set(self, prop, v_str, variables, phases, all_run_labels, to_be_continued,
is_appending=False):
"""
Parses the specified value (as a string) of the specified parameter and sets the resulting
value. The input variables is a Variables object.
Returns error message if parsing failed.
"""
err = check_is_parameter_name(prop)
if err:
return err
# all_phase_labels = phases.labels_set()
if prop == kw.BEHAVIORS:
return self._parse_behaviors(v_str, variables, is_appending)
elif prop == kw.STIMULUS_ELEMENTS:
return self._parse_stimulus_elements(v_str, variables, is_appending)
elif prop == kw.MECHANISM_NAME:
return self._parse_mechanism_name(v_str)
elif prop in (kw.START_VSS, kw.ALPHA_VSS):
return self._parse_alphastart_vss(prop, v_str, variables, to_be_continued,
is_appending)
elif prop in (kw.START_W, kw.ALPHA_W, kw.U, kw.LAMBDA):
return self._parse_stimulus_values(prop, v_str, variables, to_be_continued,
is_appending)
elif prop in (kw.BETA, kw.MU, kw.START_V, kw.ALPHA_V):
return self._parse_stimulus_response_values(prop, v_str, variables,
to_be_continued, is_appending)
# Float
elif prop in (kw.DISCOUNT, kw.TRACE):
v, err = ParseUtil.evaluate(v_str, variables)
if err:
return err
if (v < 0) or (v > 1):
return f"Parameter '{prop}' must be a number >=0 and <=1."
self.val[prop] = v
return None
elif prop == kw.BEHAVIOR_COST:
return self._parse_behavior_cost(v_str, variables, to_be_continued, is_appending)
elif prop == kw.RESPONSE_REQUIREMENTS:
return self._parse_response_requirements(v_str, to_be_continued, is_appending)
# 'on' or 'off'
elif prop in (kw.BIND_TRIALS, kw.CUMULATIVE):
v_str_lower = v_str.lower()
if v_str_lower not in ('on', 'off'):
return "Parameter '{}' must be 'on' or 'off'.".format(prop)
self.val[prop] = v_str_lower
return None
# Positive integer
elif prop == kw.N_SUBJECTS:
v, err = ParseUtil.parse_posint(v_str, variables)
if err:
return err
if not v:
return "Parameter {} must be a positive integer.".format(kw.N_SUBJECTS)
self.val[kw.N_SUBJECTS] = v
return None
# Any nonempty (after strip) string
elif prop in (kw.TITLE, kw.SUBPLOTTITLE):
if to_be_continued: # Add the removed comma
v_str = v_str + ","
self.val[prop] = v_str
return None
# 'average', 'all' or 1-based index
elif prop == kw.SUBJECT:
return self._parse_subject(v_str, variables)
# 'all' or s1->b1->s2->..., s=se1,se2,...
elif prop == kw.XSCALE:
return self._parse_xscale(v_str, phases)
# 'subset' or 'exact'
elif prop in (kw.MATCH, kw.XSCALE_MATCH):
if v_str.lower() not in ('subset', 'exact'):
return "Parameter {} must be 'subset' or 'exact'.".format(prop)
self.val[prop] = v_str
return None
# 'all' or cs-list of phase labels
elif prop == kw.PHASES:
return self._parse_phases(v_str) # , all_phase_labels)
# String (@run-labels) (for postprocessing)
elif prop == kw.RUNLABEL:
if v_str not in all_run_labels:
return "Invalid @RUN-label {}".format(v_str)
self.val[kw.RUNLABEL] = v_str
return None
# Valid path to writable file
elif prop == kw.FILENAME:
filename = v_str
            # Validate that the file is writable. Close only if open()
            # succeeded; a 'finally' here would raise NameError when open()
            # fails, masking the original error.
            try:
                file = open(filename, 'w', newline='')
            except Exception as ex:
                return str(ex)
            file.close()
try:
os.remove(filename)
except FileNotFoundError:
pass
self.val[kw.FILENAME] = filename
return None
def make_mechanism_obj(self):
"""
        Returns a Mechanism object (None if error) and error message (None if no error).
GA = 'ga'
SR = 'sr'
ES = 'es'
QL = 'ql'
AC = 'ac'
RW = 'rw'
MECHANISM_NAMES = (GA, SR, ES, QL, AC, RW)
"""
mechanism_name = self.val[kw.MECHANISM_NAME]
if not mechanism_name:
return None, "Parameter 'mechanism' is not specified."
self.scalar_expand()
if mechanism_name in mn.SR:
mechanism_obj = mechanism.StimulusResponse(self)
elif mechanism_name in mn.QL:
mechanism_obj = mechanism.Qlearning(self)
# elif mechanism_name == SARSA:
# mechanism_obj = LsMechanism.SARSA(**self.parameters)
elif mechanism_name in mn.ES:
mechanism_obj = mechanism.EXP_SARSA(self)
elif mechanism_name in mn.AC:
mechanism_obj = mechanism.ActorCritic(self)
elif mechanism_name in mn.GA:
mechanism_obj = mechanism.Enquist(self)
elif mechanism_name in mn.RW:
mechanism_obj = mechanism.OriginalRescorlaWagner(self)
else:
raise Exception(f"Internal error. Unknown mechanism {mechanism_name}.")
return mechanism_obj, None
def _parse_behaviors(self, behaviors_str, variables, is_appending):
"""
Parse the string behaviors_str with comma-separated behavior names and return the
        corresponding set of strings.
Example: "B1, B2,B123" returns {'B1', 'B2', 'B123'}
"""
if not is_appending:
self.val[kw.BEHAVIORS] = set()
behaviors_list = behaviors_str.split(',')
for b in behaviors_list:
b = b.strip()
if len(b) == 0:
return "Found empty behavior name."
if b in self.val[kw.BEHAVIORS]:
return f"The behavior name '{b}' occurs more than once."
if b in self.val[kw.STIMULUS_ELEMENTS]:
return f"The behavior name '{b}' is invalid, since it is a stimulus element."
if variables.contains(b):
return f"The behavior name '{b}' is invalid, since it is a variable name."
if not b.isidentifier():
return f"Behavior name '{b}' is not a valid identifier."
self.val[kw.BEHAVIORS].add(b)
return None # No error
def _parse_stimulus_elements(self, stimulus_elements_str, variables, is_appending):
"""
Parse the string stimulus_elements_str with comma-separated stimulus element names and
        return the corresponding set of strings.
Example: "E1, E2,E123" returns {'E1', 'E2', 'E123'}
"""
if not is_appending:
self.val[kw.STIMULUS_ELEMENTS] = set()
stimulus_elements_list = stimulus_elements_str.split(',')
for e in stimulus_elements_list:
e = e.strip()
if len(e) == 0:
return "Found empty stimulus element name."
if e in self.val[kw.STIMULUS_ELEMENTS]:
return f"The stimulus element name '{e}' occurs more than once."
if e in self.val[kw.BEHAVIORS]:
return f"The stimulus element name '{e}' is invalid, since it is a behavior name."
if variables.contains(e):
return f"The stimulus element name '{e}' is invalid, since it is a variable name."
if not e.isidentifier():
return f"Stimulus element name '{e}' is not a valid identifier."
self.val[kw.STIMULUS_ELEMENTS].add(e)
return None # No error
def _parse_mechanism_name(self, mechanism_name):
"""
        Parse the string mechanism_name with a mechanism name and return the corresponding string.
"""
mn_lower = mechanism_name.lower()
if mn_lower not in mn.MECHANISM_NAMES:
cs_valid_names = ', '.join(sorted(mn.MECHANISM_NAMES))
return "Invalid mechanism name '{}'. ".format(mechanism_name) + \
"Mechanism name must be one of the following: {}.".format(cs_valid_names)
self.val[kw.MECHANISM_NAME] = mn_lower
return None
def _parse_phases(self, v_str):
if v_str == 'all':
self.val[kw.PHASES] = v_str # list(all_phase_labels)
else:
phase_labels = ParseUtil.comma_split_strip(v_str)
for phase_label in phase_labels:
if len(phase_label) == 0:
return "Expected comma-separated list of phase labels, found {}".format(phase_labels)
# else:
# if phase_label not in all_phase_labels:
# return "Undefined phase label '{}'.".format(phase_label)
self.val[kw.PHASES] = phase_labels
return None
def _parse_behavior_cost(self, behavior_cost_str, variables, to_be_continued, is_appending):
if not self.val[kw.BEHAVIORS]:
return f"The parameter 'behaviors' must be assigned before the parameter '{kw.BEHAVIOR_COST}'."
# Create and populate the struct with None values
if not is_appending:
self.val[kw.BEHAVIOR_COST] = dict()
for e in self.val[kw.BEHAVIORS]:
self.val[kw.BEHAVIOR_COST][e] = None
self.val[kw.BEHAVIOR_COST][kw.DEFAULT] = None
single_c, _ = ParseUtil.evaluate(behavior_cost_str, variables)
if single_c is not None:
if is_appending:
return "A single value for '{}' cannot follow other values.".format(kw.BEHAVIOR_COST)
elif to_be_continued:
return "A single value for '{}' cannot be followed by other values.".format(kw.BEHAVIOR_COST)
else:
for key in self.val[kw.BEHAVIOR_COST]:
self.val[kw.BEHAVIOR_COST][key] = single_c
self.val[kw.BEHAVIOR_COST].pop(kw.DEFAULT)
else:
cs = ParseUtil.comma_split(behavior_cost_str)
cs = [x.strip() for x in cs]
for bc_str in cs: # bc_str is 'e:value' or 'default:value'
if bc_str.count(':') != 1:
return "Expected 'element:value' or 'default:value' in '{}', got '{}'.".format(kw.BEHAVIOR_COST, bc_str)
b, c_str = bc_str.split(':')
b = b.strip()
c_str = c_str.strip()
c, err = ParseUtil.evaluate(c_str, variables)
if err:
return f"Invalid value '{c_str}' for '{b}' in parameter '{kw.BEHAVIOR_COST}'."
if b == kw.DEFAULT:
if self.val[kw.BEHAVIOR_COST][kw.DEFAULT] is not None:
return "Default value for '{}' can only be stated once.".format(kw.BEHAVIOR_COST)
elif b not in self.val[kw.BEHAVIORS]:
return f"Error in parameter '{kw.BEHAVIOR_COST}': '{b}' is an invalid behavior name."
if self.val[kw.BEHAVIOR_COST][b] is not None:
return "Duplicate of {} in '{}'.".format(b, kw.BEHAVIOR_COST)
self.val[kw.BEHAVIOR_COST][b] = c
if not to_be_continued:
# Set the default value for non-set behaviors
err = self._set_default_values(kw.BEHAVIOR_COST)
if err:
return err
return None # No error
def _parse_stimulus_response_values(self, NAME, sr_str, variables, to_be_continued,
is_appending):
"""
Parse the string sr_str with a value for stimulus-response pairs.
Example: "S1->R1: 1.23, S2->R1:3.45, default:1" sets the parameter to
{('S1','R1'):1.23, ('S1','R2'):1, ('S2','R1'):3.45, ('S2','R2'):1}
under the assumption that
behaviors = {'R1', 'R2'} and
stimulus_elements = {'S1', 'S2'}
"""
if not self.val[kw.STIMULUS_ELEMENTS]:
return f"The parameter 'stimulus_elements' must be assigned before the parameter '{NAME}'."
if not self.val[kw.BEHAVIORS]:
return f"The parameter 'behaviors' must be assigned before the parameter '{NAME}'."
# Create and populate the struct with None values
if not is_appending:
self.val[NAME] = dict()
for e in self.val[kw.STIMULUS_ELEMENTS]:
for b in self.val[kw.BEHAVIORS]:
self.val[NAME][(e, b)] = None
self.val[NAME][kw.DEFAULT] = None
single_v, _ = ParseUtil.evaluate(sr_str, variables)
if single_v is not None:
if is_appending:
return f"A single value for '{NAME}' cannot follow other values."
elif to_be_continued:
return f"A single value for '{NAME}' cannot be followed by other values."
else:
for key in self.val[NAME]:
self.val[NAME][key] = single_v
self.val[NAME].pop(kw.DEFAULT)
else:
vs = ParseUtil.comma_split(sr_str)
vs = [x.strip() for x in vs]
for eb_v_str in vs: # eb_v_str is 'e->b:value' or 'default:value'
if eb_v_str.count(':') != 1:
return f"Expected 'x->y:value' or 'default:value' in '{NAME}', got '{eb_v_str}'."
eb, v_str = eb_v_str.split(':')
eb = eb.strip()
v_str = v_str.strip()
v, err = ParseUtil.evaluate(v_str, variables)
if err:
return f"Invalid value '{v_str}' for '{eb}' in parameter '{NAME}'."
if eb == kw.DEFAULT:
if self.val[NAME][kw.DEFAULT] is not None:
return f"Default value for '{NAME}' can only be stated once."
self.val[NAME][kw.DEFAULT] = v
elif eb.count('->') == 1:
e, b = eb.split('->')
if e not in self.val[kw.STIMULUS_ELEMENTS]:
return f"Error in parameter '{NAME}': '{e}' is an invalid stimulus element."
if b not in self.val[kw.BEHAVIORS]:
return f"Error in parameter '{NAME}': '{b}' is an invalid behavior name."
if self.val[NAME][(e, b)] is not None:
return f"Duplicate of {e}->{b} in '{NAME}'."
self.val[NAME][(e, b)] = v
else:
return f"Invalid string '{eb}' in parameter '{NAME}'."
if not to_be_continued:
# Set the default value for non-set stimulus-behavior pairs
err = self._set_default_values(NAME)
if err:
return err
return None # No error
def _parse_alphastart_vss(self, NAME, vss_str, variables, to_be_continued,
is_appending):
"""
Parse the string vss_str with a start_vss/alpha_vss specification.
Example: "S1->S2: 1.23, S2->S1:3.45, default:1" sets the parameter to
{('S1','S2'):1.23, ('S2','S1'):3.45, ('S1','S1'):1, ('S2','S2'):1}
under the assumption that stimulus_elements = {'S1', 'S2'}
"""
if not self.val[kw.STIMULUS_ELEMENTS]:
return f"The parameter 'stimulus_elements' must be assigned before the parameter '{NAME}'."
# Create and populate the struct with None values
if not is_appending:
self.val[NAME] = dict()
for e1 in self.val[kw.STIMULUS_ELEMENTS]:
for e2 in self.val[kw.STIMULUS_ELEMENTS]:
self.val[NAME][(e1, e2)] = None
self.val[NAME][kw.DEFAULT] = None
single_vss, _ = ParseUtil.evaluate(vss_str, variables)
if single_vss is not None:
if is_appending:
return f"A single value for '{NAME}' cannot follow other values."
elif to_be_continued:
return f"A single value for '{NAME}' cannot be followed by other values."
else:
for key in self.val[NAME]:
self.val[NAME][key] = single_vss
self.val[NAME].pop(kw.DEFAULT)
else:
vs = ParseUtil.comma_split(vss_str)
vs = [x.strip() for x in vs]
            for ee_str in vs:  # ee_str is 'e1->e2:value' or 'default:value'
if ee_str.count(':') != 1:
return f"Expected 'x->y:value' or 'default:value' in '{NAME}', got '{ee_str}'."
ee, v_str = ee_str.split(':')
ee = ee.strip()
v_str = v_str.strip()
v, err = ParseUtil.evaluate(v_str, variables)
if err:
return f"Invalid value '{v_str}' for '{ee}' in parameter '{NAME}'."
if ee == kw.DEFAULT:
if self.val[NAME][kw.DEFAULT] is not None:
return f"Default value for '{NAME}' can only be stated once."
self.val[NAME][kw.DEFAULT] = v
elif ee.count('->') == 1:
e1, e2 = ee.split('->')
if e1 not in self.val[kw.STIMULUS_ELEMENTS]:
return f"Error in parameter '{NAME}': '{e1}' is an invalid stimulus element."
if e2 not in self.val[kw.STIMULUS_ELEMENTS]:
return f"Error in parameter '{NAME}': '{e2}' is an invalid stimulus element."
if self.val[NAME][(e1, e2)] is not None:
return f"Duplicate of {e1}->{e2} in '{NAME}'."
self.val[NAME][(e1, e2)] = v
else:
return f"Invalid string '{ee}' in parameter '{NAME}'."
if not to_be_continued:
# Set the default value for non-set stimulus-stimulus pairs
err = self._set_default_values(NAME)
if err:
return err
return None # No error
def _parse_response_requirements(self, v_str, to_be_continued, is_appending):
if not self.val[kw.STIMULUS_ELEMENTS]:
return f"The parameter 'stimulus_elements' must be assigned before the parameter '{kw.RESPONSE_REQUIREMENTS}'."
if not self.val[kw.BEHAVIORS]:
return f"The parameter 'behaviors' must be assigned before the parameter '{kw.RESPONSE_REQUIREMENTS}'."
if not is_appending:
self.val[kw.RESPONSE_REQUIREMENTS] = dict()
for b in self.val[kw.BEHAVIORS]:
self.val[kw.RESPONSE_REQUIREMENTS][b] = None
rrs = ParseUtil.comma_split_sq(v_str)
for rr in rrs:
if rr.count(':') != 1:
return "Expected 'behavior:stimulus_element', got '{}'.".format(rr)
b, s = rr.split(':')
b = b.strip()
s = s.strip()
if len(b) == 0 or len(s) == 0:
return "Expected 'behavior:stimulus_element', got '{}'.".format(rr)
if b not in self.val[kw.BEHAVIORS]:
return "Unknown behavior name '{}'.".format(b)
if self.val[kw.RESPONSE_REQUIREMENTS][b] is not None:
return "Duplication of behavior '{}' in {}.".format(b, kw.RESPONSE_REQUIREMENTS)
if '[' in s or ']' in s:
if s.count('[') != 1 or s.count(']') != 1 or s[0] != '[' or s[-1] != ']':
return "Malformed expression '{}'.".format(s)
s = s[1:-1] # Strip the '['and the ']'
es = s.split(',')
for e in es:
e = e.strip()
if e not in self.val[kw.STIMULUS_ELEMENTS]:
return "Unknown stimulus element '{}'.".format(e)
self._response_requirements_add_element(b, e)
else:
if s not in self.val[kw.STIMULUS_ELEMENTS]:
return "Unknown stimulus element '{}'.".format(s)
self._response_requirements_add_element(b, s)
if not to_be_continued:
# For the unrestricted behaviors, add all stimulus elements
for b in self.val[kw.RESPONSE_REQUIREMENTS]:
if self.val[kw.RESPONSE_REQUIREMENTS][b] is None:
self.val[kw.RESPONSE_REQUIREMENTS][b] = set(self.val[kw.STIMULUS_ELEMENTS])
# Check that each stimulus element has at least one feasible response
stimulus_elements_in_rr = []
for stimulus_list in self.val[kw.RESPONSE_REQUIREMENTS].values():
stimulus_elements_in_rr.extend(stimulus_list)
if set(stimulus_elements_in_rr) != set(self.val[kw.STIMULUS_ELEMENTS]):
elements_without_response = set(self.val[kw.STIMULUS_ELEMENTS]) - set(stimulus_elements_in_rr)
elements_without_response = list(elements_without_response)
elements_without_response.sort() # To make error message testable
elements_without_response_str = make_readable_list_of_strings(elements_without_response)
err = f"Invalid {kw.RESPONSE_REQUIREMENTS}: "
if len(elements_without_response) == 1:
return err + f"Stimulus element {elements_without_response_str} has no possible responses."
else:
return err + f"Stimulus elements {elements_without_response_str} have no possible responses."
return None # No error
def _response_requirements_add_element(self, b, e):
if self.val[kw.RESPONSE_REQUIREMENTS][b] is None:
self.val[kw.RESPONSE_REQUIREMENTS][b] = {e}
else:
self.val[kw.RESPONSE_REQUIREMENTS][b].add(e)
def _parse_stimulus_values(self, NAME, stimulus_values, variables, to_be_continued,
is_appending):
if not self.val[kw.STIMULUS_ELEMENTS]:
return f"The parameter 'stimulus_elements' must be assigned before the parameter '{NAME}'."
# Create and populate the struct with None values
if not is_appending:
self.val[NAME] = dict()
for e in self.val[kw.STIMULUS_ELEMENTS]:
self.val[NAME][e] = None
self.val[NAME][kw.DEFAULT] = None
single_w, _ = ParseUtil.evaluate(stimulus_values, variables)
if single_w is not None:
if is_appending:
return "A single value for '{}' cannot follow other values.".format(NAME)
elif to_be_continued:
return "A single value for '{}' cannot be followed by other values.".format(NAME)
else:
for key in self.val[NAME]:
self.val[NAME][key] = single_w
self.val[NAME].pop(kw.DEFAULT)
else:
ws = ParseUtil.comma_split(stimulus_values)
ws = [x.strip() for x in ws]
            for e_w_str in ws:  # e_w_str is 'e:value' or 'default:value'
if e_w_str.count(':') != 1:
return "Expected 'element:value' or 'default:value' in '{}', got '{}'.".format(NAME, e_w_str)
e, w_str = e_w_str.split(':')
e = e.strip()
w_str = w_str.strip()
w, err = ParseUtil.evaluate(w_str, variables)
if err:
return "Invalid value '{}' for '{}' in parameter '{}'.".format(w_str, e, NAME)
if e == kw.DEFAULT:
if self.val[NAME][kw.DEFAULT] is not None:
return "Default value for '{}' can only be stated once.".format(NAME)
elif e not in self.val[kw.STIMULUS_ELEMENTS]:
return f"Error in parameter '{NAME}': '{e}' is an invalid stimulus element."
if self.val[NAME][e] is not None:
return "Duplicate of {} in '{}'.".format(e, NAME)
self.val[NAME][e] = w
if not to_be_continued:
# Set the default value for non-set stimulus elements
err = self._set_default_values(NAME)
if err:
return err
return None # No error
def _parse_subject(self, v_str, variables):
err = f"Parameter {kw.SUBJECT} must be 'average', 'all', or a positive integer."
if v_str.lower() in ('average', 'all'):
self.val[kw.SUBJECT] = v_str.lower()
else:
v, interr = ParseUtil.parse_posint(v_str, variables)
if interr: # Parsing error
return err + " " + interr
if v is None: # Parsing worked, but negative integer
return err
self.val[kw.SUBJECT] = v - 1 # Zero-based index internally
return None
def _parse_xscale(self, xscale, phases):
if not self.val[kw.STIMULUS_ELEMENTS]:
return f"The parameter 'stimulus_elements' must be assigned before the parameter '{kw.XSCALE}'."
if not self.val[kw.BEHAVIORS] and self.val[kw.MECHANISM_NAME] not in mn.RW:
return f"The parameter 'behaviors' must be assigned before the parameter '{kw.XSCALE}'."
if phases.is_phase_label(xscale):
pass
elif xscale == 'all':
pass
else:
xscale, err = ParseUtil.parse_chain(xscale, self.val[kw.STIMULUS_ELEMENTS],
self.val[kw.BEHAVIORS])
if err:
return err
self.val[kw.XSCALE] = xscale
return None
def _set_default_values(self, NAME):
default_needed = False
for key in self.val[NAME]:
            if key != kw.DEFAULT and self.val[NAME][key] is None:
default_needed = True
break
if default_needed and self.val[NAME][kw.DEFAULT] is None:
return f"Missing default value for parameter '{NAME}'."
else:
for key in self.val[NAME]:
if self.val[NAME][key] is None:
self.val[NAME][key] = self.val[NAME][kw.DEFAULT]
self.val[NAME].pop(kw.DEFAULT)
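    # Example (illustrative): with stimulus elements {s1, s2} and the spec
    # "s1:2, default:0", the partially filled dict {s1: 2, s2: None,
    # 'default': 0} becomes {s1: 2, s2: 0} after _set_default_values().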
def get(self, prop):
return self.val[prop]
def may_end_with_comma(self, prop):
return self.is_csv(prop) or prop in (kw.TITLE, kw.SUBPLOTTITLE, kw.RUNLABEL)
def is_csv(self, prop):
return prop in (kw.BEHAVIORS, kw.STIMULUS_ELEMENTS, kw.BETA, kw.MU, kw.LAMBDA, kw.START_V,
kw.START_VSS, kw.START_W, kw.ALPHA_V, kw.ALPHA_VSS, kw.ALPHA_W,
kw.BEHAVIOR_COST, kw.U, kw.RESPONSE_REQUIREMENTS, kw.PHASES)
def scalar_expand(self):
"""
Expand dict-parameters that are defined by scalar. If defined as dict, check that keys are
compatible with stimulus elements and behaviors.
"""
behaviors = self.val[kw.BEHAVIORS]
stimulus_elements = self.val[kw.STIMULUS_ELEMENTS]
# Check START_VSS and ALPHA_VSS
expected_ss_keys = set()
for stimulus_element1 in stimulus_elements:
for stimulus_element2 in stimulus_elements:
key = (stimulus_element1, stimulus_element2)
expected_ss_keys.add(key)
for param_name in [kw.START_VSS, kw.ALPHA_VSS]:
start_vss = self.val[param_name]
if type(start_vss) is dict:
if set(start_vss.keys()) != expected_ss_keys:
self._raise_match_err(param_name, kw.STIMULUS_ELEMENTS)
else: # scalar expand
self.val[param_name] = dict()
scalar = start_vss
for stimulus_element1 in stimulus_elements:
for stimulus_element2 in stimulus_elements:
key = (stimulus_element1, stimulus_element2)
self.val[param_name][key] = scalar
expected_sb_keys = set()
for stimulus_element in stimulus_elements:
for behavior in behaviors:
key = (stimulus_element, behavior)
expected_sb_keys.add(key)
# Check START_V
self._scalar_expand_element_behavior(kw.START_V, stimulus_elements, behaviors,
expected_sb_keys)
# Check ALPHA_V
self._scalar_expand_element_behavior(kw.ALPHA_V, stimulus_elements, behaviors,
expected_sb_keys)
# Check BETA
self._scalar_expand_element_behavior(kw.BETA, stimulus_elements, behaviors,
expected_sb_keys)
# Check MU
self._scalar_expand_element_behavior(kw.MU, stimulus_elements, behaviors,
expected_sb_keys)
expected_s_keys = set()
for stimulus_element in stimulus_elements:
expected_s_keys.add(stimulus_element)
# Check U
self._scalar_expand_element(kw.U, stimulus_elements, expected_s_keys)
# Check START_W
self._scalar_expand_element(kw.START_W, stimulus_elements, expected_s_keys)
# Check ALPHA_W
self._scalar_expand_element(kw.ALPHA_W, stimulus_elements, expected_s_keys)
# Check LAMBDA
self._scalar_expand_element(kw.LAMBDA, stimulus_elements, expected_s_keys)
# Check BEHAVIOR_COST
expected_b_keys = set()
for behavior in behaviors:
expected_b_keys.add(behavior)
behavior_cost = self.val[kw.BEHAVIOR_COST]
if type(behavior_cost) is dict:
if set(behavior_cost.keys()) != expected_b_keys:
self._raise_match_err(kw.BEHAVIOR_COST, kw.BEHAVIORS)
else: # scalar expand
self.val[kw.BEHAVIOR_COST] = dict()
scalar = behavior_cost
for behavior in behaviors:
self.val[kw.BEHAVIOR_COST][behavior] = scalar
def _scalar_expand_element_behavior(self, param_name, stimulus_elements, behaviors,
expected_sb_keys):
val = self.val[param_name]
if type(val) is dict:
if set(val.keys()) != expected_sb_keys:
self._raise_match_err(param_name, kw.STIMULUS_ELEMENTS, kw.BEHAVIORS)
else: # scalar expand
self.val[param_name] = dict()
scalar = val
for stimulus_element in stimulus_elements:
for behavior in behaviors:
key = (stimulus_element, behavior)
self.val[param_name][key] = scalar
def _scalar_expand_element(self, param_name, stimulus_elements, expected_s_keys):
val = self.val[param_name]
if type(val) is dict:
if set(val.keys()) != expected_s_keys:
self._raise_match_err(param_name, kw.STIMULUS_ELEMENTS)
else: # scalar expand
self.val[param_name] = dict()
scalar = val
for stimulus_element in stimulus_elements:
self.val[param_name][stimulus_element] = scalar
@staticmethod
def _raise_match_err(param1, param2, param3=None):
if param3:
err = f"The parameter '{param1}' does not match '{param2}' and '{param3}'."
else:
err = f"The parameter '{param1}' does not match '{param2}'."
raise Exception(err)
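# Hedged usage sketch (illustrative; 'variables' and 'phases' stand in for the
# simulator's Variables and Phases objects, which are defined elsewhere):
#
#   p = Parameters()
#   p.str_set(kw.BEHAVIORS, 'b1, b2', variables, phases, [], False)
#   p.str_set(kw.STIMULUS_ELEMENTS, 's1, s2', variables, phases, [], False)
#   p.str_set(kw.ALPHA_V, 's1->b1:0.4, default:0.5', variables, phases, [], False)
#   mech, err = p.make_mechanism_obj()  # expands scalars and builds the mechanism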
|
py
|
1a567e2f0aea37470b21ebc7435dd115f21471f8
|
import socket
import threading
import codecs
from scapy.all import *
contentTable = ['porn', 'guns', 'torrent', 'skype']
firstIface = 'eth0'
firstIfaceFlows = ['52:54:00:42:84:65']
secondIface = 'eth1'
secondIfaceFlows = ['52:54:00:a1:54:c0']
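# The two threads below form a transparent bridge between the two interfaces:
# inOutServer() forwards frames from firstIface to secondIface, replacing the
# first blacklisted word found in an outbound HTTP payload with '*', while
# outInServer() forwards the reverse direction unmodified.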
def inOutServer():
global contentTable
global firstIface
global secondIface
inSocket = socket.socket(socket.AF_PACKET, socket.SOCK_RAW, socket.htons(3))
inSocket.bind((firstIface, 0))
outSocket = socket.socket(socket.AF_PACKET, socket.SOCK_RAW)
outSocket.setsockopt(socket.SOL_SOCKET, socket.SO_SNDBUF, 1514)
outSocket.bind((secondIface, 0))
for index in range(len(contentTable)):
contentTable[index] = contentTable[index].encode()
while True:
pkt = inSocket.recvfrom(1514)
try:
et = Ether(bytes(pkt[0]))
            if et.src not in firstIfaceFlows:
                continue
        except Exception:
            continue
        if IP in et and TCP in et and Raw in et:
if et[IP][TCP].dport == 80:
data = et[Raw].load
for content in contentTable:
                    if content not in data:
continue
index = data.find(content)
et[Raw].load = data[:index] + b'*' + data[index+len(content):]
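                    # Invalidate cached checksum/length fields so scapy
                    # recomputes them on re-serialization; show2() forces the
                    # rebuild pass (its dump output is discarded).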
del et[TCP].chksum
del et[IP].ihl
del et[IP].len
del et[IP].chksum
et.show2(dump=True)
break
outSocket.send(bytes(et))
def outInServer():
global firstIface
global secondIface
inSocket = socket.socket(socket.AF_PACKET, socket.SOCK_RAW, socket.htons(3))
inSocket.bind((secondIface, 0))
outSocket = socket.socket(socket.AF_PACKET, socket.SOCK_RAW)
outSocket.setsockopt(socket.SOL_SOCKET, socket.SO_SNDBUF, 1514)
outSocket.bind((firstIface, 0))
while True:
pkt = inSocket.recvfrom(1514)
try:
et = Ether(bytes(pkt[0]))
            if et.src not in secondIfaceFlows:
                continue
        except Exception:
            continue
outSocket.send(bytes(et))
inOut = threading.Thread(target=inOutServer,args=())
outIn = threading.Thread(target=outInServer,args=())
outIn.start()
inOut.start()
inOut.join()
|
py
|
1a567e331fc6444350846e05255b8960d7ec6481
|
class Module(object):
pass
|
py
|
1a567ea722711aefd108ca18fa5d9a58e7046d99
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for ragged_array_ops.where."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
from tensorflow.python.framework import test_util
from tensorflow.python.ops.ragged import ragged_factory_ops
from tensorflow.python.ops.ragged import ragged_where_op
from tensorflow.python.platform import googletest
@test_util.run_all_in_graph_and_eager_modes
class RaggedWhereOpTest(test_util.TensorFlowTestCase,
parameterized.TestCase):
@parameterized.parameters([
#=========================================================================
# Docstring Examples
#=========================================================================
dict( # shape=[D1, (D2)]
condition=ragged_factory_ops.constant_value(
[[True, False, True], [False, True]]),
expected=[[0, 0], [0, 2], [1, 1]]),
dict( # shape=[D1, (D2)]
condition=ragged_factory_ops.constant_value(
[[True, False, True], [False, True]]),
x=ragged_factory_ops.constant_value(
[['A', 'B', 'C'], ['D', 'E']]),
y=ragged_factory_ops.constant_value(
[['a', 'b', 'c'], ['d', 'e']]),
expected=ragged_factory_ops.constant_value(
[[b'A', b'b', b'C'], [b'd', b'E']])),
dict( # shape=[D1, (D2)]
condition=ragged_factory_ops.constant_value([True, False]),
x=ragged_factory_ops.constant_value([['A', 'B', 'C'], ['D', 'E']]),
y=ragged_factory_ops.constant_value([['a', 'b', 'c'], ['d', 'e']]),
expected=ragged_factory_ops.constant_value(
[[b'A', b'B', b'C'], [b'd', b'e']])),
#=========================================================================
# Coordinate-retrieval mode
#=========================================================================
dict( # shape=[D1]
condition=[True, False, True, False, True],
expected=[[0], [2], [4]]),
dict( # shape=[D1, D2]
condition=[[True, False], [False, True]],
expected=[[0, 0], [1, 1]]),
dict( # shape=[D1, (D2)]
condition=ragged_factory_ops.constant_value(
[[True, False, True], [False, True]]),
expected=[[0, 0], [0, 2], [1, 1]]),
dict( # shape=[D1, (D2), (D3)]
condition=ragged_factory_ops.constant_value([
[[True, False, True], [False, True]],
[[True], [], [False], [False, True, False]]
]),
expected=[[0, 0, 0], [0, 0, 2], [0, 1, 1],
[1, 0, 0], [1, 3, 1]]),
dict( # shape=[D1, (D2), D3]
condition=ragged_factory_ops.constant_value([
[[True, False], [False, True]],
[[True, False], [False, False], [True, False], [False, True]]
], ragged_rank=1),
expected=[[0, 0, 0], [0, 1, 1],
[1, 0, 0], [1, 2, 0], [1, 3, 1]]),
dict( # shape=[D1, (D2), (D3), (D4)]
condition=ragged_factory_ops.constant_value([
[[[], [True]]],
[[[True, False, True], [False, True]],
[[True], [], [False], [False, True, False]]]
]),
expected=[[0, 0, 1, 0],
[1, 0, 0, 0], [1, 0, 0, 2], [1, 0, 1, 1],
[1, 1, 0, 0], [1, 1, 3, 1]]),
#=========================================================================
# Elementwise value-selection mode
#=========================================================================
dict( # shape=[]
condition=True, x='A', y='a', expected=b'A'),
dict( # shape=[]
condition=False, x='A', y='a', expected=b'a'),
dict( # shape=[D1]
condition=[True, False, True],
x=['A', 'B', 'C'],
y=['a', 'b', 'c'],
expected=[b'A', b'b', b'C']),
dict( # shape=[D1, D2]
condition=[[True, False], [False, True]],
x=[['A', 'B'], ['D', 'E']],
y=[['a', 'b'], ['d', 'e']],
expected=[[b'A', b'b'], [b'd', b'E']]),
dict( # shape=[D1, (D2)]
condition=ragged_factory_ops.constant_value(
[[True, False, True], [False, True]]),
x=ragged_factory_ops.constant_value([['A', 'B', 'C'], ['D', 'E']]),
y=ragged_factory_ops.constant_value([['a', 'b', 'c'], ['d', 'e']]),
expected=ragged_factory_ops.constant_value(
[[b'A', b'b', b'C'], [b'd', b'E']])),
dict( # shape=[D1, (D2), D3]
condition=ragged_factory_ops.constant_value([
[[True, False], [False, True]],
[[True, False], [False, False], [True, False], [False, True]]
], ragged_rank=1),
x=ragged_factory_ops.constant_value([
[['A', 'B'], ['C', 'D']],
[['E', 'F'], ['G', 'H'], ['I', 'J'], ['K', 'L']]
], ragged_rank=1),
y=ragged_factory_ops.constant_value([
[['a', 'b'], ['c', 'd']],
[['e', 'f'], ['g', 'h'], ['i', 'j'], ['k', 'l']]
], ragged_rank=1),
expected=ragged_factory_ops.constant_value([
[[b'A', b'b'], [b'c', b'D']],
[[b'E', b'f'], [b'g', b'h'], [b'I', b'j'], [b'k', b'L']]
], ragged_rank=1)),
dict( # shape=[D1, (D2), (D3), (D4)]
condition=ragged_factory_ops.constant_value([
[[[], [True]]],
[[[True, False, True], [False, True]],
[[True], [], [False], [False, True, False]]]
]),
x=ragged_factory_ops.constant_value([
[[[], ['A']]],
[[['B', 'C', 'D'], ['E', 'F']],
[['G'], [], ['H'], ['I', 'J', 'K']]]
]),
y=ragged_factory_ops.constant_value([
[[[], ['a']]],
[[['b', 'c', 'd'], ['e', 'f']],
[['g'], [], ['h'], ['i', 'j', 'k']]]
]),
expected=ragged_factory_ops.constant_value([
[[[], [b'A']]],
[[[b'B', b'c', b'D'], [b'e', b'F']],
[[b'G'], [], [b'h'], [b'i', b'J', b'k']]]
])),
#=========================================================================
# Elementwise row-selection mode
#=========================================================================
dict( # x.shape=[D1, D2], y.shape=[D1, D2]
condition=[True, False, True],
x=[['A', 'B'], ['C', 'D'], ['E', 'F']],
y=[['a', 'b'], ['c', 'd'], ['e', 'f']],
expected=[[b'A', b'B'], [b'c', b'd'], [b'E', b'F']]),
dict( # x.shape=[D1, D2], y.shape=[D1, (D2)]
condition=[True, False, True],
x=[['A', 'B'], ['C', 'D'], ['E', 'F']],
y=ragged_factory_ops.constant_value(
[['a', 'b'], ['c'], ['d', 'e']]),
expected=ragged_factory_ops.constant_value(
[[b'A', b'B'], [b'c'], [b'E', b'F']])),
dict( # x.shape=[D1, (D2)], y.shape=[D1, (D2)]
condition=[True, False, True],
x=ragged_factory_ops.constant_value(
[['A', 'B', 'C'], ['D', 'E'], ['F', 'G']]),
y=ragged_factory_ops.constant_value(
[['a', 'b'], ['c'], ['d', 'e']]),
expected=ragged_factory_ops.constant_value(
[[b'A', b'B', b'C'], [b'c'], [b'F', b'G']])),
dict( # shape=[D1, (D2), (D3), (D4)]
condition=ragged_factory_ops.constant_value([True, False]),
x=ragged_factory_ops.constant_value([
[[[], ['A']]],
[[['B', 'C', 'D'], ['E', 'F']],
[['G'], [], ['H'], ['I', 'J', 'K']]]
]),
y=ragged_factory_ops.constant_value([[[['a']]], [[['b']]]]),
expected=ragged_factory_ops.constant_value(
[[[[], [b'A']]], [[[b'b']]]])),
]) # pyformat: disable
def testRaggedWhere(self, condition, expected, x=None, y=None):
result = ragged_where_op.where(condition, x, y)
self.assertAllEqual(result, expected)
@parameterized.parameters([
dict(
condition=[True, False],
x=[1, 2],
error=ValueError,
message='x and y must be either both None or both non-None'),
dict(
condition=ragged_factory_ops.constant_value([[True, False, True],
[False, True]]),
x=ragged_factory_ops.constant_value([['A', 'B', 'C'], ['D', 'E']]),
y=[['a', 'b'], ['d', 'e']],
error=ValueError,
message='Input shapes do not match.'),
])
def testRaggedWhereErrors(self, condition, error, message, x=None, y=None):
with self.assertRaisesRegexp(error, message):
ragged_where_op.where(condition, x, y)
if __name__ == '__main__':
googletest.main()
|
py
|
1a567eff7bee42564babacc28a35868a8f9b83ce
|
# coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import os
import re
from pants.util.contextutil import temporary_dir
from pants_test.pants_run_integration_test import PantsRunIntegrationTest
class IvyResolveIntegrationTest(PantsRunIntegrationTest):
def test_ivy_resolve_gives_correct_exception_on_cycles(self):
with temporary_dir(root_dir=self.workdir_root()) as workdir:
pants_run = self.run_pants_with_workdir([
'compile', 'testprojects/src/java/org/pantsbuild/testproject/cycle1'], workdir)
self.assert_failure(pants_run)
self.assertIn('Cycle detected', pants_run.stderr_data)
def test_java_compile_with_ivy_report(self):
# Ensure the ivy report file gets generated
with temporary_dir(root_dir=self.workdir_root()) as workdir:
ivy_report_dir = '{workdir}/ivy-report'.format(workdir=workdir)
pants_run = self.run_pants_with_workdir([
'compile',
'testprojects/src/java/org/pantsbuild/testproject/unicode/main',
'--resolve-ivy-report',
'--resolve-ivy-outdir={reportdir}'.format(reportdir=ivy_report_dir)],
workdir)
self.assert_success(pants_run)
# Find the ivy report
found = False
      pattern = re.compile(r'internal-[a-f0-9]+-default\.html$')
for f in os.listdir(ivy_report_dir):
if os.path.isfile(os.path.join(ivy_report_dir, f)):
if pattern.match(f):
found = True
break
self.assertTrue(found,
msg="Couldn't find ivy report in {report_dir}"
.format(report_dir=ivy_report_dir))
def test_ivy_args(self):
pants_run = self.run_pants([
'resolve',
'--resolve-ivy-args=-blablabla',
'examples/src/scala::'
])
self.assert_failure(pants_run)
self.assertIn('Unrecognized option: -blablabla', pants_run.stdout_data)
def test_ivy_confs_success(self):
pants_run = self.run_pants([
'resolve',
'--resolve-ivy-confs=default',
'--resolve-ivy-confs=sources',
'--resolve-ivy-confs=javadoc',
'3rdparty:junit'
])
self.assert_success(pants_run)
def test_ivy_confs_failure(self):
pants_run = self.run_pants([
'resolve',
'--resolve-ivy-confs=parampampam',
'3rdparty:junit'
])
self.assert_failure(pants_run)
def test_ivy_confs_ini_failure(self):
pants_ini_config = {'resolve.ivy': {'confs': 'parampampam'}}
pants_run = self.run_pants([
'resolve',
'3rdparty:junit'
], config=pants_ini_config)
self.assert_failure(pants_run)
|
py
|
1a5680efb07f2dc3f2a02902ed7c61b6e9914c34
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
from . import outputs
__all__ = ['WebAppDomainOwnershipIdentifierSlot']
warnings.warn("""The 'latest' version is deprecated. Please migrate to the resource in the top-level module: 'azure-native:web:WebAppDomainOwnershipIdentifierSlot'.""", DeprecationWarning)
class WebAppDomainOwnershipIdentifierSlot(pulumi.CustomResource):
warnings.warn("""The 'latest' version is deprecated. Please migrate to the resource in the top-level module: 'azure-native:web:WebAppDomainOwnershipIdentifierSlot'.""", DeprecationWarning)
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
domain_ownership_identifier_name: Optional[pulumi.Input[str]] = None,
kind: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
slot: Optional[pulumi.Input[str]] = None,
value: Optional[pulumi.Input[str]] = None,
__props__=None,
__name__=None,
__opts__=None):
"""
A domain specific resource identifier.
Latest API Version: 2020-10-01.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] domain_ownership_identifier_name: Name of domain ownership identifier.
:param pulumi.Input[str] kind: Kind of resource.
:param pulumi.Input[str] name: Name of the app.
:param pulumi.Input[str] resource_group_name: Name of the resource group to which the resource belongs.
:param pulumi.Input[str] slot: Name of the deployment slot. If a slot is not specified, the API will delete the binding for the production slot.
:param pulumi.Input[str] value: String representation of the identity.
"""
pulumi.log.warn("""WebAppDomainOwnershipIdentifierSlot is deprecated: The 'latest' version is deprecated. Please migrate to the resource in the top-level module: 'azure-native:web:WebAppDomainOwnershipIdentifierSlot'.""")
if __name__ is not None:
warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
resource_name = __name__
if __opts__ is not None:
warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
opts = __opts__
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = dict()
__props__['domain_ownership_identifier_name'] = domain_ownership_identifier_name
__props__['kind'] = kind
if name is None and not opts.urn:
raise TypeError("Missing required property 'name'")
__props__['name'] = name
if resource_group_name is None and not opts.urn:
raise TypeError("Missing required property 'resource_group_name'")
__props__['resource_group_name'] = resource_group_name
if slot is None and not opts.urn:
raise TypeError("Missing required property 'slot'")
__props__['slot'] = slot
__props__['value'] = value
__props__['system_data'] = None
__props__['type'] = None
alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:web/latest:WebAppDomainOwnershipIdentifierSlot"), pulumi.Alias(type_="azure-native:web:WebAppDomainOwnershipIdentifierSlot"), pulumi.Alias(type_="azure-nextgen:web:WebAppDomainOwnershipIdentifierSlot"), pulumi.Alias(type_="azure-native:web/v20160801:WebAppDomainOwnershipIdentifierSlot"), pulumi.Alias(type_="azure-nextgen:web/v20160801:WebAppDomainOwnershipIdentifierSlot"), pulumi.Alias(type_="azure-native:web/v20180201:WebAppDomainOwnershipIdentifierSlot"), pulumi.Alias(type_="azure-nextgen:web/v20180201:WebAppDomainOwnershipIdentifierSlot"), pulumi.Alias(type_="azure-native:web/v20181101:WebAppDomainOwnershipIdentifierSlot"), pulumi.Alias(type_="azure-nextgen:web/v20181101:WebAppDomainOwnershipIdentifierSlot"), pulumi.Alias(type_="azure-native:web/v20190801:WebAppDomainOwnershipIdentifierSlot"), pulumi.Alias(type_="azure-nextgen:web/v20190801:WebAppDomainOwnershipIdentifierSlot"), pulumi.Alias(type_="azure-native:web/v20200601:WebAppDomainOwnershipIdentifierSlot"), pulumi.Alias(type_="azure-nextgen:web/v20200601:WebAppDomainOwnershipIdentifierSlot"), pulumi.Alias(type_="azure-native:web/v20200901:WebAppDomainOwnershipIdentifierSlot"), pulumi.Alias(type_="azure-nextgen:web/v20200901:WebAppDomainOwnershipIdentifierSlot"), pulumi.Alias(type_="azure-native:web/v20201001:WebAppDomainOwnershipIdentifierSlot"), pulumi.Alias(type_="azure-nextgen:web/v20201001:WebAppDomainOwnershipIdentifierSlot")])
opts = pulumi.ResourceOptions.merge(opts, alias_opts)
super(WebAppDomainOwnershipIdentifierSlot, __self__).__init__(
'azure-native:web/latest:WebAppDomainOwnershipIdentifierSlot',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'WebAppDomainOwnershipIdentifierSlot':
"""
Get an existing WebAppDomainOwnershipIdentifierSlot resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = dict()
__props__["kind"] = None
__props__["name"] = None
__props__["system_data"] = None
__props__["type"] = None
__props__["value"] = None
return WebAppDomainOwnershipIdentifierSlot(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter
def kind(self) -> pulumi.Output[Optional[str]]:
"""
Kind of resource.
"""
return pulumi.get(self, "kind")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
Resource Name.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="systemData")
def system_data(self) -> pulumi.Output['outputs.SystemDataResponse']:
"""
The system metadata relating to this resource.
"""
return pulumi.get(self, "system_data")
@property
@pulumi.getter
def type(self) -> pulumi.Output[str]:
"""
Resource type.
"""
return pulumi.get(self, "type")
@property
@pulumi.getter
def value(self) -> pulumi.Output[Optional[str]]:
"""
String representation of the identity.
"""
return pulumi.get(self, "value")
def translate_output_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
def translate_input_property(self, prop):
return _tables.SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
|
py
|
1a56813ab3cae382f01386ca1dc75862a855ddb3
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# SPDX-License-Identifier: Apache-2.0
# Copyright 2021 RT Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
import rospy
from geometry_msgs.msg import Twist
from thouzer_msgs.msg import Vel
from std_srvs.srv import Trigger, TriggerResponse
class CmdVelTwist(object):
def __init__(self):
rospy.loginfo("cmd_vel remapper start")
self._twist_sub = rospy.Subscriber('/cmd_vel', Twist, self.joy_callback, queue_size=1)
self._vel_pub = rospy.Publisher('/thouzer/vel', Vel, queue_size=1)
def joy_callback(self, msg):
vel = Vel()
vel.v_mps = msg.linear.x
vel.w_degps = math.degrees(msg.angular.z)
print(vel)
self._vel_pub.publish(vel)
if __name__ == '__main__':
    # Initialize the node before other ROS calls; service proxy calls require
    # an initialized node.
    rospy.init_node('thouzer_cmd_vel')
    rospy.wait_for_service('/motor_on')
    rospy.wait_for_service('/motor_off')
    rospy.on_shutdown(rospy.ServiceProxy('/motor_off', Trigger).call)
    rospy.ServiceProxy('/motor_on', Trigger).call()
logicool_cmd_vel = CmdVelTwist()
rospy.spin()
|
py
|
1a5681a9088d589ccdc809e80a0a3ac76759c4df
|
import importlib.metadata
name = "Dramakul"
DISTRIBUTION_METADATA = importlib.metadata.metadata(name)
description = DISTRIBUTION_METADATA["description"]
__version__ = DISTRIBUTION_METADATA["version"]
|
py
|
1a5682bd956761a2c11c48916e1869ee99fc161b
|
from django.test import TestCase, Client
from django.contrib.auth import get_user_model
from django.urls import reverse
class AdminSiteTests(TestCase):
def setUp(self):
self.client = Client()
self.admin_user = get_user_model().objects.create_superuser(
email='[email protected]',
password='pass123'
)
self.client.force_login(self.admin_user)
self.user = get_user_model().objects.create_user(
email='[email protected]',
password='pass123',
name='Test user full name'
)
def test_users_listed(self):
"""Test that users are listed on user page"""
url = reverse('admin:core_user_changelist')
res = self.client.get(url)
self.assertContains(res, self.user.name)
self.assertContains(res, self.user.email)
def test_user_change_page(self):
"""Test that user edit page works"""
url = reverse("admin:core_user_change", args=[self.user.id])
res = self.client.get(url)
self.assertEqual(res.status_code, 200)
def test_create_user_page(self):
"""Test that the create user page works"""
url = reverse('admin:core_user_add')
res = self.client.get(url)
self.assertEqual(res.status_code, 200)
|
py
|
1a5682c5246908ae69f51042882db54998c35a13
|
from django.contrib import admin
from offices.models import CountyOffice
class CountyOfficeAdmin(admin.ModelAdmin):
search_fields = ['title']
list_filter = ('state_ref',)
admin.site.register(CountyOffice, CountyOfficeAdmin)
|
py
|
1a56843623b42aa069890901df50c457267da825
|
from django.contrib import admin
from django.contrib.auth.admin import UserAdmin
from .models import User, Librarian, Patron, Student, Faculty, Professor, TA, Instructor
from django.contrib.auth.forms import UserChangeForm
# Register your models here.
class BaseUserChangeForm(UserChangeForm):
class Meta(UserChangeForm.Meta):
model = User
class BaseUserAdmin(UserAdmin):
form = BaseUserChangeForm
fieldsets = UserAdmin.fieldsets + (
(None, {'fields': ('phone_number', 'address')}),
)
class LibrarianAdmin(BaseUserAdmin):
pass
class PatronAdmin(BaseUserAdmin):
pass
class StudentAdmin(BaseUserAdmin):
pass
class FacultyAdmin(BaseUserAdmin):
pass
class ProfessorAdmin(BaseUserAdmin):
pass
class TAAdmin(BaseUserAdmin):
pass
class InstructorAdmin(BaseUserAdmin):
pass
admin.site.register(User, BaseUserAdmin)
admin.site.register(Patron, PatronAdmin)
admin.site.register(Librarian, LibrarianAdmin)
admin.site.register(Student, StudentAdmin)
admin.site.register(Faculty, FacultyAdmin)
admin.site.register(Professor, ProfessorAdmin)
admin.site.register(TA, TAAdmin)
admin.site.register(Instructor, InstructorAdmin)
|
py
|
1a56863a3f3f86eca98746e2b4d7e8335527330d
|
from .agent_builder import AgentBuilder
from .a2c_builder import A2CBuilder
from .ppo_builder import PPOBuilder
from .trpo_builder import TRPOBuilder
from .ddpg_builder import DDPGBuilder
from .td3_builder import TD3Builder
from .sac_builder import SACBuilder
from .environment_builder import EnvironmentBuilder
__all__ = [
'AgentBuilder',
'A2CBuilder',
'PPOBuilder',
'TRPOBuilder',
'DDPGBuilder',
'TD3Builder',
'SACBuilder',
'EnvironmentBuilder'
]
|
py
|
1a56863d1c72313da6ee94e6fb214d57bd0ae0ed
|
# qubit number=3
# total number=12
import numpy as np
from qiskit import QuantumCircuit, execute, Aer, QuantumRegister, ClassicalRegister, transpile, BasicAer, IBMQ
import networkx as nx
from qiskit.visualization import plot_histogram
from typing import *
from pprint import pprint
from math import log2
from collections import Counter
from qiskit.test.mock import FakeVigo, FakeYorktown
kernel = 'circuit/bernstein'
def make_circuit(n:int) -> QuantumCircuit:
# circuit begin
input_qubit = QuantumRegister(n,"qc")
prog = QuantumCircuit(input_qubit)
prog.h(input_qubit[0]) # number=1
prog.h(input_qubit[1]) # number=2
prog.h(input_qubit[2]) # number=3
prog.h(input_qubit[3]) # number=4
for edge in E:
k = edge[0]
l = edge[1]
prog.cp(-2 * gamma, input_qubit[k-1], input_qubit[l-1])
prog.p(gamma, k)
prog.p(gamma, l)
prog.rx(2 * beta, range(len(V)))
prog.cx(input_qubit[0],input_qubit[2]) # number=9
prog.x(input_qubit[2]) # number=10
prog.cx(input_qubit[0],input_qubit[2]) # number=11
prog.x(input_qubit[2]) # number=6
prog.cx(input_qubit[1],input_qubit[0]) # number=7
prog.cx(input_qubit[1],input_qubit[0]) # number=8
# circuit end
return prog
if __name__ == '__main__':
n = 4
V = np.arange(0, n, 1)
E = [(0, 1, 1.0), (0, 2, 1.0), (1, 2, 1.0), (3, 2, 1.0), (3, 1, 1.0)]
G = nx.Graph()
G.add_nodes_from(V)
G.add_weighted_edges_from(E)
step_size = 0.1
a_gamma = np.arange(0, np.pi, step_size)
a_beta = np.arange(0, np.pi, step_size)
a_gamma, a_beta = np.meshgrid(a_gamma, a_beta)
F1 = 3 - (np.sin(2 * a_beta) ** 2 * np.sin(2 * a_gamma) ** 2 - 0.5 * np.sin(4 * a_beta) * np.sin(4 * a_gamma)) * (
1 + np.cos(4 * a_gamma) ** 2)
result = np.where(F1 == np.amax(F1))
a = list(zip(result[0], result[1]))[0]
gamma = a[0] * step_size
beta = a[1] * step_size
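    # The grid search above selects the (gamma, beta) pair that maximizes F1;
    # these angles parameterize the circuit built below.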
prog = make_circuit(4)
    sample_shot = 3962
writefile = open("../data/startQiskit_QC87.csv", "w")
# prog.draw('mpl', filename=(kernel + '.png'))
IBMQ.load_account()
provider = IBMQ.get_provider(hub='ibm-q')
provider.backends()
backend = provider.get_backend("ibmq_5_yorktown")
circuit1 = transpile(prog, FakeYorktown())
circuit1.measure_all()
prog = circuit1
info = execute(prog,backend=backend, shots=sample_shot).result().get_counts()
print(info, file=writefile)
print("results end", file=writefile)
print(circuit1.depth(), file=writefile)
print(circuit1, file=writefile)
writefile.close()
|
py
|
1a5686647ce5422efb41e5cd33860af98a8134d8
|
# automatically generated by the FlatBuffers compiler, do not modify
# namespace: protocol
class MessageBody(object):
NONE = 0
TracesFromPriorRequest = 1
ObservesInitRequest = 2
ProposalRequest = 3
TracesFromPriorReply = 4
ObservesInitReply = 5
ProposalReply = 6
|
py
|
1a568668a2e5a03fab7e4653acd0dda11ec35543
|
# Copyright 2019 The TensorTrade Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License
import tensortrade.orders.create as create
from typing import Union, List, Tuple
from itertools import product
from gym.spaces import Discrete
from tensortrade.actions import ActionScheme
from tensortrade.orders import TradeSide, TradeType, Order, OrderListener, risk_managed_order
class ManagedRiskOrders(ActionScheme):
"""A discrete action scheme that determines actions based on managing risk,
through setting a follow-up stop loss and take profit on every order.
"""
def __init__(self,
stop_loss_percentages: Union[List[float], float] = [0.02, 0.04, 0.06],
take_profit_percentages: Union[List[float], float] = [0.01, 0.02, 0.03],
trade_sizes: Union[List[float], int] = 10,
trade_type: TradeType = TradeType.MARKET,
ttl_in_seconds: int = None,
ttl_in_steps: int = None,
order_listener: OrderListener = None):
"""
Arguments:
pairs: A list of trading pairs to select from when submitting an order.
(e.g. TradingPair(BTC, USD), TradingPair(ETH, BTC), etc.)
stop_loss_percentages: A list of possible stop loss percentages for each order.
take_profit_percentages: A list of possible take profit percentages for each order.
trade_sizes: A list of trade sizes to select from when submitting an order.
(e.g. '[1, 1/3]' = 100% or 33% of balance is tradable. '4' = 25%, 50%, 75%, or 100% of balance is tradable.)
order_listener (optional): An optional listener for order events executed by this action scheme.
"""
self.stop_loss_percentages = self.default('stop_loss_percentages', stop_loss_percentages)
self.take_profit_percentages = self.default(
'take_profit_percentages', take_profit_percentages)
self.trade_sizes = self.default('trade_sizes', trade_sizes)
self.trade_type = self.default('trade_type', trade_type)
self.ttl_in_seconds = self.default('ttl_in_seconds', ttl_in_seconds)
self.ttl_in_steps = self.default('ttl_in_steps', ttl_in_steps)
self._order_listener = self.default('order_listener', order_listener)
generator = product(self.stop_loss_percentages,
self.take_profit_percentages,
self.trade_sizes,
[TradeSide.BUY, TradeSide.SELL])
self.actions = list(generator)
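        # Illustrative note: with the defaults above this yields
        # 3 * 3 * 10 * 2 = 180 discrete (stop_loss, take_profit, size, side)
        # combinations; get_order() below decodes an action index into one.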
@property
def action_space(self) -> Discrete:
"""The discrete action space produced by the action scheme."""
return Discrete(len(self.actions))
@property
def stop_loss_percentages(self) -> List[float]:
"""A list of order percentage losses to select a stop loss from when submitting an order.
(e.g. 0.01 = sell if price drops 1%, 0.15 = 15%, etc.)
"""
return self._stop_loss_percentages
@stop_loss_percentages.setter
def stop_loss_percentages(self, stop_loss_percentages: Union[List[float], float]):
self._stop_loss_percentages = stop_loss_percentages if isinstance(
stop_loss_percentages, list) else [stop_loss_percentages]
@property
def take_profit_percentages(self) -> List[float]:
"""A list of order percentage gains to select a take profit from when submitting an order.
(e.g. 0.01 = sell if price rises 1%, 0.15 = 15%, etc.)
"""
return self._take_profit_percentages
@take_profit_percentages.setter
def take_profit_percentages(self, take_profit_percentages: Union[List[float], float]):
self._take_profit_percentages = take_profit_percentages if isinstance(
take_profit_percentages, list) else [take_profit_percentages]
@property
def trade_sizes(self) -> List[float]:
"""A list of trade sizes to select from when submitting an order.
(e.g. '[1, 1/3]' = 100% or 33% of balance is tradable. '4' = 25%, 50%, 75%, or 100% of balance is tradable.)
"""
return self._trade_sizes
@trade_sizes.setter
def trade_sizes(self, trade_sizes: Union[List[float], int]):
self._trade_sizes = trade_sizes if isinstance(trade_sizes, list) else [
(x + 1) / trade_sizes for x in range(trade_sizes)]
def get_order(self, action: int, portfolio: 'Portfolio') -> Order:
if action == 0:
return None
((exchange, pair), (stop_loss, take_profit, size, side)) = self.actions[action]
price = exchange.quote_price(pair)
wallet_instrument = pair.base if side == TradeSide.BUY else pair.quote
wallet = portfolio.get_wallet(exchange.id, instrument=wallet_instrument)
size = (wallet.balance.size * size)
size = min(wallet.balance.size, size)
if size < 10 ** -pair.base.precision:
return None
params = {
'step': exchange.clock.step,
'side': side,
'pair': pair,
'price': price,
'size': size,
'down_percent': stop_loss,
'up_percent': take_profit,
'portfolio': portfolio,
'trade_type': self.trade_type,
'ttl_in_seconds': self.ttl_in_seconds,
'ttl_in_steps': self.ttl_in_steps,
}
        order = risk_managed_order(**params)
        if self._order_listener is not None:
            order.attach(self._order_listener)
        return order
def reset(self):
pass
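# Usage sketch (illustrative, not part of the original file), assuming only
# the constructor shown above:
#
#   scheme = ManagedRiskOrders(stop_loss_percentages=[0.02, 0.04],
#                              take_profit_percentages=[0.01, 0.03],
#                              trade_sizes=2)
#   # product(2 stop losses, 2 take profits, 2 sizes, 2 sides) -> 16 actions
#   scheme.action_space  # Discrete(16)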
|
py
|
1a56866f2bb135a31bf7f5f216bfd4e0c8f95352
|
# Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# from oasisagent.objects import function
#
# Function = function.Function
# __all__ = (Function)
|
py
|
1a5686735a15f23b492105c8ebcb222b161ac5f5
|
import _plotly_utils.basevalidators
class HighlightcolorValidator(_plotly_utils.basevalidators.ColorValidator):
def __init__(
self, plotly_name="highlightcolor", parent_name="surface.contours.y", **kwargs
):
super(HighlightcolorValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "calc"),
role=kwargs.pop("role", "style"),
**kwargs
)
|
py
|
1a568757366ac8f0ffd0ec99a36e8b2191ed604e
|
from __future__ import print_function, unicode_literals
import json
import sys
from tagalog import io
def main():
for line in io.lines(sys.stdin):
msg = json.loads(line)
if '@timestamp' in msg:
print(msg['@timestamp'], msg['@message'])
else:
print(msg['@message'])
if __name__ == '__main__':
main()
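# Example: a stdin line of
#   {"@timestamp": "2013-01-01T00:00:00Z", "@message": "hello"}
# prints "2013-01-01T00:00:00Z hello"; a line without an '@timestamp'
# field prints just its '@message'.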
|
py
|
1a56879a7de3eb5d2083507fdd6bf6de62b6e04f
|
# Copyright (c) 2006-2009 The Trustees of Indiana University.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# - Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# - Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# - Neither the Indiana University nor the names of its contributors may be used
# to endorse or promote products derived from this software without specific
# prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from corepy.spre.spe import Instruction, DispatchInstruction
from x86_64_fields import *
from x86_64_insts import *
__annoy__ = True
__doc__="""
x86_64 Instruction Set Architecture (ISA).
To use, import this module and call the Instructions as Python
functions to generate a properly coded version. For example, to
create an add instruction:
import corepy.arch.x86_64.isa as isa
import corepy.arch.x86_64.types.registers as regs
inst = isa.add(regs.eax, regs.ebx) # add ebx to eax
Operands are in the same order as presented in the architecture manuals.
For a complete reference and details for all instructions, please
refer to:
'Intel 64 and IA-32 Architectures Software Developer's Manual' or
'AMD64 Architecture Programmer's Manual'.
URL (valid as of Sept 21, 2007):
http://www.intel.com/products/processor/manuals/index.htm
http://developer.amd.com/devguides.jsp
"""
# ------------------------------
# x86 Registers
# ------------------------------
# reg num: named register
gp8_map = {0: al_t, 1: cl_t}
gp16_map = {0: ax_t, 2: dx_t}
gp32_map = {0: eax_t}
gp64_map = {0: rax_t}
fp_map = {0: st0_t}
# ------------------------------
# x86 Instructions
# ------------------------------
# Currently 16bit versions of instructions have separate operand
# functions, and the size-override prefix is in the opcode, so protected
# (32bit default) mode is assumed here.
class adc(DispatchInstruction):
dispatch = (
(reg64_simm8, {'opcode':[0x83], 'modrm':0x10}),
(mem64_simm8, {'opcode':[0x83], 'modrm':0x10}),
(rax_imm32, {'opcode':[0x15], 'modrm':None}),
(reg64_imm32, {'opcode':[0x81], 'modrm':0x10}),
(mem64_imm32, {'opcode':[0x81], 'modrm':0x10}),
(reg64_reg64, {'opcode':[0x11], 'modrm':None}),
(mem64_reg64, {'opcode':[0x11], 'modrm':None}),
(reg64_mem64, {'opcode':[0x13], 'modrm':None}),
(reg32_simm8, {'opcode':[0x83], 'modrm':0x10}),
(mem32_simm8, {'opcode':[0x83], 'modrm':0x10}),
(eax_imm32, {'opcode':[0x15], 'modrm':None}),
(reg32_imm32, {'opcode':[0x81], 'modrm':0x10}),
(mem32_imm32, {'opcode':[0x81], 'modrm':0x10}),
(reg32_reg32, {'opcode':[0x11], 'modrm':None}),
(mem32_reg32, {'opcode':[0x11], 'modrm':None}),
(reg32_mem32, {'opcode':[0x13], 'modrm':None}),
(reg16_simm8, {'opcode':[0x83], 'modrm':0x10}),
(mem16_simm8, {'opcode':[0x83], 'modrm':0x10}),
(ax_imm16, {'opcode':[0x66, 0x15], 'modrm':None}),
(reg16_imm16, {'opcode':[0x81], 'modrm':0x10}),
(mem16_imm16, {'opcode':[0x81], 'modrm':0x10}),
(reg16_reg16, {'opcode':[0x11], 'modrm':None}),
(mem16_reg16, {'opcode':[0x11], 'modrm':None}),
(reg16_mem16, {'opcode':[0x13], 'modrm':None}),
(al_imm8, {'opcode':[0x14], 'modrm':None}),
(reg8_imm8, {'opcode':[0x80], 'modrm':0x10}),
(mem8_imm8, {'opcode':[0x80], 'modrm':0x10}),
(reg8_reg8, {'opcode':[0x10], 'modrm':None}),
(mem8_reg8, {'opcode':[0x10], 'modrm':None}),
(reg8_mem8, {'opcode':[0x12], 'modrm':None}))
class add(DispatchInstruction):
dispatch = (
(reg64_simm8, {'opcode':[0x83], 'modrm':0x00}),
(mem64_simm8, {'opcode':[0x83], 'modrm':0x00}),
(rax_imm32, {'opcode':[0x05], 'modrm':None}),
(reg64_imm32, {'opcode':[0x81], 'modrm':0x00}),
(mem64_imm32, {'opcode':[0x81], 'modrm':0x00}),
(reg64_reg64, {'opcode':[0x01], 'modrm':None}),
(mem64_reg64, {'opcode':[0x01], 'modrm':None}),
(reg64_mem64, {'opcode':[0x03], 'modrm':None}),
(reg32_simm8, {'opcode':[0x83], 'modrm':0x00}),
(mem32_simm8, {'opcode':[0x83], 'modrm':0x00}),
(eax_imm32, {'opcode':[0x05], 'modrm':None}),
(reg32_imm32, {'opcode':[0x81], 'modrm':0x00}),
(mem32_imm32, {'opcode':[0x81], 'modrm':0x00}),
(reg32_reg32, {'opcode':[0x01], 'modrm':None}),
(mem32_reg32, {'opcode':[0x01], 'modrm':None}),
(reg32_mem32, {'opcode':[0x03], 'modrm':None}),
(reg16_simm8, {'opcode':[0x83], 'modrm':0x00}),
(mem16_simm8, {'opcode':[0x83], 'modrm':0x00}),
(ax_imm16, {'opcode':[0x66, 0x05], 'modrm':None}),
(reg16_imm16, {'opcode':[0x81], 'modrm':0x00}),
(mem16_imm16, {'opcode':[0x81], 'modrm':0x00}),
(reg16_reg16, {'opcode':[0x01], 'modrm':None}),
(mem16_reg16, {'opcode':[0x01], 'modrm':None}),
(reg16_mem16, {'opcode':[0x03], 'modrm':None}),
(al_imm8, {'opcode':[0x04], 'modrm':None}),
(reg8_imm8, {'opcode':[0x80], 'modrm':0x00}),
(mem8_imm8, {'opcode':[0x80], 'modrm':0x00}),
(reg8_reg8, {'opcode':[0x00], 'modrm':None}),
(mem8_reg8, {'opcode':[0x00], 'modrm':None}),
(reg8_mem8, {'opcode':[0x02], 'modrm':None}))
class and_(DispatchInstruction):
dispatch = (
(reg64_simm8, {'opcode':[0x83], 'modrm':0x20}),
(mem64_simm8, {'opcode':[0x83], 'modrm':0x20}),
(rax_imm32, {'opcode':[0x25], 'modrm':None}),
(reg64_imm32, {'opcode':[0x81], 'modrm':0x20}),
(mem64_imm32, {'opcode':[0x81], 'modrm':0x20}),
(reg64_reg64, {'opcode':[0x21], 'modrm':None}),
(mem64_reg64, {'opcode':[0x21], 'modrm':None}),
(reg64_mem64, {'opcode':[0x23], 'modrm':None}),
(reg32_simm8, {'opcode':[0x83], 'modrm':0x20}),
(mem32_simm8, {'opcode':[0x83], 'modrm':0x20}),
(eax_imm32, {'opcode':[0x25], 'modrm':None}),
(reg32_imm32, {'opcode':[0x81], 'modrm':0x20}),
(mem32_imm32, {'opcode':[0x81], 'modrm':0x20}),
(reg32_reg32, {'opcode':[0x21], 'modrm':None}),
(mem32_reg32, {'opcode':[0x21], 'modrm':None}),
(reg32_mem32, {'opcode':[0x23], 'modrm':None}),
(reg16_simm8, {'opcode':[0x83], 'modrm':0x20}),
(mem16_simm8, {'opcode':[0x83], 'modrm':0x20}),
(ax_imm16, {'opcode':[0x66, 0x25], 'modrm':None}),
(reg16_imm16, {'opcode':[0x81], 'modrm':0x20}),
(mem16_imm16, {'opcode':[0x81], 'modrm':0x20}),
(reg16_reg16, {'opcode':[0x21], 'modrm':None}),
(mem16_reg16, {'opcode':[0x21], 'modrm':None}),
(reg16_mem16, {'opcode':[0x23], 'modrm':None}),
(al_imm8, {'opcode':[0x24], 'modrm':None}),
(reg8_imm8, {'opcode':[0x80], 'modrm':0x20}),
(mem8_imm8, {'opcode':[0x80], 'modrm':0x20}),
(reg8_reg8, {'opcode':[0x20], 'modrm':None}),
(mem8_reg8, {'opcode':[0x20], 'modrm':None}),
(reg8_mem8, {'opcode':[0x22], 'modrm':None}))
class bsf(DispatchInstruction):
dispatch = (
(reg64_reg64, {'opcode':[0x0F, 0xBC], 'modrm':None}),
(reg64_mem64, {'opcode':[0x0F, 0xBC], 'modrm':None}),
(reg32_reg32, {'opcode':[0x0F, 0xBC], 'modrm':None}),
(reg32_mem32, {'opcode':[0x0F, 0xBC], 'modrm':None}),
(reg16_reg16, {'opcode':[0x0F, 0xBC], 'modrm':None}),
(reg16_mem16, {'opcode':[0x0F, 0xBC], 'modrm':None}))
class bsr(DispatchInstruction):
dispatch = (
(reg64_reg64, {'opcode':[0x0F, 0xBD], 'modrm':None}),
(reg64_mem64, {'opcode':[0x0F, 0xBD], 'modrm':None}),
(reg32_reg32, {'opcode':[0x0F, 0xBD], 'modrm':None}),
(reg32_mem32, {'opcode':[0x0F, 0xBD], 'modrm':None}),
(reg16_reg16, {'opcode':[0x0F, 0xBD], 'modrm':None}),
(reg16_mem16, {'opcode':[0x0F, 0xBD], 'modrm':None}))
class bswap(DispatchInstruction):
dispatch = (
(reg64, {'opcode':[0x0F, 0xC8], 'modrm':None}),
(reg32, {'opcode':[0x0F, 0xC8], 'modrm':None}))
class bt(DispatchInstruction):
dispatch = (
(reg64_imm8, {'opcode':[0x0F, 0xBA], 'modrm':0x20}),
(mem64_imm8, {'opcode':[0x0F, 0xBA], 'modrm':0x20}),
(reg64_reg64, {'opcode':[0x0F, 0xA3], 'modrm':None}),
(mem64_reg64, {'opcode':[0x0F, 0xA3], 'modrm':None}),
(reg32_imm8, {'opcode':[0x0F, 0xBA], 'modrm':0x20}),
(mem32_imm8, {'opcode':[0x0F, 0xBA], 'modrm':0x20}),
(reg32_reg32, {'opcode':[0x0F, 0xA3], 'modrm':None}),
(mem32_reg32, {'opcode':[0x0F, 0xA3], 'modrm':None}),
(reg16_imm8, {'opcode':[0x0F, 0xBA], 'modrm':0x20}),
(mem16_imm8, {'opcode':[0x0F, 0xBA], 'modrm':0x20}),
(reg16_reg16, {'opcode':[0x0F, 0xA3], 'modrm':None}),
(mem16_reg16, {'opcode':[0x0F, 0xA3], 'modrm':None}))
class btc(DispatchInstruction):
dispatch = (
(reg64_imm8, {'opcode':[0x0F, 0xBA], 'modrm':0x38}),
(mem64_imm8, {'opcode':[0x0F, 0xBA], 'modrm':0x38}),
(reg64_reg64, {'opcode':[0x0F, 0xBB], 'modrm':None}),
(mem64_reg64, {'opcode':[0x0F, 0xBB], 'modrm':None}),
(reg32_imm8, {'opcode':[0x0F, 0xBA], 'modrm':0x38}),
(mem32_imm8, {'opcode':[0x0F, 0xBA], 'modrm':0x38}),
(reg32_reg32, {'opcode':[0x0F, 0xBB], 'modrm':None}),
(mem32_reg32, {'opcode':[0x0F, 0xBB], 'modrm':None}),
(reg16_imm8, {'opcode':[0x0F, 0xBA], 'modrm':0x38}),
(mem16_imm8, {'opcode':[0x0F, 0xBA], 'modrm':0x38}),
(reg16_reg16, {'opcode':[0x0F, 0xBB], 'modrm':None}),
(mem16_reg16, {'opcode':[0x0F, 0xBB], 'modrm':None}))
class btr(DispatchInstruction):
dispatch = (
(reg64_imm8, {'opcode':[0x0F, 0xBA], 'modrm':0x30}),
(mem64_imm8, {'opcode':[0x0F, 0xBA], 'modrm':0x30}),
(reg64_reg64, {'opcode':[0x0F, 0xB3], 'modrm':None}),
(mem64_reg64, {'opcode':[0x0F, 0xB3], 'modrm':None}),
(reg32_imm8, {'opcode':[0x0F, 0xBA], 'modrm':0x30}),
(mem32_imm8, {'opcode':[0x0F, 0xBA], 'modrm':0x30}),
(reg32_reg32, {'opcode':[0x0F, 0xB3], 'modrm':None}),
(mem32_reg32, {'opcode':[0x0F, 0xB3], 'modrm':None}),
(reg16_imm8, {'opcode':[0x0F, 0xBA], 'modrm':0x30}),
(mem16_imm8, {'opcode':[0x0F, 0xBA], 'modrm':0x30}),
(reg16_reg16, {'opcode':[0x0F, 0xB3], 'modrm':None}),
(mem16_reg16, {'opcode':[0x0F, 0xB3], 'modrm':None}))
class bts(DispatchInstruction):
dispatch = (
(reg64_imm8, {'opcode':[0x0F, 0xBA], 'modrm':0x28}),
(mem64_imm8, {'opcode':[0x0F, 0xBA], 'modrm':0x28}),
(reg64_reg64, {'opcode':[0x0F, 0xAB], 'modrm':None}),
(mem64_reg64, {'opcode':[0x0F, 0xAB], 'modrm':None}),
(reg32_imm8, {'opcode':[0x0F, 0xBA], 'modrm':0x28}),
(mem32_imm8, {'opcode':[0x0F, 0xBA], 'modrm':0x28}),
(reg32_reg32, {'opcode':[0x0F, 0xAB], 'modrm':None}),
(mem32_reg32, {'opcode':[0x0F, 0xAB], 'modrm':None}),
(reg16_imm8, {'opcode':[0x0F, 0xBA], 'modrm':0x28}),
(mem16_imm8, {'opcode':[0x0F, 0xBA], 'modrm':0x28}),
(reg16_reg16, {'opcode':[0x0F, 0xAB], 'modrm':None}),
(mem16_reg16, {'opcode':[0x0F, 0xAB], 'modrm':None}))
class call(DispatchInstruction):
dispatch = (
(lbl32off, {'opcode':[0xE8], 'modrm':None}),
(rel32off, {'opcode':[0xE8], 'modrm':None}),
(reg64, {'opcode':[0xFF], 'modrm':0x10}),
(mem64_32, {'opcode':[0xFF], 'modrm':0x10}),
(reg16, {'opcode':[0xFF], 'modrm':0x10}),
(mem16, {'opcode':[0x66, 0xFF], 'modrm':0x10}))
class cbw(Instruction):
machine_inst = no_op
params = {'opcode':[0x66, 0x98], 'modrm':None}
class cdq(Instruction):
machine_inst = no_op
params = {'opcode':[0x99], 'modrm':None}
class cdqe(Instruction):
machine_inst = no_op
params = {'opcode':[0x48, 0x98], 'modrm':None}
class clc(Instruction):
machine_inst = no_op
params = {'opcode':[0xF8], 'modrm':None}
class cld(Instruction):
machine_inst = no_op
params = {'opcode':[0xFC], 'modrm':None}
class clflush(Instruction):
machine_inst = mem8
params = {'opcode':[0x0F, 0xAE], 'modrm':0x38}
class cli(Instruction):
machine_inst = no_op
params = {'opcode':[0xFA], 'modrm':None}
class clts(Instruction):
machine_inst = no_op
params = {'opcode':[0x0F, 0x06], 'modrm':None}
class cmc(Instruction):
machine_inst = no_op
params = {'opcode':[0xF5], 'modrm':None}
class cmovo(DispatchInstruction):
dispatch = (
(reg64_reg64_rev, {'opcode':[0x0F, 0x40], 'modrm':None}),
(reg64_mem64, {'opcode':[0x0F, 0x40], 'modrm':None}),
(reg32_reg32_rev, {'opcode':[0x0F, 0x40], 'modrm':None}),
(reg32_mem32, {'opcode':[0x0F, 0x40], 'modrm':None}),
(reg16_reg16_rev, {'opcode':[0x0F, 0x40], 'modrm':None}),
(reg16_mem16, {'opcode':[0x0F, 0x40], 'modrm':None}))
class cmovno(DispatchInstruction):
dispatch = (
(reg64_reg64_rev, {'opcode':[0x0F, 0x41], 'modrm':None}),
(reg64_mem64, {'opcode':[0x0F, 0x41], 'modrm':None}),
(reg32_reg32_rev, {'opcode':[0x0F, 0x41], 'modrm':None}),
(reg32_mem32, {'opcode':[0x0F, 0x41], 'modrm':None}),
(reg16_reg16_rev, {'opcode':[0x0F, 0x41], 'modrm':None}),
(reg16_mem16, {'opcode':[0x0F, 0x41], 'modrm':None}))
class cmovb(DispatchInstruction):
dispatch = (
(reg64_reg64_rev, {'opcode':[0x0F, 0x42], 'modrm':None}),
(reg64_mem64, {'opcode':[0x0F, 0x42], 'modrm':None}),
(reg32_reg32_rev, {'opcode':[0x0F, 0x42], 'modrm':None}),
(reg32_mem32, {'opcode':[0x0F, 0x42], 'modrm':None}),
(reg16_reg16_rev, {'opcode':[0x0F, 0x42], 'modrm':None}),
(reg16_mem16, {'opcode':[0x0F, 0x42], 'modrm':None}))
class cmovc(DispatchInstruction):
dispatch = (
(reg64_reg64_rev, {'opcode':[0x0F, 0x42], 'modrm':None}),
(reg64_mem64, {'opcode':[0x0F, 0x42], 'modrm':None}),
(reg32_reg32_rev, {'opcode':[0x0F, 0x42], 'modrm':None}),
(reg32_mem32, {'opcode':[0x0F, 0x42], 'modrm':None}),
(reg16_reg16_rev, {'opcode':[0x0F, 0x42], 'modrm':None}),
(reg16_mem16, {'opcode':[0x0F, 0x42], 'modrm':None}))
class cmovnae(DispatchInstruction):
dispatch = (
(reg64_reg64_rev, {'opcode':[0x0F, 0x42], 'modrm':None}),
(reg64_mem64, {'opcode':[0x0F, 0x42], 'modrm':None}),
(reg32_reg32_rev, {'opcode':[0x0F, 0x42], 'modrm':None}),
(reg32_mem32, {'opcode':[0x0F, 0x42], 'modrm':None}),
(reg16_reg16_rev, {'opcode':[0x0F, 0x42], 'modrm':None}),
(reg16_mem16, {'opcode':[0x0F, 0x42], 'modrm':None}))
class cmovnb(DispatchInstruction):
dispatch = (
(reg64_reg64_rev, {'opcode':[0x0F, 0x43], 'modrm':None}),
(reg64_mem64, {'opcode':[0x0F, 0x43], 'modrm':None}),
(reg32_reg32_rev, {'opcode':[0x0F, 0x43], 'modrm':None}),
(reg32_mem32, {'opcode':[0x0F, 0x43], 'modrm':None}),
(reg16_reg16_rev, {'opcode':[0x0F, 0x43], 'modrm':None}),
(reg16_mem16, {'opcode':[0x0F, 0x43], 'modrm':None}))
class cmovnc(DispatchInstruction):
dispatch = (
(reg64_reg64_rev, {'opcode':[0x0F, 0x43], 'modrm':None}),
(reg64_mem64, {'opcode':[0x0F, 0x43], 'modrm':None}),
(reg32_reg32_rev, {'opcode':[0x0F, 0x43], 'modrm':None}),
(reg32_mem32, {'opcode':[0x0F, 0x43], 'modrm':None}),
(reg16_reg16_rev, {'opcode':[0x0F, 0x43], 'modrm':None}),
(reg16_mem16, {'opcode':[0x0F, 0x43], 'modrm':None}))
class cmovae(DispatchInstruction):
dispatch = (
(reg64_reg64_rev, {'opcode':[0x0F, 0x43], 'modrm':None}),
(reg64_mem64, {'opcode':[0x0F, 0x43], 'modrm':None}),
(reg32_reg32_rev, {'opcode':[0x0F, 0x43], 'modrm':None}),
(reg32_mem32, {'opcode':[0x0F, 0x43], 'modrm':None}),
(reg16_reg16_rev, {'opcode':[0x0F, 0x43], 'modrm':None}),
(reg16_mem16, {'opcode':[0x0F, 0x43], 'modrm':None}))
class cmovz(DispatchInstruction):
dispatch = (
(reg64_reg64_rev, {'opcode':[0x0F, 0x44], 'modrm':None}),
(reg64_mem64, {'opcode':[0x0F, 0x44], 'modrm':None}),
(reg32_reg32_rev, {'opcode':[0x0F, 0x44], 'modrm':None}),
(reg32_mem32, {'opcode':[0x0F, 0x44], 'modrm':None}),
(reg16_reg16_rev, {'opcode':[0x0F, 0x44], 'modrm':None}),
(reg16_mem16, {'opcode':[0x0F, 0x44], 'modrm':None}))
class cmove(DispatchInstruction):
dispatch = (
(reg64_reg64_rev, {'opcode':[0x0F, 0x44], 'modrm':None}),
(reg64_mem64, {'opcode':[0x0F, 0x44], 'modrm':None}),
(reg32_reg32_rev, {'opcode':[0x0F, 0x44], 'modrm':None}),
(reg32_mem32, {'opcode':[0x0F, 0x44], 'modrm':None}),
(reg16_reg16_rev, {'opcode':[0x0F, 0x44], 'modrm':None}),
(reg16_mem16, {'opcode':[0x0F, 0x44], 'modrm':None}))
class cmovnz(DispatchInstruction):
dispatch = (
(reg64_reg64_rev, {'opcode':[0x0F, 0x45], 'modrm':None}),
(reg64_mem64, {'opcode':[0x0F, 0x45], 'modrm':None}),
(reg32_reg32_rev, {'opcode':[0x0F, 0x45], 'modrm':None}),
(reg32_mem32, {'opcode':[0x0F, 0x45], 'modrm':None}),
(reg16_reg16_rev, {'opcode':[0x0F, 0x45], 'modrm':None}),
(reg16_mem16, {'opcode':[0x0F, 0x45], 'modrm':None}))
class cmovne(DispatchInstruction):
dispatch = (
(reg64_reg64_rev, {'opcode':[0x0F, 0x45], 'modrm':None}),
(reg64_mem64, {'opcode':[0x0F, 0x45], 'modrm':None}),
(reg32_reg32_rev, {'opcode':[0x0F, 0x45], 'modrm':None}),
(reg32_mem32, {'opcode':[0x0F, 0x45], 'modrm':None}),
(reg16_reg16_rev, {'opcode':[0x0F, 0x45], 'modrm':None}),
(reg16_mem16, {'opcode':[0x0F, 0x45], 'modrm':None}))
class cmovbe(DispatchInstruction):
dispatch = (
(reg64_reg64_rev, {'opcode':[0x0F, 0x46], 'modrm':None}),
(reg64_mem64, {'opcode':[0x0F, 0x46], 'modrm':None}),
(reg32_reg32_rev, {'opcode':[0x0F, 0x46], 'modrm':None}),
(reg32_mem32, {'opcode':[0x0F, 0x46], 'modrm':None}),
(reg16_reg16_rev, {'opcode':[0x0F, 0x46], 'modrm':None}),
(reg16_mem16, {'opcode':[0x0F, 0x46], 'modrm':None}))
class cmovna(DispatchInstruction):
dispatch = (
(reg64_reg64_rev, {'opcode':[0x0F, 0x46], 'modrm':None}),
(reg64_mem64, {'opcode':[0x0F, 0x46], 'modrm':None}),
(reg32_reg32_rev, {'opcode':[0x0F, 0x46], 'modrm':None}),
(reg32_mem32, {'opcode':[0x0F, 0x46], 'modrm':None}),
(reg16_reg16_rev, {'opcode':[0x0F, 0x46], 'modrm':None}),
(reg16_mem16, {'opcode':[0x0F, 0x46], 'modrm':None}))
class cmovnbe(DispatchInstruction):
dispatch = (
(reg64_reg64_rev, {'opcode':[0x0F, 0x47], 'modrm':None}),
(reg64_mem64, {'opcode':[0x0F, 0x47], 'modrm':None}),
(reg32_reg32_rev, {'opcode':[0x0F, 0x47], 'modrm':None}),
(reg32_mem32, {'opcode':[0x0F, 0x47], 'modrm':None}),
(reg16_reg16_rev, {'opcode':[0x0F, 0x47], 'modrm':None}),
(reg16_mem16, {'opcode':[0x0F, 0x47], 'modrm':None}))
class cmova(DispatchInstruction):
dispatch = (
(reg64_reg64_rev, {'opcode':[0x0F, 0x47], 'modrm':None}),
(reg64_mem64, {'opcode':[0x0F, 0x47], 'modrm':None}),
(reg32_reg32_rev, {'opcode':[0x0F, 0x47], 'modrm':None}),
(reg32_mem32, {'opcode':[0x0F, 0x47], 'modrm':None}),
(reg16_reg16_rev, {'opcode':[0x0F, 0x47], 'modrm':None}),
(reg16_mem16, {'opcode':[0x0F, 0x47], 'modrm':None}))
class cmovs(DispatchInstruction):
dispatch = (
(reg64_reg64_rev, {'opcode':[0x0F, 0x48], 'modrm':None}),
(reg64_mem64, {'opcode':[0x0F, 0x48], 'modrm':None}),
(reg32_reg32_rev, {'opcode':[0x0F, 0x48], 'modrm':None}),
(reg32_mem32, {'opcode':[0x0F, 0x48], 'modrm':None}),
(reg16_reg16_rev, {'opcode':[0x0F, 0x48], 'modrm':None}),
(reg16_mem16, {'opcode':[0x0F, 0x48], 'modrm':None}))
class cmovns(DispatchInstruction):
dispatch = (
(reg64_reg64_rev, {'opcode':[0x0F, 0x49], 'modrm':None}),
(reg64_mem64, {'opcode':[0x0F, 0x49], 'modrm':None}),
(reg32_reg32_rev, {'opcode':[0x0F, 0x49], 'modrm':None}),
(reg32_mem32, {'opcode':[0x0F, 0x49], 'modrm':None}),
(reg16_reg16_rev, {'opcode':[0x0F, 0x49], 'modrm':None}),
(reg16_mem16, {'opcode':[0x0F, 0x49], 'modrm':None}))
class cmovp(DispatchInstruction):
dispatch = (
(reg64_reg64_rev, {'opcode':[0x0F, 0x4A], 'modrm':None}),
(reg64_mem64, {'opcode':[0x0F, 0x4A], 'modrm':None}),
(reg32_reg32_rev, {'opcode':[0x0F, 0x4A], 'modrm':None}),
(reg32_mem32, {'opcode':[0x0F, 0x4A], 'modrm':None}),
(reg16_reg16_rev, {'opcode':[0x0F, 0x4A], 'modrm':None}),
(reg16_mem16, {'opcode':[0x0F, 0x4A], 'modrm':None}))
class cmovpe(DispatchInstruction):
dispatch = (
(reg64_reg64_rev, {'opcode':[0x0F, 0x4A], 'modrm':None}),
(reg64_mem64, {'opcode':[0x0F, 0x4A], 'modrm':None}),
(reg32_reg32_rev, {'opcode':[0x0F, 0x4A], 'modrm':None}),
(reg32_mem32, {'opcode':[0x0F, 0x4A], 'modrm':None}),
(reg16_reg16_rev, {'opcode':[0x0F, 0x4A], 'modrm':None}),
(reg16_mem16, {'opcode':[0x0F, 0x4A], 'modrm':None}))
class cmovnp(DispatchInstruction):
dispatch = (
(reg64_reg64_rev, {'opcode':[0x0F, 0x4B], 'modrm':None}),
(reg64_mem64, {'opcode':[0x0F, 0x4B], 'modrm':None}),
(reg32_reg32_rev, {'opcode':[0x0F, 0x4B], 'modrm':None}),
(reg32_mem32, {'opcode':[0x0F, 0x4B], 'modrm':None}),
(reg16_reg16_rev, {'opcode':[0x0F, 0x4B], 'modrm':None}),
(reg16_mem16, {'opcode':[0x0F, 0x4B], 'modrm':None}))
class cmovpo(DispatchInstruction):
dispatch = (
(reg64_reg64_rev, {'opcode':[0x0F, 0x4B], 'modrm':None}),
(reg64_mem64, {'opcode':[0x0F, 0x4B], 'modrm':None}),
(reg32_reg32_rev, {'opcode':[0x0F, 0x4B], 'modrm':None}),
(reg32_mem32, {'opcode':[0x0F, 0x4B], 'modrm':None}),
(reg16_reg16_rev, {'opcode':[0x0F, 0x4B], 'modrm':None}),
(reg16_mem16, {'opcode':[0x0F, 0x4B], 'modrm':None}))
class cmovl(DispatchInstruction):
dispatch = (
(reg64_reg64_rev, {'opcode':[0x0F, 0x4C], 'modrm':None}),
(reg64_mem64, {'opcode':[0x0F, 0x4C], 'modrm':None}),
(reg32_reg32_rev, {'opcode':[0x0F, 0x4C], 'modrm':None}),
(reg32_mem32, {'opcode':[0x0F, 0x4C], 'modrm':None}),
(reg16_reg16_rev, {'opcode':[0x0F, 0x4C], 'modrm':None}),
(reg16_mem16, {'opcode':[0x0F, 0x4C], 'modrm':None}))
class cmovnge(DispatchInstruction):
dispatch = (
(reg64_reg64_rev, {'opcode':[0x0F, 0x4C], 'modrm':None}),
(reg64_mem64, {'opcode':[0x0F, 0x4C], 'modrm':None}),
(reg32_reg32_rev, {'opcode':[0x0F, 0x4C], 'modrm':None}),
(reg32_mem32, {'opcode':[0x0F, 0x4C], 'modrm':None}),
(reg16_reg16_rev, {'opcode':[0x0F, 0x4C], 'modrm':None}),
(reg16_mem16, {'opcode':[0x0F, 0x4C], 'modrm':None}))
class cmovnl(DispatchInstruction):
dispatch = (
(reg64_reg64_rev, {'opcode':[0x0F, 0x4D], 'modrm':None}),
(reg64_mem64, {'opcode':[0x0F, 0x4D], 'modrm':None}),
(reg32_reg32_rev, {'opcode':[0x0F, 0x4D], 'modrm':None}),
(reg32_mem32, {'opcode':[0x0F, 0x4D], 'modrm':None}),
(reg16_reg16_rev, {'opcode':[0x0F, 0x4D], 'modrm':None}),
(reg16_mem16, {'opcode':[0x0F, 0x4D], 'modrm':None}))
class cmovge(DispatchInstruction):
dispatch = (
(reg64_reg64_rev, {'opcode':[0x0F, 0x4D], 'modrm':None}),
(reg64_mem64, {'opcode':[0x0F, 0x4D], 'modrm':None}),
(reg32_reg32_rev, {'opcode':[0x0F, 0x4D], 'modrm':None}),
(reg32_mem32, {'opcode':[0x0F, 0x4D], 'modrm':None}),
(reg16_reg16_rev, {'opcode':[0x0F, 0x4D], 'modrm':None}),
(reg16_mem16, {'opcode':[0x0F, 0x4D], 'modrm':None}))
class cmovle(DispatchInstruction):
dispatch = (
(reg64_reg64_rev, {'opcode':[0x0F, 0x4E], 'modrm':None}),
(reg64_mem64, {'opcode':[0x0F, 0x4E], 'modrm':None}),
(reg32_reg32_rev, {'opcode':[0x0F, 0x4E], 'modrm':None}),
(reg32_mem32, {'opcode':[0x0F, 0x4E], 'modrm':None}),
(reg16_reg16_rev, {'opcode':[0x0F, 0x4E], 'modrm':None}),
(reg16_mem16, {'opcode':[0x0F, 0x4E], 'modrm':None}))
class cmovng(DispatchInstruction):
dispatch = (
(reg64_reg64_rev, {'opcode':[0x0F, 0x4E], 'modrm':None}),
(reg64_mem64, {'opcode':[0x0F, 0x4E], 'modrm':None}),
(reg32_reg32_rev, {'opcode':[0x0F, 0x4E], 'modrm':None}),
(reg32_mem32, {'opcode':[0x0F, 0x4E], 'modrm':None}),
(reg16_reg16_rev, {'opcode':[0x0F, 0x4E], 'modrm':None}),
(reg16_mem16, {'opcode':[0x0F, 0x4E], 'modrm':None}))
class cmovnle(DispatchInstruction):
dispatch = (
(reg64_reg64_rev, {'opcode':[0x0F, 0x4F], 'modrm':None}),
(reg64_mem64, {'opcode':[0x0F, 0x4F], 'modrm':None}),
(reg32_reg32_rev, {'opcode':[0x0F, 0x4F], 'modrm':None}),
(reg32_mem32, {'opcode':[0x0F, 0x4F], 'modrm':None}),
(reg16_reg16_rev, {'opcode':[0x0F, 0x4F], 'modrm':None}),
(reg16_mem16, {'opcode':[0x0F, 0x4F], 'modrm':None}))
class cmovg(DispatchInstruction):
dispatch = (
(reg64_reg64_rev, {'opcode':[0x0F, 0x4F], 'modrm':None}),
(reg64_mem64, {'opcode':[0x0F, 0x4F], 'modrm':None}),
(reg32_reg32_rev, {'opcode':[0x0F, 0x4F], 'modrm':None}),
(reg32_mem32, {'opcode':[0x0F, 0x4F], 'modrm':None}),
(reg16_reg16_rev, {'opcode':[0x0F, 0x4F], 'modrm':None}),
(reg16_mem16, {'opcode':[0x0F, 0x4F], 'modrm':None}))
class cmp(DispatchInstruction):
dispatch = (
(reg64_simm8, {'opcode':[0x83], 'modrm':0x38}),
(mem64_simm8, {'opcode':[0x83], 'modrm':0x38}),
(rax_imm32, {'opcode':[0x3D], 'modrm':None}),
(reg64_imm32, {'opcode':[0x81], 'modrm':0x38}),
(mem64_imm32, {'opcode':[0x81], 'modrm':0x38}),
(reg64_reg64, {'opcode':[0x39], 'modrm':None}),
(mem64_reg64, {'opcode':[0x39], 'modrm':None}),
(reg64_mem64, {'opcode':[0x3B], 'modrm':None}),
(reg32_simm8, {'opcode':[0x83], 'modrm':0x38}),
(mem32_simm8, {'opcode':[0x83], 'modrm':0x38}),
(eax_imm32, {'opcode':[0x3D], 'modrm':None}),
(reg32_imm32, {'opcode':[0x81], 'modrm':0x38}),
(mem32_imm32, {'opcode':[0x81], 'modrm':0x38}),
(reg32_reg32, {'opcode':[0x39], 'modrm':None}),
(mem32_reg32, {'opcode':[0x39], 'modrm':None}),
(reg32_mem32, {'opcode':[0x3B], 'modrm':None}),
(reg16_simm8, {'opcode':[0x83], 'modrm':0x38}),
(mem16_simm8, {'opcode':[0x83], 'modrm':0x38}),
(ax_imm16, {'opcode':[0x66, 0x3D], 'modrm':None}),
(reg16_imm16, {'opcode':[0x81], 'modrm':0x38}),
(mem16_imm16, {'opcode':[0x81], 'modrm':0x38}),
(reg16_reg16, {'opcode':[0x39], 'modrm':None}),
(mem16_reg16, {'opcode':[0x39], 'modrm':None}),
(reg16_mem16, {'opcode':[0x3B], 'modrm':None}),
(al_imm8, {'opcode':[0x3C], 'modrm':None}),
(reg8_imm8, {'opcode':[0x80], 'modrm':0x38}),
(mem8_imm8, {'opcode':[0x80], 'modrm':0x38}),
(reg8_reg8, {'opcode':[0x38], 'modrm':None}),
(mem8_reg8, {'opcode':[0x38], 'modrm':None}),
(reg8_mem8, {'opcode':[0x3A], 'modrm':None}))
class cmpsb(Instruction):
machine_inst = no_op
params = {'opcode':[0xA6], 'modrm':None}
class cmpsd(Instruction):
machine_inst = no_op
params = {'opcode':[0xA7], 'modrm':None}
class cmpsw(Instruction):
machine_inst = no_op
params = {'opcode':[0x66, 0xA7], 'modrm':None}
class cmpsq(Instruction):
machine_inst = no_op
params = {'opcode':[0x48, 0xA7], 'modrm':None}
class cmpxchg(DispatchInstruction):
dispatch = (
(reg64_reg64, {'opcode':[0x0F, 0xB1], 'modrm':None}),
(mem64_reg64, {'opcode':[0x0F, 0xB1], 'modrm':None}),
(reg32_reg32, {'opcode':[0x0F, 0xB1], 'modrm':None}),
(mem32_reg32, {'opcode':[0x0F, 0xB1], 'modrm':None}),
(reg16_reg16, {'opcode':[0x0F, 0xB1], 'modrm':None}),
(mem16_reg16, {'opcode':[0x0F, 0xB1], 'modrm':None}),
(reg8_reg8, {'opcode':[0x0F, 0xB0], 'modrm':None}),
(mem8_reg8, {'opcode':[0x0F, 0xB0], 'modrm':None}))
class cmpxchg8b(Instruction):
machine_inst = mem64_32
params = {'opcode':[0x0F, 0xC7], 'modrm':0x08}
class cmpxchg16b(Instruction):
machine_inst = mem128
params = {'opcode':[0x0F, 0xC7], 'modrm':0x08}
class cpuid(Instruction):
machine_inst = no_op
params = {'opcode':[0x0F, 0xA2], 'modrm':None}
class cqo(Instruction):
machine_inst = no_op
params = {'opcode':[0x48, 0x99], 'modrm':None}
class crc32(DispatchInstruction):
dispatch = (
(reg64_reg64, {'opcode':[0x0F, 0x38, 0xF1], 'modrm':None, 'prefix':[0xF2]}),
(reg64_mem64, {'opcode':[0x0F, 0x38, 0xF1], 'modrm':None, 'prefix':[0xF2]}),
(reg64_reg8, {'opcode':[0x0F, 0x38, 0xF0], 'modrm':None, 'prefix':[0xF2]}),
(reg64_mem8, {'opcode':[0x0F, 0x38, 0xF0], 'modrm':None, 'prefix':[0xF2]}),
(reg32_reg32, {'opcode':[0x0F, 0x38, 0xF1], 'modrm':None, 'prefix':[0xF2]}),
(reg32_mem32, {'opcode':[0x0F, 0x38, 0xF1], 'modrm':None, 'prefix':[0xF2]}),
(reg32_reg16, {'opcode':[0x0F, 0x38, 0xF1], 'modrm':None, 'prefix':[0x66, 0xF2]}),
(reg32_mem16, {'opcode':[0x0F, 0x38, 0xF1], 'modrm':None, 'prefix':[0x66, 0xF2]}),
(reg32_reg8, {'opcode':[0x0F, 0x38, 0xF0], 'modrm':None, 'prefix':[0xF2]}),
(reg32_mem8, {'opcode':[0x0F, 0x38, 0xF0], 'modrm':None, 'prefix':[0xF2]}))
class cwd(Instruction):
machine_inst = no_op
params = {'opcode':[0x66, 0x99], 'modrm':None}
class cwde(Instruction):
machine_inst = no_op
params = {'opcode':[0x98], 'modrm':None}
class dec(DispatchInstruction):
dispatch = (
(reg64, {'opcode':[0xFF], 'modrm':0x08}),
(mem64, {'opcode':[0xFF], 'modrm':0x08}),
(reg32, {'opcode':[0xFF], 'modrm':0x08}),
(mem32, {'opcode':[0xFF], 'modrm':0x08}),
(reg16, {'opcode':[0xFF], 'modrm':0x08}),
(mem16, {'opcode':[0x66, 0xFF], 'modrm':0x08}),
(reg8, {'opcode':[0xFE], 'modrm':0x08}),
(mem8, {'opcode':[0xFE], 'modrm':0x08}))
class div(DispatchInstruction):
dispatch = (
(reg64, {'opcode':[0xF7], 'modrm':0x30}),
(mem64, {'opcode':[0xF7], 'modrm':0x30}),
(reg32, {'opcode':[0xF7], 'modrm':0x30}),
(mem32, {'opcode':[0xF7], 'modrm':0x30}),
(reg16, {'opcode':[0xF7], 'modrm':0x30}),
(mem16, {'opcode':[0x66, 0xF7], 'modrm':0x30}),
(reg8, {'opcode':[0xF6], 'modrm':0x30}),
(mem8, {'opcode':[0xF6], 'modrm':0x30}))
class enter(Instruction):
machine_inst = imm16_imm8
params = {'opcode':[0xC8], 'modrm':None}
class hlt(Instruction):
machine_inst = no_op
params = {'opcode':[0xF4], 'modrm':None}
class idiv(DispatchInstruction):
dispatch = (
    (reg64,       {'opcode':[0xF7],       'modrm':0x38}),
    (mem64,       {'opcode':[0xF7],       'modrm':0x38}),
    (reg32,       {'opcode':[0xF7],       'modrm':0x38}),
(mem32, {'opcode':[0xF7], 'modrm':0x38}),
(reg16, {'opcode':[0xF7], 'modrm':0x38}),
(mem16, {'opcode':[0x66, 0xF7], 'modrm':0x38}),
(reg8, {'opcode':[0xF6], 'modrm':0x38}),
(mem8, {'opcode':[0xF6], 'modrm':0x38}))
class imul(DispatchInstruction):
dispatch = (
(reg64, {'opcode':[0xF7], 'modrm':0x28}),
(mem64, {'opcode':[0xF7], 'modrm':0x28}),
(reg64_reg64_rev, {'opcode':[0x0F, 0xAF], 'modrm':None}),
(reg64_mem64, {'opcode':[0x0F, 0xAF], 'modrm':None}),
(reg64_reg64_simm8_rev,{'opcode':[0x6B], 'modrm':None}),
(reg64_mem64_simm8, {'opcode':[0x6B], 'modrm':None}),
(reg64_reg64_imm32, {'opcode':[0x69], 'modrm':None}),
(reg64_mem64_imm32, {'opcode':[0x69], 'modrm':None}),
(reg32, {'opcode':[0xF7], 'modrm':0x28}),
(mem32, {'opcode':[0xF7], 'modrm':0x28}),
(reg32_reg32_rev, {'opcode':[0x0F, 0xAF], 'modrm':None}),
(reg32_mem32, {'opcode':[0x0F, 0xAF], 'modrm':None}),
(reg32_reg32_simm8_rev,{'opcode':[0x6B], 'modrm':None}),
(reg32_mem32_simm8, {'opcode':[0x6B], 'modrm':None}),
(reg32_reg32_imm32, {'opcode':[0x69], 'modrm':None}),
(reg32_mem32_imm32, {'opcode':[0x69], 'modrm':None}),
(reg16, {'opcode':[0xF7], 'modrm':0x28}),
(mem16, {'opcode':[0x66, 0xF7], 'modrm':0x28}),
(reg16_reg16, {'opcode':[0x0F, 0xAF], 'modrm':None}),
(reg16_mem16, {'opcode':[0x0F, 0xAF], 'modrm':None}),
(reg16_reg16_simm8_rev,{'opcode':[0x6B], 'modrm':None}),
(reg16_mem16_simm8, {'opcode':[0x6B], 'modrm':None}),
(reg16_reg16_imm16, {'opcode':[0x69], 'modrm':None}),
(reg16_mem16_imm16, {'opcode':[0x69], 'modrm':None}),
(reg8, {'opcode':[0xF6], 'modrm':0x28}),
(mem8, {'opcode':[0xF6], 'modrm':0x28}))
class in_(DispatchInstruction):
dispatch = (
(eax_dx, {'opcode':[0xED], 'modrm':None}),
(ax_dx, {'opcode':[0x66, 0xED], 'modrm':None}),
(al_dx, {'opcode':[0xEC], 'modrm':None}),
(eax_imm8, {'opcode':[0xE5], 'modrm':None}),
(ax_imm8, {'opcode':[0x66, 0xE5], 'modrm':None}),
(al_imm8, {'opcode':[0xE4], 'modrm':None}))
class inc(DispatchInstruction):
dispatch = (
(reg64, {'opcode':[0xFF], 'modrm':0x00}),
(mem64, {'opcode':[0xFF], 'modrm':0x00}),
(reg32, {'opcode':[0xFF], 'modrm':0x00}),
(mem32, {'opcode':[0xFF], 'modrm':0x00}),
(reg16, {'opcode':[0xFF], 'modrm':0x00}),
(mem16, {'opcode':[0x66, 0xFF], 'modrm':0x00}),
(reg8, {'opcode':[0xFE], 'modrm':0x00}),
(mem8, {'opcode':[0xFE], 'modrm':0x00}))
class insb(Instruction):
machine_inst = no_op
params = {'opcode':[0x6C], 'modrm':None}
class insd(Instruction):
machine_inst = no_op
params = {'opcode':[0x6D], 'modrm':None}
class insw(Instruction):
machine_inst = no_op
params = {'opcode':[0x66, 0x6D], 'modrm':None}
class int_(Instruction):
machine_inst = imm8
params = {'opcode':[0xCD], 'modrm':None}
class int_3(Instruction):
"""NOTE - this is a special form of 'int 3' used for debugging; see the
architecture manuals for more information."""
machine_inst = no_op
params = {'opcode':[0xCC], 'modrm':None}
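# (0xCC is the dedicated single-byte breakpoint encoding; int_(3) would
# instead emit the two-byte CD 03 form, which debuggers do not treat
# specially.)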
class invd(Instruction):
machine_inst = no_op
params = {'opcode':[0x0F, 0x08], 'modrm':None}
class invlpg(Instruction):
machine_inst = mem8
params = {'opcode':[0x0F, 0x01], 'modrm':0x38}
class iret(Instruction):
machine_inst = no_op
  params = {'opcode':[0x66, 0xCF], 'modrm':None}
class iretd(Instruction):
machine_inst = no_op
params = {'opcode':[0xCF], 'modrm':None}
class iretq(Instruction):
machine_inst = no_op
params = {'opcode':[0x48, 0xCF], 'modrm':None}
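# The conditional branches below dispatch on a nested opcode list: the first
# entry is the short form (8-bit displacement), the second the near form
# (32-bit displacement); presumably the encoder picks whichever fits the
# resolved offset, which is why the separate rel8off/rel32off entries are
# left commented out.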
class ja(DispatchInstruction):
dispatch = (
(lbl32_8off, {'opcode':[[0x77], [0x0F, 0x87]], 'modrm':None}),
(rel32_8off, {'opcode':[[0x77], [0x0F, 0x87]], 'modrm':None}))
#(rel8off, {'opcode':[0x77], 'modrm':None}),
#(rel32off, {'opcode':[0x0F, 0x87], 'modrm':None}))
class jae(DispatchInstruction):
dispatch = (
(lbl32_8off, {'opcode':[[0x73], [0x0F, 0x83]], 'modrm':None}),
(rel32_8off, {'opcode':[[0x73], [0x0F, 0x83]], 'modrm':None}))
#(rel8off, {'opcode':[0x73], 'modrm':None}),
#(rel32off, {'opcode':[0x0F, 0x83], 'modrm':None}))
class jb(DispatchInstruction):
dispatch = (
(lbl32_8off, {'opcode':[[0x72], [0x0F, 0x82]], 'modrm':None}),
(rel32_8off, {'opcode':[[0x72], [0x0F, 0x82]], 'modrm':None}))
#(rel8off, {'opcode':[0x72], 'modrm':None}),
#(rel32off, {'opcode':[0x0F, 0x82], 'modrm':None}))
class jbe(DispatchInstruction):
dispatch = (
(lbl32_8off, {'opcode':[[0x76], [0x0F, 0x86]], 'modrm':None}),
(rel32_8off, {'opcode':[[0x76], [0x0F, 0x86]], 'modrm':None}))
#(rel8off, {'opcode':[0x76], 'modrm':None}),
#(rel32off, {'opcode':[0x0F, 0x86], 'modrm':None}))
class jc(DispatchInstruction):
dispatch = (
(lbl32_8off, {'opcode':[[0x72], [0x0F, 0x82]], 'modrm':None}),
(rel32_8off, {'opcode':[[0x72], [0x0F, 0x82]], 'modrm':None}))
#(rel8off, {'opcode':[0x72], 'modrm':None}),
#(rel32off, {'opcode':[0x0F, 0x82], 'modrm':None}))
class je(DispatchInstruction):
dispatch = (
(lbl32_8off, {'opcode':[[0x74], [0x0F, 0x84]], 'modrm':None}),
(rel32_8off, {'opcode':[[0x74], [0x0F, 0x84]], 'modrm':None}))
#(rel8off, {'opcode':[0x74], 'modrm':None}),
#(rel32off, {'opcode':[0x0F, 0x84], 'modrm':None}))
class jecxz(DispatchInstruction):
dispatch = (
(lbl8off, {'opcode':[0x67, 0xE3], 'modrm':None}),
(rel8off, {'opcode':[0x67, 0xE3], 'modrm':None}))
class jg(DispatchInstruction):
dispatch = (
(lbl32_8off, {'opcode':[[0x7F], [0x0F, 0x8F]], 'modrm':None}),
(rel32_8off, {'opcode':[[0x7F], [0x0F, 0x8F]], 'modrm':None}))
#(rel8off, {'opcode':[0x7F], 'modrm':None}),
#(rel32off, {'opcode':[0x0F, 0x8F], 'modrm':None}))
class jge(DispatchInstruction):
dispatch = (
(lbl32_8off, {'opcode':[[0x7D], [0x0F, 0x8D]], 'modrm':None}),
(rel32_8off, {'opcode':[[0x7D], [0x0F, 0x8D]], 'modrm':None}))
#(rel8off, {'opcode':[0x7D], 'modrm':None}),
#(rel32off, {'opcode':[0x0F, 0x8D], 'modrm':None}))
class jl(DispatchInstruction):
dispatch = (
(lbl32_8off, {'opcode':[[0x7C], [0x0F, 0x8C]], 'modrm':None}),
(rel32_8off, {'opcode':[[0x7C], [0x0F, 0x8C]], 'modrm':None}))
#(rel8off, {'opcode':[0x7C], 'modrm':None}),
#(rel32off, {'opcode':[0x0F, 0x8C], 'modrm':None}))
class jle(DispatchInstruction):
dispatch = (
(lbl32_8off, {'opcode':[[0x7E], [0x0F, 0x8E]], 'modrm':None}),
(rel32_8off, {'opcode':[[0x7E], [0x0F, 0x8E]], 'modrm':None}))
#(rel8off, {'opcode':[0x7E], 'modrm':None}),
#(rel32off, {'opcode':[0x0F, 0x8E], 'modrm':None}))
class jmp(DispatchInstruction):
dispatch = (
(lbl32_8off, {'opcode':[[0xEB], [0xE9]], 'modrm':None}),
(rel32_8off, {'opcode':[[0xEB], [0xE9]], 'modrm':None}),
#(rel8off, {'opcode':[0xEB], 'modrm':None}),
#(rel32off, {'opcode':[0xE9], 'modrm':None}),
(reg64, {'opcode':[0xFF], 'modrm':0x20}),
(mem64_32, {'opcode':[0xFF], 'modrm':0x20}),
(reg16, {'opcode':[0xFF], 'modrm':0x20}),
(mem16, {'opcode':[0x66, 0xFF], 'modrm':0x20}))
class jna(DispatchInstruction):
dispatch = (
(lbl32_8off, {'opcode':[[0x76], [0x0F, 0x86]], 'modrm':None}),
(rel32_8off, {'opcode':[[0x76], [0x0F, 0x86]], 'modrm':None}))
#(rel8off, {'opcode':[0x76], 'modrm':None}),
#(rel32off, {'opcode':[0x0F, 0x86], 'modrm':None}))
class jnae(DispatchInstruction):
dispatch = (
(lbl32_8off, {'opcode':[[0x72], [0x0F, 0x82]], 'modrm':None}),
(rel32_8off, {'opcode':[[0x72], [0x0F, 0x82]], 'modrm':None}))
#(rel8off, {'opcode':[0x72], 'modrm':None}),
#(rel32off, {'opcode':[0x0F, 0x82], 'modrm':None}))
class jnb(DispatchInstruction):
dispatch = (
(lbl32_8off, {'opcode':[[0x73], [0x0F, 0x83]], 'modrm':None}),
(rel32_8off, {'opcode':[[0x73], [0x0F, 0x83]], 'modrm':None}))
#(rel8off, {'opcode':[0x73], 'modrm':None}),
#(rel32off, {'opcode':[0x0F, 0x83], 'modrm':None}))
class jnbe(DispatchInstruction):
dispatch = (
(lbl32_8off, {'opcode':[[0x77], [0x0F, 0x87]], 'modrm':None}),
(rel32_8off, {'opcode':[[0x77], [0x0F, 0x87]], 'modrm':None}))
#(rel8off, {'opcode':[0x77], 'modrm':None}),
#(rel32off, {'opcode':[0x0F, 0x87], 'modrm':None}))
class jnc(DispatchInstruction):
dispatch = (
(lbl32_8off, {'opcode':[[0x73], [0x0F, 0x83]], 'modrm':None}),
(rel32_8off, {'opcode':[[0x73], [0x0F, 0x83]], 'modrm':None}))
#(rel8off, {'opcode':[0x73], 'modrm':None}),
#(rel32off, {'opcode':[0x0F, 0x83], 'modrm':None}))
class jne(DispatchInstruction):
dispatch = (
(lbl32_8off, {'opcode':[[0x75], [0x0F, 0x85]], 'modrm':None}),
(rel32_8off, {'opcode':[[0x75], [0x0F, 0x85]], 'modrm':None}))
#(rel8off, {'opcode':[0x75], 'modrm':None}),
#(rel32off, {'opcode':[0x0F, 0x85], 'modrm':None}))
class jng(DispatchInstruction):
dispatch = (
(lbl32_8off, {'opcode':[[0x7E], [0x0F, 0x8E]], 'modrm':None}),
(rel32_8off, {'opcode':[[0x7E], [0x0F, 0x8E]], 'modrm':None}))
#(rel8off, {'opcode':[0x7E], 'modrm':None}),
#(rel32off, {'opcode':[0x0F, 0x8E], 'modrm':None}))
class jnge(DispatchInstruction):
dispatch = (
(lbl32_8off, {'opcode':[[0x7C], [0x0F, 0x8C]], 'modrm':None}),
(rel32_8off, {'opcode':[[0x7C], [0x0F, 0x8C]], 'modrm':None}))
#(rel8off, {'opcode':[0x7C], 'modrm':None}),
#(rel32off, {'opcode':[0x0F, 0x8C], 'modrm':None}))
class jnl(DispatchInstruction):
dispatch = (
(lbl32_8off, {'opcode':[[0x7D], [0x0F, 0x8D]], 'modrm':None}),
(rel32_8off, {'opcode':[[0x7D], [0x0F, 0x8D]], 'modrm':None}))
#(rel8off, {'opcode':[0x7D], 'modrm':None}),
#(rel32off, {'opcode':[0x0F, 0x8D], 'modrm':None}))
class jnle(DispatchInstruction):
dispatch = (
(lbl32_8off, {'opcode':[[0x7F], [0x0F, 0x8F]], 'modrm':None}),
(rel32_8off, {'opcode':[[0x7F], [0x0F, 0x8F]], 'modrm':None}))
#(rel8off, {'opcode':[0x7F], 'modrm':None}),
#(rel32off, {'opcode':[0x0F, 0x8F], 'modrm':None}))
class jno(DispatchInstruction):
dispatch = (
(lbl32_8off, {'opcode':[[0x71], [0x0F, 0x81]], 'modrm':None}),
(rel32_8off, {'opcode':[[0x71], [0x0F, 0x81]], 'modrm':None}))
#(rel8off, {'opcode':[0x71], 'modrm':None}),
#(rel32off, {'opcode':[0x0F, 0x81], 'modrm':None}))
class jnp(DispatchInstruction):
dispatch = (
(lbl32_8off, {'opcode':[[0x7B], [0x0F, 0x8B]], 'modrm':None}),
(rel32_8off, {'opcode':[[0x7B], [0x0F, 0x8B]], 'modrm':None}))
#(rel8off, {'opcode':[0x7B], 'modrm':None}),
#(rel32off, {'opcode':[0x0F, 0x8B], 'modrm':None}))
class jns(DispatchInstruction):
dispatch = (
(lbl32_8off, {'opcode':[[0x79], [0x0F, 0x89]], 'modrm':None}),
(rel32_8off, {'opcode':[[0x79], [0x0F, 0x89]], 'modrm':None}))
#(rel8off, {'opcode':[0x79], 'modrm':None}),
#(rel32off, {'opcode':[0x0F, 0x89], 'modrm':None}))
class jnz(DispatchInstruction):
dispatch = (
(lbl32_8off, {'opcode':[[0x75], [0x0F, 0x85]], 'modrm':None}),
(rel32_8off, {'opcode':[[0x75], [0x0F, 0x85]], 'modrm':None}))
#(rel8off, {'opcode':[0x75], 'modrm':None}),
#(rel32off, {'opcode':[0x0F, 0x85], 'modrm':None}))
class jo(DispatchInstruction):
dispatch = (
(lbl32_8off, {'opcode':[[0x70], [0x0F, 0x80]], 'modrm':None}),
(rel32_8off, {'opcode':[[0x70], [0x0F, 0x80]], 'modrm':None}))
#(rel8off, {'opcode':[0x70], 'modrm':None}),
#(rel32off, {'opcode':[0x0F, 0x80], 'modrm':None}))
class jp(DispatchInstruction):
dispatch = (
(lbl32_8off, {'opcode':[[0x7A], [0x0F, 0x8A]], 'modrm':None}),
(rel32_8off, {'opcode':[[0x7A], [0x0F, 0x8A]], 'modrm':None}))
#(rel8off, {'opcode':[0x7A], 'modrm':None}),
#(rel32off, {'opcode':[0x0F, 0x8A], 'modrm':None}))
class jpe(DispatchInstruction):
dispatch = (
(lbl32_8off, {'opcode':[[0x7A], [0x0F, 0x8A]], 'modrm':None}),
(rel32_8off, {'opcode':[[0x7A], [0x0F, 0x8A]], 'modrm':None}))
#(rel8off, {'opcode':[0x7A], 'modrm':None}),
#(rel32off, {'opcode':[0x0F, 0x8A], 'modrm':None}))
class jpo(DispatchInstruction):
dispatch = (
(lbl32_8off, {'opcode':[[0x7B], [0x0F, 0x8B]], 'modrm':None}),
(rel32_8off, {'opcode':[[0x7B], [0x0F, 0x8B]], 'modrm':None}))
#(rel8off, {'opcode':[0x7B], 'modrm':None}),
#(rel32off, {'opcode':[0x0F, 0x8B], 'modrm':None}))
class jrcxz(DispatchInstruction):
dispatch = (
(lbl8off, {'opcode':[0xE3], 'modrm':None}),
(rel8off, {'opcode':[0xE3], 'modrm':None}))
class js(DispatchInstruction):
dispatch = (
(lbl32_8off, {'opcode':[[0x78], [0x0F, 0x88]], 'modrm':None}),
(rel32_8off, {'opcode':[[0x78], [0x0F, 0x88]], 'modrm':None}))
#(rel8off, {'opcode':[0x78], 'modrm':None}),
#(rel32off, {'opcode':[0x0F, 0x88], 'modrm':None}))
class jz(DispatchInstruction):
dispatch = (
(lbl32_8off, {'opcode':[[0x74], [0x0F, 0x84]], 'modrm':None}),
(rel32_8off, {'opcode':[[0x74], [0x0F, 0x84]], 'modrm':None}))
#(rel8off, {'opcode':[0x74], 'modrm':None}),
#(rel32off, {'opcode':[0x0F, 0x84], 'modrm':None}))
class lahf(Instruction):
machine_inst = no_op
params = {'opcode':[0x9F], 'modrm':None}
class lea(DispatchInstruction):
dispatch = (
(reg64_mem, {'opcode':[0x8D], 'modrm':0x00}),
(reg32_mem, {'opcode':[0x8D], 'modrm':0x00}),
(reg16_mem, {'opcode':[0x8D], 'modrm':0x00}))
class leave(Instruction):
machine_inst = no_op
params = {'opcode':[0xC9], 'modrm':None}
class lfence(Instruction):
machine_inst = no_op
params = {'opcode':[0x0F, 0xAE, 0xE8], 'modrm':None}
class lodsb(Instruction):
machine_inst = no_op
params = {'opcode':[0xAC], 'modrm':None}
class lodsd(Instruction):
machine_inst = no_op
params = {'opcode':[0xAD], 'modrm':None}
class lodsq(Instruction):
machine_inst = no_op
params = {'opcode':[0x48, 0xAD], 'modrm':None}
class lodsw(Instruction):
machine_inst = no_op
params = {'opcode':[0x66, 0xAD], 'modrm':None}
class loop(DispatchInstruction):
dispatch = (
(lbl8off, {'opcode':[0xE2], 'modrm':None}),
(rel8off, {'opcode':[0xE2], 'modrm':None}))
class loope(DispatchInstruction):
dispatch = (
(lbl8off, {'opcode':[0xE1], 'modrm':None}),
(rel8off, {'opcode':[0xE1], 'modrm':None}))
class loopne(DispatchInstruction):
dispatch = (
(lbl8off, {'opcode':[0xE0], 'modrm':None}),
(rel8off, {'opcode':[0xE0], 'modrm':None}))
class loopnz(DispatchInstruction):
dispatch = (
(lbl8off, {'opcode':[0xE0], 'modrm':None}),
(rel8off, {'opcode':[0xE0], 'modrm':None}))
class loopz(DispatchInstruction):
dispatch = (
(lbl8off, {'opcode':[0xE1], 'modrm':None}),
(rel8off, {'opcode':[0xE1], 'modrm':None}))
class lzcnt(DispatchInstruction):
dispatch = (
(reg64_reg64, {'opcode':[0x0F, 0xBD], 'modrm':None, 'prefix':[0xF3]}),
(reg64_mem64, {'opcode':[0x0F, 0xBD], 'modrm':None, 'prefix':[0xF3]}),
(reg32_reg32, {'opcode':[0x0F, 0xBD], 'modrm':None, 'prefix':[0xF3]}),
(reg32_mem32, {'opcode':[0x0F, 0xBD], 'modrm':None, 'prefix':[0xF3]}),
(reg16_reg16, {'opcode':[0x0F, 0xBD], 'modrm':None, 'prefix':[0xF3]}),
(reg16_mem16, {'opcode':[0x0F, 0xBD], 'modrm':None, 'prefix':[0xF3]}))
class mfence(Instruction):
machine_inst = no_op
params = {'opcode':[0x0F, 0xAE, 0xF0], 'modrm':None}
class mov(DispatchInstruction):
dispatch = (
# TODO - implement moffset* operands!
(reg64_imm32, {'opcode':[0xC7], 'modrm':0x00}),
(mem64_imm32, {'opcode':[0xC7], 'modrm':0x00}),
(reg64_imm64, {'opcode':[0xB8], 'modrm':None}),
(reg64_reg64, {'opcode':[0x89], 'modrm':None}),
(mem64_reg64, {'opcode':[0x89], 'modrm':None}),
(reg64_mem64, {'opcode':[0x8B], 'modrm':None}),
(reg32_imm32, {'opcode':[0xB8], 'modrm':None}),
(mem32_imm32, {'opcode':[0xC7], 'modrm':0x00}),
(reg32_reg32, {'opcode':[0x89], 'modrm':None}),
(mem32_reg32, {'opcode':[0x89], 'modrm':None}),
(reg32_mem32, {'opcode':[0x8B], 'modrm':None}),
(reg16_imm16, {'opcode':[0xB8], 'modrm':None}),
(mem16_imm16, {'opcode':[0xC7], 'modrm':0x00}),
(reg16_reg16, {'opcode':[0x89], 'modrm':None}),
(mem16_reg16, {'opcode':[0x89], 'modrm':None}),
(reg16_mem16, {'opcode':[0x8B], 'modrm':None}),
(reg8_imm8, {'opcode':[0xB0], 'modrm':None}),
(mem8_imm8, {'opcode':[0xC6], 'modrm':0x00}),
(reg8_reg8, {'opcode':[0x88], 'modrm':None}),
(mem8_reg8, {'opcode':[0x88], 'modrm':None}),
(reg8_mem8, {'opcode':[0x8A], 'modrm':None}))
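# Note: the reg64_imm64 form above (0xB8+rd with REX.W) is the only x86-64
# encoding that accepts a full 64-bit immediate; the 64-bit imm32 forms
# sign-extend their immediate to 64 bits.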
class movnti(DispatchInstruction):
dispatch = (
(mem64_reg64, {'opcode':[0x0F, 0xC3], 'modrm':None}),
(mem32_reg32, {'opcode':[0x0F, 0xC3], 'modrm':None}))
# SSE2!
class movsb(Instruction):
machine_inst = no_op
params = {'opcode':[0xA4], 'modrm':None}
class movsd(Instruction):
machine_inst = no_op
params = {'opcode':[0xA5], 'modrm':None}
class movsq(Instruction):
machine_inst = no_op
params = {'opcode':[0x48, 0xA5], 'modrm':None}
class movsw(Instruction):
machine_inst = no_op
params = {'opcode':[0x66, 0xA5], 'modrm':None}
class movsx(DispatchInstruction):
dispatch = (
(reg64_reg8, {'opcode':[0x0F, 0xBE], 'modrm':None, 'prefix':[]}),
(reg64_mem8, {'opcode':[0x0F, 0xBE], 'modrm':None, 'prefix':[]}),
(reg64_reg16, {'opcode':[0x0F, 0xBF], 'modrm':None}),
(reg64_mem16, {'opcode':[0x0F, 0xBF], 'modrm':None}),
(reg32_reg8, {'opcode':[0x0F, 0xBE], 'modrm':None, 'prefix':[]}),
(reg32_mem8, {'opcode':[0x0F, 0xBE], 'modrm':None, 'prefix':[]}),
(reg32_reg16, {'opcode':[0x0F, 0xBF], 'modrm':None, 'prefix':[]}),
(reg32_mem16, {'opcode':[0x0F, 0xBF], 'modrm':None, 'prefix':[]}),
(reg16_reg8, {'opcode':[0x0F, 0xBE], 'modrm':None}),
(reg16_mem8, {'opcode':[0x0F, 0xBE], 'modrm':None}))
class movsxd(DispatchInstruction):
dispatch = (
(reg64_reg32, {'opcode':[0x63], 'modrm':None}),
(reg64_mem32, {'opcode':[0x63], 'modrm':None}))
class movzx(DispatchInstruction):
dispatch = (
(reg64_reg8, {'opcode':[0x0F, 0xB6], 'modrm':None, 'prefix':[]}),
(reg64_mem8, {'opcode':[0x0F, 0xB6], 'modrm':None, 'prefix':[]}),
(reg64_reg16, {'opcode':[0x0F, 0xB7], 'modrm':None}),
(reg64_mem16, {'opcode':[0x0F, 0xB7], 'modrm':None}),
(reg32_reg8, {'opcode':[0x0F, 0xB6], 'modrm':None, 'prefix':[]}),
(reg32_mem8, {'opcode':[0x0F, 0xB6], 'modrm':None, 'prefix':[]}),
(reg32_reg16, {'opcode':[0x0F, 0xB7], 'modrm':None, 'prefix':[]}),
(reg32_mem16, {'opcode':[0x0F, 0xB7], 'modrm':None, 'prefix':[]}),
(reg16_reg8, {'opcode':[0x0F, 0xB6], 'modrm':None}),
(reg16_mem8, {'opcode':[0x0F, 0xB6], 'modrm':None}))
class mul(DispatchInstruction):
dispatch = (
(reg64, {'opcode':[0xF7], 'modrm':0x20}),
(mem64, {'opcode':[0xF7], 'modrm':0x20}),
(reg32, {'opcode':[0xF7], 'modrm':0x20}),
(mem32, {'opcode':[0xF7], 'modrm':0x20}),
(reg16, {'opcode':[0xF7], 'modrm':0x20}),
(mem16, {'opcode':[0x66, 0xF7], 'modrm':0x20}),
(reg8, {'opcode':[0xF6], 'modrm':0x20}),
(mem8, {'opcode':[0xF6], 'modrm':0x20}))
class neg(DispatchInstruction):
dispatch = (
(reg64, {'opcode':[0xF7], 'modrm':0x18}),
(mem64, {'opcode':[0xF7], 'modrm':0x18}),
(reg32, {'opcode':[0xF7], 'modrm':0x18}),
(mem32, {'opcode':[0xF7], 'modrm':0x18}),
(reg16, {'opcode':[0xF7], 'modrm':0x18}),
(mem16, {'opcode':[0x66, 0xF7], 'modrm':0x18}),
(reg8, {'opcode':[0xF6], 'modrm':0x18}),
(mem8, {'opcode':[0xF6], 'modrm':0x18}))
# TODO - REX prefix isn't needed for the reg64/mem64 versions.. what to do?
# Could add an extra 'rex' param indicating whether REX is needed..
class nop(Instruction):
machine_inst = no_op
params = {'opcode':[0x90], 'modrm':None}
class not_(DispatchInstruction):
dispatch = (
(reg64, {'opcode':[0xF7], 'modrm':0x10}),
(mem64, {'opcode':[0xF7], 'modrm':0x10}),
(reg32, {'opcode':[0xF7], 'modrm':0x10}),
(mem32, {'opcode':[0xF7], 'modrm':0x10}),
(reg16, {'opcode':[0xF7], 'modrm':0x10}),
(mem16, {'opcode':[0x66, 0xF7], 'modrm':0x10}),
(reg8, {'opcode':[0xF6], 'modrm':0x10}),
(mem8, {'opcode':[0xF6], 'modrm':0x10}))
class or_(DispatchInstruction):
dispatch = (
(reg64_simm8, {'opcode':[0x83], 'modrm':0x08}),
(mem64_simm8, {'opcode':[0x83], 'modrm':0x08}),
(rax_imm32, {'opcode':[0x0D], 'modrm':None}),
(reg64_imm32, {'opcode':[0x81], 'modrm':0x08}),
(mem64_imm32, {'opcode':[0x81], 'modrm':0x08}),
(mem64_reg64, {'opcode':[0x09], 'modrm':None}),
(reg64_reg64, {'opcode':[0x09], 'modrm':None}),
(reg64_mem64, {'opcode':[0x0B], 'modrm':None}),
(reg32_simm8, {'opcode':[0x83], 'modrm':0x08}),
(mem32_simm8, {'opcode':[0x83], 'modrm':0x08}),
(eax_imm32, {'opcode':[0x0D], 'modrm':None}),
(reg32_imm32, {'opcode':[0x81], 'modrm':0x08}),
(mem32_imm32, {'opcode':[0x81], 'modrm':0x08}),
(mem32_reg32, {'opcode':[0x09], 'modrm':None}),
(reg32_reg32, {'opcode':[0x09], 'modrm':None}),
(reg32_mem32, {'opcode':[0x0B], 'modrm':None}),
(reg16_simm8, {'opcode':[0x83], 'modrm':0x08}),
(mem16_simm8, {'opcode':[0x83], 'modrm':0x08}),
(ax_imm16, {'opcode':[0x66, 0x0D], 'modrm':None}),
(reg16_imm16, {'opcode':[0x81], 'modrm':0x08}),
(mem16_imm16, {'opcode':[0x81], 'modrm':0x08}),
(mem16_reg16, {'opcode':[0x09], 'modrm':None}),
(reg16_reg16, {'opcode':[0x09], 'modrm':None}),
(reg16_mem16, {'opcode':[0x0B], 'modrm':None}),
(al_imm8, {'opcode':[0x0C], 'modrm':None}),
(reg8_imm8, {'opcode':[0x80], 'modrm':0x08}),
(mem8_imm8, {'opcode':[0x80], 'modrm':0x08}),
(reg8_reg8, {'opcode':[0x08], 'modrm':None}),
(mem8_reg8, {'opcode':[0x08], 'modrm':None}),
(reg8_mem8, {'opcode':[0x0A], 'modrm':None}))
class out(DispatchInstruction):
dispatch = (
(dx_eax, {'opcode':[0xEF], 'modrm':None}),
(dx_ax, {'opcode':[0x66, 0xEF], 'modrm':None}),
(dx_al, {'opcode':[0xEE], 'modrm':None}),
(imm8_eax, {'opcode':[0xE7], 'modrm':None}),
(imm8_ax, {'opcode':[0x66, 0xE7], 'modrm':None}),
(imm8_al, {'opcode':[0xE6], 'modrm':None}))
class outsb(Instruction):
machine_inst = no_op
params = {'opcode':[0x6E], 'modrm':None}
class outsd(Instruction):
machine_inst = no_op
params = {'opcode':[0x6F], 'modrm':None}
class outsw(Instruction):
machine_inst = no_op
params = {'opcode':[0x66, 0x6F], 'modrm':None}
class pause(Instruction):
machine_inst = no_op
params = {'opcode':[0xF3, 0x90], 'modrm':None}
class pop(DispatchInstruction):
dispatch = (
(reg64, {'opcode':[0x58], 'modrm':None}),
(mem64, {'opcode':[0x8F], 'modrm':0x00}),
(reg16, {'opcode':[0x58], 'modrm':None}),
(mem16, {'opcode':[0x66, 0x8F], 'modrm':0x00}))
class popcnt(DispatchInstruction):
dispatch = (
(reg64_reg64, {'opcode':[0x0F, 0xB8], 'modrm':None, 'prefix':[0xF3]}),
(reg64_mem64, {'opcode':[0x0F, 0xB8], 'modrm':None, 'prefix':[0xF3]}),
(reg32_reg32, {'opcode':[0x0F, 0xB8], 'modrm':None, 'prefix':[0xF3]}),
(reg32_mem32, {'opcode':[0x0F, 0xB8], 'modrm':None, 'prefix':[0xF3]}),
(reg16_reg16, {'opcode':[0x0F, 0xB8], 'modrm':None, 'prefix':[0xF3]}),
(reg16_mem16, {'opcode':[0x0F, 0xB8], 'modrm':None, 'prefix':[0xF3]}))
class popf(Instruction):
machine_inst = no_op
params = {'opcode':[0x66, 0x9D], 'modrm':None}
class popfq(Instruction):
machine_inst = no_op
params = {'opcode':[0x9D], 'modrm':None}
class prefetch(Instruction):
machine_inst = mem8
params = {'opcode':[0x0F, 0x0D], 'modrm':0x00}
class prefetchnta(Instruction):
machine_inst = mem8
params = {'opcode':[0x0F, 0x18], 'modrm':0x00}
class prefetcht0(Instruction):
machine_inst = mem8
params = {'opcode':[0x0F, 0x18], 'modrm':0x08}
class prefetcht1(Instruction):
machine_inst = mem8
params = {'opcode':[0x0F, 0x18], 'modrm':0x10}
class prefetcht2(Instruction):
machine_inst = mem8
params = {'opcode':[0x0F, 0x18], 'modrm':0x18}
class prefetchw(Instruction):
machine_inst = mem8
params = {'opcode':[0x0F, 0x0D], 'modrm':0x08}
class push(DispatchInstruction):
dispatch = (
(reg64, {'opcode':[0x50], 'modrm':None}),
(mem64, {'opcode':[0xFF], 'modrm':0x30}),
# TODO - add keyword arg to override operand size?
#(imm8, {'opcode':[0x6A], 'modrm':None}),
#(imm16, {'opcode':[0x66, 0x68], 'modrm':None}),
(imm32, {'opcode':[0x68], 'modrm':None}),
(reg16, {'opcode':[0x50], 'modrm':None}),
(mem16, {'opcode':[0x66, 0xFF], 'modrm':0x30}))
class pushf(Instruction):
machine_inst = no_op
params = {'opcode':[0x66, 0x9C], 'modrm':None}
class pushfq(Instruction):
machine_inst = no_op
params = {'opcode':[0x9C], 'modrm':None}
class rcl(DispatchInstruction):
dispatch = (
(reg64_1, {'opcode':[0xD1], 'modrm':0x10}),
(mem64_1, {'opcode':[0xD1], 'modrm':0x10}),
(reg64_cl, {'opcode':[0xD3], 'modrm':0x10}),
(mem64_cl, {'opcode':[0xD3], 'modrm':0x10}),
(reg64_simm8, {'opcode':[0xC1], 'modrm':0x10}),
(mem64_simm8, {'opcode':[0xC1], 'modrm':0x10}),
(reg32_1, {'opcode':[0xD1], 'modrm':0x10}),
(mem32_1, {'opcode':[0xD1], 'modrm':0x10}),
(reg32_cl, {'opcode':[0xD3], 'modrm':0x10}),
(mem32_cl, {'opcode':[0xD3], 'modrm':0x10}),
(reg32_simm8, {'opcode':[0xC1], 'modrm':0x10}),
(mem32_simm8, {'opcode':[0xC1], 'modrm':0x10}),
(reg16_1, {'opcode':[0xD1], 'modrm':0x10}),
(mem16_1, {'opcode':[0xD1], 'modrm':0x10}),
(reg16_cl, {'opcode':[0xD3], 'modrm':0x10}),
(mem16_cl, {'opcode':[0xD3], 'modrm':0x10}),
(reg16_simm8, {'opcode':[0xC1], 'modrm':0x10}),
(mem16_simm8, {'opcode':[0xC1], 'modrm':0x10}),
(reg8_1, {'opcode':[0xD0], 'modrm':0x10}),
(mem8_1, {'opcode':[0xD0], 'modrm':0x10}),
(reg8_cl, {'opcode':[0xD2], 'modrm':0x10}),
(mem8_cl, {'opcode':[0xD2], 'modrm':0x10}),
(reg8_imm8, {'opcode':[0xC0], 'modrm':0x10}),
(mem8_imm8, {'opcode':[0xC0], 'modrm':0x10}))
class rcr(DispatchInstruction):
dispatch = (
(reg64_1, {'opcode':[0xD1], 'modrm':0x18}),
(mem64_1, {'opcode':[0xD1], 'modrm':0x18}),
(reg64_cl, {'opcode':[0xD3], 'modrm':0x18}),
(mem64_cl, {'opcode':[0xD3], 'modrm':0x18}),
(reg64_simm8, {'opcode':[0xC1], 'modrm':0x18}),
(mem64_simm8, {'opcode':[0xC1], 'modrm':0x18}),
(reg32_1, {'opcode':[0xD1], 'modrm':0x18}),
(mem32_1, {'opcode':[0xD1], 'modrm':0x18}),
(reg32_cl, {'opcode':[0xD3], 'modrm':0x18}),
(mem32_cl, {'opcode':[0xD3], 'modrm':0x18}),
(reg32_simm8, {'opcode':[0xC1], 'modrm':0x18}),
(mem32_simm8, {'opcode':[0xC1], 'modrm':0x18}),
(reg16_1, {'opcode':[0xD1], 'modrm':0x18}),
(mem16_1, {'opcode':[0xD1], 'modrm':0x18}),
(reg16_cl, {'opcode':[0xD3], 'modrm':0x18}),
(mem16_cl, {'opcode':[0xD3], 'modrm':0x18}),
(reg16_simm8, {'opcode':[0xC1], 'modrm':0x18}),
(mem16_simm8, {'opcode':[0xC1], 'modrm':0x18}),
(reg8_1, {'opcode':[0xD0], 'modrm':0x18}),
(mem8_1, {'opcode':[0xD0], 'modrm':0x18}),
(reg8_cl, {'opcode':[0xD2], 'modrm':0x18}),
(mem8_cl, {'opcode':[0xD2], 'modrm':0x18}),
(reg8_imm8, {'opcode':[0xC0], 'modrm':0x18}),
(mem8_imm8, {'opcode':[0xC0], 'modrm':0x18}))
class rdtsc(Instruction):
machine_inst = no_op
params = {'opcode':[0x0F, 0x31], 'modrm':None}
class rdtscp(Instruction):
machine_inst = no_op
params = {'opcode':[0x0F, 0x01, 0xF9], 'modrm':None}
class ret(DispatchInstruction):
dispatch = (
(no_op, {'opcode':[0xC3], 'modrm':None}),
(imm16, {'opcode':[0xC2], 'modrm':None}))
class rol(DispatchInstruction):
dispatch = (
(reg64_1, {'opcode':[0xD1], 'modrm':0x00}),
(mem64_1, {'opcode':[0xD1], 'modrm':0x00}),
(reg64_cl, {'opcode':[0xD3], 'modrm':0x00}),
(mem64_cl, {'opcode':[0xD3], 'modrm':0x00}),
(reg64_imm8, {'opcode':[0xC1], 'modrm':0x00}),
(mem64_imm8, {'opcode':[0xC1], 'modrm':0x00}),
(reg32_1, {'opcode':[0xD1], 'modrm':0x00}),
(mem32_1, {'opcode':[0xD1], 'modrm':0x00}),
(reg32_cl, {'opcode':[0xD3], 'modrm':0x00}),
(mem32_cl, {'opcode':[0xD3], 'modrm':0x00}),
(reg32_imm8, {'opcode':[0xC1], 'modrm':0x00}),
(mem32_imm8, {'opcode':[0xC1], 'modrm':0x00}),
(reg16_1, {'opcode':[0xD1], 'modrm':0x00}),
(mem16_1, {'opcode':[0xD1], 'modrm':0x00}),
(reg16_cl, {'opcode':[0xD3], 'modrm':0x00}),
(mem16_cl, {'opcode':[0xD3], 'modrm':0x00}),
(reg16_imm8, {'opcode':[0xC1], 'modrm':0x00}),
(mem16_imm8, {'opcode':[0xC1], 'modrm':0x00}),
(reg8_1, {'opcode':[0xD0], 'modrm':0x00}),
(mem8_1, {'opcode':[0xD0], 'modrm':0x00}),
(reg8_cl, {'opcode':[0xD2], 'modrm':0x00}),
(mem8_cl, {'opcode':[0xD2], 'modrm':0x00}),
(reg8_imm8, {'opcode':[0xC0], 'modrm':0x00}),
(mem8_imm8, {'opcode':[0xC0], 'modrm':0x00}))
class ror(DispatchInstruction):
dispatch = (
(reg64_1, {'opcode':[0xD1], 'modrm':0x08}),
(mem64_1, {'opcode':[0xD1], 'modrm':0x08}),
(reg64_cl, {'opcode':[0xD3], 'modrm':0x08}),
(mem64_cl, {'opcode':[0xD3], 'modrm':0x08}),
(reg64_imm8, {'opcode':[0xC1], 'modrm':0x08}),
(mem64_imm8, {'opcode':[0xC1], 'modrm':0x08}),
(reg32_1, {'opcode':[0xD1], 'modrm':0x08}),
(mem32_1, {'opcode':[0xD1], 'modrm':0x08}),
(reg32_cl, {'opcode':[0xD3], 'modrm':0x08}),
(mem32_cl, {'opcode':[0xD3], 'modrm':0x08}),
(reg32_imm8, {'opcode':[0xC1], 'modrm':0x08}),
(mem32_imm8, {'opcode':[0xC1], 'modrm':0x08}),
(reg16_1, {'opcode':[0xD1], 'modrm':0x08}),
(mem16_1, {'opcode':[0xD1], 'modrm':0x08}),
(reg16_cl, {'opcode':[0xD3], 'modrm':0x08}),
(mem16_cl, {'opcode':[0xD3], 'modrm':0x08}),
(reg16_imm8, {'opcode':[0xC1], 'modrm':0x08}),
(mem16_imm8, {'opcode':[0xC1], 'modrm':0x08}),
(reg8_1, {'opcode':[0xD0], 'modrm':0x08}),
(mem8_1, {'opcode':[0xD0], 'modrm':0x08}),
(reg8_cl, {'opcode':[0xD2], 'modrm':0x08}),
(mem8_cl, {'opcode':[0xD2], 'modrm':0x08}),
(reg8_imm8, {'opcode':[0xC0], 'modrm':0x08}),
(mem8_imm8, {'opcode':[0xC0], 'modrm':0x08}))
class sahf(Instruction):
machine_inst = no_op
params = {'opcode':[0x9E], 'modrm':None}
class sal(DispatchInstruction):
dispatch = (
(reg64_1, {'opcode':[0xD1], 'modrm':0x20}),
(mem64_1, {'opcode':[0xD1], 'modrm':0x20}),
(reg64_cl, {'opcode':[0xD3], 'modrm':0x20}),
(mem64_cl, {'opcode':[0xD3], 'modrm':0x20}),
(reg64_simm8, {'opcode':[0xC1], 'modrm':0x20}),
(mem64_simm8, {'opcode':[0xC1], 'modrm':0x20}),
(reg32_1, {'opcode':[0xD1], 'modrm':0x20}),
(mem32_1, {'opcode':[0xD1], 'modrm':0x20}),
(reg32_cl, {'opcode':[0xD3], 'modrm':0x20}),
(mem32_cl, {'opcode':[0xD3], 'modrm':0x20}),
(reg32_simm8, {'opcode':[0xC1], 'modrm':0x20}),
(mem32_simm8, {'opcode':[0xC1], 'modrm':0x20}),
(reg16_1, {'opcode':[0xD1], 'modrm':0x20}),
(mem16_1, {'opcode':[0xD1], 'modrm':0x20}),
(reg16_cl, {'opcode':[0xD3], 'modrm':0x20}),
(mem16_cl, {'opcode':[0xD3], 'modrm':0x20}),
(reg16_simm8, {'opcode':[0xC1], 'modrm':0x20}),
(mem16_simm8, {'opcode':[0xC1], 'modrm':0x20}),
(reg8_1, {'opcode':[0xD0], 'modrm':0x20}),
(mem8_1, {'opcode':[0xD0], 'modrm':0x20}),
(reg8_cl, {'opcode':[0xD2], 'modrm':0x20}),
(mem8_cl, {'opcode':[0xD2], 'modrm':0x20}),
(reg8_imm8, {'opcode':[0xC0], 'modrm':0x20}),
(mem8_imm8, {'opcode':[0xC0], 'modrm':0x20}))
class sar(DispatchInstruction):
dispatch = (
(reg64_1, {'opcode':[0xD1], 'modrm':0x38}),
(mem64_1, {'opcode':[0xD1], 'modrm':0x38}),
(reg64_cl, {'opcode':[0xD3], 'modrm':0x38}),
(mem64_cl, {'opcode':[0xD3], 'modrm':0x38}),
(reg64_simm8, {'opcode':[0xC1], 'modrm':0x38}),
(mem64_simm8, {'opcode':[0xC1], 'modrm':0x38}),
(reg32_1, {'opcode':[0xD1], 'modrm':0x38}),
(mem32_1, {'opcode':[0xD1], 'modrm':0x38}),
(reg32_cl, {'opcode':[0xD3], 'modrm':0x38}),
(mem32_cl, {'opcode':[0xD3], 'modrm':0x38}),
(reg32_simm8, {'opcode':[0xC1], 'modrm':0x38}),
(mem32_simm8, {'opcode':[0xC1], 'modrm':0x38}),
(reg16_1, {'opcode':[0xD1], 'modrm':0x38}),
(mem16_1, {'opcode':[0xD1], 'modrm':0x38}),
(reg16_cl, {'opcode':[0xD3], 'modrm':0x38}),
(mem16_cl, {'opcode':[0xD3], 'modrm':0x38}),
(reg16_simm8, {'opcode':[0xC1], 'modrm':0x38}),
(mem16_simm8, {'opcode':[0xC1], 'modrm':0x38}),
(reg8_1, {'opcode':[0xD0], 'modrm':0x38}),
(mem8_1, {'opcode':[0xD0], 'modrm':0x38}),
(reg8_cl, {'opcode':[0xD2], 'modrm':0x38}),
(mem8_cl, {'opcode':[0xD2], 'modrm':0x38}),
(reg8_imm8, {'opcode':[0xC0], 'modrm':0x38}),
(mem8_imm8, {'opcode':[0xC0], 'modrm':0x38}))
class sbb(DispatchInstruction):
dispatch = (
(reg64_simm8, {'opcode':[0x83], 'modrm':0x18}),
(mem64_simm8, {'opcode':[0x83], 'modrm':0x18}),
(rax_imm32, {'opcode':[0x1D], 'modrm':None}),
(reg64_imm32, {'opcode':[0x81], 'modrm':0x18}),
(mem64_imm32, {'opcode':[0x81], 'modrm':0x18}),
(reg64_reg64, {'opcode':[0x19], 'modrm':None}),
(mem64_reg64, {'opcode':[0x19], 'modrm':None}),
(reg64_mem64, {'opcode':[0x1B], 'modrm':None}),
(reg32_simm8, {'opcode':[0x83], 'modrm':0x18}),
(mem32_simm8, {'opcode':[0x83], 'modrm':0x18}),
(eax_imm32, {'opcode':[0x1D], 'modrm':None}),
(reg32_imm32, {'opcode':[0x81], 'modrm':0x18}),
(mem32_imm32, {'opcode':[0x81], 'modrm':0x18}),
(reg32_reg32, {'opcode':[0x19], 'modrm':None}),
(mem32_reg32, {'opcode':[0x19], 'modrm':None}),
(reg32_mem32, {'opcode':[0x1B], 'modrm':None}),
(reg16_simm8, {'opcode':[0x83], 'modrm':0x18}),
(mem16_simm8, {'opcode':[0x83], 'modrm':0x18}),
(ax_imm16, {'opcode':[0x66, 0x1D], 'modrm':None}),
(reg16_imm16, {'opcode':[0x81], 'modrm':0x18}),
(mem16_imm16, {'opcode':[0x81], 'modrm':0x18}),
(reg16_reg16, {'opcode':[0x19], 'modrm':None}),
(mem16_reg16, {'opcode':[0x19], 'modrm':None}),
(reg16_mem16, {'opcode':[0x1B], 'modrm':None}),
(al_imm8, {'opcode':[0x1C], 'modrm':None}),
(reg8_imm8, {'opcode':[0x80], 'modrm':0x18}),
(mem8_imm8, {'opcode':[0x80], 'modrm':0x18}),
(reg8_reg8, {'opcode':[0x18], 'modrm':None}),
(mem8_reg8, {'opcode':[0x18], 'modrm':None}),
(reg8_mem8, {'opcode':[0x1A], 'modrm':None}))
class scasb(Instruction):
machine_inst = no_op
params = {'opcode':[0xAE], 'modrm':None}
class scasd(Instruction):
machine_inst = no_op
params = {'opcode':[0xAF], 'modrm':None}
class scasq(Instruction):
machine_inst = no_op
params = {'opcode':[0x48, 0xAF], 'modrm':None}
class scasw(Instruction):
machine_inst = no_op
params = {'opcode':[0x66, 0xAF], 'modrm':None}
class seta(DispatchInstruction):
dispatch = (
(reg8, {'opcode':[0x0F, 0x97], 'modrm':0x00}),
(mem8, {'opcode':[0x0F, 0x97], 'modrm':0x00}))
class setae(DispatchInstruction):
dispatch = (
(reg8, {'opcode':[0x0F, 0x93], 'modrm':0x00}),
(mem8, {'opcode':[0x0F, 0x93], 'modrm':0x00}))
class setb(DispatchInstruction):
dispatch = (
(reg8, {'opcode':[0x0F, 0x92], 'modrm':0x00}),
(mem8, {'opcode':[0x0F, 0x92], 'modrm':0x00}))
class setbe(DispatchInstruction):
dispatch = (
(reg8, {'opcode':[0x0F, 0x96], 'modrm':0x00}),
(mem8, {'opcode':[0x0F, 0x96], 'modrm':0x00}))
class setc(DispatchInstruction):
dispatch = (
(reg8, {'opcode':[0x0F, 0x92], 'modrm':0x00}),
(mem8, {'opcode':[0x0F, 0x92], 'modrm':0x00}))
class sete(DispatchInstruction):
dispatch = (
(reg8, {'opcode':[0x0F, 0x94], 'modrm':0x00}),
(mem8, {'opcode':[0x0F, 0x94], 'modrm':0x00}))
class setg(DispatchInstruction):
dispatch = (
(reg8, {'opcode':[0x0F, 0x9F], 'modrm':0x00}),
(mem8, {'opcode':[0x0F, 0x9F], 'modrm':0x00}))
class setge(DispatchInstruction):
dispatch = (
(reg8, {'opcode':[0x0F, 0x9D], 'modrm':0x00}),
(mem8, {'opcode':[0x0F, 0x9D], 'modrm':0x00}))
class setl(DispatchInstruction):
dispatch = (
(reg8, {'opcode':[0x0F, 0x9C], 'modrm':0x00}),
(mem8, {'opcode':[0x0F, 0x9C], 'modrm':0x00}))
class setle(DispatchInstruction):
dispatch = (
(reg8, {'opcode':[0x0F, 0x9E], 'modrm':0x00}),
(mem8, {'opcode':[0x0F, 0x9E], 'modrm':0x00}))
class setna(DispatchInstruction):
dispatch = (
(reg8, {'opcode':[0x0F, 0x96], 'modrm':0x00}),
(mem8, {'opcode':[0x0F, 0x96], 'modrm':0x00}))
class setnae(DispatchInstruction):
dispatch = (
(reg8, {'opcode':[0x0F, 0x92], 'modrm':0x00}),
(mem8, {'opcode':[0x0F, 0x92], 'modrm':0x00}))
class setnb(DispatchInstruction):
dispatch = (
(reg8, {'opcode':[0x0F, 0x93], 'modrm':0x00}),
(mem8, {'opcode':[0x0F, 0x93], 'modrm':0x00}))
class setnbe(DispatchInstruction):
dispatch = (
(reg8, {'opcode':[0x0F, 0x97], 'modrm':0x00}),
(mem8, {'opcode':[0x0F, 0x97], 'modrm':0x00}))
class setnc(DispatchInstruction):
dispatch = (
(reg8, {'opcode':[0x0F, 0x93], 'modrm':0x00}),
(mem8, {'opcode':[0x0F, 0x93], 'modrm':0x00}))
class setne(DispatchInstruction):
dispatch = (
(reg8, {'opcode':[0x0F, 0x95], 'modrm':0x00}),
(mem8, {'opcode':[0x0F, 0x95], 'modrm':0x00}))
class setng(DispatchInstruction):
dispatch = (
(reg8, {'opcode':[0x0F, 0x9E], 'modrm':0x00}),
(mem8, {'opcode':[0x0F, 0x9E], 'modrm':0x00}))
class setnge(DispatchInstruction):
dispatch = (
(reg8, {'opcode':[0x0F, 0x9C], 'modrm':0x00}),
(mem8, {'opcode':[0x0F, 0x9C], 'modrm':0x00}))
class setnl(DispatchInstruction):
dispatch = (
(reg8, {'opcode':[0x0F, 0x9D], 'modrm':0x00}),
(mem8, {'opcode':[0x0F, 0x9D], 'modrm':0x00}))
class setnle(DispatchInstruction):
dispatch = (
(reg8, {'opcode':[0x0F, 0x9F], 'modrm':0x00}),
(mem8, {'opcode':[0x0F, 0x9F], 'modrm':0x00}))
class setno(DispatchInstruction):
dispatch = (
(reg8, {'opcode':[0x0F, 0x91], 'modrm':0x00}),
(mem8, {'opcode':[0x0F, 0x91], 'modrm':0x00}))
class setnp(DispatchInstruction):
dispatch = (
(reg8, {'opcode':[0x0F, 0x9B], 'modrm':0x00}),
(mem8, {'opcode':[0x0F, 0x9B], 'modrm':0x00}))
class setns(DispatchInstruction):
dispatch = (
(reg8, {'opcode':[0x0F, 0x99], 'modrm':0x00}),
(mem8, {'opcode':[0x0F, 0x99], 'modrm':0x00}))
class setnz(DispatchInstruction):
dispatch = (
(reg8, {'opcode':[0x0F, 0x95], 'modrm':0x00}),
(mem8, {'opcode':[0x0F, 0x95], 'modrm':0x00}))
class seto(DispatchInstruction):
dispatch = (
(reg8, {'opcode':[0x0F, 0x90], 'modrm':0x00}),
(mem8, {'opcode':[0x0F, 0x90], 'modrm':0x00}))
class setp(DispatchInstruction):
dispatch = (
(reg8, {'opcode':[0x0F, 0x9A], 'modrm':0x00}),
(mem8, {'opcode':[0x0F, 0x9A], 'modrm':0x00}))
class setpe(DispatchInstruction):
dispatch = (
(reg8, {'opcode':[0x0F, 0x9A], 'modrm':0x00}),
(mem8, {'opcode':[0x0F, 0x9A], 'modrm':0x00}))
class setpo(DispatchInstruction):
dispatch = (
(reg8, {'opcode':[0x0F, 0x9B], 'modrm':0x00}),
(mem8, {'opcode':[0x0F, 0x9B], 'modrm':0x00}))
class sets(DispatchInstruction):
dispatch = (
(reg8, {'opcode':[0x0F, 0x98], 'modrm':0x00}),
(mem8, {'opcode':[0x0F, 0x98], 'modrm':0x00}))
class setz(DispatchInstruction):
dispatch = (
(reg8, {'opcode':[0x0F, 0x94], 'modrm':0x00}),
(mem8, {'opcode':[0x0F, 0x94], 'modrm':0x00}))
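# All of the setcc variants above encode as 0F 90+cc with a single r/m8
# operand in ModRM ('modrm':0x00, i.e. /0); synonymous condition mnemonics
# (sete/setz, setb/setc/setnae, and so on) deliberately share an opcode.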
class sfence(Instruction):
machine_inst = no_op
params = {'opcode':[0x0F, 0xAE, 0xF8], 'modrm':None}
class shl(DispatchInstruction):
dispatch = (
(reg64_1, {'opcode':[0xD1], 'modrm':0x20}),
(mem64_1, {'opcode':[0xD1], 'modrm':0x20}),
(reg64_cl, {'opcode':[0xD3], 'modrm':0x20}),
(mem64_cl, {'opcode':[0xD3], 'modrm':0x20}),
(reg64_imm8, {'opcode':[0xC1], 'modrm':0x20}),
(mem64_imm8, {'opcode':[0xC1], 'modrm':0x20}),
(reg32_1, {'opcode':[0xD1], 'modrm':0x20}),
(mem32_1, {'opcode':[0xD1], 'modrm':0x20}),
(reg32_cl, {'opcode':[0xD3], 'modrm':0x20}),
(mem32_cl, {'opcode':[0xD3], 'modrm':0x20}),
(reg32_imm8, {'opcode':[0xC1], 'modrm':0x20}),
(mem32_imm8, {'opcode':[0xC1], 'modrm':0x20}),
(reg16_1, {'opcode':[0xD1], 'modrm':0x20}),
(mem16_1, {'opcode':[0xD1], 'modrm':0x20}),
(reg16_cl, {'opcode':[0xD3], 'modrm':0x20}),
(mem16_cl, {'opcode':[0xD3], 'modrm':0x20}),
(reg16_imm8, {'opcode':[0xC1], 'modrm':0x20}),
(mem16_imm8, {'opcode':[0xC1], 'modrm':0x20}),
(reg8_1, {'opcode':[0xD0], 'modrm':0x20}),
(mem8_1, {'opcode':[0xD0], 'modrm':0x20}),
(reg8_cl, {'opcode':[0xD2], 'modrm':0x20}),
(mem8_cl, {'opcode':[0xD2], 'modrm':0x20}),
(reg8_imm8, {'opcode':[0xC0], 'modrm':0x20}),
(mem8_imm8, {'opcode':[0xC0], 'modrm':0x20}))
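# Encoding sketch (assuming the operand-object API used with these classes
# elsewhere): shl(rax, 4) would match the reg64_imm8 entry above and emit
# REX.W + C1 /4 ib, i.e. the bytes 0x48 0xC1 0xE0 0x04.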
class shld(DispatchInstruction):
dispatch = (
(reg64_reg64_imm8, {'opcode':[0x0F, 0xA4], 'modrm':None}),
(mem64_reg64_imm8, {'opcode':[0x0F, 0xA4], 'modrm':None}),
(reg64_reg64_cl, {'opcode':[0x0F, 0xA5], 'modrm':None}),
(mem64_reg64_cl, {'opcode':[0x0F, 0xA5], 'modrm':None}),
(reg32_reg32_imm8, {'opcode':[0x0F, 0xA4], 'modrm':None}),
(mem32_reg32_imm8, {'opcode':[0x0F, 0xA4], 'modrm':None}),
(reg32_reg32_cl, {'opcode':[0x0F, 0xA5], 'modrm':None}),
(mem32_reg32_cl, {'opcode':[0x0F, 0xA5], 'modrm':None}),
(reg16_reg16_imm8, {'opcode':[0x0F, 0xA4], 'modrm':None}),
(mem16_reg16_imm8, {'opcode':[0x0F, 0xA4], 'modrm':None}),
(reg16_reg16_cl, {'opcode':[0x0F, 0xA5], 'modrm':None}),
(mem16_reg16_cl, {'opcode':[0x0F, 0xA5], 'modrm':None}))
class shr(DispatchInstruction):
dispatch = (
(reg64_1, {'opcode':[0xD1], 'modrm':0x28}),
(mem64_1, {'opcode':[0xD1], 'modrm':0x28}),
(reg64_cl, {'opcode':[0xD3], 'modrm':0x28}),
(mem64_cl, {'opcode':[0xD3], 'modrm':0x28}),
(reg64_imm8, {'opcode':[0xC1], 'modrm':0x28}),
(mem64_imm8, {'opcode':[0xC1], 'modrm':0x28}),
(reg32_1, {'opcode':[0xD1], 'modrm':0x28}),
(mem32_1, {'opcode':[0xD1], 'modrm':0x28}),
(reg32_cl, {'opcode':[0xD3], 'modrm':0x28}),
(mem32_cl, {'opcode':[0xD3], 'modrm':0x28}),
(reg32_imm8, {'opcode':[0xC1], 'modrm':0x28}),
(mem32_imm8, {'opcode':[0xC1], 'modrm':0x28}),
(reg16_1, {'opcode':[0xD1], 'modrm':0x28}),
(mem16_1, {'opcode':[0xD1], 'modrm':0x28}),
(reg16_cl, {'opcode':[0xD3], 'modrm':0x28}),
(mem16_cl, {'opcode':[0xD3], 'modrm':0x28}),
(reg16_imm8, {'opcode':[0xC1], 'modrm':0x28}),
(mem16_imm8, {'opcode':[0xC1], 'modrm':0x28}),
(reg8_1, {'opcode':[0xD0], 'modrm':0x28}),
(mem8_1, {'opcode':[0xD0], 'modrm':0x28}),
(reg8_cl, {'opcode':[0xD2], 'modrm':0x28}),
(mem8_cl, {'opcode':[0xD2], 'modrm':0x28}),
(reg8_imm8, {'opcode':[0xC0], 'modrm':0x28}),
(mem8_imm8, {'opcode':[0xC0], 'modrm':0x28}))
class shrd(DispatchInstruction):
dispatch = (
(reg64_reg64_imm8, {'opcode':[0x0F, 0xAC], 'modrm':None}),
(mem64_reg64_imm8, {'opcode':[0x0F, 0xAC], 'modrm':None}),
(reg64_reg64_cl, {'opcode':[0x0F, 0xAD], 'modrm':None}),
(mem64_reg64_cl, {'opcode':[0x0F, 0xAD], 'modrm':None}),
(reg32_reg32_imm8, {'opcode':[0x0F, 0xAC], 'modrm':None}),
(mem32_reg32_imm8, {'opcode':[0x0F, 0xAC], 'modrm':None}),
(reg32_reg32_cl, {'opcode':[0x0F, 0xAD], 'modrm':None}),
(mem32_reg32_cl, {'opcode':[0x0F, 0xAD], 'modrm':None}),
(reg16_reg16_imm8, {'opcode':[0x0F, 0xAC], 'modrm':None}),
(mem16_reg16_imm8, {'opcode':[0x0F, 0xAC], 'modrm':None}),
(reg16_reg16_cl, {'opcode':[0x0F, 0xAD], 'modrm':None}),
(mem16_reg16_cl, {'opcode':[0x0F, 0xAD], 'modrm':None}))
class stc(Instruction):
machine_inst = no_op
params = {'opcode':[0xF9], 'modrm':None}
class std(Instruction):
machine_inst = no_op
params = {'opcode':[0xFD], 'modrm':None}
class stosb(Instruction):
machine_inst = no_op
params = {'opcode':[0xAA], 'modrm':None}
class stosd(Instruction):
machine_inst = no_op
params = {'opcode':[0xAB], 'modrm':None}
class stosq(Instruction):
machine_inst = no_op
params = {'opcode':[0x48, 0xAB], 'modrm':None}
class stosw(Instruction):
machine_inst = no_op
params = {'opcode':[0x66, 0xAB], 'modrm':None}
class sub(DispatchInstruction):
dispatch = (
(reg64_simm8, {'opcode':[0x83], 'modrm':0x28}),
(mem64_simm8, {'opcode':[0x83], 'modrm':0x28}),
(rax_imm32, {'opcode':[0x2D], 'modrm':None}),
(reg64_imm32, {'opcode':[0x81], 'modrm':0x28}),
(mem64_imm32, {'opcode':[0x81], 'modrm':0x28}),
(reg64_reg64, {'opcode':[0x29], 'modrm':None}),
(mem64_reg64, {'opcode':[0x29], 'modrm':None}),
(reg64_mem64, {'opcode':[0x2B], 'modrm':None}),
(reg32_simm8, {'opcode':[0x83], 'modrm':0x28}),
(mem32_simm8, {'opcode':[0x83], 'modrm':0x28}),
(eax_imm32, {'opcode':[0x2D], 'modrm':None}),
(reg32_imm32, {'opcode':[0x81], 'modrm':0x28}),
(mem32_imm32, {'opcode':[0x81], 'modrm':0x28}),
(reg32_reg32, {'opcode':[0x29], 'modrm':None}),
(mem32_reg32, {'opcode':[0x29], 'modrm':None}),
(reg32_mem32, {'opcode':[0x2B], 'modrm':None}),
(reg16_simm8, {'opcode':[0x83], 'modrm':0x28}),
(mem16_simm8, {'opcode':[0x83], 'modrm':0x28}),
(ax_imm16, {'opcode':[0x66, 0x2D], 'modrm':None}),
(reg16_imm16, {'opcode':[0x81], 'modrm':0x28}),
(mem16_imm16, {'opcode':[0x81], 'modrm':0x28}),
(reg16_reg16, {'opcode':[0x29], 'modrm':None}),
(mem16_reg16, {'opcode':[0x29], 'modrm':None}),
(reg16_mem16, {'opcode':[0x2B], 'modrm':None}),
(al_imm8, {'opcode':[0x2C], 'modrm':None}),
(reg8_imm8, {'opcode':[0x80], 'modrm':0x28}),
(mem8_imm8, {'opcode':[0x80], 'modrm':0x28}),
(reg8_reg8, {'opcode':[0x28], 'modrm':None}),
(mem8_reg8, {'opcode':[0x28], 'modrm':None}),
(reg8_mem8, {'opcode':[0x2A], 'modrm':None}))
class test(DispatchInstruction):
dispatch = (
(rax_imm32, {'opcode':[0xA9], 'modrm':None}),
(reg64_imm32, {'opcode':[0xF7], 'modrm':0x00}),
(mem64_imm32, {'opcode':[0xF7], 'modrm':0x00}),
(reg64_reg64, {'opcode':[0x85], 'modrm':None}),
(mem64_reg64, {'opcode':[0x85], 'modrm':None}),
(eax_imm32, {'opcode':[0xA9], 'modrm':None}),
(reg32_imm32, {'opcode':[0xF7], 'modrm':0x00}),
(mem32_imm32, {'opcode':[0xF7], 'modrm':0x00}),
(reg32_reg32, {'opcode':[0x85], 'modrm':None}),
(mem32_reg32, {'opcode':[0x85], 'modrm':None}),
(ax_imm16, {'opcode':[0x66, 0xA9], 'modrm':None}),
(reg16_imm16, {'opcode':[0xF7], 'modrm':0x00}),
(mem16_imm16, {'opcode':[0xF7], 'modrm':0x00}),
(reg16_reg16, {'opcode':[0x85], 'modrm':None}),
(mem16_reg16, {'opcode':[0x85], 'modrm':None}),
(al_imm8, {'opcode':[0xA8], 'modrm':None}),
(reg8_imm8, {'opcode':[0xF6], 'modrm':0x00}),
(mem8_imm8, {'opcode':[0xF6], 'modrm':0x00}),
(reg8_reg8, {'opcode':[0x84], 'modrm':None}),
(mem8_reg8, {'opcode':[0x84], 'modrm':None}))
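# test deliberately has no reg,mem forms: TEST is symmetric, so the single
# mem,reg encoding (85 /r, or 84 /r for bytes) covers both operand orders.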
class ud2(Instruction):
machine_inst = no_op
params = {'opcode':[0x0F, 0x0B], 'modrm':None}
class xadd(DispatchInstruction):
dispatch = (
(reg64_reg64, {'opcode':[0x0F, 0xC1], 'modrm':None}),
(mem64_reg64, {'opcode':[0x0F, 0xC1], 'modrm':None}),
(reg32_reg32, {'opcode':[0x0F, 0xC1], 'modrm':None}),
(mem32_reg32, {'opcode':[0x0F, 0xC1], 'modrm':None}),
(reg16_reg16, {'opcode':[0x0F, 0xC1], 'modrm':None}),
(mem16_reg16, {'opcode':[0x0F, 0xC1], 'modrm':None}),
(reg8_reg8, {'opcode':[0x0F, 0xC0], 'modrm':None}),
(mem8_reg8, {'opcode':[0x0F, 0xC0], 'modrm':None}))
class xchg(DispatchInstruction):
dispatch = (
(rax_reg64, {'opcode':[0x90], 'modrm':None}),
(reg64_rax, {'opcode':[0x90], 'modrm':None}),
(reg64_reg64, {'opcode':[0x87], 'modrm':None}),
(mem64_reg64, {'opcode':[0x87], 'modrm':None}),
(reg64_mem64, {'opcode':[0x87], 'modrm':None}),
(eax_reg32, {'opcode':[0x90], 'modrm':None}),
(reg32_eax, {'opcode':[0x90], 'modrm':None}),
(reg32_reg32, {'opcode':[0x87], 'modrm':None}),
(mem32_reg32, {'opcode':[0x87], 'modrm':None}),
(reg32_mem32, {'opcode':[0x87], 'modrm':None}),
(reg16_ax, {'opcode':[0x90], 'modrm':None}),
(ax_reg16, {'opcode':[0x90], 'modrm':None}),
(reg16_reg16, {'opcode':[0x87], 'modrm':None}),
(mem16_reg16, {'opcode':[0x87], 'modrm':None}),
(reg16_mem16, {'opcode':[0x87], 'modrm':None}),
(reg8_reg8, {'opcode':[0x86], 'modrm':None}),
(mem8_reg8, {'opcode':[0x86], 'modrm':None}),
(reg8_mem8, {'opcode':[0x86], 'modrm':None}))
class xlatb(Instruction):
machine_inst = no_op
params = {'opcode':[0xD7], 'modrm':None}
class xor(DispatchInstruction):
dispatch = (
(reg64_simm8, {'opcode':[0x83], 'modrm':0x30}),
(mem64_simm8, {'opcode':[0x83], 'modrm':0x30}),
(rax_imm32, {'opcode':[0x35], 'modrm':None}),
(reg64_imm32, {'opcode':[0x81], 'modrm':0x30}),
(mem64_imm32, {'opcode':[0x81], 'modrm':0x30}),
(reg64_reg64, {'opcode':[0x31], 'modrm':None}),
(mem64_reg64, {'opcode':[0x31], 'modrm':None}),
(reg64_mem64, {'opcode':[0x33], 'modrm':None}),
(reg32_simm8, {'opcode':[0x83], 'modrm':0x30}),
(mem32_simm8, {'opcode':[0x83], 'modrm':0x30}),
(eax_imm32, {'opcode':[0x35], 'modrm':None}),
(reg32_imm32, {'opcode':[0x81], 'modrm':0x30}),
(mem32_imm32, {'opcode':[0x81], 'modrm':0x30}),
(reg32_reg32, {'opcode':[0x31], 'modrm':None}),
(mem32_reg32, {'opcode':[0x31], 'modrm':None}),
(reg32_mem32, {'opcode':[0x33], 'modrm':None}),
(reg16_simm8, {'opcode':[0x83], 'modrm':0x30}),
(mem16_simm8, {'opcode':[0x83], 'modrm':0x30}),
(ax_imm16, {'opcode':[0x66, 0x35], 'modrm':None}),
(reg16_imm16, {'opcode':[0x81], 'modrm':0x30}),
(mem16_imm16, {'opcode':[0x81], 'modrm':0x30}),
(reg16_reg16, {'opcode':[0x31], 'modrm':None}),
(mem16_reg16, {'opcode':[0x31], 'modrm':None}),
(reg16_mem16, {'opcode':[0x33], 'modrm':None}),
(al_imm8, {'opcode':[0x34], 'modrm':None}),
(reg8_imm8, {'opcode':[0x80], 'modrm':0x30}),
(mem8_imm8, {'opcode':[0x80], 'modrm':0x30}),
(reg8_reg8, {'opcode':[0x30], 'modrm':None}),
(mem8_reg8, {'opcode':[0x30], 'modrm':None}),
(reg8_mem8, {'opcode':[0x32], 'modrm':None}))
# X87_ISA = (
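# x87 conventions: register forms use the st0_sti / sti_st0 signatures,
# where the second opcode byte is the base value to which the ST(i) index
# is added (e.g. fadd st0, st3 encodes as D8 C0+i, giving D8 C3).  The *p
# "pop" variants taking no_op default to the implicit ST(1), ST(0) operand
# pair, and memory forms use the usual /digit extension via 'modrm'.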
class f2xm1(Instruction):
machine_inst = no_op
params = {'opcode':[0xD9, 0xF0], 'modrm':None}
class fabs(Instruction):
machine_inst = no_op
params = {'opcode':[0xD9, 0xE1], 'modrm':None}
class fadd(DispatchInstruction):
dispatch = (
(st0_sti, {'opcode':[0xD8, 0xC0], 'modrm':None}),
(sti_st0, {'opcode':[0xDC, 0xC0], 'modrm':None}),
(mem32, {'opcode':[0xD8], 'modrm':0x00}),
(mem64, {'opcode':[0xDC], 'modrm':0x00}))
class faddp(DispatchInstruction):
dispatch = (
(no_op, {'opcode':[0xDE, 0xC1], 'modrm':None}),
(sti_st0, {'opcode':[0xDE, 0xC0], 'modrm':None}))
class fiadd(DispatchInstruction):
dispatch = (
(mem32, {'opcode':[0xDA], 'modrm':0x00}),
(mem16, {'opcode':[0xDE], 'modrm':0x00}))
class fchs(Instruction):
machine_inst = no_op
params = {'opcode':[0xD9, 0xE0], 'modrm':None}
class fcmovb(Instruction):
machine_inst = st0_sti
params = {'opcode':[0xDA, 0xC0], 'modrm':None}
class fcmovbe(Instruction):
machine_inst = st0_sti
params = {'opcode':[0xDA, 0xD0], 'modrm':None}
class fcmove(Instruction):
machine_inst = st0_sti
params = {'opcode':[0xDA, 0xC8], 'modrm':None}
class fcmovnb(Instruction):
machine_inst = st0_sti
params = {'opcode':[0xDB, 0xC0], 'modrm':None}
class fcmovnbe(Instruction):
machine_inst = st0_sti
params = {'opcode':[0xDB, 0xD0], 'modrm':None}
class fcmovne(Instruction):
machine_inst = st0_sti
params = {'opcode':[0xDB, 0xC8], 'modrm':None}
class fcmovnu(Instruction):
machine_inst = st0_sti
params = {'opcode':[0xDB, 0xD8], 'modrm':None}
class fcmovu(Instruction):
machine_inst = st0_sti
params = {'opcode':[0xDA, 0xD8], 'modrm':None}
class fcom(DispatchInstruction):
dispatch = (
(no_op, {'opcode':[0xD8, 0xD1], 'modrm':None}),
(sti, {'opcode':[0xD8, 0xD0], 'modrm':None}),
(mem32, {'opcode':[0xD8], 'modrm':0x10}),
(mem64, {'opcode':[0xDC], 'modrm':0x10}))
class fcomp(DispatchInstruction):
dispatch = (
(no_op, {'opcode':[0xD8, 0xD9], 'modrm':None}),
(sti, {'opcode':[0xD8, 0xD8], 'modrm':None}),
(mem32, {'opcode':[0xD8], 'modrm':0x18}),
(mem64, {'opcode':[0xDC], 'modrm':0x18}))
class fcompp(Instruction):
machine_inst = no_op
params = {'opcode':[0xDE, 0xD9], 'modrm':None}
class fcomi(Instruction):
machine_inst = st0_sti
params = {'opcode':[0xDB, 0xF0], 'modrm':None}
class fcomip(Instruction):
machine_inst = st0_sti
params = {'opcode':[0xDF, 0xF0], 'modrm':None}
class fcos(Instruction):
machine_inst = no_op
params = {'opcode':[0xD9, 0xFF], 'modrm':None}
class fdecstp(Instruction):
machine_inst = no_op
params = {'opcode':[0xD9, 0xF6], 'modrm':None}
class fdiv(DispatchInstruction):
dispatch = (
(st0_sti, {'opcode':[0xD8, 0xF0], 'modrm':None}),
(sti_st0, {'opcode':[0xDC, 0xF8], 'modrm':None}),
(mem32, {'opcode':[0xD8], 'modrm':0x30}),
(mem64, {'opcode':[0xDC], 'modrm':0x30}))
class fdivp(DispatchInstruction):
dispatch = (
(no_op, {'opcode':[0xDE, 0xF9], 'modrm':None}),
(sti_st0, {'opcode':[0xDE, 0xF8], 'modrm':None}))
class fidiv(DispatchInstruction):
dispatch = (
(mem32, {'opcode':[0xDA], 'modrm':0x30}),
(mem16, {'opcode':[0xDE], 'modrm':0x30}))
class fdivr(DispatchInstruction):
dispatch = (
(st0_sti, {'opcode':[0xD8, 0xF8], 'modrm':None}),
(sti_st0, {'opcode':[0xDC, 0xF0], 'modrm':None}),
(mem32, {'opcode':[0xD8], 'modrm':0x38}),
(mem64, {'opcode':[0xDC], 'modrm':0x38}))
class fdivrp(DispatchInstruction):
dispatch = (
(no_op, {'opcode':[0xDE, 0xF1], 'modrm':None}),
(sti_st0, {'opcode':[0xDE, 0xF0], 'modrm':None}))
class fidivr(DispatchInstruction):
dispatch = (
(mem32, {'opcode':[0xDA], 'modrm':0x38}),
(mem16, {'opcode':[0xDE], 'modrm':0x38}))
class ffree(Instruction):
machine_inst = sti
params = {'opcode':[0xDD, 0xC0], 'modrm':None}
class ficom(DispatchInstruction):
dispatch = (
(mem32, {'opcode':[0xDA], 'modrm':0x10}),
(mem16, {'opcode':[0xDE], 'modrm':0x10}))
class ficomp(DispatchInstruction):
dispatch = (
(mem32, {'opcode':[0xDA], 'modrm':0x18}),
(mem16, {'opcode':[0xDE], 'modrm':0x18}))
class fild(DispatchInstruction):
dispatch = (
(mem64, {'opcode':[0xDF], 'modrm':0x28}),
(mem32, {'opcode':[0xDB], 'modrm':0x00}),
(mem16, {'opcode':[0xDF], 'modrm':0x00}))
class fincstp(Instruction):
machine_inst = no_op
params = {'opcode':[0xD9, 0xF7], 'modrm':None}
class finit(Instruction):
machine_inst = no_op
params = {'opcode':[0x9B, 0xDB, 0xE3], 'modrm':None}
class fninit(Instruction):
machine_inst = no_op
params = {'opcode':[0xDB, 0xE3], 'modrm':None}
class fist(DispatchInstruction):
dispatch = (
(mem32, {'opcode':[0xDB], 'modrm':0x10}),
(mem16, {'opcode':[0xDF], 'modrm':0x10}))
class fistp(DispatchInstruction):
dispatch = (
(mem64, {'opcode':[0xDF], 'modrm':0x38}),
(mem32, {'opcode':[0xDB], 'modrm':0x18}),
(mem16, {'opcode':[0xDF], 'modrm':0x18}))
class fisttp(DispatchInstruction):
dispatch = (
(mem64, {'opcode':[0xDD], 'modrm':0x08}),
(mem32, {'opcode':[0xDB], 'modrm':0x08}),
(mem16, {'opcode':[0xDF], 'modrm':0x08}))
class fld(DispatchInstruction):
dispatch = (
(sti, {'opcode':[0xD9, 0xC0], 'modrm':None}),
(mem80, {'opcode':[0xDB], 'modrm':0x28}),
(mem64, {'opcode':[0xDD], 'modrm':0x00}),
(mem32, {'opcode':[0xD9], 'modrm':0x00}))
class fld1(Instruction):
machine_inst = no_op
params = {'opcode':[0xD9, 0xE8], 'modrm':None}
class fldcw(Instruction):
machine_inst = mem16
params = {'opcode':[0xD9], 'modrm':0x28}
class fldenv(Instruction):
machine_inst = mem228
params = {'opcode':[0xD9], 'modrm':0x20}
class fldl2e(Instruction):
machine_inst = no_op
params = {'opcode':[0xD9, 0xEA], 'modrm':None}
class fldl2t(Instruction):
machine_inst = no_op
params = {'opcode':[0xD9, 0xE9], 'modrm':None}
class fldlg2(Instruction):
machine_inst = no_op
params = {'opcode':[0xD9, 0xEC], 'modrm':None}
class fldln2(Instruction):
machine_inst = no_op
params = {'opcode':[0xD9, 0xED], 'modrm':None}
class fldpi(Instruction):
machine_inst = no_op
params = {'opcode':[0xD9, 0xEB], 'modrm':None}
class fldz(Instruction):
machine_inst = no_op
params = {'opcode':[0xD9, 0xEE], 'modrm':None}
class fmul(DispatchInstruction):
dispatch = (
(st0_sti, {'opcode':[0xD8, 0xC8], 'modrm':None}),
(sti_st0, {'opcode':[0xDC, 0xC8], 'modrm':None}),
(mem32, {'opcode':[0xD8], 'modrm':0x08}),
(mem64, {'opcode':[0xDC], 'modrm':0x08}))
class fmulp(DispatchInstruction):
dispatch = (
(no_op, {'opcode':[0xDE, 0xC9], 'modrm':None}),
(sti_st0, {'opcode':[0xDE, 0xC8], 'modrm':None}))
class fimul(DispatchInstruction):
dispatch = (
(mem32, {'opcode':[0xDA], 'modrm':0x08}),
(mem16, {'opcode':[0xDE], 'modrm':0x08}))
class fnop(Instruction):
machine_inst = no_op
params = {'opcode':[0xD9, 0xD0], 'modrm':None}
class fpatan(Instruction):
machine_inst = no_op
params = {'opcode':[0xD9, 0xF3], 'modrm':None}
class fprem(Instruction):
machine_inst = no_op
params = {'opcode':[0xD9, 0xF8], 'modrm':None}
class fprem1(Instruction):
machine_inst = no_op
params = {'opcode':[0xD9, 0xF5], 'modrm':None}
class fptan(Instruction):
machine_inst = no_op
params = {'opcode':[0xD9, 0xF2], 'modrm':None}
class frndint(Instruction):
machine_inst = no_op
params = {'opcode':[0xD9, 0xFC], 'modrm':None}
class frstor(Instruction):
machine_inst = mem752
params = {'opcode':[0xDD], 'modrm':0x20}
class fsave(Instruction):
machine_inst = mem752
params = {'opcode':[0x9B, 0xDD], 'modrm':0x30}
class fnsave(Instruction):
machine_inst = mem752
params = {'opcode':[0xDD], 'modrm':0x30}
class fscale(Instruction):
machine_inst = no_op
params = {'opcode':[0xD9, 0xFD], 'modrm':None}
class fsin(Instruction):
machine_inst = no_op
params = {'opcode':[0xD9, 0xFE], 'modrm':None}
class fsincos(Instruction):
machine_inst = no_op
params = {'opcode':[0xD9, 0xFB], 'modrm':None}
class fsqrt(Instruction):
machine_inst = no_op
params = {'opcode':[0xD9, 0xFA], 'modrm':None}
class fst(DispatchInstruction):
dispatch = (
(sti, {'opcode':[0xDD, 0xD0], 'modrm':None}),
(mem64, {'opcode':[0xDD], 'modrm':0x10}),
(mem32, {'opcode':[0xD9], 'modrm':0x10}))
class fstp(DispatchInstruction):
dispatch = (
(sti, {'opcode':[0xDD, 0xD8], 'modrm':None}),
(mem80, {'opcode':[0xDB], 'modrm':0x38}),
(mem64, {'opcode':[0xDD], 'modrm':0x18}),
(mem32, {'opcode':[0xD9], 'modrm':0x18}))
class fstcw(Instruction):
machine_inst = mem16
params = {'opcode':[0x9B, 0xD9], 'modrm':0x38}
class fnstcw(Instruction):
machine_inst = mem16
params = {'opcode':[0xD9], 'modrm':0x38}
class fstenv(Instruction):
machine_inst = mem228
params = {'opcode':[0x9B, 0xD9], 'modrm':0x30}
class fnstenv(Instruction):
machine_inst = mem228
params = {'opcode':[0xD9], 'modrm':0x30}
class fstsw(DispatchInstruction):
dispatch = (
(ax, {'opcode':[0x9B, 0xDF, 0xE0], 'modrm':None}),
(mem16, {'opcode':[0x9B, 0xDD], 'modrm':0x38}))
class fnstsw(DispatchInstruction):
dispatch = (
(ax, {'opcode':[0xDF, 0xE0], 'modrm':None}),
(mem16, {'opcode':[0xDD], 'modrm':0x38}))
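# The f*/fn* pairs (finit/fninit, fsave/fnsave, fstcw/fnstcw,
# fstenv/fnstenv, fstsw/fnstsw) differ only in the leading 9B (fwait)
# byte: the f* forms wait for pending x87 exceptions first, the fn* forms
# store without checking.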
class fsub(DispatchInstruction):
dispatch = (
(st0_sti, {'opcode':[0xD8, 0xE0], 'modrm':None}),
(sti_st0, {'opcode':[0xDC, 0xE8], 'modrm':None}),
(mem32, {'opcode':[0xD8], 'modrm':0x20}),
(mem64, {'opcode':[0xDC], 'modrm':0x20}))
class fsubp(DispatchInstruction):
dispatch = (
(no_op, {'opcode':[0xDE, 0xE9], 'modrm':None}),
(sti_st0, {'opcode':[0xDE, 0xE8], 'modrm':None}))
class fisub(DispatchInstruction):
dispatch = (
(mem32, {'opcode':[0xDA], 'modrm':0x20}),
(mem16, {'opcode':[0xDE], 'modrm':0x20}))
class fsubr(DispatchInstruction):
dispatch = (
(st0_sti, {'opcode':[0xD8, 0xE8], 'modrm':None}),
(sti_st0, {'opcode':[0xDC, 0xE0], 'modrm':None}),
(mem32, {'opcode':[0xD8], 'modrm':0x28}),
(mem64, {'opcode':[0xDC], 'modrm':0x28}))
class fsubrp(DispatchInstruction):
dispatch = (
(no_op, {'opcode':[0xDE, 0xE1], 'modrm':None}),
(sti_st0, {'opcode':[0xDE, 0xE0], 'modrm':None}))
class fisubr(DispatchInstruction):
dispatch = (
(mem32, {'opcode':[0xDA], 'modrm':0x28}),
(mem16, {'opcode':[0xDE], 'modrm':0x28}))
class ftst(Instruction):
machine_inst = no_op
params = {'opcode':[0xD9, 0xE4], 'modrm':None}
class fucom(DispatchInstruction):
dispatch = (
(no_op, {'opcode':[0xDD, 0xE1], 'modrm':None}),
(sti, {'opcode':[0xDD, 0xE0], 'modrm':None}))
class fucomp(DispatchInstruction):
dispatch = (
(no_op, {'opcode':[0xDD, 0xE9], 'modrm':None}),
(sti, {'opcode':[0xDD, 0xE8], 'modrm':None}))
class fucompp(Instruction):
machine_inst = no_op
params = {'opcode':[0xDA, 0xE9], 'modrm':None}
class fucomi(Instruction):
machine_inst = st0_sti
params = {'opcode':[0xDB, 0xE8], 'modrm':None}
class fucomip(Instruction):
machine_inst = st0_sti
params = {'opcode':[0xDF, 0xE8], 'modrm':None}
class fwait(Instruction):
machine_inst = no_op
params = {'opcode':[0x9B], 'modrm':None}
class fxam(Instruction):
machine_inst = no_op
params = {'opcode':[0xD9, 0xE5], 'modrm':None}
class fxch(DispatchInstruction):
dispatch = (
(no_op, {'opcode':[0xD9, 0xC9], 'modrm':None}),
(sti, {'opcode':[0xD9, 0xC8], 'modrm':None}))
class fxrstor(Instruction):
machine_inst = mem4096
params = {'opcode':[0x0F, 0xAE], 'modrm':0x08}
#sse?
class fxsave(Instruction):
machine_inst = mem4096
params = {'opcode':[0x0F, 0xAE], 'modrm':0x00}
#sse?
class fxtract(Instruction):
machine_inst = no_op
params = {'opcode':[0xD9, 0xF4], 'modrm':None}
class fyl2x(Instruction):
machine_inst = no_op
params = {'opcode':[0xD9, 0xF1], 'modrm':None}
class fyl2xp1(Instruction):
machine_inst = no_op
params = {'opcode':[0xD9, 0xF9], 'modrm':None}
# SSE_ISA = (
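# SSE conventions: the 'prefix' list carries the mandatory prefix that
# selects the data type of a shared opcode ([] for ps, 0x66 for pd,
# 0xF3 for ss, 0xF2 for sd).  arch_ext appears to record the required
# extension level: 0 = MMX, 1 = SSE, 2 = SSE2, 3 = SSE3, 4 = SSE4.x
# (including the SSE4a forms such as extrq and movntsd).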
class addpd(DispatchInstruction):
dispatch = (
(xmm_xmm, {'opcode':[0x0F, 0x58], 'modrm':None, 'prefix':[0x66]}),
(xmm_mem128, {'opcode':[0x0F, 0x58], 'modrm':None, 'prefix':[0x66]}))
arch_ext = 2
class addps(DispatchInstruction):
dispatch = (
(xmm_xmm, {'opcode':[0x0F, 0x58], 'modrm':None, 'prefix':[]}),
(xmm_mem128, {'opcode':[0x0F, 0x58], 'modrm':None, 'prefix':[]}))
arch_ext = 1
class addsd(DispatchInstruction):
dispatch = (
(xmm_xmm, {'opcode':[0x0F, 0x58], 'modrm':None, 'prefix':[0xF2]}),
(xmm_mem64, {'opcode':[0x0F, 0x58], 'modrm':None, 'prefix':[0xF2]}))
arch_ext = 2
class addss(DispatchInstruction):
dispatch = (
(xmm_xmm, {'opcode':[0x0F, 0x58], 'modrm':None, 'prefix':[0xF3]}),
(xmm_mem32, {'opcode':[0x0F, 0x58], 'modrm':None, 'prefix':[0xF3]}))
arch_ext = 1
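# Encoding sketch (same assumed operand API as above): addss(xmm0, xmm1)
# would emit F3 0F 58 C1, the mandatory F3 prefix followed by the shared
# 0F 58 add opcode and ModRM 11/000/001 for the xmm0, xmm1 register pair.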
class addsubpd(DispatchInstruction):
dispatch = (
(xmm_xmm, {'opcode':[0x0F, 0xD0], 'modrm':None, 'prefix':[0x66]}),
(xmm_mem128, {'opcode':[0x0F, 0xD0], 'modrm':None, 'prefix':[0x66]}))
arch_ext = 3
class addsubps(DispatchInstruction):
dispatch = (
(xmm_xmm, {'opcode':[0x0F, 0xD0], 'modrm':None, 'prefix':[0xF2]}),
(xmm_mem128, {'opcode':[0x0F, 0xD0], 'modrm':None, 'prefix':[0xF2]}))
arch_ext = 3
class andnpd(DispatchInstruction):
dispatch = (
(xmm_xmm, {'opcode':[0x0F, 0x55], 'modrm':None, 'prefix':[0x66]}),
(xmm_mem128, {'opcode':[0x0F, 0x55], 'modrm':None, 'prefix':[0x66]}))
arch_ext = 2
class andnps(DispatchInstruction):
dispatch = (
(xmm_xmm, {'opcode':[0x0F, 0x55], 'modrm':None, 'prefix':[]}),
(xmm_mem128, {'opcode':[0x0F, 0x55], 'modrm':None, 'prefix':[]}))
arch_ext = 1
class andpd(DispatchInstruction):
dispatch = (
(xmm_xmm, {'opcode':[0x0F, 0x54], 'modrm':None, 'prefix':[0x66]}),
(xmm_mem128, {'opcode':[0x0F, 0x54], 'modrm':None, 'prefix':[0x66]}))
arch_ext = 2
class andps(DispatchInstruction):
dispatch = (
(xmm_xmm, {'opcode':[0x0F, 0x54], 'modrm':None, 'prefix':[]}),
(xmm_mem128, {'opcode':[0x0F, 0x54], 'modrm':None, 'prefix':[]}))
arch_ext = 1
class blendpd(DispatchInstruction):
dispatch = (
(xmm_xmm_imm8, {'opcode':[0x0F, 0x3A, 0x0D], 'modrm':None, 'prefix':[0x66]}),
(xmm_mem128_imm8, {'opcode':[0x0F, 0x3A, 0x0D], 'modrm':None, 'prefix':[0x66]}))
arch_ext = 4
class blendps(DispatchInstruction):
dispatch = (
(xmm_xmm_imm8, {'opcode':[0x0F, 0x3A, 0x0C], 'modrm':None, 'prefix':[0x66]}),
(xmm_mem128_imm8, {'opcode':[0x0F, 0x3A, 0x0C], 'modrm':None, 'prefix':[0x66]}))
arch_ext = 4
class blendvpd(DispatchInstruction):
dispatch = (
(xmm_xmm, {'opcode':[0x0F, 0x38, 0x15], 'modrm':None, 'prefix':[0x66]}),
(xmm_mem128, {'opcode':[0x0F, 0x38, 0x15], 'modrm':None, 'prefix':[0x66]}))
arch_ext = 4
class blendvps(DispatchInstruction):
dispatch = (
(xmm_xmm, {'opcode':[0x0F, 0x38, 0x14], 'modrm':None, 'prefix':[0x66]}),
(xmm_mem128, {'opcode':[0x0F, 0x38, 0x14], 'modrm':None, 'prefix':[0x66]}))
arch_ext = 4
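# The cmp{eq,lt,le,unord,neq,nlt,nle,ord}{pd,ps,sd,ss} classes below are
# pseudo-ops of cmppd/cmpps/cmpsd/cmpss further down: all share opcode
# 0F C2, and the 'imm' entry supplies the fixed comparison predicate byte
# (0 = eq, 1 = lt, 2 = le, 3 = unord, 4 = neq, 5 = nlt, 6 = nle, 7 = ord).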
class cmpeqpd(DispatchInstruction):
dispatch = (
(xmm_xmm_imm, {'opcode':[0x0F, 0xC2], 'modrm':None, 'imm':0, 'prefix':[0x66]}),
(xmm_mem128_imm, {'opcode':[0x0F, 0xC2], 'modrm':None, 'imm':0, 'prefix':[0x66]}))
arch_ext = 2
class cmpeqps(DispatchInstruction):
dispatch = (
(xmm_xmm_imm, {'opcode':[0x0F, 0xC2], 'modrm':None, 'imm':0, 'prefix':[]}),
(xmm_mem128_imm, {'opcode':[0x0F, 0xC2], 'modrm':None, 'imm':0, 'prefix':[]}))
arch_ext = 1
class cmpeqsd(DispatchInstruction):
dispatch = (
(xmm_xmm_imm, {'opcode':[0x0F, 0xC2], 'modrm':None, 'imm':0, 'prefix':[0xF2]}),
(xmm_mem64_imm, {'opcode':[0x0F, 0xC2], 'modrm':None, 'imm':0, 'prefix':[0xF2]}))
arch_ext = 2
class cmpeqss(DispatchInstruction):
dispatch = (
(xmm_xmm_imm, {'opcode':[0x0F, 0xC2], 'modrm':None, 'imm':0, 'prefix':[0xF3]}),
(xmm_mem32_imm, {'opcode':[0x0F, 0xC2], 'modrm':None, 'imm':0, 'prefix':[0xF3]}))
arch_ext = 1
class cmplepd(DispatchInstruction):
dispatch = (
(xmm_xmm_imm, {'opcode':[0x0F, 0xC2], 'modrm':None, 'imm':2, 'prefix':[0x66]}),
(xmm_mem128_imm, {'opcode':[0x0F, 0xC2], 'modrm':None, 'imm':2, 'prefix':[0x66]}))
arch_ext = 2
class cmpleps(DispatchInstruction):
dispatch = (
(xmm_xmm_imm, {'opcode':[0x0F, 0xC2], 'modrm':None, 'imm':2, 'prefix':[]}),
(xmm_mem128_imm, {'opcode':[0x0F, 0xC2], 'modrm':None, 'imm':2, 'prefix':[]}))
arch_ext = 1
class cmplesd(DispatchInstruction):
dispatch = (
(xmm_xmm_imm, {'opcode':[0x0F, 0xC2], 'modrm':None, 'imm':2, 'prefix':[0xF2]}),
(xmm_mem64_imm, {'opcode':[0x0F, 0xC2], 'modrm':None, 'imm':2, 'prefix':[0xF2]}))
arch_ext = 2
class cmpless(DispatchInstruction):
dispatch = (
(xmm_xmm_imm, {'opcode':[0x0F, 0xC2], 'modrm':None, 'imm':2, 'prefix':[0xF3]}),
(xmm_mem32_imm, {'opcode':[0x0F, 0xC2], 'modrm':None, 'imm':2, 'prefix':[0xF3]}))
arch_ext = 1
class cmpltpd(DispatchInstruction):
dispatch = (
(xmm_xmm_imm, {'opcode':[0x0F, 0xC2], 'modrm':None, 'imm':1, 'prefix':[0x66]}),
(xmm_mem128_imm, {'opcode':[0x0F, 0xC2], 'modrm':None, 'imm':1, 'prefix':[0x66]}))
arch_ext = 2
class cmpltps(DispatchInstruction):
dispatch = (
(xmm_xmm_imm, {'opcode':[0x0F, 0xC2], 'modrm':None, 'imm':1, 'prefix':[]}),
(xmm_mem128_imm, {'opcode':[0x0F, 0xC2], 'modrm':None, 'imm':1, 'prefix':[]}))
arch_ext = 1
class cmpltsd(DispatchInstruction):
dispatch = (
(xmm_xmm_imm, {'opcode':[0x0F, 0xC2], 'modrm':None, 'imm':1, 'prefix':[0xF2]}),
(xmm_mem64_imm, {'opcode':[0x0F, 0xC2], 'modrm':None, 'imm':1, 'prefix':[0xF2]}))
arch_ext = 2
class cmpltss(DispatchInstruction):
dispatch = (
(xmm_xmm_imm, {'opcode':[0x0F, 0xC2], 'modrm':None, 'imm':1, 'prefix':[0xF3]}),
(xmm_mem32_imm, {'opcode':[0x0F, 0xC2], 'modrm':None, 'imm':1, 'prefix':[0xF3]}))
arch_ext = 1
class cmpneqpd(DispatchInstruction):
dispatch = (
(xmm_xmm_imm, {'opcode':[0x0F, 0xC2], 'modrm':None, 'imm':4, 'prefix':[0x66]}),
(xmm_mem128_imm, {'opcode':[0x0F, 0xC2], 'modrm':None, 'imm':4, 'prefix':[0x66]}))
arch_ext = 2
class cmpneqps(DispatchInstruction):
dispatch = (
(xmm_xmm_imm, {'opcode':[0x0F, 0xC2], 'modrm':None, 'imm':4, 'prefix':[]}),
(xmm_mem128_imm, {'opcode':[0x0F, 0xC2], 'modrm':None, 'imm':4, 'prefix':[]}))
arch_ext = 1
class cmpneqsd(DispatchInstruction):
dispatch = (
(xmm_xmm_imm, {'opcode':[0x0F, 0xC2], 'modrm':None, 'imm':4, 'prefix':[0xF2]}),
(xmm_mem64_imm, {'opcode':[0x0F, 0xC2], 'modrm':None, 'imm':4, 'prefix':[0xF2]}))
arch_ext = 2
class cmpneqss(DispatchInstruction):
dispatch = (
(xmm_xmm_imm, {'opcode':[0x0F, 0xC2], 'modrm':None, 'imm':4, 'prefix':[0xF3]}),
(xmm_mem32_imm, {'opcode':[0x0F, 0xC2], 'modrm':None, 'imm':4, 'prefix':[0xF3]}))
arch_ext = 1
class cmpnlepd(DispatchInstruction):
dispatch = (
(xmm_xmm_imm, {'opcode':[0x0F, 0xC2], 'modrm':None, 'imm':6, 'prefix':[0x66]}),
(xmm_mem128_imm, {'opcode':[0x0F, 0xC2], 'modrm':None, 'imm':6, 'prefix':[0x66]}))
arch_ext = 2
class cmpnleps(DispatchInstruction):
dispatch = (
(xmm_xmm_imm, {'opcode':[0x0F, 0xC2], 'modrm':None, 'imm':6, 'prefix':[]}),
(xmm_mem128_imm, {'opcode':[0x0F, 0xC2], 'modrm':None, 'imm':6, 'prefix':[]}))
arch_ext = 1
class cmpnlesd(DispatchInstruction):
dispatch = (
(xmm_xmm_imm, {'opcode':[0x0F, 0xC2], 'modrm':None, 'imm':6, 'prefix':[0xF2]}),
(xmm_mem64_imm, {'opcode':[0x0F, 0xC2], 'modrm':None, 'imm':6, 'prefix':[0xF2]}))
arch_ext = 2
class cmpnless(DispatchInstruction):
dispatch = (
(xmm_xmm_imm, {'opcode':[0x0F, 0xC2], 'modrm':None, 'imm':6, 'prefix':[0xF3]}),
(xmm_mem32_imm, {'opcode':[0x0F, 0xC2], 'modrm':None, 'imm':6, 'prefix':[0xF3]}))
arch_ext = 1
class cmpnltpd(DispatchInstruction):
dispatch = (
(xmm_xmm_imm, {'opcode':[0x0F, 0xC2], 'modrm':None, 'imm':5, 'prefix':[0x66]}),
(xmm_mem128_imm, {'opcode':[0x0F, 0xC2], 'modrm':None, 'imm':5, 'prefix':[0x66]}))
arch_ext = 2
class cmpnltps(DispatchInstruction):
dispatch = (
(xmm_xmm_imm, {'opcode':[0x0F, 0xC2], 'modrm':None, 'imm':5, 'prefix':[]}),
(xmm_mem128_imm, {'opcode':[0x0F, 0xC2], 'modrm':None, 'imm':5, 'prefix':[]}))
arch_ext = 1
class cmpnltsd(DispatchInstruction):
dispatch = (
(xmm_xmm_imm, {'opcode':[0x0F, 0xC2], 'modrm':None, 'imm':5, 'prefix':[0xF2]}),
(xmm_mem64_imm, {'opcode':[0x0F, 0xC2], 'modrm':None, 'imm':5, 'prefix':[0xF2]}))
arch_ext = 2
class cmpnltss(DispatchInstruction):
dispatch = (
(xmm_xmm_imm, {'opcode':[0x0F, 0xC2], 'modrm':None, 'imm':5, 'prefix':[0xF3]}),
(xmm_mem32_imm, {'opcode':[0x0F, 0xC2], 'modrm':None, 'imm':5, 'prefix':[0xF3]}))
arch_ext = 1
class cmpordpd(DispatchInstruction):
dispatch = (
(xmm_xmm_imm, {'opcode':[0x0F, 0xC2], 'modrm':None, 'imm':7, 'prefix':[0x66]}),
(xmm_mem128_imm, {'opcode':[0x0F, 0xC2], 'modrm':None, 'imm':7, 'prefix':[0x66]}))
arch_ext = 2
class cmpordps(DispatchInstruction):
dispatch = (
(xmm_xmm_imm, {'opcode':[0x0F, 0xC2], 'modrm':None, 'imm':7, 'prefix':[]}),
(xmm_mem128_imm, {'opcode':[0x0F, 0xC2], 'modrm':None, 'imm':7, 'prefix':[]}))
arch_ext = 1
class cmpordsd(DispatchInstruction):
dispatch = (
(xmm_xmm_imm, {'opcode':[0x0F, 0xC2], 'modrm':None, 'imm':7, 'prefix':[0xF2]}),
(xmm_mem64_imm, {'opcode':[0x0F, 0xC2], 'modrm':None, 'imm':7, 'prefix':[0xF2]}))
arch_ext = 2
class cmpordss(DispatchInstruction):
dispatch = (
(xmm_xmm_imm, {'opcode':[0x0F, 0xC2], 'modrm':None, 'imm':7, 'prefix':[0xF3]}),
(xmm_mem32_imm, {'opcode':[0x0F, 0xC2], 'modrm':None, 'imm':7, 'prefix':[0xF3]}))
arch_ext = 1
class cmpunordpd(DispatchInstruction):
dispatch = (
(xmm_xmm_imm, {'opcode':[0x0F, 0xC2], 'modrm':None, 'imm':3, 'prefix':[0x66]}),
(xmm_mem128_imm, {'opcode':[0x0F, 0xC2], 'modrm':None, 'imm':3, 'prefix':[0x66]}))
arch_ext = 2
class cmpunordps(DispatchInstruction):
dispatch = (
(xmm_xmm_imm, {'opcode':[0x0F, 0xC2], 'modrm':None, 'imm':3, 'prefix':[]}),
(xmm_mem128_imm, {'opcode':[0x0F, 0xC2], 'modrm':None, 'imm':3, 'prefix':[]}))
arch_ext = 1
class cmpunordsd(DispatchInstruction):
dispatch = (
(xmm_xmm_imm, {'opcode':[0x0F, 0xC2], 'modrm':None, 'imm':3, 'prefix':[0xF2]}),
(xmm_mem64_imm, {'opcode':[0x0F, 0xC2], 'modrm':None, 'imm':3, 'prefix':[0xF2]}))
arch_ext = 2
class cmpunordss(DispatchInstruction):
dispatch = (
(xmm_xmm_imm, {'opcode':[0x0F, 0xC2], 'modrm':None, 'imm':3, 'prefix':[0xF3]}),
(xmm_mem32_imm, {'opcode':[0x0F, 0xC2], 'modrm':None, 'imm':3, 'prefix':[0xF3]}))
arch_ext = 1
class cmppd(DispatchInstruction):
dispatch = (
(xmm_xmm_imm8, {'opcode':[0x0F, 0xC2], 'modrm':None, 'prefix':[0x66]}),
(xmm_mem128_imm8, {'opcode':[0x0F, 0xC2], 'modrm':None, 'prefix':[0x66]}))
arch_ext = 2
class cmpps(DispatchInstruction):
dispatch = (
(xmm_xmm_imm8, {'opcode':[0x0F, 0xC2], 'modrm':None, 'prefix':[]}),
(xmm_mem128_imm8, {'opcode':[0x0F, 0xC2], 'modrm':None, 'prefix':[]}))
arch_ext = 1
class cmpsd(DispatchInstruction):
dispatch = (
(xmm_xmm_imm8, {'opcode':[0x0F, 0xC2], 'modrm':None, 'prefix':[0xF2]}),
(xmm_mem64_imm8, {'opcode':[0x0F, 0xC2], 'modrm':None, 'prefix':[0xF2]}))
arch_ext = 2
class cmpss(DispatchInstruction):
dispatch = (
(xmm_xmm_imm8, {'opcode':[0x0F, 0xC2], 'modrm':None, 'prefix':[0xF3]}),
(xmm_mem32_imm8, {'opcode':[0x0F, 0xC2], 'modrm':None, 'prefix':[0xF3]}))
arch_ext = 1
class comisd(DispatchInstruction):
dispatch = (
(xmm_xmm, {'opcode':[0x0F, 0x2F], 'modrm':None, 'prefix':[0x66]}),
(xmm_mem64, {'opcode':[0x0F, 0x2F], 'modrm':None, 'prefix':[0x66]}))
arch_ext = 2
class comiss(DispatchInstruction):
dispatch = (
(xmm_xmm, {'opcode':[0x0F, 0x2F], 'modrm':None, 'prefix':[]}),
    (xmm_mem32, {'opcode':[0x0F, 0x2F], 'modrm':None, 'prefix':[]}))
arch_ext = 1
class cvtdq2pd(DispatchInstruction):
dispatch = (
(xmm_xmm, {'opcode':[0x0F, 0xE6], 'modrm':None, 'prefix':[0xF3]}),
(xmm_mem64, {'opcode':[0x0F, 0xE6], 'modrm':None, 'prefix':[0xF3]}))
arch_ext = 2
class cvtdq2ps(DispatchInstruction):
dispatch = (
(xmm_xmm, {'opcode':[0x0F, 0x5B], 'modrm':None, 'prefix':[]}),
    (xmm_mem128, {'opcode':[0x0F, 0x5B], 'modrm':None, 'prefix':[]}))
arch_ext = 2
class cvtpd2dq(DispatchInstruction):
dispatch = (
(xmm_xmm, {'opcode':[0x0F, 0xE6], 'modrm':None, 'prefix':[0xF2]}),
(xmm_mem128, {'opcode':[0x0F, 0xE6], 'modrm':None, 'prefix':[0xF2]}))
arch_ext = 2
class cvtpd2pi(DispatchInstruction):
dispatch = (
(mmx_xmm, {'opcode':[0x0F, 0x2D], 'modrm':None, 'prefix':[0x66]}),
(mmx_mem128, {'opcode':[0x0F, 0x2D], 'modrm':None, 'prefix':[0x66]}))
arch_ext = 2
class cvtpd2ps(DispatchInstruction):
dispatch = (
(xmm_xmm, {'opcode':[0x0F, 0x5A], 'modrm':None, 'prefix':[0x66]}),
(xmm_mem128, {'opcode':[0x0F, 0x5A], 'modrm':None, 'prefix':[0x66]}))
arch_ext = 2
class cvtpi2pd(DispatchInstruction):
dispatch = (
(xmm_mmx, {'opcode':[0x0F, 0x2A], 'modrm':None, 'prefix':[0x66]}),
(xmm_mem64, {'opcode':[0x0F, 0x2A], 'modrm':None, 'prefix':[0x66]}))
arch_ext = 2
class cvtpi2ps(DispatchInstruction):
dispatch = (
(xmm_mmx, {'opcode':[0x0F, 0x2A], 'modrm':None, 'prefix':[]}),
(xmm_mem64, {'opcode':[0x0F, 0x2A], 'modrm':None, 'prefix':[]}))
  arch_ext = 1
class cvtps2dq(DispatchInstruction):
dispatch = (
(xmm_xmm, {'opcode':[0x0F, 0x5B], 'modrm':None, 'prefix':[0x66]}),
(xmm_mem128, {'opcode':[0x0F, 0x5B], 'modrm':None, 'prefix':[0x66]}))
arch_ext = 2
class cvtps2pd(DispatchInstruction):
dispatch = (
(xmm_xmm, {'opcode':[0x0F, 0x5A], 'modrm':None, 'prefix':[]}),
(xmm_mem64, {'opcode':[0x0F, 0x5A], 'modrm':None, 'prefix':[]}))
arch_ext = 2
class cvtps2pi(DispatchInstruction):
dispatch = (
(mmx_xmm, {'opcode':[0x0F, 0x2D], 'modrm':None, 'prefix':[]}),
    (mmx_mem64, {'opcode':[0x0F, 0x2D], 'modrm':None, 'prefix':[]}))
  arch_ext = 1
class cvtsd2si(DispatchInstruction):
dispatch = (
# TODO - reg64 version defined by intel manuals but not AMD
#(reg64_xmm, {'opcode':[0x0F, 0x2D], 'modrm':None, 'prefix':[0xF2]}),
#(reg64_mem64, {'opcode':[0x0F, 0x2D], 'modrm':None, 'prefix':[0xF2]}),
(reg32_xmm, {'opcode':[0x0F, 0x2D], 'modrm':None, 'prefix':[0xF2]}),
(reg32_mem64, {'opcode':[0x0F, 0x2D], 'modrm':None, 'prefix':[0xF2]}))
arch_ext = 2
class cvtsd2ss(DispatchInstruction):
dispatch = (
(xmm_xmm, {'opcode':[0x0F, 0x5A], 'modrm':None, 'prefix':[0xF2]}),
(xmm_mem64, {'opcode':[0x0F, 0x5A], 'modrm':None, 'prefix':[0xF2]}))
arch_ext = 2
class cvtsi2sd(DispatchInstruction):
dispatch = (
(xmm_reg64, {'opcode':[0x0F, 0x2A], 'modrm':None, 'prefix':[0xF2]}),
(xmm_mem64, {'opcode':[0x0F, 0x2A], 'modrm':None, 'prefix':[0xF2]}),
(xmm_reg32, {'opcode':[0x0F, 0x2A], 'modrm':None, 'prefix':[0xF2]}),
(xmm_mem32, {'opcode':[0x0F, 0x2A], 'modrm':None, 'prefix':[0xF2]}))
arch_ext = 2
class cvtsi2ss(DispatchInstruction):
  dispatch = (
    # TODO - reg64 version defined by intel manuals but not AMD
    #(xmm_reg64, {'opcode':[0x0F, 0x2A], 'modrm':None, 'prefix':[0xF3]}),
    #(xmm_mem64, {'opcode':[0x0F, 0x2A], 'modrm':None, 'prefix':[0xF3]}),
    (xmm_reg32, {'opcode':[0x0F, 0x2A], 'modrm':None, 'prefix':[0xF3]}),
    (xmm_mem32, {'opcode':[0x0F, 0x2A], 'modrm':None, 'prefix':[0xF3]}))
  arch_ext = 1
class cvtss2sd(DispatchInstruction):
dispatch = (
(xmm_xmm, {'opcode':[0x0F, 0x5A], 'modrm':None, 'prefix':[0xF3]}),
(xmm_mem32, {'opcode':[0x0F, 0x5A], 'modrm':None, 'prefix':[0xF3]}))
arch_ext = 2
class cvtss2si(DispatchInstruction):
dispatch = (
# TODO - reg64 version defined by intel manuals but not AMD
#(reg64_xmm, {'opcode':[0x0F, 0x2D], 'modrm':None, 'prefix':[0xF3]}),
#(reg64_mem32, {'opcode':[0x0F, 0x2D], 'modrm':None, 'prefix':[0xF3]}),
(reg32_xmm, {'opcode':[0x0F, 0x2D], 'modrm':None, 'prefix':[0xF3]}),
(reg32_mem32, {'opcode':[0x0F, 0x2D], 'modrm':None, 'prefix':[0xF3]}))
arch_ext = 1
class cvttpd2dq(DispatchInstruction):
dispatch = (
(xmm_xmm, {'opcode':[0x0F, 0xE6], 'modrm':None, 'prefix':[0x66]}),
(xmm_mem128, {'opcode':[0x0F, 0xE6], 'modrm':None, 'prefix':[0x66]}))
arch_ext = 2
class cvttpd2pi(DispatchInstruction):
dispatch = (
(mmx_xmm, {'opcode':[0x0F, 0x2C], 'modrm':None, 'prefix':[0x66]}),
(mmx_mem128, {'opcode':[0x0F, 0x2C], 'modrm':None, 'prefix':[0x66]}))
arch_ext = 2
class cvttps2dq(DispatchInstruction):
dispatch = (
(xmm_xmm, {'opcode':[0x0F, 0x5B], 'modrm':None, 'prefix':[0xF3]}),
(xmm_mem128, {'opcode':[0x0F, 0x5B], 'modrm':None, 'prefix':[0xF3]}))
arch_ext = 2
class cvttps2pi(DispatchInstruction):
dispatch = (
(mmx_xmm, {'opcode':[0x0F, 0x2C], 'modrm':None, 'prefix':[]}),
    (mmx_mem64, {'opcode':[0x0F, 0x2C], 'modrm':None, 'prefix':[]}))
  arch_ext = 1
class cvttsd2si(DispatchInstruction):
dispatch = (
# TODO - reg64 version defined by intel manuals but not AMD
#(reg64_xmm, {'opcode':[0x0F, 0x2C], 'modrm':None, 'prefix':[0xF2]}),
#(reg64_mem64, {'opcode':[0x0F, 0x2C], 'modrm':None, 'prefix':[0xF2]}),
(reg32_xmm, {'opcode':[0x0F, 0x2C], 'modrm':None, 'prefix':[0xF2]}),
(reg32_mem64, {'opcode':[0x0F, 0x2C], 'modrm':None, 'prefix':[0xF2]}))
arch_ext = 2
class cvttss2si(DispatchInstruction):
dispatch = (
# TODO - reg64 version defined by intel manuals but not AMD
#(reg64_xmm, {'opcode':[0x0F, 0x2C], 'modrm':None, 'prefix':[0xF3]}),
#(reg64_mem32, {'opcode':[0x0F, 0x2C], 'modrm':None, 'prefix':[0xF3]}),
(reg32_xmm, {'opcode':[0x0F, 0x2C], 'modrm':None, 'prefix':[0xF3]}),
(reg32_mem32, {'opcode':[0x0F, 0x2C], 'modrm':None, 'prefix':[0xF3]}))
arch_ext = 1
class divpd(DispatchInstruction):
dispatch = (
(xmm_xmm, {'opcode':[0x0F, 0x5E], 'modrm':None, 'prefix':[0x66]}),
(xmm_mem128, {'opcode':[0x0F, 0x5E], 'modrm':None, 'prefix':[0x66]}))
arch_ext = 2
class divps(DispatchInstruction):
dispatch = (
(xmm_xmm, {'opcode':[0x0F, 0x5E], 'modrm':None, 'prefix':[]}),
(xmm_mem128, {'opcode':[0x0F, 0x5E], 'modrm':None, 'prefix':[]}))
arch_ext = 1
class divsd(DispatchInstruction):
dispatch = (
(xmm_xmm, {'opcode':[0x0F, 0x5E], 'modrm':None, 'prefix':[0xF2]}),
(xmm_mem64, {'opcode':[0x0F, 0x5E], 'modrm':None, 'prefix':[0xF2]}))
arch_ext = 2
class divss(DispatchInstruction):
dispatch = (
(xmm_xmm, {'opcode':[0x0F, 0x5E], 'modrm':None, 'prefix':[0xF3]}),
(xmm_mem32, {'opcode':[0x0F, 0x5E], 'modrm':None, 'prefix':[0xF3]}))
arch_ext = 1
class dppd(DispatchInstruction):
dispatch = (
(xmm_xmm_imm8, {'opcode':[0x0F, 0x3A, 0x41], 'modrm':None, 'prefix':[0x66]}),
(xmm_mem128_imm8, {'opcode':[0x0F, 0x3A, 0x41], 'modrm':None, 'prefix':[0x66]}))
arch_ext = 4
class dpps(DispatchInstruction):
dispatch = (
(xmm_xmm_imm8, {'opcode':[0x0F, 0x3A, 0x40], 'modrm':None, 'prefix':[0x66]}),
(xmm_mem128_imm8, {'opcode':[0x0F, 0x3A, 0x40], 'modrm':None, 'prefix':[0x66]}))
arch_ext = 4
class emms(Instruction):
machine_inst = no_op
params = {'opcode':[0x0F, 0x77],'modrm':None}
arch_ext = 0
class extractps(DispatchInstruction):
dispatch = (
(reg64_xmm_imm8_rev, {'opcode':[0x0F, 0x3A, 0x17], 'modrm':None, 'prefix':[0x66]}),
(reg32_xmm_imm8_rev, {'opcode':[0x0F, 0x3A, 0x17], 'modrm':None, 'prefix':[0x66]}),
(mem32_xmm_imm8, {'opcode':[0x0F, 0x3A, 0x17], 'modrm':None, 'prefix':[0x66]}))
  # TODO - ugh, this makes the printer not emit 'dword' for the mem32 case
#arch_ext = 4
class extrq(DispatchInstruction):
dispatch = (
(xmm_imm8_imm8, {'opcode':[0x0F, 0x78], 'modrm':0x00, 'prefix':[0x66]}),
(xmm_xmm, {'opcode':[0x0F, 0x79], 'modrm':None, 'prefix':[0x66]}))
arch_ext = 4
class haddpd(DispatchInstruction):
dispatch = (
(xmm_xmm, {'opcode':[0x0F, 0x7C], 'modrm':None, 'prefix':[0x66]}),
(xmm_mem128, {'opcode':[0x0F, 0x7C], 'modrm':None, 'prefix':[0x66]}))
arch_ext = 3
class haddps(DispatchInstruction):
dispatch = (
(xmm_xmm, {'opcode':[0x0F, 0x7C], 'modrm':None, 'prefix':[0xF2]}),
(xmm_mem128, {'opcode':[0x0F, 0x7C], 'modrm':None, 'prefix':[0xF2]}))
arch_ext = 3
class hsubpd(DispatchInstruction):
dispatch = (
(xmm_xmm, {'opcode':[0x0F, 0x7D], 'modrm':None, 'prefix':[0x66]}),
(xmm_mem128, {'opcode':[0x0F, 0x7D], 'modrm':None, 'prefix':[0x66]}))
arch_ext = 3
class hsubps(DispatchInstruction):
dispatch = (
(xmm_xmm, {'opcode':[0x0F, 0x7D], 'modrm':None, 'prefix':[0xF2]}),
(xmm_mem128, {'opcode':[0x0F, 0x7D], 'modrm':None, 'prefix':[0xF2]}))
arch_ext = 3
class insertps(DispatchInstruction):
dispatch = (
(xmm_xmm_imm8, {'opcode':[0x0F, 0x3A, 0x21], 'modrm':None, 'prefix':[0x66]}),
(xmm_mem32_imm8, {'opcode':[0x0F, 0x3A, 0x21], 'modrm':None, 'prefix':[0x66]}))
arch_ext = 4
class insertq(DispatchInstruction):
dispatch = (
(xmm_xmm_imm8_imm8, {'opcode':[0x0F, 0x78], 'modrm':None, 'prefix':[0xF2]}),
(xmm_xmm, {'opcode':[0x0F, 0x79], 'modrm':None, 'prefix':[0xF2]}))
arch_ext = 4
class lddqu(Instruction):
machine_inst = xmm_mem128
params = {'opcode':[0x0F, 0xF0],'modrm':None, 'prefix':[0xF2]}
arch_ext = 3
class ldmxcsr(Instruction):
machine_inst = mem32
params = {'opcode':[0x0F, 0xAE],'modrm':0x10}
arch_ext = 1
class maskmovdqu(Instruction):
machine_inst = xmm_xmm
params = {'opcode':[0x0F, 0xF7],'modrm':None, 'prefix':[0x66]}
arch_ext = 2
class maskmovq(Instruction):
machine_inst = mmx_mmx
params = {'opcode':[0x0F, 0xF7],'modrm':None}
arch_ext = 1
class maxpd(DispatchInstruction):
dispatch = (
(xmm_xmm, {'opcode':[0x0F, 0x5F], 'modrm':None, 'prefix':[0x66]}),
(xmm_mem128, {'opcode':[0x0F, 0x5F], 'modrm':None, 'prefix':[0x66]}))
arch_ext = 2
class maxps(DispatchInstruction):
dispatch = (
(xmm_xmm, {'opcode':[0x0F, 0x5F], 'modrm':None, 'prefix':[]}),
(xmm_mem128, {'opcode':[0x0F, 0x5F], 'modrm':None, 'prefix':[]}))
arch_ext = 1
class maxsd(DispatchInstruction):
dispatch = (
(xmm_xmm, {'opcode':[0x0F, 0x5F], 'modrm':None, 'prefix':[0xF2]}),
(xmm_mem64, {'opcode':[0x0F, 0x5F], 'modrm':None, 'prefix':[0xF2]}))
arch_ext = 2
class maxss(DispatchInstruction):
dispatch = (
(xmm_xmm, {'opcode':[0x0F, 0x5F], 'modrm':None, 'prefix':[0xF3]}),
(xmm_mem32, {'opcode':[0x0F, 0x5F], 'modrm':None, 'prefix':[0xF3]}))
arch_ext = 1
class minpd(DispatchInstruction):
dispatch = (
(xmm_xmm, {'opcode':[0x0F, 0x5D], 'modrm':None, 'prefix':[0x66]}),
(xmm_mem128, {'opcode':[0x0F, 0x5D], 'modrm':None, 'prefix':[0x66]}))
arch_ext = 2
class minps(DispatchInstruction):
dispatch = (
(xmm_xmm, {'opcode':[0x0F, 0x5D], 'modrm':None, 'prefix':[]}),
(xmm_mem128, {'opcode':[0x0F, 0x5D], 'modrm':None, 'prefix':[]}))
arch_ext = 1
class minsd(DispatchInstruction):
dispatch = (
(xmm_xmm, {'opcode':[0x0F, 0x5D], 'modrm':None, 'prefix':[0xF2]}),
(xmm_mem64, {'opcode':[0x0F, 0x5D], 'modrm':None, 'prefix':[0xF2]}))
arch_ext = 2
class minss(DispatchInstruction):
dispatch = (
(xmm_xmm, {'opcode':[0x0F, 0x5D], 'modrm':None, 'prefix':[0xF3]}),
(xmm_mem32, {'opcode':[0x0F, 0x5D], 'modrm':None, 'prefix':[0xF3]}))
arch_ext = 1
class movapd(DispatchInstruction):
dispatch = (
(xmm_xmm, {'opcode':[0x0F, 0x28], 'modrm':None, 'prefix':[0x66]}),
(xmm_mem128, {'opcode':[0x0F, 0x28], 'modrm':None, 'prefix':[0x66]}),
(mem128_xmm, {'opcode':[0x0F, 0x29], 'modrm':None, 'prefix':[0x66]}))
arch_ext = 2
class movaps(DispatchInstruction):
dispatch = (
(xmm_xmm, {'opcode':[0x0F, 0x28], 'modrm':None, 'prefix':[]}),
(xmm_mem128, {'opcode':[0x0F, 0x28], 'modrm':None, 'prefix':[]}),
(mem128_xmm, {'opcode':[0x0F, 0x29], 'modrm':None, 'prefix':[]}))
arch_ext = 1
class movd(DispatchInstruction):
dispatch = (
# TODO - these are valid according to AMD64, but not according to Intel 64
#(xmm_reg64, {'opcode':[0x0F, 0x6E], 'modrm':None, 'prefix':[0x66]}),
#(xmm_mem64, {'opcode':[0x0F, 0x6E], 'modrm':None, 'prefix':[0x66]}),
#(mem64_xmm, {'opcode':[0x0F, 0x7E], 'modrm':None, 'prefix':[0x66]}),
#(reg64_xmm_rev, {'opcode':[0x0F, 0x7E], 'modrm':None, 'prefix':[0x66]}),
#(mmx_reg64, {'opcode':[0x0F, 0x6E], 'modrm':None}),
#(mmx_mem64, {'opcode':[0x0F, 0x6E], 'modrm':None}),
#(mem64_mmx, {'opcode':[0x0F, 0x7E], 'modrm':None}),
#(reg64_mmx_rev, {'opcode':[0x0F, 0x7E], 'modrm':None}),
(xmm_reg32, {'opcode':[0x0F, 0x6E], 'modrm':None, 'prefix':[0x66]}),
(xmm_mem32, {'opcode':[0x0F, 0x6E], 'modrm':None, 'prefix':[0x66]}),
(mem32_xmm, {'opcode':[0x0F, 0x7E], 'modrm':None, 'prefix':[0x66]}),
(reg32_xmm_rev, {'opcode':[0x0F, 0x7E], 'modrm':None, 'prefix':[0x66]}),
(mmx_reg32, {'opcode':[0x0F, 0x6E], 'modrm':None}),
(mmx_mem32, {'opcode':[0x0F, 0x6E], 'modrm':None}),
(mem32_mmx, {'opcode':[0x0F, 0x7E], 'modrm':None}),
(reg32_mmx_rev, {'opcode':[0x0F, 0x7E], 'modrm':None}))
arch_ext = 2 # TODO - err, some are 2, some are 0
class movddup(DispatchInstruction):
dispatch = (
(xmm_xmm, {'opcode':[0x0F, 0x12], 'modrm':None, 'prefix':[0xF2]}),
(xmm_mem64, {'opcode':[0x0F, 0x12], 'modrm':None, 'prefix':[0xF2]}))
arch_ext = 3
class movdq2q(Instruction):
machine_inst = mmx_xmm
params = {'opcode':[0x0F, 0xD6],'modrm':None, 'prefix':[0xF2]}
arch_ext = 2
class movdqa(DispatchInstruction):
dispatch = (
(xmm_xmm, {'opcode':[0x0F, 0x6F], 'modrm':None, 'prefix':[0x66]}),
(xmm_mem128, {'opcode':[0x0F, 0x6F], 'modrm':None, 'prefix':[0x66]}),
(mem128_xmm, {'opcode':[0x0F, 0x7F], 'modrm':None, 'prefix':[0x66]}))
arch_ext = 2
class movdqu(DispatchInstruction):
dispatch = (
(xmm_xmm, {'opcode':[0x0F, 0x6F], 'modrm':None, 'prefix':[0xF3]}),
(xmm_mem128, {'opcode':[0x0F, 0x6F], 'modrm':None, 'prefix':[0xF3]}),
(mem128_xmm, {'opcode':[0x0F, 0x7F], 'modrm':None, 'prefix':[0xF3]}))
arch_ext = 2
class movhlps(Instruction):
machine_inst = xmm_xmm
params = {'opcode':[0x0F, 0x12],'modrm':None, 'prefix':[]}
arch_ext = 1
class movhpd(DispatchInstruction):
dispatch = (
(xmm_mem64, {'opcode':[0x0F, 0x16], 'modrm':None, 'prefix':[0x66]}),
(mem64_xmm, {'opcode':[0x0F, 0x17], 'modrm':None, 'prefix':[0x66]}))
arch_ext = 2
class movhps(DispatchInstruction):
dispatch = (
(xmm_mem64, {'opcode':[0x0F, 0x16], 'modrm':None, 'prefix':[]}),
(mem64_xmm, {'opcode':[0x0F, 0x17], 'modrm':None, 'prefix':[]}))
arch_ext = 1
class movlhps(Instruction):
machine_inst = xmm_xmm
params = {'opcode':[0x0F, 0x16], 'modrm':None, 'prefix':[]}
arch_ext = 1
class movlpd(DispatchInstruction):
dispatch = (
(xmm_mem64, {'opcode':[0x0F, 0x12], 'modrm':None, 'prefix':[0x66]}),
(mem64_xmm, {'opcode':[0x0F, 0x13], 'modrm':None, 'prefix':[0x66]}))
arch_ext = 2
class movlps(DispatchInstruction):
dispatch = (
(xmm_mem64, {'opcode':[0x0F, 0x12], 'modrm':None, 'prefix':[]}),
(mem64_xmm, {'opcode':[0x0F, 0x13], 'modrm':None, 'prefix':[]}))
arch_ext = 1
class movmskpd(Instruction):
machine_inst = reg32_xmm
params = {'opcode':[0x0F, 0x50], 'modrm':None, 'prefix':[0x66]}
arch_ext = 2
class movmskps(Instruction):
machine_inst = reg32_xmm
params = {'opcode':[0x0F, 0x50], 'modrm':None, 'prefix':[]}
  arch_ext = 1
class movntdq(Instruction):
machine_inst = mem128_xmm
params = {'opcode':[0x0F, 0xE7], 'modrm':None, 'prefix':[0x66]}
arch_ext = 2
class movntdqa(Instruction):
machine_inst = xmm_mem128
params = {'opcode':[0x0F, 0x38, 0x2A], 'modrm':None, 'prefix':[0x66]}
arch_ext = 4
class movntpd(Instruction):
machine_inst = mem128_xmm
params = {'opcode':[0x0F, 0x2B], 'modrm':None, 'prefix':[0x66]}
arch_ext = 2
class movntps(Instruction):
machine_inst = mem128_xmm
params = {'opcode':[0x0F, 0x2B], 'modrm':None, 'prefix':[]}
arch_ext = 2
class movntq(Instruction):
machine_inst = mem64_mmx
params = {'opcode':[0x0F, 0xE7], 'modrm':None, 'prefix':[]}
arch_ext = 1
class movntsd(Instruction):
machine_inst = mem64_xmm
params = {'opcode':[0x0F, 0x2B], 'modrm':None, 'prefix':[0xF2]}
arch_ext = 4
class movntss(Instruction):
machine_inst = mem32_xmm
params = {'opcode':[0x0F, 0x2B], 'modrm':None, 'prefix':[0xF3]}
arch_ext = 4
class movq(DispatchInstruction):
dispatch = (
# TODO - first 4 are defined by Intel 64 but not AMD64
(xmm_reg64, {'opcode':[0x0F, 0x6E], 'modrm':None, 'prefix':[0x66]}),
(reg64_xmm_rev, {'opcode':[0x0F, 0x7E], 'modrm':None, 'prefix':[0x66]}),
(mmx_reg64, {'opcode':[0x0F, 0x6E], 'modrm':None}),
(reg64_mmx_rev, {'opcode':[0x0F, 0x7E], 'modrm':None}),
(xmm_xmm, {'opcode':[0x0F, 0x7E], 'modrm':None, 'prefix':[0xF3]}),
(xmm_mem64, {'opcode':[0x0F, 0x7E], 'modrm':None, 'prefix':[0xF3]}),
(mem64_xmm, {'opcode':[0x0F, 0xD6], 'modrm':None, 'prefix':[0x66]}),
(mmx_mmx, {'opcode':[0x0F, 0x6F], 'modrm':None}),
(mmx_mem64, {'opcode':[0x0F, 0x6F], 'modrm':None}),
(mem64_mmx, {'opcode':[0x0F, 0x7F], 'modrm':None}))
arch_ext = 2 # TODO - err, some are 2, some are 0
class movq2dq(Instruction):
machine_inst = xmm_mmx
params = {'opcode':[0x0F, 0xD6], 'modrm':None, 'prefix':[0xF3]}
arch_ext = 2
class movsd(DispatchInstruction):
dispatch = (
(xmm_xmm, {'opcode':[0x0F, 0x10], 'modrm':None, 'prefix':[0xF2]}),
(xmm_mem64, {'opcode':[0x0F, 0x10], 'modrm':None, 'prefix':[0xF2]}),
(mem64_xmm, {'opcode':[0x0F, 0x11], 'modrm':None, 'prefix':[0xF2]}))
arch_ext = 2
class movshdup(DispatchInstruction):
dispatch = (
(xmm_xmm, {'opcode':[0x0F, 0x16], 'modrm':None, 'prefix':[0xF3]}),
(xmm_mem128, {'opcode':[0x0F, 0x16], 'modrm':None, 'prefix':[0xF3]}))
arch_ext = 3
class movsldup(DispatchInstruction):
dispatch = (
(xmm_xmm, {'opcode':[0x0F, 0x12], 'modrm':None, 'prefix':[0xF3]}),
(xmm_mem128, {'opcode':[0x0F, 0x12], 'modrm':None, 'prefix':[0xF3]}))
arch_ext = 3
class movss(DispatchInstruction):
dispatch = (
(xmm_xmm, {'opcode':[0x0F, 0x10], 'modrm':None, 'prefix':[0xF3]}),
(xmm_mem32, {'opcode':[0x0F, 0x10], 'modrm':None, 'prefix':[0xF3]}),
(mem32_xmm, {'opcode':[0x0F, 0x11], 'modrm':None, 'prefix':[0xF3]}))
arch_ext = 1
class movupd(DispatchInstruction):
dispatch = (
(xmm_xmm, {'opcode':[0x0F, 0x10], 'modrm':None, 'prefix':[0x66]}),
(xmm_mem128, {'opcode':[0x0F, 0x10], 'modrm':None, 'prefix':[0x66]}),
(mem128_xmm, {'opcode':[0x0F, 0x11], 'modrm':None, 'prefix':[0x66]}))
arch_ext = 2
class movups(DispatchInstruction):
dispatch = (
(xmm_xmm, {'opcode':[0x0F, 0x10], 'modrm':None, 'prefix':[]}),
(xmm_mem128, {'opcode':[0x0F, 0x10], 'modrm':None, 'prefix':[]}),
(mem128_xmm, {'opcode':[0x0F, 0x11], 'modrm':None, 'prefix':[]}))
arch_ext = 1
class mpsadbw(DispatchInstruction):
dispatch = (
(xmm_xmm_imm8, {'opcode':[0x0F, 0x3A, 0x42], 'modrm':None, 'prefix':[0x66]}),
(xmm_mem128_imm8, {'opcode':[0x0F, 0x3A, 0x42], 'modrm':None, 'prefix':[0x66]}))
arch_ext = 4
class mulpd(DispatchInstruction):
dispatch = (
(xmm_xmm, {'opcode':[0x0F, 0x59], 'modrm':None, 'prefix':[0x66]}),
(xmm_mem128, {'opcode':[0x0F, 0x59], 'modrm':None, 'prefix':[0x66]}))
arch_ext = 2
class mulps(DispatchInstruction):
dispatch = (
(xmm_xmm, {'opcode':[0x0F, 0x59], 'modrm':None, 'prefix':[]}),
(xmm_mem128, {'opcode':[0x0F, 0x59], 'modrm':None, 'prefix':[]}))
arch_ext = 1
class mulsd(DispatchInstruction):
dispatch = (
(xmm_xmm, {'opcode':[0x0F, 0x59], 'modrm':None, 'prefix':[0xF2]}),
(xmm_mem64, {'opcode':[0x0F, 0x59], 'modrm':None, 'prefix':[0xF2]}))
arch_ext = 2
class mulss(DispatchInstruction):
dispatch = (
(xmm_xmm, {'opcode':[0x0F, 0x59], 'modrm':None, 'prefix':[0xF3]}),
(xmm_mem32, {'opcode':[0x0F, 0x59], 'modrm':None, 'prefix':[0xF3]}))
arch_ext = 1
class orpd(DispatchInstruction):
dispatch = (
(xmm_xmm, {'opcode':[0x0F, 0x56], 'modrm':None, 'prefix':[0x66]}),
(xmm_mem128, {'opcode':[0x0F, 0x56], 'modrm':None, 'prefix':[0x66]}))
arch_ext = 2
class orps(DispatchInstruction):
dispatch = (
(xmm_xmm, {'opcode':[0x0F, 0x56], 'modrm':None, 'prefix':[]}),
(xmm_mem128, {'opcode':[0x0F, 0x56], 'modrm':None, 'prefix':[]}))
arch_ext = 1
class pabsb(DispatchInstruction):
dispatch = (
(xmm_xmm, {'opcode':[0x0F, 0x38, 0x1C], 'modrm':None, 'prefix':[0x66]}),
(xmm_mem128, {'opcode':[0x0F, 0x38, 0x1C], 'modrm':None, 'prefix':[0x66]}),
(mmx_mmx, {'opcode':[0x0F, 0x38, 0x1C], 'modrm':None}),
(mmx_mem64, {'opcode':[0x0F, 0x38, 0x1C], 'modrm':None}))
arch_ext = 3
class pabsd(DispatchInstruction):
dispatch = (
(xmm_xmm, {'opcode':[0x0F, 0x38, 0x1E], 'modrm':None, 'prefix':[0x66]}),
(xmm_mem128, {'opcode':[0x0F, 0x38, 0x1E], 'modrm':None, 'prefix':[0x66]}),
(mmx_mmx, {'opcode':[0x0F, 0x38, 0x1E], 'modrm':None}),
(mmx_mem64, {'opcode':[0x0F, 0x38, 0x1E], 'modrm':None}))
arch_ext = 3
class pabsw(DispatchInstruction):
dispatch = (
(xmm_xmm, {'opcode':[0x0F, 0x38, 0x1D], 'modrm':None, 'prefix':[0x66]}),
(xmm_mem128, {'opcode':[0x0F, 0x38, 0x1D], 'modrm':None, 'prefix':[0x66]}),
(mmx_mmx, {'opcode':[0x0F, 0x38, 0x1D], 'modrm':None}),
(mmx_mem64, {'opcode':[0x0F, 0x38, 0x1D], 'modrm':None}))
arch_ext = 3
class packssdw(DispatchInstruction):
dispatch = (
(xmm_xmm, {'opcode':[0x0F, 0x6B], 'modrm':None, 'prefix':[0x66]}),
(xmm_mem128, {'opcode':[0x0F, 0x6B], 'modrm':None, 'prefix':[0x66]}),
(mmx_mmx, {'opcode':[0x0F, 0x6B], 'modrm':None}),
(mmx_mem64, {'opcode':[0x0F, 0x6B], 'modrm':None}))
arch_ext = 2 # TODO - err, some are 2, some are 0
class packsswb(DispatchInstruction):
dispatch = (
(xmm_xmm, {'opcode':[0x0F, 0x63], 'modrm':None, 'prefix':[0x66]}),
(xmm_mem128, {'opcode':[0x0F, 0x63], 'modrm':None, 'prefix':[0x66]}),
(mmx_mmx, {'opcode':[0x0F, 0x63], 'modrm':None}),
(mmx_mem64, {'opcode':[0x0F, 0x63], 'modrm':None}))
arch_ext = 2 # TODO - err, some are 2, some are 0
class packusdw(DispatchInstruction):
dispatch = (
(xmm_xmm, {'opcode':[0x0F, 0x38, 0x2B], 'modrm':None, 'prefix':[0x66]}),
(xmm_mem128, {'opcode':[0x0F, 0x38, 0x2B], 'modrm':None, 'prefix':[0x66]}))
arch_ext = 4
class packuswb(DispatchInstruction):
dispatch = (
(xmm_xmm, {'opcode':[0x0F, 0x67], 'modrm':None, 'prefix':[0x66]}),
(xmm_mem128, {'opcode':[0x0F, 0x67], 'modrm':None, 'prefix':[0x66]}),
(mmx_mmx, {'opcode':[0x0F, 0x67], 'modrm':None}),
(mmx_mem64, {'opcode':[0x0F, 0x67], 'modrm':None}))
arch_ext = 2 # TODO - err, some are 2, some are 0
class paddb(DispatchInstruction):
dispatch = (
(xmm_xmm, {'opcode':[0x0F, 0xFC], 'modrm':None, 'prefix':[0x66]}),
(xmm_mem128, {'opcode':[0x0F, 0xFC], 'modrm':None, 'prefix':[0x66]}),
(mmx_mmx, {'opcode':[0x0F, 0xFC], 'modrm':None}),
(mmx_mem64, {'opcode':[0x0F, 0xFC], 'modrm':None}))
arch_ext = 2 # TODO - err, some are 2, some are 0
class paddd(DispatchInstruction):
dispatch = (
(xmm_xmm, {'opcode':[0x0F, 0xFE], 'modrm':None, 'prefix':[0x66]}),
(xmm_mem128, {'opcode':[0x0F, 0xFE], 'modrm':None, 'prefix':[0x66]}),
(mmx_mmx, {'opcode':[0x0F, 0xFE], 'modrm':None}),
(mmx_mem64, {'opcode':[0x0F, 0xFE], 'modrm':None}))
arch_ext = 2 # TODO - err, some are 2, some are 0
class paddq(DispatchInstruction):
dispatch = (
(xmm_xmm, {'opcode':[0x0F, 0xD4], 'modrm':None, 'prefix':[0x66]}),
(xmm_mem128, {'opcode':[0x0F, 0xD4], 'modrm':None, 'prefix':[0x66]}),
(mmx_mmx, {'opcode':[0x0F, 0xD4], 'modrm':None}),
(mmx_mem64, {'opcode':[0x0F, 0xD4], 'modrm':None}))
arch_ext = 2 # TODO - err, some are 2, some are 0
class paddsb(DispatchInstruction):
dispatch = (
(xmm_xmm, {'opcode':[0x0F, 0xEC], 'modrm':None, 'prefix':[0x66]}),
(xmm_mem128, {'opcode':[0x0F, 0xEC], 'modrm':None, 'prefix':[0x66]}),
(mmx_mmx, {'opcode':[0x0F, 0xEC], 'modrm':None}),
(mmx_mem64, {'opcode':[0x0F, 0xEC], 'modrm':None}))
arch_ext = 2 # TODO - err, some are 2, some are 0
class paddsw(DispatchInstruction):
dispatch = (
(xmm_xmm, {'opcode':[0x0F, 0xED], 'modrm':None, 'prefix':[0x66]}),
(xmm_mem128, {'opcode':[0x0F, 0xED], 'modrm':None, 'prefix':[0x66]}),
(mmx_mmx, {'opcode':[0x0F, 0xED], 'modrm':None}),
(mmx_mem64, {'opcode':[0x0F, 0xED], 'modrm':None}))
arch_ext = 2 # TODO - err, some are 2, some are 0
class paddusb(DispatchInstruction):
dispatch = (
(xmm_xmm, {'opcode':[0x0F, 0xDC], 'modrm':None, 'prefix':[0x66]}),
(xmm_mem128, {'opcode':[0x0F, 0xDC], 'modrm':None, 'prefix':[0x66]}),
(mmx_mmx, {'opcode':[0x0F, 0xDC], 'modrm':None}),
(mmx_mem64, {'opcode':[0x0F, 0xDC], 'modrm':None}))
arch_ext = 0
class paddusw(DispatchInstruction):
dispatch = (
(xmm_xmm, {'opcode':[0x0F, 0xDD], 'modrm':None, 'prefix':[0x66]}),
(xmm_mem128, {'opcode':[0x0F, 0xDD], 'modrm':None, 'prefix':[0x66]}),
(mmx_mmx, {'opcode':[0x0F, 0xDD], 'modrm':None}),
(mmx_mem64, {'opcode':[0x0F, 0xDD], 'modrm':None}))
arch_ext = 0
class paddw(DispatchInstruction):
dispatch = (
(xmm_xmm, {'opcode':[0x0F, 0xFD], 'modrm':None, 'prefix':[0x66]}),
(xmm_mem128, {'opcode':[0x0F, 0xFD], 'modrm':None, 'prefix':[0x66]}),
(mmx_mmx, {'opcode':[0x0F, 0xFD], 'modrm':None}),
(mmx_mem64, {'opcode':[0x0F, 0xFD], 'modrm':None}))
arch_ext = 2 # TODO - err, some are 2, some are 0
class palignr(DispatchInstruction):
dispatch = (
(xmm_xmm_imm8, {'opcode':[0x0F, 0x3A, 0x0F], 'modrm':None, 'prefix':[0x66]}),
(xmm_mem128_imm8,{'opcode':[0x0F, 0x3A, 0x0F], 'modrm':None, 'prefix':[0x66]}),
(mmx_mmx_imm8, {'opcode':[0x0F, 0x3A, 0x0F], 'modrm':None}),
(mmx_mem64_imm8, {'opcode':[0x0F, 0x3A, 0x0F], 'modrm':None}))
arch_ext = 3
class pand(DispatchInstruction):
dispatch = (
(xmm_xmm, {'opcode':[0x0F, 0xDB], 'modrm':None, 'prefix':[0x66]}),
(xmm_mem128, {'opcode':[0x0F, 0xDB], 'modrm':None, 'prefix':[0x66]}),
(mmx_mmx, {'opcode':[0x0F, 0xDB], 'modrm':None}),
(mmx_mem64, {'opcode':[0x0F, 0xDB], 'modrm':None}))
arch_ext = 2 # TODO - err, some are 2, some are 0
class pandn(DispatchInstruction):
dispatch = (
(xmm_xmm, {'opcode':[0x0F, 0xDF], 'modrm':None, 'prefix':[0x66]}),
(xmm_mem128, {'opcode':[0x0F, 0xDF], 'modrm':None, 'prefix':[0x66]}),
(mmx_mmx, {'opcode':[0x0F, 0xDF], 'modrm':None}),
(mmx_mem64, {'opcode':[0x0F, 0xDF], 'modrm':None}))
arch_ext = 2 # TODO - err, some are 2, some are 0
class pavgb(DispatchInstruction):
dispatch = (
(xmm_xmm, {'opcode':[0x0F, 0xE0], 'modrm':None, 'prefix':[0x66]}),
(xmm_mem128, {'opcode':[0x0F, 0xE0], 'modrm':None, 'prefix':[0x66]}),
(mmx_mmx, {'opcode':[0x0F, 0xE0], 'modrm':None}),
(mmx_mem64, {'opcode':[0x0F, 0xE0], 'modrm':None}))
arch_ext = 2 # TODO - err, some are 2, some are 0
class pavgw(DispatchInstruction):
dispatch = (
(xmm_xmm, {'opcode':[0x0F, 0xE3], 'modrm':None, 'prefix':[0x66]}),
(xmm_mem128, {'opcode':[0x0F, 0xE3], 'modrm':None, 'prefix':[0x66]}),
(mmx_mmx, {'opcode':[0x0F, 0xE3], 'modrm':None}),
(mmx_mem64, {'opcode':[0x0F, 0xE3], 'modrm':None}))
arch_ext = 2 # TODO - err, some are 2, some are 0
class pblendvb(DispatchInstruction):
dispatch = (
(xmm_xmm, {'opcode':[0x0F, 0x38, 0x10], 'modrm':None, 'prefix':[0x66]}),
(xmm_mem128, {'opcode':[0x0F, 0x38, 0x10], 'modrm':None, 'prefix':[0x66]}))
arch_ext = 4
class pblendw(DispatchInstruction):
dispatch = (
(xmm_xmm_imm8, {'opcode':[0x0F, 0x3A, 0x0E], 'modrm':None, 'prefix':[0x66]}),
(xmm_mem128_imm8, {'opcode':[0x0F, 0x3A, 0x0E], 'modrm':None, 'prefix':[0x66]}))
arch_ext = 4
class pcmpeqb(DispatchInstruction):
dispatch = (
(xmm_xmm, {'opcode':[0x0F, 0x74], 'modrm':None, 'prefix':[0x66]}),
(xmm_mem128, {'opcode':[0x0F, 0x74], 'modrm':None, 'prefix':[0x66]}),
(mmx_mmx, {'opcode':[0x0F, 0x74], 'modrm':None}),
(mmx_mem64, {'opcode':[0x0F, 0x74], 'modrm':None}))
arch_ext = 2 # TODO - err, some are 2, some are 0
class pcmpeqd(DispatchInstruction):
dispatch = (
(xmm_xmm, {'opcode':[0x0F, 0x76], 'modrm':None, 'prefix':[0x66]}),
(xmm_mem128, {'opcode':[0x0F, 0x76], 'modrm':None, 'prefix':[0x66]}),
(mmx_mmx, {'opcode':[0x0F, 0x76], 'modrm':None}),
(mmx_mem64, {'opcode':[0x0F, 0x76], 'modrm':None}))
arch_ext = 2 # TODO - err, some are 2, some are 0
class pcmpeqq(DispatchInstruction):
dispatch = (
(xmm_xmm, {'opcode':[0x0F, 0x38, 0x29], 'modrm':None, 'prefix':[0x66]}),
(xmm_mem128, {'opcode':[0x0F, 0x38, 0x29], 'modrm':None, 'prefix':[0x66]}))
arch_ext = 4
class pcmpeqw(DispatchInstruction):
dispatch = (
(xmm_xmm, {'opcode':[0x0F, 0x75], 'modrm':None, 'prefix':[0x66]}),
(xmm_mem128, {'opcode':[0x0F, 0x75], 'modrm':None, 'prefix':[0x66]}),
(mmx_mmx, {'opcode':[0x0F, 0x75], 'modrm':None}),
(mmx_mem64, {'opcode':[0x0F, 0x75], 'modrm':None}))
arch_ext = 2 # TODO - err, some are 2, some are 0
class pcmpestri(DispatchInstruction):
dispatch = (
(xmm_xmm_imm8, {'opcode':[0x0F, 0x3A, 0x61], 'modrm':None, 'prefix':[0x66]}),
(xmm_mem128_imm8, {'opcode':[0x0F, 0x3A, 0x61], 'modrm':None, 'prefix':[0x66]}))
arch_ext = 4
class pcmpestrm(DispatchInstruction):
dispatch = (
(xmm_xmm_imm8, {'opcode':[0x0F, 0x3A, 0x60], 'modrm':None, 'prefix':[0x66]}),
(xmm_mem128_imm8, {'opcode':[0x0F, 0x3A, 0x60], 'modrm':None, 'prefix':[0x66]}))
arch_ext = 4
class pcmpgtb(DispatchInstruction):
dispatch = (
(xmm_xmm, {'opcode':[0x0F, 0x64], 'modrm':None, 'prefix':[0x66]}),
(xmm_mem128, {'opcode':[0x0F, 0x64], 'modrm':None, 'prefix':[0x66]}),
(mmx_mmx, {'opcode':[0x0F, 0x64], 'modrm':None}),
(mmx_mem64, {'opcode':[0x0F, 0x64], 'modrm':None}))
arch_ext = 2 # TODO - err, some are 2, some are 0
class pcmpgtd(DispatchInstruction):
dispatch = (
(xmm_xmm, {'opcode':[0x0F, 0x66], 'modrm':None, 'prefix':[0x66]}),
(xmm_mem128, {'opcode':[0x0F, 0x66], 'modrm':None, 'prefix':[0x66]}),
(mmx_mmx, {'opcode':[0x0F, 0x66], 'modrm':None}),
(mmx_mem64, {'opcode':[0x0F, 0x66], 'modrm':None}))
arch_ext = 2 # TODO - err, some are 2, some are 0
class pcmpgtw(DispatchInstruction):
dispatch = (
(xmm_xmm, {'opcode':[0x0F, 0x65], 'modrm':None, 'prefix':[0x66]}),
(xmm_mem128, {'opcode':[0x0F, 0x65], 'modrm':None, 'prefix':[0x66]}),
(mmx_mmx, {'opcode':[0x0F, 0x65], 'modrm':None}),
(mmx_mem64, {'opcode':[0x0F, 0x65], 'modrm':None}))
arch_ext = 2 # TODO - err, some are 2, some are 0
class pcmpgtq(DispatchInstruction):
dispatch = (
(xmm_xmm, {'opcode':[0x0F, 0x38, 0x37], 'modrm':None, 'prefix':[0x66]}),
(xmm_mem128, {'opcode':[0x0F, 0x38, 0x37], 'modrm':None, 'prefix':[0x66]}))
arch_ext = 4
class pcmpistri(DispatchInstruction):
dispatch = (
(xmm_xmm_imm8, {'opcode':[0x0F, 0x3A, 0x63], 'modrm':None, 'prefix':[0x66]}),
(xmm_mem128_imm8, {'opcode':[0x0F, 0x3A, 0x63], 'modrm':None, 'prefix':[0x66]}))
arch_ext = 4
class pcmpistrm(DispatchInstruction):
dispatch = (
(xmm_xmm_imm8, {'opcode':[0x0F, 0x3A, 0x62], 'modrm':None, 'prefix':[0x66]}),
(xmm_mem128_imm8, {'opcode':[0x0F, 0x3A, 0x62], 'modrm':None, 'prefix':[0x66]}))
arch_ext = 4
class pextrb(DispatchInstruction):
dispatch = (
(reg64_xmm_imm8_rev, {'opcode':[0x0F, 0x3A, 0x14], 'modrm':None, 'prefix':[0x66]}),
(reg32_xmm_imm8_rev, {'opcode':[0x0F, 0x3A, 0x14], 'modrm':None, 'prefix':[0x66]}),
(mem8_xmm_imm8, {'opcode':[0x0F, 0x3A, 0x14], 'modrm':None, 'prefix':[0x66]}))
arch_ext = 4
class pextrd(DispatchInstruction):
dispatch = (
(reg32_xmm_imm8_rev, {'opcode':[0x0F, 0x3A, 0x16], 'modrm':None, 'prefix':[0x66]}),
(mem32_xmm_imm8, {'opcode':[0x0F, 0x3A, 0x16], 'modrm':None, 'prefix':[0x66]}))
arch_ext = 4
class pextrq(DispatchInstruction):
dispatch = (
(reg64_xmm_imm8_rev, {'opcode':[0x0F, 0x3A, 0x16], 'modrm':None, 'prefix':[0x66]}),
(mem64_xmm_imm8, {'opcode':[0x0F, 0x3A, 0x16], 'modrm':None, 'prefix':[0x66]}))
arch_ext = 4
class pextrw(DispatchInstruction):
dispatch = (
(reg64_xmm_imm8, {'opcode':[0x0F, 0xC5], 'modrm':None, 'prefix':[0x66]}),
(reg32_xmm_imm8, {'opcode':[0x0F, 0xC5], 'modrm':None, 'prefix':[0x66]}),
(mem16_xmm_imm8, {'opcode':[0x0F, 0x3A, 0x15], 'modrm':None, 'prefix':[0x66]}),
(reg64_mmx_imm8, {'opcode':[0x0F, 0xC5], 'modrm':None}),
(reg32_mmx_imm8, {'opcode':[0x0F, 0xC5], 'modrm':None}))
  arch_ext = 1 # TODO - the mem16 form is SSE4.1
class phaddsw(DispatchInstruction):
dispatch = (
(xmm_xmm, {'opcode':[0x0F, 0x38, 0x03], 'modrm':None, 'prefix':[0x66]}),
(xmm_mem128, {'opcode':[0x0F, 0x38, 0x03], 'modrm':None, 'prefix':[0x66]}),
(mmx_mmx, {'opcode':[0x0F, 0x38, 0x03], 'modrm':None}),
(mmx_mem64, {'opcode':[0x0F, 0x38, 0x03], 'modrm':None}))
arch_ext = 3
class phaddw(DispatchInstruction):
dispatch = (
(xmm_xmm, {'opcode':[0x0F, 0x38, 0x01], 'modrm':None, 'prefix':[0x66]}),
(xmm_mem128, {'opcode':[0x0F, 0x38, 0x01], 'modrm':None, 'prefix':[0x66]}),
(mmx_mmx, {'opcode':[0x0F, 0x38, 0x01], 'modrm':None}),
(mmx_mem64, {'opcode':[0x0F, 0x38, 0x01], 'modrm':None}))
arch_ext = 3
class phaddd(DispatchInstruction):
dispatch = (
(xmm_xmm, {'opcode':[0x0F, 0x38, 0x02], 'modrm':None, 'prefix':[0x66]}),
(xmm_mem128, {'opcode':[0x0F, 0x38, 0x02], 'modrm':None, 'prefix':[0x66]}),
(mmx_mmx, {'opcode':[0x0F, 0x38, 0x02], 'modrm':None}),
(mmx_mem64, {'opcode':[0x0F, 0x38, 0x02], 'modrm':None}))
arch_ext = 3
class phminposuw(DispatchInstruction):
dispatch = (
(xmm_xmm, {'opcode':[0x0F, 0x38, 0x41], 'modrm':None, 'prefix':[0x66]}),
(xmm_mem128, {'opcode':[0x0F, 0x38, 0x41], 'modrm':None, 'prefix':[0x66]}))
arch_ext = 4
class phsubsw(DispatchInstruction):
dispatch = (
(xmm_xmm, {'opcode':[0x0F, 0x38, 0x07], 'modrm':None, 'prefix':[0x66]}),
(xmm_mem128, {'opcode':[0x0F, 0x38, 0x07], 'modrm':None, 'prefix':[0x66]}),
(mmx_mmx, {'opcode':[0x0F, 0x38, 0x07], 'modrm':None}),
(mmx_mem64, {'opcode':[0x0F, 0x38, 0x07], 'modrm':None}))
arch_ext = 3
class phsubw(DispatchInstruction):
dispatch = (
(xmm_xmm, {'opcode':[0x0F, 0x38, 0x05], 'modrm':None, 'prefix':[0x66]}),
(xmm_mem128, {'opcode':[0x0F, 0x38, 0x05], 'modrm':None, 'prefix':[0x66]}),
(mmx_mmx, {'opcode':[0x0F, 0x38, 0x05], 'modrm':None}),
(mmx_mem64, {'opcode':[0x0F, 0x38, 0x05], 'modrm':None}))
arch_ext = 3
class phsubd(DispatchInstruction):
dispatch = (
(xmm_xmm, {'opcode':[0x0F, 0x38, 0x06], 'modrm':None, 'prefix':[0x66]}),
(xmm_mem128, {'opcode':[0x0F, 0x38, 0x06], 'modrm':None, 'prefix':[0x66]}),
(mmx_mmx, {'opcode':[0x0F, 0x38, 0x06], 'modrm':None}),
(mmx_mem64, {'opcode':[0x0F, 0x38, 0x06], 'modrm':None}))
arch_ext = 3
class pinsrb(DispatchInstruction):
dispatch = (
(xmm_reg32_imm8, {'opcode':[0x0F, 0x3A, 0x20], 'modrm':None, 'prefix':[0x66]}),
(xmm_mem8_imm8, {'opcode':[0x0F, 0x3A, 0x20], 'modrm':None, 'prefix':[0x66]}))
arch_ext = 4
class pinsrd(DispatchInstruction):
dispatch = (
(xmm_reg32_imm8, {'opcode':[0x0F, 0x3A, 0x22], 'modrm':None, 'prefix':[0x66]}),
(xmm_mem32_imm8, {'opcode':[0x0F, 0x3A, 0x22], 'modrm':None, 'prefix':[0x66]}))
arch_ext = 4
class pinsrq(DispatchInstruction):
dispatch = (
(xmm_reg64_imm8, {'opcode':[0x0F, 0x3A, 0x22], 'modrm':None, 'prefix':[0x66]}),
(xmm_mem64_imm8, {'opcode':[0x0F, 0x3A, 0x22], 'modrm':None, 'prefix':[0x66]}))
arch_ext = 4
class pinsrw(DispatchInstruction):
dispatch = (
(xmm_reg32_imm8, {'opcode':[0x0F, 0xC4], 'modrm':None, 'prefix':[0x66]}),
(xmm_mem16_imm8, {'opcode':[0x0F, 0xC4], 'modrm':None, 'prefix':[0x66]}),
(mmx_reg32_imm8, {'opcode':[0x0F, 0xC4], 'modrm':None}),
(mmx_mem16_imm8, {'opcode':[0x0F, 0xC4], 'modrm':None}))
arch_ext = 1
class pmaddubsw(DispatchInstruction):
dispatch = (
(xmm_xmm, {'opcode':[0x0F, 0x38, 0x04], 'modrm':None, 'prefix':[0x66]}),
(xmm_mem128, {'opcode':[0x0F, 0x38, 0x04], 'modrm':None, 'prefix':[0x66]}),
(mmx_mmx, {'opcode':[0x0F, 0x38, 0x04], 'modrm':None}),
(mmx_mem64, {'opcode':[0x0F, 0x38, 0x04], 'modrm':None}))
arch_ext = 3
class pmaddwd(DispatchInstruction):
dispatch = (
(xmm_xmm, {'opcode':[0x0F, 0xF5], 'modrm':None, 'prefix':[0x66]}),
(xmm_mem128, {'opcode':[0x0F, 0xF5], 'modrm':None, 'prefix':[0x66]}),
(mmx_mmx, {'opcode':[0x0F, 0xF5], 'modrm':None}),
(mmx_mem64, {'opcode':[0x0F, 0xF5], 'modrm':None}))
arch_ext = 2 # TODO - err, some are 2, some are 0
class pmaxsb(DispatchInstruction):
dispatch = (
(xmm_xmm, {'opcode':[0x0F, 0x38, 0x3C], 'modrm':None, 'prefix':[0x66]}),
(xmm_mem128, {'opcode':[0x0F, 0x38, 0x3C], 'modrm':None, 'prefix':[0x66]}))
arch_ext = 4
class pmaxsd(DispatchInstruction):
dispatch = (
(xmm_xmm, {'opcode':[0x0F, 0x38, 0x3D], 'modrm':None, 'prefix':[0x66]}),
(xmm_mem128, {'opcode':[0x0F, 0x38, 0x3D], 'modrm':None, 'prefix':[0x66]}))
arch_ext = 4
class pmaxsw(DispatchInstruction):
dispatch = (
(xmm_xmm, {'opcode':[0x0F, 0xEE], 'modrm':None, 'prefix':[0x66]}),
(xmm_mem128, {'opcode':[0x0F, 0xEE], 'modrm':None, 'prefix':[0x66]}),
(mmx_mmx, {'opcode':[0x0F, 0xEE], 'modrm':None}),
(mmx_mem64, {'opcode':[0x0F, 0xEE], 'modrm':None}))
arch_ext = 1
class pmaxub(DispatchInstruction):
dispatch = (
(xmm_xmm, {'opcode':[0x0F, 0xDE], 'modrm':None, 'prefix':[0x66]}),
(xmm_mem128, {'opcode':[0x0F, 0xDE], 'modrm':None, 'prefix':[0x66]}),
(mmx_mmx, {'opcode':[0x0F, 0xDE], 'modrm':None}),
(mmx_mem64, {'opcode':[0x0F, 0xDE], 'modrm':None}))
arch_ext = 1
class pmaxud(DispatchInstruction):
dispatch = (
(xmm_xmm, {'opcode':[0x0F, 0x38, 0x3F], 'modrm':None, 'prefix':[0x66]}),
(xmm_mem128, {'opcode':[0x0F, 0x38, 0x3F], 'modrm':None, 'prefix':[0x66]}))
arch_ext = 4
class pmaxuw(DispatchInstruction):
dispatch = (
(xmm_xmm, {'opcode':[0x0F, 0x38, 0x3E], 'modrm':None, 'prefix':[0x66]}),
(xmm_mem128, {'opcode':[0x0F, 0x38, 0x3E], 'modrm':None, 'prefix':[0x66]}))
arch_ext = 4
class pminsb(DispatchInstruction):
dispatch = (
(xmm_xmm, {'opcode':[0x0F, 0x38, 0x38], 'modrm':None, 'prefix':[0x66]}),
(xmm_mem128, {'opcode':[0x0F, 0x38, 0x38], 'modrm':None, 'prefix':[0x66]}))
arch_ext = 4
class pminsd(DispatchInstruction):
dispatch = (
(xmm_xmm, {'opcode':[0x0F, 0x38, 0x39], 'modrm':None, 'prefix':[0x66]}),
(xmm_mem128, {'opcode':[0x0F, 0x38, 0x39], 'modrm':None, 'prefix':[0x66]}))
arch_ext = 4
class pminsw(DispatchInstruction):
dispatch = (
(xmm_xmm, {'opcode':[0x0F, 0xEA], 'modrm':None, 'prefix':[0x66]}),
(xmm_mem128, {'opcode':[0x0F, 0xEA], 'modrm':None, 'prefix':[0x66]}),
(mmx_mmx, {'opcode':[0x0F, 0xEA], 'modrm':None}),
(mmx_mem64, {'opcode':[0x0F, 0xEA], 'modrm':None}))
arch_ext = 1
class pminub(DispatchInstruction):
dispatch = (
(xmm_xmm, {'opcode':[0x0F, 0xDA], 'modrm':None, 'prefix':[0x66]}),
(xmm_mem128, {'opcode':[0x0F, 0xDA], 'modrm':None, 'prefix':[0x66]}),
(mmx_mmx, {'opcode':[0x0F, 0xDA], 'modrm':None}),
(mmx_mem64, {'opcode':[0x0F, 0xDA], 'modrm':None}))
arch_ext = 1
class pminud(DispatchInstruction):
dispatch = (
(xmm_xmm, {'opcode':[0x0F, 0x38, 0x3B], 'modrm':None, 'prefix':[0x66]}),
(xmm_mem128, {'opcode':[0x0F, 0x38, 0x3B], 'modrm':None, 'prefix':[0x66]}))
arch_ext = 4
class pminuw(DispatchInstruction):
dispatch = (
(xmm_xmm, {'opcode':[0x0F, 0x38, 0x3A], 'modrm':None, 'prefix':[0x66]}),
(xmm_mem128, {'opcode':[0x0F, 0x38, 0x3A], 'modrm':None, 'prefix':[0x66]}))
arch_ext = 4
class pmovmskb(DispatchInstruction):
dispatch = (
# TODO - undocumented reg64 forms?
#(reg64_xmm, {'opcode':[0x0F, 0xD7], 'modrm':None, 'prefix':[0x66]}),
#(reg64_mmx, {'opcode':[0x0F, 0xD7], 'modrm':None}),
(reg32_xmm, {'opcode':[0x0F, 0xD7], 'modrm':None, 'prefix':[0x66]}),
(reg32_mmx, {'opcode':[0x0F, 0xD7], 'modrm':None}))
arch_ext = 2 # TODO - err, some are 2, some are 1
class pmovsxbw(DispatchInstruction):
dispatch = (
(xmm_xmm, {'opcode':[0x0F, 0x38, 0x20], 'modrm':None, 'prefix':[0x66]}),
(xmm_mem64, {'opcode':[0x0F, 0x38, 0x20], 'modrm':None, 'prefix':[0x66]}))
arch_ext = 4
class pmovsxbd(DispatchInstruction):
dispatch = (
(xmm_xmm, {'opcode':[0x0F, 0x38, 0x21], 'modrm':None, 'prefix':[0x66]}),
(xmm_mem32, {'opcode':[0x0F, 0x38, 0x21], 'modrm':None, 'prefix':[0x66]}))
arch_ext = 4
class pmovsxbq(DispatchInstruction):
dispatch = (
(xmm_xmm, {'opcode':[0x0F, 0x38, 0x22], 'modrm':None, 'prefix':[0x66]}),
(xmm_mem16, {'opcode':[0x0F, 0x38, 0x22], 'modrm':None, 'prefix':[0x66]}))
arch_ext = 4
class pmovsxwd(DispatchInstruction):
dispatch = (
(xmm_xmm, {'opcode':[0x0F, 0x38, 0x23], 'modrm':None, 'prefix':[0x66]}),
(xmm_mem64, {'opcode':[0x0F, 0x38, 0x23], 'modrm':None, 'prefix':[0x66]}))
arch_ext = 4
class pmovsxwq(DispatchInstruction):
dispatch = (
(xmm_xmm, {'opcode':[0x0F, 0x38, 0x24], 'modrm':None, 'prefix':[0x66]}),
(xmm_mem32, {'opcode':[0x0F, 0x38, 0x24], 'modrm':None, 'prefix':[0x66]}))
arch_ext = 4
class pmovsxdq(DispatchInstruction):
dispatch = (
(xmm_xmm, {'opcode':[0x0F, 0x38, 0x25], 'modrm':None, 'prefix':[0x66]}),
(xmm_mem64, {'opcode':[0x0F, 0x38, 0x25], 'modrm':None, 'prefix':[0x66]}))
arch_ext = 4
class pmovzxbw(DispatchInstruction):
dispatch = (
(xmm_xmm, {'opcode':[0x0F, 0x38, 0x30], 'modrm':None, 'prefix':[0x66]}),
(xmm_mem64, {'opcode':[0x0F, 0x38, 0x30], 'modrm':None, 'prefix':[0x66]}))
arch_ext = 4
class pmovzxbd(DispatchInstruction):
dispatch = (
(xmm_xmm, {'opcode':[0x0F, 0x38, 0x31], 'modrm':None, 'prefix':[0x66]}),
(xmm_mem32, {'opcode':[0x0F, 0x38, 0x31], 'modrm':None, 'prefix':[0x66]}))
arch_ext = 4
class pmovzxbq(DispatchInstruction):
dispatch = (
(xmm_xmm, {'opcode':[0x0F, 0x38, 0x32], 'modrm':None, 'prefix':[0x66]}),
(xmm_mem16, {'opcode':[0x0F, 0x38, 0x32], 'modrm':None, 'prefix':[0x66]}))
arch_ext = 4
class pmovzxwd(DispatchInstruction):
dispatch = (
(xmm_xmm, {'opcode':[0x0F, 0x38, 0x33], 'modrm':None, 'prefix':[0x66]}),
(xmm_mem64, {'opcode':[0x0F, 0x38, 0x33], 'modrm':None, 'prefix':[0x66]}))
arch_ext = 4
class pmovzxwq(DispatchInstruction):
dispatch = (
(xmm_xmm, {'opcode':[0x0F, 0x38, 0x34], 'modrm':None, 'prefix':[0x66]}),
(xmm_mem32, {'opcode':[0x0F, 0x38, 0x34], 'modrm':None, 'prefix':[0x66]}))
arch_ext = 4
class pmovzxdq(DispatchInstruction):
dispatch = (
(xmm_xmm, {'opcode':[0x0F, 0x38, 0x35], 'modrm':None, 'prefix':[0x66]}),
(xmm_mem64, {'opcode':[0x0F, 0x38, 0x35], 'modrm':None, 'prefix':[0x66]}))
arch_ext = 4
class pmuldq(DispatchInstruction):
dispatch = (
(xmm_xmm, {'opcode':[0x0F, 0x38, 0x28], 'modrm':None, 'prefix':[0x66]}),
(xmm_mem128, {'opcode':[0x0F, 0x38, 0x28], 'modrm':None, 'prefix':[0x66]}))
arch_ext = 4
class pmulhrsw(DispatchInstruction):
dispatch = (
(xmm_xmm, {'opcode':[0x0F, 0x38, 0x0B], 'modrm':None, 'prefix':[0x66]}),
(xmm_mem128, {'opcode':[0x0F, 0x38, 0x0B], 'modrm':None, 'prefix':[0x66]}),
(mmx_mmx, {'opcode':[0x0F, 0x38, 0x0B], 'modrm':None}),
(mmx_mem64, {'opcode':[0x0F, 0x38, 0x0B], 'modrm':None}))
arch_ext = 3
class pmulhuw(DispatchInstruction):
dispatch = (
(xmm_xmm, {'opcode':[0x0F, 0xE4], 'modrm':None, 'prefix':[0x66]}),
(xmm_mem128, {'opcode':[0x0F, 0xE4], 'modrm':None, 'prefix':[0x66]}),
(mmx_mmx, {'opcode':[0x0F, 0xE4], 'modrm':None}),
(mmx_mem64, {'opcode':[0x0F, 0xE4], 'modrm':None}))
arch_ext = 1
class pmulhw(DispatchInstruction):
dispatch = (
(xmm_xmm, {'opcode':[0x0F, 0xE5], 'modrm':None, 'prefix':[0x66]}),
(xmm_mem128, {'opcode':[0x0F, 0xE5], 'modrm':None, 'prefix':[0x66]}),
(mmx_mmx, {'opcode':[0x0F, 0xE5], 'modrm':None}),
(mmx_mem64, {'opcode':[0x0F, 0xE5], 'modrm':None}))
arch_ext = 1
class pmulld(DispatchInstruction):
dispatch = (
(xmm_xmm, {'opcode':[0x0F, 0x38, 0x40], 'modrm':None, 'prefix':[0x66]}),
(xmm_mem128, {'opcode':[0x0F, 0x38, 0x40], 'modrm':None, 'prefix':[0x66]}))
arch_ext = 4
class pmullw(DispatchInstruction):
dispatch = (
(xmm_xmm, {'opcode':[0x0F, 0xD5], 'modrm':None, 'prefix':[0x66]}),
(xmm_mem128, {'opcode':[0x0F, 0xD5], 'modrm':None, 'prefix':[0x66]}),
(mmx_mmx, {'opcode':[0x0F, 0xD5], 'modrm':None}),
(mmx_mem64, {'opcode':[0x0F, 0xD5], 'modrm':None}))
arch_ext = 2 # and 0
class pmuludq(DispatchInstruction):
dispatch = (
(xmm_xmm, {'opcode':[0x0F, 0xF4], 'modrm':None, 'prefix':[0x66]}),
(xmm_mem128, {'opcode':[0x0F, 0xF4], 'modrm':None, 'prefix':[0x66]}),
(mmx_mmx, {'opcode':[0x0F, 0xF4], 'modrm':None}),
(mmx_mem64, {'opcode':[0x0F, 0xF4], 'modrm':None}))
arch_ext = 2
class por(DispatchInstruction):
dispatch = (
(xmm_xmm, {'opcode':[0x0F, 0xEB], 'modrm':None, 'prefix':[0x66]}),
(xmm_mem128, {'opcode':[0x0F, 0xEB], 'modrm':None, 'prefix':[0x66]}),
(mmx_mmx, {'opcode':[0x0F, 0xEB], 'modrm':None}),
(mmx_mem64, {'opcode':[0x0F, 0xEB], 'modrm':None}))
arch_ext = 2 # and 0
class psadbw(DispatchInstruction):
dispatch = (
(xmm_xmm, {'opcode':[0x0F, 0xF6], 'modrm':None, 'prefix':[0x66]}),
(xmm_mem128, {'opcode':[0x0F, 0xF6], 'modrm':None, 'prefix':[0x66]}),
(mmx_mmx, {'opcode':[0x0F, 0xF6], 'modrm':None}),
(mmx_mem64, {'opcode':[0x0F, 0xF6], 'modrm':None}))
arch_ext = 1
class pshufb(DispatchInstruction):
dispatch = (
(xmm_xmm, {'opcode':[0x0F, 0x38, 0x00], 'modrm':None, 'prefix':[0x66]}),
(xmm_mem128, {'opcode':[0x0F, 0x38, 0x00], 'modrm':None, 'prefix':[0x66]}),
(mmx_mmx, {'opcode':[0x0F, 0x38, 0x00], 'modrm':None}),
(mmx_mem64, {'opcode':[0x0F, 0x38, 0x00], 'modrm':None}))
arch_ext = 3
class pshufd(DispatchInstruction):
dispatch = (
(xmm_xmm_imm8, {'opcode':[0x0F, 0x70], 'modrm':None, 'prefix':[0x66]}),
(xmm_mem128_imm8, {'opcode':[0x0F, 0x70], 'modrm':None, 'prefix':[0x66]}))
arch_ext = 2
class pshufhw(DispatchInstruction):
dispatch = (
(xmm_xmm_imm8, {'opcode':[0x0F, 0x70], 'modrm':None, 'prefix':[0xF3]}),
(xmm_mem128_imm8, {'opcode':[0x0F, 0x70], 'modrm':None, 'prefix':[0xF3]}))
arch_ext = 2
class pshuflw(DispatchInstruction):
dispatch = (
(xmm_xmm_imm8, {'opcode':[0x0F, 0x70], 'modrm':None, 'prefix':[0xF2]}),
(xmm_mem128_imm8, {'opcode':[0x0F, 0x70], 'modrm':None, 'prefix':[0xF2]}))
arch_ext = 2
class pshufw(DispatchInstruction):
dispatch = (
(mmx_mmx_imm8, {'opcode':[0x0F, 0x70], 'modrm':None}),
(mmx_mem64_imm8, {'opcode':[0x0F, 0x70], 'modrm':None}))
arch_ext = 1
class psignb(DispatchInstruction):
dispatch = (
(xmm_xmm, {'opcode':[0x0F, 0x38, 0x08], 'modrm':None, 'prefix':[0x66]}),
(xmm_mem128, {'opcode':[0x0F, 0x38, 0x08], 'modrm':None, 'prefix':[0x66]}),
(mmx_mmx, {'opcode':[0x0F, 0x38, 0x08], 'modrm':None}),
(mmx_mem64, {'opcode':[0x0F, 0x38, 0x08], 'modrm':None}))
arch_ext = 3
class psignd(DispatchInstruction):
dispatch = (
(xmm_xmm, {'opcode':[0x0F, 0x38, 0x0A], 'modrm':None, 'prefix':[0x66]}),
(xmm_mem128, {'opcode':[0x0F, 0x38, 0x0A], 'modrm':None, 'prefix':[0x66]}),
(mmx_mmx, {'opcode':[0x0F, 0x38, 0x0A], 'modrm':None}),
(mmx_mem64, {'opcode':[0x0F, 0x38, 0x0A], 'modrm':None}))
arch_ext = 3
class psignw(DispatchInstruction):
dispatch = (
(xmm_xmm, {'opcode':[0x0F, 0x38, 0x09], 'modrm':None, 'prefix':[0x66]}),
(xmm_mem128, {'opcode':[0x0F, 0x38, 0x09], 'modrm':None, 'prefix':[0x66]}),
(mmx_mmx, {'opcode':[0x0F, 0x38, 0x09], 'modrm':None}),
(mmx_mem64, {'opcode':[0x0F, 0x38, 0x09], 'modrm':None}))
arch_ext = 3
class pslld(DispatchInstruction):
dispatch = (
(xmm_imm8, {'opcode':[0x0F, 0x72], 'modrm':0x30, 'prefix':[0x66]}),
(xmm_xmm, {'opcode':[0x0F, 0xF2], 'modrm':None, 'prefix':[0x66]}),
(xmm_mem128, {'opcode':[0x0F, 0xF2], 'modrm':None, 'prefix':[0x66]}),
(mmx_imm8, {'opcode':[0x0F, 0x72], 'modrm':0x30}),
(mmx_mmx, {'opcode':[0x0F, 0xF2], 'modrm':None}),
    (mmx_mem64,  {'opcode':[0x0F, 0xF2], 'modrm':None}))
arch_ext = 2 # and 0 and 1
class pslldq(Instruction):
machine_inst = xmm_imm8
params = {'opcode':[0x0F, 0x73], 'modrm':0x38, 'prefix':[0x66]}
  arch_ext = 2
class psllq(DispatchInstruction):
dispatch = (
(xmm_imm8, {'opcode':[0x0F, 0x73], 'modrm':0x30, 'prefix':[0x66]}),
(xmm_xmm, {'opcode':[0x0F, 0xF3], 'modrm':None, 'prefix':[0x66]}),
(xmm_mem128, {'opcode':[0x0F, 0xF3], 'modrm':None, 'prefix':[0x66]}),
(mmx_imm8, {'opcode':[0x0F, 0x73], 'modrm':0x30}),
(mmx_mmx, {'opcode':[0x0F, 0xF3], 'modrm':None}),
    (mmx_mem64,  {'opcode':[0x0F, 0xF3], 'modrm':None}))
arch_ext = 2 # and 0 and 1
class psllw(DispatchInstruction):
dispatch = (
(xmm_imm8, {'opcode':[0x0F, 0x71], 'modrm':0x30, 'prefix':[0x66]}),
(xmm_xmm, {'opcode':[0x0F, 0xF1], 'modrm':None, 'prefix':[0x66]}),
(xmm_mem128, {'opcode':[0x0F, 0xF1], 'modrm':None, 'prefix':[0x66]}),
(mmx_imm8, {'opcode':[0x0F, 0x71], 'modrm':0x30}),
(mmx_mmx, {'opcode':[0x0F, 0xF1], 'modrm':None}),
    (mmx_mem64,  {'opcode':[0x0F, 0xF1], 'modrm':None}))
arch_ext = 2 # and 0 and 1
class psrad(DispatchInstruction):
dispatch = (
(xmm_imm8, {'opcode':[0x0F, 0x72], 'modrm':0x20, 'prefix':[0x66]}),
(xmm_xmm, {'opcode':[0x0F, 0xE2], 'modrm':None, 'prefix':[0x66]}),
(xmm_mem128, {'opcode':[0x0F, 0xE2], 'modrm':None, 'prefix':[0x66]}),
(mmx_imm8, {'opcode':[0x0F, 0x72], 'modrm':0x20}),
(mmx_mmx, {'opcode':[0x0F, 0xE2], 'modrm':None}),
    (mmx_mem64,  {'opcode':[0x0F, 0xE2], 'modrm':None}))
arch_ext = 2 # and 0 and 1
class psraw(DispatchInstruction):
dispatch = (
(xmm_imm8, {'opcode':[0x0F, 0x71], 'modrm':0x20, 'prefix':[0x66]}),
(xmm_xmm, {'opcode':[0x0F, 0xE1], 'modrm':None, 'prefix':[0x66]}),
(xmm_mem128, {'opcode':[0x0F, 0xE1], 'modrm':None, 'prefix':[0x66]}),
(mmx_imm8, {'opcode':[0x0F, 0x71], 'modrm':0x20}),
(mmx_mmx, {'opcode':[0x0F, 0xE1], 'modrm':None}),
    (mmx_mem64,  {'opcode':[0x0F, 0xE1], 'modrm':None}))
arch_ext = 2 # and 0 and 1
class psrld(DispatchInstruction):
dispatch = (
(xmm_imm8, {'opcode':[0x0F, 0x72], 'modrm':0x10, 'prefix':[0x66]}),
(xmm_xmm, {'opcode':[0x0F, 0xD2], 'modrm':None, 'prefix':[0x66]}),
(xmm_mem128, {'opcode':[0x0F, 0xD2], 'modrm':None, 'prefix':[0x66]}),
(mmx_imm8, {'opcode':[0x0F, 0x72], 'modrm':0x10}),
(mmx_mmx, {'opcode':[0x0F, 0xD2], 'modrm':None}),
    (mmx_mem64,  {'opcode':[0x0F, 0xD2], 'modrm':None}))
arch_ext = 2 # and 0 and 1
class psrldq(Instruction):
machine_inst = xmm_imm8
params = {'opcode':[0x0F, 0x73], 'modrm':0x18, 'prefix':[0x66]}
  arch_ext = 2
class psrlq(DispatchInstruction):
dispatch = (
(xmm_imm8, {'opcode':[0x0F, 0x73], 'modrm':0x10, 'prefix':[0x66]}),
(xmm_xmm, {'opcode':[0x0F, 0xD3], 'modrm':None, 'prefix':[0x66]}),
(xmm_mem128, {'opcode':[0x0F, 0xD3], 'modrm':None, 'prefix':[0x66]}),
(mmx_imm8, {'opcode':[0x0F, 0x73], 'modrm':0x10}),
(mmx_mmx, {'opcode':[0x0F, 0xD3], 'modrm':None}),
    (mmx_mem64,  {'opcode':[0x0F, 0xD3], 'modrm':None}))
arch_ext = 2 # and 0 and 1
class psrlw(DispatchInstruction):
dispatch = (
(xmm_imm8, {'opcode':[0x0F, 0x71], 'modrm':0x10, 'prefix':[0x66]}),
(xmm_xmm, {'opcode':[0x0F, 0xD1], 'modrm':None, 'prefix':[0x66]}),
(xmm_mem128, {'opcode':[0x0F, 0xD1], 'modrm':None, 'prefix':[0x66]}),
(mmx_imm8, {'opcode':[0x0F, 0x71], 'modrm':0x10}),
(mmx_mmx, {'opcode':[0x0F, 0xD1], 'modrm':None}),
    (mmx_mem64,  {'opcode':[0x0F, 0xD1], 'modrm':None}))
arch_ext = 2 # and 0 and 1
class psubb(DispatchInstruction):
dispatch = (
(xmm_xmm, {'opcode':[0x0F, 0xF8], 'modrm':None, 'prefix':[0x66]}),
(xmm_mem128, {'opcode':[0x0F, 0xF8], 'modrm':None, 'prefix':[0x66]}),
(mmx_mmx, {'opcode':[0x0F, 0xF8], 'modrm':None}),
(mmx_mem64, {'opcode':[0x0F, 0xF8], 'modrm':None}))
arch_ext = 2 # TODO - err, some are 2, some are 0
class psubd(DispatchInstruction):
dispatch = (
(xmm_xmm, {'opcode':[0x0F, 0xFA], 'modrm':None, 'prefix':[0x66]}),
(xmm_mem128, {'opcode':[0x0F, 0xFA], 'modrm':None, 'prefix':[0x66]}),
(mmx_mmx, {'opcode':[0x0F, 0xFA], 'modrm':None}),
(mmx_mem64, {'opcode':[0x0F, 0xFA], 'modrm':None}))
arch_ext = 2 # TODO - err, some are 2, some are 0
class psubq(DispatchInstruction):
dispatch = (
(xmm_xmm, {'opcode':[0x0F, 0xFB], 'modrm':None, 'prefix':[0x66]}),
(xmm_mem128, {'opcode':[0x0F, 0xFB], 'modrm':None, 'prefix':[0x66]}),
(mmx_mmx, {'opcode':[0x0F, 0xFB], 'modrm':None}),
(mmx_mem64, {'opcode':[0x0F, 0xFB], 'modrm':None}))
arch_ext = 2 # TODO - err, some are 2, some are 0
class psubsb(DispatchInstruction):
dispatch = (
(xmm_xmm, {'opcode':[0x0F, 0xE8], 'modrm':None, 'prefix':[0x66]}),
(xmm_mem128, {'opcode':[0x0F, 0xE8], 'modrm':None, 'prefix':[0x66]}),
(mmx_mmx, {'opcode':[0x0F, 0xE8], 'modrm':None}),
(mmx_mem64, {'opcode':[0x0F, 0xE8], 'modrm':None}))
arch_ext = 2 # TODO - err, some are 2, some are 0
class psubsw(DispatchInstruction):
dispatch = (
(xmm_xmm, {'opcode':[0x0F, 0xE9], 'modrm':None, 'prefix':[0x66]}),
(xmm_mem128, {'opcode':[0x0F, 0xE9], 'modrm':None, 'prefix':[0x66]}),
(mmx_mmx, {'opcode':[0x0F, 0xE9], 'modrm':None}),
(mmx_mem64, {'opcode':[0x0F, 0xE9], 'modrm':None}))
arch_ext = 2 # TODO - err, some are 2, some are 0
class psubusb(DispatchInstruction):
dispatch = (
(xmm_xmm, {'opcode':[0x0F, 0xD8], 'modrm':None, 'prefix':[0x66]}),
(xmm_mem128, {'opcode':[0x0F, 0xD8], 'modrm':None, 'prefix':[0x66]}),
(mmx_mmx, {'opcode':[0x0F, 0xD8], 'modrm':None}),
(mmx_mem64, {'opcode':[0x0F, 0xD8], 'modrm':None}))
arch_ext = 0
class psubusw(DispatchInstruction):
dispatch = (
(xmm_xmm, {'opcode':[0x0F, 0xD9], 'modrm':None, 'prefix':[0x66]}),
(xmm_mem128, {'opcode':[0x0F, 0xD9], 'modrm':None, 'prefix':[0x66]}),
(mmx_mmx, {'opcode':[0x0F, 0xD9], 'modrm':None}),
(mmx_mem64, {'opcode':[0x0F, 0xD9], 'modrm':None}))
arch_ext = 0
class psubw(DispatchInstruction):
dispatch = (
(xmm_xmm, {'opcode':[0x0F, 0xF9], 'modrm':None, 'prefix':[0x66]}),
(xmm_mem128, {'opcode':[0x0F, 0xF9], 'modrm':None, 'prefix':[0x66]}),
(mmx_mmx, {'opcode':[0x0F, 0xF9], 'modrm':None}),
(mmx_mem64, {'opcode':[0x0F, 0xF9], 'modrm':None}))
arch_ext = 2 # TODO - err, some are 2, some are 0
class ptest(DispatchInstruction):
dispatch = (
(xmm_xmm, {'opcode':[0x0F, 0x38, 0x17], 'modrm':None, 'prefix':[0x66]}),
(xmm_mem128, {'opcode':[0x0F, 0x38, 0x17], 'modrm':None, 'prefix':[0x66]}))
arch_ext = 4
class punpckhbw(DispatchInstruction):
dispatch = (
(xmm_xmm, {'opcode':[0x0F, 0x68], 'modrm':None, 'prefix':[0x66]}),
(xmm_mem128, {'opcode':[0x0F, 0x68], 'modrm':None, 'prefix':[0x66]}),
(mmx_mmx, {'opcode':[0x0F, 0x68], 'modrm':None}),
(mmx_mem64, {'opcode':[0x0F, 0x68], 'modrm':None}))
arch_ext = 2 # TODO - err, some are 2, some are 0
class punpckhdq(DispatchInstruction):
dispatch = (
(xmm_xmm, {'opcode':[0x0F, 0x6A], 'modrm':None, 'prefix':[0x66]}),
(xmm_mem128, {'opcode':[0x0F, 0x6A], 'modrm':None, 'prefix':[0x66]}),
(mmx_mmx, {'opcode':[0x0F, 0x6A], 'modrm':None}),
(mmx_mem64, {'opcode':[0x0F, 0x6A], 'modrm':None}))
arch_ext = 2 # TODO - err, some are 2, some are 0
class punpckhqdq(DispatchInstruction):
dispatch = (
(xmm_xmm, {'opcode':[0x0F, 0x6D], 'modrm':None, 'prefix':[0x66]}),
(xmm_mem128, {'opcode':[0x0F, 0x6D], 'modrm':None, 'prefix':[0x66]}))
arch_ext = 2
class punpckhwd(DispatchInstruction):
dispatch = (
(xmm_xmm, {'opcode':[0x0F, 0x69], 'modrm':None, 'prefix':[0x66]}),
(xmm_mem128, {'opcode':[0x0F, 0x69], 'modrm':None, 'prefix':[0x66]}),
(mmx_mmx, {'opcode':[0x0F, 0x69], 'modrm':None}),
(mmx_mem64, {'opcode':[0x0F, 0x69], 'modrm':None}))
arch_ext = 2 # TODO - err, some are 2, some are 0
class punpcklbw(DispatchInstruction):
dispatch = (
(xmm_xmm, {'opcode':[0x0F, 0x60], 'modrm':None, 'prefix':[0x66]}),
(xmm_mem128, {'opcode':[0x0F, 0x60], 'modrm':None, 'prefix':[0x66]}),
(mmx_mmx, {'opcode':[0x0F, 0x60], 'modrm':None}),
(mmx_mem64, {'opcode':[0x0F, 0x60], 'modrm':None}))
arch_ext = 2 # TODO - err, some are 2, some are 0
class punpckldq(DispatchInstruction):
dispatch = (
(xmm_xmm, {'opcode':[0x0F, 0x62], 'modrm':None, 'prefix':[0x66]}),
(xmm_mem128, {'opcode':[0x0F, 0x62], 'modrm':None, 'prefix':[0x66]}),
(mmx_mmx, {'opcode':[0x0F, 0x62], 'modrm':None}),
(mmx_mem64, {'opcode':[0x0F, 0x62], 'modrm':None}))
arch_ext = 2 # TODO - err, some are 2, some are 0
class punpcklqdq(DispatchInstruction):
dispatch = (
(xmm_xmm, {'opcode':[0x0F, 0x6C], 'modrm':None, 'prefix':[0x66]}),
(xmm_mem128, {'opcode':[0x0F, 0x6C], 'modrm':None, 'prefix':[0x66]}))
arch_ext = 2
class punpcklwd(DispatchInstruction):
dispatch = (
(xmm_xmm, {'opcode':[0x0F, 0x61], 'modrm':None, 'prefix':[0x66]}),
(xmm_mem128, {'opcode':[0x0F, 0x61], 'modrm':None, 'prefix':[0x66]}),
(mmx_mmx, {'opcode':[0x0F, 0x61], 'modrm':None}),
(mmx_mem64, {'opcode':[0x0F, 0x61], 'modrm':None}))
arch_ext = 2 # TODO - err, some are 2, some are 0
class pxor(DispatchInstruction):
dispatch = (
(xmm_xmm, {'opcode':[0x0F, 0xEF], 'modrm':None, 'prefix':[0x66]}),
(xmm_mem128, {'opcode':[0x0F, 0xEF], 'modrm':None, 'prefix':[0x66]}),
(mmx_mmx, {'opcode':[0x0F, 0xEF], 'modrm':None}),
(mmx_mem64, {'opcode':[0x0F, 0xEF], 'modrm':None}))
arch_ext = 2 # TODO - err, some are 2, some are 0
class rcpps(DispatchInstruction):
dispatch = (
(xmm_xmm, {'opcode':[0x0F, 0x53], 'modrm':None, 'prefix':[]}),
(xmm_mem128, {'opcode':[0x0F, 0x53], 'modrm':None, 'prefix':[]}))
arch_ext = 1
class rcpss(DispatchInstruction):
dispatch = (
(xmm_xmm, {'opcode':[0x0F, 0x53], 'modrm':None, 'prefix':[0xF3]}),
(xmm_mem32, {'opcode':[0x0F, 0x53], 'modrm':None, 'prefix':[0xF3]}))
  arch_ext = 1
class roundpd(DispatchInstruction):
dispatch = (
(xmm_xmm_imm8, {'opcode':[0x0F, 0x3A, 0x09], 'modrm':None, 'prefix':[0x66]}),
(xmm_mem128_imm8, {'opcode':[0x0F, 0x3A, 0x09], 'modrm':None, 'prefix':[0x66]}))
arch_ext = 4
class roundps(DispatchInstruction):
dispatch = (
(xmm_xmm_imm8, {'opcode':[0x0F, 0x3A, 0x08], 'modrm':None, 'prefix':[0x66]}),
(xmm_mem128_imm8, {'opcode':[0x0F, 0x3A, 0x08], 'modrm':None, 'prefix':[0x66]}))
arch_ext = 4
class roundsd(DispatchInstruction):
dispatch = (
(xmm_xmm_imm8, {'opcode':[0x0F, 0x3A, 0x0B], 'modrm':None, 'prefix':[0x66]}),
(xmm_mem64_imm8, {'opcode':[0x0F, 0x3A, 0x0B], 'modrm':None, 'prefix':[0x66]}))
arch_ext = 4
class roundss(DispatchInstruction):
dispatch = (
(xmm_xmm_imm8, {'opcode':[0x0F, 0x3A, 0x0A], 'modrm':None, 'prefix':[0x66]}),
    (xmm_mem32_imm8,  {'opcode':[0x0F, 0x3A, 0x0A], 'modrm':None, 'prefix':[0x66]}))
arch_ext = 4
class rsqrtps(DispatchInstruction):
dispatch = (
(xmm_xmm, {'opcode':[0x0F, 0x52], 'modrm':None, 'prefix':[]}),
(xmm_mem128, {'opcode':[0x0F, 0x52], 'modrm':None, 'prefix':[]}))
arch_ext = 1
class rsqrtss(DispatchInstruction):
dispatch = (
(xmm_xmm, {'opcode':[0x0F, 0x52], 'modrm':None, 'prefix':[0xF3]}),
(xmm_mem32, {'opcode':[0x0F, 0x52], 'modrm':None, 'prefix':[0xF3]}))
  arch_ext = 1
class shufpd(DispatchInstruction):
dispatch = (
(xmm_xmm_imm8, {'opcode':[0x0F, 0xC6], 'modrm':None, 'prefix':[0x66]}),
(xmm_mem128_imm8, {'opcode':[0x0F, 0xC6], 'modrm':None, 'prefix':[0x66]}))
arch_ext = 2
class shufps(DispatchInstruction):
dispatch = (
(xmm_xmm_imm8, {'opcode':[0x0F, 0xC6], 'modrm':None, 'prefix':[]}),
(xmm_mem128_imm8, {'opcode':[0x0F, 0xC6], 'modrm':None, 'prefix':[]}))
arch_ext = 1
class sqrtpd(DispatchInstruction):
dispatch = (
(xmm_xmm, {'opcode':[0x0F, 0x51], 'modrm':None, 'prefix':[0x66]}),
(xmm_mem128, {'opcode':[0x0F, 0x51], 'modrm':None, 'prefix':[0x66]}))
arch_ext = 2
class sqrtps(DispatchInstruction):
dispatch = (
(xmm_xmm, {'opcode':[0x0F, 0x51], 'modrm':None, 'prefix':[]}),
(xmm_mem128, {'opcode':[0x0F, 0x51], 'modrm':None, 'prefix':[]}))
arch_ext = 1
class sqrtsd(DispatchInstruction):
dispatch = (
(xmm_xmm, {'opcode':[0x0F, 0x51], 'modrm':None, 'prefix':[0xF2]}),
(xmm_mem64, {'opcode':[0x0F, 0x51], 'modrm':None, 'prefix':[0xF2]}))
arch_ext = 2
class sqrtss(DispatchInstruction):
dispatch = (
(xmm_xmm, {'opcode':[0x0F, 0x51], 'modrm':None, 'prefix':[0xF3]}),
    (xmm_mem32,  {'opcode':[0x0F, 0x51], 'modrm':None, 'prefix':[0xF3]}))
arch_ext = 1
class stmxcsr(Instruction):
machine_inst = mem32
params = {'opcode':[0x0F, 0xAE], 'modrm':0x18}
arch_ext = 1
class subpd(DispatchInstruction):
dispatch = (
(xmm_xmm, {'opcode':[0x0F, 0x5C], 'modrm':None, 'prefix':[0x66]}),
(xmm_mem128, {'opcode':[0x0F, 0x5C], 'modrm':None, 'prefix':[0x66]}))
arch_ext = 2
class subps(DispatchInstruction):
dispatch = (
(xmm_xmm, {'opcode':[0x0F, 0x5C], 'modrm':None, 'prefix':[]}),
(xmm_mem128, {'opcode':[0x0F, 0x5C], 'modrm':None, 'prefix':[]}))
arch_ext = 1
class subsd(DispatchInstruction):
dispatch = (
(xmm_xmm, {'opcode':[0x0F, 0x5C], 'modrm':None, 'prefix':[0xF2]}),
(xmm_mem64, {'opcode':[0x0F, 0x5C], 'modrm':None, 'prefix':[0xF2]}))
arch_ext = 2
class subss(DispatchInstruction):
dispatch = (
(xmm_xmm, {'opcode':[0x0F, 0x5C], 'modrm':None, 'prefix':[0xF3]}),
(xmm_mem32, {'opcode':[0x0F, 0x5C], 'modrm':None, 'prefix':[0xF3]}))
arch_ext = 1
class ucomisd(DispatchInstruction):
dispatch = (
(xmm_xmm, {'opcode':[0x0F, 0x2E], 'modrm':None, 'prefix':[0x66]}),
(xmm_mem64, {'opcode':[0x0F, 0x2E], 'modrm':None, 'prefix':[0x66]}))
arch_ext = 2
class ucomiss(DispatchInstruction):
dispatch = (
(xmm_xmm, {'opcode':[0x0F, 0x2E], 'modrm':None, 'prefix':[]}),
(xmm_mem32, {'opcode':[0x0F, 0x2E], 'modrm':None, 'prefix':[]}))
arch_ext = 1
class unpckhpd(DispatchInstruction):
dispatch = (
(xmm_xmm, {'opcode':[0x0F, 0x15], 'modrm':None, 'prefix':[0x66]}),
(xmm_mem128, {'opcode':[0x0F, 0x15], 'modrm':None, 'prefix':[0x66]}))
  arch_ext = 2
class unpckhps(DispatchInstruction):
dispatch = (
(xmm_xmm, {'opcode':[0x0F, 0x15], 'modrm':None, 'prefix':[]}),
(xmm_mem128, {'opcode':[0x0F, 0x15], 'modrm':None, 'prefix':[]}))
arch_ext = 1
class unpcklpd(DispatchInstruction):
dispatch = (
(xmm_xmm, {'opcode':[0x0F, 0x14], 'modrm':None, 'prefix':[0x66]}),
(xmm_mem128, {'opcode':[0x0F, 0x14], 'modrm':None, 'prefix':[0x66]}))
  arch_ext = 2
class unpcklps(DispatchInstruction):
dispatch = (
(xmm_xmm, {'opcode':[0x0F, 0x14], 'modrm':None, 'prefix':[]}),
(xmm_mem128, {'opcode':[0x0F, 0x14], 'modrm':None, 'prefix':[]}))
arch_ext = 1
class xorpd(DispatchInstruction):
dispatch = (
(xmm_xmm, {'opcode':[0x0F, 0x57], 'modrm':None, 'prefix':[0x66]}),
(xmm_mem128, {'opcode':[0x0F, 0x57], 'modrm':None, 'prefix':[0x66]}))
arch_ext = 2
class xorps(DispatchInstruction):
dispatch = (
(xmm_xmm, {'opcode':[0x0F, 0x57], 'modrm':None, 'prefix':[]}),
(xmm_mem128, {'opcode':[0x0F, 0x57], 'modrm':None, 'prefix':[]}))
arch_ext = 1
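# --- Illustrative sketch (added; not part of the original assembler) ---
# Every DispatchInstruction above pairs operand signatures (xmm_xmm,
# xmm_mem128, ...) with encoding parameters: opcode bytes, an optional ModRM
# reg-field constant, and any mandatory prefix bytes.  The helper below shows,
# via identity lookup, how such a table could be resolved; the real
# assembler's operand matching is more involved than this.
def _resolve_dispatch(dispatch, signature):
  """Return the encoding params of the first entry matching signature."""
  for machine_inst, params in dispatch:
    if machine_inst is signature:
      return params
  raise TypeError('no encoding for operand signature %r' % (signature,))

# Hypothetical use, with the classes and signatures defined above:
#   _resolve_dispatch(xorps.dispatch, xmm_xmm)['opcode']  ->  [0x0F, 0x57]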
|
py
|
1a5687d9a2cfe90be5fa7b715461d44c3faccf0d
|
# coding: utf-8
from __future__ import (
absolute_import,
print_function,
unicode_literals,
)
from pydocx.models import XmlModel, XmlCollection
from pydocx.openxml.wordprocessing.endnote import Endnote
class Endnotes(XmlModel):
XML_TAG = 'endnotes'
children = XmlCollection(Endnote)
def __init__(self, *args, **kwargs):
super(Endnotes, self).__init__(*args, **kwargs)
endnote_by_id = {}
for endnote in self.children:
if endnote.endnote_id:
endnote_by_id[endnote.endnote_id] = endnote
self._endnote_by_id = endnote_by_id
def get_endnote_by_id(self, endnote_id):
return self._endnote_by_id.get(endnote_id)
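# --- Hypothetical usage sketch (added; 'load' and the XML payload are
# assumptions, not taken from this module) ---
# Endnotes builds the id -> Endnote map once at construction time, so
# repeated lookups are O(1):
#   endnotes = Endnotes.load(xml_root)       # parse via the XmlModel machinery
#   note = endnotes.get_endnote_by_id('2')   # Endnote instance, or None if absent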
|
py
|
1a5687fbdd7435bd1fcc36a0e256796ecfc5fee9
|
from rest_framework import serializers
from rest_waspmote.models import WaspData
__author__ = 'julian'
class WaspDataSerializer(serializers.HyperlinkedModelSerializer):
class Meta:
model = WaspData
        fields = ('timestamp_waspmote', 'status', 'alt', 'lat', 'long', 'speed',
                  'voltage', 'notes', 'objects', 'valorsensor', 'timestamp_server')
|
py
|
1a56883a471661a907c55586485b9fb391666fda
|
from collections import Counter
def cut_the_ropes(arr):
    # Count how many ropes share each length; each "round" cuts the current
    # shortest length from every remaining rope, which removes exactly the
    # ropes of that length.
    ropes = Counter(arr)
    # Record how many ropes are left before each round, starting with all of them.
    numbers = [len(arr)]
    for rope_len, ropes_count in sorted(ropes.items()):
        numbers.append(numbers[-1] - ropes_count)
    # The last entry is always 0 (after the final cut), so drop it.
    numbers.pop()
    return numbers
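# --- Worked example (added for illustration) ---
if __name__ == '__main__':
    # 5 ropes; cut 2 -> [1, 1, 7, 5]; cut 1 -> [6, 4]; cut 4 -> [2]
    assert cut_the_ropes([3, 3, 2, 9, 7]) == [5, 4, 2, 1]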
|
py
|
1a56887314f63c97d78bcee9c75941bb6c2ffc86
|
from pathlib import Path
import requests
from lxml import etree
headers = {
'user-agent': "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.132 Safari/537.36"
}
meta_url = "https://pypi.org/"
search_url = "https://pypi.org/search/?q"
def init_save_path(save_path="."):
""" 初始化下载环境 """
source_path = Path(save_path)
save_path = source_path / 'whls'
if not save_path.exists():
save_path.mkdir()
return save_path
def load_whl_info(packages_path: str):
with open(packages_path, "r") as fr:
whl_info = fr.read()
return whl_info
def init_download_packages(whl_info: str):
    """ Parse the raw package list into (name, version) tuples """
    need_packages = []
    package_info = [i.strip() for i in whl_info.split("\n") if i.strip()]
    for i in package_info:
        whl_name = i
        version = ""  # reset per entry so a pinned version does not leak to bare names
        if "==" in i:
            whl_name, version = i.split("==")
        need_packages.append((whl_name, version))
    return need_packages
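# --- Illustrative example (added) ---
#   init_download_packages("requests==2.25.1\nlxml")
#   ->  [('requests', '2.25.1'), ('lxml', '')]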
def pypi_spider(save_path, need_packages: list, error_package: list = None):
    """ PyPI package spider.
    need_packages: packages that need to be downloaded
    error_package: packages that failed to download
    """
    if error_package is None:  # avoid a shared mutable default argument
        error_package = []
for idx, package_info in enumerate(need_packages, 1):
search_content = package_info[0]
version = package_info[1]
        print('Package to download:', package_info)
response = requests.get(
f'{search_url}={search_content}', headers=headers)
html_str = response.content.decode('utf-8')
html = etree.HTML(html_str)
search_results = html.xpath(
'//*[@id="content"]/div/div/div[2]/form/div[3]/ul/*')
result_url = ''
        for result in search_results:
            result_href = result.xpath('./a/@href')[0]
            result_name = result.xpath('./a/h3/span[1]')[0].text
            result_version = result.xpath('./a/h3/span[2]')[0].text
            # Accept an exact, capitalized, or dash/underscore-normalized name match.
            if (result_name == search_content
                    or (result_name == search_content.capitalize()
                        and len(result_name) == len(search_content))
                    or ('-' in search_content
                        and search_content.replace('-', '_') == result_name
                        and len(result_name) == len(search_content))):
                if version:
                    # Pin the requested version instead of the latest release.
                    result_url = f'{meta_url}{result_href}{version}/#files'
                    print(f'Preparing to download {result_name} {version}')
                else:
                    result_url = f'{meta_url}{result_href}#files'
                    print(f'Preparing to download {result_name} {result_version}')
                break
if not result_url:
error_package.append(search_content)
continue
# get download url
response = requests.get(result_url, headers=headers)
result_html_str = response.content.decode('utf-8')
result_html = etree.HTML(result_html_str)
result_download_nodes = result_html.xpath(
'//*[@id="files"]/table/tbody/tr')
        win32_info = None  # win32 wheel of the same version, if any
for result_download in result_download_nodes:
file_type = result_download.xpath(
'./td[1]/text()')[1].replace(" ", '').replace('\n', '')
download_version = result_download.xpath(
'./td[2]/text()')[1].replace(" ", '').replace('\n', '')
download_href = result_download.xpath('./th/a/@href')[0]
whl_name = result_download.xpath(
'./th/a/text()')[0].replace(" ", '').replace('\n', '')
whl_size = result_download.xpath(
'./th/text()')[2].replace(" ", '').replace('\n', '')
            # Decide which build to download.
            if download_version == 'cp37' and 'win32' in whl_name:
                win32_info = (whl_name, download_href)
            if download_version == 'py2.py3' and 'py2.py3-none-any' in whl_name:  # universal py2/py3 wheel
                break
            elif download_version == 'cp37' and 'win_amd64' in whl_name:  # CPython 3.7 win64 wheel
                # If a matching win32 wheel was seen, download it as well.
                if win32_info:
                    print(f'win32 download link for {search_content}:', win32_info)
file_name = save_path / win32_info[0]
file_content = requests.get(win32_info[1], headers=headers)
with open(file_name.absolute(), 'wb') as f:
f.write(file_content.content)
break
            elif 'py3' in download_version or download_version == 'None':  # fall back to a generic py3 build
                break
        # Download the wheel selected above.
        file_name = save_path / whl_name
        file_content = requests.get(download_href, headers=headers)
        with open(file_name.absolute(), 'wb') as f:
            f.write(file_content.content)
        print(f'{search_content} {whl_size} version {download_version} type {file_type} -- downloaded successfully')
if len(need_packages) == idx:
            print('Task finished')
            if error_package:
                print('The following packages failed to download:')
for idx, error_ in enumerate(error_package, 1):
print(f'{idx}: {error_}')
return error_package
def show_help():
print("choose which source you need to download")
url_info = """
+++++++++COMMANDS++++++++++
1:\t\tpypi.org
2:\t\tdouban
3:\t\taliyun
+++++++++++++++++++++++++++
"""
print(url_info)
def main_loop():
packages_path = input(">>> input packages path: ").strip()
if not packages_path:
print("not found")
return
whl_info = load_whl_info(packages_path)
need_packages = init_download_packages(whl_info)
input_path = input(">>> input save path: ").strip()
input_path = "." if not input_path else input_path
save_path = init_save_path(input_path)
show_help()
choose = input(">>> ")
if choose == "1":
pypi_spider(save_path, need_packages)
if __name__ == "__main__":
main_loop()
|
py
|
1a568a062f7ec05c6da10ce78360f6f81740d272
|
from seleniumbase import BaseCase
from ..page_objects.main_page import MainPage as PageObjects
class HappyPathTest(BaseCase):
def common_actions(self):
        # Fill the amount to apply with 20,00
        self.update_text(PageObjects.input_valor_aplicar, '20,00')
        # Fill the amount you want to save with 20,00
        self.update_text(PageObjects.input_valor_investir, '20,00')
        # Fill for how long you want to save with 20
        self.update_text(PageObjects.input_quanto_tempo, '20')
self.click(PageObjects.btn_simular)
self.assert_element(PageObjects.table)
self.click(PageObjects.btn_repeat_simulation)
def test_happy_path_para_voce(self):
self.open(PageObjects.url)
self.common_actions()
        # Test the "for a company" flow
self.click(PageObjects.radio_btn_empresa)
self.common_actions()
|
py
|
1a568a90e696265f52c983fb8db6261ece3666db
|
import sys
import csv_data
import my_perceptron
# TWEAKING VARIABLES
max_perceptron_iterations = 100
def printResults( data_name, result_unrounded ):
print( "RESULTS FOR", data_name.upper() )
print( "{:.2f}% correct prediction on {}\n".format( round( result_unrounded, 2 ), data_name.lower() ) )
def main( argv ):
if len( argv ) != 3:
print( "Usage: \"python3 perceptron.py <train> <test> <model>\"" ); exit()
# Read Data
Training_Data = csv_data.Data( argv[ 0 ] )
Testing_Data = csv_data.Data( argv[ 1 ] )
# Create Perceptron
perceptron = my_perceptron.Perceptron()
print( "\n\nPredictions results with", max_perceptron_iterations, "iterations of learning:\n" )
perceptron.perceptronTrain( Training_Data, max_perceptron_iterations )
resultsPercentage = perceptron.perceptronPredictionResults( Training_Data )
printResults( "Training Data", resultsPercentage )
resultsPercentage = perceptron.perceptronPredictionResultsAndPrintActivations( Testing_Data )
printResults( "Testing Data", resultsPercentage )
perceptron.outputModelToFile( argv[ 2 ] )
print( "Weights and bias recorded for", max_perceptron_iterations, "iterations in", argv[ 2 ], "\n" );
print( "\n\nAlso predictiong results with 50 iterations of learning because I get better percentages:\n" )
perceptron.perceptronTrain( Training_Data, 50 )
resultsPercentage = perceptron.perceptronPredictionResults( Training_Data )
printResults( "Training Data", resultsPercentage )
resultsPercentage = perceptron.perceptronPredictionResults( Testing_Data )
printResults( "Testing Data", resultsPercentage )
print( "Weights and bias of output file will still reflect the", max_perceptron_iterations, "iteration test above.\n\n" )
if __name__=='__main__':
main( sys.argv[1:] )
|
py
|
1a568af6a39122535ab3df2bebc2e8ae412213bf
|
from django.db import models
from django.conf import settings
from ..departamentos.models import Departamento
from ..empresas.models import Empresa
class Funcionario(models.Model):
"""
Funcionário
"""
nome = models.CharField(max_length=150, help_text='Nome do funcionário')
user = models.OneToOneField(to=settings.AUTH_USER_MODEL, on_delete=models.PROTECT)
departamentos = models.ManyToManyField(Departamento)
empresa = models.ForeignKey(Empresa, on_delete=models.PROTECT)
class Meta:
verbose_name = 'Funcionário'
verbose_name_plural = "Funcionários"
def __str__(self):
return self.nome
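# --- Hypothetical usage sketch (added; the instance names are made up) ---
#   func = Funcionario.objects.create(nome='Maria', user=user, empresa=empresa)
#   func.departamentos.add(rh, ti)  # many-to-many: one employee, many departments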
|
py
|
1a568b4592c6466ca835888023b8967ab1a93c72
|
from setuptools import setup, find_packages
setup(
name='fetchai-netutils',
version='0.0.6a2',
description='Tools and utilities for interacting with the fetch network',
url='https://github.com/fetchai/',
author='Ed FitzGerald',
author_email='[email protected]',
packages=find_packages(exclude=['contrib', 'docs', 'tests']), # Required
install_requires=[],
extras_require={
'dev': ['check-manifest'],
'test': ['coverage'],
},
entry_points={
'console_scripts': [
'swarm=fetch.cluster.apps.swarm:main',
],
},
)
|
py
|
1a568bab37209ab4cc52c15cb4b653e07c6c69d5
|
# Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import functools
from abc import ABC, abstractmethod
from typing import Any, Callable, Optional, Union
from collections.abc import Mapping, Sequence
from collections import namedtuple
from copy import deepcopy
from distutils.version import LooseVersion
import os
import torch
from torch import nn
from pytorch_lightning.utilities.apply_func import apply_to_collection
from pytorch_lightning.utilities.distributed import gather_all_tensors
from pytorch_lightning.metrics.utils import _flatten, dim_zero_cat, dim_zero_mean, dim_zero_sum
class Metric(nn.Module, ABC):
"""
Base class for all metrics present in the Metrics API.
Implements ``add_state()``, ``forward()``, ``reset()`` and a few other things to
handle distributed synchronization and per-step metric computation.
Override ``update()`` and ``compute()`` functions to implement your own metric. Use
``add_state()`` to register metric state variables which keep track of state on each
call of ``update()`` and are synchronized across processes when ``compute()`` is called.
Note:
Metric state variables can either be ``torch.Tensors`` or an empty list which can be used
to store ``torch.Tensors``.
Note:
Different metrics only override ``update()`` and not ``forward()``. A call to ``update()``
is valid, but it won't return the metric value at the current step. A call to ``forward()``
automatically calls ``update()`` and also returns the metric value at the current step.
Args:
compute_on_step:
Forward only calls ``update()`` and returns None if this is set to False. default: True
dist_sync_on_step:
Synchronize metric state across processes at each ``forward()``
before returning the value at the step.
process_group:
Specify the process group on which synchronization is called. default: None (which selects the entire world)
dist_sync_fn:
Callback that performs the allgather operation on the metric state. When ``None``, DDP
will be used to perform the allgather. default: None
"""
def __init__(
self,
compute_on_step: bool = True,
dist_sync_on_step: bool = False,
process_group: Optional[Any] = None,
dist_sync_fn: Callable = None,
):
super().__init__()
self.dist_sync_on_step = dist_sync_on_step
self.compute_on_step = compute_on_step
self.process_group = process_group
self.dist_sync_fn = dist_sync_fn
self._to_sync = True
self.update = self._wrap_update(self.update)
self.compute = self._wrap_compute(self.compute)
self._computed = None
self._forward_cache = None
# initialize state
self._reductions = {}
self._defaults = {}
def add_state(
self, name: str, default, dist_reduce_fx: Optional[Union[str, Callable]] = None, persistent: bool = True
):
"""
Adds metric state variable. Only used by subclasses.
Args:
name: The name of the state variable. The variable will then be accessible at ``self.name``.
default: Default value of the state; can either be a ``torch.Tensor`` or an empty list. The state will be
reset to this value when ``self.reset()`` is called.
dist_reduce_fx (Optional): Function to reduce state across multiple processes in distributed mode.
If value is ``"sum"``, ``"mean"``, or ``"cat"``, we will use ``torch.sum``, ``torch.mean``,
and ``torch.cat`` respectively, each with argument ``dim=0``. The user can also pass a custom
function in this parameter.
persistent (Optional): whether the state will be saved as part of the module's ``state_dict``.
Note:
Setting ``dist_reduce_fx`` to None will return the metric state synchronized across different processes.
However, there won't be any reduction function applied to the synchronized metric state.
The metric states would be synced as follows
- If the metric state is a ``torch.Tensor``, the synced value will be a stacked ``torch.Tensor`` across
the process dimension. The original ``torch.Tensor`` metric state retains its dimensions, so the
synchronized output will be of shape ``(num_process, ...)``.
- If the metric state is a ``list``, the synced value will be a ``list`` containing the
combined elements from all processes.
Note:
When passing a custom function to ``dist_reduce_fx``, expect the synchronized metric state to follow
the format discussed in the above note.
"""
if (
not isinstance(default, torch.Tensor)
and not isinstance(default, list) # noqa: W503
or (isinstance(default, list) and len(default) != 0) # noqa: W503
):
raise ValueError(
"state variable must be a tensor or any empty list (where you can append tensors)"
)
if dist_reduce_fx == "sum":
dist_reduce_fx = dim_zero_sum
elif dist_reduce_fx == "mean":
dist_reduce_fx = dim_zero_mean
elif dist_reduce_fx == "cat":
dist_reduce_fx = dim_zero_cat
elif dist_reduce_fx is not None and not isinstance(dist_reduce_fx, Callable):
raise ValueError(
"`dist_reduce_fx` must be callable or one of ['mean', 'sum', 'cat', None]"
)
if isinstance(default, torch.Tensor):
if LooseVersion(torch.__version__) >= LooseVersion("1.6.0"):
# persistent keyword is only supported in torch >= 1.6.0
self.register_buffer(name, default, persistent=persistent)
else:
self.register_buffer(name, default)
else:
setattr(self, name, default)
self._defaults[name] = deepcopy(default)
self._reductions[name] = dist_reduce_fx
@torch.jit.unused
def forward(self, *args, **kwargs):
"""
Automatically calls ``update()``. Returns the metric value over inputs if ``compute_on_step`` is True.
"""
# add current step
with torch.no_grad():
self.update(*args, **kwargs)
self._forward_cache = None
if self.compute_on_step:
self._to_sync = self.dist_sync_on_step
# save context before switch
self._cache = {attr: getattr(self, attr) for attr in self._defaults.keys()}
# call reset, update, compute, on single batch
self.reset()
self.update(*args, **kwargs)
self._forward_cache = self.compute()
# restore context
for attr, val in self._cache.items():
setattr(self, attr, val)
self._to_sync = True
self._computed = None
return self._forward_cache
def _sync_dist(self, dist_sync_fn=gather_all_tensors):
input_dict = {attr: getattr(self, attr) for attr in self._reductions.keys()}
output_dict = apply_to_collection(
input_dict,
torch.Tensor,
dist_sync_fn,
group=self.process_group,
)
for attr, reduction_fn in self._reductions.items():
# pre-processing ops (stack or flatten for inputs)
if isinstance(output_dict[attr][0], torch.Tensor):
output_dict[attr] = torch.stack(output_dict[attr])
elif isinstance(output_dict[attr][0], list):
output_dict[attr] = _flatten(output_dict[attr])
assert isinstance(reduction_fn, (Callable)) or reduction_fn is None
reduced = reduction_fn(output_dict[attr]) if reduction_fn is not None else output_dict[attr]
setattr(self, attr, reduced)
def _wrap_update(self, update):
@functools.wraps(update)
def wrapped_func(*args, **kwargs):
self._computed = None
return update(*args, **kwargs)
return wrapped_func
def _wrap_compute(self, compute):
@functools.wraps(compute)
def wrapped_func(*args, **kwargs):
# return cached value
if self._computed is not None:
return self._computed
dist_sync_fn = self.dist_sync_fn
if (dist_sync_fn is None
and torch.distributed.is_available()
and torch.distributed.is_initialized()):
# User provided a bool, so we assume DDP if available
dist_sync_fn = gather_all_tensors
if self._to_sync and dist_sync_fn is not None:
self._sync_dist(dist_sync_fn)
self._computed = compute(*args, **kwargs)
self.reset()
return self._computed
return wrapped_func
@abstractmethod
def update(self) -> None: # pylint: disable=E0202
"""
Override this method to update the state variables of your metric class.
"""
pass
@abstractmethod
def compute(self): # pylint: disable=E0202
"""
Override this method to compute the final metric value from state variables
synchronized across the distributed backend.
"""
pass
def reset(self):
"""
This method automatically resets the metric state variables to their default value.
"""
for attr, default in self._defaults.items():
current_val = getattr(self, attr)
if isinstance(current_val, torch.Tensor):
setattr(self, attr, deepcopy(default).to(current_val.device))
else:
setattr(self, attr, deepcopy(default))
def __getstate__(self):
# ignore update and compute functions for pickling
return {k: v for k, v in self.__dict__.items() if k not in ["update", "compute"]}
def __setstate__(self, state):
# manually restore update and compute functions for pickling
self.__dict__.update(state)
self.update = self._wrap_update(self.update)
self.compute = self._wrap_compute(self.compute)
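# A minimal sketch of a custom metric built on the base class above, following
# the contract from the class docstring (``add_state`` + ``update`` + ``compute``).
# ``SumMetric`` is an illustrative example, not part of this module.
class SumMetric(Metric):

    def __init__(self):
        super().__init__()
        # state is synchronized across processes with torch.sum along dim 0
        self.add_state("total", default=torch.tensor(0.0), dist_reduce_fx="sum")

    def update(self, values: torch.Tensor):
        self.total += values.sum()

    def compute(self):
        return self.total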
|
py
|
1a568e3deb2a9f909312dced5ff68c2c3cd2e7cb
|
#!/usr/bin/python3
import time
import random
import imp
modl = imp.load_source('ppFunctions', '../00/ppFunctions.py')
import os
from ppFunctions import *
from termcolor import colored, cprint
# sleep because of loading midi modules
print("Are you ready?")
time.sleep(1)
print_status = lambda x: cprint(x, 'white', 'on_blue')
print_help = lambda x: cprint(x, 'red')
hit = 0
rounde = 1
done = False
generatedList = []
for i in range(stringToMidiNum("c"), stringToMidiNum("c'")+1):
if i%12 in blackTonesBase:
generatedList.append(i)
while True:
try:
os.system('clear')
print_status("Status: round=" + str(rounde) + ", hit=" + str(hit))
print_help("Help: rEPEAT sKIP")
playHarmonicNotes(stringToMidiNum("f a c'"))
randomNote = random.choice(generatedList)
playNote(randomNote)
while not done:
guessedNote = input("Your input:")
if guessedNote == "r":
print("Repeating...")
playHarmonicNotes(stringToMidiNum("f a c'"))
playNote(randomNote)
elif guessedNote == "s":
print("Skiping...")
done = True
elif guessedNote not in lilypondTones:
print("What? Syntax error!")
else:
if (lilypondTones[guessedNote] == randomNote%12):
print("Yea!")
hit += 1
rounde += 1
done = True
else:
print("Almost!")
hit = 0
done = False
except (KeyboardInterrupt):
print('...Program Stopped Manually!')
raise
|
py
|
1a568e7300f9bd7bb649c163c0af2b3867b7eb46
|
#
# This source file is part of the EdgeDB open source project.
#
# Copyright 2008-present MagicStack Inc. and the EdgeDB authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Mypy plugin to provide support for schema objects."""
from __future__ import annotations
from typing import *
from mypy import exprtotype
import mypy.plugin as mypy_plugin
from mypy import nodes
from mypy import types
from mypy import semanal
from mypy.plugins import common as mypy_helpers
from mypy.server import trigger as mypy_trigger
METADATA_KEY = 'edbplugin'
AST_BASE_CLASSES = {
'edb.common.ast.base.AST',
}
STRUCT_BASE_METACLASSES = {
'edb.common.struct.StructMeta',
}
SCHEMA_BASE_METACLASSES = {
'edb.schema.objects.ObjectMeta',
'edb.schema.types.SchemaCollectionMeta',
}
def plugin(version: str):
return EDBPlugin
class EDBPlugin(mypy_plugin.Plugin):
def get_base_class_hook(self, fullname: str):
if fullname.startswith('edb.'):
return self.handle_schema_class
def handle_schema_class(self, ctx: mypy_plugin.ClassDefContext):
mro = ctx.cls.info.mro
mcls = ctx.cls.info.metaclass_type
mcls_mro = mcls.type.mro if mcls else []
transformers: List[BaseTransformer] = []
if any(c.fullname in SCHEMA_BASE_METACLASSES for c in mcls_mro):
transformers.append(
SchemaClassTransformer(
ctx,
field_makers={'edb.schema.objects.SchemaField'},
)
)
transformers.append(
StructTransformer(
ctx,
field_makers={'edb.schema.objects.Field'},
)
)
elif any(c.fullname in STRUCT_BASE_METACLASSES for c in mcls_mro):
transformers.append(
StructTransformer(
ctx,
field_makers={'edb.common.struct.Field'},
)
)
elif any(c.fullname in AST_BASE_CLASSES for c in mro):
transformers.append(
ASTClassTransformer(
ctx,
)
)
for transformer in transformers:
transformer.transform()
class DeferException(Exception):
pass
class Field(NamedTuple):
name: str
has_explicit_accessor: bool
has_default: bool
line: int
column: int
type: types.Type
def to_argument(self) -> nodes.Argument:
result = nodes.Argument(
variable=self.to_var(),
type_annotation=self.type,
initializer=None,
kind=nodes.ARG_NAMED_OPT if self.has_default else nodes.ARG_NAMED,
)
return result
def to_var(self) -> nodes.Var:
return nodes.Var(self.name, self.type)
def serialize(self) -> nodes.JsonDict:
return {
'name': self.name,
'has_explicit_accessor': self.has_explicit_accessor,
'has_default': self.has_default,
'line': self.line,
'column': self.column,
'type': self.type.serialize(),
}
@classmethod
def deserialize(
cls,
api,
data: nodes.JsonDict,
) -> Field:
return cls(
name=data['name'],
has_explicit_accessor=data['has_explicit_accessor'],
has_default=data['has_default'],
line=data['line'],
column=data['column'],
type=mypy_helpers.deserialize_and_fixup_type(data['type'], api),
)
class BaseTransformer:
def __init__(
self,
ctx: mypy_plugin.ClassDefContext,
) -> None:
self._ctx = ctx
def transform(self):
ctx = self._ctx
metadata_key = self._get_metadata_key()
metadata = ctx.cls.info.metadata.get(metadata_key)
if not metadata:
ctx.cls.info.metadata[metadata_key] = metadata = {}
metadata['processing'] = True
if metadata.get('processed'):
return
try:
fields = self._transform()
except DeferException:
ctx.api.defer()
return None
metadata['fields'] = {f.name: f.serialize() for f in fields}
metadata['processed'] = True
def _transform(self) -> List[Field]:
raise NotImplementedError
def _field_from_field_def(
self,
stmt: nodes.AssignmentStmt,
name: nodes.NameExpr,
sym: nodes.SymbolTableNode,
) -> Optional[Field]:
raise NotImplementedError
def _collect_fields(self) -> List[Field]:
"""Collect all fields declared in a class and its ancestors."""
cls = self._ctx.cls
fields: List[Field] = []
known_fields: Set[str] = set()
for stmt in cls.defs.body:
if not isinstance(stmt, nodes.AssignmentStmt):
continue
lhs = stmt.lvalues[0]
if not isinstance(lhs, nodes.NameExpr):
continue
sym = cls.info.names.get(lhs.name)
if sym is None or isinstance(sym.node, nodes.PlaceholderNode):
# Not resolved yet?
continue
node = sym.node
assert isinstance(node, nodes.Var)
if node.is_classvar:
# Disregard ClassVar stuff
continue
field = self._field_from_field_def(stmt, lhs, sym)
if field is not None:
fields.append(field)
known_fields.add(field.name)
return self._get_inherited_fields(known_fields) + fields
def _lookup_type(self, fullname: str) -> types.Type:
ctx = self._ctx
type_sym = ctx.api.lookup_fully_qualified_or_none(fullname)
if type_sym is None:
raise DeferException
t: types.Type
if isinstance(type_sym.node, nodes.TypeInfo):
from mypy.typevars import fill_typevars
t = fill_typevars(type_sym.node)
elif type_sym.type:
t = type_sym.type
else:
ctx.api.fail(f'cannot find {fullname}', ctx.cls)
return t
def _get_metadata_key(self) -> str:
return f'{METADATA_KEY}%%{type(self).__name__}'
def _has_explicit_field_accessor(self, fieldname: str) -> bool:
cls = self._ctx.cls
accessor = cls.info.names.get(f'get_{fieldname}')
return accessor is not None and not accessor.plugin_generated
def _get_inherited_fields(self, self_fields: Set[str]) -> List[Field]:
ctx = self._ctx
cls = ctx.cls
all_fields: List[Field] = []
known_fields = set(self_fields)
for ancestor_info in cls.info.mro[1:-1]:
metadata = ancestor_info.metadata.get(self._get_metadata_key())
if metadata is None:
continue
elif not metadata.get('processed'):
raise DeferException
ancestor_fields = []
ctx.api.add_plugin_dependency(
mypy_trigger.make_wildcard_trigger(ancestor_info.fullname))
for name, data in metadata['fields'].items():
if name not in known_fields:
if self._has_explicit_field_accessor(name):
data = dict(data)
data['has_explicit_accessor'] = True
field = Field.deserialize(ctx.api, data)
known_fields.add(name)
ancestor_fields.append(field)
all_fields = ancestor_fields + all_fields
return all_fields
def _synthesize_init(self, fields: List[Field]) -> None:
ctx = self._ctx
cls_info = ctx.cls.info
# If our self type has placeholders (probably because of type
# var bounds), defer. If we skip deferring and stick something
# in our symbol table anyway, we'll get in trouble. (Arguably
# plugins.common ought to help us with this, but oh well.)
self_type = mypy_helpers.fill_typevars(cls_info)
if semanal.has_placeholder(self_type):
raise DeferException
if (
(
'__init__' not in cls_info.names
or cls_info.names['__init__'].plugin_generated
) and fields
):
mypy_helpers.add_method(
ctx,
'__init__',
self_type=self_type,
args=[field.to_argument() for field in fields],
return_type=types.NoneType(),
)
class BaseStructTransformer(BaseTransformer):
def __init__(
self,
ctx: mypy_plugin.ClassDefContext,
field_makers: AbstractSet[str],
) -> None:
super().__init__(ctx)
self._field_makers = field_makers
def _field_from_field_def(
self,
stmt: nodes.AssignmentStmt,
name: nodes.NameExpr,
sym: nodes.SymbolTableNode,
) -> Optional[Field]:
ctx = self._ctx
rhs = stmt.rvalue
if isinstance(rhs, nodes.CastExpr):
rhs = rhs.expr
if not isinstance(rhs, nodes.CallExpr):
return None
fdef = rhs.callee
ftype = None
if (
isinstance(fdef, nodes.IndexExpr)
and isinstance(fdef.analyzed, nodes.TypeApplication)
):
# Explicitly typed Field declaration
ctor = fdef.analyzed.expr
if len(fdef.analyzed.types) > 1:
ctx.api.fail('too many type arguments to Field', fdef)
ftype = fdef.analyzed.types[0]
else:
ctor = fdef
ftype = None
if (
not isinstance(ctor, nodes.RefExpr)
or ctor.fullname not in self._field_makers
):
return None
type_arg = rhs.args[0]
deflt = self._get_default(rhs)
if ftype is None:
try:
un_type = exprtotype.expr_to_unanalyzed_type(type_arg)
except exprtotype.TypeTranslationError:
ctx.api.fail('Cannot resolve schema field type', type_arg)
else:
ftype = ctx.api.anal_type(un_type)
if ftype is None:
raise DeferException
is_optional = (
isinstance(deflt, nodes.NameExpr)
and deflt.fullname == 'builtins.None'
)
if is_optional:
ftype = types.UnionType.make_union(
[ftype, types.NoneType()],
line=ftype.line,
column=ftype.column,
)
assert isinstance(name.node, nodes.Var)
name.node.type = ftype
return Field(
name=name.name,
has_explicit_accessor=self._has_explicit_field_accessor(name.name),
has_default=deflt is not None,
line=stmt.line,
column=stmt.column,
type=ftype,
)
def _get_default(self, call) -> Optional[nodes.Expression]:
for (n, v) in zip(call.arg_names, call.args):
if n == 'default':
return v
else:
return None
class StructTransformer(BaseStructTransformer):
def _transform(self) -> List[Field]:
fields = self._collect_fields()
self._synthesize_init(fields)
return fields
def _field_from_field_def(
self,
stmt: nodes.AssignmentStmt,
name: nodes.NameExpr,
sym: nodes.SymbolTableNode,
):
field = super()._field_from_field_def(stmt, name, sym)
if field is None:
return None
else:
assert isinstance(sym.node, nodes.Var)
sym.node.is_initialized_in_class = False
name.is_inferred_def = False
rhs = stmt.rvalue
if not isinstance(rhs, nodes.CastExpr):
stmt.rvalue = nodes.CastExpr(
typ=field.type,
expr=rhs,
)
stmt.rvalue.line = rhs.line
stmt.rvalue.column = rhs.column
return field
class SchemaClassTransformer(BaseStructTransformer):
def _transform(self) -> List[Field]:
ctx = self._ctx
fields = self._collect_fields()
schema_t = self._lookup_type('edb.schema.schema.Schema')
for f in fields:
if f.has_explicit_accessor:
continue
mypy_helpers.add_method(
ctx,
name=f'get_{f.name}',
args=[
nodes.Argument(
variable=nodes.Var(
name='schema',
type=schema_t,
),
type_annotation=schema_t,
initializer=None,
kind=nodes.ARG_POS,
),
],
return_type=f.type,
)
return fields
class ASTClassTransformer(BaseTransformer):
def _transform(self) -> List[Field]:
fields = self._collect_fields()
# NB: __init__ synthesis below brings up a vast number of
# typing errors which require AST definitions to be
# annotated with defaults properly and the code adjusted
# to handle Optional fields (historically we've been
# initializing container fields with empty lists/tuples).
# self._synthesize_init(fields)
return fields
def _field_from_field_def(
self,
stmt: nodes.AssignmentStmt,
name: nodes.NameExpr,
sym: nodes.SymbolTableNode,
) -> Optional[Field]:
if sym.type is None:
# No type annotation?
return None
else:
has_default = not isinstance(stmt.rvalue, nodes.TempNode)
if not has_default:
sym.implicit = True
return Field(
name=name.name,
has_default=has_default,
line=stmt.line,
column=stmt.column,
type=sym.type,
has_explicit_accessor=False,
)
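# Usage sketch: mypy plugins such as this one are enabled through the mypy
# configuration file; the module path below is an assumption, not necessarily
# the location used by the EdgeDB repository.
#
#   # mypy.ini
#   [mypy]
#   plugins = edb/tools/mypy/plugin.py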
|
gyp
|
1a5690786ca65133dc94f3c216ca2f4e24788128
|
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
{
'variables': {
'chromium_code': 1,
'linux_link_kerberos%': 0,
# Enables BidirectionalStream; used in cronet, disabled by default.
'enable_bidirectional_stream%': 0,
'conditions': [
['chromeos==1 or embedded==1 or OS=="ios"', {
# Disable Kerberos on ChromeOS and iOS, at least for now.
# It needs configuration (krb5.conf and so on).
'use_kerberos%': 0,
}, { # chromeos == 0 and embedded==0 and OS!="ios"
'use_kerberos%': 1,
}],
['OS=="android" and target_arch != "ia32"', {
# The way the cache uses mmap() is inefficient on some Android devices.
# If this flag is set, we hackily avoid using mmap() in the disk cache.
# We are pretty confident that mmap-ing the index would not hurt any
# existing x86 android devices, but we cannot be so sure about the
# variety of ARM devices. So enable it for x86 only for now.
'posix_avoid_mmap%': 1,
}, {
'posix_avoid_mmap%': 0,
}],
['OS=="ios"', {
# Websockets and socket stream are not used on iOS.
'enable_websockets%': 0,
# iOS does not use V8.
'use_v8_in_net%': 0,
'enable_built_in_dns%': 0,
}, {
'enable_websockets%': 1,
'use_v8_in_net%': 1,
'enable_built_in_dns%': 1,
}],
],
},
'includes': [
'../build/win_precompile.gypi',
'net.gypi',
],
'targets': [
{
'target_name': 'net_derived_sources',
'type': 'none',
'sources': [
'base/registry_controlled_domains/effective_tld_names.gperf',
'base/registry_controlled_domains/effective_tld_names_unittest1.gperf',
'base/registry_controlled_domains/effective_tld_names_unittest2.gperf',
'base/registry_controlled_domains/effective_tld_names_unittest3.gperf',
'base/registry_controlled_domains/effective_tld_names_unittest4.gperf',
'base/registry_controlled_domains/effective_tld_names_unittest5.gperf',
'base/registry_controlled_domains/effective_tld_names_unittest6.gperf',
'base/stale_while_revalidate_experiment_domains.gperf',
],
'rules': [
{
'rule_name': 'dafsa',
'extension': 'gperf',
'outputs': [
'<(SHARED_INTERMEDIATE_DIR)/net/<(RULE_INPUT_DIRNAME)/<(RULE_INPUT_ROOT)-inc.cc',
],
'inputs': [
'tools/dafsa/make_dafsa.py',
],
'action': [
'python',
'tools/dafsa/make_dafsa.py',
'<(RULE_INPUT_PATH)',
'<(SHARED_INTERMEDIATE_DIR)/net/<(RULE_INPUT_DIRNAME)/<(RULE_INPUT_ROOT)-inc.cc',
],
},
],
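# For reference, the rule above amounts to running, per .gperf input (paths
# illustrative):
#   python tools/dafsa/make_dafsa.py effective_tld_names.gperf \
#       <gen>/net/base/registry_controlled_domains/effective_tld_names-inc.cc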
'direct_dependent_settings': {
'include_dirs': [
'<(SHARED_INTERMEDIATE_DIR)'
],
},
},
{
# Protobuf compiler / generator for QUIC crypto protocol buffer.
# GN version: //net:net_quic_proto
'target_name': 'net_quic_proto',
'type': 'static_library',
'sources': [
'quic/proto/cached_network_parameters.proto',
'quic/proto/source_address_token.proto',
],
'variables': {
'enable_wexit_time_destructors': 1,
'proto_in_dir': 'quic/proto',
'proto_out_dir': 'net/quic/proto',
'cc_generator_options': 'dllexport_decl=NET_EXPORT_PRIVATE:',
'cc_include': 'net/base/net_export.h',
},
'includes': [
'../build/protoc.gypi',
],
'defines': [
'NET_IMPLEMENTATION',
],
},
{
# GN version: //net
'target_name': 'net',
'dependencies': [
'../base/base.gyp:base_i18n',
'../third_party/brotli/brotli.gyp:brotli',
'../third_party/icu/icu.gyp:icui18n',
'../third_party/icu/icu.gyp:icuuc',
'../third_party/protobuf/protobuf.gyp:protobuf_lite',
'../url/url.gyp:url_lib',
'net_features',
'net_quic_proto',
],
'sources': [
'base/filename_util_icu.cc',
'base/net_string_util_icu.cc',
'filter/brotli_filter.cc',
],
'includes': [ 'net_common.gypi' ],
},
{
# GN version: //net:features
'target_name': 'net_features',
'includes': [ '../build/buildflag_header.gypi' ],
'variables': {
'buildflag_header_path': 'net/net_features.h',
'buildflag_flags': [
'ENABLE_BIDIRECTIONAL_STREAM=<(enable_bidirectional_stream)',
],
},
},
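# The net/net_features.h header generated by the net_features target above is
# consumed from C++ via buildflag macros, e.g. (sketch of the usual Chromium
# pattern):
#   #include "net/net_features.h"
#   #if BUILDFLAG(ENABLE_BIDIRECTIONAL_STREAM)
#     ...
#   #endif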
{
# GN version: //net:net_unittests
'target_name': 'net_unittests',
'type': '<(gtest_target_type)',
'dependencies': [
'../base/base.gyp:base',
'../base/base.gyp:base_i18n',
'../base/base.gyp:base_prefs_test_support',
'../base/third_party/dynamic_annotations/dynamic_annotations.gyp:dynamic_annotations',
'../crypto/crypto.gyp:crypto',
'../crypto/crypto.gyp:crypto_test_support',
'../testing/gmock.gyp:gmock',
'../testing/gtest.gyp:gtest',
'../third_party/zlib/zlib.gyp:zlib',
'../url/url.gyp:url_lib',
'balsa',
'http_server',
'net',
'net_quic_proto',
'net_derived_sources',
'net_extras',
'net_test_support',
'simple_quic_tools',
'stale_while_revalidate_experiment_domains',
],
'sources': [
'<@(net_test_sources)',
],
'conditions': [
['os_posix == 1 and OS != "mac" and OS != "ios" and OS != "android"', {
'dependencies': [
'epoll_quic_tools',
'epoll_server',
'flip_in_mem_edsm_server_base',
],
'sources': [
'<@(net_linux_test_sources)',
],
}],
['OS == "mac" or OS == "ios"', {
'sources': [
'<@(net_base_test_mac_ios_sources)',
],
}],
['chromeos==1', {
'sources!': [
'proxy/proxy_config_service_linux_unittest.cc',
],
}],
[ 'OS == "android"', {
'sources!': [
# See bug http://crbug.com/344533.
'disk_cache/blockfile/index_table_v3_unittest.cc',
],
'dependencies': [
'net_javatests',
],
}],
[ 'use_nss_certs != 1', {
'sources!': [
'cert/nss_cert_database_unittest.cc',
'cert/nss_cert_database_chromeos_unittest.cc',
'cert/nss_profile_filter_chromeos_unittest.cc',
'ssl/client_cert_store_nss_unittest.cc',
],
}],
[ 'use_openssl == 1', {
# Avoid compiling/linking with the system library.
'dependencies': [
'../third_party/boringssl/boringssl.gyp:boringssl',
],
}],
[ 'use_nss_certs == 1 or OS == "ios" or use_openssl == 0', {
'conditions': [
[ 'desktop_linux == 1 or chromeos == 1', {
'dependencies': [
'../build/linux/system.gyp:ssl',
],
}, { # desktop_linux == 0 and chromeos == 0
'dependencies': [
'../third_party/nss/nss.gyp:nspr',
'../third_party/nss/nss.gyp:nss',
'third_party/nss/ssl.gyp:libssl',
],
}],
],
}],
[ 'os_posix == 1 and OS != "mac" and OS != "android" and OS != "ios"', {
'conditions': [
['use_allocator!="none"', {
'dependencies': [
'../base/allocator/allocator.gyp:allocator',
],
}],
],
}],
[ 'use_kerberos==1', {
'defines': [
'USE_KERBEROS',
],
}],
[ 'use_kerberos==0 or OS == "android"', {
# These are excluded on Android, because the actual Kerberos support,
# which these test, is in a separate app on Android.
'sources!': [
'http/http_auth_gssapi_posix_unittest.cc',
'http/mock_gssapi_library_posix.cc',
'http/mock_gssapi_library_posix.h',
],
}],
[ 'use_kerberos==0', {
'sources!': [
'http/http_auth_handler_negotiate_unittest.cc',
],
}],
[ 'use_nss_certs == 0 and OS != "ios"', {
# Only include this test when using system NSS for cert verification
# or on iOS (which also uses NSS for certs).
'sources!': [
'cert_net/nss_ocsp_unittest.cc',
],
}],
[ 'use_openssl==1', {
'sources!': [
'quic/test_tools/crypto_test_utils_nss.cc',
],
}, { # else !use_openssl: remove the unneeded files and pull in NSS.
'sources!': [
'quic/test_tools/crypto_test_utils_openssl.cc',
'socket/ssl_client_socket_openssl_unittest.cc',
'ssl/ssl_client_session_cache_openssl_unittest.cc',
],
},
],
[ 'use_openssl_certs == 0', {
'sources!': [
'ssl/openssl_client_key_store_unittest.cc',
],
}],
[ 'enable_websockets != 1', {
'sources/': [
['exclude', '^websockets/'],
['exclude', '^server/'],
],
'dependencies!': [
'http_server',
],
}],
['disable_file_support==1', {
'sources!': [
'base/directory_lister_unittest.cc',
'base/directory_listing_unittest.cc',
'url_request/url_request_file_job_unittest.cc',
],
}],
[ 'disable_ftp_support==1', {
'sources/': [
['exclude', '^ftp/'],
],
'sources!': [
'url_request/url_request_ftp_job_unittest.cc',
],
},
],
[ 'enable_bidirectional_stream!=1', {
'sources!': [
'http/bidirectional_stream_unittest.cc',
],
},
],
[ 'enable_built_in_dns!=1', {
'sources!': [
'dns/address_sorter_posix_unittest.cc',
'dns/address_sorter_unittest.cc',
],
},
],
[ 'use_v8_in_net==1', {
'dependencies': [
'net_with_v8',
],
}, { # else: !use_v8_in_net
'sources!': [
'proxy/proxy_resolver_v8_tracing_unittest.cc',
'proxy/proxy_resolver_v8_tracing_wrapper_unittest.cc',
'proxy/proxy_resolver_v8_unittest.cc',
],
},
],
[ 'use_v8_in_net==1 and OS != "android"', {
'dependencies': [
'net_with_v8',
'net_browser_services',
'net_utility_services',
'../third_party/mojo/mojo_edk.gyp:mojo_system_impl',
],
}, { # else
'sources!': [
'dns/host_resolver_mojo_unittest.cc',
'dns/mojo_host_resolver_impl_unittest.cc',
'proxy/mojo_proxy_resolver_factory_impl_unittest.cc',
'proxy/mojo_proxy_resolver_impl_unittest.cc',
'proxy/mojo_proxy_resolver_v8_tracing_bindings_unittest.cc',
'proxy/proxy_resolver_factory_mojo_unittest.cc',
'proxy/proxy_service_mojo_unittest.cc',
],
},
],
[ 'enable_mdns != 1', {
'sources!' : [
'dns/mdns_cache_unittest.cc',
'dns/mdns_client_unittest.cc',
'dns/mdns_query_unittest.cc',
'dns/record_parsed_unittest.cc',
'dns/record_rdata_unittest.cc',
],
}],
[ 'OS == "win"', {
'sources!': [
'dns/dns_config_service_posix_unittest.cc',
'http/http_auth_gssapi_posix_unittest.cc',
],
# TODO(jschuh): crbug.com/167187 fix size_t to int truncations.
'msvs_disabled_warnings': [4267, ],
'conditions': [
[ 'icu_use_data_file_flag == 0', {
# This is needed to trigger the dll copy step on windows.
# TODO(mark): Specifying this here shouldn't be necessary.
'dependencies': [
'../third_party/icu/icu.gyp:icudata',
],
}],
],
},
],
[ 'OS == "ios"', {
'actions': [
{
'action_name': 'copy_test_data',
'variables': {
'test_data_files': [
'data/certificate_policies_unittest/',
'data/name_constraints_unittest/',
'data/parse_certificate_unittest/',
'data/ssl/certificates/',
'data/test.html',
'data/url_request_unittest/',
'data/verify_certificate_chain_unittest/',
'data/verify_name_match_unittest/names/',
],
'test_data_prefix': 'net',
},
'includes': [ '../build/copy_test_data_ios.gypi' ],
},
],
'sources!': [
# TODO(droger): The following tests are disabled because the
# implementation is missing or incomplete.
# KeygenHandler::GenKeyAndSignChallenge() is not ported to iOS.
'base/keygen_handler_unittest.cc',
'disk_cache/backend_unittest.cc',
'disk_cache/blockfile/block_files_unittest.cc',
# Need to read input data files.
'filter/brotli_filter_unittest.cc',
'filter/gzip_filter_unittest.cc',
# Need TestServer.
"cert_net/cert_net_fetcher_impl_unittest.cc",
'proxy/proxy_script_fetcher_impl_unittest.cc',
'socket/ssl_client_socket_unittest.cc',
'socket/ssl_server_socket_unittest.cc',
'spdy/fuzzing/hpack_fuzz_util_test.cc',
# Needs GetAppOutput().
'test/python_utils_unittest.cc',
'url_request/url_fetcher_impl_unittest.cc',
'url_request/url_request_context_builder_unittest.cc',
# The following tests are disabled because they don't apply to
# iOS.
# OS is not "linux" or "freebsd" or "openbsd".
'socket/unix_domain_client_socket_posix_unittest.cc',
'socket/unix_domain_server_socket_posix_unittest.cc',
# See bug http://crbug.com/344533.
'disk_cache/blockfile/index_table_v3_unittest.cc',
],
}],
['OS == "android"', {
# TODO(mmenke): This depends on test_support_base, which depends on
# icu. Figure out a way to remove that dependency.
'dependencies': [
'../testing/android/native_test.gyp:native_test_native_code',
]
}],
['use_v8_in_net==1 and v8_use_external_startup_data==1', {
'dependencies': [
'../gin/gin.gyp:gin',
]
}],
],
'target_conditions': [
# These source files are excluded by default platform rules, but they
# are needed in specific cases on other platforms. Re-including them can
# only be done in target_conditions as it is evaluated after the
# platform rules.
['OS == "android"', {
'sources/': [
['include', '^base/address_tracker_linux_unittest\\.cc$'],
],
}],
['OS == "ios"', {
'sources/': [
['include', '^base/mac/url_conversions_unittest\\.mm$'],
],
}],
],
},
{
'target_name': 'net_perftests',
'type': 'executable',
'dependencies': [
'../base/base.gyp:base',
'../base/base.gyp:base_i18n',
'../base/base.gyp:test_support_perf',
'../testing/gtest.gyp:gtest',
'../url/url.gyp:url_lib',
'net',
'net_extras',
'net_test_support',
],
'sources': [
'base/mime_sniffer_perftest.cc',
'cookies/cookie_monster_perftest.cc',
'disk_cache/blockfile/disk_cache_perftest.cc',
'extras/sqlite/sqlite_persistent_cookie_store_perftest.cc',
'proxy/proxy_resolver_perftest.cc',
'udp/udp_socket_perftest.cc',
'websockets/websocket_frame_perftest.cc',
],
'conditions': [
[ 'use_v8_in_net==1', {
'dependencies': [
'net_with_v8',
],
}, { # else: !use_v8_in_net
'sources!': [
'proxy/proxy_resolver_perftest.cc',
],
},
],
[ 'OS == "win"', {
'conditions': [
[ 'icu_use_data_file_flag == 0', {
# This is needed to trigger the dll copy step on windows.
# TODO(mark): Specifying this here shouldn't be necessary.
'dependencies': [
'../third_party/icu/icu.gyp:icudata',
],
}],
],
# TODO(jschuh): crbug.com/167187 fix size_t to int truncations.
'msvs_disabled_warnings': [4267, ],
}],
[ 'enable_websockets != 1', {
'sources!': [
'websockets/websocket_frame_perftest.cc',
],
}],
],
},
{
'target_name': 'net_test_support',
'type': 'static_library',
'dependencies': [
'../base/base.gyp:base',
'../base/base.gyp:test_support_base',
'../crypto/crypto.gyp:crypto',
'../net/tools/tld_cleanup/tld_cleanup.gyp:tld_cleanup_util',
'../testing/gtest.gyp:gtest',
'../testing/gmock.gyp:gmock',
'../url/url.gyp:url_lib',
'net',
],
'export_dependent_settings': [
'../base/base.gyp:base',
# TODO(mmenke): This depends on icu, figure out a way to build tests
# without icu.
'../base/base.gyp:test_support_base',
'../crypto/crypto.gyp:crypto',
'../testing/gtest.gyp:gtest',
'../testing/gmock.gyp:gmock',
],
'sources': [
'base/load_timing_info_test_util.cc',
'base/load_timing_info_test_util.h',
'base/mock_file_stream.cc',
'base/mock_file_stream.h',
'base/test_completion_callback.cc',
'base/test_completion_callback.h',
'base/test_data_directory.cc',
'base/test_data_directory.h',
'cert/mock_cert_verifier.cc',
'cert/mock_cert_verifier.h',
'cookies/cookie_monster_store_test.cc',
'cookies/cookie_monster_store_test.h',
'cookies/cookie_store_test_callbacks.cc',
'cookies/cookie_store_test_callbacks.h',
'cookies/cookie_store_test_helpers.cc',
'cookies/cookie_store_test_helpers.h',
'cookies/cookie_store_unittest.h',
'disk_cache/disk_cache_test_base.cc',
'disk_cache/disk_cache_test_base.h',
'disk_cache/disk_cache_test_util.cc',
'disk_cache/disk_cache_test_util.h',
'dns/dns_test_util.cc',
'dns/dns_test_util.h',
'dns/mock_host_resolver.cc',
'dns/mock_host_resolver.h',
'dns/mock_mdns_socket_factory.cc',
'dns/mock_mdns_socket_factory.h',
'http/http_transaction_test_util.cc',
'http/http_transaction_test_util.h',
'log/test_net_log.cc',
'log/test_net_log.h',
'log/test_net_log_entry.cc',
'log/test_net_log_entry.h',
'log/test_net_log_util.cc',
'log/test_net_log_util.h',
'proxy/mock_proxy_resolver.cc',
'proxy/mock_proxy_resolver.h',
'proxy/mock_proxy_script_fetcher.cc',
'proxy/mock_proxy_script_fetcher.h',
'proxy/proxy_config_service_common_unittest.cc',
'proxy/proxy_config_service_common_unittest.h',
'socket/socket_test_util.cc',
'socket/socket_test_util.h',
'test/cert_test_util.cc',
'test/cert_test_util.h',
'test/cert_test_util_nss.cc',
'test/channel_id_test_util.cc',
'test/channel_id_test_util.h',
'test/ct_test_util.cc',
'test/ct_test_util.h',
'test/embedded_test_server/default_handlers.cc',
'test/embedded_test_server/default_handlers.h',
'test/embedded_test_server/embedded_test_server.cc',
'test/embedded_test_server/embedded_test_server.h',
'test/embedded_test_server/http_connection.cc',
'test/embedded_test_server/http_connection.h',
'test/embedded_test_server/http_request.cc',
'test/embedded_test_server/http_request.h',
'test/embedded_test_server/http_response.cc',
'test/embedded_test_server/http_response.h',
'test/embedded_test_server/request_handler_util.cc',
'test/embedded_test_server/request_handler_util.h',
'test/event_waiter.h',
'test/net_test_suite.cc',
'test/net_test_suite.h',
'test/python_utils.cc',
'test/python_utils.h',
'test/spawned_test_server/base_test_server.cc',
'test/spawned_test_server/base_test_server.h',
'test/spawned_test_server/local_test_server.cc',
'test/spawned_test_server/local_test_server.h',
'test/spawned_test_server/local_test_server_posix.cc',
'test/spawned_test_server/local_test_server_win.cc',
'test/spawned_test_server/spawned_test_server.h',
'test/test_certificate_data.h',
'test/url_request/ssl_certificate_error_job.cc',
'test/url_request/ssl_certificate_error_job.h',
'test/url_request/url_request_failed_job.cc',
'test/url_request/url_request_failed_job.h',
'test/url_request/url_request_mock_data_job.cc',
'test/url_request/url_request_mock_data_job.h',
'test/url_request/url_request_slow_download_job.cc',
'test/url_request/url_request_slow_download_job.h',
'url_request/test_url_fetcher_factory.cc',
'url_request/test_url_fetcher_factory.h',
'url_request/url_request_test_util.cc',
'url_request/url_request_test_util.h',
],
'conditions': [
['OS != "ios"', {
'dependencies': [
'../third_party/protobuf/protobuf.gyp:py_proto',
],
}, {
'sources!': [
'test/spawned_test_server/base_test_server.cc',
'test/spawned_test_server/base_test_server.h',
'test/spawned_test_server/local_test_server.cc',
'test/spawned_test_server/local_test_server.h',
'test/spawned_test_server/local_test_server_posix.cc',
'test/spawned_test_server/local_test_server_win.cc',
'test/spawned_test_server/spawned_test_server.h',
],
}],
['use_nss_certs == 1 or OS == "ios"', {
'conditions': [
[ 'desktop_linux == 1 or chromeos == 1', {
'dependencies': [
'../build/linux/system.gyp:ssl',
],
}, { # desktop_linux == 0 and chromeos == 0
'dependencies': [
'../third_party/nss/nss.gyp:nspr',
'../third_party/nss/nss.gyp:nss',
'third_party/nss/ssl.gyp:libssl',
],
}],
],
}],
['os_posix == 1 and OS != "mac" and OS != "android" and OS != "ios"', {
'conditions': [
['use_allocator!="none"', {
'dependencies': [
'../base/allocator/allocator.gyp:allocator',
],
}],
],
}],
['OS == "android"', {
'dependencies': [
'net_test_jni_headers',
],
'sources': [
'test/embedded_test_server/android/embedded_test_server_android.cc',
'test/embedded_test_server/android/embedded_test_server_android.h',
'test/spawned_test_server/remote_test_server.cc',
'test/spawned_test_server/remote_test_server.h',
'test/spawned_test_server/spawner_communicator.cc',
'test/spawned_test_server/spawner_communicator.h',
],
}],
[ 'use_v8_in_net==1', {
'dependencies': [
'net_with_v8',
],
},
],
[ 'enable_mdns != 1', {
'sources!' : [
'dns/mock_mdns_socket_factory.cc',
'dns/mock_mdns_socket_factory.h'
]
}],
[ 'use_nss_certs != 1', {
'sources!': [
'test/cert_test_util_nss.cc',
],
}],
['disable_file_support != 1', {
'sources': [
'test/url_request/url_request_mock_http_job.cc',
'test/url_request/url_request_mock_http_job.h',
'url_request/test_url_request_interceptor.cc',
'url_request/test_url_request_interceptor.h',
],
}],
],
# TODO(jschuh): crbug.com/167187 fix size_t to int truncations.
'msvs_disabled_warnings': [4267, ],
},
{
'target_name': 'net_resources',
'type': 'none',
'variables': {
'grit_out_dir': '<(SHARED_INTERMEDIATE_DIR)/net',
},
'actions': [
{
'action_name': 'net_resources',
'variables': {
'grit_grd_file': 'base/net_resources.grd',
},
'includes': [ '../build/grit_action.gypi' ],
},
],
},
{
'target_name': 'net_extras',
'type': 'static_library',
'variables': { 'enable_wexit_time_destructors': 1, },
'dependencies': [
'../base/base.gyp:base',
'../sql/sql.gyp:sql',
'net',
],
'sources': [
'<@(net_extras_sources)',
],
},
{
'target_name': 'net_docs',
'type': 'none',
'actions': [
{
'action_name': 'net_docs',
'variables': {
'net_docs_input_dir': '.',
},
'inputs': [
'<@(net_docs_sources)',
],
'outputs': [
'<(net_docs_output_dir)',
],
'action': [
'python',
'<(net_docs_script)',
'--input_path',
'<(net_docs_input_dir)',
'--output_path',
'<(net_docs_output_dir)',
'<@(net_docs_sources)',
],
'message': 'Rendering network stack documentation',
}
],
},
{
'target_name': 'http_server',
'type': 'static_library',
'variables': { 'enable_wexit_time_destructors': 1, },
'dependencies': [
'../base/base.gyp:base',
'net',
],
'sources': [
'server/http_connection.cc',
'server/http_connection.h',
'server/http_server.cc',
'server/http_server.h',
'server/http_server_request_info.cc',
'server/http_server_request_info.h',
'server/http_server_response_info.cc',
'server/http_server_response_info.h',
'server/web_socket.cc',
'server/web_socket.h',
'server/web_socket_encoder.cc',
'server/web_socket_encoder.h',
],
# TODO(jschuh): crbug.com/167187 fix size_t to int truncations.
'msvs_disabled_warnings': [4267, ],
},
{ # GN version: //net:balsa
'target_name': 'balsa',
'type': 'static_library',
'dependencies': [
'../base/base.gyp:base',
'net',
],
'sources': [
'tools/balsa/balsa_enums.h',
'tools/balsa/balsa_frame.cc',
'tools/balsa/balsa_frame.h',
'tools/balsa/balsa_headers.cc',
'tools/balsa/balsa_headers.h',
'tools/balsa/balsa_headers_token_utils.cc',
'tools/balsa/balsa_headers_token_utils.h',
'tools/balsa/balsa_visitor_interface.h',
'tools/balsa/http_message_constants.cc',
'tools/balsa/http_message_constants.h',
'tools/balsa/noop_balsa_visitor.h',
'tools/balsa/simple_buffer.cc',
'tools/balsa/simple_buffer.h',
'tools/balsa/split.cc',
'tools/balsa/split.h',
'tools/balsa/string_piece_utils.h',
'tools/quic/spdy_balsa_utils.cc',
'tools/quic/spdy_balsa_utils.h',
],
},
{
'target_name': 'dump_cache',
'type': 'executable',
'dependencies': [
'../base/base.gyp:base',
'net',
'net_test_support',
],
'sources': [
'tools/dump_cache/dump_cache.cc',
'tools/dump_cache/dump_files.cc',
'tools/dump_cache/dump_files.h',
],
# TODO(jschuh): crbug.com/167187 fix size_t to int truncations.
'msvs_disabled_warnings': [4267, ],
},
{
'target_name': 'simple_quic_tools',
'type': 'static_library',
'dependencies': [
'../base/base.gyp:base',
'../base/third_party/dynamic_annotations/dynamic_annotations.gyp:dynamic_annotations',
'../url/url.gyp:url_lib',
'net',
'net_quic_proto',
],
'sources': [
'tools/quic/quic_client_base.cc',
'tools/quic/quic_client_base.h',
'tools/quic/quic_client_session.cc',
'tools/quic/quic_client_session.h',
'tools/quic/quic_dispatcher.cc',
'tools/quic/quic_dispatcher.h',
'tools/quic/quic_in_memory_cache.cc',
'tools/quic/quic_in_memory_cache.h',
'tools/quic/quic_per_connection_packet_writer.cc',
'tools/quic/quic_per_connection_packet_writer.h',
'tools/quic/quic_server_session_base.cc',
'tools/quic/quic_server_session_base.h',
'tools/quic/quic_simple_client.cc',
'tools/quic/quic_simple_client.h',
'tools/quic/quic_simple_per_connection_packet_writer.cc',
'tools/quic/quic_simple_per_connection_packet_writer.h',
'tools/quic/quic_simple_server.cc',
'tools/quic/quic_simple_server.h',
'tools/quic/quic_simple_server_packet_writer.cc',
'tools/quic/quic_simple_server_packet_writer.h',
'tools/quic/quic_simple_server_session.cc',
'tools/quic/quic_simple_server_session.h',
'tools/quic/quic_spdy_client_stream.cc',
'tools/quic/quic_spdy_client_stream.h',
'tools/quic/quic_simple_server_stream.cc',
'tools/quic/quic_simple_server_stream.h',
'tools/quic/quic_time_wait_list_manager.cc',
'tools/quic/quic_time_wait_list_manager.h',
'tools/quic/synchronous_host_resolver.cc',
'tools/quic/synchronous_host_resolver.h',
],
},
{
# GN version: //net:stale_while_revalidate_experiment_domains
'target_name': 'stale_while_revalidate_experiment_domains',
'type': 'static_library',
'dependencies': [
'../base/base.gyp:base',
'net',
'net_derived_sources',
],
'sources': [
'base/stale_while_revalidate_experiment_domains.cc',
'base/stale_while_revalidate_experiment_domains.h',
],
},
],
'conditions': [
['use_v8_in_net == 1', {
'targets': [
{
'target_name': 'net_with_v8',
'type': '<(component)',
'variables': { 'enable_wexit_time_destructors': 1, },
'dependencies': [
'../base/base.gyp:base',
'../gin/gin.gyp:gin',
'../url/url.gyp:url_lib',
'../v8/tools/gyp/v8.gyp:v8',
'net'
],
'defines': [
'NET_IMPLEMENTATION',
],
'sources': [
'proxy/proxy_resolver_v8.cc',
'proxy/proxy_resolver_v8.h',
'proxy/proxy_resolver_v8_tracing.cc',
'proxy/proxy_resolver_v8_tracing.h',
'proxy/proxy_resolver_v8_tracing_wrapper.cc',
'proxy/proxy_resolver_v8_tracing_wrapper.h',
'proxy/proxy_service_v8.cc',
'proxy/proxy_service_v8.h',
],
# TODO(jschuh): crbug.com/167187 fix size_t to int truncations.
'msvs_disabled_warnings': [4267, ],
},
],
}],
['use_v8_in_net == 1 and OS != "android"', {
'targets': [
{
# GN version: //net/interfaces
'target_name': 'net_interfaces',
'type': 'static_library',
'sources': [
'interfaces/host_resolver_service.mojom',
'interfaces/proxy_resolver_service.mojom',
],
'includes': [
'../third_party/mojo/mojom_bindings_generator.gypi',
],
},
{
# GN version: //net:net_browser_services
'target_name': 'net_browser_services',
'type': 'static_library',
'sources': [
'dns/mojo_host_resolver_impl.cc',
'dns/mojo_host_resolver_impl.h',
'proxy/in_process_mojo_proxy_resolver_factory.cc',
'proxy/in_process_mojo_proxy_resolver_factory.h',
'proxy/mojo_proxy_resolver_factory.h',
'proxy/proxy_resolver_factory_mojo.cc',
'proxy/proxy_resolver_factory_mojo.h',
'proxy/proxy_service_mojo.cc',
'proxy/proxy_service_mojo.h',
],
'dependencies': [
'mojo_type_converters',
'net',
'net_interfaces',
'../mojo/mojo_base.gyp:mojo_common_lib',
'../mojo/mojo_base.gyp:mojo_environment_chromium',
'../mojo/mojo_base.gyp:mojo_url_type_converters',
'../third_party/mojo/mojo_public.gyp:mojo_cpp_bindings',
# NOTE(amistry): As long as we support in-process Mojo v8 PAC, we
# need this dependency since in_process_mojo_proxy_resolver_factory
# creates the utility process side Mojo services in the browser
# process. Ultimately, this will go away when we only support
# out-of-process.
'net_utility_services',
],
},
{
# GN version: //net:net_utility_services
'target_name': 'net_utility_services',
'type': 'static_library',
'sources': [
'dns/host_resolver_mojo.cc',
'dns/host_resolver_mojo.h',
'proxy/mojo_proxy_resolver_factory_impl.cc',
'proxy/mojo_proxy_resolver_factory_impl.h',
'proxy/mojo_proxy_resolver_impl.cc',
'proxy/mojo_proxy_resolver_impl.h',
'proxy/mojo_proxy_resolver_v8_tracing_bindings.h',
],
'dependencies': [
'mojo_type_converters',
'net_interfaces',
'net_with_v8',
'../mojo/mojo_base.gyp:mojo_url_type_converters',
'../third_party/mojo/mojo_public.gyp:mojo_cpp_bindings',
],
},
{
# GN version: //net:mojo_type_converters
'target_name': 'mojo_type_converters',
'type': 'static_library',
'sources': [
'dns/mojo_host_type_converters.cc',
'dns/mojo_host_type_converters.h',
'proxy/mojo_proxy_type_converters.cc',
'proxy/mojo_proxy_type_converters.h',
],
'dependencies': [
'net',
'net_interfaces',
'../third_party/mojo/mojo_public.gyp:mojo_cpp_bindings',
],
},
],
}],
['OS != "ios" and OS != "android"', {
'targets': [
# iOS doesn't have the concept of simple executables, these targets
# can't be compiled on the platform.
{
'target_name': 'crash_cache',
'type': 'executable',
'dependencies': [
'../base/base.gyp:base',
'net',
'net_test_support',
],
'sources': [
'tools/crash_cache/crash_cache.cc',
],
# TODO(jschuh): crbug.com/167187 fix size_t to int truncations.
'msvs_disabled_warnings': [4267, ],
},
{
'target_name': 'crl_set_dump',
'type': 'executable',
'dependencies': [
'../base/base.gyp:base',
'net',
],
'sources': [
'tools/crl_set_dump/crl_set_dump.cc',
],
# TODO(jschuh): crbug.com/167187 fix size_t to int truncations.
'msvs_disabled_warnings': [4267, ],
},
{
'target_name': 'dns_fuzz_stub',
'type': 'executable',
'dependencies': [
'../base/base.gyp:base',
'net',
],
'sources': [
'tools/dns_fuzz_stub/dns_fuzz_stub.cc',
],
# TODO(jschuh): crbug.com/167187 fix size_t to int truncations.
'msvs_disabled_warnings': [4267, ],
},
{
'target_name': 'gdig',
'type': 'executable',
'dependencies': [
'../base/base.gyp:base',
'net',
],
'sources': [
'tools/gdig/file_net_log.cc',
'tools/gdig/gdig.cc',
],
},
{
'target_name': 'get_server_time',
'type': 'executable',
'dependencies': [
'../base/base.gyp:base',
'../base/base.gyp:base_i18n',
'../url/url.gyp:url_lib',
'net',
],
'sources': [
'tools/get_server_time/get_server_time.cc',
],
# TODO(jschuh): crbug.com/167187 fix size_t to int truncations.
'msvs_disabled_warnings': [4267, ],
},
{
'target_name': 'hpack_example_generator',
'type': 'executable',
'dependencies': [
'../base/base.gyp:base',
'net',
],
'sources': [
'spdy/fuzzing/hpack_example_generator.cc',
],
# TODO(jschuh): crbug.com/167187 fix size_t to int truncations.
'msvs_disabled_warnings': [4267, ],
},
{
'target_name': 'hpack_fuzz_mutator',
'type': 'executable',
'dependencies': [
'../base/base.gyp:base',
'net',
],
'sources': [
'spdy/fuzzing/hpack_fuzz_mutator.cc',
],
# TODO(jschuh): crbug.com/167187 fix size_t to int truncations.
'msvs_disabled_warnings': [4267, ],
},
{
'target_name': 'hpack_fuzz_wrapper',
'type': 'executable',
'dependencies': [
'../base/base.gyp:base',
'net',
],
'sources': [
'spdy/fuzzing/hpack_fuzz_wrapper.cc',
],
# TODO(jschuh): crbug.com/167187 fix size_t to int truncations.
'msvs_disabled_warnings': [4267, ],
},
{
'target_name': 'net_watcher',
'type': 'executable',
'dependencies': [
'../base/base.gyp:base',
'net',
'net_with_v8',
],
'conditions': [
[ 'use_glib == 1', {
'dependencies': [
'../build/linux/system.gyp:gconf',
'../build/linux/system.gyp:gio',
],
},
],
],
'sources': [
'tools/net_watcher/net_watcher.cc',
],
},
{
'target_name': 'run_testserver',
'type': 'executable',
'dependencies': [
'../base/base.gyp:base',
'../base/base.gyp:test_support_base',
'../testing/gtest.gyp:gtest',
'net_test_support',
],
'sources': [
'tools/testserver/run_testserver.cc',
],
},
{
'target_name': 'quic_client',
'type': 'executable',
'dependencies': [
'../base/base.gyp:base',
'../url/url.gyp:url_lib',
'net',
'simple_quic_tools',
],
'sources': [
'tools/quic/quic_simple_client_bin.cc',
],
},
{
'target_name': 'quic_server',
'type': 'executable',
'dependencies': [
'../base/base.gyp:base',
'net',
'net_quic_proto',
'simple_quic_tools',
],
'sources': [
'tools/quic/quic_simple_server_bin.cc',
],
},
{
'target_name': 'stress_cache',
'type': 'executable',
'dependencies': [
'../base/base.gyp:base',
'net',
'net_test_support',
],
'sources': [
'tools/stress_cache/stress_cache.cc',
],
# TODO(jschuh): crbug.com/167187 fix size_t to int truncations.
'msvs_disabled_warnings': [4267, ],
},
{
'target_name': 'tld_cleanup',
'type': 'executable',
'dependencies': [
'../base/base.gyp:base',
'../base/base.gyp:base_i18n',
'../net/tools/tld_cleanup/tld_cleanup.gyp:tld_cleanup_util',
],
'sources': [
'tools/tld_cleanup/tld_cleanup.cc',
],
# TODO(jschuh): crbug.com/167187 fix size_t to int truncations.
'msvs_disabled_warnings': [4267, ],
},
],
}],
['os_posix == 1 and OS != "mac" and OS != "ios" and OS != "android"', {
'targets': [
{
'target_name': 'epoll_server',
'type': 'static_library',
'dependencies': [
'../base/base.gyp:base',
'net',
],
'sources': [
'tools/epoll_server/epoll_server.cc',
'tools/epoll_server/epoll_server.h',
],
},
{
'target_name': 'flip_in_mem_edsm_server_base',
'type': 'static_library',
'cflags': [
'-Wno-deprecated',
],
'dependencies': [
'../base/base.gyp:base',
'../third_party/boringssl/boringssl.gyp:boringssl',
'balsa',
'epoll_server',
'net',
],
'sources': [
'tools/flip_server/acceptor_thread.cc',
'tools/flip_server/acceptor_thread.h',
'tools/flip_server/constants.h',
'tools/flip_server/create_listener.cc',
'tools/flip_server/create_listener.h',
'tools/flip_server/flip_config.cc',
'tools/flip_server/flip_config.h',
'tools/flip_server/http_interface.cc',
'tools/flip_server/http_interface.h',
'tools/flip_server/mem_cache.cc',
'tools/flip_server/mem_cache.h',
'tools/flip_server/output_ordering.cc',
'tools/flip_server/output_ordering.h',
'tools/flip_server/ring_buffer.cc',
'tools/flip_server/ring_buffer.h',
'tools/flip_server/sm_connection.cc',
'tools/flip_server/sm_connection.h',
'tools/flip_server/sm_interface.h',
'tools/flip_server/spdy_interface.cc',
'tools/flip_server/spdy_interface.h',
'tools/flip_server/spdy_ssl.cc',
'tools/flip_server/spdy_ssl.h',
'tools/flip_server/spdy_util.cc',
'tools/flip_server/spdy_util.h',
'tools/flip_server/streamer_interface.cc',
'tools/flip_server/streamer_interface.h',
'tools/flip_server/url_to_filename_encoder.cc',
'tools/flip_server/url_to_filename_encoder.h',
'tools/flip_server/url_utilities.cc',
'tools/flip_server/url_utilities.h',
],
},
{
'target_name': 'flip_in_mem_edsm_server_unittests',
'type': 'executable',
'dependencies': [
'../testing/gtest.gyp:gtest',
'../testing/gmock.gyp:gmock',
'../third_party/boringssl/boringssl.gyp:boringssl',
'flip_in_mem_edsm_server_base',
'net',
'net_test_support',
],
'sources': [
'tools/flip_server/flip_test_utils.cc',
'tools/flip_server/flip_test_utils.h',
'tools/flip_server/http_interface_test.cc',
'tools/flip_server/mem_cache_test.cc',
'tools/flip_server/run_all_tests.cc',
'tools/flip_server/spdy_interface_test.cc',
'tools/flip_server/url_to_filename_encoder_unittest.cc',
'tools/flip_server/url_utilities_unittest.cc',
],
},
{
'target_name': 'flip_in_mem_edsm_server',
'type': 'executable',
'cflags': [
'-Wno-deprecated',
],
'dependencies': [
'../base/base.gyp:base',
'flip_in_mem_edsm_server_base',
'net',
],
'sources': [
'tools/flip_server/flip_in_mem_edsm_server.cc',
],
},
{
'target_name': 'epoll_quic_tools',
'type': 'static_library',
'dependencies': [
'../base/base.gyp:base',
'../base/third_party/dynamic_annotations/dynamic_annotations.gyp:dynamic_annotations',
'../url/url.gyp:url_lib',
'balsa',
'epoll_server',
'net',
'net_quic_proto',
],
'sources': [
'tools/quic/quic_client.cc',
'tools/quic/quic_client.h',
'tools/quic/quic_default_packet_writer.cc',
'tools/quic/quic_default_packet_writer.h',
'tools/quic/quic_epoll_clock.cc',
'tools/quic/quic_epoll_clock.h',
'tools/quic/quic_epoll_connection_helper.cc',
'tools/quic/quic_epoll_connection_helper.h',
'tools/quic/quic_packet_reader.cc',
'tools/quic/quic_packet_reader.h',
'tools/quic/quic_packet_writer_wrapper.cc',
'tools/quic/quic_packet_writer_wrapper.h',
'tools/quic/quic_server.cc',
'tools/quic/quic_server.h',
'tools/quic/quic_socket_utils.cc',
'tools/quic/quic_socket_utils.h',
],
},
{
'target_name': 'epoll_quic_client',
'type': 'executable',
'dependencies': [
'../base/base.gyp:base',
'net',
'epoll_quic_tools',
'simple_quic_tools',
],
'sources': [
'tools/quic/quic_client_bin.cc',
],
},
{
'target_name': 'epoll_quic_server',
'type': 'executable',
'dependencies': [
'../base/base.gyp:base',
'net',
'net_quic_proto',
'epoll_quic_tools',
'simple_quic_tools',
],
'sources': [
'tools/quic/quic_server_bin.cc',
],
},
]
}],
['OS=="android"', {
'targets': [
{ # The same target as 'net', but with smaller binary size due to
# exclusion of ICU, FTP, FILE and WebSockets support.
'target_name': 'net_small',
'variables': {
'disable_ftp_support': 1,
'disable_file_support': 1,
'enable_websockets': 0,
},
'dependencies': [
'../url/url.gyp:url_lib_use_icu_alternatives_on_android',
'net_features',
],
'defines': [
'USE_ICU_ALTERNATIVES_ON_ANDROID=1',
'DISABLE_FILE_SUPPORT=1',
'DISABLE_FTP_SUPPORT=1',
],
'sources': [
'filter/brotli_filter_disabled.cc',
'base/net_string_util_icu_alternatives_android.cc',
'base/net_string_util_icu_alternatives_android.h',
],
'includes': [ 'net_common.gypi' ],
},
{
'target_name': 'net_jni_headers',
'type': 'none',
'sources': [
'android/java/src/org/chromium/net/AndroidCertVerifyResult.java',
'android/java/src/org/chromium/net/AndroidKeyStore.java',
'android/java/src/org/chromium/net/AndroidNetworkLibrary.java',
'android/java/src/org/chromium/net/AndroidTrafficStats.java',
'android/java/src/org/chromium/net/GURLUtils.java',
'android/java/src/org/chromium/net/HttpNegotiateAuthenticator.java',
'android/java/src/org/chromium/net/NetStringUtil.java',
'android/java/src/org/chromium/net/NetworkChangeNotifier.java',
'android/java/src/org/chromium/net/ProxyChangeListener.java',
'android/java/src/org/chromium/net/X509Util.java',
],
'variables': {
'jni_gen_package': 'net',
},
'includes': [ '../build/jni_generator.gypi' ],
},
{
'target_name': 'net_test_jni_headers',
'type': 'none',
'sources': [
'android/javatests/src/org/chromium/net/AndroidKeyStoreTestUtil.java',
'test/android/javatests/src/org/chromium/net/test/EmbeddedTestServerImpl.java',
'test/android/javatests/src/org/chromium/net/test/DummySpnegoAuthenticator.java',
],
'variables': {
'jni_gen_package': 'net/test',
},
'includes': [ '../build/jni_generator.gypi' ],
},
{
'target_name': 'net_java',
'type': 'none',
'variables': {
'java_in_dir': '../net/android/java',
},
'dependencies': [
'../base/base.gyp:base',
'cert_verify_status_android_java',
'certificate_mime_types_java',
'network_change_notifier_types_java',
'network_change_notifier_android_types_java',
'net_errors_java',
'private_key_types_java',
'traffic_stats_error_java',
],
'includes': [ '../build/java.gypi' ],
},
{
'target_name': 'embedded_test_server_aidl',
'type': 'none',
'variables': {
'aidl_interface_file': '../net/test/android/javatests/src/org/chromium/net/test/IEmbeddedTestServerInterface.aidl',
},
'sources': [
'../net/test/android/javatests/src/org/chromium/net/test/IEmbeddedTestServerImpl.aidl',
],
'includes': [ '../build/java_aidl.gypi' ],
},
{
'target_name': 'net_java_test_support',
'type': 'none',
'variables': {
'java_in_dir': '../net/test/android/javatests',
# TODO(jbudorick): remove chromium_code: 0 line once crbug.com/488192 is fixed.
'chromium_code': 0,
},
'dependencies': [
'embedded_test_server_aidl',
'net_java',
'url_request_failed_job_java',
'../base/base.gyp:base_java',
'../base/base.gyp:base_java_test_support',
'../third_party/android_tools/android_tools.gyp:legacy_http_javalib',
],
'includes': [ '../build/java.gypi' ],
},
{
'target_name': 'libnet_java_test_support',
'type': 'shared_library',
'dependencies': [
'net_test_support',
'../base/base.gyp:base',
],
'sources': [
'test/android/net_test_entry_point.cc',
'test/android/net_test_jni_onload.cc',
'test/android/net_test_jni_onload.h',
],
},
{
'target_name': 'net_test_support_apk',
'type': 'none',
'dependencies': [
'net_java_test_support',
],
'variables': {
'android_manifest_path': 'test/android/javatests/AndroidManifest.xml',
'apk_name': 'ChromiumNetTestSupport',
'is_test_apk': 1,
'java_in_dir': 'test/android/javatests',
'java_in_dir_suffix': '/src_dummy',
'native_lib_target': 'libnet_java_test_support',
},
'includes': [
'../build/java_apk.gypi',
],
},
{
# Targets that need the net test support APK should depend on this
# target. It ensures that the APK is built without passing the
# classpath on to dependent targets.
'target_name': 'require_net_test_support_apk',
'type': 'none',
'actions': [
{
'action_name': 'require_ChromiumNetTestSupport',
'variables': {
'required_file': '<(PRODUCT_DIR)/net_test_support_apk/ChromiumNetTestSupport.apk.required',
},
'inputs': [
'<(PRODUCT_DIR)/apks/ChromiumNetTestSupport.apk',
],
'outputs': [
'<(required_file)',
],
'action': [
'python', '../build/android/gyp/touch.py', '<(required_file)',
],
},
],
},
{
'target_name': 'url_request_failed_job_java',
'type': 'none',
'variables': {
'source_file': 'test/url_request/url_request_failed_job.h',
},
'includes': [ '../build/android/java_cpp_enum.gypi' ],
},
{
'target_name': 'net_javatests',
'type': 'none',
'variables': {
'java_in_dir': '../net/android/javatests',
},
'dependencies': [
'../base/base.gyp:base',
'../base/base.gyp:base_java_test_support',
'net_java',
'net_java_test_support',
],
'includes': [ '../build/java.gypi' ],
},
{
'target_name': 'net_errors_java',
'type': 'none',
'sources': [
'android/java/NetError.template',
],
'variables': {
'package_name': 'org/chromium/net',
'template_deps': ['base/net_error_list.h'],
},
'includes': [ '../build/android/java_cpp_template.gypi' ],
},
{
'target_name': 'certificate_mime_types_java',
'type': 'none',
'variables': {
'source_file': 'base/mime_util.h',
},
'includes': [ '../build/android/java_cpp_enum.gypi' ],
},
{
'target_name': 'cert_verify_status_android_java',
'type': 'none',
'variables': {
'source_file': 'android/cert_verify_result_android.h',
},
'includes': [ '../build/android/java_cpp_enum.gypi' ],
},
{
'target_name': 'network_change_notifier_types_java',
'type': 'none',
'variables': {
'source_file': 'base/network_change_notifier.h',
},
'includes': [ '../build/android/java_cpp_enum.gypi' ],
},
{
'target_name': 'network_change_notifier_android_types_java',
'type': 'none',
'variables': {
'source_file': 'android/network_change_notifier_android.cc',
},
'includes': [ '../build/android/java_cpp_enum.gypi' ],
},
{
'target_name': 'private_key_types_java',
'type': 'none',
'variables': {
'source_file': 'android/keystore.h',
},
'includes': [ '../build/android/java_cpp_enum.gypi' ],
},
{
'target_name': 'traffic_stats_error_java',
'type': 'none',
'variables': {
'source_file': 'android/traffic_stats.cc',
},
'includes': [ '../build/android/java_cpp_enum.gypi' ],
},
{
'target_name': 'net_unittests_apk',
'type': 'none',
'dependencies': [
'net_java',
'net_javatests',
'net_java_test_support',
'net_unittests',
],
'conditions': [
['v8_use_external_startup_data==1', {
'dependencies': [
'../v8/tools/gyp/v8.gyp:v8_external_snapshot',
],
'variables': {
'dest_path': '<(asset_location)',
'renaming_sources': [
'<(PRODUCT_DIR)/natives_blob.bin',
'<(PRODUCT_DIR)/snapshot_blob.bin',
],
'renaming_destinations': [
'natives_blob_<(arch_suffix).bin',
'snapshot_blob_<(arch_suffix).bin',
],
'clear': 1,
},
'includes': ['../build/android/copy_ex.gypi'],
}],
],
'variables': {
'test_suite_name': 'net_unittests',
'isolate_file': 'net_unittests.isolate',
'android_manifest_path': 'android/unittest_support/AndroidManifest.xml',
'resource_dir': 'android/unittest_support/res',
'conditions': [
['v8_use_external_startup_data==1', {
'asset_location': '<(PRODUCT_DIR)/net_unittests_apk/assets',
'additional_input_paths': [
'<(PRODUCT_DIR)/net_unittests_apk/assets/natives_blob_<(arch_suffix).bin',
'<(PRODUCT_DIR)/net_unittests_apk/assets/snapshot_blob_<(arch_suffix).bin',
],
}],
],
},
'includes': [
'../build/apk_test.gypi',
'../build/android/v8_external_startup_data_arch_suffix.gypi',
],
},
{
'target_name': 'net_junit_tests',
'type': 'none',
'dependencies': [
'net_java',
'../base/base.gyp:base',
'../base/base.gyp:base_java_test_support',
'../base/base.gyp:base_junit_test_support',
'../testing/android/junit/junit_test.gyp:junit_test_support',
],
'variables': {
'main_class': 'org.chromium.testing.local.JunitTestMain',
'src_paths': [
'android/junit/',
],
},
'includes': [
'../build/host_jar.gypi',
],
},
],
'conditions': [
['test_isolation_mode != "noop"',
{
'targets': [
{
'target_name': 'net_unittests_apk_run',
'type': 'none',
'dependencies': [
'net_unittests_apk',
],
'includes': [
'../build/isolate.gypi',
],
'sources': [
'net_unittests_apk.isolate',
],
},
]
}
],
],
}],
['OS == "android" or OS == "linux"', {
'targets': [
{
'target_name': 'disk_cache_memory_test',
'type': 'executable',
'dependencies': [
'../base/base.gyp:base',
'net',
],
'sources': [
'tools/disk_cache_memory_test/disk_cache_memory_test.cc',
],
},
],
}],
['test_isolation_mode != "noop"', {
'targets': [
{
'target_name': 'net_unittests_run',
'type': 'none',
'dependencies': [
'net_unittests',
],
'includes': [
'../build/isolate.gypi',
],
'sources': [
'net_unittests.isolate',
],
},
],
}],
],
}
|
py
|
1a5690bf8321690ea7b11c7f3213ceac98906c85
|
# code is borrowed from the original repo and fit into our training framework
# https://github.com/HuCaoFighting/Swin-Unet/tree/4375a8d6fa7d9c38184c5d3194db990a00a3e912
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import torch
import torch.nn as nn
import torch.utils.checkpoint as checkpoint
from einops import rearrange
from timm.models.layers import DropPath, to_2tuple, trunc_normal_
import copy
import logging
import math
from os.path import join as pjoin
import numpy as np
from torch.nn import CrossEntropyLoss, Dropout, Softmax, Linear, Conv2d, LayerNorm
from torch.nn.modules.utils import _pair
from scipy import ndimage
class Mlp(nn.Module):
def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.):
super().__init__()
out_features = out_features or in_features
hidden_features = hidden_features or in_features
self.fc1 = nn.Linear(in_features, hidden_features)
self.act = act_layer()
self.fc2 = nn.Linear(hidden_features, out_features)
self.drop = nn.Dropout(drop)
def forward(self, x):
x = self.fc1(x)
x = self.act(x)
x = self.drop(x)
x = self.fc2(x)
x = self.drop(x)
return x
def window_partition(x, window_size):
"""
Args:
x: (B, H, W, C)
window_size (int): window size
Returns:
windows: (num_windows*B, window_size, window_size, C)
"""
B, H, W, C = x.shape
x = x.view(B, H // window_size, window_size, W // window_size, window_size, C)
windows = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, window_size, window_size, C)
return windows
def window_reverse(windows, window_size, H, W):
"""
Args:
windows: (num_windows*B, window_size, window_size, C)
window_size (int): Window size
H (int): Height of image
W (int): Width of image
Returns:
x: (B, H, W, C)
"""
B = int(windows.shape[0] / (H * W / window_size / window_size))
x = windows.view(B, H // window_size, W // window_size, window_size, window_size, -1)
x = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(B, H, W, -1)
return x
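# Illustrative round-trip check (added for clarity; not in the original repo):
# window_partition and window_reverse are exact inverses whenever H and W are
# divisible by window_size.
#   x = torch.randn(1, 8, 8, 3)
#   windows = window_partition(x, 4)   # -> (4, 4, 4, 3): four 4x4 windows
#   assert torch.equal(window_reverse(windows, 4, 8, 8), x)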
class WindowAttention(nn.Module):
r""" Window based multi-head self attention (W-MSA) module with relative position bias.
It supports both shifted and non-shifted windows.
Args:
dim (int): Number of input channels.
window_size (tuple[int]): The height and width of the window.
num_heads (int): Number of attention heads.
qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True
qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set
attn_drop (float, optional): Dropout ratio of attention weight. Default: 0.0
proj_drop (float, optional): Dropout ratio of output. Default: 0.0
"""
def __init__(self, dim, window_size, num_heads, qkv_bias=True, qk_scale=None, attn_drop=0., proj_drop=0.):
super().__init__()
self.dim = dim
self.window_size = window_size # Wh, Ww
self.num_heads = num_heads
head_dim = dim // num_heads
self.scale = qk_scale or head_dim ** -0.5
# define a parameter table of relative position bias
self.relative_position_bias_table = nn.Parameter(
torch.zeros((2 * window_size[0] - 1) * (2 * window_size[1] - 1), num_heads)) # 2*Wh-1 * 2*Ww-1, nH
# get pair-wise relative position index for each token inside the window
coords_h = torch.arange(self.window_size[0])
coords_w = torch.arange(self.window_size[1])
coords = torch.stack(torch.meshgrid([coords_h, coords_w])) # 2, Wh, Ww
coords_flatten = torch.flatten(coords, 1) # 2, Wh*Ww
relative_coords = coords_flatten[:, :, None] - coords_flatten[:, None, :] # 2, Wh*Ww, Wh*Ww
relative_coords = relative_coords.permute(1, 2, 0).contiguous() # Wh*Ww, Wh*Ww, 2
relative_coords[:, :, 0] += self.window_size[0] - 1 # shift to start from 0
relative_coords[:, :, 1] += self.window_size[1] - 1
relative_coords[:, :, 0] *= 2 * self.window_size[1] - 1
relative_position_index = relative_coords.sum(-1) # Wh*Ww, Wh*Ww
self.register_buffer("relative_position_index", relative_position_index)
self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
self.attn_drop = nn.Dropout(attn_drop)
self.proj = nn.Linear(dim, dim)
self.proj_drop = nn.Dropout(proj_drop)
trunc_normal_(self.relative_position_bias_table, std=.02)
self.softmax = nn.Softmax(dim=-1)
def forward(self, x, mask=None):
"""
Args:
x: input features with shape of (num_windows*B, N, C)
mask: (0/-inf) mask with shape of (num_windows, Wh*Ww, Wh*Ww) or None
"""
B_, N, C = x.shape
qkv = self.qkv(x).reshape(B_, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)
q, k, v = qkv[0], qkv[1], qkv[2] # make torchscript happy (cannot use tensor as tuple)
q = q * self.scale
attn = (q @ k.transpose(-2, -1))
relative_position_bias = self.relative_position_bias_table[self.relative_position_index.view(-1)].view(
self.window_size[0] * self.window_size[1], self.window_size[0] * self.window_size[1], -1) # Wh*Ww,Wh*Ww,nH
relative_position_bias = relative_position_bias.permute(2, 0, 1).contiguous() # nH, Wh*Ww, Wh*Ww
attn = attn + relative_position_bias.unsqueeze(0)
if mask is not None:
nW = mask.shape[0]
attn = attn.view(B_ // nW, nW, self.num_heads, N, N) + mask.unsqueeze(1).unsqueeze(0)
attn = attn.view(-1, self.num_heads, N, N)
attn = self.softmax(attn)
else:
attn = self.softmax(attn)
attn = self.attn_drop(attn)
x = (attn @ v).transpose(1, 2).reshape(B_, N, C)
x = self.proj(x)
x = self.proj_drop(x)
return x
def extra_repr(self) -> str:
return f'dim={self.dim}, window_size={self.window_size}, num_heads={self.num_heads}'
def flops(self, N):
# calculate flops for 1 window with token length of N
flops = 0
# qkv = self.qkv(x)
flops += N * self.dim * 3 * self.dim
# attn = (q @ k.transpose(-2, -1))
flops += self.num_heads * N * (self.dim // self.num_heads) * N
# x = (attn @ v)
flops += self.num_heads * N * N * (self.dim // self.num_heads)
# x = self.proj(x)
flops += N * self.dim * self.dim
return flops
class SwinTransformerBlock(nn.Module):
r""" Swin Transformer Block.
Args:
dim (int): Number of input channels.
input_resolution (tuple[int]): Input resolution.
num_heads (int): Number of attention heads.
window_size (int): Window size.
shift_size (int): Shift size for SW-MSA.
mlp_ratio (float): Ratio of mlp hidden dim to embedding dim.
qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True
qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set.
drop (float, optional): Dropout rate. Default: 0.0
attn_drop (float, optional): Attention dropout rate. Default: 0.0
drop_path (float, optional): Stochastic depth rate. Default: 0.0
act_layer (nn.Module, optional): Activation layer. Default: nn.GELU
norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm
"""
def __init__(self, dim, input_resolution, num_heads, window_size=7, shift_size=0,
mlp_ratio=4., qkv_bias=True, qk_scale=None, drop=0., attn_drop=0., drop_path=0.,
act_layer=nn.GELU, norm_layer=nn.LayerNorm):
super().__init__()
self.dim = dim
self.input_resolution = input_resolution
self.num_heads = num_heads
self.window_size = window_size
self.shift_size = shift_size
self.mlp_ratio = mlp_ratio
if min(self.input_resolution) <= self.window_size:
# if window size is larger than input resolution, we don't partition windows
self.shift_size = 0
self.window_size = min(self.input_resolution)
assert 0 <= self.shift_size < self.window_size, "shift_size must be in [0, window_size)"
self.norm1 = norm_layer(dim)
self.attn = WindowAttention(
dim, window_size=to_2tuple(self.window_size), num_heads=num_heads,
qkv_bias=qkv_bias, qk_scale=qk_scale, attn_drop=attn_drop, proj_drop=drop)
self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
self.norm2 = norm_layer(dim)
mlp_hidden_dim = int(dim * mlp_ratio)
self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop)
if self.shift_size > 0:
# calculate attention mask for SW-MSA
H, W = self.input_resolution
img_mask = torch.zeros((1, H, W, 1)) # 1 H W 1
h_slices = (slice(0, -self.window_size),
slice(-self.window_size, -self.shift_size),
slice(-self.shift_size, None))
w_slices = (slice(0, -self.window_size),
slice(-self.window_size, -self.shift_size),
slice(-self.shift_size, None))
cnt = 0
for h in h_slices:
for w in w_slices:
img_mask[:, h, w, :] = cnt
cnt += 1
mask_windows = window_partition(img_mask, self.window_size) # nW, window_size, window_size, 1
mask_windows = mask_windows.view(-1, self.window_size * self.window_size)
attn_mask = mask_windows.unsqueeze(1) - mask_windows.unsqueeze(2)
attn_mask = attn_mask.masked_fill(attn_mask != 0, float(-100.0)).masked_fill(attn_mask == 0, float(0.0))
else:
attn_mask = None
self.register_buffer("attn_mask", attn_mask)
def forward(self, x):
H, W = self.input_resolution
B, L, C = x.shape
assert L == H * W, "input feature has wrong size"
shortcut = x
x = self.norm1(x)
x = x.view(B, H, W, C)
# cyclic shift
if self.shift_size > 0:
shifted_x = torch.roll(x, shifts=(-self.shift_size, -self.shift_size), dims=(1, 2))
else:
shifted_x = x
# partition windows
x_windows = window_partition(shifted_x, self.window_size) # nW*B, window_size, window_size, C
x_windows = x_windows.view(-1, self.window_size * self.window_size, C) # nW*B, window_size*window_size, C
# W-MSA/SW-MSA
attn_windows = self.attn(x_windows, mask=self.attn_mask) # nW*B, window_size*window_size, C
# merge windows
attn_windows = attn_windows.view(-1, self.window_size, self.window_size, C)
shifted_x = window_reverse(attn_windows, self.window_size, H, W) # B H' W' C
# reverse cyclic shift
if self.shift_size > 0:
x = torch.roll(shifted_x, shifts=(self.shift_size, self.shift_size), dims=(1, 2))
else:
x = shifted_x
x = x.view(B, H * W, C)
# FFN
x = shortcut + self.drop_path(x)
x = x + self.drop_path(self.mlp(self.norm2(x)))
return x
def extra_repr(self) -> str:
return f"dim={self.dim}, input_resolution={self.input_resolution}, num_heads={self.num_heads}, " \
f"window_size={self.window_size}, shift_size={self.shift_size}, mlp_ratio={self.mlp_ratio}"
def flops(self):
flops = 0
H, W = self.input_resolution
# norm1
flops += self.dim * H * W
# W-MSA/SW-MSA
nW = H * W / self.window_size / self.window_size
flops += nW * self.attn.flops(self.window_size * self.window_size)
# mlp
flops += 2 * H * W * self.dim * self.dim * self.mlp_ratio
# norm2
flops += self.dim * H * W
return flops
class PatchMerging(nn.Module):
r""" Patch Merging Layer.
Args:
input_resolution (tuple[int]): Resolution of input feature.
dim (int): Number of input channels.
norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm
"""
def __init__(self, input_resolution, dim, norm_layer=nn.LayerNorm):
super().__init__()
self.input_resolution = input_resolution
self.dim = dim
self.reduction = nn.Linear(4 * dim, 2 * dim, bias=False)
self.norm = norm_layer(4 * dim)
def forward(self, x):
"""
x: B, H*W, C
"""
H, W = self.input_resolution
B, L, C = x.shape
assert L == H * W, "input feature has wrong size"
assert H % 2 == 0 and W % 2 == 0, f"x size ({H}*{W}) is not even."
x = x.view(B, H, W, C)
x0 = x[:, 0::2, 0::2, :] # B H/2 W/2 C
x1 = x[:, 1::2, 0::2, :] # B H/2 W/2 C
x2 = x[:, 0::2, 1::2, :] # B H/2 W/2 C
x3 = x[:, 1::2, 1::2, :] # B H/2 W/2 C
x = torch.cat([x0, x1, x2, x3], -1) # B H/2 W/2 4*C
x = x.view(B, -1, 4 * C) # B H/2*W/2 4*C
x = self.norm(x)
x = self.reduction(x)
return x
def extra_repr(self) -> str:
return f"input_resolution={self.input_resolution}, dim={self.dim}"
def flops(self):
H, W = self.input_resolution
flops = H * W * self.dim
flops += (H // 2) * (W // 2) * 4 * self.dim * 2 * self.dim
return flops
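# Shape walk-through (illustrative, not in the original file): with H=W=56 and
# C=96, the 2x2 neighbourhood concat maps (B, 56*56, 96) to (B, 28*28, 384),
# and the linear reduction then yields (B, 28*28, 192) -- resolution halved,
# channels doubled.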
class PatchExpand(nn.Module):
def __init__(self, input_resolution, dim, dim_scale=2, norm_layer=nn.LayerNorm):
super().__init__()
self.input_resolution = input_resolution
self.dim = dim
self.expand = nn.Linear(dim, 2*dim, bias=False) if dim_scale==2 else nn.Identity()
self.norm = norm_layer(dim // dim_scale)
def forward(self, x):
"""
x: B, H*W, C
"""
H, W = self.input_resolution
x = self.expand(x)
B, L, C = x.shape
assert L == H * W, "input feature has wrong size"
x = x.view(B, H, W, C)
x = rearrange(x, 'b h w (p1 p2 c)-> b (h p1) (w p2) c', p1=2, p2=2, c=C//4)
x = x.view(B, -1, C // 4)
x = self.norm(x)
return x
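# Illustrative shapes (assumed example): with input_resolution=(7, 7), dim=768
# and dim_scale=2, the linear expand gives (B, 49, 1536); the rearrange then
# redistributes channels onto a 2x upsampled grid, yielding (B, 14*14, 384).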
class FinalPatchExpand_X4(nn.Module):
def __init__(self, input_resolution, dim, dim_scale=4, norm_layer=nn.LayerNorm):
super().__init__()
self.input_resolution = input_resolution
self.dim = dim
self.dim_scale = dim_scale
self.expand = nn.Linear(dim, 16*dim, bias=False)
self.output_dim = dim
self.norm = norm_layer(self.output_dim)
def forward(self, x):
"""
x: B, H*W, C
"""
H, W = self.input_resolution
x = self.expand(x)
B, L, C = x.shape
assert L == H * W, "input feature has wrong size"
x = x.view(B, H, W, C)
x = rearrange(x, 'b h w (p1 p2 c)-> b (h p1) (w p2) c', p1=self.dim_scale, p2=self.dim_scale, c=C//(self.dim_scale**2))
x = x.view(B, -1, self.output_dim)
x = self.norm(x)
return x
class BasicLayer(nn.Module):
""" A basic Swin Transformer layer for one stage.
Args:
dim (int): Number of input channels.
input_resolution (tuple[int]): Input resolution.
depth (int): Number of blocks.
num_heads (int): Number of attention heads.
window_size (int): Local window size.
mlp_ratio (float): Ratio of mlp hidden dim to embedding dim.
qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True
qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set.
drop (float, optional): Dropout rate. Default: 0.0
attn_drop (float, optional): Attention dropout rate. Default: 0.0
drop_path (float | tuple[float], optional): Stochastic depth rate. Default: 0.0
norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm
downsample (nn.Module | None, optional): Downsample layer at the end of the layer. Default: None
use_checkpoint (bool): Whether to use checkpointing to save memory. Default: False.
"""
def __init__(self, dim, input_resolution, depth, num_heads, window_size,
mlp_ratio=4., qkv_bias=True, qk_scale=None, drop=0., attn_drop=0.,
drop_path=0., norm_layer=nn.LayerNorm, downsample=None, use_checkpoint=False):
super().__init__()
self.dim = dim
self.input_resolution = input_resolution
self.depth = depth
self.use_checkpoint = use_checkpoint
# build blocks
self.blocks = nn.ModuleList([
SwinTransformerBlock(dim=dim, input_resolution=input_resolution,
num_heads=num_heads, window_size=window_size,
shift_size=0 if (i % 2 == 0) else window_size // 2,
mlp_ratio=mlp_ratio,
qkv_bias=qkv_bias, qk_scale=qk_scale,
drop=drop, attn_drop=attn_drop,
drop_path=drop_path[i] if isinstance(drop_path, list) else drop_path,
norm_layer=norm_layer)
for i in range(depth)])
# patch merging layer
if downsample is not None:
self.downsample = downsample(input_resolution, dim=dim, norm_layer=norm_layer)
else:
self.downsample = None
def forward(self, x):
for blk in self.blocks:
if self.use_checkpoint:
x = checkpoint.checkpoint(blk, x)
else:
x = blk(x)
if self.downsample is not None:
x = self.downsample(x)
return x
def extra_repr(self) -> str:
return f"dim={self.dim}, input_resolution={self.input_resolution}, depth={self.depth}"
def flops(self):
flops = 0
for blk in self.blocks:
flops += blk.flops()
if self.downsample is not None:
flops += self.downsample.flops()
return flops
class BasicLayer_up(nn.Module):
""" A basic Swin Transformer layer for one stage.
Args:
dim (int): Number of input channels.
input_resolution (tuple[int]): Input resolution.
depth (int): Number of blocks.
num_heads (int): Number of attention heads.
window_size (int): Local window size.
mlp_ratio (float): Ratio of mlp hidden dim to embedding dim.
qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True
qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set.
drop (float, optional): Dropout rate. Default: 0.0
attn_drop (float, optional): Attention dropout rate. Default: 0.0
drop_path (float | tuple[float], optional): Stochastic depth rate. Default: 0.0
norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm
downsample (nn.Module | None, optional): Downsample layer at the end of the layer. Default: None
use_checkpoint (bool): Whether to use checkpointing to save memory. Default: False.
"""
def __init__(self, dim, input_resolution, depth, num_heads, window_size,
mlp_ratio=4., qkv_bias=True, qk_scale=None, drop=0., attn_drop=0.,
drop_path=0., norm_layer=nn.LayerNorm, upsample=None, use_checkpoint=False):
super().__init__()
self.dim = dim
self.input_resolution = input_resolution
self.depth = depth
self.use_checkpoint = use_checkpoint
# build blocks
self.blocks = nn.ModuleList([
SwinTransformerBlock(dim=dim, input_resolution=input_resolution,
num_heads=num_heads, window_size=window_size,
shift_size=0 if (i % 2 == 0) else window_size // 2,
mlp_ratio=mlp_ratio,
qkv_bias=qkv_bias, qk_scale=qk_scale,
drop=drop, attn_drop=attn_drop,
drop_path=drop_path[i] if isinstance(drop_path, list) else drop_path,
norm_layer=norm_layer)
for i in range(depth)])
# patch merging layer
if upsample is not None:
self.upsample = PatchExpand(input_resolution, dim=dim, dim_scale=2, norm_layer=norm_layer)
else:
self.upsample = None
def forward(self, x):
for blk in self.blocks:
if self.use_checkpoint:
x = checkpoint.checkpoint(blk, x)
else:
x = blk(x)
if self.upsample is not None:
x = self.upsample(x)
return x
class PatchEmbed(nn.Module):
r""" Image to Patch Embedding
Args:
img_size (int): Image size. Default: 224.
patch_size (int): Patch token size. Default: 4.
in_chans (int): Number of input image channels. Default: 3.
embed_dim (int): Number of linear projection output channels. Default: 96.
norm_layer (nn.Module, optional): Normalization layer. Default: None
"""
def __init__(self, img_size=224, patch_size=4, in_chans=3, embed_dim=96, norm_layer=None):
super().__init__()
img_size = to_2tuple(img_size)
patch_size = to_2tuple(patch_size)
patches_resolution = [img_size[0] // patch_size[0], img_size[1] // patch_size[1]]
self.img_size = img_size
self.patch_size = patch_size
self.patches_resolution = patches_resolution
self.num_patches = patches_resolution[0] * patches_resolution[1]
self.in_chans = in_chans
self.embed_dim = embed_dim
self.proj = nn.Conv2d(in_chans, embed_dim, kernel_size=patch_size, stride=patch_size)
if norm_layer is not None:
self.norm = norm_layer(embed_dim)
else:
self.norm = None
def forward(self, x):
B, C, H, W = x.shape
# FIXME look at relaxing size constraints
assert H == self.img_size[0] and W == self.img_size[1], \
f"Input image size ({H}*{W}) doesn't match model ({self.img_size[0]}*{self.img_size[1]})."
x = self.proj(x).flatten(2).transpose(1, 2) # B Ph*Pw C
if self.norm is not None:
x = self.norm(x)
return x
def flops(self):
Ho, Wo = self.patches_resolution
flops = Ho * Wo * self.embed_dim * self.in_chans * (self.patch_size[0] * self.patch_size[1])
if self.norm is not None:
flops += Ho * Wo * self.embed_dim
return flops
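# Illustrative shapes: for a 224x224 RGB input with patch_size=4 and
# embed_dim=96, proj produces (B, 96, 56, 56); flatten(2).transpose(1, 2) then
# yields the (B, 3136, 96) token sequence consumed by the first BasicLayer.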
class SwinTransformerSys(nn.Module):
r""" Swin Transformer
A PyTorch impl of : `Swin Transformer: Hierarchical Vision Transformer using Shifted Windows` -
https://arxiv.org/pdf/2103.14030
Args:
img_size (int | tuple(int)): Input image size. Default 224
patch_size (int | tuple(int)): Patch size. Default: 4
in_chans (int): Number of input image channels. Default: 3
num_classes (int): Number of classes for classification head. Default: 1000
embed_dim (int): Patch embedding dimension. Default: 96
depths (tuple(int)): Depth of each Swin Transformer layer.
num_heads (tuple(int)): Number of attention heads in different layers.
window_size (int): Window size. Default: 7
mlp_ratio (float): Ratio of mlp hidden dim to embedding dim. Default: 4
qkv_bias (bool): If True, add a learnable bias to query, key, value. Default: True
qk_scale (float): Override default qk scale of head_dim ** -0.5 if set. Default: None
drop_rate (float): Dropout rate. Default: 0
attn_drop_rate (float): Attention dropout rate. Default: 0
drop_path_rate (float): Stochastic depth rate. Default: 0.1
norm_layer (nn.Module): Normalization layer. Default: nn.LayerNorm.
ape (bool): If True, add absolute position embedding to the patch embedding. Default: False
patch_norm (bool): If True, add normalization after patch embedding. Default: True
use_checkpoint (bool): Whether to use checkpointing to save memory. Default: False
"""
def __init__(self, img_size=224, patch_size=4, in_chans=3, num_classes=1000,
embed_dim=96, depths=[2, 2, 2, 2], depths_decoder=[1, 2, 2, 2], num_heads=[3, 6, 12, 24],
window_size=7, mlp_ratio=4., qkv_bias=True, qk_scale=None,
drop_rate=0., attn_drop_rate=0., drop_path_rate=0.1,
norm_layer=nn.LayerNorm, ape=False, patch_norm=True,
use_checkpoint=False, final_upsample="expand_first", **kwargs):
super().__init__()
print("SwinTransformerSys expand initial----depths:{};depths_decoder:{};drop_path_rate:{};num_classes:{}".format(depths,
depths_decoder,drop_path_rate,num_classes))
self.num_classes = num_classes
self.num_layers = len(depths)
self.embed_dim = embed_dim
self.ape = ape
self.patch_norm = patch_norm
self.num_features = int(embed_dim * 2 ** (self.num_layers - 1))
self.num_features_up = int(embed_dim * 2)
self.mlp_ratio = mlp_ratio
self.final_upsample = final_upsample
# split image into non-overlapping patches
self.patch_embed = PatchEmbed(
img_size=img_size, patch_size=patch_size, in_chans=in_chans, embed_dim=embed_dim,
norm_layer=norm_layer if self.patch_norm else None)
num_patches = self.patch_embed.num_patches
patches_resolution = self.patch_embed.patches_resolution
self.patches_resolution = patches_resolution
# absolute position embedding
if self.ape:
self.absolute_pos_embed = nn.Parameter(torch.zeros(1, num_patches, embed_dim))
trunc_normal_(self.absolute_pos_embed, std=.02)
self.pos_drop = nn.Dropout(p=drop_rate)
# stochastic depth
dpr = [x.item() for x in torch.linspace(0, drop_path_rate, sum(depths))] # stochastic depth decay rule
# build encoder and bottleneck layers
self.layers = nn.ModuleList()
for i_layer in range(self.num_layers):
layer = BasicLayer(dim=int(embed_dim * 2 ** i_layer),
input_resolution=(patches_resolution[0] // (2 ** i_layer),
patches_resolution[1] // (2 ** i_layer)),
depth=depths[i_layer],
num_heads=num_heads[i_layer],
window_size=window_size,
mlp_ratio=self.mlp_ratio,
qkv_bias=qkv_bias, qk_scale=qk_scale,
drop=drop_rate, attn_drop=attn_drop_rate,
drop_path=dpr[sum(depths[:i_layer]):sum(depths[:i_layer + 1])],
norm_layer=norm_layer,
downsample=PatchMerging if (i_layer < self.num_layers - 1) else None,
use_checkpoint=use_checkpoint)
self.layers.append(layer)
# build decoder layers
self.layers_up = nn.ModuleList()
self.concat_back_dim = nn.ModuleList()
for i_layer in range(self.num_layers):
concat_linear = nn.Linear(2*int(embed_dim*2**(self.num_layers-1-i_layer)),
int(embed_dim*2**(self.num_layers-1-i_layer))) if i_layer > 0 else nn.Identity()
if i_layer == 0:
layer_up = PatchExpand(input_resolution=(patches_resolution[0] // (2 ** (self.num_layers-1-i_layer)),
patches_resolution[1] // (2 ** (self.num_layers-1-i_layer))), dim=int(embed_dim * 2 ** (self.num_layers-1-i_layer)), dim_scale=2, norm_layer=norm_layer)
else:
layer_up = BasicLayer_up(dim=int(embed_dim * 2 ** (self.num_layers-1-i_layer)),
input_resolution=(patches_resolution[0] // (2 ** (self.num_layers-1-i_layer)),
patches_resolution[1] // (2 ** (self.num_layers-1-i_layer))),
depth=depths[(self.num_layers-1-i_layer)],
num_heads=num_heads[(self.num_layers-1-i_layer)],
window_size=window_size,
mlp_ratio=self.mlp_ratio,
qkv_bias=qkv_bias, qk_scale=qk_scale,
drop=drop_rate, attn_drop=attn_drop_rate,
drop_path=dpr[sum(depths[:(self.num_layers-1-i_layer)]):sum(depths[:(self.num_layers-1-i_layer) + 1])],
norm_layer=norm_layer,
upsample=PatchExpand if (i_layer < self.num_layers - 1) else None,
use_checkpoint=use_checkpoint)
self.layers_up.append(layer_up)
self.concat_back_dim.append(concat_linear)
self.norm = norm_layer(self.num_features)
self.norm_up= norm_layer(self.embed_dim)
if self.final_upsample == "expand_first":
print("---final upsample expand_first---")
self.up = FinalPatchExpand_X4(input_resolution=(img_size//patch_size,img_size//patch_size),dim_scale=4,dim=embed_dim)
self.output = nn.Conv2d(in_channels=embed_dim,out_channels=self.num_classes,kernel_size=1,bias=False)
self.apply(self._init_weights)
def _init_weights(self, m):
if isinstance(m, nn.Linear):
trunc_normal_(m.weight, std=.02)
if isinstance(m, nn.Linear) and m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.LayerNorm):
nn.init.constant_(m.bias, 0)
nn.init.constant_(m.weight, 1.0)
@torch.jit.ignore
def no_weight_decay(self):
return {'absolute_pos_embed'}
@torch.jit.ignore
def no_weight_decay_keywords(self):
return {'relative_position_bias_table'}
# Encoder and bottleneck
def forward_features(self, x):
x = self.patch_embed(x)
if self.ape:
x = x + self.absolute_pos_embed
x = self.pos_drop(x)
x_downsample = []
for layer in self.layers:
x_downsample.append(x)
x = layer(x)
x = self.norm(x) # B L C
return x, x_downsample
# Decoder and skip connection
def forward_up_features(self, x, x_downsample):
for inx, layer_up in enumerate(self.layers_up):
if inx == 0:
x = layer_up(x)
else:
x = torch.cat([x,x_downsample[3-inx]],-1)
x = self.concat_back_dim[inx](x)
x = layer_up(x)
x = self.norm_up(x) # B L C
return x
def up_x4(self, x):
H, W = self.patches_resolution
B, L, C = x.shape
assert L == H*W, "input features have wrong size"
if self.final_upsample=="expand_first":
x = self.up(x)
x = x.view(B,4*H,4*W,-1)
x = x.permute(0,3,1,2) #B,C,H,W
x = self.output(x)
return x
def forward(self, x):
x, x_downsample = self.forward_features(x)
x = self.forward_up_features(x,x_downsample)
x = self.up_x4(x)
return x
def flops(self):
flops = 0
flops += self.patch_embed.flops()
for i, layer in enumerate(self.layers):
flops += layer.flops()
flops += self.num_features * self.patches_resolution[0] * self.patches_resolution[1] // (2 ** self.num_layers)
flops += self.num_features * self.num_classes
return flops
logger = logging.getLogger(__name__)
class SwinUnet_config():
def __init__(self):
self.patch_size = 4
self.in_chans = 3
self.num_classes = 4
self.embed_dim = 96
self.depths = [2, 2, 6, 2]
self.num_heads = [3, 6, 12, 24]
self.window_size = 7
self.mlp_ratio = 4.
self.qkv_bias = True
self.qk_scale = None
self.drop_rate = 0.
self.drop_path_rate = 0.1
self.ape = False
self.patch_norm = True
self.use_checkpoint = False
class SwinUnet(nn.Module):
def __init__(self, config, img_size=224, num_classes=21843, zero_head=False, vis=False):
super(SwinUnet, self).__init__()
self.num_classes = num_classes
self.zero_head = zero_head
self.config = config
self.swin_unet = SwinTransformerSys(img_size=img_size,
patch_size=config.patch_size,
in_chans=config.in_chans,
num_classes=self.num_classes,
embed_dim=config.embed_dim,
depths=config.depths,
num_heads=config.num_heads,
window_size=config.window_size,
mlp_ratio=config.mlp_ratio,
qkv_bias=config.qkv_bias,
qk_scale=config.qk_scale,
drop_rate=config.drop_rate,
drop_path_rate=config.drop_path_rate,
ape=config.ape,
patch_norm=config.patch_norm,
use_checkpoint=config.use_checkpoint)
def forward(self, x):
# print(x.size())
# if x.size()[1] == 1:
# x = x.repeat(1,3,1,1)
logits = self.swin_unet(x)
return logits
def load_from(self, pretrained_path):
if pretrained_path is not None:
print("pretrained_path:{}".format(pretrained_path))
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
pretrained_dict = torch.load(pretrained_path, map_location=device)
if "model" not in pretrained_dict:
print("---start load pretrained modle by splitting---")
pretrained_dict = {k[17:]:v for k,v in pretrained_dict.items()}
for k in list(pretrained_dict.keys()):
if "output" in k:
print("delete key:{}".format(k))
del pretrained_dict[k]
msg = self.swin_unet.load_state_dict(pretrained_dict,strict=False)
# print(msg)
return
pretrained_dict = pretrained_dict['model']
print("---start load pretrained modle of swin encoder---")
model_dict = self.swin_unet.state_dict()
full_dict = copy.deepcopy(pretrained_dict)
for k, v in pretrained_dict.items():
if "layers." in k:
current_layer_num = 3-int(k[7:8])
current_k = "layers_up." + str(current_layer_num) + k[8:]
full_dict.update({current_k:v})
for k in list(full_dict.keys()):
if k in model_dict:
if full_dict[k].shape != model_dict[k].shape:
print("delete:{};shape pretrain:{};shape model:{}".format(k,v.shape,model_dict[k].shape))
del full_dict[k]
msg = self.swin_unet.load_state_dict(full_dict, strict=False)
# print(msg)
else:
print("none pretrain")
|
py
|
1a5690f41b1a9f69cda4001f2e7bec733e2a0a73
|
import uuid
from django.db import models
from django.utils import timezone
from wallet.models import Wallet
class Transaction(models.Model):
TX_STATUS = [('INIT', 'INITIATED'), ('ABT', 'ABORTED'), ('IP', 'IN-PROGRESS'), ('CMP', 'COMPLETED'), ('TO', 'TIMED-OUT')]
TX_TYPE = [('DX', 'DEPOSIT'), ('WX', 'WITHDRAWAL'), ('US', 'UNSPECIFIED')]
id = models.BigAutoField(primary_key=True)
reference_id = models.UUIDField(unique=True, default=uuid.uuid4)
created_at = models.DateTimeField(default=timezone.now)
wallet = models.ForeignKey(Wallet, blank=False, null=False, on_delete=models.CASCADE, db_column='wid')
amount = models.PositiveIntegerField(default=0)
status = models.CharField(max_length=10, choices=TX_STATUS, default='INIT')
type = models.CharField(max_length=20, choices=TX_TYPE, default='US')
class Meta:
db_table = 'transaction'
def get_dict(self):
dict_obj = {'reference_id': self.reference_id if self.reference_id else None,
'created_at': str(self.created_at) if self.created_at else None,
'wallet_id': self.wallet.wxid if self.wallet.wxid else None,
'status': self.status if self.status else None
}
return dict_obj
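# Illustrative usage (an assumption, not part of the original file; requires an
# existing Wallet row):
#   tx = Transaction.objects.create(wallet=wallet, amount=500, type='DX')
#   tx.status = 'CMP'
#   tx.save()
#   payload = tx.get_dict()   # reference_id, created_at, wallet_id, status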
|
py
|
1a56915815c7f96a1f624bb3ba76512dd2e94bb9
|
#!/usr/bin/env python3
import unittest
from unittest.mock import patch
import numpy as np
import pandas as pd
from tmc import points
from tmc.utils import load, get_stdout, patch_helper
module_name="src.subsetting_by_positions"
subsetting_by_positions = load(module_name, "subsetting_by_positions")
main = load(module_name, "main")
ph = patch_helper(module_name)
@points('p04-08.1')
class SubsettingByPositions(unittest.TestCase):
def test_shape_and_columns(self):
df = subsetting_by_positions()
self.assertEqual(df.shape, (10,2), msg="The returned DataFrame had wrong shape!")
#np.testing.assert_array_equal(df.index, range(10), err_msg="Incorrect index")
np.testing.assert_array_equal(df.columns, ["Title", "Artist"],
err_msg="Incorrect column names")
def test_called(self):
with patch(ph("subsetting_by_positions"), wraps=subsetting_by_positions) as psbp,\
patch(ph("pd.read_csv"), wraps=pd.read_csv) as prc:
main()
psbp.assert_called()
prc.assert_called()
if __name__ == '__main__':
unittest.main()
|
py
|
1a56920de499e0b75d7aaa01a68af9518661a8bc
|
# Import python libs
import new
import sys
# Import Salt Testing libs
from salttesting import skipIf, TestCase
from salttesting.helpers import ensure_in_syspath
ensure_in_syspath('../../')
# wmi and pythoncom modules are platform specific...
wmi = new.module('wmi')
sys.modules['wmi'] = wmi
pythoncom = new.module('pythoncom')
sys.modules['pythoncom'] = pythoncom
from salttesting.mock import NO_MOCK, NO_MOCK_REASON, Mock, patch, call, ANY
if NO_MOCK is False:
WMI = Mock()
wmi.WMI = Mock(return_value=WMI)
pythoncom.CoInitialize = Mock()
pythoncom.CoUninitialize = Mock()
# This is imported late so mock can do its job
import bonneville.modules.win_status as status
@skipIf(NO_MOCK, NO_MOCK_REASON)
class TestProcsBase(TestCase):
def __init__(self, *args, **kwargs):
TestCase.__init__(self, *args, **kwargs)
self.__processes = []
def add_process(
self,
pid=100,
cmd='cmd',
name='name',
user='user',
user_domain='domain',
get_owner_result=0):
process = Mock()
process.GetOwner = Mock(
return_value=(user_domain, get_owner_result, user)
)
process.ProcessId = pid
process.CommandLine = cmd
process.Name = name
self.__processes.append(process)
def call_procs(self):
WMI.win32_process = Mock(return_value=self.__processes)
self.result = status.procs()
class TestProcsCount(TestProcsBase):
def setUp(self):
self.add_process(pid=100)
self.add_process(pid=101)
self.call_procs()
def test_process_count(self):
self.assertEqual(len(self.result), 2)
def test_process_key_is_pid(self):
self.assertSetEqual(set(self.result.keys()), set([100, 101]))
class TestProcsAttributes(TestProcsBase):
def setUp(self):
self._expected_name = 'name'
self._expected_cmd = 'cmd'
self._expected_user = 'user'
self._expected_domain = 'domain'
pid = 100
self.add_process(
pid=pid,
cmd=self._expected_cmd,
user=self._expected_user,
user_domain=self._expected_domain,
get_owner_result=0)
self.call_procs()
self.proc = self.result[pid]
def test_process_cmd_is_set(self):
self.assertEqual(self.proc['cmd'], self._expected_cmd)
def test_process_name_is_set(self):
self.assertEqual(self.proc['name'], self._expected_name)
def test_process_user_is_set(self):
self.assertEqual(self.proc['user'], self._expected_user)
def test_process_user_domain_is_set(self):
self.assertEqual(self.proc['user_domain'], self._expected_domain)
class TestProcsUnicodeAttributes(TestProcsBase):
def setUp(self):
unicode_str = u'\xc1'
self.utf8str = unicode_str.encode('utf8')
pid = 100
self.add_process(
pid=pid,
user=unicode_str,
user_domain=unicode_str,
cmd=unicode_str,
name=unicode_str)
self.call_procs()
self.proc = self.result[pid]
def test_process_cmd_is_utf8(self):
self.assertEqual(self.proc['cmd'], self.utf8str)
def test_process_name_is_utf8(self):
self.assertEqual(self.proc['name'], self.utf8str)
def test_process_user_is_utf8(self):
self.assertEqual(self.proc['user'], self.utf8str)
def test_process_user_domain_is_utf8(self):
self.assertEqual(self.proc['user_domain'], self.utf8str)
class TestProcsWMIGetOwnerAccessDeniedWorkaround(TestProcsBase):
def setUp(self):
self.expected_user = 'SYSTEM'
self.expected_domain = 'NT AUTHORITY'
self.add_process(pid=0, get_owner_result=2)
self.add_process(pid=4, get_owner_result=2)
self.call_procs()
def test_user_is_set(self):
self.assertEqual(self.result[0]['user'], self.expected_user)
self.assertEqual(self.result[4]['user'], self.expected_user)
def test_process_user_domain_is_set(self):
self.assertEqual(self.result[0]['user_domain'], self.expected_domain)
self.assertEqual(self.result[4]['user_domain'], self.expected_domain)
class TestProcsWMIGetOwnerErrorsAreLogged(TestProcsBase):
def setUp(self):
self.expected_error_code = 8
self.add_process(get_owner_result=self.expected_error_code)
def test_error_logged_if_process_get_owner_fails(self):
with patch('bonneville.modules.win_status.log') as log:
self.call_procs()
log.warning.assert_called_once_with(ANY)
self.assertIn(
str(self.expected_error_code),
log.warning.call_args[0][0]
)
class TestEmptyCommandLine(TestProcsBase):
def setUp(self):
self.expected_error_code = 8
pid = 100
self.add_process(pid=pid, cmd=None)
self.call_procs()
self.proc = self.result[pid]
def test_cmd_is_empty_string(self):
self.assertEqual(self.proc['cmd'], '')
#class TestProcsComInitialization(TestProcsBase):
# def setUp(self):
# call_count = 5
# for _ in range(call_count):
# self.call_procs()
# self.expected_calls = [call()] * call_count
#
# def test_initialize_and_uninitialize_called(self):
# pythoncom.CoInitialize.assert_has_calls(self.expected_calls)
# pythoncom.CoUninitialize.assert_has_calls(self.expected_calls)
if __name__ == '__main__':
from integration import run_tests
run_tests(
[
TestProcsCount,
TestProcsAttributes,
TestProcsUnicodeAttributes,
TestProcsWMIGetOwnerErrorsAreLogged,
TestProcsWMIGetOwnerAccessDeniedWorkaround,
],
needs_daemon=False
)
|
py
|
1a5692128821bf48b31f66b7b07d4ffd559c7b85
|
from typing import List, Type
from colassigner.util import camel_to_snake # noqa: F401
def class_def_from_cls(cls: Type):
return get_class_def(
cls.__name__,
[p.__name__ for p in get_simplified_mro(cls)],
remove_dunder(cls.__dict__),
)
def get_class_def(
cls_name: str, parent_names: List[str] = None, att_dict: dict = None
):
parent_str = ", ".join(parent_names)
if att_dict:
att_strs = [f" {k} = {v}" for k, v in att_dict.items()]
else:
att_strs = [" pass"]
return "\n".join(
[
f"class {cls_name}({parent_str}):",
*att_strs,
]
)
def snake_to_camel(snake_str: str):
return "".join(snake_str.replace("_", " ").title().split())
def simplify_mro(parent_list: List[Type]):
out = []
for cls in parent_list:
if any(map(lambda added_cls: cls in added_cls.mro(), out)):
continue
out.append(cls)
return out
def get_simplified_mro(cls: Type):
return simplify_mro(cls.mro()[1:])
def remove_dunder(dic: dict):
return {k: v for k, v in dic.items() if not k.startswith("__")}
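# Examples (illustrative; "Base" is a hypothetical parent class):
#   snake_to_camel("foo_bar")                      # -> "FooBar"
#   get_class_def("MyCols", ["Base"], {"x": 1})    # -> "class MyCols(Base):\n    x = 1"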
|
py
|
1a5692211f322e4a249f4ff9ce64adb3c97e0165
|
from PyQt5 import QtWidgets
from PyQt5.QtCore import qWarning, Qt
from PyQt5.QtWidgets import QWidget, QSplitter
from candy_editor.qt.controls.ToolWindowManager.ToolWindowManagerArea import ToolWindowManagerArea
class ToolWindowManagerWrapper ( QWidget ):
def __init__ ( self, manager ):
super ( ToolWindowManagerWrapper, self ).__init__ ( manager )
self.manager = manager
self.setWindowFlags ( self.windowFlags () | Qt.Tool )
self.setWindowTitle ( '' )
mainLayout = QtWidgets.QVBoxLayout ( self )
mainLayout.setContentsMargins ( 0, 0, 0, 0 )
self.manager.wrappers.append ( self )
def closeEvent ( self, event ):
'''
Dispose of all owned ToolWindowManagerArea instances when this wrapper closes.
'''
from .ToolWindowManager import ToolWindowManager
toolWindows = []
for widget in self.findChildren ( ToolWindowManagerArea ):
toolWindows += widget.toolWindows ()
self.manager.moveToolWindows ( toolWindows, ToolWindowManager.NoArea )
def saveState ( self ):
result = {}
if self.layout ().count () > 1:
qWarning ('too many children for wrapper')
return result
if self.isWindow () and self.layout ().count () == 0:
qWarning ('empty top level wrapper')
return result
# result[ 'geometry' ] = str ( self.saveGeometry () )
splitter = self.findChild ( QSplitter )
if splitter:
result[ 'splitter' ] = self.manager.saveSplitterState ( splitter )
else:
area = self.findChild ( ToolWindowManagerArea )
if area:
result[ 'area' ] = area.saveState ()
elif self.layout ().count () > 0:
qWarning ('unknown child')
return {}
return result
def restoreState ( self, data ):
if 'geometry' in data:
self.restoreGeometry ( data['geometry'] )
if self.layout ().count () > 0:
qWarning ('wrapper is not empty')
return
if 'splitter' in data:
self.layout ().addWidget (
self.manager.restoreSplitterState ( data[ 'splitter' ].toMap () )
)
elif 'area' in data:
area = self.manager.createArea ()
area.restoreState ( data[ 'area' ] )
self.layout ().addWidget ( area )
def isOccupied ( self ):
return self.layout ().count () > 0
|
py
|
1a56926d6e7e559323104c113a5174e815f864a6
|
#Claire Williams & Matthew Rasmussen
#1/26/2021
#Moves info from athlete_events.csv into several normalized CSV files,
#building a lookup dictionary for each table along the way
import csv
def make_athletes_table():
'''Read athlete_events.csv and write athletes.csv with one row (id, name, sex, height, weight) per distinct athlete; return the name-keyed lookup dict.'''
athlete_dict = {}
with open('athlete_events.csv') as csv_file:
csv_reader = csv.reader(csv_file, delimiter=',')
headers = next(csv_reader)
for row in csv_reader:
name = row[1]
sex = row[2]
height = row[4]
weight = row[5]
if name not in athlete_dict:
athlete_dict[name] = [len(athlete_dict) + 1, sex, height, weight]
with open('athletes.csv', 'w', newline='') as new_csv_file:
writer = csv.writer(new_csv_file, delimiter=',')
for key in athlete_dict:
writer.writerow([athlete_dict[key][0], key, athlete_dict[key][1], athlete_dict[key][2], athlete_dict[key][3]])
return athlete_dict
def make_nations_table():
nations_dict = {}
with open ('noc_regions.csv') as csv_file:
csv_reader = csv.reader(csv_file, delimiter=',')
headers = next(csv_reader)
for row in csv_reader:
noc = row[0]
region = row[1]
nations_dict[noc] = [len(nations_dict) + 1, region]
with open ('athlete_events.csv') as csv_file:
csv_reader = csv.reader(csv_file, delimiter=',')
headers = next(csv_reader)
for row in csv_reader:
noc = row[7]
team = row[6]
if noc not in nations_dict:
nations_dict[noc] = [len(nations_dict) + 1, team]
with open('nations.csv', 'w', newline='') as new_csv_file:
writer = csv.writer(new_csv_file, delimiter=',')
for nation in nations_dict:
writer.writerow([nations_dict[nation][0], nation, nations_dict[nation][1]])
return nations_dict
def make_games_table():
games_dict = {}
with open ('athlete_events.csv') as csv_file:
csv_reader = csv.reader(csv_file, delimiter=',')
headers = next(csv_reader)
for row in csv_reader:
game = row[8]
year = row[9]
season = row[10]
city = row[11]
if game not in games_dict:
games_dict[game] = [len(games_dict) + 1, year, season, city]
with open('games.csv', 'w', newline='') as new_csv_file:
writer = csv.writer(new_csv_file, delimiter=',')
for key in games_dict:
writer.writerow([games_dict[key][0], games_dict[key][1], games_dict[key][2], games_dict[key][3]])
return games_dict
def make_contests_table():
contest_dict = {}
with open ('athlete_events.csv') as csv_file:
csv_reader = csv.reader(csv_file, delimiter=',')
headers = next(csv_reader)
for row in csv_reader:
contest = row[13]
sport = row[12]
if contest not in contest_dict:
contest_dict[contest] = [len(contest_dict) + 1, sport]
with open('contests.csv', 'w', newline='') as new_csv_file:
writer = csv.writer(new_csv_file, delimiter=',')
for key in contest_dict:
writer.writerow([contest_dict[key][0], key, contest_dict[key][1]])
return contest_dict
def make_athletes_games(athelete_dict, nations_dict, games_dict):
athletes_games_dict = {}
with open ('athlete_events.csv') as csv_file:
csv_reader = csv.reader(csv_file, delimiter=',')
headers = next(csv_reader)
for row in csv_reader:
athlete = row[1]
game_name = row[8]
noc = row[7]
if (athlete, game_name) not in athletes_games_dict:
athletes_games_dict[(athlete, game_name)] = [len(athletes_games_dict) + 1, athelete_dict[athlete][0], nations_dict[noc][0], games_dict[game_name][0]]
with open('athletes_games.csv', 'w', newline='') as new_csv_file:
writer = csv.writer(new_csv_file, delimiter=',')
for key in athletes_games_dict:
writer.writerow(athletes_games_dict[key])
return athletes_games_dict
def make_contests_medals(athletes_games_dict, contests_dict):
contests_medals_dict = {}
with open ('athlete_events.csv') as csv_file:
csv_reader = csv.reader(csv_file, delimiter=',')
headers = next(csv_reader)
for row in csv_reader:
athlete = row[1]
game_name = row[8]
contest = row[13]
medal = row[14]
contests_medals_dict[len(contests_medals_dict) + 1] = [athletes_games_dict[(athlete, game_name)][0], contests_dict[contest][0], medal]
with open('contests_medals.csv', 'w', newline='') as new_csv_file:
writer = csv.writer(new_csv_file, delimiter=',')
for key in contests_medals_dict:
writer.writerow([key, contests_medals_dict[key][0], contests_medals_dict[key][1], contests_medals_dict[key][2]])
def main():
athelete_dict = make_athletes_table()
nations_dict = make_nations_table()
games_dict = make_games_table()
contests_dict = make_contests_table()
athletes_games_dict = make_athletes_games(athelete_dict, nations_dict, games_dict)
make_contests_medals(athletes_games_dict, contests_dict)
main()
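# Resulting schema (summarized from the code above): athletes.csv
# (id, name, sex, height, weight), nations.csv (id, noc, region), games.csv
# (id, year, season, city), contests.csv (id, event, sport), athletes_games.csv
# (id, athlete_id, nation_id, game_id) and contests_medals.csv
# (id, athletes_games_id, contest_id, medal) together normalize athlete_events.csv.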
|
py
|
1a5692bf6677bc96cfe8eff05fc67994f145f4dc
|
import FWCore.ParameterSet.Config as cms
# Pixel Digi Monitoring
from Validation.SiPixelPhase1DigisV.SiPixelPhase1DigisV_cfi import *
# Hits
from Validation.SiPixelPhase1HitsV.SiPixelPhase1HitsV_cfi import *
# RecHit (clusters)
from Validation.SiPixelPhase1RecHitsV.SiPixelPhase1RecHitsV_cfi import *
# Clusters ontrack/offtrack (also general tracks)
from Validation.SiPixelPhase1TrackClustersV.SiPixelPhase1TrackClustersV_cfi import *
# Tracking Truth MC
from Validation.SiPixelPhase1TrackingParticleV.SiPixelPhase1TrackingParticleV_cfi import *
PerModule.enabled = False
siPixelPhase1OfflineDQM_sourceV = cms.Sequence(SiPixelPhase1DigisAnalyzerV
+ SiPixelPhase1HitsAnalyzerV
+ SiPixelPhase1RecHitsAnalyzerV
+ SiPixelPhase1TrackClustersAnalyzerV
+ SiPixelPhase1TrackingParticleAnalyzerV
)
|
py
|
1a5692f7b5be5e87b78dac9d1ae51f280ca089f8
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for contrib.seq2seq.python.ops.attention_wrapper."""
# pylint: disable=unused-import,g-bad-import-order
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: enable=unused-import
import collections
import functools
import numpy as np
from tensorflow.contrib.seq2seq.python.ops import decoder
from tensorflow.contrib.seq2seq.python.ops import attention_wrapper as wrapper
from tensorflow.contrib.seq2seq.python.ops import helper as helper_py
from tensorflow.contrib.seq2seq.python.ops import basic_decoder
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.layers import core as layers_core
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import rnn
from tensorflow.python.ops import rnn_cell
from tensorflow.python.ops import variables
from tensorflow.python.ops import variable_scope as vs
from tensorflow.python.platform import test
from tensorflow.python.util import nest
# pylint: enable=g-import-not-at-top
# for testing
AttentionWrapperState = wrapper.AttentionWrapperState # pylint: disable=invalid-name
LSTMStateTuple = rnn_cell.LSTMStateTuple # pylint: disable=invalid-name
BasicDecoderOutput = basic_decoder.BasicDecoderOutput # pylint: disable=invalid-name
float32 = np.float32
int32 = np.int32
array = np.array
dtype = np.dtype
class ResultSummary(
collections.namedtuple('ResultSummary', ('shape', 'dtype', 'mean'))):
pass
def get_result_summary(x):
if isinstance(x, np.ndarray):
return ResultSummary(x.shape, x.dtype, x.mean())
return x
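# For example (illustrative), get_result_summary(np.zeros((2, 3), np.float32))
# returns ResultSummary(shape=(2, 3), dtype=dtype('float32'), mean=0.0), while
# non-ndarray inputs pass through unchanged.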
class AttentionWrapperTest(test.TestCase):
def assertAllCloseOrEqual(self, x, y, **kwargs):
if isinstance(x, np.ndarray) or isinstance(x, float):
return super(AttentionWrapperTest, self).assertAllClose(
x, y, atol=1e-3, **kwargs)
else:
self.assertAllEqual(x, y, **kwargs)
def testAttentionWrapperState(self):
num_fields = len(wrapper.AttentionWrapperState._fields) # pylint: disable=protected-access
state = wrapper.AttentionWrapperState(*([None] * num_fields))
new_state = state.clone(time=1)
self.assertEqual(state.time, None)
self.assertEqual(new_state.time, 1)
def testAttentionWrapperStateShapePropagation(self):
batch_size = 5
max_time = 5
num_units = 5
memory = random_ops.random_uniform(
[batch_size, max_time, num_units], seed=1)
mechanism = wrapper.LuongAttention(num_units, memory)
cell = wrapper.AttentionWrapper(rnn_cell.LSTMCell(num_units), mechanism)
# Create zero state with static batch size.
static_state = cell.zero_state(batch_size, dtypes.float32)
# Create zero state without static batch size.
state = cell.zero_state(array_ops.shape(memory)[0], dtypes.float32)
state = static_state.clone(
cell_state=state.cell_state, attention=state.attention)
self.assertEqual(state.cell_state.c.shape, static_state.cell_state.c.shape)
self.assertEqual(state.cell_state.h.shape, static_state.cell_state.h.shape)
self.assertEqual(state.attention.shape, static_state.attention.shape)
def _testWithAttention(self,
create_attention_mechanism,
expected_final_output,
expected_final_state,
attention_mechanism_depth=3,
alignment_history=False,
expected_final_alignment_history=None,
attention_layer_size=6,
attention_layer=None,
name=''):
attention_layer_sizes = (
[attention_layer_size] if attention_layer_size is not None else None)
attention_layers = (
[attention_layer] if attention_layer is not None else None)
self._testWithMaybeMultiAttention(
is_multi=False,
create_attention_mechanisms=[create_attention_mechanism],
expected_final_output=expected_final_output,
expected_final_state=expected_final_state,
attention_mechanism_depths=[attention_mechanism_depth],
alignment_history=alignment_history,
expected_final_alignment_history=expected_final_alignment_history,
attention_layer_sizes=attention_layer_sizes,
attention_layers=attention_layers,
name=name)
def _testWithMaybeMultiAttention(self,
is_multi,
create_attention_mechanisms,
expected_final_output,
expected_final_state,
attention_mechanism_depths,
alignment_history=False,
expected_final_alignment_history=None,
attention_layer_sizes=None,
attention_layers=None,
name=''):
# Allow is_multi to be True with a single mechanism to enable test for
# passing in a single mechanism in a list.
assert len(create_attention_mechanisms) == 1 or is_multi
encoder_sequence_length = [3, 2, 3, 1, 1]
decoder_sequence_length = [2, 0, 1, 2, 3]
batch_size = 5
encoder_max_time = 8
decoder_max_time = 4
input_depth = 7
encoder_output_depth = 10
cell_depth = 9
if attention_layer_sizes is not None:
# Compute sum of attention_layer_sizes. Use encoder_output_depth if None.
attention_depth = sum(attention_layer_size or encoder_output_depth
for attention_layer_size in attention_layer_sizes)
elif attention_layers is not None:
# Compute sum of attention_layers output depth.
attention_depth = sum(
attention_layer.compute_output_shape(
[batch_size, cell_depth + encoder_output_depth]).dims[-1].value
for attention_layer in attention_layers)
else:
attention_depth = encoder_output_depth * len(create_attention_mechanisms)
decoder_inputs = array_ops.placeholder_with_default(
np.random.randn(batch_size, decoder_max_time,
input_depth).astype(np.float32),
shape=(None, None, input_depth))
encoder_outputs = array_ops.placeholder_with_default(
np.random.randn(batch_size, encoder_max_time,
encoder_output_depth).astype(np.float32),
shape=(None, None, encoder_output_depth))
attention_mechanisms = [
creator(num_units=depth,
memory=encoder_outputs,
memory_sequence_length=encoder_sequence_length)
for creator, depth in zip(create_attention_mechanisms,
attention_mechanism_depths)]
with self.session(use_gpu=True) as sess:
with vs.variable_scope(
'root',
initializer=init_ops.random_normal_initializer(stddev=0.01, seed=3)):
attention_layer_size = attention_layer_sizes
attention_layer = attention_layers
if not is_multi:
if attention_layer_size is not None:
attention_layer_size = attention_layer_size[0]
if attention_layer is not None:
attention_layer = attention_layer[0]
cell = rnn_cell.LSTMCell(cell_depth)
cell = wrapper.AttentionWrapper(
cell,
attention_mechanisms if is_multi else attention_mechanisms[0],
attention_layer_size=attention_layer_size,
alignment_history=alignment_history,
attention_layer=attention_layer)
helper = helper_py.TrainingHelper(decoder_inputs,
decoder_sequence_length)
my_decoder = basic_decoder.BasicDecoder(
cell=cell,
helper=helper,
initial_state=cell.zero_state(
dtype=dtypes.float32, batch_size=batch_size))
final_outputs, final_state, _ = decoder.dynamic_decode(my_decoder)
self.assertTrue(
isinstance(final_outputs, basic_decoder.BasicDecoderOutput))
self.assertTrue(
isinstance(final_state, wrapper.AttentionWrapperState))
self.assertTrue(
isinstance(final_state.cell_state, rnn_cell.LSTMStateTuple))
self.assertEqual((batch_size, None, attention_depth),
tuple(final_outputs.rnn_output.get_shape().as_list()))
self.assertEqual((batch_size, None),
tuple(final_outputs.sample_id.get_shape().as_list()))
self.assertEqual((batch_size, attention_depth),
tuple(final_state.attention.get_shape().as_list()))
self.assertEqual((batch_size, cell_depth),
tuple(final_state.cell_state.c.get_shape().as_list()))
self.assertEqual((batch_size, cell_depth),
tuple(final_state.cell_state.h.get_shape().as_list()))
if alignment_history:
if is_multi:
state_alignment_history = []
for history_array in final_state.alignment_history:
history = history_array.stack()
self.assertEqual(
(None, batch_size, None),
tuple(history.get_shape().as_list()))
state_alignment_history.append(history)
state_alignment_history = tuple(state_alignment_history)
else:
state_alignment_history = final_state.alignment_history.stack()
self.assertEqual(
(None, batch_size, None),
tuple(state_alignment_history.get_shape().as_list()))
nest.assert_same_structure(
cell.state_size,
cell.zero_state(batch_size, dtypes.float32))
# Remove the history from final_state for purposes of the
# remainder of the tests.
final_state = final_state._replace(alignment_history=()) # pylint: disable=protected-access
else:
state_alignment_history = ()
sess.run(variables.global_variables_initializer())
sess_results = sess.run({
'final_outputs': final_outputs,
'final_state': final_state,
'state_alignment_history': state_alignment_history,
})
final_output_info = nest.map_structure(get_result_summary,
sess_results['final_outputs'])
final_state_info = nest.map_structure(get_result_summary,
sess_results['final_state'])
print(name)
print('Copy/paste:\nexpected_final_output = %s' % str(final_output_info))
print('expected_final_state = %s' % str(final_state_info))
nest.map_structure(self.assertAllCloseOrEqual, expected_final_output,
final_output_info)
nest.map_structure(self.assertAllCloseOrEqual, expected_final_state,
final_state_info)
if alignment_history: # by default, the wrapper emits attention as output
final_alignment_history_info = nest.map_structure(
get_result_summary, sess_results['state_alignment_history'])
print('expected_final_alignment_history = %s' %
str(final_alignment_history_info))
nest.map_structure(
self.assertAllCloseOrEqual,
# outputs are batch major but the stacked TensorArray is time major
expected_final_alignment_history,
final_alignment_history_info)
def testBahdanauNormalizedDType(self):
for dtype in [np.float16, np.float32, np.float64]:
num_units = 128
encoder_outputs = array_ops.placeholder(dtype, shape=[64, None, 256])
encoder_sequence_length = array_ops.placeholder(dtypes.int32, shape=[64])
decoder_inputs = array_ops.placeholder(dtype, shape=[64, None, 128])
decoder_sequence_length = array_ops.placeholder(dtypes.int32, shape=[64])
batch_size = 64
attention_mechanism = wrapper.BahdanauAttention(
num_units=num_units,
memory=encoder_outputs,
memory_sequence_length=encoder_sequence_length,
normalize=True,
dtype=dtype,
)
cell = rnn_cell.LSTMCell(num_units)
cell = wrapper.AttentionWrapper(cell, attention_mechanism)
helper = helper_py.TrainingHelper(decoder_inputs,
decoder_sequence_length)
my_decoder = basic_decoder.BasicDecoder(
cell=cell,
helper=helper,
initial_state=cell.zero_state(
dtype=dtype, batch_size=batch_size))
final_outputs, final_state, _ = decoder.dynamic_decode(my_decoder)
self.assertTrue(
isinstance(final_outputs, basic_decoder.BasicDecoderOutput))
self.assertEqual(final_outputs.rnn_output.dtype, dtype)
self.assertTrue(
isinstance(final_state, wrapper.AttentionWrapperState))
self.assertTrue(
isinstance(final_state.cell_state, rnn_cell.LSTMStateTuple))
def testBahdanauNotNormalized(self):
create_attention_mechanism = wrapper.BahdanauAttention
expected_final_output = BasicDecoderOutput(
rnn_output=ResultSummary(
shape=(5, 3, 6), dtype=dtype('float32'), mean=-0.0052250605),
sample_id=ResultSummary(
shape=(5, 3), dtype=dtype('int32'), mean=1.4))
expected_final_state = AttentionWrapperState(
cell_state=LSTMStateTuple(
c=ResultSummary(
shape=(5, 9), dtype=dtype('float32'), mean=-0.0040092287),
h=ResultSummary(
shape=(5, 9), dtype=dtype('float32'), mean=-0.0020015112)),
attention=ResultSummary(
shape=(5, 6), dtype=dtype('float32'), mean=-0.0052052638),
time=3,
alignments=ResultSummary(
shape=(5, 8), dtype=dtype('float32'), mean=0.125),
attention_state=ResultSummary(
shape=(5, 8), dtype=dtype('float32'), mean=0.125),
alignment_history=())
expected_final_alignment_history = ResultSummary(
shape=(3, 5, 8), dtype=dtype('float32'), mean=0.12500001)
self._testWithAttention(
create_attention_mechanism,
expected_final_output,
expected_final_state,
alignment_history=True,
expected_final_alignment_history=expected_final_alignment_history,
name='testBahdanauNotNormalized')
def testBahdanauNormalized(self):
create_attention_mechanism = functools.partial(
wrapper.BahdanauAttention, normalize=True)
expected_final_output = BasicDecoderOutput(
rnn_output=ResultSummary(
shape=(5, 3, 6), dtype=dtype('float32'), mean=-0.00597103),
sample_id=ResultSummary(
shape=(5, 3), dtype=dtype('int32'), mean=1.4))
expected_final_state = AttentionWrapperState(
cell_state=LSTMStateTuple(
c=ResultSummary(
shape=(5, 9), dtype=dtype('float32'), mean=-0.0040052128),
h=ResultSummary(
shape=(5, 9), dtype=dtype('float32'), mean=-0.0019996136)),
attention=ResultSummary(
shape=(5, 6), dtype=dtype('float32'), mean=-0.00595117),
time=3,
alignments=ResultSummary(
shape=(5, 8), dtype=dtype('float32'), mean=0.125),
attention_state=ResultSummary(
shape=(5, 8), dtype=dtype('float32'), mean=0.125),
alignment_history=())
self._testWithAttention(
create_attention_mechanism,
expected_final_output,
expected_final_state,
name='testBahdanauNormalized')
def testLuongNotNormalized(self):
create_attention_mechanism = wrapper.LuongAttention
expected_final_output = BasicDecoderOutput(
rnn_output=ResultSummary(
shape=(5, 3, 6), dtype=dtype('float32'), mean=-0.0052615386),
sample_id=ResultSummary(
shape=(5, 3), dtype=dtype('int32'), mean=1.4))
expected_final_state = AttentionWrapperState(
cell_state=LSTMStateTuple(
c=ResultSummary(
shape=(5, 9), dtype=dtype('float32'), mean=-0.004009536),
h=ResultSummary(
shape=(5, 9), dtype=dtype('float32'), mean=-0.0020016613)),
attention=ResultSummary(
shape=(5, 6), dtype=dtype('float32'), mean=-0.0051812846),
time=3,
alignments=ResultSummary(
shape=(5, 8), dtype=dtype('float32'), mean=0.125),
attention_state=ResultSummary(
shape=(5, 8), dtype=dtype('float32'), mean=0.125),
alignment_history=())
self._testWithAttention(
create_attention_mechanism,
expected_final_output,
expected_final_state,
attention_mechanism_depth=9,
name='testLuongNotNormalized')
def testLuongScaledDType(self):
# Test case for GitHub issue 18099
for dt in [np.float16, np.float32, np.float64]:
num_units = 128
encoder_outputs = array_ops.placeholder(dt, shape=[64, None, 256])
encoder_sequence_length = array_ops.placeholder(dtypes.int32, shape=[64])
decoder_inputs = array_ops.placeholder(dt, shape=[64, None, 128])
decoder_sequence_length = array_ops.placeholder(dtypes.int32, shape=[64])
batch_size = 64
attention_mechanism = wrapper.LuongAttention(
num_units=num_units,
memory=encoder_outputs,
memory_sequence_length=encoder_sequence_length,
scale=True,
dtype=dt,
)
cell = rnn_cell.LSTMCell(num_units)
cell = wrapper.AttentionWrapper(cell, attention_mechanism)
helper = helper_py.TrainingHelper(decoder_inputs,
decoder_sequence_length)
my_decoder = basic_decoder.BasicDecoder(
cell=cell,
helper=helper,
initial_state=cell.zero_state(
dtype=dt, batch_size=batch_size))
final_outputs, final_state, _ = decoder.dynamic_decode(my_decoder)
self.assertTrue(
isinstance(final_outputs, basic_decoder.BasicDecoderOutput))
self.assertEqual(final_outputs.rnn_output.dtype, dt)
self.assertTrue(
isinstance(final_state, wrapper.AttentionWrapperState))
self.assertTrue(
isinstance(final_state.cell_state, rnn_cell.LSTMStateTuple))
def testLuongScaled(self):
create_attention_mechanism = functools.partial(
wrapper.LuongAttention, scale=True)
expected_final_output = BasicDecoderOutput(
rnn_output=ResultSummary(
shape=(5, 3, 6), dtype=dtype('float32'), mean=-0.0052615386),
sample_id=ResultSummary(
shape=(5, 3), dtype=dtype('int32'), mean=1.4))
expected_final_state = AttentionWrapperState(
cell_state=LSTMStateTuple(
c=ResultSummary(
shape=(5, 9), dtype=dtype('float32'), mean=-0.004009536),
h=ResultSummary(
shape=(5, 9), dtype=dtype('float32'), mean=-0.0020016613)),
attention=ResultSummary(
shape=(5, 6), dtype=dtype('float32'), mean=-0.0051812846),
time=3,
alignments=ResultSummary(
shape=(5, 8), dtype=dtype('float32'), mean=0.125),
attention_state=ResultSummary(
shape=(5, 8), dtype=dtype('float32'), mean=0.125),
alignment_history=())
self._testWithAttention(
create_attention_mechanism,
expected_final_output,
expected_final_state,
attention_mechanism_depth=9,
name='testLuongScaled')
def testNotUseAttentionLayer(self):
create_attention_mechanism = wrapper.BahdanauAttention
expected_final_output = BasicDecoderOutput(
rnn_output=ResultSummary(
shape=(5, 3, 10), dtype=dtype('float32'), mean=0.117389656),
sample_id=ResultSummary(
shape=(5, 3), dtype=dtype('int32'), mean=4.5999999999999996))
expected_final_state = AttentionWrapperState(
cell_state=LSTMStateTuple(
c=ResultSummary(
shape=(5, 9), dtype=dtype('float32'), mean=-0.0063607907),
h=ResultSummary(
shape=(5, 9), dtype=dtype('float32'), mean=-0.00323448)),
attention=ResultSummary(
shape=(5, 10), dtype=dtype('float32'), mean=0.117389656,),
time=3,
alignments=ResultSummary(
shape=(5, 8), dtype=dtype('float32'), mean=0.125),
attention_state=ResultSummary(
shape=(5, 8), dtype=dtype('float32'), mean=0.125),
alignment_history=())
self._testWithAttention(
create_attention_mechanism,
expected_final_output,
expected_final_state,
attention_layer_size=None,
name='testNotUseAttentionLayer')
def test_safe_cumprod(self):
# Create some random test input
test_input = np.random.uniform(size=(10, 20))
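    # safe_cumprod is expected to match math_ops.cumprod up to floating-point
    # error; the 'safe' variant exists to avoid numerical issues when
    # cumulatively multiplying many probabilities (hence the places=5
    # comparison below instead of exact equality).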
for axis in [0, 1]:
for exclusive in [True, False]:
with self.cached_session():
# Compute cumprod with regular tf.cumprod
cumprod_output = math_ops.cumprod(
test_input, axis=axis, exclusive=exclusive).eval()
# Compute cumprod with safe_cumprod
safe_cumprod_output = wrapper.safe_cumprod(
test_input, axis=axis, exclusive=exclusive).eval()
for x, y in zip(cumprod_output.shape, safe_cumprod_output.shape):
self.assertEqual(x, y)
for x, y in zip(cumprod_output.flatten(),
safe_cumprod_output.flatten()):
# Use assertAlmostEqual for the actual values due to floating point
self.assertAlmostEqual(x, y, places=5)
def test_monotonic_attention(self):
def monotonic_attention_explicit(p_choose_i, previous_attention):
"""Explicitly compute monotonic attention distribution using numpy."""
# Base case for recurrence relation
out = [previous_attention[0]]
# Explicitly follow the recurrence relation
for j in range(1, p_choose_i.shape[0]):
out.append((1 - p_choose_i[j - 1])*out[j - 1] + previous_attention[j])
return p_choose_i*np.array(out)
# Generate a random batch of choosing probabilities for seq. len. 20
p_choose_i = np.random.uniform(size=(10, 20)).astype(np.float32)
# Generate random previous attention distributions
previous_attention = np.random.uniform(size=(10, 20)).astype(np.float32)
previous_attention /= previous_attention.sum(axis=1).reshape((-1, 1))
# Create the output to test against
explicit_output = np.array([
monotonic_attention_explicit(p, a)
for p, a in zip(p_choose_i, previous_attention)])
# Compute output with TensorFlow function, for both calculation types
with self.cached_session():
recursive_output = wrapper.monotonic_attention(
p_choose_i, previous_attention, 'recursive').eval()
self.assertEqual(recursive_output.ndim, explicit_output.ndim)
for x, y in zip(recursive_output.shape, explicit_output.shape):
self.assertEqual(x, y)
for x, y in zip(recursive_output.flatten(), explicit_output.flatten()):
# Use assertAlmostEqual for the actual values due to floating point
self.assertAlmostEqual(x, y, places=5)
# Generate new p_choose_i for parallel, which is unstable when p_choose_i[n]
# is close to 1
p_choose_i = np.random.uniform(0, 0.9, size=(10, 20)).astype(np.float32)
# Create new output to test against
explicit_output = np.array([
monotonic_attention_explicit(p, a)
for p, a in zip(p_choose_i, previous_attention)])
# Compute output with TensorFlow function, for both calculation types
with self.cached_session():
parallel_output = wrapper.monotonic_attention(
p_choose_i, previous_attention, 'parallel').eval()
self.assertEqual(parallel_output.ndim, explicit_output.ndim)
for x, y in zip(parallel_output.shape, explicit_output.shape):
self.assertEqual(x, y)
for x, y in zip(parallel_output.flatten(), explicit_output.flatten()):
# Use assertAlmostEqual for the actual values due to floating point
self.assertAlmostEqual(x, y, places=5)
# Now, test hard mode, where probabilities must be 0 or 1
p_choose_i = np.random.choice(np.array([0, 1], np.float32), (10, 20))
previous_attention = np.zeros((10, 20), np.float32)
# Randomly choose input sequence indices at each timestep
random_idx = np.random.randint(0, previous_attention.shape[1],
previous_attention.shape[0])
previous_attention[np.arange(previous_attention.shape[0]), random_idx] = 1
# Create the output to test against
explicit_output = np.array([
monotonic_attention_explicit(p, a)
for p, a in zip(p_choose_i, previous_attention)])
# Compute output with TensorFlow function, for both calculation types
with self.cached_session():
hard_output = wrapper.monotonic_attention(
# TensorFlow is unhappy when these are not wrapped as tf.constant
constant_op.constant(p_choose_i),
constant_op.constant(previous_attention),
'hard').eval()
self.assertEqual(hard_output.ndim, explicit_output.ndim)
for x, y in zip(hard_output.shape, explicit_output.shape):
self.assertEqual(x, y)
for x, y in zip(hard_output.flatten(), explicit_output.flatten()):
# Use assertAlmostEqual for the actual values due to floating point
self.assertAlmostEqual(x, y, places=5)
# Now, test recursively computing attention distributions vs. sampling
def sample(p_choose_i):
"""Generate a sequence of emit-ingest decisions from p_choose_i."""
output = np.zeros(p_choose_i.shape)
t_im1 = 0
for i in range(p_choose_i.shape[0]):
for j in range(t_im1, p_choose_i.shape[1]):
if np.random.uniform() <= p_choose_i[i, j]:
output[i, j] = 1
t_im1 = j
break
else:
t_im1 = p_choose_i.shape[1]
return output
# Now, the first axis is output timestep and second is input timestep
p_choose_i = np.random.uniform(size=(4, 5)).astype(np.float32)
# Generate the average of a bunch of samples
n_samples = 100000
sampled_output = np.mean(
[sample(p_choose_i) for _ in range(n_samples)], axis=0)
# Create initial previous_attention base case
recursive_output = [np.array([1] + [0]*(p_choose_i.shape[1] - 1),
np.float32)]
# Compute output with TensorFlow function, for both calculation types
with self.cached_session():
for j in range(p_choose_i.shape[0]):
# Compute attention distribution for this output time step
recursive_output.append(wrapper.monotonic_attention(
# newaxis is for adding the expected batch dimension
p_choose_i[j][np.newaxis],
recursive_output[-1][np.newaxis], 'recursive').eval()[0])
    # Stack the distributions together; remove the base case
recursive_output = np.array(recursive_output[1:])
self.assertEqual(recursive_output.ndim, sampled_output.ndim)
for x, y in zip(recursive_output.shape, sampled_output.shape):
self.assertEqual(x, y)
for x, y in zip(recursive_output.flatten(), sampled_output.flatten()):
# Use a very forgiving threshold since we are sampling
self.assertAlmostEqual(x, y, places=2)
def testBahdanauMonotonicNotNormalized(self):
create_attention_mechanism = functools.partial(
wrapper.BahdanauMonotonicAttention, sigmoid_noise=1.0,
sigmoid_noise_seed=3)
expected_final_output = BasicDecoderOutput(
rnn_output=ResultSummary(
shape=(5, 3, 6), dtype=dtype('float32'), mean=-0.002122893),
sample_id=ResultSummary(
shape=(5, 3), dtype=dtype('int32'), mean=1.7333333333333334))
expected_final_state = AttentionWrapperState(
cell_state=LSTMStateTuple(
c=ResultSummary(
shape=(5, 9), dtype=dtype('float32'), mean=-0.0040002423),
h=ResultSummary(
shape=(5, 9), dtype=dtype('float32'), mean=-0.0019968653)),
attention=ResultSummary(
shape=(5, 6), dtype=dtype('float32'), mean=-5.9313523e-05),
time=3,
alignments=ResultSummary(
shape=(5, 8), dtype=dtype('float32'), mean=0.032228071),
attention_state=ResultSummary(
shape=(5, 8), dtype=dtype('float32'), mean=0.032228071),
alignment_history=())
expected_final_alignment_history = ResultSummary(
shape=(3, 5, 8), dtype=dtype('float32'), mean=0.050430927)
self._testWithAttention(
create_attention_mechanism,
expected_final_output,
expected_final_state,
alignment_history=True,
expected_final_alignment_history=expected_final_alignment_history,
name='testBahdanauMonotonicNotNormalized')
def testBahdanauMonotonicNormalized(self):
create_attention_mechanism = functools.partial(
wrapper.BahdanauMonotonicAttention, normalize=True,
sigmoid_noise=1.0, sigmoid_noise_seed=3)
expected_final_output = BasicDecoderOutput(
rnn_output=ResultSummary(
shape=(5, 3, 6), dtype=dtype('float32'), mean=-0.0025896581),
sample_id=ResultSummary(
shape=(5, 3), dtype=dtype('int32'), mean=1.73333333))
expected_final_state = AttentionWrapperState(
cell_state=LSTMStateTuple(
c=ResultSummary(
shape=(5, 9), dtype=dtype('float32'), mean=-0.0040013152),
h=ResultSummary(
shape=(5, 9), dtype=dtype('float32'), mean=-0.0019973689)),
attention=ResultSummary(
shape=(5, 6), dtype=dtype('float32'), mean=-0.00069823361),
time=3,
alignments=ResultSummary(
shape=(5, 8), dtype=dtype('float32'), mean=0.029914695),
attention_state=ResultSummary(
shape=(5, 8), dtype=dtype('float32'), mean=0.029914695),
alignment_history=())
expected_final_alignment_history = ResultSummary(
shape=(3, 5, 8), dtype=dtype('float32'), mean=0.0465225502849)
self._testWithAttention(
create_attention_mechanism,
expected_final_output,
expected_final_state,
alignment_history=True,
expected_final_alignment_history=expected_final_alignment_history,
name='testBahdanauMonotonicNormalized')
def testBahdanauMonotonicHard(self):
    # Run the attention mechanism with mode='hard' and make sure the
    # probabilities are hard (all 0 or 1).
b, t, u, d = 10, 20, 30, 40
with self.session(use_gpu=True) as sess:
a = wrapper.BahdanauMonotonicAttention(
d,
random_ops.random_normal((b, t, u)),
mode='hard')
# Just feed previous attention as [1, 0, 0, ...]
attn, unused_state = a(
random_ops.random_normal((b, d)), array_ops.one_hot([0]*b, t))
sess.run(variables.global_variables_initializer())
attn_out = attn.eval()
# All values should be 0 or 1
self.assertTrue(np.all(np.logical_or(attn_out == 0, attn_out == 1)))
# Sum of distributions should be 0 or 1 (0 when all p_choose_i are 0)
self.assertTrue(np.all(np.logical_or(attn_out.sum(axis=1) == 1,
attn_out.sum(axis=1) == 0)))
def testLuongMonotonicNotNormalized(self):
create_attention_mechanism = functools.partial(
wrapper.LuongMonotonicAttention, sigmoid_noise=1.0,
sigmoid_noise_seed=3)
expected_final_output = BasicDecoderOutput(
rnn_output=ResultSummary(
shape=(5, 3, 6), dtype=dtype('float32'), mean=-0.0021257224),
sample_id=ResultSummary(
shape=(5, 3), dtype=dtype('int32'), mean=1.7333333333333334))
expected_final_state = AttentionWrapperState(
cell_state=LSTMStateTuple(
c=ResultSummary(
shape=(5, 9), dtype=dtype('float32'), mean=-0.0040003359),
h=ResultSummary(
shape=(5, 9), dtype=dtype('float32'), mean=-0.001996913)),
attention=ResultSummary(
shape=(5, 6), dtype=dtype('float32'), mean=-5.2024145e-05),
time=3,
alignments=ResultSummary(
shape=(5, 8), dtype=dtype('float32'), mean=0.032198936),
attention_state=ResultSummary(
shape=(5, 8), dtype=dtype('float32'), mean=0.032198936),
alignment_history=())
expected_final_alignment_history = ResultSummary(
shape=(3, 5, 8), dtype=dtype('float32'), mean=0.050387777)
self._testWithAttention(
create_attention_mechanism,
expected_final_output,
expected_final_state,
attention_mechanism_depth=9,
alignment_history=True,
expected_final_alignment_history=expected_final_alignment_history,
name='testLuongMonotonicNotNormalized')
def testLuongMonotonicScaled(self):
create_attention_mechanism = functools.partial(
wrapper.LuongMonotonicAttention, scale=True, sigmoid_noise=1.0,
sigmoid_noise_seed=3)
expected_final_output = BasicDecoderOutput(
rnn_output=ResultSummary(
shape=(5, 3, 6), dtype=dtype('float32'), mean=-0.0021257224),
sample_id=ResultSummary(
shape=(5, 3), dtype=dtype('int32'), mean=1.7333333333333334))
expected_final_state = AttentionWrapperState(
cell_state=LSTMStateTuple(
c=ResultSummary(
shape=(5, 9), dtype=dtype('float32'), mean=-0.0040003359),
h=ResultSummary(
shape=(5, 9), dtype=dtype('float32'), mean=-0.001996913)),
attention=ResultSummary(
shape=(5, 6), dtype=dtype('float32'), mean=-5.2024145e-05),
time=3,
alignments=ResultSummary(
shape=(5, 8), dtype=dtype('float32'), mean=0.032198936),
attention_state=ResultSummary(
shape=(5, 8), dtype=dtype('float32'), mean=0.032198936),
alignment_history=())
expected_final_alignment_history = ResultSummary(
shape=(3, 5, 8), dtype=dtype('float32'), mean=0.050387777)
self._testWithAttention(
create_attention_mechanism,
expected_final_output,
expected_final_state,
attention_mechanism_depth=9,
alignment_history=True,
expected_final_alignment_history=expected_final_alignment_history,
name='testLuongMonotonicScaled')
def testMultiAttention(self):
create_attention_mechanisms = (
wrapper.BahdanauAttention, wrapper.LuongAttention)
expected_final_output = BasicDecoderOutput(
rnn_output=ResultSummary(
shape=(5, 3, 7), dtype=dtype('float32'), mean=0.0011709079),
sample_id=ResultSummary(
shape=(5, 3), dtype=dtype('int32'), mean=3.2000000000000002))
expected_final_state = AttentionWrapperState(
cell_state=LSTMStateTuple(
c=ResultSummary(
shape=(5, 9), dtype=dtype('float32'), mean=-0.0038725811),
h=ResultSummary(
shape=(5, 9), dtype=dtype('float32'), mean=-0.0019329828)),
attention=ResultSummary(
shape=(5, 7), dtype=dtype('float32'), mean=0.001174294),
time=3,
alignments=(
ResultSummary(shape=(5, 8), dtype=dtype('float32'), mean=0.125),
ResultSummary(shape=(5, 8), dtype=dtype('float32'), mean=0.125)),
attention_state=(
ResultSummary(shape=(5, 8), dtype=dtype('float32'), mean=0.125),
ResultSummary(shape=(5, 8), dtype=dtype('float32'), mean=0.125)),
alignment_history=())
expected_final_alignment_history = (
ResultSummary(shape=(3, 5, 8), dtype=dtype('float32'), mean=0.125),
ResultSummary(shape=(3, 5, 8), dtype=dtype('float32'), mean=0.125))
self._testWithMaybeMultiAttention(
True,
create_attention_mechanisms,
expected_final_output,
expected_final_state,
attention_mechanism_depths=[9, 9],
attention_layer_sizes=[3, 4],
alignment_history=True,
expected_final_alignment_history=expected_final_alignment_history,
name='testMultiAttention')
def testMultiAttentionWithLayerInstances(self):
create_attention_mechanisms = (
wrapper.BahdanauAttention, wrapper.LuongAttention)
expected_final_output = BasicDecoderOutput(
rnn_output=ResultSummary(
shape=(5, 3, 7), dtype=dtype('float32'), mean=0.0011709079),
sample_id=ResultSummary(
shape=(5, 3), dtype=dtype('int32'), mean=3.2000000000000002))
expected_final_state = AttentionWrapperState(
cell_state=LSTMStateTuple(
c=ResultSummary(
shape=(5, 9), dtype=dtype('float32'), mean=-0.0038725811),
h=ResultSummary(
shape=(5, 9), dtype=dtype('float32'), mean=-0.0019329828)),
attention=ResultSummary(
shape=(5, 7), dtype=dtype('float32'), mean=0.001174294),
time=3,
alignments=(
ResultSummary(shape=(5, 8), dtype=dtype('float32'), mean=0.125),
ResultSummary(shape=(5, 8), dtype=dtype('float32'), mean=0.125)),
attention_state=(
ResultSummary(shape=(5, 8), dtype=dtype('float32'), mean=0.125),
ResultSummary(shape=(5, 8), dtype=dtype('float32'), mean=0.125)),
alignment_history=())
expected_final_alignment_history = (
ResultSummary(shape=(3, 5, 8), dtype=dtype('float32'), mean=0.125),
ResultSummary(shape=(3, 5, 8), dtype=dtype('float32'), mean=0.125))
self._testWithMaybeMultiAttention(
True,
create_attention_mechanisms,
expected_final_output,
expected_final_state,
attention_mechanism_depths=[9, 9],
attention_layers=[layers_core.Dense(3, use_bias=False),
layers_core.Dense(4, use_bias=False)],
alignment_history=True,
expected_final_alignment_history=expected_final_alignment_history,
name='testMultiAttention')
def testLuongMonotonicHard(self):
    # Run the attention mechanism with mode='hard' and make sure the
    # probabilities are hard (all 0 or 1).
b, t, u, d = 10, 20, 30, 40
with self.session(use_gpu=True) as sess:
a = wrapper.LuongMonotonicAttention(
d,
random_ops.random_normal((b, t, u)),
mode='hard')
# Just feed previous attention as [1, 0, 0, ...]
attn, unused_state = a(
random_ops.random_normal((b, d)), array_ops.one_hot([0]*b, t))
sess.run(variables.global_variables_initializer())
attn_out = attn.eval()
# All values should be 0 or 1
self.assertTrue(np.all(np.logical_or(attn_out == 0, attn_out == 1)))
# Sum of distributions should be 0 or 1 (0 when all p_choose_i are 0)
self.assertTrue(np.all(np.logical_or(attn_out.sum(axis=1) == 1,
attn_out.sum(axis=1) == 0)))
def testMultiAttentionNoAttentionLayer(self):
create_attention_mechanisms = (
wrapper.BahdanauAttention, wrapper.LuongAttention)
expected_final_output = BasicDecoderOutput(
rnn_output=ResultSummary(
shape=(5, 3, 20), dtype=dtype('float32'), mean=0.115853324533),
sample_id=ResultSummary(
shape=(5, 3), dtype=dtype('int32'), mean=8.6))
expected_final_state = AttentionWrapperState(
cell_state=LSTMStateTuple(
c=ResultSummary(
shape=(5, 9), dtype=dtype('float32'), mean=-0.003545674),
h=ResultSummary(
shape=(5, 9), dtype=dtype('float32'), mean=-0.0018327223)),
attention=ResultSummary(
shape=(5, 20), dtype=dtype('float32'), mean=0.11462739855),
time=3,
alignments=(ResultSummary(
shape=(5, 8), dtype=dtype('float32'), mean=0.125),
ResultSummary(
shape=(5, 8), dtype=dtype('float32'), mean=0.125)),
alignment_history=(),
attention_state=(ResultSummary(
shape=(5, 8), dtype=dtype('float32'), mean=0.125),
ResultSummary(
shape=(5, 8), dtype=dtype('float32'), mean=0.125)))
expected_final_alignment_history = (
ResultSummary(shape=(3, 5, 8), dtype=dtype('float32'), mean=0.125),
ResultSummary(shape=(3, 5, 8), dtype=dtype('float32'), mean=0.125))
self._testWithMaybeMultiAttention(
is_multi=True,
create_attention_mechanisms=create_attention_mechanisms,
expected_final_output=expected_final_output,
expected_final_state=expected_final_state,
attention_mechanism_depths=[9, 9],
alignment_history=True,
expected_final_alignment_history=expected_final_alignment_history,
name='testMultiAttention')
def testSingleAttentionAsList(self):
create_attention_mechanisms = [wrapper.BahdanauAttention]
expected_final_output = BasicDecoderOutput(
rnn_output=ResultSummary(
shape=(5, 3, 3), dtype=dtype('float32'), mean=-0.0098485695),
sample_id=ResultSummary(
shape=(5, 3), dtype=dtype('int32'), mean=1.8))
expected_final_state = AttentionWrapperState(
cell_state=LSTMStateTuple(
c=ResultSummary(
shape=(5, 9), dtype=dtype('float32'), mean=-0.0040023471),
h=ResultSummary(
shape=(5, 9), dtype=dtype('float32'), mean=-0.0019979973)),
attention=ResultSummary(
shape=(5, 3), dtype=dtype('float32'), mean=-0.0098808752),
time=3,
alignments=(
ResultSummary(shape=(5, 8), dtype=dtype('float32'), mean=0.125),),
attention_state=(
ResultSummary(shape=(5, 8), dtype=dtype('float32'), mean=0.125),),
alignment_history=())
expected_final_alignment_history = (
ResultSummary(shape=(3, 5, 8), dtype=dtype('float32'), mean=0.125),)
self._testWithMaybeMultiAttention(
is_multi=True, # pass the AttentionMechanism wrapped in a list
create_attention_mechanisms=create_attention_mechanisms,
expected_final_output=expected_final_output,
expected_final_state=expected_final_state,
attention_mechanism_depths=[9],
attention_layer_sizes=[3],
alignment_history=True,
expected_final_alignment_history=expected_final_alignment_history,
name='testMultiAttention')
def testCustomizedAttention(self):
batch_size = 2
max_time = 3
num_units = 2
memory = constant_op.constant([[[1., 1.], [2., 2.], [3., 3.]],
[[4., 4.], [5., 5.], [6., 6.]]])
memory_sequence_length = constant_op.constant([3, 2])
attention_mechanism = wrapper.BahdanauAttention(num_units, memory,
memory_sequence_length)
  # Sets all returned values to all ones.
def _customized_attention(unused_attention_mechanism, unused_cell_output,
unused_attention_state, unused_attention_layer):
"""Customized attention.
Returns:
attention: `Tensor` of shape [batch_size, num_units], attention output.
alignments: `Tensor` of shape [batch_size, max_time], sigma value for
each input memory (prob. function of input keys).
next_attention_state: A `Tensor` representing the next state for the
attention.
"""
attention = array_ops.ones([batch_size, num_units])
alignments = array_ops.ones([batch_size, max_time])
next_attention_state = alignments
return attention, alignments, next_attention_state
attention_cell = wrapper.AttentionWrapper(
rnn_cell.LSTMCell(2),
attention_mechanism,
attention_layer_size=None, # don't use attention layer.
output_attention=False,
alignment_history=(),
attention_fn=_customized_attention,
name='attention')
self.assertEqual(num_units, attention_cell.output_size)
initial_state = attention_cell.zero_state(
batch_size=2, dtype=dtypes.float32)
source_input_emb = array_ops.ones([2, 3, 2])
source_input_length = constant_op.constant([3, 2])
    # 'state' is an AttentionWrapperState with fields
    # (cell_state, attention, time, alignments, alignment_history,
    #  attention_state)
output, state = rnn.dynamic_rnn(
attention_cell,
inputs=source_input_emb,
sequence_length=source_input_length,
initial_state=initial_state,
dtype=dtypes.float32)
with self.session() as sess:
sess.run(variables.global_variables_initializer())
output_value, state_value = sess.run([output, state], feed_dict={})
self.assertAllEqual(np.array([2, 3, 2]), output_value.shape)
self.assertAllClose(np.array([[1., 1.], [1., 1.]]), state_value.attention)
self.assertAllClose(
np.array([[1., 1., 1.], [1., 1., 1.]]), state_value.alignments)
self.assertAllClose(
np.array([[1., 1., 1.], [1., 1., 1.]]), state_value.attention_state)
if __name__ == '__main__':
test.main()
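# A minimal standalone NumPy sketch (illustrative only, not part of the test
# suite above) of the monotonic-attention recurrence the tests verify:
#   q[0] = a_prev[0];  q[j] = (1 - p[j-1]) * q[j-1] + a_prev[j]
#   attention = p * q
import numpy as np


def monotonic_attention_sketch(p, a_prev):
  """Compute one step of monotonic attention for 1-D p and a_prev."""
  q = np.empty_like(a_prev)
  q[0] = a_prev[0]  # base case of the recurrence
  for j in range(1, len(p)):
    q[j] = (1 - p[j - 1]) * q[j - 1] + a_prev[j]
  return p * q


# With p = [0.9, 0.1, 0.5] and all previous attention on the first input
# (a_prev = [1, 0, 0]), this yields [0.9, 0.01, 0.045].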
|
py
|
1a56930dcd3cafca3aee90c8e6e0429d2868ef54
|
# Copyright 2019 Kyoto University (Hirofumi Inaguma)
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
"""Training/decoding definition for the speech translation task."""
import itertools
import json
import logging
import os
from chainer import training
from chainer.training import extensions
import numpy as np
import torch
from espnet.asr.asr_utils import adadelta_eps_decay
from espnet.asr.asr_utils import adam_lr_decay
from espnet.asr.asr_utils import add_results_to_json
from espnet.asr.asr_utils import CompareValueTrigger
from espnet.asr.asr_utils import restore_snapshot
from espnet.asr.asr_utils import snapshot_object
from espnet.asr.asr_utils import torch_load
from espnet.asr.asr_utils import torch_resume
from espnet.asr.asr_utils import torch_snapshot
from espnet.asr.pytorch_backend.asr_init import load_trained_model
from espnet.asr.pytorch_backend.asr_init import load_trained_modules
from espnet.nets.pytorch_backend.e2e_asr import pad_list
from espnet.nets.st_interface import STInterface
from espnet.utils.dataset import ChainerDataLoader
from espnet.utils.dataset import TransformDataset
from espnet.utils.deterministic_utils import set_deterministic_pytorch
from espnet.utils.dynamic_import import dynamic_import
from espnet.utils.io_utils import LoadInputsAndTargets
from espnet.utils.training.batchfy import make_batchset
from espnet.utils.training.iterators import ShufflingEnabler
from espnet.utils.training.tensorboard_logger import TensorboardLogger
from espnet.utils.training.train_utils import check_early_stop
from espnet.utils.training.train_utils import set_early_stop
from espnet.asr.pytorch_backend.asr import CustomConverter as ASRCustomConverter
from espnet.asr.pytorch_backend.asr import CustomEvaluator
from espnet.asr.pytorch_backend.asr import CustomUpdater
class CustomConverter(ASRCustomConverter):
"""Custom batch converter for Pytorch.
Args:
subsampling_factor (int): The subsampling factor.
dtype (torch.dtype): Data type to convert.
        use_source_text (bool): whether to use the source transcription.
"""
def __init__(
self, subsampling_factor=1, dtype=torch.float32, use_source_text=False
):
"""Construct a CustomConverter object."""
super().__init__(subsampling_factor=subsampling_factor, dtype=dtype)
self.use_source_text = use_source_text
def __call__(self, batch, device=torch.device("cpu")):
"""Transform a batch and send it to a device.
Args:
batch (list): The batch to transform.
device (torch.device): The device to send to.
Returns:
tuple(torch.Tensor, torch.Tensor, torch.Tensor)
"""
        # the batch should be wrapped in a single-element list
assert len(batch) == 1
xs, ys, ys_src = batch[0]
# get batch of lengths of input sequences
ilens = np.array([x.shape[0] for x in xs])
ilens = torch.from_numpy(ilens).to(device)
xs_pad = pad_list([torch.from_numpy(x).float() for x in xs], 0).to(
device, dtype=self.dtype
)
ys_pad = pad_list(
[torch.from_numpy(np.array(y, dtype=np.int64)) for y in ys],
self.ignore_id,
).to(device)
if self.use_source_text:
ys_pad_src = pad_list(
[torch.from_numpy(np.array(y, dtype=np.int64)) for y in ys_src],
self.ignore_id,
).to(device)
else:
ys_pad_src = None
return xs_pad, ilens, ys_pad, ys_pad_src
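# A hedged usage sketch (illustrative only; this helper is not called by the
# training pipeline). It shows the batch structure CustomConverter expects:
# a single-element list containing (xs, ys, ys_src).
def _demo_custom_converter():
    conv = CustomConverter(dtype=torch.float32, use_source_text=True)
    xs = [np.random.randn(50, 83).astype(np.float32)]  # one 50-frame, 83-dim utterance
    ys = [[3, 7, 11]]  # target-language token ids (made-up values)
    ys_src = [[2, 5]]  # source transcription ids (made-up values)
    xs_pad, ilens, ys_pad, ys_pad_src = conv([(xs, ys, ys_src)])
    # xs_pad: (1, 50, 83) float tensor; ilens: (1,); ys_pad / ys_pad_src are
    # padded with the converter's ignore_id.
    return xs_pad, ilens, ys_pad, ys_pad_src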
def train(args):
"""Train with the given args.
Args:
args (namespace): The program arguments.
"""
set_deterministic_pytorch(args)
# check cuda availability
if not torch.cuda.is_available():
logging.warning("cuda is not available")
# get input and output dimension info
with open(args.valid_json, "rb") as f:
valid_json = json.load(f)["utts"]
utts = list(valid_json.keys())
idim = int(valid_json[utts[0]]["input"][0]["shape"][-1])
odim = int(valid_json[utts[0]]["output"][0]["shape"][-1])
logging.info("#input dims : " + str(idim))
logging.info("#output dims: " + str(odim))
# Initialize with pre-trained ASR encoder and MT decoder
if args.enc_init is not None or args.dec_init is not None:
model = load_trained_modules(idim, odim, args, interface=STInterface)
else:
model_class = dynamic_import(args.model_module)
model = model_class(idim, odim, args)
assert isinstance(model, STInterface)
total_subsampling_factor = model.get_total_subsampling_factor()
# write model config
if not os.path.exists(args.outdir):
os.makedirs(args.outdir)
model_conf = args.outdir + "/model.json"
with open(model_conf, "wb") as f:
logging.info("writing a model config file to " + model_conf)
f.write(
json.dumps(
(idim, odim, vars(args)), indent=4, ensure_ascii=False, sort_keys=True
).encode("utf_8")
)
for key in sorted(vars(args).keys()):
logging.info("ARGS: " + key + ": " + str(vars(args)[key]))
reporter = model.reporter
# check the use of multi-gpu
if args.ngpu > 1:
if args.batch_size != 0:
logging.warning(
"batch size is automatically increased (%d -> %d)"
% (args.batch_size, args.batch_size * args.ngpu)
)
args.batch_size *= args.ngpu
# set torch device
device = torch.device("cuda" if args.ngpu > 0 else "cpu")
if args.train_dtype in ("float16", "float32", "float64"):
dtype = getattr(torch, args.train_dtype)
else:
dtype = torch.float32
model = model.to(device=device, dtype=dtype)
logging.warning(
"num. model params: {:,} (num. trained: {:,} ({:.1f}%))".format(
sum(p.numel() for p in model.parameters()),
sum(p.numel() for p in model.parameters() if p.requires_grad),
sum(p.numel() for p in model.parameters() if p.requires_grad)
* 100.0
/ sum(p.numel() for p in model.parameters()),
)
)
# Setup an optimizer
if args.opt == "adadelta":
optimizer = torch.optim.Adadelta(
model.parameters(), rho=0.95, eps=args.eps, weight_decay=args.weight_decay
)
elif args.opt == "adam":
optimizer = torch.optim.Adam(
model.parameters(), lr=args.lr, weight_decay=args.weight_decay
)
elif args.opt == "noam":
from espnet.nets.pytorch_backend.transformer.optimizer import get_std_opt
optimizer = get_std_opt(
model.parameters(),
args.adim,
args.transformer_warmup_steps,
args.transformer_lr,
)
else:
raise NotImplementedError("unknown optimizer: " + args.opt)
# setup apex.amp
if args.train_dtype in ("O0", "O1", "O2", "O3"):
try:
from apex import amp
except ImportError as e:
logging.error(
f"You need to install apex for --train-dtype {args.train_dtype}. "
"See https://github.com/NVIDIA/apex#linux"
)
raise e
if args.opt == "noam":
model, optimizer.optimizer = amp.initialize(
model, optimizer.optimizer, opt_level=args.train_dtype
)
else:
model, optimizer = amp.initialize(
model, optimizer, opt_level=args.train_dtype
)
use_apex = True
else:
use_apex = False
# FIXME: TOO DIRTY HACK
setattr(optimizer, "target", reporter)
setattr(optimizer, "serialize", lambda s: reporter.serialize(s))
# Setup a converter
converter = CustomConverter(
subsampling_factor=model.subsample[0],
dtype=dtype,
use_source_text=args.asr_weight > 0 or args.mt_weight > 0,
)
# read json data
with open(args.train_json, "rb") as f:
train_json = json.load(f)["utts"]
with open(args.valid_json, "rb") as f:
valid_json = json.load(f)["utts"]
use_sortagrad = args.sortagrad == -1 or args.sortagrad > 0
# make minibatch list (variable length)
train = make_batchset(
train_json,
args.batch_size,
args.maxlen_in,
args.maxlen_out,
args.minibatches,
min_batch_size=args.ngpu if args.ngpu > 1 else 1,
shortest_first=use_sortagrad,
count=args.batch_count,
batch_bins=args.batch_bins,
batch_frames_in=args.batch_frames_in,
batch_frames_out=args.batch_frames_out,
batch_frames_inout=args.batch_frames_inout,
iaxis=0,
oaxis=0,
)
valid = make_batchset(
valid_json,
args.batch_size,
args.maxlen_in,
args.maxlen_out,
args.minibatches,
min_batch_size=args.ngpu if args.ngpu > 1 else 1,
count=args.batch_count,
batch_bins=args.batch_bins,
batch_frames_in=args.batch_frames_in,
batch_frames_out=args.batch_frames_out,
batch_frames_inout=args.batch_frames_inout,
iaxis=0,
oaxis=0,
)
load_tr = LoadInputsAndTargets(
mode="asr",
load_output=True,
preprocess_conf=args.preprocess_conf,
preprocess_args={"train": True}, # Switch the mode of preprocessing
)
load_cv = LoadInputsAndTargets(
mode="asr",
load_output=True,
preprocess_conf=args.preprocess_conf,
preprocess_args={"train": False}, # Switch the mode of preprocessing
)
    # hack to force the batch-size argument to 1: the actual batch size is
    # encoded by the list itself. The default collate function would convert
    # numpy arrays to pytorch tensors, so we use a pass-through collate
    # function that returns the single list element unchanged.
train_iter = ChainerDataLoader(
dataset=TransformDataset(train, lambda data: converter([load_tr(data)])),
batch_size=1,
num_workers=args.n_iter_processes,
shuffle=not use_sortagrad,
collate_fn=lambda x: x[0],
)
valid_iter = ChainerDataLoader(
dataset=TransformDataset(valid, lambda data: converter([load_cv(data)])),
batch_size=1,
shuffle=False,
collate_fn=lambda x: x[0],
num_workers=args.n_iter_processes,
)
# Set up a trainer
updater = CustomUpdater(
model,
args.grad_clip,
{"main": train_iter},
optimizer,
device,
args.ngpu,
args.grad_noise,
args.accum_grad,
use_apex=use_apex,
)
trainer = training.Trainer(updater, (args.epochs, "epoch"), out=args.outdir)
if use_sortagrad:
trainer.extend(
ShufflingEnabler([train_iter]),
trigger=(args.sortagrad if args.sortagrad != -1 else args.epochs, "epoch"),
)
# Resume from a snapshot
if args.resume:
logging.info("resumed from %s" % args.resume)
torch_resume(args.resume, trainer)
# Evaluate the model with the test dataset for each epoch
if args.save_interval_iters > 0:
trainer.extend(
CustomEvaluator(model, {"main": valid_iter}, reporter, device, args.ngpu),
trigger=(args.save_interval_iters, "iteration"),
)
else:
trainer.extend(
CustomEvaluator(model, {"main": valid_iter}, reporter, device, args.ngpu)
)
# Save attention weight at each epoch
if args.num_save_attention > 0:
data = sorted(
list(valid_json.items())[: args.num_save_attention],
key=lambda x: int(x[1]["input"][0]["shape"][1]),
reverse=True,
)
if hasattr(model, "module"):
att_vis_fn = model.module.calculate_all_attentions
plot_class = model.module.attention_plot_class
else:
att_vis_fn = model.calculate_all_attentions
plot_class = model.attention_plot_class
att_reporter = plot_class(
att_vis_fn,
data,
args.outdir + "/att_ws",
converter=converter,
transform=load_cv,
device=device,
subsampling_factor=total_subsampling_factor,
)
trainer.extend(att_reporter, trigger=(1, "epoch"))
else:
att_reporter = None
# Save CTC prob at each epoch
if (args.asr_weight > 0 and args.mtlalpha > 0) and args.num_save_ctc > 0:
# NOTE: sort it by output lengths
data = sorted(
list(valid_json.items())[: args.num_save_ctc],
key=lambda x: int(x[1]["output"][0]["shape"][0]),
reverse=True,
)
if hasattr(model, "module"):
ctc_vis_fn = model.module.calculate_all_ctc_probs
plot_class = model.module.ctc_plot_class
else:
ctc_vis_fn = model.calculate_all_ctc_probs
plot_class = model.ctc_plot_class
ctc_reporter = plot_class(
ctc_vis_fn,
data,
args.outdir + "/ctc_prob",
converter=converter,
transform=load_cv,
device=device,
subsampling_factor=total_subsampling_factor,
)
trainer.extend(ctc_reporter, trigger=(1, "epoch"))
else:
ctc_reporter = None
# Make a plot for training and validation values
trainer.extend(
extensions.PlotReport(
[
"main/loss",
"validation/main/loss",
"main/loss_asr",
"validation/main/loss_asr",
"main/loss_mt",
"validation/main/loss_mt",
"main/loss_st",
"validation/main/loss_st",
],
"epoch",
file_name="loss.png",
)
)
trainer.extend(
extensions.PlotReport(
[
"main/acc",
"validation/main/acc",
"main/acc_asr",
"validation/main/acc_asr",
"main/acc_mt",
"validation/main/acc_mt",
],
"epoch",
file_name="acc.png",
)
)
trainer.extend(
extensions.PlotReport(
["main/bleu", "validation/main/bleu"], "epoch", file_name="bleu.png"
)
)
# Save best models
trainer.extend(
snapshot_object(model, "model.loss.best"),
trigger=training.triggers.MinValueTrigger("validation/main/loss"),
)
trainer.extend(
snapshot_object(model, "model.acc.best"),
trigger=training.triggers.MaxValueTrigger("validation/main/acc"),
)
# save snapshot which contains model and optimizer states
if args.save_interval_iters > 0:
trainer.extend(
torch_snapshot(filename="snapshot.iter.{.updater.iteration}"),
trigger=(args.save_interval_iters, "iteration"),
)
else:
trainer.extend(torch_snapshot(), trigger=(1, "epoch"))
# epsilon decay in the optimizer
if args.opt == "adadelta":
if args.criterion == "acc":
trainer.extend(
restore_snapshot(
model, args.outdir + "/model.acc.best", load_fn=torch_load
),
trigger=CompareValueTrigger(
"validation/main/acc",
lambda best_value, current_value: best_value > current_value,
),
)
trainer.extend(
adadelta_eps_decay(args.eps_decay),
trigger=CompareValueTrigger(
"validation/main/acc",
lambda best_value, current_value: best_value > current_value,
),
)
elif args.criterion == "loss":
trainer.extend(
restore_snapshot(
model, args.outdir + "/model.loss.best", load_fn=torch_load
),
trigger=CompareValueTrigger(
"validation/main/loss",
lambda best_value, current_value: best_value < current_value,
),
)
trainer.extend(
adadelta_eps_decay(args.eps_decay),
trigger=CompareValueTrigger(
"validation/main/loss",
lambda best_value, current_value: best_value < current_value,
),
)
elif args.opt == "adam":
if args.criterion == "acc":
trainer.extend(
restore_snapshot(
model, args.outdir + "/model.acc.best", load_fn=torch_load
),
trigger=CompareValueTrigger(
"validation/main/acc",
lambda best_value, current_value: best_value > current_value,
),
)
trainer.extend(
adam_lr_decay(args.lr_decay),
trigger=CompareValueTrigger(
"validation/main/acc",
lambda best_value, current_value: best_value > current_value,
),
)
elif args.criterion == "loss":
trainer.extend(
restore_snapshot(
model, args.outdir + "/model.loss.best", load_fn=torch_load
),
trigger=CompareValueTrigger(
"validation/main/loss",
lambda best_value, current_value: best_value < current_value,
),
)
trainer.extend(
adam_lr_decay(args.lr_decay),
trigger=CompareValueTrigger(
"validation/main/loss",
lambda best_value, current_value: best_value < current_value,
),
)
# Write a log of evaluation statistics for each epoch
trainer.extend(
extensions.LogReport(trigger=(args.report_interval_iters, "iteration"))
)
report_keys = [
"epoch",
"iteration",
"main/loss",
"main/loss_st",
"main/loss_asr",
"validation/main/loss",
"validation/main/loss_st",
"validation/main/loss_asr",
"main/acc",
"validation/main/acc",
]
if args.asr_weight > 0:
report_keys.append("main/acc_asr")
report_keys.append("validation/main/acc_asr")
report_keys += ["elapsed_time"]
if args.opt == "adadelta":
trainer.extend(
extensions.observe_value(
"eps",
lambda trainer: trainer.updater.get_optimizer("main").param_groups[0][
"eps"
],
),
trigger=(args.report_interval_iters, "iteration"),
)
report_keys.append("eps")
elif args.opt in ["adam", "noam"]:
trainer.extend(
extensions.observe_value(
"lr",
lambda trainer: trainer.updater.get_optimizer("main").param_groups[0][
"lr"
],
),
trigger=(args.report_interval_iters, "iteration"),
)
report_keys.append("lr")
if args.asr_weight > 0:
if args.mtlalpha > 0:
report_keys.append("main/cer_ctc")
report_keys.append("validation/main/cer_ctc")
if args.mtlalpha < 1:
if args.report_cer:
report_keys.append("validation/main/cer")
if args.report_wer:
report_keys.append("validation/main/wer")
if args.report_bleu:
report_keys.append("main/bleu")
report_keys.append("validation/main/bleu")
trainer.extend(
extensions.PrintReport(report_keys),
trigger=(args.report_interval_iters, "iteration"),
)
trainer.extend(extensions.ProgressBar(update_interval=args.report_interval_iters))
set_early_stop(trainer, args)
if args.tensorboard_dir is not None and args.tensorboard_dir != "":
from torch.utils.tensorboard import SummaryWriter
trainer.extend(
TensorboardLogger(
SummaryWriter(args.tensorboard_dir),
att_reporter=att_reporter,
ctc_reporter=ctc_reporter,
),
trigger=(args.report_interval_iters, "iteration"),
)
# Run the training
trainer.run()
check_early_stop(trainer, args.epochs)
def trans(args):
"""Decode with the given args.
Args:
args (namespace): The program arguments.
"""
set_deterministic_pytorch(args)
model, train_args = load_trained_model(args.model)
assert isinstance(model, STInterface)
model.trans_args = args
# gpu
if args.ngpu == 1:
gpu_id = list(range(args.ngpu))
logging.info("gpu id: " + str(gpu_id))
model.cuda()
# read json data
with open(args.trans_json, "rb") as f:
js = json.load(f)["utts"]
new_js = {}
load_inputs_and_targets = LoadInputsAndTargets(
mode="asr",
load_output=False,
sort_in_input_length=False,
preprocess_conf=train_args.preprocess_conf
if args.preprocess_conf is None
else args.preprocess_conf,
preprocess_args={"train": False},
)
if args.batchsize == 0:
with torch.no_grad():
for idx, name in enumerate(js.keys(), 1):
logging.info("(%d/%d) decoding " + name, idx, len(js.keys()))
batch = [(name, js[name])]
feat = load_inputs_and_targets(batch)[0][0]
nbest_hyps = model.translate(
feat,
args,
train_args.char_list,
)
new_js[name] = add_results_to_json(
js[name], nbest_hyps, train_args.char_list
)
else:
def grouper(n, iterable, fillvalue=None):
kargs = [iter(iterable)] * n
return itertools.zip_longest(*kargs, fillvalue=fillvalue)
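        # e.g. grouper(3, "ABCDE") yields ("A", "B", "C") then
        # ("D", "E", None); the None padding is filtered out below.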
# sort data if batchsize > 1
keys = list(js.keys())
if args.batchsize > 1:
feat_lens = [js[key]["input"][0]["shape"][0] for key in keys]
sorted_index = sorted(range(len(feat_lens)), key=lambda i: -feat_lens[i])
keys = [keys[i] for i in sorted_index]
with torch.no_grad():
for names in grouper(args.batchsize, keys, None):
names = [name for name in names if name]
batch = [(name, js[name]) for name in names]
feats = load_inputs_and_targets(batch)[0]
nbest_hyps = model.translate_batch(
feats,
args,
train_args.char_list,
)
for i, nbest_hyp in enumerate(nbest_hyps):
name = names[i]
new_js[name] = add_results_to_json(
js[name], nbest_hyp, train_args.char_list
)
with open(args.result_label, "wb") as f:
f.write(
json.dumps(
{"utts": new_js}, indent=4, ensure_ascii=False, sort_keys=True
).encode("utf_8")
)
|
py
|
1a5694c944e284ae00f962a870e6d1a51e1d644e
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
from .. import _utilities
import typing
# Export this package's modules as members:
from ._enums import *
from .blockchain_member import *
from .get_blockchain_member import *
from .get_transaction_node import *
from .list_blockchain_member_api_keys import *
from .list_location_consortiums import *
from .list_transaction_node_api_keys import *
from .transaction_node import *
from ._inputs import *
from . import outputs
# Make subpackages available:
if typing.TYPE_CHECKING:
import pulumi_azure_native.blockchain.v20180601preview as __v20180601preview
v20180601preview = __v20180601preview
else:
v20180601preview = _utilities.lazy_import('pulumi_azure_native.blockchain.v20180601preview')
|
py
|
1a56958b7e75554f1fa19283efd9c5ce5df7c159
|
import os
import os.path as osp
import argparse
import pickle
import numpy as np
from operator import itemgetter
import re
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('-appstr', type=str, default='unknown')
parser.add_argument('-ntask', type=int, default=1, help='number of tasks')
parser.add_argument("-bmin", type=int, default=1, help ='minimum value for bandit budget')
parser.add_argument("-bmax", type=int, default=8, help ='maximum value for bandit budget')
parser.add_argument("-eta", type=int, default=2, help ='base value for bandit structure')
parser.add_argument("-Nloop", type=int, default=1, help ='number of bandit loops')
parser.add_argument('-expid', type=str, default='0')
return parser.parse_args()
def main(args):
summary = []
my_source = f'./{args.appstr}_ntask{args.ntask}_bandit{args.bmin}-{args.bmax}-{args.eta}_Nloop{args.Nloop}_expid{args.expid}.txt'
save_path = f'./{args.appstr}_ntask{args.ntask}_bandit{args.bmin}-{args.bmax}-{args.eta}_Nloop{args.Nloop}_expid{args.expid}.pkl'
GPTuneBand_source = f'./{args.appstr}_ntask{args.ntask}_bandit{args.bmin}-{args.bmax}-{args.eta}_Nloop{args.Nloop}_expid{args.expid}_parsed.pkl'
with open(my_source, "r") as f:
line = f.readline()
while line:
info = line.split()
if (info[0] == 'Tuner:' and info[1] == "GPTuneBand"):
results = []
tunername = info[1]
results.append(tunername)
line = f.readline()
line = f.readline().split()
for _ in range(int(args.ntask)):
tid = int(line[1])
line = f.readline().split()
line = f.readline()
result = pickle.load(open(GPTuneBand_source, "rb"))
results.append(result)
if int(args.ntask) > 1:
line = f.readline().split()
summary.append(results)
line = f.readline()
elif (info[0] == 'Tuner:' and info[1] == "hpbandster"):
results = []
tunername = info[1]
results.append(tunername)
line = f.readline()
line = f.readline().split()
for _ in range(int(args.ntask)):
tid = int(line[1])
line = f.readline().split()
task = line[0][7:]
line = f.readline().strip(" Os ")
                data = [[float(y) for y in x.split(", ")] for x in re.split(r'\[\[|\]\]|\), \(|\(|\)', line) if len(x) > 2]
data = [y for y in data if y[1] < float("Inf")]
x = []
y = []
pre_fix = 0
max_num = -999
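                # First pass: find the largest bandit budget that appears in
                # the parsed data. Second pass: accumulate budget/max_budget
                # as a fractional iteration count, recording an (x, y) point
                # only for evaluations run at the full budget.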
for info in data:
if info[0] > max_num:
max_num = info[0]
for info in data:
pre_fix += info[0]/max_num
if np.isclose(info[0], max_num):
x.append(pre_fix)
y.append(info[1])
results.append([tid, task, [x, y]])
if int(args.ntask) > 1:
line = f.readline().split()
summary.append(results)
line = f.readline()
            else:  # GPTune, OpenTuner, and TPE
results = []
tunername = info[1]
results.append(tunername)
line = f.readline()
line = f.readline().split()
for _ in range(int(args.ntask)):
tid = int(line[1])
line = f.readline().split()
task = [x for x in line]
line = f.readline().strip(' Os [ ]\n')
                    history = [float(x) for x in re.split(r'\], \[', line)]
x = list(np.arange(1,len(history)+1))
results.append([tid, task, [x,history]])
if int(args.ntask) > 1:
line = f.readline().split()
summary.append(results)
line = f.readline()
    for results in summary:
        print(results)
print("Results saved to", save_path)
pickle.dump(summary, open(save_path, "wb"))
if __name__ == "__main__":
main(parse_args())
|
py
|
1a56959839188458fc00d01357c32e361852b688
|
from api.models.base import Base
db = Base.db
class Activity(Base):
"""Model activities available for points."""
__tablename__ = 'activities'
activity_type_id = db.Column(
db.String,
db.ForeignKey('activity_types.uuid'),
nullable=False
)
added_by_id = db.Column(
db.String,
db.ForeignKey('users.uuid'),
nullable=False
)
activity_date = db.Column(db.Date)
logged_activities = db.relationship(
'LoggedActivity',
back_populates='activity',
lazy='dynamic',
order_by='desc(LoggedActivity.created_at)'
)
activity_type = db.relationship(
'ActivityType',
back_populates='activities',
uselist=False
)
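# Hedged usage sketch (illustrative identifiers; assumes an application
# context plus existing ActivityType and User rows; fields inherited from
# Base are omitted):
#
#     from datetime import date
#     activity = Activity(
#         activity_type_id=hackathon_type.uuid,
#         added_by_id=current_user.uuid,
#         activity_date=date(2019, 3, 14),
#     )
#     db.session.add(activity)
#     db.session.commit()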
|
py
|
1a5695f333c44ffbd704837089d0f02e9ab6b365
|
import os
import glob
import argparse
import matplotlib
# Keras / TensorFlow
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '5'
from keras.models import load_model
from layers import BilinearUpSampling2D
from utils import predict, load_images, display_images
from matplotlib import pyplot as plt
# Argument Parser
parser = argparse.ArgumentParser(description='High Quality Monocular Depth Estimation via Transfer Learning')
parser.add_argument('--model', default='nyu.h5', type=str, help='Trained Keras model file.')
parser.add_argument('--input', default='examples/*.png', type=str, help='Input filename or folder.')
args = parser.parse_args()
# Custom object needed for inference and training
custom_objects = {'BilinearUpSampling2D': BilinearUpSampling2D, 'depth_loss_function': None}
print('Loading model...')
# Load model into GPU / CPU
model = load_model(args.model, custom_objects=custom_objects, compile=False)
print('\nModel loaded ({0}).'.format(args.model))
# Input images
inputs = load_images( glob.glob(args.input) )
print('\nLoaded ({0}) images of size {1}.'.format(inputs.shape[0], inputs.shape[1:]))
# Compute results
outputs = predict(model, inputs)
# fix for a matplotlib backend problem on Ubuntu terminals;
# uncomment if display fails:
# matplotlib.use('TkAgg')
# Display results
viz = display_images(outputs.copy(), inputs.copy())
plt.figure(figsize=(10,5))
plt.imshow(viz)
plt.savefig('test.png')
plt.show()
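# Example invocation (script filename illustrative; flags match the argparse
# defaults above):
#   python test.py --model nyu.h5 --input 'examples/*.png'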
|
py
|
1a5696030e1c9792567a9895fed93220b69da3e4
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10 on 2017-09-04 07:54
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('quiz', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='question',
name='type',
field=models.CharField(choices=[(b'mcq', b'mcq'), (b'fill', b'fill'), (b'code', b'code')], default=b'mcq', max_length=3),
),
]
|
py
|
1a5696966c6b293fd61e8ff44fbacd7ffe583ef8
|
import cv2
from time import sleep
cap = cv2.VideoCapture(0)
while True:
ret,frame = cap.read()
# Our operations on the frame come here
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
# Display the resulting frame
cv2.imshow('frame',gray)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
sleep(1)
cap.release()
cv2.destroyAllWindows()
|
py
|
1a5696fab7d50fca1ea3dee0cd3f74545fe66a20
|
"""Tests for run from CLI, interactive, without an EE, with unicode."""
import pytest
from ..._interactions import Command
from ..._interactions import UiTestStep
from ..._interactions import add_indices
from ..._interactions import step_id
from .base import BaseClass
from .base import base_steps
from .base import inventory_path
from .base import playbook_path
cmdline = f"{playbook_path} -i {inventory_path}"
CLI = Command(subcommand="run", cmdline=cmdline, execution_environment=False).join()
initial_steps = (
UiTestStep(
user_input=CLI,
comment="ansible-navigator run playbook",
search_within_response=["COMPLETE", "SUCCESSFUL"],
),
)
steps = add_indices(initial_steps + base_steps)
@pytest.mark.parametrize("step", steps, ids=step_id)
class Test(BaseClass):
"""Run the tests for run from CLI, interactive, without an EE."""
UPDATE_FIXTURES = False
|
py
|
1a569728f381664a1055351525ecbb700832d5bb
|
#!/usr/bin/env python
# coding: utf-8
import sys
from metaseq import *
from mqo import *
class MQSystemClass:
def __init__(self, filename):
self.mqo_loadFile(filename)
def mqo_loadFile(self, fname): # for ScriptRunner
self.doc = MQOReader.load(fname)
def mqo_saveFile(self, fname): # for ScriptRunner
MQOWriter.save(fname, self.doc)
def getDocument(self):
return self.doc
def newObject(self):
return MQObject()
def newMaterial(self):
return MQMaterial()
def newPoint(self, x,y,z):
return MQPoint(x,y,z)
def newCoordinate(self, u,v):
return MQCoordinate(u,v)
def newColor(self, r,g,b):
return MQColor(r,g,b)
def newAngle(self, head,pitch,bank):
return MQAngle(head,pitch,bank)
def newMatrix(self):
return MQMatrix()
def messageBox(self, msg):
print("# messageBox" + msg + "\n")
def println(self, s):
print(s)
def clearLog(self):
print("# ****** clear log ******")
class Script:
def __init__(self, script_path, mqsystem):
self.script_path = script_path
self.mqsystem = mqsystem
def run(self):
        MQSystem = self.mqsystem  # expose MQSystem to the executed script
        print("# exec script: " + self.script_path)
        # Python 3 replacement for the Python 2 execfile() builtin
        exec(open(self.script_path).read())
if __name__ == "__main__":
if len(sys.argv) < 2:
print("usage: {s} [-o OUTPUT.mqo] INPUT.mqo SCRIPT.py [SCRIPT2.py ...]".format(s = sys.argv[0]))
exit(1)
src_mqo_name = "in.mqo"
dst_mqo_name = "out.mqo"
scripts = []
it = iter(sys.argv[1:])
for f in it:
        if f == '-o':
            dst_mqo_name = next(it)  # Python 3: iterators have no .next() method
            continue
        if f == '-i':
            src_mqo_name = next(it)
            continue
if f.endswith('.mqo'):
src_mqo_name = f
else:
scripts.append(f)
mq = MQSystemClass(src_mqo_name)
for s in scripts:
Script(s, mq).run()
mq.mqo_saveFile(dst_mqo_name)
|
py
|
1a569787050d7aaa2cb8ef1ad34bebc233edd7f1
|
# gpl author: Ryan Inch (Imaginer)
import bpy
from bpy.types import Menu
from . import utils_core
class DynTopoMenu(Menu):
bl_label = "Dyntopo"
bl_idname = "VIEW3D_MT_sv3_dyntopo"
@classmethod
    def poll(cls, context):
return utils_core.get_mode() == 'SCULPT'
def draw(self, context):
layout = self.layout
if context.object.use_dynamic_topology_sculpting:
layout.row().operator("sculpt.dynamic_topology_toggle",
text="Disable Dynamic Topology")
layout.row().separator()
layout.row().menu(DynDetailMenu.bl_idname)
layout.row().menu(DetailMethodMenu.bl_idname)
layout.row().separator()
layout.row().operator("sculpt.optimize")
if context.tool_settings.sculpt.detail_type_method == 'CONSTANT':
layout.row().operator("sculpt.detail_flood_fill")
layout.row().menu(SymmetrizeMenu.bl_idname)
layout.row().prop(context.tool_settings.sculpt,
"use_smooth_shading", toggle=True)
else:
row = layout.row()
row.operator_context = 'INVOKE_DEFAULT'
row.operator("sculpt.dynamic_topology_toggle",
text="Enable Dynamic Topology")
class DynDetailMenu(Menu):
bl_label = "Detail Size"
bl_idname = "VIEW3D_MT_sv3_dyn_detail"
def init(self):
settings = (("40", 40),
("30", 30),
("20", 20),
("10", 10),
("5", 5),
("1", 1))
if bpy.context.tool_settings.sculpt.detail_type_method == 'RELATIVE':
datapath = "tool_settings.sculpt.detail_size"
slider_setting = "detail_size"
elif bpy.context.tool_settings.sculpt.detail_type_method == 'CONSTANT':
datapath = "tool_settings.sculpt.constant_detail_resolution"
slider_setting = "constant_detail_resolution"
else:
datapath = "tool_settings.sculpt.detail_percent"
slider_setting = "detail_percent"
settings = (("100", 100),
("75", 75),
("50", 50),
("25", 25),
("10", 10),
("5", 5))
return settings, datapath, slider_setting
def draw(self, context):
settings, datapath, slider_setting = self.init()
layout = self.layout
# add the top slider
layout.row().prop(context.tool_settings.sculpt,
slider_setting, slider=True)
layout.row().separator()
# add the rest of the menu items
for i in range(len(settings)):
utils_core.menuprop(
layout.row(), settings[i][0], settings[i][1], datapath,
icon='RADIOBUT_OFF', disable=True,
disable_icon='RADIOBUT_ON'
)
class DetailMethodMenu(Menu):
bl_label = "Detail Method"
bl_idname = "VIEW3D_MT_sv3_detail_method_menu"
def draw(self, context):
layout = self.layout
refine_path = "tool_settings.sculpt.detail_refine_method"
type_path = "tool_settings.sculpt.detail_type_method"
refine_items = (("Subdivide Edges", 'SUBDIVIDE'),
("Collapse Edges", 'COLLAPSE'),
("Subdivide Collapse", 'SUBDIVIDE_COLLAPSE'))
type_items = (("Relative Detail", 'RELATIVE'),
("Constant Detail", 'CONSTANT'),
("Brush Detail", 'BRUSH'))
layout.row().label(text="Refine")
layout.row().separator()
# add the refine menu items
for item in refine_items:
utils_core.menuprop(
layout.row(), item[0], item[1],
refine_path, disable=True,
icon='RADIOBUT_OFF',
disable_icon='RADIOBUT_ON'
)
layout.row().label(text="")
layout.row().label(text="Type")
layout.row().separator()
# add the type menu items
for item in type_items:
utils_core.menuprop(
layout.row(), item[0], item[1],
type_path, disable=True,
icon='RADIOBUT_OFF', disable_icon='RADIOBUT_ON'
)
class SymmetrizeMenu(Menu):
bl_label = "Symmetrize"
bl_idname = "VIEW3D_MT_sv3_symmetrize_menu"
def draw(self, context):
layout = self.layout
path = "tool_settings.sculpt.symmetrize_direction"
        # add the symmetrize operator to the menu
layout.row().operator("sculpt.symmetrize")
layout.row().separator()
# add the rest of the menu items
for item in context.tool_settings.sculpt. \
bl_rna.properties['symmetrize_direction'].enum_items:
utils_core.menuprop(
layout.row(), item.name, item.identifier,
path, disable=True,
icon='RADIOBUT_OFF', disable_icon='RADIOBUT_ON'
)
classes = (
DynTopoMenu,
DynDetailMenu,
DetailMethodMenu,
SymmetrizeMenu
)
def register():
for cls in classes:
bpy.utils.register_class(cls)
def unregister():
for cls in classes:
bpy.utils.unregister_class(cls)
|
py
|
1a5697e2b529a52ed8bf87df8dbe4457633b5ce7
|
#
# Copyright (c) 2021, Neptune Labs Sp. z o.o.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import random
import pytest
from PIL import Image
from e2e_tests.base import AVAILABLE_CONTAINERS, BaseE2ETest, fake
from e2e_tests.utils import generate_image, image_to_png, tmp_context
from neptune.new.metadata_containers import MetadataContainer
class TestSeries(BaseE2ETest):
@pytest.mark.parametrize("container", AVAILABLE_CONTAINERS, indirect=True)
def test_log_numbers(self, container: MetadataContainer):
key = self.gen_key()
values = [random.random() for _ in range(50)]
container[key].log(values[0])
container[key].log(values[1:])
container.sync()
assert container[key].fetch_last() == values[-1]
fetched_values = container[key].fetch_values()
assert list(fetched_values["value"]) == values
@pytest.mark.parametrize("container", AVAILABLE_CONTAINERS, indirect=True)
def test_log_strings(self, container: MetadataContainer):
key = self.gen_key()
values = [fake.word() for _ in range(50)]
container[key].log(values[0])
container[key].log(values[1:])
container.sync()
assert container[key].fetch_last() == values[-1]
fetched_values = container[key].fetch_values()
assert list(fetched_values["value"]) == values
@pytest.mark.parametrize("container", AVAILABLE_CONTAINERS, indirect=True)
def test_log_images(self, container: MetadataContainer):
key = self.gen_key()
# images with size between 200KB - 12MB
images = list(generate_image(size=2**n) for n in range(8, 12))
container[key].log(images[0])
container[key].log(images[1:])
container.sync()
with tmp_context():
container[key].download_last("last")
container[key].download("all")
with Image.open("last/3.png") as img:
assert img == image_to_png(image=images[-1])
for i in range(4):
with Image.open(f"all/{i}.png") as img:
assert img == image_to_png(image=images[i])
|
py
|
1a5698d3005efcd63f7d7d9c65d6f8ab771a3f3b
|
# Generated by Django 2.1 on 2018-08-21 18:29
from django.db import migrations, models
import mainapp.utils.model_utils
class Migration(migrations.Migration):
dependencies = [
('mainapp', '0070_merge_20180821_2307'),
]
operations = [
migrations.CreateModel(
name='CsvFileUpload',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=20)),
('csv_file', models.FileField(upload_to=mainapp.utils.model_utils.upload_to)),
],
),
]
|
py
|
1a56995c7e1d3f48080a0676bc784f9e937619d7
|
from .. import Provider as BankProvider
class Provider(BankProvider):
bban_format = '????##########'
country_code = 'NL'
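# Usage sketch (standard faker locale loading; the IBAN value is illustrative):
#   from faker import Faker
#   fake = Faker('nl_NL')
#   fake.iban()  # e.g. 'NL02ABNA0123456789' -- 'NL' + check digits + bban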
|
py
|
1a569964e4753b684bebaddb6b3f87b9199315f0
|
import copy
import time
from os import write
import matplotlib as mpl
import matplotlib.lines as lines
import matplotlib.patches as patches
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import streamlit as st
from matplotlib.patches import Rectangle
from numpy.lib.function_base import interp
from numpy.lib.npyio import save
from scipy import interpolate
from util import write_excel
# (Ported from a Mathematica Manipulate graphic: an ammeter dial with needle,
# the water-filled container, a 1 ohm resistor with leads, a thermometer scale
# labeled 15/20/25 degrees C, and readouts for "total work [J]" and "T [C]".
# The original Mathematica GraphicsBox specification is elided.)
T = np.arange(0, 101, dtype=float)
cP = np.array([
4.217,
4.213,
4.21,
4.207,
4.205,
4.202,
4.2,
4.198,
4.196,
4.194,
4.192,
4.191,
4.189,
4.188,
4.187,
4.186,
4.185,
4.184,
4.183,
4.182,
4.182,
4.181,
4.181,
4.18,
4.18,
4.18,
4.179,
4.179,
4.179,
4.179,
4.178,
4.178,
4.178,
4.178,
4.178,
4.178,
4.178,
4.178,
4.178,
4.179,
4.179,
4.179,
4.179,
4.179,
4.179,
4.18,
4.18,
4.18,
4.18,
4.181,
4.181,
4.181,
4.182,
4.182,
4.182,
4.183,
4.183,
4.183,
4.184,
4.184,
4.185,
4.185,
4.186,
4.186,
4.187,
4.187,
4.188,
4.188,
4.189,
4.189,
4.19,
4.19,
4.191,
4.192,
4.192,
4.193,
4.194,
4.194,
4.195,
4.196,
4.196,
4.197,
4.198,
4.199,
4.2,
4.2,
4.201,
4.202,
4.203,
4.204,
4.205,
4.206,
4.207,
4.208,
4.209,
4.21,
4.211,
4.212,
4.213,
4.214,
4.216
])
cP_water = interpolate.interp1d(T, cP)
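# Example (a sketch of the interpolant): it returns the tabulated values at
# whole degrees and linear blends in between, e.g. cP_water(25.0) -> 4.180
# J/(g K), while cP_water(25.5) falls between the 25 and 26 degree entries.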
def arrow(angle):
angle = np.radians(angle)
length = 0.1
return patches.FancyArrow(0.5, -0.12, np.cos(angle)*length, np.sin(angle)*length)
def rect(bl, tr, **kwargs):
h = tr[1] - bl[1]
w = tr[0] - bl[0]
return patches.Rectangle(bl, w, h, **kwargs)
def draw(current, container):
fig, ax = plt.subplots(figsize=(4,4))
ax.axis('off')
water_color = "#d7f1fa"
# rect((0.4,-0.13),(0.6,0.01), fc="1", ec="0", linewidth=1)
water = patches.Rectangle((0.1,0.1), 0.8, 0.8, linewidth=1, ec="0",
fc='#d7f1fa')
waterCu = patches.Rectangle((0.1,0.1), 0.8, 0.8, linewidth=3, ec="#b87333",
fc='#d7f1fa')
container_dict= {"Dewar": [patches.Rectangle((0.05,0.05), 0.9, 0.9, linewidth=1, ec="0",
fc='1'), water],
"Styrofoam": [patches.Rectangle((0.05,0.05), 0.9, 0.9, linewidth=1, ec="0",
fc='0.9'), water],
"Cu in Air": [patches.Rectangle((0.05,0.05), 0.9, 0.9, linewidth=1, ec="0",
fc='1'), waterCu],
"Cu in Water": [patches.Rectangle((0.05,0.05), 0.9, 0.9, linewidth=1, ec="0",
fc=water_color), waterCu]
}
shapes = [
rect((0.4,-0.13),(0.6,0.01), fc="1", ec="0", linewidth=1),
*container_dict[container],
patches.Rectangle((0.36,0.15), 0.64-0.36, 0.08, fc="#835828", linewidth=0.5, ec="0"),
arrow(150-current*(120)/5),
]
for shape in shapes:
ax.add_patch(shape)
ax.add_line(lines.Line2D([0.40, 0.35, 0.35, 0.358], [-0.05, -0.05, 0.18, 0.18], color="0", linewidth=0.75))
ax.add_line(lines.Line2D([0.60, 0.65, 0.65, 0.642], [-0.05, -0.05, 0.18, 0.18], color="0", linewidth=0.75))
ax.text(0.43, -0.185, "Current", fontdict=dict(size=8))
ax.text(0.47, 0.17, "1 Ω", fontdict=dict(size=8, color='1'))
ax.set_xlim(0, 1)
ax.set_ylim(-0.2, 1)
# ax.set_aspect('equal')
return fig, ax
def simulate(Tsys, Tsurr, current, work, container):
c = container
work += current**2 * dt
cp = 18.01 * cP_water(Tsys)
Tsys = Tsys + (current**2 *dt - c*(Tsys-Tsurr)*dt)/cp
return work, Tsys
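# simulate() advances one forward-Euler step of the energy balance: electrical
# work in is I^2 * R * dt with R = 1 ohm, the Newton-cooling loss is
# c * (Tsys - Tsurr) * dt with c the wall conductance of the chosen container,
# and cp is the molar heat capacity of water (18.01 g/mol times the specific
# heat at Tsys).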
dt = 2.0
def run():
data_default = dict(t=[0], Tsys=[20.0],work=[0])
st.markdown("""# First Law of Thermodynamics
This interactive lets you explore the first law of thermodynamics for a system
consisting of 1 mol of water at a constant pressure of 1 atm. The sliders let you
- Control the temperature of the system ($T_\\text{sys}$) and surroundings ($T_\\text{surr}$).
- Do work on the system by controlling the current (0 to 5 A) through a 1 Ω resistor.
- Choose the walls of the container; note that the dewar has perfectly adiabatic walls.
""")
if 'running' not in st.session_state:
st.session_state.running = False
containers = {"Dewar": 0, "Styrofoam": 0.03, "Cu in Air": 0.3, "Cu in Water": 10}
containers_list = list(containers.keys())
container_index_dict = {name: i for i, name in enumerate(containers_list)}
if 'container' not in st.session_state:
st.session_state.container = "Dewar"
if 'data' not in st.session_state:
st.session_state.data = copy.copy(data_default)
Tsys = st.sidebar.slider("System temperature (°C)", value=float(st.session_state.data["Tsys"][-1]), max_value=100.0, min_value=0.0, step=0.1)
Tsurr = st.sidebar.slider("Surroundings temperature (°C)", value=20.0, max_value=100.0, min_value=0.0, step=0.1)
current = st.sidebar.slider("Current (A)", value=0.0, min_value=0.0, max_value=5.0, step=0.01)
container = st.sidebar.selectbox("System walls:", containers_list)
st.session_state.container = containers[container]
button_text = "Pause" if st.session_state.running else "Run"
start_stop_sim = st.sidebar.button(f"{button_text} simulation")
if start_stop_sim:
st.session_state.running = not st.session_state.running
        if st.session_state.running:  # sync the slider temperature into stored data when resuming
            st.session_state.data["Tsys"][-1] = Tsys
st.experimental_rerun()
    reset_simulation = st.sidebar.button("Reset simulation")
if reset_simulation:
st.session_state.running = False
st.session_state.data = copy.copy(data_default)
st.session_state.container = "Dewar"
st.experimental_rerun()
if st.session_state.running:
st.markdown("### Simulation state: running")
else:
st.markdown("### Simulation state: paused")
work = st.session_state.data["work"][-1]
Tsys = st.session_state.data["Tsys"][-1]
st.markdown(f"""## Properties
$T_{{\\text{{sys}}}}$ = {Tsys:.2f} °C, $T_{{\\text{{surr}}}}$ = {Tsurr:.2f} °C
Total work $w$ = {work:.2f} J, Walls: {container}
""", unsafe_allow_html=True
)
fig, ax = draw(current, container)
st.pyplot(fig)
show_data = st.checkbox(label="Show data")
save_excel_button = False
if show_data:
df = pd.DataFrame(st.session_state.data)
df.rename(columns={"work": "work (J)", "Tsys": "Tsys (°C)", "t": "Time (s)"}, inplace=True)
st.write(df)
filename = st.text_input("Filename:", value="CHE341-1stLaw-data")
save_excel_button = st.button("Save to Excel")
if save_excel_button:
write_excel(df, filename)
# Needs to be at the bottom
if st.session_state.running:
work, Tsys = simulate(Tsys, Tsurr, current, work, st.session_state.container)
st.session_state.data["work"].append(work)
st.session_state.data["Tsys"].append(Tsys)
st.session_state.data['t'].append(st.session_state.data['t'][-1]+dt)
time.sleep(0.5)
st.experimental_rerun()
if __name__ == '__main__':
run()
|
py
|
1a569a60fe15963ff8bd498f56f7244681de2332
|
# Copyright 2016 Mycroft AI, Inc.
#
# This file is part of Mycroft Core.
#
# Mycroft Core is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Mycroft Core is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Mycroft Core. If not, see <http://www.gnu.org/licenses/>.
from __future__ import print_function
from adapt.intent import IntentBuilder
from mycroft.skills.core import MycroftSkill, intent_handler
from mycroft.util.log import getLogger
from mycroft.skills.context import *
import math
__author__ = 'TREE'
LOGGER = getLogger(__name__)
class MathSkill(MycroftSkill):
def __init__(self):
super(MathSkill, self).__init__(name="MathSkill")
@intent_handler(IntentBuilder("MathStartIntent").require("MathStart").build())
@adds_context('MathContext')
def handle_math_start(self, message):
self.speak('Please provide the first number.', expect_response=True)
@intent_handler(IntentBuilder("FirstNumberIntent").require("Num1").require("MathContext").build())
@adds_context('FirstNumberContext')
def handle_first_number(self, message):
#utterance = message.data.get('utterance')
self.num1 = message.data.get("Num1")
self.speak('Please provide the second number.', expect_response=True)
print(self.num1)
@intent_handler(IntentBuilder("SecondNumberIntent").require("Num2").require("FirstNumberContext").build())
@adds_context('SecondNumberContext')
@removes_context('FirstNumberContext')
def handle_second_number(self, message):
#utterance = message.data.get('utterance')
self.num2 = message.data.get("Num2")
self.speak('What operation would you like to do', expect_response=True)
print(self.num2)
@intent_handler(IntentBuilder('CalculateIntent').require('Calculate').require('SecondNumberContext').build())
@adds_context('CalculateContext')
@removes_context('SecondNumberContext')
def handle_calculate(self, message):
utterance = message.data.get('utterance')
#print(utterance)
if "add" in utterance:
self.answer = float(self.num1) + float(self.num2)
self.speak('The answer is {}.'.format(self.answer))
elif "multiply" in utterance:
self.answer = float(self.num1) * float(self.num2)
self.speak('The answer is {}.'.format(self.answer))
elif "divide" in utterance:
self.answer = float(self.num1) / float(self.num2)
self.speak('The answer is {}.'.format(self.answer))
elif "subtract" in utterance:
self.answer = float(self.num1) - float(self.num2)
            self.speak('The answer is {}.'.format(self.answer))
self.speak('Would you like to perform another operation?', expect_response=True)
@intent_handler(IntentBuilder('NextCalculationIntent').require('Calculate').require('Num').require('CalculateContext').build())
def handle_next_calculation(self, message):
utterance = message.data.get('utterance')
self.num = message.data.get("Num")
print(utterance)
print(self.num)
if "add" in utterance:
self.answer = float(self.answer) + float(self.num)
self.speak('The answer is {}.'.format(self.answer))
elif "multiply" in utterance:
self.answer = float(self.answer) * float(self.num)
self.speak('The answer is {}.'.format(self.answer))
elif "x" in utterance:
self.answer = float(self.answer) * float(self.num)
self.speak('The answer is {}.'.format(self.answer))
elif "divide" in utterance:
self.answer = float(self.answer) / float(self.num)
self.speak('The answer is {}.'.format(self.answer))
elif "subtract" in utterance:
self.answer = float(self.answer) - float(self.num)
self.speak('The answer is {}.'.format(self.answer))
elif "square root" in utterance:
self.answer = math.sqrt(self.answer)
self.speak('The answer is {}.'.format(self.answer))
self.speak('Would you like to perform another operation?', expect_response=True)
@intent_handler(IntentBuilder("TangentIntent").require("Tangent").require("Num").build())
def handle_tangent(self, message):
utterance = message.data.get('utterance')
self.num = message.data.get("Num")
number = float(self.num)
if "degrees" in utterance:
self.answer = math.tan(math.radians(number))
self.speak('The answer is {:f} degrees.'.format(self.answer))
elif "radians" in utterance:
self.answer = math.tan(number)
self.speak('The answer is {:f} radians.'.format(self.answer))
else:
self.answer = math.tan(number)
self.speak('The answer is {:f} radians.'.format(self.answer))
def create_skill():
return MathSkill()
|
py
|
1a569aa0784f3860c5a1d8d7049086ea828788c8
|
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Helper functions for building example regressor Estimator models."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# Standard Imports
import tensorflow as tf
from tensorflow_model_analysis.eval_saved_model import export
from tensorflow_model_analysis.eval_saved_model import util
def make_regressor_input_fn(feature_spec):
"""Train input function.
Args:
feature_spec: a dictionary mapping feature_name to Tensor or SparseTensor.
Returns:
A function.
"""
def _input_fn():
"""Example-based input function."""
serialized_examples = [
x.SerializeToString() for x in [
util.make_example(age=1.0, language='english', label=4.0),
util.make_example(age=2.0, language='english', label=7.0),
util.make_example(age=3.0, language='english', label=10.0),
util.make_example(age=4.0, language='english', label=13.0),
util.make_example(age=1.0, language='chinese', label=3.0),
util.make_example(age=2.0, language='chinese', label=6.0),
util.make_example(age=3.0, language='chinese', label=9.0),
util.make_example(age=4.0, language='chinese', label=12.0),
util.make_example(age=10.0, language='english', label=31.0),
util.make_example(age=20.0, language='english', label=61.0),
util.make_example(age=30.0, language='english', label=91.0),
util.make_example(age=40.0, language='english', label=121.0),
util.make_example(age=10.0, language='chinese', label=30.0),
util.make_example(age=20.0, language='chinese', label=60.0),
util.make_example(age=30.0, language='chinese', label=90.0),
util.make_example(age=40.0, language='chinese', label=120.0)
]
]
features = tf.io.parse_example(
serialized=serialized_examples, features=feature_spec)
labels = features.pop('label')
return features, labels
return _input_fn
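# Usage sketch for the factory above (hypothetical feature spec and estimator,
# shown only for illustration):
#   feature_spec = {'age': tf.io.FixedLenFeature([], tf.float32),
#                   'language': tf.io.VarLenFeature(tf.string),
#                   'label': tf.io.FixedLenFeature([], tf.float32)}
#   estimator.train(input_fn=make_regressor_input_fn(feature_spec), steps=1)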
def make_classifier_input_fn(feature_spec, n_classes=2, label_vocabulary=None):
"""Train input function.
Args:
feature_spec: a dictionary mapping feature_name to Tensor or SparseTensor.
n_classes: set for multiclass.
label_vocabulary: (Optional) Label vocabulary to use for labels.
Returns:
A function.
"""
def _input_fn():
"""Example-based input function."""
english_label = label_vocabulary[1] if label_vocabulary else 1.0
chinese_label = label_vocabulary[0] if label_vocabulary else 0.0
if n_classes > 2:
# For multi-class labels, English is class 0, Chinese is class 1.
chinese_label = label_vocabulary[1] if label_vocabulary else 1
english_label = label_vocabulary[0] if label_vocabulary else 0
serialized_examples = [
x.SerializeToString() for x in [
util.make_example(age=1.0, language='english', label=english_label),
util.make_example(age=2.0, language='english', label=english_label),
util.make_example(age=3.0, language='chinese', label=chinese_label),
util.make_example(age=4.0, language='chinese', label=chinese_label)
]
]
features = tf.io.parse_example(
serialized=serialized_examples, features=feature_spec)
labels = features.pop('label')
if n_classes > 2 and not label_vocabulary:
labels = tf.sparse.to_dense(labels, default_value=-1)
return features, labels
return _input_fn
def make_example(age, language, label=None):
example = tf.train.Example()
example.features.feature['age'].float_list.value.append(age)
example.features.feature['language'].bytes_list.value.append(language)
  if label is not None:  # a plain truthiness test would drop a legitimate 0 label
if isinstance(label, list):
example.features.feature['label'].int64_list.value.extend(label)
else:
example.features.feature['label'].float_list.value.append(label)
return example
def linear_columns(include_label_column=False):
"""Return feature_columns for linear model."""
language = tf.feature_column.indicator_column(
tf.feature_column.categorical_column_with_vocabulary_list(
key='language', vocabulary_list=('english', 'chinese')))
age = tf.feature_column.numeric_column(key='age', default_value=0.0)
features = [age, language]
if include_label_column:
label = tf.feature_column.numeric_column(key='label', default_value=0.0)
features.append(label)
return features
def dnn_columns(include_label_column=False, n_classes=2):
"""Return feature_columns for DNN model."""
language = tf.feature_column.embedding_column(
tf.feature_column.categorical_column_with_vocabulary_list(
key='language', vocabulary_list=('english', 'chinese')),
dimension=1)
age = tf.feature_column.numeric_column(key='age', default_value=0.0)
features = [age, language]
if include_label_column:
label = tf.feature_column.numeric_column(key='label', default_value=0.0)
if n_classes > 2:
label = tf.feature_column.categorical_column_with_identity(
key='label', num_buckets=n_classes)
features.append(label)
return features
def regressor_extra_metrics(features, labels, predictions):
return {
'my_mean_prediction':
tf.compat.v1.metrics.mean(predictions['predictions']),
'my_mean_age':
tf.compat.v1.metrics.mean(features['age']),
'my_mean_label':
tf.compat.v1.metrics.mean(labels),
'my_mean_age_times_label':
tf.compat.v1.metrics.mean(labels * features['age']),
}
def classifier_extra_metrics(features, labels, predictions):
"""Returns extra metrics to use with classifier."""
if 'logistic' in predictions:
metrics = {
'my_mean_prediction':
tf.compat.v1.metrics.mean(predictions['logistic']),
'my_mean_age':
tf.compat.v1.metrics.mean(features['age']),
}
if labels.dtype != tf.string:
metrics.update({
'my_mean_label':
tf.compat.v1.metrics.mean(labels),
'my_mean_age_times_label':
tf.compat.v1.metrics.mean(labels * features['age']),
})
return metrics
# Logistic won't be present in multiclass cases.
return {
'mean_english_prediction':
tf.compat.v1.metrics.mean(predictions['probabilities'][0]),
'my_mean_age':
tf.compat.v1.metrics.mean(features['age']),
}
def export_model_and_eval_model(estimator,
serving_input_receiver_fn=None,
eval_input_receiver_fn=None,
export_path=None,
eval_export_path=None):
"""Export SavedModel and EvalSavedModel.
Args:
estimator: Estimator to export.
serving_input_receiver_fn: Serving input receiver function.
eval_input_receiver_fn: Eval input receiver function.
export_path: Export path. If None, inference model is not exported.
eval_export_path: Eval export path. If None, EvalSavedModel is not exported.
Returns:
Tuple of (path to the export directory, path to eval export directory).
"""
export_path_result = None
eval_export_path_result = None
if export_path and serving_input_receiver_fn:
export_path_result = estimator.export_saved_model(
export_dir_base=export_path,
serving_input_receiver_fn=serving_input_receiver_fn)
if eval_export_path and eval_input_receiver_fn:
eval_export_path_result = export.export_eval_savedmodel(
estimator=estimator,
export_dir_base=eval_export_path,
eval_input_receiver_fn=eval_input_receiver_fn,
serving_input_receiver_fn=serving_input_receiver_fn)
return export_path_result, eval_export_path_result
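# Usage sketch (hypothetical paths; the receiver functions come from the
# export/util helpers imported above):
#   export_model_and_eval_model(estimator,
#                               serving_input_receiver_fn=serving_fn,
#                               eval_input_receiver_fn=eval_fn,
#                               export_path='/tmp/export',
#                               eval_export_path='/tmp/eval_export')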
|